Diffstat (limited to 'drivers/gpu/drm/i915/display')
-rw-r--r--  drivers/gpu/drm/i915/display/g4x_dp.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/g4x_hdmi.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/icl_dsi.c | 14
-rw-r--r--  drivers/gpu/drm/i915/display/intel_acpi.c | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic_plane.c | 209
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.c | 47
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bw.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cdclk.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_combo_phy.c | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cursor.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c | 76
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h | 18
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 784
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.h | 15
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_debugfs.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_types.h | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 29
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_link_training.c | 352
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_link_training.h | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.c | 15
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.h | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpio_phy.c | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsi_vbt.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb_pin.c | 274
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb_pin.h | 28
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbdev.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fdi.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hotplug.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_plane_initial.c | 283
-rw-r--r--  drivers/gpu/drm/i915/display/intel_plane_initial.h | 13
-rw-r--r--  drivers/gpu/drm/i915/display/intel_snps_phy.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi_pll.c | 2
37 files changed, 1307 insertions(+), 931 deletions(-)
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index 60ae2ba52006..dc41868d01ef 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -18,7 +18,7 @@
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_pps.h"
-#include "intel_sideband.h"
+#include "vlv_sideband.h"
struct dp_link_dpll {
int clock;
@@ -637,7 +637,7 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp,
/* enable with pattern 1 (as per spec) */
intel_dp_program_link_training_pattern(intel_dp, crtc_state,
- DP_TRAINING_PATTERN_1);
+ DP_PHY_DPRX, DP_TRAINING_PATTERN_1);
/*
* Magic for VLV/CHV. We _must_ first set up the register
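The extra DP_PHY_DPRX argument threaded through above comes from the DRM DP helpers' drm_dp_phy enumeration: the link-training helpers in this series take an explicit PHY so the same pattern-programming path can target either the sink's own receiver (DP_PHY_DPRX) or an individual LTTPR hop. A minimal sketch of the two call forms, assuming only what the hunk itself shows plus the DP_PHY_LTTPR() macro from drm_dp_helper.h; the second call is illustrative, not taken from this patch:

    /* program TPS1 at the sink's own DPRX, as in the hunk above */
    intel_dp_program_link_training_pattern(intel_dp, crtc_state,
                                           DP_PHY_DPRX, DP_TRAINING_PATTERN_1);
    /* the same call aimed at the first LTTPR hop */
    intel_dp_program_link_training_pattern(intel_dp, crtc_state,
                                           DP_PHY_LTTPR(0), DP_TRAINING_PATTERN_1);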
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index be352e9f0afc..88c427f3c346 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -14,8 +14,8 @@
#include "intel_fifo_underrun.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
-#include "intel_sideband.h"
#include "intel_sdvo.h"
+#include "vlv_sideband.h"
static void intel_hdmi_prepare(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 9ee62707ec72..168c84a74d30 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -233,7 +233,7 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
* Program voltage swing and pre-emphasis level values as per
* table in BSPEC under DDI buffer programing
*/
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK);
tmp |= SCALING_MODE_SEL(0x2);
tmp |= TAP2_DISABLE | TAP3_DISABLE;
@@ -247,7 +247,7 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
tmp |= RTERM_SELECT(0x6);
intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp);
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN0(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN(0, phy));
tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
RCOMP_SCALAR_MASK);
tmp |= SWING_SEL_UPPER(0x2);
@@ -455,7 +455,7 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
tmp &= ~FRC_LATENCY_OPTIM_MASK;
tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
intel_de_write(dev_priv, ICL_PORT_TX_DW2_AUX(phy), tmp);
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN0(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN(0, phy));
tmp &= ~FRC_LATENCY_OPTIM_MASK;
tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), tmp);
@@ -470,7 +470,7 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
tmp);
tmp = intel_de_read(dev_priv,
- ICL_PORT_PCS_DW1_LN0(phy));
+ ICL_PORT_PCS_DW1_LN(0, phy));
tmp &= ~LATENCY_OPTIM_MASK;
tmp |= LATENCY_OPTIM_VAL(0x1);
intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy),
@@ -489,7 +489,7 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
/* clear common keeper enable bit */
for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN0(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy));
tmp &= ~COMMON_KEEPER_EN;
intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), tmp);
tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_AUX(phy));
@@ -510,7 +510,7 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
/* Clear training enable to change swing values */
for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
tmp &= ~TX_TRAINING_EN;
intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp);
tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy));
@@ -523,7 +523,7 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
/* Set training enable to trigger update */
for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
tmp |= TX_TRAINING_EN;
intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp);
tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy));
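The icl_dsi.c hunks above (and the matching ones in intel_combo_phy.c and intel_ddi.c further down) replace the dedicated lane-0 register macros with the general per-lane macro evaluated at lane 0, so the *_LN0() variants can be retired. Presumably the two spellings name the same register, along these lines (assumed header definitions, not part of this diff):

    #define ICL_PORT_TX_DW5_LN0(phy)  ICL_PORT_TX_DW5_LN(0, phy)  /* old, redundant */
    tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));    /* new spelling */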
diff --git a/drivers/gpu/drm/i915/display/intel_acpi.c b/drivers/gpu/drm/i915/display/intel_acpi.c
index 72cac55c0f0f..e78430001f07 100644
--- a/drivers/gpu/drm/i915/display/intel_acpi.c
+++ b/drivers/gpu/drm/i915/display/intel_acpi.c
@@ -186,13 +186,16 @@ void intel_dsm_get_bios_data_funcs_supported(struct drm_i915_private *i915)
{
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
acpi_handle dhandle;
+ union acpi_object *obj;
dhandle = ACPI_HANDLE(&pdev->dev);
if (!dhandle)
return;
- acpi_evaluate_dsm(dhandle, &intel_dsm_guid2, INTEL_DSM_REVISION_ID,
- INTEL_DSM_FN_GET_BIOS_DATA_FUNCS_SUPPORTED, NULL);
+ obj = acpi_evaluate_dsm(dhandle, &intel_dsm_guid2, INTEL_DSM_REVISION_ID,
+ INTEL_DSM_FN_GET_BIOS_DATA_FUNCS_SUPPORTED, NULL);
+ if (obj)
+ ACPI_FREE(obj);
}
/*
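The intel_acpi.c hunk plugs a small memory leak: acpi_evaluate_dsm() returns a caller-owned union acpi_object (or NULL on failure), and this _DSM is evaluated only for its side effect, so the result still has to be released with ACPI_FREE(). The general pattern, with hypothetical handle/GUID/function names:

    union acpi_object *obj;

    obj = acpi_evaluate_dsm(dhandle, &guid, rev, func_idx, NULL);
    if (obj)
        ACPI_FREE(obj);   /* _DSM results are allocated by ACPICA, caller frees */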
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 47234d898549..0be8c00e3db9 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -39,8 +39,10 @@
#include "intel_atomic_plane.h"
#include "intel_cdclk.h"
#include "intel_display_types.h"
+#include "intel_fb_pin.h"
#include "intel_pm.h"
#include "intel_sprite.h"
+#include "gt/intel_rps.h"
static void intel_plane_state_reset(struct intel_plane_state *plane_state,
struct intel_plane *plane)
@@ -601,6 +603,213 @@ int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state,
return 0;
}
+struct wait_rps_boost {
+ struct wait_queue_entry wait;
+
+ struct drm_crtc *crtc;
+ struct i915_request *request;
+};
+
+static int do_rps_boost(struct wait_queue_entry *_wait,
+ unsigned mode, int sync, void *key)
+{
+ struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
+ struct i915_request *rq = wait->request;
+
+ /*
+ * If we missed the vblank, but the request is already running it
+ * is reasonable to assume that it will complete before the next
+ * vblank without our intervention, so leave RPS alone.
+ */
+ if (!i915_request_started(rq))
+ intel_rps_boost(rq);
+ i915_request_put(rq);
+
+ drm_crtc_vblank_put(wait->crtc);
+
+ list_del(&wait->wait.entry);
+ kfree(wait);
+ return 1;
+}
+
+static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
+ struct dma_fence *fence)
+{
+ struct wait_rps_boost *wait;
+
+ if (!dma_fence_is_i915(fence))
+ return;
+
+ if (DISPLAY_VER(to_i915(crtc->dev)) < 6)
+ return;
+
+ if (drm_crtc_vblank_get(crtc))
+ return;
+
+ wait = kmalloc(sizeof(*wait), GFP_KERNEL);
+ if (!wait) {
+ drm_crtc_vblank_put(crtc);
+ return;
+ }
+
+ wait->request = to_request(dma_fence_get(fence));
+ wait->crtc = crtc;
+
+ wait->wait.func = do_rps_boost;
+ wait->wait.flags = 0;
+
+ add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
+}
+
+/**
+ * intel_prepare_plane_fb - Prepare fb for usage on plane
+ * @_plane: drm plane to prepare for
+ * @_new_plane_state: the plane state being prepared
+ *
+ * Prepares a framebuffer for usage on a display plane. Generally this
+ * involves pinning the underlying object and updating the frontbuffer tracking
+ * bits. Some older platforms need special physical address handling for
+ * cursor planes.
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+static int
+intel_prepare_plane_fb(struct drm_plane *_plane,
+ struct drm_plane_state *_new_plane_state)
+{
+ struct i915_sched_attr attr = { .priority = I915_PRIORITY_DISPLAY };
+ struct intel_plane *plane = to_intel_plane(_plane);
+ struct intel_plane_state *new_plane_state =
+ to_intel_plane_state(_new_plane_state);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(new_plane_state->uapi.state);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct intel_plane_state *old_plane_state =
+ intel_atomic_get_old_plane_state(state, plane);
+ struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
+ struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
+ int ret;
+
+ if (old_obj) {
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state,
+ to_intel_crtc(old_plane_state->hw.crtc));
+
+ /* Big Hammer, we also need to ensure that any pending
+ * MI_WAIT_FOR_EVENT inside a user batch buffer on the
+ * current scanout is retired before unpinning the old
+ * framebuffer. Note that we rely on userspace rendering
+ * into the buffer attached to the pipe they are waiting
+ * on. If not, userspace generates a GPU hang with IPEHR
+ * point to the MI_WAIT_FOR_EVENT.
+ *
+ * This should only fail upon a hung GPU, in which case we
+ * can safely continue.
+ */
+ if (intel_crtc_needs_modeset(crtc_state)) {
+ ret = i915_sw_fence_await_reservation(&state->commit_ready,
+ old_obj->base.resv, NULL,
+ false, 0,
+ GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ if (new_plane_state->uapi.fence) { /* explicit fencing */
+ i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
+ &attr);
+ ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
+ new_plane_state->uapi.fence,
+ i915_fence_timeout(dev_priv),
+ GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (!obj)
+ return 0;
+
+
+ ret = intel_plane_pin_fb(new_plane_state);
+ if (ret)
+ return ret;
+
+ i915_gem_object_wait_priority(obj, 0, &attr);
+
+ if (!new_plane_state->uapi.fence) { /* implicit fencing */
+ struct dma_fence *fence;
+
+ ret = i915_sw_fence_await_reservation(&state->commit_ready,
+ obj->base.resv, NULL,
+ false,
+ i915_fence_timeout(dev_priv),
+ GFP_KERNEL);
+ if (ret < 0)
+ goto unpin_fb;
+
+ fence = dma_resv_get_excl_unlocked(obj->base.resv);
+ if (fence) {
+ add_rps_boost_after_vblank(new_plane_state->hw.crtc,
+ fence);
+ dma_fence_put(fence);
+ }
+ } else {
+ add_rps_boost_after_vblank(new_plane_state->hw.crtc,
+ new_plane_state->uapi.fence);
+ }
+
+ /*
+ * We declare pageflips to be interactive and so merit a small bias
+ * towards upclocking to deliver the frame on time. By only changing
+ * the RPS thresholds to sample more regularly and aim for higher
+ * clocks we can hopefully deliver low power workloads (like kodi)
+ * that are not quite steady state without resorting to forcing
+ * maximum clocks following a vblank miss (see do_rps_boost()).
+ */
+ if (!state->rps_interactive) {
+ intel_rps_mark_interactive(&dev_priv->gt.rps, true);
+ state->rps_interactive = true;
+ }
+
+ return 0;
+
+unpin_fb:
+ intel_plane_unpin_fb(new_plane_state);
+
+ return ret;
+}
+
+/**
+ * intel_cleanup_plane_fb - Cleans up an fb after plane use
+ * @plane: drm plane to clean up for
+ * @_old_plane_state: the state from the previous modeset
+ *
+ * Cleans up a framebuffer that has just been removed from a plane.
+ */
+static void
+intel_cleanup_plane_fb(struct drm_plane *plane,
+ struct drm_plane_state *_old_plane_state)
+{
+ struct intel_plane_state *old_plane_state =
+ to_intel_plane_state(_old_plane_state);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(old_plane_state->uapi.state);
+ struct drm_i915_private *dev_priv = to_i915(plane->dev);
+ struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);
+
+ if (!obj)
+ return;
+
+ if (state->rps_interactive) {
+ intel_rps_mark_interactive(&dev_priv->gt.rps, false);
+ state->rps_interactive = false;
+ }
+
+ /* Should only be called after a successful intel_prepare_plane_fb()! */
+ intel_plane_unpin_fb(old_plane_state);
+}
+
static const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
.prepare_fb = intel_prepare_plane_fb,
.cleanup_fb = intel_cleanup_plane_fb,
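Both plane-fb hooks and the RPS-boost machinery now live here rather than in intel_display.c (see the large removals there below). do_rps_boost() is a one-shot custom wait-queue callback: the entry is armed on the CRTC's vblank waitqueue, and when the vblank wakes the queue it boosts the GPU clocks only if the flip's request has not started running yet. A stripped-down sketch of that waitqueue-callback pattern, independent of the i915 specifics (every name outside <linux/wait.h> and <linux/slab.h> is made up):

    #include <linux/slab.h>
    #include <linux/wait.h>

    struct my_waiter {
        struct wait_queue_entry wait;
        void *payload;
    };

    /* Runs in the waker's context when the queue is woken. */
    static int my_wake_fn(struct wait_queue_entry *entry, unsigned int mode,
                          int sync, void *key)
    {
        struct my_waiter *w = container_of(entry, struct my_waiter, wait);

        handle_payload(w->payload);   /* made-up work function */
        list_del(&entry->entry);      /* one-shot: drop ourselves from the queue */
        kfree(w);
        return 1;
    }

    static void arm_waiter(struct wait_queue_head *wq, void *payload)
    {
        struct my_waiter *w = kmalloc(sizeof(*w), GFP_KERNEL);

        if (!w)
            return;
        w->payload = payload;
        init_waitqueue_func_entry(&w->wait, my_wake_fn);
        add_wait_queue(wq, &w->wait);
    }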
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index f9776ca85de3..b99907c656bb 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -1930,6 +1930,50 @@ static int _intel_bios_max_tmds_clock(const struct intel_bios_encoder_data *devd
}
}
+static enum port get_edp_port(struct drm_i915_private *i915)
+{
+ const struct intel_bios_encoder_data *devdata;
+ enum port port;
+
+ for_each_port(port) {
+ devdata = i915->vbt.ports[port];
+
+ if (devdata && intel_bios_encoder_supports_edp(devdata))
+ return port;
+ }
+
+ return PORT_NONE;
+}
+
+/*
+ * FIXME: The power sequencer and backlight code currently do not support more
+ * than one set registers, at least not on anything other than VLV/CHV. It will
+ * clobber the registers. As a temporary workaround, gracefully prevent more
+ * than one eDP from being registered.
+ */
+static void sanitize_dual_edp(struct intel_bios_encoder_data *devdata,
+ enum port port)
+{
+ struct drm_i915_private *i915 = devdata->i915;
+ struct child_device_config *child = &devdata->child;
+ enum port p;
+
+ /* CHV might not clobber PPS registers. */
+ if (IS_CHERRYVIEW(i915))
+ return;
+
+ p = get_edp_port(i915);
+ if (p == PORT_NONE)
+ return;
+
+ drm_dbg_kms(&i915->drm, "both ports %c and %c configured as eDP, "
+ "disabling port %c eDP\n", port_name(p), port_name(port),
+ port_name(port));
+
+ child->device_type &= ~DEVICE_TYPE_DISPLAYPORT_OUTPUT;
+ child->device_type &= ~DEVICE_TYPE_INTERNAL_CONNECTOR;
+}
+
static bool is_port_valid(struct drm_i915_private *i915, enum port port)
{
/*
@@ -1987,6 +2031,9 @@ static void parse_ddi_port(struct drm_i915_private *i915,
supports_typec_usb, supports_tbt,
devdata->dsc != NULL);
+ if (is_edp)
+ sanitize_dual_edp(devdata, port);
+
if (is_dvi)
sanitize_ddc_pin(devdata, port);
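sanitize_dual_edp() above keeps the first VBT child device that claims eDP and strips the DP-output and internal-connector bits from any later one, so only a single eDP panel gets registered and the PPS/backlight code can keep assuming one set of registers. For orientation, the eDP test those bits feed into presumably reduces to something like the following (an assumption about the VBT parsing helpers, simplified and not part of this patch):

    static bool child_supports_edp(const struct child_device_config *child)
    {
        return (child->device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT) &&
               (child->device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
    }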
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 4b94256d7319..8d9d888e9316 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -9,8 +9,8 @@
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_display_types.h"
+#include "intel_pcode.h"
#include "intel_pm.h"
-#include "intel_sideband.h"
/* Parameters for Qclk Geyserville (QGV) */
struct intel_qgv_point {
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index ecb28e8f1eb6..9e466d829019 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -28,8 +28,9 @@
#include "intel_cdclk.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_pcode.h"
#include "intel_psr.h"
-#include "intel_sideband.h"
+#include "vlv_sideband.h"
/**
* DOC: CDCLK / RAWCLK
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index bacdf8a16bcb..634e8d449457 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -220,13 +220,13 @@ static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
return false;
if (DISPLAY_VER(dev_priv) >= 12) {
- ret &= check_phy_reg(dev_priv, phy, ICL_PORT_TX_DW8_LN0(phy),
+ ret &= check_phy_reg(dev_priv, phy, ICL_PORT_TX_DW8_LN(0, phy),
ICL_PORT_TX_DW8_ODCC_CLK_SEL |
ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK,
ICL_PORT_TX_DW8_ODCC_CLK_SEL |
ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2);
- ret &= check_phy_reg(dev_priv, phy, ICL_PORT_PCS_DW1_LN0(phy),
+ ret &= check_phy_reg(dev_priv, phy, ICL_PORT_PCS_DW1_LN(0, phy),
DCC_MODE_SELECT_MASK,
DCC_MODE_SELECT_CONTINUOSLY);
}
@@ -343,13 +343,13 @@ static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
skip_phy_misc:
if (DISPLAY_VER(dev_priv) >= 12) {
- val = intel_de_read(dev_priv, ICL_PORT_TX_DW8_LN0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_TX_DW8_LN(0, phy));
val &= ~ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK;
val |= ICL_PORT_TX_DW8_ODCC_CLK_SEL;
val |= ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2;
intel_de_write(dev_priv, ICL_PORT_TX_DW8_GRP(phy), val);
- val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy));
val &= ~DCC_MODE_SELECT_MASK;
val |= DCC_MODE_SELECT_CONTINUOSLY;
intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), val);
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index f6dcb5aa63f6..11842f212613 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -17,7 +17,7 @@
#include "intel_display_types.h"
#include "intel_display.h"
#include "intel_fb.h"
-
+#include "intel_fb_pin.h"
#include "intel_frontbuffer.h"
#include "intel_pm.h"
#include "intel_psr.h"
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 0d4cf7fa8720..1dcfe31e6c6f 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -1023,6 +1023,18 @@ static u8 intel_ddi_dp_preemph_max(struct intel_dp *intel_dp)
return DP_TRAIN_PRE_EMPH_LEVEL_3;
}
+static u32 icl_combo_phy_loadgen_select(const struct intel_crtc_state *crtc_state,
+ int lane)
+{
+ if (crtc_state->port_clock > 600000)
+ return 0;
+
+ if (crtc_state->lane_count == 4)
+ return lane >= 1 ? LOADGEN_SELECT : 0;
+ else
+ return lane == 1 || lane == 2 ? LOADGEN_SELECT : 0;
+}
+
static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
@@ -1047,7 +1059,7 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
}
/* Set PORT_TX_DW5 */
- val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK |
TAP2_DISABLE | TAP3_DISABLE);
val |= SCALING_MODE_SEL(0x2);
@@ -1056,7 +1068,7 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
/* Program PORT_TX_DW2 */
- val = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN(0, phy));
val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
RCOMP_SCALAR_MASK);
val |= SWING_SEL_UPPER(trans->entries[level].icl.dw2_swing_sel);
@@ -1067,7 +1079,7 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
/* Program PORT_TX_DW4 */
/* We cannot write to GRP. It would overwrite individual loadgen. */
- for (ln = 0; ln <= 3; ln++) {
+ for (ln = 0; ln < 4; ln++) {
val = intel_de_read(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy));
val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
@@ -1078,7 +1090,7 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
}
/* Program PORT_TX_DW7 */
- val = intel_de_read(dev_priv, ICL_PORT_TX_DW7_LN0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_TX_DW7_LN(0, phy));
val &= ~N_SCALAR_MASK;
val |= N_SCALAR(trans->entries[level].icl.dw7_n_scalar);
intel_de_write(dev_priv, ICL_PORT_TX_DW7_GRP(phy), val);
@@ -1089,18 +1101,15 @@ static void icl_combo_phy_set_signal_levels(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
- int width, rate, ln;
u32 val;
-
- width = crtc_state->lane_count;
- rate = crtc_state->port_clock;
+ int ln;
/*
* 1. If port type is eDP or DP,
* set PORT_PCS_DW1 cmnkeeper_enable to 1b,
* else clear to 0b.
*/
- val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy));
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
val &= ~COMMON_KEEPER_EN;
else
@@ -1109,19 +1118,15 @@ static void icl_combo_phy_set_signal_levels(struct intel_encoder *encoder,
/* 2. Program loadgen select */
/*
- * Program PORT_TX_DW4_LN depending on Bit rate and used lanes
+ * Program PORT_TX_DW4 depending on Bit rate and used lanes
* <= 6 GHz and 4 lanes (LN0=0, LN1=1, LN2=1, LN3=1)
* <= 6 GHz and 1,2 lanes (LN0=0, LN1=1, LN2=1, LN3=0)
* > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0)
*/
- for (ln = 0; ln <= 3; ln++) {
+ for (ln = 0; ln < 4; ln++) {
val = intel_de_read(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy));
val &= ~LOADGEN_SELECT;
-
- if ((rate <= 600000 && width == 4 && ln >= 1) ||
- (rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) {
- val |= LOADGEN_SELECT;
- }
+ val |= icl_combo_phy_loadgen_select(crtc_state, ln);
intel_de_write(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy), val);
}
@@ -1131,7 +1136,7 @@ static void icl_combo_phy_set_signal_levels(struct intel_encoder *encoder,
intel_de_write(dev_priv, ICL_PORT_CL_DW5(phy), val);
/* 4. Clear training enable to change swing values */
- val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
val &= ~TX_TRAINING_EN;
intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
@@ -1139,7 +1144,7 @@ static void icl_combo_phy_set_signal_levels(struct intel_encoder *encoder,
icl_ddi_combo_vswing_program(encoder, crtc_state);
/* 6. Set training enable to trigger update */
- val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
val |= TX_TRAINING_EN;
intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
}
@@ -1285,9 +1290,9 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder,
dpcnt_mask = (DKL_TX_PRESHOOT_COEFF_MASK |
DKL_TX_DE_EMPAHSIS_COEFF_MASK |
DKL_TX_VSWING_CONTROL_MASK);
- dpcnt_val = DKL_TX_VSWING_CONTROL(trans->entries[level].dkl.dkl_vswing_control);
- dpcnt_val |= DKL_TX_DE_EMPHASIS_COEFF(trans->entries[level].dkl.dkl_de_emphasis_control);
- dpcnt_val |= DKL_TX_PRESHOOT_COEFF(trans->entries[level].dkl.dkl_preshoot_control);
+ dpcnt_val = DKL_TX_VSWING_CONTROL(trans->entries[level].dkl.vswing);
+ dpcnt_val |= DKL_TX_DE_EMPHASIS_COEFF(trans->entries[level].dkl.de_emphasis);
+ dpcnt_val |= DKL_TX_PRESHOOT_COEFF(trans->entries[level].dkl.preshoot);
for (ln = 0; ln < 2; ln++) {
intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
@@ -1309,14 +1314,6 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder,
val = intel_de_read(dev_priv, DKL_TX_DPCNTL2(tc_port));
val &= ~DKL_TX_DP20BITMODE;
intel_de_write(dev_priv, DKL_TX_DPCNTL2(tc_port), val);
-
- if ((intel_crtc_has_dp_encoder(crtc_state) &&
- crtc_state->port_clock == 162000) ||
- (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
- crtc_state->port_clock == 594000))
- val |= DKL_TX_LOADGEN_SHARING_PMD_DISABLE;
- else
- val &= ~DKL_TX_LOADGEN_SHARING_PMD_DISABLE;
}
}
@@ -1338,13 +1335,20 @@ static int translate_signal_level(struct intel_dp *intel_dp,
return 0;
}
-static int intel_ddi_dp_level(struct intel_dp *intel_dp, int lane)
+static int intel_ddi_dp_level(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ int lane)
{
u8 train_set = intel_dp->train_set[lane];
- u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
- DP_TRAIN_PRE_EMPHASIS_MASK);
- return translate_signal_level(intel_dp, signal_levels);
+ if (intel_dp_is_uhbr(crtc_state)) {
+ return train_set & DP_TX_FFE_PRESET_VALUE_MASK;
+ } else {
+ u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+ DP_TRAIN_PRE_EMPHASIS_MASK);
+
+ return translate_signal_level(intel_dp, signal_levels);
+ }
}
int intel_ddi_level(struct intel_encoder *encoder,
@@ -1362,7 +1366,8 @@ int intel_ddi_level(struct intel_encoder *encoder,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
level = intel_ddi_hdmi_level(encoder, trans);
else
- level = intel_ddi_dp_level(enc_to_intel_dp(encoder), lane);
+ level = intel_ddi_dp_level(enc_to_intel_dp(encoder), crtc_state,
+ lane);
if (drm_WARN_ON_ONCE(&i915->drm, level >= n_entries))
level = n_entries - 1;
@@ -3937,8 +3942,7 @@ static void intel_ddi_encoder_destroy(struct drm_encoder *encoder)
intel_display_power_flush_work(i915);
drm_encoder_cleanup(encoder);
- if (dig_port)
- kfree(dig_port->hdcp_port_data.streams);
+ kfree(dig_port->hdcp_port_data.streams);
kfree(dig_port);
}
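Two intel_ddi.c details worth spelling out: the new icl_combo_phy_loadgen_select() helper is a straight transcription of the loadgen table quoted in the comment above ("<= 6 GHz and 4 lanes ..."), and intel_ddi_dp_level() now pulls the per-lane drive level from the DPCD FFE-preset field (DP_TX_FFE_PRESET_VALUE_MASK) when the link runs 128b/132b, instead of the 8b/10b vswing/pre-emphasis fields. Worked loadgen examples (crtc_state values are illustrative):

    /*
     *   port_clock 540000 (HBR2), lane_count 4:
     *       lane 0 -> 0, lanes 1..3 -> LOADGEN_SELECT
     *   port_clock 270000 (HBR),  lane_count 2:
     *       lanes 1 and 2 -> LOADGEN_SELECT, lanes 0 and 3 -> 0
     *   port_clock 810000 (HBR3), any lane count:
     *       all lanes -> 0 (rate above 600000)
     */
    val |= icl_combo_phy_loadgen_select(crtc_state, ln);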
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
index a2d39131ea53..78cd8f77b49d 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
@@ -8,6 +8,7 @@
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_dp.h"
/* HDMI/DVI modes ignore everything but the last 2 items. So we share
* them for both DP and FDI transports, allowing those ports to
@@ -1611,7 +1612,8 @@ dg2_get_snps_buf_trans(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
{
- if (crtc_state->port_clock > 1000000)
+ if (intel_crtc_has_dp_encoder(crtc_state) &&
+ intel_dp_is_uhbr(crtc_state))
return intel_get_buf_trans(&dg2_snps_trans_uhbr, n_entries);
else
return intel_get_buf_trans(&dg2_snps_trans, n_entries);
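The DG2/SNPS table selection above now keys off intel_dp_is_uhbr() rather than a bare port_clock > 1000000 comparison, so the UHBR entries are only used when a DP encoder is actually driving a 128b/132b link. The helper itself is not part of this diff; presumably it reduces to a port-clock threshold, roughly:

    /* Assumed shape, for orientation only: UHBR10 is the lowest 128b/132b rate. */
    static bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state)
    {
        return crtc_state->port_clock >= 1000000;   /* 10 Gbps per lane and up */
    }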
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
index 6cdb8e9073c7..2133984a572b 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
@@ -34,21 +34,21 @@ struct icl_ddi_buf_trans {
};
struct icl_mg_phy_ddi_buf_trans {
- u32 cri_txdeemph_override_11_6;
- u32 cri_txdeemph_override_5_0;
- u32 cri_txdeemph_override_17_12;
+ u8 cri_txdeemph_override_11_6;
+ u8 cri_txdeemph_override_5_0;
+ u8 cri_txdeemph_override_17_12;
};
struct tgl_dkl_phy_ddi_buf_trans {
- u32 dkl_vswing_control;
- u32 dkl_preshoot_control;
- u32 dkl_de_emphasis_control;
+ u8 vswing;
+ u8 preshoot;
+ u8 de_emphasis;
};
struct dg2_snps_phy_buf_trans {
- u8 snps_vswing;
- u8 snps_pre_cursor;
- u8 snps_post_cursor;
+ u8 vswing;
+ u8 pre_cursor;
+ u8 post_cursor;
};
union intel_ddi_buf_trans_entry {
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 8faa7f729547..ff598b6cd953 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -68,7 +68,6 @@
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_object.h"
-#include "gt/intel_rps.h"
#include "gt/gen8_ppgtt.h"
#include "pxp/intel_pxp.h"
@@ -89,26 +88,29 @@
#include "intel_dp_link_training.h"
#include "intel_dpt.h"
#include "intel_fbc.h"
-#include "intel_fdi.h"
#include "intel_fbdev.h"
+#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_overlay.h"
#include "intel_panel.h"
+#include "intel_pcode.h"
#include "intel_pipe_crc.h"
+#include "intel_plane_initial.h"
#include "intel_pm.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_quirks.h"
-#include "intel_sideband.h"
+#include "intel_sbi.h"
#include "intel_sprite.h"
#include "intel_tc.h"
#include "intel_vga.h"
#include "i9xx_plane.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
+#include "vlv_sideband.h"
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
@@ -854,7 +856,7 @@ unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info
return size;
}
-static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
+bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
@@ -864,198 +866,6 @@ static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
plane_state->view.gtt.type == I915_GGTT_VIEW_NORMAL);
}
-static struct i915_vma *
-intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
- const struct i915_ggtt_view *view,
- bool uses_fence,
- unsigned long *out_flags,
- struct i915_address_space *vm)
-{
- struct drm_device *dev = fb->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct i915_vma *vma;
- u32 alignment;
- int ret;
-
- if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
- return ERR_PTR(-EINVAL);
-
- alignment = 4096 * 512;
-
- atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
-
- ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
- if (ret) {
- vma = ERR_PTR(ret);
- goto err;
- }
-
- vma = i915_vma_instance(obj, vm, view);
- if (IS_ERR(vma))
- goto err;
-
- if (i915_vma_misplaced(vma, 0, alignment, 0)) {
- ret = i915_vma_unbind(vma);
- if (ret) {
- vma = ERR_PTR(ret);
- goto err;
- }
- }
-
- ret = i915_vma_pin(vma, 0, alignment, PIN_GLOBAL);
- if (ret) {
- vma = ERR_PTR(ret);
- goto err;
- }
-
- vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
-
- i915_gem_object_flush_if_display(obj);
-
- i915_vma_get(vma);
-err:
- atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
-
- return vma;
-}
-
-struct i915_vma *
-intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
- bool phys_cursor,
- const struct i915_ggtt_view *view,
- bool uses_fence,
- unsigned long *out_flags)
-{
- struct drm_device *dev = fb->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- intel_wakeref_t wakeref;
- struct i915_gem_ww_ctx ww;
- struct i915_vma *vma;
- unsigned int pinctl;
- u32 alignment;
- int ret;
-
- if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj)))
- return ERR_PTR(-EINVAL);
-
- if (phys_cursor)
- alignment = intel_cursor_alignment(dev_priv);
- else
- alignment = intel_surf_alignment(fb, 0);
- if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment)))
- return ERR_PTR(-EINVAL);
-
- /* Note that the w/a also requires 64 PTE of padding following the
- * bo. We currently fill all unused PTE with the shadow page and so
- * we should always have valid PTE following the scanout preventing
- * the VT-d warning.
- */
- if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
- alignment = 256 * 1024;
-
- /*
- * Global gtt pte registers are special registers which actually forward
- * writes to a chunk of system memory. Which means that there is no risk
- * that the register values disappear as soon as we call
- * intel_runtime_pm_put(), so it is correct to wrap only the
- * pin/unpin/fence and not more.
- */
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
- atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
-
- /*
- * Valleyview is definitely limited to scanning out the first
- * 512MiB. Lets presume this behaviour was inherited from the
- * g4x display engine and that all earlier gen are similarly
- * limited. Testing suggests that it is a little more
- * complicated than this. For example, Cherryview appears quite
- * happy to scanout from anywhere within its global aperture.
- */
- pinctl = 0;
- if (HAS_GMCH(dev_priv))
- pinctl |= PIN_MAPPABLE;
-
- i915_gem_ww_ctx_init(&ww, true);
-retry:
- ret = i915_gem_object_lock(obj, &ww);
- if (!ret && phys_cursor)
- ret = i915_gem_object_attach_phys(obj, alignment);
- else if (!ret && HAS_LMEM(dev_priv))
- ret = i915_gem_object_migrate(obj, &ww, INTEL_REGION_LMEM);
- /* TODO: Do we need to sync when migration becomes async? */
- if (!ret)
- ret = i915_gem_object_pin_pages(obj);
- if (ret)
- goto err;
-
- if (!ret) {
- vma = i915_gem_object_pin_to_display_plane(obj, &ww, alignment,
- view, pinctl);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto err_unpin;
- }
- }
-
- if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
- /*
- * Install a fence for tiled scan-out. Pre-i965 always needs a
- * fence, whereas 965+ only requires a fence if using
- * framebuffer compression. For simplicity, we always, when
- * possible, install a fence as the cost is not that onerous.
- *
- * If we fail to fence the tiled scanout, then either the
- * modeset will reject the change (which is highly unlikely as
- * the affected systems, all but one, do not have unmappable
- * space) or we will not be able to enable full powersaving
- * techniques (also likely not to apply due to various limits
- * FBC and the like impose on the size of the buffer, which
- * presumably we violated anyway with this unmappable buffer).
- * Anyway, it is presumably better to stumble onwards with
- * something and try to run the system in a "less than optimal"
- * mode that matches the user configuration.
- */
- ret = i915_vma_pin_fence(vma);
- if (ret != 0 && DISPLAY_VER(dev_priv) < 4) {
- i915_vma_unpin(vma);
- goto err_unpin;
- }
- ret = 0;
-
- if (vma->fence)
- *out_flags |= PLANE_HAS_FENCE;
- }
-
- i915_vma_get(vma);
-
-err_unpin:
- i915_gem_object_unpin_pages(obj);
-err:
- if (ret == -EDEADLK) {
- ret = i915_gem_ww_ctx_backoff(&ww);
- if (!ret)
- goto retry;
- }
- i915_gem_ww_ctx_fini(&ww);
- if (ret)
- vma = ERR_PTR(ret);
-
- atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
- return vma;
-}
-
-void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
-{
- if (flags & PLANE_HAS_FENCE)
- i915_vma_unpin_fence(vma);
- i915_vma_unpin(vma);
- i915_vma_put(vma);
-}
-
/*
* Convert the x/y offsets into a linear offset.
* Only valid with 0/180 degree rotation, which is fine since linear
@@ -1241,123 +1051,6 @@ u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
DRM_MODE_ROTATE_0);
}
-static struct i915_vma *
-initial_plane_vma(struct drm_i915_private *i915,
- struct intel_initial_plane_config *plane_config)
-{
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- u32 base, size;
-
- if (plane_config->size == 0)
- return NULL;
-
- base = round_down(plane_config->base,
- I915_GTT_MIN_ALIGNMENT);
- size = round_up(plane_config->base + plane_config->size,
- I915_GTT_MIN_ALIGNMENT);
- size -= base;
-
- /*
- * If the FB is too big, just don't use it since fbdev is not very
- * important and we should probably use that space with FBC or other
- * features.
- */
- if (IS_ENABLED(CONFIG_FRAMEBUFFER_CONSOLE) &&
- size * 2 > i915->stolen_usable_size)
- return NULL;
-
- obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size);
- if (IS_ERR(obj))
- return NULL;
-
- /*
- * Mark it WT ahead of time to avoid changing the
- * cache_level during fbdev initialization. The
- * unbind there would get stuck waiting for rcu.
- */
- i915_gem_object_set_cache_coherency(obj, HAS_WT(i915) ?
- I915_CACHE_WT : I915_CACHE_NONE);
-
- switch (plane_config->tiling) {
- case I915_TILING_NONE:
- break;
- case I915_TILING_X:
- case I915_TILING_Y:
- obj->tiling_and_stride =
- plane_config->fb->base.pitches[0] |
- plane_config->tiling;
- break;
- default:
- MISSING_CASE(plane_config->tiling);
- goto err_obj;
- }
-
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
- if (IS_ERR(vma))
- goto err_obj;
-
- if (i915_ggtt_pin(vma, NULL, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
- goto err_obj;
-
- if (i915_gem_object_is_tiled(obj) &&
- !i915_vma_is_map_and_fenceable(vma))
- goto err_obj;
-
- return vma;
-
-err_obj:
- i915_gem_object_put(obj);
- return NULL;
-}
-
-static bool
-intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
- struct intel_initial_plane_config *plane_config)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_mode_fb_cmd2 mode_cmd = { 0 };
- struct drm_framebuffer *fb = &plane_config->fb->base;
- struct i915_vma *vma;
-
- switch (fb->modifier) {
- case DRM_FORMAT_MOD_LINEAR:
- case I915_FORMAT_MOD_X_TILED:
- case I915_FORMAT_MOD_Y_TILED:
- break;
- default:
- drm_dbg(&dev_priv->drm,
- "Unsupported modifier for initial FB: 0x%llx\n",
- fb->modifier);
- return false;
- }
-
- vma = initial_plane_vma(dev_priv, plane_config);
- if (!vma)
- return false;
-
- mode_cmd.pixel_format = fb->format->format;
- mode_cmd.width = fb->width;
- mode_cmd.height = fb->height;
- mode_cmd.pitches[0] = fb->pitches[0];
- mode_cmd.modifier[0] = fb->modifier;
- mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
-
- if (intel_framebuffer_init(to_intel_framebuffer(fb),
- vma->obj, &mode_cmd)) {
- drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
- goto err_vma;
- }
-
- plane_config->vma = vma;
- return true;
-
-err_vma:
- i915_vma_put(vma);
- return false;
-}
-
static void
intel_set_plane_visible(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state,
@@ -1393,8 +1086,8 @@ static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state)
}
}
-static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
- struct intel_plane *plane)
+void intel_plane_disable_noatomic(struct intel_crtc *crtc,
+ struct intel_plane *plane)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
@@ -1439,123 +1132,6 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
intel_wait_for_vblank(dev_priv, crtc->pipe);
}
-static bool
-intel_reuse_initial_plane_obj(struct drm_i915_private *i915,
- const struct intel_initial_plane_config *plane_config,
- struct drm_framebuffer **fb,
- struct i915_vma **vma)
-{
- struct intel_crtc *crtc;
-
- for_each_intel_crtc(&i915->drm, crtc) {
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- struct intel_plane *plane =
- to_intel_plane(crtc->base.primary);
- struct intel_plane_state *plane_state =
- to_intel_plane_state(plane->base.state);
-
- if (!crtc_state->uapi.active)
- continue;
-
- if (!plane_state->ggtt_vma)
- continue;
-
- if (intel_plane_ggtt_offset(plane_state) == plane_config->base) {
- *fb = plane_state->hw.fb;
- *vma = plane_state->ggtt_vma;
- return true;
- }
- }
-
- return false;
-}
-
-static void
-intel_find_initial_plane_obj(struct intel_crtc *crtc,
- struct intel_initial_plane_config *plane_config)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- struct intel_plane *plane =
- to_intel_plane(crtc->base.primary);
- struct intel_plane_state *plane_state =
- to_intel_plane_state(plane->base.state);
- struct drm_framebuffer *fb;
- struct i915_vma *vma;
-
- /*
- * TODO:
- * Disable planes if get_initial_plane_config() failed.
- * Make sure things work if the surface base is not page aligned.
- */
- if (!plane_config->fb)
- return;
-
- if (intel_alloc_initial_plane_obj(crtc, plane_config)) {
- fb = &plane_config->fb->base;
- vma = plane_config->vma;
- goto valid_fb;
- }
-
- /*
- * Failed to alloc the obj, check to see if we should share
- * an fb with another CRTC instead
- */
- if (intel_reuse_initial_plane_obj(dev_priv, plane_config, &fb, &vma))
- goto valid_fb;
-
- /*
- * We've failed to reconstruct the BIOS FB. Current display state
- * indicates that the primary plane is visible, but has a NULL FB,
- * which will lead to problems later if we don't fix it up. The
- * simplest solution is to just disable the primary plane now and
- * pretend the BIOS never had it enabled.
- */
- intel_plane_disable_noatomic(crtc, plane);
- if (crtc_state->bigjoiner) {
- struct intel_crtc *slave =
- crtc_state->bigjoiner_linked_crtc;
- intel_plane_disable_noatomic(slave, to_intel_plane(slave->base.primary));
- }
-
- return;
-
-valid_fb:
- plane_state->uapi.rotation = plane_config->rotation;
- intel_fb_fill_view(to_intel_framebuffer(fb),
- plane_state->uapi.rotation, &plane_state->view);
-
- __i915_vma_pin(vma);
- plane_state->ggtt_vma = i915_vma_get(vma);
- if (intel_plane_uses_fence(plane_state) &&
- i915_vma_pin_fence(vma) == 0 && vma->fence)
- plane_state->flags |= PLANE_HAS_FENCE;
-
- plane_state->uapi.src_x = 0;
- plane_state->uapi.src_y = 0;
- plane_state->uapi.src_w = fb->width << 16;
- plane_state->uapi.src_h = fb->height << 16;
-
- plane_state->uapi.crtc_x = 0;
- plane_state->uapi.crtc_y = 0;
- plane_state->uapi.crtc_w = fb->width;
- plane_state->uapi.crtc_h = fb->height;
-
- if (plane_config->tiling)
- dev_priv->preserve_bios_swizzle = true;
-
- plane_state->uapi.fb = fb;
- drm_framebuffer_get(fb);
-
- plane_state->uapi.crtc = &crtc->base;
- intel_plane_copy_uapi_to_hw_state(plane_state, plane_state, crtc);
-
- atomic_or(plane->frontbuffer_bit, &to_intel_frontbuffer(fb)->bits);
-}
-
unsigned int
intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
{
@@ -2313,6 +1889,33 @@ static bool needs_cursorclk_wa(const struct intel_crtc_state *crtc_state)
return false;
}
+static void intel_async_flip_vtd_wa(struct drm_i915_private *i915,
+ enum pipe pipe, bool enable)
+{
+ if (DISPLAY_VER(i915) == 9) {
+ /*
+ * "Plane N strech max must be programmed to 11b (x1)
+ * when Async flips are enabled on that plane."
+ */
+ intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
+ SKL_PLANE1_STRETCH_MAX_MASK,
+ enable ? SKL_PLANE1_STRETCH_MAX_X1 : SKL_PLANE1_STRETCH_MAX_X8);
+ } else {
+ /* Also needed on HSW/BDW albeit undocumented */
+ intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
+ HSW_PRI_STRETCH_MAX_MASK,
+ enable ? HSW_PRI_STRETCH_MAX_X1 : HSW_PRI_STRETCH_MAX_X8);
+ }
+}
+
+static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ return crtc_state->uapi.async_flip && intel_vtd_active() &&
+ (DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915));
+}
+
static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state)
{
@@ -2348,6 +1951,10 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
intel_fbc_post_update(state, crtc);
intel_drrs_page_flip(state, crtc);
+ if (needs_async_flip_vtd_wa(old_crtc_state) &&
+ !needs_async_flip_vtd_wa(new_crtc_state))
+ intel_async_flip_vtd_wa(dev_priv, pipe, false);
+
if (needs_nv12_wa(old_crtc_state) &&
!needs_nv12_wa(new_crtc_state))
skl_wa_827(dev_priv, pipe, false);
@@ -2446,6 +2053,10 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
if (intel_fbc_pre_update(state, crtc))
intel_wait_for_vblank(dev_priv, pipe);
+ if (!needs_async_flip_vtd_wa(old_crtc_state) &&
+ needs_async_flip_vtd_wa(new_crtc_state))
+ intel_async_flip_vtd_wa(dev_priv, pipe, true);
+
/* Display WA 827 */
if (!needs_nv12_wa(old_crtc_state) &&
needs_nv12_wa(new_crtc_state))
@@ -10478,279 +10089,6 @@ static int intel_atomic_commit(struct drm_device *dev,
return 0;
}
-struct wait_rps_boost {
- struct wait_queue_entry wait;
-
- struct drm_crtc *crtc;
- struct i915_request *request;
-};
-
-static int do_rps_boost(struct wait_queue_entry *_wait,
- unsigned mode, int sync, void *key)
-{
- struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
- struct i915_request *rq = wait->request;
-
- /*
- * If we missed the vblank, but the request is already running it
- * is reasonable to assume that it will complete before the next
- * vblank without our intervention, so leave RPS alone.
- */
- if (!i915_request_started(rq))
- intel_rps_boost(rq);
- i915_request_put(rq);
-
- drm_crtc_vblank_put(wait->crtc);
-
- list_del(&wait->wait.entry);
- kfree(wait);
- return 1;
-}
-
-static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
- struct dma_fence *fence)
-{
- struct wait_rps_boost *wait;
-
- if (!dma_fence_is_i915(fence))
- return;
-
- if (DISPLAY_VER(to_i915(crtc->dev)) < 6)
- return;
-
- if (drm_crtc_vblank_get(crtc))
- return;
-
- wait = kmalloc(sizeof(*wait), GFP_KERNEL);
- if (!wait) {
- drm_crtc_vblank_put(crtc);
- return;
- }
-
- wait->request = to_request(dma_fence_get(fence));
- wait->crtc = crtc;
-
- wait->wait.func = do_rps_boost;
- wait->wait.flags = 0;
-
- add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
-}
-
-int intel_plane_pin_fb(struct intel_plane_state *plane_state)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- struct drm_framebuffer *fb = plane_state->hw.fb;
- struct i915_vma *vma;
- bool phys_cursor =
- plane->id == PLANE_CURSOR &&
- INTEL_INFO(dev_priv)->display.cursor_needs_physical;
-
- if (!intel_fb_uses_dpt(fb)) {
- vma = intel_pin_and_fence_fb_obj(fb, phys_cursor,
- &plane_state->view.gtt,
- intel_plane_uses_fence(plane_state),
- &plane_state->flags);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
- plane_state->ggtt_vma = vma;
- } else {
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-
- vma = intel_dpt_pin(intel_fb->dpt_vm);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
- plane_state->ggtt_vma = vma;
-
- vma = intel_pin_fb_obj_dpt(fb, &plane_state->view.gtt, false,
- &plane_state->flags, intel_fb->dpt_vm);
- if (IS_ERR(vma)) {
- intel_dpt_unpin(intel_fb->dpt_vm);
- plane_state->ggtt_vma = NULL;
- return PTR_ERR(vma);
- }
-
- plane_state->dpt_vma = vma;
-
- WARN_ON(plane_state->ggtt_vma == plane_state->dpt_vma);
- }
-
- return 0;
-}
-
-void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
-{
- struct drm_framebuffer *fb = old_plane_state->hw.fb;
- struct i915_vma *vma;
-
- if (!intel_fb_uses_dpt(fb)) {
- vma = fetch_and_zero(&old_plane_state->ggtt_vma);
- if (vma)
- intel_unpin_fb_vma(vma, old_plane_state->flags);
- } else {
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-
- vma = fetch_and_zero(&old_plane_state->dpt_vma);
- if (vma)
- intel_unpin_fb_vma(vma, old_plane_state->flags);
-
- vma = fetch_and_zero(&old_plane_state->ggtt_vma);
- if (vma)
- intel_dpt_unpin(intel_fb->dpt_vm);
- }
-}
-
-/**
- * intel_prepare_plane_fb - Prepare fb for usage on plane
- * @_plane: drm plane to prepare for
- * @_new_plane_state: the plane state being prepared
- *
- * Prepares a framebuffer for usage on a display plane. Generally this
- * involves pinning the underlying object and updating the frontbuffer tracking
- * bits. Some older platforms need special physical address handling for
- * cursor planes.
- *
- * Returns 0 on success, negative error code on failure.
- */
-int
-intel_prepare_plane_fb(struct drm_plane *_plane,
- struct drm_plane_state *_new_plane_state)
-{
- struct i915_sched_attr attr = { .priority = I915_PRIORITY_DISPLAY };
- struct intel_plane *plane = to_intel_plane(_plane);
- struct intel_plane_state *new_plane_state =
- to_intel_plane_state(_new_plane_state);
- struct intel_atomic_state *state =
- to_intel_atomic_state(new_plane_state->uapi.state);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct intel_plane_state *old_plane_state =
- intel_atomic_get_old_plane_state(state, plane);
- struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
- struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
- int ret;
-
- if (old_obj) {
- const struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state,
- to_intel_crtc(old_plane_state->hw.crtc));
-
- /* Big Hammer, we also need to ensure that any pending
- * MI_WAIT_FOR_EVENT inside a user batch buffer on the
- * current scanout is retired before unpinning the old
- * framebuffer. Note that we rely on userspace rendering
- * into the buffer attached to the pipe they are waiting
- * on. If not, userspace generates a GPU hang with IPEHR
- * point to the MI_WAIT_FOR_EVENT.
- *
- * This should only fail upon a hung GPU, in which case we
- * can safely continue.
- */
- if (intel_crtc_needs_modeset(crtc_state)) {
- ret = i915_sw_fence_await_reservation(&state->commit_ready,
- old_obj->base.resv, NULL,
- false, 0,
- GFP_KERNEL);
- if (ret < 0)
- return ret;
- }
- }
-
- if (new_plane_state->uapi.fence) { /* explicit fencing */
- i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
- &attr);
- ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
- new_plane_state->uapi.fence,
- i915_fence_timeout(dev_priv),
- GFP_KERNEL);
- if (ret < 0)
- return ret;
- }
-
- if (!obj)
- return 0;
-
-
- ret = intel_plane_pin_fb(new_plane_state);
- if (ret)
- return ret;
-
- i915_gem_object_wait_priority(obj, 0, &attr);
-
- if (!new_plane_state->uapi.fence) { /* implicit fencing */
- struct dma_fence *fence;
-
- ret = i915_sw_fence_await_reservation(&state->commit_ready,
- obj->base.resv, NULL,
- false,
- i915_fence_timeout(dev_priv),
- GFP_KERNEL);
- if (ret < 0)
- goto unpin_fb;
-
- fence = dma_resv_get_excl_unlocked(obj->base.resv);
- if (fence) {
- add_rps_boost_after_vblank(new_plane_state->hw.crtc,
- fence);
- dma_fence_put(fence);
- }
- } else {
- add_rps_boost_after_vblank(new_plane_state->hw.crtc,
- new_plane_state->uapi.fence);
- }
-
- /*
- * We declare pageflips to be interactive and so merit a small bias
- * towards upclocking to deliver the frame on time. By only changing
- * the RPS thresholds to sample more regularly and aim for higher
- * clocks we can hopefully deliver low power workloads (like kodi)
- * that are not quite steady state without resorting to forcing
- * maximum clocks following a vblank miss (see do_rps_boost()).
- */
- if (!state->rps_interactive) {
- intel_rps_mark_interactive(&dev_priv->gt.rps, true);
- state->rps_interactive = true;
- }
-
- return 0;
-
-unpin_fb:
- intel_plane_unpin_fb(new_plane_state);
-
- return ret;
-}
-
-/**
- * intel_cleanup_plane_fb - Cleans up an fb after plane use
- * @plane: drm plane to clean up for
- * @_old_plane_state: the state from the previous modeset
- *
- * Cleans up a framebuffer that has just been removed from a plane.
- */
-void
-intel_cleanup_plane_fb(struct drm_plane *plane,
- struct drm_plane_state *_old_plane_state)
-{
- struct intel_plane_state *old_plane_state =
- to_intel_plane_state(_old_plane_state);
- struct intel_atomic_state *state =
- to_intel_atomic_state(old_plane_state->uapi.state);
- struct drm_i915_private *dev_priv = to_i915(plane->dev);
- struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);
-
- if (!obj)
- return;
-
- if (state->rps_interactive) {
- intel_rps_mark_interactive(&dev_priv->gt.rps, false);
- state->rps_interactive = false;
- }
-
- /* Should only be called after a successful intel_prepare_plane_fb()! */
- intel_plane_unpin_fb(old_plane_state);
-}
-
/**
* intel_plane_destroy - destroy a plane
* @plane: plane to destroy
@@ -11580,22 +10918,6 @@ static void intel_mode_config_cleanup(struct drm_i915_private *i915)
drm_mode_config_cleanup(&i915->drm);
}
-static void plane_config_fini(struct intel_initial_plane_config *plane_config)
-{
- if (plane_config->fb) {
- struct drm_framebuffer *fb = &plane_config->fb->base;
-
- /* We may only have the stub and not a full framebuffer */
- if (drm_framebuffer_read_refcount(fb))
- drm_framebuffer_put(fb);
- else
- kfree(fb);
- }
-
- if (plane_config->vma)
- i915_vma_put(plane_config->vma);
-}
-
/* part #1: call before irq install */
int intel_modeset_init_noirq(struct drm_i915_private *i915)
{
@@ -11728,27 +11050,9 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915)
drm_modeset_unlock_all(dev);
for_each_intel_crtc(dev, crtc) {
- struct intel_initial_plane_config plane_config = {};
-
if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
continue;
-
- /*
- * Note that reserving the BIOS fb up front prevents us
- * from stuffing other stolen allocations like the ring
- * on top. This prevents some ugliness at boot time, and
- * can even allow for smooth boot transitions if the BIOS
- * fb is large enough for the active pipe configuration.
- */
- i915->display->get_initial_plane_config(crtc, &plane_config);
-
- /*
- * If the fb is shared between multiple heads, we'll
- * just get the first one.
- */
- intel_find_initial_plane_obj(crtc, &plane_config);
-
- plane_config_fini(&plane_config);
+ intel_crtc_initial_plane_config(crtc);
}
/*
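The bulk of the intel_display.c shrinkage is code movement: the fb pin/unpin helpers go to the new intel_fb_pin.c, prepare/cleanup_plane_fb and the RPS-boost logic to intel_atomic_plane.c, and the BIOS/initial plane-config takeover to the new intel_plane_initial.c (hence the intel_crtc_initial_plane_config() call above). The genuinely new piece is the async-flip VT-d workaround, applied with intel_de_rmw(); its expansion is roughly the usual read-modify-write sequence (illustrative only, the hunk itself just uses the helper):

    u32 tmp;

    tmp = intel_de_read(i915, CHICKEN_PIPESL_1(pipe));
    tmp &= ~SKL_PLANE1_STRETCH_MAX_MASK;
    tmp |= SKL_PLANE1_STRETCH_MAX_X1;   /* or _X8 again when the w/a is disabled */
    intel_de_write(i915, CHICKEN_PIPESL_1(pipe), tmp);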
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index 3028072c2cf3..0c76bf57f86b 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -576,19 +576,9 @@ int intel_get_load_detect_pipe(struct drm_connector *connector,
void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx);
-struct i915_vma *
-intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, bool phys_cursor,
- const struct i915_ggtt_view *view,
- bool uses_fence,
- unsigned long *out_flags);
-void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags);
struct drm_framebuffer *
intel_framebuffer_create(struct drm_i915_gem_object *obj,
struct drm_mode_fb_cmd2 *mode_cmd);
-int intel_prepare_plane_fb(struct drm_plane *plane,
- struct drm_plane_state *new_state);
-void intel_cleanup_plane_fb(struct drm_plane *plane,
- struct drm_plane_state *old_state);
void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe);
@@ -619,15 +609,16 @@ void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state);
int bdw_get_pipemisc_bpp(struct intel_crtc *crtc);
unsigned int intel_plane_fence_y_offset(const struct intel_plane_state *plane_state);
+bool intel_plane_uses_fence(const struct intel_plane_state *plane_state);
bool
intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
u64 modifier);
-int intel_plane_pin_fb(struct intel_plane_state *plane_state);
-void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state);
struct intel_encoder *
intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state);
+void intel_plane_disable_noatomic(struct intel_crtc *crtc,
+ struct intel_plane *plane);
void intel_display_driver_register(struct drm_i915_private *i915);
void intel_display_driver_unregister(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index 309d74fd86ce..e04767695530 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -7,19 +7,19 @@
#include <drm/drm_fourcc.h>
#include "i915_debugfs.h"
+#include "intel_de.h"
#include "intel_display_debugfs.h"
#include "intel_display_power.h"
-#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dp.h"
+#include "intel_dp_mst.h"
#include "intel_drrs.h"
#include "intel_fbc.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_pm.h"
#include "intel_psr.h"
-#include "intel_sideband.h"
#include "intel_sprite.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
@@ -1379,7 +1379,7 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
continue;
dig_port = enc_to_dig_port(intel_encoder);
- if (!dig_port->dp.can_mst)
+ if (!intel_dp_mst_source_support(&dig_port->dp))
continue;
seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 06e9879aedd7..1672604f9ef7 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -3,12 +3,11 @@
* Copyright © 2019 Intel Corporation
*/
-#include "display/intel_crt.h"
-
#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_cdclk.h"
#include "intel_combo_phy.h"
+#include "intel_crt.h"
#include "intel_de.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
@@ -16,12 +15,13 @@
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
#include "intel_hotplug.h"
+#include "intel_pcode.h"
#include "intel_pm.h"
#include "intel_pps.h"
-#include "intel_sideband.h"
#include "intel_snps_phy.h"
#include "intel_tc.h"
#include "intel_vga.h"
+#include "vlv_sideband.h"
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
enum i915_power_well_id power_well_id);
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 21ce8bccc645..39e11eaec1a3 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -1580,7 +1580,6 @@ struct intel_dp {
struct intel_pps pps;
- bool can_mst; /* this port supports mst */
bool is_mst;
int active_mst_links;
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 74a657ae131a..9d8132dd4cc5 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -66,7 +66,6 @@
#include "intel_panel.h"
#include "intel_pps.h"
#include "intel_psr.h"
-#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vdsc.h"
#include "intel_vrr.h"
@@ -140,6 +139,9 @@ static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
return;
}
+ /*
+ * Sink rates for 8b/10b.
+ */
max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
max_lttpr_rate = drm_dp_lttpr_max_link_rate(intel_dp->lttpr_common_caps);
if (max_lttpr_rate)
@@ -163,6 +165,21 @@ static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
drm_dp_dpcd_readb(&intel_dp->aux,
DP_128B132B_SUPPORTED_LINK_RATES, &uhbr_rates);
+ if (drm_dp_lttpr_count(intel_dp->lttpr_common_caps)) {
+ /* We have a repeater */
+ if (intel_dp->lttpr_common_caps[0] >= 0x20 &&
+ intel_dp->lttpr_common_caps[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] &
+ DP_PHY_REPEATER_128B132B_SUPPORTED) {
+ /* Repeater supports 128b/132b, valid UHBR rates */
+ uhbr_rates &= intel_dp->lttpr_common_caps[DP_PHY_REPEATER_128B132B_RATES -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+ } else {
+ /* Does not support 128b/132b */
+ uhbr_rates = 0;
+ }
+ }
+
if (uhbr_rates & DP_UHBR10)
intel_dp->sink_rates[i++] = 1000000;
if (uhbr_rates & DP_UHBR13_5)
@@ -2649,7 +2666,7 @@ intel_dp_can_mst(struct intel_dp *intel_dp)
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
return i915->params.enable_dp_mst &&
- intel_dp->can_mst &&
+ intel_dp_mst_source_support(intel_dp) &&
drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
}
@@ -2664,10 +2681,10 @@ intel_dp_configure_mst(struct intel_dp *intel_dp)
drm_dbg_kms(&i915->drm,
"[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
encoder->base.base.id, encoder->base.name,
- yesno(intel_dp->can_mst), yesno(sink_can_mst),
+ yesno(intel_dp_mst_source_support(intel_dp)), yesno(sink_can_mst),
yesno(i915->params.enable_dp_mst));
- if (!intel_dp->can_mst)
+ if (!intel_dp_mst_source_support(intel_dp))
return;
intel_dp->is_mst = sink_can_mst &&
@@ -5067,7 +5084,7 @@ void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
intel_dp = enc_to_intel_dp(encoder);
- if (!intel_dp->can_mst)
+ if (!intel_dp_mst_source_support(intel_dp))
continue;
if (intel_dp->is_mst)
@@ -5091,7 +5108,7 @@ void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
intel_dp = enc_to_intel_dp(encoder);
- if (!intel_dp->can_mst)
+ if (!intel_dp_mst_source_support(intel_dp))
continue;
ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr,
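
The 128b/132b hunk above masks the sink's UHBR rate bitmask by the PHY repeater's capabilities, and clears it entirely when a repeater is present but lacks 128b/132b support. A minimal standalone C sketch of that decision, with the DPCD bit values assumed from drm_dp_helper.h (illustrative only, not the i915 code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed DPCD 0x2215 bit layout (see drm_dp_helper.h); illustrative only. */
#define DP_UHBR10   (1 << 0)
#define DP_UHBR20   (1 << 1)
#define DP_UHBR13_5 (1 << 2)

/* Mirrors the masking logic added to intel_dp_set_sink_rates() above. */
static uint8_t mask_uhbr_rates(uint8_t sink_uhbr_rates, bool have_lttpr,
			       bool lttpr_128b132b, uint8_t lttpr_uhbr_rates)
{
	if (!have_lttpr)
		return sink_uhbr_rates;			/* no repeater: sink caps rule */
	if (!lttpr_128b132b)
		return 0;				/* repeater can't do 128b/132b */
	return sink_uhbr_rates & lttpr_uhbr_rates;	/* intersect sink and repeater */
}

int main(void)
{
	/* Sink advertises UHBR10+UHBR20, repeater only UHBR10. */
	uint8_t rates = mask_uhbr_rates(DP_UHBR10 | DP_UHBR20, true, true, DP_UHBR10);

	printf("UHBR10: %d UHBR20: %d\n",
	       !!(rates & DP_UHBR10), !!(rates & DP_UHBR20));	/* 1 0 */
	return 0;
}
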
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index c052ce14894d..85676c953e0a 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -25,15 +25,6 @@
#include "intel_dp.h"
#include "intel_dp_link_training.h"
-static void
-intel_dp_dump_link_status(struct drm_device *drm,
- const u8 link_status[DP_LINK_STATUS_SIZE])
-{
- drm_dbg_kms(drm,
- "ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x\n",
- link_status[0], link_status[1], link_status[2],
- link_status[3], link_status[4], link_status[5]);
-}
static void intel_dp_reset_lttpr_common_caps(struct intel_dp *intel_dp)
{
@@ -66,6 +57,7 @@ static u8 *intel_dp_lttpr_phy_caps(struct intel_dp *intel_dp,
static void intel_dp_read_lttpr_phy_caps(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
char phy_name[10];
@@ -73,21 +65,22 @@ static void intel_dp_read_lttpr_phy_caps(struct intel_dp *intel_dp,
if (drm_dp_read_lttpr_phy_caps(&intel_dp->aux, dp_phy, phy_caps) < 0) {
drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
- "failed to read the PHY caps for %s\n",
- phy_name);
+ "[ENCODER:%d:%s][%s] failed to read the PHY caps\n",
+ encoder->base.base.id, encoder->base.name, phy_name);
return;
}
drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
- "%s PHY capabilities: %*ph\n",
- phy_name,
+ "[ENCODER:%d:%s][%s] PHY capabilities: %*ph\n",
+ encoder->base.base.id, encoder->base.name, phy_name,
(int)sizeof(intel_dp->lttpr_phy_caps[0]),
phy_caps);
}
static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
if (intel_dp_is_edp(intel_dp))
return false;
@@ -104,7 +97,8 @@ static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp)
goto reset_caps;
drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
- "LTTPR common capabilities: %*ph\n",
+ "[ENCODER:%d:%s] LTTPR common capabilities: %*ph\n",
+ encoder->base.base.id, encoder->base.name,
(int)sizeof(intel_dp->lttpr_common_caps),
intel_dp->lttpr_common_caps);
@@ -130,6 +124,8 @@ intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable)
static int intel_dp_init_lttpr(struct intel_dp *intel_dp)
{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
int lttpr_count;
int i;
@@ -161,8 +157,9 @@ static int intel_dp_init_lttpr(struct intel_dp *intel_dp)
return 0;
if (!intel_dp_set_lttpr_transparent_mode(intel_dp, false)) {
- drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
- "Switching to LTTPR non-transparent LT mode failed, fall-back to transparent mode\n");
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] Switching to LTTPR non-transparent LT mode failed, fall-back to transparent mode\n",
+ encoder->base.base.id, encoder->base.name);
intel_dp_set_lttpr_transparent_mode(intel_dp, true);
intel_dp_reset_lttpr_count(intel_dp);
@@ -307,11 +304,32 @@ static bool has_per_lane_signal_levels(struct intel_dp *intel_dp,
return !intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy);
}
-static u8 intel_dp_get_lane_adjust_train(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- enum drm_dp_phy dp_phy,
- const u8 link_status[DP_LINK_STATUS_SIZE],
- int lane)
+/* 128b/132b */
+static u8 intel_dp_get_lane_adjust_tx_ffe_preset(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy,
+ const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ u8 tx_ffe = 0;
+
+ if (has_per_lane_signal_levels(intel_dp, dp_phy)) {
+ lane = min(lane, crtc_state->lane_count - 1);
+ tx_ffe = drm_dp_get_adjust_tx_ffe_preset(link_status, lane);
+ } else {
+ for (lane = 0; lane < crtc_state->lane_count; lane++)
+ tx_ffe = max(tx_ffe, drm_dp_get_adjust_tx_ffe_preset(link_status, lane));
+ }
+
+ return tx_ffe;
+}
+
+/* 8b/10b */
+static u8 intel_dp_get_lane_adjust_vswing_preemph(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy,
+ const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane)
{
u8 v = 0;
u8 p = 0;
@@ -343,14 +361,72 @@ static u8 intel_dp_get_lane_adjust_train(struct intel_dp *intel_dp,
return v | p;
}
+static u8 intel_dp_get_lane_adjust_train(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy,
+ const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ if (intel_dp_is_uhbr(crtc_state))
+ return intel_dp_get_lane_adjust_tx_ffe_preset(intel_dp, crtc_state,
+ dp_phy, link_status, lane);
+ else
+ return intel_dp_get_lane_adjust_vswing_preemph(intel_dp, crtc_state,
+ dp_phy, link_status, lane);
+}
+
+#define TRAIN_REQ_FMT "%d/%d/%d/%d"
+#define _TRAIN_REQ_VSWING_ARGS(link_status, lane) \
+ (drm_dp_get_adjust_request_voltage((link_status), (lane)) >> DP_TRAIN_VOLTAGE_SWING_SHIFT)
+#define TRAIN_REQ_VSWING_ARGS(link_status) \
+ _TRAIN_REQ_VSWING_ARGS(link_status, 0), \
+ _TRAIN_REQ_VSWING_ARGS(link_status, 1), \
+ _TRAIN_REQ_VSWING_ARGS(link_status, 2), \
+ _TRAIN_REQ_VSWING_ARGS(link_status, 3)
+#define _TRAIN_REQ_PREEMPH_ARGS(link_status, lane) \
+ (drm_dp_get_adjust_request_pre_emphasis((link_status), (lane)) >> DP_TRAIN_PRE_EMPHASIS_SHIFT)
+#define TRAIN_REQ_PREEMPH_ARGS(link_status) \
+ _TRAIN_REQ_PREEMPH_ARGS(link_status, 0), \
+ _TRAIN_REQ_PREEMPH_ARGS(link_status, 1), \
+ _TRAIN_REQ_PREEMPH_ARGS(link_status, 2), \
+ _TRAIN_REQ_PREEMPH_ARGS(link_status, 3)
+#define _TRAIN_REQ_TX_FFE_ARGS(link_status, lane) \
+ drm_dp_get_adjust_tx_ffe_preset((link_status), (lane))
+#define TRAIN_REQ_TX_FFE_ARGS(link_status) \
+ _TRAIN_REQ_TX_FFE_ARGS(link_status, 0), \
+ _TRAIN_REQ_TX_FFE_ARGS(link_status, 1), \
+ _TRAIN_REQ_TX_FFE_ARGS(link_status, 2), \
+ _TRAIN_REQ_TX_FFE_ARGS(link_status, 3)
+
void
intel_dp_get_adjust_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy,
const u8 link_status[DP_LINK_STATUS_SIZE])
{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ char phy_name[10];
int lane;
+ if (intel_dp_is_uhbr(crtc_state)) {
+ drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] 128b/132b, lanes: %d, "
+ "TX FFE request: " TRAIN_REQ_FMT "\n",
+ encoder->base.base.id, encoder->base.name,
+ intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
+ crtc_state->lane_count,
+ TRAIN_REQ_TX_FFE_ARGS(link_status));
+ } else {
+ drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] 8b/10b, lanes: %d, "
+ "vswing request: " TRAIN_REQ_FMT ", "
+ "pre-emphasis request: " TRAIN_REQ_FMT "\n",
+ encoder->base.base.id, encoder->base.name,
+ intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
+ crtc_state->lane_count,
+ TRAIN_REQ_VSWING_ARGS(link_status),
+ TRAIN_REQ_PREEMPH_ARGS(link_status));
+ }
+
for (lane = 0; lane < 4; lane++)
intel_dp->train_set[lane] =
intel_dp_get_lane_adjust_train(intel_dp, crtc_state,
@@ -376,7 +452,7 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
int len;
intel_dp_program_link_training_pattern(intel_dp, crtc_state,
- dp_train_pat);
+ dp_phy, dp_train_pat);
buf[0] = dp_train_pat;
/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
@@ -404,16 +480,19 @@ static char dp_training_pattern_name(u8 train_pat)
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy,
u8 dp_train_pat)
{
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
u8 train_pat = intel_dp_training_pattern_symbol(dp_train_pat);
+ char phy_name[10];
if (train_pat != DP_TRAINING_PATTERN_DISABLE)
- drm_dbg_kms(&dev_priv->drm,
- "[ENCODER:%d:%s] Using DP training pattern TPS%c\n",
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s][%s] Using DP training pattern TPS%c\n",
encoder->base.base.id, encoder->base.name,
+ intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
dp_training_pattern_name(train_pat));
intel_dp->set_link_train(intel_dp, crtc_state, dp_train_pat);
@@ -436,23 +515,39 @@ intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
_TRAIN_SET_PREEMPH_ARGS((train_set)[1]), \
_TRAIN_SET_PREEMPH_ARGS((train_set)[2]), \
_TRAIN_SET_PREEMPH_ARGS((train_set)[3])
+#define _TRAIN_SET_TX_FFE_ARGS(train_set) \
+ ((train_set) & DP_TX_FFE_PRESET_VALUE_MASK), ""
+#define TRAIN_SET_TX_FFE_ARGS(train_set) \
+ _TRAIN_SET_TX_FFE_ARGS((train_set)[0]), \
+ _TRAIN_SET_TX_FFE_ARGS((train_set)[1]), \
+ _TRAIN_SET_TX_FFE_ARGS((train_set)[2]), \
+ _TRAIN_SET_TX_FFE_ARGS((train_set)[3])
void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy)
{
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
char phy_name[10];
- drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] lanes: %d, "
- "vswing levels: " TRAIN_SET_FMT ", "
- "pre-emphasis levels: " TRAIN_SET_FMT ", at %s\n",
- encoder->base.base.id, encoder->base.name,
- crtc_state->lane_count,
- TRAIN_SET_VSWING_ARGS(intel_dp->train_set),
- TRAIN_SET_PREEMPH_ARGS(intel_dp->train_set),
- intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)));
+ if (intel_dp_is_uhbr(crtc_state)) {
+ drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] 128b/132b, lanes: %d, "
+ "TX FFE presets: " TRAIN_SET_FMT "\n",
+ encoder->base.base.id, encoder->base.name,
+ intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
+ crtc_state->lane_count,
+ TRAIN_SET_TX_FFE_ARGS(intel_dp->train_set));
+ } else {
+ drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] 8b/10b, lanes: %d, "
+ "vswing levels: " TRAIN_SET_FMT ", "
+ "pre-emphasis levels: " TRAIN_SET_FMT "\n",
+ encoder->base.base.id, encoder->base.name,
+ intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
+ crtc_state->lane_count,
+ TRAIN_SET_VSWING_ARGS(intel_dp->train_set),
+ TRAIN_SET_PREEMPH_ARGS(intel_dp->train_set));
+ }
if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
encoder->set_signal_levels(encoder, crtc_state);
@@ -487,15 +582,55 @@ intel_dp_update_link_train(struct intel_dp *intel_dp,
return ret == crtc_state->lane_count;
}
+/* 128b/132b */
+static bool intel_dp_lane_max_tx_ffe_reached(u8 train_set_lane)
+{
+ return (train_set_lane & DP_TX_FFE_PRESET_VALUE_MASK) ==
+ DP_TX_FFE_PRESET_VALUE_MASK;
+}
+
+/*
+ * 8b/10b
+ *
+ * FIXME: The DP spec is very confusing here, also the Link CTS spec seems to
+ * have self contradicting tests around this area.
+ *
+ * In lieu of better ideas let's just stop when we've reached the max supported
+ * vswing with its max pre-emphasis, which is either 2+1 or 3+0 depending on
+ * whether vswing level 3 is supported or not.
+ */
+static bool intel_dp_lane_max_vswing_reached(u8 train_set_lane)
+{
+ u8 v = (train_set_lane & DP_TRAIN_VOLTAGE_SWING_MASK) >>
+ DP_TRAIN_VOLTAGE_SWING_SHIFT;
+ u8 p = (train_set_lane & DP_TRAIN_PRE_EMPHASIS_MASK) >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT;
+
+ if ((train_set_lane & DP_TRAIN_MAX_SWING_REACHED) == 0)
+ return false;
+
+ if (v + p != 3)
+ return false;
+
+ return true;
+}
+
static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
int lane;
- for (lane = 0; lane < crtc_state->lane_count; lane++)
- if ((intel_dp->train_set[lane] &
- DP_TRAIN_MAX_SWING_REACHED) == 0)
- return false;
+ for (lane = 0; lane < crtc_state->lane_count; lane++) {
+ u8 train_set_lane = intel_dp->train_set[lane];
+
+ if (intel_dp_is_uhbr(crtc_state)) {
+ if (!intel_dp_lane_max_tx_ffe_reached(train_set_lane))
+ return false;
+ } else {
+ if (!intel_dp_lane_max_vswing_reached(train_set_lane))
+ return false;
+ }
+ }
return true;
}
@@ -508,7 +643,8 @@ static bool
intel_dp_prepare_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
u8 link_config[2];
u8 link_bw, rate_select;
@@ -520,10 +656,12 @@ intel_dp_prepare_link_train(struct intel_dp *intel_dp,
if (link_bw)
drm_dbg_kms(&i915->drm,
- "Using LINK_BW_SET value %02x\n", link_bw);
+ "[ENCODER:%d:%s] Using LINK_BW_SET value %02x\n",
+ encoder->base.base.id, encoder->base.name, link_bw);
else
drm_dbg_kms(&i915->drm,
- "Using LINK_RATE_SET value %02x\n", rate_select);
+ "[ENCODER:%d:%s] Using LINK_RATE_SET value %02x\n",
+ encoder->base.base.id, encoder->base.name, rate_select);
/* Write the link configuration data */
link_config[0] = link_bw;
@@ -554,17 +692,24 @@ static void intel_dp_link_training_clock_recovery_delay(struct intel_dp *intel_d
drm_dp_lttpr_link_train_clock_recovery_delay();
}
-static bool intel_dp_adjust_request_changed(int lane_count,
+static bool intel_dp_adjust_request_changed(const struct intel_crtc_state *crtc_state,
const u8 old_link_status[DP_LINK_STATUS_SIZE],
const u8 new_link_status[DP_LINK_STATUS_SIZE])
{
int lane;
- for (lane = 0; lane < lane_count; lane++) {
- u8 old = drm_dp_get_adjust_request_voltage(old_link_status, lane) |
- drm_dp_get_adjust_request_pre_emphasis(old_link_status, lane);
- u8 new = drm_dp_get_adjust_request_voltage(new_link_status, lane) |
- drm_dp_get_adjust_request_pre_emphasis(new_link_status, lane);
+ for (lane = 0; lane < crtc_state->lane_count; lane++) {
+ u8 old, new;
+
+ if (intel_dp_is_uhbr(crtc_state)) {
+ old = drm_dp_get_adjust_tx_ffe_preset(old_link_status, lane);
+ new = drm_dp_get_adjust_tx_ffe_preset(new_link_status, lane);
+ } else {
+ old = drm_dp_get_adjust_request_voltage(old_link_status, lane) |
+ drm_dp_get_adjust_request_pre_emphasis(old_link_status, lane);
+ new = drm_dp_get_adjust_request_voltage(new_link_status, lane) |
+ drm_dp_get_adjust_request_pre_emphasis(new_link_status, lane);
+ }
if (old != new)
return true;
@@ -573,6 +718,22 @@ static bool intel_dp_adjust_request_changed(int lane_count,
return false;
}
+static void
+intel_dp_dump_link_status(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy,
+ const u8 link_status[DP_LINK_STATUS_SIZE])
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ char phy_name[10];
+
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s][%s] ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x\n",
+ encoder->base.base.id, encoder->base.name,
+ intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
+ link_status[0], link_status[1], link_status[2],
+ link_status[3], link_status[4], link_status[5]);
+}
+
/*
* Perform the link training clock recovery phase on the given DP PHY using
* training pattern 1.
@@ -582,16 +743,22 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
u8 old_link_status[DP_LINK_STATUS_SIZE] = {};
int voltage_tries, cr_tries, max_cr_tries;
+ u8 link_status[DP_LINK_STATUS_SIZE];
bool max_vswing_reached = false;
+ char phy_name[10];
+
+ intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name));
/* clock recovery */
if (!intel_dp_reset_link_train(intel_dp, crtc_state, dp_phy,
DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE)) {
- drm_err(&i915->drm, "failed to enable link training\n");
+ drm_err(&i915->drm, "[ENCODER:%d:%s][%s] Failed to enable link training\n",
+ encoder->base.base.id, encoder->base.name, phy_name);
return false;
}
@@ -610,29 +777,35 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
voltage_tries = 1;
for (cr_tries = 0; cr_tries < max_cr_tries; ++cr_tries) {
- u8 link_status[DP_LINK_STATUS_SIZE];
-
intel_dp_link_training_clock_recovery_delay(intel_dp, dp_phy);
if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
link_status) < 0) {
- drm_err(&i915->drm, "failed to get link status\n");
+ drm_err(&i915->drm, "[ENCODER:%d:%s][%s] Failed to get link status\n",
+ encoder->base.base.id, encoder->base.name, phy_name);
return false;
}
if (drm_dp_clock_recovery_ok(link_status, crtc_state->lane_count)) {
- drm_dbg_kms(&i915->drm, "clock recovery OK\n");
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s][%s] Clock recovery OK\n",
+ encoder->base.base.id, encoder->base.name, phy_name);
return true;
}
if (voltage_tries == 5) {
+ intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
drm_dbg_kms(&i915->drm,
- "Same voltage tried 5 times\n");
+ "[ENCODER:%d:%s][%s] Same voltage tried 5 times\n",
+ encoder->base.base.id, encoder->base.name, phy_name);
return false;
}
if (max_vswing_reached) {
- drm_dbg_kms(&i915->drm, "Max Voltage Swing reached\n");
+ intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s][%s] Max Voltage Swing reached\n",
+ encoder->base.base.id, encoder->base.name, phy_name);
return false;
}
@@ -641,12 +814,12 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
link_status);
if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
drm_err(&i915->drm,
- "failed to update link training\n");
+ "[ENCODER:%d:%s][%s] Failed to update link training\n",
+ encoder->base.base.id, encoder->base.name, phy_name);
return false;
}
- if (!intel_dp_adjust_request_changed(crtc_state->lane_count,
- old_link_status, link_status))
+ if (!intel_dp_adjust_request_changed(crtc_state, old_link_status, link_status))
++voltage_tries;
else
voltage_tries = 1;
@@ -655,10 +828,13 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
if (intel_dp_link_max_vswing_reached(intel_dp, crtc_state))
max_vswing_reached = true;
-
}
+
+ intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
drm_err(&i915->drm,
- "Failed clock recovery %d times, giving up!\n", max_cr_tries);
+ "[ENCODER:%d:%s][%s] Failed clock recovery %d times, giving up!\n",
+ encoder->base.base.id, encoder->base.name, phy_name, max_cr_tries);
+
return false;
}
@@ -742,11 +918,15 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
int tries;
u32 training_pattern;
u8 link_status[DP_LINK_STATUS_SIZE];
bool channel_eq = false;
+ char phy_name[10];
+
+ intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name));
training_pattern = intel_dp_training_pattern(intel_dp, crtc_state, dp_phy);
/* Scrambling is disabled for TPS2/3 and enabled for TPS4 */
@@ -756,7 +936,10 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
/* channel equalization */
if (!intel_dp_set_link_train(intel_dp, crtc_state, dp_phy,
training_pattern)) {
- drm_err(&i915->drm, "failed to start channel equalization\n");
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s][%s] Failed to start channel equalization\n",
+ encoder->base.base.id, encoder->base.name,
+ phy_name);
return false;
}
@@ -766,25 +949,28 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
link_status) < 0) {
drm_err(&i915->drm,
- "failed to get link status\n");
+ "[ENCODER:%d:%s][%s] Failed to get link status\n",
+ encoder->base.base.id, encoder->base.name, phy_name);
break;
}
/* Make sure clock is still ok */
if (!drm_dp_clock_recovery_ok(link_status,
crtc_state->lane_count)) {
- intel_dp_dump_link_status(&i915->drm, link_status);
+ intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
drm_dbg_kms(&i915->drm,
- "Clock recovery check failed, cannot "
- "continue channel equalization\n");
+ "[ENCODER:%d:%s][%s] Clock recovery check failed, cannot "
+ "continue channel equalization\n",
+ encoder->base.base.id, encoder->base.name, phy_name);
break;
}
if (drm_dp_channel_eq_ok(link_status,
crtc_state->lane_count)) {
channel_eq = true;
- drm_dbg_kms(&i915->drm, "Channel EQ done. DP Training "
- "successful\n");
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s][%s] Channel EQ done. DP Training successful\n",
+ encoder->base.base.id, encoder->base.name, phy_name);
break;
}
@@ -793,16 +979,18 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
link_status);
if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
drm_err(&i915->drm,
- "failed to update link training\n");
+ "[ENCODER:%d:%s][%s] Failed to update link training\n",
+ encoder->base.base.id, encoder->base.name, phy_name);
break;
}
}
/* Try 5 times, else fail and try at lower BW */
if (tries == 5) {
- intel_dp_dump_link_status(&i915->drm, link_status);
+ intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
drm_dbg_kms(&i915->drm,
- "Channel equalization failed 5 times\n");
+ "[ENCODER:%d:%s][%s] Channel equalization failed 5 times\n",
+ encoder->base.base.id, encoder->base.name, phy_name);
}
return channel_eq;
@@ -839,7 +1027,7 @@ void intel_dp_stop_link_train(struct intel_dp *intel_dp,
intel_dp->link_trained = true;
intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX);
- intel_dp_program_link_training_pattern(intel_dp, crtc_state,
+ intel_dp_program_link_training_pattern(intel_dp, crtc_state, DP_PHY_DPRX,
DP_TRAINING_PATTERN_DISABLE);
}
@@ -848,7 +1036,8 @@ intel_dp_link_train_phy(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy)
{
- struct intel_connector *intel_connector = intel_dp->attached_connector;
+ struct intel_connector *connector = intel_dp->attached_connector;
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
char phy_name[10];
bool ret = false;
@@ -862,12 +1051,12 @@ intel_dp_link_train_phy(struct intel_dp *intel_dp,
out:
drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
- "[CONNECTOR:%d:%s] Link Training %s at link rate = %d, lane count = %d, at %s\n",
- intel_connector->base.base.id,
- intel_connector->base.name,
+ "[CONNECTOR:%d:%s][ENCODER:%d:%s][%s] Link Training %s at link rate = %d, lane count = %d\n",
+ connector->base.base.id, connector->base.name,
+ encoder->base.base.id, encoder->base.name,
+ intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
ret ? "passed" : "failed",
- crtc_state->port_clock, crtc_state->lane_count,
- intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)));
+ crtc_state->port_clock, crtc_state->lane_count);
return ret;
}
@@ -876,10 +1065,13 @@ static void intel_dp_schedule_fallback_link_training(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
struct intel_connector *intel_connector = intel_dp->attached_connector;
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
if (intel_dp->hobl_active) {
drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
- "Link Training failed with HOBL active, not enabling it from now on");
+ "[ENCODER:%d:%s] Link Training failed with HOBL active, "
+ "not enabling it from now on",
+ encoder->base.base.id, encoder->base.name);
intel_dp->hobl_failed = true;
} else if (intel_dp_get_link_train_fallback_values(intel_dp,
crtc_state->port_clock,
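
The 8b/10b "max vswing reached" rule introduced above (give up once a lane reports MAX_SWING_REACHED with vswing + pre-emphasis summing to 3, i.e. 2+1 or 3+0) can be exercised in isolation; TRAINING_LANEx_SET field values are assumed from drm_dp_helper.h for this standalone sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed TRAINING_LANEx_SET field layout (see drm_dp_helper.h). */
#define DP_TRAIN_VOLTAGE_SWING_MASK   0x3
#define DP_TRAIN_VOLTAGE_SWING_SHIFT  0
#define DP_TRAIN_MAX_SWING_REACHED    (1 << 2)
#define DP_TRAIN_PRE_EMPHASIS_MASK    (3 << 3)
#define DP_TRAIN_PRE_EMPHASIS_SHIFT   3

/* Same rule as intel_dp_lane_max_vswing_reached() in the hunk above. */
static bool lane_max_vswing_reached(uint8_t train_set_lane)
{
	uint8_t v = (train_set_lane & DP_TRAIN_VOLTAGE_SWING_MASK) >>
		    DP_TRAIN_VOLTAGE_SWING_SHIFT;
	uint8_t p = (train_set_lane & DP_TRAIN_PRE_EMPHASIS_MASK) >>
		    DP_TRAIN_PRE_EMPHASIS_SHIFT;

	if (!(train_set_lane & DP_TRAIN_MAX_SWING_REACHED))
		return false;

	return v + p == 3;	/* 2+1 or 3+0 */
}

int main(void)
{
	/* vswing 2 + pre-emphasis 1, MAX_SWING_REACHED set -> reached */
	uint8_t lane_a = 2 | DP_TRAIN_MAX_SWING_REACHED | (1 << DP_TRAIN_PRE_EMPHASIS_SHIFT);
	/* vswing 1 + pre-emphasis 1, MAX_SWING_REACHED set -> not reached */
	uint8_t lane_b = 1 | DP_TRAIN_MAX_SWING_REACHED | (1 << DP_TRAIN_PRE_EMPHASIS_SHIFT);

	printf("%d %d\n", lane_max_vswing_reached(lane_a),
	       lane_max_vswing_reached(lane_b));	/* 1 0 */
	return 0;
}
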
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
index 9d24d594368c..6a3a7b37349a 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
@@ -19,6 +19,7 @@ void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
const u8 link_status[DP_LINK_STATUS_SIZE]);
void intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy,
u8 dp_train_pat);
void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index fd0a31bc3dcd..0de0b4ff4d73 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -977,24 +977,31 @@ intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
dig_port->max_lanes,
max_source_rate,
conn_base_id);
- if (ret)
+ if (ret) {
+ intel_dp->mst_mgr.cbs = NULL;
return ret;
-
- intel_dp->can_mst = true;
+ }
return 0;
}
+bool intel_dp_mst_source_support(struct intel_dp *intel_dp)
+{
+ return intel_dp->mst_mgr.cbs;
+}
+
void
intel_dp_mst_encoder_cleanup(struct intel_digital_port *dig_port)
{
struct intel_dp *intel_dp = &dig_port->dp;
- if (!intel_dp->can_mst)
+ if (!intel_dp_mst_source_support(intel_dp))
return;
drm_dp_mst_topology_mgr_destroy(&intel_dp->mst_mgr);
/* encoders will get killed by normal cleanup */
+
+ intel_dp->mst_mgr.cbs = NULL;
}
bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state)
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h
index 6afda4e86b3c..f7301de6cdfb 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h
@@ -8,13 +8,15 @@
#include <linux/types.h>
-struct intel_digital_port;
struct intel_crtc_state;
+struct intel_digital_port;
+struct intel_dp;
int intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_id);
void intel_dp_mst_encoder_cleanup(struct intel_digital_port *dig_port);
int intel_dp_mst_encoder_active_links(struct intel_digital_port *dig_port);
bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state);
bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state);
+bool intel_dp_mst_source_support(struct intel_dp *intel_dp);
#endif /* __INTEL_DP_MST_H__ */
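
With the can_mst flag removed, MST source support is now derived from whether the topology manager callbacks were registered. A hedged, self-contained sketch of that pattern; the types below are illustrative stand-ins, not the i915 structures:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-ins for the topology manager and its callbacks. */
struct mst_callbacks { void (*hotplug)(void); };
struct mst_manager   { const struct mst_callbacks *cbs; };
struct dp_port       { struct mst_manager mst_mgr; };

static const struct mst_callbacks cbs = { .hotplug = NULL };

/* Same idea as intel_dp_mst_source_support(): non-NULL cbs == supported. */
static bool mst_source_support(const struct dp_port *dp)
{
	return dp->mst_mgr.cbs != NULL;
}

static int mst_encoder_init(struct dp_port *dp, bool init_ok)
{
	dp->mst_mgr.cbs = &cbs;		/* registered by the init path */
	if (!init_ok) {
		dp->mst_mgr.cbs = NULL;	/* a failed init must clear it again */
		return -1;
	}
	return 0;
}

int main(void)
{
	struct dp_port dp = { .mst_mgr = { .cbs = NULL } };

	mst_encoder_init(&dp, true);
	printf("MST source support: %d\n", mst_source_support(&dp));	/* 1 */
	return 0;
}
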
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index 5a2eccb12fe4..44edeb2e55c0 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -21,14 +21,13 @@
* DEALINGS IN THE SOFTWARE.
*/
-#include "display/intel_dp.h"
-
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_dp.h"
#include "intel_dpio_phy.h"
-#include "intel_sideband.h"
+#include "vlv_sideband.h"
/**
* DOC: DPIO
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index b84ed4a1bd95..04a7af8340ca 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -13,8 +13,8 @@
#include "intel_lvds.h"
#include "intel_panel.h"
#include "intel_pps.h"
-#include "intel_sideband.h"
#include "intel_snps_phy.h"
+#include "vlv_sideband.h"
struct intel_limit {
struct {
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index c2a2cd1f84dc..f241bedb8597 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -31,7 +31,6 @@
#include <linux/pinctrl/machine.h>
#include <linux/slab.h>
-#include <asm/intel-mid.h>
#include <asm/unaligned.h>
#include <drm/drm_crtc.h>
@@ -42,7 +41,7 @@
#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
-#include "intel_sideband.h"
+#include "vlv_sideband.h"
#define MIPI_TRANSFER_MODE_SHIFT 0
#define MIPI_VIRTUAL_CHANNEL_SHIFT 1
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
new file mode 100644
index 000000000000..3f77f3013584
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -0,0 +1,274 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+/**
+ * DOC: display pinning helpers
+ */
+
+#include "intel_display_types.h"
+#include "intel_fb_pin.h"
+#include "intel_fb.h"
+
+#include "intel_dpt.h"
+
+#include "gem/i915_gem_object.h"
+
+static struct i915_vma *
+intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
+ const struct i915_ggtt_view *view,
+ bool uses_fence,
+ unsigned long *out_flags,
+ struct i915_address_space *vm)
+{
+ struct drm_device *dev = fb->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct i915_vma *vma;
+ u32 alignment;
+ int ret;
+
+ if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
+ return ERR_PTR(-EINVAL);
+
+ alignment = 4096 * 512;
+
+ atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
+
+ ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
+ if (ret) {
+ vma = ERR_PTR(ret);
+ goto err;
+ }
+
+ vma = i915_vma_instance(obj, vm, view);
+ if (IS_ERR(vma))
+ goto err;
+
+ if (i915_vma_misplaced(vma, 0, alignment, 0)) {
+ ret = i915_vma_unbind(vma);
+ if (ret) {
+ vma = ERR_PTR(ret);
+ goto err;
+ }
+ }
+
+ ret = i915_vma_pin(vma, 0, alignment, PIN_GLOBAL);
+ if (ret) {
+ vma = ERR_PTR(ret);
+ goto err;
+ }
+
+ vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
+
+ i915_gem_object_flush_if_display(obj);
+
+ i915_vma_get(vma);
+err:
+ atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
+
+ return vma;
+}
+
+struct i915_vma *
+intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
+ bool phys_cursor,
+ const struct i915_ggtt_view *view,
+ bool uses_fence,
+ unsigned long *out_flags)
+{
+ struct drm_device *dev = fb->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ intel_wakeref_t wakeref;
+ struct i915_gem_ww_ctx ww;
+ struct i915_vma *vma;
+ unsigned int pinctl;
+ u32 alignment;
+ int ret;
+
+ if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj)))
+ return ERR_PTR(-EINVAL);
+
+ if (phys_cursor)
+ alignment = intel_cursor_alignment(dev_priv);
+ else
+ alignment = intel_surf_alignment(fb, 0);
+ if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment)))
+ return ERR_PTR(-EINVAL);
+
+ /* Note that the w/a also requires 64 PTE of padding following the
+ * bo. We currently fill all unused PTE with the shadow page and so
+ * we should always have valid PTE following the scanout preventing
+ * the VT-d warning.
+ */
+ if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
+ alignment = 256 * 1024;
+
+ /*
+ * Global gtt pte registers are special registers which actually forward
+ * writes to a chunk of system memory. Which means that there is no risk
+ * that the register values disappear as soon as we call
+ * intel_runtime_pm_put(), so it is correct to wrap only the
+ * pin/unpin/fence and not more.
+ */
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
+ atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
+
+ /*
+ * Valleyview is definitely limited to scanning out the first
+ * 512MiB. Lets presume this behaviour was inherited from the
+ * g4x display engine and that all earlier gen are similarly
+ * limited. Testing suggests that it is a little more
+ * complicated than this. For example, Cherryview appears quite
+ * happy to scanout from anywhere within its global aperture.
+ */
+ pinctl = 0;
+ if (HAS_GMCH(dev_priv))
+ pinctl |= PIN_MAPPABLE;
+
+ i915_gem_ww_ctx_init(&ww, true);
+retry:
+ ret = i915_gem_object_lock(obj, &ww);
+ if (!ret && phys_cursor)
+ ret = i915_gem_object_attach_phys(obj, alignment);
+ else if (!ret && HAS_LMEM(dev_priv))
+ ret = i915_gem_object_migrate(obj, &ww, INTEL_REGION_LMEM);
+ /* TODO: Do we need to sync when migration becomes async? */
+ if (!ret)
+ ret = i915_gem_object_pin_pages(obj);
+ if (ret)
+ goto err;
+
+ if (!ret) {
+ vma = i915_gem_object_pin_to_display_plane(obj, &ww, alignment,
+ view, pinctl);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err_unpin;
+ }
+ }
+
+ if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
+ /*
+ * Install a fence for tiled scan-out. Pre-i965 always needs a
+ * fence, whereas 965+ only requires a fence if using
+ * framebuffer compression. For simplicity, we always, when
+ * possible, install a fence as the cost is not that onerous.
+ *
+ * If we fail to fence the tiled scanout, then either the
+ * modeset will reject the change (which is highly unlikely as
+ * the affected systems, all but one, do not have unmappable
+ * space) or we will not be able to enable full powersaving
+ * techniques (also likely not to apply due to various limits
+ * FBC and the like impose on the size of the buffer, which
+ * presumably we violated anyway with this unmappable buffer).
+ * Anyway, it is presumably better to stumble onwards with
+ * something and try to run the system in a "less than optimal"
+ * mode that matches the user configuration.
+ */
+ ret = i915_vma_pin_fence(vma);
+ if (ret != 0 && DISPLAY_VER(dev_priv) < 4) {
+ i915_vma_unpin(vma);
+ goto err_unpin;
+ }
+ ret = 0;
+
+ if (vma->fence)
+ *out_flags |= PLANE_HAS_FENCE;
+ }
+
+ i915_vma_get(vma);
+
+err_unpin:
+ i915_gem_object_unpin_pages(obj);
+err:
+ if (ret == -EDEADLK) {
+ ret = i915_gem_ww_ctx_backoff(&ww);
+ if (!ret)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+ if (ret)
+ vma = ERR_PTR(ret);
+
+ atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ return vma;
+}
+
+void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
+{
+ if (flags & PLANE_HAS_FENCE)
+ i915_vma_unpin_fence(vma);
+ i915_vma_unpin(vma);
+ i915_vma_put(vma);
+}
+
+int intel_plane_pin_fb(struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct i915_vma *vma;
+ bool phys_cursor =
+ plane->id == PLANE_CURSOR &&
+ INTEL_INFO(dev_priv)->display.cursor_needs_physical;
+
+ if (!intel_fb_uses_dpt(fb)) {
+ vma = intel_pin_and_fence_fb_obj(fb, phys_cursor,
+ &plane_state->view.gtt,
+ intel_plane_uses_fence(plane_state),
+ &plane_state->flags);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ plane_state->ggtt_vma = vma;
+ } else {
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+
+ vma = intel_dpt_pin(intel_fb->dpt_vm);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ plane_state->ggtt_vma = vma;
+
+ vma = intel_pin_fb_obj_dpt(fb, &plane_state->view.gtt, false,
+ &plane_state->flags, intel_fb->dpt_vm);
+ if (IS_ERR(vma)) {
+ intel_dpt_unpin(intel_fb->dpt_vm);
+ plane_state->ggtt_vma = NULL;
+ return PTR_ERR(vma);
+ }
+
+ plane_state->dpt_vma = vma;
+
+ WARN_ON(plane_state->ggtt_vma == plane_state->dpt_vma);
+ }
+
+ return 0;
+}
+
+void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
+{
+ struct drm_framebuffer *fb = old_plane_state->hw.fb;
+ struct i915_vma *vma;
+
+ if (!intel_fb_uses_dpt(fb)) {
+ vma = fetch_and_zero(&old_plane_state->ggtt_vma);
+ if (vma)
+ intel_unpin_fb_vma(vma, old_plane_state->flags);
+ } else {
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+
+ vma = fetch_and_zero(&old_plane_state->dpt_vma);
+ if (vma)
+ intel_unpin_fb_vma(vma, old_plane_state->flags);
+
+ vma = fetch_and_zero(&old_plane_state->ggtt_vma);
+ if (vma)
+ intel_dpt_unpin(intel_fb->dpt_vm);
+ }
+}
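
For DPT framebuffers the new helpers pin in two steps (the DPT VM into the GGTT, then the framebuffer into the DPT) and unwind in reverse order when the second step fails. A self-contained sketch of that ordering, using hypothetical stand-in types rather than the i915 ones:

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins; only the pin/unwind ordering mirrors the code above. */
struct vma { const char *name; };

static struct vma ggtt_vma = { "dpt-in-ggtt" };
static struct vma dpt_vma  = { "fb-in-dpt"  };

static struct vma *dpt_pin(void)              { return &ggtt_vma; }
static void dpt_unpin(void)                   { printf("unpin %s\n", ggtt_vma.name); }
static struct vma *pin_fb_into_dpt(int fail)  { return fail ? NULL : &dpt_vma; }

static int plane_pin_fb_dpt(int fail_second_step)
{
	struct vma *g = dpt_pin();				/* step 1: DPT VM into GGTT */
	struct vma *d = pin_fb_into_dpt(fail_second_step);	/* step 2: fb into DPT */

	if (!d) {
		dpt_unpin();	/* unwind step 1 on failure */
		return -1;
	}
	printf("pinned %s then %s\n", g->name, d->name);
	return 0;
}

int main(void)
{
	plane_pin_fb_dpt(0);	/* pinned dpt-in-ggtt then fb-in-dpt */
	plane_pin_fb_dpt(1);	/* unpin dpt-in-ggtt */
	return 0;
}
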
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.h b/drivers/gpu/drm/i915/display/intel_fb_pin.h
new file mode 100644
index 000000000000..e4fcd0218d9d
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __INTEL_FB_PIN_H__
+#define __INTEL_FB_PIN_H__
+
+#include <linux/types.h>
+
+struct drm_framebuffer;
+struct i915_vma;
+struct intel_plane_state;
+struct i915_ggtt_view;
+
+struct i915_vma *
+intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
+ bool phys_cursor,
+ const struct i915_ggtt_view *view,
+ bool uses_fence,
+ unsigned long *out_flags);
+
+void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags);
+
+int intel_plane_pin_fb(struct intel_plane_state *plane_state);
+void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 53484267b2a4..adc3a81be9f7 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -46,6 +46,7 @@
#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_fb.h"
+#include "intel_fb_pin.h"
#include "intel_fbdev.h"
#include "intel_frontbuffer.h"
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
index 51d6f810e69b..dd2cf0c59921 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.c
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -8,7 +8,7 @@
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
-#include "intel_sideband.h"
+#include "intel_sbi.h"
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 9b9fd9d13043..4509fe7438e8 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -17,12 +17,12 @@
#include "i915_drv.h"
#include "i915_reg.h"
-#include "intel_display_power.h"
+#include "intel_connector.h"
#include "intel_de.h"
+#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_hdcp.h"
-#include "intel_sideband.h"
-#include "intel_connector.h"
+#include "intel_pcode.h"
#define KEY_LOAD_TRIES 5
#define HDCP2_LC_RETRY_CNT 3
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index 3c1cec953b42..955f6d07b0e1 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -215,7 +215,7 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
static void intel_hpd_irq_setup(struct drm_i915_private *i915)
{
- if (i915->display_irqs_enabled && i915->hotplug_funcs->hpd_irq_setup)
+ if (i915->display_irqs_enabled && i915->hotplug_funcs)
i915->hotplug_funcs->hpd_irq_setup(i915);
}
diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c
new file mode 100644
index 000000000000..dcd698a02da2
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include "intel_display_types.h"
+#include "intel_plane_initial.h"
+#include "intel_atomic_plane.h"
+#include "intel_display.h"
+#include "intel_fb.h"
+
+static bool
+intel_reuse_initial_plane_obj(struct drm_i915_private *i915,
+ const struct intel_initial_plane_config *plane_config,
+ struct drm_framebuffer **fb,
+ struct i915_vma **vma)
+{
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_plane *plane =
+ to_intel_plane(crtc->base.primary);
+ struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+
+ if (!crtc_state->uapi.active)
+ continue;
+
+ if (!plane_state->ggtt_vma)
+ continue;
+
+ if (intel_plane_ggtt_offset(plane_state) == plane_config->base) {
+ *fb = plane_state->hw.fb;
+ *vma = plane_state->ggtt_vma;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static struct i915_vma *
+initial_plane_vma(struct drm_i915_private *i915,
+ struct intel_initial_plane_config *plane_config)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u32 base, size;
+
+ if (plane_config->size == 0)
+ return NULL;
+
+ base = round_down(plane_config->base,
+ I915_GTT_MIN_ALIGNMENT);
+ size = round_up(plane_config->base + plane_config->size,
+ I915_GTT_MIN_ALIGNMENT);
+ size -= base;
+
+ /*
+ * If the FB is too big, just don't use it since fbdev is not very
+ * important and we should probably use that space with FBC or other
+ * features.
+ */
+ if (IS_ENABLED(CONFIG_FRAMEBUFFER_CONSOLE) &&
+ size * 2 > i915->stolen_usable_size)
+ return NULL;
+
+ obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size);
+ if (IS_ERR(obj))
+ return NULL;
+
+ /*
+ * Mark it WT ahead of time to avoid changing the
+ * cache_level during fbdev initialization. The
+ * unbind there would get stuck waiting for rcu.
+ */
+ i915_gem_object_set_cache_coherency(obj, HAS_WT(i915) ?
+ I915_CACHE_WT : I915_CACHE_NONE);
+
+ switch (plane_config->tiling) {
+ case I915_TILING_NONE:
+ break;
+ case I915_TILING_X:
+ case I915_TILING_Y:
+ obj->tiling_and_stride =
+ plane_config->fb->base.pitches[0] |
+ plane_config->tiling;
+ break;
+ default:
+ MISSING_CASE(plane_config->tiling);
+ goto err_obj;
+ }
+
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ if (IS_ERR(vma))
+ goto err_obj;
+
+ if (i915_ggtt_pin(vma, NULL, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
+ goto err_obj;
+
+ if (i915_gem_object_is_tiled(obj) &&
+ !i915_vma_is_map_and_fenceable(vma))
+ goto err_obj;
+
+ return vma;
+
+err_obj:
+ i915_gem_object_put(obj);
+ return NULL;
+}
+
+static bool
+intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
+ struct intel_initial_plane_config *plane_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+ struct drm_framebuffer *fb = &plane_config->fb->base;
+ struct i915_vma *vma;
+
+ switch (fb->modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ case I915_FORMAT_MOD_Y_TILED:
+ break;
+ default:
+ drm_dbg(&dev_priv->drm,
+ "Unsupported modifier for initial FB: 0x%llx\n",
+ fb->modifier);
+ return false;
+ }
+
+ vma = initial_plane_vma(dev_priv, plane_config);
+ if (!vma)
+ return false;
+
+ mode_cmd.pixel_format = fb->format->format;
+ mode_cmd.width = fb->width;
+ mode_cmd.height = fb->height;
+ mode_cmd.pitches[0] = fb->pitches[0];
+ mode_cmd.modifier[0] = fb->modifier;
+ mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
+
+ if (intel_framebuffer_init(to_intel_framebuffer(fb),
+ vma->obj, &mode_cmd)) {
+ drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
+ goto err_vma;
+ }
+
+ plane_config->vma = vma;
+ return true;
+
+err_vma:
+ i915_vma_put(vma);
+ return false;
+}
+
+static void
+intel_find_initial_plane_obj(struct intel_crtc *crtc,
+ struct intel_initial_plane_config *plane_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_plane *plane =
+ to_intel_plane(crtc->base.primary);
+ struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ struct drm_framebuffer *fb;
+ struct i915_vma *vma;
+
+ /*
+ * TODO:
+ * Disable planes if get_initial_plane_config() failed.
+ * Make sure things work if the surface base is not page aligned.
+ */
+ if (!plane_config->fb)
+ return;
+
+ if (intel_alloc_initial_plane_obj(crtc, plane_config)) {
+ fb = &plane_config->fb->base;
+ vma = plane_config->vma;
+ goto valid_fb;
+ }
+
+ /*
+ * Failed to alloc the obj, check to see if we should share
+ * an fb with another CRTC instead
+ */
+ if (intel_reuse_initial_plane_obj(dev_priv, plane_config, &fb, &vma))
+ goto valid_fb;
+
+ /*
+ * We've failed to reconstruct the BIOS FB. Current display state
+ * indicates that the primary plane is visible, but has a NULL FB,
+ * which will lead to problems later if we don't fix it up. The
+ * simplest solution is to just disable the primary plane now and
+ * pretend the BIOS never had it enabled.
+ */
+ intel_plane_disable_noatomic(crtc, plane);
+ if (crtc_state->bigjoiner) {
+ struct intel_crtc *slave =
+ crtc_state->bigjoiner_linked_crtc;
+ intel_plane_disable_noatomic(slave, to_intel_plane(slave->base.primary));
+ }
+
+ return;
+
+valid_fb:
+ plane_state->uapi.rotation = plane_config->rotation;
+ intel_fb_fill_view(to_intel_framebuffer(fb),
+ plane_state->uapi.rotation, &plane_state->view);
+
+ __i915_vma_pin(vma);
+ plane_state->ggtt_vma = i915_vma_get(vma);
+ if (intel_plane_uses_fence(plane_state) &&
+ i915_vma_pin_fence(vma) == 0 && vma->fence)
+ plane_state->flags |= PLANE_HAS_FENCE;
+
+ plane_state->uapi.src_x = 0;
+ plane_state->uapi.src_y = 0;
+ plane_state->uapi.src_w = fb->width << 16;
+ plane_state->uapi.src_h = fb->height << 16;
+
+ plane_state->uapi.crtc_x = 0;
+ plane_state->uapi.crtc_y = 0;
+ plane_state->uapi.crtc_w = fb->width;
+ plane_state->uapi.crtc_h = fb->height;
+
+ if (plane_config->tiling)
+ dev_priv->preserve_bios_swizzle = true;
+
+ plane_state->uapi.fb = fb;
+ drm_framebuffer_get(fb);
+
+ plane_state->uapi.crtc = &crtc->base;
+ intel_plane_copy_uapi_to_hw_state(plane_state, plane_state, crtc);
+
+ atomic_or(plane->frontbuffer_bit, &to_intel_frontbuffer(fb)->bits);
+}
+
+static void plane_config_fini(struct intel_initial_plane_config *plane_config)
+{
+ if (plane_config->fb) {
+ struct drm_framebuffer *fb = &plane_config->fb->base;
+
+ /* We may only have the stub and not a full framebuffer */
+ if (drm_framebuffer_read_refcount(fb))
+ drm_framebuffer_put(fb);
+ else
+ kfree(fb);
+ }
+
+ if (plane_config->vma)
+ i915_vma_put(plane_config->vma);
+}
+
+void intel_crtc_initial_plane_config(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_initial_plane_config plane_config = {};
+
+ /*
+ * Note that reserving the BIOS fb up front prevents us
+ * from stuffing other stolen allocations like the ring
+ * on top. This prevents some ugliness at boot time, and
+ * can even allow for smooth boot transitions if the BIOS
+ * fb is large enough for the active pipe configuration.
+ */
+ dev_priv->display->get_initial_plane_config(crtc, &plane_config);
+
+ /*
+ * If the fb is shared between multiple heads, we'll
+ * just get the first one.
+ */
+ intel_find_initial_plane_obj(crtc, &plane_config);
+
+ plane_config_fini(&plane_config);
+}
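
The stolen-memory reservation in initial_plane_vma() above rounds the BIOS-programmed base down and the end of the framebuffer up to the GTT minimum alignment before subtracting to get the reserved size. A quick arithmetic check, assuming a 4 KiB I915_GTT_MIN_ALIGNMENT and hypothetical base/size values:

#include <stdint.h>
#include <stdio.h>

/* Assumed 4 KiB minimum GTT alignment, purely for illustration. */
#define GTT_MIN_ALIGNMENT 4096u

#define round_down(x, a) ((x) & ~((a) - 1))
#define round_up(x, a)   round_down((x) + (a) - 1, (a))

int main(void)
{
	uint32_t fb_base = 0x00101200;	/* hypothetical, not page aligned */
	uint32_t fb_size = 0x003fe000;	/* hypothetical framebuffer size  */

	/* Same rounding as the plane_config->base/size handling above. */
	uint32_t base = round_down(fb_base, GTT_MIN_ALIGNMENT);
	uint32_t size = round_up(fb_base + fb_size, GTT_MIN_ALIGNMENT) - base;

	printf("base=0x%08x size=0x%08x\n", base, size);
	/* -> base=0x00101000 size=0x003ff000, covering the whole BIOS fb */
	return 0;
}
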
diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.h b/drivers/gpu/drm/i915/display/intel_plane_initial.h
new file mode 100644
index 000000000000..c7e35ab3182b
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_plane_initial.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __INTEL_PLANE_INITIAL_H__
+#define __INTEL_PLANE_INITIAL_H__
+
+struct intel_crtc;
+
+void intel_crtc_initial_plane_config(struct intel_crtc *crtc);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
index b18f08c851dc..5e20f340730f 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
@@ -68,9 +68,9 @@ void intel_snps_phy_set_signal_levels(struct intel_encoder *encoder,
for (ln = 0; ln < 4; ln++) {
u32 val = 0;
- val |= REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, trans->entries[level].snps.snps_vswing);
- val |= REG_FIELD_PREP(SNPS_PHY_TX_EQ_PRE, trans->entries[level].snps.snps_pre_cursor);
- val |= REG_FIELD_PREP(SNPS_PHY_TX_EQ_POST, trans->entries[level].snps.snps_post_cursor);
+ val |= REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, trans->entries[level].snps.vswing);
+ val |= REG_FIELD_PREP(SNPS_PHY_TX_EQ_PRE, trans->entries[level].snps.pre_cursor);
+ val |= REG_FIELD_PREP(SNPS_PHY_TX_EQ_POST, trans->entries[level].snps.post_cursor);
intel_de_write(dev_priv, SNPS_PHY_TX_EQ(ln, phy), val);
}
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index 081b772bfe10..07584695514b 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -40,8 +40,8 @@
#include "intel_dsi.h"
#include "intel_fifo_underrun.h"
#include "intel_panel.h"
-#include "intel_sideband.h"
#include "skl_scaler.h"
+#include "vlv_sideband.h"
/* return pixels in terms of txbyteclkhs */
static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count,
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index 0078973cd219..5413b52ab6ba 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -31,7 +31,7 @@
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
-#include "intel_sideband.h"
+#include "vlv_sideband.h"
static const u16 lfsr_converts[] = {
426, 469, 234, 373, 442, 221, 110, 311, 411, /* 62 - 70 */