Diffstat (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c')
-rw-r--r-- | drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 435
1 file changed, 301 insertions(+), 134 deletions(-)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 3c072754738d..50c783e19f5a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -82,7 +82,6 @@
 #include <drm/drm_atomic_uapi.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_blend.h>
-#include <drm/drm_fb_helper.h>
 #include <drm/drm_fourcc.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_vblank.h>
@@ -1097,7 +1096,7 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
 	/* Initialize hardware. */
 	memset(&hw_params, 0, sizeof(hw_params));
 	hw_params.fb_base = adev->gmc.fb_start;
-	hw_params.fb_offset = adev->gmc.aper_base;
+	hw_params.fb_offset = adev->vm_manager.vram_base_offset;
 
 	/* backdoor load firmware and trigger dmub running */
 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
@@ -1219,7 +1218,7 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
 
 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
-	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
+	pa_config->system_aperture.fb_offset = adev->vm_manager.vram_base_offset;
 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
 
 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
@@ -1364,7 +1363,44 @@ static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
 		},
 	},
+	{
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
+		},
+	},
+	{
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
+		},
+	},
+	{
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
+		},
+	},
+	{
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
+		},
+	},
+	{
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
+		},
+	},
+	{
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
+		},
+	},
 	{}
+	/* TODO: refactor this from a fixed table to a dynamic option */
 };
 
 static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
@@ -1397,9 +1433,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 	memset(&init_params, 0, sizeof(init_params));
 #endif
 
+	mutex_init(&adev->dm.dpia_aux_lock);
 	mutex_init(&adev->dm.dc_lock);
 	mutex_init(&adev->dm.audio_lock);
-	spin_lock_init(&adev->dm.vblank_lock);
 
 	if(amdgpu_dm_irq_init(adev)) {
 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
@@ -1467,6 +1503,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 	case IP_VERSION(3, 0, 1):
 	case IP_VERSION(3, 1, 2):
 	case IP_VERSION(3, 1, 3):
+	case IP_VERSION(3, 1, 4):
 	case IP_VERSION(3, 1, 5):
 	case IP_VERSION(3, 1, 6):
 		init_data.flags.gpu_vm_support = true;
@@ -1549,6 +1586,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 
 	adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;
 
+	/* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */
+	adev->dm.dc->debug.ignore_cable_id = true;
+
 	r = dm_dmub_hw_init(adev);
 	if (r) {
 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
@@ -1634,12 +1674,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 		}
 	}
 
-	if (amdgpu_dm_initialize_drm_device(adev)) {
-		DRM_ERROR(
-		"amdgpu: failed to initialize sw for display support.\n");
-		goto error;
-	}
-
 	/* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
 	 * It is expected that DMUB will resend any pending notifications at this point, for
 	 * example HPD from DPIA.
@@ -1647,6 +1681,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 	if (dc_is_dmub_outbox_supported(adev->dm.dc))
 		dc_enable_dmub_outbox(adev->dm.dc);
 
+	if (amdgpu_dm_initialize_drm_device(adev)) {
+		DRM_ERROR(
+		"amdgpu: failed to initialize sw for display support.\n");
+		goto error;
+	}
+
 	/* create fake encoders for MST */
 	dm_dp_create_fake_mst_encoders(adev);
@@ -1759,6 +1799,7 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
 
 	mutex_destroy(&adev->dm.audio_lock);
 	mutex_destroy(&adev->dm.dc_lock);
+	mutex_destroy(&adev->dm.dpia_aux_lock);
 
 	return;
 }
@@ -2810,7 +2851,6 @@ const struct amdgpu_ip_block_version dm_ip_block =
 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
 	.fb_create = amdgpu_display_user_framebuffer_create,
 	.get_format_info = amd_get_format_info,
-	.output_poll_changed = drm_fb_helper_output_poll_changed,
 	.atomic_check = amdgpu_dm_atomic_check,
 	.atomic_commit = drm_atomic_helper_commit,
 };
@@ -4590,6 +4630,7 @@ static int dm_early_init(void *handle)
 					adev_to_drm(adev)->dev,
 					&dev_attr_s3_debug);
 #endif
+	adev->dc_enabled = true;
 
 	return 0;
 }
@@ -4829,6 +4870,35 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
 	return 0;
 }
 
+static inline void fill_dc_dirty_rect(struct drm_plane *plane,
+				      struct rect *dirty_rect, int32_t x,
+				      int32_t y, int32_t width, int32_t height,
+				      int *i, bool ffu)
+{
+	if (*i > DC_MAX_DIRTY_RECTS)
+		return;
+
+	if (*i == DC_MAX_DIRTY_RECTS)
+		goto out;
+
+	dirty_rect->x = x;
+	dirty_rect->y = y;
+	dirty_rect->width = width;
+	dirty_rect->height = height;
+
+	if (ffu)
+		drm_dbg(plane->dev,
+			"[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
+			plane->base.id, width, height);
+	else
+		drm_dbg(plane->dev,
+			"[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)",
+			plane->base.id, x, y, width, height);
+
+out:
+	(*i)++;
+}
+
 /**
  * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
  *
@@ -4849,10 +4919,6 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
  * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
  * implicitly provide damage clips without any client support via the plane
  * bounds.
- *
- * Today, amdgpu_dm only supports the MPO and cursor usecase.
- *
- * TODO: Also enable for FB_DAMAGE_CLIPS
  */
 static void fill_dc_dirty_rects(struct drm_plane *plane,
 				struct drm_plane_state *old_plane_state,
@@ -4863,12 +4929,11 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
 	struct rect *dirty_rects = flip_addrs->dirty_rects;
 	uint32_t num_clips;
+	struct drm_mode_rect *clips;
 	bool bb_changed;
 	bool fb_changed;
 	uint32_t i = 0;
 
-	flip_addrs->dirty_rect_count = 0;
-
 	/*
 	 * Cursor plane has it's own dirty rect update interface. See
 	 * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
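The hunks below feed FB_DAMAGE_CLIPS damage into DC instead of always using plane bounds. DRM expresses damage as corner pairs (drm_mode_rect's x1/y1/x2/y2, with x2/y2 exclusive), while DC's struct rect wants origin plus extent, hence the clips->x2 - clips->x1 arithmetic in the loops. A minimal standalone sketch of that conversion, not part of the patch, using local stand-in types rather than the kernel definitions:

/* Standalone sketch: convert a DRM damage clip (corner form) to a DC-style
 * dirty rect (origin + size form). The structs mirror drm_mode_rect and
 * dc's struct rect but are local stand-ins. */
#include <stdint.h>
#include <stdio.h>

struct mode_rect { int32_t x1, y1, x2, y2; };       /* like drm_mode_rect */
struct dirty_rect { int32_t x, y, width, height; }; /* like dc's rect */

static struct dirty_rect clip_to_dirty_rect(const struct mode_rect *clip)
{
	struct dirty_rect r = {
		.x = clip->x1,
		.y = clip->y1,
		.width = clip->x2 - clip->x1,   /* x2/y2 are exclusive */
		.height = clip->y2 - clip->y1,
	};
	return r;
}

int main(void)
{
	struct mode_rect clip = { .x1 = 64, .y1 = 32, .x2 = 320, .y2 = 272 };
	struct dirty_rect r = clip_to_dirty_rect(&clip);

	printf("dirty rect at (%d, %d) size (%d, %d)\n",
	       r.x, r.y, r.width, r.height);
	return 0;
}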
@@ -4876,20 +4941,20 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
 	if (plane->type == DRM_PLANE_TYPE_CURSOR)
 		return;
 
-	/*
-	 * Today, we only consider MPO use-case for PSR SU. If MPO not
-	 * requested, and there is a plane update, do FFU.
-	 */
+	num_clips = drm_plane_get_damage_clips_count(new_plane_state);
+	clips = drm_plane_get_damage_clips(new_plane_state);
+
 	if (!dm_crtc_state->mpo_requested) {
-		dirty_rects[0].x = 0;
-		dirty_rects[0].y = 0;
-		dirty_rects[0].width = dm_crtc_state->base.mode.crtc_hdisplay;
-		dirty_rects[0].height = dm_crtc_state->base.mode.crtc_vdisplay;
-		flip_addrs->dirty_rect_count = 1;
-		DRM_DEBUG_DRIVER("[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
-				 new_plane_state->plane->base.id,
-				 dm_crtc_state->base.mode.crtc_hdisplay,
-				 dm_crtc_state->base.mode.crtc_vdisplay);
+		if (!num_clips || num_clips > DC_MAX_DIRTY_RECTS)
+			goto ffu;
+
+		for (; flip_addrs->dirty_rect_count < num_clips; clips++)
+			fill_dc_dirty_rect(new_plane_state->plane,
+					   &dirty_rects[i], clips->x1,
+					   clips->y1, clips->x2 - clips->x1,
+					   clips->y2 - clips->y1,
+					   &flip_addrs->dirty_rect_count,
+					   false);
 		return;
 	}
 
@@ -4900,7 +4965,6 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
 	 * If plane is moved or resized, also add old bounding box to dirty
 	 * rects.
 	 */
-	num_clips = drm_plane_get_damage_clips_count(new_plane_state);
 	fb_changed = old_plane_state->fb->base.id !=
 		     new_plane_state->fb->base.id;
 	bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
@@ -4908,36 +4972,51 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
 		      old_plane_state->crtc_w != new_plane_state->crtc_w ||
 		      old_plane_state->crtc_h != new_plane_state->crtc_h);
 
-	DRM_DEBUG_DRIVER("[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
-			 new_plane_state->plane->base.id,
-			 bb_changed, fb_changed, num_clips);
+	drm_dbg(plane->dev,
+		"[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
+		new_plane_state->plane->base.id,
+		bb_changed, fb_changed, num_clips);
 
-	if (num_clips || fb_changed || bb_changed) {
-		dirty_rects[i].x = new_plane_state->crtc_x;
-		dirty_rects[i].y = new_plane_state->crtc_y;
-		dirty_rects[i].width = new_plane_state->crtc_w;
-		dirty_rects[i].height = new_plane_state->crtc_h;
-		DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
-				 new_plane_state->plane->base.id,
-				 dirty_rects[i].x, dirty_rects[i].y,
-				 dirty_rects[i].width, dirty_rects[i].height);
-		i += 1;
-	}
-
-	/* Add old plane bounding-box if plane is moved or resized */
 	if (bb_changed) {
-		dirty_rects[i].x = old_plane_state->crtc_x;
-		dirty_rects[i].y = old_plane_state->crtc_y;
-		dirty_rects[i].width = old_plane_state->crtc_w;
-		dirty_rects[i].height = old_plane_state->crtc_h;
-		DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
-				 old_plane_state->plane->base.id,
-				 dirty_rects[i].x, dirty_rects[i].y,
-				 dirty_rects[i].width, dirty_rects[i].height);
-		i += 1;
-	}
+		fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
+				   new_plane_state->crtc_x,
+				   new_plane_state->crtc_y,
+				   new_plane_state->crtc_w,
+				   new_plane_state->crtc_h, &i, false);
+
+		/* Add old plane bounding-box if plane is moved or resized */
+		fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
+				   old_plane_state->crtc_x,
+				   old_plane_state->crtc_y,
+				   old_plane_state->crtc_w,
+				   old_plane_state->crtc_h, &i, false);
+	}
+
+	if (num_clips) {
+		for (; i < num_clips; clips++)
+			fill_dc_dirty_rect(new_plane_state->plane,
+					   &dirty_rects[i], clips->x1,
+					   clips->y1, clips->x2 - clips->x1,
+					   clips->y2 - clips->y1, &i, false);
+	} else if (fb_changed && !bb_changed) {
+		fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
+				   new_plane_state->crtc_x,
+				   new_plane_state->crtc_y,
+				   new_plane_state->crtc_w,
+				   new_plane_state->crtc_h, &i, false);
+	}
+
+	if (i > DC_MAX_DIRTY_RECTS)
+		goto ffu;
 
 	flip_addrs->dirty_rect_count = i;
+	return;
+
+ffu:
+	fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[0], 0, 0,
+			   dm_crtc_state->base.mode.crtc_hdisplay,
			   dm_crtc_state->base.mode.crtc_vdisplay,
+			   &flip_addrs->dirty_rect_count, true);
 }
 
 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
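Note the overflow convention in the hunks above: fill_dc_dirty_rect() deliberately counts one past DC_MAX_DIRTY_RECTS rather than writing out of bounds, so the caller's final i > DC_MAX_DIRTY_RECTS test can detect "too many clips" and jump to the ffu label, which replaces the whole list with a single full-frame update. A standalone sketch of that convention, not part of the patch, with an illustrative MAX_RECTS standing in for DC_MAX_DIRTY_RECTS:

/* Standalone sketch: the helper stops storing at the table limit but still
 * counts one past it, so the caller can detect overflow afterwards and
 * fall back to a full-frame rect. */
#include <stdio.h>

#define MAX_RECTS 3   /* illustrative; mirrors DC_MAX_DIRTY_RECTS */

struct rect { int x, y, w, h; };

static void add_rect(struct rect *table, int *i, struct rect r)
{
	if (*i > MAX_RECTS)
		return;             /* already overflowed; stop counting */
	if (*i < MAX_RECTS)
		table[*i] = r;      /* store only while in bounds */
	(*i)++;                     /* counts at most one past the limit */
}

int main(void)
{
	struct rect table[MAX_RECTS];
	int count = 0;

	for (int n = 0; n < 5; n++)   /* five clips, table holds three */
		add_rect(table, &count, (struct rect){ n, n, 16, 16 });

	if (count > MAX_RECTS) {
		/* overflow: send one full-frame rect instead */
		table[0] = (struct rect){ 0, 0, 1920, 1080 };
		count = 1;
		printf("fell back to full-frame update\n");
	}
	printf("dirty_rect_count = %d\n", count);
	return 0;
}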
@@ -5602,16 +5681,14 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
 {
 	struct drm_connector *drm_connector = &aconnector->base;
 	uint32_t link_bandwidth_kbps;
-	uint32_t max_dsc_target_bpp_limit_override = 0;
 	struct dc *dc = sink->ctx->dc;
 	uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
 	uint32_t dsc_max_supported_bw_in_kbps;
+	uint32_t max_dsc_target_bpp_limit_override =
+		drm_connector->display_info.max_dsc_bpp;
 
 	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
 						     dc_link_get_link_cap(aconnector->dc_link));
-	if (stream->link && stream->link->local_sink)
-		max_dsc_target_bpp_limit_override =
-			stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
 
 	/* Set DSC policy according to dsc_clock_en */
 	dc_dsc_policy_set_enable_dsc_when_not_needed(
@@ -5684,7 +5761,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 	const struct drm_connector_state *con_state =
 		dm_state ? &dm_state->base : NULL;
 	struct dc_stream_state *stream = NULL;
-	struct drm_display_mode mode = *drm_mode;
+	struct drm_display_mode mode;
 	struct drm_display_mode saved_mode;
 	struct drm_display_mode *freesync_mode = NULL;
 	bool native_mode_found = false;
@@ -5692,12 +5769,14 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
 	int mode_refresh;
 	int preferred_refresh = 0;
+	enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN;
 #if defined(CONFIG_DRM_AMD_DC_DCN)
 	struct dsc_dec_dpcd_caps dsc_caps;
 #endif
 
 	struct dc_sink *sink = NULL;
 
+	drm_mode_init(&mode, drm_mode);
 	memset(&saved_mode, 0, sizeof(saved_mode));
 
 	if (aconnector == NULL) {
@@ -5815,7 +5894,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
 				stream->use_vsc_sdp_for_colorimetry = true;
 		}
-		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
+		if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22)
+			tf = TRANSFER_FUNC_GAMMA_22;
+		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
 		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
 	}
 
@@ -6145,6 +6226,70 @@ static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
 	create_eml_sink(aconnector);
 }
 
+static enum dc_status dm_validate_stream_and_context(struct dc *dc,
+						     struct dc_stream_state *stream)
+{
+	enum dc_status dc_result = DC_ERROR_UNEXPECTED;
+	struct dc_plane_state *dc_plane_state = NULL;
+	struct dc_state *dc_state = NULL;
+
+	if (!stream)
+		goto cleanup;
+
+	dc_plane_state = dc_create_plane_state(dc);
+	if (!dc_plane_state)
+		goto cleanup;
+
+	dc_state = dc_create_state(dc);
+	if (!dc_state)
+		goto cleanup;
+
+	/* populate stream to plane */
+	dc_plane_state->src_rect.height = stream->src.height;
+	dc_plane_state->src_rect.width = stream->src.width;
+	dc_plane_state->dst_rect.height = stream->src.height;
+	dc_plane_state->dst_rect.width = stream->src.width;
+	dc_plane_state->clip_rect.height = stream->src.height;
+	dc_plane_state->clip_rect.width = stream->src.width;
+	dc_plane_state->plane_size.surface_pitch = ((stream->src.width + 255) / 256) * 256;
+	dc_plane_state->plane_size.surface_size.height = stream->src.height;
+	dc_plane_state->plane_size.surface_size.width = stream->src.width;
+	dc_plane_state->plane_size.chroma_size.height = stream->src.height;
+	dc_plane_state->plane_size.chroma_size.width = stream->src.width;
+	dc_plane_state->tiling_info.gfx9.swizzle = DC_SW_UNKNOWN;
+	dc_plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
+	dc_plane_state->tiling_info.gfx9.swizzle = DC_SW_UNKNOWN;
+	dc_plane_state->rotation = ROTATION_ANGLE_0;
+	dc_plane_state->is_tiling_rotated = false;
+	dc_plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_LINEAR_GENERAL;
+
+	dc_result = dc_validate_stream(dc, stream);
+	if (dc_result == DC_OK)
+		dc_result = dc_validate_plane(dc, dc_plane_state);
+
+	if (dc_result == DC_OK)
+		dc_result = dc_add_stream_to_ctx(dc, dc_state, stream);
+
+	if (dc_result == DC_OK && !dc_add_plane_to_context(
+						dc,
+						stream,
+						dc_plane_state,
+						dc_state))
+		dc_result = DC_FAIL_ATTACH_SURFACES;
+
+	if (dc_result == DC_OK)
+		dc_result = dc_validate_global_state(dc, dc_state, true);
+
+cleanup:
+	if (dc_state)
+		dc_release_state(dc_state);
+
+	if (dc_plane_state)
+		dc_plane_state_release(dc_plane_state);
+
+	return dc_result;
+}
+
 struct dc_stream_state *
 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 				const struct drm_display_mode *drm_mode,
@@ -6171,6 +6316,9 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 	if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
 		dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
 
+	if (dc_result == DC_OK)
+		dc_result = dm_validate_stream_and_context(adev->dm.dc, stream);
+
 	if (dc_result != DC_OK) {
 		DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
 			      drm_mode->hdisplay,
@@ -6459,7 +6607,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
 	struct drm_connector_state *new_con_state;
 	struct amdgpu_dm_connector *aconnector;
 	struct dm_connector_state *dm_conn_state;
-	int i, j;
+	int i, j, ret;
 	int vcpi, pbn_div, pbn, slot_num = 0;
 
 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
@@ -6506,8 +6654,11 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
 			dm_conn_state->pbn = pbn;
 			dm_conn_state->vcpi_slots = slot_num;
 
-			drm_dp_mst_atomic_enable_dsc(state, aconnector->port, dm_conn_state->pbn,
-						     false);
+			ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->port,
+							   dm_conn_state->pbn, false);
+			if (ret < 0)
+				return ret;
+
 			continue;
 		}
@@ -7614,9 +7765,10 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 		bundle->surface_updates[planes_count].plane_info =
 			&bundle->plane_infos[planes_count];
 
-		fill_dc_dirty_rects(plane, old_plane_state, new_plane_state,
-				    new_crtc_state,
-				    &bundle->flip_addrs[planes_count]);
+		if (acrtc_state->stream->link->psr_settings.psr_feature_enabled)
+			fill_dc_dirty_rects(plane, old_plane_state,
+					    new_plane_state, new_crtc_state,
+					    &bundle->flip_addrs[planes_count]);
 
 		/*
 		 * Only allow immediate flips for fast updates that don't
@@ -7832,6 +7984,9 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 			 */
 			if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
 			    acrtc_attach->dm_irq_params.allow_psr_entry &&
+#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+			    !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
+#endif
 			    !acrtc_state->stream->link->psr_settings.psr_allow_active)
 				amdgpu_dm_psr_enable(acrtc_state->stream);
 		} else {
@@ -8293,8 +8448,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
 		if (amdgpu_dm_crc_window_is_activated(crtc)) {
 			spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
-			acrtc->dm_irq_params.crc_window.update_win = true;
-			acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
+			acrtc->dm_irq_params.window_param.update_win = true;
+			acrtc->dm_irq_params.window_param.skip_frame_cnt = 2;
 			spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
 			crc_rd_wrk->crtc = crtc;
 			spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
@@ -9520,10 +9675,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
 	if (dc_resource_is_dsc_encoding_supported(dc)) {
-		if (!pre_validate_dsc(state, &dm_state, vars)) {
-			ret = -EINVAL;
+		ret = pre_validate_dsc(state, &dm_state, vars);
+		if (ret != 0)
 			goto fail;
-		}
 	}
 #endif
@@ -9618,9 +9772,9 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 		}
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
-		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
+		ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
+		if (ret) {
 			DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
-			ret = -EINVAL;
 			goto fail;
 		}
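The two atomic_check hunks above stop collapsing every DSC validation failure into a blanket -EINVAL: pre_validate_dsc() and compute_mst_dsc_configs_for_state() now return an error code that the caller passes up unchanged, so userspace can distinguish failure modes. A standalone sketch of that pattern, not part of the patch, with illustrative stand-in functions:

/* Standalone sketch: the callee returns 0 or a negative errno instead of
 * a bool, and the caller propagates that code rather than rewriting it. */
#include <errno.h>
#include <stdio.h>
#include <string.h>

static int validate_config(int slots_needed, int slots_free)
{
	if (slots_needed < 0)
		return -EINVAL;          /* malformed request */
	if (slots_needed > slots_free)
		return -ENOSPC;          /* distinguishable failure */
	return 0;
}

static int atomic_check(int slots_needed, int slots_free)
{
	int ret = validate_config(slots_needed, slots_free);

	if (ret != 0)
		return ret;              /* propagate, don't flatten */
	return 0;
}

int main(void)
{
	int ret = atomic_check(64, 60);

	if (ret)
		printf("check failed: %s\n", strerror(-ret));
	return 0;
}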
@@ -10083,79 +10237,92 @@ uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
 	return value;
 }
 
-static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux,
-						struct dc_context *ctx,
-						uint8_t status_type,
-						uint32_t *operation_result)
+int amdgpu_dm_process_dmub_aux_transfer_sync(
+		struct dc_context *ctx,
+		unsigned int link_index,
+		struct aux_payload *payload,
+		enum aux_return_code_type *operation_result)
 {
 	struct amdgpu_device *adev = ctx->driver_context;
-	int return_status = -1;
 	struct dmub_notification *p_notify = adev->dm.dmub_notify;
+	int ret = -1;
 
-	if (is_cmd_aux) {
-		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
-			return_status = p_notify->aux_reply.length;
-			*operation_result = p_notify->result;
-		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
-			*operation_result = AUX_RET_ERROR_TIMEOUT;
-		} else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
-			*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
-		} else {
-			*operation_result = AUX_RET_ERROR_UNKNOWN;
+	mutex_lock(&adev->dm.dpia_aux_lock);
+	if (!dc_process_dmub_aux_transfer_async(ctx->dc, link_index, payload)) {
+		*operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
+		goto out;
+	}
+
+	if (!wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
+		DRM_ERROR("wait_for_completion_timeout timeout!");
+		*operation_result = AUX_RET_ERROR_TIMEOUT;
+		goto out;
+	}
+
+	if (p_notify->result != AUX_RET_SUCCESS) {
+		/*
+		 * Transient states before tunneling is enabled could
+		 * lead to this error. We can ignore this for now.
+		 */
+		if (p_notify->result != AUX_RET_ERROR_PROTOCOL_ERROR) {
+			DRM_WARN("DPIA AUX failed on 0x%x(%d), error %d\n",
+					payload->address, payload->length,
+					p_notify->result);
 		}
-	} else {
-		if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
-			return_status = 0;
-			*operation_result = p_notify->sc_status;
-		} else {
-			*operation_result = SET_CONFIG_UNKNOWN_ERROR;
+		*operation_result = AUX_RET_ERROR_INVALID_REPLY;
+		goto out;
+	}
+
+
+	payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
+	if (!payload->write && p_notify->aux_reply.length &&
+			(payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK)) {
+
+		if (payload->length != p_notify->aux_reply.length) {
+			DRM_WARN("invalid read length %d from DPIA AUX 0x%x(%d)!\n",
+					p_notify->aux_reply.length,
+					payload->address, payload->length);
+			*operation_result = AUX_RET_ERROR_INVALID_REPLY;
+			goto out;
 		}
+
+		memcpy(payload->data, p_notify->aux_reply.data,
+				p_notify->aux_reply.length);
 	}
 
-	return return_status;
+	/* success */
+	ret = p_notify->aux_reply.length;
+	*operation_result = p_notify->result;
+out:
+	mutex_unlock(&adev->dm.dpia_aux_lock);
+	return ret;
 }
 
-int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
-	unsigned int link_index, void *cmd_payload, void *operation_result)
+int amdgpu_dm_process_dmub_set_config_sync(
+		struct dc_context *ctx,
+		unsigned int link_index,
+		struct set_config_cmd_payload *payload,
+		enum set_config_status *operation_result)
 {
 	struct amdgpu_device *adev = ctx->driver_context;
-	int ret = 0;
+	bool is_cmd_complete;
+	int ret;
 
-	if (is_cmd_aux) {
-		dc_process_dmub_aux_transfer_async(ctx->dc,
-			link_index, (struct aux_payload *)cmd_payload);
-	} else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
-					(struct set_config_cmd_payload *)cmd_payload,
-					adev->dm.dmub_notify)) {
-		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
-					ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
-					(uint32_t *)operation_result);
-	}
+	mutex_lock(&adev->dm.dpia_aux_lock);
+	is_cmd_complete = dc_process_dmub_set_config_async(ctx->dc,
+			link_index, payload, adev->dm.dmub_notify);
 
-	ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
-	if (ret == 0) {
+	if (is_cmd_complete || wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
+		ret = 0;
+		*operation_result = adev->dm.dmub_notify->sc_status;
+	} else {
 		DRM_ERROR("wait_for_completion_timeout timeout!");
-		return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
-				ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
-				(uint32_t *)operation_result);
+		ret = -1;
+		*operation_result = SET_CONFIG_UNKNOWN_ERROR;
 	}
 
-	if (is_cmd_aux) {
-		if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
-			struct aux_payload *payload = (struct aux_payload *)cmd_payload;
-
-			payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
-			if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
-			    payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
-				memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
-				       adev->dm.dmub_notify->aux_reply.length);
-			}
-		}
-	}
-
-	return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
-			ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
-			(uint32_t *)operation_result);
+	mutex_unlock(&adev->dm.dpia_aux_lock);
+	return ret;
 }
 
 /*
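The reworked amdgpu_dm_process_dmub_aux_transfer_sync() above serializes requests with the new dpia_aux_lock and turns the firmware's asynchronous reply into a synchronous result by blocking on a completion with a 10 * HZ timeout. A userspace analogy of that sync-over-async shape, not part of the patch, using POSIX threads (a mutex plus condition variable standing in for the kernel mutex and completion; every name is local to the sketch):

/* Userspace analogy: req_lock serializes requests like dpia_aux_lock; the
 * condition variable plays the completion the reply handler signals.
 * Build with: cc sketch.c -lpthread */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t req_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t done_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done_cond = PTHREAD_COND_INITIALIZER;
static bool reply_ready;
static int reply_value;

static void *firmware_reply(void *arg)   /* plays the DMUB notify handler */
{
	(void)arg;
	usleep(100 * 1000);                  /* reply arrives "later" */
	pthread_mutex_lock(&done_lock);
	reply_value = 42;
	reply_ready = true;
	pthread_cond_signal(&done_cond);     /* like complete(&..._done) */
	pthread_mutex_unlock(&done_lock);
	return NULL;
}

static int transfer_sync(void)
{
	pthread_t t;
	struct timespec deadline;
	int ret = -1;

	pthread_mutex_lock(&req_lock);       /* one request in flight */
	reply_ready = false;
	pthread_create(&t, NULL, firmware_reply, NULL); /* "async" submit */

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 10;               /* like the 10 * HZ timeout */

	pthread_mutex_lock(&done_lock);
	while (!reply_ready) {
		if (pthread_cond_timedwait(&done_cond, &done_lock,
					   &deadline) != 0)
			break;               /* timed out */
	}
	if (reply_ready)
		ret = reply_value;
	pthread_mutex_unlock(&done_lock);

	pthread_join(t, NULL);
	pthread_mutex_unlock(&req_lock);
	return ret;
}

int main(void)
{
	printf("reply: %d\n", transfer_sync());
	return 0;
}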
@@ -10167,8 +10334,8 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context
  */
 bool check_seamless_boot_capability(struct amdgpu_device *adev)
 {
-	switch (adev->asic_type) {
-	case CHIP_VANGOGH:
+	switch (adev->ip_versions[DCE_HWIP][0]) {
+	case IP_VERSION(3, 0, 1):
 		if (!adev->mman.keep_stolen_vga_memory)
 			return true;
 		break;