From 01dc285d5cd89b77686d8baef8482c58d7dc3ead Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Wed, 1 Aug 2018 10:48:23 -0400 Subject: [PATCH 01/19] drm/amd/display: Report non-DP display as disconnected without EDID [Why] Some boards seem to have a problem where HPD is high on HDMI even though no display is connected. We don't want to report these as connected. DP spec still requires us to report DP displays as connected when HPD is high but we can't read the EDID in order to go to fail-safe mode. [How] If connector_signal is not DP abort detection if we can't retrieve the EDID. v2: Add Bugzilla and stable Bugzilla: https://bugs.freedesktop.org/107390 Bugzilla: https://bugs.freedesktop.org/106846 Cc: stable@vger.kernel.org Signed-off-by: Harry Wentland Acked-by: Alex Deucher Reviewed-by: Nicholas Kazlauskas Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 966d2f9c8c99..31cebb645fca 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -744,6 +744,17 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) break; case EDID_NO_RESPONSE: DC_LOG_ERROR("No EDID read.\n"); + + /* + * Abort detection for non-DP connectors if we have + * no EDID + * + * DP needs to report as connected if HDP is high + * even if we have no EDID in order to go to + * fail-safe mode + */ + if (!dc_is_dp_signal(link->connector_signal)) + return false; default: break; } From 16196776185cb19bb8c2318ebd1dbbefb5350d6a Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Thu, 2 Aug 2018 15:32:01 -0400 Subject: [PATCH 02/19] drm/amd/display: Only require EDID read for HDMI and DVI MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [Why] VGA sometimes has trouble retrieving the EDID on very long cables, KVM switches, or old displays. [How] Only require EDID read for HDMI and DVI and exempt other types (DP, VGA). We currently don't support VGA but if anyone adds support in the future this might get overlooked. Signed-off-by: Harry Wentland Suggested-by: Michel Dänzer Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 31cebb645fca..89d7c1e99168 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -753,7 +753,8 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) * even if we have no EDID in order to go to * fail-safe mode */ - if (!dc_is_dp_signal(link->connector_signal)) + if (dc_is_hdmi_signal(link->connector_signal) || + dc_is_dvi_signal(link->connector_signal)) return false; default: break; From e11d41472a50742c16d53c968e143fb498fa482f Mon Sep 17 00:00:00 2001 From: "Leo (Sunpeng) Li" Date: Thu, 19 Jul 2018 08:22:16 -0400 Subject: [PATCH 03/19] drm/amd/display: Use requested HDMI aspect ratio [Why] The DRM mode's HDMI picture aspect ratio field was never saved in dc_stream's timing struct. This causes us to mistake a new stream to have the same timings as the old, even though the user has requested a different aspect ratio. [How] Save DRM's aspect ratio field within dc_stream's timing struct. 
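For reference (assuming the usual definitions of both enums, which follow the HDMI/CEA-861 aspect-ratio codes), the 1-1 cast in the diff below amounts to:

    HDMI_PICTURE_ASPECT_NONE    -> ASPECT_RATIO_NO_DATA
    HDMI_PICTURE_ASPECT_4_3     -> ASPECT_RATIO_4_3
    HDMI_PICTURE_ASPECT_16_9    -> ASPECT_RATIO_16_9
    HDMI_PICTURE_ASPECT_64_27   -> ASPECT_RATIO_64_27
    HDMI_PICTURE_ASPECT_256_135 -> ASPECT_RATIO_256_135

If either enum ever stopped following the HDMI ordering, the cast would need to become an explicit switch.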
Bug: https://bugs.freedesktop.org/show_bug.cgi?id=107153 Signed-off-by: Leo (Sunpeng) Li Reviewed-by: Mikita Lipski Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 45e062022461..c1631d61336e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2109,13 +2109,8 @@ convert_color_depth_from_display_info(const struct drm_connector *connector) static enum dc_aspect_ratio get_aspect_ratio(const struct drm_display_mode *mode_in) { - int32_t width = mode_in->crtc_hdisplay * 9; - int32_t height = mode_in->crtc_vdisplay * 16; - - if ((width - height) < 10 && (width - height) > -10) - return ASPECT_RATIO_16_9; - else - return ASPECT_RATIO_4_3; + /* 1-1 mapping, since both enums follow the HDMI spec. */ + return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio; } static enum dc_color_space From 0301ccbaf67d3d9aea97156c5eb85233bb5a5178 Mon Sep 17 00:00:00 2001 From: abdoulaye berthe Date: Thu, 19 Jul 2018 15:39:55 -0400 Subject: [PATCH 04/19] drm/amd/display: DP Compliance 400.1.1 failure [Why] 400.1.1 is failing because we are not performing link training when we get an HPD pulse for the same display. This is breaking DP compliance [How] Always perform link training after HPD pulse if the detection reason is not DETECT_REASON_HPDRX. Signed-off-by: abdoulaye berthe Reviewed-by: Wenjing Liu Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 54 ++++++++++--------- 1 file changed, 28 insertions(+), 26 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 89d7c1e99168..ce65b4d5062e 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -764,39 +764,41 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) if ((prev_sink != NULL) && ((edid_status == EDID_THE_SAME) || (edid_status == EDID_OK))) same_edid = is_same_edid(&prev_sink->dc_edid, &sink->dc_edid); - // If both edid and dpcd are the same, then discard new sink and revert back to original sink - if ((same_edid) && (same_dpcd)) { - link_disconnect_remap(prev_sink, link); - sink = prev_sink; - prev_sink = NULL; - } else { - if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT && - sink_caps.transaction_type == - DDC_TRANSACTION_TYPE_I2C_OVER_AUX) { - /* - * TODO debug why Dell 2413 doesn't like - * two link trainings - */ + if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT && + sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX && + reason != DETECT_REASON_HPDRX) { + /* + * TODO debug why Dell 2413 doesn't like + * two link trainings + */ - /* deal with non-mst cases */ - for (i = 0; i < LINK_TRAINING_MAX_VERIFY_RETRY; i++) { - int fail_count = 0; + /* deal with non-mst cases */ + for (i = 0; i < LINK_TRAINING_MAX_VERIFY_RETRY; i++) { + int fail_count = 0; - dp_verify_link_cap(link, - &link->reported_link_cap, - &fail_count); + dp_verify_link_cap(link, + &link->reported_link_cap, + &fail_count); - if (fail_count == 0) - break; - } + if (fail_count == 0) + break; } - /* HDMI-DVI Dongle */ - if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A && - !sink->edid_caps.edid_hdmi) - sink->sink_signal = 
SIGNAL_TYPE_DVI_SINGLE_LINK; + } else { + // If edid is the same, then discard new sink and revert back to original sink + if (same_edid) { + link_disconnect_remap(prev_sink, link); + sink = prev_sink; + prev_sink = NULL; + + } } + /* HDMI-DVI Dongle */ + if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A && + !sink->edid_caps.edid_hdmi) + sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK; + /* Connectivity log: detection */ for (i = 0; i < sink->dc_edid.length / EDID_BLOCK_SIZE; i++) { CONN_DATA_DETECT(link, From 620a0d27b211aa03d3f99accfdd58b88e6e0504c Mon Sep 17 00:00:00 2001 From: David Francis Date: Thu, 19 Jul 2018 11:25:05 -0400 Subject: [PATCH 05/19] drm/amd/display: Implement backlight_ops.get_brightness [Why] This hook that is supposed to read the actual backlight value is used in a few places throughout the kernel to setup or force update on backlight [How] Create a dc function that calls the existing abm function, and call that function from amdgpu Signed-off-by: David Francis Reviewed-by: Harry Wentland Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 7 ++++++- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 9 +++++++++ drivers/gpu/drm/amd/display/dc/dc_link.h | 2 ++ 3 files changed, 17 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index c1631d61336e..dcae658fbc50 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1320,7 +1320,12 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd) { - return bd->props.brightness; + struct amdgpu_display_manager *dm = bl_get_data(bd); + int ret = dc_link_get_backlight_level(dm->backlight_link); + + if (ret == DC_ERROR_UNEXPECTED) + return bd->props.brightness; + return ret; } static const struct backlight_ops amdgpu_dm_backlight_ops = { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index ce65b4d5062e..9a6448e2089c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -2026,6 +2026,15 @@ enum dc_status dc_link_validate_mode_timing( return DC_OK; } +int dc_link_get_backlight_level(const struct dc_link *link) +{ + struct abm *abm = link->ctx->dc->res_pool->abm; + + if (abm == NULL || abm->funcs->get_current_backlight_8_bit == NULL) + return DC_ERROR_UNEXPECTED; + + return (int) abm->funcs->get_current_backlight_8_bit(abm); +} bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level, uint32_t frame_ramp, const struct dc_stream_state *stream) diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index 070a56926308..22f4ddd219d1 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -141,6 +141,8 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_ bool dc_link_set_backlight_level(const struct dc_link *dc_link, uint32_t level, uint32_t frame_ramp, const struct dc_stream_state *stream); +int dc_link_get_backlight_level(const struct dc_link *dc_link); + bool dc_link_set_abm_disable(const struct dc_link *dc_link); bool dc_link_set_psr_enable(const struct dc_link *dc_link, bool enable, bool wait); From 53a53f8687faf492df2644d8c18ff0217fc18730 Mon Sep 17 00:00:00 2001 From: 
David Francis Date: Wed, 18 Jul 2018 16:03:30 -0400 Subject: [PATCH 06/19] drm/amd/display: Read back max backlight value at boot [Why] If there is no program explicitly setting the backlight brightness (for example, during a minimal install of linux), the hardware defaults to maximum brightness but the backlight_device defaults to 0 value. Thus, settings displays the wrong brightness value. [How] When creating the backlight device, set brightness to max Signed-off-by: David Francis Reviewed-by: Harry Wentland Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index dcae658fbc50..34f34823bab5 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1340,6 +1340,7 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm) struct backlight_properties props = { 0 }; props.max_brightness = AMDGPU_MAX_BL_LEVEL; + props.brightness = AMDGPU_MAX_BL_LEVEL; props.type = BACKLIGHT_RAW; snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d", From d90e9a3bf5e747f0e3b0c8bec96d8699493f63ab Mon Sep 17 00:00:00 2001 From: David Francis Date: Thu, 19 Jul 2018 15:48:24 -0400 Subject: [PATCH 07/19] drm/amd/display: Destroy aux_engines only once [Why] In the dce112 function to destroy the resource pool, engines (the aux engines) is destroyed twice. This has no ill effects but is a tad redundant. [How] Remove the redundant call Signed-off-by: David Francis Reviewed-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c index 84a05ff2d674..288129343c77 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c @@ -677,9 +677,6 @@ static void destruct(struct dce110_resource_pool *pool) pool->base.timing_generators[i] = NULL; } - if (pool->base.engines[i] != NULL) - dce110_engine_destroy(&pool->base.engines[i]); - } for (i = 0; i < pool->base.stream_enc_count; i++) { From 78e4405cec6cb0780b27b222685dde33934b38e4 Mon Sep 17 00:00:00 2001 From: David Francis Date: Thu, 12 Jul 2018 15:46:41 -0400 Subject: [PATCH 08/19] drm/amd/display: Implement custom degamma lut on dcn [Why] Custom degamma lut functions are a feature we would like to support on compatible hardware [How] In atomic check, convert from array of drm_color_lut to dc_transfer_func. On hardware commit, allow for possibility of custom degamma. Both are based on the equivalent regamma pipeline. 
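As a rough sketch of how userspace might exercise this path (assuming the CRTC exposes a DEGAMMA_LUT property with a 4096-entry size, matching the MAX_COLOR_LUT_ENTRIES check in the diff below; property/CRTC ids and error handling omitted):

    #include <stdint.h>
    #include <xf86drmMode.h>   /* libdrm */

    static uint32_t make_linear_degamma_blob(int fd)
    {
            static struct drm_color_lut lut[4096];
            uint32_t blob_id = 0;
            int i;

            for (i = 0; i < 4096; i++) {
                    /* linear ramp as a trivial example lut */
                    uint16_t v = (uint16_t)(((uint32_t)i * 0xffff) / 4095);
                    lut[i].red = lut[i].green = lut[i].blue = v;
                    lut[i].reserved = 0;
            }
            drmModeCreatePropertyBlob(fd, lut, sizeof(lut), &blob_id);
            /* attach blob_id to the CRTC's DEGAMMA_LUT property afterwards,
             * e.g. with drmModeAtomicAddProperty() in an atomic commit */
            return blob_id;
    }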
Signed-off-by: David Francis Reviewed-by: Krunoslav Kovac Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- .../amd/display/amdgpu_dm/amdgpu_dm_color.c | 42 ++++++++++++++----- drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 2 + .../amd/display/dc/dcn10/dcn10_hw_sequencer.c | 7 +++- .../amd/display/modules/color/color_gamma.c | 10 +++-- 4 files changed, 46 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c index b329393307e5..326f6fb7e0bc 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c @@ -231,18 +231,21 @@ void amdgpu_dm_set_ctm(struct dm_crtc_state *crtc) * preparation for hardware commit. If no lut is specified by user, we default * to SRGB degamma. * - * Currently, we only support degamma bypass, or preprogrammed SRGB degamma. - * Programmable degamma is not supported, and an attempt to do so will return - * -EINVAL. + * We support degamma bypass, predefined SRGB, and custom degamma * * RETURNS: - * 0 on success, -EINVAL if custom degamma curve is given. + * 0 on success + * -EINVAL if crtc_state has a degamma_lut of invalid size + * -ENOMEM if gamma allocation fails */ int amdgpu_dm_set_degamma_lut(struct drm_crtc_state *crtc_state, struct dc_plane_state *dc_plane_state) { struct drm_property_blob *blob = crtc_state->degamma_lut; struct drm_color_lut *lut; + uint32_t lut_size; + struct dc_gamma *gamma; + bool ret; if (!blob) { /* Default to SRGB */ @@ -258,11 +261,30 @@ int amdgpu_dm_set_degamma_lut(struct drm_crtc_state *crtc_state, return 0; } - /* Otherwise, assume SRGB, since programmable degamma is not - * supported. - */ - dc_plane_state->in_transfer_func->type = TF_TYPE_PREDEFINED; - dc_plane_state->in_transfer_func->tf = TRANSFER_FUNCTION_SRGB; - return -EINVAL; + gamma = dc_create_gamma(); + if (!gamma) + return -ENOMEM; + + lut_size = blob->length / sizeof(struct drm_color_lut); + gamma->num_entries = lut_size; + if (gamma->num_entries == MAX_COLOR_LUT_ENTRIES) + gamma->type = GAMMA_CUSTOM; + else { + dc_gamma_release(&gamma); + return -EINVAL; + } + + __drm_lut_to_dc_gamma(lut, gamma, false); + + dc_plane_state->in_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS; + ret = mod_color_calculate_degamma_params(dc_plane_state->in_transfer_func, gamma, true); + dc_gamma_release(&gamma); + if (!ret) { + dc_plane_state->in_transfer_func->type = TF_TYPE_BYPASS; + DRM_ERROR("Out of memory when calculating degamma params\n"); + return -ENOMEM; + } + + return 0; } diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h index 1d1f2d5ece51..b789cb2b354b 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h @@ -417,6 +417,7 @@ enum { GAMMA_RGB_256_ENTRIES = 256, GAMMA_RGB_FLOAT_1024_ENTRIES = 1024, GAMMA_CS_TFM_1D_ENTRIES = 4096, + GAMMA_CUSTOM_ENTRIES = 4096, GAMMA_MAX_ENTRIES = 4096 }; @@ -424,6 +425,7 @@ enum dc_gamma_type { GAMMA_RGB_256 = 1, GAMMA_RGB_FLOAT_1024 = 2, GAMMA_CS_TFM_1D = 3, + GAMMA_CUSTOM = 4, }; struct dc_csc_transform { diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index c87f6e603055..f5d8242eb047 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -1213,8 +1213,11 @@ static bool 
dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx, } else if (tf->type == TF_TYPE_BYPASS) { dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS); } else { - /*TF_TYPE_DISTRIBUTED_POINTS*/ - result = false; + cm_helper_translate_curve_to_degamma_hw_format(tf, + &dpp_base->degamma_params); + dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, + &dpp_base->degamma_params); + result = true; } return result; diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index ee69c949bfbf..bf29733958c3 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -997,7 +997,9 @@ static void scale_user_regamma_ramp(struct pwl_float_data *pwl_rgb, * norm_y = 4095*regamma_y, and index is just truncating to nearest integer * lut1 = lut1D[index], lut2 = lut1D[index+1] * - *adjustedY is then linearly interpolating regamma Y between lut1 and lut2 + * adjustedY is then linearly interpolating regamma Y between lut1 and lut2 + * + * Custom degamma on Linux uses the same interpolation math, so is handled here */ static void apply_lut_1d( const struct dc_gamma *ramp, @@ -1018,7 +1020,7 @@ static void apply_lut_1d( struct fixed31_32 delta_lut; struct fixed31_32 delta_index; - if (ramp->type != GAMMA_CS_TFM_1D) + if (ramp->type != GAMMA_CS_TFM_1D && ramp->type != GAMMA_CUSTOM) return; // this is not expected for (i = 0; i < num_hw_points; i++) { @@ -1636,7 +1638,9 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf, map_regamma_hw_to_x_user(ramp, coeff, rgb_user, coordinates_x, axix_x, curve, MAX_HW_POINTS, tf_pts, - mapUserRamp); + mapUserRamp && ramp->type != GAMMA_CUSTOM); + if (ramp->type == GAMMA_CUSTOM) + apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts); ret = true; From 5ae6fe572929587a304471bf4a641361a45152b5 Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Mon, 23 Jul 2018 14:13:23 -0400 Subject: [PATCH 09/19] drm/amd/display: Use calculated disp_clk_khz value for dce110 [Why] The calculated values for actual disp_clk_khz were ignored when notifying pplib of the new display requirements. In order to honor DFS bypass clocks from the hardware, the calculated value should be used. [How] The return value for set_dispclk is now assigned back into new_clocks and correctly carried through into dccg->clks.dispclk_khz. When notifying pplib of new display requirements, dccg->clks.dispclk_khz is used instead of dce.dispclk_khz. The value of dce.dispclk_khz was never explicitly set to anything before. A 15% higher display clock value than calculated is no longer requested for dce110 since it now makes use of the calculated value. Since dce112 makes use of dce110's set_bandwidth but not its update_clocks it needs to have the value correctly carried through.
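In short, the intended flow after this change (simplified from the diff below):

    /* dce12_update_clocks / dce_update_clocks: keep the clock the hardware actually granted */
    new_clocks->dispclk_khz = dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
    dccg->clks.dispclk_khz = new_clocks->dispclk_khz;

    /* pplib_apply_display_requirements: report that same value to pplib */
    pp_display_cfg->disp_clk_khz = dc->res_pool->dccg->clks.dispclk_khz;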
Signed-off-by: Nicholas Kazlauskas Reviewed-by: Harry Wentland Reviewed-by: Dmytro Laktyushkin Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c | 4 ++-- drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c index 0db8d1da3d0e..f17677971d0f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c @@ -463,7 +463,7 @@ static void dce12_update_clocks(struct dccg *dccg, if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) { clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK; clock_voltage_req.clocks_in_khz = new_clocks->dispclk_khz; - dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz); + new_clocks->dispclk_khz = dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz); dccg->clks.dispclk_khz = new_clocks->dispclk_khz; dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req); @@ -661,7 +661,7 @@ static void dce_update_clocks(struct dccg *dccg, } if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) { - dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz); + new_clocks->dispclk_khz = dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz); dccg->clks.dispclk_khz = new_clocks->dispclk_khz; } } diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 1149c413f6d2..1d98e3678b04 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -2530,7 +2530,7 @@ static void pplib_apply_display_requirements( /* TODO: dce11.2*/ pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0; - pp_display_cfg->disp_clk_khz = context->bw.dce.dispclk_khz; + pp_display_cfg->disp_clk_khz = dc->res_pool->dccg->clks.dispclk_khz; dce110_fill_display_configs(context, pp_display_cfg); @@ -2559,7 +2559,7 @@ void dce110_set_bandwidth( { struct dc_clocks req_clks; - req_clks.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100; + req_clks.dispclk_khz = context->bw.dce.dispclk_khz; req_clks.phyclk_khz = get_max_pixel_clock_for_all_paths(dc, context); if (decrease_allowed) From 9315e2399a2cdc236e8d42c1a21fb1071cdad03d Mon Sep 17 00:00:00 2001 From: Hersen Wu Date: Mon, 16 Jul 2018 11:21:12 -0400 Subject: [PATCH 10/19] drm/amd/display: Fix DP HBR2 Eye Diagram Pattern on Carrizo [why] dp hbr2 eye diagram pattern for raven asic is not stable. workaround is to use tp4 pattern. But this should not be applied to asics before raven. [how] add new bool variable in asic caps. for raven asic, use the workaround. for carrizo, vega, do not use workaround.
Signed-off-by: Hersen Wu Reviewed-by: Harry Wentland Acked-by: Leo Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 8 +++----- drivers/gpu/drm/amd/display/dc/dc.h | 1 + drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 2 ++ 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 58ee9aad13fb..160841da72a7 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -1784,12 +1784,10 @@ static void dp_test_send_link_training(struct dc_link *link) dp_retrain_link_dp_test(link, &link_settings, false); } -/* TODO hbr2 compliance eye output is unstable +/* TODO Raven hbr2 compliance eye output is unstable * (toggling on and off) with debugger break * This caueses intermittent PHY automation failure * Need to look into the root cause */ -static uint8_t force_tps4_for_cp2520 = 1; - static void dp_test_send_phy_test_pattern(struct dc_link *link) { union phy_test_pattern dpcd_test_pattern; @@ -1849,13 +1847,13 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link) break; case PHY_TEST_PATTERN_CP2520_1: /* CP2520 pattern is unstable, temporarily use TPS4 instead */ - test_pattern = (force_tps4_for_cp2520 == 1) ? + test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ? DP_TEST_PATTERN_TRAINING_PATTERN4 : DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE; break; case PHY_TEST_PATTERN_CP2520_2: /* CP2520 pattern is unstable, temporarily use TPS4 instead */ - test_pattern = (force_tps4_for_cp2520 == 1) ? + test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ? DP_TEST_PATTERN_TRAINING_PATTERN4 : DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE; break; diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index c2d390fba0bf..55bcc3bdc6a3 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -77,6 +77,7 @@ struct dc_caps { bool is_apu; bool dual_link_dvi; bool post_blend_color_processing; + bool force_dp_tps4_for_cp2520; }; struct dc_dcc_surface_param { diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 5d169f07d745..6b44ed3697a4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -1127,6 +1127,8 @@ static bool construct( dc->caps.max_slave_planes = 1; dc->caps.is_apu = true; dc->caps.post_blend_color_processing = false; + /* Raven DP PHY HBR2 eye diagram pattern is not stable. Use TP4 */ + dc->caps.force_dp_tps4_for_cp2520 = true; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; From 3e27e10e2ecee0d3a0083f8ae76354ac9c6ad15c Mon Sep 17 00:00:00 2001 From: Mikita Lipski Date: Thu, 12 Jul 2018 16:44:05 -0400 Subject: [PATCH 11/19] drm/amd/display: Don't share clk source between DP and HDMI [why] Prevent clock source sharing between HDMI and DP connectors. DP shouldn't be sharing its ref clock with phy clock, which caused an issue of older ASICS booting up with multiple diplays plugged in. [how] Add an extra check that would prevent HDMI and DP sharing clk. 
Signed-off-by: Mikita Lipski Reviewed-by: Hersen Wu Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- .../gpu/drm/amd/display/dc/core/dc_resource.c | 22 ++++++++++++++++++- drivers/gpu/drm/amd/display/dc/dc.h | 1 + .../amd/display/dc/dce100/dce100_resource.c | 2 +- .../drm/amd/display/dc/dce80/dce80_resource.c | 3 +++ 4 files changed, 26 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 2e65715f76a1..4ca41d6e3bcf 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -330,6 +330,9 @@ bool resource_are_streams_timing_synchronizable( != stream2->timing.pix_clk_khz) return false; + if (stream1->clamping.c_depth != stream2->clamping.c_depth) + return false; + if (stream1->phy_pix_clk != stream2->phy_pix_clk && (!dc_is_dp_signal(stream1->signal) || !dc_is_dp_signal(stream2->signal))) @@ -337,6 +340,20 @@ bool resource_are_streams_timing_synchronizable( return true; } +static bool is_dp_and_hdmi_sharable( + struct dc_stream_state *stream1, + struct dc_stream_state *stream2) +{ + if (stream1->ctx->dc->caps.disable_dp_clk_share) + return false; + + if (stream1->clamping.c_depth != COLOR_DEPTH_888 || + stream2->clamping.c_depth != COLOR_DEPTH_888) + return false; + + return true; + +} static bool is_sharable_clk_src( const struct pipe_ctx *pipe_with_clk_src, @@ -348,7 +365,10 @@ static bool is_sharable_clk_src( if (pipe_with_clk_src->stream->signal == SIGNAL_TYPE_VIRTUAL) return false; - if (dc_is_dp_signal(pipe_with_clk_src->stream->signal)) + if (dc_is_dp_signal(pipe_with_clk_src->stream->signal) || + (dc_is_dp_signal(pipe->stream->signal) && + !is_dp_and_hdmi_sharable(pipe_with_clk_src->stream, + pipe->stream))) return false; if (dc_is_hdmi_signal(pipe_with_clk_src->stream->signal) diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 55bcc3bdc6a3..3ecd2d614f41 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -78,6 +78,7 @@ struct dc_caps { bool dual_link_dvi; bool post_blend_color_processing; bool force_dp_tps4_for_cp2520; + bool disable_dp_clk_share; }; struct dc_dcc_surface_param { diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c index fd2bdae4dcec..3f76e6019546 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c @@ -919,7 +919,7 @@ static bool construct( dc->caps.i2c_speed_in_khz = 40; dc->caps.max_cursor_size = 128; dc->caps.dual_link_dvi = true; - + dc->caps.disable_dp_clk_share = true; for (i = 0; i < pool->base.pipe_count; i++) { pool->base.timing_generators[i] = dce100_timing_generator_create( diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c index dc9f3e9afc33..604c62969ead 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c @@ -946,6 +946,7 @@ static bool dce80_construct( } dc->caps.max_planes = pool->base.pipe_count; + dc->caps.disable_dp_clk_share = true; if (!resource_construct(num_virtual_links, dc, &pool->base, &res_create_funcs)) @@ -1131,6 +1132,7 @@ static bool dce81_construct( } dc->caps.max_planes = pool->base.pipe_count; + dc->caps.disable_dp_clk_share = true; if 
(!resource_construct(num_virtual_links, dc, &pool->base, &res_create_funcs)) @@ -1312,6 +1314,7 @@ static bool dce83_construct( } dc->caps.max_planes = pool->base.pipe_count; + dc->caps.disable_dp_clk_share = true; if (!resource_construct(num_virtual_links, dc, &pool->base, &res_create_funcs)) From ad830e7ab1847f4a014c04496b2581a7497b204f Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Wed, 18 Jul 2018 15:25:34 -0400 Subject: [PATCH 12/19] drm/amd/display: add vbios table check for enabling dp ss Signed-off-by: Dmytro Laktyushkin Reviewed-by: Eric Bernstein Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 1 + drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 5 ++++- drivers/gpu/drm/amd/display/dc/dc_link.h | 1 + drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h | 2 ++ 4 files changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 9a6448e2089c..016587675da4 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -1038,6 +1038,7 @@ static bool construct( link->link_index = init_params->link_index; link->link_id = bios->funcs->get_connector_id(bios, init_params->connector_index); + link->dp_ss_off = !!dc_ctx->dc_bios->integrated_info->dp_ss_control;; if (link->link_id.type != OBJECT_TYPE_CONNECTOR) { dm_error("%s: Invalid Connector ObjectID from Adapter Service for connector index:%d! type %d expected %d\n", diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 160841da72a7..a7553b6d59c2 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -953,7 +953,10 @@ enum link_training_result dc_link_dp_perform_link_training( * LINK_SPREAD_05_DOWNSPREAD_30KHZ : * LINK_SPREAD_DISABLED; */ - lt_settings.link_settings.link_spread = LINK_SPREAD_05_DOWNSPREAD_30KHZ; + if (link->dp_ss_off) + lt_settings.link_settings.link_spread = LINK_SPREAD_DISABLED; + else + lt_settings.link_settings.link_spread = LINK_SPREAD_05_DOWNSPREAD_30KHZ; /* 1. set link rate, lane count and spread*/ dpcd_set_link_settings(link, <_settings); diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index 22f4ddd219d1..d43cefbc43d3 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -73,6 +73,7 @@ struct dc_link { enum dc_irq_source irq_source_hpd; enum dc_irq_source irq_source_hpd_rx;/* aka DP Short Pulse */ bool is_hpd_filter_disabled; + bool dp_ss_off; /* caps is the same as reported_link_cap. link_traing use * reported_link_cap. Will clean up. 
TODO diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h index 36bbad594267..f312834fef50 100644 --- a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h +++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h @@ -395,6 +395,8 @@ struct integrated_info { struct i2c_reg_info dp3_ext_hdmi_reg_settings[9]; unsigned char dp3_ext_hdmi_6g_reg_num; struct i2c_reg_info dp3_ext_hdmi_6g_reg_settings[3]; + /* V11 */ + uint32_t dp_ss_control; }; /** From fb7b11e1633e50b9a6b3fffe5cd151474aee9802 Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Mon, 30 Jul 2018 12:27:23 -0400 Subject: [PATCH 13/19] drm/amd/display: Add NULL check for enabling dp ss [Why] The pointer for integrated_info can be NULL which causes the system to do a null pointer deference and hang on boot. [How] Add a check to ensure that integrated_info is not null before enabling DP ss. Signed-off-by: Nicholas Kazlauskas Reviewed-by: Sun peng Li Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 016587675da4..a38e7ad36a7e 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -1038,7 +1038,9 @@ static bool construct( link->link_index = init_params->link_index; link->link_id = bios->funcs->get_connector_id(bios, init_params->connector_index); - link->dp_ss_off = !!dc_ctx->dc_bios->integrated_info->dp_ss_control;; + + if (dc_ctx->dc_bios->integrated_info) + link->dp_ss_off = !!dc_ctx->dc_bios->integrated_info->dp_ss_control; if (link->link_id.type != OBJECT_TYPE_CONNECTOR) { dm_error("%s: Invalid Connector ObjectID from Adapter Service for connector index:%d! 
type %d expected %d\n", From 99326ee3624b90d176a8f7e2aa3d15dfaa59c8f1 Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Thu, 26 Jul 2018 12:17:58 -0400 Subject: [PATCH 14/19] drm/amd/display: program display clock on cache match [Why] We seem to have an issue where high enough display clock will not get set properly during S3 resume if we only call vbios once [How] Expand condition of display clock programming to happen even when cached display clock matches requested display clock Signed-off-by: Dmytro Laktyushkin Reviewed-by: Tony Cheng Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c | 4 +++- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 2 ++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c index f17677971d0f..684da3db7568 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c @@ -625,7 +625,9 @@ static void dcn1_update_clocks(struct dccg *dccg, } /* dcn1 dppclk is tied to dispclk */ - if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) { + /* program dispclk on = as a w/a for sleep resume clock ramping issues */ + if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz) + || new_clocks->dispclk_khz == dccg->clks.dispclk_khz) { dcn1_ramp_up_dispclk_with_dpp(dccg, new_clocks); dccg->clks.dispclk_khz = new_clocks->dispclk_khz; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index f5d8242eb047..cfcc54f2ce65 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -1089,6 +1089,8 @@ static void dcn10_init_hw(struct dc *dc) } enable_power_gating_plane(dc->hwseq, true); + + memset(&dc->res_pool->dccg->clks, 0, sizeof(dc->res_pool->dccg->clks)); } static void reset_hw_ctx_wrap( From 81aca8e75c1b046865fb2badef95a0dcff6f73de Mon Sep 17 00:00:00 2001 From: Mikita Lipski Date: Fri, 13 Jul 2018 09:07:35 -0400 Subject: [PATCH 15/19] drm/amd/display: update clk for various HDMI color depths [why] When programming tonga's connector's backend we didn't take in account that HDMI's colour depth might be more than 8bpc therefore we need to add a switch statement that would adjust the pixel clock accordingly. [how] Add a switch statement updating clock by its appropriate coefficient. 
Signed-off-by: Mikita Lipski Reviewed-by: Charlene Liu Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- .../drm/amd/display/dc/bios/command_table.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c index 651e1fd4622f..a558bfaa0c46 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c @@ -808,6 +808,24 @@ static enum bp_result transmitter_control_v1_5( * (=1: 8bpp, =1.25: 10bpp, =1.5:12bpp, =2: 16bpp) * LVDS mode: usPixelClock = pixel clock */ + if (cntl->signal == SIGNAL_TYPE_HDMI_TYPE_A) { + switch (cntl->color_depth) { + case COLOR_DEPTH_101010: + params.usSymClock = + cpu_to_le16((le16_to_cpu(params.usSymClock) * 30) / 24); + break; + case COLOR_DEPTH_121212: + params.usSymClock = + cpu_to_le16((le16_to_cpu(params.usSymClock) * 36) / 24); + break; + case COLOR_DEPTH_161616: + params.usSymClock = + cpu_to_le16((le16_to_cpu(params.usSymClock) * 48) / 24); + break; + default: + break; + } + } if (EXEC_BIOS_CMD_TABLE(UNIPHYTransmitterControl, params)) result = BP_RESULT_OK; From 1e1dbd6fd10031bf46d9e44b6ad423e2ee39e2a7 Mon Sep 17 00:00:00 2001 From: Hersen Wu Date: Fri, 27 Jul 2018 14:52:37 -0400 Subject: [PATCH 16/19] drm/amd/display: display connected to dp-1 does not light up [why] for vega, dp set_panel_mode is handled by psp firmware. dal should not program the register again. [how] dal does not program panel mode. Signed-off-by: Hersen Wu Reviewed-by: Charlene Liu Acked-by: Bhawanpreet Lakha Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 1 + drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c | 5 +++++ drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c | 1 + 3 files changed, 7 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 3ecd2d614f41..e2f033d420a0 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -79,6 +79,7 @@ struct dc_caps { bool post_blend_color_processing; bool force_dp_tps4_for_cp2520; bool disable_dp_clk_share; + bool psp_setup_panel_mode; }; struct dc_dcc_surface_param { diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c index 60e3c6a73d37..752b3d62e793 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c @@ -256,6 +256,11 @@ static void setup_panel_mode( enum dp_panel_mode panel_mode) { uint32_t value; + struct dc_context *ctx = enc110->base.ctx; + + /* if psp set panel mode, dal should be program it */ + if (ctx->dc->caps.psp_setup_panel_mode) + return; ASSERT(REG(DP_DPHY_INTERNAL_CTRL)); value = REG_READ(DP_DPHY_INTERNAL_CTRL); diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c index 61d8e22d23c9..d43f37d99c7d 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c @@ -883,6 +883,7 @@ static bool construct( dc->caps.i2c_speed_in_khz = 100; dc->caps.max_cursor_size = 128; dc->caps.dual_link_dvi = true; + dc->caps.psp_setup_panel_mode = true; dc->debug = debug_defaults; From 4823e5da2ea9061011242db81334d6ebbd2ed0a5 Mon Sep 17 00:00:00 2001 From: Lucas Stach Date: Mon, 6 Aug 2018 15:12:48 +0200 Subject: [PATCH 17/19] 
drm/scheduler: fix timeout worker setup for out of order job completions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit drm_sched_job_finish() is a work item scheduled for each finished job on a unbound system workqueue. This means the workers can execute out of order with regard to the real hardware job completions. If this happens queueing a timeout worker for the first job on the ring mirror list is wrong, as this may be a job which has already finished executing. Fix this by reorganizing the code to always queue the worker for the next job on the list, if this job hasn't finished yet. This is robust against a potential reordering of the finish workers. Also move out the timeout worker cancelling, so that we don't need to take the job list lock twice. As a small optimization list_del is used to remove the job from the ring mirror list, as there is no need to reinit the list head in the job we are about to free. Signed-off-by: Lucas Stach Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/scheduler/gpu_scheduler.c | 30 +++++++++++++---------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c index 1b733229201e..a70c7f7fd6fe 100644 --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c @@ -552,24 +552,28 @@ static void drm_sched_job_finish(struct work_struct *work) finish_work); struct drm_gpu_scheduler *sched = s_job->sched; - /* remove job from ring_mirror_list */ + /* + * Canceling the timeout without removing our job from the ring mirror + * list is safe, as we will only end up in this worker if our jobs + * finished fence has been signaled. So even if some another worker + * manages to find this job as the next job in the list, the fence + * signaled check below will prevent the timeout to be restarted. 
+ */ + cancel_delayed_work_sync(&s_job->work_tdr); + spin_lock(&sched->job_list_lock); - list_del_init(&s_job->node); - if (sched->timeout != MAX_SCHEDULE_TIMEOUT) { - struct drm_sched_job *next; + /* queue TDR for next job */ + if (sched->timeout != MAX_SCHEDULE_TIMEOUT && + !list_is_last(&s_job->node, &sched->ring_mirror_list)) { + struct drm_sched_job *next = list_next_entry(s_job, node); - spin_unlock(&sched->job_list_lock); - cancel_delayed_work_sync(&s_job->work_tdr); - spin_lock(&sched->job_list_lock); - - /* queue TDR for next job */ - next = list_first_entry_or_null(&sched->ring_mirror_list, - struct drm_sched_job, node); - - if (next) + if (!dma_fence_is_signaled(&next->s_fence->finished)) schedule_delayed_work(&next->work_tdr, sched->timeout); } + /* remove job from ring_mirror_list */ + list_del(&s_job->node); spin_unlock(&sched->job_list_lock); + dma_fence_put(&s_job->s_fence->finished); sched->ops->free_job(s_job); } From 275e6fa8ecfd8ec1eb36a865d6dafc64f4bed462 Mon Sep 17 00:00:00 2001 From: Nayan Deshmukh Date: Thu, 9 Aug 2018 15:18:12 +0530 Subject: [PATCH 18/19] drm/scheduler: fix param documentation We no longer have the sched parameter, so remove its description as well. Signed-off-by: Nayan Deshmukh Signed-off-by: Alex Deucher --- drivers/gpu/drm/scheduler/gpu_scheduler.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c index a70c7f7fd6fe..4fc211e19d6e 100644 --- a/drivers/gpu/drm/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c @@ -249,7 +249,6 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f, /** * drm_sched_entity_flush - Flush a context entity * - * @sched: scheduler instance * @entity: scheduler entity * @timeout: time to wait in for Q to become empty in jiffies. * @@ -292,7 +291,6 @@ EXPORT_SYMBOL(drm_sched_entity_flush); /** * drm_sched_entity_cleanup - Destroy a context entity * - * @sched: scheduler instance * @entity: scheduler entity * * This should be called after @drm_sched_entity_do_release. It goes over the @@ -356,7 +354,6 @@ EXPORT_SYMBOL(drm_sched_entity_fini); /** * drm_sched_entity_fini - Destroy a context entity * - * @sched: scheduler instance * @entity: scheduler entity * * Calls drm_sched_entity_do_release() and drm_sched_entity_cleanup() @@ -719,7 +716,6 @@ EXPORT_SYMBOL(drm_sched_job_recovery); * drm_sched_job_init - init a scheduler job * * @job: scheduler job to init - * @sched: scheduler instance * @entity: scheduler entity to use * @owner: job owner for debugging * From b045d3af7d1f443dd5209a096568e0ce983127e9 Mon Sep 17 00:00:00 2001 From: Emily Deng Date: Thu, 9 Aug 2018 10:03:04 +0800 Subject: [PATCH 19/19] drm/amdgpu/sriov: give 8s for recover vram under RUNTIME Extend the timeout for recovering vram bos from shadows on sr-iov to cover the worst case scenario for timeslices and VFs. Under runtime, the wait fence time could be quite long when other VFs are in exclusive mode. For example, for 4 VFs, every VF's exclusive timeout time is set to 3s, so the worst case is 9s. If the VF number is more than 4, then the worst case time will be longer. The 8s value comes from testing: with the timeout set to 8s, the TDR test passes 1000 times.
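To spell out the arithmetic: with 4 VFs each granted a 3s exclusive timeslice, a VF's fence wait can queue behind the other 3 VFs, i.e. roughly 3 * 3s = 9s in the worst case; the 8s used below is the value that empirically passed that TDR testing.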
SWDEV-161490 Signed-off-by: Monk Liu Signed-off-by: Emily Deng Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index ec53d8f96d06..3ef34df8937b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3105,7 +3105,7 @@ static int amdgpu_device_handle_vram_lost(struct amdgpu_device *adev) long tmo; if (amdgpu_sriov_runtime(adev)) - tmo = msecs_to_jiffies(amdgpu_lockup_timeout); + tmo = msecs_to_jiffies(8000); else tmo = msecs_to_jiffies(100);