Merge branch 'drm-next-4.19' of git://people.freedesktop.org/~agd5f/linux into drm-next

More fixes for 4.19:
- Fixes for scheduler
- Fix for SR-IOV
- Fixes for display

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180809200052.2777-1-alexander.deucher@amd.com
commit 557ce95051
Dave Airlie <airlied@redhat.com>, 2018-08-10 11:43:02 +10:00
21 changed files with 194 additions and 78 deletions


@@ -3108,7 +3108,7 @@ static int amdgpu_device_handle_vram_lost(struct amdgpu_device *adev)
     long tmo;
 
     if (amdgpu_sriov_runtime(adev))
-        tmo = msecs_to_jiffies(amdgpu_lockup_timeout);
+        tmo = msecs_to_jiffies(8000);
     else
         tmo = msecs_to_jiffies(100);

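Note on the SR-IOV fix above: with a virtual function the GPU can be preempted by other VFs, so the post-VRAM-loss fence wait is bumped to a fixed 8 seconds instead of the bare-metal lockup timeout. A minimal sketch (not part of the patch) of how such a jiffies timeout is typically consumed; the helper name and fence argument are illustrative, only msecs_to_jiffies() and dma_fence_wait_timeout() are real kernel APIs:

    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/dma-fence.h>

    /* Illustrative only: wait on a recovery fence with the timeout picked above. */
    static int example_wait_recovery_fence(struct dma_fence *fence, bool sriov_runtime)
    {
        long tmo = sriov_runtime ? msecs_to_jiffies(8000) : msecs_to_jiffies(100);
        long r;

        /* dma_fence_wait_timeout(): 0 means timeout, <0 error, >0 jiffies left. */
        r = dma_fence_wait_timeout(fence, false, tmo);
        if (r == 0)
            return -ETIMEDOUT;
        return r < 0 ? r : 0;
    }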

@@ -1320,7 +1320,12 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
 {
-    return bd->props.brightness;
+    struct amdgpu_display_manager *dm = bl_get_data(bd);
+    int ret = dc_link_get_backlight_level(dm->backlight_link);
+
+    if (ret == DC_ERROR_UNEXPECTED)
+        return bd->props.brightness;
+    return ret;
 }
 
 static const struct backlight_ops amdgpu_dm_backlight_ops = {
@@ -1335,6 +1340,7 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
     struct backlight_properties props = { 0 };
 
     props.max_brightness = AMDGPU_MAX_BL_LEVEL;
+    props.brightness = AMDGPU_MAX_BL_LEVEL;
     props.type = BACKLIGHT_RAW;
 
     snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
@@ -2109,13 +2115,8 @@ convert_color_depth_from_display_info(const struct drm_connector *connector)
 static enum dc_aspect_ratio
 get_aspect_ratio(const struct drm_display_mode *mode_in)
 {
-    int32_t width = mode_in->crtc_hdisplay * 9;
-    int32_t height = mode_in->crtc_vdisplay * 16;
-    if ((width - height) < 10 && (width - height) > -10)
-        return ASPECT_RATIO_16_9;
-    else
-        return ASPECT_RATIO_4_3;
+    /* 1-1 mapping, since both enums follow the HDMI spec. */
+    return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
 }
 
 static enum dc_color_space

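Context for the two backlight hunks above: get_brightness now reports the level DC/ABM is actually driving, and registration seeds props.brightness so the initial sysfs reading is sane. A rough sketch (not part of the patch) of how such an ops/props pair plugs into the standard backlight core; everything prefixed example_ is illustrative, while backlight_device_register() and struct backlight_ops are the real API:

    #include <linux/device.h>
    #include <linux/backlight.h>

    #define EXAMPLE_MAX_BL_LEVEL 0xFF    /* stand-in for AMDGPU_MAX_BL_LEVEL */

    static int example_update_status(struct backlight_device *bd)
    {
        /* A real driver would program the panel/ABM here. */
        return 0;
    }

    static int example_get_brightness(struct backlight_device *bd)
    {
        /* Query hardware; fall back to the cached property on error. */
        return bd->props.brightness;
    }

    static const struct backlight_ops example_bl_ops = {
        .get_brightness = example_get_brightness,
        .update_status  = example_update_status,
    };

    static struct backlight_device *example_register(struct device *dev, void *drvdata)
    {
        struct backlight_properties props = { 0 };

        props.max_brightness = EXAMPLE_MAX_BL_LEVEL;
        props.brightness = EXAMPLE_MAX_BL_LEVEL;    /* start fully on, as the hunk above does */
        props.type = BACKLIGHT_RAW;

        return backlight_device_register("example_bl", dev, drvdata,
                                         &example_bl_ops, &props);
    }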

@@ -231,18 +231,21 @@ void amdgpu_dm_set_ctm(struct dm_crtc_state *crtc)
  * preparation for hardware commit. If no lut is specified by user, we default
  * to SRGB degamma.
  *
- * Currently, we only support degamma bypass, or preprogrammed SRGB degamma.
- * Programmable degamma is not supported, and an attempt to do so will return
- * -EINVAL.
+ * We support degamma bypass, predefined SRGB, and custom degamma
  *
  * RETURNS:
- * 0 on success, -EINVAL if custom degamma curve is given.
+ * 0 on success
+ * -EINVAL if crtc_state has a degamma_lut of invalid size
+ * -ENOMEM if gamma allocation fails
  */
 int amdgpu_dm_set_degamma_lut(struct drm_crtc_state *crtc_state,
                   struct dc_plane_state *dc_plane_state)
 {
     struct drm_property_blob *blob = crtc_state->degamma_lut;
     struct drm_color_lut *lut;
+    uint32_t lut_size;
+    struct dc_gamma *gamma;
+    bool ret;
 
     if (!blob) {
         /* Default to SRGB */
@@ -258,11 +261,30 @@ int amdgpu_dm_set_degamma_lut(struct drm_crtc_state *crtc_state,
         return 0;
     }
 
-    /* Otherwise, assume SRGB, since programmable degamma is not
-     * supported.
-     */
-    dc_plane_state->in_transfer_func->type = TF_TYPE_PREDEFINED;
-    dc_plane_state->in_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
-    return -EINVAL;
+    gamma = dc_create_gamma();
+    if (!gamma)
+        return -ENOMEM;
+
+    lut_size = blob->length / sizeof(struct drm_color_lut);
+    gamma->num_entries = lut_size;
+    if (gamma->num_entries == MAX_COLOR_LUT_ENTRIES)
+        gamma->type = GAMMA_CUSTOM;
+    else {
+        dc_gamma_release(&gamma);
+        return -EINVAL;
+    }
+
+    __drm_lut_to_dc_gamma(lut, gamma, false);
+
+    dc_plane_state->in_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS;
+    ret = mod_color_calculate_degamma_params(dc_plane_state->in_transfer_func, gamma, true);
+    dc_gamma_release(&gamma);
+    if (!ret) {
+        dc_plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
+        DRM_ERROR("Out of memory when calculating degamma params\n");
+        return -ENOMEM;
+    }
+
     return 0;
 }

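The new path above only accepts a degamma LUT whose entry count is exactly MAX_COLOR_LUT_ENTRIES (4096, mirrored by the GAMMA_CUSTOM_ENTRIES value added later in this series); any other size returns -EINVAL before touching hardware. A hedged userspace sketch of supplying such a LUT through the CRTC DEGAMMA_LUT property with libdrm; crtc_id and prop_degamma_lut_id are assumed to have been looked up already and DRM_CLIENT_CAP_ATOMIC enabled, the function name is illustrative:

    #include <stdint.h>
    #include <xf86drm.h>
    #include <xf86drmMode.h>

    #define LUT_ENTRIES 4096    /* must match the kernel's MAX_COLOR_LUT_ENTRIES */

    static int set_linear_degamma(int fd, uint32_t crtc_id, uint32_t prop_degamma_lut_id)
    {
        struct drm_color_lut lut[LUT_ENTRIES];
        uint32_t blob_id = 0;
        drmModeAtomicReq *req;
        int i, ret;

        /* Identity ramp: evenly spaced 16-bit values. */
        for (i = 0; i < LUT_ENTRIES; i++) {
            uint16_t v = (uint16_t)(((uint32_t)i * 0xFFFF) / (LUT_ENTRIES - 1));
            lut[i].red = lut[i].green = lut[i].blue = v;
            lut[i].reserved = 0;
        }

        ret = drmModeCreatePropertyBlob(fd, lut, sizeof(lut), &blob_id);
        if (ret)
            return ret;

        req = drmModeAtomicAlloc();
        if (!req) {
            drmModeDestroyPropertyBlob(fd, blob_id);
            return -1;
        }
        drmModeAtomicAddProperty(req, crtc_id, prop_degamma_lut_id, blob_id);
        ret = drmModeAtomicCommit(fd, req, 0, NULL);
        drmModeAtomicFree(req);
        drmModeDestroyPropertyBlob(fd, blob_id);
        return ret;
    }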

@@ -808,6 +808,24 @@ static enum bp_result transmitter_control_v1_5(
      * (=1: 8bpp, =1.25: 10bpp, =1.5:12bpp, =2: 16bpp)
      * LVDS mode: usPixelClock = pixel clock
      */
+    if (cntl->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+        switch (cntl->color_depth) {
+        case COLOR_DEPTH_101010:
+            params.usSymClock =
+                cpu_to_le16((le16_to_cpu(params.usSymClock) * 30) / 24);
+            break;
+        case COLOR_DEPTH_121212:
+            params.usSymClock =
+                cpu_to_le16((le16_to_cpu(params.usSymClock) * 36) / 24);
+            break;
+        case COLOR_DEPTH_161616:
+            params.usSymClock =
+                cpu_to_le16((le16_to_cpu(params.usSymClock) * 48) / 24);
+            break;
+        default:
+            break;
+        }
+    }
 
     if (EXEC_BIOS_CMD_TABLE(UNIPHYTransmitterControl, params))
         result = BP_RESULT_OK;

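The scaling above is the HDMI deep-color rule: the TMDS symbol clock is bpc/24 times the 8-bit clock, hence the 30/24, 36/24 and 48/24 factors. Worked example with illustrative numbers: a 297,000 kHz clock at 12 bpc becomes 297000 * 36 / 24 = 445,500 kHz. The same arithmetic outside the BIOS parameter packing, as a sketch (the helper name is made up):

    #include <stdint.h>

    /* Illustrative helper: TMDS symbol clock for HDMI deep color.
     * bits_per_component is 8, 10, 12 or 16; 8 bpc leaves the clock unchanged.
     */
    static uint32_t hdmi_symbol_clock_khz(uint32_t pixel_clock_khz,
                                          unsigned int bits_per_component)
    {
        /* 3 components per pixel, 24 bits per pixel at the 8 bpc baseline. */
        return (uint32_t)(((uint64_t)pixel_clock_khz * bits_per_component * 3) / 24);
    }

    /* hdmi_symbol_clock_khz(297000, 12) == 445500
     * hdmi_symbol_clock_khz(148500, 10) == 185625
     */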

@@ -744,6 +744,18 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
             break;
         case EDID_NO_RESPONSE:
             DC_LOG_ERROR("No EDID read.\n");
+
+            /*
+             * Abort detection for non-DP connectors if we have
+             * no EDID
+             *
+             * DP needs to report as connected if HDP is high
+             * even if we have no EDID in order to go to
+             * fail-safe mode
+             */
+            if (dc_is_hdmi_signal(link->connector_signal) ||
+                dc_is_dvi_signal(link->connector_signal))
+                return false;
         default:
             break;
         }
@@ -752,39 +764,41 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
         if ((prev_sink != NULL) && ((edid_status == EDID_THE_SAME) || (edid_status == EDID_OK)))
             same_edid = is_same_edid(&prev_sink->dc_edid, &sink->dc_edid);
 
-        // If both edid and dpcd are the same, then discard new sink and revert back to original sink
-        if ((same_edid) && (same_dpcd)) {
-            link_disconnect_remap(prev_sink, link);
-            sink = prev_sink;
-            prev_sink = NULL;
-        } else {
-            if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
-                sink_caps.transaction_type ==
-                    DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
-                /*
-                 * TODO debug why Dell 2413 doesn't like
-                 *  two link trainings
-                 */
-
-                /* deal with non-mst cases */
-                for (i = 0; i < LINK_TRAINING_MAX_VERIFY_RETRY; i++) {
-                    int fail_count = 0;
-
-                    dp_verify_link_cap(link,
-                            &link->reported_link_cap,
-                            &fail_count);
-
-                    if (fail_count == 0)
-                        break;
-                }
-            }
-
-            /* HDMI-DVI Dongle */
-            if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A &&
-                    !sink->edid_caps.edid_hdmi)
-                sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
-        }
+        if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
+            sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX &&
+            reason != DETECT_REASON_HPDRX) {
+            /*
+             * TODO debug why Dell 2413 doesn't like
+             *  two link trainings
+             */
+
+            /* deal with non-mst cases */
+            for (i = 0; i < LINK_TRAINING_MAX_VERIFY_RETRY; i++) {
+                int fail_count = 0;
+
+                dp_verify_link_cap(link,
+                        &link->reported_link_cap,
+                        &fail_count);
+
+                if (fail_count == 0)
+                    break;
+            }
+
+        } else {
+            // If edid is the same, then discard new sink and revert back to original sink
+            if (same_edid) {
+                link_disconnect_remap(prev_sink, link);
+                sink = prev_sink;
+                prev_sink = NULL;
+            }
+        }
+
+        /* HDMI-DVI Dongle */
+        if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A &&
+                !sink->edid_caps.edid_hdmi)
+            sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
 
         /* Connectivity log: detection */
         for (i = 0; i < sink->dc_edid.length / EDID_BLOCK_SIZE; i++) {
             CONN_DATA_DETECT(link,
@@ -1025,6 +1039,9 @@ static bool construct(
     link->link_id = bios->funcs->get_connector_id(bios, init_params->connector_index);
 
+    if (dc_ctx->dc_bios->integrated_info)
+        link->dp_ss_off = !!dc_ctx->dc_bios->integrated_info->dp_ss_control;
+
     if (link->link_id.type != OBJECT_TYPE_CONNECTOR) {
         dm_error("%s: Invalid Connector ObjectID from Adapter Service for connector index:%d! type %d expected %d\n",
             __func__, init_params->connector_index,
@@ -2012,6 +2029,15 @@ enum dc_status dc_link_validate_mode_timing(
 
     return DC_OK;
 }
 
+int dc_link_get_backlight_level(const struct dc_link *link)
+{
+    struct abm *abm = link->ctx->dc->res_pool->abm;
+
+    if (abm == NULL || abm->funcs->get_current_backlight_8_bit == NULL)
+        return DC_ERROR_UNEXPECTED;
+
+    return (int) abm->funcs->get_current_backlight_8_bit(abm);
+}
+
 bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
         uint32_t frame_ramp, const struct dc_stream_state *stream)


@@ -953,7 +953,10 @@ enum link_training_result dc_link_dp_perform_link_training(
      * LINK_SPREAD_05_DOWNSPREAD_30KHZ :
      * LINK_SPREAD_DISABLED;
      */
-    lt_settings.link_settings.link_spread = LINK_SPREAD_05_DOWNSPREAD_30KHZ;
+    if (link->dp_ss_off)
+        lt_settings.link_settings.link_spread = LINK_SPREAD_DISABLED;
+    else
+        lt_settings.link_settings.link_spread = LINK_SPREAD_05_DOWNSPREAD_30KHZ;
 
     /* 1. set link rate, lane count and spread*/
     dpcd_set_link_settings(link, &lt_settings);

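link->dp_ss_off is filled from the VBIOS integrated_info dp_ss_control field (see the dc_link construct hunk above and the bios_parser_types addition later), letting boards that cannot tolerate spread spectrum force it off. The spread being disabled is small: LINK_SPREAD_05_DOWNSPREAD_30KHZ down-spreads the link rate by at most 0.5% at roughly 30 kHz, so an HBR 2.70 GHz link dips to about 2.6865 GHz. A small sketch of that arithmetic (illustrative helper, not from the patch):

    #include <stdint.h>

    /* Illustrative only: minimum effective link rate under 0.5% downspread. */
    static uint64_t min_rate_under_downspread(uint64_t link_rate_hz)
    {
        /* 0.5% down-spread: the rate swings between 99.5% and 100% of nominal. */
        return link_rate_hz - (link_rate_hz * 5) / 1000;
    }

    /* min_rate_under_downspread(2700000000ULL) == 2686500000 (HBR)
     * min_rate_under_downspread(5400000000ULL) == 5373000000 (HBR2)
     */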

@@ -330,6 +330,9 @@ bool resource_are_streams_timing_synchronizable(
             != stream2->timing.pix_clk_khz)
         return false;
 
+    if (stream1->clamping.c_depth != stream2->clamping.c_depth)
+        return false;
+
     if (stream1->phy_pix_clk != stream2->phy_pix_clk
             && (!dc_is_dp_signal(stream1->signal)
             || !dc_is_dp_signal(stream2->signal)))
@@ -337,6 +340,20 @@ bool resource_are_streams_timing_synchronizable(
 
     return true;
 }
+
+static bool is_dp_and_hdmi_sharable(
+        struct dc_stream_state *stream1,
+        struct dc_stream_state *stream2)
+{
+    if (stream1->ctx->dc->caps.disable_dp_clk_share)
+        return false;
+
+    if (stream1->clamping.c_depth != COLOR_DEPTH_888 ||
+        stream2->clamping.c_depth != COLOR_DEPTH_888)
+        return false;
+
+    return true;
+}
 
 static bool is_sharable_clk_src(
         const struct pipe_ctx *pipe_with_clk_src,
@@ -348,7 +365,10 @@ static bool is_sharable_clk_src(
     if (pipe_with_clk_src->stream->signal == SIGNAL_TYPE_VIRTUAL)
         return false;
 
-    if (dc_is_dp_signal(pipe_with_clk_src->stream->signal))
+    if (dc_is_dp_signal(pipe_with_clk_src->stream->signal) ||
+        (dc_is_dp_signal(pipe->stream->signal) &&
+         !is_dp_and_hdmi_sharable(pipe_with_clk_src->stream,
+                 pipe->stream)))
         return false;
 
     if (dc_is_hdmi_signal(pipe_with_clk_src->stream->signal)

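The new helper above lets an HDMI stream's clock source be reused by a DP stream only when the ASIC does not set disable_dp_clk_share (DCE8 and DCE10 do, per the resource hunks below) and both streams stay at COLOR_DEPTH_888, likely because deeper HDMI color scales the symbol clock (see the transmitter_control hunk earlier) in a way a co-tenant DP stream could not follow. A stand-alone restatement of the rule as a sketch; all example_ names and the plain bpc encoding are illustrative:

    #include <stdbool.h>

    /* Stand-in for the new sharing rule, illustrative only. */
    struct example_stream {
        bool is_dp;
        unsigned int bits_per_color;    /* 8 means COLOR_DEPTH_888 */
    };

    static bool example_can_share_clk_src(const struct example_stream *owner,
                                          const struct example_stream *other,
                                          bool asic_disables_dp_clk_share)
    {
        /* A DP stream never lends its clock source out. */
        if (owner->is_dp)
            return false;
        /* An HDMI owner may lend to DP only on ASICs that allow it and
         * only while both streams stay at 24 bpp. */
        if (other->is_dp &&
            (asic_disables_dp_clk_share ||
             owner->bits_per_color != 8 || other->bits_per_color != 8))
            return false;
        return true;
    }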

@@ -78,6 +78,8 @@ struct dc_caps {
     bool dual_link_dvi;
     bool post_blend_color_processing;
     bool force_dp_tps4_for_cp2520;
+    bool disable_dp_clk_share;
+    bool psp_setup_panel_mode;
 };
 
 struct dc_dcc_surface_param {


@@ -417,6 +417,7 @@ enum {
     GAMMA_RGB_256_ENTRIES = 256,
     GAMMA_RGB_FLOAT_1024_ENTRIES = 1024,
     GAMMA_CS_TFM_1D_ENTRIES = 4096,
+    GAMMA_CUSTOM_ENTRIES = 4096,
     GAMMA_MAX_ENTRIES = 4096
 };
 
@@ -424,6 +425,7 @@ enum dc_gamma_type {
     GAMMA_RGB_256 = 1,
     GAMMA_RGB_FLOAT_1024 = 2,
     GAMMA_CS_TFM_1D = 3,
+    GAMMA_CUSTOM = 4,
 };
 
 struct dc_csc_transform {


@@ -73,6 +73,7 @@ struct dc_link {
     enum dc_irq_source irq_source_hpd;
     enum dc_irq_source irq_source_hpd_rx;/* aka DP Short Pulse */
     bool is_hpd_filter_disabled;
+    bool dp_ss_off;
 
     /* caps is the same as reported_link_cap. link_traing use
      * reported_link_cap. Will clean up. TODO
@@ -141,6 +142,8 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_
 bool dc_link_set_backlight_level(const struct dc_link *dc_link, uint32_t level,
         uint32_t frame_ramp, const struct dc_stream_state *stream);
 
+int dc_link_get_backlight_level(const struct dc_link *dc_link);
+
 bool dc_link_set_abm_disable(const struct dc_link *dc_link);
 
 bool dc_link_set_psr_enable(const struct dc_link *dc_link, bool enable, bool wait);


@@ -463,7 +463,7 @@ static void dce12_update_clocks(struct dccg *dccg,
     if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
         clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
         clock_voltage_req.clocks_in_khz = new_clocks->dispclk_khz;
-        dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
+        new_clocks->dispclk_khz = dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
         dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
 
         dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
@@ -625,7 +625,9 @@ static void dcn1_update_clocks(struct dccg *dccg,
     }
 
     /* dcn1 dppclk is tied to dispclk */
-    if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
+    /* program dispclk on = as a w/a for sleep resume clock ramping issues */
+    if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)
+            || new_clocks->dispclk_khz == dccg->clks.dispclk_khz) {
         dcn1_ramp_up_dispclk_with_dpp(dccg, new_clocks);
         dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
 
@@ -661,7 +663,7 @@ static void dce_update_clocks(struct dccg *dccg,
     }
 
     if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
-        dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
+        new_clocks->dispclk_khz = dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
         dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
     }
 }

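dce12_update_clocks and dce_update_clocks now cache what set_dispclk actually granted rather than what was requested; the display DFS settles on discrete steps, so the achieved frequency can differ from the request, and storing the real value keeps later should_set_clock comparisons and the pplib voltage request consistent. A sketch of the pattern with a made-up quantization model (nothing here is the real DCE divider math):

    #include <stdint.h>

    /* Illustrative clock model: the hardware only realizes multiples of a step. */
    static uint32_t example_set_dispclk(uint32_t requested_khz)
    {
        const uint32_t step_khz = 10;    /* made-up divider granularity */
        uint32_t granted_khz = requested_khz - (requested_khz % step_khz);

        /* ... program the DFS with granted_khz ... */
        return granted_khz;
    }

    static void example_update_clocks(uint32_t *cached_khz, uint32_t requested_khz)
    {
        if (requested_khz != *cached_khz) {
            /* Cache what was granted, not what was requested, so the next
             * comparison does not see a phantom difference. */
            *cached_khz = example_set_dispclk(requested_khz);
        }
    }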

@@ -256,6 +256,11 @@ static void setup_panel_mode(
     enum dp_panel_mode panel_mode)
 {
     uint32_t value;
+    struct dc_context *ctx = enc110->base.ctx;
+
+    /* if psp set panel mode, dal should be program it */
+    if (ctx->dc->caps.psp_setup_panel_mode)
+        return;
 
     ASSERT(REG(DP_DPHY_INTERNAL_CTRL));
     value = REG_READ(DP_DPHY_INTERNAL_CTRL);


@@ -919,7 +919,7 @@ static bool construct(
     dc->caps.i2c_speed_in_khz = 40;
     dc->caps.max_cursor_size = 128;
     dc->caps.dual_link_dvi = true;
-
+    dc->caps.disable_dp_clk_share = true;
     for (i = 0; i < pool->base.pipe_count; i++) {
         pool->base.timing_generators[i] =
             dce100_timing_generator_create(


@@ -2530,7 +2530,7 @@ static void pplib_apply_display_requirements(
     /* TODO: dce11.2*/
     pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
 
-    pp_display_cfg->disp_clk_khz = context->bw.dce.dispclk_khz;
+    pp_display_cfg->disp_clk_khz = dc->res_pool->dccg->clks.dispclk_khz;
 
     dce110_fill_display_configs(context, pp_display_cfg);
 
@@ -2559,7 +2559,7 @@ void dce110_set_bandwidth(
 {
     struct dc_clocks req_clks;
 
-    req_clks.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
+    req_clks.dispclk_khz = context->bw.dce.dispclk_khz;
     req_clks.phyclk_khz = get_max_pixel_clock_for_all_paths(dc, context);
 
     if (decrease_allowed)


@@ -677,9 +677,6 @@ static void destruct(struct dce110_resource_pool *pool)
             pool->base.timing_generators[i] = NULL;
         }
 
-        if (pool->base.engines[i] != NULL)
-            dce110_engine_destroy(&pool->base.engines[i]);
-
     }
 
     for (i = 0; i < pool->base.stream_enc_count; i++) {


@@ -883,6 +883,7 @@ static bool construct(
     dc->caps.i2c_speed_in_khz = 100;
     dc->caps.max_cursor_size = 128;
     dc->caps.dual_link_dvi = true;
+    dc->caps.psp_setup_panel_mode = true;
 
     dc->debug = debug_defaults;


@@ -946,6 +946,7 @@ static bool dce80_construct(
     }
 
     dc->caps.max_planes = pool->base.pipe_count;
+    dc->caps.disable_dp_clk_share = true;
 
     if (!resource_construct(num_virtual_links, dc, &pool->base,
             &res_create_funcs))
@@ -1131,6 +1132,7 @@ static bool dce81_construct(
     }
 
     dc->caps.max_planes = pool->base.pipe_count;
+    dc->caps.disable_dp_clk_share = true;
 
     if (!resource_construct(num_virtual_links, dc, &pool->base,
             &res_create_funcs))
@@ -1312,6 +1314,7 @@ static bool dce83_construct(
     }
 
     dc->caps.max_planes = pool->base.pipe_count;
+    dc->caps.disable_dp_clk_share = true;
 
     if (!resource_construct(num_virtual_links, dc, &pool->base,
             &res_create_funcs))


@@ -1089,6 +1089,8 @@ static void dcn10_init_hw(struct dc *dc)
     }
 
     enable_power_gating_plane(dc->hwseq, true);
+
+    memset(&dc->res_pool->dccg->clks, 0, sizeof(dc->res_pool->dccg->clks));
 }
 
 static void reset_hw_ctx_wrap(
@@ -1213,8 +1215,11 @@ static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
     } else if (tf->type == TF_TYPE_BYPASS) {
         dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
     } else {
-        /*TF_TYPE_DISTRIBUTED_POINTS*/
-        result = false;
+        cm_helper_translate_curve_to_degamma_hw_format(tf,
+                &dpp_base->degamma_params);
+        dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
+                &dpp_base->degamma_params);
+        result = true;
     }
 
     return result;


@@ -395,6 +395,8 @@ struct integrated_info {
     struct i2c_reg_info dp3_ext_hdmi_reg_settings[9];
     unsigned char dp3_ext_hdmi_6g_reg_num;
     struct i2c_reg_info dp3_ext_hdmi_6g_reg_settings[3];
+    /* V11 */
+    uint32_t dp_ss_control;
 };
 
 /**
/**


@@ -997,7 +997,9 @@ static void scale_user_regamma_ramp(struct pwl_float_data *pwl_rgb,
  * norm_y = 4095*regamma_y, and index is just truncating to nearest integer
  * lut1 = lut1D[index], lut2 = lut1D[index+1]
  *
- *adjustedY is then linearly interpolating regamma Y between lut1 and lut2
+ * adjustedY is then linearly interpolating regamma Y between lut1 and lut2
+ *
+ * Custom degamma on Linux uses the same interpolation math, so is handled here
  */
 static void apply_lut_1d(
         const struct dc_gamma *ramp,
@@ -1018,7 +1020,7 @@ static void apply_lut_1d(
     struct fixed31_32 delta_lut;
     struct fixed31_32 delta_index;
 
-    if (ramp->type != GAMMA_CS_TFM_1D)
+    if (ramp->type != GAMMA_CS_TFM_1D && ramp->type != GAMMA_CUSTOM)
         return; // this is not expected
 
     for (i = 0; i < num_hw_points; i++) {
@@ -1636,7 +1638,9 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
     map_regamma_hw_to_x_user(ramp, coeff, rgb_user,
             coordinates_x, axix_x, curve,
             MAX_HW_POINTS, tf_pts,
-            mapUserRamp);
+            mapUserRamp && ramp->type != GAMMA_CUSTOM);
+
+    if (ramp->type == GAMMA_CUSTOM)
+        apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts);
 
     ret = true;

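The comment extended above describes apply_lut_1d: the normalized output is scaled onto the 0..4095 index range, the index is truncated, and the result is a linear blend of the two neighbouring LUT entries; GAMMA_CUSTOM simply reuses that sampling for the Linux degamma LUT. The same interpolation in plain floating point, as an illustration only (the real code works in fixed31_32 and uses different names):

    #include <stddef.h>

    /* Illustrative only: linearly interpolate a normalized value (0.0..1.0)
     * through a 4096-entry, already-normalized 1D LUT.
     */
    static double lut1d_sample(const double *lut, size_t entries, double norm_y)
    {
        const size_t max_index = entries - 1;    /* 4095 for a 4096-entry LUT */
        double pos = norm_y * (double)max_index;
        size_t index = (size_t)pos;              /* truncate, as the comment says */
        double frac = pos - (double)index;

        if (index >= max_index)
            return lut[max_index];

        /* adjustedY = lut1 + frac * (lut2 - lut1) */
        return lut[index] + frac * (lut[index + 1] - lut[index]);
    }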

@@ -249,7 +249,6 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
 /**
  * drm_sched_entity_flush - Flush a context entity
  *
- * @sched: scheduler instance
  * @entity: scheduler entity
  * @timeout: time to wait in for Q to become empty in jiffies.
  *
@@ -292,7 +291,6 @@ EXPORT_SYMBOL(drm_sched_entity_flush);
 /**
  * drm_sched_entity_cleanup - Destroy a context entity
  *
- * @sched: scheduler instance
  * @entity: scheduler entity
  *
  * This should be called after @drm_sched_entity_do_release. It goes over the
@@ -356,7 +354,6 @@ EXPORT_SYMBOL(drm_sched_entity_fini);
 /**
  * drm_sched_entity_fini - Destroy a context entity
  *
- * @sched: scheduler instance
 * @entity: scheduler entity
 *
 * Calls drm_sched_entity_do_release() and drm_sched_entity_cleanup()
@@ -552,24 +549,28 @@ static void drm_sched_job_finish(struct work_struct *work)
                            finish_work);
     struct drm_gpu_scheduler *sched = s_job->sched;
 
-    /* remove job from ring_mirror_list */
-    spin_lock(&sched->job_list_lock);
-    list_del_init(&s_job->node);
-    if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
-        struct drm_sched_job *next;
-
-        spin_unlock(&sched->job_list_lock);
-        cancel_delayed_work_sync(&s_job->work_tdr);
-        spin_lock(&sched->job_list_lock);
-
-        /* queue TDR for next job */
-        next = list_first_entry_or_null(&sched->ring_mirror_list,
-                        struct drm_sched_job, node);
-
-        if (next)
-            schedule_delayed_work(&next->work_tdr, sched->timeout);
+    /*
+     * Canceling the timeout without removing our job from the ring mirror
+     * list is safe, as we will only end up in this worker if our jobs
+     * finished fence has been signaled. So even if some another worker
+     * manages to find this job as the next job in the list, the fence
+     * signaled check below will prevent the timeout to be restarted.
+     */
+    cancel_delayed_work_sync(&s_job->work_tdr);
+
+    spin_lock(&sched->job_list_lock);
+    /* queue TDR for next job */
+    if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
+        !list_is_last(&s_job->node, &sched->ring_mirror_list)) {
+        struct drm_sched_job *next = list_next_entry(s_job, node);
+
+        if (!dma_fence_is_signaled(&next->s_fence->finished))
+            schedule_delayed_work(&next->work_tdr, sched->timeout);
     }
+    /* remove job from ring_mirror_list */
+    list_del(&s_job->node);
     spin_unlock(&sched->job_list_lock);
+
     dma_fence_put(&s_job->s_fence->finished);
     sched->ops->free_job(s_job);
 }
@@ -715,7 +716,6 @@ EXPORT_SYMBOL(drm_sched_job_recovery);
  * drm_sched_job_init - init a scheduler job
  *
  * @job: scheduler job to init
- * @sched: scheduler instance
 * @entity: scheduler entity to use
 * @owner: job owner for debugging
 *
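The kernel-doc fixes above track the 4.19 scheduler API, where these entry points no longer take a struct drm_gpu_scheduler pointer because the entity already knows its scheduler. A hedged sketch of driver-side submission against those signatures; example_submit() is illustrative, while drm_sched_job_init() and drm_sched_entity_push_job() are the real calls at this point in time:

    #include <drm/gpu_scheduler.h>

    /* Illustrative only: push one job through a scheduler entity. */
    static int example_submit(struct drm_sched_job *job,
                              struct drm_sched_entity *entity,
                              void *owner)
    {
        int r;

        /* Note: no scheduler argument any more, matching the doc fix above. */
        r = drm_sched_job_init(job, entity, owner);
        if (r)
            return r;

        /* Hand the job to the scheduler; it becomes visible to the TDR
         * handling patched in drm_sched_job_finish() above. */
        drm_sched_entity_push_job(job, entity);
        return 0;
    }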