mirror of https://gitee.com/openkylin/linux.git
drm/amdgpu/display: Use wm_table.entries for dcn301 calculate_wm
[Why]
For dGPU Navi, the wm_table.nv_entries are used. These entries are not
populated for the DCN301 Vangogh APU; wm_table.entries are populated
instead.

[How]
Use DCN21 Renoir style wm calculations.

Signed-off-by: Leo Li <sunpeng.li@amd.com>
Signed-off-by: Zhan Liu <zhan.liu@amd.com>
Reviewed-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
Acked-by: Zhan Liu <zhan.liu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
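For context, the split between the two arrays comes from the watermark table layout in the clock manager header. The sketch below is a paraphrase from memory of dc/inc/hw/clk_mgr.h and is not part of this patch; the struct nv_wm_range_entry and WM_SET_COUNT names are recalled rather than taken from the diff. Navi dGPU code fills nv_entries, while APUs such as Renoir (DCN21) and Vangogh (DCN301) fill the overlapping entries array, which is what the new dcn301_calculate_wm_and_dlg below reads.

/* Paraphrased sketch of the wm_table layout (not part of this patch):
 * dGPU (Navi) code paths populate nv_entries; APU paths such as Renoir
 * and Vangogh populate the overlapping entries[] array instead.
 */
struct wm_table {
        union {
                struct nv_wm_range_entry nv_entries[WM_SET_COUNT];
                struct wm_range_table_entry entries[WM_SET_COUNT];
        };
};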
parent d2c9128595
commit eda29602f1

@@ -1619,12 +1619,106 @@ static void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
 	dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30);
 }
 
+static void calculate_wm_set_for_vlevel(
+		int vlevel,
+		struct wm_range_table_entry *table_entry,
+		struct dcn_watermarks *wm_set,
+		struct display_mode_lib *dml,
+		display_e2e_pipe_params_st *pipes,
+		int pipe_cnt)
+{
+	double dram_clock_change_latency_cached = dml->soc.dram_clock_change_latency_us;
+
+	ASSERT(vlevel < dml->soc.num_states);
+	/* only pipe 0 is read for voltage and dcf/soc clocks */
+	pipes[0].clks_cfg.voltage = vlevel;
+	pipes[0].clks_cfg.dcfclk_mhz = dml->soc.clock_limits[vlevel].dcfclk_mhz;
+	pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz;
+
+	dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us;
+	dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us;
+	dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us;
+
+	wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000;
+	wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000;
+	wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000;
+	wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000;
+	wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000;
+	wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000;
+	wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000;
+	wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000;
+	dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached;
+
+}
+
+static void dcn301_calculate_wm_and_dlg(
+		struct dc *dc, struct dc_state *context,
+		display_e2e_pipe_params_st *pipes,
+		int pipe_cnt,
+		int vlevel_req)
+{
+	int i, pipe_idx;
+	int vlevel, vlevel_max;
+	struct wm_range_table_entry *table_entry;
+	struct clk_bw_params *bw_params = dc->clk_mgr->bw_params;
+
+	ASSERT(bw_params);
+
+	vlevel_max = bw_params->clk_table.num_entries - 1;
+
+	/* WM Set D */
+	table_entry = &bw_params->wm_table.entries[WM_D];
+	if (table_entry->wm_type == WM_TYPE_RETRAINING)
+		vlevel = 0;
+	else
+		vlevel = vlevel_max;
+	calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.d,
+			&context->bw_ctx.dml, pipes, pipe_cnt);
+	/* WM Set C */
+	table_entry = &bw_params->wm_table.entries[WM_C];
+	vlevel = min(max(vlevel_req, 2), vlevel_max);
+	calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c,
+			&context->bw_ctx.dml, pipes, pipe_cnt);
+	/* WM Set B */
+	table_entry = &bw_params->wm_table.entries[WM_B];
+	vlevel = min(max(vlevel_req, 1), vlevel_max);
+	calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b,
+			&context->bw_ctx.dml, pipes, pipe_cnt);
+
+	/* WM Set A */
+	table_entry = &bw_params->wm_table.entries[WM_A];
+	vlevel = min(vlevel_req, vlevel_max);
+	calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.a,
+			&context->bw_ctx.dml, pipes, pipe_cnt);
+
+	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+		if (!context->res_ctx.pipe_ctx[i].stream)
+			continue;
+
+		pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
+		pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+
+		if (dc->config.forced_clocks) {
+			pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
+			pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
+		}
+		if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
+			pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
+		if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
+			pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
+
+		pipe_idx++;
+	}
+
+	dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+}
+
 static struct resource_funcs dcn301_res_pool_funcs = {
 	.destroy = dcn301_destroy_resource_pool,
 	.link_enc_create = dcn301_link_encoder_create,
 	.panel_cntl_create = dcn301_panel_cntl_create,
 	.validate_bandwidth = dcn30_validate_bandwidth,
-	.calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg,
+	.calculate_wm_and_dlg = dcn301_calculate_wm_and_dlg,
 	.populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
 	.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
 	.add_stream_to_ctx = dcn30_add_stream_to_ctx,