drm fixes for 5.7-rc4
core:
- EDID off-by-one DTD fix
- DP MST write return code fix

dma-buf:
- fix SET_NAME ioctl uapi
- doc fixes

amdgpu:
- Fix a green screen on resume issue
- PM fixes for SR-IOV
- SDMA fix for navi
- Renoir display fixes
- Cursor and pageflip stuttering fixes
- Misc additional display fixes
- (uapi) Add additional DCC tiling flags for navi1x

i915:
- Fix selftest refcnt leak (Xiyu)
- Fix gem vma lock (Chris)
- Fix gt's i915_request.timeline acquire by checking if cacheline is valid (Chris)
- Fix IRQ postinstall fault masks (Matt)

qxl:
- use-after-free fix
- fix lost kunmap
- release leak fix

virtio:
- context destruction fix

-----BEGIN PGP SIGNATURE-----

iQIcBAABAgAGBQJeq5AAAAoJEAx081l5xIa+Id8P/0JLdvrRpHkJgTKwZq2ZDXYn
fSfEuj5TUnIeRX4G6Z8KOuKEZwCNL3NdCfDhg3WGhpZ1MBoYEfOOOgejRKeS3UrG
ziY+zoxaZIVFpS30TC5XiWO79RjW8dJDJogF5XgBcm26aX2j27dzzaHeJZtdAjrS
HTKZgneYd3+B7Aa/UfZmQinHKXee0AykM/gJNbqPqp4FPSHdfEEwn72MmVHyTh1H
zrtmzC4tVx7mHbUlwrZCPUJQcbxVuA9E0bmsk8K1xDtnI1r/lB5LldkvV/HjaI9x
b5VEAMRoBjgZOqaXC+C/RLBIh8eP4drcKXfjt4MMCXIB6IpqEcr54wVnC7JTA4KF
pCYa3UChnmYceqAW+Yz+37F7PbWvfWnA4YQTWR4JwG+42rbR4nc6pRAh2HHCLPGR
M+ZiqSOYs0/ROdR2nzD2XsUjSYFksjLGCpyq2OqqOkhm6kbF8eZsL2Rxj9VPwHLk
PJhVR/d3Kc7fQpgbEeDvf/4jFlkodNv1/FauIpWRmXtt4WJM/vKPWwx80WvekhNh
IOqTrVW3XOZM2XYXVw/xSebBVMbs4+PtO1hRmaA5+yJwQNqU2rc6qLptUVxpib6/
Gwxyp8Wb+7K4vIC8+5QrcKaYByiLBiaFNTIH5vo6vUv/FZHo9veZaiMqKxBptCRq
LWONsaxPQqpdHSc/R25Q
=4eA4
-----END PGP SIGNATURE-----

Merge tag 'drm-fixes-2020-05-01' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Regular scheduled fixes for graphics. Nothing too extreme: a bunch of
  amdgpu fixes, i915 and qxl fixes, along with some misc ones. All
  seems to be progressing normally.

  core:
   - EDID off-by-one DTD fix
   - DP MST write return code fix

  dma-buf:
   - fix SET_NAME ioctl uapi
   - doc fixes

  amdgpu:
   - Fix a green screen on resume issue
   - PM fixes for SR-IOV
   - SDMA fix for navi
   - Renoir display fixes
   - Cursor and pageflip stuttering fixes
   - Misc additional display fixes
   - (uapi) Add additional DCC tiling flags for navi1x

  i915:
   - Fix selftest refcnt leak (Xiyu)
   - Fix gem vma lock (Chris)
   - Fix gt's i915_request.timeline acquire by checking if cacheline is
     valid (Chris)
   - Fix IRQ postinstall fault masks (Matt)

  qxl:
   - use-after-free fix
   - fix lost kunmap
   - release leak fix

  virtio:
   - context destruction fix"

* tag 'drm-fixes-2020-05-01' of git://anongit.freedesktop.org/drm/drm: (26 commits)
  dma-buf: fix documentation build warnings
  drm/qxl: qxl_release use after free
  drm/qxl: lost qxl_bo_kunmap_atomic_page in qxl_image_init_helper()
  drm/i915: Use proper fault mask in interrupt postinstall too
  drm/amd/display: Use cursor locking to prevent flip delays
  drm/amd/display: Update downspread percent to match spreadsheet for DCN2.1
  drm/amd/display: Defer cursor update around VUPDATE for all ASIC
  drm/amd/display: fix rn soc bb update
  drm/amd/display: check if REFCLK_CNTL register is present
  drm/amdgpu: bump version for invalidate L2 before SDMA IBs
  drm/amdgpu: invalidate L2 before SDMA IBs (v2)
  drm/amdgpu: add tiling flags from Mesa
  drm/amd/powerplay: avoid using pm_en before it is initialized revised
  Revert "drm/amd/powerplay: avoid using pm_en before it is initialized"
  drm/qxl: qxl_release leak in qxl_hw_surface_alloc()
  drm/qxl: qxl_release leak in qxl_draw_dirty_fb()
  drm/virtio: only destroy created contexts
  drm/dp_mst: Fix drm_dp_send_dpcd_write() return code
  drm/i915/gt: Check cacheline is valid before acquiring
  drm/i915/gem: Hold obj->vma.lock over for_each_ggtt_vma()
  ...
commit 477bfeb9a3
@@ -388,7 +388,8 @@ static long dma_buf_ioctl(struct file *file,
 		return ret;
 
-	case DMA_BUF_SET_NAME:
+	case DMA_BUF_SET_NAME_A:
+	case DMA_BUF_SET_NAME_B:
 		return dma_buf_set_name(dmabuf, (const char __user *)arg);
 
 	default:
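
Note on the SET_NAME fix above: a minimal userspace sketch (not kernel code) of why two ioctl numbers now exist. _IOW() encodes sizeof(type) into the request number, so the original `const char *` definition yielded different numbers for 32-bit and 64-bit callers; the switch above simply accepts both.

#include <stdint.h>
#include <stdio.h>
#include <linux/ioctl.h>

#define DMA_BUF_BASE 'b'

int main(void)
{
	/* Same direction, magic and nr; only the encoded size differs. */
	unsigned long name_a = _IOW(DMA_BUF_BASE, 1, uint32_t); /* 32-bit pointer */
	unsigned long name_b = _IOW(DMA_BUF_BASE, 1, uint64_t); /* 64-bit pointer */

	printf("DMA_BUF_SET_NAME_A = %#lx\n", name_a);
	printf("DMA_BUF_SET_NAME_B = %#lx\n", name_b);
	return 0;
}
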
@@ -655,8 +656,8 @@ EXPORT_SYMBOL_GPL(dma_buf_put);
  * calls attach() of dma_buf_ops to allow device-specific attach functionality
  * @dmabuf:		[in]	buffer to attach device to.
  * @dev:		[in]	device to be attached.
- * @importer_ops	[in]	importer operations for the attachment
- * @importer_priv	[in]	importer private pointer for the attachment
+ * @importer_ops:	[in]	importer operations for the attachment
+ * @importer_priv:	[in]	importer private pointer for the attachment
  *
  * Returns struct dma_buf_attachment pointer for this attachment. Attachments
  * must be cleaned up by calling dma_buf_detach().
@@ -85,9 +85,10 @@
  * - 3.34.0 - Non-DC can flip correctly between buffers with different pitches
  * - 3.35.0 - Add drm_amdgpu_info_device::tcc_disabled_mask
  * - 3.36.0 - Allow reading more status registers on si/cik
+ * - 3.37.0 - L2 is invalidated before SDMA IBs, needed for correctness
  */
 #define KMS_DRIVER_MAJOR	3
-#define KMS_DRIVER_MINOR	36
+#define KMS_DRIVER_MINOR	37
 #define KMS_DRIVER_PATCHLEVEL	0
 
 int amdgpu_vram_limit = 0;
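
The minor-version bump is what userspace keys on. A hedged sketch of how a Mesa-style consumer could gate on the new L2-invalidation guarantee via libdrm; the helper name here is ours, not a real Mesa function.

#include <stdbool.h>
#include <fcntl.h>
#include <stdio.h>
#include <xf86drm.h>

static bool sdma_l2_inv_supported(int fd)
{
	drmVersionPtr v = drmGetVersion(fd);
	bool ok;

	if (!v)
		return false;
	/* Guaranteed from amdgpu KMS version 3.37 onwards. */
	ok = v->version_major > 3 ||
	     (v->version_major == 3 && v->version_minor >= 37);
	drmFreeVersion(v);
	return ok;
}

int main(void)
{
	int fd = open("/dev/dri/renderD128", O_RDWR);

	if (fd >= 0)
		printf("L2-invalidate guarantee: %d\n", sdma_l2_inv_supported(fd));
	return 0;
}
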
@@ -73,6 +73,22 @@
 #define SDMA_OP_AQL_COPY  0
 #define SDMA_OP_AQL_BARRIER_OR  0
 
+#define SDMA_GCR_RANGE_IS_PA		(1 << 18)
+#define SDMA_GCR_SEQ(x)			(((x) & 0x3) << 16)
+#define SDMA_GCR_GL2_WB			(1 << 15)
+#define SDMA_GCR_GL2_INV		(1 << 14)
+#define SDMA_GCR_GL2_DISCARD		(1 << 13)
+#define SDMA_GCR_GL2_RANGE(x)		(((x) & 0x3) << 11)
+#define SDMA_GCR_GL2_US			(1 << 10)
+#define SDMA_GCR_GL1_INV		(1 << 9)
+#define SDMA_GCR_GLV_INV		(1 << 8)
+#define SDMA_GCR_GLK_INV		(1 << 7)
+#define SDMA_GCR_GLK_WB			(1 << 6)
+#define SDMA_GCR_GLM_INV		(1 << 5)
+#define SDMA_GCR_GLM_WB			(1 << 4)
+#define SDMA_GCR_GL1_RANGE(x)		(((x) & 0x3) << 2)
+#define SDMA_GCR_GLI_INV(x)		(((x) & 0x3) << 0)
+
 /*define for op field*/
 #define SDMA_PKT_HEADER_op_offset 0
 #define SDMA_PKT_HEADER_op_mask   0x000000FF
@@ -382,6 +382,18 @@ static void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 	uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid);
 
+	/* Invalidate L2, because if we don't do it, we might get stale cache
+	 * lines from previous IBs.
+	 */
+	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ));
+	amdgpu_ring_write(ring, 0);
+	amdgpu_ring_write(ring, (SDMA_GCR_GL2_INV |
+				 SDMA_GCR_GL2_WB |
+				 SDMA_GCR_GLM_INV |
+				 SDMA_GCR_GLM_WB) << 16);
+	amdgpu_ring_write(ring, 0xffffff80);
+	amdgpu_ring_write(ring, 0xffff);
+
 	/* An IB packet must end on a 8 DW boundary--the next dword
 	 * must be on a 8-dword boundary. Our IB packet below is 6
 	 * dwords long, thus add x number of NOPs, such that, in

@@ -1595,7 +1607,7 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 * 2 +
 		10 + 10 + 10, /* sdma_v5_0_ring_emit_fence x3 for user fence, vm fence */
-	.emit_ib_size = 7 + 6, /* sdma_v5_0_ring_emit_ib */
+	.emit_ib_size = 5 + 7 + 6, /* sdma_v5_0_ring_emit_ib */
 	.emit_ib = sdma_v5_0_ring_emit_ib,
 	.emit_fence = sdma_v5_0_ring_emit_fence,
 	.emit_pipeline_sync = sdma_v5_0_ring_emit_pipeline_sync,
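
For reference, a small runnable sketch of how the third dword of the GCR_REQ packet emitted above is composed; the define values mirror the ones added in navi10_sdma_pkt_open.h, and the flags land in bits 31:16 of that dword.

#include <stdint.h>
#include <stdio.h>

#define SDMA_GCR_GL2_WB		(1 << 15)
#define SDMA_GCR_GL2_INV	(1 << 14)
#define SDMA_GCR_GLM_INV	(1 << 5)
#define SDMA_GCR_GLM_WB		(1 << 4)

int main(void)
{
	/* Writeback + invalidate GL2 and the metadata cache (GLM). */
	uint32_t dw2 = (SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB |
			SDMA_GCR_GLM_INV | SDMA_GCR_GLM_WB) << 16;

	printf("GCR_REQ dw2 = 0x%08x\n", dw2); /* prints 0xc0300000 */
	return 0;
}
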
@@ -3340,7 +3340,8 @@ fill_plane_dcc_attributes(struct amdgpu_device *adev,
 				 const union dc_tiling_info *tiling_info,
 				 const uint64_t info,
 				 struct dc_plane_dcc_param *dcc,
-				 struct dc_plane_address *address)
+				 struct dc_plane_address *address,
+				 bool force_disable_dcc)
 {
 	struct dc *dc = adev->dm.dc;
 	struct dc_dcc_surface_param input;

@@ -3352,6 +3353,9 @@ fill_plane_dcc_attributes(struct amdgpu_device *adev,
 	memset(&input, 0, sizeof(input));
 	memset(&output, 0, sizeof(output));
 
+	if (force_disable_dcc)
+		return 0;
+
 	if (!offset)
 		return 0;
 

@@ -3401,7 +3405,8 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
 			     union dc_tiling_info *tiling_info,
 			     struct plane_size *plane_size,
 			     struct dc_plane_dcc_param *dcc,
-			     struct dc_plane_address *address)
+			     struct dc_plane_address *address,
+			     bool force_disable_dcc)
 {
 	const struct drm_framebuffer *fb = &afb->base;
 	int ret;

@@ -3507,7 +3512,8 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
 
 		ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
 						plane_size, tiling_info,
-						tiling_flags, dcc, address);
+						tiling_flags, dcc, address,
+						force_disable_dcc);
 		if (ret)
 			return ret;
 	}

@@ -3599,7 +3605,8 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
 			    const struct drm_plane_state *plane_state,
 			    const uint64_t tiling_flags,
 			    struct dc_plane_info *plane_info,
-			    struct dc_plane_address *address)
+			    struct dc_plane_address *address,
+			    bool force_disable_dcc)
 {
 	const struct drm_framebuffer *fb = plane_state->fb;
 	const struct amdgpu_framebuffer *afb =

@@ -3681,7 +3688,8 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
 					   plane_info->rotation, tiling_flags,
 					   &plane_info->tiling_info,
 					   &plane_info->plane_size,
-					   &plane_info->dcc, address);
+					   &plane_info->dcc, address,
+					   force_disable_dcc);
 	if (ret)
 		return ret;
 

@@ -3704,6 +3712,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
 	struct dc_plane_info plane_info;
 	uint64_t tiling_flags;
 	int ret;
+	bool force_disable_dcc = false;
 
 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
 	if (ret)

@@ -3718,9 +3727,11 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
 	if (ret)
 		return ret;
 
+	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
 	ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
 					  &plane_info,
-					  &dc_plane_state->address);
+					  &dc_plane_state->address,
+					  force_disable_dcc);
 	if (ret)
 		return ret;
 

@@ -5342,6 +5353,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
 	uint64_t tiling_flags;
 	uint32_t domain;
 	int r;
+	bool force_disable_dcc = false;
 
 	dm_plane_state_old = to_dm_plane_state(plane->state);
 	dm_plane_state_new = to_dm_plane_state(new_state);

@@ -5400,11 +5412,13 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
 			dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
 		struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
 
+		force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
 		fill_plane_buffer_attributes(
 			adev, afb, plane_state->format, plane_state->rotation,
 			tiling_flags, &plane_state->tiling_info,
 			&plane_state->plane_size, &plane_state->dcc,
-			&plane_state->address);
+			&plane_state->address,
+			force_disable_dcc);
 	}
 
 	return 0;

@@ -6676,7 +6690,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 		fill_dc_plane_info_and_addr(
 			dm->adev, new_plane_state, tiling_flags,
 			&bundle->plane_infos[planes_count],
-			&bundle->flip_addrs[planes_count].address);
+			&bundle->flip_addrs[planes_count].address,
+			false);
+
+		DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
+				 new_plane_state->plane->index,
+				 bundle->plane_infos[planes_count].dcc.enable);
 
 		bundle->surface_updates[planes_count].plane_info =
 			&bundle->plane_infos[planes_count];

@@ -8096,7 +8115,8 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
 			ret = fill_dc_plane_info_and_addr(
 				dm->adev, new_plane_state, tiling_flags,
 				plane_info,
-				&flip_addr->address);
+				&flip_addr->address,
+				false);
 			if (ret)
 				goto cleanup;
 
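
The plumbing above threads one boolean through the fill_* helpers. A trivial sketch of the predicate it carries (illustrative only; the enum here is abbreviated): DCC is force-disabled only on Raven while the device is suspending or resuming, where the retrieved DCC state cannot be trusted and previously produced the green screen on resume.

#include <stdbool.h>
#include <stdio.h>

enum asic_type { CHIP_RAVEN, CHIP_NAVI10 };

/* Mirrors: adev->asic_type == CHIP_RAVEN && adev->in_suspend */
static bool should_force_disable_dcc(enum asic_type type, bool in_suspend)
{
	return type == CHIP_RAVEN && in_suspend;
}

int main(void)
{
	printf("%d\n", should_force_disable_dcc(CHIP_RAVEN, true));  /* 1 */
	printf("%d\n", should_force_disable_dcc(CHIP_NAVI10, true)); /* 0 */
	return 0;
}
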
@@ -2908,6 +2908,12 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
 				sizeof(hpd_irq_dpcd_data),
 				"Status: ");
 
+		for (i = 0; i < MAX_PIPES; i++) {
+			pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+			if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
+				link->dc->hwss.blank_stream(pipe_ctx);
+		}
+
 		for (i = 0; i < MAX_PIPES; i++) {
 			pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
 			if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)

@@ -2927,6 +2933,12 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
 		if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
 			dc_link_reallocate_mst_payload(link);
 
+		for (i = 0; i < MAX_PIPES; i++) {
+			pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+			if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
+				link->dc->hwss.unblank_stream(pipe_ctx, &previous_link_settings);
+		}
+
 		status = false;
 		if (out_link_loss)
 			*out_link_loss = true;

@@ -4227,6 +4239,21 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
 void dpcd_set_source_specific_data(struct dc_link *link)
 {
 	const uint32_t post_oui_delay = 30; // 30ms
+	uint8_t dspc = 0;
+	enum dc_status ret = DC_ERROR_UNEXPECTED;
+
+	ret = core_link_read_dpcd(link, DP_DOWN_STREAM_PORT_COUNT, &dspc,
+				  sizeof(dspc));
+
+	if (ret != DC_OK) {
+		DC_LOG_ERROR("Error in DP aux read transaction,"
+			     " not writing source specific data\n");
+		return;
+	}
+
+	/* Return if OUI unsupported */
+	if (!(dspc & DP_OUI_SUPPORT))
+		return;
 
 	if (!link->dc->vendor_signature.is_valid) {
 		struct dpcd_amd_signature amd_signature;
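
The new early-out above keys off a single DPCD capability bit. A userspace-style sketch; the bit position matches DP_OUI_SUPPORT in drm_dp_helper.h, bit 7 of DOWN_STREAM_PORT_COUNT at DPCD address 0x0007.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DP_OUI_SUPPORT (1 << 7)

static bool sink_accepts_oui(uint8_t down_stream_port_count)
{
	return down_stream_port_count & DP_OUI_SUPPORT;
}

int main(void)
{
	printf("%d\n", sink_accepts_oui(0x81)); /* 1: OUI writes allowed */
	printf("%d\n", sink_accepts_oui(0x01)); /* 0: skip OUI writes   */
	return 0;
}
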
@@ -231,34 +231,6 @@ struct dc_stream_status *dc_stream_get_status(
 	return dc_stream_get_status_from_state(dc->current_state, stream);
 }
 
-static void delay_cursor_until_vupdate(struct pipe_ctx *pipe_ctx, struct dc *dc)
-{
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-	unsigned int vupdate_line;
-	unsigned int lines_to_vupdate, us_to_vupdate, vpos, nvpos;
-	struct dc_stream_state *stream = pipe_ctx->stream;
-	unsigned int us_per_line;
-
-	if (stream->ctx->asic_id.chip_family == FAMILY_RV &&
-			ASICREV_IS_RAVEN(stream->ctx->asic_id.hw_internal_rev)) {
-
-		vupdate_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
-		if (!dc_stream_get_crtc_position(dc, &stream, 1, &vpos, &nvpos))
-			return;
-
-		if (vpos >= vupdate_line)
-			return;
-
-		us_per_line = stream->timing.h_total * 10000 / stream->timing.pix_clk_100hz;
-		lines_to_vupdate = vupdate_line - vpos;
-		us_to_vupdate = lines_to_vupdate * us_per_line;
-
-		/* 70 us is a conservative estimate of cursor update time*/
-		if (us_to_vupdate < 70)
-			udelay(us_to_vupdate);
-	}
-#endif
-}
-
 /**
  * dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address

@@ -298,9 +270,7 @@ bool dc_stream_set_cursor_attributes(
 
 	if (!pipe_to_program) {
 		pipe_to_program = pipe_ctx;
-
-		delay_cursor_until_vupdate(pipe_ctx, dc);
-		dc->hwss.pipe_control_lock(dc, pipe_to_program, true);
+		dc->hwss.cursor_lock(dc, pipe_to_program, true);
 	}
 
 	dc->hwss.set_cursor_attribute(pipe_ctx);

@@ -309,7 +279,7 @@ bool dc_stream_set_cursor_attributes(
 	}
 
 	if (pipe_to_program)
-		dc->hwss.pipe_control_lock(dc, pipe_to_program, false);
+		dc->hwss.cursor_lock(dc, pipe_to_program, false);
 
 	return true;
 }

@@ -349,16 +319,14 @@ bool dc_stream_set_cursor_position(
 
 	if (!pipe_to_program) {
 		pipe_to_program = pipe_ctx;
-
-		delay_cursor_until_vupdate(pipe_ctx, dc);
-		dc->hwss.pipe_control_lock(dc, pipe_to_program, true);
+		dc->hwss.cursor_lock(dc, pipe_to_program, true);
 	}
 
 	dc->hwss.set_cursor_position(pipe_ctx);
 	}
 
 	if (pipe_to_program)
-		dc->hwss.pipe_control_lock(dc, pipe_to_program, false);
+		dc->hwss.cursor_lock(dc, pipe_to_program, false);
 
 	return true;
 }
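
The deleted delay_cursor_until_vupdate() (replaced by real cursor locking) estimated time-to-VUPDATE from scanline math. For anyone reading the removal, a runnable sketch of the unit conversion it used: pix_clk is kept in 100 Hz units, so h_total * 10000 / pix_clk_100hz yields microseconds per scanline.

#include <stdio.h>

int main(void)
{
	unsigned int h_total = 2200;          /* 1080p@60 CEA timing      */
	unsigned int pix_clk_100hz = 1485000; /* 148.5 MHz in 100 Hz units */
	unsigned int us_per_line = h_total * 10000 / pix_clk_100hz;

	printf("%u us per line\n", us_per_line); /* ~14 us */
	return 0;
}
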
@@ -2757,6 +2757,7 @@ static const struct hw_sequencer_funcs dce110_funcs = {
 	.disable_plane = dce110_power_down_fe,
 	.pipe_control_lock = dce_pipe_control_lock,
 	.interdependent_update_lock = NULL,
+	.cursor_lock = dce_pipe_control_lock,
 	.prepare_bandwidth = dce110_prepare_bandwidth,
 	.optimize_bandwidth = dce110_optimize_bandwidth,
 	.set_drr = set_drr,
@@ -1625,6 +1625,16 @@ void dcn10_pipe_control_lock(
 		hws->funcs.verify_allow_pstate_change_high(dc);
 }
 
+void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
+{
+	/* cursor lock is per MPCC tree, so only need to lock one pipe per stream */
+	if (!pipe || pipe->top_pipe)
+		return;
+
+	dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
+			pipe->stream_res.opp->inst, lock);
+}
+
 static bool wait_for_reset_trigger_to_occur(
 	struct dc_context *dc_ctx,
 	struct timing_generator *tg)

@@ -49,6 +49,7 @@ void dcn10_pipe_control_lock(
 	struct dc *dc,
 	struct pipe_ctx *pipe,
 	bool lock);
+void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock);
 void dcn10_blank_pixel_data(
 	struct dc *dc,
 	struct pipe_ctx *pipe_ctx,
@@ -50,6 +50,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
 	.disable_audio_stream = dce110_disable_audio_stream,
 	.disable_plane = dcn10_disable_plane,
 	.pipe_control_lock = dcn10_pipe_control_lock,
+	.cursor_lock = dcn10_cursor_lock,
 	.interdependent_update_lock = dcn10_lock_all_pipes,
 	.prepare_bandwidth = dcn10_prepare_bandwidth,
 	.optimize_bandwidth = dcn10_optimize_bandwidth,
@@ -223,6 +223,9 @@ struct mpcc *mpc1_insert_plane(
 	REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, dpp_id);
 	REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, tree->opp_id);
 
+	/* Configure VUPDATE lock set for this MPCC to map to the OPP */
+	REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, tree->opp_id);
+
 	/* update mpc tree mux setting */
 	if (tree->opp_list == insert_above_mpcc) {
 		/* insert the toppest mpcc */

@@ -318,6 +321,7 @@ void mpc1_remove_mpcc(
 		REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
 		REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
 		REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf);
+		REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf);
 
 		/* mark this mpcc as not in use */
 		mpc10->mpcc_in_use_mask &= ~(1 << mpcc_id);

@@ -328,6 +332,7 @@ void mpc1_remove_mpcc(
 		REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
 		REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
 		REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf);
+		REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf);
 	}
 }
 

@@ -361,6 +366,7 @@ void mpc1_mpc_init(struct mpc *mpc)
 		REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
 		REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
 		REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf);
+		REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf);
 
 		mpc1_init_mpcc(&(mpc->mpcc_array[mpcc_id]), mpcc_id);
 	}

@@ -381,6 +387,7 @@ void mpc1_mpc_init_single_inst(struct mpc *mpc, unsigned int mpcc_id)
 	REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
 	REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
 	REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf);
+	REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf);
 
 	mpc1_init_mpcc(&(mpc->mpcc_array[mpcc_id]), mpcc_id);
 

@@ -453,6 +460,13 @@ void mpc1_read_mpcc_state(
 			MPCC_BUSY, &s->busy);
 }
 
+void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool lock)
+{
+	struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+
+	REG_SET(CUR[opp_id], 0, CUR_VUPDATE_LOCK_SET, lock ? 1 : 0);
+}
+
 static const struct mpc_funcs dcn10_mpc_funcs = {
 	.read_mpcc_state = mpc1_read_mpcc_state,
 	.insert_plane = mpc1_insert_plane,

@@ -464,6 +478,7 @@ static const struct mpc_funcs dcn10_mpc_funcs = {
 	.assert_mpcc_idle_before_connect = mpc1_assert_mpcc_idle_before_connect,
 	.init_mpcc_list_from_hw = mpc1_init_mpcc_list_from_hw,
 	.update_blending = mpc1_update_blending,
+	.cursor_lock = mpc1_cursor_lock,
 	.set_denorm = NULL,
 	.set_denorm_clamp = NULL,
 	.set_output_csc = NULL,
@@ -39,11 +39,12 @@
 	SRII(MPCC_BG_G_Y, MPCC, inst),\
 	SRII(MPCC_BG_R_CR, MPCC, inst),\
 	SRII(MPCC_BG_B_CB, MPCC, inst),\
 	SRII(MPCC_BG_B_CB, MPCC, inst),\
-	SRII(MPCC_SM_CONTROL, MPCC, inst)
+	SRII(MPCC_SM_CONTROL, MPCC, inst),\
+	SRII(MPCC_UPDATE_LOCK_SEL, MPCC, inst)
 
 #define MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(inst) \
-	SRII(MUX, MPC_OUT, inst)
+	SRII(MUX, MPC_OUT, inst),\
+	VUPDATE_SRII(CUR, VUPDATE_LOCK_SET, inst)
 
 #define MPC_COMMON_REG_VARIABLE_LIST \
 	uint32_t MPCC_TOP_SEL[MAX_MPCC]; \

@@ -55,7 +56,9 @@
 	uint32_t MPCC_BG_R_CR[MAX_MPCC]; \
 	uint32_t MPCC_BG_B_CB[MAX_MPCC]; \
 	uint32_t MPCC_SM_CONTROL[MAX_MPCC]; \
-	uint32_t MUX[MAX_OPP];
+	uint32_t MUX[MAX_OPP]; \
+	uint32_t MPCC_UPDATE_LOCK_SEL[MAX_MPCC]; \
+	uint32_t CUR[MAX_OPP];
 
 #define MPC_COMMON_MASK_SH_LIST_DCN1_0(mask_sh)\
 	SF(MPCC0_MPCC_TOP_SEL, MPCC_TOP_SEL, mask_sh),\

@@ -78,7 +81,8 @@
 	SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FIELD_ALT, mask_sh),\
 	SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FORCE_NEXT_FRAME_POL, mask_sh),\
 	SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FORCE_NEXT_TOP_POL, mask_sh),\
-	SF(MPC_OUT0_MUX, MPC_OUT_MUX, mask_sh)
+	SF(MPC_OUT0_MUX, MPC_OUT_MUX, mask_sh),\
+	SF(MPCC0_MPCC_UPDATE_LOCK_SEL, MPCC_UPDATE_LOCK_SEL, mask_sh)
 
 #define MPC_REG_FIELD_LIST(type) \
 	type MPCC_TOP_SEL;\

@@ -101,7 +105,9 @@
 	type MPCC_SM_FIELD_ALT;\
 	type MPCC_SM_FORCE_NEXT_FRAME_POL;\
 	type MPCC_SM_FORCE_NEXT_TOP_POL;\
-	type MPC_OUT_MUX;
+	type MPC_OUT_MUX;\
+	type MPCC_UPDATE_LOCK_SEL;\
+	type CUR_VUPDATE_LOCK_SET;
 
 struct dcn_mpc_registers {
 	MPC_COMMON_REG_VARIABLE_LIST

@@ -192,4 +198,6 @@ void mpc1_read_mpcc_state(
 	int mpcc_inst,
 	struct mpcc_state *s);
 
+void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool lock);
+
 #endif
@@ -181,6 +181,14 @@ enum dcn10_clk_src_array_id {
 	.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
 					mm ## block ## id ## _ ## reg_name
 
+#define VUPDATE_SRII(reg_name, block, id)\
+	.reg_name[id] = BASE(mm ## reg_name ## 0 ## _ ## block ## id ## _BASE_IDX) + \
+					mm ## reg_name ## 0 ## _ ## block ## id
+
+/* set field/register/bitfield name */
+#define SFRB(field_name, reg_name, bitfield, post_fix)\
+	.field_name = reg_name ## __ ## bitfield ## post_fix
+
 /* NBIO */
 #define NBIO_BASE_INNER(seg) \
 	NBIF_BASE__INST0_SEG ## seg

@@ -419,11 +427,13 @@ static const struct dcn_mpc_registers mpc_regs = {
 };
 
 static const struct dcn_mpc_shift mpc_shift = {
-	MPC_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT)
+	MPC_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT),\
+	SFRB(CUR_VUPDATE_LOCK_SET, CUR0_VUPDATE_LOCK_SET0, CUR0_VUPDATE_LOCK_SET, __SHIFT)
 };
 
 static const struct dcn_mpc_mask mpc_mask = {
-	MPC_COMMON_MASK_SH_LIST_DCN1_0(_MASK),
+	MPC_COMMON_MASK_SH_LIST_DCN1_0(_MASK),\
+	SFRB(CUR_VUPDATE_LOCK_SET, CUR0_VUPDATE_LOCK_SET0, CUR0_VUPDATE_LOCK_SET, _MASK)
 };
 
 #define tg_regs(id)\
@@ -2294,7 +2294,8 @@ void dcn20_fpga_init_hw(struct dc *dc)
 
 	REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2);
 	REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
-	REG_WRITE(REFCLK_CNTL, 0);
+	if (REG(REFCLK_CNTL))
+		REG_WRITE(REFCLK_CNTL, 0);
 	//
 
 
@@ -52,6 +52,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
 	.disable_plane = dcn20_disable_plane,
 	.pipe_control_lock = dcn20_pipe_control_lock,
 	.interdependent_update_lock = dcn10_lock_all_pipes,
+	.cursor_lock = dcn10_cursor_lock,
 	.prepare_bandwidth = dcn20_prepare_bandwidth,
 	.optimize_bandwidth = dcn20_optimize_bandwidth,
 	.update_bandwidth = dcn20_update_bandwidth,
@@ -545,6 +545,7 @@ const struct mpc_funcs dcn20_mpc_funcs = {
 	.mpc_init = mpc1_mpc_init,
 	.mpc_init_single_inst = mpc1_mpc_init_single_inst,
 	.update_blending = mpc2_update_blending,
+	.cursor_lock = mpc1_cursor_lock,
 	.get_mpcc_for_dpp = mpc2_get_mpcc_for_dpp,
 	.wait_for_idle = mpc2_assert_idle_mpcc,
 	.assert_mpcc_idle_before_connect = mpc2_assert_mpcc_idle_before_connect,
@@ -179,7 +179,8 @@
 	SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MAX_G_Y, mask_sh),\
 	SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MIN_G_Y, mask_sh),\
 	SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MAX_B_CB, mask_sh),\
-	SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh)
+	SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh),\
+	SF(CUR_VUPDATE_LOCK_SET0, CUR_VUPDATE_LOCK_SET, mask_sh)
 
 /*
  * DCN2 MPC_OCSC debug status register:
@@ -508,6 +508,10 @@ enum dcn20_clk_src_array_id {
 	.block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
 					mm ## block ## id ## _ ## reg_name
 
+#define VUPDATE_SRII(reg_name, block, id)\
+	.reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
+					mm ## reg_name ## _ ## block ## id
+
 /* NBIO */
 #define NBIO_BASE_INNER(seg) \
 	NBIO_BASE__INST0_SEG ## seg
@@ -53,6 +53,7 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
 	.disable_plane = dcn20_disable_plane,
 	.pipe_control_lock = dcn20_pipe_control_lock,
 	.interdependent_update_lock = dcn10_lock_all_pipes,
+	.cursor_lock = dcn10_cursor_lock,
 	.prepare_bandwidth = dcn20_prepare_bandwidth,
 	.optimize_bandwidth = dcn20_optimize_bandwidth,
 	.update_bandwidth = dcn20_update_bandwidth,
@@ -284,7 +284,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
 	.dram_channel_width_bytes = 4,
 	.fabric_datapath_to_dcn_data_return_bytes = 32,
 	.dcn_downspread_percent = 0.5,
-	.downspread_percent = 0.5,
+	.downspread_percent = 0.38,
 	.dram_page_open_time_ns = 50.0,
 	.dram_rw_turnaround_time_ns = 17.5,
 	.dram_return_buffer_per_channel_bytes = 8192,

@@ -340,6 +340,10 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
 	.block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
 					mm ## block ## id ## _ ## reg_name
 
+#define VUPDATE_SRII(reg_name, block, id)\
+	.reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
+					mm ## reg_name ## _ ## block ## id
+
 /* NBIO */
 #define NBIO_BASE_INNER(seg) \
 	NBIF0_BASE__INST0_SEG ## seg

@@ -1374,64 +1378,49 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
 {
 	struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool);
 	struct clk_limit_table *clk_table = &bw_params->clk_table;
-	unsigned int i, j, k;
-	int closest_clk_lvl;
+	struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
+	unsigned int i, j, closest_clk_lvl;
 
 	// Default clock levels are used for diags, which may lead to overclocking.
-	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) && !IS_DIAG_DC(dc->ctx->dce_environment)) {
+	if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
 		dcn2_1_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
 		dcn2_1_ip.max_num_dpp = pool->base.pipe_count;
 		dcn2_1_soc.num_chans = bw_params->num_channels;
 
-		/* Vmin: leave lowest DCN clocks, override with dcfclk, fclk, memclk from fuse */
-		dcn2_1_soc.clock_limits[0].state = 0;
-		dcn2_1_soc.clock_limits[0].dcfclk_mhz = clk_table->entries[0].dcfclk_mhz;
-		dcn2_1_soc.clock_limits[0].fabricclk_mhz = clk_table->entries[0].fclk_mhz;
-		dcn2_1_soc.clock_limits[0].socclk_mhz = clk_table->entries[0].socclk_mhz;
-		dcn2_1_soc.clock_limits[0].dram_speed_mts = clk_table->entries[0].memclk_mhz * 2;
-
-		/*
-		 * Other levels: find closest DCN clocks that fit the given clock limit using dcfclk
-		 * as indicator
-		 */
-
-		closest_clk_lvl = -1;
-		/* index currently being filled */
-		k = 1;
-		for (i = 1; i < clk_table->num_entries; i++) {
-			/* loop backwards, skip duplicate state*/
-			for (j = dcn2_1_soc.num_states - 1; j >= k; j--) {
+		ASSERT(clk_table->num_entries);
+		for (i = 0; i < clk_table->num_entries; i++) {
+			/* loop backwards*/
+			for (closest_clk_lvl = 0, j = dcn2_1_soc.num_states - 1; j >= 0; j--) {
 				if ((unsigned int) dcn2_1_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
 					closest_clk_lvl = j;
 					break;
 				}
 			}
 
-			/* if found a lvl that fits, use the DCN clks from it, if not, go to next clk limit*/
-			if (closest_clk_lvl != -1) {
-				dcn2_1_soc.clock_limits[k].state = i;
-				dcn2_1_soc.clock_limits[k].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
-				dcn2_1_soc.clock_limits[k].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
-				dcn2_1_soc.clock_limits[k].socclk_mhz = clk_table->entries[i].socclk_mhz;
-				dcn2_1_soc.clock_limits[k].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
+			clock_limits[i].state = i;
+			clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+			clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+			clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
+			clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
 
-				dcn2_1_soc.clock_limits[k].dispclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
-				dcn2_1_soc.clock_limits[k].dppclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
-				dcn2_1_soc.clock_limits[k].dram_bw_per_chan_gbps = dcn2_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
-				dcn2_1_soc.clock_limits[k].dscclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
-				dcn2_1_soc.clock_limits[k].dtbclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
-				dcn2_1_soc.clock_limits[k].phyclk_d18_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
-				dcn2_1_soc.clock_limits[k].phyclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
-				k++;
-			}
+			clock_limits[i].dispclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+			clock_limits[i].dppclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+			clock_limits[i].dram_bw_per_chan_gbps = dcn2_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+			clock_limits[i].dscclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+			clock_limits[i].dtbclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+			clock_limits[i].phyclk_d18_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+			clock_limits[i].phyclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+		}
+		for (i = 0; i < clk_table->num_entries; i++)
+			dcn2_1_soc.clock_limits[i] = clock_limits[i];
+		if (clk_table->num_entries) {
+			dcn2_1_soc.num_states = clk_table->num_entries;
+			/* duplicate last level */
+			dcn2_1_soc.clock_limits[dcn2_1_soc.num_states] = dcn2_1_soc.clock_limits[dcn2_1_soc.num_states - 1];
+			dcn2_1_soc.clock_limits[dcn2_1_soc.num_states].state = dcn2_1_soc.num_states;
 		}
-		dcn2_1_soc.num_states = k;
 	}
 
-	/* duplicate last level */
-	dcn2_1_soc.clock_limits[dcn2_1_soc.num_states] = dcn2_1_soc.clock_limits[dcn2_1_soc.num_states - 1];
-	dcn2_1_soc.clock_limits[dcn2_1_soc.num_states].state = dcn2_1_soc.num_states;
-
 	dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21);
 }
@@ -210,6 +210,22 @@ struct mpc_funcs {
 		struct mpcc_blnd_cfg *blnd_cfg,
 		int mpcc_id);
 
+	/*
+	 * Lock cursor updates for the specified OPP.
+	 * OPP defines the set of MPCC that are locked together for cursor.
+	 *
+	 * Parameters:
+	 * [in] mpc		- MPC context.
+	 * [in] opp_id	- The OPP to lock cursor updates on
+	 * [in] lock	- lock/unlock the OPP
+	 *
+	 * Return: void
+	 */
+	void (*cursor_lock)(
+			struct mpc *mpc,
+			int opp_id,
+			bool lock);
+
 	struct mpcc* (*get_mpcc_for_dpp)(
 			struct mpc_tree *tree,
 			int dpp_id);
@@ -86,6 +86,7 @@ struct hw_sequencer_funcs {
 			struct dc_state *context, bool lock);
 	void (*set_flip_control_gsl)(struct pipe_ctx *pipe_ctx,
 			bool flip_immediate);
+	void (*cursor_lock)(struct dc *dc, struct pipe_ctx *pipe, bool lock);
 
 	/* Timing Related */
 	void (*get_position)(struct pipe_ctx **pipe_ctx, int num_pipes,
@@ -1435,7 +1435,8 @@ static int pp_get_asic_baco_capability(void *handle, bool *cap)
 	if (!hwmgr)
 		return -EINVAL;
 
-	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_capability)
+	if (!(hwmgr->not_vf && amdgpu_dpm) ||
+		!hwmgr->hwmgr_func->get_asic_baco_capability)
 		return 0;
 
 	mutex_lock(&hwmgr->smu_lock);

@@ -1452,8 +1453,7 @@ static int pp_get_asic_baco_state(void *handle, int *state)
 	if (!hwmgr)
 		return -EINVAL;
 
-	if (!(hwmgr->not_vf && amdgpu_dpm) ||
-		!hwmgr->hwmgr_func->get_asic_baco_state)
+	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state)
 		return 0;
 
 	mutex_lock(&hwmgr->smu_lock);

@@ -1470,7 +1470,8 @@ static int pp_set_asic_baco_state(void *handle, int state)
 	if (!hwmgr)
 		return -EINVAL;
 
-	if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_asic_baco_state)
+	if (!(hwmgr->not_vf && amdgpu_dpm) ||
+		!hwmgr->hwmgr_func->set_asic_baco_state)
 		return 0;
 
 	mutex_lock(&hwmgr->smu_lock);
@@ -3442,8 +3442,12 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
 	drm_dp_queue_down_tx(mgr, txmsg);
 
 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
-	if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
-		ret = -EIO;
+	if (ret > 0) {
+		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
+			ret = -EIO;
+		else
+			ret = size;
+	}
 
 	kfree(txmsg);
 fail_put:
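
A compact model of the return convention this fix restores: like drm_dp_dpcd_write(), success reports the number of bytes written rather than the sideband layer's internal positive value. The helper below is an illustrative stand-in, not a drm function.

#include <errno.h>
#include <stdio.h>

#define REPLY_ACK 0
#define REPLY_NAK 1

/* Models the tail of drm_dp_send_dpcd_write(): wait_ret is what the
 * sideband wait returned, reply is the sink's reply type.
 */
static int dpcd_write_result(int wait_ret, int reply, int size)
{
	if (wait_ret <= 0)
		return wait_ret; /* error or timeout propagated as-is */
	return reply == REPLY_NAK ? -EIO : size;
}

int main(void)
{
	printf("%d\n", dpcd_write_result(1, REPLY_ACK, 16)); /* 16 */
	printf("%d\n", dpcd_write_result(1, REPLY_NAK, 16)); /* -5 */
	return 0;
}
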
@@ -5111,7 +5111,7 @@ static struct drm_display_mode *drm_mode_displayid_detailed(struct drm_device *d
 	struct drm_display_mode *mode;
 	unsigned pixel_clock = (timings->pixel_clock[0] |
 				(timings->pixel_clock[1] << 8) |
-				(timings->pixel_clock[2] << 16));
+				(timings->pixel_clock[2] << 16)) + 1;
 	unsigned hactive = (timings->hactive[0] | timings->hactive[1] << 8) + 1;
 	unsigned hblank = (timings->hblank[0] | timings->hblank[1] << 8) + 1;
 	unsigned hsync = (timings->hsync[0] | (timings->hsync[1] & 0x7f) << 8) + 1;
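
The DisplayID DTD rule behind the one-character fix above, as a runnable sketch: the stored 24-bit little-endian field is the pixel clock in 10 kHz units minus one, so the decoder must add 1, just like the hactive/hblank/hsync fields below it.

#include <stdint.h>
#include <stdio.h>

/* Returns the pixel clock in 10 kHz units. */
static unsigned int decode_dtd_pixel_clock(const uint8_t pc[3])
{
	return (pc[0] | pc[1] << 8 | pc[2] << 16) + 1;
}

int main(void)
{
	/* 148.5 MHz is stored as 14850 - 1 = 14849 = 0x003a01 */
	const uint8_t raw[3] = { 0x01, 0x3a, 0x00 };

	printf("%u x 10 kHz\n", decode_dtd_pixel_clock(raw)); /* 14850 */
	return 0;
}
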
@@ -182,21 +182,35 @@ i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj,
 			      int tiling_mode, unsigned int stride)
 {
 	struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt;
-	struct i915_vma *vma;
+	struct i915_vma *vma, *vn;
+	LIST_HEAD(unbind);
 	int ret = 0;
 
 	if (tiling_mode == I915_TILING_NONE)
 		return 0;
 
 	mutex_lock(&ggtt->vm.mutex);
+
+	spin_lock(&obj->vma.lock);
 	for_each_ggtt_vma(vma, obj) {
 		GEM_BUG_ON(vma->vm != &ggtt->vm);
 
 		if (i915_vma_fence_prepare(vma, tiling_mode, stride))
 			continue;
 
-		ret = __i915_vma_unbind(vma);
-		if (ret)
-			break;
+		list_move(&vma->vm_link, &unbind);
 	}
+	spin_unlock(&obj->vma.lock);
+
+	list_for_each_entry_safe(vma, vn, &unbind, vm_link) {
+		ret = __i915_vma_unbind(vma);
+		if (ret) {
+			/* Restore the remaining vma on an error */
+			list_splice(&unbind, &ggtt->vm.bound_list);
+			break;
+		}
+	}
+
 	mutex_unlock(&ggtt->vm.mutex);
 
 	return ret;

@@ -268,6 +282,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
 	}
 	mutex_unlock(&obj->mm.lock);
 
+	spin_lock(&obj->vma.lock);
 	for_each_ggtt_vma(vma, obj) {
 		vma->fence_size =
 			i915_gem_fence_size(i915, vma->size, tiling, stride);

@@ -278,6 +293,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
 		if (vma->fence)
 			vma->fence->dirty = true;
 	}
+	spin_unlock(&obj->vma.lock);
 
 	obj->tiling_and_stride = tiling | stride;
 	i915_gem_object_unlock(obj);
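
The reworked fence_prepare above is an instance of a common pattern: gather victims onto a private list while holding the short-lived lock that guards the list, then do the potentially-blocking work after dropping it. A generic, runnable analogue in plain C (no kernel APIs):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	int needs_unbind;
	struct node *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *object_list;

static void unbind_all(void)
{
	struct node *grabbed = NULL, *n;
	struct node **pp;

	pthread_mutex_lock(&list_lock);
	/* Only pointer surgery under the lock, never blocking work. */
	for (pp = &object_list; *pp; ) {
		n = *pp;
		if (n->needs_unbind) {
			*pp = n->next;
			n->next = grabbed;
			grabbed = n;
		} else {
			pp = &n->next;
		}
	}
	pthread_mutex_unlock(&list_lock);

	/* The slow part runs on the private list, lock dropped. */
	while (grabbed) {
		n = grabbed;
		grabbed = n->next;
		printf("unbinding node\n");
		free(n);
	}
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		struct node *n = calloc(1, sizeof(*n));
		n->needs_unbind = i & 1;
		n->next = object_list;
		object_list = n;
	}
	unbind_all();
	return 0;
}
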
@@ -1477,8 +1477,10 @@ static int igt_ppgtt_pin_update(void *arg)
 		unsigned int page_size = BIT(first);
 
 		obj = i915_gem_object_create_internal(dev_priv, page_size);
-		if (IS_ERR(obj))
-			return PTR_ERR(obj);
+		if (IS_ERR(obj)) {
+			err = PTR_ERR(obj);
+			goto out_vm;
+		}
 
 		vma = i915_vma_instance(obj, vm, NULL);
 		if (IS_ERR(vma)) {

@@ -1531,8 +1533,10 @@ static int igt_ppgtt_pin_update(void *arg)
 	}
 
 	obj = i915_gem_object_create_internal(dev_priv, PAGE_SIZE);
-	if (IS_ERR(obj))
-		return PTR_ERR(obj);
+	if (IS_ERR(obj)) {
+		err = PTR_ERR(obj);
+		goto out_vm;
+	}
 
 	vma = i915_vma_instance(obj, vm, NULL);
 	if (IS_ERR(vma)) {
@@ -521,6 +521,8 @@ int intel_timeline_read_hwsp(struct i915_request *from,
 
 	rcu_read_lock();
 	cl = rcu_dereference(from->hwsp_cacheline);
+	if (i915_request_completed(from)) /* confirm cacheline is valid */
+		goto unlock;
 	if (unlikely(!i915_active_acquire_if_busy(&cl->active)))
 		goto unlock; /* seqno wrapped and completed! */
 	if (unlikely(i915_request_completed(from)))
@@ -3358,7 +3358,8 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
 {
 	struct intel_uncore *uncore = &dev_priv->uncore;
 
-	u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
+	u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
+		GEN8_PIPE_CDCLK_CRC_DONE;
 	u32 de_pipe_enables;
 	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
 	u32 de_port_enables;

@@ -3369,13 +3370,10 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
 		de_misc_masked |= GEN8_DE_MISC_GSE;
 
 	if (INTEL_GEN(dev_priv) >= 9) {
-		de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
 		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
 				  GEN9_AUX_CHANNEL_D;
 		if (IS_GEN9_LP(dev_priv))
 			de_port_masked |= BXT_DE_PORT_GMBUS;
-	} else {
-		de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
 	}
 
 	if (INTEL_GEN(dev_priv) >= 11)
@@ -158,16 +158,18 @@ vma_create(struct drm_i915_gem_object *obj,
 
 	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));
 
+	spin_lock(&obj->vma.lock);
+
 	if (i915_is_ggtt(vm)) {
 		if (unlikely(overflows_type(vma->size, u32)))
-			goto err_vma;
+			goto err_unlock;
 
 		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
 						      i915_gem_object_get_tiling(obj),
 						      i915_gem_object_get_stride(obj));
 		if (unlikely(vma->fence_size < vma->size || /* overflow */
 			     vma->fence_size > vm->total))
-			goto err_vma;
+			goto err_unlock;
 
 		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));
 

@@ -179,8 +181,6 @@ vma_create(struct drm_i915_gem_object *obj,
 		__set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
 	}
 
-	spin_lock(&obj->vma.lock);
-
 	rb = NULL;
 	p = &obj->vma.tree.rb_node;
 	while (*p) {

@@ -225,6 +225,8 @@ vma_create(struct drm_i915_gem_object *obj,
 
 	return vma;
 
+err_unlock:
+	spin_unlock(&obj->vma.lock);
 err_vma:
 	i915_vma_free(vma);
 	return ERR_PTR(-E2BIG);
@@ -480,9 +480,10 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
 		return ret;
 
 	ret = qxl_release_reserve_list(release, true);
-	if (ret)
+	if (ret) {
+		qxl_release_free(qdev, release);
 		return ret;
-
+	}
 	cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
 	cmd->type = QXL_SURFACE_CMD_CREATE;
 	cmd->flags = QXL_SURF_FLAG_KEEP_DATA;

@@ -499,8 +500,8 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
 	/* no need to add a release to the fence for this surface bo,
 	   since it is only released when we ask to destroy the surface
 	   and it would never signal otherwise */
-	qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
 	qxl_release_fence_buffer_objects(release);
+	qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
 
 	surf->hw_surf_alloc = true;
 	spin_lock(&qdev->surf_id_idr_lock);

@@ -542,9 +543,8 @@ int qxl_hw_surface_dealloc(struct qxl_device *qdev,
 	cmd->surface_id = id;
 	qxl_release_unmap(qdev, release, &cmd->release_info);
 
-	qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
-
 	qxl_release_fence_buffer_objects(release);
+	qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
 
 	return 0;
 }
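
All of the qxl reorderings in this series follow one rule: once a release is pushed to the device ring, the host side may process it and drop the final reference, so qxl_release_fence_buffer_objects() must run first. A stub model of the race (runnable; names are illustrative, not driver APIs):

#include <stdio.h>
#include <stdlib.h>

struct release { int id; };

/* Stands in for qxl_release_fence_buffer_objects(): touches the
 * release while the CPU still owns it.
 */
static void fence_buffer_objects(struct release *r)
{
	printf("fencing release %d\n", r->id);
}

/* Stands in for qxl_push_*_ring_release(): after this point the
 * device may complete the command and free the release at any time,
 * modeled here by freeing it outright.
 */
static void push_to_device(struct release *r)
{
	printf("pushing release %d\n", r->id);
	free(r);
}

int main(void)
{
	struct release *r = malloc(sizeof(*r));

	r->id = 1;
	fence_buffer_objects(r); /* must happen first...             */
	push_to_device(r);       /* ...since r is unusable afterwards */
	return 0;
}
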
@@ -510,8 +510,8 @@ static int qxl_primary_apply_cursor(struct drm_plane *plane)
 	cmd->u.set.visible = 1;
 	qxl_release_unmap(qdev, release, &cmd->release_info);
 
-	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
 	qxl_release_fence_buffer_objects(release);
+	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
 
 	return ret;
 

@@ -652,8 +652,8 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
 	cmd->u.position.y = plane->state->crtc_y + fb->hot_y;
 
 	qxl_release_unmap(qdev, release, &cmd->release_info);
-	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
 	qxl_release_fence_buffer_objects(release);
+	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
 
 	if (old_cursor_bo != NULL)
 		qxl_bo_unpin(old_cursor_bo);

@@ -700,8 +700,8 @@ static void qxl_cursor_atomic_disable(struct drm_plane *plane,
 	cmd->type = QXL_CURSOR_HIDE;
 	qxl_release_unmap(qdev, release, &cmd->release_info);
 
-	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
 	qxl_release_fence_buffer_objects(release);
+	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
 }
 
 static void qxl_update_dumb_head(struct qxl_device *qdev,
@@ -209,9 +209,10 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
 		goto out_release_backoff;
 
 	rects = drawable_set_clipping(qdev, num_clips, clips_bo);
-	if (!rects)
+	if (!rects) {
+		ret = -EINVAL;
 		goto out_release_backoff;
-
+	}
 	drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
 
 	drawable->clip.type = SPICE_CLIP_TYPE_RECTS;

@@ -242,8 +243,8 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
 	}
 	qxl_bo_kunmap(clips_bo);
 
-	qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
 	qxl_release_fence_buffer_objects(release);
+	qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
 
 out_release_backoff:
 	if (ret)
@@ -212,7 +212,8 @@ qxl_image_init_helper(struct qxl_device *qdev,
 			break;
 		default:
 			DRM_ERROR("unsupported image bit depth\n");
-			return -EINVAL; /* TODO: cleanup */
+			qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
+			return -EINVAL;
 		}
 		image->u.bitmap.flags = QXL_BITMAP_TOP_DOWN;
 		image->u.bitmap.x = width;
@@ -261,11 +261,8 @@ static int qxl_process_single_command(struct qxl_device *qdev,
 		apply_surf_reloc(qdev, &reloc_info[i]);
 	}
 
+	qxl_release_fence_buffer_objects(release);
 	ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);
-	if (ret)
-		qxl_release_backoff_reserve_list(release);
-	else
-		qxl_release_fence_buffer_objects(release);
 
 out_free_bos:
 out_free_release:
@@ -53,14 +53,6 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
 		      events_clear, &events_clear);
 }
 
-static void virtio_gpu_context_destroy(struct virtio_gpu_device *vgdev,
-				      uint32_t ctx_id)
-{
-	virtio_gpu_cmd_context_destroy(vgdev, ctx_id);
-	virtio_gpu_notify(vgdev);
-	ida_free(&vgdev->ctx_id_ida, ctx_id - 1);
-}
-
 static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq,
 			       void (*work_func)(struct work_struct *work))
 {

@@ -275,14 +267,17 @@ int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
 void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file)
 {
 	struct virtio_gpu_device *vgdev = dev->dev_private;
-	struct virtio_gpu_fpriv *vfpriv;
+	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
 
 	if (!vgdev->has_virgl_3d)
 		return;
 
-	vfpriv = file->driver_priv;
+	if (vfpriv->context_created) {
+		virtio_gpu_cmd_context_destroy(vgdev, vfpriv->ctx_id);
+		virtio_gpu_notify(vgdev);
+	}
 
-	virtio_gpu_context_destroy(vgdev, vfpriv->ctx_id);
+	ida_free(&vgdev->ctx_id_ida, vfpriv->ctx_id - 1);
 	mutex_destroy(&vfpriv->context_lock);
 	kfree(vfpriv);
 	file->driver_priv = NULL;
@@ -329,13 +329,12 @@ struct dma_buf {
 
 /**
  * struct dma_buf_attach_ops - importer operations for an attachment
- * @move_notify: [optional] notification that the DMA-buf is moving
  *
  * Attachment operations implemented by the importer.
  */
 struct dma_buf_attach_ops {
 	/**
-	 * @move_notify
+	 * @move_notify: [optional] notification that the DMA-buf is moving
 	 *
 	 * If this callback is provided the framework can avoid pinning the
 	 * backing store while mappings exists.
@@ -346,6 +346,10 @@ struct drm_amdgpu_gem_userptr {
 #define AMDGPU_TILING_DCC_PITCH_MAX_MASK		0x3FFF
 #define AMDGPU_TILING_DCC_INDEPENDENT_64B_SHIFT		43
 #define AMDGPU_TILING_DCC_INDEPENDENT_64B_MASK		0x1
+#define AMDGPU_TILING_DCC_INDEPENDENT_128B_SHIFT	44
+#define AMDGPU_TILING_DCC_INDEPENDENT_128B_MASK		0x1
+#define AMDGPU_TILING_SCANOUT_SHIFT			63
+#define AMDGPU_TILING_SCANOUT_MASK			0x1
 
 /* Set/Get helpers for tiling flags. */
 #define AMDGPU_TILING_SET(field, value) \
@@ -39,6 +39,12 @@ struct dma_buf_sync {
 
 #define DMA_BUF_BASE		'b'
 #define DMA_BUF_IOCTL_SYNC	_IOW(DMA_BUF_BASE, 0, struct dma_buf_sync)
+
+/* 32/64bitness of this uapi was botched in android, there's no difference
+ * between them in actual uapi, they're just different numbers.
+ */
 #define DMA_BUF_SET_NAME	_IOW(DMA_BUF_BASE, 1, const char *)
+#define DMA_BUF_SET_NAME_A	_IOW(DMA_BUF_BASE, 1, u32)
+#define DMA_BUF_SET_NAME_B	_IOW(DMA_BUF_BASE, 1, u64)
 
 #endif