amdgpu, i915, sun4i fixes
-----BEGIN PGP SIGNATURE-----
iQIcBAABAgAGBQJaoihEAAoJEAx081l5xIa+VukP/iO0Et1GzmcLvE1XHuOwm5q+
tG4IgkLSF50IfU0OHc0BWPFoTeypicRJvuLM+NubMf/dg3l8y/UbIWqOJBntnkNX
o3018iw4/pFIvpBE8x2g2ozEk8qEfiaTfZkTfNKuH1ZUFugZR44j9OWbeYmZYd6i
GwmSLgLunbE0Bt3XqSHLm4VtbV2FCI1vj65IzYcEWygSa8sugADaeKZ+NvS77MOb
yF2d+Tlx5IkD0fkOt34MsDH/0F+RGEUCmgJfgiy+AuA/SD/v7cZQinTOkpzm0LkV
YSSugc7XVqjqugEWjt7yniirGlIPfcdJTzUB99OQ4rCGFhpSRZVB54RVGeOch/LS
UogyW/cWG9sivgdUg7sXw5ws12NX5LpR8FHwXYxBJRZs+TQXnpEy1Idh4IxZzVF/
wFhqckNxVo1POZLy2xhTyd4ML49JvVAZ4E+3WmniDKXRImD9S7bV35yPXQ2XA2rj
KjJaYGntihrvHSvLseOc/cb5KxwvxQGT1yw6fiSGPV1n7kUemzLAMxAmBKnL+IPB
QKzB5POnaqOsOg6GBmkXv3nGfLWQq1NIiK6XOlfBpW5g80WxuBlLN+gcgaaG5qKT
CJGbVuQe5kptSXSsTXU1+e6oJre2FOtAncq3aQlPKS7EptqtPxSw0PWiZLsVXiVj
/sZU6fBlCYGJoXIq25xK
=tkxA
-----END PGP SIGNATURE-----

Merge tag 'drm-fixes-for-v4.16-rc5' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "There are a small set of sun4i and i915 fixes, and many more amdgpu fixes:

  sun4i:
   - divide by zero fix
   - clock and LVDS fixes

  i915:
   - fix for perf
   - race fix

  amdgpu:
   - a bit more than we are normally comfortable with at this point,
     however it does fix a lot of display issues with the new DC code
     which result in black screens in various configurations along with
     some run of the mill gpu configuration fixes.

     I'm happy enough that the fixes are limited to the DC code and
     should fix a bunch of issues on the new raven ridge APUs that we
     are seeing shipped now"

* tag 'drm-fixes-for-v4.16-rc5' of git://people.freedesktop.org/~airlied/linux: (42 commits)
  drm/amd/display: validate plane format on primary plane
  drm/amdgpu:Always save uvd vcpu_bo in VM Mode
  drm/amdgpu:Correct max uvd handles
  drm/amd/display: early return if not in vga mode in disable_vga
  drm/amd/display: Fix takover from VGA mode
  drm/amd/display: Fix memleaks when atomic check fails.
  drm/amd/display: Return success when enabling interrupt
  drm/amd/display: Use crtc enable/disable_vblank hooks
  drm/amd/display: update infoframe after dig fe is turned on
  drm/amd/display: fix boot-up on vega10
  drm/amd/display: fix cursor related Pstate hang
  drm/amd/display: Set irq state only on existing crtcs
  drm/amd/display: Fixed non-native modes not lighting up
  drm/amd/display: Call update_stream_signal directly from amdgpu_dm
  drm/amd/display: Make create_stream_for_sink more consistent
  drm/amd/display: Don't block dual-link DVI modes
  drm/amd/display: Don't allow dual-link DVI on all ASICs.
  drm/amd/display: Pass signal directly to enable_tmds_output
  drm/amd/display: Remove unnecessary fail labels in create_stream_for_sink
  drm/amd/display: Move MAX_TMDS_CLOCK define to header
  ...
commit 65307f2e05
@@ -540,6 +540,9 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
	size_t size;
	u32 retry = 3;

	if (amdgpu_acpi_pcie_notify_device_ready(adev))
		return -EINVAL;

	/* Get the device handle */
	handle = ACPI_HANDLE(&adev->pdev->dev);
	if (!handle)

@@ -481,7 +481,7 @@ static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
	result = 0;

	if (*pos < 12) {
		early[0] = amdgpu_ring_get_rptr(ring);
		early[0] = amdgpu_ring_get_rptr(ring) & ring->buf_mask;
		early[1] = amdgpu_ring_get_wptr(ring) & ring->buf_mask;
		early[2] = ring->wptr & ring->buf_mask;
		for (i = *pos / 4; i < 3 && size; i++) {

@@ -299,12 +299,15 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)

	cancel_delayed_work_sync(&adev->uvd.idle_work);

	for (i = 0; i < adev->uvd.max_handles; ++i)
		if (atomic_read(&adev->uvd.handles[i]))
			break;
	/* only valid for physical mode */
	if (adev->asic_type < CHIP_POLARIS10) {
		for (i = 0; i < adev->uvd.max_handles; ++i)
			if (atomic_read(&adev->uvd.handles[i]))
				break;

		if (i == AMDGPU_MAX_UVD_HANDLES)
			return 0;
		if (i == adev->uvd.max_handles)
			return 0;
	}

	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
	ptr = adev->uvd.cpu_addr;

@@ -3093,7 +3093,7 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
			tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
			WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
			schedule_work(&adev->hotplug_work);
			DRM_INFO("IH: HPD%d\n", hpd + 1);
			DRM_DEBUG("IH: HPD%d\n", hpd + 1);
		}

	return 0;

@@ -4384,34 +4384,8 @@ static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)
	case CHIP_KAVERI:
		adev->gfx.config.max_shader_engines = 1;
		adev->gfx.config.max_tile_pipes = 4;
		if ((adev->pdev->device == 0x1304) ||
		    (adev->pdev->device == 0x1305) ||
		    (adev->pdev->device == 0x130C) ||
		    (adev->pdev->device == 0x130F) ||
		    (adev->pdev->device == 0x1310) ||
		    (adev->pdev->device == 0x1311) ||
		    (adev->pdev->device == 0x131C)) {
			adev->gfx.config.max_cu_per_sh = 8;
			adev->gfx.config.max_backends_per_se = 2;
		} else if ((adev->pdev->device == 0x1309) ||
			   (adev->pdev->device == 0x130A) ||
			   (adev->pdev->device == 0x130D) ||
			   (adev->pdev->device == 0x1313) ||
			   (adev->pdev->device == 0x131D)) {
			adev->gfx.config.max_cu_per_sh = 6;
			adev->gfx.config.max_backends_per_se = 2;
		} else if ((adev->pdev->device == 0x1306) ||
			   (adev->pdev->device == 0x1307) ||
			   (adev->pdev->device == 0x130B) ||
			   (adev->pdev->device == 0x130E) ||
			   (adev->pdev->device == 0x1315) ||
			   (adev->pdev->device == 0x131B)) {
			adev->gfx.config.max_cu_per_sh = 4;
			adev->gfx.config.max_backends_per_se = 1;
		} else {
			adev->gfx.config.max_cu_per_sh = 3;
			adev->gfx.config.max_backends_per_se = 1;
		}
		adev->gfx.config.max_cu_per_sh = 8;
		adev->gfx.config.max_backends_per_se = 2;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_texture_channel_caches = 4;
		adev->gfx.config.max_gprs = 256;

@@ -31,6 +31,7 @@
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "atom.h"
#include "amd_pcie.h"
#include "amdgpu_powerplay.h"
#include "sid.h"
#include "si_ih.h"

@@ -1461,8 +1462,8 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
{
	struct pci_dev *root = adev->pdev->bus->self;
	int bridge_pos, gpu_pos;
	u32 speed_cntl, mask, current_data_rate;
	int ret, i;
	u32 speed_cntl, current_data_rate;
	int i;
	u16 tmp16;

	if (pci_is_root_bus(adev->pdev->bus))

@@ -1474,23 +1475,20 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
	if (adev->flags & AMD_IS_APU)
		return;

	ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
	if (ret != 0)
		return;

	if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
		LC_CURRENT_DATA_RATE_SHIFT;
	if (mask & DRM_PCIE_SPEED_80) {
	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
		if (current_data_rate == 2) {
			DRM_INFO("PCIE gen 3 link speeds already enabled\n");
			return;
		}
		DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n");
	} else if (mask & DRM_PCIE_SPEED_50) {
	} else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) {
		if (current_data_rate == 1) {
			DRM_INFO("PCIE gen 2 link speeds already enabled\n");
			return;

@@ -1506,7 +1504,7 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)
	if (!gpu_pos)
		return;

	if (mask & DRM_PCIE_SPEED_80) {
	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
		if (current_data_rate != 2) {
			u16 bridge_cfg, gpu_cfg;
			u16 bridge_cfg2, gpu_cfg2;

@@ -1589,9 +1587,9 @@ static void si_pcie_gen3_enable(struct amdgpu_device *adev)

	pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
	tmp16 &= ~0xf;
	if (mask & DRM_PCIE_SPEED_80)
	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		tmp16 |= 3;
	else if (mask & DRM_PCIE_SPEED_50)
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		tmp16 |= 2;
	else
		tmp16 |= 1;

@@ -26,6 +26,7 @@
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "amdgpu_atombios.h"
#include "amd_pcie.h"
#include "sid.h"
#include "r600_dpm.h"
#include "si_dpm.h"

@@ -3331,29 +3332,6 @@ static void btc_apply_voltage_delta_rules(struct amdgpu_device *adev,
	}
}

static enum amdgpu_pcie_gen r600_get_pcie_gen_support(struct amdgpu_device *adev,
						       u32 sys_mask,
						       enum amdgpu_pcie_gen asic_gen,
						       enum amdgpu_pcie_gen default_gen)
{
	switch (asic_gen) {
	case AMDGPU_PCIE_GEN1:
		return AMDGPU_PCIE_GEN1;
	case AMDGPU_PCIE_GEN2:
		return AMDGPU_PCIE_GEN2;
	case AMDGPU_PCIE_GEN3:
		return AMDGPU_PCIE_GEN3;
	default:
		if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3))
			return AMDGPU_PCIE_GEN3;
		else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2))
			return AMDGPU_PCIE_GEN2;
		else
			return AMDGPU_PCIE_GEN1;
	}
	return AMDGPU_PCIE_GEN1;
}

static void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
				   u32 *p, u32 *u)
{

@@ -5028,10 +5006,11 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
						   table->ACPIState.levels[0].vddc.index,
						   &table->ACPIState.levels[0].std_vddc);
	}
	table->ACPIState.levels[0].gen2PCIE = (u8)r600_get_pcie_gen_support(adev,
									     si_pi->sys_pcie_mask,
									     si_pi->boot_pcie_gen,
									     AMDGPU_PCIE_GEN1);
	table->ACPIState.levels[0].gen2PCIE =
		(u8)amdgpu_get_pcie_gen_support(adev,
						si_pi->sys_pcie_mask,
						si_pi->boot_pcie_gen,
						AMDGPU_PCIE_GEN1);

	if (si_pi->vddc_phase_shed_control)
		si_populate_phase_shedding_value(adev,

@@ -7168,10 +7147,10 @@ static void si_parse_pplib_clock_info(struct amdgpu_device *adev,
	pl->vddc = le16_to_cpu(clock_info->si.usVDDC);
	pl->vddci = le16_to_cpu(clock_info->si.usVDDCI);
	pl->flags = le32_to_cpu(clock_info->si.ulFlags);
	pl->pcie_gen = r600_get_pcie_gen_support(adev,
						 si_pi->sys_pcie_mask,
						 si_pi->boot_pcie_gen,
						 clock_info->si.ucPCIEGen);
	pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
						   si_pi->sys_pcie_mask,
						   si_pi->boot_pcie_gen,
						   clock_info->si.ucPCIEGen);

	/* patch up vddc if necessary */
	ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc,

@@ -7326,7 +7305,6 @@ static int si_dpm_init(struct amdgpu_device *adev)
	struct si_power_info *si_pi;
	struct atom_clock_dividers dividers;
	int ret;
	u32 mask;

	si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL);
	if (si_pi == NULL)

@@ -7336,11 +7314,9 @@ static int si_dpm_init(struct amdgpu_device *adev)
	eg_pi = &ni_pi->eg;
	pi = &eg_pi->rv7xx;

	ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
	if (ret)
		si_pi->sys_pcie_mask = 0;
	else
		si_pi->sys_pcie_mask = mask;
	si_pi->sys_pcie_mask =
		(adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
		CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
	si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
	si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev);

@@ -1037,6 +1037,10 @@ static void handle_hpd_rx_irq(void *param)
	    !is_mst_root_connector) {
		/* Downstream Port status changed. */
		if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {

			if (aconnector->fake_enable)
				aconnector->fake_enable = false;

			amdgpu_dm_update_connector_after_detect(aconnector);

@@ -2012,30 +2016,32 @@ static void update_stream_scaling_settings(const struct drm_display_mode *mode,
	dst.width = stream->timing.h_addressable;
	dst.height = stream->timing.v_addressable;

	rmx_type = dm_state->scaling;
	if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
		if (src.width * dst.height <
				src.height * dst.width) {
			/* height needs less upscaling/more downscaling */
			dst.width = src.width *
					dst.height / src.height;
		} else {
			/* width needs less upscaling/more downscaling */
			dst.height = src.height *
					dst.width / src.width;
	if (dm_state) {
		rmx_type = dm_state->scaling;
		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
			if (src.width * dst.height <
					src.height * dst.width) {
				/* height needs less upscaling/more downscaling */
				dst.width = src.width *
						dst.height / src.height;
			} else {
				/* width needs less upscaling/more downscaling */
				dst.height = src.height *
						dst.width / src.width;
			}
		} else if (rmx_type == RMX_CENTER) {
			dst = src;
		}
	} else if (rmx_type == RMX_CENTER) {
		dst = src;
	}

	dst.x = (stream->timing.h_addressable - dst.width) / 2;
	dst.y = (stream->timing.v_addressable - dst.height) / 2;
		dst.x = (stream->timing.h_addressable - dst.width) / 2;
		dst.y = (stream->timing.v_addressable - dst.height) / 2;

	if (dm_state->underscan_enable) {
		dst.x += dm_state->underscan_hborder / 2;
		dst.y += dm_state->underscan_vborder / 2;
		dst.width -= dm_state->underscan_hborder;
		dst.height -= dm_state->underscan_vborder;
		if (dm_state->underscan_enable) {
			dst.x += dm_state->underscan_hborder / 2;
			dst.y += dm_state->underscan_vborder / 2;
			dst.width -= dm_state->underscan_hborder;
			dst.height -= dm_state->underscan_vborder;
		}
	}

	stream->src = src;

@@ -2360,12 +2366,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,

	if (aconnector == NULL) {
		DRM_ERROR("aconnector is NULL!\n");
		goto drm_connector_null;
	}

	if (dm_state == NULL) {
		DRM_ERROR("dm_state is NULL!\n");
		goto dm_state_null;
		return stream;
	}

	drm_connector = &aconnector->base;

@@ -2377,18 +2378,18 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
	 */
	if (aconnector->mst_port) {
		dm_dp_mst_dc_sink_create(drm_connector);
		goto mst_dc_sink_create_done;
		return stream;
	}

	if (create_fake_sink(aconnector))
		goto stream_create_fail;
		return stream;
	}

	stream = dc_create_stream_for_sink(aconnector->dc_sink);

	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto stream_create_fail;
		return stream;
	}

	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {

@@ -2414,9 +2415,12 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
	} else {
		decide_crtc_timing_for_drm_display_mode(
				&mode, preferred_mode,
				dm_state->scaling != RMX_OFF);
				dm_state ? (dm_state->scaling != RMX_OFF) : false);
	}

	if (!dm_state)
		drm_mode_set_crtcinfo(&mode, 0);

	fill_stream_properties_from_drm_display_mode(stream,
			&mode, &aconnector->base);
	update_stream_scaling_settings(&mode, dm_state, stream);

@@ -2426,10 +2430,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
		drm_connector,
		aconnector->dc_sink);

stream_create_fail:
dm_state_null:
drm_connector_null:
mst_dc_sink_create_done:
	update_stream_signal(stream);

	return stream;
}

@@ -2497,6 +2499,27 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
	return &state->base;
}


static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
{
	enum dc_irq_source irq_source;
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
	struct amdgpu_device *adev = crtc->dev->dev_private;

	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
	return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
}

static int dm_enable_vblank(struct drm_crtc *crtc)
{
	return dm_set_vblank(crtc, true);
}

static void dm_disable_vblank(struct drm_crtc *crtc)
{
	dm_set_vblank(crtc, false);
}

/* Implemented only the options currently availible for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
	.reset = dm_crtc_reset_state,

@@ -2506,6 +2529,8 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = dm_crtc_duplicate_state,
	.atomic_destroy_state = dm_crtc_destroy_state,
	.enable_vblank = dm_enable_vblank,
	.disable_vblank = dm_disable_vblank,
};

static enum drm_connector_status

@@ -2800,7 +2825,7 @@ int amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
		goto fail;
	}

	stream = dc_create_stream_for_sink(dc_sink);
	stream = create_stream_for_sink(aconnector, mode, NULL);
	if (stream == NULL) {
		DRM_ERROR("Failed to create stream for sink!\n");
		goto fail;

@@ -3060,6 +3085,9 @@ static int dm_plane_atomic_check(struct drm_plane *plane,
	if (!dm_plane_state->dc_state)
		return 0;

	if (!fill_rects_from_plane_state(state, dm_plane_state->dc_state))
		return -EINVAL;

	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
		return 0;

@@ -4632,8 +4660,6 @@ static int dm_update_planes_state(struct dc *dc,
	bool pflip_needed = !state->allow_modeset;
	int ret = 0;

	if (pflip_needed)
		return ret;

	/* Add new planes */
	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {

@@ -4648,6 +4674,8 @@ static int dm_update_planes_state(struct dc *dc,

		/* Remove any changed/removed planes */
		if (!enable) {
			if (pflip_needed)
				continue;

			if (!old_plane_crtc)
				continue;

@@ -4679,6 +4707,7 @@ static int dm_update_planes_state(struct dc *dc,
			*lock_and_validation_needed = true;

		} else { /* Add new planes */
			struct dc_plane_state *dc_new_plane_state;

			if (drm_atomic_plane_disabling(plane->state, new_plane_state))
				continue;

@@ -4692,38 +4721,50 @@ static int dm_update_planes_state(struct dc *dc,
			if (!dm_new_crtc_state->stream)
				continue;

			if (pflip_needed)
				continue;

			WARN_ON(dm_new_plane_state->dc_state);

			dm_new_plane_state->dc_state = dc_create_plane_state(dc);
			dc_new_plane_state = dc_create_plane_state(dc);
			if (!dc_new_plane_state) {
				ret = -EINVAL;
				return ret;
			}

			DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
					plane->base.id, new_plane_crtc->base.id);

			if (!dm_new_plane_state->dc_state) {
				ret = -EINVAL;
			ret = fill_plane_attributes(
				new_plane_crtc->dev->dev_private,
				dc_new_plane_state,
				new_plane_state,
				new_crtc_state);
			if (ret) {
				dc_plane_state_release(dc_new_plane_state);
				return ret;
			}

			ret = fill_plane_attributes(
				new_plane_crtc->dev->dev_private,
				dm_new_plane_state->dc_state,
				new_plane_state,
				new_crtc_state);
			if (ret)
				return ret;


			/*
			 * Any atomic check errors that occur after this will
			 * not need a release. The plane state will be attached
			 * to the stream, and therefore part of the atomic
			 * state. It'll be released when the atomic state is
			 * cleaned.
			 */
			if (!dc_add_plane_to_context(
					dc,
					dm_new_crtc_state->stream,
					dm_new_plane_state->dc_state,
					dc_new_plane_state,
					dm_state->context)) {

				dc_plane_state_release(dc_new_plane_state);
				ret = -EINVAL;
				return ret;
			}

			dm_new_plane_state->dc_state = dc_new_plane_state;

			/* Tell DC to do a full surface update every time there
			 * is a plane change. Inefficient, but works for now.
			 */

@@ -4737,6 +4778,30 @@ static int dm_update_planes_state(struct dc *dc,
	return ret;
}

static int dm_atomic_check_plane_state_fb(struct drm_atomic_state *state,
					  struct drm_crtc *crtc)
{
	struct drm_plane *plane;
	struct drm_crtc_state *crtc_state;

	WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));

	drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
		struct drm_plane_state *plane_state =
			drm_atomic_get_plane_state(state, plane);

		if (IS_ERR(plane_state))
			return -EDEADLK;

		crtc_state = drm_atomic_get_crtc_state(plane_state->state, crtc);
		if (crtc->primary == plane && crtc_state->active) {
			if (!plane_state->fb)
				return -EINVAL;
		}
	}
	return 0;
}

static int amdgpu_dm_atomic_check(struct drm_device *dev,
				  struct drm_atomic_state *state)
{

@@ -4760,6 +4825,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
		goto fail;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		ret = dm_atomic_check_plane_state_fb(state, crtc);
		if (ret)
			goto fail;

		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
		    !new_crtc_state->color_mgmt_changed)
			continue;

@@ -683,10 +683,8 @@ static const struct amdgpu_irq_src_funcs dm_hpd_irq_funcs = {

void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
{
	if (adev->mode_info.num_crtc > 0)
		adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
	else
		adev->crtc_irq.num_types = 0;

	adev->crtc_irq.num_types = adev->mode_info.num_crtc;
	adev->crtc_irq.funcs = &dm_crtc_irq_funcs;

	adev->pageflip_irq.num_types = adev->mode_info.num_crtc;

@@ -189,6 +189,12 @@ void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
		.link = aconnector->dc_link,
		.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };

	/*
	 * TODO: Need to further figure out why ddc.algo is NULL while MST port exists
	 */
	if (!aconnector->port || !aconnector->port->aux.ddc.algo)
		return;

	edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);

	if (!edid) {

@@ -1358,13 +1358,13 @@ enum dc_irq_source dc_interrupt_to_irq_source(
	return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
}

void dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
{

	if (dc == NULL)
		return;
		return false;

	dal_irq_service_set(dc->res_pool->irqs, src, enable);
	return dal_irq_service_set(dc->res_pool->irqs, src, enable);
}

void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)

@@ -1749,8 +1749,7 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
		link->link_enc,
		pipe_ctx->clock_source->id,
		display_color_depth,
		pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A,
		pipe_ctx->stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK,
		pipe_ctx->stream->signal,
		stream->phy_pix_clk);

	if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)

@@ -1360,9 +1360,6 @@ bool dc_is_stream_scaling_unchanged(
	return true;
}

/* Maximum TMDS single link pixel clock 165MHz */
#define TMDS_MAX_PIXEL_CLOCK_IN_KHZ 165000

static void update_stream_engine_usage(
		struct resource_context *res_ctx,
		const struct resource_pool *pool,

@@ -33,8 +33,7 @@
/*******************************************************************************
 * Private functions
 ******************************************************************************/
#define TMDS_MAX_PIXEL_CLOCK_IN_KHZ_UPMOST 297000
static void update_stream_signal(struct dc_stream_state *stream)
void update_stream_signal(struct dc_stream_state *stream)
{

	struct dc_sink *dc_sink = stream->sink;

@@ -45,8 +44,9 @@ static void update_stream_signal(struct dc_stream_state *stream)
		stream->signal = dc_sink->sink_signal;

	if (dc_is_dvi_signal(stream->signal)) {
		if (stream->timing.pix_clk_khz > TMDS_MAX_PIXEL_CLOCK_IN_KHZ_UPMOST &&
			stream->sink->sink_signal != SIGNAL_TYPE_DVI_SINGLE_LINK)
		if (stream->ctx->dc->caps.dual_link_dvi &&
		    stream->timing.pix_clk_khz > TMDS_MAX_PIXEL_CLOCK &&
		    stream->sink->sink_signal != SIGNAL_TYPE_DVI_SINGLE_LINK)
			stream->signal = SIGNAL_TYPE_DVI_DUAL_LINK;
		else
			stream->signal = SIGNAL_TYPE_DVI_SINGLE_LINK;

@@ -193,6 +193,7 @@ bool dc_stream_set_cursor_attributes(

	core_dc = stream->ctx->dc;
	res_ctx = &core_dc->current_state->res_ctx;
	stream->cursor_attributes = *attributes;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];

@@ -204,34 +205,8 @@ bool dc_stream_set_cursor_attributes(
			continue;


		if (pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes != NULL)
			pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes(
					pipe_ctx->plane_res.ipp, attributes);

		if (pipe_ctx->plane_res.hubp != NULL &&
			pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes != NULL)
			pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
					pipe_ctx->plane_res.hubp, attributes);

		if (pipe_ctx->plane_res.mi != NULL &&
			pipe_ctx->plane_res.mi->funcs->set_cursor_attributes != NULL)
			pipe_ctx->plane_res.mi->funcs->set_cursor_attributes(
					pipe_ctx->plane_res.mi, attributes);


		if (pipe_ctx->plane_res.xfm != NULL &&
			pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes != NULL)
			pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes(
				pipe_ctx->plane_res.xfm, attributes);

		if (pipe_ctx->plane_res.dpp != NULL &&
			pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes != NULL)
			pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
				pipe_ctx->plane_res.dpp, attributes->color_format);
		core_dc->hwss.set_cursor_attribute(pipe_ctx);
	}

	stream->cursor_attributes = *attributes;

	return true;
}

@@ -255,21 +230,10 @@ bool dc_stream_set_cursor_position(

	core_dc = stream->ctx->dc;
	res_ctx = &core_dc->current_state->res_ctx;
	stream->cursor_position = *position;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
		struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp;
		struct mem_input *mi = pipe_ctx->plane_res.mi;
		struct hubp *hubp = pipe_ctx->plane_res.hubp;
		struct dpp *dpp = pipe_ctx->plane_res.dpp;
		struct dc_cursor_position pos_cpy = *position;
		struct dc_cursor_mi_param param = {
			.pixel_clk_khz = stream->timing.pix_clk_khz,
			.ref_clk_khz = core_dc->res_pool->ref_clock_inKhz,
			.viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x,
			.viewport_width = pipe_ctx->plane_res.scl_data.viewport.width,
			.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz
		};

		if (pipe_ctx->stream != stream ||
				(!pipe_ctx->plane_res.mi && !pipe_ctx->plane_res.hubp) ||

@@ -278,33 +242,9 @@ bool dc_stream_set_cursor_position(
				!pipe_ctx->plane_res.ipp)
			continue;

		if (pipe_ctx->plane_state->address.type
				== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
			pos_cpy.enable = false;

		if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
			pos_cpy.enable = false;


		if (ipp != NULL && ipp->funcs->ipp_cursor_set_position != NULL)
			ipp->funcs->ipp_cursor_set_position(ipp, &pos_cpy, &param);

		if (mi != NULL && mi->funcs->set_cursor_position != NULL)
			mi->funcs->set_cursor_position(mi, &pos_cpy, &param);

		if (!hubp)
			continue;

		if (hubp->funcs->set_cursor_position != NULL)
			hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);

		if (dpp != NULL && dpp->funcs->set_cursor_position != NULL)
			dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width);

		core_dc->hwss.set_cursor_position(pipe_ctx);
	}

	stream->cursor_position = *position;

	return true;
}

@@ -62,6 +62,7 @@ struct dc_caps {
	bool dcc_const_color;
	bool dynamic_audio;
	bool is_apu;
	bool dual_link_dvi;
};

struct dc_dcc_surface_param {

@@ -672,7 +673,7 @@ enum dc_irq_source dc_interrupt_to_irq_source(
		struct dc *dc,
		uint32_t src_id,
		uint32_t ext_id);
void dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable);
bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable);
void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src);
enum dc_irq_source dc_get_hpd_irq_source_at_index(
		struct dc *dc, uint32_t link_index);

@@ -237,6 +237,8 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
 */
struct dc_stream_state *dc_create_stream_for_sink(struct dc_sink *dc_sink);

void update_stream_signal(struct dc_stream_state *stream);

void dc_stream_retain(struct dc_stream_state *dc_stream);
void dc_stream_release(struct dc_stream_state *dc_stream);

@@ -236,6 +236,8 @@
	SR(D2VGA_CONTROL), \
	SR(D3VGA_CONTROL), \
	SR(D4VGA_CONTROL), \
	SR(VGA_TEST_CONTROL), \
	SR(DC_IP_REQUEST_CNTL), \
	BL_REG_LIST()

@@ -337,6 +338,7 @@ struct dce_hwseq_registers {
	uint32_t D2VGA_CONTROL;
	uint32_t D3VGA_CONTROL;
	uint32_t D4VGA_CONTROL;
	uint32_t VGA_TEST_CONTROL;
	/* MMHUB registers. read only. temporary hack */
	uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32;
	uint32_t VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;

@@ -493,6 +495,9 @@ struct dce_hwseq_registers {
	HWS_SF(, DOMAIN6_PG_STATUS, DOMAIN6_PGFSM_PWR_STATUS, mask_sh), \
	HWS_SF(, DOMAIN7_PG_STATUS, DOMAIN7_PGFSM_PWR_STATUS, mask_sh), \
	HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
	HWS_SF(, D1VGA_CONTROL, D1VGA_MODE_ENABLE, mask_sh),\
	HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_ENABLE, mask_sh),\
	HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_RENDER_START, mask_sh),\
	HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
	HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)

@@ -583,7 +588,10 @@ struct dce_hwseq_registers {
	type DCFCLK_GATE_DIS; \
	type DCHUBBUB_GLOBAL_TIMER_REFDIV; \
	type DENTIST_DPPCLK_WDIVIDER; \
	type DENTIST_DISPCLK_WDIVIDER;
	type DENTIST_DISPCLK_WDIVIDER; \
	type VGA_TEST_ENABLE; \
	type VGA_TEST_RENDER_START; \
	type D1VGA_MODE_ENABLE;

struct dce_hwseq_shift {
	HWSEQ_REG_FIELD_LIST(uint8_t)

@@ -82,13 +82,6 @@
#define DCE110_DIG_FE_SOURCE_SELECT_DIGF 0x20
#define DCE110_DIG_FE_SOURCE_SELECT_DIGG 0x40

/* Minimum pixel clock, in KHz. For TMDS signal is 25.00 MHz */
#define TMDS_MIN_PIXEL_CLOCK 25000
/* Maximum pixel clock, in KHz. For TMDS signal is 165.00 MHz */
#define TMDS_MAX_PIXEL_CLOCK 165000
/* For current ASICs pixel clock - 600MHz */
#define MAX_ENCODER_CLOCK 600000

enum {
	DP_MST_UPDATE_MAX_RETRY = 50
};

@@ -683,6 +676,7 @@ void dce110_link_encoder_construct(
{
	struct bp_encoder_cap_info bp_cap_info = {0};
	const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs;
	enum bp_result result = BP_RESULT_OK;

	enc110->base.funcs = &dce110_lnk_enc_funcs;
	enc110->base.ctx = init_data->ctx;

@@ -757,15 +751,24 @@ void dce110_link_encoder_construct(
		enc110->base.preferred_engine = ENGINE_ID_UNKNOWN;
	}

	/* default to one to mirror Windows behavior */
	enc110->base.features.flags.bits.HDMI_6GB_EN = 1;

	result = bp_funcs->get_encoder_cap_info(enc110->base.ctx->dc_bios,
						enc110->base.id, &bp_cap_info);

	/* Override features with DCE-specific values */
	if (BP_RESULT_OK == bp_funcs->get_encoder_cap_info(
			enc110->base.ctx->dc_bios, enc110->base.id,
			&bp_cap_info)) {
	if (BP_RESULT_OK == result) {
		enc110->base.features.flags.bits.IS_HBR2_CAPABLE =
				bp_cap_info.DP_HBR2_EN;
		enc110->base.features.flags.bits.IS_HBR3_CAPABLE =
				bp_cap_info.DP_HBR3_EN;
		enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
	} else {
		dm_logger_write(enc110->base.ctx->logger, LOG_WARNING,
				"%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
				__func__,
				result);
	}
}

@@ -904,8 +907,7 @@ void dce110_link_encoder_enable_tmds_output(
	struct link_encoder *enc,
	enum clock_source_id clock_source,
	enum dc_color_depth color_depth,
	bool hdmi,
	bool dual_link,
	enum signal_type signal,
	uint32_t pixel_clock)
{
	struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);

@@ -919,16 +921,12 @@ void dce110_link_encoder_enable_tmds_output(
	cntl.engine_id = enc->preferred_engine;
	cntl.transmitter = enc110->base.transmitter;
	cntl.pll_id = clock_source;
	if (hdmi) {
		cntl.signal = SIGNAL_TYPE_HDMI_TYPE_A;
		cntl.lanes_number = 4;
	} else if (dual_link) {
		cntl.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
	cntl.signal = signal;
	if (cntl.signal == SIGNAL_TYPE_DVI_DUAL_LINK)
		cntl.lanes_number = 8;
	} else {
		cntl.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
	else
		cntl.lanes_number = 4;
	}

	cntl.hpd_sel = enc110->base.hpd_source;

	cntl.pixel_clock = pixel_clock;

@@ -210,8 +210,7 @@ void dce110_link_encoder_enable_tmds_output(
	struct link_encoder *enc,
	enum clock_source_id clock_source,
	enum dc_color_depth color_depth,
	bool hdmi,
	bool dual_link,
	enum signal_type signal,
	uint32_t pixel_clock);

/* enables DP PHY output */

@@ -852,6 +852,7 @@ static bool construct(
	dc->caps.max_downscale_ratio = 200;
	dc->caps.i2c_speed_in_khz = 40;
	dc->caps.max_cursor_size = 128;
	dc->caps.dual_link_dvi = true;

	for (i = 0; i < pool->base.pipe_count; i++) {
		pool->base.timing_generators[i] =

@@ -688,15 +688,22 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
	struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
	struct dc_link *link = pipe_ctx->stream->sink->link;

	/* 1. update AVI info frame (HDMI, DP)
	 * we always need to update info frame
	 */

	uint32_t active_total_with_borders;
	uint32_t early_control = 0;
	struct timing_generator *tg = pipe_ctx->stream_res.tg;

	/* TODOFPGA may change to hwss.update_info_frame */
	/* For MST, there are multiply stream go to only one link.
	 * connect DIG back_end to front_end while enable_stream and
	 * disconnect them during disable_stream
	 * BY this, it is logic clean to separate stream and link */
	link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
						    pipe_ctx->stream_res.stream_enc->id, true);

	/* update AVI info frame (HDMI, DP)*/
	/* TODO: FPGA may change to hwss.update_info_frame */
	dce110_update_info_frame(pipe_ctx);

	/* enable early control to avoid corruption on DP monitor*/
	active_total_with_borders =
			timing->h_addressable

@@ -717,12 +724,8 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
		pipe_ctx->stream_res.stream_enc->funcs->dp_audio_enable(pipe_ctx->stream_res.stream_enc);
	}

	/* For MST, there are multiply stream go to only one link.
	 * connect DIG back_end to front_end while enable_stream and
	 * disconnect them during disable_stream
	 * BY this, it is logic clean to separate stream and link */
	link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
						    pipe_ctx->stream_res.stream_enc->id, true);



}

@@ -1690,9 +1693,13 @@ static void apply_min_clocks(
 * Check if FBC can be enabled
 */
static bool should_enable_fbc(struct dc *dc,
			      struct dc_state *context)
			      struct dc_state *context,
			      uint32_t *pipe_idx)
{
	struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[0];
	uint32_t i;
	struct pipe_ctx *pipe_ctx = NULL;
	struct resource_context *res_ctx = &context->res_ctx;


	ASSERT(dc->fbc_compressor);

@@ -1704,6 +1711,14 @@ static bool should_enable_fbc(struct dc *dc,
	if (context->stream_count != 1)
		return false;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (res_ctx->pipe_ctx[i].stream) {
			pipe_ctx = &res_ctx->pipe_ctx[i];
			*pipe_idx = i;
			break;
		}
	}

	/* Only supports eDP */
	if (pipe_ctx->stream->sink->link->connector_signal != SIGNAL_TYPE_EDP)
		return false;

@@ -1729,11 +1744,14 @@ static void enable_fbc(struct dc *dc,
static void enable_fbc(struct dc *dc,
		       struct dc_state *context)
{
	if (should_enable_fbc(dc, context)) {
	uint32_t pipe_idx = 0;

	if (should_enable_fbc(dc, context, &pipe_idx)) {
		/* Program GRPH COMPRESSED ADDRESS and PITCH */
		struct compr_addr_and_pitch_params params = {0, 0, 0};
		struct compressor *compr = dc->fbc_compressor;
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[0];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];


		params.source_view_width = pipe_ctx->stream->timing.h_addressable;
		params.source_view_height = pipe_ctx->stream->timing.v_addressable;

@@ -2915,6 +2933,49 @@ static void program_csc_matrix(struct pipe_ctx *pipe_ctx,
	}
}

void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx)
{
	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
	struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp;
	struct mem_input *mi = pipe_ctx->plane_res.mi;
	struct dc_cursor_mi_param param = {
		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_khz,
		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz,
		.viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x,
		.viewport_width = pipe_ctx->plane_res.scl_data.viewport.width,
		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz
	};

	if (pipe_ctx->plane_state->address.type
			== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
		pos_cpy.enable = false;

	if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
		pos_cpy.enable = false;

	if (ipp->funcs->ipp_cursor_set_position)
		ipp->funcs->ipp_cursor_set_position(ipp, &pos_cpy, &param);
	if (mi->funcs->set_cursor_position)
		mi->funcs->set_cursor_position(mi, &pos_cpy, &param);
}

void dce110_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
{
	struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;

	if (pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes)
		pipe_ctx->plane_res.ipp->funcs->ipp_cursor_set_attributes(
				pipe_ctx->plane_res.ipp, attributes);

	if (pipe_ctx->plane_res.mi->funcs->set_cursor_attributes)
		pipe_ctx->plane_res.mi->funcs->set_cursor_attributes(
				pipe_ctx->plane_res.mi, attributes);

	if (pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes)
		pipe_ctx->plane_res.xfm->funcs->set_cursor_attributes(
				pipe_ctx->plane_res.xfm, attributes);
}

static void ready_shared_resources(struct dc *dc, struct dc_state *context) {}

static void optimize_shared_resources(struct dc *dc) {}

@@ -2957,6 +3018,8 @@ static const struct hw_sequencer_funcs dce110_funcs = {
	.edp_backlight_control = hwss_edp_backlight_control,
	.edp_power_control = hwss_edp_power_control,
	.edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
	.set_cursor_position = dce110_set_cursor_position,
	.set_cursor_attribute = dce110_set_cursor_attribute
};

void dce110_hw_sequencer_construct(struct dc *dc)

@@ -846,6 +846,16 @@ static bool dce110_validate_bandwidth(
	return result;
}

enum dc_status dce110_validate_plane(const struct dc_plane_state *plane_state,
				     struct dc_caps *caps)
{
	if (((plane_state->dst_rect.width * 2) < plane_state->src_rect.width) ||
	    ((plane_state->dst_rect.height * 2) < plane_state->src_rect.height))
		return DC_FAIL_SURFACE_VALIDATE;

	return DC_OK;
}

static bool dce110_validate_surface_sets(
		struct dc_state *context)
{

@@ -869,6 +879,13 @@ static bool dce110_validate_surface_sets(
				plane->src_rect.height > 1080))
				return false;

			/* we don't have the logic to support underlay
			 * only yet so block the use case where we get
			 * NV12 plane as top layer
			 */
			if (j == 0)
				return false;

			/* irrespective of plane format,
			 * stream should be RGB encoded
			 */

@@ -1021,6 +1038,7 @@ static const struct resource_funcs dce110_res_pool_funcs = {
	.link_enc_create = dce110_link_encoder_create,
	.validate_guaranteed = dce110_validate_guaranteed,
	.validate_bandwidth = dce110_validate_bandwidth,
	.validate_plane = dce110_validate_plane,
	.acquire_idle_pipe_for_layer = dce110_acquire_underlay,
	.add_stream_to_ctx = dce110_add_stream_to_ctx,
	.validate_global = dce110_validate_global

@@ -1103,6 +1103,8 @@ static bool construct(
	dc->caps.max_downscale_ratio = 200;
	dc->caps.i2c_speed_in_khz = 100;
	dc->caps.max_cursor_size = 128;
	dc->caps.dual_link_dvi = true;


	/*************************************************
	 *  Create resources                             *

@@ -835,6 +835,8 @@ static bool construct(
	dc->caps.max_downscale_ratio = 200;
	dc->caps.i2c_speed_in_khz = 100;
	dc->caps.max_cursor_size = 128;
	dc->caps.dual_link_dvi = true;

	dc->debug = debug_defaults;

	/*************************************************

@@ -793,6 +793,7 @@ static bool dce80_construct(
	dc->caps.max_downscale_ratio = 200;
	dc->caps.i2c_speed_in_khz = 40;
	dc->caps.max_cursor_size = 128;
	dc->caps.dual_link_dvi = true;

	/*************************************************
	 *  Create resources                             *

@@ -238,10 +238,24 @@ static void enable_power_gating_plane(
static void disable_vga(
	struct dce_hwseq *hws)
{
	unsigned int in_vga_mode = 0;

	REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga_mode);

	if (in_vga_mode == 0)
		return;

	REG_WRITE(D1VGA_CONTROL, 0);
	REG_WRITE(D2VGA_CONTROL, 0);
	REG_WRITE(D3VGA_CONTROL, 0);
	REG_WRITE(D4VGA_CONTROL, 0);

	/* HW Engineer's Notes:
	 *  During switch from vga->extended, if we set the VGA_TEST_ENABLE and
	 *  then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
	 *
	 *  Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
	 *  VGA_TEST_ENABLE, to leave it in the same state as before.
	 */
	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
}

static void dpp_pg_control(

@@ -1761,6 +1775,11 @@ static void update_dchubp_dpp(
			&pipe_ctx->plane_res.scl_data.viewport_c);
	}

	if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
		dc->hwss.set_cursor_position(pipe_ctx);
		dc->hwss.set_cursor_attribute(pipe_ctx);
	}

	if (plane_state->update_flags.bits.full_update) {
		/*gamut remap*/
		program_gamut_remap(pipe_ctx);

@@ -2296,7 +2315,7 @@ static bool dcn10_dummy_display_power_gating(
	return true;
}

void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
static void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
{
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
	struct timing_generator *tg = pipe_ctx->stream_res.tg;

@@ -2316,12 +2335,46 @@ void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
	}
}

void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
static void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
{
	if (hws->ctx->dc->res_pool->hubbub != NULL)
		hubbub1_update_dchub(hws->ctx->dc->res_pool->hubbub, dh_data);
}

static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
{
	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dc_cursor_mi_param param = {
		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_khz,
		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz,
		.viewport_x_start = pipe_ctx->plane_res.scl_data.viewport.x,
		.viewport_width = pipe_ctx->plane_res.scl_data.viewport.width,
		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz
	};

	if (pipe_ctx->plane_state->address.type
			== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
		pos_cpy.enable = false;

	if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
		pos_cpy.enable = false;

	hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
	dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width);
}

static void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
{
	struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;

	pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
			pipe_ctx->plane_res.hubp, attributes);
	pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
			pipe_ctx->plane_res.dpp, attributes->color_format);
}

static const struct hw_sequencer_funcs dcn10_funcs = {
	.program_gamut_remap = program_gamut_remap,
	.program_csc_matrix = program_csc_matrix,

@@ -2362,6 +2415,8 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
	.edp_backlight_control = hwss_edp_backlight_control,
	.edp_power_control = hwss_edp_power_control,
	.edp_wait_for_hpd_ready = hwss_edp_wait_for_hpd_ready,
	.set_cursor_position = dcn10_set_cursor_position,
	.set_cursor_attribute = dcn10_set_cursor_attribute
};

@@ -123,8 +123,7 @@ struct link_encoder_funcs {
	void (*enable_tmds_output)(struct link_encoder *enc,
		enum clock_source_id clock_source,
		enum dc_color_depth color_depth,
		bool hdmi,
		bool dual_link,
		enum signal_type signal,
		uint32_t pixel_clock);
	void (*enable_dp_output)(struct link_encoder *enc,
		const struct dc_link_settings *link_settings,

@@ -198,6 +198,9 @@ struct hw_sequencer_funcs {
			bool enable);
	void (*edp_wait_for_hpd_ready)(struct dc_link *link, bool power_up);

	void (*set_cursor_position)(struct pipe_ctx *pipe);
	void (*set_cursor_attribute)(struct pipe_ctx *pipe);

};

void color_space_to_black_color(

@@ -217,7 +217,7 @@ bool dce110_vblank_set(
		core_dc->current_state->res_ctx.pipe_ctx[pipe_offset].stream_res.tg;

	if (enable) {
		if (!tg->funcs->arm_vert_intr(tg, 2)) {
		if (!tg || !tg->funcs->arm_vert_intr(tg, 2)) {
			DC_ERROR("Failed to get VBLANK!\n");
			return false;
		}

|
|||
struct link_encoder *enc,
|
||||
enum clock_source_id clock_source,
|
||||
enum dc_color_depth color_depth,
|
||||
bool hdmi,
|
||||
bool dual_link,
|
||||
enum signal_type signal,
|
||||
uint32_t pixel_clock) {}
|
||||
|
||||
static void virtual_link_encoder_enable_dp_output(
|
||||
|
|
|
@@ -419,11 +419,6 @@ struct bios_event_info {
	bool backlight_changed;
};

enum {
	HDMI_PIXEL_CLOCK_IN_KHZ_297 = 297000,
	TMDS_PIXEL_CLOCK_IN_KHZ_165 = 165000
};

/*
 * DFS-bypass flag
 */

@@ -26,6 +26,11 @@
#ifndef __DC_SIGNAL_TYPES_H__
#define __DC_SIGNAL_TYPES_H__

/* Minimum pixel clock, in KHz. For TMDS signal is 25.00 MHz */
#define TMDS_MIN_PIXEL_CLOCK 25000
/* Maximum pixel clock, in KHz. For TMDS signal is 165.00 MHz */
#define TMDS_MAX_PIXEL_CLOCK 165000

enum signal_type {
	SIGNAL_TYPE_NONE = 0L, /* no signal */
	SIGNAL_TYPE_DVI_SINGLE_LINK = (1 << 0),

@@ -3205,8 +3205,10 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
	 * rolling the global seqno forward (since this would complete requests
	 * for which we haven't set the fence error to EIO yet).
	 */
	for_each_engine(engine, i915, id)
	for_each_engine(engine, i915, id) {
		i915_gem_reset_prepare_engine(engine);
		engine->submit_request = nop_submit_request;
	}

	/*
	 * Make sure no one is running the old callback before we proceed with

@@ -3244,6 +3246,8 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
		intel_engine_init_global_seqno(engine,
					       intel_engine_last_submit(engine));
		spin_unlock_irqrestore(&engine->timeline->lock, flags);

		i915_gem_reset_finish_engine(engine);
	}

	set_bit(I915_WEDGED, &i915->gpu_error.flags);

@@ -1303,9 +1303,8 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);
	dev_priv->perf.oa.exclusive_stream = NULL;
	mutex_unlock(&dev_priv->drm.struct_mutex);

	dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	free_oa_buffer(dev_priv);

@@ -1756,22 +1755,13 @@ static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_pr
 * Note: it's only the RCS/Render context that has any OA state.
 */
static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
				       const struct i915_oa_config *oa_config,
				       bool interruptible)
				       const struct i915_oa_config *oa_config)
{
	struct i915_gem_context *ctx;
	int ret;
	unsigned int wait_flags = I915_WAIT_LOCKED;

	if (interruptible) {
		ret = i915_mutex_lock_interruptible(&dev_priv->drm);
		if (ret)
			return ret;

		wait_flags |= I915_WAIT_INTERRUPTIBLE;
	} else {
		mutex_lock(&dev_priv->drm.struct_mutex);
	}
	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	/* Switch away from any user context. */
	ret = gen8_switch_to_updated_kernel_context(dev_priv, oa_config);

@@ -1819,8 +1809,6 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
	}

 out:
	mutex_unlock(&dev_priv->drm.struct_mutex);

	return ret;
}

@@ -1863,7 +1851,7 @@ static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
	 * to make sure all slices/subslices are ON before writing to NOA
	 * registers.
	 */
	ret = gen8_configure_all_contexts(dev_priv, oa_config, true);
	ret = gen8_configure_all_contexts(dev_priv, oa_config);
	if (ret)
		return ret;

@@ -1878,7 +1866,7 @@ static int gen8_enable_metric_set(struct drm_i915_private *dev_priv,
static void gen8_disable_metric_set(struct drm_i915_private *dev_priv)
{
	/* Reset all contexts' slices/subslices configurations. */
	gen8_configure_all_contexts(dev_priv, NULL, false);
	gen8_configure_all_contexts(dev_priv, NULL);

	I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) &
				      ~GT_NOA_ENABLE));

@@ -1888,7 +1876,7 @@ static void gen8_disable_metric_set(struct drm_i915_private *dev_priv)
static void gen10_disable_metric_set(struct drm_i915_private *dev_priv)
{
	/* Reset all contexts' slices/subslices configurations. */
	gen8_configure_all_contexts(dev_priv, NULL, false);
	gen8_configure_all_contexts(dev_priv, NULL);

	/* Make sure we disable noa to save power. */
	I915_WRITE(RPM_CONFIG1,

@@ -2138,6 +2126,10 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
	if (ret)
		goto err_oa_buf_alloc;

	ret = i915_mutex_lock_interruptible(&dev_priv->drm);
	if (ret)
		goto err_lock;

	ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv,
						      stream->oa_config);
	if (ret)

@@ -2145,23 +2137,17 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,

	stream->ops = &i915_oa_stream_ops;

	/* Lock device for exclusive_stream access late because
	 * enable_metric_set() might lock as well on gen8+.
	 */
	ret = i915_mutex_lock_interruptible(&dev_priv->drm);
	if (ret)
		goto err_lock;

	dev_priv->perf.oa.exclusive_stream = stream;

	mutex_unlock(&dev_priv->drm.struct_mutex);

	return 0;

err_lock:
	dev_priv->perf.oa.ops.disable_metric_set(dev_priv);

err_enable:
	dev_priv->perf.oa.ops.disable_metric_set(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);

err_lock:
	free_oa_buffer(dev_priv);

err_oa_buf_alloc:

@@ -719,6 +719,8 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
	struct rb_node *rb;
	unsigned long flags;

	GEM_TRACE("%s\n", engine->name);

	spin_lock_irqsave(&engine->timeline->lock, flags);

	/* Cancel the requests on the HW and clear the ELSP tracker. */

@@ -765,6 +767,9 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
	 */
	clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);

	/* Mark all CS interrupts as complete */
	execlists->active = 0;

	spin_unlock_irqrestore(&engine->timeline->lock, flags);
}


@@ -3221,35 +3221,8 @@ static void cik_gpu_init(struct radeon_device *rdev)
 	case CHIP_KAVERI:
 		rdev->config.cik.max_shader_engines = 1;
 		rdev->config.cik.max_tile_pipes = 4;
-		if ((rdev->pdev->device == 0x1304) ||
-		    (rdev->pdev->device == 0x1305) ||
-		    (rdev->pdev->device == 0x130C) ||
-		    (rdev->pdev->device == 0x130F) ||
-		    (rdev->pdev->device == 0x1310) ||
-		    (rdev->pdev->device == 0x1311) ||
-		    (rdev->pdev->device == 0x131C)) {
-			rdev->config.cik.max_cu_per_sh = 8;
-			rdev->config.cik.max_backends_per_se = 2;
-		} else if ((rdev->pdev->device == 0x1309) ||
-			   (rdev->pdev->device == 0x130A) ||
-			   (rdev->pdev->device == 0x130D) ||
-			   (rdev->pdev->device == 0x1313) ||
-			   (rdev->pdev->device == 0x131D)) {
-			rdev->config.cik.max_cu_per_sh = 6;
-			rdev->config.cik.max_backends_per_se = 2;
-		} else if ((rdev->pdev->device == 0x1306) ||
-			   (rdev->pdev->device == 0x1307) ||
-			   (rdev->pdev->device == 0x130B) ||
-			   (rdev->pdev->device == 0x130E) ||
-			   (rdev->pdev->device == 0x1315) ||
-			   (rdev->pdev->device == 0x1318) ||
-			   (rdev->pdev->device == 0x131B)) {
-			rdev->config.cik.max_cu_per_sh = 4;
-			rdev->config.cik.max_backends_per_se = 1;
-		} else {
-			rdev->config.cik.max_cu_per_sh = 3;
-			rdev->config.cik.max_backends_per_se = 1;
-		}
+		rdev->config.cik.max_cu_per_sh = 8;
+		rdev->config.cik.max_backends_per_se = 2;
 		rdev->config.cik.max_sh_per_se = 1;
 		rdev->config.cik.max_texture_channel_caches = 4;
 		rdev->config.cik.max_gprs = 256;

@@ -93,6 +93,8 @@ static void sun4i_crtc_atomic_disable(struct drm_crtc *crtc,
 
 	DRM_DEBUG_DRIVER("Disabling the CRTC\n");
 
+	drm_crtc_vblank_off(crtc);
+
 	sun4i_tcon_set_status(scrtc->tcon, encoder, false);
 
 	if (crtc->state->event && !crtc->state->active) {

@@ -113,6 +115,8 @@ static void sun4i_crtc_atomic_enable(struct drm_crtc *crtc,
 	DRM_DEBUG_DRIVER("Enabling the CRTC\n");
 
 	sun4i_tcon_set_status(scrtc->tcon, encoder, true);
+
+	drm_crtc_vblank_on(crtc);
 }
 
 static void sun4i_crtc_mode_set_nofb(struct drm_crtc *crtc)

@@ -132,10 +132,13 @@ static int sun4i_dclk_get_phase(struct clk_hw *hw)
 static int sun4i_dclk_set_phase(struct clk_hw *hw, int degrees)
 {
 	struct sun4i_dclk *dclk = hw_to_dclk(hw);
+	u32 val = degrees / 120;
+
+	val <<= 28;
 
 	regmap_update_bits(dclk->regmap, SUN4I_TCON0_IO_POL_REG,
 			   GENMASK(29, 28),
-			   degrees / 120);
+			   val);
 
 	return 0;
 }

@@ -92,6 +92,8 @@ static int sun4i_rgb_mode_valid(struct drm_connector *connector,
 
 	DRM_DEBUG_DRIVER("Vertical parameters OK\n");
 
+	tcon->dclk_min_div = 6;
+	tcon->dclk_max_div = 127;
 	rounded_rate = clk_round_rate(tcon->dclk, rate);
 	if (rounded_rate < rate)
 		return MODE_CLOCK_LOW;

@@ -101,10 +101,12 @@ static void sun4i_tcon_channel_set_status(struct sun4i_tcon *tcon, int channel,
 		return;
 	}
 
-	if (enabled)
+	if (enabled) {
 		clk_prepare_enable(clk);
-	else
+	} else {
+		clk_rate_exclusive_put(clk);
 		clk_disable_unprepare(clk);
+	}
 }
 
 static void sun4i_tcon_lvds_set_status(struct sun4i_tcon *tcon,

@@ -873,52 +875,56 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
 		return ret;
 	}
 
-	/*
-	 * This can only be made optional since we've had DT nodes
-	 * without the LVDS reset properties.
-	 *
-	 * If the property is missing, just disable LVDS, and print a
-	 * warning.
-	 */
-	tcon->lvds_rst = devm_reset_control_get_optional(dev, "lvds");
-	if (IS_ERR(tcon->lvds_rst)) {
-		dev_err(dev, "Couldn't get our reset line\n");
-		return PTR_ERR(tcon->lvds_rst);
-	} else if (tcon->lvds_rst) {
-		has_lvds_rst = true;
-		reset_control_reset(tcon->lvds_rst);
-	} else {
-		has_lvds_rst = false;
-	}
-
-	/*
-	 * This can only be made optional since we've had DT nodes
-	 * without the LVDS reset properties.
-	 *
-	 * If the property is missing, just disable LVDS, and print a
-	 * warning.
-	 */
-	if (tcon->quirks->has_lvds_alt) {
-		tcon->lvds_pll = devm_clk_get(dev, "lvds-alt");
-		if (IS_ERR(tcon->lvds_pll)) {
-			if (PTR_ERR(tcon->lvds_pll) == -ENOENT) {
-				has_lvds_alt = false;
-			} else {
-				dev_err(dev, "Couldn't get the LVDS PLL\n");
-				return PTR_ERR(tcon->lvds_pll);
-			}
-		} else {
-			has_lvds_alt = true;
-		}
-	}
-
-	if (!has_lvds_rst || (tcon->quirks->has_lvds_alt && !has_lvds_alt)) {
-		dev_warn(dev,
-			 "Missing LVDS properties, Please upgrade your DT\n");
-		dev_warn(dev, "LVDS output disabled\n");
-		can_lvds = false;
-	} else {
-		can_lvds = true;
-	}
+	if (tcon->quirks->supports_lvds) {
+		/*
+		 * This can only be made optional since we've had DT
+		 * nodes without the LVDS reset properties.
+		 *
+		 * If the property is missing, just disable LVDS, and
+		 * print a warning.
+		 */
+		tcon->lvds_rst = devm_reset_control_get_optional(dev, "lvds");
+		if (IS_ERR(tcon->lvds_rst)) {
+			dev_err(dev, "Couldn't get our reset line\n");
+			return PTR_ERR(tcon->lvds_rst);
+		} else if (tcon->lvds_rst) {
+			has_lvds_rst = true;
+			reset_control_reset(tcon->lvds_rst);
+		} else {
+			has_lvds_rst = false;
+		}
+
+		/*
+		 * This can only be made optional since we've had DT
+		 * nodes without the LVDS reset properties.
+		 *
+		 * If the property is missing, just disable LVDS, and
+		 * print a warning.
+		 */
+		if (tcon->quirks->has_lvds_alt) {
+			tcon->lvds_pll = devm_clk_get(dev, "lvds-alt");
+			if (IS_ERR(tcon->lvds_pll)) {
+				if (PTR_ERR(tcon->lvds_pll) == -ENOENT) {
+					has_lvds_alt = false;
+				} else {
+					dev_err(dev, "Couldn't get the LVDS PLL\n");
+					return PTR_ERR(tcon->lvds_pll);
+				}
+			} else {
+				has_lvds_alt = true;
+			}
+		}
+
+		if (!has_lvds_rst ||
+		    (tcon->quirks->has_lvds_alt && !has_lvds_alt)) {
+			dev_warn(dev, "Missing LVDS properties, Please upgrade your DT\n");
+			dev_warn(dev, "LVDS output disabled\n");
+			can_lvds = false;
+		} else {
+			can_lvds = true;
+		}
+	} else {
+		can_lvds = false;
+	}
 
 	ret = sun4i_tcon_init_clocks(dev, tcon);

@@ -1137,7 +1143,7 @@ static const struct sun4i_tcon_quirks sun8i_a33_quirks = {
 };
 
 static const struct sun4i_tcon_quirks sun8i_a83t_lcd_quirks = {
-	/* nothing is supported */
+	.supports_lvds = true,
 };
 
 static const struct sun4i_tcon_quirks sun8i_v3s_quirks = {

@@ -175,6 +175,7 @@ struct sun4i_tcon_quirks {
 	bool	has_channel_1;	/* a33 does not have channel 1 */
 	bool	has_lvds_alt;	/* Does the LVDS clock have a parent other than the TCON clock? */
 	bool	needs_de_be_mux; /* sun6i needs mux to select backend */
+	bool	supports_lvds;	/* Does the TCON support an LVDS output? */
 
 	/* callback to handle tcon muxing options */
 	int	(*set_mux)(struct sun4i_tcon *, const struct drm_encoder *);