mirror of https://gitee.com/openkylin/linux.git
amdgpu, i915, virtio-gpu, nouveau, sun4i fixes
Merge tag 'drm-fixes-for-v4.16-rc4' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "Pretty much run of the mill drm fixes.

  amdgpu:
   - power management fixes
   - some display fixes
   - one ppc 32-bit dma fix

  i915:
   - two display fixes
   - three gem fixes

  sun4i:
   - display regression fixes

  nouveau:
   - display regression fix

  virtio-gpu:
   - dumb airlied ioctl fix"

* tag 'drm-fixes-for-v4.16-rc4' of git://people.freedesktop.org/~airlied/linux: (25 commits)
  drm/amdgpu: skip ECC for SRIOV in gmc late_init
  drm/amd/amdgpu: Correct VRAM width for APUs with GMC9
  drm/amdgpu: fix&cleanups for wb_clear
  drm/amdgpu: Correct sdma_v4 get_wptr(v2)
  drm/amd/powerplay: fix power over limit on Fiji
  drm/amdgpu:Fixed wrong emit frame size for enc
  drm/amdgpu: move WB_FREE to correct place
  drm/amdgpu: only flush hotplug work without DC
  drm/amd/display: check for ipp before calling cursor operations
  drm/i915: Make global seqno known in i915_gem_request_execute tracepoint
  drm/i915: Clear the in-use marker on execbuf failure
  drm/i915/cnl: Fix PORT_TX_DW5/7 register address
  drm/i915/audio: fix check for av_enc_map overflow
  drm/i915: Fix rsvd2 mask when out-fence is returned
  virtio-gpu: fix ioctl and expose the fixed status to userspace.
  drm/sun4i: Protect the TCON pixel clocks
  drm/sun4i: Enable the output on the pins (tcon0)
  drm/nouveau: prefer XBGR2101010 for addfb ioctl
  drm/radeon: insist on 32-bit DMA for Cedar on PPC64/PPC64LE
  drm/amd/display: VGA black screen from s3 when attached to hook
  ...
commit 5d60e057d1
@@ -1156,7 +1156,7 @@ static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p,
 /*
  * Writeback
  */
-#define AMDGPU_MAX_WB 512	/* Reserve at most 512 WB slots for amdgpu-owned rings. */
+#define AMDGPU_MAX_WB 128	/* Reserve at most 128 WB slots for amdgpu-owned rings. */

 struct amdgpu_wb {
 	struct amdgpu_bo	*wb_obj;
@@ -492,7 +492,7 @@ static int amdgpu_device_wb_init(struct amdgpu_device *adev)
 		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

 		/* clear wb memory */
-		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
+		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
 	}

 	return 0;

@@ -530,8 +530,9 @@ int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
  */
 void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
 {
+	wb >>= 3;
 	if (wb < adev->wb.num_wb)
-		__clear_bit(wb >> 3, adev->wb.used);
+		__clear_bit(wb, adev->wb.used);
 }

 /**
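The three writeback hunks above share one invariant: a writeback slot is 8 dwords wide and the allocator hands out the dword offset (slot * 8), which is exactly what the wb >>= 3 above undoes; the backing memory therefore has to be cleared for AMDGPU_MAX_WB * 8 dwords, and the free path must convert back to a slot index before touching the allocation bitmap. Below is a minimal user-space model of that arithmetic only -- it is not the kernel API, and MAX_WB, wb_get() and wb_free() are invented names for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_WB 128                    /* slots, mirroring the new AMDGPU_MAX_WB */

static uint64_t used[MAX_WB / 64];    /* one allocation bit per slot */
static uint32_t wb_mem[MAX_WB * 8];   /* 8 dwords of writeback memory per slot */

static int wb_get(uint32_t *wb)
{
	for (unsigned slot = 0; slot < MAX_WB; slot++) {
		if (!(used[slot / 64] & (1ULL << (slot % 64)))) {
			used[slot / 64] |= 1ULL << (slot % 64);
			*wb = slot * 8;   /* caller receives a dword offset */
			return 0;
		}
	}
	return -1;
}

static void wb_free(uint32_t wb)
{
	uint32_t slot = wb >> 3;          /* back from dword offset to slot index */

	if (slot < MAX_WB)
		used[slot / 64] &= ~(1ULL << (slot % 64));
}

int main(void)
{
	uint32_t wb;

	memset(wb_mem, 0, sizeof(wb_mem)); /* MAX_WB * 8 dwords, as in the fixed memset */
	wb_get(&wb);
	printf("got dword offset %u (slot %u)\n", wb, wb >> 3);
	wb_free(wb);
	return 0;
}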
@@ -1455,11 +1456,6 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
 		if (!adev->ip_blocks[i].status.hw)
 			continue;
-		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
-			amdgpu_free_static_csa(adev);
-			amdgpu_device_wb_fini(adev);
-			amdgpu_device_vram_scratch_fini(adev);
-		}

 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
 			adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {

@@ -1486,6 +1482,13 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
 		if (!adev->ip_blocks[i].status.sw)
 			continue;
+
+		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
+			amdgpu_free_static_csa(adev);
+			amdgpu_device_wb_fini(adev);
+			amdgpu_device_vram_scratch_fini(adev);
+		}
+
 		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
 		/* XXX handle errors */
 		if (r) {

@@ -2284,14 +2287,6 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
 			}
 			drm_modeset_unlock_all(dev);
-		} else {
-			/*
-			 * There is no equivalent atomic helper to turn on
-			 * display, so we defined our own function for this,
-			 * once suspend resume is supported by the atomic
-			 * framework this will be reworked
-			 */
-			amdgpu_dm_display_resume(adev);
 		}
 	}

@@ -2726,7 +2721,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 	if (amdgpu_device_has_dc_support(adev)) {
 		if (drm_atomic_helper_resume(adev->ddev, state))
 			dev_info(adev->dev, "drm resume failed:%d\n", r);
-		amdgpu_dm_display_resume(adev);
 	} else {
 		drm_helper_resume_force_mode(adev->ddev);
 	}
@@ -75,7 +75,7 @@ static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
{
	struct amdgpu_gtt_mgr *mgr = man->priv;

	spin_lock(&mgr->lock);
	drm_mm_takedown(&mgr->mm);
	spin_unlock(&mgr->lock);
	kfree(mgr);
@@ -257,7 +257,8 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
 		r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq);
 		if (r) {
 			adev->irq.installed = false;
-			flush_work(&adev->hotplug_work);
+			if (!amdgpu_device_has_dc_support(adev))
+				flush_work(&adev->hotplug_work);
 			cancel_work_sync(&adev->reset_work);
 			return r;
 		}

@@ -282,7 +283,8 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
 		adev->irq.installed = false;
 		if (adev->irq.msi_enabled)
 			pci_disable_msi(adev->pdev);
-		flush_work(&adev->hotplug_work);
+		if (!amdgpu_device_has_dc_support(adev))
+			flush_work(&adev->hotplug_work);
 		cancel_work_sync(&adev->reset_work);
 	}
@@ -634,7 +634,7 @@ static int gmc_v9_0_late_init(void *handle)
 	for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
 		BUG_ON(vm_inv_eng[i] > 16);

-	if (adev->asic_type == CHIP_VEGA10) {
+	if (adev->asic_type == CHIP_VEGA10 && !amdgpu_sriov_vf(adev)) {
 		r = gmc_v9_0_ecc_available(adev);
 		if (r == 1) {
 			DRM_INFO("ECC is active.\n");

@@ -682,7 +682,10 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
 	adev->mc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
 	if (!adev->mc.vram_width) {
 		/* hbm memory channel size */
-		chansize = 128;
+		if (adev->flags & AMD_IS_APU)
+			chansize = 64;
+		else
+			chansize = 128;

 		tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0);
 		tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK;
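The VRAM-width hunk only changes the channel size; further down in gmc_v9_0_mc_init() (not shown here) the width is derived as the number of populated DRAM channels times that channel size, which is why GMC9 APUs need 64 rather than the HBM value of 128. A stand-alone sketch of that arithmetic, with example channel counts that are purely illustrative:

#include <stdio.h>

static unsigned vram_width_bits(unsigned numchan, int is_apu)
{
	unsigned chansize = is_apu ? 64 : 128;	/* DDR channels on APUs, HBM on dGPUs */

	return numchan * chansize;
}

int main(void)
{
	printf("APU, 2 channels:  %u bits\n", vram_width_bits(2, 1));  /* 128 */
	printf("dGPU, 8 channels: %u bits\n", vram_width_bits(8, 0));  /* 1024 */
	return 0;
}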
@@ -238,31 +238,27 @@ static uint64_t sdma_v4_0_ring_get_rptr(struct amdgpu_ring *ring)
 static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
-	u64 *wptr = NULL;
-	uint64_t local_wptr = 0;
+	u64 wptr;

 	if (ring->use_doorbell) {
 		/* XXX check if swapping is necessary on BE */
-		wptr = ((u64 *)&adev->wb.wb[ring->wptr_offs]);
-		DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", *wptr);
-		*wptr = (*wptr) >> 2;
-		DRM_DEBUG("wptr/doorbell after shift == 0x%016llx\n", *wptr);
+		wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
+		DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
 	} else {
 		u32 lowbit, highbit;
 		int me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;

-		wptr = &local_wptr;
 		lowbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR)) >> 2;
 		highbit = RREG32(sdma_v4_0_get_reg_offset(adev, me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;

 		DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n",
 				me, highbit, lowbit);
-		*wptr = highbit;
-		*wptr = (*wptr) << 32;
-		*wptr |= lowbit;
+		wptr = highbit;
+		wptr = wptr << 32;
+		wptr |= lowbit;
 	}

-	return *wptr;
+	return wptr >> 2;
 }

 /**
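The essential change in get_wptr is that the old doorbell path shifted the value in place through a pointer into the shared writeback page, so every call modified memory the engine owns and returned a progressively smaller value; the new code reads the shared location once and converts only the local copy on return. A small user-space model of that difference (volatile stands in for READ_ONCE, all names invented):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the writeback slot the SDMA engine keeps updating. */
static volatile uint64_t wptr_writeback = 0x100;

/* Old pattern: shifts the shared copy in place, every call changes it. */
static uint64_t get_wptr_in_place(void)
{
	volatile uint64_t *wptr = &wptr_writeback;

	*wptr = *wptr >> 2;
	return *wptr;
}

/* New pattern: read once into a local, convert only the local value. */
static uint64_t get_wptr_local(void)
{
	uint64_t wptr = wptr_writeback;

	return wptr >> 2;
}

int main(void)
{
	printf("in-place: %llu then %llu\n",
	       (unsigned long long)get_wptr_in_place(),
	       (unsigned long long)get_wptr_in_place());	/* keeps shrinking */
	wptr_writeback = 0x100;
	printf("local:    %llu then %llu\n",
	       (unsigned long long)get_wptr_local(),
	       (unsigned long long)get_wptr_local());	/* stable */
	return 0;
}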
@@ -1618,7 +1618,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = {
 	.set_wptr = uvd_v6_0_enc_ring_set_wptr,
 	.emit_frame_size =
 		4 + /* uvd_v6_0_enc_ring_emit_pipeline_sync */
-		6 + /* uvd_v6_0_enc_ring_emit_vm_flush */
+		5 + /* uvd_v6_0_enc_ring_emit_vm_flush */
 		5 + 5 + /* uvd_v6_0_enc_ring_emit_fence x2 vm fence */
 		1, /* uvd_v6_0_enc_ring_insert_end */
 	.emit_ib_size = 5, /* uvd_v6_0_enc_ring_emit_ib */
@@ -629,11 +629,13 @@ static int dm_resume(void *handle)
 {
 	struct amdgpu_device *adev = handle;
 	struct amdgpu_display_manager *dm = &adev->dm;
+	int ret = 0;

 	/* power on hardware */
 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);

-	return 0;
+	ret = amdgpu_dm_display_resume(adev);
+	return ret;
 }

 int amdgpu_dm_display_resume(struct amdgpu_device *adev)
@@ -1465,7 +1465,7 @@ void decide_link_settings(struct dc_stream_state *stream,
 	/* MST doesn't perform link training for now
	 * TODO: add MST specific link training routine
	 */
-	if (is_mst_supported(link)) {
+	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
 		*link_setting = link->verified_link_cap;
 		return;
 	}
@@ -197,7 +197,8 @@ bool dc_stream_set_cursor_attributes(
 	for (i = 0; i < MAX_PIPES; i++) {
 		struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];

-		if (pipe_ctx->stream != stream || (!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp))
+		if (pipe_ctx->stream != stream || (!pipe_ctx->plane_res.xfm &&
+		    !pipe_ctx->plane_res.dpp) || !pipe_ctx->plane_res.ipp)
 			continue;
 		if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state)
 			continue;

@@ -273,7 +274,8 @@ bool dc_stream_set_cursor_position(
 		if (pipe_ctx->stream != stream ||
 				(!pipe_ctx->plane_res.mi && !pipe_ctx->plane_res.hubp) ||
 				!pipe_ctx->plane_state ||
-				(!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp))
+				(!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp) ||
+				!pipe_ctx->plane_res.ipp)
 			continue;

 		if (pipe_ctx->plane_state->address.type
@@ -2756,10 +2756,13 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 			PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);


-	disable_mclk_switching = ((1 < info.display_count) ||
-				  disable_mclk_switching_for_frame_lock ||
-				  smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us) ||
-				  (mode_info.refresh_rate > 120));
+	if (info.display_count == 0)
+		disable_mclk_switching = false;
+	else
+		disable_mclk_switching = ((1 < info.display_count) ||
+					  disable_mclk_switching_for_frame_lock ||
+					  smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us) ||
+					  (mode_info.refresh_rate > 120));

 	sclk = smu7_ps->performance_levels[0].engine_clock;
 	mclk = smu7_ps->performance_levels[0].memory_clock;

@@ -4534,13 +4537,6 @@ static int smu7_set_power_profile_state(struct pp_hwmgr *hwmgr,
 	int tmp_result, result = 0;
 	uint32_t sclk_mask = 0, mclk_mask = 0;

-	if (hwmgr->chip_id == CHIP_FIJI) {
-		if (request->type == AMD_PP_GFX_PROFILE)
-			smu7_enable_power_containment(hwmgr);
-		else if (request->type == AMD_PP_COMPUTE_PROFILE)
-			smu7_disable_power_containment(hwmgr);
-	}
-
 	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_AUTO)
 		return -EINVAL;
@@ -3168,10 +3168,13 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 	disable_mclk_switching_for_vr = PP_CAP(PHM_PlatformCaps_DisableMclkSwitchForVR);
 	force_mclk_high = PP_CAP(PHM_PlatformCaps_ForceMclkHigh);

-	disable_mclk_switching = (info.display_count > 1) ||
-				    disable_mclk_switching_for_frame_lock ||
-				    disable_mclk_switching_for_vr ||
-				    force_mclk_high;
+	if (info.display_count == 0)
+		disable_mclk_switching = false;
+	else
+		disable_mclk_switching = (info.display_count > 1) ||
+					    disable_mclk_switching_for_frame_lock ||
+					    disable_mclk_switching_for_vr ||
+					    force_mclk_high;

 	sclk = vega10_ps->performance_levels[0].gfx_clock;
 	mclk = vega10_ps->performance_levels[0].mem_clock;
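Both powerplay hunks add the same special case: with no display attached the vblank/refresh data used by the old expression is not meaningful, so memory-clock switching is now explicitly left enabled instead of being disabled on the strength of garbage timing data. The decision reduces to a small predicate; the sketch below follows the smu7 variant with invented parameter names, it is not the driver code:

#include <stdbool.h>

static bool disable_mclk_switching(unsigned int display_count,
				   bool frame_lock,
				   bool vblank_too_short,
				   unsigned int refresh_rate)
{
	if (display_count == 0)
		return false;		/* nothing to glitch, keep switching enabled */

	return display_count > 1 ||	/* cannot line up vblanks on several heads */
	       frame_lock ||
	       vblank_too_short ||
	       refresh_rate > 120;
}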
@@ -121,6 +121,10 @@ int drm_mode_addfb(struct drm_device *dev,
 	r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth);
 	r.handles[0] = or->handle;

+	if (r.pixel_format == DRM_FORMAT_XRGB2101010 &&
+	    dev->driver->driver_features & DRIVER_PREFER_XBGR_30BPP)
+		r.pixel_format = DRM_FORMAT_XBGR2101010;
+
 	ret = drm_mode_addfb2(dev, &r, file_priv);
 	if (ret)
 		return ret;
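The legacy ADDFB ioctl carries only bpp/depth, and bpp 32 with depth 30 has historically been mapped to XRGB2101010; the new DRIVER_PREFER_XBGR_30BPP flag lets a driver (nouveau, later in this diff) steer that mapping to the channel order its scanout hardware supports. A tiny sketch of the resulting selection, assuming libdrm's drm_fourcc.h for the FOURCC constants:

#include <stdbool.h>
#include <stdint.h>
#include <drm_fourcc.h>

/* Mirror of the legacy 30 bpp choice: XRGB2101010 by default, the XBGR-ordered
 * variant for drivers that set DRIVER_PREFER_XBGR_30BPP. */
static uint32_t legacy_30bpp_format(bool driver_prefers_xbgr)
{
	return driver_prefers_xbgr ? DRM_FORMAT_XBGR2101010
				   : DRM_FORMAT_XRGB2101010;
}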
@@ -505,6 +505,8 @@ eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma)
 		list_add_tail(&vma->exec_link, &eb->unbound);
 		if (drm_mm_node_allocated(&vma->node))
 			err = i915_vma_unbind(vma);
+		if (unlikely(err))
+			vma->exec_flags = NULL;
 	}
 	return err;
 }

@@ -2410,7 +2412,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	if (out_fence) {
 		if (err == 0) {
 			fd_install(out_fence_fd, out_fence->file);
-			args->rsvd2 &= GENMASK_ULL(0, 31); /* keep in-fence */
+			args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */
 			args->rsvd2 |= (u64)out_fence_fd << 32;
 			out_fence_fd = -1;
 		} else {
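The rsvd2 fix is purely an argument-order bug: GENMASK_ULL(h, l) builds a mask with bits l through h set, so GENMASK_ULL(31, 0) keeps the in-fence fd in the low half of rsvd2, while the old GENMASK_ULL(0, 31) evaluates to zero and wiped the field before the out-fence fd was ORed into the top half. A plain-C sketch of the same arithmetic -- the helper below is a stand-in, not the kernel macro:

#include <stdint.h>
#include <stdio.h>

static uint64_t genmask_ull(unsigned h, unsigned l)
{
	return (~0ULL >> (63 - h)) & (~0ULL << l);	/* bits l..h set */
}

int main(void)
{
	uint64_t rsvd2 = 0x00000000deadbeefULL;		/* low 32 bits: in-fence fd */
	int out_fence_fd = 7;

	rsvd2 &= genmask_ull(31, 0);			/* keep in-fence */
	rsvd2 |= (uint64_t)out_fence_fd << 32;		/* return out-fence in the high half */
	printf("rsvd2 = 0x%016llx\n", (unsigned long long)rsvd2);
	return 0;
}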
@@ -476,8 +476,6 @@ void __i915_gem_request_submit(struct drm_i915_gem_request *request)
 	GEM_BUG_ON(!irqs_disabled());
 	lockdep_assert_held(&engine->timeline->lock);

-	trace_i915_gem_request_execute(request);
-
 	/* Transfer from per-context onto the global per-engine timeline */
 	timeline = engine->timeline;
 	GEM_BUG_ON(timeline == request->timeline);

@@ -501,6 +499,8 @@ void __i915_gem_request_submit(struct drm_i915_gem_request *request)
 	list_move_tail(&request->link, &timeline->requests);
 	spin_unlock(&request->timeline->lock);

+	trace_i915_gem_request_execute(request);
+
 	wake_up_all(&request->execute);
 }
@@ -2027,7 +2027,7 @@ enum i915_power_well_id {
 #define _CNL_PORT_TX_DW5_LN0_AE		0x162454
 #define _CNL_PORT_TX_DW5_LN0_B		0x162654
 #define _CNL_PORT_TX_DW5_LN0_C		0x162C54
-#define _CNL_PORT_TX_DW5_LN0_D		0x162ED4
+#define _CNL_PORT_TX_DW5_LN0_D		0x162E54
 #define _CNL_PORT_TX_DW5_LN0_F		0x162854
 #define CNL_PORT_TX_DW5_GRP(port)	_MMIO_PORT6(port, \
 						    _CNL_PORT_TX_DW5_GRP_AE, \

@@ -2058,7 +2058,7 @@ enum i915_power_well_id {
 #define _CNL_PORT_TX_DW7_LN0_AE		0x16245C
 #define _CNL_PORT_TX_DW7_LN0_B		0x16265C
 #define _CNL_PORT_TX_DW7_LN0_C		0x162C5C
-#define _CNL_PORT_TX_DW7_LN0_D		0x162EDC
+#define _CNL_PORT_TX_DW7_LN0_D		0x162E5C
 #define _CNL_PORT_TX_DW7_LN0_F		0x16285C
 #define CNL_PORT_TX_DW7_GRP(port)	_MMIO_PORT6(port, \
 						    _CNL_PORT_TX_DW7_GRP_AE, \
@@ -779,11 +779,11 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
 {
 	struct intel_encoder *encoder;

-	if (WARN_ON(pipe >= ARRAY_SIZE(dev_priv->av_enc_map)))
-		return NULL;
-
 	/* MST */
 	if (pipe >= 0) {
+		if (WARN_ON(pipe >= ARRAY_SIZE(dev_priv->av_enc_map)))
+			return NULL;
+
 		encoder = dev_priv->av_enc_map[pipe];
 		/*
 		 * when bootup, audio driver may not know it is
@@ -4477,6 +4477,7 @@ nv50_display_create(struct drm_device *dev)
 	nouveau_display(dev)->fini = nv50_display_fini;
 	disp->disp = &nouveau_display(dev)->disp;
 	dev->mode_config.funcs = &nv50_disp_func;
+	dev->driver->driver_features |= DRIVER_PREFER_XBGR_30BPP;
 	if (nouveau_atomic)
 		dev->driver->driver_features |= DRIVER_ATOMIC;
@@ -1365,6 +1365,10 @@ int radeon_device_init(struct radeon_device *rdev,
 	if ((rdev->flags & RADEON_IS_PCI) &&
 	    (rdev->family <= CHIP_RS740))
 		rdev->need_dma32 = true;
+#ifdef CONFIG_PPC64
+	if (rdev->family == CHIP_CEDAR)
+		rdev->need_dma32 = true;
+#endif

 	dma_bits = rdev->need_dma32 ? 32 : 40;
 	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
@@ -47,7 +47,6 @@ static bool radeon_pm_in_vbl(struct radeon_device *rdev);
 static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
 static void radeon_pm_update_profile(struct radeon_device *rdev);
 static void radeon_pm_set_clocks(struct radeon_device *rdev);
-static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev);

 int radeon_pm_get_type_index(struct radeon_device *rdev,
 			     enum radeon_pm_state_type ps_type,

@@ -80,8 +79,6 @@ void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
 			radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
 		}
 		mutex_unlock(&rdev->pm.mutex);
-		/* allow new DPM state to be picked */
-		radeon_pm_compute_clocks_dpm(rdev);
 	} else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
 		if (rdev->pm.profile == PM_PROFILE_AUTO) {
 			mutex_lock(&rdev->pm.mutex);

@@ -885,8 +882,7 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
 		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
 	/* balanced states don't exist at the moment */
 	if (dpm_state == POWER_STATE_TYPE_BALANCED)
-		dpm_state = rdev->pm.dpm.ac_power ?
-			POWER_STATE_TYPE_PERFORMANCE : POWER_STATE_TYPE_BATTERY;
+		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

 restart_search:
 	/* Pick the best power state based on current conditions */
@@ -260,7 +260,7 @@ static void sun4i_tcon0_mode_set_common(struct sun4i_tcon *tcon,
 					const struct drm_display_mode *mode)
 {
 	/* Configure the dot clock */
-	clk_set_rate(tcon->dclk, mode->crtc_clock * 1000);
+	clk_set_rate_exclusive(tcon->dclk, mode->crtc_clock * 1000);

 	/* Set the resolution */
 	regmap_write(tcon->regs, SUN4I_TCON0_BASIC0_REG,

@@ -335,6 +335,9 @@ static void sun4i_tcon0_mode_set_lvds(struct sun4i_tcon *tcon,
 	regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG,
 			   SUN4I_TCON_GCTL_IOMAP_MASK,
 			   SUN4I_TCON_GCTL_IOMAP_TCON0);
+
+	/* Enable the output on the pins */
+	regmap_write(tcon->regs, SUN4I_TCON0_IO_TRI_REG, 0xe0000000);
 }

 static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,

@@ -418,7 +421,7 @@ static void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon,
 	WARN_ON(!tcon->quirks->has_channel_1);

 	/* Configure the dot clock */
-	clk_set_rate(tcon->sclk1, mode->crtc_clock * 1000);
+	clk_set_rate_exclusive(tcon->sclk1, mode->crtc_clock * 1000);

 	/* Adjust clock delay */
 	clk_delay = sun4i_tcon_get_clk_delay(mode, 1);
@@ -197,6 +197,9 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
 	case VIRTGPU_PARAM_3D_FEATURES:
 		value = vgdev->has_virgl_3d == true ? 1 : 0;
 		break;
+	case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
+		value = 1;
+		break;
 	default:
 		return -EINVAL;
 	}

@@ -472,7 +475,7 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
 {
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct drm_virtgpu_get_caps *args = data;
-	int size;
+	unsigned size, host_caps_size;
 	int i;
 	int found_valid = -1;
 	int ret;

@@ -481,6 +484,10 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
 	if (vgdev->num_capsets == 0)
 		return -ENOSYS;

+	/* don't allow userspace to pass 0 */
+	if (args->size == 0)
+		return -EINVAL;
+
 	spin_lock(&vgdev->display_info_lock);
 	for (i = 0; i < vgdev->num_capsets; i++) {
 		if (vgdev->capsets[i].id == args->cap_set_id) {

@@ -496,11 +503,9 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
 		return -EINVAL;
 	}

-	size = vgdev->capsets[found_valid].max_size;
-	if (args->size > size) {
-		spin_unlock(&vgdev->display_info_lock);
-		return -EINVAL;
-	}
+	host_caps_size = vgdev->capsets[found_valid].max_size;
+	/* only copy to user the minimum of the host caps size or the guest caps size */
+	size = min(args->size, host_caps_size);

 	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
 		if (cache_ent->id == args->cap_set_id &&
@@ -56,6 +56,7 @@ struct drm_printer;
 #define DRIVER_ATOMIC			0x10000
 #define DRIVER_KMS_LEGACY_CONTEXT	0x20000
 #define DRIVER_SYNCOBJ			0x40000
+#define DRIVER_PREFER_XBGR_30BPP	0x80000

 /**
  * struct drm_driver - DRM driver structure
@@ -63,6 +63,7 @@ struct drm_virtgpu_execbuffer {
 };

 #define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
+#define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */

 struct drm_virtgpu_getparam {
 	__u64 param;
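With the new parameter in the uapi header, userspace can probe at runtime whether the running kernel has the fixed capset-query behaviour (request size clamped with min() instead of rejected). A hedged user-space sketch using libdrm; include paths and error handling are simplified and has_capset_query_fix() is an invented helper:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>		/* drmIoctl(), from libdrm */
#include <virtgpu_drm.h>	/* DRM_IOCTL_VIRTGPU_GETPARAM, VIRTGPU_PARAM_* */

/* Returns 1 on a kernel with the capset fix, 0 on an older kernel. */
static int has_capset_query_fix(int fd)
{
	struct drm_virtgpu_getparam gp;
	int value = 0;

	memset(&gp, 0, sizeof(gp));
	gp.param = VIRTGPU_PARAM_CAPSET_QUERY_FIX;
	gp.value = (uint64_t)(uintptr_t)&value;	/* kernel writes an int here */

	if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_GETPARAM, &gp))
		return 0;	/* -EINVAL from an old kernel: parameter unknown */

	return value == 1;
}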