Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux
Pull drm fixes from Dave Airlie:
 "i915, nouveau and amdgpu/radeon fixes in this:

  nouveau:
     Two fixes, one for a regression with dithering and one for a bug
     hit by the userspace drivers.

  i915:
     A few fixes, mostly things heading for stable, two important
     skylake GT3/4 hangs.

  radeon/amdgpu:
     Some audio, suspend/resume and some runtime PM fixes, along with
     two patches to harden the userptr ABI a bit"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux: (24 commits)
  drm: Loongson-3 doesn't fully support wc memory
  drm/nouveau/gr/gf100: select a stream master to fixup tfb offset queries
  amdgpu/uvd: add uvd fw version for amdgpu
  drm/amdgpu: forbid mapping of userptr bo through radeon device file
  drm/radeon: forbid mapping of userptr bo through radeon device file
  drm/amdgpu: bump the afmt limit for CZ, ST, Polaris
  drm/amdgpu: use defines for CRTCs and AMFT blocks
  drm/dp/mst: Validate port in drm_dp_payload_send_msg()
  drm/nouveau/kms: fix setting of default values for dithering properties
  drm/radeon: print a message if ATPX dGPU power control is missing
  Revert "drm/radeon: disable runtime pm on PX laptops without dGPU power control"
  drm/amdgpu/acp: fix resume on CZ systems with AZ audio
  drm/radeon: add a quirk for a XFX R9 270X
  drm/radeon: print pci revision as well as pci ids on driver load
  drm/i915: Use fw_domains_put_with_fifo() on HSW
  drm/i915: Force ringbuffers to not be at offset 0
  drm/i915: Adjust size of PIPE_CONTROL used for gen8 render seqno write
  drm/i915/skl: Fix spurious gpu hang with gt3/gt4 revs
  drm/i915/skl: Fix rc6 based gpu/system hang
  drm/i915/userptr: Hold mmref whilst calling get-user-pages
  ...
commit d61fb48b2f
@@ -1591,6 +1591,7 @@ struct amdgpu_uvd {
 	struct amdgpu_bo	*vcpu_bo;
 	void			*cpu_addr;
 	uint64_t		gpu_addr;
+	unsigned		fw_version;
 	void			*saved_bo;
 	atomic_t		handles[AMDGPU_MAX_UVD_HANDLES];
 	struct drm_file		*filp[AMDGPU_MAX_UVD_HANDLES];
@@ -425,6 +425,10 @@ static int acp_resume(void *handle)
 	struct acp_pm_domain *apd;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	/* return early if no ACP */
+	if (!adev->acp.acp_genpd)
+		return 0;
+
 	/* SMU block will power on ACP irrespective of ACP runtime status.
 	 * Power off explicitly based on genpd ACP runtime status so that ACP
 	 * hw and ACP-genpd status are in sync.
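The early return covers boards without an ACP block; for the rest, resume has to undo the SMU's unconditional power-up, as the comment says. A rough sketch of what such a re-sync could look like; every identifier below the comment is an assumption for illustration, not code from this commit:

	/* Power ACP back off when genpd believes it is suspended, so the
	 * hardware state matches genpd bookkeeping after resume. */
	for (i = 0; i < ACP_PD_COUNT; i++) {		/* hypothetical bound */
		apd = adev->acp.acp_genpd[i];		/* hypothetical layout */
		if (apd->gpd.status == GPD_STATE_POWER_OFF)
			apd->gpd.power_off(&apd->gpd);
	}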
@@ -303,7 +303,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		fw_info.feature = adev->vce.fb_version;
 		break;
 	case AMDGPU_INFO_FW_UVD:
-		fw_info.ver = 0;
+		fw_info.ver = adev->uvd.fw_version;
 		fw_info.feature = 0;
 		break;
 	case AMDGPU_INFO_FW_GMC:
@@ -53,7 +53,7 @@ struct amdgpu_hpd;
 
 #define AMDGPU_MAX_HPD_PINS 6
 #define AMDGPU_MAX_CRTCS 6
-#define AMDGPU_MAX_AFMT_BLOCKS 7
+#define AMDGPU_MAX_AFMT_BLOCKS 9
 
 enum amdgpu_rmx_type {
 	RMX_OFF,
@@ -309,8 +309,8 @@ struct amdgpu_mode_info {
 	struct atom_context *atom_context;
 	struct card_info *atom_card_info;
 	bool mode_config_initialized;
-	struct amdgpu_crtc *crtcs[6];
-	struct amdgpu_afmt *afmt[7];
+	struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS];
+	struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS];
 	/* DVI-I properties */
 	struct drm_property *coherent_mode_property;
 	/* DAC enable load detect */
@@ -223,6 +223,8 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 {
 	struct amdgpu_bo *rbo = container_of(bo, struct amdgpu_bo, tbo);
 
+	if (amdgpu_ttm_tt_get_usermm(bo->ttm))
+		return -EPERM;
 	return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
 }
 
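This hunk and its radeon twin near the end of the diff close the same hole: a buffer object whose backing pages came from a user pointer must not be handed out again via mmap on the device file, since those pages are already a userspace mapping. Seen from userspace, as a fragment sketch (`drm_fd` and `mmap_offset` stand in for a real device fd and whatever map offset the driver reported for the BO):

	/* After this fix, mapping a userptr BO through the DRM fd fails. */
	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		       drm_fd, mmap_offset);
	if (p == MAP_FAILED && errno == EPERM)
		fprintf(stderr, "userptr BOs cannot be mapped twice\n");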
@@ -158,6 +158,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 	DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
 		version_major, version_minor, family_id);
 
+	adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
+				(family_id << 8));
+
 	bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
 		+ AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
 	r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
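The value packed here is what the AMDGPU_INFO_FW_UVD query above now returns, so a client can unpack it with the inverse shifts. A small runnable sketch (the sample numbers are invented; only the (major << 24) | (minor << 16) | (family_id << 8) layout comes from the hunk):

	#include <stdint.h>
	#include <stdio.h>

	static void decode_uvd_fw(uint32_t v)
	{
		/* Inverse of the packing done in amdgpu_uvd_sw_init(). */
		printf("UVD firmware %u.%u, family %u\n",
		       (v >> 24) & 0xff, (v >> 16) & 0xff, (v >> 8) & 0xff);
	}

	int main(void)
	{
		decode_uvd_fw((1u << 24) | (87u << 16) | (16u << 8));
		return 0;
	}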
@@ -255,6 +258,8 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 	if (i == AMDGPU_MAX_UVD_HANDLES)
 		return 0;
 
+	cancel_delayed_work_sync(&adev->uvd.idle_work);
+
 	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
 	ptr = adev->uvd.cpu_addr;
 
@@ -234,6 +234,7 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
 	if (i == AMDGPU_MAX_VCE_HANDLES)
 		return 0;
 
+	cancel_delayed_work_sync(&adev->vce.idle_work);
 	/* TODO: suspending running encoding sessions isn't supported */
 	return -EINVAL;
 }
@@ -1672,13 +1672,19 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
 	u8 sinks[DRM_DP_MAX_SDP_STREAMS];
 	int i;
 
+	port = drm_dp_get_validated_port_ref(mgr, port);
+	if (!port)
+		return -EINVAL;
+
 	port_num = port->port_num;
 	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
 	if (!mstb) {
 		mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
 
-		if (!mstb)
+		if (!mstb) {
+			drm_dp_put_port(port);
 			return -EINVAL;
+		}
 	}
 
 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
@@ -1707,6 +1713,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
 	kfree(txmsg);
 fail_put:
 	drm_dp_put_mst_branch_device(mstb);
+	drm_dp_put_port(port);
 	return ret;
 }
 
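The two hunks form one pattern: the caller-supplied port pointer is revalidated against the live topology before use, and every exit path afterwards, early -EINVAL returns included, drops exactly one reference. The discipline in miniature, distilled from the hunks above:

	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return -EINVAL;		/* nothing taken, nothing to drop */
	/* ... build and send the payload message ... */
	drm_dp_put_port(port);		/* matches the get on every path */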
@@ -2634,8 +2634,9 @@ struct drm_i915_cmd_table {
 
 /* WaRsDisableCoarsePowerGating:skl,bxt */
 #define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \
-						 ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && \
-						  IS_SKL_REVID(dev, 0, SKL_REVID_F0)))
+						 IS_SKL_GT3(dev) || \
+						 IS_SKL_GT4(dev))
+
 /*
  * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
  * even when in MSI mode. This results in spurious interrupt warnings if the
@@ -501,19 +501,24 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 	if (pvec != NULL) {
 		struct mm_struct *mm = obj->userptr.mm->mm;
 
-		down_read(&mm->mmap_sem);
-		while (pinned < npages) {
-			ret = get_user_pages_remote(work->task, mm,
-					obj->userptr.ptr + pinned * PAGE_SIZE,
-					npages - pinned,
-					!obj->userptr.read_only, 0,
-					pvec + pinned, NULL);
-			if (ret < 0)
-				break;
+		ret = -EFAULT;
+		if (atomic_inc_not_zero(&mm->mm_users)) {
+			down_read(&mm->mmap_sem);
+			while (pinned < npages) {
+				ret = get_user_pages_remote
+					(work->task, mm,
+					 obj->userptr.ptr + pinned * PAGE_SIZE,
+					 npages - pinned,
+					 !obj->userptr.read_only, 0,
+					 pvec + pinned, NULL);
+				if (ret < 0)
+					break;
 
-			pinned += ret;
+				pinned += ret;
+			}
+			up_read(&mm->mmap_sem);
+			mmput(mm);
 		}
-		up_read(&mm->mmap_sem);
 	}
 
 	mutex_lock(&dev->struct_mutex);
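The wrapper around the pinning loop is the actual fix: the worker runs asynchronously, so the target mm can be torn down underneath it. atomic_inc_not_zero(&mm->mm_users) takes a reference only if the mm is still live, and the matching mmput() releases it; if the mm is already dead the loop never runs and ret stays -EFAULT. The guard, distilled from the hunk:

	ret = -EFAULT;				/* default if mm is gone */
	if (atomic_inc_not_zero(&mm->mm_users)) {
		down_read(&mm->mmap_sem);
		/* ... get_user_pages_remote() loop ... */
		up_read(&mm->mmap_sem);
		mmput(mm);			/* pair with the inc above */
	}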
@@ -841,11 +841,12 @@ static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
 	if (unlikely(total_bytes > remain_usable)) {
 		/*
 		 * The base request will fit but the reserved space
-		 * falls off the end. So only need to to wait for the
-		 * reserved size after flushing out the remainder.
+		 * falls off the end. So don't need an immediate wrap
+		 * and only need to effectively wait for the reserved
+		 * size space from the start of ringbuffer.
 		 */
 		wait_bytes = remain_actual + ringbuf->reserved_size;
 		need_wrap = true;
 	} else if (total_bytes > ringbuf->space) {
 		/* No wrapping required, just waiting. */
 		wait_bytes = total_bytes;
@@ -1913,15 +1913,18 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
 	struct intel_ringbuffer *ringbuf = request->ringbuf;
 	int ret;
 
-	ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS);
+	ret = intel_logical_ring_begin(request, 8 + WA_TAIL_DWORDS);
 	if (ret)
 		return ret;
 
+	/* We're using qword write, seqno should be aligned to 8 bytes. */
+	BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
+
 	/* w/a for post sync ops following a GPGPU operation we
 	 * need a prior CS_STALL, which is emitted by the flush
 	 * following the batch.
 	 */
-	intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(5));
+	intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
 	intel_logical_ring_emit(ringbuf,
 				(PIPE_CONTROL_GLOBAL_GTT_IVB |
 				 PIPE_CONTROL_CS_STALL |
@@ -1929,7 +1932,10 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
 	intel_logical_ring_emit(ringbuf, hws_seqno_address(request->ring));
 	intel_logical_ring_emit(ringbuf, 0);
 	intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
+	/* We're thrashing one dword of HWS. */
+	intel_logical_ring_emit(ringbuf, 0);
 	intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
+	intel_logical_ring_emit(ringbuf, MI_NOOP);
 	return intel_logical_ring_advance_and_submit(request);
 }
 
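The two numbers move together: the PIPE_CONTROL grows from five dwords to six so the seqno can land as a naturally aligned qword (sacrificing one dword of the HWS page), and an MI_NOOP pads the tail. Counting the emission per the hunks above explains the new ring_begin budget:

	/* New emission, counted in dwords:
	 *   GFX_OP_PIPE_CONTROL(6) packet		6
	 *   MI_USER_INTERRUPT				1
	 *   MI_NOOP					1
	 *					total	8   (was 5 + 1 = 6)
	 */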
@@ -2876,25 +2876,28 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
 			     const struct drm_plane_state *pstate,
 			     int y)
 {
-	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
+	struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
 	struct drm_framebuffer *fb = pstate->fb;
+	uint32_t width = 0, height = 0;
+
+	width = drm_rect_width(&intel_pstate->src) >> 16;
+	height = drm_rect_height(&intel_pstate->src) >> 16;
+
+	if (intel_rotation_90_or_270(pstate->rotation))
+		swap(width, height);
 
 	/* for planar format */
 	if (fb->pixel_format == DRM_FORMAT_NV12) {
 		if (y)  /* y-plane data rate */
-			return intel_crtc->config->pipe_src_w *
-				intel_crtc->config->pipe_src_h *
+			return width * height *
 				drm_format_plane_cpp(fb->pixel_format, 0);
 		else    /* uv-plane data rate */
-			return (intel_crtc->config->pipe_src_w/2) *
-				(intel_crtc->config->pipe_src_h/2) *
+			return (width / 2) * (height / 2) *
 				drm_format_plane_cpp(fb->pixel_format, 1);
 	}
 
 	/* for packed formats */
-	return intel_crtc->config->pipe_src_w *
-		intel_crtc->config->pipe_src_h *
-		drm_format_plane_cpp(fb->pixel_format, 0);
+	return width * height * drm_format_plane_cpp(fb->pixel_format, 0);
 }
 
 /*
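The change swaps the pipe's source size for the plane's own clipped source size (the src rectangle is 16.16 fixed point, hence the >> 16), so a small plane no longer gets billed for the whole pipe. A worked example with assumed sizes: a 256x256 overlay in a 4-byte-per-pixel format now contributes 256 * 256 * 4 = 262,144 bytes to the data rate, instead of the 4096 * 2160 * 4 = 35,389,440 bytes a 4096x2160 pipe would have charged it before.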
@@ -2973,8 +2976,9 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 		struct drm_framebuffer *fb = plane->state->fb;
 		int id = skl_wm_plane_id(intel_plane);
 
-		if (fb == NULL)
+		if (!to_intel_plane_state(plane->state)->visible)
 			continue;
+
 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
 			continue;
 
@@ -3000,7 +3004,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
 			uint16_t plane_blocks, y_plane_blocks = 0;
 			int id = skl_wm_plane_id(intel_plane);
 
-			if (pstate->fb == NULL)
+			if (!to_intel_plane_state(pstate)->visible)
 				continue;
 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
 				continue;
@@ -3123,26 +3127,36 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
 {
 	struct drm_plane *plane = &intel_plane->base;
 	struct drm_framebuffer *fb = plane->state->fb;
+	struct intel_plane_state *intel_pstate =
+					to_intel_plane_state(plane->state);
 	uint32_t latency = dev_priv->wm.skl_latency[level];
 	uint32_t method1, method2;
 	uint32_t plane_bytes_per_line, plane_blocks_per_line;
 	uint32_t res_blocks, res_lines;
 	uint32_t selected_result;
 	uint8_t cpp;
+	uint32_t width = 0, height = 0;
 
-	if (latency == 0 || !cstate->base.active || !fb)
+	if (latency == 0 || !cstate->base.active || !intel_pstate->visible)
 		return false;
 
+	width = drm_rect_width(&intel_pstate->src) >> 16;
+	height = drm_rect_height(&intel_pstate->src) >> 16;
+
+	if (intel_rotation_90_or_270(plane->state->rotation))
+		swap(width, height);
+
 	cpp = drm_format_plane_cpp(fb->pixel_format, 0);
 	method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate),
 				 cpp, latency);
 	method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate),
 				 cstate->base.adjusted_mode.crtc_htotal,
-				 cstate->pipe_src_w,
-				 cpp, fb->modifier[0],
+				 width,
+				 cpp,
+				 fb->modifier[0],
 				 latency);
 
-	plane_bytes_per_line = cstate->pipe_src_w * cpp;
+	plane_bytes_per_line = width * cpp;
 	plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
 
 	if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
@@ -968,7 +968,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
 
 	/* WaForceContextSaveRestoreNonCoherent:skl,bxt */
 	tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
-	if (IS_SKL_REVID(dev, SKL_REVID_F0, SKL_REVID_F0) ||
+	if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) ||
 	    IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
 		tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
 	WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
@@ -1085,7 +1085,8 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
 	WA_SET_BIT_MASKED(HIZ_CHICKEN,
 			  BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
 
-	if (IS_SKL_REVID(dev, 0, SKL_REVID_F0)) {
+	/* This is tied to WaForceContextSaveRestoreNonCoherent */
+	if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) {
 		/*
 		 *Use Force Non-Coherent whenever executing a 3D context. This
 		 * is a workaround for a possible hang in the unlikely event
@@ -2090,10 +2091,12 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_object *obj = ringbuf->obj;
+	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
+	unsigned flags = PIN_OFFSET_BIAS | 4096;
 	int ret;
 
 	if (HAS_LLC(dev_priv) && !obj->stolen) {
-		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, 0);
+		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, flags);
 		if (ret)
 			return ret;
 
@@ -2109,7 +2112,8 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
 			return -ENOMEM;
 		}
 	} else {
-		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
+		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
+					    flags | PIN_MAPPABLE);
 		if (ret)
 			return ret;
 
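PIN_OFFSET_BIAS carries a minimum acceptable GGTT offset in the low bits of the flags word, so the combination above asks the allocator for any placement at or above one page and leaves offset 0 permanently vacant; both the LLC path and the mappable fallback now pass the same bias. The intent, restated:

	/* Never place a ringbuffer at GGTT offset 0: wraparound there was
	 * observed to hang the GPU, root cause unknown at the time. */
	unsigned flags = PIN_OFFSET_BIAS | 4096;	/* min offset = 4 KiB */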
@@ -2454,11 +2458,12 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
 	if (unlikely(total_bytes > remain_usable)) {
 		/*
 		 * The base request will fit but the reserved space
-		 * falls off the end. So only need to to wait for the
-		 * reserved size after flushing out the remainder.
+		 * falls off the end. So don't need an immediate wrap
+		 * and only need to effectively wait for the reserved
+		 * size space from the start of ringbuffer.
 		 */
 		wait_bytes = remain_actual + ringbuf->reserved_size;
 		need_wrap = true;
 	} else if (total_bytes > ringbuf->space) {
 		/* No wrapping required, just waiting. */
 		wait_bytes = total_bytes;
@@ -1189,7 +1189,11 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
 	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
 		dev_priv->uncore.funcs.force_wake_get =
 			fw_domains_get_with_thread_status;
-		dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
+		if (IS_HASWELL(dev))
+			dev_priv->uncore.funcs.force_wake_put =
+				fw_domains_put_with_fifo;
+		else
+			dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
 		fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
 			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
 	} else if (IS_IVYBRIDGE(dev)) {
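Haswell needs the FIFO-checking variant on the put side: queued register writes go through the GT FIFO, which must be drained when forcewake is released. A sketch of the likely shape of that helper, following 4.6-era intel_uncore.c conventions rather than anything shown in this diff (the drain-helper name is an assumption):

	static void fw_domains_put_with_fifo(struct drm_i915_private *dev_priv,
					     enum forcewake_domains fw_domains)
	{
		/* Release forcewake, then verify the GT FIFO has room. */
		fw_domains_put(dev_priv, fw_domains);
		gen6_gt_check_fifodbg(dev_priv);	/* assumed helper */
	}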
@@ -1276,18 +1276,18 @@ nouveau_connector_create(struct drm_device *dev, int index)
 		break;
 	default:
 		if (disp->dithering_mode) {
+			nv_connector->dithering_mode = DITHERING_MODE_AUTO;
 			drm_object_attach_property(&connector->base,
 						   disp->dithering_mode,
 						   nv_connector->
 						   dithering_mode);
-			nv_connector->dithering_mode = DITHERING_MODE_AUTO;
 		}
 		if (disp->dithering_depth) {
+			nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
 			drm_object_attach_property(&connector->base,
 						   disp->dithering_depth,
 						   nv_connector->
 						   dithering_depth);
-			nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
 		}
 		break;
 	}
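The fix is pure ordering: drm_object_attach_property() records its third argument as the property's initial value, so the default must be assigned before the attach or userspace sees the stale pre-init value. As a reusable pattern, lifted from the hunk above:

	/* Set the backing field first, then attach, so the property's
	 * reported initial value is the real default. */
	nv_connector->dithering_mode = DITHERING_MODE_AUTO;
	drm_object_attach_property(&connector->base, disp->dithering_mode,
				   nv_connector->dithering_mode);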
@@ -1832,6 +1832,8 @@ gf100_gr_init(struct gf100_gr *gr)
 
 	gf100_gr_mmio(gr, gr->func->mmio);
 
+	nvkm_mask(device, TPC_UNIT(0, 0, 0x05c), 0x00000001, 0x00000001);
+
 	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
 	for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
 		do {
@@ -62,10 +62,6 @@ bool radeon_has_atpx(void) {
 	return radeon_atpx_priv.atpx_detected;
 }
 
-bool radeon_has_atpx_dgpu_power_cntl(void) {
-	return radeon_atpx_priv.atpx.functions.power_cntl;
-}
-
 /**
  * radeon_atpx_call - call an ATPX method
 *
@@ -145,6 +141,13 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mask)
  */
 static int radeon_atpx_validate(struct radeon_atpx *atpx)
 {
+	/* make sure required functions are enabled */
+	/* dGPU power control is required */
+	if (atpx->functions.power_cntl == false) {
+		printk("ATPX dGPU power cntl not present, forcing\n");
+		atpx->functions.power_cntl = true;
+	}
+
 	if (atpx->functions.px_params) {
 		union acpi_object *info;
 		struct atpx_px_params output;
@@ -2002,10 +2002,12 @@ radeon_add_atom_connector(struct drm_device *dev,
 						      rdev->mode_info.dither_property,
 						      RADEON_FMT_DITHER_DISABLE);
 
-		if (radeon_audio != 0)
+		if (radeon_audio != 0) {
 			drm_object_attach_property(&radeon_connector->base.base,
 						   rdev->mode_info.audio_property,
 						   RADEON_AUDIO_AUTO);
+			radeon_connector->audio = RADEON_AUDIO_AUTO;
+		}
 		if (ASIC_IS_DCE5(rdev))
 			drm_object_attach_property(&radeon_connector->base.base,
 						   rdev->mode_info.output_csc_property,
@@ -2130,6 +2132,7 @@ radeon_add_atom_connector(struct drm_device *dev,
 			drm_object_attach_property(&radeon_connector->base.base,
 						   rdev->mode_info.audio_property,
 						   RADEON_AUDIO_AUTO);
+			radeon_connector->audio = RADEON_AUDIO_AUTO;
 		}
 		if (connector_type == DRM_MODE_CONNECTOR_DVII) {
 			radeon_connector->dac_load_detect = true;
@@ -2185,6 +2188,7 @@ radeon_add_atom_connector(struct drm_device *dev,
 			drm_object_attach_property(&radeon_connector->base.base,
 						   rdev->mode_info.audio_property,
 						   RADEON_AUDIO_AUTO);
+			radeon_connector->audio = RADEON_AUDIO_AUTO;
 		}
 		if (ASIC_IS_DCE5(rdev))
 			drm_object_attach_property(&radeon_connector->base.base,
@@ -2237,6 +2241,7 @@ radeon_add_atom_connector(struct drm_device *dev,
 			drm_object_attach_property(&radeon_connector->base.base,
 						   rdev->mode_info.audio_property,
 						   RADEON_AUDIO_AUTO);
+			radeon_connector->audio = RADEON_AUDIO_AUTO;
 		}
 		if (ASIC_IS_DCE5(rdev))
 			drm_object_attach_property(&radeon_connector->base.base,
@@ -103,12 +103,6 @@ static const char radeon_family_name[][16] = {
 	"LAST",
 };
 
-#if defined(CONFIG_VGA_SWITCHEROO)
-bool radeon_has_atpx_dgpu_power_cntl(void);
-#else
-static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
-#endif
-
 #define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)
 #define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
 
@@ -1305,9 +1299,9 @@ int radeon_device_init(struct radeon_device *rdev,
 	}
 	rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS);
 
-	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
-		radeon_family_name[rdev->family], pdev->vendor, pdev->device,
-		pdev->subsystem_vendor, pdev->subsystem_device);
+	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
+		 radeon_family_name[rdev->family], pdev->vendor, pdev->device,
+		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
 
 	/* mutex initialization are all done here so we
 	 * can recall function without having locking issues */
@@ -1439,7 +1433,7 @@ int radeon_device_init(struct radeon_device *rdev,
 	 * ignore it */
 	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
 
-	if ((rdev->flags & RADEON_IS_PX) && radeon_has_atpx_dgpu_power_cntl())
+	if (rdev->flags & RADEON_IS_PX)
 		runtime = true;
 	vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
 	if (runtime)
@@ -235,6 +235,8 @@ static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 {
 	struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
 
+	if (radeon_ttm_tt_has_userptr(bo->ttm))
+		return -EPERM;
 	return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
 }
 
@@ -2931,6 +2931,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
+	{ PCI_VENDOR_ID_ATI, 0x6810, 0x1682, 0x9275, 0, 120000 },
 	{ 0, 0, 0, 0 },
 };
 
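The new row extends the existing clock cap to the XFX R9 270X (device 0x6810, subvendor 0x1682). Entries hold vendor, device, subsystem vendor, subsystem device, then sclk and mclk limits, and are matched by walking the zero-terminated table; roughly, with field names assumed to mirror the struct si_dpm_quirk definition earlier in the file:

	struct si_dpm_quirk *p = si_dpm_quirk_list;
	while (p && p->chip_device != 0) {
		if (p->chip_vendor == rdev->pdev->vendor &&
		    p->chip_device == rdev->pdev->device &&
		    p->subsys_vendor == rdev->pdev->subsystem_vendor &&
		    p->subsys_device == rdev->pdev->subsystem_device) {
			max_sclk = p->max_sclk;	/* 0 here means "no cap" */
			max_mclk = p->max_mclk;
			break;
		}
		++p;
	}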
@@ -39,6 +39,8 @@ static inline bool drm_arch_can_wc_memory(void)
 {
 #if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
 	return false;
+#elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON3)
+	return false;
 #else
 	return true;
 #endif
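Loongson-3 thereby joins non-coherent PowerPC in reporting that write-combined CPU mappings cannot be trusted. Drivers consume the helper as a gate when choosing caching attributes for GPU memory; schematically (the flag names below are invented for illustration):

	if (drm_arch_can_wc_memory())
		bo_flags |= PLACEMENT_WC;	/* hypothetical flag */
	else
		bo_flags |= PLACEMENT_CACHED;	/* hypothetical flag */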