mirror of https://gitee.com/openkylin/linux.git
drm fixes for 5.10-rc6

Merge tag 'drm-fixes-2020-11-27-1' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Unfortunately this has a bit of thanksgiving stuffing in it, as it is
  a bit larger (at least the vc4 patches) than I'd like at this point in
  time.

  The main thing is it has a bunch of regression fixes for reports in
  the last couple of weeks: ast, nouveau and the amdgpu ttm init fix,
  along with the usual selection of amdgpu and i915 fixes.

  The vc4 fixes are few but they are real fixes. The nastiest one is for
  systems where a 2.4GHz WiFi adapter sits next to an HDMI signal whose
  clock lands in the same frequency range: with too little shielding,
  the two interfere, so the fix adjusts the mode clock to steer it out
  of the WiFi channels in that case.

  Hopefully you can merge this between turkey slices, and next week
  should be quieter.

  ast:
   - LUT loading regression fix

  nouveau:
   - relocations regression fix

  amdgpu:
   - ttm init oops fix
   - runtime pm fix
   - SI UVD suspend/resume fix
   - HDCP fix for headless cards
   - Sienna Cichlid golden register update

  i915:
   - fix Perf/OA workaround register corruption (Lionel)
   - correct a comment statement in GVT (Yan)
   - fix GT enable/disable interrupts, including a race condition that
     prevented the GPU from going idle (Chris)
   - free stale request on destroying the virtual engine (Chris)

  exynos:
   - config dependency fix

  mediatek:
   - unused variable removal
   - horizontal front/back porch formula fix

  vc4:
   - WiFi and HDMI interference fix
   - mode rejection fixes
   - use-after-free fix
   - some code cleanups"

* tag 'drm-fixes-2020-11-27-1' of git://anongit.freedesktop.org/drm/drm: (28 commits)
  drm/nouveau: fix relocations applying logic and a double-free
  drm/ast: Reload gamma LUT after changing primary plane's color format
  drm/amdgpu: Fix size calculation when init onchip memory
  drm/amdgpu: update golden setting for sienna_cichlid
  drm/amd/display: Avoid HDCP initialization in devices without output
  drm/i915/gt: Free stale request on destroying the virtual engine
  drm/i915/gt: Don't cancel the interrupt shadow too early
  drm/i915/gt: Track signaled breadcrumbs outside of the breadcrumb spinlock
  drm/amdgpu: fix a page fault
  drm/amdgpu: fix SI UVD firmware validate resume fail
  drm/amd/amdgpu: fix null pointer in runtime pm
  drm/i915/gt: Defer enabling the breadcrumb interrupt to after submission
  drm/i915/gvt: correct a false comment of flag F_UNALIGN
  drm/i915/perf: workaround register corruption in OATAILPTR
  drm/vc4: kms: Don't disable the muxing of an active CRTC
  drm/vc4: kms: Store the unassigned channel list in the state
  drm/exynos: depend on COMMON_CLK to fix compile tests
  drm/mediatek: dsi: Modify horizontal front/back porch byte formula
  drm/vc4: hdmi: Disable Wifi Frequencies
  dt-bindings: display: Add a property to deal with WiFi coexistence
  ...
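For a back-of-the-envelope view of the interference fix (illustrative numbers only; the real check is in the vc4_hdmi hunk below): a 1440p@60 mode clock of 241500 kHz gives a TMDS rate of 2.415 GHz, inside WiFi channel 1 (2.400-2.422 GHz), while the substitute clock of 238560 kHz lands safely below that band.

	/* Standalone sketch of the evasive-clock arithmetic; not driver code. */
	#include <stdio.h>

	int main(void)
	{
		unsigned long long clock_khz = 241500;	/* 1440p@60 */
		/* The TMDS bit rate is 10x the pixel rate. */
		unsigned long long tmds = clock_khz * 1000ULL * 10;

		if (tmds >= 2400000000ULL && tmds <= 2422000000ULL)
			clock_khz = 238560;	/* steer below the WiFi band */

		printf("TMDS rate: %llu Hz\n", clock_khz * 1000ULL * 10);
		return 0;
	}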
commit 6910b67689
@@ -76,6 +76,12 @@ properties:
   resets:
     maxItems: 1
 
+  wifi-2.4ghz-coexistence:
+    type: boolean
+    description: >
+      Should the pixel frequencies in the WiFi frequencies range be
+      avoided?
+
 required:
   - compatible
   - reg

@@ -4852,7 +4852,7 @@ int amdgpu_device_baco_enter(struct drm_device *dev)
 	if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
 		return -ENOTSUPP;
 
-	if (ras && ras->supported)
+	if (ras && ras->supported && adev->nbio.funcs->enable_doorbell_interrupt)
 		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
 
 	return amdgpu_dpm_baco_enter(adev);
@@ -4871,7 +4871,7 @@ int amdgpu_device_baco_exit(struct drm_device *dev)
 	if (ret)
 		return ret;
 
-	if (ras && ras->supported)
+	if (ras && ras->supported && adev->nbio.funcs->enable_doorbell_interrupt)
 		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
 
 	return 0;

@@ -69,10 +69,10 @@ static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
 
 static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
 				   unsigned int type,
-				   uint64_t size)
+				   uint64_t size_in_page)
 {
 	return ttm_range_man_init(&adev->mman.bdev, type,
-				  false, size >> PAGE_SHIFT);
+				  false, size_in_page);
 }
 
 /**

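The bug fixed here was a unit mix-up: amdgpu_ttm_init_on_chip() is handed a value that is already a page count, so shifting it by PAGE_SHIFT again converted twice and shrank the on-chip region. A toy illustration with made-up numbers:

	/* Hypothetical numbers, not driver code. */
	#define PAGE_SHIFT 12	/* 4 KiB pages */

	unsigned long long size_in_page = 0x40000;	/* 1 GiB worth of 4 KiB pages */
	unsigned long long before = size_in_page >> PAGE_SHIFT;	/* 64 pages = 256 KiB */
	unsigned long long after  = size_in_page;	/* the full 1 GiB, as intended */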
@@ -67,6 +67,7 @@ struct amdgpu_uvd {
 	unsigned harvest_config;
 	/* store image width to adjust nb memory state */
 	unsigned decode_image_width;
+	uint32_t keyselect;
 };
 
 int amdgpu_uvd_sw_init(struct amdgpu_device *adev);

@@ -3105,6 +3105,8 @@ static const struct soc15_reg_golden golden_settings_gc_10_3[] =
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_EXCEPTION_CONTROL, 0x7fff0f1f, 0x00b80000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0 ,mmGCEA_SDP_TAG_RESERVE0, 0xffffffff, 0x10100100),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCEA_SDP_TAG_RESERVE1, 0xffffffff, 0x17000088),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Sienna_Cichlid, 0x1ff1ffff, 0x00000500),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PC_CNTL, 0x003fffff, 0x00280400),
 	SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2A_ADDR_MATCH_MASK, 0xffffffff, 0xffffffcf),

@@ -277,15 +277,8 @@ static void uvd_v3_1_mc_resume(struct amdgpu_device *adev)
  */
 static int uvd_v3_1_fw_validate(struct amdgpu_device *adev)
 {
-	void *ptr;
-	uint32_t ucode_len, i;
-	uint32_t keysel;
-
-	ptr = adev->uvd.inst[0].cpu_addr;
-	ptr += 192 + 16;
-	memcpy(&ucode_len, ptr, 4);
-	ptr += ucode_len;
-	memcpy(&keysel, ptr, 4);
+	int i;
+	uint32_t keysel = adev->uvd.keyselect;
 
 	WREG32(mmUVD_FW_START, keysel);
 
@@ -550,6 +543,8 @@ static int uvd_v3_1_sw_init(void *handle)
 	struct amdgpu_ring *ring;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int r;
+	void *ptr;
+	uint32_t ucode_len;
 
 	/* UVD TRAP */
 	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
@@ -571,6 +566,13 @@ static int uvd_v3_1_sw_init(void *handle)
 	if (r)
 		return r;
 
+	/* Retrieval firmware validate key */
+	ptr = adev->uvd.inst[0].cpu_addr;
+	ptr += 192 + 16;
+	memcpy(&ucode_len, ptr, 4);
+	ptr += ucode_len;
+	memcpy(&adev->uvd.keyselect, ptr, 4);
+
 	r = amdgpu_uvd_entity_init(adev);
 
 	return r;

@@ -1041,7 +1041,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 	amdgpu_dm_init_color_mod();
 
 #ifdef CONFIG_DRM_AMD_DC_HDCP
-	if (adev->asic_type >= CHIP_RAVEN) {
+	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
 
 		if (!adev->dm.hdcp_workqueue)

@@ -742,7 +742,6 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
 	case DRM_MODE_DPMS_SUSPEND:
 		if (ast->tx_chip_type == AST_TX_DP501)
 			ast_set_dp501_video_output(crtc->dev, 1);
-		ast_crtc_load_lut(ast, crtc);
 		break;
 	case DRM_MODE_DPMS_OFF:
 		if (ast->tx_chip_type == AST_TX_DP501)
@@ -777,6 +776,21 @@ static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc,
 	return 0;
 }
 
+static void
+ast_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state)
+{
+	struct ast_private *ast = to_ast_private(crtc->dev);
+	struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc->state);
+	struct ast_crtc_state *old_ast_crtc_state = to_ast_crtc_state(old_crtc_state);
+
+	/*
+	 * The gamma LUT has to be reloaded after changing the primary
+	 * plane's color format.
+	 */
+	if (old_ast_crtc_state->format != ast_crtc_state->format)
+		ast_crtc_load_lut(ast, crtc);
+}
+
 static void
 ast_crtc_helper_atomic_enable(struct drm_crtc *crtc,
 			      struct drm_crtc_state *old_crtc_state)
@@ -830,6 +844,7 @@ ast_crtc_helper_atomic_disable(struct drm_crtc *crtc,
 
 static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = {
 	.atomic_check = ast_crtc_helper_atomic_check,
+	.atomic_flush = ast_crtc_helper_atomic_flush,
 	.atomic_enable = ast_crtc_helper_atomic_enable,
 	.atomic_disable = ast_crtc_helper_atomic_disable,
 };

@@ -1,7 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0-only
 config DRM_EXYNOS
 	tristate "DRM Support for Samsung SoC Exynos Series"
-	depends on OF && DRM && (ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || ARCH_MULTIPLATFORM || COMPILE_TEST)
+	depends on OF && DRM && COMMON_CLK
+	depends on ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || ARCH_MULTIPLATFORM || COMPILE_TEST
 	depends on MMU
 	select DRM_KMS_HELPER
 	select VIDEOMODE_HELPERS

@@ -30,18 +30,21 @@
 #include "i915_trace.h"
 #include "intel_breadcrumbs.h"
 #include "intel_context.h"
 #include "intel_engine_pm.h"
 #include "intel_gt_pm.h"
 #include "intel_gt_requests.h"
 
-static void irq_enable(struct intel_engine_cs *engine)
+static bool irq_enable(struct intel_engine_cs *engine)
 {
 	if (!engine->irq_enable)
-		return;
+		return false;
 
 	/* Caller disables interrupts */
 	spin_lock(&engine->gt->irq_lock);
 	engine->irq_enable(engine);
 	spin_unlock(&engine->gt->irq_lock);
+
+	return true;
 }
 
 static void irq_disable(struct intel_engine_cs *engine)
@@ -57,12 +60,11 @@ static void irq_disable(struct intel_engine_cs *engine)
 
 static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
 {
 	lockdep_assert_held(&b->irq_lock);
 
 	if (!b->irq_engine || b->irq_armed)
 		return;
 
-	if (!intel_gt_pm_get_if_awake(b->irq_engine->gt))
+	/*
+	 * Since we are waiting on a request, the GPU should be busy
+	 * and should have its own rpm reference.
+	 */
+	if (GEM_WARN_ON(!intel_gt_pm_get_if_awake(b->irq_engine->gt)))
 		return;
 
 	/*
@@ -73,25 +75,24 @@ static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
 	 */
 	WRITE_ONCE(b->irq_armed, true);
 
-	/*
-	 * Since we are waiting on a request, the GPU should be busy
-	 * and should have its own rpm reference. This is tracked
-	 * by i915->gt.awake, we can forgo holding our own wakref
-	 * for the interrupt as before i915->gt.awake is released (when
-	 * the driver is idle) we disarm the breadcrumbs.
-	 */
+	/* Requests may have completed before we could enable the interrupt. */
+	if (!b->irq_enabled++ && irq_enable(b->irq_engine))
+		irq_work_queue(&b->irq_work);
+}
 
-	if (!b->irq_enabled++)
-		irq_enable(b->irq_engine);
+static void intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
+{
+	if (!b->irq_engine)
+		return;
+
+	spin_lock(&b->irq_lock);
+	if (!b->irq_armed)
+		__intel_breadcrumbs_arm_irq(b);
+	spin_unlock(&b->irq_lock);
 }
 
 static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
 {
 	lockdep_assert_held(&b->irq_lock);
 
 	if (!b->irq_engine || !b->irq_armed)
 		return;
 
 	GEM_BUG_ON(!b->irq_enabled);
 	if (!--b->irq_enabled)
 		irq_disable(b->irq_engine);
@@ -105,8 +106,6 @@ static void add_signaling_context(struct intel_breadcrumbs *b,
 {
 	intel_context_get(ce);
 	list_add_tail(&ce->signal_link, &b->signalers);
-	if (list_is_first(&ce->signal_link, &b->signalers))
-		__intel_breadcrumbs_arm_irq(b);
 }
 
 static void remove_signaling_context(struct intel_breadcrumbs *b,
@@ -174,34 +173,65 @@ static void add_retire(struct intel_breadcrumbs *b, struct intel_timeline *tl)
 		intel_engine_add_retire(b->irq_engine, tl);
 }
 
-static bool __signal_request(struct i915_request *rq, struct list_head *signals)
+static bool __signal_request(struct i915_request *rq)
 {
-	clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
-
 	if (!__dma_fence_signal(&rq->fence)) {
 		i915_request_put(rq);
 		return false;
 	}
 
-	list_add_tail(&rq->signal_link, signals);
 	return true;
 }
 
+static struct llist_node *
+slist_add(struct llist_node *node, struct llist_node *head)
+{
+	node->next = head;
+	return node;
+}
+
 static void signal_irq_work(struct irq_work *work)
 {
 	struct intel_breadcrumbs *b = container_of(work, typeof(*b), irq_work);
 	const ktime_t timestamp = ktime_get();
+	struct llist_node *signal, *sn;
 	struct intel_context *ce, *cn;
 	struct list_head *pos, *next;
-	LIST_HEAD(signal);
+
+	signal = NULL;
+	if (unlikely(!llist_empty(&b->signaled_requests)))
+		signal = llist_del_all(&b->signaled_requests);
 
 	spin_lock(&b->irq_lock);
 
-	if (list_empty(&b->signalers))
+	/*
+	 * Keep the irq armed until the interrupt after all listeners are gone.
+	 *
	 * Enabling/disabling the interrupt is rather costly, roughly a couple
+	 * of hundred microseconds. If we are proactive and enable/disable
+	 * the interrupt around every request that wants a breadcrumb, we
+	 * quickly drown in the extra orders of magnitude of latency imposed
+	 * on request submission.
+	 *
+	 * So we try to be lazy, and keep the interrupts enabled until no
+	 * more listeners appear within a breadcrumb interrupt interval (that
+	 * is until a request completes that no one cares about). The
+	 * observation is that listeners come in batches, and will often
+	 * listen to a bunch of requests in succession. Though note on icl+,
+	 * interrupts are always enabled due to concerns with rc6 being
+	 * dysfunctional with per-engine interrupt masking.
+	 *
+	 * We also try to avoid raising too many interrupts, as they may
+	 * be generated by userspace batches and it is unfortunately rather
+	 * too easy to drown the CPU under a flood of GPU interrupts. Thus
+	 * whenever no one appears to be listening, we turn off the interrupts.
+	 * Fewer interrupts should conserve power -- at the very least, fewer
+	 * interrupt draw less ire from other users of the system and tools
+	 * like powertop.
+	 */
+	if (!signal && b->irq_armed && list_empty(&b->signalers))
 		__intel_breadcrumbs_disarm_irq(b);
 
-	list_splice_init(&b->signaled_requests, &signal);
-
 	list_for_each_entry_safe(ce, cn, &b->signalers, signal_link) {
 		GEM_BUG_ON(list_empty(&ce->signals));
@@ -218,7 +248,10 @@ static void signal_irq_work(struct irq_work *work)
 		 * spinlock as the callback chain may end up adding
 		 * more signalers to the same context or engine.
 		 */
-		__signal_request(rq, &signal);
+		clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
+		if (__signal_request(rq))
+			/* We own signal_node now, xfer to local list */
+			signal = slist_add(&rq->signal_node, signal);
 	}
 
 	/*
@@ -238,9 +271,9 @@ static void signal_irq_work(struct irq_work *work)
 
 	spin_unlock(&b->irq_lock);
 
-	list_for_each_safe(pos, next, &signal) {
+	llist_for_each_safe(signal, sn, signal) {
 		struct i915_request *rq =
-			list_entry(pos, typeof(*rq), signal_link);
+			llist_entry(signal, typeof(*rq), signal_node);
 		struct list_head cb_list;
 
 		spin_lock(&rq->lock);
@@ -251,6 +284,9 @@ static void signal_irq_work(struct irq_work *work)
 
 		i915_request_put(rq);
 	}
+
+	if (!READ_ONCE(b->irq_armed) && !list_empty(&b->signalers))
+		intel_breadcrumbs_arm_irq(b);
 }
 
 struct intel_breadcrumbs *
@@ -264,7 +300,7 @@ intel_breadcrumbs_create(struct intel_engine_cs *irq_engine)
 
 	spin_lock_init(&b->irq_lock);
 	INIT_LIST_HEAD(&b->signalers);
-	INIT_LIST_HEAD(&b->signaled_requests);
+	init_llist_head(&b->signaled_requests);
 
 	init_irq_work(&b->irq_work, signal_irq_work);
 
@@ -292,21 +328,22 @@ void intel_breadcrumbs_reset(struct intel_breadcrumbs *b)
 
 void intel_breadcrumbs_park(struct intel_breadcrumbs *b)
 {
-	unsigned long flags;
-
 	if (!READ_ONCE(b->irq_armed))
 		return;
 
-	spin_lock_irqsave(&b->irq_lock, flags);
-	__intel_breadcrumbs_disarm_irq(b);
-	spin_unlock_irqrestore(&b->irq_lock, flags);
-
-	if (!list_empty(&b->signalers))
-		irq_work_queue(&b->irq_work);
+	/* Kick the work once more to drain the signalers */
+	irq_work_sync(&b->irq_work);
+	while (unlikely(READ_ONCE(b->irq_armed))) {
+		local_irq_disable();
+		signal_irq_work(&b->irq_work);
+		local_irq_enable();
+		cond_resched();
+	}
+	GEM_BUG_ON(!list_empty(&b->signalers));
 }
 
 void intel_breadcrumbs_free(struct intel_breadcrumbs *b)
 {
+	irq_work_sync(&b->irq_work);
+	GEM_BUG_ON(!list_empty(&b->signalers));
+	GEM_BUG_ON(b->irq_armed);
 	kfree(b);
 }
 
@@ -327,7 +364,8 @@ static void insert_breadcrumb(struct i915_request *rq,
 	 * its signal completion.
 	 */
 	if (__request_completed(rq)) {
-		if (__signal_request(rq, &b->signaled_requests))
+		if (__signal_request(rq) &&
+		    llist_add(&rq->signal_node, &b->signaled_requests))
 			irq_work_queue(&b->irq_work);
 		return;
 	}
@@ -362,9 +400,12 @@ static void insert_breadcrumb(struct i915_request *rq,
 	GEM_BUG_ON(!check_signal_order(ce, rq));
 	set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
 
-	/* Check after attaching to irq, interrupt may have already fired. */
-	if (__request_completed(rq))
-		irq_work_queue(&b->irq_work);
+	/*
+	 * Defer enabling the interrupt to after HW submission and recheck
+	 * the request as it may have completed and raised the interrupt as
+	 * we were attaching it into the lists.
+	 */
+	irq_work_queue(&b->irq_work);
 }
 
 bool i915_request_enable_breadcrumb(struct i915_request *rq)

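In broad strokes, the breadcrumbs rework above moves completed requests from a spinlock-protected list_head onto a lock-free llist. A simplified sketch of that handoff pattern (standard kernel llist API; the real driver details are omitted):

	/* Producer (any context): publish a signaled request locklessly. */
	if (llist_add(&rq->signal_node, &b->signaled_requests))
		irq_work_queue(&b->irq_work);	/* true = list was empty, kick the worker */

	/* Consumer (irq_work): detach the whole list in one atomic op, then walk it. */
	struct llist_node *pos, *next;

	llist_for_each_safe(pos, next, llist_del_all(&b->signaled_requests)) {
		struct i915_request *rq = llist_entry(pos, typeof(*rq), signal_node);

		dma_fence_signal(&rq->fence);
		i915_request_put(rq);
	}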
@@ -35,7 +35,7 @@ struct intel_breadcrumbs {
 	struct intel_engine_cs *irq_engine;
 
 	struct list_head signalers;
-	struct list_head signaled_requests;
+	struct llist_head signaled_requests;
 
 	struct irq_work irq_work; /* for use from inside irq_lock */

@@ -182,6 +182,7 @@
 struct virtual_engine {
 	struct intel_engine_cs base;
 	struct intel_context context;
+	struct rcu_work rcu;
 
 	/*
 	 * We allow only a single request through the virtual engine at a time
@@ -5425,33 +5426,57 @@ static struct list_head *virtual_queue(struct virtual_engine *ve)
 	return &ve->base.execlists.default_priolist.requests[0];
 }
 
-static void virtual_context_destroy(struct kref *kref)
+static void rcu_virtual_context_destroy(struct work_struct *wrk)
 {
 	struct virtual_engine *ve =
-		container_of(kref, typeof(*ve), context.ref);
+		container_of(wrk, typeof(*ve), rcu.work);
 	unsigned int n;
 
-	GEM_BUG_ON(!list_empty(virtual_queue(ve)));
-	GEM_BUG_ON(ve->request);
 	GEM_BUG_ON(ve->context.inflight);
 
+	/* Preempt-to-busy may leave a stale request behind. */
+	if (unlikely(ve->request)) {
+		struct i915_request *old;
+
+		spin_lock_irq(&ve->base.active.lock);
+
+		old = fetch_and_zero(&ve->request);
+		if (old) {
+			GEM_BUG_ON(!i915_request_completed(old));
+			__i915_request_submit(old);
+			i915_request_put(old);
+		}
+
+		spin_unlock_irq(&ve->base.active.lock);
+	}
+
+	/*
+	 * Flush the tasklet in case it is still running on another core.
+	 *
+	 * This needs to be done before we remove ourselves from the siblings'
+	 * rbtrees as in the case it is running in parallel, it may reinsert
+	 * the rb_node into a sibling.
+	 */
+	tasklet_kill(&ve->base.execlists.tasklet);
+
+	/* Decouple ourselves from the siblings, no more access allowed. */
 	for (n = 0; n < ve->num_siblings; n++) {
 		struct intel_engine_cs *sibling = ve->siblings[n];
 		struct rb_node *node = &ve->nodes[sibling->id].rb;
-		unsigned long flags;
 
 		if (RB_EMPTY_NODE(node))
 			continue;
 
-		spin_lock_irqsave(&sibling->active.lock, flags);
+		spin_lock_irq(&sibling->active.lock);
 
 		/* Detachment is lazily performed in the execlists tasklet */
 		if (!RB_EMPTY_NODE(node))
 			rb_erase_cached(node, &sibling->execlists.virtual);
 
-		spin_unlock_irqrestore(&sibling->active.lock, flags);
+		spin_unlock_irq(&sibling->active.lock);
 	}
 	GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet));
+	GEM_BUG_ON(!list_empty(virtual_queue(ve)));
 
 	if (ve->context.state)
 		__execlists_context_fini(&ve->context);
@@ -5464,6 +5489,27 @@ static void virtual_context_destroy(struct kref *kref)
 	kfree(ve);
 }
 
+static void virtual_context_destroy(struct kref *kref)
+{
+	struct virtual_engine *ve =
+		container_of(kref, typeof(*ve), context.ref);
+
+	GEM_BUG_ON(!list_empty(&ve->context.signals));
+
+	/*
+	 * When destroying the virtual engine, we have to be aware that
+	 * it may still be in use from an hardirq/softirq context causing
+	 * the resubmission of a completed request (background completion
+	 * due to preempt-to-busy). Before we can free the engine, we need
+	 * to flush the submission code and tasklets that are still potentially
+	 * accessing the engine. Flushing the tasklets requires process context,
+	 * and since we can guard the resubmit onto the engine with an RCU read
+	 * lock, we can delegate the free of the engine to an RCU worker.
+	 */
+	INIT_RCU_WORK(&ve->rcu, rcu_virtual_context_destroy);
+	queue_rcu_work(system_wq, &ve->rcu);
+}
+
 static void virtual_engine_initial_hint(struct virtual_engine *ve)
 {
 	int swp;

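The fix above is an instance of a general pattern: the kref release can fire from hardirq/softirq context, but the teardown needs process context (tasklet_kill() must not be called from interrupt context), so the free is bounced to an RCU-delayed worker. A minimal sketch of the shape, reduced from the code above:

	struct foo {
		struct kref ref;
		struct rcu_work rcu;
	};

	static void foo_free_worker(struct work_struct *wrk)
	{
		struct foo *f = container_of(wrk, typeof(*f), rcu.work);

		/* Process context: safe to kill tasklets, take locks, kfree(). */
		kfree(f);
	}

	static void foo_release(struct kref *ref)
	{
		struct foo *f = container_of(ref, typeof(*f), ref);

		/* Run the worker after an RCU grace period, in process context. */
		INIT_RCU_WORK(&f->rcu, foo_free_worker);
		queue_rcu_work(system_wq, &f->rcu);
	}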
@@ -255,7 +255,7 @@ struct intel_gvt_mmio {
 #define F_CMD_ACCESS	(1 << 3)
 /* This reg has been accessed by a VM */
 #define F_ACCESSED	(1 << 4)
-/* This reg has been accessed through GPU commands */
+/* This reg could be accessed by unaligned address */
 #define F_UNALIGN	(1 << 6)
 /* This reg is in GVT's mmio save-restor list and in hardware
  * logical context image

@@ -909,8 +909,13 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
 					     DRM_I915_PERF_RECORD_OA_REPORT_LOST);
 		if (ret)
 			return ret;
-		intel_uncore_write(uncore, oastatus_reg,
-				   oastatus & ~GEN8_OASTATUS_REPORT_LOST);
+
+		intel_uncore_rmw(uncore, oastatus_reg,
+				 GEN8_OASTATUS_COUNTER_OVERFLOW |
+				 GEN8_OASTATUS_REPORT_LOST,
+				 IS_GEN_RANGE(uncore->i915, 8, 10) ?
+				 (GEN8_OASTATUS_HEAD_POINTER_WRAP |
+				  GEN8_OASTATUS_TAIL_POINTER_WRAP) : 0);
 	}
 
 	return gen8_append_oa_reports(stream, buf, count, offset);

|
@ -676,6 +676,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
|
|||
#define GEN7_OASTATUS2_MEM_SELECT_GGTT (1 << 0) /* 0: PPGTT, 1: GGTT */
|
||||
|
||||
#define GEN8_OASTATUS _MMIO(0x2b08)
|
||||
#define GEN8_OASTATUS_TAIL_POINTER_WRAP (1 << 17)
|
||||
#define GEN8_OASTATUS_HEAD_POINTER_WRAP (1 << 16)
|
||||
#define GEN8_OASTATUS_OVERRUN_STATUS (1 << 3)
|
||||
#define GEN8_OASTATUS_COUNTER_OVERFLOW (1 << 2)
|
||||
#define GEN8_OASTATUS_OABUFFER_OVERFLOW (1 << 1)
|
||||
|
|
|
@@ -176,7 +176,11 @@
 	struct intel_context *context;
 	struct intel_ring *ring;
 	struct intel_timeline __rcu *timeline;
-	struct list_head signal_link;
+
+	union {
+		struct list_head signal_link;
+		struct llist_node signal_node;
+	};
 
 	/*
 	 * The rcu epoch of when this request was allocated. Used to judiciously

@@ -522,15 +522,6 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
 	return 0;
 }
 
-static void mtk_dpi_encoder_destroy(struct drm_encoder *encoder)
-{
-	drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs mtk_dpi_encoder_funcs = {
-	.destroy = mtk_dpi_encoder_destroy,
-};
-
 static int mtk_dpi_bridge_attach(struct drm_bridge *bridge,
 				 enum drm_bridge_attach_flags flags)
 {

@@ -444,7 +444,10 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
 	u32 horizontal_sync_active_byte;
 	u32 horizontal_backporch_byte;
 	u32 horizontal_frontporch_byte;
+	u32 horizontal_front_back_byte;
+	u32 data_phy_cycles_byte;
 	u32 dsi_tmp_buf_bpp, data_phy_cycles;
+	u32 delta;
 	struct mtk_phy_timing *timing = &dsi->phy_timing;
 
 	struct videomode *vm = &dsi->vm;
@@ -466,50 +469,30 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
 	horizontal_sync_active_byte = (vm->hsync_len * dsi_tmp_buf_bpp - 10);
 
 	if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
-		horizontal_backporch_byte = vm->hback_porch * dsi_tmp_buf_bpp;
+		horizontal_backporch_byte = vm->hback_porch * dsi_tmp_buf_bpp - 10;
 	else
 		horizontal_backporch_byte = (vm->hback_porch + vm->hsync_len) *
-					    dsi_tmp_buf_bpp;
+					    dsi_tmp_buf_bpp - 10;
 
 	data_phy_cycles = timing->lpx + timing->da_hs_prepare +
-			  timing->da_hs_zero + timing->da_hs_exit;
-
-	if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
-		if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp >
-		    data_phy_cycles * dsi->lanes + 18) {
-			horizontal_frontporch_byte =
-				vm->hfront_porch * dsi_tmp_buf_bpp -
-				(data_phy_cycles * dsi->lanes + 18) *
-				vm->hfront_porch /
-				(vm->hfront_porch + vm->hback_porch);
-
-			horizontal_backporch_byte =
-				horizontal_backporch_byte -
-				(data_phy_cycles * dsi->lanes + 18) *
-				vm->hback_porch /
-				(vm->hfront_porch + vm->hback_porch);
-		} else {
-			DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n");
-			horizontal_frontporch_byte = vm->hfront_porch *
-						     dsi_tmp_buf_bpp;
-		}
+			  timing->da_hs_zero + timing->da_hs_exit + 3;
+
+	delta = dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST ? 18 : 12;
+
+	horizontal_frontporch_byte = vm->hfront_porch * dsi_tmp_buf_bpp;
+	horizontal_front_back_byte = horizontal_frontporch_byte + horizontal_backporch_byte;
+	data_phy_cycles_byte = data_phy_cycles * dsi->lanes + delta;
+
+	if (horizontal_front_back_byte > data_phy_cycles_byte) {
+		horizontal_frontporch_byte -= data_phy_cycles_byte *
+					      horizontal_frontporch_byte /
+					      horizontal_front_back_byte;
+
+		horizontal_backporch_byte -= data_phy_cycles_byte *
+					     horizontal_backporch_byte /
+					     horizontal_front_back_byte;
 	} else {
-		if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp >
-		    data_phy_cycles * dsi->lanes + 12) {
-			horizontal_frontporch_byte =
-				vm->hfront_porch * dsi_tmp_buf_bpp -
-				(data_phy_cycles * dsi->lanes + 12) *
-				vm->hfront_porch /
-				(vm->hfront_porch + vm->hback_porch);
-			horizontal_backporch_byte = horizontal_backporch_byte -
-				(data_phy_cycles * dsi->lanes + 12) *
-				vm->hback_porch /
-				(vm->hfront_porch + vm->hback_porch);
-		} else {
-			DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n");
-			horizontal_frontporch_byte = vm->hfront_porch *
-						     dsi_tmp_buf_bpp;
-		}
+		DRM_WARN("HFP + HBP less than d-phy, FPS will under 60Hz\n");
 	}
 
 	writel(horizontal_sync_active_byte, dsi->regs + DSI_HSA_WC);

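Numerically, the new formula deducts the D-PHY overhead from HFP and HBP in proportion to their sizes instead of charging it all to the front porch. With made-up numbers, hfront_porch = 40 px and hback_porch = 60 px at 3 bytes/px in sync-pulse mode:

	/* Illustration only, not driver code. */
	unsigned int hfp_byte = 40 * 3;			/* 120 */
	unsigned int hbp_byte = 60 * 3 - 10;		/* 170 */
	unsigned int phy_byte = 150;			/* d-phy cycles in bytes */
	unsigned int total = hfp_byte + hbp_byte;	/* 290 */

	if (total > phy_byte) {
		hfp_byte -= phy_byte * hfp_byte / total;	/* 120 - 62 = 58 */
		hbp_byte -= phy_byte * hbp_byte / total;	/* 170 - 87 = 83 */
	}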
@@ -558,8 +558,10 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 			NV_PRINTK(err, cli, "validating bo list\n");
 		validate_fini(op, chan, NULL, NULL);
 		return ret;
+	} else if (ret > 0) {
+		*apply_relocs = true;
 	}
-	*apply_relocs = ret;
+
 	return 0;
 }
 
@@ -662,7 +664,6 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
 		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
 	}
 
-	u_free(reloc);
 	return ret;
 }
 
@@ -872,9 +873,10 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 				break;
 			}
 		}
-		u_free(reloc);
 	}
 out_prevalid:
+	if (!IS_ERR(reloc))
+		u_free(reloc);
 	u_free(bo);
 	u_free(push);

@@ -219,6 +219,7 @@ struct vc4_dev {
 
 	struct drm_modeset_lock ctm_state_lock;
 	struct drm_private_obj ctm_manager;
+	struct drm_private_obj hvs_channels;
 	struct drm_private_obj load_tracker;
 
 	/* List of vc4_debugfs_info_entry for adding to debugfs once
@@ -531,6 +532,9 @@ struct vc4_crtc_state {
 		unsigned int top;
 		unsigned int bottom;
 	} margins;
+
+	/* Transitional state below, only valid during atomic commits */
+	bool update_muxing;
 };
 
 #define VC4_HVS_CHANNEL_DISABLED ((unsigned int)-1)

@@ -760,12 +760,54 @@ static void vc4_hdmi_encoder_enable(struct drm_encoder *encoder)
 {
 }
 
+#define WIFI_2_4GHz_CH1_MIN_FREQ	2400000000ULL
+#define WIFI_2_4GHz_CH1_MAX_FREQ	2422000000ULL
+
+static int vc4_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
+					 struct drm_crtc_state *crtc_state,
+					 struct drm_connector_state *conn_state)
+{
+	struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+	struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+	unsigned long long pixel_rate = mode->clock * 1000;
+	unsigned long long tmds_rate;
+
+	if (vc4_hdmi->variant->unsupported_odd_h_timings &&
+	    ((mode->hdisplay % 2) || (mode->hsync_start % 2) ||
+	     (mode->hsync_end % 2) || (mode->htotal % 2)))
+		return -EINVAL;
+
+	/*
+	 * The 1440p@60 pixel rate is in the same range than the first
+	 * WiFi channel (between 2.4GHz and 2.422GHz with 22MHz
+	 * bandwidth). Slightly lower the frequency to bring it out of
+	 * the WiFi range.
+	 */
+	tmds_rate = pixel_rate * 10;
+	if (vc4_hdmi->disable_wifi_frequencies &&
+	    (tmds_rate >= WIFI_2_4GHz_CH1_MIN_FREQ &&
+	     tmds_rate <= WIFI_2_4GHz_CH1_MAX_FREQ)) {
+		mode->clock = 238560;
+		pixel_rate = mode->clock * 1000;
+	}
+
+	if (pixel_rate > vc4_hdmi->variant->max_pixel_clock)
+		return -EINVAL;
+
+	return 0;
+}
+
 static enum drm_mode_status
 vc4_hdmi_encoder_mode_valid(struct drm_encoder *encoder,
 			    const struct drm_display_mode *mode)
 {
 	struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
 
 	if (vc4_hdmi->variant->unsupported_odd_h_timings &&
 	    ((mode->hdisplay % 2) || (mode->hsync_start % 2) ||
 	     (mode->hsync_end % 2) || (mode->htotal % 2)))
 		return MODE_H_ILLEGAL;
 
 	if ((mode->clock * 1000) > vc4_hdmi->variant->max_pixel_clock)
 		return MODE_CLOCK_HIGH;
 
@@ -773,6 +815,7 @@ vc4_hdmi_encoder_mode_valid(struct drm_encoder *encoder,
 }
 
 static const struct drm_encoder_helper_funcs vc4_hdmi_encoder_helper_funcs = {
+	.atomic_check = vc4_hdmi_encoder_atomic_check,
 	.mode_valid = vc4_hdmi_encoder_mode_valid,
 	.disable = vc4_hdmi_encoder_disable,
 	.enable = vc4_hdmi_encoder_enable,
@@ -1694,6 +1737,9 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
 		vc4_hdmi->hpd_active_low = hpd_gpio_flags & OF_GPIO_ACTIVE_LOW;
 	}
 
+	vc4_hdmi->disable_wifi_frequencies =
+		of_property_read_bool(dev->of_node, "wifi-2.4ghz-coexistence");
+
 	pm_runtime_enable(dev);
 
 	drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
@@ -1817,6 +1863,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi0_variant = {
 		PHY_LANE_2,
 		PHY_LANE_CK,
 	},
+	.unsupported_odd_h_timings = true,
 
 	.init_resources = vc5_hdmi_init_resources,
 	.csc_setup = vc5_hdmi_csc_setup,
@@ -1842,6 +1889,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi1_variant = {
 		PHY_LANE_CK,
 		PHY_LANE_2,
 	},
+	.unsupported_odd_h_timings = true,
 
 	.init_resources = vc5_hdmi_init_resources,
 	.csc_setup = vc5_hdmi_csc_setup,

@@ -62,6 +62,9 @@ struct vc4_hdmi_variant {
 	 */
 	enum vc4_hdmi_phy_channel phy_lane_mapping[4];
 
+	/* The BCM2711 cannot deal with odd horizontal pixel timings */
+	bool unsupported_odd_h_timings;
+
 	/* Callback to get the resources (memory region, interrupts,
 	 * clocks, etc) for that variant.
 	 */
@@ -139,6 +142,14 @@ struct vc4_hdmi {
 	int hpd_gpio;
 	bool hpd_active_low;
 
+	/*
+	 * On some systems (like the RPi4), some modes are in the same
+	 * frequency range than the WiFi channels (1440p@60Hz for
+	 * example). Should we take evasive actions because that system
+	 * has a wifi adapter?
+	 */
+	bool disable_wifi_frequencies;
+
 	struct cec_adapter *cec_adap;
 	struct cec_msg cec_rx_msg;
 	bool cec_tx_ok;

@@ -24,6 +24,8 @@
 #include "vc4_drv.h"
 #include "vc4_regs.h"
 
+#define HVS_NUM_CHANNELS 3
+
 struct vc4_ctm_state {
 	struct drm_private_state base;
 	struct drm_color_ctm *ctm;
@@ -35,6 +37,17 @@ static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv)
 	return container_of(priv, struct vc4_ctm_state, base);
 }
 
+struct vc4_hvs_state {
+	struct drm_private_state base;
+	unsigned int unassigned_channels;
+};
+
+static struct vc4_hvs_state *
+to_vc4_hvs_state(struct drm_private_state *priv)
+{
+	return container_of(priv, struct vc4_hvs_state, base);
+}
+
 struct vc4_load_tracker_state {
 	struct drm_private_state base;
 	u64 hvs_load;
@@ -113,7 +126,7 @@ static int vc4_ctm_obj_init(struct vc4_dev *vc4)
 	drm_atomic_private_obj_init(&vc4->base, &vc4->ctm_manager, &ctm_state->base,
 				    &vc4_ctm_state_funcs);
 
-	return drmm_add_action(&vc4->base, vc4_ctm_obj_fini, NULL);
+	return drmm_add_action_or_reset(&vc4->base, vc4_ctm_obj_fini, NULL);
 }
 
 /* Converts a DRM S31.32 value to the HW S0.9 format. */
@@ -169,6 +182,19 @@ vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
 		  VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
 }
 
+static struct vc4_hvs_state *
+vc4_hvs_get_global_state(struct drm_atomic_state *state)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
+	struct drm_private_state *priv_state;
+
+	priv_state = drm_atomic_get_private_obj_state(state, &vc4->hvs_channels);
+	if (IS_ERR(priv_state))
+		return ERR_CAST(priv_state);
+
+	return to_vc4_hvs_state(priv_state);
+}
+
 static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4,
 				     struct drm_atomic_state *state)
 {
@@ -213,10 +239,7 @@ static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
 {
 	struct drm_crtc_state *crtc_state;
 	struct drm_crtc *crtc;
-	unsigned char dsp2_mux = 0;
-	unsigned char dsp3_mux = 3;
-	unsigned char dsp4_mux = 3;
-	unsigned char dsp5_mux = 3;
+	unsigned char mux;
 	unsigned int i;
 	u32 reg;
 
@@ -224,50 +247,59 @@ static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
 		struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
 		struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
 
-		if (!crtc_state->active)
+		if (!vc4_state->update_muxing)
 			continue;
 
 		switch (vc4_crtc->data->hvs_output) {
 		case 2:
-			dsp2_mux = (vc4_state->assigned_channel == 2) ? 0 : 1;
+			mux = (vc4_state->assigned_channel == 2) ? 0 : 1;
+			reg = HVS_READ(SCALER_DISPECTRL);
+			HVS_WRITE(SCALER_DISPECTRL,
+				  (reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) |
+				  VC4_SET_FIELD(mux, SCALER_DISPECTRL_DSP2_MUX));
 			break;
 
 		case 3:
-			dsp3_mux = vc4_state->assigned_channel;
+			if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
+				mux = 3;
+			else
+				mux = vc4_state->assigned_channel;
+
+			reg = HVS_READ(SCALER_DISPCTRL);
+			HVS_WRITE(SCALER_DISPCTRL,
+				  (reg & ~SCALER_DISPCTRL_DSP3_MUX_MASK) |
+				  VC4_SET_FIELD(mux, SCALER_DISPCTRL_DSP3_MUX));
 			break;
 
 		case 4:
-			dsp4_mux = vc4_state->assigned_channel;
+			if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
+				mux = 3;
+			else
+				mux = vc4_state->assigned_channel;
+
+			reg = HVS_READ(SCALER_DISPEOLN);
+			HVS_WRITE(SCALER_DISPEOLN,
+				  (reg & ~SCALER_DISPEOLN_DSP4_MUX_MASK) |
+				  VC4_SET_FIELD(mux, SCALER_DISPEOLN_DSP4_MUX));
+
 			break;
 
 		case 5:
-			dsp5_mux = vc4_state->assigned_channel;
+			if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
+				mux = 3;
+			else
+				mux = vc4_state->assigned_channel;
+
+			reg = HVS_READ(SCALER_DISPDITHER);
+			HVS_WRITE(SCALER_DISPDITHER,
+				  (reg & ~SCALER_DISPDITHER_DSP5_MUX_MASK) |
+				  VC4_SET_FIELD(mux, SCALER_DISPDITHER_DSP5_MUX));
 			break;
 
 		default:
 			break;
 		}
 	}
-
-	reg = HVS_READ(SCALER_DISPECTRL);
-	HVS_WRITE(SCALER_DISPECTRL,
-		  (reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) |
-		  VC4_SET_FIELD(dsp2_mux, SCALER_DISPECTRL_DSP2_MUX));
-
-	reg = HVS_READ(SCALER_DISPCTRL);
-	HVS_WRITE(SCALER_DISPCTRL,
-		  (reg & ~SCALER_DISPCTRL_DSP3_MUX_MASK) |
-		  VC4_SET_FIELD(dsp3_mux, SCALER_DISPCTRL_DSP3_MUX));
-
-	reg = HVS_READ(SCALER_DISPEOLN);
-	HVS_WRITE(SCALER_DISPEOLN,
-		  (reg & ~SCALER_DISPEOLN_DSP4_MUX_MASK) |
-		  VC4_SET_FIELD(dsp4_mux, SCALER_DISPEOLN_DSP4_MUX));
-
-	reg = HVS_READ(SCALER_DISPDITHER);
-	HVS_WRITE(SCALER_DISPDITHER,
-		  (reg & ~SCALER_DISPDITHER_DSP5_MUX_MASK) |
-		  VC4_SET_FIELD(dsp5_mux, SCALER_DISPDITHER_DSP5_MUX));
 }
 
 static void
@@ -657,53 +689,123 @@ static int vc4_load_tracker_obj_init(struct vc4_dev *vc4)
 					    &load_state->base,
 					    &vc4_load_tracker_state_funcs);
 
-	return drmm_add_action(&vc4->base, vc4_load_tracker_obj_fini, NULL);
+	return drmm_add_action_or_reset(&vc4->base, vc4_load_tracker_obj_fini, NULL);
 }
 
-#define NUM_OUTPUTS  6
-#define NUM_CHANNELS 3
-
-static int
-vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
+static struct drm_private_state *
+vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj)
 {
-	unsigned long unassigned_channels = GENMASK(NUM_CHANNELS - 1, 0);
+	struct vc4_hvs_state *old_state = to_vc4_hvs_state(obj->state);
+	struct vc4_hvs_state *state;
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return NULL;
+
+	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
+
+	state->unassigned_channels = old_state->unassigned_channels;
+
+	return &state->base;
+}
+
+static void vc4_hvs_channels_destroy_state(struct drm_private_obj *obj,
+					   struct drm_private_state *state)
+{
+	struct vc4_hvs_state *hvs_state = to_vc4_hvs_state(state);
+
+	kfree(hvs_state);
+}
+
+static const struct drm_private_state_funcs vc4_hvs_state_funcs = {
+	.atomic_duplicate_state = vc4_hvs_channels_duplicate_state,
+	.atomic_destroy_state = vc4_hvs_channels_destroy_state,
+};
+
+static void vc4_hvs_channels_obj_fini(struct drm_device *dev, void *unused)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+	drm_atomic_private_obj_fini(&vc4->hvs_channels);
+}
+
+static int vc4_hvs_channels_obj_init(struct vc4_dev *vc4)
+{
+	struct vc4_hvs_state *state;
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return -ENOMEM;
+
+	state->unassigned_channels = GENMASK(HVS_NUM_CHANNELS - 1, 0);
+	drm_atomic_private_obj_init(&vc4->base, &vc4->hvs_channels,
+				    &state->base,
+				    &vc4_hvs_state_funcs);
+
+	return drmm_add_action_or_reset(&vc4->base, vc4_hvs_channels_obj_fini, NULL);
+}
+
+/*
+ * The BCM2711 HVS has up to 7 outputs connected to the pixelvalves and
+ * the TXP (and therefore all the CRTCs found on that platform).
+ *
+ * The naive (and our initial) implementation would just iterate over
+ * all the active CRTCs, try to find a suitable FIFO, and then remove it
+ * from the pool of available FIFOs. However, there are a few corner
+ * cases that need to be considered:
+ *
+ * - When running in a dual-display setup (so with two CRTCs involved),
+ *   we can update the state of a single CRTC (for example by changing
+ *   its mode using xrandr under X11) without affecting the other. In
+ *   this case, the other CRTC wouldn't be in the state at all, so we
+ *   need to consider all the running CRTCs in the DRM device to assign
+ *   a FIFO, not just the one in the state.
+ *
+ * - To fix the above, we can't use drm_atomic_get_crtc_state on all
+ *   enabled CRTCs to pull their CRTC state into the global state, since
+ *   a page flip would start considering their vblank to complete. Since
+ *   we don't have a guarantee that they are actually active, that
+ *   vblank might never happen, and shouldn't even be considered if we
+ *   want to do a page flip on a single CRTC. That can be tested by
+ *   doing a modetest -v first on HDMI1 and then on HDMI0.
+ *
+ * - Since we need the pixelvalve to be disabled and enabled back when
+ *   the FIFO is changed, we should keep the FIFO assigned for as long
+ *   as the CRTC is enabled, only considering it free again once that
+ *   CRTC has been disabled. This can be tested by booting X11 on a
+ *   single display, and changing the resolution down and then back up.
+ */
+static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
+				      struct drm_atomic_state *state)
+{
+	struct vc4_hvs_state *hvs_new_state;
 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
 	struct drm_crtc *crtc;
-	int i, ret;
+	unsigned int i;
 
-	/*
-	 * Since the HVS FIFOs are shared across all the pixelvalves and
-	 * the TXP (and thus all the CRTCs), we need to pull the current
-	 * state of all the enabled CRTCs so that an update to a single
-	 * CRTC still keeps the previous FIFOs enabled and assigned to
-	 * the same CRTCs, instead of evaluating only the CRTC being
-	 * modified.
-	 */
-	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-		struct drm_crtc_state *crtc_state;
-
-		if (!crtc->state->enable)
-			continue;
-
-		crtc_state = drm_atomic_get_crtc_state(state, crtc);
-		if (IS_ERR(crtc_state))
-			return PTR_ERR(crtc_state);
-	}
+	hvs_new_state = vc4_hvs_get_global_state(state);
+	if (!hvs_new_state)
+		return -EINVAL;
 
 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+		struct vc4_crtc_state *old_vc4_crtc_state =
+			to_vc4_crtc_state(old_crtc_state);
 		struct vc4_crtc_state *new_vc4_crtc_state =
 			to_vc4_crtc_state(new_crtc_state);
 		struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
 		unsigned int matching_channels;
 
-		if (old_crtc_state->enable && !new_crtc_state->enable)
-			new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED;
-
-		if (!new_crtc_state->enable)
+		/* Nothing to do here, let's skip it */
+		if (old_crtc_state->enable == new_crtc_state->enable)
 			continue;
 
-		if (new_vc4_crtc_state->assigned_channel != VC4_HVS_CHANNEL_DISABLED) {
-			unassigned_channels &= ~BIT(new_vc4_crtc_state->assigned_channel);
+		/* Muxing will need to be modified, mark it as such */
+		new_vc4_crtc_state->update_muxing = true;
+
+		/* If we're disabling our CRTC, we put back our channel */
+		if (!new_crtc_state->enable) {
+			hvs_new_state->unassigned_channels |= BIT(old_vc4_crtc_state->assigned_channel);
+			new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED;
 			continue;
 		}
 
@@ -731,17 +833,29 @@ vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
 		 * the future, we will need to have something smarter,
 		 * but it works so far.
 		 */
-		matching_channels = unassigned_channels & vc4_crtc->data->hvs_available_channels;
+		matching_channels = hvs_new_state->unassigned_channels & vc4_crtc->data->hvs_available_channels;
 		if (matching_channels) {
 			unsigned int channel = ffs(matching_channels) - 1;
 
 			new_vc4_crtc_state->assigned_channel = channel;
-			unassigned_channels &= ~BIT(channel);
+			hvs_new_state->unassigned_channels &= ~BIT(channel);
 		} else {
 			return -EINVAL;
 		}
 	}
 
+	return 0;
+}
+
+static int
+vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
+{
+	int ret;
+
+	ret = vc4_pv_muxing_atomic_check(dev, state);
+	if (ret)
+		return ret;
+
 	ret = vc4_ctm_atomic_check(dev, state);
 	if (ret < 0)
 		return ret;
@@ -808,6 +922,10 @@ int vc4_kms_load(struct drm_device *dev)
 	if (ret)
 		return ret;
 
+	ret = vc4_hvs_channels_obj_init(vc4);
+	if (ret)
+		return ret;
+
 	drm_mode_config_reset(dev);
 
 	drm_kms_helper_poll_init(dev);