mirror of https://gitee.com/openkylin/linux.git
Merge branch 'msm-fixes-v5.13-rc6' into msm-next-redo
Syncing up with -rc6 fixes to avoid conflicts with a660 patches.

Signed-off-by: Rob Clark <robdclark@chromium.org>
commit 8c08c7b51a
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -157,7 +157,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 	 * GPU registers so we need to add 0x1a800 to the register value on A630
 	 * to get the right value from PM4.
 	 */
-	get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
+	get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
 		rbmemptr_stats(ring, index, alwayson_start));
 
 	/* Invalidate CCU depth and color */
@@ -187,7 +187,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 	get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
 		rbmemptr_stats(ring, index, cpcycles_end));
-	get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
+	get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
 		rbmemptr_stats(ring, index, alwayson_end));
 
 	/* Write the fence to the scratch register */
@@ -206,8 +206,8 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 	OUT_RING(ring, submit->seqno);
 
 	trace_msm_gpu_submit_flush(submit,
-		gmu_read64(&a6xx_gpu->gmu, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L,
-			REG_A6XX_GMU_ALWAYS_ON_COUNTER_H));
+		gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
+			REG_A6XX_CP_ALWAYS_ON_COUNTER_HI));
 
 	a6xx_flush(gpu, ring);
 }
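The three a6xx_submit() hunks above move the per-submit statistics sampling off the GMU's always-on counter (reachable from the CP only through the 0x1a800 alias mentioned in the comment, and not readable at all when the GMU is down) onto the CP's own always-on counter. For orientation, here is a sketch of what the get_stats_counter() helper plausibly emits; the exact CP_REG_TO_MEM field helpers are an assumption based on the packet-building style used elsewhere in this driver, not part of this diff:

/* Sketch: ask the CP to copy a 64-bit counter register pair into GPU
 * memory at 'iova' from within the ring, so the value is sampled at the
 * point the submit actually executes on the GPU.
 */
static void get_stats_counter(struct msm_ringbuffer *ring, u32 counter,
		u64 iova)
{
	OUT_PKT7(ring, CP_REG_TO_MEM, 3);
	OUT_RING(ring, CP_REG_TO_MEM_0_REG(counter) |
		CP_REG_TO_MEM_0_CNT(2) |	/* two dwords: LO then HI */
		CP_REG_TO_MEM_0_64B);		/* 64-bit destination address */
	OUT_RING(ring, lower_32_bits(iova));
	OUT_RING(ring, upper_32_bits(iova));
}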
@@ -462,6 +462,113 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
 	gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? clock_cntl_on : 0);
 }
 
+/* For a615, a616, a618, a619, a630, a640 and a680 */
+static const u32 a6xx_protect[] = {
+	A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
+	A6XX_PROTECT_RDONLY(0x00501, 0x0005),
+	A6XX_PROTECT_RDONLY(0x0050b, 0x02f4),
+	A6XX_PROTECT_NORDWR(0x0050e, 0x0000),
+	A6XX_PROTECT_NORDWR(0x00510, 0x0000),
+	A6XX_PROTECT_NORDWR(0x00534, 0x0000),
+	A6XX_PROTECT_NORDWR(0x00800, 0x0082),
+	A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
+	A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
+	A6XX_PROTECT_RDONLY(0x008de, 0x00ae),
+	A6XX_PROTECT_NORDWR(0x00900, 0x004d),
+	A6XX_PROTECT_NORDWR(0x0098d, 0x0272),
+	A6XX_PROTECT_NORDWR(0x00e00, 0x0001),
+	A6XX_PROTECT_NORDWR(0x00e03, 0x000c),
+	A6XX_PROTECT_NORDWR(0x03c00, 0x00c3),
+	A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff),
+	A6XX_PROTECT_NORDWR(0x08630, 0x01cf),
+	A6XX_PROTECT_NORDWR(0x08e00, 0x0000),
+	A6XX_PROTECT_NORDWR(0x08e08, 0x0000),
+	A6XX_PROTECT_NORDWR(0x08e50, 0x001f),
+	A6XX_PROTECT_NORDWR(0x09624, 0x01db),
+	A6XX_PROTECT_NORDWR(0x09e70, 0x0001),
+	A6XX_PROTECT_NORDWR(0x09e78, 0x0187),
+	A6XX_PROTECT_NORDWR(0x0a630, 0x01cf),
+	A6XX_PROTECT_NORDWR(0x0ae02, 0x0000),
+	A6XX_PROTECT_NORDWR(0x0ae50, 0x032f),
+	A6XX_PROTECT_NORDWR(0x0b604, 0x0000),
+	A6XX_PROTECT_NORDWR(0x0be02, 0x0001),
+	A6XX_PROTECT_NORDWR(0x0be20, 0x17df),
+	A6XX_PROTECT_NORDWR(0x0f000, 0x0bff),
+	A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
+	A6XX_PROTECT_NORDWR(0x11c00, 0x0000), /* note: infinite range */
+};
+
+/* These are for a620 and a650 */
+static const u32 a650_protect[] = {
+	A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
+	A6XX_PROTECT_RDONLY(0x00501, 0x0005),
+	A6XX_PROTECT_RDONLY(0x0050b, 0x02f4),
+	A6XX_PROTECT_NORDWR(0x0050e, 0x0000),
+	A6XX_PROTECT_NORDWR(0x00510, 0x0000),
+	A6XX_PROTECT_NORDWR(0x00534, 0x0000),
+	A6XX_PROTECT_NORDWR(0x00800, 0x0082),
+	A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
+	A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
+	A6XX_PROTECT_RDONLY(0x008de, 0x00ae),
+	A6XX_PROTECT_NORDWR(0x00900, 0x004d),
+	A6XX_PROTECT_NORDWR(0x0098d, 0x0272),
+	A6XX_PROTECT_NORDWR(0x00e00, 0x0001),
+	A6XX_PROTECT_NORDWR(0x00e03, 0x000c),
+	A6XX_PROTECT_NORDWR(0x03c00, 0x00c3),
+	A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff),
+	A6XX_PROTECT_NORDWR(0x08630, 0x01cf),
+	A6XX_PROTECT_NORDWR(0x08e00, 0x0000),
+	A6XX_PROTECT_NORDWR(0x08e08, 0x0000),
+	A6XX_PROTECT_NORDWR(0x08e50, 0x001f),
+	A6XX_PROTECT_NORDWR(0x08e80, 0x027f),
+	A6XX_PROTECT_NORDWR(0x09624, 0x01db),
+	A6XX_PROTECT_NORDWR(0x09e60, 0x0011),
+	A6XX_PROTECT_NORDWR(0x09e78, 0x0187),
+	A6XX_PROTECT_NORDWR(0x0a630, 0x01cf),
+	A6XX_PROTECT_NORDWR(0x0ae02, 0x0000),
+	A6XX_PROTECT_NORDWR(0x0ae50, 0x032f),
+	A6XX_PROTECT_NORDWR(0x0b604, 0x0000),
+	A6XX_PROTECT_NORDWR(0x0b608, 0x0007),
+	A6XX_PROTECT_NORDWR(0x0be02, 0x0001),
+	A6XX_PROTECT_NORDWR(0x0be20, 0x17df),
+	A6XX_PROTECT_NORDWR(0x0f000, 0x0bff),
+	A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
+	A6XX_PROTECT_NORDWR(0x18400, 0x1fff),
+	A6XX_PROTECT_NORDWR(0x1a800, 0x1fff),
+	A6XX_PROTECT_NORDWR(0x1f400, 0x0443),
+	A6XX_PROTECT_RDONLY(0x1f844, 0x007b),
+	A6XX_PROTECT_NORDWR(0x1f887, 0x001b),
+	A6XX_PROTECT_NORDWR(0x1f8c0, 0x0000), /* note: infinite range */
+};
+
+static void a6xx_set_cp_protect(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	const u32 *regs = a6xx_protect;
+	unsigned i, count = ARRAY_SIZE(a6xx_protect), count_max = 32;
+
+	BUILD_BUG_ON(ARRAY_SIZE(a6xx_protect) > 32);
+	BUILD_BUG_ON(ARRAY_SIZE(a650_protect) > 48);
+
+	if (adreno_is_a650(adreno_gpu)) {
+		regs = a650_protect;
+		count = ARRAY_SIZE(a650_protect);
+		count_max = 48;
+	}
+
+	/*
+	 * Enable access protection to privileged registers, fault on an access
+	 * protect violation and select the last span to protect from the start
+	 * address all the way to the end of the register address space
+	 */
+	gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, BIT(0) | BIT(1) | BIT(3));
+
+	for (i = 0; i < count - 1; i++)
+		gpu_write(gpu, REG_A6XX_CP_PROTECT(i), regs[i]);
+	/* last CP_PROTECT to have "infinite" length on the last entry */
+	gpu_write(gpu, REG_A6XX_CP_PROTECT(count_max - 1), regs[i]);
+}
+
 static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
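Note the slot assignment in a6xx_set_cp_protect(): entries 0..count-2 are written into consecutive CP_PROTECT slots, while the final entry (the one marked "infinite range") is pinned to slot count_max - 1, the last slot the hardware variant provides, since per the comment BIT(3) in CP_PROTECT_CNTL extends the last span to the end of the register address space. A standalone simulation of that assignment, using the a650 sizes from the table above:

#include <stdio.h>

int main(void)
{
	unsigned count = 39;		/* ARRAY_SIZE(a650_protect) above */
	unsigned count_max = 48;	/* CP_PROTECT slots on a650 */
	unsigned i;

	/* mirrors the loop in a6xx_set_cp_protect() */
	for (i = 0; i < count - 1; i++)
		printf("CP_PROTECT[%u] <- entry %u\n", i, i);
	/* slots 38..46 are left untouched; the catch-all lands last */
	printf("CP_PROTECT[%u] <- entry %u (infinite range)\n",
	       count_max - 1, i);
	return 0;
}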
@@ -489,7 +596,7 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
 		rgb565_predicator << 11 | amsbc << 4 | lower_bit << 1);
 	gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, lower_bit << 1);
 	gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL,
-		uavflagprd_inv >> 4 | lower_bit << 1);
+		uavflagprd_inv << 4 | lower_bit << 1);
 	gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, lower_bit << 21);
 }
 
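The one-character fix above is easy to miss: uavflagprd_inv is a small field value that belongs in the bits starting at position 4 of SP_NC_MODE_CNTL (inferred from the << 4; the exact field width is not shown here). Shifting it right by 4 zeroed it, so the field was never actually programmed. A quick demonstration, using a hypothetical field value of 2:

#include <stdio.h>

int main(void)
{
	unsigned uavflagprd_inv = 2;	/* hypothetical, for illustration */
	unsigned lower_bit = 1;

	/* buggy: 2 >> 4 == 0, the field is silently dropped */
	printf("buggy: 0x%02x\n", uavflagprd_inv >> 4 | lower_bit << 1);
	/* fixed: the field lands at bit 4 upward as intended */
	printf("fixed: 0x%02x\n", uavflagprd_inv << 4 | lower_bit << 1);
	return 0;
}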
@@ -776,41 +883,7 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
 	}
 
 	/* Protect registers from the CP */
-	gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, 0x00000003);
-
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(0),
-			A6XX_PROTECT_RDONLY(0x600, 0x51));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(1), A6XX_PROTECT_RW(0xae50, 0x2));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(2), A6XX_PROTECT_RW(0x9624, 0x13));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(3), A6XX_PROTECT_RW(0x8630, 0x8));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(4), A6XX_PROTECT_RW(0x9e70, 0x1));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(5), A6XX_PROTECT_RW(0x9e78, 0x187));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(6), A6XX_PROTECT_RW(0xf000, 0x810));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(7),
-			A6XX_PROTECT_RDONLY(0xfc00, 0x3));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(8), A6XX_PROTECT_RW(0x50e, 0x0));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(9), A6XX_PROTECT_RDONLY(0x50f, 0x0));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(10), A6XX_PROTECT_RW(0x510, 0x0));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(11),
-			A6XX_PROTECT_RDONLY(0x0, 0x4f9));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(12),
-			A6XX_PROTECT_RDONLY(0x501, 0xa));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(13),
-			A6XX_PROTECT_RDONLY(0x511, 0x44));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(14), A6XX_PROTECT_RW(0xe00, 0xe));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(15), A6XX_PROTECT_RW(0x8e00, 0x0));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(16), A6XX_PROTECT_RW(0x8e50, 0xf));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(17), A6XX_PROTECT_RW(0xbe02, 0x0));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(18),
-			A6XX_PROTECT_RW(0xbe20, 0x11f3));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(19), A6XX_PROTECT_RW(0x800, 0x82));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(20), A6XX_PROTECT_RW(0x8a0, 0x8));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(21), A6XX_PROTECT_RW(0x8ab, 0x19));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(22), A6XX_PROTECT_RW(0x900, 0x4d));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(23), A6XX_PROTECT_RW(0x98d, 0x76));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(24),
-			A6XX_PROTECT_RDONLY(0x980, 0x4));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(25), A6XX_PROTECT_RW(0xa630, 0x0));
+	a6xx_set_cp_protect(gpu);
 
 	/* Enable expanded apriv for targets that support it */
 	if (gpu->hw_apriv) {
@@ -1211,7 +1284,7 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
 	if (ret)
 		return ret;
 
-	if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami)
+	if (a6xx_gpu->shadow_bo)
 		for (i = 0; i < gpu->nr_rings; i++)
 			a6xx_gpu->shadow[i] = 0;
 
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -44,7 +44,7 @@ struct a6xx_gpu {
  * REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
  * registers starting at _reg.
  */
-#define A6XX_PROTECT_RW(_reg, _len) \
+#define A6XX_PROTECT_NORDWR(_reg, _len) \
 	((1 << 31) | \
 	(((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF))
 
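Given the macro's packing (base register in bits 17:0, span length from bit 18 up, and bit 31 marking an entry that blocks reads as well as writes; bit 31 doubles as the flag, so practical spans here stay at or below 0x1fff), a protect table entry can be decoded like this. A self-contained illustration; the field names are mine, not the hardware's:

#include <stdint.h>
#include <stdio.h>

/* Decode one packed CP_PROTECT entry per the A6XX_PROTECT_* macros. */
static void decode_protect(uint32_t entry)
{
	uint32_t reg = entry & 0x3ffff;		/* bits 17:0 - base register */
	uint32_t len = (entry >> 18) & 0x1fff;	/* span length */
	int nordwr = !!(entry & (1u << 31));	/* set: block reads too */

	printf("0x%05x..0x%05x %s\n", reg, reg + len,
	       nordwr ? "no read, no write" : "read-only");
}

int main(void)
{
	decode_protect((0x04ff << 18) | 0x00000);		/* RDONLY(0x00000, 0x04ff) */
	decode_protect((1u << 31) | (0x1fff << 18) | 0x1a800);	/* NORDWR(0x1a800, 0x1fff) */
	return 0;
}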
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
@@ -432,6 +432,7 @@ static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw,
 	pll_freq += div_u64(tmp64, multiplier);
 
 	vco_rate = pll_freq;
+	pll_10nm->vco_current_rate = vco_rate;
 
 	DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
 	    pll_10nm->phy->id, (unsigned long)vco_rate, dec, frac);
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -460,6 +460,7 @@ static unsigned long dsi_pll_7nm_vco_recalc_rate(struct clk_hw *hw,
 	pll_freq += div_u64(tmp64, multiplier);
 
 	vco_rate = pll_freq;
+	pll_7nm->vco_current_rate = vco_rate;
 
 	DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
 	    pll_7nm->phy->id, (unsigned long)vco_rate, dec, frac);
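Both PHY hunks stash the frequency computed by the recalc callback into vco_current_rate, so later divider programming starts from what the hardware is actually running rather than a stale cached value. The context lines reconstruct the rate from the integer and fractional dividers; a rough standalone rendering of that math, assuming an 18-bit fractional divider (the real width comes from the PLL programming guide, and all values below are illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ref = 19200000;	/* 19.2 MHz reference, typical for these PHYs */
	uint32_t dec = 78;		/* hypothetical integer divider readback */
	uint32_t frac = 0x2aaab;	/* hypothetical fractional readback */
	uint64_t multiplier = 1 << 18;	/* assumed fractional precision */

	/* vco = ref * (dec + frac / 2^18), kept in integer math */
	uint64_t vco_rate = ref * dec + (ref * frac) / multiplier;

	printf("vco_current_rate = %llu Hz\n", (unsigned long long)vco_rate);
	return 0;
}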
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -1241,6 +1241,13 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
 
 		to_msm_bo(obj)->vram_node = &vma->node;
 
+		/* Call chain get_pages() -> update_inactive() tries to
+		 * access msm_obj->mm_list, but it is not initialized yet.
+		 * To avoid NULL pointer dereference error, initialize
+		 * mm_list to be empty.
+		 */
+		INIT_LIST_HEAD(&msm_obj->mm_list);
+
 		msm_gem_lock(obj);
 		pages = get_pages(obj);
 		msm_gem_unlock(obj);
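For context on why the added INIT_LIST_HEAD() call is enough: a kernel list_head embedded in freshly allocated, zeroed memory has NULL next/prev pointers, so any traversal or list_del() on it dereferences NULL, whereas an initialized head points at itself and is a valid empty list. A userspace re-creation of the idiom (the kernel's own list.h does the same thing):

#include <assert.h>
#include <stddef.h>

struct list_head {
	struct list_head *next, *prev;
};

/* same semantics as the kernel's INIT_LIST_HEAD() */
static void init_list_head(struct list_head *h)
{
	h->next = h;
	h->prev = h;
}

static int list_empty(const struct list_head *h)
{
	return h->next == h;
}

int main(void)
{
	struct list_head mm_list = { NULL, NULL };	/* as after kzalloc() */

	init_list_head(&mm_list);	/* what the patch adds, in effect */
	assert(list_empty(&mm_list));	/* now safe to test and iterate */
	return 0;
}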