drm/amdgpu: handle more than 10 UVD sessions (v2)

Change History
--------------

v2:
- Make the firmware version check correct. Firmware versions >= 1.80
  should all support 40 UVD instances.
- Replace AMDGPU_MAX_UVD_HANDLES with the max_handles variable.

v1:
- The firmware can handle up to 40 UVD sessions.

Signed-off-by: Arindam Nath <arindam.nath@amd.com>
Signed-off-by: Ayyappa Chandolu <ayyappa.chandolu@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit c036554170
parent aeba709a15
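As a quick illustration of the version gate this patch introduces in amdgpu_uvd_sw_init(), here is a minimal stand-alone sketch: the packed ucode_version word yields the major and minor firmware version, and anything at or above 1.80 (major 0x01, minor 80 = 0x50) gets the new 40-handle limit, while older firmware keeps the historical 10. Only the major-field shift and the comparison itself appear in the hunks below; the minor-field shift and the sample version words are assumptions for illustration.

#include <stdint.h>
#include <stdio.h>

/* Handle limits as defined by this patch. */
#define AMDGPU_DEFAULT_UVD_HANDLES	10
#define AMDGPU_MAX_UVD_HANDLES		40

/*
 * Pick the UVD handle count from the packed firmware version word.
 * Major is in bits 24..31 (as in the hunk below); the minor field is
 * assumed to sit in bits 8..15.  Firmware 1.80 (major 0x01, minor
 * 80 == 0x50) is the first build with 40-session support, so any
 * firmware at or above it gets the higher limit.
 */
static unsigned uvd_max_handles(uint32_t ucode_version)
{
	unsigned version_major = (ucode_version >> 24) & 0xff;
	unsigned version_minor = (ucode_version >> 8) & 0xff;	/* assumed shift */

	if ((version_major > 0x01) ||
	    ((version_major == 0x01) && (version_minor >= 0x50)))
		return AMDGPU_MAX_UVD_HANDLES;
	return AMDGPU_DEFAULT_UVD_HANDLES;
}

int main(void)
{
	/* Hypothetical version words: 1.66 keeps 10 handles, 1.80 gets 40. */
	printf("1.66 -> %u handles\n", uvd_max_handles(0x01004200));
	printf("1.80 -> %u handles\n", uvd_max_handles(0x01005000));
	return 0;
}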
drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1593,16 +1593,19 @@ void amdgpu_get_pcie_info(struct amdgpu_device *adev);
 /*
  * UVD
  */
-#define AMDGPU_MAX_UVD_HANDLES	10
-#define AMDGPU_UVD_STACK_SIZE	(1024*1024)
-#define AMDGPU_UVD_HEAP_SIZE	(1024*1024)
-#define AMDGPU_UVD_FIRMWARE_OFFSET 256
+#define AMDGPU_DEFAULT_UVD_HANDLES	10
+#define AMDGPU_MAX_UVD_HANDLES		40
+#define AMDGPU_UVD_STACK_SIZE		(200*1024)
+#define AMDGPU_UVD_HEAP_SIZE		(256*1024)
+#define AMDGPU_UVD_SESSION_SIZE		(50*1024)
+#define AMDGPU_UVD_FIRMWARE_OFFSET	256
 
 struct amdgpu_uvd {
 	struct amdgpu_bo	*vcpu_bo;
 	void			*cpu_addr;
 	uint64_t		gpu_addr;
 	void			*saved_bo;
+	unsigned		max_handles;
 	atomic_t		handles[AMDGPU_MAX_UVD_HANDLES];
 	struct drm_file		*filp[AMDGPU_MAX_UVD_HANDLES];
 	struct delayed_work	idle_work;
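A worked check of what the new constants above cost in VRAM (a sketch, not part of the patch): the old layout reserved a shared 1 MB stack plus 1 MB heap next to the firmware image, while the new one reserves 200 KB of stack, 256 KB of heap and a private 50 KB save area per session, matching the bo_size computation in the amdgpu_uvd_sw_init() hunk below.

#include <stdio.h>

/* Per-allocation sizes from the hunk above, in bytes. */
#define AMDGPU_UVD_STACK_SIZE	(200*1024)
#define AMDGPU_UVD_HEAP_SIZE	(256*1024)
#define AMDGPU_UVD_SESSION_SIZE	(50*1024)

int main(void)
{
	unsigned old_extra = (1024 * 1024) + (1024 * 1024);	/* old stack + heap */
	unsigned handles[] = { 10, 40 };

	for (unsigned i = 0; i < 2; i++) {
		unsigned new_extra = AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
				   + AMDGPU_UVD_SESSION_SIZE * handles[i];
		printf("%2u handles: %4u KB on top of the firmware (was %u KB)\n",
		       handles[i], new_extra / 1024, old_extra / 1024);
	}
	return 0;
}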
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -151,6 +151,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 		return r;
 	}
 
+	/* Set the default UVD handles that the firmware can handle */
+	adev->uvd.max_handles = AMDGPU_DEFAULT_UVD_HANDLES;
+
 	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
 	family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
 	version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
@@ -158,8 +161,19 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 	DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
 		version_major, version_minor, family_id);
 
+	/*
+	 * Limit the number of UVD handles depending on microcode major
+	 * and minor versions. The firmware version which has 40 UVD
+	 * instances support is 1.80. So all subsequent versions should
+	 * also have the same support.
+	 */
+	if ((version_major > 0x01) ||
+	    ((version_major == 0x01) && (version_minor >= 0x50)))
+		adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;
+
 	bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
-		 +  AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
+		  + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
+		  + AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles;
 	r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
 			     AMDGPU_GEM_DOMAIN_VRAM,
 			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
@@ -202,7 +216,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 		return r;
 	}
 
-	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+	for (i = 0; i < adev->uvd.max_handles; ++i) {
 		atomic_set(&adev->uvd.handles[i], 0);
 		adev->uvd.filp[i] = NULL;
 	}
@@ -248,7 +262,7 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 	if (adev->uvd.vcpu_bo == NULL)
 		return 0;
 
-	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
+	for (i = 0; i < adev->uvd.max_handles; ++i)
 		if (atomic_read(&adev->uvd.handles[i]))
 			break;
 
@@ -303,7 +317,7 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
 	struct amdgpu_ring *ring = &adev->uvd.ring;
 	int i, r;
 
-	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+	for (i = 0; i < adev->uvd.max_handles; ++i) {
 		uint32_t handle = atomic_read(&adev->uvd.handles[i]);
 		if (handle != 0 && adev->uvd.filp[i] == filp) {
 			struct fence *fence;
@@ -563,7 +577,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
 		amdgpu_bo_kunmap(bo);
 
 		/* try to alloc a new handle */
-		for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+		for (i = 0; i < adev->uvd.max_handles; ++i) {
 			if (atomic_read(&adev->uvd.handles[i]) == handle) {
 				DRM_ERROR("Handle 0x%x already in use!\n", handle);
 				return -EINVAL;
@@ -586,7 +600,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
 			return r;
 
 		/* validate the handle */
-		for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+		for (i = 0; i < adev->uvd.max_handles; ++i) {
 			if (atomic_read(&adev->uvd.handles[i]) == handle) {
 				if (adev->uvd.filp[i] != ctx->parser->filp) {
 					DRM_ERROR("UVD handle collision detected!\n");
@@ -601,7 +615,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
 
 	case 2:
 		/* it's a destroy msg, free the handle */
-		for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
+		for (i = 0; i < adev->uvd.max_handles; ++i)
 			atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
 		amdgpu_bo_kunmap(bo);
 		return 0;
@@ -1013,7 +1027,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
 
 	fences = amdgpu_fence_count_emitted(&adev->uvd.ring);
 
-	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
+	for (i = 0; i < adev->uvd.max_handles; ++i)
 		if (atomic_read(&adev->uvd.handles[i]))
 			++handles;
 
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -559,12 +559,13 @@ static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
 	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
 
 	addr += size;
-	size = AMDGPU_UVD_STACK_SIZE >> 3;
+	size = AMDGPU_UVD_HEAP_SIZE >> 3;
 	WREG32(mmUVD_VCPU_CACHE_OFFSET1, addr);
 	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);
 
 	addr += size;
-	size = AMDGPU_UVD_HEAP_SIZE >> 3;
+	size = (AMDGPU_UVD_STACK_SIZE +
+	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles)) >> 3;
 	WREG32(mmUVD_VCPU_CACHE_OFFSET2, addr);
 	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
 
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -272,12 +272,13 @@ static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
 	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
 
 	offset += size;
-	size = AMDGPU_UVD_STACK_SIZE;
+	size = AMDGPU_UVD_HEAP_SIZE;
 	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
 	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);
 
 	offset += size;
-	size = AMDGPU_UVD_HEAP_SIZE;
+	size = AMDGPU_UVD_STACK_SIZE +
+		(AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
 	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
 	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
 
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -272,18 +272,21 @@ static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
 	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);
 
 	offset += size;
-	size = AMDGPU_UVD_STACK_SIZE;
+	size = AMDGPU_UVD_HEAP_SIZE;
 	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
 	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);
 
 	offset += size;
-	size = AMDGPU_UVD_HEAP_SIZE;
+	size = AMDGPU_UVD_STACK_SIZE +
+		(AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
 	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
 	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
 
 	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
 	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
 	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
+
+	WREG32(mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
 }
 
 #if 0
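The three mc_resume() hunks above all reprogram the same VCPU cache windows. As a minimal sketch of the layout they now describe, window 1 covers the heap and window 2 covers the stack plus the per-session save areas, with window 0 assumed to map the firmware image as in the surrounding (unshown) code. The firmware size and handle count below are placeholders; the actual register writes scale these values per ASIC, for example the >> 3 in the uvd_v4_2 hunk.

#include <stdio.h>

#define AMDGPU_UVD_STACK_SIZE	(200*1024)
#define AMDGPU_UVD_HEAP_SIZE	(256*1024)
#define AMDGPU_UVD_SESSION_SIZE	(50*1024)

int main(void)
{
	unsigned fw_size = 192 * 1024;	/* placeholder firmware image size */
	unsigned max_handles = 40;	/* adev->uvd.max_handles after the version gate */

	/* Byte offsets/sizes of the three cache windows, laid out back to back. */
	unsigned off0 = 0,            size0 = fw_size;
	unsigned off1 = off0 + size0, size1 = AMDGPU_UVD_HEAP_SIZE;
	unsigned off2 = off1 + size1, size2 = AMDGPU_UVD_STACK_SIZE
					    + AMDGPU_UVD_SESSION_SIZE * max_handles;

	printf("win0: offset %7u  size %7u  (firmware, assumed)\n", off0, size0);
	printf("win1: offset %7u  size %7u  (heap)\n", off1, size1);
	printf("win2: offset %7u  size %7u  (stack + %u sessions)\n",
	       off2, size2, max_handles);
	return 0;
}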
drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
@@ -111,5 +111,6 @@
 #define mmUVD_MIF_RECON1_ADDR_CONFIG 0x39c5
 #define ixUVD_MIF_SCLR_ADDR_CONFIG 0x4
 #define mmUVD_JPEG_ADDR_CONFIG 0x3a1f
+#define mmUVD_GP_SCRATCH4 0x3d38
 
 #endif /* UVD_6_0_D_H */