drm/amdgpu: save and restore UVD context with suspend and resume

and revert the fixes that followed it accordingly:

Revert "drm/amdgpu: stop trying to suspend UVD sessions v2"
Revert "drm/amdgpu: fix the UVD suspend sequence order"

Signed-off-by: Leo Liu <leo.liu@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Authored by Leo Liu on 2016-04-01 10:36:06 -04:00; committed by Alex Deucher
commit 3f99dd814a (parent 749b48faaf)
5 changed files with 35 additions and 26 deletions
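
In short: instead of sending destroy messages to tear down every active UVD session on suspend, amdgpu_uvd_suspend() now snapshots the state portion of the UVD VCPU buffer object into system memory, and amdgpu_uvd_resume() copies it back (or zeroes the region when no snapshot exists). Below is a minimal, self-contained sketch of that pattern in plain C -- illustrative only, with malloc()/free() standing in for kmalloc()/kfree(), and state/saved standing in for adev->uvd.cpu_addr and adev->uvd.saved_bo:

    #include <stdlib.h>
    #include <string.h>

    static void *saved;    /* plays the role of adev->uvd.saved_bo */

    /* Suspend path: snapshot the live state region into system RAM. */
    static int state_suspend(const void *state, size_t size)
    {
            saved = malloc(size);         /* kmalloc(size, GFP_KERNEL) in the driver */
            if (!saved)
                    return -1;            /* -ENOMEM in the driver */
            memcpy(saved, state, size);
            return 0;
    }

    /* Resume path: restore the snapshot if one exists, else start clean. */
    static void state_resume(void *state, size_t size)
    {
            if (saved) {
                    memcpy(state, saved, size);
                    free(saved);          /* kfree() in the driver */
                    saved = NULL;
            } else {
                    memset(state, 0, size);   /* no snapshot: zeroed state, as before */
            }
    }

    int main(void)
    {
            char ctx[16] = "session state";

            if (state_suspend(ctx, sizeof(ctx)))
                    return 1;
            memset(ctx, 0xAA, sizeof(ctx));   /* power-off scrambles the contents */
            state_resume(ctx, sizeof(ctx));   /* ctx holds "session state" again  */
            return 0;
    }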

drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -1591,6 +1591,7 @@ struct amdgpu_uvd {
 	struct amdgpu_bo	*vcpu_bo;
 	void			*cpu_addr;
 	uint64_t		gpu_addr;
+	void			*saved_bo;
 	atomic_t		handles[AMDGPU_MAX_UVD_HANDLES];
 	struct drm_file		*filp[AMDGPU_MAX_UVD_HANDLES];
 	struct delayed_work	idle_work;

drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c

@@ -241,32 +241,34 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 
 int amdgpu_uvd_suspend(struct amdgpu_device *adev)
 {
-	struct amdgpu_ring *ring = &adev->uvd.ring;
-	int i, r;
+	unsigned size;
+	void *ptr;
+	const struct common_firmware_header *hdr;
+	int i;
 
 	if (adev->uvd.vcpu_bo == NULL)
 		return 0;
 
-	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
-		uint32_t handle = atomic_read(&adev->uvd.handles[i]);
-		if (handle != 0) {
-			struct fence *fence;
-
-			amdgpu_uvd_note_usage(adev);
-
-			r = amdgpu_uvd_get_destroy_msg(ring, handle, false, &fence);
-			if (r) {
-				DRM_ERROR("Error destroying UVD (%d)!\n", r);
-				continue;
-			}
-
-			fence_wait(fence, false);
-			fence_put(fence);
-
-			adev->uvd.filp[i] = NULL;
-			atomic_set(&adev->uvd.handles[i], 0);
-		}
-	}
+	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
+		if (atomic_read(&adev->uvd.handles[i]))
+			break;
+
+	if (i == AMDGPU_MAX_UVD_HANDLES)
+		return 0;
+
+	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
+
+	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
+	size -= le32_to_cpu(hdr->ucode_size_bytes);
+
+	ptr = adev->uvd.cpu_addr;
+	ptr += le32_to_cpu(hdr->ucode_size_bytes);
+
+	adev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
+	if (!adev->uvd.saved_bo)
+		return -ENOMEM;
+
+	memcpy(adev->uvd.saved_bo, ptr, size);
 
 	return 0;
 }
@@ -291,7 +293,12 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
 	ptr = adev->uvd.cpu_addr;
 	ptr += le32_to_cpu(hdr->ucode_size_bytes);
 
-	memset(ptr, 0, size);
+	if (adev->uvd.saved_bo != NULL) {
+		memcpy(ptr, adev->uvd.saved_bo, size);
+		kfree(adev->uvd.saved_bo);
+		adev->uvd.saved_bo = NULL;
+	} else
+		memset(ptr, 0, size);
 
 	return 0;
 }

drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c

@@ -224,11 +224,11 @@ static int uvd_v4_2_suspend(void *handle)
 	int r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	r = amdgpu_uvd_suspend(adev);
+	r = uvd_v4_2_hw_fini(adev);
 	if (r)
 		return r;
 
-	r = uvd_v4_2_hw_fini(adev);
+	r = amdgpu_uvd_suspend(adev);
 	if (r)
 		return r;

drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c

@@ -220,11 +220,11 @@ static int uvd_v5_0_suspend(void *handle)
 	int r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	r = amdgpu_uvd_suspend(adev);
+	r = uvd_v5_0_hw_fini(adev);
 	if (r)
 		return r;
 
-	r = uvd_v5_0_hw_fini(adev);
+	r = amdgpu_uvd_suspend(adev);
 	if (r)
 		return r;

drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c

@@ -214,15 +214,16 @@ static int uvd_v6_0_suspend(void *handle)
 	int r;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	r = uvd_v6_0_hw_fini(adev);
+	if (r)
+		return r;
+
 	/* Skip this for APU for now */
 	if (!(adev->flags & AMD_IS_APU)) {
 		r = amdgpu_uvd_suspend(adev);
 		if (r)
 			return r;
 	}
-	r = uvd_v6_0_hw_fini(adev);
-	if (r)
-		return r;
 
 	return r;
 }