mirror of https://gitee.com/openkylin/linux.git
drm/amdgpu: add vm_needs_flush parameter to amdgpu_copy_buffer
This allows us to flush the system VM here.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
parent df264f9e08
commit fc9c8f5459
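For context, a minimal caller-side sketch of the new parameter (a hypothetical helper, not part of this commit; the wrapper name and the decision to flush are assumptions for illustration). Every caller converted below passes false, so behaviour is unchanged; a future caller that has just updated the system VM (GART) page tables can pass true to have a flush emitted on the ring before the SDMA copy runs:

	/* Hypothetical caller: request a system VM flush before the copy. */
	static int example_copy_with_flush(struct amdgpu_device *adev,
					   uint64_t src, uint64_t dst,
					   uint32_t size, struct dma_fence **f)
	{
		struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;

		return amdgpu_copy_buffer(ring, src, dst, size, NULL, f,
					  false /* direct_submit */,
					  true  /* vm_needs_flush */);
	}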
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -40,7 +40,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
 	for (i = 0; i < n; i++) {
 		struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
 		r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence,
-				       false);
+				       false, false);
 		if (r)
 			goto exit_do_move;
 		r = dma_fence_wait(fence, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -535,7 +535,7 @@ int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
 
 	r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr,
 			       amdgpu_bo_size(bo), resv, fence,
-			       direct);
+			       direct, false);
 	if (!r)
 		amdgpu_bo_fence(bo, *fence, true);
 
@@ -588,7 +588,7 @@ int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
 
 	r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr,
 			       amdgpu_bo_size(bo), resv, fence,
-			       direct);
+			       direct, false);
 	if (!r)
 		amdgpu_bo_fence(bo, *fence, true);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -111,7 +111,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 		amdgpu_bo_kunmap(gtt_obj[i]);
 
 		r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr,
-				       size, NULL, &fence, false);
+				       size, NULL, &fence, false, false);
 
 		if (r) {
 			DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
@@ -156,7 +156,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 		amdgpu_bo_kunmap(vram_obj);
 
 		r = amdgpu_copy_buffer(ring, vram_addr, gtt_addr,
-				       size, NULL, &fence, false);
+				       size, NULL, &fence, false, false);
 
 		if (r) {
 			DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -318,7 +318,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 
 		r = amdgpu_copy_buffer(ring, old_start, new_start,
 				       cur_pages * PAGE_SIZE,
-				       bo->resv, &next, false);
+				       bo->resv, &next, false, false);
 		if (r)
 			goto error;
 
@@ -1256,12 +1256,11 @@ int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
 	return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
 }
 
-int amdgpu_copy_buffer(struct amdgpu_ring *ring,
-		       uint64_t src_offset,
-		       uint64_t dst_offset,
-		       uint32_t byte_count,
+int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
+		       uint64_t dst_offset, uint32_t byte_count,
 		       struct reservation_object *resv,
-		       struct dma_fence **fence, bool direct_submit)
+		       struct dma_fence **fence, bool direct_submit,
+		       bool vm_needs_flush)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_job *job;
@@ -1283,6 +1282,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
 	if (r)
 		return r;
 
+	job->vm_needs_flush = vm_needs_flush;
 	if (resv) {
 		r = amdgpu_sync_resv(adev, &job->sync, resv,
 				     AMDGPU_FENCE_OWNER_UNDEFINED);
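The assignment above is the whole mechanism: the flag is simply recorded on the job and honoured at schedule time. A simplified paraphrase of the consumer side in the VM flush path of amdgpu_vm.c (an assumption for illustration; the exact condition varies by kernel version):

	/* In amdgpu_vm_flush(), a set flag forces a VM flush on rings
	 * that implement one, before the job's IBs execute.
	 */
	if (job->vm_needs_flush && ring->funcs->emit_vm_flush)
		amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);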
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -61,12 +61,11 @@ int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
 			 const struct ttm_place *place,
 			 struct ttm_mem_reg *mem);
 
-int amdgpu_copy_buffer(struct amdgpu_ring *ring,
-		       uint64_t src_offset,
-		       uint64_t dst_offset,
-		       uint32_t byte_count,
+int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
+		       uint64_t dst_offset, uint32_t byte_count,
 		       struct reservation_object *resv,
-		       struct dma_fence **fence, bool direct_submit);
+		       struct dma_fence **fence, bool direct_submit,
+		       bool vm_needs_flush);
 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 			uint32_t src_data,
 			struct reservation_object *resv,