drm/amdgpu: separate gpu address from bo pin

The GPU address can still be obtained via amdgpu_bo_gpu_offset() if needed.

Signed-off-by: Junwei Zhang <Jerry.Zhang@amd.com>
Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent b861686b18
commit 7b7c6c81b3
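Every hunk below makes the same calling-convention change: amdgpu_bo_pin() and amdgpu_bo_pin_restricted() no longer hand the GPU address back through an out parameter, and callers that still need it read it from the pinned BO with amdgpu_bo_gpu_offset(). A minimal sketch of the before/after pattern (illustrative only; bo, r and gpu_addr are placeholder names, not a specific call site from this patch):

	/* old convention: the address was returned through an out parameter */
	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, &gpu_addr);

	/* new convention: pin only pins; query the offset separately */
	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (!r)
		gpu_addr = amdgpu_bo_gpu_offset(bo);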
@@ -251,7 +251,6 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
 	struct amdgpu_bo *bo = NULL;
 	struct amdgpu_bo_param bp;
 	int r;
-	uint64_t gpu_addr_tmp = 0;
 	void *cpu_ptr_tmp = NULL;
 
 	memset(&bp, 0, sizeof(bp));
@@ -275,8 +274,7 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
 		goto allocate_mem_reserve_bo_failed;
 	}
 
-	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT,
-			  &gpu_addr_tmp);
+	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
 	if (r) {
 		dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r);
 		goto allocate_mem_pin_bo_failed;
@@ -290,7 +288,7 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
 	}
 
 	*mem_obj = bo;
-	*gpu_addr = gpu_addr_tmp;
+	*gpu_addr = amdgpu_bo_gpu_offset(bo);
 	*cpu_ptr = cpu_ptr_tmp;
 
 	amdgpu_bo_unreserve(bo);
@@ -1587,7 +1587,7 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
 		goto bo_reserve_failed;
 	}
 
-	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
+	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
 	if (ret) {
 		pr_err("Failed to pin bo. ret %d\n", ret);
 		goto pin_failed;
@@ -95,7 +95,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
 	r = amdgpu_bo_reserve(sobj, false);
 	if (unlikely(r != 0))
 		goto out_cleanup;
-	r = amdgpu_bo_pin(sobj, sdomain, &saddr);
+	r = amdgpu_bo_pin(sobj, sdomain);
+	saddr = amdgpu_bo_gpu_offset(sobj);
 	amdgpu_bo_unreserve(sobj);
 	if (r) {
 		goto out_cleanup;
@@ -108,7 +109,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
 	r = amdgpu_bo_reserve(dobj, false);
 	if (unlikely(r != 0))
 		goto out_cleanup;
-	r = amdgpu_bo_pin(dobj, ddomain, &daddr);
+	r = amdgpu_bo_pin(dobj, ddomain);
+	daddr = amdgpu_bo_gpu_offset(dobj);
 	amdgpu_bo_unreserve(dobj);
 	if (r) {
 		goto out_cleanup;
@@ -2758,11 +2758,10 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
 			r = amdgpu_bo_reserve(aobj, true);
 			if (r == 0) {
-				r = amdgpu_bo_pin(aobj,
-						  AMDGPU_GEM_DOMAIN_VRAM,
-						  &amdgpu_crtc->cursor_addr);
+				r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
 				if (r != 0)
 					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
+				amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
 				amdgpu_bo_unreserve(aobj);
 			}
 		}
@@ -157,7 +157,6 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
 	struct amdgpu_bo *new_abo;
 	unsigned long flags;
 	u64 tiling_flags;
-	u64 base;
 	int i, r;
 
 	work = kzalloc(sizeof *work, GFP_KERNEL);
@@ -189,7 +188,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
 		goto cleanup;
 	}
 
-	r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev), &base);
+	r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev));
 	if (unlikely(r != 0)) {
 		DRM_ERROR("failed to pin new abo buffer before flip\n");
 		goto unreserve;
@@ -206,7 +205,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
 	amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
 	amdgpu_bo_unreserve(new_abo);
 
-	work->base = base;
+	work->base = amdgpu_bo_gpu_offset(new_abo);
 	work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
 		amdgpu_get_vblank_counter_kms(dev, work->crtc_id);
 
@@ -168,7 +168,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
 	}
 
 
-	ret = amdgpu_bo_pin(abo, domain, NULL);
+	ret = amdgpu_bo_pin(abo, domain);
 	if (ret) {
 		amdgpu_bo_unreserve(abo);
 		goto out_unref;
@@ -143,14 +143,12 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
  */
 int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
 {
-	uint64_t gpu_addr;
 	int r;
 
 	r = amdgpu_bo_reserve(adev->gart.robj, false);
 	if (unlikely(r != 0))
 		return r;
-	r = amdgpu_bo_pin(adev->gart.robj,
-			  AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
+	r = amdgpu_bo_pin(adev->gart.robj, AMDGPU_GEM_DOMAIN_VRAM);
 	if (r) {
 		amdgpu_bo_unreserve(adev->gart.robj);
 		return r;
@@ -159,7 +157,7 @@ int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
 	if (r)
 		amdgpu_bo_unpin(adev->gart.robj);
 	amdgpu_bo_unreserve(adev->gart.robj);
-	adev->gart.table_addr = gpu_addr;
+	adev->gart.table_addr = amdgpu_bo_gpu_offset(adev->gart.robj);
 	return r;
 }
 
@@ -252,11 +252,13 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
 		goto error_free;
 	}
 
-	r = amdgpu_bo_pin(*bo_ptr, domain, gpu_addr);
+	r = amdgpu_bo_pin(*bo_ptr, domain);
 	if (r) {
 		dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
 		goto error_unreserve;
 	}
+	if (gpu_addr)
+		*gpu_addr = amdgpu_bo_gpu_offset(*bo_ptr);
 
 	if (cpu_addr) {
 		r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
@@ -817,7 +819,6 @@ void amdgpu_bo_unref(struct amdgpu_bo **bo)
  * @domain: domain to be pinned to
  * @min_offset: the start of requested address range
  * @max_offset: the end of requested address range
- * @gpu_addr: GPU offset of the &amdgpu_bo buffer object
  *
  * Pins the buffer object according to requested domain and address range. If
  * the memory is unbound gart memory, binds the pages into gart table. Adjusts
@@ -835,8 +836,7 @@ void amdgpu_bo_unref(struct amdgpu_bo **bo)
  * 0 for success or a negative error code on failure.
  */
 int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
-			     u64 min_offset, u64 max_offset,
-			     u64 *gpu_addr)
+			     u64 min_offset, u64 max_offset)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	struct ttm_operation_ctx ctx = { false, false };
@@ -868,8 +868,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 			return -EINVAL;
 
 		bo->pin_count++;
-		if (gpu_addr)
-			*gpu_addr = amdgpu_bo_gpu_offset(bo);
 
 		if (max_offset != 0) {
 			u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;
@@ -912,8 +910,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 	}
 
 	bo->pin_count = 1;
-	if (gpu_addr != NULL)
-		*gpu_addr = amdgpu_bo_gpu_offset(bo);
 
 	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
 	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
@@ -931,7 +927,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
  * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
  * @bo: &amdgpu_bo buffer object to be pinned
  * @domain: domain to be pinned to
- * @gpu_addr: GPU offset of the &amdgpu_bo buffer object
 *
 * A simple wrapper to amdgpu_bo_pin_restricted().
 * Provides a simpler API for buffers that do not have any strict restrictions
@@ -940,9 +935,9 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 * Returns:
 * 0 for success or a negative error code on failure.
 */
-int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
+int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
 {
-	return amdgpu_bo_pin_restricted(bo, domain, 0, 0, gpu_addr);
+	return amdgpu_bo_pin_restricted(bo, domain, 0, 0);
 }
 
 /**
@@ -252,10 +252,9 @@ void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
 void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
 struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
 void amdgpu_bo_unref(struct amdgpu_bo **bo);
-int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr);
+int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain);
 int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
-			     u64 min_offset, u64 max_offset,
-			     u64 *gpu_addr);
+			     u64 min_offset, u64 max_offset);
 int amdgpu_bo_unpin(struct amdgpu_bo *bo);
 int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
 int amdgpu_bo_init(struct amdgpu_device *adev);
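The restricted variant follows the same split. A hedged sketch of a caller that pins into a fixed VRAM range and then needs the address, written against the new prototypes above (bo, start_offset, end_offset and addr are illustrative names, not taken from this patch):

	r = amdgpu_bo_pin_restricted(bo, AMDGPU_GEM_DOMAIN_VRAM,
				     start_offset, end_offset);
	if (!r)
		addr = amdgpu_bo_gpu_offset(bo);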
@@ -232,7 +232,7 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
 	}
 
 	/* pin buffer into GTT */
-	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
+	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
 	if (r)
 		goto error_unreserve;
 
@@ -76,11 +76,12 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 	r = amdgpu_bo_reserve(vram_obj, false);
 	if (unlikely(r != 0))
 		goto out_unref;
-	r = amdgpu_bo_pin(vram_obj, AMDGPU_GEM_DOMAIN_VRAM, &vram_addr);
+	r = amdgpu_bo_pin(vram_obj, AMDGPU_GEM_DOMAIN_VRAM);
 	if (r) {
 		DRM_ERROR("Failed to pin VRAM object\n");
 		goto out_unres;
 	}
+	vram_addr = amdgpu_bo_gpu_offset(vram_obj);
 	for (i = 0; i < n; i++) {
 		void *gtt_map, *vram_map;
 		void **gart_start, **gart_end;
@@ -97,11 +98,12 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
 		r = amdgpu_bo_reserve(gtt_obj[i], false);
 		if (unlikely(r != 0))
 			goto out_lclean_unref;
-		r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT, &gart_addr);
+		r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT);
 		if (r) {
 			DRM_ERROR("Failed to pin GTT object %d\n", i);
 			goto out_lclean_unres;
 		}
+		gart_addr = amdgpu_bo_gpu_offset(gtt_obj[i]);
 
 		r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
 		if (r) {
@@ -1695,7 +1695,7 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
 			AMDGPU_GEM_DOMAIN_VRAM,
 			adev->fw_vram_usage.start_offset,
 			(adev->fw_vram_usage.start_offset +
-			adev->fw_vram_usage.size), NULL);
+			adev->fw_vram_usage.size));
 	if (r)
 		goto error_pin;
 	r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
@@ -1855,15 +1855,14 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
 	if (unlikely(r != 0))
 		return r;
 
-	if (atomic) {
-		fb_location = amdgpu_bo_gpu_offset(abo);
-	} else {
-		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+	if (!atomic) {
+		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
 		if (unlikely(r != 0)) {
 			amdgpu_bo_unreserve(abo);
 			return -EINVAL;
 		}
 	}
+	fb_location = amdgpu_bo_gpu_offset(abo);
 
 	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
 	amdgpu_bo_unreserve(abo);
@@ -2370,13 +2369,14 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
 		return ret;
 	}
 
-	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
+	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
 	amdgpu_bo_unreserve(aobj);
 	if (ret) {
 		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
 		drm_gem_object_put_unlocked(obj);
 		return ret;
 	}
+	amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
 
 	dce_v10_0_lock_cursor(crtc, true);
 
@@ -1897,15 +1897,14 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
 	if (unlikely(r != 0))
 		return r;
 
-	if (atomic) {
-		fb_location = amdgpu_bo_gpu_offset(abo);
-	} else {
-		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+	if (!atomic) {
+		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
 		if (unlikely(r != 0)) {
 			amdgpu_bo_unreserve(abo);
 			return -EINVAL;
 		}
 	}
+	fb_location = amdgpu_bo_gpu_offset(abo);
 
 	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
 	amdgpu_bo_unreserve(abo);
@@ -2449,13 +2448,14 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
 		return ret;
 	}
 
-	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
+	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
 	amdgpu_bo_unreserve(aobj);
 	if (ret) {
 		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
 		drm_gem_object_put_unlocked(obj);
 		return ret;
 	}
+	amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
 
 	dce_v11_0_lock_cursor(crtc, true);
 
@@ -1811,15 +1811,14 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
 	if (unlikely(r != 0))
 		return r;
 
-	if (atomic) {
-		fb_location = amdgpu_bo_gpu_offset(abo);
-	} else {
-		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+	if (!atomic) {
+		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
 		if (unlikely(r != 0)) {
 			amdgpu_bo_unreserve(abo);
 			return -EINVAL;
 		}
 	}
+	fb_location = amdgpu_bo_gpu_offset(abo);
 
 	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
 	amdgpu_bo_unreserve(abo);
@@ -2263,13 +2262,14 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
 		return ret;
 	}
 
-	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
+	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
 	amdgpu_bo_unreserve(aobj);
 	if (ret) {
 		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
 		drm_gem_object_put_unlocked(obj);
 		return ret;
 	}
+	amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
 
 	dce_v6_0_lock_cursor(crtc, true);
 
@@ -1786,15 +1786,14 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
 	if (unlikely(r != 0))
 		return r;
 
-	if (atomic) {
-		fb_location = amdgpu_bo_gpu_offset(abo);
-	} else {
-		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+	if (!atomic) {
+		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
 		if (unlikely(r != 0)) {
 			amdgpu_bo_unreserve(abo);
 			return -EINVAL;
 		}
 	}
+	fb_location = amdgpu_bo_gpu_offset(abo);
 
 	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
 	amdgpu_bo_unreserve(abo);
@@ -2274,13 +2273,14 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
 		return ret;
 	}
 
-	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
+	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
 	amdgpu_bo_unreserve(aobj);
 	if (ret) {
 		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
 		drm_gem_object_put_unlocked(obj);
 		return ret;
 	}
+	amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
 
 	dce_v8_0_lock_cursor(crtc, true);
 
@@ -3094,7 +3094,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
 	else
 		domain = AMDGPU_GEM_DOMAIN_VRAM;
 
-	r = amdgpu_bo_pin(rbo, domain, &afb->address);
+	r = amdgpu_bo_pin(rbo, domain);
 	amdgpu_bo_unreserve(rbo);
 
 	if (unlikely(r != 0)) {
@@ -3102,6 +3102,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
 		return r;
 	}
+	afb->address = amdgpu_bo_gpu_offset(rbo);
 
 	amdgpu_bo_ref(rbo);
 