drm/amdgpu: use amdgpu_bo_create_kernel more often

Saves us quite a bunch of lines of code.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Author:    Christian König
Date:      2017-07-27 17:24:36 +02:00
Committed: Alex Deucher
Parent:    9d903cbd99
Commit:    a4a0277789

6 changed files with 105 additions and 398 deletions
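For context, the two helpers the diff converts to are declared in amdgpu_object.h. Roughly (a sketch of their contracts at the time of this commit, not authoritative documentation): amdgpu_bo_create_kernel() allocates, pins and optionally kmaps a kernel-owned BO and returns it unreserved, while amdgpu_bo_create_reserved() does the same but hands the BO back still reserved so the caller can fill it and release it afterwards. Both reuse *bo_ptr if the caller already has a BO, which is why the "obj == NULL" guards at the call sites below can simply be dropped.

/* Sketch of the helper declarations per amdgpu_object.h of this period;
 * both outputs, gpu_addr and cpu_addr, are optional and may be NULL. */
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr);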

drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

@@ -336,35 +336,11 @@ static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
 static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
 {
-	int r;
-
-	if (adev->vram_scratch.robj == NULL) {
-		r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
-				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
-				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
-				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
-				     NULL, NULL, &adev->vram_scratch.robj);
-		if (r) {
-			return r;
-		}
-	}
-
-	r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
-	if (unlikely(r != 0))
-		return r;
-	r = amdgpu_bo_pin(adev->vram_scratch.robj,
-			  AMDGPU_GEM_DOMAIN_VRAM, &adev->vram_scratch.gpu_addr);
-	if (r) {
-		amdgpu_bo_unreserve(adev->vram_scratch.robj);
-		return r;
-	}
-	r = amdgpu_bo_kmap(adev->vram_scratch.robj,
-			   (void **)&adev->vram_scratch.ptr);
-	if (r)
-		amdgpu_bo_unpin(adev->vram_scratch.robj);
-	amdgpu_bo_unreserve(adev->vram_scratch.robj);
-
-	return r;
+	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
+				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+				       &adev->vram_scratch.robj,
+				       &adev->vram_scratch.gpu_addr,
+				       (void **)&adev->vram_scratch.ptr);
 }
 
 static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
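The teardown side is not part of the hunk above, but for symmetry: buffers created this way are normally released with amdgpu_bo_free_kernel(), which reserves, unmaps, unpins and drops the reference in one call. A hedged sketch of what the matching fini can look like:

/* Hedged sketch (not shown in this diff): the matching teardown with
 * the kernel-BO helper; it also clears the cached addresses. */
static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->vram_scratch.robj,
			      &adev->vram_scratch.gpu_addr,
			      (void **)&adev->vram_scratch.ptr);
}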

drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c

@@ -1232,23 +1232,12 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 	/* Change the size here instead of the init above so only lpfn is affected */
 	amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
 
-	r = amdgpu_bo_create(adev, adev->mc.stolen_size, PAGE_SIZE, true,
-			     AMDGPU_GEM_DOMAIN_VRAM,
-			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
-			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
-			     NULL, NULL, &adev->stollen_vga_memory);
-	if (r) {
-		return r;
-	}
-	r = amdgpu_bo_reserve(adev->stollen_vga_memory, false);
+	r = amdgpu_bo_create_kernel(adev, adev->mc.stolen_size, PAGE_SIZE,
+				    AMDGPU_GEM_DOMAIN_VRAM,
+				    &adev->stollen_vga_memory,
+				    NULL, NULL);
 	if (r)
 		return r;
-	r = amdgpu_bo_pin(adev->stollen_vga_memory, AMDGPU_GEM_DOMAIN_VRAM, NULL);
-	amdgpu_bo_unreserve(adev->stollen_vga_memory);
-	if (r) {
-		amdgpu_bo_unref(&adev->stollen_vga_memory);
-		return r;
-	}
 	DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
 		 (unsigned) (adev->mc.real_vram_size / (1024 * 1024)));
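Note the trailing NULL, NULL in the new call: both address outputs of amdgpu_bo_create_kernel() are optional. Passing NULL for cpu_addr skips the kernel mapping entirely, and NULL for gpu_addr skips reporting the pinned VA, which is all the stolen-VGA reservation needs. A minimal usage sketch (the locals bo and size are hypothetical):

/* Minimal usage sketch; 'bo' and 'size' are hypothetical locals. The
 * buffer is created and pinned but never kmapped, since cpu_addr is NULL. */
struct amdgpu_bo *bo = NULL;
int r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
				AMDGPU_GEM_DOMAIN_VRAM, &bo,
				NULL /* gpu_addr */, NULL /* cpu_addr */);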

drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c

@@ -2273,43 +2273,23 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
 	if (src_ptr) {
 		/* save restore block */
-		if (adev->gfx.rlc.save_restore_obj == NULL) {
-			r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
-					     AMDGPU_GEM_DOMAIN_VRAM,
-					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-					     NULL, NULL,
-					     &adev->gfx.rlc.save_restore_obj);
-			if (r) {
-				dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
-				return r;
-			}
-		}
-
-		r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
-		if (unlikely(r != 0)) {
-			gfx_v6_0_rlc_fini(adev);
-			return r;
-		}
-		r = amdgpu_bo_pin(adev->gfx.rlc.save_restore_obj, AMDGPU_GEM_DOMAIN_VRAM,
-				  &adev->gfx.rlc.save_restore_gpu_addr);
+		r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+					      AMDGPU_GEM_DOMAIN_VRAM,
+					      &adev->gfx.rlc.save_restore_obj,
+					      &adev->gfx.rlc.save_restore_gpu_addr,
+					      (void **)&adev->gfx.rlc.sr_ptr);
 		if (r) {
-			amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
-			dev_warn(adev->dev, "(%d) pin RLC sr bo failed\n", r);
+			dev_warn(adev->dev, "(%d) create RLC sr bo failed\n",
+				 r);
 			gfx_v6_0_rlc_fini(adev);
 			return r;
 		}
 
-		r = amdgpu_bo_kmap(adev->gfx.rlc.save_restore_obj, (void **)&adev->gfx.rlc.sr_ptr);
-		if (r) {
-			dev_warn(adev->dev, "(%d) map RLC sr bo failed\n", r);
-			gfx_v6_0_rlc_fini(adev);
-			return r;
-		}
-
 		/* write the sr buffer */
 		dst_ptr = adev->gfx.rlc.sr_ptr;
 		for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
 			dst_ptr[i] = cpu_to_le32(src_ptr[i]);
 		amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
 		amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
 	}
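All of the gfx call sites follow the recipe shown above: one amdgpu_bo_create_reserved() call replaces the old create/reserve/pin/kmap chain together with its error unwinding. A simplified sketch of the sequence the helper wraps (based on amdgpu_object.c of this period; error paths omitted):

/* Simplified sketch of amdgpu_bo_create_reserved() internals; error
 * unwinding omitted. On success the BO is returned still reserved. */
if (!*bo_ptr)				/* reuse a BO the caller already has */
	r = amdgpu_bo_create(adev, size, align, true, domain,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
			     NULL, NULL, bo_ptr);
r = amdgpu_bo_reserve(*bo_ptr, false);		/* lock the BO */
r = amdgpu_bo_pin(*bo_ptr, domain, gpu_addr);	/* pin; gpu_addr may be NULL */
if (cpu_addr)
	r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);	/* optional CPU mapping */
/* the caller fills the buffer, then kunmaps and unreserves it itself */

One subtle side effect worth flagging: the helper always passes AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, so call sites that previously passed 0 for GTT BOs now inherit those flags (which only affect VRAM placements).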
@@ -2319,39 +2299,17 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
 		adev->gfx.rlc.clear_state_size = gfx_v6_0_get_csb_size(adev);
 		dws = adev->gfx.rlc.clear_state_size + (256 / 4);
 
-		if (adev->gfx.rlc.clear_state_obj == NULL) {
-			r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
-					     AMDGPU_GEM_DOMAIN_VRAM,
-					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-					     NULL, NULL,
-					     &adev->gfx.rlc.clear_state_obj);
-			if (r) {
-				dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
-				gfx_v6_0_rlc_fini(adev);
-				return r;
-			}
-		}
-		r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
-		if (unlikely(r != 0)) {
-			gfx_v6_0_rlc_fini(adev);
-			return r;
-		}
-		r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM,
-				  &adev->gfx.rlc.clear_state_gpu_addr);
+		r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+					      AMDGPU_GEM_DOMAIN_VRAM,
+					      &adev->gfx.rlc.clear_state_obj,
+					      &adev->gfx.rlc.clear_state_gpu_addr,
+					      (void **)&adev->gfx.rlc.cs_ptr);
 		if (r) {
-			amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
-			dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r);
+			dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
 			gfx_v6_0_rlc_fini(adev);
 			return r;
 		}
 
-		r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr);
-		if (r) {
-			dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r);
-			gfx_v6_0_rlc_fini(adev);
-			return r;
-		}
-
 		/* set up the cs buffer */
 		dst_ptr = adev->gfx.rlc.cs_ptr;
 		reg_list_mc_addr = adev->gfx.rlc.clear_state_gpu_addr + 256;

drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c

@@ -2823,33 +2823,14 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
 	/* allocate space for ALL pipes (even the ones we don't own) */
 	mec_hpd_size = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec
 		* GFX7_MEC_HPD_SIZE * 2;
-	if (adev->gfx.mec.hpd_eop_obj == NULL) {
-		r = amdgpu_bo_create(adev,
-				     mec_hpd_size,
-				     PAGE_SIZE, true,
-				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
-				     &adev->gfx.mec.hpd_eop_obj);
-		if (r) {
-			dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
-			return r;
-		}
-	}
 
-	r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
-	if (unlikely(r != 0)) {
-		gfx_v7_0_mec_fini(adev);
-		return r;
-	}
-	r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT,
-			  &adev->gfx.mec.hpd_eop_gpu_addr);
+	r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
+				      AMDGPU_GEM_DOMAIN_GTT,
+				      &adev->gfx.mec.hpd_eop_obj,
+				      &adev->gfx.mec.hpd_eop_gpu_addr,
+				      (void **)&hpd);
 	if (r) {
-		dev_warn(adev->dev, "(%d) pin HDP EOP bo failed\n", r);
-		gfx_v7_0_mec_fini(adev);
-		return r;
-	}
-	r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd);
-	if (r) {
-		dev_warn(adev->dev, "(%d) map HDP EOP bo failed\n", r);
+		dev_warn(adev->dev, "(%d) create, pin or map of HDP EOP bo failed\n", r);
 		gfx_v7_0_mec_fini(adev);
 		return r;
 	}
@@ -3108,32 +3089,12 @@ static int gfx_v7_0_compute_queue_init(struct amdgpu_device *adev, int ring_id)
 	struct cik_mqd *mqd;
 	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
 
-	if (ring->mqd_obj == NULL) {
-		r = amdgpu_bo_create(adev,
-				     sizeof(struct cik_mqd),
-				     PAGE_SIZE, true,
-				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
-				     &ring->mqd_obj);
-		if (r) {
-			dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
-			return r;
-		}
-	}
-
-	r = amdgpu_bo_reserve(ring->mqd_obj, false);
-	if (unlikely(r != 0))
-		goto out;
-
-	r = amdgpu_bo_pin(ring->mqd_obj, AMDGPU_GEM_DOMAIN_GTT,
-			  &mqd_gpu_addr);
+	r = amdgpu_bo_create_reserved(adev, sizeof(struct cik_mqd), PAGE_SIZE,
+				      AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
+				      &mqd_gpu_addr, (void **)&mqd);
 	if (r) {
-		dev_warn(adev->dev, "(%d) pin MQD bo failed\n", r);
-		goto out_unreserve;
-	}
-	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&mqd);
-	if (r) {
-		dev_warn(adev->dev, "(%d) map MQD bo failed\n", r);
-		goto out_unreserve;
+		dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
+		return r;
 	}
 
 	mutex_lock(&adev->srbm_mutex);
@@ -3147,9 +3108,7 @@ static int gfx_v7_0_compute_queue_init(struct amdgpu_device *adev, int ring_id)
 	mutex_unlock(&adev->srbm_mutex);
 
 	amdgpu_bo_kunmap(ring->mqd_obj);
-out_unreserve:
 	amdgpu_bo_unreserve(ring->mqd_obj);
-out:
 
 	return 0;
 }
@@ -3432,39 +3391,17 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
 	if (src_ptr) {
 		/* save restore block */
-		if (adev->gfx.rlc.save_restore_obj == NULL) {
-			r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
-					     AMDGPU_GEM_DOMAIN_VRAM,
-					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
-					     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
-					     NULL, NULL,
-					     &adev->gfx.rlc.save_restore_obj);
-			if (r) {
-				dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
-				return r;
-			}
-		}
-
-		r = amdgpu_bo_reserve(adev->gfx.rlc.save_restore_obj, false);
-		if (unlikely(r != 0)) {
-			gfx_v7_0_rlc_fini(adev);
-			return r;
-		}
-		r = amdgpu_bo_pin(adev->gfx.rlc.save_restore_obj, AMDGPU_GEM_DOMAIN_VRAM,
-				  &adev->gfx.rlc.save_restore_gpu_addr);
+		r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+					      AMDGPU_GEM_DOMAIN_VRAM,
+					      &adev->gfx.rlc.save_restore_obj,
+					      &adev->gfx.rlc.save_restore_gpu_addr,
+					      (void **)&adev->gfx.rlc.sr_ptr);
 		if (r) {
-			amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
-			dev_warn(adev->dev, "(%d) pin RLC sr bo failed\n", r);
+			dev_warn(adev->dev, "(%d) create, pin or map of RLC sr bo failed\n", r);
 			gfx_v7_0_rlc_fini(adev);
 			return r;
 		}
 
-		r = amdgpu_bo_kmap(adev->gfx.rlc.save_restore_obj, (void **)&adev->gfx.rlc.sr_ptr);
-		if (r) {
-			dev_warn(adev->dev, "(%d) map RLC sr bo failed\n", r);
-			gfx_v7_0_rlc_fini(adev);
-			return r;
-		}
-
 		/* write the sr buffer */
 		dst_ptr = adev->gfx.rlc.sr_ptr;
 		for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
@@ -3477,39 +3414,17 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
 		/* clear state block */
 		adev->gfx.rlc.clear_state_size = dws = gfx_v7_0_get_csb_size(adev);
 
-		if (adev->gfx.rlc.clear_state_obj == NULL) {
-			r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
-					     AMDGPU_GEM_DOMAIN_VRAM,
-					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
-					     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
-					     NULL, NULL,
-					     &adev->gfx.rlc.clear_state_obj);
-			if (r) {
-				dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
-				gfx_v7_0_rlc_fini(adev);
-				return r;
-			}
-		}
-
-		r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
-		if (unlikely(r != 0)) {
-			gfx_v7_0_rlc_fini(adev);
-			return r;
-		}
-		r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM,
-				  &adev->gfx.rlc.clear_state_gpu_addr);
+		r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+					      AMDGPU_GEM_DOMAIN_VRAM,
+					      &adev->gfx.rlc.clear_state_obj,
+					      &adev->gfx.rlc.clear_state_gpu_addr,
+					      (void **)&adev->gfx.rlc.cs_ptr);
 		if (r) {
-			amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
-			dev_warn(adev->dev, "(%d) pin RLC c bo failed\n", r);
+			dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
 			gfx_v7_0_rlc_fini(adev);
 			return r;
 		}
 
-		r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr);
-		if (r) {
-			dev_warn(adev->dev, "(%d) map RLC c bo failed\n", r);
-			gfx_v7_0_rlc_fini(adev);
-			return r;
-		}
-
 		/* set up the cs buffer */
 		dst_ptr = adev->gfx.rlc.cs_ptr;
 		gfx_v7_0_get_csb_buffer(adev, dst_ptr);
@@ -3518,37 +3433,14 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
 	}
 
 	if (adev->gfx.rlc.cp_table_size) {
-		if (adev->gfx.rlc.cp_table_obj == NULL) {
-			r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
-					     AMDGPU_GEM_DOMAIN_VRAM,
-					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
-					     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
-					     NULL, NULL,
-					     &adev->gfx.rlc.cp_table_obj);
-			if (r) {
-				dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
-				gfx_v7_0_rlc_fini(adev);
-				return r;
-			}
-		}
 
-		r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
-		if (unlikely(r != 0)) {
-			dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
-			gfx_v7_0_rlc_fini(adev);
-			return r;
-		}
-		r = amdgpu_bo_pin(adev->gfx.rlc.cp_table_obj, AMDGPU_GEM_DOMAIN_VRAM,
-				  &adev->gfx.rlc.cp_table_gpu_addr);
+		r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
+					      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+					      &adev->gfx.rlc.cp_table_obj,
+					      &adev->gfx.rlc.cp_table_gpu_addr,
+					      (void **)&adev->gfx.rlc.cp_table_ptr);
 		if (r) {
-			amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
-			dev_warn(adev->dev, "(%d) pin RLC cp_table bo failed\n", r);
-			gfx_v7_0_rlc_fini(adev);
-			return r;
-		}
-		r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr);
-		if (r) {
-			dev_warn(adev->dev, "(%d) map RLC cp table bo failed\n", r);
+			dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
 			gfx_v7_0_rlc_fini(adev);
 			return r;
 		}

drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c

@@ -1278,39 +1278,17 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
 		/* clear state block */
 		adev->gfx.rlc.clear_state_size = dws = gfx_v8_0_get_csb_size(adev);
 
-		if (adev->gfx.rlc.clear_state_obj == NULL) {
-			r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
-					     AMDGPU_GEM_DOMAIN_VRAM,
-					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
-					     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
-					     NULL, NULL,
-					     &adev->gfx.rlc.clear_state_obj);
-			if (r) {
-				dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
-				gfx_v8_0_rlc_fini(adev);
-				return r;
-			}
-		}
-
-		r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
-		if (unlikely(r != 0)) {
-			gfx_v8_0_rlc_fini(adev);
-			return r;
-		}
-		r = amdgpu_bo_pin(adev->gfx.rlc.clear_state_obj, AMDGPU_GEM_DOMAIN_VRAM,
-				  &adev->gfx.rlc.clear_state_gpu_addr);
+		r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+					      AMDGPU_GEM_DOMAIN_VRAM,
+					      &adev->gfx.rlc.clear_state_obj,
+					      &adev->gfx.rlc.clear_state_gpu_addr,
+					      (void **)&adev->gfx.rlc.cs_ptr);
 		if (r) {
-			amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
-			dev_warn(adev->dev, "(%d) pin RLC cbs bo failed\n", r);
+			dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
 			gfx_v8_0_rlc_fini(adev);
 			return r;
 		}
 
-		r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, (void **)&adev->gfx.rlc.cs_ptr);
-		if (r) {
-			dev_warn(adev->dev, "(%d) map RLC cbs bo failed\n", r);
-			gfx_v8_0_rlc_fini(adev);
-			return r;
-		}
-
 		/* set up the cs buffer */
 		dst_ptr = adev->gfx.rlc.cs_ptr;
 		gfx_v8_0_get_csb_buffer(adev, dst_ptr);
@@ -1321,34 +1299,13 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
 	if ((adev->asic_type == CHIP_CARRIZO) ||
 	    (adev->asic_type == CHIP_STONEY)) {
 		adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
-		if (adev->gfx.rlc.cp_table_obj == NULL) {
-			r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
-					     AMDGPU_GEM_DOMAIN_VRAM,
-					     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
-					     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
-					     NULL, NULL,
-					     &adev->gfx.rlc.cp_table_obj);
-			if (r) {
-				dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
-				return r;
-			}
-		}
-
-		r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
-		if (unlikely(r != 0)) {
-			dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
-			return r;
-		}
-		r = amdgpu_bo_pin(adev->gfx.rlc.cp_table_obj, AMDGPU_GEM_DOMAIN_VRAM,
-				  &adev->gfx.rlc.cp_table_gpu_addr);
+		r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
+					      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+					      &adev->gfx.rlc.cp_table_obj,
+					      &adev->gfx.rlc.cp_table_gpu_addr,
+					      (void **)&adev->gfx.rlc.cp_table_ptr);
 		if (r) {
-			amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
-			dev_warn(adev->dev, "(%d) pin RLC cp table bo failed\n", r);
-			return r;
-		}
-		r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr);
-		if (r) {
-			dev_warn(adev->dev, "(%d) map RLC cp table bo failed\n", r);
+			dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
 			return r;
 		}
@@ -1389,34 +1346,13 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
 
 	mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE;
 
-	if (adev->gfx.mec.hpd_eop_obj == NULL) {
-		r = amdgpu_bo_create(adev,
-				     mec_hpd_size,
-				     PAGE_SIZE, true,
-				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
-				     &adev->gfx.mec.hpd_eop_obj);
-		if (r) {
-			dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
-			return r;
-		}
-	}
-
-	r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
-	if (unlikely(r != 0)) {
-		gfx_v8_0_mec_fini(adev);
-		return r;
-	}
-	r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT,
-			  &adev->gfx.mec.hpd_eop_gpu_addr);
+	r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
+				      AMDGPU_GEM_DOMAIN_GTT,
+				      &adev->gfx.mec.hpd_eop_obj,
+				      &adev->gfx.mec.hpd_eop_gpu_addr,
+				      (void **)&hpd);
 	if (r) {
-		dev_warn(adev->dev, "(%d) pin HDP EOP bo failed\n", r);
-		gfx_v8_0_mec_fini(adev);
-		return r;
-	}
-	r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd);
-	if (r) {
-		dev_warn(adev->dev, "(%d) map HDP EOP bo failed\n", r);
+		dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
 		gfx_v8_0_mec_fini(adev);
 		return r;
 	}

drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c

@@ -774,18 +774,16 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
 	if (cs_data) {
 		/* clear state block */
 		adev->gfx.rlc.clear_state_size = dws = gfx_v9_0_get_csb_size(adev);
-		if (adev->gfx.rlc.clear_state_obj == NULL) {
-			r = amdgpu_bo_create_kernel(adev, dws * 4, PAGE_SIZE,
-						AMDGPU_GEM_DOMAIN_VRAM,
-						&adev->gfx.rlc.clear_state_obj,
-						&adev->gfx.rlc.clear_state_gpu_addr,
-						(void **)&adev->gfx.rlc.cs_ptr);
-			if (r) {
-				dev_err(adev->dev,
-					"(%d) failed to create rlc csb bo\n", r);
-				gfx_v9_0_rlc_fini(adev);
-				return r;
-			}
+		r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+					      AMDGPU_GEM_DOMAIN_VRAM,
+					      &adev->gfx.rlc.clear_state_obj,
+					      &adev->gfx.rlc.clear_state_gpu_addr,
+					      (void **)&adev->gfx.rlc.cs_ptr);
+		if (r) {
+			dev_err(adev->dev, "(%d) failed to create rlc csb bo\n",
+				r);
+			gfx_v9_0_rlc_fini(adev);
+			return r;
 		}
 		/* set up the cs buffer */
 		dst_ptr = adev->gfx.rlc.cs_ptr;
@@ -797,18 +795,16 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
 	if (adev->asic_type == CHIP_RAVEN) {
 		/* TODO: double check the cp_table_size for RV */
 		adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
-		if (adev->gfx.rlc.cp_table_obj == NULL) {
-			r = amdgpu_bo_create_kernel(adev, adev->gfx.rlc.cp_table_size,
-						PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
-						&adev->gfx.rlc.cp_table_obj,
-						&adev->gfx.rlc.cp_table_gpu_addr,
-						(void **)&adev->gfx.rlc.cp_table_ptr);
-			if (r) {
-				dev_err(adev->dev,
-					"(%d) failed to create cp table bo\n", r);
-				gfx_v9_0_rlc_fini(adev);
-				return r;
-			}
+		r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
+					      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+					      &adev->gfx.rlc.cp_table_obj,
+					      &adev->gfx.rlc.cp_table_gpu_addr,
+					      (void **)&adev->gfx.rlc.cp_table_ptr);
+		if (r) {
+			dev_err(adev->dev,
+				"(%d) failed to create cp table bo\n", r);
+			gfx_v9_0_rlc_fini(adev);
+			return r;
 		}
 
 		rv_init_cp_jump_table(adev);
@@ -864,33 +860,13 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
 	amdgpu_gfx_compute_queue_acquire(adev);
 	mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
 
-	if (adev->gfx.mec.hpd_eop_obj == NULL) {
-		r = amdgpu_bo_create(adev,
-				     mec_hpd_size,
-				     PAGE_SIZE, true,
-				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
-				     &adev->gfx.mec.hpd_eop_obj);
-		if (r) {
-			dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
-			return r;
-		}
-	}
-
-	r = amdgpu_bo_reserve(adev->gfx.mec.hpd_eop_obj, false);
-	if (unlikely(r != 0)) {
-		gfx_v9_0_mec_fini(adev);
-		return r;
-	}
-	r = amdgpu_bo_pin(adev->gfx.mec.hpd_eop_obj, AMDGPU_GEM_DOMAIN_GTT,
-			  &adev->gfx.mec.hpd_eop_gpu_addr);
+	r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
+				      AMDGPU_GEM_DOMAIN_GTT,
+				      &adev->gfx.mec.hpd_eop_obj,
+				      &adev->gfx.mec.hpd_eop_gpu_addr,
+				      (void **)&hpd);
 	if (r) {
-		dev_warn(adev->dev, "(%d) pin HDP EOP bo failed\n", r);
-		gfx_v9_0_mec_fini(adev);
-		return r;
-	}
-	r = amdgpu_bo_kmap(adev->gfx.mec.hpd_eop_obj, (void **)&hpd);
-	if (r) {
-		dev_warn(adev->dev, "(%d) map HDP EOP bo failed\n", r);
+		dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
 		gfx_v9_0_mec_fini(adev);
 		return r;
 	}
@@ -907,42 +883,22 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
 		le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
 	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;
 
-	if (adev->gfx.mec.mec_fw_obj == NULL) {
-		r = amdgpu_bo_create(adev,
-				     mec_hdr->header.ucode_size_bytes,
-				     PAGE_SIZE, true,
-				     AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
-				     &adev->gfx.mec.mec_fw_obj);
-		if (r) {
-			dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
-			return r;
-		}
-	}
-
-	r = amdgpu_bo_reserve(adev->gfx.mec.mec_fw_obj, false);
-	if (unlikely(r != 0)) {
-		gfx_v9_0_mec_fini(adev);
-		return r;
-	}
-	r = amdgpu_bo_pin(adev->gfx.mec.mec_fw_obj, AMDGPU_GEM_DOMAIN_GTT,
-			  &adev->gfx.mec.mec_fw_gpu_addr);
-	if (r) {
-		dev_warn(adev->dev, "(%d) pin mec firmware bo failed\n", r);
-		gfx_v9_0_mec_fini(adev);
-		return r;
-	}
-	r = amdgpu_bo_kmap(adev->gfx.mec.mec_fw_obj, (void **)&fw);
+	r = amdgpu_bo_create_reserved(adev, mec_hdr->header.ucode_size_bytes,
+				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
+				      &adev->gfx.mec.mec_fw_obj,
+				      &adev->gfx.mec.mec_fw_gpu_addr,
+				      (void **)&fw);
 	if (r) {
-		dev_warn(adev->dev, "(%d) map firmware bo failed\n", r);
+		dev_warn(adev->dev, "(%d) create mec firmware bo failed\n", r);
 		gfx_v9_0_mec_fini(adev);
 		return r;
 	}
 
 	memcpy(fw, fw_data, fw_size);
 
 	amdgpu_bo_kunmap(adev->gfx.mec.mec_fw_obj);
 	amdgpu_bo_unreserve(adev->gfx.mec.mec_fw_obj);
 
 	return 0;
 }
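The MEC firmware hunk shows the whole life cycle in one place: create reserved, memcpy the ucode, kunmap, unreserve. The release side is not shown in this diff; a hedged sketch assuming the fini path uses the matching helper:

/* Hedged sketch (not part of this diff): releasing the mec BOs with
 * amdgpu_bo_free_kernel(), which reserves, unmaps, unpins and unrefs;
 * the NULL arguments mean no cached addresses need clearing. */
static void gfx_v9_0_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
}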