mirror of https://gitee.com/openkylin/linux.git
drm/amdgpu: cleanup static CSA handling
Move the CSA bo_va from the VM to the fpriv structure.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent c0573af349
commit 0f4b3c6862
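In short: amdgpu_map_static_csa() now hands the CSA bo_va back through an out parameter and the caller keeps it in its amdgpu_fpriv, instead of the helper stashing it in the amdgpu_vm. A minimal standalone sketch of the new ownership, using simplified stand-in types rather than the real driver structures:

#include <stdio.h>

/* Simplified stand-ins for the driver types; the real definitions in
 * amdgpu.h and amdgpu_vm.h carry far more state. */
struct amdgpu_bo_va { int mapped; };

struct amdgpu_vm {
	int id;				/* csa_bo_va no longer lives here */
};

struct amdgpu_fpriv {
	struct amdgpu_vm vm;
	struct amdgpu_bo_va *csa_va;	/* new home of the CSA mapping */
};

/* Models the new contract: return the bo_va through an out parameter
 * instead of writing it into the VM. */
static int map_static_csa(struct amdgpu_vm *vm, struct amdgpu_bo_va **bo_va)
{
	static struct amdgpu_bo_va csa = { .mapped = 1 };

	(void)vm;
	*bo_va = &csa;
	return 0;
}

int main(void)
{
	struct amdgpu_fpriv fpriv = { .vm = { .id = 0 }, .csa_va = NULL };

	/* open path: map the CSA, keep the bo_va in the fpriv */
	if (map_static_csa(&fpriv.vm, &fpriv.csa_va))
		return 1;

	/* postclose path: the fpriv owns the pointer and clears it */
	fpriv.csa_va = NULL;

	printf("CSA bo_va is per-fpriv state now\n");
	return 0;
}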
drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -748,6 +748,7 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
 struct amdgpu_fpriv {
 	struct amdgpu_vm	vm;
 	struct amdgpu_bo_va	*prt_va;
+	struct amdgpu_bo_va	*csa_va;
 	struct mutex		bo_list_lock;
 	struct idr		bo_list_handles;
 	struct amdgpu_ctx_mgr	ctx_mgr;
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c

@@ -787,7 +787,8 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
 	if (amdgpu_sriov_vf(adev)) {
 		struct dma_fence *f;
-		bo_va = vm->csa_bo_va;
+
+		bo_va = fpriv->csa_va;
 		BUG_ON(!bo_va);
 		r = amdgpu_vm_bo_update(adev, bo_va, false);
 		if (r)
 			return r;
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c

@@ -843,7 +843,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 	}
 
 	if (amdgpu_sriov_vf(adev)) {
-		r = amdgpu_map_static_csa(adev, &fpriv->vm);
+		r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va);
 		if (r)
 			goto out_suspend;
 	}
@@ -896,8 +896,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
 	if (amdgpu_sriov_vf(adev)) {
 		/* TODO: how to handle reserve failure */
 		BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
-		amdgpu_vm_bo_rmv(adev, fpriv->vm.csa_bo_va);
-		fpriv->vm.csa_bo_va = NULL;
+		amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
+		fpriv->csa_va = NULL;
 		amdgpu_bo_unreserve(adev->virt.csa_obj);
 	}
 
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c

@@ -46,14 +46,14 @@ int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
  * address within META_DATA init package to support SRIOV gfx preemption.
  */
 
-int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+			  struct amdgpu_bo_va **bo_va)
 {
-	int r;
-	struct amdgpu_bo_va *bo_va;
 	struct ww_acquire_ctx ticket;
 	struct list_head list;
 	struct amdgpu_bo_list_entry pd;
 	struct ttm_validate_buffer csa_tv;
+	int r;
 
 	INIT_LIST_HEAD(&list);
 	INIT_LIST_HEAD(&csa_tv.head);
@@ -69,34 +69,33 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		return r;
 	}
 
-	bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
-	if (!bo_va) {
+	*bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
+	if (!*bo_va) {
 		ttm_eu_backoff_reservation(&ticket, &list);
 		DRM_ERROR("failed to create bo_va for static CSA\n");
 		return -ENOMEM;
 	}
 
-	r = amdgpu_vm_alloc_pts(adev, bo_va->vm, AMDGPU_CSA_VADDR,
+	r = amdgpu_vm_alloc_pts(adev, (*bo_va)->vm, AMDGPU_CSA_VADDR,
 				AMDGPU_CSA_SIZE);
 	if (r) {
 		DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
-		amdgpu_vm_bo_rmv(adev, bo_va);
+		amdgpu_vm_bo_rmv(adev, *bo_va);
 		ttm_eu_backoff_reservation(&ticket, &list);
 		return r;
 	}
 
-	r = amdgpu_vm_bo_map(adev, bo_va, AMDGPU_CSA_VADDR, 0,AMDGPU_CSA_SIZE,
+	r = amdgpu_vm_bo_map(adev, *bo_va, AMDGPU_CSA_VADDR, 0, AMDGPU_CSA_SIZE,
 			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
 			     AMDGPU_PTE_EXECUTABLE);
-
 	if (r) {
 		DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
-		amdgpu_vm_bo_rmv(adev, bo_va);
+		amdgpu_vm_bo_rmv(adev, *bo_va);
 		ttm_eu_backoff_reservation(&ticket, &list);
 		return r;
 	}
 
-	vm->csa_bo_va = bo_va;
 	ttm_eu_backoff_reservation(&ticket, &list);
 	return 0;
 }
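One thing the rewrite preserves: every exit path of amdgpu_map_static_csa(), success or failure, releases the reservation exactly once. A small standalone sketch of that reserve/backoff discipline; reserve_all(), backoff_all() and step() are hypothetical stand-ins, not the real ttm_eu API:

#include <stdio.h>

/* Hypothetical stand-ins that only model the discipline around
 * ttm_eu_reserve_buffers()/ttm_eu_backoff_reservation(). */
static int reserve_all(void) { return 0; }
static void backoff_all(void) { puts("reservation released"); }
static int step(int n) { return (n == 2) ? -1 : 0; }	/* step 2 fails */

/* Mirrors the shape of amdgpu_map_static_csa(): once the reservation
 * is held, each failing step releases it before returning, and the
 * success path releases it too. */
static int do_mapping(void)
{
	int r, i;

	r = reserve_all();
	if (r)
		return r;	/* nothing held yet, nothing to undo */

	for (i = 0; i < 3; i++) {
		r = step(i);
		if (r) {
			backoff_all();
			return r;
		}
	}

	backoff_all();
	return 0;
}

int main(void)
{
	return do_mapping() ? 1 : 0;
}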
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h

@@ -90,7 +90,8 @@ static inline bool is_virtual_machine(void)
 
 struct amdgpu_vm;
 int amdgpu_allocate_static_csa(struct amdgpu_device *adev);
-int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+			  struct amdgpu_bo_va **bo_va);
 void amdgpu_virt_init_setting(struct amdgpu_device *adev);
 uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
 void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h

@@ -141,8 +141,6 @@ struct amdgpu_vm {
 	u64			client_id;
 	/* dedicated to vm */
 	struct amdgpu_vm_id	*reserved_vmid[AMDGPU_MAX_VMHUBS];
-	/* each VM will map on CSA */
-	struct amdgpu_bo_va *csa_bo_va;
 
 	/* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
 	bool			use_cpu_for_update;