mirror of https://gitee.com/openkylin/linux.git
drm/amdgpu: Prepare for hmm_range_register API change (v2)
An upcoming change in the hmm_range_register API requires passing in a pointer to an hmm_mirror instead of an mm_struct. To access the hmm_mirror we need to pass bo instead of ttm to amdgpu_ttm_tt_get_user_pages, because the mirror is part of the amdgpu_mn structure, which is accessible from the bo. v2: fix building without CONFIG_HMM_MIRROR (Arnd) Signed-off-by: Philip Yang <Philip.Yang@amd.com> Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com> Reviewed-by: Alex Deucher <alexander.deucher@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
parent
3e2bb60ab2
commit
e5eaa7cc0c
|
@@ -504,7 +504,7 @@ static int init_user_pages(struct kgd_mem *mem, struct mm_struct *mm,
|
|||
goto out;
|
||||
}
|
||||
|
||||
ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, bo->tbo.ttm->pages);
|
||||
ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
|
||||
if (ret) {
|
||||
pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
|
||||
goto unregister_out;
|
||||
|
@@ -1729,8 +1729,7 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
|
|||
bo = mem->bo;
|
||||
|
||||
/* Get updated user pages */
|
||||
ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
|
||||
bo->tbo.ttm->pages);
|
||||
ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
|
||||
if (ret) {
|
||||
pr_debug("%s: Failed to get user pages: %d\n",
|
||||
__func__, ret);
|
||||
|
|
|
@@ -633,7 +633,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
|
|||
return -ENOMEM;
|
||||
}
|
||||
|
||||
r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, e->user_pages);
|
||||
r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages);
|
||||
if (r) {
|
||||
kvfree(e->user_pages);
|
||||
e->user_pages = NULL;
|
||||
|
|
|
@@ -327,8 +327,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
|
|||
}
|
||||
|
||||
if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
|
||||
r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
|
||||
bo->tbo.ttm->pages);
|
||||
r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
|
||||
if (r)
|
||||
goto release_object;
|
||||
|
||||
|
|
|
@@ -45,48 +45,11 @@
|
|||
|
||||
#include <linux/firmware.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/hmm.h>
|
||||
#include <linux/interval_tree.h>
|
||||
|
||||
#include <drm/drm.h>
|
||||
|
||||
#include "amdgpu.h"
|
||||
#include "amdgpu_amdkfd.h"
|
||||
|
||||
/**
|
||||
* struct amdgpu_mn
|
||||
*
|
||||
* @adev: amdgpu device pointer
|
||||
* @mm: process address space
|
||||
* @type: type of MMU notifier
|
||||
* @work: destruction work item
|
||||
* @node: hash table node to find structure by adev and mn
|
||||
* @lock: rw semaphore protecting the notifier nodes
|
||||
* @objects: interval tree containing amdgpu_mn_nodes
|
||||
* @mirror: HMM mirror function support
|
||||
*
|
||||
* Data for each amdgpu device and process address space.
|
||||
*/
|
||||
struct amdgpu_mn {
|
||||
/* constant after initialisation */
|
||||
struct amdgpu_device *adev;
|
||||
struct mm_struct *mm;
|
||||
enum amdgpu_mn_type type;
|
||||
|
||||
/* only used on destruction */
|
||||
struct work_struct work;
|
||||
|
||||
/* protected by adev->mn_lock */
|
||||
struct hlist_node node;
|
||||
|
||||
/* objects protected by lock */
|
||||
struct rw_semaphore lock;
|
||||
struct rb_root_cached objects;
|
||||
|
||||
/* HMM mirror; reached from a userptr BO via bo->mn so that
|
||||
* amdgpu_ttm_tt_get_user_pages() can use it */
|
||||
struct hmm_mirror mirror;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct amdgpu_mn_node
|
||||
*
|
||||
|
|
|
@@ -24,17 +24,53 @@
|
|||
#ifndef __AMDGPU_MN_H__
|
||||
#define __AMDGPU_MN_H__
|
||||
|
||||
/*
|
||||
* HMM mirror
|
||||
*/
|
||||
struct amdgpu_mn;
|
||||
struct hmm_range;
|
||||
#include <linux/types.h>
|
||||
#include <linux/hmm.h>
|
||||
#include <linux/rwsem.h>
|
||||
#include <linux/workqueue.h>
|
||||
#include <linux/interval_tree.h>
|
||||
|
||||
/* Flavour of amdgpu MMU notifier; stored in amdgpu_mn.type */
enum amdgpu_mn_type {
|
||||
AMDGPU_MN_TYPE_GFX, /* notifier for graphics (GFX) mappings */
|
||||
AMDGPU_MN_TYPE_HSA, /* notifier for HSA mappings — presumably the amdkfd path; confirm against callers */
|
||||
};
|
||||
|
||||
/**
|
||||
* struct amdgpu_mn
|
||||
*
|
||||
* @adev: amdgpu device pointer
|
||||
* @mm: process address space
|
||||
* @type: type of MMU notifier
|
||||
* @work: destruction work item
|
||||
* @node: hash table node to find structure by adev and mn
|
||||
* @lock: rw semaphore protecting the notifier nodes
|
||||
* @objects: interval tree containing amdgpu_mn_nodes
|
||||
* @mirror: HMM mirror function support (only present when CONFIG_HMM_MIRROR is set)
|
||||
*
|
||||
* Data for each amdgpu device and process address space.
|
||||
*/
|
||||
struct amdgpu_mn {
|
||||
/* constant after initialisation */
|
||||
struct amdgpu_device *adev;
|
||||
struct mm_struct *mm;
|
||||
enum amdgpu_mn_type type;
|
||||
|
||||
/* only used on destruction */
|
||||
struct work_struct work;
|
||||
|
||||
/* protected by adev->mn_lock */
|
||||
struct hlist_node node;
|
||||
|
||||
/* objects protected by lock */
|
||||
struct rw_semaphore lock;
|
||||
struct rb_root_cached objects;
|
||||
|
|
||||
#ifdef CONFIG_HMM_MIRROR
|
||||
/* HMM mirror */
|
||||
struct hmm_mirror mirror;
|
||||
#endif
|
||||
};
|
||||
|
||||
#if defined(CONFIG_HMM_MIRROR)
|
||||
void amdgpu_mn_lock(struct amdgpu_mn *mn);
|
||||
void amdgpu_mn_unlock(struct amdgpu_mn *mn);
|
||||
|
|
|
@@ -731,8 +731,10 @@ struct amdgpu_ttm_tt {
|
|||
|
||||
#define MAX_RETRY_HMM_RANGE_FAULT 16
|
||||
|
||||
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
|
||||
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
|
||||
{
|
||||
struct hmm_mirror *mirror = bo->mn ? &bo->mn->mirror : NULL;
|
||||
struct ttm_tt *ttm = bo->tbo.ttm;
|
||||
struct amdgpu_ttm_tt *gtt = (void *)ttm;
|
||||
struct mm_struct *mm = gtt->usertask->mm;
|
||||
unsigned long start = gtt->userptr;
|
||||
|
@@ -746,6 +748,12 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
|
|||
if (!mm) /* Happens during process shutdown */
|
||||
return -ESRCH;
|
||||
|
||||
if (unlikely(!mirror)) {
|
||||
DRM_DEBUG_DRIVER("Failed to get hmm_mirror\n");
|
||||
r = -EFAULT;
|
||||
goto out;
|
||||
}
|
||||
|
||||
vma = find_vma(mm, start);
|
||||
if (unlikely(!vma || start < vma->vm_start)) {
|
||||
r = -EFAULT;
|
||||
|
|
|
@@ -102,10 +102,11 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo);
|
|||
int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
|
||||
|
||||
#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
|
||||
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
|
||||
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages);
|
||||
bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm);
|
||||
#else
|
||||
static inline int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
|
||||
static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
|
||||
struct page **pages)
|
||||
{
|
||||
return -EPERM;
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue