mirror of https://gitee.com/openkylin/linux.git
drm/amdgpu: remove the exclusive lock

Finally getting rid of it.

Signed-off-by: Christian König <christian.koenig@amd.com>
parent b7e4dad3e1
commit 0c418f1010
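For context on the hunks below: adev->exclusive_lock was a device-wide rw_semaphore taken for read by every regular path (command submission, GEM ioctls, page flips, fence checks) and for write by amdgpu_gpu_reset(), so a reset could only begin once all readers had drained. The following sketch is a minimal userspace model of that pattern, not driver code; a pthread rwlock and stub bodies stand in for the kernel rwsem and the real paths.

/*
 * Minimal userspace model (assumption: pthreads stand in for the
 * kernel rwsem API) of the reader/writer pattern this commit removes.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t exclusive_lock = PTHREAD_RWLOCK_INITIALIZER;

/* models the shared (reader) side, e.g. amdgpu_cs_ioctl() */
static int submit_work(void)
{
	pthread_rwlock_rdlock(&exclusive_lock);	/* was down_read() */
	puts("submitting: no reset can run concurrently");
	pthread_rwlock_unlock(&exclusive_lock);	/* was up_read() */
	return 0;
}

/* models the exclusive (writer) side, e.g. amdgpu_gpu_reset() */
static int gpu_reset(void)
{
	pthread_rwlock_wrlock(&exclusive_lock);	/* was down_write() */
	puts("reset: all readers have drained");
	pthread_rwlock_unlock(&exclusive_lock);	/* was up_write() */
	return 0;
}

int main(void)
{
	submit_work();
	gpu_reset();
	return 0;
}

Every removed down_read()/up_read() pair in the diff corresponds to the reader side of this model; the up_write()/down_write() pair removed from amdgpu_gpu_reset() is the writer side.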
@@ -1955,7 +1955,6 @@ struct amdgpu_device {
 	struct device			*dev;
 	struct drm_device		*ddev;
 	struct pci_dev			*pdev;
-	struct rw_semaphore		exclusive_lock;
 
 	/* ASIC */
 	enum amd_asic_type		asic_type;
@@ -831,11 +831,8 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	bool reserved_buffers = false;
 	int i, r;
 
-	down_read(&adev->exclusive_lock);
-	if (!adev->accel_working) {
-		up_read(&adev->exclusive_lock);
+	if (!adev->accel_working)
 		return -EBUSY;
-	}
 
 	parser = amdgpu_cs_parser_create(adev, filp, NULL, NULL, 0);
 	if (!parser)
@@ -843,8 +840,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	r = amdgpu_cs_parser_init(parser, data);
 	if (r) {
 		DRM_ERROR("Failed to initialize parser !\n");
-		kfree(parser);
-		up_read(&adev->exclusive_lock);
+		amdgpu_cs_parser_fini(parser, r, false);
 		r = amdgpu_cs_handle_lockup(adev, r);
 		return r;
 	}
@@ -915,14 +911,12 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 
 		mutex_unlock(&job->job_lock);
 		amdgpu_cs_parser_fini_late(parser);
-		up_read(&adev->exclusive_lock);
 		return 0;
 	}
 
 	cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence;
 out:
 	amdgpu_cs_parser_fini(parser, r, reserved_buffers);
-	up_read(&adev->exclusive_lock);
 	r = amdgpu_cs_handle_lockup(adev, r);
 	return r;
 }
@@ -1418,7 +1418,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	mutex_init(&adev->gfx.gpu_clock_mutex);
 	mutex_init(&adev->srbm_mutex);
 	mutex_init(&adev->grbm_idx_mutex);
-	init_rwsem(&adev->exclusive_lock);
 	mutex_init(&adev->mn_lock);
 	hash_init(adev->mn_hash);
 
@@ -1814,8 +1813,6 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
 	int i, r;
 	int resched;
 
-	down_write(&adev->exclusive_lock);
-
 	atomic_inc(&adev->gpu_reset_counter);
 
 	/* block TTM */
@@ -1879,7 +1876,6 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
 		dev_info(adev->dev, "GPU reset failed\n");
 	}
 
-	up_write(&adev->exclusive_lock);
 	return r;
 }
 
@@ -47,11 +47,8 @@ static void amdgpu_flip_wait_fence(struct amdgpu_device *adev,
 	fence = to_amdgpu_fence(*f);
 	if (fence) {
 		r = fence_wait(&fence->base, false);
-		if (r == -EDEADLK) {
-			up_read(&adev->exclusive_lock);
+		if (r == -EDEADLK)
 			r = amdgpu_gpu_reset(adev);
-			down_read(&adev->exclusive_lock);
-		}
 	} else
 		r = fence_wait(*f, false);
 
@@ -77,7 +74,6 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
 	unsigned long flags;
 	unsigned i;
 
-	down_read(&adev->exclusive_lock);
 	amdgpu_flip_wait_fence(adev, &work->excl);
 	for (i = 0; i < work->shared_count; ++i)
 		amdgpu_flip_wait_fence(adev, &work->shared[i]);
@@ -93,7 +89,6 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
 	amdgpuCrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
 
 	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-	up_read(&adev->exclusive_lock);
 }
 
 /*
@@ -260,16 +260,8 @@ static void amdgpu_fence_check_lockup(struct work_struct *work)
 			lockup_work.work);
 	ring = fence_drv->ring;
 
-	if (!down_read_trylock(&ring->adev->exclusive_lock)) {
-		/* just reschedule the check if a reset is going on */
-		amdgpu_fence_schedule_check(ring);
-		return;
-	}
-
-	if (amdgpu_fence_activity(ring)) {
+	if (amdgpu_fence_activity(ring))
 		wake_up_all(&ring->fence_drv.fence_queue);
-	}
-	up_read(&ring->adev->exclusive_lock);
 }
 
 /**
@@ -317,18 +309,15 @@ static bool amdgpu_fence_is_signaled(struct fence *f)
 {
 	struct amdgpu_fence *fence = to_amdgpu_fence(f);
 	struct amdgpu_ring *ring = fence->ring;
-	struct amdgpu_device *adev = ring->adev;
 
 	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
 		return true;
 
-	if (down_read_trylock(&adev->exclusive_lock)) {
-		amdgpu_fence_process(ring);
-		up_read(&adev->exclusive_lock);
+	amdgpu_fence_process(ring);
+	if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
+		return true;
 
-		if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
-			return true;
-	}
 	return false;
 }
 
@@ -181,7 +181,6 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 	bool kernel = false;
 	int r;
 
-	down_read(&adev->exclusive_lock);
 	/* create a gem object to contain this object in */
 	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
 	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
@@ -214,11 +213,9 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 
 	memset(args, 0, sizeof(*args));
 	args->out.handle = handle;
-	up_read(&adev->exclusive_lock);
 	return 0;
 
 error_unlock:
-	up_read(&adev->exclusive_lock);
 	r = amdgpu_gem_handle_lockup(adev, r);
 	return r;
 }
@@ -250,8 +247,6 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 		return -EACCES;
 	}
 
-	down_read(&adev->exclusive_lock);
-
 	/* create a gem object to contain this object in */
 	r = amdgpu_gem_object_create(adev, args->size, 0,
 				     AMDGPU_GEM_DOMAIN_CPU, 0,
@@ -293,14 +288,12 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 		goto handle_lockup;
 
 	args->handle = handle;
-	up_read(&adev->exclusive_lock);
 	return 0;
 
 release_object:
 	drm_gem_object_unreference_unlocked(gobj);
 
 handle_lockup:
-	up_read(&adev->exclusive_lock);
 	r = amdgpu_gem_handle_lockup(adev, r);
 
 	return r;