Merge branch 'drm-next-4.6' of git://people.freedesktop.org/~agd5f/linux into drm-next
some amd fixes

* 'drm-next-4.6' of git://people.freedesktop.org/~agd5f/linux:
  drm/radeon/mst: cleanup code indentation
  drm/radeon/mst: fix regression in lane/link handling.
  drm/amdgpu: add invalidate_page callback for userptrs
  drm/amdgpu: Revert "remove the userptr rmn->lock"
  drm/amdgpu: clean up path handling for powerplay
  drm/amd/powerplay: fix memory leak of tdp_table
commit 4604202ca8
drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c

@@ -48,7 +48,8 @@ struct amdgpu_mn {
 	/* protected by adev->mn_lock */
 	struct hlist_node	node;
 
-	/* objects protected by mm->mmap_sem */
+	/* objects protected by lock */
+	struct mutex		lock;
 	struct rb_root		objects;
 };
 
@@ -72,7 +73,7 @@ static void amdgpu_mn_destroy(struct work_struct *work)
 	struct amdgpu_bo *bo, *next_bo;
 
 	mutex_lock(&adev->mn_lock);
-	down_write(&rmn->mm->mmap_sem);
+	mutex_lock(&rmn->lock);
 	hash_del(&rmn->node);
 	rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
 					     it.rb) {
@@ -82,7 +83,7 @@ static void amdgpu_mn_destroy(struct work_struct *work)
 		}
 		kfree(node);
 	}
-	up_write(&rmn->mm->mmap_sem);
+	mutex_unlock(&rmn->lock);
 	mutex_unlock(&adev->mn_lock);
 	mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
 	kfree(rmn);
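A note on the teardown above: adev->mn_lock is the outer lock and the per-notifier rmn->lock the inner one, taken and released in strict nesting order. A minimal userspace sketch of that ordering discipline (pthread stand-ins; names are illustrative, not the driver's types):

#include <pthread.h>
#include <stdio.h>

/* Stand-ins for adev->mn_lock (outer) and rmn->lock (inner). */
static pthread_mutex_t mn_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t rmn_lock = PTHREAD_MUTEX_INITIALIZER;

/* Mirrors amdgpu_mn_destroy(): take the outer lock first, release it
 * last. Consistent A-then-B ordering prevents ABBA deadlocks between
 * paths that need both locks. */
static void teardown(void)
{
	pthread_mutex_lock(&mn_lock);
	pthread_mutex_lock(&rmn_lock);
	/* ... hash_del() and object-tree cleanup would go here ... */
	pthread_mutex_unlock(&rmn_lock);
	pthread_mutex_unlock(&mn_lock);
}

int main(void)
{
	teardown();
	puts("locks nested consistently");
	return 0;
}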
@@ -104,6 +105,76 @@ static void amdgpu_mn_release(struct mmu_notifier *mn,
 	schedule_work(&rmn->work);
 }
 
+/**
+ * amdgpu_mn_invalidate_node - unmap all BOs of a node
+ *
+ * @node: the node with the BOs to unmap
+ *
+ * We block for all BOs and unmap them by moving them
+ * into the system domain again.
+ */
+static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
+				      unsigned long start,
+				      unsigned long end)
+{
+	struct amdgpu_bo *bo;
+	long r;
+
+	list_for_each_entry(bo, &node->bos, mn_list) {
+
+		if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
+			continue;
+
+		r = amdgpu_bo_reserve(bo, true);
+		if (r) {
+			DRM_ERROR("(%ld) failed to reserve user bo\n", r);
+			continue;
+		}
+
+		r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
+			true, false, MAX_SCHEDULE_TIMEOUT);
+		if (r <= 0)
+			DRM_ERROR("(%ld) failed to wait for user bo\n", r);
+
+		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+		if (r)
+			DRM_ERROR("(%ld) failed to validate user bo\n", r);
+
+		amdgpu_bo_unreserve(bo);
+	}
+}
+
+/**
+ * amdgpu_mn_invalidate_page - callback to notify about mm change
+ *
+ * @mn: our notifier
+ * @mm: the mm this callback is about
+ * @address: address of the invalidated page
+ *
+ * Invalidation of a single page. Blocks for all BOs mapping it
+ * and unmaps them by moving them into the system domain again.
+ */
+static void amdgpu_mn_invalidate_page(struct mmu_notifier *mn,
+				      struct mm_struct *mm,
+				      unsigned long address)
+{
+	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
+	struct interval_tree_node *it;
+
+	mutex_lock(&rmn->lock);
+
+	it = interval_tree_iter_first(&rmn->objects, address, address);
+	if (it) {
+		struct amdgpu_mn_node *node;
+
+		node = container_of(it, struct amdgpu_mn_node, it);
+		amdgpu_mn_invalidate_node(node, address, address);
+	}
+
+	mutex_unlock(&rmn->lock);
+}
+
 /**
  * amdgpu_mn_invalidate_range_start - callback to notify about mm change
  *
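The new single-page callback reuses the node helper by querying the interval tree with the degenerate range [address, address]: the kernel's interval tree treats both query bounds as inclusive, so a node matches whenever its own inclusive span contains the page. A small runnable sketch of that overlap rule (the function below is an illustrative stand-in, not the kernel API):

#include <assert.h>
#include <stdio.h>

/* Inclusive-interval overlap: node [n_start, n_last] intersects query
 * [q_start, q_last] iff neither lies entirely past the other. */
static int overlaps(unsigned long n_start, unsigned long n_last,
		    unsigned long q_start, unsigned long q_last)
{
	return n_start <= q_last && q_start <= n_last;
}

int main(void)
{
	unsigned long address = 0x2000;	/* the invalidated page */

	/* amdgpu_mn_invalidate_page() queries [address, address]. */
	assert(overlaps(0x1000, 0x2fff, address, address));	/* hit */
	assert(!overlaps(0x3000, 0x3fff, address, address));	/* miss */
	printf("inclusive overlap checks passed\n");
	return 0;
}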
@@ -126,44 +197,24 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
 	/* notification is exclusive, but interval is inclusive */
 	end -= 1;
 
+	mutex_lock(&rmn->lock);
+
 	it = interval_tree_iter_first(&rmn->objects, start, end);
 	while (it) {
 		struct amdgpu_mn_node *node;
-		struct amdgpu_bo *bo;
-		long r;
 
 		node = container_of(it, struct amdgpu_mn_node, it);
 		it = interval_tree_iter_next(it, start, end);
 
-		list_for_each_entry(bo, &node->bos, mn_list) {
-
-			if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start,
-							  end))
-				continue;
-
-			r = amdgpu_bo_reserve(bo, true);
-			if (r) {
-				DRM_ERROR("(%ld) failed to reserve user bo\n", r);
-				continue;
-			}
-
-			r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
-				true, false, MAX_SCHEDULE_TIMEOUT);
-			if (r <= 0)
-				DRM_ERROR("(%ld) failed to wait for user bo\n", r);
-
-			amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
-			r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
-			if (r)
-				DRM_ERROR("(%ld) failed to validate user bo\n", r);
-
-			amdgpu_bo_unreserve(bo);
-		}
+		amdgpu_mn_invalidate_node(node, start, end);
 	}
+
+	mutex_unlock(&rmn->lock);
 }
 
 static const struct mmu_notifier_ops amdgpu_mn_ops = {
 	.release = amdgpu_mn_release,
+	.invalidate_page = amdgpu_mn_invalidate_page,
 	.invalidate_range_start = amdgpu_mn_invalidate_range_start,
 };
 
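The "end -= 1" above is the other side of the same convention: mmu notifier ranges arrive as [start, end) with an exclusive end, while the interval tree wants an inclusive last address. A tiny runnable sketch of the conversion (a 4 KiB page is assumed only for the example):

#include <assert.h>

/* Convert an exclusive range end to an inclusive last address, as
 * amdgpu_mn_invalidate_range_start() does before querying the tree. */
static unsigned long end_to_last(unsigned long end)
{
	return end - 1;
}

int main(void)
{
	/* One 4 KiB page: [0x1000, 0x2000) becomes [0x1000, 0x1fff]. */
	assert(end_to_last(0x2000) == 0x1fff);
	return 0;
}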
@@ -196,6 +247,7 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
 	rmn->adev = adev;
 	rmn->mm = mm;
 	rmn->mn.ops = &amdgpu_mn_ops;
+	mutex_init(&rmn->lock);
 	rmn->objects = RB_ROOT;
 
 	r = __mmu_notifier_register(&rmn->mn, mm);
@@ -242,7 +294,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 
 	INIT_LIST_HEAD(&bos);
 
-	down_write(&rmn->mm->mmap_sem);
+	mutex_lock(&rmn->lock);
 
 	while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
 		kfree(node);
@@ -256,7 +308,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 	if (!node) {
 		node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL);
 		if (!node) {
-			up_write(&rmn->mm->mmap_sem);
+			mutex_unlock(&rmn->lock);
 			return -ENOMEM;
 		}
 	}
@@ -271,7 +323,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 
 	interval_tree_insert(&node->it, &rmn->objects);
 
-	up_write(&rmn->mm->mmap_sem);
+	mutex_unlock(&rmn->lock);
 
 	return 0;
 }
@@ -297,7 +349,7 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
 		return;
 	}
 
-	down_write(&rmn->mm->mmap_sem);
+	mutex_lock(&rmn->lock);
 
 	/* save the next list entry for later */
 	head = bo->mn_list.next;
@@ -312,6 +364,6 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
 		kfree(node);
 	}
 
-	up_write(&rmn->mm->mmap_sem);
+	mutex_unlock(&rmn->lock);
 	mutex_unlock(&adev->mn_lock);
 }
drivers/gpu/drm/amd/powerplay/Makefile

@@ -1,17 +1,17 @@
 
 subdir-ccflags-y += -Iinclude/drm \
-		-Idrivers/gpu/drm/amd/powerplay/inc/ \
-		-Idrivers/gpu/drm/amd/include/asic_reg \
-		-Idrivers/gpu/drm/amd/include \
-		-Idrivers/gpu/drm/amd/powerplay/smumgr\
-		-Idrivers/gpu/drm/amd/powerplay/hwmgr \
-		-Idrivers/gpu/drm/amd/powerplay/eventmgr
+		-I$(FULL_AMD_PATH)/powerplay/inc/ \
+		-I$(FULL_AMD_PATH)/include/asic_reg \
+		-I$(FULL_AMD_PATH)/include \
+		-I$(FULL_AMD_PATH)/powerplay/smumgr\
+		-I$(FULL_AMD_PATH)/powerplay/hwmgr \
+		-I$(FULL_AMD_PATH)/powerplay/eventmgr
 
 AMD_PP_PATH = ../powerplay
 
 PP_LIBS = smumgr hwmgr eventmgr
 
-AMD_POWERPLAY = $(addsuffix /Makefile,$(addprefix drivers/gpu/drm/amd/powerplay/,$(PP_LIBS)))
+AMD_POWERPLAY = $(addsuffix /Makefile,$(addprefix $(FULL_AMD_PATH)/powerplay/,$(PP_LIBS)))
 
 include $(AMD_POWERPLAY)
 
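For context: FULL_AMD_PATH is not defined in this Makefile. In the same cleanup it is expected to be provided by the amdgpu Makefile relative to $(src); that definition is not part of this hunk, so the sketch below is an assumption rather than a quote:

# Assumed definition in the amdgpu Makefile (not shown in this diff):
FULL_AMD_PATH=$(src)/../amd

Either way, the effect of the hunk is that the powerplay include paths follow the tree location instead of hardcoding drivers/gpu/drm/amd.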
drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c

@@ -512,8 +512,10 @@ static int get_cac_tdp_table(
 
 	hwmgr->dyn_state.cac_dtp_table = kzalloc(table_size, GFP_KERNEL);
 
-	if (NULL == hwmgr->dyn_state.cac_dtp_table)
+	if (NULL == hwmgr->dyn_state.cac_dtp_table) {
+		kfree(tdp_table);
 		return -ENOMEM;
+	}
 
 	memset(hwmgr->dyn_state.cac_dtp_table, 0x00, table_size);
 
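The leak fixed above is the classic partial-construction error path: a second allocation fails and the function returns without releasing the first. A minimal userspace sketch of the corrected pattern (illustrative names, not the powerplay structures):

#include <errno.h>
#include <stdlib.h>

/* Allocate two tables; on failure of the second, release the first
 * before returning, as the hunk above now does for tdp_table. */
static int build_tables(void **tdp, void **cac)
{
	*tdp = calloc(1, 64);
	if (!*tdp)
		return -ENOMEM;

	*cac = calloc(1, 64);
	if (!*cac) {
		free(*tdp);	/* the fix: don't leak the first table */
		*tdp = NULL;
		return -ENOMEM;
	}
	return 0;
}

int main(void)
{
	void *tdp = NULL, *cac = NULL;

	if (build_tables(&tdp, &cac) == 0) {
		free(cac);
		free(tdp);
	}
	return 0;
}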
drivers/gpu/drm/radeon/radeon_dp_mst.c

@@ -510,6 +510,7 @@ static bool radeon_mst_mode_fixup(struct drm_encoder *encoder,
 {
 	struct radeon_encoder_mst *mst_enc;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_connector_atom_dig *dig_connector;
 	int bpp = 24;
 
 	mst_enc = radeon_encoder->enc_priv;
@@ -523,22 +524,11 @@ static bool radeon_mst_mode_fixup(struct drm_encoder *encoder,
 
 
 	drm_mode_set_crtcinfo(adjusted_mode, 0);
-	{
-		struct radeon_connector_atom_dig *dig_connector;
-		int ret;
-
-		dig_connector = mst_enc->connector->con_priv;
-		ret = radeon_dp_get_dp_link_config(&mst_enc->connector->base,
-						   dig_connector->dpcd, adjusted_mode->clock,
-						   &dig_connector->dp_lane_count,
-						   &dig_connector->dp_clock);
-		if (ret) {
-			dig_connector->dp_lane_count = 0;
-			dig_connector->dp_clock = 0;
-		}
-		DRM_DEBUG_KMS("dig clock %p %d %d\n", dig_connector,
-			      dig_connector->dp_lane_count, dig_connector->dp_clock);
-	}
+	dig_connector = mst_enc->connector->con_priv;
+	dig_connector->dp_lane_count = drm_dp_max_lane_count(dig_connector->dpcd);
+	dig_connector->dp_clock = drm_dp_max_link_rate(dig_connector->dpcd);
+	DRM_DEBUG_KMS("dig clock %p %d %d\n", dig_connector,
+		      dig_connector->dp_lane_count, dig_connector->dp_clock);
 	return true;
 }
 
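The regression fix reads the sink's maximum lane count and link rate straight from the cached DPCD via drm_dp_max_lane_count()/drm_dp_max_link_rate(), rather than recomputing a per-mode link configuration and zeroing both fields when that computation failed. A simplified stand-in for what those helpers compute (DPCD byte offsets per the DisplayPort spec; a sketch, not the DRM implementation):

#include <stdio.h>

#define DP_MAX_LINK_RATE	0x001	/* units of 0.27 Gbps */
#define DP_MAX_LANE_COUNT	0x002
#define DP_MAX_LANE_COUNT_MASK	0x1f

static int max_lane_count(const unsigned char *dpcd)
{
	return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
}

static int max_link_rate_khz(const unsigned char *dpcd)
{
	return dpcd[DP_MAX_LINK_RATE] * 27000;	/* bw code -> kHz */
}

int main(void)
{
	/* DPCD 1.2 sink, HBR2 (0x14), 4 lanes with enhanced framing. */
	unsigned char dpcd[3] = { 0x12, 0x14, 0x84 };

	printf("lanes=%d rate=%d kHz\n",
	       max_lane_count(dpcd), max_link_rate_khz(dpcd));
	return 0;
}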