drm/vmwgfx: takedown vram manager

Don't bother returning EBUSY; nobody cares enough. If the
driver has a problem, it should deal with it.

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200804025632.3868079-29-airlied@gmail.com
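
Editor's note: the point of the message is that a teardown path which reports
-EBUSY leaves its caller with nothing sensible to do, so the new vmw_thp_fini()
below is void and cleans up unconditionally. A minimal sketch of that contract
change, using an invented struct mgr in user-space C rather than the actual
TTM types:

	#include <errno.h>
	#include <stdbool.h>
	#include <stdlib.h>

	struct mgr {
		bool busy;	/* models a drm_mm that still has nodes */
	};

	/* Old contract: refuse to tear down while busy, return -EBUSY. */
	static int mgr_takedown(struct mgr *m)
	{
		if (m->busy)
			return -EBUSY;	/* callers typically ignored this anyway */
		free(m);
		return 0;
	}

	/* New contract: drain whatever is left, then tear down; returns void. */
	static void mgr_fini(struct mgr *m)
	{
		m->busy = false;	/* models forcing the list clean */
		free(m);
	}

	int main(void)
	{
		struct mgr *a = calloc(1, sizeof(*a));
		struct mgr *b = calloc(1, sizeof(*b));

		a->busy = b->busy = true;

		if (mgr_takedown(a))	/* old way: fails, manager leaks */
			free(a);	/* ...unless every caller adds a fallback */

		mgr_fini(b);		/* new way: always completes */
		return 0;
	}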
Dave Airlie <airlied@redhat.com>, 2020-08-04 12:56:01 +10:00
commit e0830704de (parent 6fe1c54353)
3 changed files with 27 additions and 11 deletions

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c

@@ -637,6 +637,17 @@ static int vmw_vram_manager_init(struct vmw_private *dev_priv)
 		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
 	return ret;
 }
+
+static void vmw_vram_manager_fini(struct vmw_private *dev_priv)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	vmw_thp_fini(dev_priv);
+#else
+	ttm_bo_man_fini(&dev_priv->bdev,
+			&dev_priv->bdev.man[TTM_PL_VRAM]);
+#endif
+}
+
 static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 {
 	struct vmw_private *dev_priv;
@@ -988,7 +999,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
 	if (dev_priv->has_gmr)
 		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
-	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
+	vmw_vram_manager_fini(dev_priv);
 out_no_vram:
 	(void)ttm_bo_device_release(&dev_priv->bdev);
 out_no_bdev:
@@ -1042,6 +1053,7 @@ static void vmw_driver_unload(struct drm_device *dev)
 	vmw_release_device_early(dev_priv);
 	if (dev_priv->has_mob)
 		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
+	vmw_vram_manager_fini(dev_priv);
 	(void) ttm_bo_device_release(&dev_priv->bdev);
 	drm_vma_offset_manager_destroy(&dev_priv->vma_manager);
 	vmw_release_device_late(dev_priv);
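
Editor's note: both the load-error path and unload now funnel through
vmw_vram_manager_fini(), so teardown always matches whichever backend
vmw_vram_manager_init() selected under CONFIG_TRANSPARENT_HUGEPAGE. The
out_no_vram/out_no_bdev labels above are the standard kernel goto-unwind
idiom; a generic user-space sketch of it, with made-up init_a/init_b stages
rather than the driver's functions:

	#include <stdio.h>

	/* Two fake init stages; pretend the second one fails. */
	static int init_a(void) { puts("init a"); return 0; }
	static int init_b(void) { puts("init b failed"); return -1; }
	static void fini_a(void) { puts("fini a"); }

	static int driver_load(void)
	{
		int ret;

		ret = init_a();
		if (ret)
			goto out_no_a;

		ret = init_b();
		if (ret)
			goto out_no_b;

		return 0;

	out_no_b:
		fini_a();	/* unwind in reverse order of initialization */
	out_no_a:
		return ret;
	}

	int main(void)
	{
		return driver_load() ? 1 : 0;
	}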

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h

@@ -1521,6 +1521,7 @@ vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf,
 /* Transparent hugepage support - vmwgfx_thp.c */
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 extern int vmw_thp_init(struct vmw_private *dev_priv);
+void vmw_thp_fini(struct vmw_private *dev_priv);
 #endif
 
 /**

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c

@@ -135,21 +135,25 @@ int vmw_thp_init(struct vmw_private *dev_priv)
 	return 0;
 }
 
-static int vmw_thp_takedown(struct ttm_mem_type_manager *man)
+void vmw_thp_fini(struct vmw_private *dev_priv)
 {
+	struct ttm_mem_type_manager *man = &dev_priv->bdev.man[TTM_PL_VRAM];
 	struct vmw_thp_manager *rman = (struct vmw_thp_manager *) man->priv;
 	struct drm_mm *mm = &rman->mm;
+	int ret;
 
+	ttm_mem_type_manager_disable(man);
+
+	ret = ttm_mem_type_manager_force_list_clean(&dev_priv->bdev, man);
+	if (ret)
+		return;
 	spin_lock(&rman->lock);
-	if (drm_mm_clean(mm)) {
-		drm_mm_takedown(mm);
-		spin_unlock(&rman->lock);
-		kfree(rman);
-		man->priv = NULL;
-		return 0;
-	}
+	drm_mm_clean(mm);
+	drm_mm_takedown(mm);
 	spin_unlock(&rman->lock);
-	return -EBUSY;
+	kfree(rman);
+	man->priv = NULL;
+	ttm_mem_type_manager_cleanup(man);
 }
 
 static void vmw_thp_debug(struct ttm_mem_type_manager *man,
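
Editor's note: the new flow is disable the manager so no new allocations come
in, force-clean anything still resident, and only then take down the drm_mm
and free the manager; there is no longer an -EBUSY result for the caller to
ignore. A rough user-space model of that disable -> drain -> takedown
ordering, with invented names (struct thp_mgr, mgr_force_clean) standing in
for the TTM helpers:

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct thp_mgr {
		pthread_mutex_t lock;	/* stands in for rman->lock */
		int nodes;		/* stands in for drm_mm occupancy */
		bool enabled;
	};

	/* Stop new allocations; models ttm_mem_type_manager_disable(). */
	static void mgr_disable(struct thp_mgr *m)
	{
		m->enabled = false;
	}

	/* Evict whatever is still resident; models the force-list-clean step. */
	static int mgr_force_clean(struct thp_mgr *m)
	{
		m->nodes = 0;
		return 0;
	}

	static void mgr_fini(struct thp_mgr *m)
	{
		mgr_disable(m);
		if (mgr_force_clean(m))
			return;		/* could not drain; leave it allocated */

		pthread_mutex_lock(&m->lock);
		/* the drm_mm_takedown() equivalent would go here */
		pthread_mutex_unlock(&m->lock);

		pthread_mutex_destroy(&m->lock);
		free(m);		/* kfree(rman); man->priv = NULL; */
	}

	int main(void)
	{
		struct thp_mgr *m = calloc(1, sizeof(*m));

		pthread_mutex_init(&m->lock, NULL);
		m->enabled = true;
		m->nodes = 3;		/* pretend buffers are still resident */
		mgr_fini(m);		/* void: always completes, never -EBUSY */
		puts("manager torn down");
		return 0;
	}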
@@ -163,7 +167,6 @@ static void vmw_thp_debug(struct ttm_mem_type_manager *man,
 }
 
 const struct ttm_mem_type_manager_func vmw_thp_func = {
-	.takedown = vmw_thp_takedown,
 	.get_node = vmw_thp_get_node,
 	.put_node = vmw_thp_put_node,
 	.debug = vmw_thp_debug
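
Editor's note: with the driver calling vmw_thp_fini() directly, the .takedown
hook simply disappears from the ops table. Because the table uses designated
initializers, any member left unnamed is zero-initialized, so no stale
function pointer survives by accident. A tiny stand-alone illustration of
that C rule (the struct here is invented, not the TTM one):

	#include <stdio.h>

	struct ops {
		int  (*get_node)(void);
		void (*put_node)(void);
		int  (*takedown)(void);	/* legacy hook, no longer wired up */
	};

	static int get_node(void) { return 0; }
	static void put_node(void) { }

	/* .takedown is omitted, so C guarantees it is initialized to NULL. */
	static const struct ops thp_ops = {
		.get_node = get_node,
		.put_node = put_node,
	};

	int main(void)
	{
		printf("takedown hook is %s\n",
		       thp_ops.takedown ? "set" : "NULL");
		return 0;
	}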