From 3f47d00bc65ba11b3e5753d733a74ca4f7af3240 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Sat, 5 Jan 2019 09:01:05 +0100
Subject: [PATCH 1/9] drm/vmwgfx: remove CONFIG_X86 ifdefs

The driver depends on CONFIG_X86 so these are dead code.

Signed-off-by: Christoph Hellwig
Reviewed-by: Thomas Hellstrom
Signed-off-by: Thomas Hellstrom
---
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 25afb1d594e3..69e325b2d954 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -565,7 +565,6 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
 		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
 		[vmw_dma_map_populate] = "Keeping DMA mappings.",
 		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
-#ifdef CONFIG_X86
 	const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);
 
 #ifdef CONFIG_INTEL_IOMMU
@@ -607,11 +606,6 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
 	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
 		return -EINVAL;
 #endif
-
-#else /* CONFIG_X86 */
-	dev_priv->map_mode = vmw_dma_map_populate;
-#endif /* CONFIG_X86 */
-
 	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
 
 	return 0;

From 9b5bf2421b43ef85568f9b875d6387a114e92efe Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Sat, 5 Jan 2019 09:01:06 +0100
Subject: [PATCH 2/9] drm/vmwgfx: remove CONFIG_INTEL_IOMMU ifdefs v2

intel_iommu_enabled is defined as always false for !CONFIG_INTEL_IOMMU,
so remove the ifdefs around it.

Signed-off-by: Christoph Hellwig
Reviewed-by: Thomas Hellstrom
Signed-off-by: Thomas Hellstrom
---
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 21 ++++-----------------
 1 file changed, 4 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 69e325b2d954..b7777b5b4a81 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -567,12 +567,10 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
 		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
 	const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);
 
-#ifdef CONFIG_INTEL_IOMMU
 	if (intel_iommu_enabled) {
 		dev_priv->map_mode = vmw_dma_map_populate;
 		goto out_fixup;
 	}
-#endif
 
 	if (!(vmw_force_iommu || vmw_force_coherent)) {
 		dev_priv->map_mode = vmw_dma_phys;
@@ -589,9 +587,7 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
 	dev_priv->map_mode = vmw_dma_map_populate;
 #endif
 
-#ifdef CONFIG_INTEL_IOMMU
 out_fixup:
-#endif
 	if (dev_priv->map_mode == vmw_dma_map_populate &&
 	    vmw_restrict_iommu)
 		dev_priv->map_mode = vmw_dma_map_bind;
@@ -599,13 +595,11 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
 	if (vmw_force_coherent)
 		dev_priv->map_mode = vmw_dma_alloc_coherent;
 
-#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
-	/*
-	 * No coherent page pool
-	 */
-	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
+	/* No TTM coherent page pool? FIXME: Ask TTM instead! */
+	if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) &&
+	    (dev_priv->map_mode == vmw_dma_alloc_coherent))
 		return -EINVAL;
-#endif
+
 	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
 
 	return 0;
@@ -619,7 +613,6 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
  * With 32-bit we can only handle 32 bit PFNs. Optionally set that
  * restriction also for 64-bit systems.
  */
-#ifdef CONFIG_INTEL_IOMMU
 static int vmw_dma_masks(struct vmw_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
@@ -631,12 +624,6 @@ static int vmw_dma_masks(struct vmw_private *dev_priv)
 	}
 	return 0;
 }
-#else
-static int vmw_dma_masks(struct vmw_private *dev_priv)
-{
-	return 0;
-}
-#endif
 
 static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 {
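A note on the IS_ENABLED() idiom that patch 2 switches to, illustrated with an
invented helper (example_check_coherent_pool is not driver code): IS_ENABLED(CONFIG_FOO)
folds to a compile-time 0 or 1, so both branches stay visible to the compiler and are
type-checked while the dead one is simply optimized away; an #ifdef block, by contrast,
is removed by the preprocessor before the compiler ever sees it.

    #include <linux/errno.h>
    #include <linux/kconfig.h>
    #include <linux/types.h>

    /* Hypothetical helper, not part of the series. */
    static int example_check_coherent_pool(bool want_coherent)
    {
            /*
             * Both config symbols become constant 0/1 here, so the whole
             * test folds at compile time, yet the branch body still gets
             * parsed and type-checked, unlike code hidden behind #ifdef.
             */
            if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) &&
                want_coherent)
                    return -EINVAL;

            return 0;
    }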
From 2b3cd6249b14e25da14f13cd520eb336230a4422 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Sat, 5 Jan 2019 09:01:07 +0100
Subject: [PATCH 3/9] drm/vmwgfx: fix the check when to use dma_alloc_coherent

Since Linux 4.21 we merged the swiotlb ops into the DMA direct ops, so
they would always have the sync_single methods. But late in the cycle we
also removed the direct ops entirely, so we'd see NULL DMA ops. Switch
vmw_dma_select_mode to only detect swiotlb presence using
swiotlb_nr_tbl() instead.

Fixes: 55897af630 ("dma-direct: merge swiotlb_dma_ops into the dma_direct code")
Fixes: 356da6d0cd ("dma-mapping: bypass indirect calls for dma-direct")
Signed-off-by: Christoph Hellwig
Reviewed-by: Thomas Hellstrom
Signed-off-by: Thomas Hellstrom
---
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index b7777b5b4a81..1456101e67a9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -565,7 +565,6 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
 		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
 		[vmw_dma_map_populate] = "Keeping DMA mappings.",
 		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
-	const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);
 
 	if (intel_iommu_enabled) {
 		dev_priv->map_mode = vmw_dma_map_populate;
@@ -578,14 +577,12 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
 		return 0;
 	}
 
-	dev_priv->map_mode = vmw_dma_map_populate;
-
-	if (dma_ops && dma_ops->sync_single_for_cpu)
-		dev_priv->map_mode = vmw_dma_alloc_coherent;
 #ifdef CONFIG_SWIOTLB
-	if (swiotlb_nr_tbl() == 0)
-		dev_priv->map_mode = vmw_dma_map_populate;
+	if (swiotlb_nr_tbl())
+		dev_priv->map_mode = vmw_dma_alloc_coherent;
+	else
 #endif
+		dev_priv->map_mode = vmw_dma_map_populate;
 
 out_fixup:
 	if (dev_priv->map_mode == vmw_dma_map_populate &&

From 05f9467e70ed7c9cd19fd3d42346941cdc03eef3 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Sat, 5 Jan 2019 09:01:08 +0100
Subject: [PATCH 4/9] drm/vmwgfx: unwind spaghetti code in vmw_dma_select_mode

Just use a simple if/else chain to select the DMA mode.

Signed-off-by: Christoph Hellwig
Reviewed-by: Thomas Hellstrom
Signed-off-by: Thomas Hellstrom
---
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 35 +++++++++--------------------
 1 file changed, 11 insertions(+), 24 deletions(-)

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 1456101e67a9..3e2bcff34032 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -566,31 +566,19 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
 		[vmw_dma_map_populate] = "Keeping DMA mappings.",
 		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
 
-	if (intel_iommu_enabled) {
-		dev_priv->map_mode = vmw_dma_map_populate;
-		goto out_fixup;
-	}
-
-	if (!(vmw_force_iommu || vmw_force_coherent)) {
-		dev_priv->map_mode = vmw_dma_phys;
-		DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
-		return 0;
-	}
-
-#ifdef CONFIG_SWIOTLB
-	if (swiotlb_nr_tbl())
-		dev_priv->map_mode = vmw_dma_alloc_coherent;
-	else
-#endif
-		dev_priv->map_mode = vmw_dma_map_populate;
-
-out_fixup:
-	if (dev_priv->map_mode == vmw_dma_map_populate &&
-	    vmw_restrict_iommu)
-		dev_priv->map_mode = vmw_dma_map_bind;
-
 	if (vmw_force_coherent)
 		dev_priv->map_mode = vmw_dma_alloc_coherent;
+	else if (intel_iommu_enabled)
+		dev_priv->map_mode = vmw_dma_map_populate;
+	else if (!vmw_force_iommu)
+		dev_priv->map_mode = vmw_dma_phys;
+	else if (IS_ENABLED(CONFIG_SWIOTLB) && swiotlb_nr_tbl())
+		dev_priv->map_mode = vmw_dma_alloc_coherent;
+	else
+		dev_priv->map_mode = vmw_dma_map_populate;
+
+	if (dev_priv->map_mode == vmw_dma_map_populate && vmw_restrict_iommu)
+		dev_priv->map_mode = vmw_dma_map_bind;
 
 	/* No TTM coherent page pool? FIXME: Ask TTM instead! */
 	if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) &&
@@ -598,7 +586,6 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
 		return -EINVAL;
 
 	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
-
 	return 0;
 }
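Some background on the check patch 3 replaces, sketched with an invented helper
(example_swiotlb_active is not driver code): after the 4.21 dma-direct rework a device
using the direct mapping may have no dma_map_ops at all, so probing
ops->sync_single_for_cpu no longer says anything about whether bounce buffering can
happen. swiotlb_nr_tbl() does, since it reports how many bounce-buffer slabs were set
up; patch 4 then folds the same test into the if/else chain as
IS_ENABLED(CONFIG_SWIOTLB) && swiotlb_nr_tbl().

    #include <linux/kconfig.h>
    #include <linux/swiotlb.h>
    #include <linux/types.h>

    /* Hypothetical helper, not driver code. */
    static bool example_swiotlb_active(void)
    {
            /*
             * Non-zero swiotlb_nr_tbl() means a bounce-buffer pool exists.
             * The IS_ENABLED() guard lets the compiler drop the call when
             * CONFIG_SWIOTLB is off, mirroring the construct in patch 4.
             */
            return IS_ENABLED(CONFIG_SWIOTLB) && swiotlb_nr_tbl() != 0;
    }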
From 728354c005c36eaf44b6e5552372b67e60d17f56 Mon Sep 17 00:00:00 2001
From: Thomas Hellstrom
Date: Thu, 31 Jan 2019 10:55:37 +0100
Subject: [PATCH 5/9] drm/vmwgfx: Return error code from vmw_execbuf_copy_fence_user

The function was unconditionally returning 0, and a caller would have to
rely on the returned fence pointer being NULL to detect errors. However,
the function vmw_execbuf_copy_fence_user() would expect a non-zero error
code in that case and would BUG otherwise.

So make sure we return a proper non-zero error code if the fence pointer
returned is NULL.

Cc:
Fixes: ae2a104058e2: ("vmwgfx: Implement fence objects")
Signed-off-by: Thomas Hellstrom
Reviewed-by: Deepak Rawat
---
 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index f2d13a72c05d..88b8178d4687 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3570,7 +3570,7 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
 		*p_fence = NULL;
 	}
 
-	return 0;
+	return ret;
 }
 
 /**

From 51fdbeb4ca1a8415c98f87cb877956ae83e71627 Mon Sep 17 00:00:00 2001
From: Thomas Hellstrom
Date: Thu, 31 Jan 2019 10:52:21 +0100
Subject: [PATCH 6/9] drm/vmwgfx: Fix an uninitialized fence handle value

If vmw_execbuf_fence_commands() fails, the handle value will be
uninitialized and a bogus fence handle might be copied to user-space.

Cc:
Fixes: 2724b2d54cda: ("drm/vmwgfx: Use new validation interface for the modesetting code v2")
Reported-by: Dan Carpenter
Signed-off-by: Thomas Hellstrom
Reviewed-by: Brian Paul #v1
Reviewed-by: Sinclair Yeh #v1
Reviewed-by: Deepak Rawat
---
 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index b351fb5214d3..5e257a600cea 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -2554,8 +2554,8 @@ void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
 				      user_fence_rep)
 {
 	struct vmw_fence_obj *fence = NULL;
-	uint32_t handle;
-	int ret;
+	uint32_t handle = 0;
+	int ret = 0;
 
 	if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
 	    out_fence)
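Patches 5 and 6 harden the two ends of the same calling pattern: the callee reports
failure through its return value rather than only through a NULL fence pointer, and the
caller initializes the outputs it might later copy to user-space. A made-up miniature
of that pattern follows; none of the example_* identifiers exist in the driver.

    #include <linux/errno.h>
    #include <linux/types.h>

    struct example_fence;

    /* Stub that always fails, standing in for fence creation. */
    static int example_fence_create(struct example_fence **p_fence, u32 *p_handle)
    {
            return -ENOMEM;
    }

    static int example_fence_commands(struct example_fence **p_fence, u32 *p_handle)
    {
            int ret = example_fence_create(p_fence, p_handle);

            if (ret != 0)
                    *p_fence = NULL;

            return ret;     /* as in patch 5: propagate the error, not 0 */
    }

    static void example_caller(void)
    {
            struct example_fence *fence = NULL;
            u32 handle = 0; /* as in patch 6: no stack garbage on failure */
            int ret;

            ret = example_fence_commands(&fence, &handle);
            if (ret != 0)
                    return;
            /* Only a successfully produced handle would be copied out here. */
    }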
From 479d59026fe44f89fa67efa01a4d47e00808e688 Mon Sep 17 00:00:00 2001
From: Deepak Rawat
Date: Fri, 21 Dec 2018 14:38:35 -0800
Subject: [PATCH 7/9] drm/vmwgfx: Also check for crtc status while checking for DU active

During a modeset check it is possible to have all crtc_states in the
atomic state. Check for crtc enable status while checking for display
unit active status. Only error if enabling a crtc while the display
unit is not active.

Cc:
Fixes: 9da6e26c0aae: ("drm/vmwgfx: Fix a layout race condition")
Signed-off-by: Deepak Rawat
Reviewed-by: Thomas Hellstrom
Signed-off-by: Thomas Hellstrom
---
 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 5e257a600cea..ed2f67822f45 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1646,7 +1646,7 @@ static int vmw_kms_check_topology(struct drm_device *dev,
 		struct drm_connector_state *conn_state;
 		struct vmw_connector_state *vmw_conn_state;
 
-		if (!du->pref_active) {
+		if (!du->pref_active && new_crtc_state->enable) {
 			ret = -EINVAL;
 			goto clean;
 		}

From 4cbfa1e6c09e98450aab3240e5119b0ab2c9795b Mon Sep 17 00:00:00 2001
From: Thomas Hellstrom
Date: Mon, 28 Jan 2019 10:31:33 +0100
Subject: [PATCH 8/9] drm/vmwgfx: Fix setting of dma masks

Previously we set only the dma mask and not the coherent mask. Fix that.
Also, for clarity, make sure both are initially set to 64 bits.

Cc:
Fixes: 0d00c488f3de: ("drm/vmwgfx: Fix the driver for large dma addresses")
Signed-off-by: Thomas Hellstrom
Reviewed-by: Deepak Rawat
---
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 3e2bcff34032..ae9df4432bfc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -600,13 +600,16 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
 static int vmw_dma_masks(struct vmw_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
+	int ret = 0;
 
-	if (intel_iommu_enabled &&
+	ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
+	if (dev_priv->map_mode != vmw_dma_phys &&
 	    (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
 		DRM_INFO("Restricting DMA addresses to 44 bits.\n");
-		return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
+		return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
 	}
-	return 0;
+
+	return ret;
 }
 
 static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
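The API distinction behind patch 8: dma_set_mask() sets only the streaming DMA mask,
while dma_set_mask_and_coherent() also sets the mask used for coherent allocations,
which matters once the driver can run with coherent TTM pages. A stripped-down sketch
under invented names (example_dma_masks and its restrict_to_44bit parameter are not
driver code; the real function keys the restriction off the map mode and
vmw_restrict_dma_mask):

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/types.h>

    static int example_dma_masks(struct device *dev, bool restrict_to_44bit)
    {
            int ret;

            /* Widest mask first, for both streaming and coherent DMA. */
            ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
            if (ret)
                    return ret;

            /* Optionally restrict both masks, e.g. on 32-bit kernels. */
            if (restrict_to_44bit)
                    return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(44));

            return 0;
    }

Note that the real patch gates the 44-bit restriction on
dev_priv->map_mode != vmw_dma_phys rather than on a boolean parameter.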
From 9ddac734aa310c5fbc0ec93602335d2a39092451 Mon Sep 17 00:00:00 2001
From: Thomas Hellstrom
Date: Thu, 17 Jan 2019 14:34:52 +0100
Subject: [PATCH 9/9] drm/vmwgfx: Improve on IOMMU detection

Instead of relying on intel_iommu_enabled, use the fact that
dma_map_ops::map_page != dma_direct_map_page.

Signed-off-by: Thomas Hellstrom
Reviewed-by: Deepak Rawat
---
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 19 +++++++++++++++++--
 1 file changed, 17 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index ae9df4432bfc..7ef5dcb06104 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -26,6 +26,7 @@ **************************************************************************/
 #include
 #include
+#include
 
 #include
 #include "vmwgfx_drv.h"
 #include
@@ -34,7 +35,6 @@
 #include
 #include
 #include
-#include
 
 #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
 #define VMWGFX_CHIP_SVGAII 0
@@ -545,6 +545,21 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv)
 	dev_priv->initial_height = height;
 }
 
+/**
+ * vmw_assume_iommu - Figure out whether coherent dma-remapping might be
+ * taking place.
+ * @dev: Pointer to the struct drm_device.
+ *
+ * Return: true if iommu present, false otherwise.
+ */
+static bool vmw_assume_iommu(struct drm_device *dev)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev->dev);
+
+	return !dma_is_direct(ops) && ops &&
+		ops->map_page != dma_direct_map_page;
+}
+
 /**
  * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
  * system.
@@ -568,7 +583,7 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
 
 	if (vmw_force_coherent)
 		dev_priv->map_mode = vmw_dma_alloc_coherent;
-	else if (intel_iommu_enabled)
+	else if (vmw_assume_iommu(dev_priv->dev))
 		dev_priv->map_mode = vmw_dma_map_populate;
 	else if (!vmw_force_iommu)
 		dev_priv->map_mode = vmw_dma_phys;
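An annotated restatement of the heuristic patch 9 introduces, using the same 5.0-era
dma-mapping internals the patch itself relies on (dma_is_direct() and
dma_direct_map_page); example_assume_iommu() is an illustrative twin of
vmw_assume_iommu(), not the driver function:

    #include <linux/device.h>
    #include <linux/dma-direct.h>
    #include <linux/dma-mapping.h>

    static bool example_assume_iommu(struct device *dev)
    {
            const struct dma_map_ops *ops = get_dma_ops(dev);

            /* NULL ops means dma-direct is used: no remapping is taking place. */
            if (dma_is_direct(ops))
                    return false;

            /*
             * A non-NULL ops structure whose map_page is not the direct
             * implementation is taken to mean an IOMMU may be remapping
             * DMA for this device.
             */
            return ops && ops->map_page != dma_direct_map_page;
    }

The check is deliberately a heuristic: any non-direct dma_map_ops is assumed to belong
to an IOMMU, which errs on the side of the safer vmw_dma_map_populate mode.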