drm etnaviv, imx, amdgpu fixes

-----BEGIN PGP SIGNATURE-----
 
 iQIcBAABAgAGBQJc8H7DAAoJEAx081l5xIa+E/YP/RgQYJk9RwzySPVgb2mL4MIP
 xCwtdX5dX3CABxqObAHoHXvwYmPlNTMh7f262alQv+gn8yuCCtLyKX4lrDycMY3P
 HqHwNnLeB8fRW68FzUlkLRvzv2ooX1wBtFYJsXn89La+4TMajaPbwdNTkJGY9DWJ
 MAL2WcEHBPYviu8mUVKvQA3hC5S53Rg1BzY+mb1H4Nur+wfqckIOIrZxT/6rDZwR
 rrmWgHIoozjvU6nVBhy8hy0RAYCFUwFOKo4KJF9H1lTeVG75Vw32hDYtB1htN1xw
 YxzQdgjB+X3DC7JRbby1LGAUW6xcJgq5RiDuaVBk/l6mBjcqYhmOvXTwGgUl6rcm
 kaCJyfUt8qH2aEVw5Yx0igssYSjcCNvRjGSE39dPi/8R8Qw5DWKTho2/SJ45xFmV
 g6SvpgUCNtx5VrnzGokduxqrLUcTztw9zVxrPOGFVmOy8cec3NTA368++VDmYo+u
 0JEQ5rXrpePaAw3nPAkmkzNDM32MGe7D4iBQKEeYBttkf4gxr0WbxgE1+b2aI5an
 zesf7VEoGK4x1vsNPaAafuEtFypbPjPqIS8A6/SnBwiN8OjaskLHBC5O1zQpLsxv
 MhiF6zaLnpUHZkUj7h2a1tGM5FPGYT+dm1NTMv7gafl07ctux9ojvOZfDNUWp4+u
 QPYmmc6CbG7TzbE6H8RC
 =pg9D
 -----END PGP SIGNATURE-----

Merge tag 'drm-fixes-2019-05-31' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Nothing too crazy, pretty quiet, maybe too quiet.

  amdgpu:
   - a fixed version of the raven firmware fix we previously reverted
   - stolen memory fix

  imx:
   - regression fix

  qxl:
   - remove a bad warning

  etnaviv:
   - VM locking fix"

* tag 'drm-fixes-2019-05-31' of git://anongit.freedesktop.org/drm/drm:
  drm/amdgpu: reserve stollen vram for raven series
  drm/etnaviv: lock MMU while dumping core
  drm/imx: ipuv3-plane: fix atomic update status query for non-plus i.MX6Q
  drm/qxl: drop WARN_ONCE()
  drm/amd/display: Don't load DMCU for Raven 1 (v2)
commit ca19180496
Linus Torvalds, 2019-05-31 08:14:16 -07:00
6 changed files with 24 additions and 11 deletions

@@ -624,9 +624,8 @@ static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
          */
         switch (adev->asic_type) {
         case CHIP_VEGA10:
-                return true;
         case CHIP_RAVEN:
-                return (adev->pdev->device == 0x15d8);
+                return true;
         case CHIP_VEGA12:
         case CHIP_VEGA20:
         default:
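
The fix relies on plain C switch fall-through: with the early "return true;" under CHIP_VEGA10 removed, CHIP_VEGA10 and CHIP_RAVEN now share the same "return true;", so the whole Raven series keeps its stolen VRAM reserved instead of only device ID 0x15d8. A minimal standalone sketch of that decision shape, using made-up enum and function names rather than the real amdgpu types:

#include <stdbool.h>
#include <stdio.h>

/* Made-up stand-ins for the amdgpu ASIC type enum. */
enum asic_type { VEGA10, RAVEN, VEGA12, VEGA20 };

/* Simplified model: VEGA10 and RAVEN fall through to the same branch,
 * so both always keep the stolen memory reservation. */
static bool keep_stolen_memory(enum asic_type type)
{
        switch (type) {
        case VEGA10:
        case RAVEN:
                return true;
        case VEGA12:
        case VEGA20:
        default:
                return false;
        }
}

int main(void)
{
        printf("RAVEN keeps stolen memory: %d\n", keep_stolen_memory(RAVEN));
        printf("VEGA12 keeps stolen memory: %d\n", keep_stolen_memory(VEGA12));
        return 0;
}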

@@ -29,6 +29,7 @@
 #include "dm_services_types.h"
 #include "dc.h"
 #include "dc/inc/core_types.h"
+#include "dal_asic_id.h"
 #include "vid.h"
 #include "amdgpu.h"
@@ -640,7 +641,7 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
 static int load_dmcu_fw(struct amdgpu_device *adev)
 {
-        const char *fw_name_dmcu;
+        const char *fw_name_dmcu = NULL;
         int r;
         const struct dmcu_firmware_header_v1_0 *hdr;
@@ -663,7 +664,14 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
         case CHIP_VEGA20:
                 return 0;
         case CHIP_RAVEN:
-                fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
+#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
+                if (ASICREV_IS_PICASSO(adev->external_rev_id))
+                        fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
+                else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
+                        fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
+                else
+#endif
+                        return 0;
                 break;
         default:
                 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);

@@ -125,6 +125,8 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
                 return;
         etnaviv_dump_core = false;
+        mutex_lock(&gpu->mmu->lock);
         mmu_size = etnaviv_iommu_dump_size(gpu->mmu);
         /* We always dump registers, mmu, ring and end marker */
@@ -167,6 +169,7 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
         iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
                                PAGE_KERNEL);
         if (!iter.start) {
+                mutex_unlock(&gpu->mmu->lock);
                 dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
                 return;
         }
@@ -234,6 +237,8 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
                                       obj->base.size);
         }
+        mutex_unlock(&gpu->mmu->lock);
         etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data);
         dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_KERNEL);
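
The etnaviv change takes gpu->mmu->lock before sizing the dump and holds it until the buffer objects have been written out, unlocking on the allocation-failure early return as well as on the normal path, so the MMU mappings cannot change while they are being captured. A minimal userspace sketch of the same hold-the-lock-across-the-dump pattern, using pthreads in place of the kernel mutex API (struct mmu and core_dump here are made-up stand-ins, not etnaviv code):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for the state the lock protects. */
struct mmu {
        pthread_mutex_t lock;
        size_t mapped_size;
};

/* Dump while holding the lock; unlock on every exit path,
 * mirroring the mutex_lock()/mutex_unlock() placement above. */
static void core_dump(struct mmu *mmu)
{
        pthread_mutex_lock(&mmu->lock);

        size_t size = mmu->mapped_size;         /* sized under the lock */
        char *buf = malloc(size);
        if (!buf) {
                pthread_mutex_unlock(&mmu->lock);       /* error-path unlock */
                fprintf(stderr, "failed to allocate dump buffer\n");
                return;
        }

        /* ... copy the mappings into buf while they cannot change ... */

        pthread_mutex_unlock(&mmu->lock);       /* normal-path unlock */
        printf("dumped %zu bytes\n", size);
        free(buf);
}

int main(void)
{
        struct mmu mmu = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .mapped_size = 4096,
        };

        core_dump(&mmu);
        return 0;
}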

@@ -605,7 +605,6 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
                 active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch);
                 ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba);
                 ipu_idmac_select_buffer(ipu_plane->ipu_ch, !active);
-                ipu_plane->next_buf = !active;
                 if (ipu_plane_separate_alpha(ipu_plane)) {
                         active = ipu_idmac_get_current_buffer(ipu_plane->alpha_ch);
                         ipu_cpmem_set_buffer(ipu_plane->alpha_ch, !active,
@@ -710,7 +709,6 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
         ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 1, eba);
         ipu_idmac_lock_enable(ipu_plane->ipu_ch, num_bursts);
         ipu_plane_enable(ipu_plane);
-        ipu_plane->next_buf = -1;
 }
 
 static const struct drm_plane_helper_funcs ipu_plane_helper_funcs = {
@@ -732,10 +730,15 @@ bool ipu_plane_atomic_update_pending(struct drm_plane *plane)
         if (ipu_state->use_pre)
                 return ipu_prg_channel_configure_pending(ipu_plane->ipu_ch);
-        else if (ipu_plane->next_buf >= 0)
-                return ipu_idmac_get_current_buffer(ipu_plane->ipu_ch) !=
-                       ipu_plane->next_buf;
+        /*
+         * Pretend no update is pending in the non-PRE/PRG case. For this to
+         * happen, an atomic update would have to be deferred until after the
+         * start of the next frame and simultaneously interrupt latency would
+         * have to be high enough to let the atomic update finish and issue an
+         * event before the previous end of frame interrupt handler can be
+         * executed.
+         */
         return false;
 }
 
 int ipu_planes_assign_pre(struct drm_device *dev,

@@ -27,7 +27,6 @@ struct ipu_plane {
         int                     dp_flow;
 
         bool                    disabling;
-        int                     next_buf;
 };
 
 struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,

@@ -77,6 +77,5 @@ void qxl_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
 int qxl_gem_prime_mmap(struct drm_gem_object *obj,
                        struct vm_area_struct *area)
 {
-        WARN_ONCE(1, "not implemented");
         return -ENOSYS;
 }