mirror of https://gitee.com/openkylin/linux.git
drm i915, amdgpu, vmwgfx, sun4i, panfrost, gma500 fixes. + revert build breakage
-----BEGIN PGP SIGNATURE-----
iQIcBAABAgAGBQJc58B8AAoJEAx081l5xIa+mVYP/R0nRBWy8KhQfTFIXavhDCwc
cEgMm8K/fssSpmvsFqgx6NDo2SjqaKAzl5fRk+ilSZTFFbA52f/Q5QItQFwBGRk2
ACJVU+e/zILWtJGyRudpk/eQLK63pft0H9HnBcrEdnyrknKe9iQt91XU+UUc2GRS
NJGNXZqP+aSwGHfBFxtlmpWgEzcS+iwqcLC8iRtU67WCiOJls50x8pC0awWFpw9V
SDen8x6LP0PmcUiJqz4rWLa3/UMH4lmaT14DulPkZBQjaN1Sm3J7+jO4d2fz2qQL
YmchtMSxQhfxbon6vxJNlDFqDRy7X+/47nRLToKp5biwGYUa9vp7MWgp3vhc4/Tk
LzwYvGhYq81J9NnAqr96FQGStXWzThamjaV6aWbKJ8zwlSki4zPxi5YKZ+xbSVhm
aOHjC57cgv98ppg24mHd7smAoHdCePDQz/fB1KNSrAXTdit323LoRiOKHYMyYMGR
dtAsDMt2WYaihVJSGK0HP0ZcSem6oGGFz1jRVap+zQ6suVCxdmpLqy0pn/s6QH3r
4Tjxai2iW8oYpL9nHet4SDO2SI4RNUev4vNh84Mr5SddK5N/yAC3QNiCP5ND6LJv
kAGoHsRJ7dnlXYtU4hKdT9LrJW7dj6+PkuyPZCPy/1y3qQoROPSVHPZwc98BYMYE
aUpQ0E+KvOD2l/CEa2FF
=4Wu/
-----END PGP SIGNATURE-----

Merge tag 'drm-fixes-2019-05-24-1' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Nothing too unusual here for rc2. Except the amdgpu DMCU firmware
  loading fix caused build breakage with a different set of Kconfig
  options. I've just reverted it for now until the AMD folks can
  rewrite it to avoid that problem.

  i915:
   - boosting fix
   - bump ready task fixes
   - GVT - reset fix, error return, TRTT handling fix

  amdgpu:
   - DMCU firmware loading fix
   - Polaris 10 pci id for kfd
   - picasso screen corruption fix
   - SR-IOV fixes
   - vega driver reload fixes
   - SMU locking fix
   - compute profile fix for kfd

  vmwgfx:
   - integer overflow fixes
   - dma sg fix

  sun4i:
   - HDMI phy fixes

  gma500:
   - LVDS detection fix

  panfrost:
   - devfreq selection fix"

* tag 'drm-fixes-2019-05-24-1' of git://anongit.freedesktop.org/drm/drm: (32 commits)
  Revert "drm/amd/display: Don't load DMCU for Raven 1"
  drm/panfrost: Select devfreq
  drm/gma500/cdv: Check vbt config bits when detecting lvds panels
  drm/vmwgfx: integer underflow in vmw_cmd_dx_set_shader() leading to an invalid read
  drm/vmwgfx: NULL pointer dereference from vmw_cmd_dx_view_define()
  drm/vmwgfx: Use the dma scatter-gather iterator to get dma addresses
  drm/vmwgfx: Fix compat mode shader operation
  drm/vmwgfx: Fix user space handle equal to zero
  drm/vmwgfx: Don't send drm sysfs hotplug events on initial master set
  drm/i915/gvt: Fix an error code in ppgtt_populate_spt_by_guest_entry()
  drm/i915/gvt: do not let TRTTE and 0x4dfc write passthrough to hardware
  drm/i915/gvt: add 0x4dfc to gen9 save-restore list
  drm/i915/gvt: Tiled Resources mmios are in-context mmios for gen9+
  drm/i915/gvt: use cmd to restore in-context mmios to hw for gen9 platform
  drm/i915/gvt: emit init breadcrumb for gvt request
  drm/amdkfd: Fix compute profile switching
  drm/amdgpu: skip fw pri bo alloc for SRIOV
  drm/amd/powerplay: fix locking in smu_feature_set_supported()
  drm/amdgpu/gmc9: set vram_width properly for SR-IOV
  drm/amdgpu/soc15: skip reset on init
  ...
commit a3b25d157d
@@ -877,13 +877,16 @@ static int psp_load_fw(struct amdgpu_device *adev)
     if (!psp->cmd)
         return -ENOMEM;
 
-    ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
-                    AMDGPU_GEM_DOMAIN_GTT,
-                    &psp->fw_pri_bo,
-                    &psp->fw_pri_mc_addr,
-                    &psp->fw_pri_buf);
-    if (ret)
-        goto failed;
+    /* this fw pri bo is not used under SRIOV */
+    if (!amdgpu_sriov_vf(psp->adev)) {
+        ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
+                        AMDGPU_GEM_DOMAIN_GTT,
+                        &psp->fw_pri_bo,
+                        &psp->fw_pri_mc_addr,
+                        &psp->fw_pri_buf);
+        if (ret)
+            goto failed;
+    }
 
     ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
                     AMDGPU_GEM_DOMAIN_VRAM,
@@ -626,6 +626,7 @@ static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
     case CHIP_VEGA10:
         return true;
     case CHIP_RAVEN:
+        return (adev->pdev->device == 0x15d8);
     case CHIP_VEGA12:
     case CHIP_VEGA20:
     default:
@@ -812,8 +813,16 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
     int chansize, numchan;
     int r;
 
-    if (amdgpu_emu_mode != 1)
+    if (amdgpu_sriov_vf(adev)) {
+        /* For Vega10 SR-IOV, vram_width can't be read from ATOM as RAVEN,
+         * and DF related registers is not readable, seems hardcord is the
+         * only way to set the correct vram_width
+         */
+        adev->gmc.vram_width = 2048;
+    } else if (amdgpu_emu_mode != 1) {
         adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
+    }
 
     if (!adev->gmc.vram_width) {
         /* hbm memory channel size */
         if (adev->flags & AMD_IS_APU)
@@ -730,6 +730,11 @@ static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
 {
     u32 sol_reg;
 
+    /* Just return false for soc15 GPUs. Reset does not seem to
+     * be necessary.
+     */
+    return false;
+
     if (adev->flags & AMD_IS_APU)
         return false;
 
@@ -355,6 +355,7 @@ static const struct kfd_deviceid supported_devices[] = {
     { 0x67CF, &polaris10_device_info },    /* Polaris10 */
     { 0x67D0, &polaris10_vf_device_info }, /* Polaris10 vf*/
     { 0x67DF, &polaris10_device_info },    /* Polaris10 */
+    { 0x6FDF, &polaris10_device_info },    /* Polaris10 */
     { 0x67E0, &polaris11_device_info },    /* Polaris11 */
     { 0x67E1, &polaris11_device_info },    /* Polaris11 */
     { 0x67E3, &polaris11_device_info },    /* Polaris11 */
@@ -462,6 +463,7 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
     kfd->pdev = pdev;
     kfd->init_complete = false;
     kfd->kfd2kgd = f2g;
+    atomic_set(&kfd->compute_profile, 0);
 
     mutex_init(&kfd->doorbell_mutex);
     memset(&kfd->doorbell_available_index, 0,
@@ -1036,6 +1038,21 @@ void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
     atomic_inc(&kfd->sram_ecc_flag);
 }
 
+void kfd_inc_compute_active(struct kfd_dev *kfd)
+{
+    if (atomic_inc_return(&kfd->compute_profile) == 1)
+        amdgpu_amdkfd_set_compute_idle(kfd->kgd, false);
+}
+
+void kfd_dec_compute_active(struct kfd_dev *kfd)
+{
+    int count = atomic_dec_return(&kfd->compute_profile);
+
+    if (count == 0)
+        amdgpu_amdkfd_set_compute_idle(kfd->kgd, true);
+    WARN_ONCE(count < 0, "Compute profile ref. count error");
+}
+
 #if defined(CONFIG_DEBUG_FS)
 
 /* This function will send a package to HIQ to hang the HWS
@@ -811,8 +811,8 @@ static int register_process(struct device_queue_manager *dqm,
 
     retval = dqm->asic_ops.update_qpd(dqm, qpd);
 
-    if (dqm->processes_count++ == 0)
-        amdgpu_amdkfd_set_compute_idle(dqm->dev->kgd, false);
+    dqm->processes_count++;
+    kfd_inc_compute_active(dqm->dev);
 
     dqm_unlock(dqm);
 
@@ -835,9 +835,8 @@ static int unregister_process(struct device_queue_manager *dqm,
         if (qpd == cur->qpd) {
             list_del(&cur->list);
             kfree(cur);
-            if (--dqm->processes_count == 0)
-                amdgpu_amdkfd_set_compute_idle(
-                    dqm->dev->kgd, true);
+            dqm->processes_count--;
+            kfd_dec_compute_active(dqm->dev);
             goto out;
         }
     }
@@ -1539,6 +1538,7 @@ static int process_termination_nocpsch(struct device_queue_manager *dqm,
             list_del(&cur->list);
             kfree(cur);
             dqm->processes_count--;
+            kfd_dec_compute_active(dqm->dev);
             break;
         }
     }
@@ -1626,6 +1626,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
             list_del(&cur->list);
             kfree(cur);
             dqm->processes_count--;
+            kfd_dec_compute_active(dqm->dev);
             break;
         }
     }
@@ -279,6 +279,9 @@ struct kfd_dev {
 
     /* SRAM ECC flag */
     atomic_t sram_ecc_flag;
+
+    /* Compute Profile ref. count */
+    atomic_t compute_profile;
 };
 
 enum kfd_mempool {
@@ -978,6 +981,10 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p);
 
 bool kfd_is_locked(void);
 
+/* Compute profile */
+void kfd_inc_compute_active(struct kfd_dev *dev);
+void kfd_dec_compute_active(struct kfd_dev *dev);
+
 /* Debugfs */
 #if defined(CONFIG_DEBUG_FS)
 
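The kfd changes above replace open-coded processes_count checks with a per-device reference count that flips the GPU between the idle and compute profiles on the 0 -> 1 and 1 -> 0 transitions. Below is a minimal standalone sketch of that refcount-toggle pattern; the function names and the set_compute_idle() stub are illustrative stand-ins, not the kernel API.

/* Standalone sketch of the inc/dec compute-profile pattern; not kernel code. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int compute_profile;

static void set_compute_idle(bool idle) /* stand-in for amdgpu_amdkfd_set_compute_idle() */
{
    printf("compute idle: %s\n", idle ? "true" : "false");
}

static void inc_compute_active(void)
{
    /* Only the 0 -> 1 transition switches the device out of the idle profile. */
    if (atomic_fetch_add(&compute_profile, 1) + 1 == 1)
        set_compute_idle(false);
}

static void dec_compute_active(void)
{
    int count = atomic_fetch_sub(&compute_profile, 1) - 1;

    /* Only the 1 -> 0 transition switches back to the idle profile. */
    if (count == 0)
        set_compute_idle(true);
    if (count < 0)
        fprintf(stderr, "compute profile ref. count error\n");
}

int main(void)
{
    inc_compute_active();   /* idle -> compute */
    inc_compute_active();   /* no change */
    dec_compute_active();   /* no change */
    dec_compute_active();   /* compute -> idle */
    return 0;
}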
@@ -138,13 +138,14 @@
 #endif
 #define RAVEN_UNKNOWN 0xFF
 
-#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
-#define ASICREV_IS_RAVEN2(eChipRev) ((eChipRev >= RAVEN2_A0) && (eChipRev < 0xF0))
-#endif /* DCN1_01 */
 #define ASIC_REV_IS_RAVEN(eChipRev) ((eChipRev >= RAVEN_A0) && eChipRev < RAVEN_UNKNOWN)
 #define RAVEN1_F0 0xF0
 #define ASICREV_IS_RV1_F0(eChipRev) ((eChipRev >= RAVEN1_F0) && (eChipRev < RAVEN_UNKNOWN))
 
+#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
+#define ASICREV_IS_PICASSO(eChipRev) ((eChipRev >= PICASSO_A0) && (eChipRev < RAVEN2_A0))
+#define ASICREV_IS_RAVEN2(eChipRev) ((eChipRev >= RAVEN2_A0) && (eChipRev < 0xF0))
+#endif /* DCN1_01 */
+
 #define FAMILY_RV 142 /* DCN 1*/
 
@@ -280,7 +280,7 @@ int smu_feature_set_supported(struct smu_context *smu, int feature_id,
 
     WARN_ON(feature_id > feature->feature_num);
 
-    mutex_unlock(&feature->mutex);
+    mutex_lock(&feature->mutex);
     if (enable)
         test_and_set_bit(feature_id, feature->supported);
     else
@@ -594,6 +594,9 @@ void cdv_intel_lvds_init(struct drm_device *dev,
     int pipe;
     u8 pin;
 
+    if (!dev_priv->lvds_enabled_in_vbt)
+        return;
+
     pin = GMBUS_PORT_PANEL;
     if (!lvds_is_present_in_vbt(dev, &pin)) {
         DRM_DEBUG_KMS("LVDS is not present in VBT\n");
@@ -436,6 +436,9 @@ parse_driver_features(struct drm_psb_private *dev_priv,
     if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
         dev_priv->edp.support = 1;
 
+    dev_priv->lvds_enabled_in_vbt = driver->lvds_config != 0;
+    DRM_DEBUG_KMS("LVDS VBT config bits: 0x%x\n", driver->lvds_config);
+
     /* This bit means to use 96Mhz for DPLL_A or not */
     if (driver->primary_lfp_id)
         dev_priv->dplla_96mhz = true;
@@ -537,6 +537,7 @@ struct drm_psb_private {
     int lvds_ssc_freq;
     bool is_lvds_on;
     bool is_mipi_on;
+    bool lvds_enabled_in_vbt;
     u32 mipi_ctrl_display;
 
     unsigned int core_freq;
@@ -896,12 +896,16 @@ static int cmd_reg_handler(struct parser_exec_state *s,
     }
 
     /* TODO
-     * Right now only scan LRI command on KBL and in inhibit context.
-     * It's good enough to support initializing mmio by lri command in
-     * vgpu inhibit context on KBL.
+     * In order to let workload with inhibit context to generate
+     * correct image data into memory, vregs values will be loaded to
+     * hw via LRIs in the workload with inhibit context. But as
+     * indirect context is loaded prior to LRIs in workload, we don't
+     * want reg values specified in indirect context overwritten by
+     * LRIs in workloads. So, when scanning an indirect context, we
+     * update reg values in it into vregs, so LRIs in workload with
+     * inhibit context will restore with correct values
      */
-    if ((IS_KABYLAKE(s->vgpu->gvt->dev_priv)
-        || IS_COFFEELAKE(s->vgpu->gvt->dev_priv)) &&
+    if (IS_GEN(gvt->dev_priv, 9) &&
         intel_gvt_mmio_is_in_ctx(gvt, offset) &&
         !strncmp(cmd, "lri", 3)) {
         intel_gvt_hypervisor_read_gpa(s->vgpu,
@@ -1076,8 +1076,10 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
     } else {
         int type = get_next_pt_type(we->type);
 
-        if (!gtt_type_is_pt(type))
+        if (!gtt_type_is_pt(type)) {
+            ret = -EINVAL;
             goto err;
+        }
 
         spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips);
         if (IS_ERR(spt)) {
@@ -1364,7 +1364,6 @@ static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
 static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset,
         void *p_data, unsigned int bytes)
 {
-    struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
     u32 trtte = *(u32 *)p_data;
 
     if ((trtte & 1) && (trtte & (1 << 1)) == 0) {
@@ -1373,11 +1372,6 @@ static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset,
         return -EINVAL;
     }
     write_vreg(vgpu, offset, p_data, bytes);
-    /* TRTTE is not per-context */
-
-    mmio_hw_access_pre(dev_priv);
-    I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset));
-    mmio_hw_access_post(dev_priv);
 
     return 0;
 }
@@ -1385,15 +1379,6 @@ static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset,
 static int gen9_trtt_chicken_write(struct intel_vgpu *vgpu, unsigned int offset,
         void *p_data, unsigned int bytes)
 {
-    struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-    u32 val = *(u32 *)p_data;
-
-    if (val & 1) {
-        /* unblock hw logic */
-        mmio_hw_access_pre(dev_priv);
-        I915_WRITE(_MMIO(offset), val);
-        mmio_hw_access_post(dev_priv);
-    }
     write_vreg(vgpu, offset, p_data, bytes);
     return 0;
 }
@@ -108,12 +108,13 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
     {RCS0, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */
     {RCS0, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */
     {RCS0, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */
-    {RCS0, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */
-    {RCS0, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */
-    {RCS0, TRNULLDETCT, 0, false}, /* 0x4de8 */
-    {RCS0, TRINVTILEDETCT, 0, false}, /* 0x4dec */
-    {RCS0, TRVADR, 0, false}, /* 0x4df0 */
-    {RCS0, TRTTE, 0, false}, /* 0x4df4 */
+    {RCS0, TRVATTL3PTRDW(0), 0, true}, /* 0x4de0 */
+    {RCS0, TRVATTL3PTRDW(1), 0, true}, /* 0x4de4 */
+    {RCS0, TRNULLDETCT, 0, true}, /* 0x4de8 */
+    {RCS0, TRINVTILEDETCT, 0, true}, /* 0x4dec */
+    {RCS0, TRVADR, 0, true}, /* 0x4df0 */
+    {RCS0, TRTTE, 0, true}, /* 0x4df4 */
+    {RCS0, _MMIO(0x4dfc), 0, true},
 
     {BCS0, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
     {BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
@@ -392,10 +393,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
     if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
         return;
 
-    if (ring_id == RCS0 &&
-        (IS_KABYLAKE(dev_priv) ||
-         IS_BROXTON(dev_priv) ||
-         IS_COFFEELAKE(dev_priv)))
+    if (ring_id == RCS0 && IS_GEN(dev_priv, 9))
         return;
 
     if (!pre && !gen9_render_mocs.initialized)
@@ -470,11 +468,10 @@ static void switch_mmio(struct intel_vgpu *pre,
             continue;
         /*
          * No need to do save or restore of the mmio which is in context
-         * state image on kabylake, it's initialized by lri command and
+         * state image on gen9, it's initialized by lri command and
          * save or restore with context together.
          */
-        if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)
-            || IS_COFFEELAKE(dev_priv)) && mmio->in_context)
+        if (IS_GEN(dev_priv, 9) && mmio->in_context)
             continue;
 
         // save
@@ -298,12 +298,29 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
     struct i915_request *req = workload->req;
     void *shadow_ring_buffer_va;
     u32 *cs;
+    int err;
 
-    if ((IS_KABYLAKE(req->i915) || IS_BROXTON(req->i915)
-        || IS_COFFEELAKE(req->i915))
-        && is_inhibit_context(req->hw_context))
+    if (IS_GEN(req->i915, 9) && is_inhibit_context(req->hw_context))
         intel_vgpu_restore_inhibit_context(vgpu, req);
 
+    /*
+     * To track whether a request has started on HW, we can emit a
+     * breadcrumb at the beginning of the request and check its
+     * timeline's HWSP to see if the breadcrumb has advanced past the
+     * start of this request. Actually, the request must have the
+     * init_breadcrumb if its timeline set has_init_bread_crumb, or the
+     * scheduler might get a wrong state of it during reset. Since the
+     * requests from gvt always set the has_init_breadcrumb flag, here
+     * need to do the emit_init_breadcrumb for all the requests.
+     */
+    if (req->engine->emit_init_breadcrumb) {
+        err = req->engine->emit_init_breadcrumb(req);
+        if (err) {
+            gvt_vgpu_err("fail to emit init breadcrumb\n");
+            return err;
+        }
+    }
+
     /* allocate shadow ring buffer */
     cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
     if (IS_ERR(cs)) {
@@ -20,15 +20,14 @@ enum {
     I915_PRIORITY_INVALID = INT_MIN
 };
 
-#define I915_USER_PRIORITY_SHIFT 3
+#define I915_USER_PRIORITY_SHIFT 2
 #define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT)
 
 #define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT)
 #define I915_PRIORITY_MASK (I915_PRIORITY_COUNT - 1)
 
 #define I915_PRIORITY_WAIT		((u8)BIT(0))
-#define I915_PRIORITY_NEWCLIENT	((u8)BIT(1))
-#define I915_PRIORITY_NOSEMAPHORE	((u8)BIT(2))
+#define I915_PRIORITY_NOSEMAPHORE	((u8)BIT(1))
 
 #define __NO_PREEMPTION (I915_PRIORITY_WAIT)
 
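The header change above drops I915_PRIORITY_NEWCLIENT and shrinks the internal priority bits from 3 to 2: user priority is shifted left by I915_USER_PRIORITY_SHIFT and the bits below the shift carry internal flags. The standalone sketch below illustrates that packing; the macro names mirror the defines above, but the program itself is not i915 code.

/* Illustration of the user/internal priority packing used above; not i915 code. */
#include <stdio.h>

#define USER_PRIORITY_SHIFT 2
#define USER_PRIORITY(x)    ((x) << USER_PRIORITY_SHIFT)
#define PRIORITY_COUNT      (1 << USER_PRIORITY_SHIFT)
#define PRIORITY_MASK       (PRIORITY_COUNT - 1)

#define PRIORITY_WAIT          (1 << 0) /* internal bits live below the shift */
#define PRIORITY_NOSEMAPHORE   (1 << 1)

int main(void)
{
    int prio = USER_PRIORITY(3) | PRIORITY_WAIT;

    /* The user-visible priority and the internal flags unpack independently. */
    printf("user part: %d, internal flags: 0x%x\n",
           prio >> USER_PRIORITY_SHIFT, (unsigned)(prio & PRIORITY_MASK));
    return 0;
}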
@@ -502,15 +502,6 @@ void __i915_request_unsubmit(struct i915_request *request)
     /* We may be recursing from the signal callback of another i915 fence */
     spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
 
-    /*
-     * As we do not allow WAIT to preempt inflight requests,
-     * once we have executed a request, along with triggering
-     * any execution callbacks, we must preserve its ordering
-     * within the non-preemptible FIFO.
-     */
-    BUILD_BUG_ON(__NO_PREEMPTION & ~I915_PRIORITY_MASK); /* only internal */
-    request->sched.attr.priority |= __NO_PREEMPTION;
-
     if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
         i915_request_cancel_breadcrumb(request);
 
@@ -582,18 +573,7 @@ semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 
     switch (state) {
     case FENCE_COMPLETE:
-        /*
-         * We only check a small portion of our dependencies
-         * and so cannot guarantee that there remains no
-         * semaphore chain across all. Instead of opting
-         * for the full NOSEMAPHORE boost, we go for the
-         * smaller (but still preempting) boost of
-         * NEWCLIENT. This will be enough to boost over
-         * a busywaiting request (as that cannot be
-         * NEWCLIENT) without accidentally boosting
-         * a busywait over real work elsewhere.
-         */
-        i915_schedule_bump_priority(request, I915_PRIORITY_NEWCLIENT);
+        i915_schedule_bump_priority(request, I915_PRIORITY_NOSEMAPHORE);
         break;
 
     case FENCE_FREE:
@@ -874,12 +854,6 @@ emit_semaphore_wait(struct i915_request *to,
     if (err < 0)
         return err;
 
-    err = i915_sw_fence_await_dma_fence(&to->semaphore,
-                        &from->fence, 0,
-                        I915_FENCE_GFP);
-    if (err < 0)
-        return err;
-
     /* We need to pin the signaler's HWSP until we are finished reading. */
     err = i915_timeline_read_hwsp(from, to, &hwsp_offset);
     if (err)
@@ -945,8 +919,18 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
                         &from->fence, 0,
                         I915_FENCE_GFP);
     }
+    if (ret < 0)
+        return ret;
 
-    return ret < 0 ? ret : 0;
+    if (to->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN) {
+        ret = i915_sw_fence_await_dma_fence(&to->semaphore,
+                            &from->fence, 0,
+                            I915_FENCE_GFP);
+        if (ret < 0)
+            return ret;
+    }
+
+    return 0;
 }
 
 int
@@ -1237,7 +1221,7 @@ void i915_request_add(struct i915_request *request)
          * the bulk clients. (FQ_CODEL)
          */
         if (list_empty(&request->sched.signalers_list))
-            attr.priority |= I915_PRIORITY_NEWCLIENT;
+            attr.priority |= I915_PRIORITY_WAIT;
 
         engine->schedule(request, &attr);
     }
@@ -35,109 +35,6 @@ static inline bool node_signaled(const struct i915_sched_node *node)
     return i915_request_completed(node_to_request(node));
 }
 
-void i915_sched_node_init(struct i915_sched_node *node)
-{
-    INIT_LIST_HEAD(&node->signalers_list);
-    INIT_LIST_HEAD(&node->waiters_list);
-    INIT_LIST_HEAD(&node->link);
-    node->attr.priority = I915_PRIORITY_INVALID;
-    node->semaphores = 0;
-    node->flags = 0;
-}
-
-static struct i915_dependency *
-i915_dependency_alloc(void)
-{
-    return kmem_cache_alloc(global.slab_dependencies, GFP_KERNEL);
-}
-
-static void
-i915_dependency_free(struct i915_dependency *dep)
-{
-    kmem_cache_free(global.slab_dependencies, dep);
-}
-
-bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
-                      struct i915_sched_node *signal,
-                      struct i915_dependency *dep,
-                      unsigned long flags)
-{
-    bool ret = false;
-
-    spin_lock_irq(&schedule_lock);
-
-    if (!node_signaled(signal)) {
-        INIT_LIST_HEAD(&dep->dfs_link);
-        list_add(&dep->wait_link, &signal->waiters_list);
-        list_add(&dep->signal_link, &node->signalers_list);
-        dep->signaler = signal;
-        dep->flags = flags;
-
-        /* Keep track of whether anyone on this chain has a semaphore */
-        if (signal->flags & I915_SCHED_HAS_SEMAPHORE_CHAIN &&
-            !node_started(signal))
-            node->flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
-
-        ret = true;
-    }
-
-    spin_unlock_irq(&schedule_lock);
-
-    return ret;
-}
-
-int i915_sched_node_add_dependency(struct i915_sched_node *node,
-                   struct i915_sched_node *signal)
-{
-    struct i915_dependency *dep;
-
-    dep = i915_dependency_alloc();
-    if (!dep)
-        return -ENOMEM;
-
-    if (!__i915_sched_node_add_dependency(node, signal, dep,
-                          I915_DEPENDENCY_ALLOC))
-        i915_dependency_free(dep);
-
-    return 0;
-}
-
-void i915_sched_node_fini(struct i915_sched_node *node)
-{
-    struct i915_dependency *dep, *tmp;
-
-    GEM_BUG_ON(!list_empty(&node->link));
-
-    spin_lock_irq(&schedule_lock);
-
-    /*
-     * Everyone we depended upon (the fences we wait to be signaled)
-     * should retire before us and remove themselves from our list.
-     * However, retirement is run independently on each timeline and
-     * so we may be called out-of-order.
-     */
-    list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
-        GEM_BUG_ON(!node_signaled(dep->signaler));
-        GEM_BUG_ON(!list_empty(&dep->dfs_link));
-
-        list_del(&dep->wait_link);
-        if (dep->flags & I915_DEPENDENCY_ALLOC)
-            i915_dependency_free(dep);
-    }
-
-    /* Remove ourselves from everyone who depends upon us */
-    list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
-        GEM_BUG_ON(dep->signaler != node);
-        GEM_BUG_ON(!list_empty(&dep->dfs_link));
-
-        list_del(&dep->signal_link);
-        if (dep->flags & I915_DEPENDENCY_ALLOC)
-            i915_dependency_free(dep);
-    }
-
-    spin_unlock_irq(&schedule_lock);
-}
-
 static inline struct i915_priolist *to_priolist(struct rb_node *rb)
 {
     return rb_entry(rb, struct i915_priolist, node);
@@ -239,6 +136,11 @@ i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
     return &p->requests[idx];
 }
 
+void __i915_priolist_free(struct i915_priolist *p)
+{
+    kmem_cache_free(global.slab_priorities, p);
+}
+
 struct sched_cache {
     struct list_head *priolist;
 };
@@ -273,7 +175,7 @@ static bool inflight(const struct i915_request *rq,
     return active->hw_context == rq->hw_context;
 }
 
-static void __i915_schedule(struct i915_request *rq,
+static void __i915_schedule(struct i915_sched_node *node,
                 const struct i915_sched_attr *attr)
 {
     struct intel_engine_cs *engine;
@@ -287,13 +189,13 @@ static void __i915_schedule(struct i915_request *rq,
     lockdep_assert_held(&schedule_lock);
     GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
 
-    if (i915_request_completed(rq))
+    if (node_signaled(node))
         return;
 
-    if (prio <= READ_ONCE(rq->sched.attr.priority))
+    if (prio <= READ_ONCE(node->attr.priority))
         return;
 
-    stack.signaler = &rq->sched;
+    stack.signaler = node;
     list_add(&stack.dfs_link, &dfs);
 
     /*
@@ -344,9 +246,9 @@ static void __i915_schedule(struct i915_request *rq,
      * execlists_submit_request()), we can set our own priority and skip
      * acquiring the engine locks.
      */
-    if (rq->sched.attr.priority == I915_PRIORITY_INVALID) {
-        GEM_BUG_ON(!list_empty(&rq->sched.link));
-        rq->sched.attr = *attr;
+    if (node->attr.priority == I915_PRIORITY_INVALID) {
+        GEM_BUG_ON(!list_empty(&node->link));
+        node->attr = *attr;
 
         if (stack.dfs_link.next == stack.dfs_link.prev)
             return;
@@ -355,15 +257,14 @@ static void __i915_schedule(struct i915_request *rq,
     }
 
     memset(&cache, 0, sizeof(cache));
-    engine = rq->engine;
+    engine = node_to_request(node)->engine;
     spin_lock(&engine->timeline.lock);
 
     /* Fifo and depth-first replacement ensure our deps execute before us */
     list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
-        struct i915_sched_node *node = dep->signaler;
-
         INIT_LIST_HEAD(&dep->dfs_link);
 
+        node = dep->signaler;
         engine = sched_lock_engine(node, engine, &cache);
         lockdep_assert_held(&engine->timeline.lock);
 
@@ -413,13 +314,20 @@ static void __i915_schedule(struct i915_request *rq,
 void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
 {
     spin_lock_irq(&schedule_lock);
-    __i915_schedule(rq, attr);
+    __i915_schedule(&rq->sched, attr);
     spin_unlock_irq(&schedule_lock);
 }
 
+static void __bump_priority(struct i915_sched_node *node, unsigned int bump)
+{
+    struct i915_sched_attr attr = node->attr;
+
+    attr.priority |= bump;
+    __i915_schedule(node, &attr);
+}
+
 void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
 {
-    struct i915_sched_attr attr;
     unsigned long flags;
 
     GEM_BUG_ON(bump & ~I915_PRIORITY_MASK);
@@ -428,17 +336,122 @@ void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
         return;
 
     spin_lock_irqsave(&schedule_lock, flags);
-
-    attr = rq->sched.attr;
-    attr.priority |= bump;
-    __i915_schedule(rq, &attr);
-
+    __bump_priority(&rq->sched, bump);
     spin_unlock_irqrestore(&schedule_lock, flags);
 }
 
-void __i915_priolist_free(struct i915_priolist *p)
+void i915_sched_node_init(struct i915_sched_node *node)
 {
-    kmem_cache_free(global.slab_priorities, p);
+    INIT_LIST_HEAD(&node->signalers_list);
+    INIT_LIST_HEAD(&node->waiters_list);
+    INIT_LIST_HEAD(&node->link);
+    node->attr.priority = I915_PRIORITY_INVALID;
+    node->semaphores = 0;
+    node->flags = 0;
 }
 
+static struct i915_dependency *
+i915_dependency_alloc(void)
+{
+    return kmem_cache_alloc(global.slab_dependencies, GFP_KERNEL);
+}
+
+static void
+i915_dependency_free(struct i915_dependency *dep)
+{
+    kmem_cache_free(global.slab_dependencies, dep);
+}
+
+bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
+                      struct i915_sched_node *signal,
+                      struct i915_dependency *dep,
+                      unsigned long flags)
+{
+    bool ret = false;
+
+    spin_lock_irq(&schedule_lock);
+
+    if (!node_signaled(signal)) {
+        INIT_LIST_HEAD(&dep->dfs_link);
+        list_add(&dep->wait_link, &signal->waiters_list);
+        list_add(&dep->signal_link, &node->signalers_list);
+        dep->signaler = signal;
+        dep->flags = flags;
+
+        /* Keep track of whether anyone on this chain has a semaphore */
+        if (signal->flags & I915_SCHED_HAS_SEMAPHORE_CHAIN &&
+            !node_started(signal))
+            node->flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
+
+        /*
+         * As we do not allow WAIT to preempt inflight requests,
+         * once we have executed a request, along with triggering
+         * any execution callbacks, we must preserve its ordering
+         * within the non-preemptible FIFO.
+         */
+        BUILD_BUG_ON(__NO_PREEMPTION & ~I915_PRIORITY_MASK);
+        if (flags & I915_DEPENDENCY_EXTERNAL)
+            __bump_priority(signal, __NO_PREEMPTION);
+
+        ret = true;
+    }
+
+    spin_unlock_irq(&schedule_lock);
+
+    return ret;
+}
+
+int i915_sched_node_add_dependency(struct i915_sched_node *node,
+                   struct i915_sched_node *signal)
+{
+    struct i915_dependency *dep;
+
+    dep = i915_dependency_alloc();
+    if (!dep)
+        return -ENOMEM;
+
+    if (!__i915_sched_node_add_dependency(node, signal, dep,
+                          I915_DEPENDENCY_EXTERNAL |
+                          I915_DEPENDENCY_ALLOC))
+        i915_dependency_free(dep);
+
+    return 0;
+}
+
+void i915_sched_node_fini(struct i915_sched_node *node)
+{
+    struct i915_dependency *dep, *tmp;
+
+    GEM_BUG_ON(!list_empty(&node->link));
+
+    spin_lock_irq(&schedule_lock);
+
+    /*
+     * Everyone we depended upon (the fences we wait to be signaled)
+     * should retire before us and remove themselves from our list.
+     * However, retirement is run independently on each timeline and
+     * so we may be called out-of-order.
+     */
+    list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
+        GEM_BUG_ON(!node_signaled(dep->signaler));
+        GEM_BUG_ON(!list_empty(&dep->dfs_link));
+
+        list_del(&dep->wait_link);
+        if (dep->flags & I915_DEPENDENCY_ALLOC)
+            i915_dependency_free(dep);
+    }
+
+    /* Remove ourselves from everyone who depends upon us */
+    list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
+        GEM_BUG_ON(dep->signaler != node);
+        GEM_BUG_ON(!list_empty(&dep->dfs_link));
+
+        list_del(&dep->signal_link);
+        if (dep->flags & I915_DEPENDENCY_ALLOC)
+            i915_dependency_free(dep);
+    }
+
+    spin_unlock_irq(&schedule_lock);
+}
+
 static void i915_global_scheduler_shrink(void)
@@ -66,7 +66,8 @@ struct i915_dependency {
     struct list_head wait_link;
     struct list_head dfs_link;
     unsigned long flags;
-#define I915_DEPENDENCY_ALLOC BIT(0)
+#define I915_DEPENDENCY_ALLOC BIT(0)
+#define I915_DEPENDENCY_EXTERNAL BIT(1)
 };
 
 #endif /* _I915_SCHEDULER_TYPES_H_ */
@@ -164,7 +164,7 @@
 #define WA_TAIL_DWORDS 2
 #define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS)
 
-#define ACTIVE_PRIORITY (I915_PRIORITY_NEWCLIENT | I915_PRIORITY_NOSEMAPHORE)
+#define ACTIVE_PRIORITY (I915_PRIORITY_NOSEMAPHORE)
 
 static int execlists_context_deferred_alloc(struct intel_context *ce,
                         struct intel_engine_cs *engine);
@@ -99,12 +99,14 @@ static int live_busywait_preempt(void *arg)
     ctx_hi = kernel_context(i915);
     if (!ctx_hi)
         goto err_unlock;
-    ctx_hi->sched.priority = INT_MAX;
+    ctx_hi->sched.priority =
+        I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
 
     ctx_lo = kernel_context(i915);
     if (!ctx_lo)
         goto err_ctx_hi;
-    ctx_lo->sched.priority = INT_MIN;
+    ctx_lo->sched.priority =
+        I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
 
     obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
     if (IS_ERR(obj)) {
@@ -954,12 +956,14 @@ static int live_preempt_hang(void *arg)
     ctx_hi = kernel_context(i915);
     if (!ctx_hi)
         goto err_spin_lo;
-    ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
+    ctx_hi->sched.priority =
+        I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
 
     ctx_lo = kernel_context(i915);
     if (!ctx_lo)
         goto err_ctx_hi;
-    ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
+    ctx_lo->sched.priority =
+        I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
 
     for_each_engine(engine, i915, id) {
         struct i915_request *rq;
@@ -9,6 +9,7 @@ config DRM_PANFROST
     select IOMMU_SUPPORT
     select IOMMU_IO_PGTABLE_LPAE
     select DRM_GEM_SHMEM_HELPER
+    select PM_DEVFREQ
     help
       DRM driver for ARM Mali Midgard (T6xx, T7xx, T8xx) and
       Bifrost (G3x, G5x, G7x) GPUs.
@@ -140,8 +140,8 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
         return 0;
 
     ret = dev_pm_opp_of_add_table(&pfdev->pdev->dev);
-    if (ret == -ENODEV) /* Optional, continue without devfreq */
-        return 0;
+    if (ret)
+        return ret;
 
     panfrost_devfreq_reset(pfdev);
 
@@ -170,9 +170,6 @@ void panfrost_devfreq_resume(struct panfrost_device *pfdev)
 {
     int i;
 
-    if (!pfdev->devfreq.devfreq)
-        return;
-
     panfrost_devfreq_reset(pfdev);
     for (i = 0; i < NUM_JOB_SLOTS; i++)
         pfdev->devfreq.slot[i].busy = false;
@@ -182,9 +179,6 @@ void panfrost_devfreq_resume(struct panfrost_device *pfdev)
 
 void panfrost_devfreq_suspend(struct panfrost_device *pfdev)
 {
-    if (!pfdev->devfreq.devfreq)
-        return;
-
     devfreq_suspend_device(pfdev->devfreq.devfreq);
 }
 
@@ -194,9 +188,6 @@ static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev, i
     ktime_t now;
     ktime_t last;
 
-    if (!pfdev->devfreq.devfreq)
-        return;
-
     now = ktime_get();
     last = pfdev->devfreq.slot[slot].time_last_update;
 
@@ -457,8 +457,9 @@ static void sun6i_dsi_setup_inst_loop(struct sun6i_dsi *dsi,
     u16 delay = 50 - 1;
 
     if (device->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
-        delay = (mode->htotal - mode->hdisplay) * 150;
-        delay /= (mode->clock / 1000) * 8;
+        u32 hsync_porch = (mode->htotal - mode->hdisplay) * 150;
+
+        delay = (hsync_porch / ((mode->clock / 1000) * 8));
         delay -= 50;
     }
 
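The burst-mode branch above computes the start delay from the horizontal porch in a wider intermediate before subtracting 50. A worked example of that arithmetic follows; the htotal/hdisplay/clock numbers are invented for illustration, not taken from any real panel mode.

/* Worked example of the burst-mode delay formula above; the mode values are invented. */
#include <stdio.h>

int main(void)
{
    unsigned int htotal = 900, hdisplay = 720;  /* hypothetical timings */
    unsigned int clock_khz = 27000;             /* hypothetical pixel clock in kHz */

    unsigned int hsync_porch = (htotal - hdisplay) * 150;              /* 180 * 150 = 27000 */
    unsigned int delay = hsync_porch / ((clock_khz / 1000) * 8) - 50;  /* 27000 / 216 = 125, 125 - 50 = 75 */

    printf("start delay: %u\n", delay);
    return 0;
}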
@@ -293,7 +293,8 @@ static int sun8i_hdmi_phy_config_h3(struct dw_hdmi *hdmi,
              SUN8I_HDMI_PHY_ANA_CFG2_REG_BIGSW |
              SUN8I_HDMI_PHY_ANA_CFG2_REG_SLV(4);
         ana_cfg3_init |= SUN8I_HDMI_PHY_ANA_CFG3_REG_AMPCK(9) |
-                 SUN8I_HDMI_PHY_ANA_CFG3_REG_AMP(13);
+                 SUN8I_HDMI_PHY_ANA_CFG3_REG_AMP(13) |
+                 SUN8I_HDMI_PHY_ANA_CFG3_REG_EMP(3);
     }
 
     regmap_update_bits(phy->regs, SUN8I_HDMI_PHY_ANA_CFG1_REG,
@@ -672,22 +673,13 @@ int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
                 goto err_put_clk_pll0;
             }
         }
-
-        ret = sun8i_phy_clk_create(phy, dev,
-                       phy->variant->has_second_pll);
-        if (ret) {
-            dev_err(dev, "Couldn't create the PHY clock\n");
-            goto err_put_clk_pll1;
-        }
-
-        clk_prepare_enable(phy->clk_phy);
     }
 
     phy->rst_phy = of_reset_control_get_shared(node, "phy");
     if (IS_ERR(phy->rst_phy)) {
         dev_err(dev, "Could not get phy reset control\n");
         ret = PTR_ERR(phy->rst_phy);
-        goto err_disable_clk_phy;
+        goto err_put_clk_pll1;
     }
 
     ret = reset_control_deassert(phy->rst_phy);
@@ -708,18 +700,29 @@ int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
         goto err_disable_clk_bus;
     }
 
+    if (phy->variant->has_phy_clk) {
+        ret = sun8i_phy_clk_create(phy, dev,
+                       phy->variant->has_second_pll);
+        if (ret) {
+            dev_err(dev, "Couldn't create the PHY clock\n");
+            goto err_disable_clk_mod;
+        }
+
+        clk_prepare_enable(phy->clk_phy);
+    }
+
     hdmi->phy = phy;
 
     return 0;
 
+err_disable_clk_mod:
+    clk_disable_unprepare(phy->clk_mod);
 err_disable_clk_bus:
     clk_disable_unprepare(phy->clk_bus);
 err_deassert_rst_phy:
     reset_control_assert(phy->rst_phy);
 err_put_rst_phy:
     reset_control_put(phy->rst_phy);
-err_disable_clk_phy:
-    clk_disable_unprepare(phy->clk_phy);
 err_put_clk_pll1:
     clk_put(phy->clk_pll1);
 err_put_clk_pll0:
@@ -174,7 +174,7 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
     kref_init(&base->refcount);
     idr_preload(GFP_KERNEL);
     spin_lock(&tdev->object_lock);
-    ret = idr_alloc(&tdev->idr, base, 0, 0, GFP_NOWAIT);
+    ret = idr_alloc(&tdev->idr, base, 1, 0, GFP_NOWAIT);
     spin_unlock(&tdev->object_lock);
     idr_preload_end();
     if (ret < 0)
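The one-character change above asks idr_alloc() for IDs in the range [1, unlimited) instead of [0, unlimited), so a user-space handle of 0, commonly treated as "no object", can never be returned. A tiny standalone sketch of the same "reserve 0 as invalid" idea follows; it uses a trivial counter, not the idr or TTM API.

/* Sketch: reserve 0 as an "invalid handle" value by allocating IDs from 1 upward. */
#include <stdio.h>

#define INVALID_HANDLE 0U

static unsigned int next_handle = 1; /* start at 1 so 0 stays free as a sentinel */

static unsigned int handle_alloc(void)
{
    return next_handle++;
}

int main(void)
{
    unsigned int h = handle_alloc();

    if (h == INVALID_HANDLE)
        printf("no object\n");
    else
        printf("got handle %u\n", h);
    return 0;
}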
@@ -1239,7 +1239,13 @@ static int vmw_master_set(struct drm_device *dev,
     }
 
     dev_priv->active_master = vmaster;
-    drm_sysfs_hotplug_event(dev);
+
+    /*
+     * Inform a new master that the layout may have changed while
+     * it was gone.
+     */
+    if (!from_open)
+        drm_sysfs_hotplug_event(dev);
 
     return 0;
 }
@@ -296,7 +296,7 @@ struct vmw_sg_table {
 struct vmw_piter {
     struct page **pages;
     const dma_addr_t *addrs;
-    struct sg_page_iter iter;
+    struct sg_dma_page_iter iter;
     unsigned long i;
     unsigned long num_pages;
     bool (*next)(struct vmw_piter *);
@@ -2010,6 +2010,11 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
         return 0;
 
     if (cmd->body.shid != SVGA3D_INVALID_ID) {
+        /*
+         * This is the compat shader path - Per device guest-backed
+         * shaders, but user-space thinks it's per context host-
+         * backed shaders.
+         */
         res = vmw_shader_lookup(vmw_context_res_man(ctx),
                     cmd->body.shid, cmd->body.type);
         if (!IS_ERR(res)) {
@@ -2017,6 +2022,14 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
                     VMW_RES_DIRTY_NONE);
             if (unlikely(ret != 0))
                 return ret;
+
+            ret = vmw_resource_relocation_add
+                (sw_context, res,
+                 vmw_ptr_diff(sw_context->buf_start,
+                          &cmd->body.shid),
+                 vmw_res_rel_normal);
+            if (unlikely(ret != 0))
+                return ret;
         }
     }
 
@@ -2193,7 +2206,8 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
 
     cmd = container_of(header, typeof(*cmd), header);
 
-    if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
+    if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX ||
+        cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
         VMW_DEBUG_USER("Illegal shader type %u.\n",
                    (unsigned int) cmd->body.type);
         return -EINVAL;
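The vmwgfx check above adds a lower bound next to the existing upper bound, so an out-of-range shader type can no longer underflow the lookup derived from it. Below is a minimal standalone sketch of that validation pattern; the enum and names are placeholders, not the SVGA3D definitions.

/* Sketch of range-checking an untrusted enum before using it as an array index. */
#include <stdio.h>

enum shader_type { SHADERTYPE_MIN = 1, SHADERTYPE_VS = 1, SHADERTYPE_PS, SHADERTYPE_GS, SHADERTYPE_MAX };

static const char *const names[SHADERTYPE_MAX] = {
    [SHADERTYPE_VS] = "vertex", [SHADERTYPE_PS] = "pixel", [SHADERTYPE_GS] = "geometry",
};

static int lookup(unsigned int type)
{
    /* Reject both too-large and too-small values before indexing. */
    if (type >= SHADERTYPE_MAX || type < SHADERTYPE_MIN)
        return -1;
    printf("%s shader\n", names[type]);
    return 0;
}

int main(void)
{
    lookup(0);              /* rejected */
    lookup(SHADERTYPE_PS);  /* ok */
    return 0;
}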
@@ -2414,6 +2428,10 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
         return -EINVAL;
 
     cmd = container_of(header, typeof(*cmd), header);
+    if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
+        VMW_DEBUG_USER("Invalid surface id.\n");
+        return -EINVAL;
+    }
     ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                 VMW_RES_DIRTY_NONE, user_surface_converter,
                 &cmd->sid, &srf);
@@ -266,6 +266,9 @@ static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
 
 static bool __vmw_piter_sg_next(struct vmw_piter *viter)
 {
-    return __sg_page_iter_next(&viter->iter);
+    bool ret = __vmw_piter_non_sg_next(viter);
+
+    return __sg_page_iter_dma_next(&viter->iter) && ret;
 }
 
 
@@ -284,12 +286,6 @@ static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
     return viter->pages[viter->i];
 }
 
-static struct page *__vmw_piter_sg_page(struct vmw_piter *viter)
-{
-    return sg_page_iter_page(&viter->iter);
-}
-
-
 /**
  * Helper functions to return the DMA address of the current page.
  *
@@ -311,13 +307,7 @@ static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
 
 static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
 {
-    /*
-     * FIXME: This driver wrongly mixes DMA and CPU SG list iteration and
-     * needs revision. See
-     * https://lore.kernel.org/lkml/20190104223531.GA1705@ziepe.ca/
-     */
-    return sg_page_iter_dma_address(
-        container_of(&viter->iter, struct sg_dma_page_iter, base));
+    return sg_page_iter_dma_address(&viter->iter);
 }
 
 
@@ -336,26 +326,23 @@ void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
 {
     viter->i = p_offset - 1;
     viter->num_pages = vsgt->num_pages;
+    viter->page = &__vmw_piter_non_sg_page;
+    viter->pages = vsgt->pages;
     switch (vsgt->mode) {
     case vmw_dma_phys:
         viter->next = &__vmw_piter_non_sg_next;
         viter->dma_address = &__vmw_piter_phys_addr;
-        viter->page = &__vmw_piter_non_sg_page;
-        viter->pages = vsgt->pages;
         break;
     case vmw_dma_alloc_coherent:
         viter->next = &__vmw_piter_non_sg_next;
        viter->dma_address = &__vmw_piter_dma_addr;
-        viter->page = &__vmw_piter_non_sg_page;
         viter->addrs = vsgt->addrs;
-        viter->pages = vsgt->pages;
         break;
     case vmw_dma_map_populate:
     case vmw_dma_map_bind:
         viter->next = &__vmw_piter_sg_next;
         viter->dma_address = &__vmw_piter_sg_addr;
-        viter->page = &__vmw_piter_sg_page;
-        __sg_page_iter_start(&viter->iter, vsgt->sgt->sgl,
+        __sg_page_iter_start(&viter->iter.base, vsgt->sgt->sgl,
                      vsgt->sgt->orig_nents, p_offset);
         break;
     default:
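vmw_piter_start() above installs different next/dma_address callbacks depending on how the buffer is mapped, so callers can walk pages the same way regardless of backing. The standalone sketch below shows that function-pointer iterator pattern in simplified form; the types and callbacks are illustrative, not the vmwgfx structures.

/* Sketch of an iterator whose stepping/accessor functions are chosen at start time. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct piter {
    const unsigned long *addrs;
    size_t i, num;
    bool (*next)(struct piter *);
    unsigned long (*addr)(struct piter *);
};

static bool array_next(struct piter *p) { return ++p->i < p->num; }
static unsigned long array_addr(struct piter *p) { return p->addrs[p->i]; }

static void piter_start(struct piter *p, const unsigned long *addrs, size_t num)
{
    p->addrs = addrs;
    p->i = (size_t)-1;      /* first next() lands on element 0, like p_offset - 1 above */
    p->num = num;
    p->next = array_next;   /* a dma-sg backed buffer would install different callbacks */
    p->addr = array_addr;
}

int main(void)
{
    unsigned long dma[] = { 0x1000, 0x2000, 0x3000 };
    struct piter it;

    piter_start(&it, dma, 3);
    while (it.next(&it))
        printf("addr 0x%lx\n", it.addr(&it));
    return 0;
}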