i915, amdgpu, armada, sun4i and tegra fixes
Merge tag 'drm-fixes-2018-07-16-1' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:

 - two AGP fixes in here
 - a bunch of mostly amdgpu fixes
 - sun4i build fix
 - two armada fixes
 - some tegra fixes
 - one i915 core and one i915 gvt fix

* tag 'drm-fixes-2018-07-16-1' of git://anongit.freedesktop.org/drm/drm:
  drm/amdgpu/pp/smu7: use a local variable for toc indexing
  amd/dc/dce100: On dce100, set clocks to 0 on suspend
  drm/amd/display: Convert 10kHz clks from PPLib into kHz for Vega
  drm/amdgpu: Verify root PD is mapped into kernel address space (v4)
  drm/amd/display: fix invalid function table override
  drm/amdgpu: Reserve VM root shared fence slot for command submission (v3)
  Revert "drm/amd/display: Don't return ddc result and read_bytes in same return value"
  char: amd64-agp: Use 64-bit arithmetic instead of 32-bit
  char: agp: Change return type to vm_fault_t
  drm/i915: Fix hotplug irq ack on i965/g4x
  drm/armada: fix irq handling
  drm/armada: fix colorkey mode property
  drm/tegra: Fix comparison operator for buffer size
  gpu: host1x: Check whether size of unpin isn't 0
  gpu: host1x: Skip IOMMU initialization if firewall is enabled
  drm/sun4i: link in front-end code if needed
  drm/i915/gvt: update vreg on inhibit context lri command
commit 706bf68b43
@@ -11,7 +11,7 @@
 #include "agp.h"
 
-static int alpha_core_agp_vm_fault(struct vm_fault *vmf)
+static vm_fault_t alpha_core_agp_vm_fault(struct vm_fault *vmf)
 {
 	alpha_agp_info *agp = agp_bridge->dev_private_data;
 	dma_addr_t dma_addr;
@@ -156,7 +156,7 @@ static u64 amd64_configure(struct pci_dev *hammer, u64 gatt_table)
 
 	/* Address to map to */
 	pci_read_config_dword(hammer, AMD64_GARTAPERTUREBASE, &tmp);
-	aperturebase = tmp << 25;
+	aperturebase = (u64)tmp << 25;
 	aper_base = (aperturebase & PCI_BASE_ADDRESS_MEM_MASK);
 
 	enable_gart_translation(hammer, gatt_table);
@@ -277,7 +277,7 @@ static int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp, u16 cap)
 	pci_read_config_dword(nb, AMD64_GARTAPERTURECTL, &nb_order);
 	nb_order = (nb_order >> 1) & 7;
 	pci_read_config_dword(nb, AMD64_GARTAPERTUREBASE, &nb_base);
-	nb_aper = nb_base << 25;
+	nb_aper = (u64)nb_base << 25;
 
 	/* Northbridge seems to contain crap. Try the AGP bridge. */
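The two casts above are the whole amd64-agp fix: `tmp` and `nb_base` are 32-bit, and a 32-bit left shift by 25 discards any bits that would land above bit 31 before the assignment widens the result to u64. A standalone sketch of the failure mode (illustrative value, not driver code):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t tmp = 0x80;			/* made-up aperture field */
	uint64_t truncated = tmp << 25;		/* shift done in 32 bits: wraps to 0 */
	uint64_t widened = (uint64_t)tmp << 25;	/* widened first: 0x100000000 */

	printf("truncated = 0x%llx\n", (unsigned long long)truncated);
	printf("widened   = 0x%llx\n", (unsigned long long)widened);
	return 0;
}
```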
@@ -927,6 +927,10 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
 		r = amdgpu_bo_vm_update_pte(p);
 		if (r)
 			return r;
+
+		r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
+		if (r)
+			return r;
 	}
 
 	return amdgpu_cs_sync_rings(p);
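The new reservation_object_reserve_shared() call is an instance of the reserve-then-commit pattern: grab the fence slot while the submission path can still fail cleanly, so the later add-fence step cannot hit an allocation error at a point that is hard to unwind. A generic sketch of the pattern (stand-in types, not amdgpu code):

```c
#include <stdlib.h>

struct fence_list {
	void **slots;
	unsigned int count, capacity;
};

/* May fail; call it early, where an error can still be returned. */
static int fence_list_reserve(struct fence_list *list)
{
	if (list->count < list->capacity)
		return 0;
	void **tmp = realloc(list->slots,
			     (list->capacity + 1) * sizeof(*tmp));
	if (!tmp)
		return -1;
	list->slots = tmp;
	list->capacity++;
	return 0;
}

/* Cannot fail, provided a slot was reserved beforehand. */
static void fence_list_add(struct fence_list *list, void *fence)
{
	list->slots[list->count++] = fence;
}

int main(void)
{
	struct fence_list list = { 0 };
	if (fence_list_reserve(&list) == 0)
		fence_list_add(&list, (void *)0x1);
	free(list.slots);
	return 0;
}
```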
@@ -107,6 +107,9 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
 		return;
 	list_add_tail(&base->bo_list, &bo->va);
 
+	if (bo->tbo.type == ttm_bo_type_kernel)
+		list_move(&base->vm_status, &vm->relocated);
+
 	if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
 		return;
 
@@ -468,7 +471,6 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
 			pt->parent = amdgpu_bo_ref(parent->base.bo);
 
 			amdgpu_vm_bo_base_init(&entry->base, vm, pt);
-			list_move(&entry->base.vm_status, &vm->relocated);
 		}
 
 		if (level < AMDGPU_VM_PTB) {
@@ -83,22 +83,21 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
 	enum i2c_mot_mode mot = (msg->request & DP_AUX_I2C_MOT) ?
 		I2C_MOT_TRUE : I2C_MOT_FALSE;
 	enum ddc_result res;
-	uint32_t read_bytes = msg->size;
+	ssize_t read_bytes;
 
 	if (WARN_ON(msg->size > 16))
 		return -E2BIG;
 
 	switch (msg->request & ~DP_AUX_I2C_MOT) {
 	case DP_AUX_NATIVE_READ:
-		res = dal_ddc_service_read_dpcd_data(
+		read_bytes = dal_ddc_service_read_dpcd_data(
 			TO_DM_AUX(aux)->ddc_service,
 			false,
 			I2C_MOT_UNDEF,
 			msg->address,
 			msg->buffer,
-			msg->size,
-			&read_bytes);
-		break;
+			msg->size);
+		return read_bytes;
 	case DP_AUX_NATIVE_WRITE:
 		res = dal_ddc_service_write_dpcd_data(
 			TO_DM_AUX(aux)->ddc_service,
@@ -109,15 +108,14 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
 			msg->size);
 		break;
 	case DP_AUX_I2C_READ:
-		res = dal_ddc_service_read_dpcd_data(
+		read_bytes = dal_ddc_service_read_dpcd_data(
 			TO_DM_AUX(aux)->ddc_service,
 			true,
 			mot,
 			msg->address,
 			msg->buffer,
-			msg->size,
-			&read_bytes);
-		break;
+			msg->size);
+		return read_bytes;
 	case DP_AUX_I2C_WRITE:
 		res = dal_ddc_service_write_dpcd_data(
 			TO_DM_AUX(aux)->ddc_service,
@@ -139,9 +137,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
 		   r == DDC_RESULT_SUCESSFULL);
 #endif
 
-	if (res != DDC_RESULT_SUCESSFULL)
-		return -EIO;
-	return read_bytes;
+	return msg->size;
 }
 
 static enum drm_connector_status
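The revert restores the usual kernel transfer convention: a single ssize_t return that is either a negative errno or the byte count, instead of an enum status plus an out-parameter for the length. A minimal userspace illustration of that convention (hypothetical function, not DC code):

```c
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>	/* ssize_t */

static ssize_t demo_read(char *buf, size_t len)
{
	const char src[] = "payload";

	if (len == 0)
		return -EINVAL;	/* error and length share one channel */

	size_t n = len < sizeof(src) ? len : sizeof(src);
	memcpy(buf, src, n);
	return (ssize_t)n;	/* success: number of bytes transferred */
}

int main(void)
{
	char buf[16];
	ssize_t ret = demo_read(buf, sizeof(buf));

	if (ret < 0)
		printf("error: %zd\n", ret);
	else
		printf("read %zd bytes\n", ret);
	return 0;
}
```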
@@ -255,8 +255,9 @@ static void pp_to_dc_clock_levels_with_latency(
 			DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
 
 	for (i = 0; i < clk_level_info->num_levels; i++) {
-		DRM_DEBUG("DM_PPLIB:\t %d\n", pp_clks->data[i].clocks_in_khz);
-		clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
+		DRM_DEBUG("DM_PPLIB:\t %d in 10kHz\n", pp_clks->data[i].clocks_in_khz);
+		/* translate 10kHz to kHz */
+		clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz * 10;
 		clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us;
 	}
 }
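The multiply by 10 is a unit conversion at a module boundary: PPLib reports Vega clock levels in 10 kHz units while DC consumes kHz. A tiny sketch with made-up values:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* PPLib-style values in 10 kHz units: 60000 => 600 MHz */
	uint32_t levels_in_10khz[] = { 30000, 60000, 96000 };
	uint32_t levels_in_khz[3];

	for (int i = 0; i < 3; i++) {
		levels_in_khz[i] = levels_in_10khz[i] * 10;
		printf("level %d: %u kHz\n", i, levels_in_khz[i]);
	}
	return 0;
}
```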
@@ -629,14 +629,13 @@ bool dal_ddc_service_query_ddc_data(
 	return ret;
 }
 
-enum ddc_result dal_ddc_service_read_dpcd_data(
+ssize_t dal_ddc_service_read_dpcd_data(
 	struct ddc_service *ddc,
 	bool i2c,
 	enum i2c_mot_mode mot,
 	uint32_t address,
 	uint8_t *data,
-	uint32_t len,
-	uint32_t *read)
+	uint32_t len)
 {
 	struct aux_payload read_payload = {
 		.i2c_over_aux = i2c,
@@ -653,8 +652,6 @@ enum ddc_result dal_ddc_service_read_dpcd_data(
 		.mot = mot
 	};
 
-	*read = 0;
-
 	if (len > DEFAULT_AUX_MAX_DATA_SIZE) {
 		BREAK_TO_DEBUGGER();
 		return DDC_RESULT_FAILED_INVALID_OPERATION;
@@ -664,8 +661,7 @@ enum ddc_result dal_ddc_service_read_dpcd_data(
 		ddc->ctx->i2caux,
 		ddc->ddc_pin,
 		&command)) {
-		*read = command.payloads->length;
-		return DDC_RESULT_SUCESSFULL;
+		return (ssize_t)command.payloads->length;
 	}
 
 	return DDC_RESULT_FAILED_OPERATION;
@@ -741,6 +741,29 @@ static struct mem_input_funcs dce_mi_funcs = {
 	.mem_input_is_flip_pending = dce_mi_is_flip_pending
 };
 
+static struct mem_input_funcs dce112_mi_funcs = {
+	.mem_input_program_display_marks = dce112_mi_program_display_marks,
+	.allocate_mem_input = dce_mi_allocate_dmif,
+	.free_mem_input = dce_mi_free_dmif,
+	.mem_input_program_surface_flip_and_addr =
+			dce_mi_program_surface_flip_and_addr,
+	.mem_input_program_pte_vm = dce_mi_program_pte_vm,
+	.mem_input_program_surface_config =
+			dce_mi_program_surface_config,
+	.mem_input_is_flip_pending = dce_mi_is_flip_pending
+};
+
+static struct mem_input_funcs dce120_mi_funcs = {
+	.mem_input_program_display_marks = dce120_mi_program_display_marks,
+	.allocate_mem_input = dce_mi_allocate_dmif,
+	.free_mem_input = dce_mi_free_dmif,
+	.mem_input_program_surface_flip_and_addr =
+			dce_mi_program_surface_flip_and_addr,
+	.mem_input_program_pte_vm = dce_mi_program_pte_vm,
+	.mem_input_program_surface_config =
+			dce_mi_program_surface_config,
+	.mem_input_is_flip_pending = dce_mi_is_flip_pending
+};
+
 void dce_mem_input_construct(
 	struct dce_mem_input *dce_mi,
@@ -769,7 +792,7 @@ void dce112_mem_input_construct(
 	const struct dce_mem_input_mask *mi_mask)
 {
 	dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask);
-	dce_mi->base.funcs->mem_input_program_display_marks = dce112_mi_program_display_marks;
+	dce_mi->base.funcs = &dce112_mi_funcs;
 }
 
 void dce120_mem_input_construct(
@@ -781,5 +804,5 @@ void dce120_mem_input_construct(
 	const struct dce_mem_input_mask *mi_mask)
 {
 	dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask);
-	dce_mi->base.funcs->mem_input_program_display_marks = dce120_mi_program_display_marks;
+	dce_mi->base.funcs = &dce120_mi_funcs;
 }
@@ -678,9 +678,22 @@ bool dce100_validate_bandwidth(
 	struct dc *dc,
 	struct dc_state *context)
 {
-	/* TODO implement when needed but for now hardcode max value*/
-	context->bw.dce.dispclk_khz = 681000;
-	context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER;
+	int i;
+	bool at_least_one_pipe = false;
+
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		if (context->res_ctx.pipe_ctx[i].stream)
+			at_least_one_pipe = true;
+	}
+
+	if (at_least_one_pipe) {
+		/* TODO implement when needed but for now hardcode max value*/
+		context->bw.dce.dispclk_khz = 681000;
+		context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER;
+	} else {
+		context->bw.dce.dispclk_khz = 0;
+		context->bw.dce.yclk_khz = 0;
+	}
 
 	return true;
 }
@@ -102,14 +102,13 @@ bool dal_ddc_service_query_ddc_data(
 	uint8_t *read_buf,
 	uint32_t read_size);
 
-enum ddc_result dal_ddc_service_read_dpcd_data(
+ssize_t dal_ddc_service_read_dpcd_data(
 	struct ddc_service *ddc,
 	bool i2c,
 	enum i2c_mot_mode mot,
 	uint32_t address,
 	uint8_t *data,
-	uint32_t len,
-	uint32_t *read);
+	uint32_t len);
 
 enum ddc_result dal_ddc_service_write_dpcd_data(
 	struct ddc_service *ddc,
@@ -381,6 +381,7 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
 	uint32_t fw_to_load;
 	int result = 0;
 	struct SMU_DRAMData_TOC *toc;
+	uint32_t num_entries = 0;
 
 	if (!hwmgr->reload_fw) {
 		pr_info("skip reloading...\n");
@@ -422,41 +423,41 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
 	}
 
 	toc = (struct SMU_DRAMData_TOC *)smu_data->header;
-	toc->num_entries = 0;
 	toc->structure_version = 1;
 
 	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
-			UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]),
+			UCODE_ID_RLC_G, &toc->entry[num_entries++]),
 			"Failed to Get Firmware Entry.", return -EINVAL);
 	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
-			UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]),
+			UCODE_ID_CP_CE, &toc->entry[num_entries++]),
 			"Failed to Get Firmware Entry.", return -EINVAL);
 	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
-			UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]),
+			UCODE_ID_CP_PFP, &toc->entry[num_entries++]),
 			"Failed to Get Firmware Entry.", return -EINVAL);
 	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
-			UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]),
+			UCODE_ID_CP_ME, &toc->entry[num_entries++]),
 			"Failed to Get Firmware Entry.", return -EINVAL);
 	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
-			UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]),
+			UCODE_ID_CP_MEC, &toc->entry[num_entries++]),
 			"Failed to Get Firmware Entry.", return -EINVAL);
 	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
-			UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]),
+			UCODE_ID_CP_MEC_JT1, &toc->entry[num_entries++]),
 			"Failed to Get Firmware Entry.", return -EINVAL);
 	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
-			UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]),
+			UCODE_ID_CP_MEC_JT2, &toc->entry[num_entries++]),
 			"Failed to Get Firmware Entry.", return -EINVAL);
 	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
-			UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]),
+			UCODE_ID_SDMA0, &toc->entry[num_entries++]),
 			"Failed to Get Firmware Entry.", return -EINVAL);
 	PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
-			UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
+			UCODE_ID_SDMA1, &toc->entry[num_entries++]),
 			"Failed to Get Firmware Entry.", return -EINVAL);
 	if (!hwmgr->not_vf)
 		PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
-				UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]),
+				UCODE_ID_MEC_STORAGE, &toc->entry[num_entries++]),
 				"Failed to Get Firmware Entry.", return -EINVAL);
 
+	toc->num_entries = num_entries;
 	smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, upper_32_bits(smu_data->header_buffer.mc_addr));
 	smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, lower_32_bits(smu_data->header_buffer.mc_addr));
 
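The smu7 change swaps `toc->num_entries++` for a local counter plus one final store. My reading (the diff itself does not state the rationale): the TOC sits in a DMA-visible header buffer, and read-modify-write cycles through such a mapping are expensive, so the running index is kept in a register-friendly local. Illustrative stand-in:

```c
#include <stdint.h>
#include <stdio.h>

struct toc {			/* stand-in, not the real SMU TOC */
	uint32_t num_entries;
	uint32_t entry[16];
};

int main(void)
{
	struct toc t = { 0 };	/* imagine this living in device memory */
	uint32_t num_entries = 0;

	/* bad: t.num_entries++ would read back device memory each time */
	t.entry[num_entries++] = 0xA;
	t.entry[num_entries++] = 0xB;
	t.entry[num_entries++] = 0xC;

	t.num_entries = num_entries;	/* single final store */
	printf("%u entries\n", t.num_entries);
	return 0;
}
```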
@@ -519,8 +519,9 @@ static irqreturn_t armada_drm_irq(int irq, void *arg)
 	u32 v, stat = readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);
 
 	/*
-	 * This is rediculous - rather than writing bits to clear, we
-	 * have to set the actual status register value.  This is racy.
+	 * Reading the ISR appears to clear bits provided CLEAN_SPU_IRQ_ISR
+	 * is set. Writing has some other effect to acknowledge the IRQ -
+	 * without this, we only get a single IRQ.
 	 */
 	writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
 
@@ -1116,16 +1117,22 @@ armada_drm_crtc_set_property(struct drm_crtc *crtc,
 static int armada_drm_crtc_enable_vblank(struct drm_crtc *crtc)
 {
 	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+	unsigned long flags;
 
+	spin_lock_irqsave(&dcrtc->irq_lock, flags);
 	armada_drm_crtc_enable_irq(dcrtc, VSYNC_IRQ_ENA);
+	spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
 	return 0;
 }
 
 static void armada_drm_crtc_disable_vblank(struct drm_crtc *crtc)
 {
 	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+	unsigned long flags;
 
+	spin_lock_irqsave(&dcrtc->irq_lock, flags);
 	armada_drm_crtc_disable_irq(dcrtc, VSYNC_IRQ_ENA);
+	spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
 }
 
 static const struct drm_crtc_funcs armada_crtc_funcs = {
@@ -1415,6 +1422,7 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
 		       CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1);
 	writel_relaxed(0x2032ff81, dcrtc->base + LCD_SPU_DMA_CTRL1);
 	writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
+	readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);
 	writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
 
 	ret = devm_request_irq(dev, irq, armada_drm_irq, 0, "armada_drm_crtc",
@@ -160,6 +160,7 @@ enum {
 	CFG_ALPHAM_GRA	= 0x1 << 16,
 	CFG_ALPHAM_CFG	= 0x2 << 16,
 	CFG_ALPHA_MASK	= 0xff << 8,
+#define CFG_ALPHA(x)	((x) << 8)
 	CFG_PIXCMD_MASK	= 0xff,
 };
 
@@ -28,6 +28,7 @@ struct armada_ovl_plane_properties {
 	uint16_t contrast;
 	uint16_t saturation;
 	uint32_t colorkey_mode;
+	uint32_t colorkey_enable;
 };
 
 struct armada_ovl_plane {
@@ -54,11 +55,13 @@ armada_ovl_update_attr(struct armada_ovl_plane_properties *prop,
 	writel_relaxed(0x00002000, dcrtc->base + LCD_SPU_CBSH_HUE);
 
 	spin_lock_irq(&dcrtc->irq_lock);
-	armada_updatel(prop->colorkey_mode | CFG_ALPHAM_GRA,
+	armada_updatel(prop->colorkey_mode,
 		       CFG_CKMODE_MASK | CFG_ALPHAM_MASK | CFG_ALPHA_MASK,
 		       dcrtc->base + LCD_SPU_DMA_CTRL1);
-
-	armada_updatel(ADV_GRACOLORKEY, 0, dcrtc->base + LCD_SPU_ADV_REG);
+	if (dcrtc->variant->has_spu_adv_reg)
+		armada_updatel(prop->colorkey_enable,
+			       ADV_GRACOLORKEY | ADV_VIDCOLORKEY,
+			       dcrtc->base + LCD_SPU_ADV_REG);
 	spin_unlock_irq(&dcrtc->irq_lock);
 }
 
@@ -321,8 +324,17 @@ static int armada_ovl_plane_set_property(struct drm_plane *plane,
 		dplane->prop.colorkey_vb |= K2B(val);
 		update_attr = true;
 	} else if (property == priv->colorkey_mode_prop) {
-		dplane->prop.colorkey_mode &= ~CFG_CKMODE_MASK;
-		dplane->prop.colorkey_mode |= CFG_CKMODE(val);
+		if (val == CKMODE_DISABLE) {
+			dplane->prop.colorkey_mode =
+				CFG_CKMODE(CKMODE_DISABLE) |
+				CFG_ALPHAM_CFG | CFG_ALPHA(255);
+			dplane->prop.colorkey_enable = 0;
+		} else {
+			dplane->prop.colorkey_mode =
+				CFG_CKMODE(val) |
+				CFG_ALPHAM_GRA | CFG_ALPHA(0);
+			dplane->prop.colorkey_enable = ADV_GRACOLORKEY;
+		}
 		update_attr = true;
 	} else if (property == priv->brightness_prop) {
 		dplane->prop.brightness = val - 256;
@@ -453,7 +465,9 @@ int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
 	dplane->prop.colorkey_yr = 0xfefefe00;
 	dplane->prop.colorkey_ug = 0x01010100;
 	dplane->prop.colorkey_vb = 0x01010100;
-	dplane->prop.colorkey_mode = CFG_CKMODE(CKMODE_RGB);
+	dplane->prop.colorkey_mode = CFG_CKMODE(CKMODE_RGB) |
+				     CFG_ALPHAM_GRA | CFG_ALPHA(0);
+	dplane->prop.colorkey_enable = ADV_GRACOLORKEY;
 	dplane->prop.brightness = 0;
 	dplane->prop.contrast = 0x4000;
 	dplane->prop.saturation = 0x4000;
@@ -862,6 +862,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
 {
 	struct intel_vgpu *vgpu = s->vgpu;
 	struct intel_gvt *gvt = vgpu->gvt;
+	u32 ctx_sr_ctl;
 
 	if (offset + 4 > gvt->device_info.mmio_size) {
 		gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
@@ -894,6 +895,28 @@ static int cmd_reg_handler(struct parser_exec_state *s,
 		patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
 	}
 
+	/* TODO
+	 * Right now only scan LRI command on KBL and in inhibit context.
+	 * It's good enough to support initializing mmio by lri command in
+	 * vgpu inhibit context on KBL.
+	 */
+	if (IS_KABYLAKE(s->vgpu->gvt->dev_priv) &&
+	    intel_gvt_mmio_is_in_ctx(gvt, offset) &&
+	    !strncmp(cmd, "lri", 3)) {
+		intel_gvt_hypervisor_read_gpa(s->vgpu,
+			s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4);
+		/* check inhibit context */
+		if (ctx_sr_ctl & 1) {
+			u32 data = cmd_val(s, index + 1);
+
+			if (intel_gvt_mmio_has_mode_mask(s->vgpu->gvt, offset))
+				intel_vgpu_mask_mmio_write(vgpu,
+							offset, &data, 4);
+			else
+				vgpu_vreg(vgpu, offset) = data;
+		}
+	}
+
 	/* TODO: Update the global mask if this MMIO is a masked-MMIO */
 	intel_gvt_mmio_set_cmd_accessed(gvt, offset);
 	return 0;
@@ -268,6 +268,8 @@ struct intel_gvt_mmio {
 #define F_CMD_ACCESSED	(1 << 5)
 /* This reg could be accessed by unaligned address */
 #define F_UNALIGN	(1 << 6)
+/* This reg is saved/restored in context */
+#define F_IN_CTX	(1 << 7)
 
 	struct gvt_mmio_block *mmio_block;
 	unsigned int num_mmio_block;
@@ -639,6 +641,33 @@ static inline bool intel_gvt_mmio_has_mode_mask(
 	return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK;
 }
 
+/**
+ * intel_gvt_mmio_is_in_ctx - check if a MMIO has in-ctx mask
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ * Returns:
+ * True if a MMIO has a in-context mask, false if it isn't.
+ *
+ */
+static inline bool intel_gvt_mmio_is_in_ctx(
+			struct intel_gvt *gvt, unsigned int offset)
+{
+	return gvt->mmio.mmio_attribute[offset >> 2] & F_IN_CTX;
+}
+
+/**
+ * intel_gvt_mmio_set_in_ctx - mask a MMIO in logical context
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ */
+static inline void intel_gvt_mmio_set_in_ctx(
+			struct intel_gvt *gvt, unsigned int offset)
+{
+	gvt->mmio.mmio_attribute[offset >> 2] |= F_IN_CTX;
+}
+
 int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
 int intel_gvt_debugfs_init(struct intel_gvt *gvt);
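Both new helpers index `mmio_attribute[offset >> 2]`: MMIO registers are 4 bytes apart, so the shift yields one attribute slot per register, with F_IN_CTX as one flag bit in that slot. A standalone sketch of the layout (sizes and register offsets are hypothetical, not GVT's):

```c
#include <stdint.h>
#include <stdio.h>

#define F_IN_CTX	(1 << 7)
#define MMIO_SIZE	0x10000

/* one attribute byte per 4-byte register */
static uint8_t mmio_attribute[MMIO_SIZE >> 2];

static void set_in_ctx(unsigned int offset)
{
	mmio_attribute[offset >> 2] |= F_IN_CTX;
}

static int is_in_ctx(unsigned int offset)
{
	return mmio_attribute[offset >> 2] & F_IN_CTX;
}

int main(void)
{
	set_in_ctx(0x229c);	/* hypothetical register offset */
	printf("0x229c in-ctx: %d\n", !!is_in_ctx(0x229c));
	printf("0x22a0 in-ctx: %d\n", !!is_in_ctx(0x22a0));
	return 0;
}
```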
@@ -3045,6 +3045,30 @@ int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 	return 0;
 }
 
+/**
+ * intel_vgpu_mask_mmio_write - write mask register
+ * @vgpu: a vGPU
+ * @offset: access offset
+ * @p_data: write data buffer
+ * @bytes: access data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	u32 mask, old_vreg;
+
+	old_vreg = vgpu_vreg(vgpu, offset);
+	write_vreg(vgpu, offset, p_data, bytes);
+	mask = vgpu_vreg(vgpu, offset) >> 16;
+	vgpu_vreg(vgpu, offset) = (old_vreg & ~mask) |
+				(vgpu_vreg(vgpu, offset) & mask);
+
+	return 0;
+}
+
 /**
  * intel_gvt_in_force_nonpriv_whitelist - if a mmio is in whitelist to be
  * force-nopriv register
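The mask logic above follows the masked-register convention used by many i915 registers: the upper 16 bits of the written value select which of the lower 16 bits take effect, and unselected bits keep their previous contents. A runnable demonstration of just that arithmetic (not GVT code):

```c
#include <stdint.h>
#include <stdio.h>

/* Masked write: bits set in the upper half of 'val' select which
 * bits in the lower half actually replace the old contents.
 */
static uint32_t masked_write(uint32_t old, uint32_t val)
{
	uint32_t mask = val >> 16;

	return (old & ~mask) | (val & mask);
}

int main(void)
{
	uint32_t reg = 0x0000ffff;

	/* request: clear bit 0, leave everything else untouched */
	reg = masked_write(reg, 0x00010000);
	printf("reg = 0x%08x\n", reg);	/* prints 0x0000fffe */
	return 0;
}
```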
@@ -98,4 +98,6 @@ bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
 int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
 			   void *pdata, unsigned int bytes, bool is_read);
 
+int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+				void *p_data, unsigned int bytes);
 #endif
@@ -581,7 +581,9 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
 
 	for (mmio = gvt->engine_mmio_list.mmio;
 	     i915_mmio_reg_valid(mmio->reg); mmio++) {
-		if (mmio->in_context)
+		if (mmio->in_context) {
 			gvt->engine_mmio_list.ctx_mmio_count[mmio->ring_id]++;
+			intel_gvt_mmio_set_in_ctx(gvt, mmio->reg.reg);
+		}
 	}
 }
@@ -1998,10 +1998,38 @@ static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
 
 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
 {
-	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+	u32 hotplug_status = 0, hotplug_status_mask;
+	int i;
+
+	if (IS_G4X(dev_priv) ||
+	    IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+		hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
+			DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
+	else
+		hotplug_status_mask = HOTPLUG_INT_STATUS_I915;
 
-	if (hotplug_status)
-		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+	/*
+	 * We absolutely have to clear all the pending interrupt
+	 * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
+	 * interrupt bit won't have an edge, and the i965/g4x
+	 * edge triggered IIR will not notice that an interrupt
+	 * is still pending. We can't use PORT_HOTPLUG_EN to
+	 * guarantee the edge as the act of toggling the enable
+	 * bits can itself generate a new hotplug interrupt :(
+	 */
+	for (i = 0; i < 10; i++) {
+		u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;
+
+		if (tmp == 0)
+			return hotplug_status;
+
+		hotplug_status |= tmp;
+		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+	}
+
+	WARN_ONCE(1,
+		  "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
+		  I915_READ(PORT_HOTPLUG_STAT));
 
 	return hotplug_status;
 }
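The fix turns a single ack into a bounded loop because acking PORT_HOTPLUG_STAT can itself re-latch status bits (see the comment in the hunk). A toy simulation of the loop shape, with an invented re-latching register (this is not i915 behavior, just the bounded ack-until-clear structure):

```c
#include <stdint.h>
#include <stdio.h>

static uint32_t latched = 0x5;	/* pretend status bits */
static int relatch_budget = 2;	/* hypothetical: re-fires twice */

static uint32_t read_status(void)
{
	return latched;
}

static void ack_status(void)
{
	/* acking may immediately re-latch a bit, up to the budget */
	latched = (relatch_budget-- > 0) ? 0x4 : 0;
}

int main(void)
{
	uint32_t accumulated = 0;

	for (int i = 0; i < 10; i++) {	/* bounded, like the i915 loop */
		uint32_t tmp = read_status();

		if (tmp == 0)
			break;

		accumulated |= tmp;
		ack_status();
	}
	printf("accumulated status: 0x%x\n", accumulated);
	return 0;
}
```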
@@ -32,7 +32,10 @@ obj-$(CONFIG_DRM_SUN4I)	+= sun4i-tcon.o
 obj-$(CONFIG_DRM_SUN4I)		+= sun4i_tv.o
 obj-$(CONFIG_DRM_SUN4I)		+= sun6i_drc.o
 
-obj-$(CONFIG_DRM_SUN4I_BACKEND)	+= sun4i-backend.o sun4i-frontend.o
+obj-$(CONFIG_DRM_SUN4I_BACKEND)	+= sun4i-backend.o
+ifdef CONFIG_DRM_SUN4I_BACKEND
+obj-$(CONFIG_DRM_SUN4I)		+= sun4i-frontend.o
+endif
 obj-$(CONFIG_DRM_SUN4I_HDMI)	+= sun4i-drm-hdmi.o
 obj-$(CONFIG_DRM_SUN6I_DSI)	+= sun6i-dsi.o
 obj-$(CONFIG_DRM_SUN8I_DW_HDMI)	+= sun8i-drm-hdmi.o
@@ -398,7 +398,7 @@ int tegra_drm_submit(struct tegra_drm_context *context,
 		 * unaligned offset is malformed and cause commands stream
 		 * corruption on the buffer address relocation.
 		 */
-		if (offset & 3 || offset >= obj->gem.size) {
+		if (offset & 3 || offset > obj->gem.size) {
 			err = -EINVAL;
 			goto fail;
 		}
@@ -218,6 +218,9 @@ static int host1x_probe(struct platform_device *pdev)
 			return err;
 	}
 
+	if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
+		goto skip_iommu;
+
 	host->group = iommu_group_get(&pdev->dev);
 	if (host->group) {
 		struct iommu_domain_geometry *geometry;
@@ -569,7 +569,8 @@ void host1x_job_unpin(struct host1x_job *job)
 	for (i = 0; i < job->num_unpins; i++) {
 		struct host1x_job_unpin_data *unpin = &job->unpins[i];
 
-		if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && host->domain) {
+		if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) &&
+		    unpin->size && host->domain) {
 			iommu_unmap(host->domain, job->addr_phys[i],
 				    unpin->size);
 			free_iova(&host->iova,