Merge tag 'drm-intel-next-2019-08-22' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

- More TGL enabling work (Michel, Jose, Lucas)
- Fixes on DP MST (Ville)
- More GTT and Execlists fixes and improvements (Chris)
- Code style clean-up on the hdmi and dp side (Jani)
- Fix a null pointer dereference (Xiong)
- Fix a couple of missing serializations in selftests (Chris)
- More vm locking rework (Chris)

drm-intel-next-2019-08-20:
- GuC and HuC related fixes and improvements (Daniele, Michal)
- Improve debug with more engine information and rework of debugfs files (Chris, Stuart)
- Simplify aperture address handling (Chris)
- Other fixes and cleanups around engines and execlists (Chris)
- Selftest fixes (Matt, Chris)
- Gen11 cache flush related fixes and improvements (Mika)
- More work around requests, timelines and locks to allow removal of struct_mutex (Chris)
- Add missing CML PCI ID (Anusha)
- More work on the new i915 buddy allocator (Matt)
- More headers, files and directories reorg (Daniele)
- Improvements to ggtt's get pdp (Mika)
- Fix GPU reset (Chris)
- Fix GPIO pins on gen11 (Matt)
- Fix HW readout for crtc_clock in HDMI mode (Imre)
- Sanitize display PHY during uninit to work around messages about HW state changes during suspend (Imre)
- Be defensive when starting vma activity (Chris)
- More Tiger Lake enabling work (Michel, Daniele, Lucas)
- Relax pd_used assertion (Chris)

drm-intel-next-2019-08-13:
- More Tiger Lake enabling work (Lucas, Jose, Tomasz, Michel, Jordan, Anusha, Vandita)
- More selftest organization reworks, fixes and improvements (Lucas, Chris)
- Simplifications in GEM code such as context and cleanup_early (Chris, Daniele)
- GuC and HuC related fixes and improvements (Daniele, Michal, Chris)
- Clean-up and fixes in headers, the Makefile, and generated files (Lucas, Jani)
- MOCS setup clean-up (Tvrtko)
- More Elkhartlake enabling work (Jose, Matt)
- Fix engine reset by clearing in-flight execlists requests (Chris)
- Fix possible memory leak in intel_hdcp_auth_downstream (Wei)
- Introduce intel_gt_runtime_suspend/resume (Daniele)
- PMU improvements (Tvrtko)
- Flush extra hard after writing relocations through the GTT (Chris)
- Documentation fixes (Michal, Chris)
- Report dma_resv allocation failure (Chris)
- Improvements around the shrinker (Chris)
- More improvements around engine handling (Chris)
- Also more s/dev_priv/i915 (Chris)
- Abstract display suspend/resume operations (Rodrigo/Jani)
- Drop VM_IO from GTT mappings (Chris)
- Fix some NULL vs IS_ERR conditions (Dan)
- General improvements in error state (Chris)
- Isolate i915_getparam_ioctl into its own file (Chris)
- Perf OA object refactor (Umesh)
- Ignore the central i915->kernel_context and allocate it directly (Chris)
- More fixes and improvements around wakerefs (Chris)
- Clean-up and improvements around debugfs (Chris)
- Free the imported shmemfs file for phys objects (Chris)
- Many other fixes and cleanups around engines and execlists (Chris)
- Split out uncore_mmio_debug (Daniele)
- Memory management fixes for blk and gtt (Matt)
- Introduce a buddy allocator to handle huge pages in the GTT (Matt)
- Fix ICL and TGL PG3 power domains (Anshuman)
- Extract GT IRQ handling to gt/ (Andi)
- Drop last_fence tracking in favor of whole vma->active (Chris)
- Make the overlay use i915_active instead of i915_active_request (Chris)
- Move misc display IRQ handling to its own function (Jose)
- Introduce a new _TRANS2() macro in preparation for upcoming PSR-related work (Jose)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190823051435.GA23885@intel.com
commit 29d9d76a72
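A large share of the hunks below mechanically convert open-coded intel_wait_for_register(&dev_priv->uncore, ...) polls into the new intel_de_wait_for_set()/intel_de_wait_for_clear() display-engine helpers. A minimal sketch of how those wrappers plausibly reduce to the old call, inferred only from the call sites in this diff (the exact definitions land elsewhere in the series):

/*
 * Hedged sketch, not the authoritative definitions: wrappers that hide
 * the uncore pointer for display code, matching the call sites below.
 */
static inline int
intel_de_wait_for_register(struct drm_i915_private *i915, i915_reg_t reg,
			   u32 mask, u32 value, unsigned int timeout)
{
	/* Same semantics as the open-coded calls being replaced. */
	return intel_wait_for_register(&i915->uncore, reg, mask, value, timeout);
}

static inline int
intel_de_wait_for_set(struct drm_i915_private *i915, i915_reg_t reg,
		      u32 mask, unsigned int timeout)
{
	/* Wait until all bits in @mask read back as set. */
	return intel_de_wait_for_register(i915, reg, mask, mask, timeout);
}

static inline int
intel_de_wait_for_clear(struct drm_i915_private *i915, i915_reg_t reg,
			u32 mask, unsigned int timeout)
{
	/* Wait until all bits in @mask read back as clear. */
	return intel_de_wait_for_register(i915, reg, mask, 0, timeout);
}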
@@ -91,9 +91,6 @@ Frontbuffer Tracking
.. kernel-doc:: drivers/gpu/drm/i915/display/intel_frontbuffer.c
   :internal:

.. kernel-doc:: drivers/gpu/drm/i915/i915_gem.c
   :functions: i915_gem_track_fb

Display FIFO Underrun Reporting
-------------------------------

@@ -41,13 +41,16 @@ subdir-ccflags-y += -I$(srctree)/$(src)
# core driver code
i915-y += i915_drv.o \
	  i915_irq.o \
	  i915_getparam.o \
	  i915_params.o \
	  i915_pci.o \
	  i915_scatterlist.o \
	  i915_suspend.o \
	  i915_sysfs.o \
	  i915_utils.o \
	  intel_csr.o \
	  intel_device_info.o \
	  intel_pch.o \
	  intel_pm.o \
	  intel_runtime_pm.o \
	  intel_sideband.o \

@@ -59,6 +62,7 @@ i915-y += \
	i915_memcpy.o \
	i915_mm.o \
	i915_sw_fence.o \
	i915_sw_fence_work.o \
	i915_syncmap.o \
	i915_user_extensions.o

@@ -72,9 +76,13 @@ gt-y += \
	gt/intel_breadcrumbs.o \
	gt/intel_context.o \
	gt/intel_engine_cs.o \
	gt/intel_engine_pool.o \
	gt/intel_engine_pm.o \
	gt/intel_engine_user.o \
	gt/intel_gt.o \
	gt/intel_gt_irq.o \
	gt/intel_gt_pm.o \
	gt/intel_gt_pm_irq.o \
	gt/intel_hangcheck.o \
	gt/intel_lrc.o \
	gt/intel_renderstate.o \

@@ -90,8 +98,6 @@ gt-y += \
	gt/gen7_renderstate.o \
	gt/gen8_renderstate.o \
	gt/gen9_renderstate.o
gt-$(CONFIG_DRM_I915_SELFTEST) += \
	gt/mock_engine.o
i915-y += $(gt-y)

# GEM (Graphics Execution Management) code

@@ -123,8 +129,8 @@ gem-y += \
i915-y += \
	$(gem-y) \
	i915_active.o \
	i915_buddy.o \
	i915_cmd_parser.o \
	i915_gem_batch_pool.o \
	i915_gem_evict.o \
	i915_gem_fence_reg.o \
	i915_gem_gtt.o \

@@ -25,7 +25,7 @@
 *
 */

#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_dvo_dev.h"

#define CH7017_TV_DISPLAY_MODE 0x00

@@ -26,7 +26,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

**************************************************************************/

#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_dvo_dev.h"

#define CH7xxx_REG_VID 0x4a

@@ -29,7 +29,7 @@
 *
 */

#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_dvo_dev.h"

/*

@@ -28,7 +28,7 @@

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_dvo_dev.h"

#define NS2501_VID 0x1305

@@ -26,7 +26,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

**************************************************************************/

#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_dvo_dev.h"

#define SIL164_VID 0x0001

@@ -25,7 +25,7 @@
 *
 */

#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_dvo_dev.h"

/* register definitions according to the TFP410 data sheet */

@@ -403,8 +403,8 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
		tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
		I915_WRITE(ICL_PORT_TX_DW2_GRP(phy), tmp);

		/* For EHL set latency optimization for PCS_DW1 lanes */
		if (IS_ELKHARTLAKE(dev_priv)) {
		/* For EHL, TGL, set latency optimization for PCS_DW1 lanes */
		if (IS_ELKHARTLAKE(dev_priv) || (INTEL_GEN(dev_priv) >= 12)) {
			tmp = I915_READ(ICL_PORT_PCS_DW1_AUX(phy));
			tmp &= ~LATENCY_OPTIM_MASK;
			tmp |= LATENCY_OPTIM_VAL(0);

@@ -530,18 +530,20 @@ static void gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder)
	 * a value '0' inside TA_PARAM_REGISTERS otherwise
	 * leave all fields at HW default values.
	 */
	if (intel_dsi_bitrate(intel_dsi) <= 800000) {
		for_each_dsi_port(port, intel_dsi->ports) {
			tmp = I915_READ(DPHY_TA_TIMING_PARAM(port));
			tmp &= ~TA_SURE_MASK;
			tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
			I915_WRITE(DPHY_TA_TIMING_PARAM(port), tmp);
	if (IS_GEN(dev_priv, 11)) {
		if (intel_dsi_bitrate(intel_dsi) <= 800000) {
			for_each_dsi_port(port, intel_dsi->ports) {
				tmp = I915_READ(DPHY_TA_TIMING_PARAM(port));
				tmp &= ~TA_SURE_MASK;
				tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
				I915_WRITE(DPHY_TA_TIMING_PARAM(port), tmp);

			/* shadow register inside display core */
			tmp = I915_READ(DSI_TA_TIMING_PARAM(port));
			tmp &= ~TA_SURE_MASK;
			tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
			I915_WRITE(DSI_TA_TIMING_PARAM(port), tmp);
				/* shadow register inside display core */
				tmp = I915_READ(DSI_TA_TIMING_PARAM(port));
				tmp &= ~TA_SURE_MASK;
				tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
				I915_WRITE(DSI_TA_TIMING_PARAM(port), tmp);
			}
		}
	}

@@ -605,7 +607,10 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
	I915_WRITE(ICL_DPCLKA_CFGCR0, val);

	for_each_dsi_phy(phy, intel_dsi->phys) {
		val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
		if (INTEL_GEN(dev_priv) >= 12)
			val |= ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
		else
			val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
	}
	I915_WRITE(ICL_DPCLKA_CFGCR0, val);

@@ -680,6 +685,11 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
		break;
	}

	if (INTEL_GEN(dev_priv) >= 12) {
		if (is_vid_mode(intel_dsi))
			tmp |= BLANKING_PACKET_ENABLE;
	}

	/* program DSI operation mode */
	if (is_vid_mode(intel_dsi)) {
		tmp &= ~OP_MODE_MASK;

@@ -862,6 +872,15 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
		dsi_trans = dsi_port_to_transcoder(port);
		I915_WRITE(VSYNCSHIFT(dsi_trans), vsync_shift);
	}

	/* program TRANS_VBLANK register, should be same as vtotal programmed */
	if (INTEL_GEN(dev_priv) >= 12) {
		for_each_dsi_port(port, intel_dsi->ports) {
			dsi_trans = dsi_port_to_transcoder(port);
			I915_WRITE(VBLANK(dsi_trans),
				   (vactive - 1) | ((vtotal - 1) << 16));
		}
	}
}

static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)

@@ -879,10 +898,8 @@ static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
		I915_WRITE(PIPECONF(dsi_trans), tmp);

		/* wait for transcoder to be enabled */
		if (intel_wait_for_register(&dev_priv->uncore,
					    PIPECONF(dsi_trans),
					    I965_PIPECONF_ACTIVE,
					    I965_PIPECONF_ACTIVE, 10))
		if (intel_de_wait_for_set(dev_priv, PIPECONF(dsi_trans),
					  I965_PIPECONF_ACTIVE, 10))
			DRM_ERROR("DSI transcoder not enabled\n");
	}
}

@@ -940,6 +957,8 @@ static void
gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	/* step 4a: power up all lanes of the DDI used by DSI */
	gen11_dsi_power_up_lanes(encoder);

@@ -962,7 +981,8 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder,
	gen11_dsi_configure_transcoder(encoder, pipe_config);

	/* Step 4l: Gate DDI clocks */
	gen11_dsi_gate_clocks(encoder);
	if (IS_GEN(dev_priv, 11))
		gen11_dsi_gate_clocks(encoder);
}

static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)

@@ -1058,9 +1078,8 @@ static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
		I915_WRITE(PIPECONF(dsi_trans), tmp);

		/* wait for transcoder to be disabled */
		if (intel_wait_for_register(&dev_priv->uncore,
					    PIPECONF(dsi_trans),
					    I965_PIPECONF_ACTIVE, 0, 50))
		if (intel_de_wait_for_clear(dev_priv, PIPECONF(dsi_trans),
					    I965_PIPECONF_ACTIVE, 50))
			DRM_ERROR("DSI trancoder not disabled\n");
	}
}

@@ -35,7 +35,7 @@
#include <drm/drm_plane_helper.h>

#include "intel_atomic.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_hdcp.h"
#include "intel_sprite.h"

@@ -35,8 +35,9 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>

#include "i915_trace.h"
#include "intel_atomic_plane.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_pm.h"
#include "intel_sprite.h"

@@ -29,7 +29,7 @@

#include "i915_drv.h"
#include "intel_audio.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_lpe_audio.h"

/**

@@ -1341,21 +1341,6 @@ static const u8 cnp_ddc_pin_map[] = {
};

static const u8 icp_ddc_pin_map[] = {
	[ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
	[ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
	[ICL_DDC_BUS_PORT_1] = GMBUS_PIN_9_TC1_ICP,
	[ICL_DDC_BUS_PORT_2] = GMBUS_PIN_10_TC2_ICP,
	[ICL_DDC_BUS_PORT_3] = GMBUS_PIN_11_TC3_ICP,
	[ICL_DDC_BUS_PORT_4] = GMBUS_PIN_12_TC4_ICP,
};

static const u8 mcc_ddc_pin_map[] = {
	[MCC_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
	[MCC_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
	[MCC_DDC_BUS_DDI_C] = GMBUS_PIN_9_TC1_ICP,
};

static const u8 tgp_ddc_pin_map[] = {
	[ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
	[ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
	[TGL_DDC_BUS_DDI_C] = GMBUS_PIN_3_BXT,

@@ -1372,13 +1357,7 @@ static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
	const u8 *ddc_pin_map;
	int n_entries;

	if (HAS_PCH_TGP(dev_priv)) {
		ddc_pin_map = tgp_ddc_pin_map;
		n_entries = ARRAY_SIZE(tgp_ddc_pin_map);
	} else if (HAS_PCH_MCC(dev_priv)) {
		ddc_pin_map = mcc_ddc_pin_map;
		n_entries = ARRAY_SIZE(mcc_ddc_pin_map);
	} else if (HAS_PCH_ICP(dev_priv)) {
	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) {
		ddc_pin_map = icp_ddc_pin_map;
		n_entries = ARRAY_SIZE(icp_ddc_pin_map);
	} else if (HAS_PCH_CNP(dev_priv)) {

@@ -6,7 +6,7 @@
#include <drm/drm_atomic_state_helper.h>

#include "intel_bw.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_sideband.h"

/* Parameters for Qclk Geyserville (QGV) */

@@ -322,6 +322,20 @@ static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv,
	return data_rate;
}

static struct intel_bw_state *
intel_atomic_get_bw_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct drm_private_state *bw_state;

	bw_state = drm_atomic_get_private_obj_state(&state->base,
						    &dev_priv->bw_obj);
	if (IS_ERR(bw_state))
		return ERR_CAST(bw_state);

	return to_intel_bw_state(bw_state);
}

int intel_bw_atomic_check(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);

@@ -8,7 +8,6 @@

#include <drm/drm_atomic.h>

#include "i915_drv.h"
#include "intel_display.h"

struct drm_i915_private;

@@ -24,20 +23,6 @@ struct intel_bw_state {

#define to_intel_bw_state(x) container_of((x), struct intel_bw_state, base)

static inline struct intel_bw_state *
intel_atomic_get_bw_state(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct drm_private_state *bw_state;

	bw_state = drm_atomic_get_private_obj_state(&state->base,
						    &dev_priv->bw_obj);
	if (IS_ERR(bw_state))
		return ERR_CAST(bw_state);

	return to_intel_bw_state(bw_state);
}

void intel_bw_init_hw(struct drm_i915_private *dev_priv);
int intel_bw_init(struct drm_i915_private *dev_priv);
int intel_bw_atomic_check(struct intel_atomic_state *state);

@@ -22,7 +22,7 @@
 */

#include "intel_cdclk.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_sideband.h"

/**

@@ -969,9 +969,7 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)

	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);

	if (intel_wait_for_register(&dev_priv->uncore,
				    LCPLL1_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
				    5))
	if (intel_de_wait_for_set(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 5))
		DRM_ERROR("DPLL0 not locked\n");

	dev_priv->cdclk.hw.vco = vco;

@@ -983,9 +981,7 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
static void skl_dpll0_disable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
	if (intel_wait_for_register(&dev_priv->uncore,
				    LCPLL1_CTL, LCPLL_PLL_LOCK, 0,
				    1))
	if (intel_de_wait_for_clear(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 1))
		DRM_ERROR("Couldn't disable DPLL0\n");

	dev_priv->cdclk.hw.vco = 0;

@@ -1309,9 +1305,8 @@ static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
	I915_WRITE(BXT_DE_PLL_ENABLE, 0);

	/* Timeout 200us */
	if (intel_wait_for_register(&dev_priv->uncore,
				    BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 0,
				    1))
	if (intel_de_wait_for_clear(dev_priv,
				    BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
		DRM_ERROR("timeout waiting for DE PLL unlock\n");

	dev_priv->cdclk.hw.vco = 0;

@@ -1330,11 +1325,8 @@ static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
	I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);

	/* Timeout 200us */
	if (intel_wait_for_register(&dev_priv->uncore,
				    BXT_DE_PLL_ENABLE,
				    BXT_DE_PLL_LOCK,
				    BXT_DE_PLL_LOCK,
				    1))
	if (intel_de_wait_for_set(dev_priv,
				  BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
		DRM_ERROR("timeout waiting for DE PLL lock\n");

	dev_priv->cdclk.hw.vco = vco;

@@ -23,7 +23,7 @@
 */

#include "intel_color.h"
#include "intel_drv.h"
#include "intel_display_types.h"

#define CTM_COEFF_SIGN (1ULL << 63)

@@ -4,7 +4,7 @@
 */

#include "intel_combo_phy.h"
#include "intel_drv.h"
#include "intel_display_types.h"

#define for_each_combo_phy(__dev_priv, __phy) \
	for ((__phy) = PHY_A; (__phy) < I915_MAX_PHYS; (__phy)++) \

@@ -33,7 +33,7 @@

#include "i915_drv.h"
#include "intel_connector.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_hdcp.h"

int intel_connector_init(struct intel_connector *connector)

@@ -118,7 +118,7 @@ int intel_connector_register(struct drm_connector *connector)
	if (ret)
		goto err;

	if (i915_inject_probe_failure()) {
	if (i915_inject_probe_failure(to_i915(connector->dev))) {
		ret = -EFAULT;
		goto err_backlight;
	}

@@ -38,7 +38,7 @@
#include "intel_connector.h"
#include "intel_crt.h"
#include "intel_ddi.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_fifo_underrun.h"
#include "intel_gmbus.h"
#include "intel_hotplug.h"

@@ -443,9 +443,9 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)

		I915_WRITE(crt->adpa_reg, adpa);

		if (intel_wait_for_register(&dev_priv->uncore,
		if (intel_de_wait_for_clear(dev_priv,
					    crt->adpa_reg,
					    ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0,
					    ADPA_CRT_HOTPLUG_FORCE_TRIGGER,
					    1000))
			DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");

@@ -497,10 +497,8 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)

	I915_WRITE(crt->adpa_reg, adpa);

	if (intel_wait_for_register(&dev_priv->uncore,
				    crt->adpa_reg,
				    ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0,
				    1000)) {
	if (intel_de_wait_for_clear(dev_priv, crt->adpa_reg,
				    ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 1000)) {
		DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
		I915_WRITE(crt->adpa_reg, save_adpa);
	}

@@ -550,9 +548,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
				      CRT_HOTPLUG_FORCE_DETECT,
				      CRT_HOTPLUG_FORCE_DETECT);
		/* wait for FORCE_DETECT to go off */
		if (intel_wait_for_register(&dev_priv->uncore, PORT_HOTPLUG_EN,
					    CRT_HOTPLUG_FORCE_DETECT, 0,
					    1000))
		if (intel_de_wait_for_clear(dev_priv, PORT_HOTPLUG_EN,
					    CRT_HOTPLUG_FORCE_DETECT, 1000))
			DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
	}

@@ -32,10 +32,10 @@
#include "intel_combo_phy.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dpio_phy.h"
#include "intel_drv.h"
#include "intel_dsi.h"
#include "intel_fifo_underrun.h"
#include "intel_gmbus.h"

@@ -1467,8 +1467,8 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
	else if (intel_crtc_has_dp_encoder(pipe_config))
		dotclock = intel_dotclock_calculate(pipe_config->port_clock,
						    &pipe_config->dp_m_n);
	else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp == 36)
		dotclock = pipe_config->port_clock * 2 / 3;
	else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24)
		dotclock = pipe_config->port_clock * 24 / pipe_config->pipe_bpp;
	else
		dotclock = pipe_config->port_clock;

@@ -2015,6 +2015,12 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
	for_each_pipe(dev_priv, p) {
		enum transcoder cpu_transcoder = (enum transcoder)p;
		unsigned int port_mask, ddi_select;
		intel_wakeref_t trans_wakeref;

		trans_wakeref = intel_display_power_get_if_enabled(dev_priv,
								   POWER_DOMAIN_TRANSCODER(cpu_transcoder));
		if (!trans_wakeref)
			continue;

		if (INTEL_GEN(dev_priv) >= 12) {
			port_mask = TGL_TRANS_DDI_PORT_MASK;

@@ -2025,6 +2031,8 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
		}

		tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
		intel_display_power_put(dev_priv, POWER_DOMAIN_TRANSCODER(cpu_transcoder),
					trans_wakeref);

		if ((tmp & port_mask) != ddi_select)
			continue;

@@ -2921,6 +2929,12 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
		if (!intel_phy_is_combo(dev_priv, phy))
			I915_WRITE(DDI_CLK_SEL(port),
				   icl_pll_to_ddi_clk_sel(encoder, crtc_state));
		else if (IS_ELKHARTLAKE(dev_priv) && port >= PORT_C)
			/*
			 * MG does not exist but the programming is required
			 * to ungate DDIC and DDID
			 */
			I915_WRITE(DDI_CLK_SEL(port), DDI_CLK_SEL_MG);
	} else if (IS_CANNONLAKE(dev_priv)) {
		/* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */
		val = I915_READ(DPCLKA_CFGCR0);

@@ -2961,7 +2975,8 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder)
	enum phy phy = intel_port_to_phy(dev_priv, port);

	if (INTEL_GEN(dev_priv) >= 11) {
		if (!intel_phy_is_combo(dev_priv, phy))
		if (!intel_phy_is_combo(dev_priv, phy) ||
		    (IS_ELKHARTLAKE(dev_priv) && port >= PORT_C))
			I915_WRITE(DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
	} else if (IS_CANNONLAKE(dev_priv)) {
		I915_WRITE(DPCLKA_CFGCR0, I915_READ(DPCLKA_CFGCR0) |

@@ -3124,10 +3139,8 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
	val |= DP_TP_CTL_FEC_ENABLE;
	I915_WRITE(DP_TP_CTL(port), val);

	if (intel_wait_for_register(&dev_priv->uncore, DP_TP_STATUS(port),
				    DP_TP_STATUS_FEC_ENABLE_LIVE,
				    DP_TP_STATUS_FEC_ENABLE_LIVE,
				    1))
	if (intel_de_wait_for_set(dev_priv, DP_TP_STATUS(port),
				  DP_TP_STATUS_FEC_ENABLE_LIVE, 1))
		DRM_ERROR("Timed out waiting for FEC Enable Status\n");
}

@@ -62,9 +62,9 @@
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_bw.h"
#include "intel_color.h"
#include "intel_cdclk.h"
#include "intel_drv.h"
#include "intel_color.h"
#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fifo_underrun.h"

@@ -1077,9 +1077,8 @@ intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_wait_for_register(&dev_priv->uncore,
					    reg, I965_PIPECONF_ACTIVE, 0,
					    100))
		if (intel_de_wait_for_clear(dev_priv, reg,
					    I965_PIPECONF_ACTIVE, 100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		intel_wait_for_pipe_scanline_stopped(crtc);

@@ -1383,11 +1382,7 @@ static void _vlv_enable_pll(struct intel_crtc *crtc,
	POSTING_READ(DPLL(pipe));
	udelay(150);

	if (intel_wait_for_register(&dev_priv->uncore,
				    DPLL(pipe),
				    DPLL_LOCK_VLV,
				    DPLL_LOCK_VLV,
				    1))
	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		DRM_ERROR("DPLL %d failed to lock\n", pipe);
}

@@ -1436,9 +1431,7 @@ static void _chv_enable_pll(struct intel_crtc *crtc,
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_wait_for_register(&dev_priv->uncore,
				    DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
				    1))
	if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);
}

@@ -1617,9 +1610,8 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
		BUG();
	}

	if (intel_wait_for_register(&dev_priv->uncore,
				    dpll_reg, port_mask, expected_mask,
				    1000))
	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
		     port_name(dport->base.port),
		     I915_READ(dpll_reg) & port_mask, expected_mask);

@@ -1678,9 +1670,7 @@ static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_s
	}

	I915_WRITE(reg, val | TRANS_ENABLE);
	if (intel_wait_for_register(&dev_priv->uncore,
				    reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
				    100))
	if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}

@@ -1708,11 +1698,8 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	if (intel_wait_for_register(&dev_priv->uncore,
				    LPT_TRANSCONF,
				    TRANS_STATE_ENABLE,
				    TRANS_STATE_ENABLE,
				    100))
	if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
				  TRANS_STATE_ENABLE, 100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}

@@ -1734,9 +1721,7 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_wait_for_register(&dev_priv->uncore,
				    reg, TRANS_STATE_ENABLE, 0,
				    50))
	if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (HAS_PCH_CPT(dev_priv)) {

@@ -1756,9 +1741,8 @@ void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_wait_for_register(&dev_priv->uncore,
				    LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
				    50))
	if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
				    TRANS_STATE_ENABLE, 50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */

@@ -3049,12 +3033,13 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = NULL;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = &plane_config->fb->base;
	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
	u32 size_aligned = round_up(plane_config->base + plane_config->size,
				    PAGE_SIZE);
	struct drm_i915_gem_object *obj;
	bool ret = false;

	size_aligned -= base_aligned;

@@ -3096,7 +3081,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
		break;
	default:
		MISSING_CASE(plane_config->tiling);
		return false;
		goto out;
	}

	mode_cmd.pixel_format = fb->format->format;

@@ -3108,16 +3093,15 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
	if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
		DRM_DEBUG_KMS("intel fb init failed\n");
		goto out_unref_obj;
		goto out;
	}

	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
	return true;

out_unref_obj:
	ret = true;
out:
	i915_gem_object_put(obj);
	return false;
	return ret;
}

static void

@@ -3174,6 +3158,12 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
	intel_disable_plane(plane, crtc_state);
}

static struct intel_frontbuffer *
to_intel_frontbuffer(struct drm_framebuffer *fb)
{
	return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
}

static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)

@@ -3181,7 +3171,6 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *c;
	struct drm_i915_gem_object *obj;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct intel_plane *intel_plane = to_intel_plane(primary);

@@ -3257,8 +3246,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
		return;
	}

	obj = intel_fb_obj(fb);
	intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);

	plane_state->src_x = 0;
	plane_state->src_y = 0;

@@ -3273,14 +3261,14 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
	intel_state->base.src = drm_plane_state_src(plane_state);
	intel_state->base.dst = drm_plane_state_dest(plane_state);

	if (i915_gem_object_is_tiled(obj))
	if (plane_config->tiling)
		dev_priv->preserve_bios_swizzle = true;

	plane_state->fb = fb;
	plane_state->crtc = &intel_crtc->base;

	atomic_or(to_intel_plane(primary)->frontbuffer_bit,
		  &obj->frontbuffer_bits);
		  &to_intel_frontbuffer(fb)->bits);
}

static int skl_max_plane_width(const struct drm_framebuffer *fb,

@@ -5693,9 +5681,7 @@ void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (intel_wait_for_register(&dev_priv->uncore,
					    IPS_CTL, IPS_ENABLE, IPS_ENABLE,
					    50))
		if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
			DRM_ERROR("Timed out waiting for IPS enable\n");
	}
}

@@ -5716,9 +5702,7 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
		 * 42ms timeout value leads to occasional timeouts so use 100ms
		 * instead.
		 */
		if (intel_wait_for_register(&dev_priv->uncore,
					    IPS_CTL, IPS_ENABLE, 0,
					    100))
		if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
			DRM_ERROR("Timed out waiting for IPS disable\n");
	} else {
		I915_WRITE(IPS_CTL, 0);

@@ -6683,7 +6667,7 @@ bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
	if (phy == PHY_NONE)
		return false;

	if (IS_ELKHARTLAKE(dev_priv) || INTEL_GEN(dev_priv) >= 12)
	if (IS_ELKHARTLAKE(dev_priv))
		return phy <= PHY_C;

	if (INTEL_GEN(dev_priv) >= 11)

@@ -10354,10 +10338,9 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

	if (INTEL_GEN(dev_priv) >= 12)
		port = (tmp & TGL_TRANS_DDI_PORT_MASK) >>
			TGL_TRANS_DDI_PORT_SHIFT;
		port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
	else
		port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
		port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);

	if (INTEL_GEN(dev_priv) >= 11)
		icelake_get_ddi_pll(dev_priv, port, pipe_config);

@@ -14133,9 +14116,9 @@ static void intel_atomic_track_fbs(struct intel_atomic_state *state)

	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
					     new_plane_state, i)
		i915_gem_track_fb(intel_fb_obj(old_plane_state->base.fb),
				  intel_fb_obj(new_plane_state->base.fb),
				  plane->frontbuffer_bit);
		intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->base.fb),
					to_intel_frontbuffer(new_plane_state->base.fb),
					plane->frontbuffer_bit);
}

static int intel_atomic_commit(struct drm_device *dev,

@@ -14419,7 +14402,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
		return ret;

	fb_obj_bump_render_priority(obj);
	intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
	intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_DIRTYFB);

	if (!new_state->fence) { /* implicit fencing */
		struct dma_fence *fence;

@@ -14682,13 +14665,12 @@ intel_legacy_cursor_update(struct drm_plane *plane,
			   struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	int ret;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct intel_plane *intel_plane = to_intel_plane(plane);
	struct drm_framebuffer *old_fb;
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->state);
	struct intel_crtc_state *new_crtc_state;
	int ret;

	/*
	 * When crtc is inactive or there is a modeset pending,

@@ -14756,11 +14738,10 @@ intel_legacy_cursor_update(struct drm_plane *plane,
	if (ret)
		goto out_unlock;

	intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_FLIP);

	old_fb = old_plane_state->fb;
	i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb),
			  intel_plane->frontbuffer_bit);
	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_FLIP);
	intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->fb),
				to_intel_frontbuffer(fb),
				intel_plane->frontbuffer_bit);

	/* Swap plane state */
	plane->state = new_plane_state;

@@ -15318,7 +15299,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
		/* TODO: initialize TC ports as well */
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);
		intel_ddi_init(dev_priv, PORT_C);
		icl_dsi_init(dev_priv);
	} else if (IS_ELKHARTLAKE(dev_priv)) {
		intel_ddi_init(dev_priv, PORT_A);
		intel_ddi_init(dev_priv, PORT_B);

@@ -15540,15 +15521,9 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);

	drm_framebuffer_cleanup(fb);

	i915_gem_object_lock(obj);
	WARN_ON(!obj->framebuffer_references--);
	i915_gem_object_unlock(obj);

	i915_gem_object_put(obj);
	intel_frontbuffer_put(intel_fb->frontbuffer);

	kfree(intel_fb);
}

@@ -15576,7 +15551,7 @@ static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);

	i915_gem_object_flush_if_display(obj);
	intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
	intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);

	return 0;
}

@@ -15598,8 +15573,11 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
	int ret = -EINVAL;
	int i;

	intel_fb->frontbuffer = intel_frontbuffer_get(obj);
	if (!intel_fb->frontbuffer)
		return -ENOMEM;

	i915_gem_object_lock(obj);
	obj->framebuffer_references++;
	tiling = i915_gem_object_get_tiling(obj);
	stride = i915_gem_object_get_stride(obj);
	i915_gem_object_unlock(obj);

@@ -15716,9 +15694,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
	return 0;

err:
	i915_gem_object_lock(obj);
	obj->framebuffer_references--;
	i915_gem_object_unlock(obj);
	intel_frontbuffer_put(intel_fb->frontbuffer);
	return ret;
}

@@ -15736,8 +15712,7 @@ intel_user_framebuffer_create(struct drm_device *dev,
		return ERR_PTR(-ENOENT);

	fb = intel_framebuffer_create(obj, &mode_cmd);
	if (IS_ERR(fb))
		i915_gem_object_put(obj);
	i915_gem_object_put(obj);

	return fb;
}

@@ -16126,7 +16101,6 @@ static int intel_initial_commit(struct drm_device *dev)
int intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	enum pipe pipe;
	struct intel_crtc *crtc;
	int ret;

@@ -16206,8 +16180,6 @@ int intel_modeset_init(struct drm_device *dev)
		dev->mode_config.cursor_height = 256;
	}

	dev->mode_config.fb_base = ggtt->gmadr.start;

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      INTEL_INFO(dev_priv)->num_pipes,
		      INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");

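The intel_display.c hunks above also switch frontbuffer tracking from the GEM object (intel_fb_obj()/i915_gem_track_fb()) to a dedicated intel_frontbuffer object hung off the framebuffer. A hedged sketch of the resulting call pattern: to_intel_frontbuffer() is copied from the hunk above, the _track()/_flush() signatures are inferred from their call sites, and example_plane_flip() is a hypothetical caller, not a function from this diff.

/* Copied from the hunk above: NULL-safe lookup of a fb's frontbuffer. */
static inline struct intel_frontbuffer *
to_intel_frontbuffer(struct drm_framebuffer *fb)
{
	return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
}

/* Hypothetical caller: on a flip, flush the new fb and move the plane's
 * frontbuffer bit from the old fb's tracking object to the new one. */
static void example_plane_flip(struct intel_plane *plane,
			       struct drm_framebuffer *old_fb,
			       struct drm_framebuffer *new_fb)
{
	intel_frontbuffer_flush(to_intel_frontbuffer(new_fb), ORIGIN_FLIP);
	intel_frontbuffer_track(to_intel_frontbuffer(old_fb),
				to_intel_frontbuffer(new_fb),
				plane->frontbuffer_bit);
}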
@@ -28,8 +28,30 @@
#include <drm/drm_util.h>
#include <drm/i915_drm.h>

enum link_m_n_set;
struct dpll;
struct drm_connector;
struct drm_device;
struct drm_encoder;
struct drm_file;
struct drm_framebuffer;
struct drm_i915_error_state_buf;
struct drm_i915_gem_object;
struct drm_i915_private;
struct drm_modeset_acquire_ctx;
struct drm_plane;
struct drm_plane_state;
struct i915_ggtt_view;
struct intel_crtc;
struct intel_crtc_state;
struct intel_digital_port;
struct intel_dp;
struct intel_encoder;
struct intel_load_detect_pipe;
struct intel_plane;
struct intel_plane_state;
struct intel_remapped_info;
struct intel_rotation_info;

enum i915_gpio {
	GPIOA,

@@ -400,4 +422,171 @@ u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
bool intel_plane_can_remap(const struct intel_plane_state *plane_state);
enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port);

void intel_plane_destroy(struct drm_plane *plane);
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc);
int vlv_get_hpll_vco(struct drm_i915_private *dev_priv);
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq);
int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
			   const char *name, u32 reg);
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv);
void lpt_disable_iclkip(struct drm_i915_private *dev_priv);
void intel_init_display_hooks(struct drm_i915_private *dev_priv);
unsigned int intel_fb_xy_to_linear(int x, int y,
				   const struct intel_plane_state *state,
				   int plane);
unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
				   int color_plane, unsigned int height);
void intel_add_fb_offsets(int *x, int *y,
			  const struct intel_plane_state *state, int plane);
unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info);
bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv);
int intel_display_suspend(struct drm_device *dev);
void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv);
void intel_encoder_destroy(struct drm_encoder *encoder);
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder);
bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy);
bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy);
enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv,
			      enum port port);
int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv);
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
					     enum pipe pipe);
u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);

int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask);
int intel_get_load_detect_pipe(struct drm_connector *connector,
			       const struct drm_display_mode *mode,
			       struct intel_load_detect_pipe *old,
			       struct drm_modeset_acquire_ctx *ctx);
void intel_release_load_detect_pipe(struct drm_connector *connector,
				    struct intel_load_detect_pipe *old,
				    struct drm_modeset_acquire_ctx *ctx);
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
			   const struct i915_ggtt_view *view,
			   bool uses_fence,
			   unsigned long *out_flags);
void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags);
struct drm_framebuffer *
intel_framebuffer_create(struct drm_i915_gem_object *obj,
			 struct drm_mode_fb_cmd2 *mode_cmd);
int intel_prepare_plane_fb(struct drm_plane *plane,
			   struct drm_plane_state *new_state);
void intel_cleanup_plane_fb(struct drm_plane *plane,
			    struct drm_plane_state *old_state);

void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe);

int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
		     const struct dpll *dpll);
void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe);
int lpt_get_iclkip(struct drm_i915_private *dev_priv);
bool intel_fuzzy_clock_check(int clock1, int clock2);

void intel_prepare_reset(struct drm_i915_private *dev_priv);
void intel_finish_reset(struct drm_i915_private *dev_priv);
void intel_dp_get_m_n(struct intel_crtc *crtc,
		      struct intel_crtc_state *pipe_config);
void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state,
		      enum link_m_n_set m_n);
void intel_dp_ycbcr_420_enable(struct intel_dp *intel_dp,
			       const struct intel_crtc_state *crtc_state);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
			struct dpll *best_clock);
int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);

bool intel_crtc_active(struct intel_crtc *crtc);
bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state);
void hsw_enable_ips(const struct intel_crtc_state *crtc_state);
void hsw_disable_ips(const struct intel_crtc_state *crtc_state);
enum intel_display_power_domain intel_port_to_power_domain(enum port port);
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port);
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
				 struct intel_crtc_state *pipe_config);
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state);

u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center);
int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
int skl_max_scale(const struct intel_crtc_state *crtc_state,
		  u32 pixel_format);
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
			const struct intel_plane_state *plane_state);
u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state);
u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
		  const struct intel_plane_state *plane_state);
u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state);
u32 skl_plane_stride(const struct intel_plane_state *plane_state,
		     int plane);
int skl_check_plane_surface(struct intel_plane_state *plane_state);
int i9xx_check_plane_surface(struct intel_plane_state *plane_state);
int skl_format_to_fourcc(int format, bool rgb_order, bool alpha);
unsigned int i9xx_plane_max_stride(struct intel_plane *plane,
				   u32 pixel_format, u64 modifier,
				   unsigned int rotation);
int bdw_get_pipemisc_bpp(struct intel_crtc *crtc);

struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv);
void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
				     struct intel_display_error_state *error);

/* modesetting */
void intel_modeset_init_hw(struct drm_device *dev);
int intel_modeset_init(struct drm_device *dev);
void intel_modeset_driver_remove(struct drm_device *dev);
int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state);
void intel_display_resume(struct drm_device *dev);
void i915_redisable_vga(struct drm_i915_private *dev_priv);
void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv);
void intel_init_pch_refclk(struct drm_i915_private *dev_priv);

/* modesetting asserts */
void assert_panel_unlocked(struct drm_i915_private *dev_priv,
			   enum pipe pipe);
void assert_pll(struct drm_i915_private *dev_priv,
		enum pipe pipe, bool state);
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
#define assert_pll_disabled(d, p) assert_pll(d, p, false)
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state);
#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state);
#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false)
void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)

/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
 * WARN_ON()) for hw state sanity checks to check for unexpected conditions
 * which may not necessarily be a user visible problem. This will either
 * WARN() or DRM_ERROR() depending on the verbose_checks moduleparam, to
 * enable distros and users to tailor their preferred amount of i915 abrt
 * spam.
 */
#define I915_STATE_WARN(condition, format...) ({			\
	int __ret_warn_on = !!(condition);				\
	if (unlikely(__ret_warn_on))					\
		if (!WARN(i915_modparams.verbose_state_checks, format))	\
			DRM_ERROR(format);				\
	unlikely(__ret_warn_on);					\
})

#define I915_STATE_WARN_ON(x)						\
	I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")

#endif

@ -13,8 +13,9 @@
|
|||
#include "intel_cdclk.h"
|
||||
#include "intel_combo_phy.h"
|
||||
#include "intel_csr.h"
|
||||
#include "intel_display_power.h"
|
||||
#include "intel_display_types.h"
|
||||
#include "intel_dpio_phy.h"
|
||||
#include "intel_drv.h"
|
||||
#include "intel_hotplug.h"
|
||||
#include "intel_sideband.h"
|
||||
#include "intel_tc.h"
|
||||
|
@ -318,11 +319,8 @@ static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
|
|||
int pw_idx = power_well->desc->hsw.idx;
|
||||
|
||||
/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
|
||||
if (intel_wait_for_register(&dev_priv->uncore,
|
||||
regs->driver,
|
||||
HSW_PWR_WELL_CTL_STATE(pw_idx),
|
||||
HSW_PWR_WELL_CTL_STATE(pw_idx),
|
||||
1)) {
|
||||
if (intel_de_wait_for_set(dev_priv, regs->driver,
|
||||
HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
|
||||
DRM_DEBUG_KMS("%s power well enable timeout\n",
|
||||
power_well->desc->name);
|
||||
|
||||
|
@ -379,9 +377,8 @@ static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
|
|||
enum skl_power_gate pg)
|
||||
{
|
||||
/* Timeout 5us for PG#0, for other PGs 1us */
|
||||
WARN_ON(intel_wait_for_register(&dev_priv->uncore, SKL_FUSE_STATUS,
|
||||
SKL_FUSE_PG_DIST_STATUS(pg),
|
||||
SKL_FUSE_PG_DIST_STATUS(pg), 1));
|
||||
WARN_ON(intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
|
||||
SKL_FUSE_PG_DIST_STATUS(pg), 1));
|
||||
}
|
||||
|
||||
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
|
||||
|
@ -727,7 +724,7 @@ static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
|
|||
return mask;
|
||||
}
|
||||
|
||||
void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
|
||||
static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
u32 val;
|
||||
|
||||
|
@ -787,7 +784,7 @@ static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
|
|||
dev_priv->csr.dc_state = val & mask;
|
||||
}
|
||||
|
||||
void bxt_enable_dc9(struct drm_i915_private *dev_priv)
|
||||
static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
assert_can_enable_dc9(dev_priv);
|
||||
|
||||
|
@ -802,7 +799,7 @@ void bxt_enable_dc9(struct drm_i915_private *dev_priv)
|
|||
gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
|
||||
}
|
||||
|
||||
void bxt_disable_dc9(struct drm_i915_private *dev_priv)
|
||||
static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
assert_can_disable_dc9(dev_priv);
|
||||
|
||||
|
@ -856,7 +853,7 @@ static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
|
|||
assert_csr_loaded(dev_priv);
|
||||
}
|
||||
|
||||
void gen9_enable_dc5(struct drm_i915_private *dev_priv)
|
||||
static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
assert_can_enable_dc5(dev_priv);
|
||||
|
||||
|
@ -880,7 +877,7 @@ static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
|
|||
assert_csr_loaded(dev_priv);
|
||||
}
|
||||
|
||||
void skl_enable_dc6(struct drm_i915_private *dev_priv)
|
||||
static void skl_enable_dc6(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
assert_can_enable_dc6(dev_priv);
|
||||
|
||||
|
@ -966,8 +963,7 @@ static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
|
|||
"Unexpected DBuf power power state (0x%08x)\n", tmp);
|
||||
}
|
||||
|
||||
static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
|
||||
struct i915_power_well *power_well)
|
||||
static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_cdclk_state cdclk_state = {};
|
||||
|
||||
|
@ -991,6 +987,12 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
|
|||
intel_combo_phy_init(dev_priv);
|
||||
}
|
||||
|
||||
static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
|
||||
struct i915_power_well *power_well)
|
||||
{
|
||||
gen9_disable_dc_states(dev_priv);
|
||||
}
|
||||
|
||||
static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
|
||||
struct i915_power_well *power_well)
|
||||
{
|
||||
|
@ -1379,11 +1381,8 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
|
|||
* The PHY may be busy with some initial calibration and whatnot,
|
||||
* so the power state can take a while to actually change.
|
||||
*/
|
||||
if (intel_wait_for_register(&dev_priv->uncore,
|
||||
DISPLAY_PHY_STATUS,
|
||||
phy_status_mask,
|
||||
phy_status,
|
||||
10))
|
||||
if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
|
||||
phy_status_mask, phy_status, 10))
|
||||
DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
|
||||
I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
|
||||
phy_status, dev_priv->chv_phy_control);
|
||||
|
@ -1414,11 +1413,8 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
|
|||
vlv_set_power_well(dev_priv, power_well, true);
|
||||
|
||||
/* Poll for phypwrgood signal */
|
||||
if (intel_wait_for_register(&dev_priv->uncore,
|
||||
DISPLAY_PHY_STATUS,
|
||||
PHY_POWERGOOD(phy),
|
||||
PHY_POWERGOOD(phy),
|
||||
1))
|
||||
if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
|
||||
PHY_POWERGOOD(phy), 1))
|
||||
DRM_ERROR("Display PHY %d is not power up\n", phy);
|
||||
|
||||
vlv_dpio_get(dev_priv);
|
||||
|
@@ -2482,15 +2478,10 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
     BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |        \
     BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
     BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |    \
-    BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |       \
     BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |    \
-    BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |       \
     BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |    \
-    BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |       \
     BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) |    \
-    BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) |       \
     BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |    \
-    BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |       \
     BIT_ULL(POWER_DOMAIN_AUX_B) |               \
     BIT_ULL(POWER_DOMAIN_AUX_C) |               \
     BIT_ULL(POWER_DOMAIN_AUX_D) |               \
@@ -2558,12 +2549,14 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,

 #define TGL_PW_5_POWER_DOMAINS (                \
     BIT_ULL(POWER_DOMAIN_PIPE_D) |              \
+    BIT_ULL(POWER_DOMAIN_TRANSCODER_D) |        \
     BIT_ULL(POWER_DOMAIN_PIPE_D_PANEL_FITTER) | \
     BIT_ULL(POWER_DOMAIN_INIT))

 #define TGL_PW_4_POWER_DOMAINS (                \
     TGL_PW_5_POWER_DOMAINS |                    \
     BIT_ULL(POWER_DOMAIN_PIPE_C) |              \
+    BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |        \
     BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
     BIT_ULL(POWER_DOMAIN_INIT))

@@ -2571,21 +2564,13 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
     TGL_PW_4_POWER_DOMAINS |                    \
     BIT_ULL(POWER_DOMAIN_PIPE_B) |              \
     BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |        \
-    BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |        \
-    BIT_ULL(POWER_DOMAIN_TRANSCODER_D) |        \
     BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
     BIT_ULL(POWER_DOMAIN_PORT_DDI_TC1_LANES) |  \
-    BIT_ULL(POWER_DOMAIN_PORT_DDI_TC1_IO) |     \
     BIT_ULL(POWER_DOMAIN_PORT_DDI_TC2_LANES) |  \
-    BIT_ULL(POWER_DOMAIN_PORT_DDI_TC2_IO) |     \
     BIT_ULL(POWER_DOMAIN_PORT_DDI_TC3_LANES) |  \
-    BIT_ULL(POWER_DOMAIN_PORT_DDI_TC3_IO) |     \
     BIT_ULL(POWER_DOMAIN_PORT_DDI_TC4_LANES) |  \
-    BIT_ULL(POWER_DOMAIN_PORT_DDI_TC4_IO) |     \
     BIT_ULL(POWER_DOMAIN_PORT_DDI_TC5_LANES) |  \
-    BIT_ULL(POWER_DOMAIN_PORT_DDI_TC5_IO) |     \
     BIT_ULL(POWER_DOMAIN_PORT_DDI_TC6_LANES) |  \
-    BIT_ULL(POWER_DOMAIN_PORT_DDI_TC6_IO) |     \
     BIT_ULL(POWER_DOMAIN_AUX_TC1) |             \
     BIT_ULL(POWER_DOMAIN_AUX_TC2) |             \
     BIT_ULL(POWER_DOMAIN_AUX_TC3) |             \
@@ -4342,8 +4327,7 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
     I915_WRITE(LCPLL_CTL, val);
     POSTING_READ(LCPLL_CTL);

-    if (intel_wait_for_register(&dev_priv->uncore, LCPLL_CTL,
-                                LCPLL_PLL_LOCK, 0, 1))
+    if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
         DRM_ERROR("LCPLL still locked\n");

     val = hsw_read_dcomp(dev_priv);
@@ -4398,8 +4382,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
     val &= ~LCPLL_PLL_DISABLE;
     I915_WRITE(LCPLL_CTL, val);

-    if (intel_wait_for_register(&dev_priv->uncore, LCPLL_CTL,
-                                LCPLL_PLL_LOCK, LCPLL_PLL_LOCK, 5))
+    if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
         DRM_ERROR("LCPLL not locked yet\n");

     if (val & LCPLL_CD_SOURCE_FCLK) {
@@ -4441,7 +4424,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
 * For more, read "Display Sequences for Package C8" on the hardware
 * documentation.
 */
-void hsw_enable_pc8(struct drm_i915_private *dev_priv)
+static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
 {
     u32 val;
@@ -4457,7 +4440,7 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv)
     hsw_disable_lcpll(dev_priv, true, true);
 }

-void hsw_disable_pc8(struct drm_i915_private *dev_priv)
+static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
 {
     u32 val;
@@ -4532,7 +4515,7 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
     struct i915_power_domains *power_domains = &dev_priv->power_domains;
     struct i915_power_well *well;

-    gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+    gen9_disable_dc_states(dev_priv);

     gen9_dbuf_disable(dev_priv);
@@ -4557,8 +4540,7 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
     usleep_range(10, 30); /* 10 us delay per Bspec */
 }

-void bxt_display_core_init(struct drm_i915_private *dev_priv,
-                           bool resume)
+static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume)
 {
     struct i915_power_domains *power_domains = &dev_priv->power_domains;
     struct i915_power_well *well;
@@ -4589,12 +4571,12 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv,
     intel_csr_load_program(dev_priv);
 }

-void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
+static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
 {
     struct i915_power_domains *power_domains = &dev_priv->power_domains;
     struct i915_power_well *well;

-    gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+    gen9_disable_dc_states(dev_priv);

     gen9_dbuf_disable(dev_priv);
@@ -4654,7 +4636,7 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
     struct i915_power_domains *power_domains = &dev_priv->power_domains;
     struct i915_power_well *well;

-    gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+    gen9_disable_dc_states(dev_priv);

     /* 1. Disable all display engine functions -> aready done */
@@ -4680,8 +4662,8 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
     intel_combo_phy_uninit(dev_priv);
 }

-void icl_display_core_init(struct drm_i915_private *dev_priv,
-                           bool resume)
+static void icl_display_core_init(struct drm_i915_private *dev_priv,
+                                  bool resume)
 {
     struct i915_power_domains *power_domains = &dev_priv->power_domains;
     struct i915_power_well *well;
@@ -4716,12 +4698,12 @@ void icl_display_core_init(struct drm_i915_private *dev_priv,
     intel_csr_load_program(dev_priv);
 }

-void icl_display_core_uninit(struct drm_i915_private *dev_priv)
+static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
 {
     struct i915_power_domains *power_domains = &dev_priv->power_domains;
     struct i915_power_well *well;

-    gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
+    gen9_disable_dc_states(dev_priv);

     /* 1. Disable all display engine functions -> aready done */
@@ -5193,3 +5175,58 @@ static void intel_power_domains_verify_state(struct drm_i915_private *i915)
 }

 #endif
+
+void intel_display_power_suspend_late(struct drm_i915_private *i915)
+{
+    if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915))
+        bxt_enable_dc9(i915);
+    else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
+        hsw_enable_pc8(i915);
+}
+
+void intel_display_power_resume_early(struct drm_i915_private *i915)
+{
+    if (INTEL_GEN(i915) >= 11 || IS_GEN9_LP(i915)) {
+        gen9_sanitize_dc_state(i915);
+        bxt_disable_dc9(i915);
+    } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
+        hsw_disable_pc8(i915);
+    }
+}
+
+void intel_display_power_suspend(struct drm_i915_private *i915)
+{
+    if (INTEL_GEN(i915) >= 11) {
+        icl_display_core_uninit(i915);
+        bxt_enable_dc9(i915);
+    } else if (IS_GEN9_LP(i915)) {
+        bxt_display_core_uninit(i915);
+        bxt_enable_dc9(i915);
+    } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
+        hsw_enable_pc8(i915);
+    }
+}
+
+void intel_display_power_resume(struct drm_i915_private *i915)
+{
+    if (INTEL_GEN(i915) >= 11) {
+        bxt_disable_dc9(i915);
+        icl_display_core_init(i915, true);
+        if (i915->csr.dmc_payload) {
+            if (i915->csr.allowed_dc_mask &
+                DC_STATE_EN_UPTO_DC6)
+                skl_enable_dc6(i915);
+            else if (i915->csr.allowed_dc_mask &
+                     DC_STATE_EN_UPTO_DC5)
+                gen9_enable_dc5(i915);
+        }
+    } else if (IS_GEN9_LP(i915)) {
+        bxt_disable_dc9(i915);
+        bxt_display_core_init(i915, true);
+        if (i915->csr.dmc_payload &&
+            (i915->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
+            gen9_enable_dc5(i915);
+    } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
+        hsw_disable_pc8(i915);
+    }
+}
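The four functions added at the end of intel_display_power.c become the only entry points into these sequences; the per-platform bxt_/hsw_/icl_ helpers above turn static. A caller in the driver's PM paths would use them roughly as follows (illustrative sketch; the surrounding function names are hypothetical, only the intel_display_power_* calls come from the diff above):

    /* Hypothetical callers, showing the intended pairing of entry points. */
    static int example_suspend_late(struct drm_i915_private *i915)
    {
        /* Enters DC9 (gen9 LP / gen11+) or PC8 (HSW/BDW) as appropriate. */
        intel_display_power_suspend_late(i915);
        return 0;
    }

    static int example_resume_early(struct drm_i915_private *i915)
    {
        /* Sanitizes DC state, then leaves DC9/PC8 again. */
        intel_display_power_resume_early(i915);
        return 0;
    }

intel_display_power_suspend()/resume() similarly bracket the deeper suspend path: they tear down and re-initialize the display core, and on resume re-enter DC5/DC6 only when a DMC payload is loaded and the allowed_dc_mask permits it.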
@@ -92,6 +92,27 @@ enum intel_display_power_domain {
     POWER_DOMAIN_NUM,
 };

+/*
+ * i915_power_well_id:
+ *
+ * IDs used to look up power wells. Power wells accessed directly bypassing
+ * the power domains framework must be assigned a unique ID. The rest of power
+ * wells must be assigned DISP_PW_ID_NONE.
+ */
+enum i915_power_well_id {
+    DISP_PW_ID_NONE,
+
+    VLV_DISP_PW_DISP2D,
+    BXT_DISP_PW_DPIO_CMN_A,
+    VLV_DISP_PW_DPIO_CMN_BC,
+    GLK_DISP_PW_DPIO_CMN_C,
+    CHV_DISP_PW_DPIO_CMN_D,
+    HSW_DISP_PW_GLOBAL,
+    SKL_DISP_PW_MISC_IO,
+    SKL_DISP_PW_1,
+    SKL_DISP_PW_2,
+};
+
 #define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
 #define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
     ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
@@ -232,27 +253,20 @@ struct i915_power_domains {
     for_each_power_well_reverse(__dev_priv, __power_well) \
         for_each_if((__power_well)->desc->domains & (__domain_mask))

-void skl_enable_dc6(struct drm_i915_private *dev_priv);
-void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv);
-void bxt_enable_dc9(struct drm_i915_private *dev_priv);
-void bxt_disable_dc9(struct drm_i915_private *dev_priv);
-void gen9_enable_dc5(struct drm_i915_private *dev_priv);
-
 int intel_power_domains_init(struct drm_i915_private *dev_priv);
 void intel_power_domains_cleanup(struct drm_i915_private *dev_priv);
 void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
 void intel_power_domains_driver_remove(struct drm_i915_private *dev_priv);
-void icl_display_core_init(struct drm_i915_private *dev_priv, bool resume);
-void icl_display_core_uninit(struct drm_i915_private *dev_priv);
 void intel_power_domains_enable(struct drm_i915_private *dev_priv);
 void intel_power_domains_disable(struct drm_i915_private *dev_priv);
 void intel_power_domains_suspend(struct drm_i915_private *dev_priv,
                                  enum i915_drm_suspend_mode);
 void intel_power_domains_resume(struct drm_i915_private *dev_priv);
-void hsw_enable_pc8(struct drm_i915_private *dev_priv);
-void hsw_disable_pc8(struct drm_i915_private *dev_priv);
-void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume);
-void bxt_display_core_uninit(struct drm_i915_private *dev_priv);
+
+void intel_display_power_suspend_late(struct drm_i915_private *i915);
+void intel_display_power_resume_early(struct drm_i915_private *i915);
+void intel_display_power_suspend(struct drm_i915_private *i915);
+void intel_display_power_resume(struct drm_i915_private *i915);

 const char *
 intel_display_power_domain_str(struct drm_i915_private *i915,
@@ -22,8 +22,9 @@
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
-#ifndef __INTEL_DRV_H__
-#define __INTEL_DRV_H__
+
+#ifndef __INTEL_DISPLAY_TYPES_H__
+#define __INTEL_DISPLAY_TYPES_H__

 #include <linux/async.h>
 #include <linux/i2c.h>
@@ -67,8 +68,23 @@ enum intel_output_type {
     INTEL_OUTPUT_DP_MST = 11,
 };

+enum hdmi_force_audio {
+    HDMI_AUDIO_OFF_DVI = -2,    /* no aux data for HDMI-DVI converter */
+    HDMI_AUDIO_OFF,             /* force turn off HDMI audio */
+    HDMI_AUDIO_AUTO,            /* trust EDID */
+    HDMI_AUDIO_ON,              /* force turn on HDMI audio */
+};
+
+/* "Broadcast RGB" property */
+enum intel_broadcast_rgb {
+    INTEL_BROADCAST_RGB_AUTO,
+    INTEL_BROADCAST_RGB_FULL,
+    INTEL_BROADCAST_RGB_LIMITED,
+};
+
 struct intel_framebuffer {
     struct drm_framebuffer base;
+    struct intel_frontbuffer *frontbuffer;
     struct intel_rotation_info rot_info;

     /* for each plane in the normal GTT view */
@@ -851,7 +867,7 @@ struct intel_crtc_state {

     /*
      * Frequence the dpll for the port should run at. Differs from the
-     * adjusted dotclock e.g. for DP or 12bpc hdmi mode. This is also
+     * adjusted dotclock e.g. for DP or 10/12bpc hdmi mode. This is also
      * already multiplied by pixel_multiplier.
      */
     int port_clock;
@@ -1473,41 +1489,6 @@ intel_atomic_get_new_crtc_state(struct intel_atomic_state *state,
 }

 /* intel_display.c */
-void intel_plane_destroy(struct drm_plane *plane);
-void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
-void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
-enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc);
-int vlv_get_hpll_vco(struct drm_i915_private *dev_priv);
-int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
-                      const char *name, u32 reg, int ref_freq);
-int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
-                           const char *name, u32 reg);
-void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv);
-void lpt_disable_iclkip(struct drm_i915_private *dev_priv);
-void intel_init_display_hooks(struct drm_i915_private *dev_priv);
-unsigned int intel_fb_xy_to_linear(int x, int y,
-                                   const struct intel_plane_state *state,
-                                   int plane);
-unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
-                                   int color_plane, unsigned int height);
-void intel_add_fb_offsets(int *x, int *y,
-                          const struct intel_plane_state *state, int plane);
-unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
-unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info);
-bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv);
-int intel_display_suspend(struct drm_device *dev);
-void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv);
-void intel_encoder_destroy(struct drm_encoder *encoder);
-struct drm_display_mode *
-intel_encoder_current_mode(struct intel_encoder *encoder);
-bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy);
-bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy);
-enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv,
-                              enum port port);
-int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
-                                      struct drm_file *file_priv);
-enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
-                                             enum pipe pipe);
 static inline bool
 intel_crtc_has_type(const struct intel_crtc_state *crtc_state,
                     enum intel_output_type type)
@@ -1536,108 +1517,9 @@ intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, int pipe)
     intel_wait_for_vblank(dev_priv, pipe);
 }

-u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);
-
-int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
-void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
-                         struct intel_digital_port *dport,
-                         unsigned int expected_mask);
-int intel_get_load_detect_pipe(struct drm_connector *connector,
-                               const struct drm_display_mode *mode,
-                               struct intel_load_detect_pipe *old,
-                               struct drm_modeset_acquire_ctx *ctx);
-void intel_release_load_detect_pipe(struct drm_connector *connector,
-                                    struct intel_load_detect_pipe *old,
-                                    struct drm_modeset_acquire_ctx *ctx);
-struct i915_vma *
-intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
-                           const struct i915_ggtt_view *view,
-                           bool uses_fence,
-                           unsigned long *out_flags);
-void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags);
-struct drm_framebuffer *
-intel_framebuffer_create(struct drm_i915_gem_object *obj,
-                         struct drm_mode_fb_cmd2 *mode_cmd);
-int intel_prepare_plane_fb(struct drm_plane *plane,
-                           struct drm_plane_state *new_state);
-void intel_cleanup_plane_fb(struct drm_plane *plane,
-                            struct drm_plane_state *old_state);
-
-void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
-                                    enum pipe pipe);
-
-int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
-                     const struct dpll *dpll);
-void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe);
-int lpt_get_iclkip(struct drm_i915_private *dev_priv);
-bool intel_fuzzy_clock_check(int clock1, int clock2);
-
-/* modesetting asserts */
-void assert_panel_unlocked(struct drm_i915_private *dev_priv,
-                           enum pipe pipe);
-void assert_pll(struct drm_i915_private *dev_priv,
-                enum pipe pipe, bool state);
-#define assert_pll_enabled(d, p) assert_pll(d, p, true)
-#define assert_pll_disabled(d, p) assert_pll(d, p, false)
-void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state);
-#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
-#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
-void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
-                       enum pipe pipe, bool state);
-#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
-#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false)
-void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
-#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
-#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
-void intel_prepare_reset(struct drm_i915_private *dev_priv);
-void intel_finish_reset(struct drm_i915_private *dev_priv);
-void intel_dp_get_m_n(struct intel_crtc *crtc,
-                      struct intel_crtc_state *pipe_config);
-void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state,
-                      enum link_m_n_set m_n);
-void intel_dp_ycbcr_420_enable(struct intel_dp *intel_dp,
-                               const struct intel_crtc_state *crtc_state);
-int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
-bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
-                        struct dpll *best_clock);
-int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
-
-bool intel_crtc_active(struct intel_crtc *crtc);
-bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state);
-void hsw_enable_ips(const struct intel_crtc_state *crtc_state);
-void hsw_disable_ips(const struct intel_crtc_state *crtc_state);
-enum intel_display_power_domain intel_port_to_power_domain(enum port port);
-enum intel_display_power_domain
-intel_aux_power_domain(struct intel_digital_port *dig_port);
-void intel_mode_from_pipe_config(struct drm_display_mode *mode,
-                                 struct intel_crtc_state *pipe_config);
-void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
-                                  struct intel_crtc_state *crtc_state);
-
-u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center);
-int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
-int skl_max_scale(const struct intel_crtc_state *crtc_state,
-                  u32 pixel_format);
-
 static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state)
 {
     return i915_ggtt_offset(state->vma);
 }

-u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
-                        const struct intel_plane_state *plane_state);
-u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state);
-u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
-                  const struct intel_plane_state *plane_state);
-u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state);
-u32 skl_plane_stride(const struct intel_plane_state *plane_state,
-                     int plane);
-int skl_check_plane_surface(struct intel_plane_state *plane_state);
-int i9xx_check_plane_surface(struct intel_plane_state *plane_state);
-int skl_format_to_fourcc(int format, bool rgb_order, bool alpha);
-unsigned int i9xx_plane_max_stride(struct intel_plane *plane,
-                                   u32 pixel_format, u64 modifier,
-                                   unsigned int rotation);
-int bdw_get_pipemisc_bpp(struct intel_crtc *crtc);
-
-#endif /* __INTEL_DRV_H__ */
+#endif /* __INTEL_DISPLAY_TYPES_H__ */
@@ -44,15 +44,16 @@

 #include "i915_debugfs.h"
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include "intel_atomic.h"
 #include "intel_audio.h"
 #include "intel_connector.h"
 #include "intel_ddi.h"
+#include "intel_display_types.h"
 #include "intel_dp.h"
 #include "intel_dp_link_training.h"
 #include "intel_dp_mst.h"
 #include "intel_dpio_phy.h"
-#include "intel_drv.h"
 #include "intel_fifo_underrun.h"
 #include "intel_hdcp.h"
 #include "intel_hdmi.h"
@@ -2370,9 +2371,8 @@ static void wait_panel_status(struct intel_dp *intel_dp,
                   I915_READ(pp_stat_reg),
                   I915_READ(pp_ctrl_reg));

-    if (intel_wait_for_register(&dev_priv->uncore,
-                                pp_stat_reg, mask, value,
-                                5000))
+    if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
+                                   mask, value, 5000))
         DRM_ERROR("Panel status timeout: status %08x control %08x\n",
                   I915_READ(pp_stat_reg),
                   I915_READ(pp_ctrl_reg));
@@ -3959,10 +3959,8 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
     if (port == PORT_A)
         return;

-    if (intel_wait_for_register(&dev_priv->uncore, DP_TP_STATUS(port),
-                                DP_TP_STATUS_IDLE_DONE,
-                                DP_TP_STATUS_IDLE_DONE,
-                                1))
+    if (intel_de_wait_for_set(dev_priv, DP_TP_STATUS(port),
+                              DP_TP_STATUS_IDLE_DONE, 1))
         DRM_ERROR("Timed out waiting for DP idle patterns\n");
 }
@@ -4146,10 +4144,6 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
     drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
                      drm_dp_is_branch(intel_dp->dpcd));

-    if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
-        dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
-            DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
-
     /*
      * Read the eDP display control registers.
      *
@@ -5818,47 +5812,49 @@ struct hdcp2_dp_errata_stream_type {
     u8 stream_type;
 } __packed;

-static struct hdcp2_dp_msg_data {
+struct hdcp2_dp_msg_data {
     u8 msg_id;
     u32 offset;
     bool msg_detectable;
     u32 timeout;
     u32 timeout2; /* Added for non_paired situation */
-} hdcp2_msg_data[] = {
-    {HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0},
-    {HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET,
-     false, HDCP_2_2_CERT_TIMEOUT_MS, 0},
-    {HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET,
-     false, 0, 0},
-    {HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET,
-     false, 0, 0},
-    {HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET,
-     true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
-     HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS},
-    {HDCP_2_2_AKE_SEND_PAIRING_INFO,
-     DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true,
-     HDCP_2_2_PAIRING_TIMEOUT_MS, 0},
-    {HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0},
-    {HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET,
-     false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0},
-    {HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false,
-     0, 0},
-    {HDCP_2_2_REP_SEND_RECVID_LIST,
-     DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true,
-     HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0},
-    {HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false,
-     0, 0},
-    {HDCP_2_2_REP_STREAM_MANAGE,
-     DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false,
-     0, 0},
-    {HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET,
-     false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0},
+};
+
+static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = {
+    { HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0 },
+    { HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET,
+      false, HDCP_2_2_CERT_TIMEOUT_MS, 0 },
+    { HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET,
+      false, 0, 0 },
+    { HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET,
+      false, 0, 0 },
+    { HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET,
+      true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
+      HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS },
+    { HDCP_2_2_AKE_SEND_PAIRING_INFO,
+      DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true,
+      HDCP_2_2_PAIRING_TIMEOUT_MS, 0 },
+    { HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0 },
+    { HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET,
+      false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0 },
+    { HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false,
+      0, 0 },
+    { HDCP_2_2_REP_SEND_RECVID_LIST,
+      DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true,
+      HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0 },
+    { HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false,
+      0, 0 },
+    { HDCP_2_2_REP_STREAM_MANAGE,
+      DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false,
+      0, 0 },
+    { HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET,
+      false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0 },
 /* local define to shovel this through the write_2_2 interface */
 #define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50
-    {HDCP_2_2_ERRATA_DP_STREAM_TYPE,
-     DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false,
-     0, 0},
-};
+    { HDCP_2_2_ERRATA_DP_STREAM_TYPE,
+      DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false,
+      0, 0 },
+};

 static inline
 int intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
@@ -5912,7 +5908,7 @@ int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,

 static ssize_t
 intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
-                            struct hdcp2_dp_msg_data *hdcp2_msg_data)
+                            const struct hdcp2_dp_msg_data *hdcp2_msg_data)
 {
     struct intel_dp *dp = &intel_dig_port->dp;
     struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
@@ -5951,13 +5947,13 @@ intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
     return ret;
 }

-static struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id)
+static const struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id)
 {
     int i;

-    for (i = 0; i < ARRAY_SIZE(hdcp2_msg_data); i++)
-        if (hdcp2_msg_data[i].msg_id == msg_id)
-            return &hdcp2_msg_data[i];
+    for (i = 0; i < ARRAY_SIZE(hdcp2_dp_msg_data); i++)
+        if (hdcp2_dp_msg_data[i].msg_id == msg_id)
+            return &hdcp2_dp_msg_data[i];

     return NULL;
 }
@@ -5971,7 +5967,7 @@ int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
     unsigned int offset;
     u8 *byte = buf;
     ssize_t ret, bytes_to_write, len;
-    struct hdcp2_dp_msg_data *hdcp2_msg_data;
+    const struct hdcp2_dp_msg_data *hdcp2_msg_data;

     hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte);
     if (!hdcp2_msg_data)
@@ -6035,7 +6031,7 @@ int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
     unsigned int offset;
     u8 *byte = buf;
     ssize_t ret, bytes_to_recv, len;
-    struct hdcp2_dp_msg_data *hdcp2_msg_data;
+    const struct hdcp2_dp_msg_data *hdcp2_msg_data;

     hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id);
     if (!hdcp2_msg_data)
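The HDCP table rework above is a straightforward constification: the type definition is split from the initializer so the array can be declared static const and every consumer takes a const pointer, letting the message metadata live in read-only data. The accompanying lookup reduces to a linear scan; a sketch under the same struct layout (the function name here is illustrative, the real one is get_hdcp2_dp_msg_data() shown above):

    static const struct hdcp2_dp_msg_data *
    lookup_msg_data_example(const struct hdcp2_dp_msg_data *table,
                            size_t count, u8 msg_id)
    {
        size_t i;

        for (i = 0; i < count; i++)
            if (table[i].msg_id == msg_id)
                return &table[i];

        return NULL; /* unknown message id */
    }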
@@ -22,8 +22,8 @@
 *
 */

+#include "intel_display_types.h"
 #include "intel_dp_aux_backlight.h"
-#include "intel_drv.h"

 static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
 {

@@ -21,9 +21,9 @@
 * IN THE SOFTWARE.
 */

+#include "intel_display_types.h"
 #include "intel_dp.h"
 #include "intel_dp_link_training.h"
-#include "intel_drv.h"

 static void
 intel_dp_dump_link_status(const u8 link_status[DP_LINK_STATUS_SIZE])

@@ -32,10 +32,10 @@
 #include "intel_audio.h"
 #include "intel_connector.h"
 #include "intel_ddi.h"
+#include "intel_display_types.h"
 #include "intel_dp.h"
 #include "intel_dp_mst.h"
 #include "intel_dpio_phy.h"
-#include "intel_drv.h"

 static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
                                             struct intel_crtc_state *crtc_state,
@@ -338,11 +338,8 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,

     DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);

-    if (intel_wait_for_register(&dev_priv->uncore,
-                                DP_TP_STATUS(port),
-                                DP_TP_STATUS_ACT_SENT,
-                                DP_TP_STATUS_ACT_SENT,
-                                1))
+    if (intel_de_wait_for_set(dev_priv, DP_TP_STATUS(port),
+                              DP_TP_STATUS_ACT_SENT, 1))
         DRM_ERROR("Timed out waiting for ACT sent\n");

     drm_dp_check_act_status(&intel_dp->mst_mgr);
@@ -539,7 +536,15 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,

     intel_attach_force_audio_property(connector);
     intel_attach_broadcast_rgb_property(connector);
-    drm_connector_attach_max_bpc_property(connector, 6, 12);
+
+    /*
+     * Reuse the prop from the SST connector because we're
+     * not allowed to create new props after device registration.
+     */
+    connector->max_bpc_property =
+        intel_dp->attached_connector->base.max_bpc_property;
+    if (connector->max_bpc_property)
+        drm_connector_attach_max_bpc_property(connector, 6, 12);

     return connector;
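The connector hunk above encodes a DRM rule worth spelling out: properties cannot be created after the device has been registered, so the dynamically added MST connector must not let drm_connector_attach_max_bpc_property() create a fresh property; it instead borrows the property object already created for the SST connector. Distilled into a standalone helper (the helper name is illustrative; the two statements come from the hunk above):

    static void example_attach_max_bpc(struct drm_connector *connector,
                                       struct intel_dp *intel_dp)
    {
        /* Point at the SST connector's existing property... */
        connector->max_bpc_property =
            intel_dp->attached_connector->base.max_bpc_property;
        /* ...so the attach call reuses it instead of creating one. */
        if (connector->max_bpc_property)
            drm_connector_attach_max_bpc_property(connector, 6, 12);
    }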
@@ -602,7 +607,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum pipe pipe)
     intel_encoder->type = INTEL_OUTPUT_DP_MST;
     intel_encoder->power_domain = intel_dig_port->base.power_domain;
     intel_encoder->port = intel_dig_port->base.port;
-    intel_encoder->crtc_mask = 0x7;
+    intel_encoder->crtc_mask = BIT(pipe);
     intel_encoder->cloneable = 0;

     intel_encoder->compute_config = intel_dp_mst_compute_config;
@@ -631,6 +636,12 @@ intel_dp_create_fake_mst_encoders(struct intel_digital_port *intel_dig_port)
     return true;
 }

+int
+intel_dp_mst_encoder_active_links(struct intel_digital_port *intel_dig_port)
+{
+    return intel_dig_port->dp.active_mst_links;
+}
+
 int
 intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_base_id)
 {

@@ -6,15 +6,10 @@
 #ifndef __INTEL_DP_MST_H__
 #define __INTEL_DP_MST_H__

-#include "intel_drv.h"
+struct intel_digital_port;

 int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
 void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
-static inline int
-intel_dp_mst_encoder_active_links(struct intel_digital_port *intel_dig_port)
-{
-    return intel_dig_port->dp.active_mst_links;
-}
+int intel_dp_mst_encoder_active_links(struct intel_digital_port *intel_dig_port);

 #endif /* __INTEL_DP_MST_H__ */
@@ -23,8 +23,8 @@

 #include "display/intel_dp.h"

+#include "intel_display_types.h"
 #include "intel_dpio_phy.h"
-#include "intel_drv.h"
 #include "intel_sideband.h"

 /**
@@ -345,10 +345,8 @@ static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
 static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
                                   enum dpio_phy phy)
 {
-    if (intel_wait_for_register(&dev_priv->uncore,
-                                BXT_PORT_REF_DW3(phy),
-                                GRC_DONE, GRC_DONE,
-                                10))
+    if (intel_de_wait_for_set(dev_priv, BXT_PORT_REF_DW3(phy),
+                              GRC_DONE, 10))
         DRM_ERROR("timeout waiting for PHY%d GRC\n", phy);
 }
@@ -21,9 +21,9 @@
 * DEALINGS IN THE SOFTWARE.
 */

+#include "intel_display_types.h"
 #include "intel_dpio_phy.h"
 #include "intel_dpll_mgr.h"
-#include "intel_drv.h"

 /**
 * DOC: Display PLLs
@@ -1000,11 +1000,7 @@ static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
     I915_WRITE(regs[id].ctl,
                I915_READ(regs[id].ctl) | LCPLL_PLL_ENABLE);

-    if (intel_wait_for_register(&dev_priv->uncore,
-                                DPLL_STATUS,
-                                DPLL_LOCK(id),
-                                DPLL_LOCK(id),
-                                5))
+    if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
         DRM_ERROR("DPLL %d not locked\n", id);
 }

@@ -2016,11 +2012,8 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
     I915_WRITE(CNL_DPLL_ENABLE(id), val);

     /* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
-    if (intel_wait_for_register(&dev_priv->uncore,
-                                CNL_DPLL_ENABLE(id),
-                                PLL_POWER_STATE,
-                                PLL_POWER_STATE,
-                                5))
+    if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id),
+                              PLL_POWER_STATE, 5))
         DRM_ERROR("PLL %d Power not enabled\n", id);

     /*
@@ -2057,11 +2050,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
     I915_WRITE(CNL_DPLL_ENABLE(id), val);

     /* 7. Wait for PLL lock status in DPLL_ENABLE. */
-    if (intel_wait_for_register(&dev_priv->uncore,
-                                CNL_DPLL_ENABLE(id),
-                                PLL_LOCK,
-                                PLL_LOCK,
-                                5))
+    if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
         DRM_ERROR("PLL %d not locked\n", id);

     /*
@@ -2105,11 +2094,7 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
     I915_WRITE(CNL_DPLL_ENABLE(id), val);

     /* 4. Wait for PLL not locked status in DPLL_ENABLE. */
-    if (intel_wait_for_register(&dev_priv->uncore,
-                                CNL_DPLL_ENABLE(id),
-                                PLL_LOCK,
-                                0,
-                                5))
+    if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
         DRM_ERROR("PLL %d locked\n", id);

     /*
@@ -2127,11 +2112,8 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
     I915_WRITE(CNL_DPLL_ENABLE(id), val);

     /* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
-    if (intel_wait_for_register(&dev_priv->uncore,
-                                CNL_DPLL_ENABLE(id),
-                                PLL_POWER_STATE,
-                                0,
-                                5))
+    if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id),
+                                PLL_POWER_STATE, 5))
         DRM_ERROR("PLL %d Power not disabled\n", id);
 }

@@ -3252,8 +3234,7 @@ static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
     * The spec says we need to "wait" but it also says it should be
     * immediate.
     */
-    if (intel_wait_for_register(&dev_priv->uncore, enable_reg,
-                                PLL_POWER_STATE, PLL_POWER_STATE, 1))
+    if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
         DRM_ERROR("PLL %d Power not enabled\n", pll->info->id);
 }

@@ -3268,8 +3249,7 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv,
     I915_WRITE(enable_reg, val);

     /* Timeout is actually 600us. */
-    if (intel_wait_for_register(&dev_priv->uncore, enable_reg,
-                                PLL_LOCK, PLL_LOCK, 1))
+    if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
         DRM_ERROR("PLL %d not locked\n", pll->info->id);
 }

@@ -3364,8 +3344,7 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
     I915_WRITE(enable_reg, val);

     /* Timeout is actually 1us. */
-    if (intel_wait_for_register(&dev_priv->uncore,
-                                enable_reg, PLL_LOCK, 0, 1))
+    if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
         DRM_ERROR("PLL %d locked\n", pll->info->id);

     /* DVFS post sequence would be here. See the comment above. */
@@ -3378,8 +3357,7 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
     * The spec says we need to "wait" but it also says it should be
     * immediate.
     */
-    if (intel_wait_for_register(&dev_priv->uncore,
-                                enable_reg, PLL_POWER_STATE, 0, 1))
+    if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
         DRM_ERROR("PLL %d Power not disabled\n", pll->info->id);
 }
@@ -26,7 +26,8 @@

 #include <drm/drm_crtc.h>
 #include <drm/drm_mipi_dsi.h>
-#include "intel_drv.h"
+
+#include "intel_display_types.h"

 #define INTEL_DSI_VIDEO_MODE 0
 #define INTEL_DSI_COMMAND_MODE 1

@@ -27,7 +27,7 @@
 #include <video/mipi_display.h>

 #include "i915_drv.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
 #include "intel_dsi.h"
 #include "intel_dsi_dcs_backlight.h"

@@ -38,7 +38,7 @@
 #include <video/mipi_display.h>

 #include "i915_drv.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
 #include "intel_dsi.h"
 #include "intel_sideband.h"

@@ -34,7 +34,7 @@

 #include "i915_drv.h"
 #include "intel_connector.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
 #include "intel_dvo.h"
 #include "intel_dvo_dev.h"
 #include "intel_gmbus.h"

@@ -41,7 +41,7 @@
 #include <drm/drm_fourcc.h>

 #include "i915_drv.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
 #include "intel_fbc.h"
 #include "intel_frontbuffer.h"
@@ -110,9 +110,8 @@ static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
     I915_WRITE(FBC_CONTROL, fbc_ctl);

     /* Wait for compressing bit to clear */
-    if (intel_wait_for_register(&dev_priv->uncore,
-                                FBC_STATUS, FBC_STAT_COMPRESSING, 0,
-                                10)) {
+    if (intel_de_wait_for_clear(dev_priv, FBC_STATUS,
+                                FBC_STAT_COMPRESSING, 10)) {
         DRM_DEBUG_KMS("FBC idle timed out\n");
         return;
     }

@@ -43,17 +43,18 @@
 #include <drm/i915_drm.h>

 #include "i915_drv.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
 #include "intel_fbdev.h"
 #include "intel_frontbuffer.h"

+static struct intel_frontbuffer *to_frontbuffer(struct intel_fbdev *ifbdev)
+{
+    return ifbdev->fb->frontbuffer;
+}
+
 static void intel_fbdev_invalidate(struct intel_fbdev *ifbdev)
 {
-    struct drm_i915_gem_object *obj = intel_fb_obj(&ifbdev->fb->base);
-    unsigned int origin =
-        ifbdev->vma_flags & PLANE_HAS_FENCE ? ORIGIN_GTT : ORIGIN_CPU;
-
-    intel_fb_obj_invalidate(obj, origin);
+    intel_frontbuffer_invalidate(to_frontbuffer(ifbdev), ORIGIN_CPU);
 }

 static int intel_fbdev_set_par(struct fb_info *info)
@@ -120,7 +121,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
     struct drm_i915_private *dev_priv = to_i915(dev);
     struct drm_mode_fb_cmd2 mode_cmd = {};
     struct drm_i915_gem_object *obj;
-    int size, ret;
+    int size;

     /* we don't do packed 24bpp */
     if (sizes->surface_bpp == 24)
@@ -147,24 +148,16 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
     obj = i915_gem_object_create_shmem(dev_priv, size);
     if (IS_ERR(obj)) {
         DRM_ERROR("failed to allocate framebuffer\n");
-        ret = PTR_ERR(obj);
-        goto err;
+        return PTR_ERR(obj);
     }

     fb = intel_framebuffer_create(obj, &mode_cmd);
-    if (IS_ERR(fb)) {
-        ret = PTR_ERR(fb);
-        goto err_obj;
-    }
+    i915_gem_object_put(obj);
+    if (IS_ERR(fb))
+        return PTR_ERR(fb);

     ifbdev->fb = to_intel_framebuffer(fb);
-
     return 0;
-
-err_obj:
-    i915_gem_object_put(obj);
-err:
-    return ret;
 }

 static int intelfb_create(struct drm_fb_helper *helper,
@@ -180,7 +173,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
     const struct i915_ggtt_view view = {
         .type = I915_GGTT_VIEW_NORMAL,
     };
-    struct drm_framebuffer *fb;
     intel_wakeref_t wakeref;
     struct fb_info *info;
     struct i915_vma *vma;
@@ -226,8 +218,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
         goto out_unlock;
     }

-    fb = &ifbdev->fb->base;
-    intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_DIRTYFB);
+    intel_frontbuffer_flush(to_frontbuffer(ifbdev), ORIGIN_DIRTYFB);

     info = drm_fb_helper_alloc_fbi(helper);
     if (IS_ERR(info)) {
@@ -236,17 +227,14 @@ static int intelfb_create(struct drm_fb_helper *helper,
         goto out_unpin;
     }

-    ifbdev->helper.fb = fb;
+    ifbdev->helper.fb = &ifbdev->fb->base;

     info->fbops = &intelfb_ops;

     /* setup aperture base/size for vesafb takeover */
-    info->apertures->ranges[0].base = dev->mode_config.fb_base;
+    info->apertures->ranges[0].base = ggtt->gmadr.start;
     info->apertures->ranges[0].size = ggtt->mappable_end;

-    info->fix.smem_start = dev->mode_config.fb_base + i915_ggtt_offset(vma);
-    info->fix.smem_len = vma->node.size;
-
     vaddr = i915_vma_pin_iomap(vma);
     if (IS_ERR(vaddr)) {
         DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
@@ -256,19 +244,24 @@ static int intelfb_create(struct drm_fb_helper *helper,
     info->screen_base = vaddr;
     info->screen_size = vma->node.size;

+    /* Our framebuffer is the entirety of fbdev's system memory */
+    info->fix.smem_start = (unsigned long)info->screen_base;
+    info->fix.smem_len = info->screen_size;
+
     drm_fb_helper_fill_info(info, &ifbdev->helper, sizes);

     /* If the object is shmemfs backed, it will have given us zeroed pages.
      * If the object is stolen however, it will be full of whatever
      * garbage was left in there.
      */
-    if (intel_fb_obj(fb)->stolen && !prealloc)
+    if (vma->obj->stolen && !prealloc)
         memset_io(info->screen_base, 0, info->screen_size);

     /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

     DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x\n",
-                  fb->width, fb->height, i915_ggtt_offset(vma));
+                  ifbdev->fb->base.width, ifbdev->fb->base.height,
+                  i915_ggtt_offset(vma));
     ifbdev->vma = vma;
     ifbdev->vma_flags = flags;

@@ -26,7 +26,8 @@
 */

 #include "i915_drv.h"
-#include "intel_drv.h"
+#include "i915_trace.h"
+#include "intel_display_types.h"
 #include "intel_fbc.h"
 #include "intel_fifo_underrun.h"
@@ -30,11 +30,11 @@
 * Many features require us to track changes to the currently active
 * frontbuffer, especially rendering targeted at the frontbuffer.
 *
-* To be able to do so GEM tracks frontbuffers using a bitmask for all possible
-* frontbuffer slots through i915_gem_track_fb(). The function in this file are
-* then called when the contents of the frontbuffer are invalidated, when
-* frontbuffer rendering has stopped again to flush out all the changes and when
-* the frontbuffer is exchanged with a flip. Subsystems interested in
+* To be able to do so we track frontbuffers using a bitmask for all possible
+* frontbuffer slots through intel_frontbuffer_track(). The functions in this
+* file are then called when the contents of the frontbuffer are invalidated,
+* when frontbuffer rendering has stopped again to flush out all the changes
+* and when the frontbuffer is exchanged with a flip. Subsystems interested in
 * frontbuffer changes (e.g. PSR, FBC, DRRS) should directly put their callbacks
 * into the relevant places and filter for the frontbuffer slots that they are
 * interested int.
@@ -58,33 +58,14 @@
 #include "display/intel_dp.h"

 #include "i915_drv.h"
-#include "intel_drv.h"
+#include "intel_display_types.h"
 #include "intel_fbc.h"
 #include "intel_frontbuffer.h"
 #include "intel_psr.h"

-void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-                               enum fb_op_origin origin,
-                               unsigned int frontbuffer_bits)
-{
-    struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-
-    if (origin == ORIGIN_CS) {
-        spin_lock(&dev_priv->fb_tracking.lock);
-        dev_priv->fb_tracking.busy_bits |= frontbuffer_bits;
-        dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
-        spin_unlock(&dev_priv->fb_tracking.lock);
-    }
-
-    might_sleep();
-    intel_psr_invalidate(dev_priv, frontbuffer_bits, origin);
-    intel_edp_drrs_invalidate(dev_priv, frontbuffer_bits);
-    intel_fbc_invalidate(dev_priv, frontbuffer_bits, origin);
-}
-
 /**
- * intel_frontbuffer_flush - flush frontbuffer
- * @dev_priv: i915 device
+ * frontbuffer_flush - flush frontbuffer
+ * @i915: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 * @origin: which operation caused the flush
 *
@@ -94,45 +75,27 @@ void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
 *
 * Can be called without any locks held.
 */
-static void intel_frontbuffer_flush(struct drm_i915_private *dev_priv,
-                                    unsigned frontbuffer_bits,
-                                    enum fb_op_origin origin)
+static void frontbuffer_flush(struct drm_i915_private *i915,
+                              unsigned int frontbuffer_bits,
+                              enum fb_op_origin origin)
 {
     /* Delay flushing when rings are still busy.*/
-    spin_lock(&dev_priv->fb_tracking.lock);
-    frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
-    spin_unlock(&dev_priv->fb_tracking.lock);
+    spin_lock(&i915->fb_tracking.lock);
+    frontbuffer_bits &= ~i915->fb_tracking.busy_bits;
+    spin_unlock(&i915->fb_tracking.lock);

     if (!frontbuffer_bits)
         return;

     might_sleep();
-    intel_edp_drrs_flush(dev_priv, frontbuffer_bits);
-    intel_psr_flush(dev_priv, frontbuffer_bits, origin);
-    intel_fbc_flush(dev_priv, frontbuffer_bits, origin);
-}
-
-void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
-                          enum fb_op_origin origin,
-                          unsigned int frontbuffer_bits)
-{
-    struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-
-    if (origin == ORIGIN_CS) {
-        spin_lock(&dev_priv->fb_tracking.lock);
-        /* Filter out new bits since rendering started. */
-        frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
-        dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
-        spin_unlock(&dev_priv->fb_tracking.lock);
-    }
-
-    if (frontbuffer_bits)
-        intel_frontbuffer_flush(dev_priv, frontbuffer_bits, origin);
+    intel_edp_drrs_flush(i915, frontbuffer_bits);
+    intel_psr_flush(i915, frontbuffer_bits, origin);
+    intel_fbc_flush(i915, frontbuffer_bits, origin);
 }

 /**
 * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
- * @dev_priv: i915 device
+ * @i915: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called after scheduling a flip on @obj. The actual
@@ -142,19 +105,19 @@ void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
 *
 * Can be called without any locks held.
 */
-void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
+void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
                                     unsigned frontbuffer_bits)
 {
-    spin_lock(&dev_priv->fb_tracking.lock);
-    dev_priv->fb_tracking.flip_bits |= frontbuffer_bits;
+    spin_lock(&i915->fb_tracking.lock);
+    i915->fb_tracking.flip_bits |= frontbuffer_bits;
     /* Remove stale busy bits due to the old buffer. */
-    dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
-    spin_unlock(&dev_priv->fb_tracking.lock);
+    i915->fb_tracking.busy_bits &= ~frontbuffer_bits;
+    spin_unlock(&i915->fb_tracking.lock);
 }

 /**
 * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flip
- * @dev_priv: i915 device
+ * @i915: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called after the flip has been latched and will complete
@@ -162,23 +125,22 @@ void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
 *
 * Can be called without any locks held.
 */
-void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv,
+void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
                                      unsigned frontbuffer_bits)
 {
-    spin_lock(&dev_priv->fb_tracking.lock);
+    spin_lock(&i915->fb_tracking.lock);
     /* Mask any cancelled flips. */
-    frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
-    dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
-    spin_unlock(&dev_priv->fb_tracking.lock);
+    frontbuffer_bits &= i915->fb_tracking.flip_bits;
+    i915->fb_tracking.flip_bits &= ~frontbuffer_bits;
+    spin_unlock(&i915->fb_tracking.lock);

     if (frontbuffer_bits)
-        intel_frontbuffer_flush(dev_priv,
-                                frontbuffer_bits, ORIGIN_FLIP);
+        frontbuffer_flush(i915, frontbuffer_bits, ORIGIN_FLIP);
 }

 /**
 * intel_frontbuffer_flip - synchronous frontbuffer flip
- * @dev_priv: i915 device
+ * @i915: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called after scheduling a flip on @obj. This is for
@@ -187,13 +149,160 @@ void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv,
 *
 * Can be called without any locks held.
 */
-void intel_frontbuffer_flip(struct drm_i915_private *dev_priv,
+void intel_frontbuffer_flip(struct drm_i915_private *i915,
                             unsigned frontbuffer_bits)
 {
-    spin_lock(&dev_priv->fb_tracking.lock);
+    spin_lock(&i915->fb_tracking.lock);
     /* Remove stale busy bits due to the old buffer. */
-    dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
-    spin_unlock(&dev_priv->fb_tracking.lock);
+    i915->fb_tracking.busy_bits &= ~frontbuffer_bits;
+    spin_unlock(&i915->fb_tracking.lock);

-    intel_frontbuffer_flush(dev_priv, frontbuffer_bits, ORIGIN_FLIP);
+    frontbuffer_flush(i915, frontbuffer_bits, ORIGIN_FLIP);
+}
+
+void __intel_fb_invalidate(struct intel_frontbuffer *front,
+                           enum fb_op_origin origin,
+                           unsigned int frontbuffer_bits)
+{
+    struct drm_i915_private *i915 = to_i915(front->obj->base.dev);
+
+    if (origin == ORIGIN_CS) {
+        spin_lock(&i915->fb_tracking.lock);
+        i915->fb_tracking.busy_bits |= frontbuffer_bits;
+        i915->fb_tracking.flip_bits &= ~frontbuffer_bits;
+        spin_unlock(&i915->fb_tracking.lock);
+    }
+
+    might_sleep();
+    intel_psr_invalidate(i915, frontbuffer_bits, origin);
+    intel_edp_drrs_invalidate(i915, frontbuffer_bits);
+    intel_fbc_invalidate(i915, frontbuffer_bits, origin);
+}
+
+void __intel_fb_flush(struct intel_frontbuffer *front,
+                      enum fb_op_origin origin,
+                      unsigned int frontbuffer_bits)
+{
+    struct drm_i915_private *i915 = to_i915(front->obj->base.dev);
+
+    if (origin == ORIGIN_CS) {
+        spin_lock(&i915->fb_tracking.lock);
+        /* Filter out new bits since rendering started. */
+        frontbuffer_bits &= i915->fb_tracking.busy_bits;
+        i915->fb_tracking.busy_bits &= ~frontbuffer_bits;
+        spin_unlock(&i915->fb_tracking.lock);
+    }
+
+    if (frontbuffer_bits)
+        frontbuffer_flush(i915, frontbuffer_bits, origin);
+}
+
+static int frontbuffer_active(struct i915_active *ref)
+{
+    struct intel_frontbuffer *front =
+        container_of(ref, typeof(*front), write);
+
+    kref_get(&front->ref);
+    return 0;
+}
+
+static void frontbuffer_retire(struct i915_active *ref)
+{
+    struct intel_frontbuffer *front =
+        container_of(ref, typeof(*front), write);
+
+    intel_frontbuffer_flush(front, ORIGIN_CS);
+    intel_frontbuffer_put(front);
+}
+
+static void frontbuffer_release(struct kref *ref)
+    __releases(&to_i915(front->obj->base.dev)->fb_tracking.lock)
+{
+    struct intel_frontbuffer *front =
+        container_of(ref, typeof(*front), ref);
+
+    front->obj->frontbuffer = NULL;
+    spin_unlock(&to_i915(front->obj->base.dev)->fb_tracking.lock);
+
+    i915_gem_object_put(front->obj);
+    kfree(front);
+}
+
+struct intel_frontbuffer *
+intel_frontbuffer_get(struct drm_i915_gem_object *obj)
+{
+    struct drm_i915_private *i915 = to_i915(obj->base.dev);
+    struct intel_frontbuffer *front;
+
+    spin_lock(&i915->fb_tracking.lock);
+    front = obj->frontbuffer;
+    if (front)
+        kref_get(&front->ref);
+    spin_unlock(&i915->fb_tracking.lock);
+    if (front)
+        return front;
+
+    front = kmalloc(sizeof(*front), GFP_KERNEL);
+    if (!front)
+        return NULL;
+
+    front->obj = obj;
+    kref_init(&front->ref);
+    atomic_set(&front->bits, 0);
+    i915_active_init(i915, &front->write,
+                     frontbuffer_active, frontbuffer_retire);
+
+    spin_lock(&i915->fb_tracking.lock);
+    if (obj->frontbuffer) {
+        kfree(front);
+        front = obj->frontbuffer;
+        kref_get(&front->ref);
+    } else {
+        i915_gem_object_get(obj);
+        obj->frontbuffer = front;
+    }
+    spin_unlock(&i915->fb_tracking.lock);
+
+    return front;
+}
+
+void intel_frontbuffer_put(struct intel_frontbuffer *front)
+{
+    kref_put_lock(&front->ref,
+                  frontbuffer_release,
+                  &to_i915(front->obj->base.dev)->fb_tracking.lock);
+}
+
+/**
+ * intel_frontbuffer_track - update frontbuffer tracking
+ * @old: current buffer for the frontbuffer slots
+ * @new: new buffer for the frontbuffer slots
+ * @frontbuffer_bits: bitmask of frontbuffer slots
+ *
+ * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
+ * from @old and setting them in @new. Both @old and @new can be NULL.
+ */
+void intel_frontbuffer_track(struct intel_frontbuffer *old,
+                             struct intel_frontbuffer *new,
+                             unsigned int frontbuffer_bits)
+{
+    /*
+     * Control of individual bits within the mask are guarded by
+     * the owning plane->mutex, i.e. we can never see concurrent
+     * manipulation of individual bits. But since the bitfield as a whole
+     * is updated using RMW, we need to use atomics in order to update
+     * the bits.
+     */
+    BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
+                 BITS_PER_TYPE(atomic_t));
+
+    if (old) {
+        WARN_ON(!(atomic_read(&old->bits) & frontbuffer_bits));
+        atomic_andnot(frontbuffer_bits, &old->bits);
+    }
+
+    if (new) {
+        WARN_ON(atomic_read(&new->bits) & frontbuffer_bits);
+        atomic_or(frontbuffer_bits, &new->bits);
+    }
+}
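The code added above gives each GEM object at most one refcounted intel_frontbuffer: intel_frontbuffer_get() finds or creates it under fb_tracking.lock (allocating outside the lock and rechecking), and intel_frontbuffer_put() drops the reference, with frontbuffer_release() detaching it from the object under the same lock via kref_put_lock(). Plane code then moves tracking bits between buffers with intel_frontbuffer_track(). A usage sketch built only on the API introduced above (the wrapper function name is illustrative):

    static void example_track_flip(struct intel_frontbuffer *old_front,
                                   struct drm_i915_gem_object *new_obj,
                                   unsigned int frontbuffer_bits)
    {
        struct intel_frontbuffer *new_front;

        /* Find or create the refcounted frontbuffer for the new object. */
        new_front = intel_frontbuffer_get(new_obj);
        if (!new_front)
            return; /* allocation failed; keep the old tracking as-is */

        /* Clear the bits on the old buffer and set them on the new one. */
        intel_frontbuffer_track(old_front, new_front, frontbuffer_bits);

        /* The old reference is dropped; the new one stays with the plane. */
        if (old_front)
            intel_frontbuffer_put(old_front);
    }

The reference returned by intel_frontbuffer_get() is meant to live as long as the plane points at that buffer, mirroring how the tracking bits themselves move on each flip.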
@@ -24,7 +24,10 @@
#ifndef __INTEL_FRONTBUFFER_H__
#define __INTEL_FRONTBUFFER_H__

#include "gem/i915_gem_object.h"
#include <linux/atomic.h>
#include <linux/kref.h>

#include "i915_active.h"

struct drm_i915_private;
struct drm_i915_gem_object;

@@ -37,23 +40,30 @@ enum fb_op_origin {
	ORIGIN_DIRTYFB,
};

void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
struct intel_frontbuffer {
	struct kref ref;
	atomic_t bits;
	struct i915_active write;
	struct drm_i915_gem_object *obj;
};

void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
				    unsigned frontbuffer_bits);
void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv,
void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
				     unsigned frontbuffer_bits);
void intel_frontbuffer_flip(struct drm_i915_private *dev_priv,
void intel_frontbuffer_flip(struct drm_i915_private *i915,
			    unsigned frontbuffer_bits);

void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
			       enum fb_op_origin origin,
			       unsigned int frontbuffer_bits);
void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
			  enum fb_op_origin origin,
			  unsigned int frontbuffer_bits);
struct intel_frontbuffer *
intel_frontbuffer_get(struct drm_i915_gem_object *obj);

void __intel_fb_invalidate(struct intel_frontbuffer *front,
			   enum fb_op_origin origin,
			   unsigned int frontbuffer_bits);

/**
 * intel_fb_obj_invalidate - invalidate frontbuffer object
 * @obj: GEM object to invalidate
 * intel_frontbuffer_invalidate - invalidate frontbuffer object
 * @front: GEM object to invalidate
 * @origin: which operation caused the invalidation
 *
 * This function gets called every time rendering on the given object starts and
@@ -62,37 +72,53 @@ void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
 * until the rendering completes or a flip on this frontbuffer plane is
 * scheduled.
 */
static inline bool intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
					   enum fb_op_origin origin)
static inline bool intel_frontbuffer_invalidate(struct intel_frontbuffer *front,
						enum fb_op_origin origin)
{
	unsigned int frontbuffer_bits;

	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
	if (!front)
		return false;

	frontbuffer_bits = atomic_read(&front->bits);
	if (!frontbuffer_bits)
		return false;

	__intel_fb_obj_invalidate(obj, origin, frontbuffer_bits);
	__intel_fb_invalidate(front, origin, frontbuffer_bits);
	return true;
}

void __intel_fb_flush(struct intel_frontbuffer *front,
		      enum fb_op_origin origin,
		      unsigned int frontbuffer_bits);

/**
 * intel_fb_obj_flush - flush frontbuffer object
 * @obj: GEM object to flush
 * intel_frontbuffer_flush - flush frontbuffer object
 * @front: GEM object to flush
 * @origin: which operation caused the flush
 *
 * This function gets called every time rendering on the given object has
 * completed and frontbuffer caching can be started again.
 */
static inline void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
				      enum fb_op_origin origin)
static inline void intel_frontbuffer_flush(struct intel_frontbuffer *front,
					   enum fb_op_origin origin)
{
	unsigned int frontbuffer_bits;

	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
	if (!front)
		return;

	frontbuffer_bits = atomic_read(&front->bits);
	if (!frontbuffer_bits)
		return;

	__intel_fb_obj_flush(obj, origin, frontbuffer_bits);
	__intel_fb_flush(front, origin, frontbuffer_bits);
}

void intel_frontbuffer_track(struct intel_frontbuffer *old,
			     struct intel_frontbuffer *new,
			     unsigned int frontbuffer_bits);

void intel_frontbuffer_put(struct intel_frontbuffer *front);

#endif /* __INTEL_FRONTBUFFER_H__ */
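The NULL checks added to the two inline helpers make them safe to call on an object that was never registered for tracking. A sketch of the intended call pattern around a CPU write, assuming obj->frontbuffer was previously set up through intel_frontbuffer_get():

	/* sketch: bracket CPU rendering with invalidate/flush */
	if (intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU)) {
		/* ... write pixels through the CPU mapping ... */
	}
	intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);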
@@ -35,7 +35,7 @@
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_gmbus.h"

struct gmbus_pin {

@@ -80,21 +80,6 @@ static const struct gmbus_pin gmbus_pins_cnp[] = {
};

static const struct gmbus_pin gmbus_pins_icp[] = {
	[GMBUS_PIN_1_BXT] = { "dpa", GPIOB },
	[GMBUS_PIN_2_BXT] = { "dpb", GPIOC },
	[GMBUS_PIN_9_TC1_ICP] = { "tc1", GPIOJ },
	[GMBUS_PIN_10_TC2_ICP] = { "tc2", GPIOK },
	[GMBUS_PIN_11_TC3_ICP] = { "tc3", GPIOL },
	[GMBUS_PIN_12_TC4_ICP] = { "tc4", GPIOM },
};

static const struct gmbus_pin gmbus_pins_mcc[] = {
	[GMBUS_PIN_1_BXT] = { "dpa", GPIOB },
	[GMBUS_PIN_2_BXT] = { "dpb", GPIOC },
	[GMBUS_PIN_9_TC1_ICP] = { "dpc", GPIOJ },
};

static const struct gmbus_pin gmbus_pins_tgp[] = {
	[GMBUS_PIN_1_BXT] = { "dpa", GPIOB },
	[GMBUS_PIN_2_BXT] = { "dpb", GPIOC },
	[GMBUS_PIN_3_BXT] = { "dpc", GPIOD },

@@ -110,11 +95,7 @@ static const struct gmbus_pin gmbus_pins_tgp[] = {
static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *dev_priv,
					     unsigned int pin)
{
	if (HAS_PCH_TGP(dev_priv))
		return &gmbus_pins_tgp[pin];
	else if (HAS_PCH_MCC(dev_priv))
		return &gmbus_pins_mcc[pin];
	else if (HAS_PCH_ICP(dev_priv))
	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		return &gmbus_pins_icp[pin];
	else if (HAS_PCH_CNP(dev_priv))
		return &gmbus_pins_cnp[pin];

@@ -133,11 +114,7 @@ bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
{
	unsigned int size;

	if (HAS_PCH_TGP(dev_priv))
		size = ARRAY_SIZE(gmbus_pins_tgp);
	else if (HAS_PCH_MCC(dev_priv))
		size = ARRAY_SIZE(gmbus_pins_mcc);
	else if (HAS_PCH_ICP(dev_priv))
	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		size = ARRAY_SIZE(gmbus_pins_icp);
	else if (HAS_PCH_CNP(dev_priv))
		size = ARRAY_SIZE(gmbus_pins_cnp);
@@ -11,6 +11,28 @@
struct drm_i915_private;
struct i2c_adapter;

#define GMBUS_PIN_DISABLED	0
#define GMBUS_PIN_SSC		1
#define GMBUS_PIN_VGADDC	2
#define GMBUS_PIN_PANEL		3
#define GMBUS_PIN_DPD_CHV	3 /* HDMID_CHV */
#define GMBUS_PIN_DPC		4 /* HDMIC */
#define GMBUS_PIN_DPB		5 /* SDVO, HDMIB */
#define GMBUS_PIN_DPD		6 /* HDMID */
#define GMBUS_PIN_RESERVED	7 /* 7 reserved */
#define GMBUS_PIN_1_BXT		1 /* BXT+ (atom) and CNP+ (big core) */
#define GMBUS_PIN_2_BXT		2
#define GMBUS_PIN_3_BXT		3
#define GMBUS_PIN_4_CNP		4
#define GMBUS_PIN_9_TC1_ICP	9
#define GMBUS_PIN_10_TC2_ICP	10
#define GMBUS_PIN_11_TC3_ICP	11
#define GMBUS_PIN_12_TC4_ICP	12
#define GMBUS_PIN_13_TC5_TGP	13
#define GMBUS_PIN_14_TC6_TGP	14

#define GMBUS_NUM_PINS	15 /* including 0 */

int intel_gmbus_setup(struct drm_i915_private *dev_priv);
void intel_gmbus_teardown(struct drm_i915_private *dev_priv);
bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
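get_gmbus_pin() in the .c file indexes these per-PCH tables without a bounds check, so intel_gmbus_is_valid_pin() is the guard callers are expected to go through first. A minimal sketch of that contract; pin here is a hypothetical VBT-supplied value:

	/* sketch: validate before lookup */
	if (!intel_gmbus_is_valid_pin(dev_priv, pin))
		return -EINVAL;	/* pin is not wired up on this PCH */

	/* safe: pin now indexes within the selected gmbus_pins_*[] table */
	name = get_gmbus_pin(dev_priv, pin)->name;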
@@ -14,7 +14,8 @@
#include <drm/i915_component.h>

#include "i915_reg.h"
#include "intel_drv.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_hdcp.h"
#include "intel_sideband.h"

@@ -244,8 +245,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
{
	I915_WRITE(HDCP_SHA_TEXT, sha_text);
	if (intel_wait_for_register(&dev_priv->uncore, HDCP_REP_CTL,
				    HDCP_SHA1_READY, HDCP_SHA1_READY, 1)) {
	if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
		DRM_ERROR("Timed out waiting for SHA1 ready\n");
		return -ETIMEDOUT;
	}
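This series repeatedly swaps open-coded intel_wait_for_register() calls for the display-engine wait helpers, and the mapping is mechanical, as the old/new line pairs throughout this patch show. A condensed before/after sketch; REG, MASK and the 1 ms timeout are placeholders, not real registers:

	/* wait until the bits in MASK read back as set */
	intel_wait_for_register(&dev_priv->uncore, REG, MASK, MASK, 1);	/* old */
	intel_de_wait_for_set(dev_priv, REG, MASK, 1);			/* new */

	/* wait until the bits in MASK read back as clear */
	intel_wait_for_register(&dev_priv->uncore, REG, MASK, 0, 1);	/* old */
	intel_de_wait_for_clear(dev_priv, REG, MASK, 1);		/* new */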
@@ -475,9 +475,8 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,

	/* Tell the HW we're done with the hash and wait for it to ACK */
	I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_COMPLETE_HASH);
	if (intel_wait_for_register(&dev_priv->uncore, HDCP_REP_CTL,
				    HDCP_SHA1_COMPLETE,
				    HDCP_SHA1_COMPLETE, 1)) {
	if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL,
				  HDCP_SHA1_COMPLETE, 1)) {
		DRM_ERROR("Timed out waiting for SHA1 complete\n");
		return -ETIMEDOUT;
	}

@@ -540,7 +539,8 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)

	if (drm_hdcp_check_ksvs_revoked(dev, ksv_fifo, num_downstream)) {
		DRM_ERROR("Revoked Ksv(s) in ksv_fifo\n");
		return -EPERM;
		ret = -EPERM;
		goto err;
	}

	/*

@@ -619,9 +619,8 @@ static int intel_hdcp_auth(struct intel_connector *connector)
	I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_CAPTURE_AN);

	/* Wait for An to be acquired */
	if (intel_wait_for_register(&dev_priv->uncore, PORT_HDCP_STATUS(port),
				    HDCP_STATUS_AN_READY,
				    HDCP_STATUS_AN_READY, 1)) {
	if (intel_de_wait_for_set(dev_priv, PORT_HDCP_STATUS(port),
				  HDCP_STATUS_AN_READY, 1)) {
		DRM_ERROR("Timed out waiting for An\n");
		return -ETIMEDOUT;
	}

@@ -705,9 +704,9 @@ static int intel_hdcp_auth(struct intel_connector *connector)
	}

	/* Wait for encryption confirmation */
	if (intel_wait_for_register(&dev_priv->uncore, PORT_HDCP_STATUS(port),
				    HDCP_STATUS_ENC, HDCP_STATUS_ENC,
				    ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
	if (intel_de_wait_for_set(dev_priv, PORT_HDCP_STATUS(port),
				  HDCP_STATUS_ENC,
				  ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		DRM_ERROR("Timed out waiting for encryption\n");
		return -ETIMEDOUT;
	}

@@ -737,8 +736,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector)

	hdcp->hdcp_encrypted = false;
	I915_WRITE(PORT_HDCP_CONF(port), 0);
	if (intel_wait_for_register(&dev_priv->uncore,
				    PORT_HDCP_STATUS(port), ~0, 0,
	if (intel_de_wait_for_clear(dev_priv, PORT_HDCP_STATUS(port), ~0,
				    ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
		return -ETIMEDOUT;

@@ -1515,10 +1513,9 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
			   CTL_LINK_ENCRYPTION_REQ);
	}

	ret = intel_wait_for_register(&dev_priv->uncore, HDCP2_STATUS_DDI(port),
				      LINK_ENCRYPTION_STATUS,
				      LINK_ENCRYPTION_STATUS,
				      ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
	ret = intel_de_wait_for_set(dev_priv, HDCP2_STATUS_DDI(port),
				    LINK_ENCRYPTION_STATUS,
				    ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);

	return ret;
}

@@ -1536,8 +1533,8 @@ static int hdcp2_disable_encryption(struct intel_connector *connector)
	I915_WRITE(HDCP2_CTL_DDI(port),
		   I915_READ(HDCP2_CTL_DDI(port)) & ~CTL_LINK_ENCRYPTION_REQ);

	ret = intel_wait_for_register(&dev_priv->uncore, HDCP2_STATUS_DDI(port),
				      LINK_ENCRYPTION_STATUS, 0x0,
	ret = intel_de_wait_for_clear(dev_priv, HDCP2_STATUS_DDI(port),
				      LINK_ENCRYPTION_STATUS,
				      ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
	if (ret == -ETIMEDOUT)
		DRM_DEBUG_KMS("Disable Encryption Timedout");
@@ -45,17 +45,17 @@
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dpio_phy.h"
#include "intel_drv.h"
#include "intel_fifo_underrun.h"
#include "intel_gmbus.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_lspcon.h"
#include "intel_sdvo.h"
#include "intel_panel.h"
#include "intel_sdvo.h"
#include "intel_sideband.h"

static struct drm_device *intel_hdmi_to_dev(struct intel_hdmi *intel_hdmi)

@@ -1514,29 +1514,28 @@ bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port)
	return true;
}

static struct hdcp2_hdmi_msg_data {
struct hdcp2_hdmi_msg_data {
	u8 msg_id;
	u32 timeout;
	u32 timeout2;
} hdcp2_msg_data[] = {
	{HDCP_2_2_AKE_INIT, 0, 0},
	{HDCP_2_2_AKE_SEND_CERT, HDCP_2_2_CERT_TIMEOUT_MS, 0},
	{HDCP_2_2_AKE_NO_STORED_KM, 0, 0},
	{HDCP_2_2_AKE_STORED_KM, 0, 0},
	{HDCP_2_2_AKE_SEND_HPRIME, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
	 HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS},
	{HDCP_2_2_AKE_SEND_PAIRING_INFO, HDCP_2_2_PAIRING_TIMEOUT_MS,
	 0},
	{HDCP_2_2_LC_INIT, 0, 0},
	{HDCP_2_2_LC_SEND_LPRIME, HDCP_2_2_HDMI_LPRIME_TIMEOUT_MS, 0},
	{HDCP_2_2_SKE_SEND_EKS, 0, 0},
	{HDCP_2_2_REP_SEND_RECVID_LIST,
	 HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0},
	{HDCP_2_2_REP_SEND_ACK, 0, 0},
	{HDCP_2_2_REP_STREAM_MANAGE, 0, 0},
	{HDCP_2_2_REP_STREAM_READY, HDCP_2_2_STREAM_READY_TIMEOUT_MS,
	 0},
};
};

static const struct hdcp2_hdmi_msg_data hdcp2_msg_data[] = {
	{ HDCP_2_2_AKE_INIT, 0, 0 },
	{ HDCP_2_2_AKE_SEND_CERT, HDCP_2_2_CERT_TIMEOUT_MS, 0 },
	{ HDCP_2_2_AKE_NO_STORED_KM, 0, 0 },
	{ HDCP_2_2_AKE_STORED_KM, 0, 0 },
	{ HDCP_2_2_AKE_SEND_HPRIME, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
	  HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS },
	{ HDCP_2_2_AKE_SEND_PAIRING_INFO, HDCP_2_2_PAIRING_TIMEOUT_MS, 0 },
	{ HDCP_2_2_LC_INIT, 0, 0 },
	{ HDCP_2_2_LC_SEND_LPRIME, HDCP_2_2_HDMI_LPRIME_TIMEOUT_MS, 0 },
	{ HDCP_2_2_SKE_SEND_EKS, 0, 0 },
	{ HDCP_2_2_REP_SEND_RECVID_LIST, HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0 },
	{ HDCP_2_2_REP_SEND_ACK, 0, 0 },
	{ HDCP_2_2_REP_STREAM_MANAGE, 0, 0 },
	{ HDCP_2_2_REP_STREAM_READY, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0 },
};
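The rework above splits the type from the (now const) table. Timeouts are keyed by msg_id, so a caller needs a linear lookup; a sketch in that spirit (the helper name is assumed, not taken from this patch):

	static const struct hdcp2_hdmi_msg_data *
	get_hdcp2_msg_data(u8 msg_id)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(hdcp2_msg_data); i++)
			if (hdcp2_msg_data[i].msg_id == msg_id)
				return &hdcp2_msg_data[i];

		return NULL;	/* unknown message: no timeout entry */
	}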
static
int intel_hdmi_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
@@ -26,7 +26,7 @@
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_hotplug.h"

/**

@@ -104,6 +104,12 @@ enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
		if (IS_CNL_WITH_PORT_F(dev_priv))
			return HPD_PORT_E;
		return HPD_PORT_F;
	case PORT_G:
		return HPD_PORT_G;
	case PORT_H:
		return HPD_PORT_H;
	case PORT_I:
		return HPD_PORT_I;
	default:
		MISSING_CASE(port);
		return HPD_NONE;
@@ -27,8 +27,8 @@
#include <drm/drm_dp_dual_mode_helper.h>
#include <drm/drm_edid.h>

#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_drv.h"
#include "intel_lspcon.h"

/* LSPCON OUI Vendor ID(signatures) */
@@ -42,7 +42,7 @@
#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_connector.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_gmbus.h"
#include "intel_lvds.h"
#include "intel_panel.h"

@@ -318,8 +318,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder,
	I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) | PANEL_POWER_ON);
	POSTING_READ(lvds_encoder->reg);

	if (intel_wait_for_register(&dev_priv->uncore,
				    PP_STATUS(0), PP_ON, PP_ON, 5000))
	if (intel_de_wait_for_set(dev_priv, PP_STATUS(0), PP_ON, 5000))
		DRM_ERROR("timed out waiting for panel to power on\n");

	intel_panel_enable_backlight(pipe_config, conn_state);

@@ -333,8 +332,7 @@ static void intel_disable_lvds(struct intel_encoder *encoder,
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) & ~PANEL_POWER_ON);
	if (intel_wait_for_register(&dev_priv->uncore,
				    PP_STATUS(0), PP_ON, 0, 1000))
	if (intel_de_wait_for_clear(dev_priv, PP_STATUS(0), PP_ON, 1000))
		DRM_ERROR("timed out waiting for panel to power off\n");

	I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN);
@@ -35,7 +35,7 @@
#include "display/intel_panel.h"

#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_opregion.h"

#define OPREGION_HEADER_OFFSET 0
@@ -33,7 +33,7 @@

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_frontbuffer.h"
#include "intel_overlay.h"

@@ -191,7 +191,8 @@ struct intel_overlay {
	struct overlay_registers __iomem *regs;
	u32 flip_addr;
	/* flip handling */
	struct i915_active_request last_flip;
	struct i915_active last_flip;
	void (*flip_complete)(struct intel_overlay *ovl);
};

static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,

@@ -217,30 +218,25 @@ static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
				  PCI_DEVFN(0, 0), I830_CLOCK_GATE, val);
}

static void intel_overlay_submit_request(struct intel_overlay *overlay,
					 struct i915_request *rq,
					 i915_active_retire_fn retire)
static struct i915_request *
alloc_request(struct intel_overlay *overlay, void (*fn)(struct intel_overlay *))
{
	GEM_BUG_ON(i915_active_request_peek(&overlay->last_flip,
					    &overlay->i915->drm.struct_mutex));
	i915_active_request_set_retire_fn(&overlay->last_flip, retire,
					  &overlay->i915->drm.struct_mutex);
	__i915_active_request_set(&overlay->last_flip, rq);
	i915_request_add(rq);
}
	struct i915_request *rq;
	int err;

static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
					 struct i915_request *rq,
					 i915_active_retire_fn retire)
{
	intel_overlay_submit_request(overlay, rq, retire);
	return i915_active_request_retire(&overlay->last_flip,
					  &overlay->i915->drm.struct_mutex);
}
	overlay->flip_complete = fn;

static struct i915_request *alloc_request(struct intel_overlay *overlay)
{
	return i915_request_create(overlay->context);
	rq = i915_request_create(overlay->context);
	if (IS_ERR(rq))
		return rq;

	err = i915_active_ref(&overlay->last_flip, rq->timeline, rq);
	if (err) {
		i915_request_add(rq);
		return ERR_PTR(err);
	}

	return rq;
}
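With last_flip converted to a plain i915_active plus a flip_complete callback, every overlay operation below follows one shape: allocate a request already tracked by last_flip, emit commands, then add the request and (for the synchronous paths) block in i915_active_wait(). A condensed sketch of that shape, using only helpers visible in this patch; tail_fn is a placeholder:

	rq = alloc_request(overlay, tail_fn);	/* tail_fn may be NULL */
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* ... intel_ring_begin()/emit commands/intel_ring_advance() ... */

	i915_request_add(rq);
	return i915_active_wait(&overlay->last_flip);	/* or 0 for async paths */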
/* overlay needs to be disable in OCMD reg */

@@ -252,7 +248,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)

	WARN_ON(overlay->active);

	rq = alloc_request(overlay);
	rq = alloc_request(overlay, NULL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

@@ -273,7 +269,9 @@ static int intel_overlay_on(struct intel_overlay *overlay)
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	return intel_overlay_do_wait_request(overlay, rq, NULL);
	i915_request_add(rq);

	return i915_active_wait(&overlay->last_flip);
}

static void intel_overlay_flip_prepare(struct intel_overlay *overlay,

@@ -283,9 +281,9 @@ static void intel_overlay_flip_prepare(struct intel_overlay *overlay,

	WARN_ON(overlay->old_vma);

	i915_gem_track_fb(overlay->vma ? overlay->vma->obj : NULL,
			  vma ? vma->obj : NULL,
			  INTEL_FRONTBUFFER_OVERLAY(pipe));
	intel_frontbuffer_track(overlay->vma ? overlay->vma->obj->frontbuffer : NULL,
				vma ? vma->obj->frontbuffer : NULL,
				INTEL_FRONTBUFFER_OVERLAY(pipe));

	intel_frontbuffer_flip_prepare(overlay->i915,
				       INTEL_FRONTBUFFER_OVERLAY(pipe));

@@ -317,7 +315,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
	if (tmp & (1 << 17))
		DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);

	rq = alloc_request(overlay);
	rq = alloc_request(overlay, NULL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

@@ -332,8 +330,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
	intel_ring_advance(rq, cs);

	intel_overlay_flip_prepare(overlay, vma);

	intel_overlay_submit_request(overlay, rq, NULL);
	i915_request_add(rq);

	return 0;
}

@@ -354,20 +351,13 @@ static void intel_overlay_release_old_vma(struct intel_overlay *overlay)
}

static void
intel_overlay_release_old_vid_tail(struct i915_active_request *active,
				   struct i915_request *rq)
intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
{
	struct intel_overlay *overlay =
		container_of(active, typeof(*overlay), last_flip);

	intel_overlay_release_old_vma(overlay);
}

static void intel_overlay_off_tail(struct i915_active_request *active,
				   struct i915_request *rq)
static void intel_overlay_off_tail(struct intel_overlay *overlay)
{
	struct intel_overlay *overlay =
		container_of(active, typeof(*overlay), last_flip);
	struct drm_i915_private *dev_priv = overlay->i915;

	intel_overlay_release_old_vma(overlay);

@@ -380,6 +370,16 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
		i830_overlay_clock_gating(dev_priv, true);
}

static void
intel_overlay_last_flip_retire(struct i915_active *active)
{
	struct intel_overlay *overlay =
		container_of(active, typeof(*overlay), last_flip);

	if (overlay->flip_complete)
		overlay->flip_complete(overlay);
}

/* overlay needs to be disabled in OCMD reg */
static int intel_overlay_off(struct intel_overlay *overlay)
{

@@ -394,7 +394,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
	 * of the hw. Do it in both cases */
	flip_addr |= OFC_UPDATE;

	rq = alloc_request(overlay);
	rq = alloc_request(overlay, intel_overlay_off_tail);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

@@ -417,17 +417,16 @@ static int intel_overlay_off(struct intel_overlay *overlay)
	intel_ring_advance(rq, cs);

	intel_overlay_flip_prepare(overlay, NULL);
	i915_request_add(rq);

	return intel_overlay_do_wait_request(overlay, rq,
					     intel_overlay_off_tail);
	return i915_active_wait(&overlay->last_flip);
}

/* recover from an interruption due to a signal
 * We have to be careful not to repeat work forever and make forward progress. */
static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
{
	return i915_active_request_retire(&overlay->last_flip,
					  &overlay->i915->drm.struct_mutex);
	return i915_active_wait(&overlay->last_flip);
}

/* Wait for pending overlay flip and release old frame.

@@ -437,43 +436,40 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
{
	struct drm_i915_private *dev_priv = overlay->i915;
	struct i915_request *rq;
	u32 *cs;
	int ret;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	/* Only wait if there is actually an old frame to release to
	/*
	 * Only wait if there is actually an old frame to release to
	 * guarantee forward progress.
	 */
	if (!overlay->old_vma)
		return 0;

	if (I915_READ(GEN2_ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
		/* synchronous slowpath */
		struct i915_request *rq;
	if (!(I915_READ(GEN2_ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT)) {
		intel_overlay_release_old_vid_tail(overlay);
		return 0;
	}

		rq = alloc_request(overlay);
		if (IS_ERR(rq))
			return PTR_ERR(rq);
	rq = alloc_request(overlay, intel_overlay_release_old_vid_tail);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

		cs = intel_ring_begin(rq, 2);
		if (IS_ERR(cs)) {
			i915_request_add(rq);
			return PTR_ERR(cs);
		}
	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs)) {
		i915_request_add(rq);
		return PTR_ERR(cs);
	}

		*cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
		*cs++ = MI_NOOP;
		intel_ring_advance(rq, cs);
	*cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

		ret = intel_overlay_do_wait_request(overlay, rq,
						    intel_overlay_release_old_vid_tail);
		if (ret)
			return ret;
	} else
		intel_overlay_release_old_vid_tail(&overlay->last_flip, NULL);
	i915_request_add(rq);

	return 0;
	return i915_active_wait(&overlay->last_flip);
}

void intel_overlay_reset(struct drm_i915_private *dev_priv)

@@ -772,11 +768,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
		ret = PTR_ERR(vma);
		goto out_pin_section;
	}
	intel_fb_obj_flush(new_bo, ORIGIN_DIRTYFB);

	ret = i915_vma_put_fence(vma);
	if (ret)
		goto out_unpin;
	intel_frontbuffer_flush(new_bo->frontbuffer, ORIGIN_DIRTYFB);

	if (!overlay->active) {
		u32 oconfig;

@@ -1375,7 +1367,9 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
	overlay->contrast = 75;
	overlay->saturation = 146;

	INIT_ACTIVE_REQUEST(&overlay->last_flip);
	i915_active_init(dev_priv,
			 &overlay->last_flip,
			 NULL, intel_overlay_last_flip_retire);

	ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv));
	if (ret)

@@ -1409,6 +1403,7 @@ void intel_overlay_cleanup(struct drm_i915_private *dev_priv)
	WARN_ON(overlay->active);

	i915_gem_object_put(overlay->reg_bo);
	i915_active_fini(&overlay->last_flip);

	kfree(overlay);
}
@@ -35,8 +35,8 @@
#include <linux/pwm.h>

#include "intel_connector.h"
#include "intel_display_types.h"
#include "intel_dp_aux_backlight.h"
#include "intel_drv.h"
#include "intel_dsi_dcs_backlight.h"
#include "intel_panel.h"

@@ -30,7 +30,7 @@
#include <linux/seq_file.h>

#include "intel_atomic.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_pipe_crc.h"

static const char * const pipe_crc_sources[] = {
@@ -26,7 +26,7 @@
#include "display/intel_dp.h"

#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_psr.h"
#include "intel_sprite.h"

@@ -825,8 +825,8 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
	}

	/* Wait till PSR is idle */
	if (intel_wait_for_register(&dev_priv->uncore,
				    psr_status, psr_status_mask, 0, 2000))
	if (intel_de_wait_for_clear(dev_priv, psr_status,
				    psr_status_mask, 2000))
		DRM_ERROR("Timed out waiting PSR idle state\n");

	/* Disable PSR on Sink */

@@ -988,7 +988,7 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)

	mutex_unlock(&dev_priv->psr.lock);

	err = intel_wait_for_register(&dev_priv->uncore, reg, mask, 0, 50);
	err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
	if (err)
		DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");

@@ -5,7 +5,7 @@

#include <linux/dmi.h>

#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_quirks.h"

/*
@@ -39,7 +39,7 @@
#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_connector.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_fifo_underrun.h"
#include "intel_gmbus.h"
#include "intel_hdmi.h"
@@ -40,8 +40,9 @@
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_atomic_plane.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_frontbuffer.h"
#include "intel_pm.h"
#include "intel_psr.h"

@@ -330,6 +331,12 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
	return 0;
}

bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id)
{
	return INTEL_GEN(dev_priv) >= 11 &&
		icl_hdr_plane_mask() & BIT(plane_id);
}

static unsigned int
skl_plane_max_stride(struct intel_plane *plane,
		     u32 pixel_format, u64 modifier,
@@ -8,7 +8,6 @@

#include <linux/types.h>

#include "i915_drv.h"
#include "intel_display.h"

struct drm_device;

@@ -49,11 +48,6 @@ static inline u8 icl_hdr_plane_mask(void)
	       BIT(PLANE_SPRITE0) | BIT(PLANE_SPRITE1);
}

static inline bool icl_is_hdr_plane(struct drm_i915_private *dev_priv,
				    enum plane_id plane_id)
{
	return INTEL_GEN(dev_priv) >= 11 &&
		icl_hdr_plane_mask() & BIT(plane_id);
}
bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id);

#endif /* __INTEL_SPRITE_H__ */
@@ -5,6 +5,7 @@

#include "i915_drv.h"
#include "intel_display.h"
#include "intel_display_types.h"
#include "intel_dp_mst.h"
#include "intel_tc.h"

@@ -503,6 +504,12 @@ void intel_tc_port_unlock(struct intel_digital_port *dig_port)
				      wakeref);
}

bool intel_tc_port_ref_held(struct intel_digital_port *dig_port)
{
	return mutex_is_locked(&dig_port->tc_lock) ||
	       dig_port->tc_link_refcount;
}

void intel_tc_port_get_link(struct intel_digital_port *dig_port,
			    int required_lanes)
{
@@ -9,7 +9,7 @@
#include <linux/mutex.h>
#include <linux/types.h>

#include "intel_drv.h"
struct intel_digital_port;

bool intel_tc_port_connected(struct intel_digital_port *dig_port);
u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port);

@@ -23,12 +23,7 @@ void intel_tc_port_unlock(struct intel_digital_port *dig_port);
void intel_tc_port_get_link(struct intel_digital_port *dig_port,
			    int required_lanes);
void intel_tc_port_put_link(struct intel_digital_port *dig_port);

static inline int intel_tc_port_ref_held(struct intel_digital_port *dig_port)
{
	return mutex_is_locked(&dig_port->tc_lock) ||
	       dig_port->tc_link_refcount;
}
bool intel_tc_port_ref_held(struct intel_digital_port *dig_port);

void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy);

@@ -37,7 +37,7 @@

#include "i915_drv.h"
#include "intel_connector.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_hotplug.h"
#include "intel_tv.h"

@@ -317,9 +317,6 @@ enum vbt_gmbus_ddi {
	ICL_DDC_BUS_PORT_4,
	TGL_DDC_BUS_PORT_5,
	TGL_DDC_BUS_PORT_6,
	MCC_DDC_BUS_DDI_A = 0x1,
	MCC_DDC_BUS_DDI_B,
	MCC_DDC_BUS_DDI_C = 0x4,
};

#define DP_AUX_A 0x40
@@ -9,7 +9,7 @@
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_vdsc.h"

enum ROW_INDEX_BPP {
@@ -34,7 +34,7 @@
#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_connector.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
#include "intel_fifo_underrun.h"
#include "intel_panel.h"

@@ -84,9 +84,8 @@ void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
	mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY |
		LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY;

	if (intel_wait_for_register(&dev_priv->uncore,
				    MIPI_GEN_FIFO_STAT(port), mask, mask,
				    100))
	if (intel_de_wait_for_set(dev_priv, MIPI_GEN_FIFO_STAT(port),
				  mask, 100))
		DRM_ERROR("DPI FIFOs are not empty\n");
}

@@ -154,10 +153,8 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,

	/* note: this is never true for reads */
	if (packet.payload_length) {
		if (intel_wait_for_register(&dev_priv->uncore,
					    MIPI_GEN_FIFO_STAT(port),
					    data_mask, 0,
					    50))
		if (intel_de_wait_for_clear(dev_priv, MIPI_GEN_FIFO_STAT(port),
					    data_mask, 50))
			DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n");

		write_data(dev_priv, data_reg, packet.payload,

@@ -168,10 +165,8 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
		I915_WRITE(MIPI_INTR_STAT(port), GEN_READ_DATA_AVAIL);
	}

	if (intel_wait_for_register(&dev_priv->uncore,
				    MIPI_GEN_FIFO_STAT(port),
				    ctrl_mask, 0,
				    50)) {
	if (intel_de_wait_for_clear(dev_priv, MIPI_GEN_FIFO_STAT(port),
				    ctrl_mask, 50)) {
		DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n");
	}

@@ -180,10 +175,8 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
	/* ->rx_len is set only for reads */
	if (msg->rx_len) {
		data_mask = GEN_READ_DATA_AVAIL;
		if (intel_wait_for_register(&dev_priv->uncore,
					    MIPI_INTR_STAT(port),
					    data_mask, data_mask,
					    50))
		if (intel_de_wait_for_set(dev_priv, MIPI_INTR_STAT(port),
					  data_mask, 50))
			DRM_ERROR("Timeout waiting for read data.\n");

		read_data(dev_priv, data_reg, msg->rx_buf, msg->rx_len);

@@ -240,9 +233,7 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
	I915_WRITE(MIPI_DPI_CONTROL(port), cmd);

	mask = SPL_PKT_SENT_INTERRUPT;
	if (intel_wait_for_register(&dev_priv->uncore,
				    MIPI_INTR_STAT(port), mask, mask,
				    100))
	if (intel_de_wait_for_set(dev_priv, MIPI_INTR_STAT(port), mask, 100))
		DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd);

	return 0;

@@ -359,11 +350,8 @@ static bool glk_dsi_enable_io(struct intel_encoder *encoder)

	/* Wait for Pwr ACK */
	for_each_dsi_port(port, intel_dsi->ports) {
		if (intel_wait_for_register(&dev_priv->uncore,
					    MIPI_CTRL(port),
					    GLK_MIPIIO_PORT_POWERED,
					    GLK_MIPIIO_PORT_POWERED,
					    20))
		if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port),
					  GLK_MIPIIO_PORT_POWERED, 20))
			DRM_ERROR("MIPIO port is powergated\n");
	}

@@ -385,11 +373,8 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)

	/* Wait for MIPI PHY status bit to set */
	for_each_dsi_port(port, intel_dsi->ports) {
		if (intel_wait_for_register(&dev_priv->uncore,
					    MIPI_CTRL(port),
					    GLK_PHY_STATUS_PORT_READY,
					    GLK_PHY_STATUS_PORT_READY,
					    20))
		if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port),
					  GLK_PHY_STATUS_PORT_READY, 20))
			DRM_ERROR("PHY is not ON\n");
	}

@@ -413,11 +398,8 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
		I915_WRITE(MIPI_DEVICE_READY(port), val);

		/* Wait for ULPS active */
		if (intel_wait_for_register(&dev_priv->uncore,
					    MIPI_CTRL(port),
					    GLK_ULPS_NOT_ACTIVE,
					    0,
					    20))
		if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
					    GLK_ULPS_NOT_ACTIVE, 20))
			DRM_ERROR("ULPS not active\n");

		/* Exit ULPS */

@@ -440,21 +422,15 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)

	/* Wait for Stop state */
	for_each_dsi_port(port, intel_dsi->ports) {
		if (intel_wait_for_register(&dev_priv->uncore,
					    MIPI_CTRL(port),
					    GLK_DATA_LANE_STOP_STATE,
					    GLK_DATA_LANE_STOP_STATE,
					    20))
		if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port),
					  GLK_DATA_LANE_STOP_STATE, 20))
			DRM_ERROR("Date lane not in STOP state\n");
	}

	/* Wait for AFE LATCH */
	for_each_dsi_port(port, intel_dsi->ports) {
		if (intel_wait_for_register(&dev_priv->uncore,
					    BXT_MIPI_PORT_CTRL(port),
					    AFE_LATCHOUT,
					    AFE_LATCHOUT,
					    20))
		if (intel_de_wait_for_set(dev_priv, BXT_MIPI_PORT_CTRL(port),
					  AFE_LATCHOUT, 20))
			DRM_ERROR("D-PHY not entering LP-11 state\n");
	}
}

@@ -554,17 +530,15 @@ static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)

	/* Wait for MIPI PHY status bit to unset */
	for_each_dsi_port(port, intel_dsi->ports) {
		if (intel_wait_for_register(&dev_priv->uncore,
					    MIPI_CTRL(port),
					    GLK_PHY_STATUS_PORT_READY, 0, 20))
		if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
					    GLK_PHY_STATUS_PORT_READY, 20))
			DRM_ERROR("PHY is not turning OFF\n");
	}

	/* Wait for Pwr ACK bit to unset */
	for_each_dsi_port(port, intel_dsi->ports) {
		if (intel_wait_for_register(&dev_priv->uncore,
					    MIPI_CTRL(port),
					    GLK_MIPIIO_PORT_POWERED, 0, 20))
		if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
					    GLK_MIPIIO_PORT_POWERED, 20))
			DRM_ERROR("MIPI IO Port is not powergated\n");
	}
}

@@ -583,9 +557,8 @@ static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder)

	/* Wait for MIPI PHY status bit to unset */
	for_each_dsi_port(port, intel_dsi->ports) {
		if (intel_wait_for_register(&dev_priv->uncore,
					    MIPI_CTRL(port),
					    GLK_PHY_STATUS_PORT_READY, 0, 20))
		if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
					    GLK_PHY_STATUS_PORT_READY, 20))
			DRM_ERROR("PHY is not turning OFF\n");
	}

@@ -633,9 +606,8 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
	 * Port A only. MIPI Port C has no similar bit for checking.
	 */
	if ((IS_GEN9_LP(dev_priv) || port == PORT_A) &&
	    intel_wait_for_register(&dev_priv->uncore,
				    port_ctrl, AFE_LATCHOUT, 0,
				    30))
	    intel_de_wait_for_clear(dev_priv, port_ctrl,
				    AFE_LATCHOUT, 30))
		DRM_ERROR("DSI LP not going Low\n");

	/* Disable MIPI PHY transparent latch */
@@ -28,7 +28,7 @@
#include <linux/kernel.h>

#include "i915_drv.h"
#include "intel_drv.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
#include "intel_sideband.h"

@@ -246,11 +246,8 @@ void bxt_dsi_pll_disable(struct intel_encoder *encoder)
	 * PLL lock should deassert within 200us.
	 * Wait up to 1ms before timing out.
	 */
	if (intel_wait_for_register(&dev_priv->uncore,
				    BXT_DSI_PLL_ENABLE,
				    BXT_DSI_PLL_LOCKED,
				    0,
				    1))
	if (intel_de_wait_for_clear(dev_priv, BXT_DSI_PLL_ENABLE,
				    BXT_DSI_PLL_LOCKED, 1))
		DRM_ERROR("Timeout waiting for PLL lock deassertion\n");
}

@@ -396,8 +393,8 @@ static void glk_dsi_program_esc_clock(struct drm_device *dev,
	else
		txesc2_div = 10;

	I915_WRITE(MIPIO_TXESC_CLK_DIV1, txesc1_div & GLK_TX_ESC_CLK_DIV1_MASK);
	I915_WRITE(MIPIO_TXESC_CLK_DIV2, txesc2_div & GLK_TX_ESC_CLK_DIV2_MASK);
	I915_WRITE(MIPIO_TXESC_CLK_DIV1, (1 << (txesc1_div - 1)) & GLK_TX_ESC_CLK_DIV1_MASK);
	I915_WRITE(MIPIO_TXESC_CLK_DIV2, (1 << (txesc2_div - 1)) & GLK_TX_ESC_CLK_DIV2_MASK);
}
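The TXESC fix above is easiest to see with a number: the divider fields evidently take a one-hot selector rather than the raw divisor (my reading of the two changed lines, not stated elsewhere in this patch). For txesc1_div = 8:

	8 & GLK_TX_ESC_CLK_DIV1_MASK			/* old: writes 0b00001000 */
	(1 << (8 - 1)) & GLK_TX_ESC_CLK_DIV1_MASK	/* new: writes 0b10000000 */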

/* Program BXT Mipi clocks and dividers */
@@ -530,11 +527,8 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder,
	I915_WRITE(BXT_DSI_PLL_ENABLE, val);

	/* Timeout and fail if PLL not locked */
	if (intel_wait_for_register(&dev_priv->uncore,
				    BXT_DSI_PLL_ENABLE,
				    BXT_DSI_PLL_LOCKED,
				    BXT_DSI_PLL_LOCKED,
				    1)) {
	if (intel_de_wait_for_set(dev_priv, BXT_DSI_PLL_ENABLE,
				  BXT_DSI_PLL_LOCKED, 1)) {
		DRM_ERROR("Timed out waiting for DSI PLL to lock\n");
		return;
	}
@@ -8,87 +8,67 @@

#include "i915_drv.h"
#include "i915_gem_clflush.h"

static DEFINE_SPINLOCK(clflush_lock);
#include "i915_sw_fence_work.h"
#include "i915_trace.h"

struct clflush {
	struct dma_fence dma; /* Must be first for dma_fence_free() */
	struct i915_sw_fence wait;
	struct work_struct work;
	struct dma_fence_work base;
	struct drm_i915_gem_object *obj;
};

static const char *i915_clflush_get_driver_name(struct dma_fence *fence)
{
	return DRIVER_NAME;
}

static const char *i915_clflush_get_timeline_name(struct dma_fence *fence)
{
	return "clflush";
}

static void i915_clflush_release(struct dma_fence *fence)
{
	struct clflush *clflush = container_of(fence, typeof(*clflush), dma);

	i915_sw_fence_fini(&clflush->wait);

	BUILD_BUG_ON(offsetof(typeof(*clflush), dma));
	dma_fence_free(&clflush->dma);
}

static const struct dma_fence_ops i915_clflush_ops = {
	.get_driver_name = i915_clflush_get_driver_name,
	.get_timeline_name = i915_clflush_get_timeline_name,
	.release = i915_clflush_release,
};

static void __i915_do_clflush(struct drm_i915_gem_object *obj)
static void __do_clflush(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	drm_clflush_sg(obj->mm.pages);
	intel_fb_obj_flush(obj, ORIGIN_CPU);
	intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
}

static void i915_clflush_work(struct work_struct *work)
static int clflush_work(struct dma_fence_work *base)
{
	struct clflush *clflush = container_of(work, typeof(*clflush), work);
	struct drm_i915_gem_object *obj = clflush->obj;
	struct clflush *clflush = container_of(base, typeof(*clflush), base);
	struct drm_i915_gem_object *obj = fetch_and_zero(&clflush->obj);
	int err;

	if (i915_gem_object_pin_pages(obj)) {
		DRM_ERROR("Failed to acquire obj->pages for clflushing\n");
		goto out;
	}

	__i915_do_clflush(obj);
	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto put;

	__do_clflush(obj);
	i915_gem_object_unpin_pages(obj);

out:
put:
	i915_gem_object_put(obj);

	dma_fence_signal(&clflush->dma);
	dma_fence_put(&clflush->dma);
	return err;
}

static int __i915_sw_fence_call
i915_clflush_notify(struct i915_sw_fence *fence,
		    enum i915_sw_fence_notify state)
static void clflush_release(struct dma_fence_work *base)
{
	struct clflush *clflush = container_of(fence, typeof(*clflush), wait);
	struct clflush *clflush = container_of(base, typeof(*clflush), base);

	switch (state) {
	case FENCE_COMPLETE:
		schedule_work(&clflush->work);
		break;
	if (clflush->obj)
		i915_gem_object_put(clflush->obj);
}

	case FENCE_FREE:
		dma_fence_put(&clflush->dma);
		break;
	}
static const struct dma_fence_work_ops clflush_ops = {
	.name = "clflush",
	.work = clflush_work,
	.release = clflush_release,
};

	return NOTIFY_DONE;
static struct clflush *clflush_work_create(struct drm_i915_gem_object *obj)
{
	struct clflush *clflush;

	GEM_BUG_ON(!obj->cache_dirty);

	clflush = kmalloc(sizeof(*clflush), GFP_KERNEL);
	if (!clflush)
		return NULL;

	dma_fence_work_init(&clflush->base, &clflush_ops);
	clflush->obj = i915_gem_object_get(obj); /* obj <-> clflush cycle */

	return clflush;
}
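clflush_work_create() is the allocation half of the new dma_fence_work pattern: .work runs once every awaited fence has signalled, and .release is the cleanup hook. A skeletal sketch of another user under the same assumptions (struct my_user, my_task and the ops names are hypothetical):

	static int my_work(struct dma_fence_work *base)
	{
		/* runs after all fences awaited on base->chain have signalled */
		return my_task(container_of(base, struct my_user, base));
	}

	static const struct dma_fence_work_ops my_ops = {
		.name = "my-work",
		.work = my_work,
		.release = NULL,	/* optional cleanup, cf. clflush_release above */
	};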
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,

@@ -126,33 +106,16 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,

	clflush = NULL;
	if (!(flags & I915_CLFLUSH_SYNC))
		clflush = kmalloc(sizeof(*clflush), GFP_KERNEL);
		clflush = clflush_work_create(obj);
	if (clflush) {
		GEM_BUG_ON(!obj->cache_dirty);

		dma_fence_init(&clflush->dma,
			       &i915_clflush_ops,
			       &clflush_lock,
			       to_i915(obj->base.dev)->mm.unordered_timeline,
			       0);
		i915_sw_fence_init(&clflush->wait, i915_clflush_notify);

		clflush->obj = i915_gem_object_get(obj);
		INIT_WORK(&clflush->work, i915_clflush_work);

		dma_fence_get(&clflush->dma);

		i915_sw_fence_await_reservation(&clflush->wait,
						obj->base.resv, NULL,
						true, I915_FENCE_TIMEOUT,
		i915_sw_fence_await_reservation(&clflush->base.chain,
						obj->base.resv, NULL, true,
						I915_FENCE_TIMEOUT,
						I915_FENCE_GFP);

		dma_resv_add_excl_fence(obj->base.resv,
					&clflush->dma);

		i915_sw_fence_commit(&clflush->wait);
		dma_resv_add_excl_fence(obj->base.resv, &clflush->base.dma);
		dma_fence_work_commit(&clflush->base);
	} else if (obj->mm.pages) {
		__i915_do_clflush(obj);
		__do_clflush(obj);
	} else {
		GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
	}
@@ -2,10 +2,13 @@
/*
 * Copyright © 2019 Intel Corporation
 */
#include "i915_gem_client_blt.h"

#include "i915_drv.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_pool.h"
#include "i915_gem_client_blt.h"
#include "i915_gem_object_blt.h"
#include "intel_drv.h"

struct i915_sleeve {
	struct i915_vma *vma;

@@ -152,10 +155,11 @@ static void clear_pages_dma_fence_cb(struct dma_fence *fence,
static void clear_pages_worker(struct work_struct *work)
{
	struct clear_pages_work *w = container_of(work, typeof(*w), work);
	struct drm_i915_private *i915 = w->ce->gem_context->i915;
	struct drm_i915_private *i915 = w->ce->engine->i915;
	struct drm_i915_gem_object *obj = w->sleeve->vma->obj;
	struct i915_vma *vma = w->sleeve->vma;
	struct i915_request *rq;
	struct i915_vma *batch;
	int err = w->dma.error;

	if (unlikely(err))

@@ -175,10 +179,16 @@ static void clear_pages_worker(struct work_struct *work)
	if (unlikely(err))
		goto out_unlock;

	rq = i915_request_create(w->ce);
	batch = intel_emit_vma_fill_blt(w->ce, vma, w->value);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto out_unpin;
	}

	rq = intel_context_create_request(w->ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto out_unpin;
		goto out_batch;
	}

	/* There's no way the fence has signalled */

@@ -186,6 +196,10 @@ static void clear_pages_worker(struct work_struct *work)
				   clear_pages_dma_fence_cb))
		GEM_BUG_ON(1);

	err = intel_emit_vma_mark_active(batch, rq);
	if (unlikely(err))
		goto out_request;

	if (w->ce->engine->emit_init_breadcrumb) {
		err = w->ce->engine->emit_init_breadcrumb(rq);
		if (unlikely(err))

@@ -197,11 +211,13 @@ static void clear_pages_worker(struct work_struct *work)
	 * keep track of the GPU activity within this vma/request, and
	 * propagate the signal from the request to w->dma.
	 */
	err = i915_active_ref(&vma->active, rq->fence.context, rq);
	err = i915_active_ref(&vma->active, rq->timeline, rq);
	if (err)
		goto out_request;

	err = intel_emit_vma_fill_blt(rq, vma, w->value);
	err = w->ce->engine->emit_bb_start(rq,
					   batch->node.start, batch->node.size,
					   0);
out_request:
	if (unlikely(err)) {
		i915_request_skip(rq, err);

@@ -209,6 +225,8 @@ static void clear_pages_worker(struct work_struct *work)
	}

	i915_request_add(rq);
out_batch:
	intel_emit_vma_release(w->ce, batch);
out_unpin:
	i915_vma_unpin(vma);
out_unlock:

@@ -249,7 +267,6 @@ int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
				     struct i915_page_sizes *page_sizes,
				     u32 value)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct clear_pages_work *work;
	struct i915_sleeve *sleeve;
	int err;

@@ -272,11 +289,7 @@ int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,

	init_irq_work(&work->irq_work, clear_pages_signal_irq_worker);

	dma_fence_init(&work->dma,
		       &clear_pages_work_ops,
		       &fence_lock,
		       i915->mm.unordered_timeline,
		       0);
	dma_fence_init(&work->dma, &clear_pages_work_ops, &fence_lock, 0, 0);
	i915_sw_fence_init(&work->wait, clear_pages_work_notify);

	i915_gem_object_lock(obj);
@@ -70,6 +70,7 @@
#include <drm/i915_drm.h>

#include "gt/intel_lrc_reg.h"
#include "gt/intel_engine_user.h"

#include "i915_gem_context.h"
#include "i915_globals.h"

@@ -158,7 +159,7 @@ lookup_user_engine(struct i915_gem_context *ctx,
		if (!engine)
			return ERR_PTR(-EINVAL);

		idx = engine->id;
		idx = engine->legacy_idx;
	} else {
		idx = ci->engine_instance;
	}

@@ -172,7 +173,9 @@ static inline int new_hw_id(struct drm_i915_private *i915, gfp_t gfp)

	lockdep_assert_held(&i915->contexts.mutex);

	if (INTEL_GEN(i915) >= 11)
	if (INTEL_GEN(i915) >= 12)
		max = GEN12_MAX_CONTEXT_HW_ID;
	else if (INTEL_GEN(i915) >= 11)
		max = GEN11_MAX_CONTEXT_HW_ID;
	else if (USES_GUC_SUBMISSION(i915))
		/*

@@ -278,6 +281,7 @@ static void free_engines_rcu(struct rcu_head *rcu)

static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
{
	const struct intel_gt *gt = &ctx->i915->gt;
	struct intel_engine_cs *engine;
	struct i915_gem_engines *e;
	enum intel_engine_id id;

@@ -287,7 +291,7 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
		return ERR_PTR(-ENOMEM);

	init_rcu_head(&e->rcu);
	for_each_engine(engine, ctx->i915, id) {
	for_each_engine(engine, gt, id) {
		struct intel_context *ce;

		ce = intel_context_create(ctx, engine);

@@ -297,8 +301,8 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
		}

		e->engines[id] = ce;
		e->num_engines = id + 1;
	}
	e->num_engines = id;

	return e;
}

@@ -397,30 +401,6 @@ static void context_close(struct i915_gem_context *ctx)
	i915_gem_context_put(ctx);
}

static u32 default_desc_template(const struct drm_i915_private *i915,
				 const struct i915_address_space *vm)
{
	u32 address_mode;
	u32 desc;

	desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;

	address_mode = INTEL_LEGACY_32B_CONTEXT;
	if (vm && i915_vm_is_4lvl(vm))
		address_mode = INTEL_LEGACY_64B_CONTEXT;
	desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;

	if (IS_GEN(i915, 8))
		desc |= GEN8_CTX_L3LLC_COHERENT;

	/* TODO: WaDisableLiteRestore when we start using semaphore
	 * signalling between Command Streamers
	 * ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
	 */

	return desc;
}

static struct i915_gem_context *
__create_context(struct drm_i915_private *i915)
{

@@ -458,9 +438,6 @@ __create_context(struct drm_i915_private *i915)
	i915_gem_context_set_bannable(ctx);
	i915_gem_context_set_recoverable(ctx);

	ctx->ring_size = 4 * PAGE_SIZE;
	ctx->desc_template = default_desc_template(i915, NULL);

	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
		ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;

@@ -471,21 +448,34 @@ __create_context(struct drm_i915_private *i915)
	return ERR_PTR(err);
}

static void
context_apply_all(struct i915_gem_context *ctx,
		  void (*fn)(struct intel_context *ce, void *data),
		  void *data)
{
	struct i915_gem_engines_iter it;
	struct intel_context *ce;

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
		fn(ce, data);
	i915_gem_context_unlock_engines(ctx);
}

static void __apply_ppgtt(struct intel_context *ce, void *vm)
{
	i915_vm_put(ce->vm);
	ce->vm = i915_vm_get(vm);
}

static struct i915_address_space *
__set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
{
	struct i915_address_space *old = ctx->vm;
	struct i915_gem_engines_iter it;
	struct intel_context *ce;

	GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));

	ctx->vm = i915_vm_get(vm);
	ctx->desc_template = default_desc_template(ctx->i915, vm);

	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		i915_vm_put(ce->vm);
		ce->vm = i915_vm_get(vm);
	}
	i915_gem_context_unlock_engines(ctx);
	context_apply_all(ctx, __apply_ppgtt, vm);

	return old;
}

@@ -501,6 +491,29 @@ static void __assign_ppgtt(struct i915_gem_context *ctx,
	i915_vm_put(vm);
}

static void __set_timeline(struct intel_timeline **dst,
			   struct intel_timeline *src)
{
	struct intel_timeline *old = *dst;

	*dst = src ? intel_timeline_get(src) : NULL;

	if (old)
		intel_timeline_put(old);
}

static void __apply_timeline(struct intel_context *ce, void *timeline)
{
	__set_timeline(&ce->timeline, timeline);
}

static void __assign_timeline(struct i915_gem_context *ctx,
			      struct intel_timeline *timeline)
{
	__set_timeline(&ctx->timeline, timeline);
	context_apply_all(ctx, __apply_timeline, timeline);
}

static struct i915_gem_context *
i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
{

@@ -543,7 +556,8 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
			return ERR_CAST(timeline);
		}

		ctx->timeline = timeline;
		__assign_timeline(ctx, timeline);
		intel_timeline_put(timeline);
	}

	trace_i915_context_create(ctx);

@@ -551,53 +565,6 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
	return ctx;
}

/**
 * i915_gem_context_create_gvt - create a GVT GEM context
 * @dev: drm device *
 *
 * This function is used to create a GVT specific GEM context.
 *
 * Returns:
 * pointer to i915_gem_context on success, error pointer if failed
 *
 */
struct i915_gem_context *
i915_gem_context_create_gvt(struct drm_device *dev)
{
	struct i915_gem_context *ctx;
	int ret;

	if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
		return ERR_PTR(-ENODEV);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	ctx = i915_gem_create_context(to_i915(dev), 0);
	if (IS_ERR(ctx))
		goto out;

	ret = i915_gem_context_pin_hw_id(ctx);
	if (ret) {
		context_close(ctx);
		ctx = ERR_PTR(ret);
		goto out;
	}

	ctx->file_priv = ERR_PTR(-EBADF);
	i915_gem_context_set_closed(ctx); /* not user accessible */
	i915_gem_context_clear_bannable(ctx);
	i915_gem_context_set_force_single_submission(ctx);
	if (!USES_GUC_SUBMISSION(to_i915(dev)))
		ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */

	GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
out:
	mutex_unlock(&dev->struct_mutex);
	return ctx;
}

static void
destroy_kernel_context(struct i915_gem_context **ctxp)
{

@@ -629,7 +596,6 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)

	i915_gem_context_clear_bannable(ctx);
	ctx->sched.priority = I915_USER_PRIORITY(prio);
	ctx->ring_size = PAGE_SIZE;

	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));

@@ -944,7 +910,7 @@ static int context_barrier_task(struct i915_gem_context *ctx,
	if (emit)
		err = emit(rq, data);
	if (err == 0)
		err = i915_active_ref(&cb->base, rq->fence.context, rq);
		err = i915_active_ref(&cb->base, rq->timeline, rq);

	i915_request_add(rq);
	if (err)

@@ -1194,7 +1160,7 @@ __intel_context_reconfigure_sseu(struct intel_context *ce,
{
	int ret;

	GEM_BUG_ON(INTEL_GEN(ce->gem_context->i915) < 8);
	GEM_BUG_ON(INTEL_GEN(ce->engine->i915) < 8);

	ret = intel_context_lock_pinned(ce);
	if (ret)

@@ -1216,7 +1182,7 @@ __intel_context_reconfigure_sseu(struct intel_context *ce,
static int
intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
{
	struct drm_i915_private *i915 = ce->gem_context->i915;
	struct drm_i915_private *i915 = ce->engine->i915;
	int ret;

	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);

@@ -1613,6 +1579,7 @@ set_engines(struct i915_gem_context *ctx,
	for (n = 0; n < num_engines; n++) {
		struct i915_engine_class_instance ci;
		struct intel_engine_cs *engine;
		struct intel_context *ce;

		if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
			__free_engines(set.engines, n);

@@ -1635,11 +1602,13 @@ set_engines(struct i915_gem_context *ctx,
			return -ENOENT;
		}

		set.engines->engines[n] = intel_context_create(ctx, engine);
		if (!set.engines->engines[n]) {
		ce = intel_context_create(ctx, engine);
		if (IS_ERR(ce)) {
			__free_engines(set.engines, n);
			return -ENOMEM;
			return PTR_ERR(ce);
		}

		set.engines->engines[n] = ce;
	}
	set.engines->num_engines = num_engines;

@@ -1753,7 +1722,7 @@ get_engines(struct i915_gem_context *ctx,
@ -1753,7 +1722,7 @@ get_engines(struct i915_gem_context *ctx,
|
|||
|
||||
if (e->engines[n]) {
|
||||
ci.engine_class = e->engines[n]->engine->uabi_class;
|
||||
ci.engine_instance = e->engines[n]->engine->instance;
|
||||
ci.engine_instance = e->engines[n]->engine->uabi_instance;
|
||||
}
|
||||
|
||||
if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) {
|
||||
|
@ -1988,13 +1957,8 @@ static int clone_sseu(struct i915_gem_context *dst,
|
|||
static int clone_timeline(struct i915_gem_context *dst,
|
||||
struct i915_gem_context *src)
|
||||
{
|
||||
if (src->timeline) {
|
||||
GEM_BUG_ON(src->timeline == dst->timeline);
|
||||
|
||||
if (dst->timeline)
|
||||
intel_timeline_put(dst->timeline);
|
||||
dst->timeline = intel_timeline_get(src->timeline);
|
||||
}
|
||||
if (src->timeline)
|
||||
__assign_timeline(dst, src->timeline);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
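The context_apply_all() helper added in the hunks above is a plain function-pointer fold over a context's engines, with the engine list locked around the walk so the same operation (new ppgtt, new timeline) lands on every per-engine context. A minimal standalone C sketch of the same pattern follows; the struct and function names here are illustrative stand-ins, not the kernel's types.

	/* Sketch of the context_apply_all() callback pattern in plain C.
	 * gem_ctx/engine_ctx are stand-ins; the real helper walks the
	 * engines under i915_gem_context_lock_engines().
	 */
	#include <stdio.h>

	struct engine_ctx {
		const char *name;
		void *vm;
	};

	struct gem_ctx {
		struct engine_ctx engines[3];
		int num_engines;
	};

	/* Apply one operation to every per-engine context. */
	static void context_apply_all(struct gem_ctx *ctx,
				      void (*fn)(struct engine_ctx *ce, void *data),
				      void *data)
	{
		for (int i = 0; i < ctx->num_engines; i++)
			fn(&ctx->engines[i], data);
	}

	static void apply_vm(struct engine_ctx *ce, void *vm)
	{
		ce->vm = vm; /* the kernel also moves a reference here */
	}

	int main(void)
	{
		struct gem_ctx ctx = {
			.engines = { { "rcs0" }, { "vcs0" }, { "vecs0" } },
			.num_engines = 3,
		};
		int dummy_vm;

		context_apply_all(&ctx, apply_vm, &dummy_vm);
		for (int i = 0; i < ctx.num_engines; i++)
			printf("%s -> vm %p\n", ctx.engines[i].name,
			       ctx.engines[i].vm);
		return 0;
	}

The payoff of the shape is visible in __set_ppgtt() and __assign_timeline() above: each collapses an open-coded loop into one callback plus one call site.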
@@ -141,8 +141,6 @@ int i915_gem_context_open(struct drm_i915_private *i915,
 void i915_gem_context_close(struct drm_file *file);

 void i915_gem_context_release(struct kref *ctx_ref);
-struct i915_gem_context *
-i915_gem_context_create_gvt(struct drm_device *dev);

 int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
 			     struct drm_file *file);
@@ -169,11 +169,6 @@ struct i915_gem_context {

 	struct i915_sched_attr sched;

-	/** ring_size: size for allocating the per-engine ring buffer */
-	u32 ring_size;
-	/** desc_template: invariant fields for the HW context descriptor */
-	u32 desc_template;
-
 	/** guilty_count: How many times this context has caused a GPU hang. */
 	atomic_t guilty_count;
 	/**
@@ -221,6 +221,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 	 * state and so involves less work.
 	 */
 	if (atomic_read(&obj->bind_count)) {
+		struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
 		/* Before we change the PTE, the GPU must not be accessing it.
 		 * If we wait upon the object, we know that all the bound
 		 * VMA are no longer active.
@@ -232,18 +234,30 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 		if (ret)
 			return ret;

-		if (!HAS_LLC(to_i915(obj->base.dev)) &&
-		    cache_level != I915_CACHE_NONE) {
-			/* Access to snoopable pages through the GTT is
+		if (!HAS_LLC(i915) && cache_level != I915_CACHE_NONE) {
+			intel_wakeref_t wakeref =
+				intel_runtime_pm_get(&i915->runtime_pm);
+
+			/*
+			 * Access to snoopable pages through the GTT is
 			 * incoherent and on some machines causes a hard
 			 * lockup. Relinquish the CPU mmaping to force
 			 * userspace to refault in the pages and we can
 			 * then double check if the GTT mapping is still
 			 * valid for that pointer access.
 			 */
-			i915_gem_object_release_mmap(obj);
+			ret = mutex_lock_interruptible(&i915->ggtt.vm.mutex);
+			if (ret) {
+				intel_runtime_pm_put(&i915->runtime_pm,
+						     wakeref);
+				return ret;
+			}
+
+			if (obj->userfault_count)
+				__i915_gem_object_release_mmap(obj);

-			/* As we no longer need a fence for GTT access,
+			/*
+			 * As we no longer need a fence for GTT access,
 			 * we can relinquish it now (and so prevent having
 			 * to steal a fence from someone else on the next
 			 * fence request). Note GPU activity would have
@@ -251,12 +265,17 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 			 * supposed to be linear.
 			 */
 			for_each_ggtt_vma(vma, obj) {
-				ret = i915_vma_put_fence(vma);
+				ret = i915_vma_revoke_fence(vma);
 				if (ret)
-					return ret;
+					break;
 			}
+			mutex_unlock(&i915->ggtt.vm.mutex);
+			intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+			if (ret)
+				return ret;
 		} else {
-			/* We either have incoherent backing store and
+			/*
+			 * We either have incoherent backing store and
 			 * so no GTT access or the architecture is fully
 			 * coherent. In such cases, existing GTT mmaps
 			 * ignore the cache bit in the PTE and we can
@@ -551,13 +570,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 	return 0;
 }

-static inline enum fb_op_origin
-fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
-{
-	return (domain == I915_GEM_DOMAIN_GTT ?
-		obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
-}
-
 /**
  * Called when user space prepares to use an object with the CPU, either
  * through the mmap ioctl's mapping or a GTT mapping.
@@ -661,9 +673,8 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,

 	i915_gem_object_unlock(obj);

-	if (write_domain != 0)
-		intel_fb_obj_invalidate(obj,
-					fb_write_origin(obj, write_domain));
+	if (write_domain)
+		intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);

 out_unpin:
 	i915_gem_object_unpin_pages(obj);
@@ -783,7 +794,7 @@ int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
 	}

 out:
-	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
+	intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
 	obj->mm.dirty = true;
 	/* return with the pages pinned */
 	return 0;
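The set_cache_level() rework above layers a runtime-pm wakeref under the new ggtt.vm.mutex and unwinds both in strict reverse order on any failure. A small standalone sketch of that acquire/unwind shape; the resource names (power_get(), lock_vm() and friends) are toy stand-ins, not the i915 API.

	/* Sketch of the acquire/unwind ordering used above: take the power
	 * reference, then the lock; on failure release only what was taken,
	 * in reverse order.
	 */
	#include <stdio.h>

	static int power_get(void)     { puts("power: get");  return 0; }
	static void power_put(void)    { puts("power: put"); }
	static int lock_vm(void)       { puts("vm: lock");    return 0; }
	static void unlock_vm(void)    { puts("vm: unlock"); }
	static int revoke_fences(void) { puts("fences: revoked"); return 0; }

	static int change_cache_level(void)
	{
		int ret;

		ret = power_get();
		if (ret)
			return ret;

		ret = lock_vm();
		if (ret)
			goto out_power;	/* drop only what we took */

		ret = revoke_fences();

		unlock_vm();
	out_power:
		power_put();
		return ret;
	}

	int main(void)
	{
		return change_cache_level();
	}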
@@ -16,14 +16,15 @@
 #include "gem/i915_gem_ioctls.h"
 #include "gt/intel_context.h"
+#include "gt/intel_engine_pool.h"
 #include "gt/intel_gt.h"
 #include "gt/intel_gt_pm.h"

-#include "i915_gem_ioctls.h"
 #include "i915_drv.h"
 #include "i915_gem_clflush.h"
 #include "i915_gem_context.h"
+#include "i915_gem_ioctls.h"
 #include "i915_trace.h"
 #include "intel_drv.h"

 enum {
 	FORCE_CPU_RELOC = 1,
@@ -734,63 +735,6 @@ static int eb_select_context(struct i915_execbuffer *eb)
 	return 0;
 }

-static struct i915_request *__eb_wait_for_ring(struct intel_ring *ring)
-{
-	struct i915_request *rq;
-
-	/*
-	 * Completely unscientific finger-in-the-air estimates for suitable
-	 * maximum user request size (to avoid blocking) and then backoff.
-	 */
-	if (intel_ring_update_space(ring) >= PAGE_SIZE)
-		return NULL;
-
-	/*
-	 * Find a request that after waiting upon, there will be at least half
-	 * the ring available. The hysteresis allows us to compete for the
-	 * shared ring and should mean that we sleep less often prior to
-	 * claiming our resources, but not so long that the ring completely
-	 * drains before we can submit our next request.
-	 */
-	list_for_each_entry(rq, &ring->request_list, ring_link) {
-		if (__intel_ring_space(rq->postfix,
-				       ring->emit, ring->size) > ring->size / 2)
-			break;
-	}
-	if (&rq->ring_link == &ring->request_list)
-		return NULL; /* weird, we will check again later for real */
-
-	return i915_request_get(rq);
-}
-
-static int eb_wait_for_ring(const struct i915_execbuffer *eb)
-{
-	struct i915_request *rq;
-	int ret = 0;
-
-	/*
-	 * Apply a light amount of backpressure to prevent excessive hogs
-	 * from blocking waiting for space whilst holding struct_mutex and
-	 * keeping all of their resources pinned.
-	 */
-
-	rq = __eb_wait_for_ring(eb->context->ring);
-	if (rq) {
-		mutex_unlock(&eb->i915->drm.struct_mutex);
-
-		if (i915_request_wait(rq,
-				      I915_WAIT_INTERRUPTIBLE,
-				      MAX_SCHEDULE_TIMEOUT) < 0)
-			ret = -EINTR;
-
-		i915_request_put(rq);
-
-		mutex_lock(&eb->i915->drm.struct_mutex);
-	}
-
-	return ret;
-}
-
 static int eb_lookup_vmas(struct i915_execbuffer *eb)
 {
 	struct radix_tree_root *handles_vma = &eb->gem_context->handles_vma;
@@ -1014,11 +958,12 @@ static void reloc_cache_reset(struct reloc_cache *cache)
 		kunmap_atomic(vaddr);
 		i915_gem_object_finish_access((struct drm_i915_gem_object *)cache->node.mm);
 	} else {
-		wmb();
-		io_mapping_unmap_atomic((void __iomem *)vaddr);
-		if (cache->node.allocated) {
-			struct i915_ggtt *ggtt = cache_to_ggtt(cache);
-
+		struct i915_ggtt *ggtt = cache_to_ggtt(cache);
+
+		intel_gt_flush_ggtt_writes(ggtt->vm.gt);
+		io_mapping_unmap_atomic((void __iomem *)vaddr);
+
+		if (cache->node.allocated) {
 			ggtt->vm.clear_range(&ggtt->vm,
 					     cache->node.start,
 					     cache->node.size);
@@ -1073,11 +1018,15 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
 	void *vaddr;

 	if (cache->vaddr) {
+		intel_gt_flush_ggtt_writes(ggtt->vm.gt);
 		io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
 	} else {
 		struct i915_vma *vma;
 		int err;

+		if (i915_gem_object_is_tiled(obj))
+			return ERR_PTR(-EINVAL);
+
 		if (use_cpu_reloc(cache, obj))
 			return NULL;
@@ -1089,8 +1038,8 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,

 		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
 					       PIN_MAPPABLE |
-					       PIN_NONBLOCK |
-					       PIN_NONFAULT);
+					       PIN_NONBLOCK /* NOWARN */ |
+					       PIN_NOEVICT);
 		if (IS_ERR(vma)) {
 			memset(&cache->node, 0, sizeof(cache->node));
 			err = drm_mm_insert_node_in_range
@@ -1101,12 +1050,6 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
 			if (err) /* no inactive aperture space, use cpu reloc */
 				return NULL;
 		} else {
-			err = i915_vma_put_fence(vma);
-			if (err) {
-				i915_vma_unpin(vma);
-				return ERR_PTR(err);
-			}
-
 			cache->node.start = vma->node.start;
 			cache->node.mm = (void *)vma;
 		}
@@ -1114,7 +1057,6 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,

 	offset = cache->node.start;
 	if (cache->node.allocated) {
-		wmb();
 		ggtt->vm.insert_page(&ggtt->vm,
 				     i915_gem_object_get_dma_address(obj, page),
 				     offset, I915_CACHE_NONE, 0);
@@ -1197,25 +1139,26 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 			     unsigned int len)
 {
 	struct reloc_cache *cache = &eb->reloc_cache;
-	struct drm_i915_gem_object *obj;
+	struct intel_engine_pool_node *pool;
 	struct i915_request *rq;
 	struct i915_vma *batch;
 	u32 *cmd;
 	int err;

-	obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, PAGE_SIZE);
-	if (IS_ERR(obj))
-		return PTR_ERR(obj);
+	pool = intel_engine_pool_get(&eb->engine->pool, PAGE_SIZE);
+	if (IS_ERR(pool))
+		return PTR_ERR(pool);

-	cmd = i915_gem_object_pin_map(obj,
+	cmd = i915_gem_object_pin_map(pool->obj,
 				      cache->has_llc ?
 				      I915_MAP_FORCE_WB :
 				      I915_MAP_FORCE_WC);
-	i915_gem_object_unpin_pages(obj);
-	if (IS_ERR(cmd))
-		return PTR_ERR(cmd);
+	if (IS_ERR(cmd)) {
+		err = PTR_ERR(cmd);
+		goto out_pool;
+	}

-	batch = i915_vma_instance(obj, vma->vm, NULL);
+	batch = i915_vma_instance(pool->obj, vma->vm, NULL);
 	if (IS_ERR(batch)) {
 		err = PTR_ERR(batch);
 		goto err_unmap;
@@ -1231,6 +1174,10 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 		goto err_unpin;
 	}

+	err = intel_engine_pool_mark_active(pool, rq);
+	if (err)
+		goto err_request;
+
 	err = reloc_move_to_gpu(rq, vma);
 	if (err)
 		goto err_request;
@@ -1242,8 +1189,9 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 		goto skip_request;

 	i915_vma_lock(batch);
-	GEM_BUG_ON(!dma_resv_test_signaled_rcu(batch->resv, true));
-	err = i915_vma_move_to_active(batch, rq, 0);
+	err = i915_request_await_object(rq, batch->obj, false);
+	if (err == 0)
+		err = i915_vma_move_to_active(batch, rq, 0);
 	i915_vma_unlock(batch);
 	if (err)
 		goto skip_request;
@@ -1256,7 +1204,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 	cache->rq_size = 0;

 	/* Return with batch mapping (cmd) still pinned */
-	return 0;
+	goto out_pool;

 skip_request:
 	i915_request_skip(rq, err);
@@ -1265,7 +1213,9 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 err_unpin:
 	i915_vma_unpin(batch);
 err_unmap:
-	i915_gem_object_unpin_map(obj);
+	i915_gem_object_unpin_map(pool->obj);
+out_pool:
+	intel_engine_pool_put(pool);
 	return err;
 }
@@ -2009,18 +1959,17 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)

 static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
 {
-	struct drm_i915_gem_object *shadow_batch_obj;
+	struct intel_engine_pool_node *pool;
 	struct i915_vma *vma;
 	int err;

-	shadow_batch_obj = i915_gem_batch_pool_get(&eb->engine->batch_pool,
-						   PAGE_ALIGN(eb->batch_len));
-	if (IS_ERR(shadow_batch_obj))
-		return ERR_CAST(shadow_batch_obj);
+	pool = intel_engine_pool_get(&eb->engine->pool, eb->batch_len);
+	if (IS_ERR(pool))
+		return ERR_CAST(pool);

 	err = intel_engine_cmd_parser(eb->engine,
 				      eb->batch->obj,
-				      shadow_batch_obj,
+				      pool->obj,
 				      eb->batch_start_offset,
 				      eb->batch_len,
 				      is_master);
@@ -2029,12 +1978,12 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
 			vma = NULL;
 		else
 			vma = ERR_PTR(err);
-		goto out;
+		goto err;
 	}

-	vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
+	vma = i915_gem_object_ggtt_pin(pool->obj, NULL, 0, 0, 0);
 	if (IS_ERR(vma))
-		goto out;
+		goto err;

 	eb->vma[eb->buffer_count] = i915_vma_get(vma);
 	eb->flags[eb->buffer_count] =
@@ -2042,16 +1991,24 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
 	vma->exec_flags = &eb->flags[eb->buffer_count];
 	eb->buffer_count++;

-out:
-	i915_gem_object_unpin_pages(shadow_batch_obj);
+	vma->private = pool;
+	return vma;
+
+err:
+	intel_engine_pool_put(pool);
 	return vma;
 }

 static void
 add_to_client(struct i915_request *rq, struct drm_file *file)
 {
-	rq->file_priv = file->driver_priv;
-	list_add_tail(&rq->client_link, &rq->file_priv->mm.request_list);
+	struct drm_i915_file_private *file_priv = file->driver_priv;
+
+	rq->file_priv = file_priv;
+
+	spin_lock(&file_priv->mm.lock);
+	list_add_tail(&rq->client_link, &file_priv->mm.request_list);
+	spin_unlock(&file_priv->mm.lock);
 }

 static int eb_submit(struct i915_execbuffer *eb)
@@ -2091,6 +2048,12 @@ static int eb_submit(struct i915_execbuffer *eb)
 	return 0;
 }

+static int num_vcs_engines(const struct drm_i915_private *i915)
+{
+	return hweight64(INTEL_INFO(i915)->engine_mask &
+			 GENMASK_ULL(VCS0 + I915_MAX_VCS - 1, VCS0));
+}
+
 /*
  * Find one BSD ring to dispatch the corresponding BSD command.
  * The engine index is returned.
@@ -2103,8 +2066,8 @@ gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,

 	/* Check whether the file_priv has already selected one ring. */
 	if ((int)file_priv->bsd_engine < 0)
-		file_priv->bsd_engine = atomic_fetch_xor(1,
-			 &dev_priv->mm.bsd_engine_dispatch_index);
+		file_priv->bsd_engine =
+			get_random_int() % num_vcs_engines(dev_priv);

 	return file_priv->bsd_engine;
 }
@@ -2117,8 +2080,73 @@ static const enum intel_engine_id user_ring_map[] = {
 	[I915_EXEC_VEBOX] = VECS0
 };

-static int eb_pin_context(struct i915_execbuffer *eb, struct intel_context *ce)
+static struct i915_request *eb_throttle(struct intel_context *ce)
 {
+	struct intel_ring *ring = ce->ring;
+	struct intel_timeline *tl = ce->timeline;
+	struct i915_request *rq;
+
+	/*
+	 * Completely unscientific finger-in-the-air estimates for suitable
+	 * maximum user request size (to avoid blocking) and then backoff.
+	 */
+	if (intel_ring_update_space(ring) >= PAGE_SIZE)
+		return NULL;
+
+	/*
+	 * Find a request that after waiting upon, there will be at least half
+	 * the ring available. The hysteresis allows us to compete for the
+	 * shared ring and should mean that we sleep less often prior to
+	 * claiming our resources, but not so long that the ring completely
+	 * drains before we can submit our next request.
+	 */
+	list_for_each_entry(rq, &tl->requests, link) {
+		if (rq->ring != ring)
+			continue;
+
+		if (__intel_ring_space(rq->postfix,
+				       ring->emit, ring->size) > ring->size / 2)
+			break;
+	}
+	if (&rq->link == &tl->requests)
+		return NULL; /* weird, we will check again later for real */
+
+	return i915_request_get(rq);
+}
+
+static int
+__eb_pin_context(struct i915_execbuffer *eb, struct intel_context *ce)
+{
+	int err;
+
+	if (likely(atomic_inc_not_zero(&ce->pin_count)))
+		return 0;
+
+	err = mutex_lock_interruptible(&eb->i915->drm.struct_mutex);
+	if (err)
+		return err;
+
+	err = __intel_context_do_pin(ce);
+	mutex_unlock(&eb->i915->drm.struct_mutex);
+
+	return err;
+}
+
+static void
+__eb_unpin_context(struct i915_execbuffer *eb, struct intel_context *ce)
+{
+	if (likely(atomic_add_unless(&ce->pin_count, -1, 1)))
+		return;
+
+	mutex_lock(&eb->i915->drm.struct_mutex);
+	intel_context_unpin(ce);
+	mutex_unlock(&eb->i915->drm.struct_mutex);
+}
+
+static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
+{
+	struct intel_timeline *tl;
+	struct i915_request *rq;
 	int err;

 	/*
@@ -2134,18 +2162,64 @@ static int eb_pin_context(struct i915_execbuffer *eb, struct intel_context *ce)
 	 * GGTT space, so do this first before we reserve a seqno for
 	 * ourselves.
 	 */
-	err = intel_context_pin(ce);
+	err = __eb_pin_context(eb, ce);
 	if (err)
 		return err;

+	/*
+	 * Take a local wakeref for preparing to dispatch the execbuf as
+	 * we expect to access the hardware fairly frequently in the
+	 * process, and require the engine to be kept awake between accesses.
+	 * Upon dispatch, we acquire another prolonged wakeref that we hold
+	 * until the timeline is idle, which in turn releases the wakeref
+	 * taken on the engine, and the parent device.
+	 */
+	tl = intel_context_timeline_lock(ce);
+	if (IS_ERR(tl)) {
+		err = PTR_ERR(tl);
+		goto err_unpin;
+	}
+
+	intel_context_enter(ce);
+	rq = eb_throttle(ce);
+
+	intel_context_timeline_unlock(tl);
+
+	if (rq) {
+		if (i915_request_wait(rq,
+				      I915_WAIT_INTERRUPTIBLE,
+				      MAX_SCHEDULE_TIMEOUT) < 0) {
+			i915_request_put(rq);
+			err = -EINTR;
+			goto err_exit;
+		}
+
+		i915_request_put(rq);
+	}
+
 	eb->engine = ce->engine;
 	eb->context = ce;
 	return 0;
+
+err_exit:
+	mutex_lock(&tl->mutex);
+	intel_context_exit(ce);
+	intel_context_timeline_unlock(tl);
+err_unpin:
+	__eb_unpin_context(eb, ce);
+	return err;
 }

-static void eb_unpin_context(struct i915_execbuffer *eb)
+static void eb_unpin_engine(struct i915_execbuffer *eb)
 {
-	intel_context_unpin(eb->context);
+	struct intel_context *ce = eb->context;
+	struct intel_timeline *tl = ce->timeline;
+
+	mutex_lock(&tl->mutex);
+	intel_context_exit(ce);
+	mutex_unlock(&tl->mutex);
+
+	__eb_unpin_context(eb, ce);
 }

 static unsigned int
@@ -2163,7 +2237,7 @@ eb_select_legacy_ring(struct i915_execbuffer *eb,
 		return -1;
 	}

-	if (user_ring_id == I915_EXEC_BSD && HAS_ENGINE(i915, VCS1)) {
+	if (user_ring_id == I915_EXEC_BSD && num_vcs_engines(i915) > 1) {
 		unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;

 		if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
@@ -2190,9 +2264,9 @@ eb_select_legacy_ring(struct i915_execbuffer *eb,
 }

 static int
-eb_select_engine(struct i915_execbuffer *eb,
-		 struct drm_file *file,
-		 struct drm_i915_gem_execbuffer2 *args)
+eb_pin_engine(struct i915_execbuffer *eb,
+	      struct drm_file *file,
+	      struct drm_i915_gem_execbuffer2 *args)
 {
 	struct intel_context *ce;
 	unsigned int idx;
@@ -2207,7 +2281,7 @@ eb_select_engine(struct i915_execbuffer *eb,
 	if (IS_ERR(ce))
 		return PTR_ERR(ce);

-	err = eb_pin_context(eb, ce);
+	err = __eb_pin_engine(eb, ce);
 	intel_context_put(ce);

 	return err;
@@ -2425,25 +2499,12 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	if (unlikely(err))
 		goto err_destroy;

-	/*
-	 * Take a local wakeref for preparing to dispatch the execbuf as
-	 * we expect to access the hardware fairly frequently in the
-	 * process. Upon first dispatch, we acquire another prolonged
-	 * wakeref that we hold until the GPU has been idle for at least
-	 * 100ms.
-	 */
-	intel_gt_pm_get(&eb.i915->gt);
+	err = eb_pin_engine(&eb, file, args);
+	if (unlikely(err))
+		goto err_context;

 	err = i915_mutex_lock_interruptible(dev);
 	if (err)
-		goto err_rpm;
+		goto err_engine;

-	err = eb_select_engine(&eb, file, args);
-	if (unlikely(err))
-		goto err_unlock;
-
-	err = eb_wait_for_ring(&eb); /* may temporarily drop struct_mutex */
-	if (unlikely(err))
-		goto err_engine;
-
 	err = eb_relocate(&eb);
@@ -2570,6 +2631,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	 * to explicitly hold another reference here.
 	 */
 	eb.request->batch = eb.batch;
+	if (eb.batch->private)
+		intel_engine_pool_mark_active(eb.batch->private, eb.request);

 	trace_i915_request_queue(eb.request, eb.batch_flags);
 	err = eb_submit(&eb);
@@ -2594,15 +2657,15 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 err_batch_unpin:
 	if (eb.batch_flags & I915_DISPATCH_SECURE)
 		i915_vma_unpin(eb.batch);
+	if (eb.batch->private)
+		intel_engine_pool_put(eb.batch->private);
 err_vma:
 	if (eb.exec)
 		eb_release_vmas(&eb);
-err_engine:
-	eb_unpin_context(&eb);
 err_unlock:
 	mutex_unlock(&dev->struct_mutex);
-err_rpm:
-	intel_gt_pm_put(&eb.i915->gt);
+err_engine:
+	eb_unpin_engine(&eb);
 err_context:
 	i915_gem_context_put(eb.gem_context);
 err_destroy:
 	eb_destroy(&eb);
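The execbuffer backpressure above (eb_throttle(), formerly __eb_wait_for_ring()) keys off circular-ring arithmetic: it returns the oldest request whose retirement would leave at least half the ring free, and the caller sleeps on it. Below is a standalone sketch of that hysteresis computation; ring_space() only approximates __intel_ring_space() and the offsets and sizes are made up for the demo.

	/* Sketch of the ring-space hysteresis used by eb_throttle() above.
	 * Sizes are in bytes on a power-of-two ring; not the kernel's
	 * intel_ring.
	 */
	#include <stdio.h>

	/* Free space once the consumer has advanced to 'postfix', with a
	 * few bytes reserved so head never catches tail exactly on wrap.
	 */
	static unsigned int ring_space(unsigned int postfix,
				       unsigned int emit, unsigned int size)
	{
		return (postfix - emit - 8) & (size - 1);
	}

	int main(void)
	{
		const unsigned int size = 16 * 4096;	/* 64 KiB ring */
		const unsigned int emit = 60000;	/* current write offset */
		unsigned int postfix;

		/* Walk "requests" in submission order by postfix offset. */
		for (postfix = 4096; postfix < size; postfix += 12288) {
			unsigned int space = ring_space(postfix, emit, size);

			printf("postfix %6u -> space %6u%s\n", postfix, space,
			       space > size / 2 ? "  <- wait here" : "");
			if (space > size / 2)
				break;
		}
		return 0;
	}

Waiting for half the ring rather than just enough space is the hysteresis: contenders sleep less often, but the ring never fully drains before the next submission.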
@@ -69,8 +69,7 @@ i915_gem_object_lock_fence(struct drm_i915_gem_object *obj)

 	i915_sw_fence_init(&stub->chain, stub_notify);
 	dma_fence_init(&stub->dma, &stub_fence_ops, &stub->chain.wait.lock,
-		       to_i915(obj->base.dev)->mm.unordered_timeline,
-		       0);
+		       0, 0);

 	if (i915_sw_fence_await_reservation(&stub->chain,
 					    obj->base.resv, NULL,
@@ -13,8 +13,8 @@
 #include "i915_gem_gtt.h"
 #include "i915_gem_ioctls.h"
 #include "i915_gem_object.h"
 #include "i915_trace.h"
 #include "i915_vma.h"
 #include "intel_drv.h"

 static inline bool
 __vma_matches(struct vm_area_struct *vma, struct file *filp,
@@ -101,9 +101,6 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 		up_write(&mm->mmap_sem);
 		if (IS_ERR_VALUE(addr))
 			goto err;
-
-		/* This may race, but that's ok, it only gets set */
-		WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
 	}
 	i915_gem_object_put(obj);
@@ -267,15 +264,15 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 	/* Now pin it into the GTT as needed */
 	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
 				       PIN_MAPPABLE |
-				       PIN_NONBLOCK |
-				       PIN_NONFAULT);
+				       PIN_NONBLOCK /* NOWARN */ |
+				       PIN_NOSEARCH);
 	if (IS_ERR(vma)) {
 		/* Use a partial view if it is bigger than available space */
 		struct i915_ggtt_view view =
 			compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
 		unsigned int flags;

-		flags = PIN_MAPPABLE;
+		flags = PIN_MAPPABLE | PIN_NOSEARCH;
 		if (view.type == I915_GGTT_VIEW_NORMAL)
 			flags |= PIN_NONBLOCK; /* avoid warnings for pinned */
@@ -283,10 +280,9 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 		 * Userspace is now writing through an untracked VMA, abandon
 		 * all hope that the hardware is able to track future writes.
 		 */
-		obj->frontbuffer_ggtt_origin = ORIGIN_CPU;

 		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
-		if (IS_ERR(vma) && !view.type) {
+		if (IS_ERR(vma)) {
 			flags = PIN_MAPPABLE;
 			view.type = I915_GGTT_VIEW_PARTIAL;
 			vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
@@ -310,14 +306,17 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 	if (ret)
 		goto err_fence;

-	/* Mark as being mmapped into userspace for later revocation */
 	assert_rpm_wakelock_held(rpm);
+
+	/* Mark as being mmapped into userspace for later revocation */
+	mutex_lock(&i915->ggtt.vm.mutex);
 	if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
 		list_add(&obj->userfault_link, &i915->ggtt.userfault_list);
+	mutex_unlock(&i915->ggtt.vm.mutex);
+
 	if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
 		intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
 				   msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
 	GEM_BUG_ON(!obj->userfault_count);

 	i915_vma_set_ggtt_write(vma);
@@ -412,8 +411,8 @@ void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
 	 * requirement that operations to the GGTT be made holding the RPM
 	 * wakeref.
 	 */
-	lockdep_assert_held(&i915->drm.struct_mutex);
 	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+	mutex_lock(&i915->ggtt.vm.mutex);

 	if (!obj->userfault_count)
 		goto out;
@@ -430,6 +429,7 @@ void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
 	wmb();

 out:
+	mutex_unlock(&i915->ggtt.vm.mutex);
 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 }
@@ -29,6 +29,7 @@
 #include "i915_gem_context.h"
 #include "i915_gem_object.h"
+#include "i915_globals.h"
 #include "i915_trace.h"

 static struct i915_global_object {
 	struct i915_global base;
@@ -45,16 +46,6 @@ void i915_gem_object_free(struct drm_i915_gem_object *obj)
 	return kmem_cache_free(global.slab_objects, obj);
 }

-static void
-frontbuffer_retire(struct i915_active_request *active,
-		   struct i915_request *request)
-{
-	struct drm_i915_gem_object *obj =
-		container_of(active, typeof(*obj), frontbuffer_write);
-
-	intel_fb_obj_flush(obj, ORIGIN_CS);
-}
-
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
 			  const struct drm_i915_gem_object_ops *ops)
 {
@@ -63,17 +54,14 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 	spin_lock_init(&obj->vma.lock);
 	INIT_LIST_HEAD(&obj->vma.list);

+	INIT_LIST_HEAD(&obj->mm.link);
+
 	INIT_LIST_HEAD(&obj->lut_list);
-	INIT_LIST_HEAD(&obj->batch_pool_link);

 	init_rcu_head(&obj->rcu);

 	obj->ops = ops;

-	obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
-	i915_active_request_init(&obj->frontbuffer_write,
-				 NULL, frontbuffer_retire);
-
 	obj->mm.madv = I915_MADV_WILLNEED;
 	INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
 	mutex_init(&obj->mm.get_page.lock);
@@ -185,7 +173,6 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,

 		GEM_BUG_ON(atomic_read(&obj->bind_count));
 		GEM_BUG_ON(obj->userfault_count);
-		GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
 		GEM_BUG_ON(!list_empty(&obj->lut_list));

 		atomic_set(&obj->mm.pages_pin_count, 0);
@@ -209,48 +196,18 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,

 void i915_gem_flush_free_objects(struct drm_i915_private *i915)
 {
-	struct llist_node *freed;
+	struct llist_node *freed = llist_del_all(&i915->mm.free_list);

-	/* Free the oldest, most stale object to keep the free_list short */
-	freed = NULL;
-	if (!llist_empty(&i915->mm.free_list)) { /* quick test for hotpath */
-		/* Only one consumer of llist_del_first() allowed */
-		spin_lock(&i915->mm.free_lock);
-		freed = llist_del_first(&i915->mm.free_list);
-		spin_unlock(&i915->mm.free_lock);
-	}
-	if (unlikely(freed)) {
-		freed->next = NULL;
+	if (unlikely(freed))
 		__i915_gem_free_objects(i915, freed);
-	}
 }

 static void __i915_gem_free_work(struct work_struct *work)
 {
 	struct drm_i915_private *i915 =
 		container_of(work, struct drm_i915_private, mm.free_work);
-	struct llist_node *freed;
-
-	/*
-	 * All file-owned VMA should have been released by this point through
-	 * i915_gem_close_object(), or earlier by i915_gem_context_close().
-	 * However, the object may also be bound into the global GTT (e.g.
-	 * older GPUs without per-process support, or for direct access through
-	 * the GTT either for the user or for scanout). Those VMA still need to
-	 * unbound now.
-	 */
-
-	spin_lock(&i915->mm.free_lock);
-	while ((freed = llist_del_all(&i915->mm.free_list))) {
-		spin_unlock(&i915->mm.free_lock);
-
-		__i915_gem_free_objects(i915, freed);
-		if (need_resched())
-			return;
-
-		spin_lock(&i915->mm.free_lock);
-	}
-	spin_unlock(&i915->mm.free_lock);
+
+	i915_gem_flush_free_objects(i915);
 }

 void i915_gem_free_object(struct drm_gem_object *gem_obj)
@@ -258,6 +215,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);

+	GEM_BUG_ON(i915_gem_object_is_framebuffer(obj));
+
 	/*
 	 * Before we free the object, make sure any pure RCU-only
 	 * read-side critical sections are complete, e.g.
@@ -273,14 +232,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 	 * or else we may oom whilst there are plenty of deferred
 	 * freed objects.
 	 */
-	if (i915_gem_object_has_pages(obj) &&
-	    i915_gem_object_is_shrinkable(obj)) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&i915->mm.obj_lock, flags);
-		list_del_init(&obj->mm.link);
-		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
-	}
+	i915_gem_object_make_unshrinkable(obj);

 	/*
 	 * Since we require blocking on struct_mutex to unbind the freed
@@ -296,13 +248,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 	queue_work(i915->wq, &i915->mm.free_work);
 }

-static inline enum fb_op_origin
-fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
-{
-	return (domain == I915_GEM_DOMAIN_GTT ?
-		obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
-}
-
 static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
 {
 	return !(obj->cache_level == I915_CACHE_NONE ||
@@ -325,8 +270,7 @@ i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
 		for_each_ggtt_vma(vma, obj)
 			intel_gt_flush_ggtt_writes(vma->vm->gt);

-		intel_fb_obj_flush(obj,
-				   fb_write_origin(obj, I915_GEM_DOMAIN_GTT));
+		intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);

 		for_each_ggtt_vma(vma, obj) {
 			if (vma->iomap)
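i915_gem_flush_free_objects() above now detaches the whole deferred-free list in one llist_del_all() instead of locking and popping entries one at a time. The underlying structure is a lock-free singly-linked stack: producers push with a CAS, a single consumer swaps the list head to NULL. A minimal C11 sketch of that push/del-all pattern, as a userspace stand-in for <linux/llist.h>:

	#include <stdatomic.h>
	#include <stdio.h>

	struct node {
		struct node *next;
		int id;
	};

	static _Atomic(struct node *) free_list;

	static void push(struct node *n)	/* many producers */
	{
		n->next = atomic_load(&free_list);
		while (!atomic_compare_exchange_weak(&free_list, &n->next, n))
			;	/* failed CAS reloaded n->next for us */
	}

	static struct node *del_all(void)	/* one consumer at a time */
	{
		return atomic_exchange(&free_list, NULL);
	}

	int main(void)
	{
		struct node nodes[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };

		for (int i = 0; i < 3; i++)
			push(&nodes[i]);

		for (struct node *n = del_all(); n; n = n->next)
			printf("freeing node %d\n", n->id);	/* LIFO order */
		return 0;
	}

Because del_all() is a single atomic swap, the consumer needs no lock at all, which is what lets the old free_lock dance disappear from the hunk above.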
@@ -161,7 +161,7 @@ i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
 static inline bool
 i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
 {
-	return READ_ONCE(obj->framebuffer_references);
+	return READ_ONCE(obj->frontbuffer);
 }

 static inline unsigned int
@@ -394,6 +394,10 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 				     unsigned int flags);
 void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);

+void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
+void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
+void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);
+
 static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
 {
 	if (obj->cache_dirty)
@@ -3,44 +3,124 @@
 * Copyright © 2019 Intel Corporation
 */

 #include "i915_drv.h"
+#include "gt/intel_context.h"
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_engine_pool.h"
+#include "gt/intel_gt.h"
+#include "i915_gem_clflush.h"
 #include "i915_gem_object_blt.h"

-#include "i915_gem_clflush.h"
-#include "intel_drv.h"
-
-int intel_emit_vma_fill_blt(struct i915_request *rq,
-			    struct i915_vma *vma,
-			    u32 value)
+struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
+					 struct i915_vma *vma,
+					 u32 value)
 {
-	u32 *cs;
+	struct drm_i915_private *i915 = ce->vm->i915;
+	const u32 block_size = S16_MAX * PAGE_SIZE;
+	struct intel_engine_pool_node *pool;
+	struct i915_vma *batch;
+	u64 offset;
+	u64 count;
+	u64 rem;
+	u32 size;
+	u32 *cmd;
+	int err;

-	cs = intel_ring_begin(rq, 8);
-	if (IS_ERR(cs))
-		return PTR_ERR(cs);
+	GEM_BUG_ON(intel_engine_is_virtual(ce->engine));
+	intel_engine_pm_get(ce->engine);

-	if (INTEL_GEN(rq->i915) >= 8) {
-		*cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (7 - 2);
-		*cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
-		*cs++ = 0;
-		*cs++ = vma->size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
-		*cs++ = lower_32_bits(vma->node.start);
-		*cs++ = upper_32_bits(vma->node.start);
-		*cs++ = value;
-		*cs++ = MI_NOOP;
-	} else {
-		*cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
-		*cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
-		*cs++ = 0;
-		*cs++ = vma->size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
-		*cs++ = vma->node.start;
-		*cs++ = value;
-		*cs++ = MI_NOOP;
-		*cs++ = MI_NOOP;
+	count = div_u64(vma->size, block_size);
+	size = (1 + 8 * count) * sizeof(u32);
+	size = round_up(size, PAGE_SIZE);
+	pool = intel_engine_pool_get(&ce->engine->pool, size);
+	if (IS_ERR(pool)) {
+		err = PTR_ERR(pool);
+		goto out_pm;
 	}

-	intel_ring_advance(rq, cs);
+	cmd = i915_gem_object_pin_map(pool->obj, I915_MAP_WC);
+	if (IS_ERR(cmd)) {
+		err = PTR_ERR(cmd);
+		goto out_put;
+	}

-	return 0;
+	rem = vma->size;
+	offset = vma->node.start;
+
+	do {
+		u32 size = min_t(u64, rem, block_size);
+
+		GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX);
+
+		if (INTEL_GEN(i915) >= 8) {
+			*cmd++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (7 - 2);
+			*cmd++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
+			*cmd++ = 0;
+			*cmd++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
+			*cmd++ = lower_32_bits(offset);
+			*cmd++ = upper_32_bits(offset);
+			*cmd++ = value;
+		} else {
+			*cmd++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
+			*cmd++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
+			*cmd++ = 0;
+			*cmd++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
+			*cmd++ = offset;
+			*cmd++ = value;
+		}
+
+		/* Allow ourselves to be preempted in between blocks. */
+		*cmd++ = MI_ARB_CHECK;
+
+		offset += size;
+		rem -= size;
+	} while (rem);
+
+	*cmd = MI_BATCH_BUFFER_END;
+	intel_gt_chipset_flush(ce->vm->gt);
+
+	i915_gem_object_unpin_map(pool->obj);
+
+	batch = i915_vma_instance(pool->obj, ce->vm, NULL);
+	if (IS_ERR(batch)) {
+		err = PTR_ERR(batch);
+		goto out_put;
+	}
+
+	err = i915_vma_pin(batch, 0, 0, PIN_USER);
+	if (unlikely(err))
+		goto out_put;
+
+	batch->private = pool;
+	return batch;
+
+out_put:
+	intel_engine_pool_put(pool);
+out_pm:
+	intel_engine_pm_put(ce->engine);
+	return ERR_PTR(err);
 }

+int intel_emit_vma_mark_active(struct i915_vma *vma, struct i915_request *rq)
+{
+	int err;
+
+	i915_vma_lock(vma);
+	err = i915_request_await_object(rq, vma->obj, false);
+	if (err == 0)
+		err = i915_vma_move_to_active(vma, rq, 0);
+	i915_vma_unlock(vma);
+	if (unlikely(err))
+		return err;
+
+	return intel_engine_pool_mark_active(vma->private, rq);
+}
+
+void intel_emit_vma_release(struct intel_context *ce, struct i915_vma *vma)
+{
+	i915_vma_unpin(vma);
+	intel_engine_pool_put(vma->private);
+	intel_engine_pm_put(ce->engine);
+}
+
 int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
@@ -48,6 +128,7 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
 			     u32 value)
 {
 	struct i915_request *rq;
+	struct i915_vma *batch;
 	struct i915_vma *vma;
 	int err;
@@ -65,12 +146,22 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
 		i915_gem_object_unlock(obj);
 	}

-	rq = i915_request_create(ce);
-	if (IS_ERR(rq)) {
-		err = PTR_ERR(rq);
+	batch = intel_emit_vma_fill_blt(ce, vma, value);
+	if (IS_ERR(batch)) {
+		err = PTR_ERR(batch);
 		goto out_unpin;
 	}

+	rq = intel_context_create_request(ce);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto out_batch;
+	}
+
+	err = intel_emit_vma_mark_active(batch, rq);
+	if (unlikely(err))
+		goto out_request;
+
 	err = i915_request_await_object(rq, obj, true);
 	if (unlikely(err))
 		goto out_request;
@@ -82,22 +173,229 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
 	}

 	i915_vma_lock(vma);
-	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+	err = i915_request_await_object(rq, vma->obj, true);
+	if (err == 0)
+		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 	i915_vma_unlock(vma);
 	if (unlikely(err))
 		goto out_request;

-	err = intel_emit_vma_fill_blt(rq, vma, value);
+	err = ce->engine->emit_bb_start(rq,
+					batch->node.start, batch->node.size,
+					0);
 out_request:
 	if (unlikely(err))
 		i915_request_skip(rq, err);

 	i915_request_add(rq);
+out_batch:
+	intel_emit_vma_release(ce, batch);
 out_unpin:
 	i915_vma_unpin(vma);
 	return err;
 }

+struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
+					 struct i915_vma *src,
+					 struct i915_vma *dst)
+{
+	struct drm_i915_private *i915 = ce->vm->i915;
+	const u32 block_size = S16_MAX * PAGE_SIZE;
+	struct intel_engine_pool_node *pool;
+	struct i915_vma *batch;
+	u64 src_offset, dst_offset;
+	u64 count, rem;
+	u32 size, *cmd;
+	int err;
+
+	GEM_BUG_ON(src->size != dst->size);
+
+	GEM_BUG_ON(intel_engine_is_virtual(ce->engine));
+	intel_engine_pm_get(ce->engine);
+
+	count = div_u64(dst->size, block_size);
+	size = (1 + 11 * count) * sizeof(u32);
+	size = round_up(size, PAGE_SIZE);
+	pool = intel_engine_pool_get(&ce->engine->pool, size);
+	if (IS_ERR(pool)) {
+		err = PTR_ERR(pool);
+		goto out_pm;
+	}
+
+	cmd = i915_gem_object_pin_map(pool->obj, I915_MAP_WC);
+	if (IS_ERR(cmd)) {
+		err = PTR_ERR(cmd);
+		goto out_put;
+	}
+
+	rem = src->size;
+	src_offset = src->node.start;
+	dst_offset = dst->node.start;
+
+	do {
+		size = min_t(u64, rem, block_size);
+		GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX);
+
+		if (INTEL_GEN(i915) >= 9) {
+			*cmd++ = GEN9_XY_FAST_COPY_BLT_CMD | (10 - 2);
+			*cmd++ = BLT_DEPTH_32 | PAGE_SIZE;
+			*cmd++ = 0;
+			*cmd++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
+			*cmd++ = lower_32_bits(dst_offset);
+			*cmd++ = upper_32_bits(dst_offset);
+			*cmd++ = 0;
+			*cmd++ = PAGE_SIZE;
+			*cmd++ = lower_32_bits(src_offset);
+			*cmd++ = upper_32_bits(src_offset);
+		} else if (INTEL_GEN(i915) >= 8) {
+			*cmd++ = XY_SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (10 - 2);
+			*cmd++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | PAGE_SIZE;
+			*cmd++ = 0;
+			*cmd++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
+			*cmd++ = lower_32_bits(dst_offset);
+			*cmd++ = upper_32_bits(dst_offset);
+			*cmd++ = 0;
+			*cmd++ = PAGE_SIZE;
+			*cmd++ = lower_32_bits(src_offset);
+			*cmd++ = upper_32_bits(src_offset);
+		} else {
+			*cmd++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
+			*cmd++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | PAGE_SIZE;
+			*cmd++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE;
+			*cmd++ = dst_offset;
+			*cmd++ = PAGE_SIZE;
+			*cmd++ = src_offset;
+		}
+
+		/* Allow ourselves to be preempted in between blocks. */
+		*cmd++ = MI_ARB_CHECK;
+
+		src_offset += size;
+		dst_offset += size;
+		rem -= size;
+	} while (rem);
+
+	*cmd = MI_BATCH_BUFFER_END;
+	intel_gt_chipset_flush(ce->vm->gt);
+
+	i915_gem_object_unpin_map(pool->obj);
+
+	batch = i915_vma_instance(pool->obj, ce->vm, NULL);
+	if (IS_ERR(batch)) {
+		err = PTR_ERR(batch);
+		goto out_put;
+	}
+
+	err = i915_vma_pin(batch, 0, 0, PIN_USER);
+	if (unlikely(err))
+		goto out_put;
+
+	batch->private = pool;
+	return batch;
+
+out_put:
+	intel_engine_pool_put(pool);
+out_pm:
+	intel_engine_pm_put(ce->engine);
+	return ERR_PTR(err);
+}
+
+static int move_to_gpu(struct i915_vma *vma, struct i915_request *rq, bool write)
+{
+	struct drm_i915_gem_object *obj = vma->obj;
+
+	if (obj->cache_dirty & ~obj->cache_coherent)
+		i915_gem_clflush_object(obj, 0);
+
+	return i915_request_await_object(rq, obj, write);
+}
+
+int i915_gem_object_copy_blt(struct drm_i915_gem_object *src,
+			     struct drm_i915_gem_object *dst,
+			     struct intel_context *ce)
+{
+	struct drm_gem_object *objs[] = { &src->base, &dst->base };
+	struct i915_address_space *vm = ce->vm;
+	struct i915_vma *vma[2], *batch;
+	struct ww_acquire_ctx acquire;
+	struct i915_request *rq;
+	int err, i;
+
+	vma[0] = i915_vma_instance(src, vm, NULL);
+	if (IS_ERR(vma[0]))
+		return PTR_ERR(vma[0]);
+
+	err = i915_vma_pin(vma[0], 0, 0, PIN_USER);
+	if (unlikely(err))
+		return err;
+
+	vma[1] = i915_vma_instance(dst, vm, NULL);
+	if (IS_ERR(vma[1]))
+		goto out_unpin_src;
+
+	err = i915_vma_pin(vma[1], 0, 0, PIN_USER);
+	if (unlikely(err))
+		goto out_unpin_src;
+
+	batch = intel_emit_vma_copy_blt(ce, vma[0], vma[1]);
+	if (IS_ERR(batch)) {
+		err = PTR_ERR(batch);
+		goto out_unpin_dst;
+	}
+
+	rq = intel_context_create_request(ce);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto out_batch;
+	}
+
+	err = intel_emit_vma_mark_active(batch, rq);
+	if (unlikely(err))
+		goto out_request;
+
+	err = drm_gem_lock_reservations(objs, ARRAY_SIZE(objs), &acquire);
+	if (unlikely(err))
+		goto out_request;
+
+	for (i = 0; i < ARRAY_SIZE(vma); i++) {
+		err = move_to_gpu(vma[i], rq, i);
+		if (unlikely(err))
+			goto out_unlock;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(vma); i++) {
+		unsigned int flags = i ? EXEC_OBJECT_WRITE : 0;
+
+		err = i915_vma_move_to_active(vma[i], rq, flags);
+		if (unlikely(err))
+			goto out_unlock;
+	}
+
+	if (rq->engine->emit_init_breadcrumb) {
+		err = rq->engine->emit_init_breadcrumb(rq);
+		if (unlikely(err))
+			goto out_unlock;
+	}
+
+	err = rq->engine->emit_bb_start(rq,
+					batch->node.start, batch->node.size,
+					0);
+out_unlock:
+	drm_gem_unlock_reservations(objs, ARRAY_SIZE(objs), &acquire);
+out_request:
+	if (unlikely(err))
+		i915_request_skip(rq, err);
+
+	i915_request_add(rq);
+out_batch:
+	intel_emit_vma_release(ce, batch);
+out_unpin_dst:
+	i915_vma_unpin(vma[1]);
+out_unpin_src:
+	i915_vma_unpin(vma[0]);
+	return err;
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/i915_gem_object_blt.c"
 #endif
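The fill/copy emitters above split work into blocks of at most S16_MAX pages, because the blitter's height field is a signed 16-bit count, and size the pooled command buffer from the block count (8 dwords per fill block, 11 per copy block, plus one terminating dword). A standalone sketch of that sizing and block-splitting arithmetic, with made-up object sizes:

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SIZE  4096u
	#define PAGE_SHIFT 12
	#define S16_MAX    0x7fff

	int main(void)
	{
		const uint64_t block_size = (uint64_t)S16_MAX * PAGE_SIZE;
		const uint64_t obj_size = 1ull << 32;	/* a 4 GiB fill */
		uint64_t count = obj_size / block_size;	/* full blocks */
		uint64_t size = (1 + 8 * count) * sizeof(uint32_t);

		/* Round the command buffer up to whole pages, as the pool does. */
		size = (size + PAGE_SIZE - 1) & ~(uint64_t)(PAGE_SIZE - 1);

		printf("full blocks: %llu, batch bytes: %llu\n",
		       (unsigned long long)count, (unsigned long long)size);

		/* Each emitted block stays within the s16 page-count limit. */
		uint64_t rem = obj_size;
		unsigned int blocks = 0;
		do {
			uint64_t sz = rem < block_size ? rem : block_size;
			if ((sz >> PAGE_SHIFT) > S16_MAX)
				return 1;	/* would overflow the blit command */
			rem -= sz;
			blocks++;
		} while (rem);
		printf("emitted %u blocks\n", blocks);
		return 0;
	}

The MI_ARB_CHECK after every block in the real emitters is the reason for splitting at all: it gives the scheduler a preemption point inside what would otherwise be one long, unpreemptible blit.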
@@ -8,17 +8,30 @@

 #include <linux/types.h>

-struct drm_i915_gem_object;
-struct intel_context;
-struct i915_request;
-struct i915_vma;
+#include "gt/intel_context.h"
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_engine_pool.h"
+#include "i915_vma.h"

-int intel_emit_vma_fill_blt(struct i915_request *rq,
-			    struct i915_vma *vma,
-			    u32 value);
+struct drm_i915_gem_object;
+
+struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
+					 struct i915_vma *vma,
+					 u32 value);
+
+struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
+					 struct i915_vma *src,
+					 struct i915_vma *dst);
+
+int intel_emit_vma_mark_active(struct i915_vma *vma, struct i915_request *rq);
+void intel_emit_vma_release(struct intel_context *ce, struct i915_vma *vma);

 int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
 			     struct intel_context *ce,
 			     u32 value);

+int i915_gem_object_copy_blt(struct drm_i915_gem_object *src,
+			     struct drm_i915_gem_object *dst,
+			     struct intel_context *ce);
+
 #endif
@@ -13,6 +13,7 @@
 #include "i915_selftest.h"

 struct drm_i915_gem_object;
+struct intel_fronbuffer;

 /*
  * struct i915_lut_handle tracks the fast lookups from handle to vma used
@@ -114,7 +115,6 @@ struct drm_i915_gem_object {
 	unsigned int userfault_count;
 	struct list_head userfault_link;

-	struct list_head batch_pool_link;
 	I915_SELFTEST_DECLARE(struct list_head st_link);

 	/*
@@ -142,9 +142,7 @@ struct drm_i915_gem_object {
 	 */
 	u16 write_domain;

-	atomic_t frontbuffer_bits;
-	unsigned int frontbuffer_ggtt_origin; /* write once */
-	struct i915_active_request frontbuffer_write;
+	struct intel_frontbuffer *frontbuffer;

 	/** Current tiling stride for the object, if it's tiled. */
 	unsigned int tiling_and_stride;
@@ -225,9 +223,6 @@ struct drm_i915_gem_object {
 		bool quirked:1;
 	} mm;

-	/** References from framebuffers, locks out tiling changes. */
-	unsigned int framebuffer_references;
-
 	/** Record of address bit 17 of each page at last unbind. */
 	unsigned long *bit_17;
@@ -153,24 +153,13 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
 struct sg_table *
 __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct sg_table *pages;

 	pages = fetch_and_zero(&obj->mm.pages);
 	if (IS_ERR_OR_NULL(pages))
 		return pages;

-	if (i915_gem_object_is_shrinkable(obj)) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&i915->mm.obj_lock, flags);
-
-		list_del(&obj->mm.link);
-		i915->mm.shrink_count--;
-		i915->mm.shrink_memory -= obj->base.size;
-
-		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
-	}
+	i915_gem_object_make_unshrinkable(obj);

 	if (obj->mm.mapping) {
 		void *ptr;
@@ -133,9 +133,16 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
 	drm_pci_free(obj->base.dev, obj->phys_handle);
 }

+static void phys_release(struct drm_i915_gem_object *obj)
+{
+	fput(obj->base.filp);
+}
+
 static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
 	.get_pages = i915_gem_object_get_pages_phys,
 	.put_pages = i915_gem_object_put_pages_phys,
+
+	.release = phys_release,
 };

 int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
@@ -34,12 +34,9 @@ static void i915_gem_park(struct drm_i915_private *i915)

 	lockdep_assert_held(&i915->drm.struct_mutex);

-	for_each_engine(engine, i915, id) {
+	for_each_engine(engine, i915, id)
 		call_idle_barriers(engine); /* cleanup after wedging */
-		i915_gem_batch_pool_fini(&engine->batch_pool);
-	}

 	intel_timelines_park(i915);
 	i915_vma_parked(i915);

 	i915_globals_park();
@@ -132,7 +129,9 @@ static bool switch_to_kernel_context_sync(struct intel_gt *gt)
 		}
 	} while (i915_retire_requests(gt->i915) && result);

-	GEM_BUG_ON(gt->awake);
+	if (intel_gt_pm_wait_for_idle(gt))
+		result = false;
+
 	return result;
 }
@@ -163,13 +162,6 @@ void i915_gem_suspend(struct drm_i915_private *i915)

 	mutex_unlock(&i915->drm.struct_mutex);

-	/*
-	 * Assert that we successfully flushed all the work and
-	 * reset the GPU back to its idle, low power state.
-	 */
-	GEM_BUG_ON(i915->gt.awake);
-	flush_work(&i915->gem.idle_work);
-
 	cancel_delayed_work_sync(&i915->gt.hangcheck.work);

 	i915_gem_drain_freed_objects(i915);
@@ -246,8 +238,6 @@ void i915_gem_resume(struct drm_i915_private *i915)
 {
 	GEM_TRACE("\n");

-	WARN_ON(i915->gt.awake);
-
 	mutex_lock(&i915->drm.struct_mutex);
 	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
|
|||
#include "i915_drv.h"
|
||||
#include "i915_gem_object.h"
|
||||
#include "i915_scatterlist.h"
|
||||
#include "i915_trace.h"
|
||||
|
||||
/*
|
||||
* Move pages to appropriate lru and release the pagevec, decrementing the
|
||||
|
|
|
@ -459,13 +459,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
|
|||
return NOTIFY_DONE;
|
||||
}
|
||||
|
||||
/**
|
||||
* i915_gem_shrinker_register - Register the i915 shrinker
|
||||
* @i915: i915 device
|
||||
*
|
||||
* This function registers and sets up the i915 shrinker and OOM handler.
|
||||
*/
|
||||
void i915_gem_shrinker_register(struct drm_i915_private *i915)
|
||||
void i915_gem_driver_register__shrinker(struct drm_i915_private *i915)
|
||||
{
|
||||
i915->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
|
||||
i915->mm.shrinker.count_objects = i915_gem_shrinker_count;
|
||||
|
@@ -480,13 +474,7 @@ void i915_gem_shrinker_register(struct drm_i915_private *i915)
WARN_ON(register_vmap_purge_notifier(&i915->mm.vmap_notifier));
}

/**
* i915_gem_shrinker_unregister - Unregisters the i915 shrinker
* @i915: i915 device
*
* This function unregisters the i915 shrinker and OOM handler.
*/
void i915_gem_shrinker_unregister(struct drm_i915_private *i915)
void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915)
{
WARN_ON(unregister_vmap_purge_notifier(&i915->mm.vmap_notifier));
WARN_ON(unregister_oom_notifier(&i915->mm.oom_notifier));
@@ -530,3 +518,61 @@ void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
if (unlock)
mutex_release(&i915->drm.struct_mutex.dep_map, 0, _RET_IP_);
}

#define obj_to_i915(obj__) to_i915((obj__)->base.dev)

void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj)
{
/*
* We can only be called while the pages are pinned or when
* the pages are released. If pinned, we should only be called
* from a single caller under controlled conditions; and on release
* only one caller may release us. Neither the two may cross.
*/
if (!list_empty(&obj->mm.link)) { /* pinned by caller */
struct drm_i915_private *i915 = obj_to_i915(obj);
unsigned long flags;

spin_lock_irqsave(&i915->mm.obj_lock, flags);
GEM_BUG_ON(list_empty(&obj->mm.link));

list_del_init(&obj->mm.link);
i915->mm.shrink_count--;
i915->mm.shrink_memory -= obj->base.size;

spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
}

static void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj,
struct list_head *head)
{
GEM_BUG_ON(!i915_gem_object_has_pages(obj));
GEM_BUG_ON(!list_empty(&obj->mm.link));

if (i915_gem_object_is_shrinkable(obj)) {
struct drm_i915_private *i915 = obj_to_i915(obj);
unsigned long flags;

spin_lock_irqsave(&i915->mm.obj_lock, flags);
GEM_BUG_ON(!kref_read(&obj->base.refcount));

list_add_tail(&obj->mm.link, head);
i915->mm.shrink_count++;
i915->mm.shrink_memory += obj->base.size;

spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
}

void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj)
{
__i915_gem_object_make_shrinkable(obj,
&obj_to_i915(obj)->mm.shrink_list);
}

void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj)
{
__i915_gem_object_make_shrinkable(obj,
&obj_to_i915(obj)->mm.purge_list);
}
@@ -0,0 +1,31 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2019 Intel Corporation
*/

#ifndef __I915_GEM_SHRINKER_H__
#define __I915_GEM_SHRINKER_H__

#include <linux/bits.h>

struct drm_i915_private;
struct mutex;

/* i915_gem_shrinker.c */
unsigned long i915_gem_shrink(struct drm_i915_private *i915,
unsigned long target,
unsigned long *nr_scanned,
unsigned flags);
#define I915_SHRINK_UNBOUND BIT(0)
#define I915_SHRINK_BOUND BIT(1)
#define I915_SHRINK_ACTIVE BIT(2)
#define I915_SHRINK_VMAPS BIT(3)
#define I915_SHRINK_WRITEBACK BIT(4)

unsigned long i915_gem_shrink_all(struct drm_i915_private *i915);
void i915_gem_driver_register__shrinker(struct drm_i915_private *i915);
void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915);
void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
struct mutex *mutex);

#endif /* __I915_GEM_SHRINKER_H__ */
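The I915_SHRINK_* bits in this new header compose into a mask selecting which object lists the shrinker may touch. A hedged sketch of how a caller might combine them (the wrapper below is invented for illustration, and the interpretation of target as a page count is an assumption; only i915_gem_shrink() and the flag bits come from the header above):

/* Hypothetical caller, sketch only: ask the shrinker to release up to
 * nr_pages pages from both the bound and unbound object lists, allowing
 * writeback of shmem-backed pages.  scanned reports how many pages
 * were actually considered. */
static unsigned long example_reclaim(struct drm_i915_private *i915,
				     unsigned long nr_pages)
{
	unsigned long scanned = 0;

	return i915_gem_shrink(i915, nr_pages, &scanned,
			       I915_SHRINK_BOUND |
			       I915_SHRINK_UNBOUND |
			       I915_SHRINK_WRITEBACK);
}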
@@ -11,6 +11,7 @@
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_gem_stolen.h"

/*
* The BIOS typically reserves some of the system's memory for the exclusive
@@ -362,12 +363,16 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
mutex_init(&dev_priv->mm.stolen_lock);

if (intel_vgpu_active(dev_priv)) {
DRM_INFO("iGVT-g active, disabling use of stolen memory\n");
dev_notice(dev_priv->drm.dev,
"%s, disabling use of stolen memory\n",
"iGVT-g active");
return 0;
}

if (intel_vtd_active() && INTEL_GEN(dev_priv) < 8) {
DRM_INFO("DMAR active, disabling use of stolen memory\n");
dev_notice(dev_priv->drm.dev,
"%s, disabling use of stolen memory\n",
"DMAR active");
return 0;
}
@@ -0,0 +1,35 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2019 Intel Corporation
*/

#ifndef __I915_GEM_STOLEN_H__
#define __I915_GEM_STOLEN_H__

#include <linux/types.h>

struct drm_i915_private;
struct drm_mm_node;
struct drm_i915_gem_object;

int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
struct drm_mm_node *node, u64 size,
unsigned alignment);
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
struct drm_mm_node *node, u64 size,
unsigned alignment, u64 start,
u64 end);
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
struct drm_mm_node *node);
int i915_gem_init_stolen(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv);
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
resource_size_t size);
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
resource_size_t stolen_offset,
resource_size_t gtt_offset,
resource_size_t size);

#endif /* __I915_GEM_STOLEN_H__ */
@@ -12,11 +12,10 @@

#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "intel_drv.h"

struct i915_mm_struct {
struct mm_struct *mm;
@@ -20,32 +20,19 @@ int i915_gemfs_init(struct drm_i915_private *i915)
if (!type)
return -ENODEV;

/*
* By creating our own shmemfs mountpoint, we can pass in
* mount flags that better match our usecase.
*
* One example, although it is probably better with a per-file
* control, is selecting huge page allocations ("huge=within_size").
* Currently unused due to bandwidth issues (slow reads) on Broadwell+.
*/

gemfs = kern_mount(type);
if (IS_ERR(gemfs))
return PTR_ERR(gemfs);

/*
* Enable huge-pages for objects that are at least HPAGE_PMD_SIZE, most
* likely 2M. Note that within_size may overallocate huge-pages, if say
* we allocate an object of size 2M + 4K, we may get 2M + 2M, but under
* memory pressure shmem should split any huge-pages which can be
* shrunk.
*/

if (has_transparent_hugepage()) {
struct super_block *sb = gemfs->mnt_sb;
/* FIXME: Disabled until we get W/A for read BW issue. */
char options[] = "huge=never";
int flags = 0;
int err;

err = sb->s_op->remount_fs(sb, &flags, options);
if (err) {
kern_unmount(gemfs);
return err;
}
}

i915->mm.gemfs = gemfs;

return 0;
@@ -879,126 +879,22 @@ static int igt_mock_ppgtt_64K(void *arg)
return err;
}

static struct i915_vma *
gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val)
{
struct drm_i915_private *i915 = vma->vm->i915;
const int gen = INTEL_GEN(i915);
unsigned int count = vma->size >> PAGE_SHIFT;
struct drm_i915_gem_object *obj;
struct i915_vma *batch;
unsigned int size;
u32 *cmd;
int n;
int err;

size = (1 + 4 * count) * sizeof(u32);
size = round_up(size, PAGE_SIZE);
obj = i915_gem_object_create_internal(i915, size);
if (IS_ERR(obj))
return ERR_CAST(obj);

cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
goto err;
}

offset += vma->node.start;

for (n = 0; n < count; n++) {
if (gen >= 8) {
*cmd++ = MI_STORE_DWORD_IMM_GEN4;
*cmd++ = lower_32_bits(offset);
*cmd++ = upper_32_bits(offset);
*cmd++ = val;
} else if (gen >= 4) {
*cmd++ = MI_STORE_DWORD_IMM_GEN4 |
(gen < 6 ? MI_USE_GGTT : 0);
*cmd++ = 0;
*cmd++ = offset;
*cmd++ = val;
} else {
*cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
*cmd++ = offset;
*cmd++ = val;
}

offset += PAGE_SIZE;
}

*cmd = MI_BATCH_BUFFER_END;
intel_gt_chipset_flush(vma->vm->gt);

i915_gem_object_unpin_map(obj);

batch = i915_vma_instance(obj, vma->vm, NULL);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
goto err;
}

err = i915_vma_pin(batch, 0, 0, PIN_USER);
if (err)
goto err;

return batch;

err:
i915_gem_object_put(obj);

return ERR_PTR(err);
}

static int gpu_write(struct i915_vma *vma,
struct i915_gem_context *ctx,
struct intel_engine_cs *engine,
u32 dword,
u32 value)
u32 dw,
u32 val)
{
struct i915_request *rq;
struct i915_vma *batch;
int err;

GEM_BUG_ON(!intel_engine_can_store_dword(engine));

batch = gpu_write_dw(vma, dword * sizeof(u32), value);
if (IS_ERR(batch))
return PTR_ERR(batch);

rq = igt_request_alloc(ctx, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_batch;
}

i915_vma_lock(batch);
err = i915_vma_move_to_active(batch, rq, 0);
i915_vma_unlock(batch);
i915_gem_object_lock(vma->obj);
err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
i915_gem_object_unlock(vma->obj);
if (err)
goto err_request;
return err;

i915_vma_lock(vma);
err = i915_gem_object_set_to_gtt_domain(vma->obj, false);
if (err == 0)
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unlock(vma);
if (err)
goto err_request;

err = engine->emit_bb_start(rq,
batch->node.start, batch->node.size,
0);
err_request:
if (err)
i915_request_skip(rq, err);
i915_request_add(rq);
err_batch:
i915_vma_unpin(batch);
i915_vma_close(batch);
i915_vma_put(batch);

return err;
return igt_gpu_fill_dw(vma, ctx, engine, dw * sizeof(u32),
vma->size >> PAGE_SHIFT, val);
}

static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
@@ -9,6 +9,7 @@

#include "selftests/igt_flush_test.h"
#include "selftests/mock_drm.h"
#include "huge_gem_object.h"
#include "mock_context.h"

static int igt_client_fill(void *arg)
@@ -24,15 +25,19 @@ static int igt_client_fill(void *arg)
prandom_seed_state(&prng, i915_selftest.random_seed);

do {
u32 sz = prandom_u32_state(&prng) % SZ_32M;
const u32 max_block_size = S16_MAX * PAGE_SIZE;
u32 sz = min_t(u64, ce->vm->total >> 4, prandom_u32_state(&prng));
u32 phys_sz = sz % (max_block_size + 1);
u32 val = prandom_u32_state(&prng);
u32 i;

sz = round_up(sz, PAGE_SIZE);
phys_sz = round_up(phys_sz, PAGE_SIZE);

pr_debug("%s with sz=%x, val=%x\n", __func__, sz, val);
pr_debug("%s with phys_sz= %x, sz=%x, val=%x\n", __func__,
phys_sz, sz, val);

obj = i915_gem_object_create_internal(i915, sz);
obj = huge_gem_object(i915, phys_sz, sz);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto err_flush;
@@ -54,7 +59,8 @@ static int igt_client_fill(void *arg)
* values after we do the set_to_cpu_domain and pick it up as a
* test failure.
*/
memset32(vaddr, val ^ 0xdeadbeaf, obj->base.size / sizeof(u32));
memset32(vaddr, val ^ 0xdeadbeaf,
huge_gem_object_phys_size(obj) / sizeof(u32));

if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
obj->cache_dirty = true;
@@ -71,7 +77,7 @@ static int igt_client_fill(void *arg)
if (err)
goto err_unpin;

for (i = 0; i < obj->base.size / sizeof(u32); ++i) {
for (i = 0; i < huge_gem_object_phys_size(obj) / sizeof(u32); ++i) {
if (vaddr[i] != val) {
pr_err("vaddr[%u]=%x, expected=%x\n", i,
vaddr[i], val);
@@ -228,7 +228,9 @@ static int gpu_set(struct drm_i915_gem_object *obj,
intel_ring_advance(rq, cs);

i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
err = i915_request_await_object(rq, vma->obj, true);
if (err == 0)
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unlock(vma);
i915_vma_unpin(vma);
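This await-then-activate sequence recurs verbatim throughout the selftest hunks below: serialise the new request against prior users of the object, and only then publish the vma as active. As a hedged sketch, the shared shape could be factored into a helper along these lines (the helper name is ours; the calls are exactly the ones used in these hunks):

/* Hypothetical helper capturing the pattern introduced in this series:
 * wait on existing fences attached to the object, then track the vma
 * as active for this request.  write mirrors EXEC_OBJECT_WRITE. */
static int example_move_to_active(struct i915_vma *vma,
				  struct i915_request *rq,
				  bool write)
{
	int err;

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, write);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq,
					      write ? EXEC_OBJECT_WRITE : 0);
	i915_vma_unlock(vma);

	return err;
}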
@@ -156,70 +156,6 @@ static int live_nop_switch(void *arg)
return err;
}

static struct i915_vma *
gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
{
struct drm_i915_gem_object *obj;
const int gen = INTEL_GEN(vma->vm->i915);
unsigned long n, size;
u32 *cmd;
int err;

size = (4 * count + 1) * sizeof(u32);
size = round_up(size, PAGE_SIZE);
obj = i915_gem_object_create_internal(vma->vm->i915, size);
if (IS_ERR(obj))
return ERR_CAST(obj);

cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
goto err;
}

GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size);
offset += vma->node.start;

for (n = 0; n < count; n++) {
if (gen >= 8) {
*cmd++ = MI_STORE_DWORD_IMM_GEN4;
*cmd++ = lower_32_bits(offset);
*cmd++ = upper_32_bits(offset);
*cmd++ = value;
} else if (gen >= 4) {
*cmd++ = MI_STORE_DWORD_IMM_GEN4 |
(gen < 6 ? MI_USE_GGTT : 0);
*cmd++ = 0;
*cmd++ = offset;
*cmd++ = value;
} else {
*cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
*cmd++ = offset;
*cmd++ = value;
}
offset += PAGE_SIZE;
}
*cmd = MI_BATCH_BUFFER_END;
i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);

vma = i915_vma_instance(obj, vma->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err;
}

err = i915_vma_pin(vma, 0, 0, PIN_USER);
if (err)
goto err;

return vma;

err:
i915_gem_object_put(obj);
return ERR_PTR(err);
}

static unsigned long real_page_count(struct drm_i915_gem_object *obj)
{
return huge_gem_object_phys_size(obj) >> PAGE_SHIFT;
@@ -236,10 +172,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
unsigned int dw)
{
struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm;
struct i915_request *rq;
struct i915_vma *vma;
struct i915_vma *batch;
unsigned int flags;
int err;

GEM_BUG_ON(obj->base.size > vm->total);
@@ -250,7 +183,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
return PTR_ERR(vma);

i915_gem_object_lock(obj);
err = i915_gem_object_set_to_gtt_domain(obj, false);
err = i915_gem_object_set_to_gtt_domain(obj, true);
i915_gem_object_unlock(obj);
if (err)
return err;
@@ -259,70 +192,23 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
if (err)
return err;

/* Within the GTT the huge objects maps every page onto
/*
* Within the GTT the huge objects maps every page onto
* its 1024 real pages (using phys_pfn = dma_pfn % 1024).
* We set the nth dword within the page using the nth
* mapping via the GTT - this should exercise the GTT mapping
* whilst checking that each context provides a unique view
* into the object.
*/
batch = gpu_fill_dw(vma,
(dw * real_page_count(obj)) << PAGE_SHIFT |
(dw * sizeof(u32)),
real_page_count(obj),
dw);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
goto err_vma;
}

rq = igt_request_alloc(ctx, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_batch;
}

flags = 0;
if (INTEL_GEN(vm->i915) <= 5)
flags |= I915_DISPATCH_SECURE;

err = engine->emit_bb_start(rq,
batch->node.start, batch->node.size,
flags);
if (err)
goto err_request;

i915_vma_lock(batch);
err = i915_vma_move_to_active(batch, rq, 0);
i915_vma_unlock(batch);
if (err)
goto skip_request;

i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unlock(vma);
if (err)
goto skip_request;

i915_request_add(rq);

i915_vma_unpin(batch);
i915_vma_close(batch);
i915_vma_put(batch);

err = igt_gpu_fill_dw(vma,
ctx,
engine,
(dw * real_page_count(obj)) << PAGE_SHIFT |
(dw * sizeof(u32)),
real_page_count(obj),
dw);
i915_vma_unpin(vma);

return 0;

skip_request:
i915_request_skip(rq, err);
err_request:
i915_request_add(rq);
err_batch:
i915_vma_unpin(batch);
i915_vma_put(batch);
err_vma:
i915_vma_unpin(vma);
return err;
}
@@ -780,13 +666,17 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
goto err_request;

i915_vma_lock(batch);
err = i915_vma_move_to_active(batch, rq, 0);
err = i915_request_await_object(rq, batch->obj, false);
if (err == 0)
err = i915_vma_move_to_active(batch, rq, 0);
i915_vma_unlock(batch);
if (err)
goto skip_request;

i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
err = i915_request_await_object(rq, vma->obj, true);
if (err == 0)
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unlock(vma);
if (err)
goto skip_request;
@@ -821,8 +711,7 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
#define TEST_RESET BIT(2)

static int
__sseu_prepare(struct drm_i915_private *i915,
const char *name,
__sseu_prepare(const char *name,
unsigned int flags,
struct intel_context *ce,
struct igt_spinner **spin)
@@ -838,14 +727,11 @@ __sseu_prepare(struct drm_i915_private *i915,
if (!*spin)
return -ENOMEM;

ret = igt_spinner_init(*spin, i915);
ret = igt_spinner_init(*spin, ce->engine->gt);
if (ret)
goto err_free;

rq = igt_spinner_create_request(*spin,
ce->gem_context,
ce->engine,
MI_NOOP);
rq = igt_spinner_create_request(*spin, ce, MI_NOOP);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
goto err_fini;
@@ -871,8 +757,7 @@ __sseu_prepare(struct drm_i915_private *i915,
}

static int
__read_slice_count(struct drm_i915_private *i915,
struct intel_context *ce,
__read_slice_count(struct intel_context *ce,
struct drm_i915_gem_object *obj,
struct igt_spinner *spin,
u32 *rpcs)
@@ -901,7 +786,7 @@ __read_slice_count(struct drm_i915_private *i915,
return ret;
}

if (INTEL_GEN(i915) >= 11) {
if (INTEL_GEN(ce->engine->i915) >= 11) {
s_mask = GEN11_RPCS_S_CNT_MASK;
s_shift = GEN11_RPCS_S_CNT_SHIFT;
} else {
@@ -944,8 +829,7 @@ __check_rpcs(const char *name, u32 rpcs, int slices, unsigned int expected,
}

static int
__sseu_finish(struct drm_i915_private *i915,
const char *name,
__sseu_finish(const char *name,
unsigned int flags,
struct intel_context *ce,
struct drm_i915_gem_object *obj,
@@ -962,14 +846,13 @@ __sseu_finish(struct drm_i915_private *i915,
goto out;
}

ret = __read_slice_count(i915, ce, obj,
ret = __read_slice_count(ce, obj,
flags & TEST_RESET ? NULL : spin, &rpcs);
ret = __check_rpcs(name, rpcs, ret, expected, "Context", "!");
if (ret)
goto out;

ret = __read_slice_count(i915, ce->engine->kernel_context, obj,
NULL, &rpcs);
ret = __read_slice_count(ce->engine->kernel_context, obj, NULL, &rpcs);
ret = __check_rpcs(name, rpcs, ret, slices, "Kernel context", "!");

out:
@@ -977,11 +860,12 @@ __sseu_finish(struct drm_i915_private *i915,
igt_spinner_end(spin);

if ((flags & TEST_IDLE) && ret == 0) {
ret = i915_gem_wait_for_idle(i915, 0, MAX_SCHEDULE_TIMEOUT);
ret = i915_gem_wait_for_idle(ce->engine->i915,
0, MAX_SCHEDULE_TIMEOUT);
if (ret)
return ret;

ret = __read_slice_count(i915, ce, obj, NULL, &rpcs);
ret = __read_slice_count(ce, obj, NULL, &rpcs);
ret = __check_rpcs(name, rpcs, ret, expected,
"Context", " after idle!");
}
@@ -990,8 +874,7 @@ __sseu_finish(struct drm_i915_private *i915,
}

static int
__sseu_test(struct drm_i915_private *i915,
const char *name,
__sseu_test(const char *name,
unsigned int flags,
struct intel_context *ce,
struct drm_i915_gem_object *obj,
@@ -1000,7 +883,7 @@ __sseu_test(struct drm_i915_private *i915,
struct igt_spinner *spin = NULL;
int ret;

ret = __sseu_prepare(i915, name, flags, ce, &spin);
ret = __sseu_prepare(name, flags, ce, &spin);
if (ret)
return ret;
@@ -1008,7 +891,7 @@ __sseu_test(struct drm_i915_private *i915,
if (ret)
goto out_spin;

ret = __sseu_finish(i915, name, flags, ce, obj,
ret = __sseu_finish(name, flags, ce, obj,
hweight32(sseu.slice_mask), spin);

out_spin:
@@ -1088,22 +971,22 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
goto out_context;

/* First set the default mask. */
ret = __sseu_test(i915, name, flags, ce, obj, engine->sseu);
ret = __sseu_test(name, flags, ce, obj, engine->sseu);
if (ret)
goto out_fail;

/* Then set a power-gated configuration. */
ret = __sseu_test(i915, name, flags, ce, obj, pg_sseu);
ret = __sseu_test(name, flags, ce, obj, pg_sseu);
if (ret)
goto out_fail;

/* Back to defaults. */
ret = __sseu_test(i915, name, flags, ce, obj, engine->sseu);
ret = __sseu_test(name, flags, ce, obj, engine->sseu);
if (ret)
goto out_fail;

/* One last power-gated configuration for the road. */
ret = __sseu_test(i915, name, flags, ce, obj, pg_sseu);
ret = __sseu_test(name, flags, ce, obj, pg_sseu);
if (ret)
goto out_fail;
@@ -1339,7 +1222,9 @@ static int write_to_scratch(struct i915_gem_context *ctx,
goto err_request;

i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, 0);
err = i915_request_await_object(rq, vma->obj, false);
if (err == 0)
err = i915_vma_move_to_active(vma, rq, 0);
i915_vma_unlock(vma);
if (err)
goto skip_request;
@@ -1436,7 +1321,9 @@ static int read_from_scratch(struct i915_gem_context *ctx,
goto err_request;

i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
err = i915_request_await_object(rq, vma->obj, true);
if (err == 0)
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unlock(vma);
if (err)
goto skip_request;
@@ -351,7 +351,10 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
}

i915_vma_lock(vma);
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
err = i915_request_await_object(rq, vma->obj, true);
if (err == 0)
err = i915_vma_move_to_active(vma, rq,
EXEC_OBJECT_WRITE);
i915_vma_unlock(vma);

i915_request_add(rq);
@@ -382,7 +385,7 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,

static void disable_retire_worker(struct drm_i915_private *i915)
{
i915_gem_shrinker_unregister(i915);
i915_gem_driver_unregister__shrinker(i915);

intel_gt_pm_get(&i915->gt);
@@ -398,7 +401,7 @@ static void restore_retire_worker(struct drm_i915_private *i915)
igt_flush_test(i915, I915_WAIT_LOCKED);
mutex_unlock(&i915->drm.struct_mutex);

i915_gem_shrinker_register(i915);
i915_gem_driver_register__shrinker(i915);
}

static void mmap_offset_lock(struct drm_i915_private *i915)
@@ -9,6 +9,7 @@

#include "selftests/igt_flush_test.h"
#include "selftests/mock_drm.h"
#include "huge_gem_object.h"
#include "mock_context.h"

static int igt_fill_blt(void *arg)
@@ -23,16 +24,26 @@ static int igt_fill_blt(void *arg)

prandom_seed_state(&prng, i915_selftest.random_seed);

/*
* XXX: needs some threads to scale all these tests, also maybe throw
* in submission from higher priority context to see if we are
* preempted for very large objects...
*/

do {
u32 sz = prandom_u32_state(&prng) % SZ_32M;
const u32 max_block_size = S16_MAX * PAGE_SIZE;
u32 sz = min_t(u64, ce->vm->total >> 4, prandom_u32_state(&prng));
u32 phys_sz = sz % (max_block_size + 1);
u32 val = prandom_u32_state(&prng);
u32 i;

sz = round_up(sz, PAGE_SIZE);
phys_sz = round_up(phys_sz, PAGE_SIZE);

pr_debug("%s with sz=%x, val=%x\n", __func__, sz, val);
pr_debug("%s with phys_sz= %x, sz=%x, val=%x\n", __func__,
phys_sz, sz, val);

obj = i915_gem_object_create_internal(i915, sz);
obj = huge_gem_object(i915, phys_sz, sz);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto err_flush;
@@ -48,7 +59,8 @@ static int igt_fill_blt(void *arg)
* Make sure the potentially async clflush does its job, if
* required.
*/
memset32(vaddr, val ^ 0xdeadbeaf, obj->base.size / sizeof(u32));
memset32(vaddr, val ^ 0xdeadbeaf,
huge_gem_object_phys_size(obj) / sizeof(u32));

if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
obj->cache_dirty = true;
@@ -65,7 +77,7 @@ static int igt_fill_blt(void *arg)
if (err)
goto err_unpin;

for (i = 0; i < obj->base.size / sizeof(u32); ++i) {
for (i = 0; i < huge_gem_object_phys_size(obj) / sizeof(u32); ++i) {
if (vaddr[i] != val) {
pr_err("vaddr[%u]=%x, expected=%x\n", i,
vaddr[i], val);
@@ -91,10 +103,116 @@ static int igt_fill_blt(void *arg)
return err;
}

static int igt_copy_blt(void *arg)
{
struct drm_i915_private *i915 = arg;
struct intel_context *ce = i915->engine[BCS0]->kernel_context;
struct drm_i915_gem_object *src, *dst;
struct rnd_state prng;
IGT_TIMEOUT(end);
u32 *vaddr;
int err = 0;

prandom_seed_state(&prng, i915_selftest.random_seed);

do {
const u32 max_block_size = S16_MAX * PAGE_SIZE;
u32 sz = min_t(u64, ce->vm->total >> 4, prandom_u32_state(&prng));
u32 phys_sz = sz % (max_block_size + 1);
u32 val = prandom_u32_state(&prng);
u32 i;

sz = round_up(sz, PAGE_SIZE);
phys_sz = round_up(phys_sz, PAGE_SIZE);

pr_debug("%s with phys_sz= %x, sz=%x, val=%x\n", __func__,
phys_sz, sz, val);

src = huge_gem_object(i915, phys_sz, sz);
if (IS_ERR(src)) {
err = PTR_ERR(src);
goto err_flush;
}

vaddr = i915_gem_object_pin_map(src, I915_MAP_WB);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
goto err_put_src;
}

memset32(vaddr, val,
huge_gem_object_phys_size(src) / sizeof(u32));

i915_gem_object_unpin_map(src);

if (!(src->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
src->cache_dirty = true;

dst = huge_gem_object(i915, phys_sz, sz);
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
goto err_put_src;
}

vaddr = i915_gem_object_pin_map(dst, I915_MAP_WB);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
goto err_put_dst;
}

memset32(vaddr, val ^ 0xdeadbeaf,
huge_gem_object_phys_size(dst) / sizeof(u32));

if (!(dst->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
dst->cache_dirty = true;

mutex_lock(&i915->drm.struct_mutex);
err = i915_gem_object_copy_blt(src, dst, ce);
mutex_unlock(&i915->drm.struct_mutex);
if (err)
goto err_unpin;

i915_gem_object_lock(dst);
err = i915_gem_object_set_to_cpu_domain(dst, false);
i915_gem_object_unlock(dst);
if (err)
goto err_unpin;

for (i = 0; i < huge_gem_object_phys_size(dst) / sizeof(u32); ++i) {
if (vaddr[i] != val) {
pr_err("vaddr[%u]=%x, expected=%x\n", i,
vaddr[i], val);
err = -EINVAL;
goto err_unpin;
}
}

i915_gem_object_unpin_map(dst);

i915_gem_object_put(src);
i915_gem_object_put(dst);
} while (!time_after(jiffies, end));

goto err_flush;

err_unpin:
i915_gem_object_unpin_map(dst);
err_put_dst:
i915_gem_object_put(dst);
err_put_src:
i915_gem_object_put(src);
err_flush:
if (err == -ENOMEM)
err = 0;

return err;
}

int i915_gem_object_blt_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_fill_blt),
SUBTEST(igt_copy_blt),
};

if (intel_gt_is_wedged(&i915->gt))
@@ -9,6 +9,8 @@
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
#include "i915_vma.h"
#include "i915_drv.h"

#include "i915_request.h"
@@ -23,7 +25,7 @@ igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
* GGTT space, so do this first before we reserve a seqno for
* ourselves.
*/
ce = i915_gem_context_get_engine(ctx, engine->id);
ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
if (IS_ERR(ce))
return ERR_CAST(ce);
@@ -32,3 +34,140 @@ igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine)

return rq;
}

struct i915_vma *
igt_emit_store_dw(struct i915_vma *vma,
u64 offset,
unsigned long count,
u32 val)
{
struct drm_i915_gem_object *obj;
const int gen = INTEL_GEN(vma->vm->i915);
unsigned long n, size;
u32 *cmd;
int err;

size = (4 * count + 1) * sizeof(u32);
size = round_up(size, PAGE_SIZE);
obj = i915_gem_object_create_internal(vma->vm->i915, size);
if (IS_ERR(obj))
return ERR_CAST(obj);

cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
goto err;
}

GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size);
offset += vma->node.start;

for (n = 0; n < count; n++) {
if (gen >= 8) {
*cmd++ = MI_STORE_DWORD_IMM_GEN4;
*cmd++ = lower_32_bits(offset);
*cmd++ = upper_32_bits(offset);
*cmd++ = val;
} else if (gen >= 4) {
*cmd++ = MI_STORE_DWORD_IMM_GEN4 |
(gen < 6 ? MI_USE_GGTT : 0);
*cmd++ = 0;
*cmd++ = offset;
*cmd++ = val;
} else {
*cmd++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
*cmd++ = offset;
*cmd++ = val;
}
offset += PAGE_SIZE;
}
*cmd = MI_BATCH_BUFFER_END;
i915_gem_object_unpin_map(obj);

vma = i915_vma_instance(obj, vma->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err;
}

err = i915_vma_pin(vma, 0, 0, PIN_USER);
if (err)
goto err;

return vma;

err:
i915_gem_object_put(obj);
return ERR_PTR(err);
}

int igt_gpu_fill_dw(struct i915_vma *vma,
struct i915_gem_context *ctx,
struct intel_engine_cs *engine,
u64 offset,
unsigned long count,
u32 val)
{
struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm;
struct i915_request *rq;
struct i915_vma *batch;
unsigned int flags;
int err;

GEM_BUG_ON(vma->size > vm->total);
GEM_BUG_ON(!intel_engine_can_store_dword(engine));
GEM_BUG_ON(!i915_vma_is_pinned(vma));

batch = igt_emit_store_dw(vma, offset, count, val);
if (IS_ERR(batch))
return PTR_ERR(batch);

rq = igt_request_alloc(ctx, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_batch;
}

flags = 0;
if (INTEL_GEN(vm->i915) <= 5)
flags |= I915_DISPATCH_SECURE;

err = engine->emit_bb_start(rq,
batch->node.start, batch->node.size,
flags);
if (err)
goto err_request;

i915_vma_lock(batch);
err = i915_request_await_object(rq, batch->obj, false);
if (err == 0)
err = i915_vma_move_to_active(batch, rq, 0);
i915_vma_unlock(batch);
if (err)
goto skip_request;

i915_vma_lock(vma);
err = i915_request_await_object(rq, vma->obj, true);
if (err == 0)
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
i915_vma_unlock(vma);
if (err)
goto skip_request;

i915_request_add(rq);

i915_vma_unpin(batch);
i915_vma_close(batch);
i915_vma_put(batch);

return 0;

skip_request:
i915_request_skip(rq, err);
err_request:
i915_request_add(rq);
err_batch:
i915_vma_unpin(batch);
i915_vma_put(batch);
return err;
}
@@ -7,11 +7,27 @@
#ifndef __IGT_GEM_UTILS_H__
#define __IGT_GEM_UTILS_H__

#include <linux/types.h>

struct i915_request;
struct i915_gem_context;
struct intel_engine_cs;
struct i915_vma;

struct i915_request *
igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine);

struct i915_vma *
igt_emit_store_dw(struct i915_vma *vma,
u64 offset,
unsigned long count,
u32 val);

int igt_gpu_fill_dw(struct i915_vma *vma,
struct i915_gem_context *ctx,
struct intel_engine_cs *engine,
u64 offset,
unsigned long count,
u32 val);

#endif /* __IGT_GEM_UTILS_H__ */
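Together these exported helpers replace the per-file gpu_write_dw/gpu_fill_dw copies removed above: igt_emit_store_dw() builds a batch of MI_STORE_DWORD_IMM commands and igt_gpu_fill_dw() submits it against a pinned vma. A hedged usage sketch (the wrapper and its value are ours; the call matches how gpu_write() uses it in the huge_pages.c hunk earlier):

/* Sketch: write one dword into each page of an already-pinned vma via
 * the GPU, as the converted selftests now do.  All setup (ctx, engine,
 * pinned vma) is assumed to exist as in those selftests. */
static int example_fill(struct i915_vma *vma,
			struct i915_gem_context *ctx,
			struct intel_engine_cs *engine)
{
	return igt_gpu_fill_dw(vma, ctx, engine,
			       0,			/* byte offset of first store */
			       vma->size >> PAGE_SHIFT,	/* one store per page */
			       0xdeadbeef);		/* value to write */
}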
@@ -27,6 +27,7 @@
#include <uapi/linux/sched/types.h>

#include "i915_drv.h"
#include "i915_trace.h"

static void irq_enable(struct intel_engine_cs *engine)
{
@@ -34,9 +35,9 @@ static void irq_enable(struct intel_engine_cs *engine)
return;

/* Caller disables interrupts */
spin_lock(&engine->i915->irq_lock);
spin_lock(&engine->gt->irq_lock);
engine->irq_enable(engine);
spin_unlock(&engine->i915->irq_lock);
spin_unlock(&engine->gt->irq_lock);
}

static void irq_disable(struct intel_engine_cs *engine)
@@ -45,9 +46,9 @@ static void irq_disable(struct intel_engine_cs *engine)
return;

/* Caller disables interrupts */
spin_lock(&engine->i915->irq_lock);
spin_lock(&engine->gt->irq_lock);
engine->irq_disable(engine);
spin_unlock(&engine->i915->irq_lock);
spin_unlock(&engine->gt->irq_lock);
}

static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
@@ -66,14 +67,15 @@ static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
unsigned long flags;

if (!b->irq_armed)
return;

spin_lock_irq(&b->irq_lock);
spin_lock_irqsave(&b->irq_lock, flags);
if (b->irq_armed)
__intel_breadcrumbs_disarm_irq(b);
spin_unlock_irq(&b->irq_lock);
spin_unlock_irqrestore(&b->irq_lock, flags);
}

static inline bool __request_completed(const struct i915_request *rq)
@@ -212,28 +214,6 @@ static void signal_irq_work(struct irq_work *work)
intel_engine_breadcrumbs_irq(engine);
}

void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;

spin_lock_irq(&b->irq_lock);
if (!b->irq_enabled++)
irq_enable(engine);
GEM_BUG_ON(!b->irq_enabled); /* no overflow! */
spin_unlock_irq(&b->irq_lock);
}

void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;

spin_lock_irq(&b->irq_lock);
GEM_BUG_ON(!b->irq_enabled); /* no underflow! */
if (!--b->irq_enabled)
irq_disable(engine);
spin_unlock_irq(&b->irq_lock);
}

static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
{
struct intel_engine_cs *engine =
@@ -53,6 +53,14 @@ int __intel_context_do_pin(struct intel_context *ce)
if (likely(!atomic_read(&ce->pin_count))) {
intel_wakeref_t wakeref;

if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
err = ce->ops->alloc(ce);
if (unlikely(err))
goto err;

__set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
}

err = 0;
with_intel_runtime_pm(&ce->engine->i915->runtime_pm, wakeref)
err = ce->ops->pin(ce);
@@ -60,7 +68,7 @@ int __intel_context_do_pin(struct intel_context *ce)
goto err;

GEM_TRACE("%s context:%llx pin ring:{head:%04x, tail:%04x}\n",
ce->engine->name, ce->ring->timeline->fence_context,
ce->engine->name, ce->timeline->fence_context,
ce->ring->head, ce->ring->tail);

i915_gem_context_get(ce->gem_context); /* for ctx->ppgtt */
@@ -90,7 +98,7 @@ void intel_context_unpin(struct intel_context *ce)

if (likely(atomic_dec_and_test(&ce->pin_count))) {
GEM_TRACE("%s context:%llx retire\n",
ce->engine->name, ce->ring->timeline->fence_context);
ce->engine->name, ce->timeline->fence_context);

ce->ops->unpin(ce);
@@ -118,7 +126,7 @@ static int __context_pin_state(struct i915_vma *vma)
* And mark it as a globally pinned object to let the shrinker know
* it cannot reclaim the object until we release it.
*/
vma->obj->pin_global++;
i915_vma_make_unshrinkable(vma);
vma->obj->mm.dirty = true;

return 0;
@@ -126,8 +134,8 @@ static int __context_pin_state(struct i915_vma *vma)

static void __context_unpin_state(struct i915_vma *vma)
{
vma->obj->pin_global--;
__i915_vma_unpin(vma);
i915_vma_make_shrinkable(vma);
}

static void __intel_context_retire(struct i915_active *active)
@@ -135,11 +143,12 @@ static void __intel_context_retire(struct i915_active *active)
struct intel_context *ce = container_of(active, typeof(*ce), active);

GEM_TRACE("%s context:%llx retire\n",
ce->engine->name, ce->ring->timeline->fence_context);
ce->engine->name, ce->timeline->fence_context);

if (ce->state)
__context_unpin_state(ce->state);

intel_timeline_unpin(ce->timeline);
intel_ring_unpin(ce->ring);
intel_context_put(ce);
}
@@ -155,25 +164,21 @@ static int __intel_context_active(struct i915_active *active)
if (err)
goto err_put;

err = intel_timeline_pin(ce->timeline);
if (err)
goto err_ring;

if (!ce->state)
return 0;

err = __context_pin_state(ce->state);
if (err)
goto err_ring;

/* Preallocate tracking nodes */
if (!i915_gem_context_is_kernel(ce->gem_context)) {
err = i915_active_acquire_preallocate_barrier(&ce->active,
ce->engine);
if (err)
goto err_state;
}
goto err_timeline;

return 0;

err_state:
__context_unpin_state(ce->state);
err_timeline:
intel_timeline_unpin(ce->timeline);
err_ring:
intel_ring_unpin(ce->ring);
err_put:
@@ -181,6 +186,34 @@ static int __intel_context_active(struct i915_active *active)
return err;
}

int intel_context_active_acquire(struct intel_context *ce)
{
int err;

err = i915_active_acquire(&ce->active);
if (err)
return err;

/* Preallocate tracking nodes */
if (!i915_gem_context_is_kernel(ce->gem_context)) {
err = i915_active_acquire_preallocate_barrier(&ce->active,
ce->engine);
if (err) {
i915_active_release(&ce->active);
return err;
}
}

return 0;
}

void intel_context_active_release(struct intel_context *ce)
{
/* Nodes preallocated in intel_context_active() */
i915_active_acquire_barrier(&ce->active);
i915_active_release(&ce->active);
}

void
intel_context_init(struct intel_context *ce,
struct i915_gem_context *ctx,
@@ -192,10 +225,13 @@ intel_context_init(struct intel_context *ce,

ce->gem_context = ctx;
ce->vm = i915_vm_get(ctx->vm ?: &engine->gt->ggtt->vm);
if (ctx->timeline)
ce->timeline = intel_timeline_get(ctx->timeline);

ce->engine = engine;
ce->ops = engine->cops;
ce->sseu = engine->sseu;
ce->ring = __intel_context_ring_size(SZ_16K);

INIT_LIST_HEAD(&ce->signal_link);
INIT_LIST_HEAD(&ce->signals);
@@ -208,6 +244,8 @@ intel_context_init(struct intel_context *ce,

void intel_context_fini(struct intel_context *ce)
{
if (ce->timeline)
intel_timeline_put(ce->timeline);
i915_vm_put(ce->vm);

mutex_destroy(&ce->pin_mutex);
@@ -242,17 +280,19 @@ int __init i915_global_context_init(void)
void intel_context_enter_engine(struct intel_context *ce)
{
intel_engine_pm_get(ce->engine);
intel_timeline_enter(ce->timeline);
}

void intel_context_exit_engine(struct intel_context *ce)
{
intel_timeline_exit(ce->timeline);
intel_engine_pm_put(ce->engine);
}

int intel_context_prepare_remote_request(struct intel_context *ce,
struct i915_request *rq)
{
struct intel_timeline *tl = ce->ring->timeline;
struct intel_timeline *tl = ce->timeline;
int err;

/* Only suitable for use in remotely modifying this context */
@@ -266,10 +306,10 @@ int intel_context_prepare_remote_request(struct intel_context *ce,

/* Queue this switch after current activity by this context. */
err = i915_active_request_set(&tl->last_request, rq);
mutex_unlock(&tl->mutex);
if (err)
goto unlock;
return err;
}
lockdep_assert_held(&tl->mutex);

/*
* Guarantee context image and the timeline remains pinned until the
@@ -279,12 +319,7 @@ int intel_context_prepare_remote_request(struct intel_context *ce,
* words transfer the pinned ce object to tracked active request.
*/
GEM_BUG_ON(i915_active_is_idle(&ce->active));
err = i915_active_ref(&ce->active, rq->fence.context, rq);

unlock:
if (rq->timeline != tl)
mutex_unlock(&tl->mutex);
return err;
return i915_active_ref(&ce->active, rq->timeline, rq);
}

struct i915_request *intel_context_create_request(struct intel_context *ce)
@@ -301,3 +336,7 @@ struct i915_request *intel_context_create_request(struct intel_context *ce)

return rq;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_context.c"
#endif
@@ -12,6 +12,7 @@
#include "i915_active.h"
#include "intel_context_types.h"
#include "intel_engine_types.h"
#include "intel_timeline_types.h"

void intel_context_init(struct intel_context *ce,
struct i915_gem_context *ctx,
@@ -88,33 +89,27 @@ void intel_context_exit_engine(struct intel_context *ce);

static inline void intel_context_enter(struct intel_context *ce)
{
lockdep_assert_held(&ce->timeline->mutex);
if (!ce->active_count++)
ce->ops->enter(ce);
}

static inline void intel_context_mark_active(struct intel_context *ce)
{
lockdep_assert_held(&ce->timeline->mutex);
++ce->active_count;
}

static inline void intel_context_exit(struct intel_context *ce)
{
lockdep_assert_held(&ce->timeline->mutex);
GEM_BUG_ON(!ce->active_count);
if (!--ce->active_count)
ce->ops->exit(ce);
}

static inline int intel_context_active_acquire(struct intel_context *ce)
{
return i915_active_acquire(&ce->active);
}

static inline void intel_context_active_release(struct intel_context *ce)
{
/* Nodes preallocated in intel_context_active() */
i915_active_acquire_barrier(&ce->active);
i915_active_release(&ce->active);
}
int intel_context_active_acquire(struct intel_context *ce);
void intel_context_active_release(struct intel_context *ce);

static inline struct intel_context *intel_context_get(struct intel_context *ce)
{
@@ -127,17 +122,24 @@ static inline void intel_context_put(struct intel_context *ce)
kref_put(&ce->ref, ce->ops->destroy);
}

static inline int __must_check
static inline struct intel_timeline *__must_check
intel_context_timeline_lock(struct intel_context *ce)
__acquires(&ce->ring->timeline->mutex)
__acquires(&ce->timeline->mutex)
{
return mutex_lock_interruptible(&ce->ring->timeline->mutex);
struct intel_timeline *tl = ce->timeline;
int err;

err = mutex_lock_interruptible(&tl->mutex);
if (err)
return ERR_PTR(err);

return tl;
}

static inline void intel_context_timeline_unlock(struct intel_context *ce)
__releases(&ce->ring->timeline->mutex)
static inline void intel_context_timeline_unlock(struct intel_timeline *tl)
__releases(&tl->mutex)
{
mutex_unlock(&ce->ring->timeline->mutex);
mutex_unlock(&tl->mutex);
}

int intel_context_prepare_remote_request(struct intel_context *ce,
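Since the lock helper now hands back the timeline it took, callers pair the two as in this hedged sketch (the caller is invented for illustration; the helpers are the inline functions in the hunk above):

/* Sketch of the new calling convention: lock returns the timeline (or
 * an ERR_PTR), and that timeline is what unlock now expects. */
static int example_with_timeline(struct intel_context *ce)
{
	struct intel_timeline *tl;

	tl = intel_context_timeline_lock(ce);
	if (IS_ERR(tl))
		return PTR_ERR(tl);

	/* ... emit requests against ce under the timeline mutex ... */

	intel_context_timeline_unlock(tl);
	return 0;
}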
@@ -145,4 +147,9 @@ int intel_context_prepare_remote_request(struct intel_context *ce,

struct i915_request *intel_context_create_request(struct intel_context *ce);

static inline struct intel_ring *__intel_context_ring_size(u64 sz)
{
return u64_to_ptr(struct intel_ring, sz);
}

#endif /* __INTEL_CONTEXT_H__ */