drm/i915: Rename global i915 to i915_modparams
Our global struct with params is named exactly the same way as the new
preferred name for the drm_i915_private function parameter. To avoid
this name reuse, let's use a different name for the global.

v5: pure rename
v6: fix

Credits-to: Coccinelle

@@
identifier n;
@@
(
- i915.n
+ i915_modparams.n
)

Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Jani Nikula <jani.nikula@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Ville Syrjala <ville.syrjala@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Acked-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20170919193846.38060-1-michal.wajdeczko@intel.com
commit 4f044a88a8
parent a3df2c857c
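For context, the sketch below illustrates the naming clash the rename avoids; it uses hypothetical stand-in types and a made-up helper, not the real driver headers. Once functions take a "struct drm_i915_private *i915" parameter, a global also called "i915" is easy to misread, while "i915_modparams" stays unambiguous.

/* Illustrative only: stand-in types, not the real i915 headers. */
#include <stdbool.h>
#include <stdio.h>

struct i915_params { bool enable_hangcheck; };
struct drm_i915_private { int dummy; };

/* The renamed global: module parameters live here. */
static struct i915_params i915_modparams = { .enable_hangcheck = true };

/* New convention: the device-private pointer parameter is called "i915".
 * With the old global name, "i915.enable_hangcheck" would read as if it
 * came from this parameter; "i915_modparams" makes the source obvious. */
static void queue_hangcheck(struct drm_i915_private *i915)
{
	if (!i915_modparams.enable_hangcheck)	/* clearly the modparam */
		return;
	printf("hangcheck queued for %p\n", (void *)i915);
}

int main(void)
{
	struct drm_i915_private dev;

	queue_hangcheck(&dev);
	return 0;
}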
@@ -293,7 +293,7 @@ static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
 		if (mmio->in_context &&
 		    ((ctx_ctrl & inhibit_mask) != inhibit_mask) &&
-		    i915.enable_execlists)
+		    i915_modparams.enable_execlists)
 			continue;

@@ -66,7 +66,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
 	kernel_param_lock(THIS_MODULE);
-#define PRINT_PARAM(T, x) seq_print_param(m, #x, #T, &i915.x);
+#define PRINT_PARAM(T, x) seq_print_param(m, #x, #T, &i915_modparams.x);
 	I915_PARAMS_FOR_EACH(PRINT_PARAM);

@@ -1266,7 +1266,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
-	if (!i915.enable_hangcheck) {
+	if (!i915_modparams.enable_hangcheck) {
 		seq_puts(m, "Hangcheck disabled\n");

@@ -1701,7 +1701,7 @@ static int i915_ips_status(struct seq_file *m, void *unused)
 	seq_printf(m, "Enabled by kernel parameter: %s\n",
-		   yesno(i915.enable_ips));
+		   yesno(i915_modparams.enable_ips));

@@ -2016,7 +2016,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
-	if (!i915.enable_execlists) {
+	if (!i915_modparams.enable_execlists) {
 		seq_printf(m, "Logical Ring Contexts are disabled\n");

@@ -2592,7 +2592,7 @@ static int i915_guc_log_control_get(void *data, u64 *val)
-	*val = i915.guc_log_level;
+	*val = i915_modparams.guc_log_level;

@@ -3310,7 +3310,7 @@ static int i915_engine_info(struct seq_file *m, void *unused)
-	if (i915.enable_execlists) {
+	if (i915_modparams.enable_execlists) {
 		const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];

@@ -3406,7 +3406,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
-	if (!i915.semaphores) {
+	if (!i915_modparams.semaphores) {
 		seq_puts(m, "Semaphores are disabled\n");
@@ -58,12 +58,12 @@ static unsigned int i915_load_fail_count;
 bool __i915_inject_load_failure(const char *func, int line)
-	if (i915_load_fail_count >= i915.inject_load_failure)
+	if (i915_load_fail_count >= i915_modparams.inject_load_failure)
-	if (++i915_load_fail_count == i915.inject_load_failure) {
+	if (++i915_load_fail_count == i915_modparams.inject_load_failure) {
 		DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
-			 i915.inject_load_failure, func, line);
+			 i915_modparams.inject_load_failure, func, line);

@@ -106,8 +106,8 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
 static bool i915_error_injected(struct drm_i915_private *dev_priv)
-	return i915.inject_load_failure &&
-	       i915_load_fail_count == i915.inject_load_failure;
+	return i915_modparams.inject_load_failure &&
+	       i915_load_fail_count == i915_modparams.inject_load_failure;

@@ -321,7 +321,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
 	case I915_PARAM_HAS_SEMAPHORES:
-		value = i915.semaphores;
+		value = i915_modparams.semaphores;

@@ -340,7 +340,8 @@ static int i915_getparam(struct drm_device *dev, void *data,
 	case I915_PARAM_HAS_GPU_RESET:
-		value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
+		value = i915_modparams.enable_hangcheck &&
+			intel_has_gpu_reset(dev_priv);
 		if (value && intel_has_reset_engine(dev_priv))

@@ -1031,9 +1032,9 @@ static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
 static void intel_sanitize_options(struct drm_i915_private *dev_priv)
-	i915.enable_execlists =
+	i915_modparams.enable_execlists =
 		intel_sanitize_enable_execlists(dev_priv,
-						i915.enable_execlists);
+						i915_modparams.enable_execlists);

@@ -1041,12 +1042,15 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
-	i915.enable_ppgtt =
-		intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt);
-	DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
+	i915_modparams.enable_ppgtt =
+		intel_sanitize_enable_ppgtt(dev_priv,
+					    i915_modparams.enable_ppgtt);
+	DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915_modparams.enable_ppgtt);

-	i915.semaphores = intel_sanitize_semaphores(dev_priv, i915.semaphores);
-	DRM_DEBUG_DRIVER("use GPU semaphores? %s\n", yesno(i915.semaphores));
+	i915_modparams.semaphores =
+		intel_sanitize_semaphores(dev_priv, i915_modparams.semaphores);
+	DRM_DEBUG_DRIVER("use GPU semaphores? %s\n",
+			 yesno(i915_modparams.semaphores));

 	intel_uc_sanitize_options(dev_priv);

@@ -1277,7 +1281,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
 	/* Enable nuclear pageflip on ILK+ */
-	if (!i915.nuclear_pageflip && match_info->gen < 5)
+	if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
 		driver.driver_features &= ~DRIVER_ATOMIC;
@@ -93,7 +93,7 @@
 #define I915_STATE_WARN(condition, format...) ({ \
 	int __ret_warn_on = !!(condition); \
 	if (unlikely(__ret_warn_on)) \
-		if (!WARN(i915.verbose_state_checks, format)) \
+		if (!WARN(i915_modparams.verbose_state_checks, format)) \
 			DRM_ERROR(format); \

@@ -3076,9 +3076,9 @@ intel_info(const struct drm_i915_private *dev_priv)
-#define USES_PPGTT(dev_priv) (i915.enable_ppgtt)
-#define USES_FULL_PPGTT(dev_priv) (i915.enable_ppgtt >= 2)
-#define USES_FULL_48BIT_PPGTT(dev_priv) (i915.enable_ppgtt == 3)
+#define USES_PPGTT(dev_priv) (i915_modparams.enable_ppgtt)
+#define USES_FULL_PPGTT(dev_priv) (i915_modparams.enable_ppgtt >= 2)
+#define USES_FULL_48BIT_PPGTT(dev_priv) (i915_modparams.enable_ppgtt == 3)

@@ -3279,7 +3279,7 @@ static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
-	if (unlikely(!i915.enable_hangcheck))
+	if (unlikely(!i915_modparams.enable_hangcheck))
 		return;

@@ -4738,7 +4738,7 @@ bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
 	/* TODO: make semaphores and Execlists play nicely together */
-	if (i915.enable_execlists)
+	if (i915_modparams.enable_execlists)
 		return false;

@@ -4759,7 +4759,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
-	if (!i915.enable_execlists) {
+	if (!i915_modparams.enable_execlists) {
 		dev_priv->gt.resume = intel_legacy_submission_resume;
@@ -314,7 +314,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
-	if (HAS_GUC(dev_priv) && i915.enable_guc_loading)
+	if (HAS_GUC(dev_priv) && i915_modparams.enable_guc_loading)
 		ctx->ggtt_offset_bias = GUC_WOPCM_TOP;

@@ -407,7 +407,7 @@ i915_gem_context_create_gvt(struct drm_device *dev)
-	if (!i915.enable_guc_submission)
+	if (!i915_modparams.enable_guc_submission)
 		ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */

@@ -431,7 +431,7 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
-		if (!i915.enable_execlists) {
+		if (!i915_modparams.enable_execlists) {
 			DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");

@@ -483,7 +483,7 @@ void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
 	/* Force the GPU state to be restored on enabling */
-	if (!i915.enable_execlists) {
+	if (!i915_modparams.enable_execlists) {

@@ -568,7 +568,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 flags)
 		/* Use an extended w/a on gen7 if signalling from other rings */
-		(i915.semaphores && INTEL_GEN(dev_priv) == 7) ?
+		(i915_modparams.semaphores && INTEL_GEN(dev_priv) == 7) ?

@@ -837,7 +837,7 @@ int i915_switch_context(struct drm_i915_gem_request *req)
 	lockdep_assert_held(&req->i915->drm.struct_mutex);
-	if (i915.enable_execlists)
+	if (i915_modparams.enable_execlists)
 		return 0;

@@ -1585,7 +1585,7 @@ static int eb_prefault_relocations(const struct i915_execbuffer *eb)
-	if (unlikely(i915.prefault_disable))
+	if (unlikely(i915_modparams.prefault_disable))
 		return 0;

@@ -180,7 +180,7 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
-	if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists) {
+	if (INTEL_GEN(dev_priv) >= 8 && i915_modparams.enable_execlists) {
 		if (has_full_48bit_ppgtt)

@@ -1972,7 +1972,7 @@ int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
-	if (i915.enable_execlists)
+	if (i915_modparams.enable_execlists)
 		return 0;

@@ -3292,7 +3292,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
-	if (HAS_GUC(dev_priv) && i915.enable_guc_loading) {
+	if (HAS_GUC(dev_priv) && i915_modparams.enable_guc_loading) {
 		ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP);

@@ -1554,7 +1554,7 @@ static void i915_gem_capture_guc_log_buffer(struct drm_i915_private *dev_priv,
 	/* Capturing log buf contents won't be useful if logging was disabled */
-	if (!dev_priv->guc.log.vma || (i915.guc_log_level < 0))
+	if (!dev_priv->guc.log.vma || (i915_modparams.guc_log_level < 0))

@@ -1696,7 +1696,7 @@ static int capture(void *data)
-	error->params = i915;
+	error->params = i915_modparams;
 #define DUP(T, x) dup_param(#T, &error->params.x);

@@ -1751,7 +1751,7 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
-	if (!i915.error_capture)
+	if (!i915_modparams.error_capture)
 		return;

@@ -1245,7 +1245,7 @@ int intel_guc_resume(struct drm_i915_private *dev_priv)
-	if (i915.guc_log_level >= 0)
+	if (i915_modparams.guc_log_level >= 0)
 		gen9_enable_guc_interrupts(dev_priv);

@@ -1357,7 +1357,7 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
 		notify_ring(engine);
-		tasklet |= i915.enable_guc_submission;
+		tasklet |= i915_modparams.enable_guc_submission;
@@ -26,13 +26,13 @@
 #include "i915_drv.h"

 #define i915_param_named(name, T, perm, desc) \
-	module_param_named(name, i915.name, T, perm); \
+	module_param_named(name, i915_modparams.name, T, perm); \
 	MODULE_PARM_DESC(name, desc)
 #define i915_param_named_unsafe(name, T, perm, desc) \
-	module_param_named_unsafe(name, i915.name, T, perm); \
+	module_param_named_unsafe(name, i915_modparams.name, T, perm); \
 	MODULE_PARM_DESC(name, desc)

-struct i915_params i915 __read_mostly = {
+struct i915_params i915_modparams __read_mostly = {
 	.modeset = -1,
 	.panel_ignore_lid = 1,
 	.semaphores = -1,

@@ -76,7 +76,7 @@ struct i915_params {
 };
 #undef MEMBER

-extern struct i915_params i915 __read_mostly;
+extern struct i915_params i915_modparams __read_mostly;
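As a rough illustration of why only these two files need to know the global's name: the i915_param_named() wrappers splice the global into module_param_named(), so every parameter definition picks up i915_modparams automatically. The sketch below is a userspace mock (module_param_named() and MODULE_PARM_DESC() are faked, and the parameter description text is made up), not the kernel implementation.

/* Illustrative only: userspace mocks of the kernel macro plumbing. */
#include <stdbool.h>
#include <stdio.h>

struct i915_params { bool enable_hangcheck; };

static struct i915_params i915_modparams = { .enable_hangcheck = true };

/* Mocks: report which storage a parameter would be bound to. */
#define module_param_named(name, value, T, perm) \
	printf("param '%s' -> %p (" #T ", %o)\n", #name, \
	       (void *)&(value), (unsigned)(perm))
#define MODULE_PARM_DESC(name, desc) \
	printf("  %s: %s\n", #name, desc)

/* The wrapper pattern from i915_params.c: only this macro names the
 * global, so every i915_param_named() user picked up the rename for free. */
#define i915_param_named(name, T, perm, desc) \
	module_param_named(name, i915_modparams.name, T, perm); \
	MODULE_PARM_DESC(name, desc)

int main(void)
{
	i915_param_named(enable_hangcheck, bool, 0600,
			 "check and recover from GPU hangs (illustrative text)");
	return 0;
}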
@@ -631,7 +631,7 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-	if (IS_ALPHA_SUPPORT(intel_info) && !i915.alpha_support) {
+	if (IS_ALPHA_SUPPORT(intel_info) && !i915_modparams.alpha_support) {
 		DRM_INFO("The driver support for your hardware in this kernel version is alpha quality\n"

@@ -689,10 +689,10 @@ static int __init i915_init(void)
-	if (i915.modeset == 0)
+	if (i915_modparams.modeset == 0)
 		use_kms = false;

-	if (vgacon_text_force() && i915.modeset == -1)
+	if (vgacon_text_force() && i915_modparams.modeset == -1)
 		use_kms = false;

@@ -1214,7 +1214,7 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
-	if (i915.enable_execlists)
+	if (i915_modparams.enable_execlists)
 		dev_priv->perf.oa.specific_ctx_id = stream->ctx->hw_id;

@@ -1260,7 +1260,7 @@ static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
-	if (i915.enable_execlists) {
+	if (i915_modparams.enable_execlists) {
 		dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;

@@ -3408,7 +3408,7 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
-	} else if (i915.enable_execlists) {
+	} else if (i915_modparams.enable_execlists) {

@@ -356,7 +356,7 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
-	index = i915.vbt_sdvo_panel_type;
+	index = i915_modparams.vbt_sdvo_panel_type;
 	if (index == -2) {

@@ -675,8 +675,9 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
 	/* Don't read from VBT if module parameter has valid value*/
-	if (i915.edp_vswing) {
-		dev_priv->vbt.edp.low_vswing = i915.edp_vswing == 1;
+	if (i915_modparams.edp_vswing) {
+		dev_priv->vbt.edp.low_vswing =
+			i915_modparams.edp_vswing == 1;
 	} else {

@@ -712,7 +712,7 @@ intel_crt_detect(struct drm_connector *connector,
-	if (I915_HAS_HOTPLUG(dev_priv) && !i915.load_detect_test) {
+	if (I915_HAS_HOTPLUG(dev_priv) && !i915_modparams.load_detect_test) {
 		status = connector_status_disconnected;

@@ -730,7 +730,7 @@ intel_crt_detect(struct drm_connector *connector,
-	else if (i915.load_detect_test)
+	else if (i915_modparams.load_detect_test)
 		status = connector_status_disconnected;

@@ -343,7 +343,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
-	if (i915.disable_display) {
+	if (i915_modparams.disable_display) {
 		DRM_INFO("Display disabled (module parameter)\n");
@@ -3701,7 +3701,7 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
 	/* reset doesn't touch the display */
-	if (!i915.force_reset_modeset_test &&
+	if (!i915_modparams.force_reset_modeset_test &&
 	    !gpu_reset_clobbers_display(dev_priv))

@@ -3757,7 +3757,7 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
 	/* reset doesn't touch the display */
-	if (!i915.force_reset_modeset_test &&
+	if (!i915_modparams.force_reset_modeset_test &&
 	    !gpu_reset_clobbers_display(dev_priv))

@@ -6313,7 +6313,7 @@ static void hsw_compute_ips_config(struct intel_crtc *crtc,
-	pipe_config->ips_enabled = i915.enable_ips &&
+	pipe_config->ips_enabled = i915_modparams.enable_ips &&
 		hsw_crtc_supports_ips(crtc) &&

@@ -6494,8 +6494,8 @@ intel_link_compute_m_n(int bits_per_pixel, int nlanes,
 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
-	if (i915.panel_use_ssc >= 0)
-		return i915.panel_use_ssc != 0;
+	if (i915_modparams.panel_use_ssc >= 0)
+		return i915_modparams.panel_use_ssc != 0;

@@ -12084,7 +12084,7 @@ static int intel_atomic_check(struct drm_device *dev,
-		if (i915.fastboot &&
+		if (i915_modparams.fastboot &&
 		    intel_pipe_config_compare(dev_priv,

@@ -3848,7 +3848,7 @@ intel_dp_can_mst(struct intel_dp *intel_dp)
-	if (!i915.enable_dp_mst)
+	if (!i915_modparams.enable_dp_mst)
 		return false;

@@ -3866,7 +3866,7 @@ intel_dp_can_mst(struct intel_dp *intel_dp)
 intel_dp_configure_mst(struct intel_dp *intel_dp)
-	if (!i915.enable_dp_mst)
+	if (!i915_modparams.enable_dp_mst)
 		return;

@@ -264,7 +264,7 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
-	if (!i915.enable_dpcd_backlight)
+	if (!i915_modparams.enable_dpcd_backlight)
 		return -ENODEV;
@@ -1902,7 +1902,7 @@ void intel_init_ipc(struct drm_i915_private *dev_priv);
 static inline int intel_enable_rc6(void)
 {
-	return i915.enable_rc6;
+	return i915_modparams.enable_rc6;
 }

@@ -153,7 +153,7 @@ __intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
 	case 8:
-		return i915.enable_execlists ?
+		return i915_modparams.enable_execlists ?
 		       GEN8_LR_CONTEXT_RENDER_SIZE :

@@ -301,7 +301,7 @@ int intel_engines_init(struct drm_i915_private *dev_priv)
-		if (i915.enable_execlists)
+		if (i915_modparams.enable_execlists)
 			init = class_info->init_execlists;

@@ -859,7 +859,7 @@ static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
-	if (!i915.enable_fbc) {
+	if (!i915_modparams.enable_fbc) {
 		fbc->no_fbc_reason = "disabled per module param or by default";

@@ -1310,8 +1310,8 @@ void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
 static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
-	if (i915.enable_fbc >= 0)
-		return !!i915.enable_fbc;
+	if (i915_modparams.enable_fbc >= 0)
+		return !!i915_modparams.enable_fbc;

@@ -1355,8 +1355,9 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
-	i915.enable_fbc = intel_sanitize_fbc_option(dev_priv);
-	DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n", i915.enable_fbc);
+	i915_modparams.enable_fbc = intel_sanitize_fbc_option(dev_priv);
+	DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n",
+		      i915_modparams.enable_fbc);
@@ -131,14 +131,14 @@ static void guc_params_init(struct drm_i915_private *dev_priv)
-	if (i915.guc_log_level >= 0) {
+	if (i915_modparams.guc_log_level >= 0) {
 		params[GUC_CTL_DEBUG] =
-			i915.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
+			i915_modparams.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
 	} else
 		params[GUC_CTL_DEBUG] = GUC_LOG_DISABLED;

 	/* If GuC submission is enabled, set up additional parameters here */
-	if (i915.enable_guc_submission) {
+	if (i915_modparams.enable_guc_submission) {

@@ -368,7 +368,8 @@ int intel_guc_init_hw(struct intel_guc *guc)
 	DRM_INFO("GuC %s (firmware %s [version %u.%u])\n",
-		 i915.enable_guc_submission ? "submission enabled" : "loaded",
+		 i915_modparams.enable_guc_submission ? "submission enabled" :
+							"loaded",
 		 guc->fw.path,

@@ -390,8 +391,8 @@ int intel_guc_select_fw(struct intel_guc *guc)
-	if (i915.guc_firmware_path) {
-		guc->fw.path = i915.guc_firmware_path;
+	if (i915_modparams.guc_firmware_path) {
+		guc->fw.path = i915_modparams.guc_firmware_path;
 		guc->fw.major_ver_wanted = 0;

@@ -144,7 +144,7 @@ static int guc_log_relay_file_create(struct intel_guc *guc)
-	if (i915.guc_log_level < 0)
+	if (i915_modparams.guc_log_level < 0)
 		return 0;

@@ -480,7 +480,7 @@ static int guc_log_late_setup(struct intel_guc *guc)
 	/* logging will remain off */
-	i915.guc_log_level = -1;
+	i915_modparams.guc_log_level = -1;

@@ -502,7 +502,8 @@ static void guc_flush_logs(struct intel_guc *guc)
-	if (!i915.enable_guc_submission || (i915.guc_log_level < 0))
+	if (!i915_modparams.enable_guc_submission ||
+	    (i915_modparams.guc_log_level < 0))
 		return;

@@ -529,8 +530,8 @@ int intel_guc_log_create(struct intel_guc *guc)
-	if (i915.guc_log_level > GUC_LOG_VERBOSITY_MAX)
-		i915.guc_log_level = GUC_LOG_VERBOSITY_MAX;
+	if (i915_modparams.guc_log_level > GUC_LOG_VERBOSITY_MAX)
+		i915_modparams.guc_log_level = GUC_LOG_VERBOSITY_MAX;

@@ -555,7 +556,7 @@ int intel_guc_log_create(struct intel_guc *guc)
-	if (i915.guc_log_level >= 0) {
+	if (i915_modparams.guc_log_level >= 0) {
 		ret = guc_log_runtime_create(guc);

@@ -576,7 +577,7 @@ int intel_guc_log_create(struct intel_guc *guc)
 	/* logging will be off */
-	i915.guc_log_level = -1;
+	i915_modparams.guc_log_level = -1;

@@ -600,7 +601,7 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
 	/* This combination doesn't make sense & won't have any effect */
-	if (!log_param.logging_enabled && (i915.guc_log_level < 0))
+	if (!log_param.logging_enabled && (i915_modparams.guc_log_level < 0))
 		return 0;

@@ -610,7 +611,7 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
 	if (log_param.logging_enabled) {
-		i915.guc_log_level = log_param.verbosity;
+		i915_modparams.guc_log_level = log_param.verbosity;

@@ -633,7 +634,7 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
 	/* As logging is disabled, update log level to reflect that */
-	i915.guc_log_level = -1;
+	i915_modparams.guc_log_level = -1;

@@ -641,7 +642,8 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
 void i915_guc_log_register(struct drm_i915_private *dev_priv)
-	if (!i915.enable_guc_submission || i915.guc_log_level < 0)
+	if (!i915_modparams.enable_guc_submission ||
+	    (i915_modparams.guc_log_level < 0))
 		return;

@@ -651,7 +653,7 @@ void i915_guc_log_register(struct drm_i915_private *dev_priv)
 void i915_guc_log_unregister(struct drm_i915_private *dev_priv)
-	if (!i915.enable_guc_submission)
+	if (!i915_modparams.enable_guc_submission)
 		return;
@@ -58,7 +58,7 @@ static bool is_supported_device(struct drm_i915_private *dev_priv)
 void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv)
-	if (!i915.enable_gvt)
+	if (!i915_modparams.enable_gvt)
 		return;

@@ -73,7 +73,7 @@ void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv)
 bail:
-	i915.enable_gvt = 0;
+	i915_modparams.enable_gvt = 0;

@@ -90,17 +90,17 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
-	if (!i915.enable_gvt) {
+	if (!i915_modparams.enable_gvt) {
 		DRM_DEBUG_DRIVER("GVT-g is disabled by kernel params\n");

-	if (!i915.enable_execlists) {
+	if (!i915_modparams.enable_execlists) {
 		DRM_ERROR("i915 GVT-g loading failed due to disabled execlists mode\n");

-	if (i915.enable_guc_submission) {
+	if (i915_modparams.enable_guc_submission) {
 		DRM_ERROR("i915 GVT-g loading failed due to Graphics virtualization is not yet supported with GuC submission\n");

@@ -123,7 +123,7 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
 bail:
-	i915.enable_gvt = 0;
+	i915_modparams.enable_gvt = 0;
 	return 0;

@@ -428,7 +428,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
-	if (!i915.enable_hangcheck)
+	if (!i915_modparams.enable_hangcheck)
 		return;

@@ -155,8 +155,8 @@ void intel_huc_select_fw(struct intel_huc *huc)
-	if (i915.huc_firmware_path) {
-		huc->fw.path = i915.huc_firmware_path;
+	if (i915_modparams.huc_firmware_path) {
+		huc->fw.path = i915_modparams.huc_firmware_path;
 		huc->fw.major_ver_wanted = 0;

@@ -244,7 +244,7 @@ int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enabl
 	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) &&
 	    USES_PPGTT(dev_priv) &&
-	    i915.use_mmio_flip >= 0)
+	    i915_modparams.use_mmio_flip >= 0)
 		return 1;

@@ -1324,7 +1324,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
 	/* After a GPU reset, we may have requests to replay */
-	if (!i915.enable_guc_submission && engine->execlist_first)
+	if (!i915_modparams.enable_guc_submission && engine->execlist_first)
 		tasklet_schedule(&engine->irq_tasklet);
@@ -880,8 +880,8 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
 	/* use the module option value if specified */
-	if (i915.lvds_channel_mode > 0)
-		return i915.lvds_channel_mode == 2;
+	if (i915_modparams.lvds_channel_mode > 0)
+		return i915_modparams.lvds_channel_mode == 2;

@@ -921,7 +921,7 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
-	const char *name = i915.vbt_firmware;
+	const char *name = i915_modparams.vbt_firmware;

@@ -379,13 +379,13 @@ enum drm_connector_status
 	/* Assume that the BIOS does not lie through the OpRegion... */
-	if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) {
+	if (!i915_modparams.panel_ignore_lid && dev_priv->opregion.lid_state) {

-	switch (i915.panel_ignore_lid) {
+	switch (i915_modparams.panel_ignore_lid) {
 	case -2:

@@ -465,10 +465,10 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector,
-	if (i915.invert_brightness < 0)
+	if (i915_modparams.invert_brightness < 0)
 		return val;

-	if (i915.invert_brightness > 0 ||
+	if (i915_modparams.invert_brightness > 0 ||
 	    dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {

@@ -7825,7 +7825,7 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
-	if (!i915.enable_rc6) {
+	if (!i915_modparams.enable_rc6) {
 		DRM_INFO("RC6 disabled, disabling runtime PM support\n");

@@ -7882,7 +7882,7 @@ void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
-	if (!i915.enable_rc6)
+	if (!i915_modparams.enable_rc6)
 		intel_runtime_pm_put(dev_priv);

@@ -8004,7 +8004,7 @@ static void __intel_autoenable_gt_powersave(struct work_struct *work)
-	if (!i915.enable_execlists && i915_switch_context(req) == 0)
+	if (!i915_modparams.enable_execlists && i915_switch_context(req) == 0)
 		rcs->init_context(req);

@@ -396,7 +396,7 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
-	if (!i915.enable_psr) {
+	if (!i915_modparams.enable_psr) {
 		DRM_DEBUG_KMS("PSR disable by flag\n");

@@ -943,8 +943,8 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
 	/* Per platform default: all disabled. */
-	if (i915.enable_psr == -1)
-		i915.enable_psr = 0;
+	if (i915_modparams.enable_psr == -1)
+		i915_modparams.enable_psr = 0;

@@ -958,11 +958,11 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
 	/* Override link_standby x link_off defaults */
-	if (i915.enable_psr == 2 && !dev_priv->psr.link_standby) {
+	if (i915_modparams.enable_psr == 2 && !dev_priv->psr.link_standby) {
 		DRM_DEBUG_KMS("PSR: Forcing link standby\n");

-	if (i915.enable_psr == 3 && dev_priv->psr.link_standby) {
+	if (i915_modparams.enable_psr == 3 && dev_priv->psr.link_standby) {
 		DRM_DEBUG_KMS("PSR: Forcing main link off\n");
@@ -1882,7 +1882,7 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
-	if (!i915.semaphores)
+	if (!i915_modparams.semaphores)
 		return;

@@ -1982,7 +1982,7 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
 	DRM_DEBUG_DRIVER("Failed to allocate space for semaphores, disabling\n");
-	i915.semaphores = 0;
+	i915_modparams.semaphores = 0;

@@ -2039,7 +2039,7 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
-	if (i915.semaphores) {
+	if (i915_modparams.semaphores) {
 		int num_rings;

@@ -2083,7 +2083,7 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
-	if (i915.semaphores) {
+	if (i915_modparams.semaphores) {
 		int num_rings;

@@ -2413,7 +2413,7 @@ static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
-	if (!i915.disable_power_well)
+	if (!i915_modparams.disable_power_well)
 		max_dc = 0;

@@ -2471,10 +2471,11 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
-	i915.disable_power_well = sanitize_disable_power_well_option(dev_priv,
-								      i915.disable_power_well);
-	dev_priv->csr.allowed_dc_mask = get_allowed_dc_mask(dev_priv,
-							    i915.enable_dc);
+	i915_modparams.disable_power_well =
+		sanitize_disable_power_well_option(dev_priv,
+						   i915_modparams.disable_power_well);
+	dev_priv->csr.allowed_dc_mask =
+		get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc);

@@ -2535,7 +2536,7 @@ void intel_power_domains_fini(struct drm_i915_private *dev_priv)
 	/* Remove the refcount we took to keep power well support disabled. */
-	if (!i915.disable_power_well)
+	if (!i915_modparams.disable_power_well)
 		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);

@@ -2995,7 +2996,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
 	/* Disable power support if the user asked so. */
-	if (!i915.disable_power_well)
+	if (!i915_modparams.disable_power_well)
 		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);

@@ -3014,7 +3015,7 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
-	if (!i915.disable_power_well)
+	if (!i915_modparams.disable_power_well)
 		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
@@ -63,35 +63,35 @@ static int __intel_uc_reset_hw(struct drm_i915_private *dev_priv)
 void intel_uc_sanitize_options(struct drm_i915_private *dev_priv)
 	if (!HAS_GUC(dev_priv)) {
-		if (i915.enable_guc_loading > 0 ||
-		    i915.enable_guc_submission > 0)
+		if (i915_modparams.enable_guc_loading > 0 ||
+		    i915_modparams.enable_guc_submission > 0)
 			DRM_INFO("Ignoring GuC options, no hardware\n");

-		i915.enable_guc_loading = 0;
-		i915.enable_guc_submission = 0;
+		i915_modparams.enable_guc_loading = 0;
+		i915_modparams.enable_guc_submission = 0;
 		return;
 	}

 	/* A negative value means "use platform default" */
-	if (i915.enable_guc_loading < 0)
-		i915.enable_guc_loading = HAS_GUC_UCODE(dev_priv);
+	if (i915_modparams.enable_guc_loading < 0)
+		i915_modparams.enable_guc_loading = HAS_GUC_UCODE(dev_priv);

 	/* Verify firmware version */
-	if (i915.enable_guc_loading) {
+	if (i915_modparams.enable_guc_loading) {
 		if (HAS_HUC_UCODE(dev_priv))
 			intel_huc_select_fw(&dev_priv->huc);

 		if (intel_guc_select_fw(&dev_priv->guc))
-			i915.enable_guc_loading = 0;
+			i915_modparams.enable_guc_loading = 0;
 	}

 	/* Can't enable guc submission without guc loaded */
-	if (!i915.enable_guc_loading)
-		i915.enable_guc_submission = 0;
+	if (!i915_modparams.enable_guc_loading)
+		i915_modparams.enable_guc_submission = 0;

 	/* A negative value means "use platform default" */
-	if (i915.enable_guc_submission < 0)
-		i915.enable_guc_submission = HAS_GUC_SCHED(dev_priv);
+	if (i915_modparams.enable_guc_submission < 0)
+		i915_modparams.enable_guc_submission = HAS_GUC_SCHED(dev_priv);

@@ -290,7 +290,7 @@ static void guc_init_send_regs(struct intel_guc *guc)
 static void guc_capture_load_err_log(struct intel_guc *guc)
-	if (!guc->log.vma || i915.guc_log_level < 0)
+	if (!guc->log.vma || i915_modparams.guc_log_level < 0)
 		return;

@@ -333,7 +333,7 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
-	if (!i915.enable_guc_loading)
+	if (!i915_modparams.enable_guc_loading)
 		return 0;

@@ -342,7 +342,7 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
-	if (i915.enable_guc_submission) {
+	if (i915_modparams.enable_guc_submission) {

@@ -391,8 +391,8 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
 	intel_guc_auth_huc(dev_priv);
-	if (i915.enable_guc_submission) {
-		if (i915.guc_log_level >= 0)
+	if (i915_modparams.enable_guc_submission) {
+		if (i915_modparams.guc_log_level >= 0)
 			gen9_enable_guc_interrupts(dev_priv);

@@ -417,23 +417,24 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
 err_submission:
-	if (i915.enable_guc_submission)
+	if (i915_modparams.enable_guc_submission)
 		i915_guc_submission_fini(dev_priv);

 	DRM_ERROR("GuC init failed\n");
-	if (i915.enable_guc_loading > 1 || i915.enable_guc_submission > 1)
+	if (i915_modparams.enable_guc_loading > 1 ||
+	    i915_modparams.enable_guc_submission > 1)
 		ret = -EIO;

-	if (i915.enable_guc_submission) {
-		i915.enable_guc_submission = 0;
+	if (i915_modparams.enable_guc_submission) {
+		i915_modparams.enable_guc_submission = 0;
 		DRM_NOTE("Falling back from GuC submission to execlist mode\n");
 	}

-	i915.enable_guc_loading = 0;
+	i915_modparams.enable_guc_loading = 0;
 	DRM_NOTE("GuC firmware loading disabled\n");

@@ -443,15 +444,15 @@ void intel_uc_fini_hw(struct drm_i915_private *dev_priv)
-	if (!i915.enable_guc_loading)
+	if (!i915_modparams.enable_guc_loading)
 		return;

-	if (i915.enable_guc_submission)
+	if (i915_modparams.enable_guc_submission)
 		i915_guc_submission_disable(dev_priv);

-	if (i915.enable_guc_submission) {
+	if (i915_modparams.enable_guc_submission) {
 		gen9_disable_guc_interrupts(dev_priv);
@@ -436,7 +436,8 @@ void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
 void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
-	i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6);
+	i915_modparams.enable_rc6 =
+		sanitize_rc6_option(dev_priv, i915_modparams.enable_rc6);

@@ -507,10 +508,10 @@ void intel_uncore_forcewake_user_get(struct drm_i915_private *dev_priv)
 		dev_priv->uncore.user_forcewake.saved_mmio_debug =
-			i915.mmio_debug;
+			i915_modparams.mmio_debug;

 		dev_priv->uncore.unclaimed_mmio_check = 0;
-		i915.mmio_debug = 0;
+		i915_modparams.mmio_debug = 0;

@@ -532,7 +533,7 @@ void intel_uncore_forcewake_user_put(struct drm_i915_private *dev_priv)
-	i915.mmio_debug =
+	i915_modparams.mmio_debug =
 		dev_priv->uncore.user_forcewake.saved_mmio_debug;

@@ -841,7 +842,8 @@ __unclaimed_reg_debug(struct drm_i915_private *dev_priv,
 		 i915_mmio_reg_offset(reg)))
-		i915.mmio_debug--; /* Only report the first N failures */
+		/* Only report the first N failures */
+		i915_modparams.mmio_debug--;

@@ -850,7 +852,7 @@ unclaimed_reg_debug(struct drm_i915_private *dev_priv,
-	if (likely(!i915.mmio_debug))
+	if (likely(!i915_modparams.mmio_debug))
 		return;

@@ -1703,7 +1705,7 @@ typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);
 static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
-	if (!i915.reset)
+	if (!i915_modparams.reset)
 		return NULL;

@@ -1777,7 +1779,7 @@ bool intel_has_reset_engine(struct drm_i915_private *dev_priv)
 	return (dev_priv->info.has_reset_engine &&
 		!dev_priv->guc.execbuf_client &&
-		i915.reset >= 2);
+		i915_modparams.reset >= 2);

@@ -1802,7 +1804,7 @@ bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
 intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
-	if (unlikely(i915.mmio_debug ||
+	if (unlikely(i915_modparams.mmio_debug ||
 		     dev_priv->uncore.unclaimed_mmio_check <= 0))

@@ -1810,7 +1812,7 @@ intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
 		  "Please use i915.mmio_debug=N for more information.\n");
-	i915.mmio_debug++;
+	i915_modparams.mmio_debug++;
 	dev_priv->uncore.unclaimed_mmio_check--;