mirror of https://gitee.com/openkylin/linux.git
drm/i915/gvt: mark symbols static where possible
Mark all local functions & variables as static.

Signed-off-by: Du, Changbin <changbin.du@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
parent 321927db98
commit 999ccb4017
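The change throughout the diff below follows the usual C pattern: a function or variable that is only referenced inside its own .c file is given internal linkage with the static keyword, so it no longer occupies the kernel's global symbol namespace and no longer draws sparse's "Should it be static?" warning. A minimal stand-alone sketch of the before/after, with invented names that do not come from the patch:

/* sketch.c - illustration only; every identifier here is invented */
#include <stdbool.h>
#include <stdio.h>

/* Before the change these would have external linkage:
 *     unsigned long bypass_mask = 0xf0;
 *     int count_set_bits(unsigned long mask) { ... }
 * Adding static restricts them to this translation unit.
 */
static unsigned long bypass_mask = 0xf0;
static bool counting_enabled = true;

static int count_set_bits(unsigned long mask)
{
	int n = 0;

	for (; mask; mask >>= 1)
		n += mask & 1;
	return n;
}

int main(void)
{
	if (counting_enabled)
		printf("bits set in mask: %d\n", count_set_bits(bypass_mask));
	return 0;
}

With internal linkage the names cannot clash with identically named symbols elsewhere in the image, and an unused one can be flagged by the compiler instead of silently lingering in the symbol table.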
@@ -480,8 +480,8 @@ struct parser_exec_state {
 #define gmadr_dw_number(s) \
 	(s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)
 
-unsigned long bypass_scan_mask = 0;
-bool bypass_batch_buffer_scan = true;
+static unsigned long bypass_scan_mask = 0;
+static bool bypass_batch_buffer_scan = true;
 
 /* ring ALL, type = 0 */
 static struct sub_op_bits sub_op_mi[] = {
@@ -960,7 +960,7 @@ struct cmd_interrupt_event {
 	int mi_user_interrupt;
 };
 
-struct cmd_interrupt_event cmd_interrupt_events[] = {
+static struct cmd_interrupt_event cmd_interrupt_events[] = {
 	[RCS] = {
 		.pipe_control_notify = RCS_PIPE_CONTROL,
 		.mi_flush_dw = INTEL_GVT_EVENT_RESERVED,
@@ -120,7 +120,7 @@ static unsigned char virtual_dp_monitor_edid[] = {
 
 #define DPCD_HEADER_SIZE 0xb
 
-u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
+static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
 	0x11, 0x0a, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
 };
 
@@ -623,7 +623,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
 	(list_empty(q) ? NULL : container_of(q->prev, \
 	struct intel_vgpu_workload, list))
 
-bool submit_context(struct intel_vgpu *vgpu, int ring_id,
+static bool submit_context(struct intel_vgpu *vgpu, int ring_id,
 		struct execlist_ctx_descriptor_format *desc,
 		bool emulate_schedule_in)
 {
@@ -1921,7 +1921,7 @@ int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 	return ret;
 }
 
-bool intel_gvt_create_scratch_page(struct intel_vgpu *vgpu)
+static bool create_scratch_page(struct intel_vgpu *vgpu)
 {
 	struct intel_vgpu_gtt *gtt = &vgpu->gtt;
 	void *p;
@@ -1955,7 +1955,7 @@ bool intel_gvt_create_scratch_page(struct intel_vgpu *vgpu)
 	return 0;
 }
 
-void intel_gvt_release_scratch_page(struct intel_vgpu *vgpu)
+static void release_scratch_page(struct intel_vgpu *vgpu)
 {
 	if (vgpu->gtt.scratch_page != NULL) {
 		__free_page(vgpu->gtt.scratch_page);
@@ -1995,7 +1995,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
 
 	gtt->ggtt_mm = ggtt_mm;
 
-	intel_gvt_create_scratch_page(vgpu);
+	create_scratch_page(vgpu);
 	return 0;
 }
 
@@ -2015,7 +2015,7 @@ void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
 	struct intel_vgpu_mm *mm;
 
 	ppgtt_free_all_shadow_page(vgpu);
-	intel_gvt_release_scratch_page(vgpu);
+	release_scratch_page(vgpu);
 
 	list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) {
 		mm = container_of(pos, struct intel_vgpu_mm, list);
@@ -50,7 +50,7 @@
 static void update_upstream_irq(struct intel_vgpu *vgpu,
 		struct intel_gvt_irq_info *info);
 
-const char * const irq_name[INTEL_GVT_EVENT_MAX] = {
+static const char * const irq_name[INTEL_GVT_EVENT_MAX] = {
 	[RCS_MI_USER_INTERRUPT] = "Render CS MI USER INTERRUPT",
 	[RCS_DEBUG] = "Render EU debug from SVG",
 	[RCS_MMIO_SYNC_FLUSH] = "Render MMIO sync flush status",
@@ -236,7 +236,7 @@ static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
 	list_del_init(&vgpu_data->list);
 }
 
-struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
+static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
 	.init = tbs_sched_init,
 	.clean = tbs_sched_clean,
 	.init_vgpu = tbs_sched_init_vgpu,
@@ -41,7 +41,8 @@
 #define RING_CTX_OFF(x) \
 	offsetof(struct execlist_ring_context, x)
 
-void set_context_pdp_root_pointer(struct execlist_ring_context *ring_context,
+static void set_context_pdp_root_pointer(
+		struct execlist_ring_context *ring_context,
 		u32 pdp[8])
 {
 	struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW;
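One further detail: where a function itself becomes static (the scratch-page hunks above), the patch also drops the intel_gvt_ prefix and updates the callers in the same file, since a file-local helper no longer needs a driver-wide unique name. A hypothetical sketch of that rename-plus-static pattern, again with invented names:

#include <stdbool.h>

struct thing {
	bool ready;
};

/* Was (hypothetically): bool intel_foo_create_thing(struct thing *t);
 * With external linkage the name carried the subsystem prefix to stay
 * unique across the whole image; as a static helper the short name is enough.
 */
static bool create_thing(struct thing *t)
{
	t->ready = true;
	return true;
}

void foo_init(struct thing *t)
{
	/* the call site in the same file switches to the shorter name */
	create_thing(t);
}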