Merge tag 'gvt-next-2019-08-13' of https://github.com/intel/gvt-linux into drm-intel-next-queued
gvt-next-2019-08-13

- Enhance command parser for extra length check (Fred)
- remove debugfs function return check (Greg)
- batch buffer end double check after shadow copy (Tina)
- one typo fix (Zhenyu)
- klocwork warning fix (Zhi)
- use struct_size() helper (Gustavo)

Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
From: Zhenyu Wang <zhenyuw@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190813100604.GG19140@zhen-hp.sh.intel.com
commit 710bb9cfee
@@ -374,21 +374,37 @@ typedef int (*parser_cmd_handler)(struct parser_exec_state *s);
 #define ADDR_FIX_4(x1, x2, x3, x4) (ADDR_FIX_1(x1) | ADDR_FIX_3(x2, x3, x4))
 #define ADDR_FIX_5(x1, x2, x3, x4, x5) (ADDR_FIX_1(x1) | ADDR_FIX_4(x2, x3, x4, x5))
 
+#define DWORD_FIELD(dword, end, start) \
+        FIELD_GET(GENMASK(end, start), cmd_val(s, dword))
+
+#define OP_LENGTH_BIAS 2
+#define CMD_LEN(value) (value + OP_LENGTH_BIAS)
+
+static int gvt_check_valid_cmd_length(int len, int valid_len)
+{
+        if (valid_len != len) {
+                gvt_err("len is not valid: len=%u valid_len=%u\n",
+                        len, valid_len);
+                return -EFAULT;
+        }
+        return 0;
+}
+
 struct cmd_info {
         const char *name;
         u32 opcode;
 
-#define F_LEN_MASK (1U<<0)
+#define F_LEN_MASK 3U
 #define F_LEN_CONST 1U
 #define F_LEN_VAR 0U
+/* value is const although LEN maybe variable */
+#define F_LEN_VAR_FIXED (1<<1)
 
 /*
  * command has its own ip advance logic
  * e.g. MI_BATCH_START, MI_BATCH_END
  */
-#define F_IP_ADVANCE_CUSTOM (1<<1)
-
-#define F_POST_HANDLE (1<<2)
+#define F_IP_ADVANCE_CUSTOM (1<<2)
         u32 flag;
 
 #define R_RCS BIT(RCS0)
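The CMD_LEN() bias above reflects the usual MI packet encoding, in which the DWord Length field does not count the first two dwords of the command. A minimal stand-alone sketch of that convention (the macros mirror the hunk above; the sample values are made up for illustration):

#include <stdio.h>

#define OP_LENGTH_BIAS 2
#define CMD_LEN(value)  ((value) + OP_LENGTH_BIAS)

int main(void)
{
        /* A table entry of CMD_LEN(2) means the whole packet is 4 dwords. */
        int valid_len = CMD_LEN(2);
        int decoded_len = 4;    /* what cmd_length() would report */

        printf("valid_len=%d match=%d\n", valid_len, decoded_len == valid_len);
        return 0;
}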
@@ -418,9 +434,12 @@ struct cmd_info {
          * flag == F_LEN_VAR : length bias bits
          * Note: length is in DWord
          */
-        u8 len;
+        u32 len;
 
         parser_cmd_handler handler;
+
+        /* valid length in DWord */
+        u32 valid_len;
 };
 
 struct cmd_entry {
@@ -944,6 +963,18 @@ static int cmd_handler_lri(struct parser_exec_state *s)
         int i, ret = 0;
         int cmd_len = cmd_length(s);
         struct intel_gvt *gvt = s->vgpu->gvt;
+        u32 valid_len = CMD_LEN(1);
 
+        /*
+         * Official intel docs are somewhat sloppy , check the definition of
+         * MI_LOAD_REGISTER_IMM.
+         */
+        #define MAX_VALID_LEN 127
+        if ((cmd_len < valid_len) || (cmd_len > MAX_VALID_LEN)) {
+                gvt_err("len is not valid: len=%u valid_len=%u\n",
+                        cmd_len, valid_len);
+                return -EFAULT;
+        }
+
         for (i = 1; i < cmd_len; i += 2) {
                 if (IS_BROADWELL(gvt->dev_priv) && s->ring_id != RCS0) {
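MI_LOAD_REGISTER_IMM carries a variable number of (offset, value) pairs after its header dword, which is why the hunk above only bounds the length between CMD_LEN(1) and MAX_VALID_LEN instead of pinning it. A small illustration of that relationship (the helper name is ours, not from the patch):

/* One header dword plus two dwords per register write. */
static int lri_expected_dwords(int num_pairs)
{
        return 1 + 2 * num_pairs;
}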
@@ -1375,6 +1406,15 @@ static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
         int ret;
         int i;
         int len = cmd_length(s);
+        u32 valid_len = CMD_LEN(1);
+
+        /* Flip Type == Stereo 3D Flip */
+        if (DWORD_FIELD(2, 1, 0) == 2)
+                valid_len++;
+        ret = gvt_check_valid_cmd_length(cmd_length(s),
+                        valid_len);
+        if (ret)
+                return ret;
 
         ret = decode_mi_display_flip(s, &info);
         if (ret) {
@@ -1494,12 +1534,21 @@ static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s)
         int op_size = (cmd_length(s) - 3) * sizeof(u32);
         int core_id = (cmd_val(s, 2) & (1 << 0)) ? 1 : 0;
         unsigned long gma, gma_low, gma_high;
+        u32 valid_len = CMD_LEN(2);
         int ret = 0;
 
         /* check ppggt */
         if (!(cmd_val(s, 0) & (1 << 22)))
                 return 0;
 
+        /* check if QWORD */
+        if (DWORD_FIELD(0, 21, 21))
+                valid_len++;
+        ret = gvt_check_valid_cmd_length(cmd_length(s),
+                        valid_len);
+        if (ret)
+                return ret;
+
         gma = cmd_val(s, 2) & GENMASK(31, 2);
 
         if (gmadr_bytes == 8) {
@@ -1542,11 +1591,20 @@ static int cmd_handler_mi_op_2f(struct parser_exec_state *s)
         int op_size = (1 << ((cmd_val(s, 0) & GENMASK(20, 19)) >> 19)) *
                 sizeof(u32);
         unsigned long gma, gma_high;
+        u32 valid_len = CMD_LEN(1);
         int ret = 0;
 
         if (!(cmd_val(s, 0) & (1 << 22)))
                 return ret;
 
+        /* check if QWORD */
+        if (DWORD_FIELD(0, 20, 19) == 1)
+                valid_len += 8;
+        ret = gvt_check_valid_cmd_length(cmd_length(s),
+                        valid_len);
+        if (ret)
+                return ret;
+
         gma = cmd_val(s, 1) & GENMASK(31, 2);
         if (gmadr_bytes == 8) {
                 gma_high = cmd_val(s, 2) & GENMASK(15, 0);
@@ -1584,6 +1642,16 @@ static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
         bool index_mode = false;
         int ret = 0;
         u32 hws_pga, val;
+        u32 valid_len = CMD_LEN(2);
+
+        ret = gvt_check_valid_cmd_length(cmd_length(s),
+                        valid_len);
+        if (ret) {
+                /* Check again for Qword */
+                ret = gvt_check_valid_cmd_length(cmd_length(s),
+                        ++valid_len);
+                return ret;
+        }
 
         /* Check post-sync and ppgtt bit */
         if (((cmd_val(s, 0) >> 14) & 0x3) && (cmd_val(s, 1) & (1 << 2))) {
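MI_FLUSH_DW appears to be one dword longer when its post-sync operation stores a qword rather than a dword, which is why the handler above retries the check with valid_len incremented before deciding the command is malformed. The acceptance rule boils down to something like the following sketch (illustrative helper, not part of the patch):

static int flush_dw_len_ok(int cmd_len, int base_valid_len)
{
        /* base length for a dword post-sync write, +1 for a qword write */
        return cmd_len == base_valid_len || cmd_len == base_valid_len + 1;
}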
@@ -1661,7 +1729,9 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
         return 1;
 }
 
-static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size)
+static int find_bb_size(struct parser_exec_state *s,
+                        unsigned long *bb_size,
+                        unsigned long *bb_end_cmd_offset)
 {
         unsigned long gma = 0;
         const struct cmd_info *info;
@@ -1673,6 +1743,7 @@ static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size)
                 s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm;
 
         *bb_size = 0;
+        *bb_end_cmd_offset = 0;
 
         /* get the start gm address of the batch buffer */
         gma = get_gma_bb_from_cmd(s, 1);
@@ -1708,6 +1779,10 @@ static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size)
                         /* chained batch buffer */
                         bb_end = true;
                 }
+
+                if (bb_end)
+                        *bb_end_cmd_offset = *bb_size;
+
                 cmd_len = get_cmd_length(info, cmd) << 2;
                 *bb_size += cmd_len;
                 gma += cmd_len;
@@ -1716,12 +1791,36 @@ static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size)
         return 0;
 }
 
+static int audit_bb_end(struct parser_exec_state *s, void *va)
+{
+        struct intel_vgpu *vgpu = s->vgpu;
+        u32 cmd = *(u32 *)va;
+        const struct cmd_info *info;
+
+        info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+        if (info == NULL) {
+                gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
+                             cmd, get_opcode(cmd, s->ring_id),
+                             (s->buf_addr_type == PPGTT_BUFFER) ?
+                             "ppgtt" : "ggtt", s->ring_id, s->workload);
+                return -EBADRQC;
+        }
+
+        if ((info->opcode == OP_MI_BATCH_BUFFER_END) ||
+            ((info->opcode == OP_MI_BATCH_BUFFER_START) &&
+             (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0)))
+                return 0;
+
+        return -EBADRQC;
+}
+
 static int perform_bb_shadow(struct parser_exec_state *s)
 {
         struct intel_vgpu *vgpu = s->vgpu;
         struct intel_vgpu_shadow_bb *bb;
         unsigned long gma = 0;
         unsigned long bb_size;
+        unsigned long bb_end_cmd_offset;
         int ret = 0;
         struct intel_vgpu_mm *mm = (s->buf_addr_type == GTT_BUFFER) ?
                 s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm;
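The pairing of bb_end_cmd_offset with audit_bb_end() is the batch-buffer double check mentioned in the tag: find_bb_size() records where the terminating command sat while sizing the guest buffer, and once the contents have been copied into the shadow buffer the same offset is decoded again, so a guest that rewrites the tail between sizing and copying is caught. A sketch of that recheck idea with illustrative names (not the patch's helpers):

/* Verify that the command at the recorded end offset of the *copied*
 * buffer still carries one of the expected terminating opcodes. */
static int end_cmd_still_valid(const unsigned int *shadow_va,
                               unsigned long end_cmd_offset,
                               unsigned int opcode_mask,
                               unsigned int expected_opcode)
{
        unsigned int cmd = shadow_va[end_cmd_offset / sizeof(unsigned int)];

        return (cmd & opcode_mask) == expected_opcode;
}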
@@ -1732,7 +1831,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
         if (gma == INTEL_GVT_INVALID_ADDR)
                 return -EFAULT;
 
-        ret = find_bb_size(s, &bb_size);
+        ret = find_bb_size(s, &bb_size, &bb_end_cmd_offset);
         if (ret)
                 return ret;
 
@@ -1788,6 +1887,10 @@ static int perform_bb_shadow(struct parser_exec_state *s)
                 goto err_unmap;
         }
 
+        ret = audit_bb_end(s, bb->va + start_offset + bb_end_cmd_offset);
+        if (ret)
+                goto err_unmap;
+
         INIT_LIST_HEAD(&bb->list);
         list_add(&bb->list, &s->workload->shadow_bb);
 
@@ -1912,21 +2015,24 @@ static const struct cmd_info cmd_info[] = {
         {"MI_RS_CONTEXT", OP_MI_RS_CONTEXT, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
                 NULL},
 
-        {"MI_DISPLAY_FLIP", OP_MI_DISPLAY_FLIP, F_LEN_VAR | F_POST_HANDLE,
+        {"MI_DISPLAY_FLIP", OP_MI_DISPLAY_FLIP, F_LEN_VAR,
                 R_RCS | R_BCS, D_ALL, 0, 8, cmd_handler_mi_display_flip},
 
-        {"MI_SEMAPHORE_MBOX", OP_MI_SEMAPHORE_MBOX, F_LEN_VAR, R_ALL, D_ALL,
-                0, 8, NULL},
+        {"MI_SEMAPHORE_MBOX", OP_MI_SEMAPHORE_MBOX, F_LEN_VAR | F_LEN_VAR_FIXED,
+                R_ALL, D_ALL, 0, 8, NULL, CMD_LEN(1)},
 
         {"MI_MATH", OP_MI_MATH, F_LEN_VAR, R_ALL, D_ALL, 0, 8, NULL},
 
-        {"MI_URB_CLEAR", OP_MI_URB_CLEAR, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+        {"MI_URB_CLEAR", OP_MI_URB_CLEAR, F_LEN_VAR | F_LEN_VAR_FIXED, R_RCS,
+                D_ALL, 0, 8, NULL, CMD_LEN(0)},
 
-        {"MI_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL, F_LEN_VAR, R_ALL,
-                D_BDW_PLUS, 0, 8, NULL},
+        {"MI_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL,
+                F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS, 0, 8,
+                NULL, CMD_LEN(0)},
 
-        {"MI_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT, F_LEN_VAR, R_ALL,
-                D_BDW_PLUS, ADDR_FIX_1(2), 8, cmd_handler_mi_semaphore_wait},
+        {"MI_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT,
+                F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS, ADDR_FIX_1(2),
+                8, cmd_handler_mi_semaphore_wait, CMD_LEN(2)},
 
         {"MI_STORE_DATA_IMM", OP_MI_STORE_DATA_IMM, F_LEN_VAR, R_ALL, D_BDW_PLUS,
                 ADDR_FIX_1(1), 10, cmd_handler_mi_store_data_imm},
@@ -1940,8 +2046,9 @@ static const struct cmd_info cmd_info[] = {
         {"MI_UPDATE_GTT", OP_MI_UPDATE_GTT, F_LEN_VAR, R_ALL, D_BDW_PLUS, 0, 10,
                 cmd_handler_mi_update_gtt},
 
-        {"MI_STORE_REGISTER_MEM", OP_MI_STORE_REGISTER_MEM, F_LEN_VAR, R_ALL,
-                D_ALL, ADDR_FIX_1(2), 8, cmd_handler_srm},
+        {"MI_STORE_REGISTER_MEM", OP_MI_STORE_REGISTER_MEM,
+                F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
+                cmd_handler_srm, CMD_LEN(2)},
 
         {"MI_FLUSH_DW", OP_MI_FLUSH_DW, F_LEN_VAR, R_ALL, D_ALL, 0, 6,
                 cmd_handler_mi_flush_dw},
@@ -1949,26 +2056,30 @@ static const struct cmd_info cmd_info[] = {
         {"MI_CLFLUSH", OP_MI_CLFLUSH, F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(1),
                 10, cmd_handler_mi_clflush},
 
-        {"MI_REPORT_PERF_COUNT", OP_MI_REPORT_PERF_COUNT, F_LEN_VAR, R_ALL,
-                D_ALL, ADDR_FIX_1(1), 6, cmd_handler_mi_report_perf_count},
+        {"MI_REPORT_PERF_COUNT", OP_MI_REPORT_PERF_COUNT,
+                F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(1), 6,
+                cmd_handler_mi_report_perf_count, CMD_LEN(2)},
 
-        {"MI_LOAD_REGISTER_MEM", OP_MI_LOAD_REGISTER_MEM, F_LEN_VAR, R_ALL,
-                D_ALL, ADDR_FIX_1(2), 8, cmd_handler_lrm},
+        {"MI_LOAD_REGISTER_MEM", OP_MI_LOAD_REGISTER_MEM,
+                F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
+                cmd_handler_lrm, CMD_LEN(2)},
 
-        {"MI_LOAD_REGISTER_REG", OP_MI_LOAD_REGISTER_REG, F_LEN_VAR, R_ALL,
-                D_ALL, 0, 8, cmd_handler_lrr},
+        {"MI_LOAD_REGISTER_REG", OP_MI_LOAD_REGISTER_REG,
+                F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, 0, 8,
+                cmd_handler_lrr, CMD_LEN(1)},
 
-        {"MI_RS_STORE_DATA_IMM", OP_MI_RS_STORE_DATA_IMM, F_LEN_VAR, R_RCS,
-                D_ALL, 0, 8, NULL},
+        {"MI_RS_STORE_DATA_IMM", OP_MI_RS_STORE_DATA_IMM,
+                F_LEN_VAR | F_LEN_VAR_FIXED, R_RCS, D_ALL, 0,
+                8, NULL, CMD_LEN(2)},
 
-        {"MI_LOAD_URB_MEM", OP_MI_LOAD_URB_MEM, F_LEN_VAR, R_RCS, D_ALL,
-                ADDR_FIX_1(2), 8, NULL},
+        {"MI_LOAD_URB_MEM", OP_MI_LOAD_URB_MEM, F_LEN_VAR | F_LEN_VAR_FIXED,
+                R_RCS, D_ALL, ADDR_FIX_1(2), 8, NULL, CMD_LEN(2)},
 
         {"MI_STORE_URM_MEM", OP_MI_STORE_URM_MEM, F_LEN_VAR, R_RCS, D_ALL,
                 ADDR_FIX_1(2), 8, NULL},
 
-        {"MI_OP_2E", OP_MI_2E, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_2(1, 2),
-                8, cmd_handler_mi_op_2e},
+        {"MI_OP_2E", OP_MI_2E, F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS,
+                ADDR_FIX_2(1, 2), 8, cmd_handler_mi_op_2e, CMD_LEN(3)},
 
         {"MI_OP_2F", OP_MI_2F, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_1(1),
                 8, cmd_handler_mi_op_2f},
@@ -1978,8 +2089,8 @@ static const struct cmd_info cmd_info[] = {
                 cmd_handler_mi_batch_buffer_start},
 
         {"MI_CONDITIONAL_BATCH_BUFFER_END", OP_MI_CONDITIONAL_BATCH_BUFFER_END,
-                F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
-                cmd_handler_mi_conditional_batch_buffer_end},
+                F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
+                cmd_handler_mi_conditional_batch_buffer_end, CMD_LEN(2)},
 
         {"MI_LOAD_SCAN_LINES_INCL", OP_MI_LOAD_SCAN_LINES_INCL, F_LEN_CONST,
                 R_RCS | R_BCS, D_ALL, 0, 2, NULL},
@@ -2569,6 +2680,13 @@ static int cmd_parser_exec(struct parser_exec_state *s)
                 cmd_length(s), s->buf_type, s->buf_addr_type,
                 s->workload, info->name);
 
+        if ((info->flag & F_LEN_MASK) == F_LEN_VAR_FIXED) {
+                ret = gvt_check_valid_cmd_length(cmd_length(s),
+                        info->valid_len);
+                if (ret)
+                        return ret;
+        }
+
         if (info->handler) {
                 ret = info->handler(s);
                 if (ret < 0) {
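With F_LEN_MASK widened to two bits, a command marked F_LEN_VAR | F_LEN_VAR_FIXED reports a variable length field but is fixed-size in practice, so cmd_parser_exec() can validate it generically against info->valid_len; truly variable commands keep the per-handler checks added earlier in this series. The dispatch rule reduces to the following sketch (flag values as defined in the first hunk):

#define F_LEN_MASK      3U
#define F_LEN_CONST     1U
#define F_LEN_VAR       0U
#define F_LEN_VAR_FIXED (1 << 1)

/* Only "variable but fixed in practice" commands take the generic check. */
static int needs_generic_len_check(unsigned int flag)
{
        return (flag & F_LEN_MASK) == F_LEN_VAR_FIXED;
}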
@@ -189,36 +189,19 @@ DEFINE_SIMPLE_ATTRIBUTE(vgpu_scan_nonprivbb_fops,
 /**
  * intel_gvt_debugfs_add_vgpu - register debugfs entries for a vGPU
  * @vgpu: a vGPU
- *
- * Returns:
- * Zero on success, negative error code if failed.
  */
-int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu)
+void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu)
 {
-        struct dentry *ent;
         char name[16] = "";
 
         snprintf(name, 16, "vgpu%d", vgpu->id);
         vgpu->debugfs = debugfs_create_dir(name, vgpu->gvt->debugfs_root);
-        if (!vgpu->debugfs)
-                return -ENOMEM;
 
-        ent = debugfs_create_bool("active", 0444, vgpu->debugfs,
-                        &vgpu->active);
-        if (!ent)
-                return -ENOMEM;
-
-        ent = debugfs_create_file("mmio_diff", 0444, vgpu->debugfs,
-                        vgpu, &vgpu_mmio_diff_fops);
-        if (!ent)
-                return -ENOMEM;
-
-        ent = debugfs_create_file("scan_nonprivbb", 0644, vgpu->debugfs,
-                        vgpu, &vgpu_scan_nonprivbb_fops);
-        if (!ent)
-                return -ENOMEM;
-
-        return 0;
+        debugfs_create_bool("active", 0444, vgpu->debugfs, &vgpu->active);
+        debugfs_create_file("mmio_diff", 0444, vgpu->debugfs, vgpu,
+                            &vgpu_mmio_diff_fops);
+        debugfs_create_file("scan_nonprivbb", 0644, vgpu->debugfs, vgpu,
+                            &vgpu_scan_nonprivbb_fops);
 }
 
 /**
@@ -234,27 +217,15 @@ void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu)
 /**
  * intel_gvt_debugfs_init - register gvt debugfs root entry
  * @gvt: GVT device
- *
- * Returns:
- * zero on success, negative if failed.
  */
-int intel_gvt_debugfs_init(struct intel_gvt *gvt)
+void intel_gvt_debugfs_init(struct intel_gvt *gvt)
 {
         struct drm_minor *minor = gvt->dev_priv->drm.primary;
-        struct dentry *ent;
 
         gvt->debugfs_root = debugfs_create_dir("gvt", minor->debugfs_root);
-        if (!gvt->debugfs_root) {
-                gvt_err("Cannot create debugfs dir\n");
-                return -ENOMEM;
-        }
 
-        ent = debugfs_create_ulong("num_tracked_mmio", 0444, gvt->debugfs_root,
-                        &gvt->mmio.num_tracked_mmio);
-        if (!ent)
-                return -ENOMEM;
-
-        return 0;
+        debugfs_create_ulong("num_tracked_mmio", 0444, gvt->debugfs_root,
+                             &gvt->mmio.num_tracked_mmio);
 }
 
 /**
@@ -375,9 +375,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
         }
         gvt->idle_vgpu = vgpu;
 
-        ret = intel_gvt_debugfs_init(gvt);
-        if (ret)
-                gvt_err("debugfs registration failed, go on.\n");
+        intel_gvt_debugfs_init(gvt);
 
         gvt_dbg_core("gvt device initialization is done\n");
         dev_priv->gvt = gvt;
@@ -334,6 +334,10 @@ struct intel_gvt {
         struct {
                 struct engine_mmio *mmio;
                 int ctx_mmio_count[I915_NUM_ENGINES];
+                u32 *tlb_mmio_offset_list;
+                u32 tlb_mmio_offset_list_cnt;
+                u32 *mocs_mmio_offset_list;
+                u32 mocs_mmio_offset_list_cnt;
         } engine_mmio_list;
 
         struct dentry *debugfs_root;
@@ -682,9 +686,9 @@ static inline void intel_gvt_mmio_set_in_ctx(
         gvt->mmio.mmio_attribute[offset >> 2] |= F_IN_CTX;
 }
 
-int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
+void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
-int intel_gvt_debugfs_init(struct intel_gvt *gvt);
+void intel_gvt_debugfs_init(struct intel_gvt *gvt);
 void intel_gvt_debugfs_clean(struct intel_gvt *gvt);
 
 
@@ -672,7 +672,7 @@ void intel_gvt_clean_irq(struct intel_gvt *gvt)
         hrtimer_cancel(&irq->vblank_timer.timer);
 }
 
-#define VBLNAK_TIMER_PERIOD 16000000
+#define VBLANK_TIMER_PERIOD 16000000
 
 /**
  * intel_gvt_init_irq - initialize GVT-g IRQ emulation subsystem
@@ -704,7 +704,7 @@ int intel_gvt_init_irq(struct intel_gvt *gvt)
 
         hrtimer_init(&vblank_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
         vblank_timer->timer.function = vblank_timer_fn;
-        vblank_timer->period = VBLNAK_TIMER_PERIOD;
+        vblank_timer->period = VBLANK_TIMER_PERIOD;
 
         return 0;
 }
@@ -1306,7 +1306,6 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
                 unsigned int i;
                 int ret;
                 struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
-                size_t size;
                 int nr_areas = 1;
                 int cap_type_id;
 
@@ -1349,9 +1348,8 @@
                         VFIO_REGION_INFO_FLAG_WRITE;
                 info.size = gvt_aperture_sz(vgpu->gvt);
 
-                size = sizeof(*sparse) +
-                                (nr_areas * sizeof(*sparse->areas));
-                sparse = kzalloc(size, GFP_KERNEL);
+                sparse = kzalloc(struct_size(sparse, areas, nr_areas),
+                                 GFP_KERNEL);
                 if (!sparse)
                         return -ENOMEM;
 
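struct_size(sparse, areas, nr_areas) computes the same sizeof(*sparse) + nr_areas * sizeof(*sparse->areas) total that the removed lines spelled out, with the advantage that the kernel helper saturates on overflow instead of wrapping. A userspace stand-in (not the kernel's overflow-checked version) of what is being computed:

#include <stddef.h>

struct sparse_stub {
        size_t nr_areas;
        struct { unsigned long offset, size; } areas[]; /* flexible array */
};

static size_t sparse_alloc_size(size_t nr_areas)
{
        return sizeof(struct sparse_stub) +
               nr_areas * sizeof(((struct sparse_stub *)0)->areas[0]);
}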
@@ -1416,9 +1414,9 @@
                 switch (cap_type_id) {
                 case VFIO_REGION_INFO_CAP_SPARSE_MMAP:
                         ret = vfio_info_add_capability(&caps,
-                                &sparse->header, sizeof(*sparse) +
-                                        (sparse->nr_areas *
-                                                sizeof(*sparse->areas)));
+                                &sparse->header,
+                                struct_size(sparse, areas,
+                                            sparse->nr_areas));
                         if (ret) {
                                 kfree(sparse);
                                 return ret;
@@ -1798,9 +1796,6 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
                                 "kvmgt_nr_cache_entries",
                                 0444, vgpu->debugfs,
                                 &vgpu->vdev.nr_cache_entries);
-        if (!info->debugfs_cache_entries)
-                gvt_vgpu_err("Cannot create kvmgt debugfs entry\n");
-
         return 0;
 }
 
@@ -148,19 +148,27 @@ static struct {
         u32 l3cc_table[GEN9_MOCS_SIZE / 2];
 } gen9_render_mocs;
 
+static u32 gen9_mocs_mmio_offset_list[] = {
+        [RCS0]  = 0xc800,
+        [VCS0]  = 0xc900,
+        [VCS1]  = 0xca00,
+        [BCS0]  = 0xcc00,
+        [VECS0] = 0xcb00,
+};
+
 static void load_render_mocs(struct drm_i915_private *dev_priv)
 {
+        struct intel_gvt *gvt = dev_priv->gvt;
         i915_reg_t offset;
-        u32 regs[] = {
-                [RCS0]  = 0xc800,
-                [VCS0]  = 0xc900,
-                [VCS1]  = 0xca00,
-                [BCS0]  = 0xcc00,
-                [VECS0] = 0xcb00,
-        };
+        u32 cnt = gvt->engine_mmio_list.mocs_mmio_offset_list_cnt;
+        u32 *regs = gvt->engine_mmio_list.mocs_mmio_offset_list;
         int ring_id, i;
 
-        for (ring_id = 0; ring_id < ARRAY_SIZE(regs); ring_id++) {
+        /* Platform doesn't have mocs mmios. */
+        if (!regs)
+                return;
+
+        for (ring_id = 0; ring_id < cnt; ring_id++) {
                 if (!HAS_ENGINE(dev_priv, ring_id))
                         continue;
                 offset.reg = regs[ring_id];
@@ -327,22 +335,28 @@ int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
         return ret;
 }
 
+static u32 gen8_tlb_mmio_offset_list[] = {
+        [RCS0]  = 0x4260,
+        [VCS0]  = 0x4264,
+        [VCS1]  = 0x4268,
+        [BCS0]  = 0x426c,
+        [VECS0] = 0x4270,
+};
+
 static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
 {
         struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
         struct intel_uncore *uncore = &dev_priv->uncore;
         struct intel_vgpu_submission *s = &vgpu->submission;
+        u32 *regs = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list;
+        u32 cnt = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list_cnt;
         enum forcewake_domains fw;
         i915_reg_t reg;
-        u32 regs[] = {
-                [RCS0]  = 0x4260,
-                [VCS0]  = 0x4264,
-                [VCS1]  = 0x4268,
-                [BCS0]  = 0x426c,
-                [VECS0] = 0x4270,
-        };
 
-        if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
+        if (!regs)
                 return;
 
+        if (WARN_ON(ring_id >= cnt))
+                return;
+
         if (!test_and_clear_bit(ring_id, (void *)s->tlb_handle_pending))
@@ -565,10 +579,17 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
 {
         struct engine_mmio *mmio;
 
-        if (INTEL_GEN(gvt->dev_priv) >= 9)
+        if (INTEL_GEN(gvt->dev_priv) >= 9) {
                 gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
-        else
+                gvt->engine_mmio_list.tlb_mmio_offset_list = gen8_tlb_mmio_offset_list;
+                gvt->engine_mmio_list.tlb_mmio_offset_list_cnt = ARRAY_SIZE(gen8_tlb_mmio_offset_list);
+                gvt->engine_mmio_list.mocs_mmio_offset_list = gen9_mocs_mmio_offset_list;
+                gvt->engine_mmio_list.mocs_mmio_offset_list_cnt = ARRAY_SIZE(gen9_mocs_mmio_offset_list);
+        } else {
                 gvt->engine_mmio_list.mmio = gen8_engine_mmio_list;
+                gvt->engine_mmio_list.tlb_mmio_offset_list = gen8_tlb_mmio_offset_list;
+                gvt->engine_mmio_list.tlb_mmio_offset_list_cnt = ARRAY_SIZE(gen8_tlb_mmio_offset_list);
+        }
 
         for (mmio = gvt->engine_mmio_list.mmio;
              i915_mmio_reg_valid(mmio->reg); mmio++) {
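Hoisting the per-engine offset tables into struct intel_gvt lets both users fetch them through a pointer plus a stored count, which is what the klocwork fix is after: the ring index is bounds-checked against the real table size, and a NULL table simply means the platform has no such registers. The lookup pattern amounts to this sketch (names are illustrative, not from the patch):

static int lookup_engine_mmio_offset(const unsigned int *offsets,
                                     unsigned int count,
                                     unsigned int ring_id,
                                     unsigned int *out)
{
        if (!offsets || ring_id >= count)
                return -1;      /* no table on this platform, or bad index */

        *out = offsets[ring_id];
        return 0;
}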
@@ -420,9 +420,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
         if (ret)
                 goto out_clean_submission;
 
-        ret = intel_gvt_debugfs_add_vgpu(vgpu);
-        if (ret)
-                goto out_clean_sched_policy;
+        intel_gvt_debugfs_add_vgpu(vgpu);
 
         ret = intel_gvt_hypervisor_set_opregion(vgpu);
         if (ret)