@@ -817,6 +817,25 @@ static bool is_shadowed_mmio(unsigned int offset)
 	return ret;
 }
 
+static inline bool is_force_nonpriv_mmio(unsigned int offset)
+{
+	return (offset >= 0x24d0 && offset < 0x2500);
+}
+
+static int force_nonpriv_reg_handler(struct parser_exec_state *s,
+		unsigned int offset, unsigned int index)
+{
+	struct intel_gvt *gvt = s->vgpu->gvt;
+	unsigned int data = cmd_val(s, index + 1);
+
+	if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data)) {
+		gvt_err("Unexpected forcenonpriv 0x%x LRI write, value=0x%x\n",
+			offset, data);
+		return -EINVAL;
+	}
+	return 0;
+}
+
 static int cmd_reg_handler(struct parser_exec_state *s,
 		unsigned int offset, unsigned int index, char *cmd)
 {
@@ -824,23 +843,26 @@ static int cmd_reg_handler(struct parser_exec_state *s,
 	struct intel_gvt *gvt = vgpu->gvt;
 
 	if (offset + 4 > gvt->device_info.mmio_size) {
-		gvt_err("%s access to (%x) outside of MMIO range\n",
+		gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
 				cmd, offset);
 		return -EINVAL;
 	}
 
 	if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
-		gvt_err("vgpu%d: %s access to non-render register (%x)\n",
-				s->vgpu->id, cmd, offset);
+		gvt_vgpu_err("%s access to non-render register (%x)\n",
+				cmd, offset);
 		return 0;
 	}
 
 	if (is_shadowed_mmio(offset)) {
-		gvt_err("vgpu%d: found access of shadowed MMIO %x\n",
-				s->vgpu->id, offset);
+		gvt_vgpu_err("found access of shadowed MMIO %x\n", offset);
 		return 0;
 	}
 
+	if (is_force_nonpriv_mmio(offset) &&
+	    force_nonpriv_reg_handler(s, offset, index))
+		return -EINVAL;
+
 	if (offset == i915_mmio_reg_offset(DERRMR) ||
 		offset == i915_mmio_reg_offset(FORCEWAKE_MT)) {
 		/* Writing to HW VGT_PVINFO_PAGE offset will be discarded */
@@ -1008,7 +1030,7 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s)
 		ret = cmd_reg_handler(s, 0x2358, 1, "pipe_ctrl");
 	else if (post_sync == 1) {
 		/* check ggtt*/
-		if ((cmd_val(s, 2) & (1 << 2))) {
+		if ((cmd_val(s, 1) & PIPE_CONTROL_GLOBAL_GTT_IVB)) {
 			gma = cmd_val(s, 2) & GENMASK(31, 3);
 			if (gmadr_bytes == 8)
 				gma |= (cmd_gma_hi(s, 3)) << 32;
@@ -1129,6 +1151,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
 		struct mi_display_flip_command_info *info)
 {
 	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+	struct intel_vgpu *vgpu = s->vgpu;
 	u32 dword0 = cmd_val(s, 0);
 	u32 dword1 = cmd_val(s, 1);
 	u32 dword2 = cmd_val(s, 2);
@@ -1167,7 +1190,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
 		break;
 
 	default:
-		gvt_err("unknown plane code %d\n", plane);
+		gvt_vgpu_err("unknown plane code %d\n", plane);
 		return -EINVAL;
 	}
 
@@ -1274,25 +1297,26 @@ static int update_plane_mmio_from_mi_display_flip(
 static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
 {
 	struct mi_display_flip_command_info info;
+	struct intel_vgpu *vgpu = s->vgpu;
 	int ret;
 	int i;
 	int len = cmd_length(s);
 
 	ret = decode_mi_display_flip(s, &info);
 	if (ret) {
-		gvt_err("fail to decode MI display flip command\n");
+		gvt_vgpu_err("fail to decode MI display flip command\n");
 		return ret;
 	}
 
 	ret = check_mi_display_flip(s, &info);
 	if (ret) {
-		gvt_err("invalid MI display flip command\n");
+		gvt_vgpu_err("invalid MI display flip command\n");
 		return ret;
 	}
 
 	ret = update_plane_mmio_from_mi_display_flip(s, &info);
 	if (ret) {
-		gvt_err("fail to update plane mmio\n");
+		gvt_vgpu_err("fail to update plane mmio\n");
 		return ret;
 	}
 
@@ -1350,7 +1374,8 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
 	int ret;
 
 	if (op_size > max_surface_size) {
-		gvt_err("command address audit fail name %s\n", s->info->name);
+		gvt_vgpu_err("command address audit fail name %s\n",
+				s->info->name);
 		return -EINVAL;
 	}
 
@@ -1367,7 +1392,7 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
 	}
 	return 0;
 err:
-	gvt_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
+	gvt_vgpu_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
 			s->info->name, guest_gma, op_size);
 
 	pr_err("cmd dump: ");
@@ -1412,8 +1437,10 @@ static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s)
 
 static inline int unexpected_cmd(struct parser_exec_state *s)
 {
-	gvt_err("vgpu%d: Unexpected %s in command buffer!\n",
-			s->vgpu->id, s->info->name);
+	struct intel_vgpu *vgpu = s->vgpu;
+
+	gvt_vgpu_err("Unexpected %s in command buffer!\n", s->info->name);
+
 	return -EINVAL;
 }
 
@@ -1516,7 +1543,7 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
 	while (gma != end_gma) {
 		gpa = intel_vgpu_gma_to_gpa(mm, gma);
 		if (gpa == INTEL_GVT_INVALID_ADDR) {
-			gvt_err("invalid gma address: %lx\n", gma);
+			gvt_vgpu_err("invalid gma address: %lx\n", gma);
 			return -EFAULT;
 		}
 
@@ -1557,6 +1584,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
 	uint32_t bb_size = 0;
 	uint32_t cmd_len = 0;
 	bool met_bb_end = false;
+	struct intel_vgpu *vgpu = s->vgpu;
 	u32 cmd;
 
 	/* get the start gm address of the batch buffer */
@@ -1565,7 +1593,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
 
 	info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
 	if (info == NULL) {
-		gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
+		gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
 			cmd, get_opcode(cmd, s->ring_id));
 		return -EINVAL;
 	}
@@ -1574,7 +1602,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
 			gma, gma + 4, &cmd);
 		info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
 		if (info == NULL) {
-			gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
+			gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
 				cmd, get_opcode(cmd, s->ring_id));
 			return -EINVAL;
 		}
@@ -1599,6 +1627,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s)
 static int perform_bb_shadow(struct parser_exec_state *s)
 {
 	struct intel_shadow_bb_entry *entry_obj;
+	struct intel_vgpu *vgpu = s->vgpu;
 	unsigned long gma = 0;
 	uint32_t bb_size;
 	void *dst = NULL;
@@ -1633,7 +1662,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 
 	ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false);
 	if (ret) {
-		gvt_err("failed to set shadow batch to CPU\n");
+		gvt_vgpu_err("failed to set shadow batch to CPU\n");
 		goto unmap_src;
 	}
 
@@ -1644,8 +1673,8 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 	ret = copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
 			      gma, gma + bb_size,
 			      dst);
-	if (ret < 0) {
-		gvt_err("fail to copy guest ring buffer\n");
+	if (ret) {
+		gvt_vgpu_err("fail to copy guest ring buffer\n");
 		goto unmap_src;
 	}
 
@@ -1676,15 +1705,16 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
 {
 	bool second_level;
 	int ret = 0;
+	struct intel_vgpu *vgpu = s->vgpu;
 
 	if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
-		gvt_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
+		gvt_vgpu_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
 		return -EINVAL;
 	}
 
 	second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1;
 	if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) {
-		gvt_err("Jumping to 2nd level BB from RB is not allowed\n");
+		gvt_vgpu_err("Jumping to 2nd level BB from RB is not allowed\n");
 		return -EINVAL;
 	}
 
@@ -1702,7 +1732,7 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
 	if (batch_buffer_needs_scan(s)) {
 		ret = perform_bb_shadow(s);
 		if (ret < 0)
-			gvt_err("invalid shadow batch buffer\n");
+			gvt_vgpu_err("invalid shadow batch buffer\n");
 	} else {
 		/* emulate a batch buffer end to do return right */
 		ret = cmd_handler_mi_batch_buffer_end(s);
@@ -2429,6 +2459,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
 	int ret = 0;
 	cycles_t t0, t1, t2;
 	struct parser_exec_state s_before_advance_custom;
+	struct intel_vgpu *vgpu = s->vgpu;
 
 	t0 = get_cycles();
 
@@ -2436,7 +2467,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
 
 	info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
 	if (info == NULL) {
-		gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
+		gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
 			cmd, get_opcode(cmd, s->ring_id));
 		return -EINVAL;
 	}
@@ -2452,7 +2483,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
 	if (info->handler) {
 		ret = info->handler(s);
 		if (ret < 0) {
-			gvt_err("%s handler error\n", info->name);
+			gvt_vgpu_err("%s handler error\n", info->name);
 			return ret;
 		}
 	}
@@ -2463,7 +2494,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
 	if (!(info->flag & F_IP_ADVANCE_CUSTOM)) {
 		ret = cmd_advance_default(s);
 		if (ret) {
-			gvt_err("%s IP advance error\n", info->name);
+			gvt_vgpu_err("%s IP advance error\n", info->name);
 			return ret;
 		}
 	}
@@ -2486,6 +2517,7 @@ static int command_scan(struct parser_exec_state *s,
 
 	unsigned long gma_head, gma_tail, gma_bottom;
 	int ret = 0;
+	struct intel_vgpu *vgpu = s->vgpu;
 
 	gma_head = rb_start + rb_head;
 	gma_tail = rb_start + rb_tail;
@@ -2497,7 +2529,7 @@ static int command_scan(struct parser_exec_state *s,
 	if (s->buf_type == RING_BUFFER_INSTRUCTION) {
 		if (!(s->ip_gma >= rb_start) ||
 			!(s->ip_gma < gma_bottom)) {
-			gvt_err("ip_gma %lx out of ring scope."
+			gvt_vgpu_err("ip_gma %lx out of ring scope."
 				"(base:0x%lx, bottom: 0x%lx)\n",
 				s->ip_gma, rb_start,
 				gma_bottom);
@@ -2505,7 +2537,7 @@ static int command_scan(struct parser_exec_state *s,
 			return -EINVAL;
 		}
 		if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) {
-			gvt_err("ip_gma %lx out of range."
+			gvt_vgpu_err("ip_gma %lx out of range."
 				"base 0x%lx head 0x%lx tail 0x%lx\n",
 				s->ip_gma, rb_start,
 				rb_head, rb_tail);
@@ -2515,7 +2547,7 @@ static int command_scan(struct parser_exec_state *s,
 		}
 		ret = cmd_parser_exec(s);
 		if (ret) {
-			gvt_err("cmd parser error\n");
+			gvt_vgpu_err("cmd parser error\n");
 			parser_exec_state_dump(s);
 			break;
 		}
@@ -2634,8 +2666,8 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
 	if (gma_head > gma_tail) {
 		ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
 				      gma_head, gma_top, cs);
-		if (ret < 0) {
-			gvt_err("fail to copy guest ring buffer\n");
+		if (ret) {
+			gvt_vgpu_err("fail to copy guest ring buffer\n");
 			return ret;
 		}
 		cs += ret / sizeof(u32);
@@ -2644,8 +2676,8 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
 
 	/* copy head or start <-> tail */
 	ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail, cs);
-	if (ret < 0) {
-		gvt_err("fail to copy guest ring buffer\n");
+	if (ret) {
+		gvt_vgpu_err("fail to copy guest ring buffer\n");
 		return ret;
 	}
 	cs += ret / sizeof(u32);
@@ -2656,16 +2688,17 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
 int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 {
 	int ret;
+	struct intel_vgpu *vgpu = workload->vgpu;
 
 	ret = shadow_workload_ring_buffer(workload);
 	if (ret) {
-		gvt_err("fail to shadow workload ring_buffer\n");
+		gvt_vgpu_err("fail to shadow workload ring_buffer\n");
 		return ret;
 	}
 
 	ret = scan_workload(workload);
 	if (ret) {
-		gvt_err("scan workload error\n");
+		gvt_vgpu_err("scan workload error\n");
 		return ret;
 	}
 	return 0;
@@ -2675,6 +2708,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 {
 	int ctx_size = wa_ctx->indirect_ctx.size;
 	unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
+	struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
 	struct drm_i915_gem_object *obj;
 	int ret = 0;
 	void *map;
@@ -2688,14 +2722,14 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	/* get the va of the shadow batch buffer */
 	map = i915_gem_object_pin_map(obj, I915_MAP_WB);
 	if (IS_ERR(map)) {
-		gvt_err("failed to vmap shadow indirect ctx\n");
+		gvt_vgpu_err("failed to vmap shadow indirect ctx\n");
 		ret = PTR_ERR(map);
 		goto put_obj;
 	}
 
 	ret = i915_gem_object_set_to_cpu_domain(obj, false);
 	if (ret) {
-		gvt_err("failed to set shadow indirect ctx to CPU\n");
+		gvt_vgpu_err("failed to set shadow indirect ctx to CPU\n");
 		goto unmap_src;
 	}
 
@@ -2703,8 +2737,8 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 			wa_ctx->workload->vgpu->gtt.ggtt_mm,
 			guest_gma, guest_gma + ctx_size,
 			map);
-	if (ret < 0) {
-		gvt_err("fail to copy guest indirect ctx\n");
+	if (ret) {
+		gvt_vgpu_err("fail to copy guest indirect ctx\n");
 		goto unmap_src;
 	}
 
@@ -2738,13 +2772,14 @@ static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 {
 	int ret;
+	struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
 
 	if (wa_ctx->indirect_ctx.size == 0)
 		return 0;
 
 	ret = shadow_indirect_ctx(wa_ctx);
 	if (ret) {
-		gvt_err("fail to shadow indirect ctx\n");
+		gvt_vgpu_err("fail to shadow indirect ctx\n");
 		return ret;
 	}
 
@@ -2752,7 +2787,7 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 
 	ret = scan_wa_ctx(wa_ctx);
 	if (ret) {
-		gvt_err("scan wa ctx error\n");
+		gvt_vgpu_err("scan wa ctx error\n");
 		return ret;
 	}
 