@@ -864,9 +864,9 @@ void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu)
 
 	clean_workloads(vgpu, ALL_ENGINES);
 
 	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
-		kfree(vgpu->reserve_ring_buffer_va[i]);
-		vgpu->reserve_ring_buffer_va[i] = NULL;
-		vgpu->reserve_ring_buffer_size[i] = 0;
+		kfree(vgpu->ring_scan_buffer[i]);
+		vgpu->ring_scan_buffer[i] = NULL;
+		vgpu->ring_scan_buffer_size[i] = 0;
 	}
 }
@@ -881,21 +881,21 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
 
 	/* each ring has a shadow ring buffer until vgpu destroyed */
 	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
-		vgpu->reserve_ring_buffer_va[i] =
+		vgpu->ring_scan_buffer[i] =
 			kmalloc(RESERVE_RING_BUFFER_SIZE, GFP_KERNEL);
-		if (!vgpu->reserve_ring_buffer_va[i]) {
-			gvt_vgpu_err("fail to alloc reserve ring buffer\n");
+		if (!vgpu->ring_scan_buffer[i]) {
+			gvt_vgpu_err("fail to alloc ring scan buffer\n");
 			goto out;
 		}
-		vgpu->reserve_ring_buffer_size[i] = RESERVE_RING_BUFFER_SIZE;
+		vgpu->ring_scan_buffer_size[i] = RESERVE_RING_BUFFER_SIZE;
 	}
 	return 0;
 out:
 	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
-		if (vgpu->reserve_ring_buffer_size[i]) {
-			kfree(vgpu->reserve_ring_buffer_va[i]);
-			vgpu->reserve_ring_buffer_va[i] = NULL;
-			vgpu->reserve_ring_buffer_size[i] = 0;
+		if (vgpu->ring_scan_buffer_size[i]) {
+			kfree(vgpu->ring_scan_buffer[i]);
+			vgpu->ring_scan_buffer[i] = NULL;
+			vgpu->ring_scan_buffer_size[i] = 0;
 		}
 	}
 	return -ENOMEM;