Only GVT fixes:

- Two warning fixes for runtime PM and user copy (Xiong, Zhenyu)
- OA context fix for vGPU profiling (Min)
- Privileged batch buffer relocation fix (Fred)

-----BEGIN PGP SIGNATURE-----

iQEcBAABAgAGBQJaqwRYAAoJEPpiX2QO6xPKZT0H/30rwEPNL0Z9LMrZY/WC4Yr8
0Hbc7jn0pYgCV+Cm8E1OPwQv2lqZMx5VAPV7ggQqaVjQam629NVODVUVNyMorQ7l
uy3kbi3vAcazuvaApUchMDppfl919FP5QXFKaEm8HU3C8oROaKO6lZKc6OJFd1Bu
eNLwg3FqI0JXRUqfggzyaqtpV8bMXzLVeQh98wlGbpkEF0yvXlTPProLJx4WX2kq
/8OULi2k7g9QxOd8S9l3TdSHpOJKPzoDecvGW6WFM+0q5POs+Ybk69yU32irt/Fk
2dNbqn8GgPXTkFMaBzlBFqq6Kgh4y1b6eQDjQC1oZGOyjK/HwtNteZHREEzg15A=
=fg6j
-----END PGP SIGNATURE-----

Merge tag 'drm-intel-fixes-2018-03-15' of git://anongit.freedesktop.org/drm/drm-intel into drm-fixes

* tag 'drm-intel-fixes-2018-03-15' of git://anongit.freedesktop.org/drm/drm-intel:
  drm/i915/gvt: fix user copy warning by whitelist workload rb_tail field
  drm/i915/gvt: Correct the privilege shadow batch buffer address
  drm/i915/gvt: keep oa config in shadow ctx
  drm/i915/gvt: Add runtime_pm_get/put into gvt_switch_mmio
commit 3a1b5de36f

4 changed files with 82 additions and 4 deletions
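For context on the user-copy fix in the diff below: the warning comes from hardened usercopy, and the fix whitelists the single workload field that is copied to/from user space. The following is a minimal sketch of that slab-whitelist pattern, not the GVT code itself; struct demo_obj, demo_cache, demo_cache_init and private_state are invented names for illustration, while rb_tail mirrors the field whitelisted in the real change.

```c
/*
 * Sketch: create a slab cache whose objects may only be copied to/from
 * user space within one whitelisted field (hardened usercopy).
 */
#include <linux/slab.h>
#include <linux/stddef.h>	/* offsetof(), sizeof_field() */
#include <linux/types.h>
#include <linux/errno.h>

struct demo_obj {
	unsigned long private_state;	/* must never reach user space */
	u32 rb_tail;			/* only field user copies may touch */
};

static struct kmem_cache *demo_cache;

static int demo_cache_init(void)
{
	/*
	 * The (useroffset, usersize) pair defines the usercopy region;
	 * copy_{to,from}_user() on any other part of the object triggers
	 * a hardened-usercopy warning.
	 */
	demo_cache = kmem_cache_create_usercopy("demo_obj",
			sizeof(struct demo_obj), 0,
			SLAB_HWCACHE_ALIGN,
			offsetof(struct demo_obj, rb_tail),
			sizeof_field(struct demo_obj, rb_tail),
			NULL);
	return demo_cache ? 0 : -ENOMEM;
}
```

The GVT hunk in scheduler.c below applies the same pattern to intel_vgpu_workload and its rb_tail field.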
drivers/gpu/drm/i915/gvt/cmd_parser.c

@@ -471,6 +471,7 @@ struct parser_exec_state {
 	 * used when ret from 2nd level batch buffer
 	 */
 	int saved_buf_addr_type;
+	bool is_ctx_wa;
 
 	struct cmd_info *info;
 
@@ -1715,6 +1716,11 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 	bb->accessing = true;
 	bb->bb_start_cmd_va = s->ip_va;
 
+	if ((s->buf_type == BATCH_BUFFER_INSTRUCTION) && (!s->is_ctx_wa))
+		bb->bb_offset = s->ip_va - s->rb_va;
+	else
+		bb->bb_offset = 0;
+
 	/*
 	 * ip_va saves the virtual address of the shadow batch buffer, while
 	 * ip_gma saves the graphics address of the original batch buffer.
@@ -2571,6 +2577,7 @@ static int scan_workload(struct intel_vgpu_workload *workload)
 	s.ring_tail = gma_tail;
 	s.rb_va = workload->shadow_ring_buffer_va;
 	s.workload = workload;
+	s.is_ctx_wa = false;
 
 	if ((bypass_scan_mask & (1 << workload->ring_id)) ||
 		gma_head == gma_tail)
@@ -2624,6 +2631,7 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	s.ring_tail = gma_tail;
 	s.rb_va = wa_ctx->indirect_ctx.shadow_va;
 	s.workload = workload;
+	s.is_ctx_wa = true;
 
 	if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
 		ret = -EINVAL;

drivers/gpu/drm/i915/gvt/mmio_context.c

@@ -394,9 +394,11 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
 	 * performace for batch mmio read/write, so we need
 	 * handle forcewake mannually.
 	 */
+	intel_runtime_pm_get(dev_priv);
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 	switch_mmio(pre, next, ring_id);
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+	intel_runtime_pm_put(dev_priv);
 }
 
 /**

drivers/gpu/drm/i915/gvt/scheduler.c

@@ -52,6 +52,54 @@ static void set_context_pdp_root_pointer(
 		pdp_pair[i].val = pdp[7 - i];
 }
 
+/*
+ * when populating shadow ctx from guest, we should not overrride oa related
+ * registers, so that they will not be overlapped by guest oa configs. Thus
+ * made it possible to capture oa data from host for both host and guests.
+ */
+static void sr_oa_regs(struct intel_vgpu_workload *workload,
+		u32 *reg_state, bool save)
+{
+	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+	u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
+	u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
+	int i = 0;
+	u32 flex_mmio[] = {
+		i915_mmio_reg_offset(EU_PERF_CNTL0),
+		i915_mmio_reg_offset(EU_PERF_CNTL1),
+		i915_mmio_reg_offset(EU_PERF_CNTL2),
+		i915_mmio_reg_offset(EU_PERF_CNTL3),
+		i915_mmio_reg_offset(EU_PERF_CNTL4),
+		i915_mmio_reg_offset(EU_PERF_CNTL5),
+		i915_mmio_reg_offset(EU_PERF_CNTL6),
+	};
+
+	if (!workload || !reg_state || workload->ring_id != RCS)
+		return;
+
+	if (save) {
+		workload->oactxctrl = reg_state[ctx_oactxctrl + 1];
+
+		for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
+			u32 state_offset = ctx_flexeu0 + i * 2;
+
+			workload->flex_mmio[i] = reg_state[state_offset + 1];
+		}
+	} else {
+		reg_state[ctx_oactxctrl] =
+			i915_mmio_reg_offset(GEN8_OACTXCONTROL);
+		reg_state[ctx_oactxctrl + 1] = workload->oactxctrl;
+
+		for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
+			u32 state_offset = ctx_flexeu0 + i * 2;
+			u32 mmio = flex_mmio[i];
+
+			reg_state[state_offset] = mmio;
+			reg_state[state_offset + 1] = workload->flex_mmio[i];
+		}
+	}
+}
+
 static int populate_shadow_context(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
@@ -98,6 +146,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
 	shadow_ring_context = kmap(page);
 
+	sr_oa_regs(workload, (u32 *)shadow_ring_context, true);
 #define COPY_REG(name) \
 	intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
 		+ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
@@ -122,6 +171,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 			sizeof(*shadow_ring_context),
 			I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
 
+	sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
 	kunmap(page);
 	return 0;
 }
@@ -376,6 +426,17 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 			goto err;
 		}
 
+		/* For privilge batch buffer and not wa_ctx, the bb_start_cmd_va
+		 * is only updated into ring_scan_buffer, not real ring address
+		 * allocated in later copy_workload_to_ring_buffer. pls be noted
+		 * shadow_ring_buffer_va is now pointed to real ring buffer va
+		 * in copy_workload_to_ring_buffer.
+		 */
+
+		if (bb->bb_offset)
+			bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
+				+ bb->bb_offset;
+
 		/* relocate shadow batch buffer */
 		bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
 		if (gmadr_bytes == 8)
@@ -1044,10 +1105,12 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 
 	bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);
 
-	s->workloads = kmem_cache_create("gvt-g_vgpu_workload",
-			sizeof(struct intel_vgpu_workload), 0,
-			SLAB_HWCACHE_ALIGN,
-			NULL);
+	s->workloads = kmem_cache_create_usercopy("gvt-g_vgpu_workload",
+			sizeof(struct intel_vgpu_workload), 0,
+			SLAB_HWCACHE_ALIGN,
+			offsetof(struct intel_vgpu_workload, rb_tail),
+			sizeof_field(struct intel_vgpu_workload, rb_tail),
+			NULL);
 
 	if (!s->workloads) {
 		ret = -ENOMEM;

drivers/gpu/drm/i915/gvt/scheduler.h

@@ -110,6 +110,10 @@ struct intel_vgpu_workload {
 	/* shadow batch buffer */
 	struct list_head shadow_bb;
 	struct intel_shadow_wa_ctx wa_ctx;
+
+	/* oa registers */
+	u32 oactxctrl;
+	u32 flex_mmio[7];
 };
 
 struct intel_vgpu_shadow_bb {
@@ -120,6 +124,7 @@ struct intel_vgpu_shadow_bb {
 	u32 *bb_start_cmd_va;
 	unsigned int clflush;
 	bool accessing;
+	unsigned long bb_offset;
 };
 
 #define workload_q_head(vgpu, ring_id) \