From 9174c1d6196d612799808009ec2796df021ab625 Mon Sep 17 00:00:00 2001
From: Xiaolin Zhang
Date: Tue, 7 Aug 2018 20:39:16 +0800
Subject: [PATCH 1/6] drm/i915/gvt: emulate gen9 dbuf ctl register access

The call trace below shows up at boot time when booting a guest with a
Kabylake vGPU in a special configuration; this patch fixes it.

[drm:gen9_dbuf_enable [i915]] *ERROR* DBuf power enable timeout
------------[ cut here ]------------
WARNING: gen9_dc_off_power_well_enable+0x224/0x230 [i915]
Unexpected DBuf power power state (0x8000000a)
Hardware name: Red Hat KVM, BIOS 1.11.0-2.el7 04/01/2014
Call Trace:
 [] dump_stack+0x19/0x1b
 [] __warn+0xd8/0x100
 [] warn_slowpath_fmt+0x5f/0x80
 [] gen9_dc_off_power_well_enable+0x224/0x230 [i915]
 [] intel_power_well_enable+0x42/0x50 [i915]
 [] __intel_display_power_get_domain+0x8a/0xb0 [i915]
 [] intel_display_power_get+0x33/0x50 [i915]
 [] intel_display_set_init_power+0x45/0x50 [i915]
 [] intel_power_domains_init_hw+0x63/0x8a0 [i915]
 [] i915_driver_load+0xae3/0x1760 [i915]
 [] ? nvmem_register+0x500/0x500
 [] i915_pci_probe+0x2c/0x50 [i915]
 [] local_pci_probe+0x4a/0xb0
 [] pci_device_probe+0x109/0x160
 [] driver_probe_device+0xc5/0x3e0
 [] __driver_attach+0x93/0xa0
 [] ? __device_attach+0x50/0x50
 [] bus_for_each_dev+0x75/0xc0
 [] driver_attach+0x1e/0x20
 [] bus_add_driver+0x200/0x2d0
 [] driver_register+0x64/0xf0
 [] __pci_register_driver+0xa5/0xc0
 [] ? 0xffffffffc0928fff
 [] i915_init+0x59/0x5c [i915]
 [] do_one_initcall+0xba/0x240
 [] load_module+0x272c/0x2bc0
 [] ? ddebug_proc_write+0xf0/0xf0
 [] SyS_init_module+0xc5/0x110
 [] system_call_fastpath+0x1c/0x21

Signed-off-by: Xiaolin Zhang
Signed-off-by: Zhenyu Wang
---
 drivers/gpu/drm/i915/gvt/handlers.c | 17 +++++++++++++++--
 1 file changed, 15 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 7a58ca5551977..450e730743a13 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1296,6 +1296,19 @@ static int power_well_ctl_mmio_write(struct intel_vgpu *vgpu,
 	return 0;
 }
 
+static int gen9_dbuf_ctl_mmio_write(struct intel_vgpu *vgpu,
+		unsigned int offset, void *p_data, unsigned int bytes)
+{
+	write_vreg(vgpu, offset, p_data, bytes);
+
+	if (vgpu_vreg(vgpu, offset) & DBUF_POWER_REQUEST)
+		vgpu_vreg(vgpu, offset) |= DBUF_POWER_STATE;
+	else
+		vgpu_vreg(vgpu, offset) &= ~DBUF_POWER_STATE;
+
+	return 0;
+}
+
 static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu,
 		unsigned int offset, void *p_data, unsigned int bytes)
 {
@@ -2812,6 +2825,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(SKL_DISP_PW_MISC_IO), D_SKL_PLUS, NULL,
 		skl_power_well_ctl_write);
 
+	MMIO_DH(DBUF_CTL, D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write);
+
 	MMIO_D(_MMIO(0xa210), D_SKL_PLUS);
 	MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
 	MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
@@ -2987,8 +3002,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 		NULL, gen9_trtte_write);
 	MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write);
 
-	MMIO_D(_MMIO(0x45008), D_SKL_PLUS);
-
 	MMIO_D(_MMIO(0x46430), D_SKL_PLUS);
 
 	MMIO_D(_MMIO(0x46520), D_SKL_PLUS);
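For context, the guest call that times out above is gen9_dbuf_enable() in the guest i915 driver: it sets DBUF_POWER_REQUEST in DBUF_CTL and then expects the hardware to latch DBUF_POWER_STATE. Previously DBUF_CTL (0x45008) was only a plain MMIO_D vreg, so the state bit never changed and the guest hit the timeout and the power-well warning shown in the trace. The new gen9_dbuf_ctl_mmio_write() handler mirrors the request bit into the state bit, which is enough to satisfy a guest sequence roughly like the following (a paraphrased sketch, not the exact i915 source):

    /* Guest driver: request DBuf power, then poll the status bit. */
    I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
    POSTING_READ(DBUF_CTL);
    udelay(10);

    if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
        DRM_ERROR("DBuf power enable timeout\n");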
From c8ab5ac30ccc20a31672ab0f8938a6271dfe4122 Mon Sep 17 00:00:00 2001
From: Colin Xu
Date: Mon, 20 Aug 2018 16:46:34 +0800
Subject: [PATCH 2/6] drm/i915/gvt: Make correct handling to vreg BXT_PHY_CTL_FAMILY

The guest kernel writes BXT_PHY_CTL_FAMILY to reset the DDI PHY and then
polls BXT_PHY_CTL to check the PHY status.

The previous handling set/reset the BXT_PHY_CTL of all PHYs at the same
time on receiving a vreg write to any BXT_PHY_CTL_FAMILY. If some
BXT_PHY_CTL was already enabled, a following reset of another
BXT_PHY_CTL_FAMILY would clear the already-enabled BXT_PHY_CTL, which
results in the guest kernel printing:
-----------------------------------
[drm:intel_ddi_get_hw_state [i915]] *ERROR* Port B enabled but PHY powered down? (PHY_CTL 00000000)
-----------------------------------
The correct handling is to operate on the BXT_PHY_CTL_FAMILY and the
BXT_PHY_CTL of the same DDI.

v2: Use the correct reg define. The naming looks confusing; however, the
current i915_reg.h binds DPIO_PHY0 to _PHY_CTL_FAMILY_DDI and DPIO_PHY1
to _PHY_CTL_FAMILY_EDP, pairing to _BXT_PHY_CTL_DDI_A and
_BXT_PHY_CTL_DDI_B respectively.
v3: v2 incorrectly mapped _PHY_CTL_FAMILY_EDP to _BXT_PHY_CTL_DDI_A.
BXT_PHY_CTL() looks up the DDI by PORTx, not by PHYx. Based on the
DPIO_PHY to DDI mapping, make the vreg handler update the correct
BXT_PHY_CTL on receiving a vreg write to BXT_PHY_CTL_FAMILY. (He, Min)
Current mapping according to bxt_power_wells:
  dpio-common-a:
    >>> DPIO_PHY1
    >>> BXT_DPIO_CMN_A_POWER_DOMAINS
    >>> POWER_DOMAIN_PORT_DDI_A_LANES
    >>> PORT_A
  dpio-common-bc:
    >>> DPIO_PHY0
    >>> BXT_DPIO_CMN_BC_POWER_DOMAINS
    >>> POWER_DOMAIN_PORT_DDI_B_LANES | POWER_DOMAIN_PORT_DDI_C_LANES
    >>> PORT_B or PORT_C

Signed-off-by: Colin Xu
Reviewed-by: He, Min
Signed-off-by: Zhenyu Wang
---
 drivers/gpu/drm/i915/gvt/handlers.c | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 450e730743a13..d0db55a796276 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1538,9 +1538,15 @@ static int bxt_phy_ctl_family_write(struct intel_vgpu *vgpu,
 	u32 v = *(u32 *)p_data;
 	u32 data = v & COMMON_RESET_DIS ? BXT_PHY_LANE_ENABLED : 0;
 
-	vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data;
-	vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data;
-	vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data;
+	switch (offset) {
+	case _PHY_CTL_FAMILY_EDP:
+		vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_A) = data;
+		break;
+	case _PHY_CTL_FAMILY_DDI:
+		vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_B) = data;
+		vgpu_vreg(vgpu, _BXT_PHY_CTL_DDI_C) = data;
+		break;
+	}
 
 	vgpu_vreg(vgpu, offset) = v;
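For context, the guest check that prints the error above is in intel_ddi_get_hw_state(), which reads the per-port BXT_PHY_CTL rather than the family register; roughly (a paraphrased sketch, not the exact i915 source):

    if (IS_GEN9_LP(dev_priv)) {
        u32 phy_ctl = I915_READ(BXT_PHY_CTL(port));

        if (!(phy_ctl & BXT_PHY_LANE_ENABLED))
            DRM_ERROR("Port %c enabled but PHY powered down? "
                      "(PHY_CTL %08x)\n", port_name(port), phy_ctl);
    }

This is why the vreg emulation has to keep each DDI's BXT_PHY_CTL consistent with the BXT_PHY_CTL_FAMILY register that owns it, instead of rewriting all three ports on every family write.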
From b9b824a55876275f8506c1c187558ab22d879f73 Mon Sep 17 00:00:00 2001
From: Colin Xu
Date: Fri, 17 Aug 2018 16:42:24 +0800
Subject: [PATCH 3/6] drm/i915/gvt: Handle GEN9_WM_CHICKEN3 with F_CMD_ACCESS.

A recent patch introduced a strict check on cmd scanning:
Commit 8d458ea0ec33 ("drm/i915/gvt: return error on cmd access")

Before 8d458ea0ec33, if cmd_reg_handler() found a cmd accessing an mmio
that is not marked as F_CMD_ACCESS, it simply returned 0 and logged an
error. Now it returns -EBADRQC, which causes the workload to fail to
submit.

On BXT, i915 applies WaClearHIZ_WM_CHICKEN3, which programs
GEN9_WM_CHICKEN3 by LRI when initializing the wa ctx. If the register has
no F_CMD_ACCESS flag, the vGPU will fail to start.

Also add F_MODE_MASK since it is a mode mask reg.

v2: Refresh the commit message to elaborate the issue symptom in detail.
v3: Make SKL_PLUS share the same handling since GEN9_WM_CHICKEN3 should
be F_CMD_ACCESS from the HW aspect. (yan, zhenyu)

Signed-off-by: Colin Xu
Acked-by: Zhao Yan
Signed-off-by: Zhenyu Wang
---
 drivers/gpu/drm/i915/gvt/handlers.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index d0db55a796276..72afa518edd91 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -3044,7 +3044,9 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(_MMIO(0x44500), D_SKL_PLUS);
 	MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
-		NULL, NULL);
+		 NULL, NULL);
+	MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
+		 NULL, NULL);
 
 	MMIO_D(_MMIO(0x4ab8), D_KBL);
 	MMIO_D(_MMIO(0x2248), D_KBL | D_SKL);

From b2b599fb54f90ae395ddc51f0d49e4f28244a8f8 Mon Sep 17 00:00:00 2001
From: Hang Yuan
Date: Wed, 29 Aug 2018 17:15:56 +0800
Subject: [PATCH 4/6] drm/i915/gvt: move intel_runtime_pm_get out of spin_lock in stop_schedule

pm_runtime_get_sync in intel_runtime_pm_get might sleep if the i915
device is not active. When stopping the vGPU schedule, the device may be
inactive, so intel_runtime_pm_get needs to be moved out of
spin_lock/unlock.

Fixes: b24881e0b0b6 ("drm/i915/gvt: Add runtime_pm_get/put into gvt_switch_mmio")
Cc:
Signed-off-by: Hang Yuan
Signed-off-by: Xiong Zhang
Signed-off-by: Zhenyu Wang
---
 drivers/gpu/drm/i915/gvt/mmio_context.c | 2 --
 drivers/gpu/drm/i915/gvt/sched_policy.c | 3 +++
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 42e1e6bdcc2cf..e872f4847fbe0 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -562,11 +562,9 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
 	 * performace for batch mmio read/write, so we need
 	 * handle forcewake mannually.
 	 */
-	intel_runtime_pm_get(dev_priv);
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 	switch_mmio(pre, next, ring_id);
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
-	intel_runtime_pm_put(dev_priv);
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 09d7bb72b4ff3..985fe81794ddc 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -426,6 +426,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 		&vgpu->gvt->scheduler;
 	int ring_id;
 	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 
 	if (!vgpu_data->active)
 		return;
@@ -444,6 +445,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 		scheduler->current_vgpu = NULL;
 	}
 
+	intel_runtime_pm_get(dev_priv);
 	spin_lock_bh(&scheduler->mmio_context_lock);
 	for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
 		if (scheduler->engine_owner[ring_id] == vgpu) {
@@ -452,5 +454,6 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
 		}
 	}
 	spin_unlock_bh(&scheduler->mmio_context_lock);
+	intel_runtime_pm_put(dev_priv);
 	mutex_unlock(&vgpu->gvt->sched_lock);
 }
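The rule behind the patch above: pm_runtime_get_sync() may sleep while resuming the device, and sleeping is not allowed inside spin_lock_bh() (atomic context). The fix therefore takes the runtime-PM reference before the spinlock and drops it afterwards, so only non-sleeping MMIO-context bookkeeping runs under the lock. A condensed sketch of the resulting ordering in intel_vgpu_stop_schedule():

    /*
     * intel_runtime_pm_get() -> pm_runtime_get_sync() can sleep if the
     * device is runtime-suspended, so take the wakeref outside the lock.
     */
    intel_runtime_pm_get(dev_priv);

    spin_lock_bh(&scheduler->mmio_context_lock);
    /* switch engine mmio context back to the host; no sleeping calls here */
    spin_unlock_bh(&scheduler->mmio_context_lock);

    intel_runtime_pm_put(dev_priv);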
From b244ffa15c8b1aabdc117c0b6008086df7b668b7 Mon Sep 17 00:00:00 2001
From: Zhenyu Wang
Date: Thu, 30 Aug 2018 10:50:36 +0800
Subject: [PATCH 5/6] drm/i915/gvt: Fix drm_format_mod value for vGPU plane

The physical plane's tiling mode value was returned directly as
drm_format_mod in the plane query, which is not a correct fourcc modifier
code. Fix it by using the correct Intel tiling fourcc modifier
definitions.

Current QEMU also does not seem to use drm_format_mod correctly when
setting up the plane object; regardless, this fix is required for correct
usage.

v3: use DRM_FORMAT_MOD_LINEAR, fix comment
v2: Fix missed old 'tiled' use for stride calculation

Fixes: e546e281d33d ("drm/i915/gvt: Dmabuf support for GVT-g")
Cc: Tina Zhang
Cc: Gerd Hoffmann
Cc: Colin Xu
Reviewed-by: Colin Xu
Signed-off-by: Zhenyu Wang
---
 drivers/gpu/drm/i915/gvt/dmabuf.c     | 33 +++++++++++++++++++++------
 drivers/gpu/drm/i915/gvt/fb_decoder.c |  5 ++--
 drivers/gpu/drm/i915/gvt/fb_decoder.h |  2 +-
 3 files changed, 29 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 6e3f56684f4ec..51ed99a378033 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -170,20 +170,22 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
 		unsigned int tiling_mode = 0;
 		unsigned int stride = 0;
 
-		switch (info->drm_format_mod << 10) {
-		case PLANE_CTL_TILED_LINEAR:
+		switch (info->drm_format_mod) {
+		case DRM_FORMAT_MOD_LINEAR:
 			tiling_mode = I915_TILING_NONE;
 			break;
-		case PLANE_CTL_TILED_X:
+		case I915_FORMAT_MOD_X_TILED:
 			tiling_mode = I915_TILING_X;
 			stride = info->stride;
 			break;
-		case PLANE_CTL_TILED_Y:
+		case I915_FORMAT_MOD_Y_TILED:
+		case I915_FORMAT_MOD_Yf_TILED:
 			tiling_mode = I915_TILING_Y;
 			stride = info->stride;
 			break;
 		default:
-			gvt_dbg_core("not supported tiling mode\n");
+			gvt_dbg_core("invalid drm_format_mod %llx for tiling\n",
+				     info->drm_format_mod);
 		}
 		obj->tiling_and_stride = tiling_mode | stride;
 	} else {
@@ -222,9 +224,26 @@ static int vgpu_get_plane_info(struct drm_device *dev,
 		info->height = p.height;
 		info->stride = p.stride;
 		info->drm_format = p.drm_format;
-		info->drm_format_mod = p.tiled;
+
+		switch (p.tiled) {
+		case PLANE_CTL_TILED_LINEAR:
+			info->drm_format_mod = DRM_FORMAT_MOD_LINEAR;
+			break;
+		case PLANE_CTL_TILED_X:
+			info->drm_format_mod = I915_FORMAT_MOD_X_TILED;
+			break;
+		case PLANE_CTL_TILED_Y:
+			info->drm_format_mod = I915_FORMAT_MOD_Y_TILED;
+			break;
+		case PLANE_CTL_TILED_YF:
+			info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED;
+			break;
+		default:
+			gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled);
+		}
+
 		info->size = (((p.stride * p.height * p.bpp) / 8) +
-				(PAGE_SIZE - 1)) >> PAGE_SHIFT;
+			      (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 	} else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
 		ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
 		if (ret)
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index face664be3e8e..481896fb712ab 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -220,8 +220,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
 	if (IS_SKYLAKE(dev_priv)
 		|| IS_KABYLAKE(dev_priv)
 		|| IS_BROXTON(dev_priv)) {
-		plane->tiled = (val & PLANE_CTL_TILED_MASK) >>
-		_PLANE_CTL_TILED_SHIFT;
+		plane->tiled = val & PLANE_CTL_TILED_MASK;
 		fmt = skl_format_to_drm(
 			val & PLANE_CTL_FORMAT_MASK,
 			val & PLANE_CTL_ORDER_RGBX,
@@ -260,7 +259,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
 		return -EINVAL;
 	}
 
-	plane->stride = intel_vgpu_get_stride(vgpu, pipe, (plane->tiled << 10),
+	plane->stride = intel_vgpu_get_stride(vgpu, pipe, plane->tiled,
 		(IS_SKYLAKE(dev_priv)
 		|| IS_KABYLAKE(dev_priv)
 		|| IS_BROXTON(dev_priv)) ?
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.h b/drivers/gpu/drm/i915/gvt/fb_decoder.h
index cb055f3c81a29..60c155085029c 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.h
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.h
@@ -101,7 +101,7 @@ struct intel_gvt;
 /* color space conversion and gamma correction are not included */
 struct intel_vgpu_primary_plane_format {
 	u8	enabled;	/* plane is enabled */
-	u8	tiled;		/* X-tiled */
+	u32	tiled;		/* tiling mode: linear, X-tiled, Y tiled, etc */
 	u8	bpp;		/* bits per pixel */
 	u32	hw_format;	/* format field in the PRI_CTL register */
 	u32	drm_format;	/* format in DRM definition */
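Why a real fourcc modifier matters: a userspace consumer of the plane query is expected to hand drm_format_mod straight to the KMS API, which understands DRM_FORMAT_MOD_*/I915_FORMAT_MOD_* values but not raw PLANE_CTL tiling bits. A sketch of such a consumer follows (illustrative only; the helper below is hypothetical and not QEMU's actual code):

    #include <stdint.h>
    #include <xf86drm.h>
    #include <xf86drmMode.h>
    #include <drm_fourcc.h>

    /* Hypothetical helper: import a decoded vGPU plane as a KMS framebuffer. */
    static int import_vgpu_plane(int drm_fd, uint32_t gem_handle,
                                 uint32_t width, uint32_t height,
                                 uint32_t drm_format, uint32_t stride,
                                 uint64_t drm_format_mod)
    {
            uint32_t handles[4] = { gem_handle };
            uint32_t pitches[4] = { stride };
            uint32_t offsets[4] = { 0 };
            /* e.g. I915_FORMAT_MOD_X_TILED, as returned after this fix */
            uint64_t modifiers[4] = { drm_format_mod };
            uint32_t fb_id;

            return drmModeAddFB2WithModifiers(drm_fd, width, height, drm_format,
                                              handles, pitches, offsets,
                                              modifiers, &fb_id,
                                              DRM_MODE_FB_MODIFIERS);
    }

With the old raw PLANE_CTL value, such a call would either be rejected or describe the buffer's tiling incorrectly.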
From 54ff01fd0d44b9681615f77c15fe9ea6dfadb501 Mon Sep 17 00:00:00 2001
From: Zhenyu Wang
Date: Thu, 30 Aug 2018 11:33:43 +0800
Subject: [PATCH 6/6] drm/i915/gvt: Give new born vGPU higher scheduling chance

This gives a newly created vGPU a higher scheduling chance: it is not
only added at the head of the sched list, but also gets higher priority
for workload scheduling during the first 2 seconds after it starts to be
scheduled. The goal is fast GPU execution during VM boot, ensuring the
guest driver can set up the required state in time.

This fixes a recent failure seen on one VM when multiple Linux VMs were
running on a kernel with commit 2621cefaa42b3 ("drm/i915: Provide a
timeout to i915_gem_wait_for_idle() on setup"), whose shorter setup
timeout caused context state initialization to fail.

v2: change to 2s for the higher scheduling period

Cc: Yuan Hang
Reviewed-by: Hang Yuan
Signed-off-by: Zhenyu Wang
---
 drivers/gpu/drm/i915/gvt/sched_policy.c | 34 ++++++++++++++++++++-------
 1 file changed, 27 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 985fe81794ddc..c32e7d5e86291 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -47,11 +47,15 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
 	return false;
 }
 
+/* We give 2 seconds higher prio for vGPU during start */
+#define GVT_SCHED_VGPU_PRI_TIME  2
+
 struct vgpu_sched_data {
 	struct list_head lru_list;
 	struct intel_vgpu *vgpu;
 	bool active;
-
+	bool pri_sched;
+	ktime_t pri_time;
 	ktime_t sched_in_time;
 	ktime_t sched_time;
 	ktime_t left_ts;
@@ -183,6 +187,14 @@ static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data)
 		if (!vgpu_has_pending_workload(vgpu_data->vgpu))
 			continue;
 
+		if (vgpu_data->pri_sched) {
+			if (ktime_before(ktime_get(), vgpu_data->pri_time)) {
+				vgpu = vgpu_data->vgpu;
+				break;
+			} else
+				vgpu_data->pri_sched = false;
+		}
+
 		/* Return the vGPU only if it has time slice left */
 		if (vgpu_data->left_ts > 0) {
 			vgpu = vgpu_data->vgpu;
@@ -202,6 +214,7 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data)
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
 	struct vgpu_sched_data *vgpu_data;
 	struct intel_vgpu *vgpu = NULL;
+
 	/* no active vgpu or has already had a target */
 	if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu)
 		goto out;
@@ -209,12 +222,13 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data)
 	vgpu = find_busy_vgpu(sched_data);
 	if (vgpu) {
 		scheduler->next_vgpu = vgpu;
-
-		/* Move the last used vGPU to the tail of lru_list */
 		vgpu_data = vgpu->sched_data;
-		list_del_init(&vgpu_data->lru_list);
-		list_add_tail(&vgpu_data->lru_list,
-				&sched_data->lru_runq_head);
+		if (!vgpu_data->pri_sched) {
+			/* Move the last used vGPU to the tail of lru_list */
+			list_del_init(&vgpu_data->lru_list);
+			list_add_tail(&vgpu_data->lru_list,
+					&sched_data->lru_runq_head);
+		}
 	} else {
 		scheduler->next_vgpu = gvt->idle_vgpu;
 	}
@@ -328,11 +342,17 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
 {
 	struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
 	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
+	ktime_t now;
 
 	if (!list_empty(&vgpu_data->lru_list))
 		return;
 
-	list_add_tail(&vgpu_data->lru_list, &sched_data->lru_runq_head);
+	now = ktime_get();
+	vgpu_data->pri_time = ktime_add(now,
+					ktime_set(GVT_SCHED_VGPU_PRI_TIME, 0));
+	vgpu_data->pri_sched = true;
+
+	list_add(&vgpu_data->lru_list, &sched_data->lru_runq_head);
 
 	if (!hrtimer_active(&sched_data->timer))
 		hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),