drm/i915/guc: always use Command Transport Buffers

Now that we've moved the Gen9 GuC blobs to version 32, we have CTB
support on all gens, so there is no need to restrict their use to Gen11+.
Note that MMIO communication is still required for CTB initialization.

v2: fix commit message nits (Michal)

Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190606224225.14287-1-daniele.ceraolospurio@intel.com
Daniele Ceraolo Spurio authored 2019-06-06 15:42:24 -07:00; committed by Chris Wilson
commit 33ec6c9eb3, parent 6be306bee7
7 changed files with 10 additions and 72 deletions
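Pieced together from the hunks below, the communication setup in intel_uc.c reduces to an unconditional CT path after this patch. A sketch of the resulting code (nothing here beyond what the diff itself shows):

/* CT is now used on all gens; the MMIO send/handler fallback is gone. */
static int guc_enable_communication(struct intel_guc *guc)
{
	guc_enable_interrupts(guc);

	return intel_guc_ct_enable(&guc->ct);
}

static void guc_disable_communication(struct intel_guc *guc)
{
	intel_guc_ct_disable(&guc->ct);

	guc_disable_interrupts(guc);
}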

drivers/gpu/drm/i915/i915_drv.h

@@ -2393,7 +2393,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
  * properties, so we have separate macros to test them.
  */
 #define HAS_GUC(dev_priv) (INTEL_INFO(dev_priv)->has_guc)
-#define HAS_GUC_CT(dev_priv) (INTEL_INFO(dev_priv)->has_guc_ct)
 #define HAS_GUC_UCODE(dev_priv) (HAS_GUC(dev_priv))
 #define HAS_GUC_SCHED(dev_priv) (HAS_GUC(dev_priv))

drivers/gpu/drm/i915/i915_pci.c

@@ -746,7 +746,6 @@ static const struct intel_device_info intel_cannonlake_info = {
 	}, \
 	GEN(11), \
 	.ddb_size = 2048, \
-	.has_guc_ct = 1, \
 	.has_logical_ring_elsq = 1, \
 	.color = { .degamma_lut_size = 33, .gamma_lut_size = 1024 }

drivers/gpu/drm/i915/intel_device_info.h

@@ -112,7 +112,6 @@ enum intel_ppgtt_type {
 	func(has_reset_engine); \
 	func(has_fpga_dbg); \
 	func(has_guc); \
-	func(has_guc_ct); \
 	func(has_l3_dpf); \
 	func(has_llc); \
 	func(has_logical_ring_contexts); \

drivers/gpu/drm/i915/intel_guc.c

@@ -56,7 +56,7 @@ void intel_guc_init_send_regs(struct intel_guc *guc)
 	enum forcewake_domains fw_domains = 0;
 	unsigned int i;
 
-	if (HAS_GUC_CT(dev_priv) && INTEL_GEN(dev_priv) >= 11) {
+	if (INTEL_GEN(dev_priv) >= 11) {
 		guc->send_regs.base =
 				i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0));
 		guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT;
@@ -232,11 +232,9 @@ int intel_guc_init(struct intel_guc *guc)
 		goto err_log;
 	GEM_BUG_ON(!guc->ads_vma);
 
-	if (HAS_GUC_CT(dev_priv)) {
-		ret = intel_guc_ct_init(&guc->ct);
-		if (ret)
-			goto err_ads;
-	}
+	ret = intel_guc_ct_init(&guc->ct);
+	if (ret)
+		goto err_ads;
 
 	/* We need to notify the guc whenever we change the GGTT */
 	i915_ggtt_enable_guc(dev_priv);
@@ -262,8 +260,7 @@ void intel_guc_fini(struct intel_guc *guc)
 
 	i915_ggtt_disable_guc(dev_priv);
 
-	if (HAS_GUC_CT(dev_priv))
-		intel_guc_ct_fini(&guc->ct);
+	intel_guc_ct_fini(&guc->ct);
 
 	intel_guc_ads_destroy(guc);
 	intel_guc_log_destroy(&guc->log);
@@ -430,9 +427,8 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
 	GEM_BUG_ON(*action & ~INTEL_GUC_MSG_CODE_MASK);
 
 	/* If CT is available, we expect to use MMIO only during init/fini */
-	GEM_BUG_ON(HAS_GUC_CT(dev_priv) &&
-		   *action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
-		   *action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);
+	GEM_BUG_ON(*action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
+		   *action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);
 
 	mutex_lock(&guc->send_mutex);
 	intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains);
@@ -481,33 +477,6 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
 	return ret;
 }
 
-void intel_guc_to_host_event_handler_mmio(struct intel_guc *guc)
-{
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
-	u32 msg, val;
-
-	/*
-	 * Sample the log buffer flush related bits & clear them out now
-	 * itself from the message identity register to minimize the
-	 * probability of losing a flush interrupt, when there are back
-	 * to back flush interrupts.
-	 * There can be a new flush interrupt, for different log buffer
-	 * type (like for ISR), whilst Host is handling one (for DPC).
-	 * Since same bit is used in message register for ISR & DPC, it
-	 * could happen that GuC sets the bit for 2nd interrupt but Host
-	 * clears out the bit on handling the 1st interrupt.
-	 */
-	disable_rpm_wakeref_asserts(dev_priv);
-	spin_lock(&guc->irq_lock);
-	val = I915_READ(SOFT_SCRATCH(15));
-	msg = val & guc->msg_enabled_mask;
-	I915_WRITE(SOFT_SCRATCH(15), val & ~msg);
-	spin_unlock(&guc->irq_lock);
-	enable_rpm_wakeref_asserts(dev_priv);
-
-	intel_guc_to_host_process_recv_msg(guc, &msg, 1);
-}
-
 int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
 				       const u32 *payload, u32 len)
 {
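As the commit message notes, MMIO is still what bootstraps the CT channel: registering the CT buffers is exactly the kind of action the tightened GEM_BUG_ON above still allows through intel_guc_send_mmio(). A rough, illustrative sketch of such a registration call (the helper name and exact action layout are approximations, not taken from this patch):

/* Illustrative only: CT buffer registration has to go over MMIO, since the
 * CT channel cannot carry traffic until the GuC has accepted it.
 */
static int example_register_ct_buffer(struct intel_guc *guc, u32 desc_addr, u32 type)
{
	u32 action[] = {
		INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER,
		desc_addr,
		sizeof(struct guc_ct_buffer_desc),
		type,
	};

	/* One of the two actions intel_guc_send_mmio() still accepts. */
	return intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
}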

drivers/gpu/drm/i915/intel_guc.h

@@ -165,7 +165,6 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
 			u32 *response_buf, u32 response_buf_size);
 void intel_guc_to_host_event_handler(struct intel_guc *guc);
 void intel_guc_to_host_event_handler_nop(struct intel_guc *guc);
-void intel_guc_to_host_event_handler_mmio(struct intel_guc *guc);
 int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
 				       const u32 *payload, u32 len);
 int intel_guc_sample_forcewake(struct intel_guc *guc);

drivers/gpu/drm/i915/intel_guc_ct.c

@@ -848,8 +848,6 @@ static void intel_guc_to_host_event_handler_ct(struct intel_guc *guc)
  * Allocate memory required for communication via
  * the CT channel.
  *
- * Shall only be called for platforms with HAS_GUC_CT.
- *
  * Return: 0 on success, a negative errno code on failure.
  */
 int intel_guc_ct_init(struct intel_guc_ct *ct)
@@ -875,8 +873,6 @@ int intel_guc_ct_init(struct intel_guc_ct *ct)
  *
  * Deallocate memory required for communication via
  * the CT channel.
- *
- * Shall only be called for platforms with HAS_GUC_CT.
  */
 void intel_guc_ct_fini(struct intel_guc_ct *ct)
 {
@@ -890,19 +886,14 @@ void intel_guc_ct_fini(struct intel_guc_ct *ct)
  * intel_guc_ct_enable - Enable buffer based command transport.
  * @ct: pointer to CT struct
  *
- * Shall only be called for platforms with HAS_GUC_CT.
- *
  * Return: 0 on success, a negative errno code on failure.
  */
 int intel_guc_ct_enable(struct intel_guc_ct *ct)
 {
 	struct intel_guc *guc = ct_to_guc(ct);
-	struct drm_i915_private *i915 = guc_to_i915(guc);
 	struct intel_guc_ct_channel *ctch = &ct->host_channel;
 	int err;
 
-	GEM_BUG_ON(!HAS_GUC_CT(i915));
-
 	if (ctch->enabled)
 		return 0;
@@ -920,17 +911,12 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct)
 /**
  * intel_guc_ct_disable - Disable buffer based command transport.
  * @ct: pointer to CT struct
- *
- * Shall only be called for platforms with HAS_GUC_CT.
  */
 void intel_guc_ct_disable(struct intel_guc_ct *ct)
 {
 	struct intel_guc *guc = ct_to_guc(ct);
-	struct drm_i915_private *i915 = guc_to_i915(guc);
 	struct intel_guc_ct_channel *ctch = &ct->host_channel;
 
-	GEM_BUG_ON(!HAS_GUC_CT(i915));
-
 	if (!ctch->enabled)
 		return;

drivers/gpu/drm/i915/intel_uc.c

@@ -235,24 +235,14 @@ static void guc_disable_interrupts(struct intel_guc *guc)
 static int guc_enable_communication(struct intel_guc *guc)
 {
-	struct drm_i915_private *i915 = guc_to_i915(guc);
-
 	guc_enable_interrupts(guc);
 
-	if (HAS_GUC_CT(i915))
-		return intel_guc_ct_enable(&guc->ct);
-
-	guc->send = intel_guc_send_mmio;
-	guc->handler = intel_guc_to_host_event_handler_mmio;
-	return 0;
+	return intel_guc_ct_enable(&guc->ct);
 }
 
 static void guc_stop_communication(struct intel_guc *guc)
 {
-	struct drm_i915_private *i915 = guc_to_i915(guc);
-
-	if (HAS_GUC_CT(i915))
-		intel_guc_ct_stop(&guc->ct);
+	intel_guc_ct_stop(&guc->ct);
 
 	guc->send = intel_guc_send_nop;
 	guc->handler = intel_guc_to_host_event_handler_nop;
@@ -260,10 +250,7 @@ static void guc_stop_communication(struct intel_guc *guc)
 static void guc_disable_communication(struct intel_guc *guc)
 {
-	struct drm_i915_private *i915 = guc_to_i915(guc);
-
-	if (HAS_GUC_CT(i915))
-		intel_guc_ct_disable(&guc->ct);
+	intel_guc_ct_disable(&guc->ct);
 
 	guc_disable_interrupts(guc);