drm/i915: Extract general GT interrupt handlers

i915_irq.c is large. It serves as the central dispatch and handler for
all of our device interrupts. Let's break it up by pulling out the GT
interrupt handlers.

Based on a patch by Chris Wilson.

Signed-off-by: Andi Shyti <andi.shyti@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190811210633.18417-1-chris@chris-wilson.co.uk
Andi Shyti 2019-08-11 22:06:33 +01:00 committed by Chris Wilson
parent d762043f7a
commit cf1c97dcb9
10 changed files with 593 additions and 551 deletions
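
For orientation: after this patch, i915_irq.c keeps only the top-level interrupt dispatch, while everything behind the GT master-control bits moves to the new gt/intel_gt_irq.c. A minimal sketch of the resulting gen11 flow (the master-control helpers here are paraphrased for illustration, not taken from this diff):

/* sketch: the top-level handler delegates all GT decoding to gt/intel_gt_irq.c */
static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *i915 = arg;
	u32 master_ctl;

	master_ctl = read_and_disable_master_irq(i915);	/* paraphrased helper */
	gen11_gt_irq_handler(&i915->gt, master_ctl);	/* new GT entry point */
	/* display and GU_MISC handling stays behind in i915_irq.c */
	reenable_master_irq(i915);			/* paraphrased helper */
	return IRQ_HANDLED;
}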

drivers/gpu/drm/i915/Makefile

@@ -80,6 +80,7 @@ gt-y += \
gt/intel_engine_pm.o \
gt/intel_engine_user.o \
gt/intel_gt.o \
+gt/intel_gt_irq.o \
gt/intel_gt_pm.o \
gt/intel_gt_pm_irq.o \
gt/intel_hangcheck.o \

drivers/gpu/drm/i915/gt/intel_breadcrumbs.c

@@ -35,9 +35,9 @@ static void irq_enable(struct intel_engine_cs *engine)
return;
/* Caller disables interrupts */
-spin_lock(&engine->i915->irq_lock);
+spin_lock(&engine->gt->irq_lock);
engine->irq_enable(engine);
-spin_unlock(&engine->i915->irq_lock);
+spin_unlock(&engine->gt->irq_lock);
}
static void irq_disable(struct intel_engine_cs *engine)
@@ -46,9 +46,9 @@ static void irq_disable(struct intel_engine_cs *engine)
return;
/* Caller disables interrupts */
-spin_lock(&engine->i915->irq_lock);
+spin_lock(&engine->gt->irq_lock);
engine->irq_disable(engine);
-spin_unlock(&engine->i915->irq_lock);
+spin_unlock(&engine->gt->irq_lock);
}
static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)

drivers/gpu/drm/i915/gt/intel_gt_irq.c

@@ -0,0 +1,455 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2019 Intel Corporation
*/
#include <linux/sched/clock.h>
#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
#include "intel_uncore.h"
static void guc_irq_handler(struct intel_guc *guc, u16 iir)
{
if (iir & GUC_INTR_GUC2HOST)
intel_guc_to_host_event_handler(guc);
}
static void
cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
{
bool tasklet = false;
if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
tasklet = true;
if (iir & GT_RENDER_USER_INTERRUPT) {
intel_engine_breadcrumbs_irq(engine);
tasklet |= intel_engine_needs_breadcrumb_tasklet(engine);
}
if (tasklet)
tasklet_hi_schedule(&engine->execlists.tasklet);
}
static u32
gen11_gt_engine_identity(struct intel_gt *gt,
const unsigned int bank, const unsigned int bit)
{
void __iomem * const regs = gt->uncore->regs;
u32 timeout_ts;
u32 ident;
lockdep_assert_held(&gt->irq_lock);
raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));
/*
* NB: Specs do not specify how long to spin wait,
* so we do ~100us as an educated guess.
*/
timeout_ts = (local_clock() >> 10) + 100;
do {
ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
} while (!(ident & GEN11_INTR_DATA_VALID) &&
!time_after32(local_clock() >> 10, timeout_ts));
if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
bank, bit, ident);
return 0;
}
raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
GEN11_INTR_DATA_VALID);
return ident;
}
static void
gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
const u16 iir)
{
if (instance == OTHER_GUC_INSTANCE)
return guc_irq_handler(&gt->uc.guc, iir);
if (instance == OTHER_GTPM_INSTANCE)
return gen11_rps_irq_handler(gt, iir);
WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
instance, iir);
}
static void
gen11_engine_irq_handler(struct intel_gt *gt, const u8 class,
const u8 instance, const u16 iir)
{
struct intel_engine_cs *engine;
if (instance <= MAX_ENGINE_INSTANCE)
engine = gt->engine_class[class][instance];
else
engine = NULL;
if (likely(engine))
return cs_irq_handler(engine, iir);
WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
class, instance);
}
static void
gen11_gt_identity_handler(struct intel_gt *gt, const u32 identity)
{
const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
const u16 intr = GEN11_INTR_ENGINE_INTR(identity);
if (unlikely(!intr))
return;
if (class <= COPY_ENGINE_CLASS)
return gen11_engine_irq_handler(gt, class, instance, intr);
if (class == OTHER_CLASS)
return gen11_other_irq_handler(gt, instance, intr);
WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
class, instance, intr);
}
static void
gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank)
{
void __iomem * const regs = gt->uncore->regs;
unsigned long intr_dw;
unsigned int bit;
lockdep_assert_held(&gt->irq_lock);
intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
for_each_set_bit(bit, &intr_dw, 32) {
const u32 ident = gen11_gt_engine_identity(gt, bank, bit);
gen11_gt_identity_handler(gt, ident);
}
/* Clear must be after shared has been served for engine */
raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
}
void gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl)
{
unsigned int bank;
spin_lock(&gt->irq_lock);
for (bank = 0; bank < 2; bank++) {
if (master_ctl & GEN11_GT_DW_IRQ(bank))
gen11_gt_bank_handler(gt, bank);
}
spin_unlock(&gt->irq_lock);
}
bool gen11_gt_reset_one_iir(struct intel_gt *gt,
const unsigned int bank, const unsigned int bit)
{
void __iomem * const regs = gt->uncore->regs;
u32 dw;
lockdep_assert_held(&gt->irq_lock);
dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
if (dw & BIT(bit)) {
/*
* According to the BSpec, DW_IIR bits cannot be cleared without
* first servicing the Selector & Shared IIR registers.
*/
gen11_gt_engine_identity(gt, bank, bit);
/*
* We locked GT INT DW by reading it. If we want to (try
* to) recover from this successfully, we need to clear
* our bit, otherwise we are locking the register for
* everybody.
*/
raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));
return true;
}
return false;
}
void gen11_gt_irq_reset(struct intel_gt *gt)
{
struct intel_uncore *uncore = gt->uncore;
/* Disable RCS, BCS, VCS and VECS class engines. */
intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0);
intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0);
/* Restore masks irqs on RCS, BCS, VCS and VECS engines. */
intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~0);
intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~0);
intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~0);
intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~0);
intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~0);
intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0);
}
void gen11_gt_irq_postinstall(struct intel_gt *gt)
{
const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT;
struct intel_uncore *uncore = gt->uncore;
const u32 dmask = irqs << 16 | irqs;
const u32 smask = irqs << 16;
BUILD_BUG_ON(irqs & 0xffff0000);
/* Enable RCS, BCS, VCS and VECS class interrupts. */
intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask);
intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask);
/* Unmask irqs on RCS, BCS, VCS and VECS engines. */
intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~smask);
intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~smask);
intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~dmask);
intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~dmask);
intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~dmask);
/*
* RPS interrupts will get enabled/disabled on demand when RPS itself
* is enabled/disabled.
*/
gt->pm_ier = 0x0;
gt->pm_imr = ~gt->pm_ier;
intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
/* Same thing for GuC interrupts */
intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0);
}
void gen5_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
{
if (gt_iir & GT_RENDER_USER_INTERRUPT)
intel_engine_breadcrumbs_irq(gt->engine_class[RENDER_CLASS][0]);
if (gt_iir & ILK_BSD_USER_INTERRUPT)
intel_engine_breadcrumbs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0]);
}
static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir)
{
if (!HAS_L3_DPF(gt->i915))
return;
spin_lock(&gt->irq_lock);
gen5_gt_disable_irq(gt, GT_PARITY_ERROR(gt->i915));
spin_unlock(&gt->irq_lock);
if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
gt->i915->l3_parity.which_slice |= 1 << 1;
if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
gt->i915->l3_parity.which_slice |= 1 << 0;
schedule_work(&gt->i915->l3_parity.error_work);
}
void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
{
if (gt_iir & GT_RENDER_USER_INTERRUPT)
intel_engine_breadcrumbs_irq(gt->engine_class[RENDER_CLASS][0]);
if (gt_iir & GT_BSD_USER_INTERRUPT)
intel_engine_breadcrumbs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0]);
if (gt_iir & GT_BLT_USER_INTERRUPT)
intel_engine_breadcrumbs_irq(gt->engine_class[COPY_ENGINE_CLASS][0]);
if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
GT_BSD_CS_ERROR_INTERRUPT |
GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
if (gt_iir & GT_PARITY_ERROR(gt->i915))
gen7_parity_error_irq_handler(gt, gt_iir);
}
void gen8_gt_irq_ack(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4])
{
void __iomem * const regs = gt->uncore->regs;
if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0));
if (likely(gt_iir[0]))
raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
}
if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
if (likely(gt_iir[1]))
raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
}
if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2));
if (likely(gt_iir[2]))
raw_reg_write(regs, GEN8_GT_IIR(2), gt_iir[2]);
}
if (master_ctl & GEN8_GT_VECS_IRQ) {
gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3));
if (likely(gt_iir[3]))
raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]);
}
}
void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4])
{
if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
cs_irq_handler(gt->engine_class[RENDER_CLASS][0],
gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
cs_irq_handler(gt->engine_class[COPY_ENGINE_CLASS][0],
gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
}
if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][0],
gt_iir[1] >> GEN8_VCS0_IRQ_SHIFT);
cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][1],
gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
}
if (master_ctl & GEN8_GT_VECS_IRQ) {
cs_irq_handler(gt->engine_class[VIDEO_ENHANCEMENT_CLASS][0],
gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
}
if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
gen6_rps_irq_handler(gt->i915, gt_iir[2]);
guc_irq_handler(&gt->uc.guc, gt_iir[2] >> 16);
}
}
void gen8_gt_irq_reset(struct intel_gt *gt)
{
struct intel_uncore *uncore = gt->uncore;
GEN8_IRQ_RESET_NDX(uncore, GT, 0);
GEN8_IRQ_RESET_NDX(uncore, GT, 1);
GEN8_IRQ_RESET_NDX(uncore, GT, 2);
GEN8_IRQ_RESET_NDX(uncore, GT, 3);
}
void gen8_gt_irq_postinstall(struct intel_gt *gt)
{
struct intel_uncore *uncore = gt->uncore;
/* These are interrupts we'll toggle with the ring mask register */
u32 gt_interrupts[] = {
(GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT),
(GT_RENDER_USER_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT),
0,
(GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)
};
gt->pm_ier = 0x0;
gt->pm_imr = ~gt->pm_ier;
GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
/*
* RPS interrupts will get enabled/disabled on demand when RPS itself
* is enabled/disabled. Same will be the case for GuC interrupts.
*/
GEN8_IRQ_INIT_NDX(uncore, GT, 2, gt->pm_imr, gt->pm_ier);
GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}
static void gen5_gt_update_irq(struct intel_gt *gt,
u32 interrupt_mask,
u32 enabled_irq_mask)
{
lockdep_assert_held(&gt->irq_lock);
GEM_BUG_ON(enabled_irq_mask & ~interrupt_mask);
gt->gt_imr &= ~interrupt_mask;
gt->gt_imr |= (~enabled_irq_mask & interrupt_mask);
intel_uncore_write(gt->uncore, GTIMR, gt->gt_imr);
}
void gen5_gt_enable_irq(struct intel_gt *gt, u32 mask)
{
gen5_gt_update_irq(gt, mask, mask);
intel_uncore_posting_read_fw(gt->uncore, GTIMR);
}
void gen5_gt_disable_irq(struct intel_gt *gt, u32 mask)
{
gen5_gt_update_irq(gt, mask, 0);
}
void gen5_gt_irq_reset(struct intel_gt *gt)
{
struct intel_uncore *uncore = gt->uncore;
GEN3_IRQ_RESET(uncore, GT);
if (INTEL_GEN(gt->i915) >= 6)
GEN3_IRQ_RESET(uncore, GEN6_PM);
}
void gen5_gt_irq_postinstall(struct intel_gt *gt)
{
struct intel_uncore *uncore = gt->uncore;
u32 pm_irqs = 0;
u32 gt_irqs = 0;
gt->gt_imr = ~0;
if (HAS_L3_DPF(gt->i915)) {
/* L3 parity interrupt is always unmasked. */
gt->gt_imr = ~GT_PARITY_ERROR(gt->i915);
gt_irqs |= GT_PARITY_ERROR(gt->i915);
}
gt_irqs |= GT_RENDER_USER_INTERRUPT;
if (IS_GEN(gt->i915, 5))
gt_irqs |= ILK_BSD_USER_INTERRUPT;
else
gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
GEN3_IRQ_INIT(uncore, GT, gt->gt_imr, gt_irqs);
if (INTEL_GEN(gt->i915) >= 6) {
/*
* RPS interrupts will get enabled/disabled on demand when RPS
* itself is enabled/disabled.
*/
if (HAS_ENGINE(gt->i915, VECS0)) {
pm_irqs |= PM_VEBOX_USER_INTERRUPT;
gt->pm_ier |= PM_VEBOX_USER_INTERRUPT;
}
gt->pm_imr = 0xffffffff;
GEN3_IRQ_INIT(uncore, GEN6_PM, gt->pm_imr, pm_irqs);
}
}
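
Two usage notes on the file above. The gen11 decode is stateful: reading GT_INTR_DW locks the register, so each pending bit must be selected and acked via the identity register before the DW bit is cleared. Paths outside the interrupt handler drain stale bits with gen11_gt_reset_one_iir() under gt->irq_lock, as the RPS reset code later in this patch does:

/* sketch: draining a stale GTPM bit before reprogramming RPS state */
spin_lock_irq(&gt->irq_lock);
while (gen11_gt_reset_one_iir(gt, 0, GEN11_GTPM))
	; /* each pass services the Selector/Shared IIRs, then clears the DW bit */
spin_unlock_irq(&gt->irq_lock);

The gen8 path keeps its two-phase shape: gen8_gt_irq_ack() snapshots and clears the four IIR banks while the master interrupt is disabled, and gen8_gt_irq_handler() processes the snapshot afterwards, as in gen8_irq_handler() below:

gen8_gt_irq_ack(&dev_priv->gt, master_ctl, gt_iir);
/* ... master interrupt re-enabled ... */
gen8_gt_irq_handler(&dev_priv->gt, master_ctl, gt_iir);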

drivers/gpu/drm/i915/gt/intel_gt_irq.h

@@ -0,0 +1,44 @@
/*
* SPDX-License-Identifier: MIT
*
* Copyright © 2019 Intel Corporation
*/
#ifndef INTEL_GT_IRQ_H
#define INTEL_GT_IRQ_H
#include <linux/types.h>
struct intel_gt;
#define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
GEN8_GT_BCS_IRQ | \
GEN8_GT_VCS0_IRQ | \
GEN8_GT_VCS1_IRQ | \
GEN8_GT_VECS_IRQ | \
GEN8_GT_PM_IRQ | \
GEN8_GT_GUC_IRQ)
void gen11_gt_irq_reset(struct intel_gt *gt);
void gen11_gt_irq_postinstall(struct intel_gt *gt);
void gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl);
bool gen11_gt_reset_one_iir(struct intel_gt *gt,
const unsigned int bank,
const unsigned int bit);
void gen5_gt_irq_handler(struct intel_gt *gt, u32 gt_iir);
void gen5_gt_irq_postinstall(struct intel_gt *gt);
void gen5_gt_irq_reset(struct intel_gt *gt);
void gen5_gt_disable_irq(struct intel_gt *gt, u32 mask);
void gen5_gt_enable_irq(struct intel_gt *gt, u32 mask);
void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir);
void gen8_gt_irq_ack(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4]);
void gen8_gt_irq_reset(struct intel_gt *gt);
void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4]);
void gen8_gt_irq_postinstall(struct intel_gt *gt);
#endif /* INTEL_GT_IRQ_H */
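
All of these entry points take a struct intel_gt, and the mask updaters expect the caller to hold gt->irq_lock (gen5_gt_update_irq() asserts it). A typical call site, modelled on the parity-error paths elsewhere in this patch:

/* sketch: mask the L3 parity interrupt from the irq handler... */
spin_lock(&gt->irq_lock);
gen5_gt_disable_irq(gt, GT_PARITY_ERROR(gt->i915));
spin_unlock(&gt->irq_lock);

/* ...and unmask it again later, from process context */
spin_lock_irq(&gt->irq_lock);
gen5_gt_enable_irq(gt, GT_PARITY_ERROR(gt->i915));
spin_unlock_irq(&gt->irq_lock);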

drivers/gpu/drm/i915/gt/intel_gt_pm_irq.c

@@ -7,6 +7,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_gt.h"
+#include "intel_gt_irq.h"
#include "intel_gt_pm_irq.h"
static void write_pm_imr(struct intel_gt *gt)

drivers/gpu/drm/i915/gt/intel_gt_types.h

@@ -74,6 +74,7 @@ struct intel_gt {
struct i915_vma *scratch;
spinlock_t irq_lock;
+u32 gt_imr;
u32 pm_ier;
u32 pm_imr;
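
The new gt_imr field caches the GTIMR register alongside the existing pm_ier/pm_imr pair. In the IMR registers a set bit means the interrupt is masked, hence the pm_imr = ~pm_ier idiom in the postinstall paths. For reference, the update arithmetic gen5_gt_update_irq() applies to the cached value:

/* interrupt_mask = bits being updated; enabled_irq_mask = subset to unmask */
gt->gt_imr &= ~interrupt_mask;				/* clear the updated field */
gt->gt_imr |= ~enabled_irq_mask & interrupt_mask;	/* re-mask the disabled bits */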

drivers/gpu/drm/i915/gt/intel_ringbuffer.c

@@ -37,6 +37,7 @@
#include "i915_trace.h"
#include "intel_context.h"
#include "intel_gt.h"
+#include "intel_gt_irq.h"
#include "intel_gt_pm_irq.h"
#include "intel_reset.h"
#include "intel_workarounds.h"
@@ -983,13 +984,13 @@ static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
static void
gen5_irq_enable(struct intel_engine_cs *engine)
{
-gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
+gen5_gt_enable_irq(engine->gt, engine->irq_enable_mask);
}
static void
gen5_irq_disable(struct intel_engine_cs *engine)
{
-gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
+gen5_gt_disable_irq(engine->gt, engine->irq_enable_mask);
}
static void
@@ -1050,14 +1051,14 @@ gen6_irq_enable(struct intel_engine_cs *engine)
/* Flush/delay to ensure the RING_IMR is active before the GT IMR */
ENGINE_POSTING_READ(engine, RING_IMR);
-gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
+gen5_gt_enable_irq(engine->gt, engine->irq_enable_mask);
}
static void
gen6_irq_disable(struct intel_engine_cs *engine)
{
ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
-gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
+gen5_gt_disable_irq(engine->gt, engine->irq_enable_mask);
}
static void

drivers/gpu/drm/i915/i915_drv.h

@@ -1427,7 +1427,6 @@ struct drm_i915_private {
u32 irq_mask;
u32 de_irq_mask[I915_MAX_PIPES];
};
-u32 gt_irq_mask;
u32 pm_rps_events;
u32 pipestat_irq_mask[I915_MAX_PIPES];

drivers/gpu/drm/i915/i915_irq.c

@@ -44,6 +44,7 @@
#include "display/intel_psr.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "i915_drv.h"
@@ -174,8 +175,8 @@ static const u32 hpd_tgp[HPD_NUM_PINS] = {
[HPD_PORT_I] = SDE_TC6_HOTPLUG_TGP,
};
-static void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
-i915_reg_t iir, i915_reg_t ier)
+void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
+i915_reg_t iir, i915_reg_t ier)
{
intel_uncore_write(uncore, imr, 0xffffffff);
intel_uncore_posting_read(uncore, imr);
@@ -189,7 +190,7 @@ static void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
intel_uncore_posting_read(uncore, iir);
}
-static void gen2_irq_reset(struct intel_uncore *uncore)
+void gen2_irq_reset(struct intel_uncore *uncore)
{
intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
intel_uncore_posting_read16(uncore, GEN2_IMR);
@@ -203,19 +204,6 @@ static void gen2_irq_reset(struct intel_uncore *uncore)
intel_uncore_posting_read16(uncore, GEN2_IIR);
}
-#define GEN8_IRQ_RESET_NDX(uncore, type, which) \
-({ \
-unsigned int which_ = which; \
-gen3_irq_reset((uncore), GEN8_##type##_IMR(which_), \
-GEN8_##type##_IIR(which_), GEN8_##type##_IER(which_)); \
-})
-#define GEN3_IRQ_RESET(uncore, type) \
-gen3_irq_reset((uncore), type##IMR, type##IIR, type##IER)
-#define GEN2_IRQ_RESET(uncore) \
-gen2_irq_reset(uncore)
/*
* We should clear IMR at preinstall/uninstall, and just check at postinstall.
*/
@@ -249,10 +237,10 @@ static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
intel_uncore_posting_read16(uncore, GEN2_IIR);
}
-static void gen3_irq_init(struct intel_uncore *uncore,
-i915_reg_t imr, u32 imr_val,
-i915_reg_t ier, u32 ier_val,
-i915_reg_t iir)
+void gen3_irq_init(struct intel_uncore *uncore,
+i915_reg_t imr, u32 imr_val,
+i915_reg_t ier, u32 ier_val,
+i915_reg_t iir)
{
gen3_assert_iir_is_zero(uncore, iir);
@@ -261,8 +249,8 @@ static void gen3_irq_init(struct intel_uncore *uncore,
intel_uncore_posting_read(uncore, imr);
}
-static void gen2_irq_init(struct intel_uncore *uncore,
-u32 imr_val, u32 ier_val)
+void gen2_irq_init(struct intel_uncore *uncore,
+u32 imr_val, u32 ier_val)
{
gen2_assert_iir_is_zero(uncore);
@@ -271,27 +259,6 @@ static void gen2_irq_init(struct intel_uncore *uncore,
intel_uncore_posting_read16(uncore, GEN2_IMR);
}
-#define GEN8_IRQ_INIT_NDX(uncore, type, which, imr_val, ier_val) \
-({ \
-unsigned int which_ = which; \
-gen3_irq_init((uncore), \
-GEN8_##type##_IMR(which_), imr_val, \
-GEN8_##type##_IER(which_), ier_val, \
-GEN8_##type##_IIR(which_)); \
-})
-#define GEN3_IRQ_INIT(uncore, type, imr_val, ier_val) \
-gen3_irq_init((uncore), \
-type##IMR, imr_val, \
-type##IER, ier_val, \
-type##IIR)
-#define GEN2_IRQ_INIT(uncore, imr_val, ier_val) \
-gen2_irq_init((uncore), imr_val, ier_val)
-static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
-static void guc_irq_handler(struct intel_guc *guc, u16 guc_iir);
/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
@@ -330,41 +297,6 @@ void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
spin_unlock_irq(&dev_priv->irq_lock);
}
-static u32
-gen11_gt_engine_identity(struct intel_gt *gt,
-const unsigned int bank, const unsigned int bit);
-static bool gen11_reset_one_iir(struct intel_gt *gt,
-const unsigned int bank,
-const unsigned int bit)
-{
-void __iomem * const regs = gt->uncore->regs;
-u32 dw;
-lockdep_assert_held(&gt->i915->irq_lock);
-dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
-if (dw & BIT(bit)) {
-/*
-* According to the BSpec, DW_IIR bits cannot be cleared without
-* first servicing the Selector & Shared IIR registers.
-*/
-gen11_gt_engine_identity(gt, bank, bit);
-/*
-* We locked GT INT DW by reading it. If we want to (try
-* to) recover from this succesfully, we need to clear
-* our bit, otherwise we are locking the register for
-* everybody.
-*/
-raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));
-return true;
-}
-return false;
-}
/**
* ilk_update_display_irq - update DEIMR
* @dev_priv: driver private
@@ -395,39 +327,6 @@ void ilk_update_display_irq(struct drm_i915_private *dev_priv,
}
}
-/**
-* ilk_update_gt_irq - update GTIMR
-* @dev_priv: driver private
-* @interrupt_mask: mask of interrupt bits to update
-* @enabled_irq_mask: mask of interrupt bits to enable
-*/
-static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
-u32 interrupt_mask,
-u32 enabled_irq_mask)
-{
-lockdep_assert_held(&dev_priv->irq_lock);
-WARN_ON(enabled_irq_mask & ~interrupt_mask);
-if (WARN_ON(!intel_irqs_enabled(dev_priv)))
-return;
-dev_priv->gt_irq_mask &= ~interrupt_mask;
-dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
-I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-}
-void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
-{
-ilk_update_gt_irq(dev_priv, mask, mask);
-intel_uncore_posting_read_fw(&dev_priv->uncore, GTIMR);
-}
-void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask)
-{
-ilk_update_gt_irq(dev_priv, mask, 0);
-}
static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
{
WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11);
@@ -441,7 +340,7 @@ void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
spin_lock_irq(&gt->irq_lock);
-while (gen11_reset_one_iir(gt, 0, GEN11_GTPM))
+while (gen11_gt_reset_one_iir(gt, 0, GEN11_GTPM))
;
dev_priv->gt_pm.rps.pm_iir = 0;
@@ -471,7 +370,7 @@ void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
WARN_ON_ONCE(rps->pm_iir);
if (INTEL_GEN(dev_priv) >= 11)
-WARN_ON_ONCE(gen11_reset_one_iir(gt, 0, GEN11_GTPM));
+WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GTPM));
else
WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
@@ -566,7 +465,7 @@ void gen11_reset_guc_interrupts(struct intel_guc *guc)
struct intel_gt *gt = guc_to_gt(guc);
spin_lock_irq(&gt->irq_lock);
-gen11_reset_one_iir(gt, 0, GEN11_GUC);
+gen11_gt_reset_one_iir(gt, 0, GEN11_GUC);
spin_unlock_irq(&gt->irq_lock);
}
@@ -578,7 +477,7 @@ void gen11_enable_guc_interrupts(struct intel_guc *guc)
if (!guc->interrupts.enabled) {
u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);
-WARN_ON_ONCE(gen11_reset_one_iir(gt, 0, GEN11_GUC));
+WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC));
intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, events);
intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~events);
guc->interrupts.enabled = true;
@@ -1379,6 +1278,7 @@ static void ivybridge_parity_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv), l3_parity.error_work);
+struct intel_gt *gt = &dev_priv->gt;
u32 error_status, row, bank, subbank;
char *parity_event[6];
u32 misccpctl;
@@ -1440,144 +1340,13 @@ static void ivybridge_parity_work(struct work_struct *work)
out:
WARN_ON(dev_priv->l3_parity.which_slice);
-spin_lock_irq(&dev_priv->irq_lock);
-gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
-spin_unlock_irq(&dev_priv->irq_lock);
+spin_lock_irq(&gt->irq_lock);
+gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
+spin_unlock_irq(&gt->irq_lock);
mutex_unlock(&dev_priv->drm.struct_mutex);
}
-static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
-u32 iir)
-{
-if (!HAS_L3_DPF(dev_priv))
-return;
-spin_lock(&dev_priv->irq_lock);
-gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
-spin_unlock(&dev_priv->irq_lock);
-iir &= GT_PARITY_ERROR(dev_priv);
-if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
-dev_priv->l3_parity.which_slice |= 1 << 1;
-if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
-dev_priv->l3_parity.which_slice |= 1 << 0;
-queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
-}
-static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
-u32 gt_iir)
-{
-if (gt_iir & GT_RENDER_USER_INTERRUPT)
-intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
-if (gt_iir & ILK_BSD_USER_INTERRUPT)
-intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
-}
-static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
-u32 gt_iir)
-{
-if (gt_iir & GT_RENDER_USER_INTERRUPT)
-intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
-if (gt_iir & GT_BSD_USER_INTERRUPT)
-intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
-if (gt_iir & GT_BLT_USER_INTERRUPT)
-intel_engine_breadcrumbs_irq(dev_priv->engine[BCS0]);
-if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
-GT_BSD_CS_ERROR_INTERRUPT |
-GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
-DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
-if (gt_iir & GT_PARITY_ERROR(dev_priv))
-ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
-}
-static void
-gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
-{
-bool tasklet = false;
-if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
-tasklet = true;
-if (iir & GT_RENDER_USER_INTERRUPT) {
-intel_engine_breadcrumbs_irq(engine);
-tasklet |= intel_engine_needs_breadcrumb_tasklet(engine);
-}
-if (tasklet)
-tasklet_hi_schedule(&engine->execlists.tasklet);
-}
-static void gen8_gt_irq_ack(struct drm_i915_private *i915,
-u32 master_ctl, u32 gt_iir[4])
-{
-void __iomem * const regs = i915->uncore.regs;
-#define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
-GEN8_GT_BCS_IRQ | \
-GEN8_GT_VCS0_IRQ | \
-GEN8_GT_VCS1_IRQ | \
-GEN8_GT_VECS_IRQ | \
-GEN8_GT_PM_IRQ | \
-GEN8_GT_GUC_IRQ)
-if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
-gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0));
-if (likely(gt_iir[0]))
-raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
-}
-if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
-gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
-if (likely(gt_iir[1]))
-raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
-}
-if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
-gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2));
-if (likely(gt_iir[2]))
-raw_reg_write(regs, GEN8_GT_IIR(2), gt_iir[2]);
-}
-if (master_ctl & GEN8_GT_VECS_IRQ) {
-gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3));
-if (likely(gt_iir[3]))
-raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]);
-}
-}
-static void gen8_gt_irq_handler(struct drm_i915_private *i915,
-u32 master_ctl, u32 gt_iir[4])
-{
-if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
-gen8_cs_irq_handler(i915->engine[RCS0],
-gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
-gen8_cs_irq_handler(i915->engine[BCS0],
-gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
-}
-if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
-gen8_cs_irq_handler(i915->engine[VCS0],
-gt_iir[1] >> GEN8_VCS0_IRQ_SHIFT);
-gen8_cs_irq_handler(i915->engine[VCS1],
-gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
-}
-if (master_ctl & GEN8_GT_VECS_IRQ) {
-gen8_cs_irq_handler(i915->engine[VECS0],
-gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
-}
-if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
-gen6_rps_irq_handler(i915, gt_iir[2]);
-guc_irq_handler(&i915->gt.uc.guc, gt_iir[2] >> 16);
-}
-}
static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
switch (pin) {
@@ -1886,7 +1655,7 @@ static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
/* The RPS events need forcewake, so we add them to a work queue and mask their
* IMR bits until the work is done. Other interrupts can be processed without
* the work queue. */
-static void gen11_rps_irq_handler(struct intel_gt *gt, u32 pm_iir)
+void gen11_rps_irq_handler(struct intel_gt *gt, u32 pm_iir)
{
struct drm_i915_private *i915 = gt->i915;
struct intel_rps *rps = &i915->gt_pm.rps;
@@ -1906,7 +1675,7 @@ static void gen11_rps_irq_handler(struct intel_gt *gt, u32 pm_iir)
schedule_work(&rps->work);
}
-static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
+void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
struct intel_rps *rps = &dev_priv->gt_pm.rps;
struct intel_gt *gt = &dev_priv->gt;
@@ -1931,12 +1700,6 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
}
-static void guc_irq_handler(struct intel_guc *guc, u16 iir)
-{
-if (iir & GUC_INTR_GUC2HOST)
-intel_guc_to_host_event_handler(guc);
-}
static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
enum pipe pipe;
@@ -2244,7 +2007,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
if (gt_iir)
-snb_gt_irq_handler(dev_priv, gt_iir);
+gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
if (pm_iir)
gen6_rps_irq_handler(dev_priv, pm_iir);
@@ -2302,7 +2065,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
ier = I915_READ(VLV_IER);
I915_WRITE(VLV_IER, 0);
-gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
+gen8_gt_irq_ack(&dev_priv->gt, master_ctl, gt_iir);
if (iir & I915_DISPLAY_PORT_INTERRUPT)
hotplug_status = i9xx_hpd_irq_ack(dev_priv);
@@ -2326,7 +2089,7 @@
I915_WRITE(VLV_IER, ier);
I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
-gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
+gen8_gt_irq_handler(&dev_priv->gt, master_ctl, gt_iir);
if (hotplug_status)
i9xx_hpd_irq_handler(dev_priv, hotplug_status);
@@ -2756,9 +2519,9 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
I915_WRITE(GTIIR, gt_iir);
ret = IRQ_HANDLED;
if (INTEL_GEN(dev_priv) >= 6)
-snb_gt_irq_handler(dev_priv, gt_iir);
+gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
else
-ilk_gt_irq_handler(dev_priv, gt_iir);
+gen5_gt_irq_handler(&dev_priv->gt, gt_iir);
}
de_iir = I915_READ(DEIIR);
@@ -3069,7 +2832,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
}
/* Find, clear, then process each source of interrupt */
-gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
+gen8_gt_irq_ack(&dev_priv->gt, master_ctl, gt_iir);
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
if (master_ctl & ~GEN8_GT_IRQS) {
@@ -3080,134 +2843,11 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
gen8_master_intr_enable(regs);
-gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir);
+gen8_gt_irq_handler(&dev_priv->gt, master_ctl, gt_iir);
return IRQ_HANDLED;
}
-static u32
-gen11_gt_engine_identity(struct intel_gt *gt,
-const unsigned int bank, const unsigned int bit)
-{
-void __iomem * const regs = gt->uncore->regs;
-u32 timeout_ts;
-u32 ident;
-lockdep_assert_held(&gt->i915->irq_lock);
-raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));
-/*
-* NB: Specs do not specify how long to spin wait,
-* so we do ~100us as an educated guess.
-*/
-timeout_ts = (local_clock() >> 10) + 100;
-do {
-ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
-} while (!(ident & GEN11_INTR_DATA_VALID) &&
-!time_after32(local_clock() >> 10, timeout_ts));
-if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
-DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
-bank, bit, ident);
-return 0;
-}
-raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
-GEN11_INTR_DATA_VALID);
-return ident;
-}
-static void
-gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
-const u16 iir)
-{
-if (instance == OTHER_GUC_INSTANCE)
-return guc_irq_handler(&gt->uc.guc, iir);
-if (instance == OTHER_GTPM_INSTANCE)
-return gen11_rps_irq_handler(gt, iir);
-WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
-instance, iir);
-}
-static void
-gen11_engine_irq_handler(struct intel_gt *gt, const u8 class,
-const u8 instance, const u16 iir)
-{
-struct intel_engine_cs *engine;
-if (instance <= MAX_ENGINE_INSTANCE)
-engine = gt->engine_class[class][instance];
-else
-engine = NULL;
-if (likely(engine))
-return gen8_cs_irq_handler(engine, iir);
-WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
-class, instance);
-}
-static void
-gen11_gt_identity_handler(struct intel_gt *gt, const u32 identity)
-{
-const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
-const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
-const u16 intr = GEN11_INTR_ENGINE_INTR(identity);
-if (unlikely(!intr))
-return;
-if (class <= COPY_ENGINE_CLASS)
-return gen11_engine_irq_handler(gt, class, instance, intr);
-if (class == OTHER_CLASS)
-return gen11_other_irq_handler(gt, instance, intr);
-WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
-class, instance, intr);
-}
-static void
-gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank)
-{
-void __iomem * const regs = gt->uncore->regs;
-unsigned long intr_dw;
-unsigned int bit;
-lockdep_assert_held(&gt->i915->irq_lock);
-intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
-for_each_set_bit(bit, &intr_dw, 32) {
-const u32 ident = gen11_gt_engine_identity(gt, bank, bit);
-gen11_gt_identity_handler(gt, ident);
-}
-/* Clear must be after shared has been served for engine */
-raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
-}
-static void
-gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl)
-{
-struct drm_i915_private *i915 = gt->i915;
-unsigned int bank;
-spin_lock(&i915->irq_lock);
-for (bank = 0; bank < 2; bank++) {
-if (master_ctl & GEN11_GT_DW_IRQ(bank))
-gen11_gt_bank_handler(gt, bank);
-}
-spin_unlock(&i915->irq_lock);
-}
static u32
gen11_gu_misc_irq_ack(struct intel_gt *gt, const u32 master_ctl)
{
@@ -3516,15 +3156,6 @@ static void ibx_irq_pre_postinstall(struct drm_i915_private *dev_priv)
POSTING_READ(SDEIER);
}
-static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
-{
-struct intel_uncore *uncore = &dev_priv->uncore;
-GEN3_IRQ_RESET(uncore, GT);
-if (INTEL_GEN(dev_priv) >= 6)
-GEN3_IRQ_RESET(uncore, GEN6_PM);
-}
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
@@ -3589,7 +3220,7 @@ static void ironlake_irq_reset(struct drm_i915_private *dev_priv)
intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
}
-gen5_gt_irq_reset(dev_priv);
+gen5_gt_irq_reset(&dev_priv->gt);
ibx_irq_reset(dev_priv);
}
@@ -3599,7 +3230,7 @@ static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
I915_WRITE(VLV_MASTER_IER, 0);
POSTING_READ(VLV_MASTER_IER);
-gen5_gt_irq_reset(dev_priv);
+gen5_gt_irq_reset(&dev_priv->gt);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
@@ -3607,16 +3238,6 @@ static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
spin_unlock_irq(&dev_priv->irq_lock);
}
-static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
-{
-struct intel_uncore *uncore = &dev_priv->uncore;
-GEN8_IRQ_RESET_NDX(uncore, GT, 0);
-GEN8_IRQ_RESET_NDX(uncore, GT, 1);
-GEN8_IRQ_RESET_NDX(uncore, GT, 2);
-GEN8_IRQ_RESET_NDX(uncore, GT, 3);
-}
static void gen8_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
@@ -3624,7 +3245,7 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv)
gen8_master_intr_disable(dev_priv->uncore.regs);
-gen8_gt_irq_reset(dev_priv);
+gen8_gt_irq_reset(&dev_priv->gt);
intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
@@ -3642,27 +3263,6 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv)
ibx_irq_reset(dev_priv);
}
-static void gen11_gt_irq_reset(struct intel_gt *gt)
-{
-struct intel_uncore *uncore = gt->uncore;
-/* Disable RCS, BCS, VCS and VECS class engines. */
-intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0);
-intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0);
-/* Restore masks irqs on RCS, BCS, VCS and VECS engines. */
-intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~0);
-intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~0);
-intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~0);
-intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~0);
-intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~0);
-intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
-intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
-intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
-intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0);
-}
static void gen11_irq_reset(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
@@ -3744,7 +3344,7 @@ static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
I915_WRITE(GEN8_MASTER_IRQ, 0);
POSTING_READ(GEN8_MASTER_IRQ);
-gen8_gt_irq_reset(dev_priv);
+gen8_gt_irq_reset(&dev_priv->gt);
GEN3_IRQ_RESET(uncore, GEN8_PCU_);
@@ -4057,44 +3657,6 @@ static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
spt_hpd_detection_setup(dev_priv);
}
-static void gen5_gt_irq_postinstall(struct drm_i915_private *dev_priv)
-{
-struct intel_uncore *uncore = &dev_priv->uncore;
-u32 pm_irqs, gt_irqs;
-pm_irqs = gt_irqs = 0;
-dev_priv->gt_irq_mask = ~0;
-if (HAS_L3_DPF(dev_priv)) {
-/* L3 parity interrupt is always unmasked. */
-dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv);
-gt_irqs |= GT_PARITY_ERROR(dev_priv);
-}
-gt_irqs |= GT_RENDER_USER_INTERRUPT;
-if (IS_GEN(dev_priv, 5)) {
-gt_irqs |= ILK_BSD_USER_INTERRUPT;
-} else {
-gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
-}
-GEN3_IRQ_INIT(uncore, GT, dev_priv->gt_irq_mask, gt_irqs);
-if (INTEL_GEN(dev_priv) >= 6) {
-/*
-* RPS interrupts will get enabled/disabled on demand when RPS
-* itself is enabled/disabled.
-*/
-if (HAS_ENGINE(dev_priv, VECS0)) {
-pm_irqs |= PM_VEBOX_USER_INTERRUPT;
-dev_priv->gt.pm_ier |= PM_VEBOX_USER_INTERRUPT;
-}
-dev_priv->gt.pm_imr = 0xffffffff;
-GEN3_IRQ_INIT(uncore, GEN6_PM, dev_priv->gt.pm_imr, pm_irqs);
-}
-}
static void ironlake_irq_postinstall(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
@@ -4128,7 +3690,7 @@ static void ironlake_irq_postinstall(struct drm_i915_private *dev_priv)
GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
display_mask | extra_mask);
-gen5_gt_irq_postinstall(dev_priv);
+gen5_gt_irq_postinstall(&dev_priv->gt);
ilk_hpd_detection_setup(dev_priv);
@@ -4177,7 +3739,7 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
{
-gen5_gt_irq_postinstall(dev_priv);
+gen5_gt_irq_postinstall(&dev_priv->gt);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
@@ -4188,41 +3750,6 @@ static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
POSTING_READ(VLV_MASTER_IER);
}
-static void gen8_gt_irq_postinstall(struct drm_i915_private *i915)
-{
-struct intel_gt *gt = &i915->gt;
-struct intel_uncore *uncore = gt->uncore;
-/* These are interrupts we'll toggle with the ring mask register */
-u32 gt_interrupts[] = {
-(GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
-GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
-GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
-GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT),
-(GT_RENDER_USER_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
-GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
-GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
-GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT),
-0,
-(GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
-GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)
-};
-gt->pm_ier = 0x0;
-gt->pm_imr = ~gt->pm_ier;
-GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
-GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
-/*
-* RPS interrupts will get enabled/disabled on demand when RPS itself
-* is enabled/disabled. Same wil be the case for GuC interrupts.
-*/
-GEN8_IRQ_INIT_NDX(uncore, GT, 2, gt->pm_imr, gt->pm_ier);
-GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
-}
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
struct intel_uncore *uncore = &dev_priv->uncore;
@@ -4298,7 +3825,7 @@ static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_pre_postinstall(dev_priv);
-gen8_gt_irq_postinstall(dev_priv);
+gen8_gt_irq_postinstall(&dev_priv->gt);
gen8_de_irq_postinstall(dev_priv);
if (HAS_PCH_SPLIT(dev_priv))
@@ -4307,40 +3834,6 @@ static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
gen8_master_intr_enable(dev_priv->uncore.regs);
}
-static void gen11_gt_irq_postinstall(struct intel_gt *gt)
-{
-const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT;
-struct intel_uncore *uncore = gt->uncore;
-const u32 dmask = irqs << 16 | irqs;
-const u32 smask = irqs << 16;
-BUILD_BUG_ON(irqs & 0xffff0000);
-/* Enable RCS, BCS, VCS and VECS class interrupts. */
-intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask);
-intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask);
-/* Unmask irqs on RCS, BCS, VCS and VECS engines. */
-intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~smask);
-intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~smask);
-intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~dmask);
-intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~dmask);
-intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~dmask);
-/*
-* RPS interrupts will get enabled/disabled on demand when RPS itself
-* is enabled/disabled.
-*/
-gt->pm_ier = 0x0;
-gt->pm_imr = ~gt->pm_ier;
-intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
-intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
-/* Same thing for GuC interrupts */
-intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
-intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0);
-}
static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
{
u32 mask = SDE_GMBUS_ICP;
@@ -4383,7 +3876,7 @@ static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
{
-gen8_gt_irq_postinstall(dev_priv);
+gen8_gt_irq_postinstall(&dev_priv->gt);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)

drivers/gpu/drm/i915/i915_irq.h

@@ -10,6 +10,7 @@
#include <linux/types.h>
#include "display/intel_display.h"
+#include "i915_reg.h"
struct drm_crtc;
struct drm_device;
@@ -19,6 +20,10 @@ struct intel_crtc;
struct intel_crtc;
+struct intel_gt;
struct intel_guc;
+struct intel_uncore;
+void gen11_rps_irq_handler(struct intel_gt *gt, u32 pm_iir);
+void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
void intel_irq_init(struct drm_i915_private *dev_priv);
void intel_irq_fini(struct drm_i915_private *dev_priv);
@@ -127,4 +132,46 @@ void i965_disable_vblank(struct drm_crtc *crtc);
void ilk_disable_vblank(struct drm_crtc *crtc);
void bdw_disable_vblank(struct drm_crtc *crtc);
+void gen2_irq_reset(struct intel_uncore *uncore);
+void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
+i915_reg_t iir, i915_reg_t ier);
+void gen2_irq_init(struct intel_uncore *uncore,
+u32 imr_val, u32 ier_val);
+void gen3_irq_init(struct intel_uncore *uncore,
+i915_reg_t imr, u32 imr_val,
+i915_reg_t ier, u32 ier_val,
+i915_reg_t iir);
+#define GEN8_IRQ_RESET_NDX(uncore, type, which) \
+({ \
+unsigned int which_ = which; \
+gen3_irq_reset((uncore), GEN8_##type##_IMR(which_), \
+GEN8_##type##_IIR(which_), GEN8_##type##_IER(which_)); \
+})
+#define GEN3_IRQ_RESET(uncore, type) \
+gen3_irq_reset((uncore), type##IMR, type##IIR, type##IER)
+#define GEN2_IRQ_RESET(uncore) \
+gen2_irq_reset(uncore)
+#define GEN8_IRQ_INIT_NDX(uncore, type, which, imr_val, ier_val) \
+({ \
+unsigned int which_ = which; \
+gen3_irq_init((uncore), \
+GEN8_##type##_IMR(which_), imr_val, \
+GEN8_##type##_IER(which_), ier_val, \
+GEN8_##type##_IIR(which_)); \
+})
+#define GEN3_IRQ_INIT(uncore, type, imr_val, ier_val) \
+gen3_irq_init((uncore), \
+type##IMR, imr_val, \
+type##IER, ier_val, \
+type##IIR)
+#define GEN2_IRQ_INIT(uncore, imr_val, ier_val) \
+gen2_irq_init((uncore), imr_val, ier_val)
#endif /* __I915_IRQ_H__ */
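
For reference, the NDX macros exported above just parameterise gen3_irq_init()/gen3_irq_reset() over the banked GEN8 register triplets; for example:

/* GEN8_IRQ_RESET_NDX(uncore, GT, 2) expands (roughly) to: */
gen3_irq_reset(uncore, GEN8_GT_IMR(2), GEN8_GT_IIR(2), GEN8_GT_IER(2));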