drm/nouveau/intr: support multiple trees, and explicit interfaces

Turing adds a second top-level interrupt tree in HW, in addition to the
trees available via NV_PMC.  Most of the interrupts we care about are
exposed in both trees, but not all of them, and we have some rather
nasty hacks to route the fault buffer interrupts.

Ampere removes the NV_PMC trees entirely.

Here we add infrastructure to handle all of this more cleanly, and to
provide more explicit control over handlers.
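
As an illustration only (not part of the patch), a subdev that embeds a
"struct nvkm_inth inth" might hook its vector through the new interfaces
roughly as below; the "tree" pointer, the example_* names and the chosen
priority are placeholders, not code from this series:

    /* hypothetical handler for an example_subdev */
    static irqreturn_t
    example_subdev_intr(struct nvkm_inth *inth)
    {
            struct example_subdev *sub =
                    container_of(inth, typeof(*sub), inth);

            /* read and clear subdev-specific status here */
            return IRQ_HANDLED;
    }

    /* at init, against a tree registered earlier with nvkm_intr_add() */
    ret = nvkm_inth_add(tree, NVKM_INTR_SUBDEV, NVKM_INTR_PRIO_NORMAL,
                        &sub->subdev, example_subdev_intr, &sub->inth);
    if (ret == 0)
            nvkm_inth_allow(&sub->inth);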

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Reviewed-by: Lyude Paul <lyude@redhat.com>
commit 3ebd64aa3c (parent 727fd72f24)
Ben Skeggs, 2022-06-01 20:46:52 +10:00
3 changed files with 344 additions and 2 deletions

@@ -63,6 +63,8 @@ struct nvkm_device {
struct list_head subdev;
struct {
struct list_head intr;
struct list_head prio[NVKM_INTR_PRIO_NR];
spinlock_t lock;
int irq;
bool alloc;

@@ -3,10 +3,70 @@
#define __NVKM_INTR_H__
#include <core/os.h>
struct nvkm_device;
struct nvkm_subdev;
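/* Relative priority of a handler; lower values are serviced first by the
 * top-level ISR.
 */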
enum nvkm_intr_prio {
NVKM_INTR_PRIO_VBLANK = 0,
NVKM_INTR_PRIO_NORMAL,
NVKM_INTR_PRIO_NR
};
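/* How callers name an interrupt source: an explicit vector number, or
 * NVKM_INTR_SUBDEV to look the requesting subdev up in the tree's mapping
 * table.
 */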
enum nvkm_intr_type {
NVKM_INTR_SUBDEV = -1, /* lookup vector by requesting subdev, in mapping table. */
NVKM_INTR_VECTOR_0 = 0,
};
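/* One top-level interrupt tree: its HW accessors, subdev->vector mapping
 * table, and per-leaf (32-bit register) status/enable shadows.
 */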
struct nvkm_intr {
const struct nvkm_intr_func {
bool (*pending)(struct nvkm_intr *);
void (*unarm)(struct nvkm_intr *);
void (*rearm)(struct nvkm_intr *);
void (*block)(struct nvkm_intr *, int leaf, u32 mask);
void (*allow)(struct nvkm_intr *, int leaf, u32 mask);
void (*reset)(struct nvkm_intr *, int leaf, u32 mask);
} *func;
const struct nvkm_intr_data {
int type; /* enum nvkm_subdev_type (+ve), enum nvkm_intr_type (-ve) */
int inst;
int leaf;
u32 mask; /* 0-terminated. */
} *data;
struct nvkm_subdev *subdev;
int leaves;
u32 *stat;
u32 *mask;
struct list_head head;
};
void nvkm_intr_ctor(struct nvkm_device *);
void nvkm_intr_dtor(struct nvkm_device *);
int nvkm_intr_install(struct nvkm_device *);
void nvkm_intr_unarm(struct nvkm_device *);
void nvkm_intr_rearm(struct nvkm_device *);
int nvkm_intr_add(const struct nvkm_intr_func *, const struct nvkm_intr_data *,
struct nvkm_subdev *, int leaves, struct nvkm_intr *);
void nvkm_intr_block(struct nvkm_subdev *, enum nvkm_intr_type);
void nvkm_intr_allow(struct nvkm_subdev *, enum nvkm_intr_type);
struct nvkm_inth;
typedef irqreturn_t (*nvkm_inth_func)(struct nvkm_inth *);
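/* A registered handler: the tree/leaf/mask it listens on, its callback, and
 * whether it is currently allowed to execute.
 */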
struct nvkm_inth {
struct nvkm_intr *intr;
int leaf;
u32 mask;
nvkm_inth_func func;
atomic_t allowed;
struct list_head head;
};
int nvkm_inth_add(struct nvkm_intr *, enum nvkm_intr_type, enum nvkm_intr_prio,
struct nvkm_subdev *, nvkm_inth_func, struct nvkm_inth *);
void nvkm_inth_allow(struct nvkm_inth *);
void nvkm_inth_block(struct nvkm_inth *);
#endif

@@ -20,19 +20,147 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <core/intr.h>
#include <core/device.h>
#include <core/subdev.h>
#include <subdev/pci.h>
#include <subdev/top.h>
#include <subdev/mc.h>
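/* Translate an interrupt source (an explicit vector, or NVKM_INTR_SUBDEV to
 * look the requesting subdev up in the tree's mapping table) into a
 * (leaf, mask) position within this tree's status/enable registers.
 */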
static int
nvkm_intr_xlat(struct nvkm_subdev *subdev, struct nvkm_intr *intr,
enum nvkm_intr_type type, int *leaf, u32 *mask)
{
struct nvkm_device *device = subdev->device;
if (type < NVKM_INTR_VECTOR_0) {
if (type == NVKM_INTR_SUBDEV) {
const struct nvkm_intr_data *data = intr->data;
struct nvkm_top_device *tdev;
while (data && data->mask) {
if (data->type == NVKM_SUBDEV_TOP) {
list_for_each_entry(tdev, &device->top->device, head) {
if (tdev->intr >= 0 &&
tdev->type == subdev->type &&
tdev->inst == subdev->inst) {
if (data->mask & BIT(tdev->intr)) {
*leaf = data->leaf;
*mask = BIT(tdev->intr);
return 0;
}
}
}
} else
if (data->type == subdev->type && data->inst == subdev->inst) {
*leaf = data->leaf;
*mask = data->mask;
return 0;
}
data++;
}
} else {
return -ENOSYS;
}
} else {
if (type < intr->leaves * sizeof(*intr->stat) * 8) {
*leaf = type / 32;
*mask = BIT(type % 32);
return 0;
}
}
return -EINVAL;
}
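/* Find the first registered tree that can translate this source for the
 * requesting subdev.
 */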
static struct nvkm_intr *
nvkm_intr_find(struct nvkm_subdev *subdev, enum nvkm_intr_type type, int *leaf, u32 *mask)
{
struct nvkm_intr *intr;
int ret;
list_for_each_entry(intr, &subdev->device->intr.intr, head) {
ret = nvkm_intr_xlat(subdev, intr, type, leaf, mask);
if (ret == 0)
return intr;
}
return NULL;
}
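/* Mark a (leaf, mask) as wanted in the SW shadow and, where the tree provides
 * the hooks, clear then enable those bits in HW.
 */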
static void
nvkm_intr_allow_locked(struct nvkm_intr *intr, int leaf, u32 mask)
{
intr->mask[leaf] |= mask;
if (intr->func->allow) {
if (intr->func->reset)
intr->func->reset(intr, leaf, mask);
intr->func->allow(intr, leaf, mask);
}
}
void
nvkm_intr_allow(struct nvkm_subdev *subdev, enum nvkm_intr_type type)
{
struct nvkm_device *device = subdev->device;
struct nvkm_intr *intr;
unsigned long flags;
int leaf;
u32 mask;
intr = nvkm_intr_find(subdev, type, &leaf, &mask);
if (intr) {
nvkm_debug(intr->subdev, "intr %d/%08x allowed by %s\n", leaf, mask, subdev->name);
spin_lock_irqsave(&device->intr.lock, flags);
nvkm_intr_allow_locked(intr, leaf, mask);
spin_unlock_irqrestore(&device->intr.lock, flags);
}
}
static void
nvkm_intr_block_locked(struct nvkm_intr *intr, int leaf, u32 mask)
{
intr->mask[leaf] &= ~mask;
if (intr->func->block)
intr->func->block(intr, leaf, mask);
}
void
nvkm_intr_block(struct nvkm_subdev *subdev, enum nvkm_intr_type type)
{
struct nvkm_device *device = subdev->device;
struct nvkm_intr *intr;
unsigned long flags;
int leaf;
u32 mask;
intr = nvkm_intr_find(subdev, type, &leaf, &mask);
if (intr) {
nvkm_debug(intr->subdev, "intr %d/%08x blocked by %s\n", leaf, mask, subdev->name);
spin_lock_irqsave(&device->intr.lock, flags);
nvkm_intr_block_locked(intr, leaf, mask);
spin_unlock_irqrestore(&device->intr.lock, flags);
}
}
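/* Enable/disable top-level delivery for every registered tree, and for the
 * legacy NV_PMC path still handled by the MC subdev.
 */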
static void
nvkm_intr_rearm_locked(struct nvkm_device *device)
{
struct nvkm_intr *intr;
list_for_each_entry(intr, &device->intr.intr, head)
intr->func->rearm(intr);
nvkm_mc_intr_rearm(device);
}
static void
nvkm_intr_unarm_locked(struct nvkm_device *device)
{
struct nvkm_intr *intr;
list_for_each_entry(intr, &device->intr.intr, head)
intr->func->unarm(intr);
nvkm_mc_intr_unarm(device);
}
@@ -40,9 +168,13 @@ static irqreturn_t
nvkm_intr(int irq, void *arg)
{
struct nvkm_device *device = arg;
struct nvkm_intr *intr;
struct nvkm_inth *inth;
irqreturn_t ret = IRQ_NONE;
bool pending = false, handled;
int prio, leaf;
/* Disable all top-level interrupt sources, and re-arm MSI interrupts. */
spin_lock(&device->intr.lock);
if (!device->intr.armed)
goto done_unlock;
@@ -50,20 +182,103 @@ nvkm_intr(int irq, void *arg)
nvkm_intr_unarm_locked(device);
nvkm_pci_msi_rearm(device);
/* Fetch pending interrupt masks. */
list_for_each_entry(intr, &device->intr.intr, head) {
if (intr->func->pending(intr))
pending = true;
}
nvkm_mc_intr(device, &handled);
if (handled)
ret = IRQ_HANDLED;
if (!pending)
goto done;
/* Check that GPU is still on the bus by reading NV_PMC_BOOT_0. */
if (WARN_ON(nvkm_rd32(device, 0x000000) == 0xffffffff))
goto done;
/* Execute handlers. */
for (prio = 0; prio < ARRAY_SIZE(device->intr.prio); prio++) {
list_for_each_entry(inth, &device->intr.prio[prio], head) {
struct nvkm_intr *intr = inth->intr;
if (intr->stat[inth->leaf] & inth->mask) {
if (atomic_read(&inth->allowed)) {
if (intr->func->reset)
intr->func->reset(intr, inth->leaf, inth->mask);
if (inth->func(inth) == IRQ_HANDLED)
ret = IRQ_HANDLED;
}
}
}
}
/* Nothing handled? Some debugging/protection from IRQ storms is in order... */
if (ret == IRQ_NONE) {
list_for_each_entry(intr, &device->intr.intr, head) {
for (leaf = 0; leaf < intr->leaves; leaf++) {
if (intr->stat[leaf]) {
nvkm_warn(intr->subdev, "intr%d: %08x\n",
leaf, intr->stat[leaf]);
nvkm_intr_block_locked(intr, leaf, intr->stat[leaf]);
}
}
}
}
done:
/* Re-enable all top-level interrupt sources. */
nvkm_intr_rearm_locked(device);
done_unlock:
spin_unlock(&device->intr.lock);
return ret;
}
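/* Register a top-level interrupt tree with the core, allocating its per-leaf
 * status/enable shadows.  With subdev debugging enabled, every vector defaults
 * to enabled in the SW mask.
 */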
int
nvkm_intr_add(const struct nvkm_intr_func *func, const struct nvkm_intr_data *data,
struct nvkm_subdev *subdev, int leaves, struct nvkm_intr *intr)
{
struct nvkm_device *device = subdev->device;
int i;
intr->func = func;
intr->data = data;
intr->subdev = subdev;
intr->leaves = leaves;
intr->stat = kcalloc(leaves, sizeof(*intr->stat), GFP_KERNEL);
intr->mask = kcalloc(leaves, sizeof(*intr->mask), GFP_KERNEL);
if (!intr->stat || !intr->mask) {
kfree(intr->stat);
return -ENOMEM;
}
if (intr->subdev->debug >= NV_DBG_DEBUG) {
for (i = 0; i < intr->leaves; i++)
intr->mask[i] = ~0;
}
spin_lock_irq(&device->intr.lock);
list_add_tail(&intr->head, &device->intr.intr);
spin_unlock_irq(&device->intr.lock);
return 0;
}
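/* Reprogram each tree's HW enables from the SW mask, then arm top-level
 * delivery so the ISR will execute handlers.
 */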
void
nvkm_intr_rearm(struct nvkm_device *device)
{
struct nvkm_intr *intr;
int i;
spin_lock_irq(&device->intr.lock);
list_for_each_entry(intr, &device->intr.intr, head) {
for (i = 0; intr->func->block && i < intr->leaves; i++) {
intr->func->block(intr, i, ~0);
intr->func->allow(intr, i, intr->mask[i]);
}
}
nvkm_intr_rearm_locked(device);
device->intr.armed = true;
spin_unlock_irq(&device->intr.lock);
@@ -98,6 +313,14 @@ nvkm_intr_install(struct nvkm_device *device)
void
nvkm_intr_dtor(struct nvkm_device *device)
{
struct nvkm_intr *intr, *intt;
list_for_each_entry_safe(intr, intt, &device->intr.intr, head) {
list_del(&intr->head);
kfree(intr->mask);
kfree(intr->stat);
}
if (device->intr.alloc)
free_irq(device->intr.irq, device);
}
@@ -105,5 +328,62 @@ nvkm_intr_dtor(struct nvkm_device *device)
void
nvkm_intr_ctor(struct nvkm_device *device)
{
int i;
INIT_LIST_HEAD(&device->intr.intr);
for (i = 0; i < ARRAY_SIZE(device->intr.prio); i++)
INIT_LIST_HEAD(&device->intr.prio[i]);
spin_lock_init(&device->intr.lock);
device->intr.armed = false;
}
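/* Prevent a handler from executing; its vector is left unmasked in HW. */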
void
nvkm_inth_block(struct nvkm_inth *inth)
{
if (unlikely(!inth->intr))
return;
atomic_set(&inth->allowed, 0);
}
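/* Allow a handler to execute, unmasking its vector in the owning tree if it
 * is not already unmasked.
 */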
void
nvkm_inth_allow(struct nvkm_inth *inth)
{
struct nvkm_intr *intr = inth->intr;
unsigned long flags;
if (unlikely(!inth->intr))
return;
spin_lock_irqsave(&intr->subdev->device->intr.lock, flags);
if (!atomic_xchg(&inth->allowed, 1)) {
if ((intr->mask[inth->leaf] & inth->mask) != inth->mask)
nvkm_intr_allow_locked(intr, inth->leaf, inth->mask);
}
spin_unlock_irqrestore(&intr->subdev->device->intr.lock, flags);
}
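/* Connect a handler to a vector on the given tree, at the given priority.
 * The handler starts blocked; nvkm_inth_allow() must be called before it
 * will be executed.
 */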
int
nvkm_inth_add(struct nvkm_intr *intr, enum nvkm_intr_type type, enum nvkm_intr_prio prio,
struct nvkm_subdev *subdev, nvkm_inth_func func, struct nvkm_inth *inth)
{
struct nvkm_device *device = subdev->device;
int ret;
if (WARN_ON(inth->mask))
return -EBUSY;
ret = nvkm_intr_xlat(subdev, intr, type, &inth->leaf, &inth->mask);
if (ret)
return ret;
nvkm_debug(intr->subdev, "intr %d/%08x requested by %s\n",
inth->leaf, inth->mask, subdev->name);
inth->intr = intr;
inth->func = func;
atomic_set(&inth->allowed, 0);
list_add_tail(&inth->head, &device->intr.prio[prio]);
return 0;
}