drm/nouveau/intr: add nvkm_subdev_intr() compatibility

It's quite a lot of tedious and error-prone work to switch all the
subdevs over at once, so allow an nvkm_intr to request that new-style
handlers be created to wrap the existing interfaces.

This will allow a more gradual transition.
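
For illustration, a chipset interrupt table can mark its unconverted
units as below (a sketch only; the table name, mask values and entries
are hypothetical, not part of this commit):

	/* Entries flagged .legacy get a new-style handler auto-created
	 * for them, wrapping nvkm_subdev_intr().  NVKM_SUBDEV_TOP entries
	 * are expanded to one handler per unit from the topology tables,
	 * and a zero mask terminates the list. */
	static const struct nvkm_intr_data
	example_mc_intrs[] = {
		{ NVKM_ENGINE_DISP, 0, 0, 0x04000000, true },
		{ NVKM_ENGINE_FIFO, 0, 0, 0x00000100, true },
		{ NVKM_SUBDEV_TOP , 0, 0, 0x00001000, true },
		{}
	};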

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Reviewed-by: Lyude Paul <lyude@redhat.com>
Author: Ben Skeggs <bskeggs@redhat.com>
Date:   2022-06-01 20:46:53 +10:00
Parent: 3ebd64aa3c
Commit: a7ab200aeb
4 changed files with 65 additions and 0 deletions

@@ -69,6 +69,7 @@ struct nvkm_device {
		int irq;
		bool alloc;
		bool armed;
		bool legacy_done;
	} intr;
};

@@ -30,6 +30,7 @@ struct nvkm_intr {
		int inst;
		int leaf;
		u32 mask; /* 0-terminated. */
		bool legacy; /* auto-create "legacy" nvkm_subdev_intr() handler */
	} *data;

	struct nvkm_subdev *subdev;

@@ -21,6 +21,8 @@ struct nvkm_subdev {
	u32 debug;
	struct list_head head;

	struct nvkm_inth inth;

	void **pself;
	bool oneinit;
};

@@ -265,12 +265,73 @@ nvkm_intr_add(const struct nvkm_intr_func *func, const struct nvkm_intr_data *da
	return 0;
}

static irqreturn_t
nvkm_intr_subdev(struct nvkm_inth *inth)
{
	struct nvkm_subdev *subdev = container_of(inth, typeof(*subdev), inth);

	nvkm_subdev_intr(subdev);
	return IRQ_HANDLED;
}
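
/* For reference: nvkm_subdev_intr() is the pre-existing entry point; it
 * simply dispatches to subdev->func->intr(subdev), so the wrapper above
 * lets unconverted subdevs keep working behind the new nvkm_inth code.
 */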

static void
nvkm_intr_subdev_add_dev(struct nvkm_intr *intr, enum nvkm_subdev_type type, int inst)
{
	struct nvkm_subdev *subdev;
	enum nvkm_intr_prio prio;
	int ret;

	subdev = nvkm_device_subdev(intr->subdev->device, type, inst);
	if (!subdev || !subdev->func->intr)
		return;

	if (type == NVKM_ENGINE_DISP)
		prio = NVKM_INTR_PRIO_VBLANK;
	else
		prio = NVKM_INTR_PRIO_NORMAL;

	ret = nvkm_inth_add(intr, NVKM_INTR_SUBDEV, prio, subdev, nvkm_intr_subdev, &subdev->inth);
	if (WARN_ON(ret))
		return;

	nvkm_inth_allow(&subdev->inth);
}

static void
nvkm_intr_subdev_add(struct nvkm_intr *intr)
{
	const struct nvkm_intr_data *data;
	struct nvkm_device *device = intr->subdev->device;
	struct nvkm_top_device *tdev;

	for (data = intr->data; data && data->mask; data++) {
		if (data->legacy) {
			if (data->type == NVKM_SUBDEV_TOP) {
				list_for_each_entry(tdev, &device->top->device, head) {
					if (tdev->intr < 0 || !(data->mask & BIT(tdev->intr)))
						continue;

					nvkm_intr_subdev_add_dev(intr, tdev->type, tdev->inst);
				}
			} else {
				nvkm_intr_subdev_add_dev(intr, data->type, data->inst);
			}
		}
	}
}
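
/* Note: the legacy wrappers are hooked up once, on the first rearm, and
 * device->intr.legacy_done then prevents them being registered again.
 */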
void
nvkm_intr_rearm(struct nvkm_device *device)
{
	struct nvkm_intr *intr;
	int i;

	if (unlikely(!device->intr.legacy_done)) {
		list_for_each_entry(intr, &device->intr.intr, head)
			nvkm_intr_subdev_add(intr);
		device->intr.legacy_done = true;
	}

	spin_lock_irq(&device->intr.lock);
	list_for_each_entry(intr, &device->intr.intr, head) {
		for (i = 0; intr->func->block && i < intr->leaves; i++) {