drm/nouveau/device: cleaner abstraction for device resource functions

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Ben Skeggs 2015-08-20 14:54:23 +10:00
parent 2b700825e7
commit 7e8820fed7
18 changed files with 71 additions and 62 deletions
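The patch replaces the bus-generic helpers nv_device_resource_start()/nv_device_resource_len() with resource_addr()/resource_size() hooks in struct nvkm_device_func, so each bus backend (PCI, Tegra platform) answers BAR queries itself and callers no longer reach into device->pdev or device->platformdev. Taken from the nvkm_device_ctor() hunk below, the call-site pattern changes like this:

/* before: core helpers that branched on the bus type internally */
mmio_base = nv_device_resource_start(device, 0);
mmio_size = nv_device_resource_len(device, 0);

/* after: dispatch through the per-bus function table */
mmio_base = device->func->resource_addr(device, 0);
mmio_size = device->func->resource_size(device, 0);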


@@ -148,6 +148,8 @@ struct nvkm_device_func {
         int (*preinit)(struct nvkm_device *);
         int (*init)(struct nvkm_device *);
         void (*fini)(struct nvkm_device *, bool suspend);
+        resource_size_t (*resource_addr)(struct nvkm_device *, unsigned bar);
+        resource_size_t (*resource_size)(struct nvkm_device *, unsigned bar);
 };

 struct nvkm_device_quirk {
@@ -242,12 +244,6 @@ nv_device_base(struct nvkm_device *device)
                &device->platformdev->dev;
 }

-resource_size_t
-nv_device_resource_start(struct nvkm_device *device, unsigned int bar);
-
-resource_size_t
-nv_device_resource_len(struct nvkm_device *device, unsigned int bar);
-
 struct platform_device;

 enum nv_bus_type {
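With the hooks declared, each bus backend supplies its own BAR lookups and publishes them in its nvkm_device_func. The real PCI and Tegra implementations appear later in this patch; the shape of a backend, with a made-up "foo" bus used purely for illustration, is roughly:

/* sketch only: "foo" is a hypothetical bus, not part of this patch */
static resource_size_t
nvkm_device_foo_resource_addr(struct nvkm_device *device, unsigned bar)
{
        /* translate (device, bar) into a bus-specific physical address */
        return 0;
}

static resource_size_t
nvkm_device_foo_resource_size(struct nvkm_device *device, unsigned bar)
{
        /* report the length of that BAR, or 0 if it does not exist */
        return 0;
}

static const struct nvkm_device_func
nvkm_device_foo_func = {
        /* ...dtor/init/fini and the other hooks... */
        .resource_addr = nvkm_device_foo_resource_addr,
        .resource_size = nvkm_device_foo_resource_size,
};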


@@ -1351,6 +1351,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 {
         struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
         struct nouveau_drm *drm = nouveau_bdev(bdev);
+        struct nvkm_device *device = nvxx_device(&drm->device);
         struct nvkm_mem *node = mem->mm_node;
         int ret;
@@ -1379,7 +1380,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
                 /* fallthrough, tiled memory */
         case TTM_PL_VRAM:
                 mem->bus.offset = mem->start << PAGE_SHIFT;
-                mem->bus.base = nv_device_resource_start(nvxx_device(&drm->device), 1);
+                mem->bus.base = device->func->resource_addr(device, 1);
                 mem->bus.is_iomem = true;
                 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
                         struct nvkm_bar *bar = nvxx_bar(&drm->device);
@@ -1419,8 +1420,8 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
         struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
         struct nouveau_bo *nvbo = nouveau_bo(bo);
-        struct nvif_device *device = &drm->device;
-        u32 mappable = nv_device_resource_len(nvxx_device(device), 1) >> PAGE_SHIFT;
+        struct nvkm_device *device = nvxx_device(&drm->device);
+        u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
         int i, ret;

         /* as long as the bo isn't in vram, and isn't tiled, we've got
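nouveau_ttm_fault_reserve_notify() only needs the number of CPU-mappable BAR1 pages, hence the shift by PAGE_SHIFT. As a worked example with illustrative numbers (a 256 MiB BAR1 and 4 KiB pages, i.e. PAGE_SHIFT == 12):

/* illustrative values only: 256 MiB BAR1, 4 KiB pages */
u32 mappable = (256u << 20) >> 12;        /* 65536 mappable pages */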


@@ -150,7 +150,8 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
                  */
                 args.target = NV_DMA_V0_TARGET_PCI;
                 args.access = NV_DMA_V0_ACCESS_RDWR;
-                args.start = nv_device_resource_start(nvxx_device(device), 1);
+                args.start = nvxx_device(device)->func->
+                        resource_addr(nvxx_device(device), 1);
                 args.limit = args.start + device->info.ram_user - 1;
         } else {
                 args.target = NV_DMA_V0_TARGET_VRAM;
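This is the one caller that ends up evaluating nvxx_device(device) twice, because it only has the nvif_device wrapper in scope. Caching the result in a local, as the other converted call sites do, would read more naturally; a sketch (not part of this patch, variable name illustrative):

struct nvkm_device *nvkm = nvxx_device(device);        /* local alias, sketch only */

args.start = nvkm->func->resource_addr(nvkm, 1);
args.limit = args.start + device->info.ram_user - 1;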


@@ -445,6 +445,7 @@ int
 nouveau_display_create(struct drm_device *dev)
 {
         struct nouveau_drm *drm = nouveau_drm(dev);
+        struct nvkm_device *device = nvxx_device(&drm->device);
         struct nouveau_display *disp;
         int ret;
@@ -457,7 +458,7 @@ nouveau_display_create(struct drm_device *dev)
         drm_mode_create_dvi_i_properties(dev);

         dev->mode_config.funcs = &nouveau_mode_config_funcs;
-        dev->mode_config.fb_base = nv_device_resource_start(nvxx_device(&drm->device), 1);
+        dev->mode_config.fb_base = device->func->resource_addr(device, 1);

         dev->mode_config.min_width = 0;
         dev->mode_config.min_height = 0;


@@ -335,6 +335,7 @@ nouveau_ttm_global_release(struct nouveau_drm *drm)
 int
 nouveau_ttm_init(struct nouveau_drm *drm)
 {
+        struct nvkm_device *device = nvxx_device(&drm->device);
         struct drm_device *dev = drm->dev;
         u32 bits;
         int ret;
@@ -381,8 +382,8 @@ nouveau_ttm_init(struct nouveau_drm *drm)
                 return ret;
         }

-        drm->ttm.mtrr = arch_phys_wc_add(nv_device_resource_start(nvxx_device(&drm->device), 1),
-                                         nv_device_resource_len(nvxx_device(&drm->device), 1));
+        drm->ttm.mtrr = arch_phys_wc_add(device->func->resource_addr(device, 1),
+                                         device->func->resource_size(device, 1));

         /* GART init */
         if (drm->agp.stat != ENABLED) {
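arch_phys_wc_add() returns a cookie identifying the write-combining (MTRR) region, which nouveau keeps in drm->ttm.mtrr; it is released with arch_phys_wc_del() on teardown. That counterpart is outside this hunk, but pairs as in this sketch:

/* teardown counterpart (sketch; lives in the ttm fini path, not shown here) */
arch_phys_wc_del(drm->ttm.mtrr);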


@@ -2262,36 +2262,6 @@ nvkm_device_init(struct nvkm_device *device)
         return ret;
 }

-resource_size_t
-nv_device_resource_start(struct nvkm_device *device, unsigned int bar)
-{
-        if (nv_device_is_pci(device)) {
-                return pci_resource_start(device->pdev, bar);
-        } else {
-                struct resource *res;
-                res = platform_get_resource(device->platformdev,
-                                            IORESOURCE_MEM, bar);
-                if (!res)
-                        return 0;
-                return res->start;
-        }
-}
-
-resource_size_t
-nv_device_resource_len(struct nvkm_device *device, unsigned int bar)
-{
-        if (nv_device_is_pci(device)) {
-                return pci_resource_len(device->pdev, bar);
-        } else {
-                struct resource *res;
-                res = platform_get_resource(device->platformdev,
-                                            IORESOURCE_MEM, bar);
-                if (!res)
-                        return 0;
-                return resource_size(res);
-        }
-}
-
 void
 nvkm_device_del(struct nvkm_device **pdevice)
 {
@@ -2363,8 +2333,8 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
         if (ret)
                 goto done;

-        mmio_base = nv_device_resource_start(device, 0);
-        mmio_size = nv_device_resource_len(device, 0);
+        mmio_base = device->func->resource_addr(device, 0);
+        mmio_size = device->func->resource_size(device, 0);

         /* identify the chipset, and determine classes of subdev/engines */
         if (detect) {
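The deleted helpers decided PCI versus platform bus on every call via nv_device_is_pci(); the new scheme makes that decision once, when the device is constructed with its nvkm_device_func. The same refactoring pattern in miniature, as standalone C with made-up names (independent of the kernel code):

#include <stdio.h>

struct dev;

/* per-backend operations, chosen once when the device is created */
struct dev_funcs {
        unsigned long (*resource_addr)(struct dev *, unsigned bar);
};

struct dev {
        const struct dev_funcs *func;
        unsigned long bars[4];
};

static unsigned long
pci_like_resource_addr(struct dev *d, unsigned bar)
{
        return d->bars[bar];        /* e.g. read from PCI config space */
}

static const struct dev_funcs pci_like_funcs = {
        .resource_addr = pci_like_resource_addr,
};

int main(void)
{
        struct dev d = { .func = &pci_like_funcs, .bars = { 0xf0000000UL } };

        /* callers never ask "is this PCI?"; they just dispatch */
        printf("BAR0 at %#lx\n", d.func->resource_addr(&d, 0));
        return 0;
}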


@@ -30,6 +30,20 @@ nvkm_device_pci(struct nvkm_device *device)
         return container_of(device, struct nvkm_device_pci, device);
 }

+static resource_size_t
+nvkm_device_pci_resource_addr(struct nvkm_device *device, unsigned bar)
+{
+        struct nvkm_device_pci *pdev = nvkm_device_pci(device);
+        return pci_resource_start(pdev->pdev, bar);
+}
+
+static resource_size_t
+nvkm_device_pci_resource_size(struct nvkm_device *device, unsigned bar)
+{
+        struct nvkm_device_pci *pdev = nvkm_device_pci(device);
+        return pci_resource_len(pdev->pdev, bar);
+}
+
 static void
 nvkm_device_pci_fini(struct nvkm_device *device, bool suspend)
 {
@@ -68,6 +82,8 @@ nvkm_device_pci_func = {
         .dtor = nvkm_device_pci_dtor,
         .preinit = nvkm_device_pci_preinit,
         .fini = nvkm_device_pci_fini,
+        .resource_addr = nvkm_device_pci_resource_addr,
+        .resource_size = nvkm_device_pci_resource_size,
 };

 int


@@ -26,9 +26,30 @@
 #include "priv.h"

 static struct nvkm_device_tegra *
-nvkm_device_tegra(struct nvkm_device *obj)
+nvkm_device_tegra(struct nvkm_device *device)
 {
-        return container_of(obj, struct nvkm_device_tegra, device);
+        return container_of(device, struct nvkm_device_tegra, device);
 }

+static struct resource *
+nvkm_device_tegra_resource(struct nvkm_device *device, unsigned bar)
+{
+        struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
+        return platform_get_resource(tdev->pdev, IORESOURCE_MEM, bar);
+}
+
+static resource_size_t
+nvkm_device_tegra_resource_addr(struct nvkm_device *device, unsigned bar)
+{
+        struct resource *res = nvkm_device_tegra_resource(device, bar);
+        return res ? res->start : 0;
+}
+
+static resource_size_t
+nvkm_device_tegra_resource_size(struct nvkm_device *device, unsigned bar)
+{
+        struct resource *res = nvkm_device_tegra_resource(device, bar);
+        return res ? resource_size(res) : 0;
+}
+
 static irqreturn_t
@@ -79,6 +100,8 @@ nvkm_device_tegra_func = {
         .tegra = nvkm_device_tegra,
         .init = nvkm_device_tegra_init,
         .fini = nvkm_device_tegra_fini,
+        .resource_addr = nvkm_device_tegra_resource_addr,
+        .resource_size = nvkm_device_tegra_resource_size,
 };

 int
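Note the NULL checks above: platform_get_resource() returns NULL for a BAR that does not exist, so the Tegra hooks report 0 rather than dereferencing it, preserving the behaviour of the removed core helpers. Callers depend on that; for example, the NV40 instmem probe later in this patch:

/* a size of 0 simply means "no such BAR here" */
if (device->func->resource_size(device, 2))
        bar = 2;
else
        bar = 3;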


@@ -198,8 +198,8 @@ nvkm_udevice_map(struct nvkm_object *object, u64 *addr, u32 *size)
 {
         struct nvkm_udevice *udev = nvkm_udevice(object);
         struct nvkm_device *device = udev->device;
-        *addr = nv_device_resource_start(device, 0);
-        *size = nv_device_resource_len(device, 0);
+        *addr = device->func->resource_addr(device, 0);
+        *size = device->func->resource_size(device, 0);
         return 0;
 }


@@ -195,7 +195,7 @@ nv50_disp_chan_map(struct nvkm_object *object, u64 *addr, u32 *size)
         struct nv50_disp_chan *chan = nv50_disp_chan(object);
         struct nv50_disp *disp = chan->root->disp;
         struct nvkm_device *device = disp->base.engine.subdev.device;
-        *addr = nv_device_resource_start(device, 0) +
+        *addr = device->func->resource_addr(device, 0) +
                 0x640000 + (chan->chid * 0x1000);
         *size = 0x001000;
         return 0;
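Each display channel exposes a 4 KiB (0x1000) window of user registers at a fixed offset from BAR0. With an illustrative BAR0 base of 0xf0000000 and chid == 2, the mapping works out as:

/* 0xf0000000 + 0x640000 + 2 * 0x1000 == 0xf0642000, size 0x1000 */
*addr = 0xf0000000ULL + 0x640000 + (2 * 0x1000);
*size = 0x001000;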


@@ -406,7 +406,7 @@ nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *func,
         spin_unlock_irqrestore(&fifo->lock, flags);

         /* determine address of this channel's user registers */
-        chan->addr = nv_device_resource_start(device, bar) +
+        chan->addr = device->func->resource_addr(device, bar) +
                      base + user * chan->chid;
         chan->size = user;


@@ -296,7 +296,7 @@ nv20_gr_init(struct nvkm_gr *base)
         nvkm_wr32(device, NV10_PGRAPH_SURFACE, tmp);

         /* begin RAM config */
-        vramsz = nv_device_resource_len(device, 1) - 1;
+        vramsz = device->func->resource_size(device, 1) - 1;
         nvkm_wr32(device, 0x4009A4, nvkm_rd32(device, 0x100200));
         nvkm_wr32(device, 0x4009A8, nvkm_rd32(device, 0x100204));
         nvkm_wr32(device, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);


@@ -385,7 +385,7 @@ nv40_gr_init(struct nvkm_gr *base)
         }

         /* begin RAM config */
-        vramsz = nv_device_resource_len(device, 1) - 1;
+        vramsz = device->func->resource_size(device, 1) - 1;
         switch (device->chipset) {
         case 0x40:
                 nvkm_wr32(device, 0x4009A4, nvkm_rd32(device, 0x100200));


@@ -58,7 +58,7 @@ gf100_bar_ctor_vm(struct gf100_bar *bar, struct gf100_bar_vm *bar_vm,
         if (ret)
                 return ret;

-        bar_len = nv_device_resource_len(device, bar_nr);
+        bar_len = device->func->resource_size(device, bar_nr);

         ret = nvkm_vm_new(device, 0, bar_len, 0, key, &vm);
         if (ret)


@@ -82,7 +82,7 @@ nv50_bar_oneinit(struct nvkm_bar *base)
         /* BAR3 */
         start = 0x0100000000ULL;
-        limit = start + nv_device_resource_len(device, 3);
+        limit = start + device->func->resource_size(device, 3);

         ret = nvkm_vm_new(device, start, limit, start, &bar3_lock, &vm);
         if (ret)
@@ -115,7 +115,7 @@ nv50_bar_oneinit(struct nvkm_bar *base)
         /* BAR1 */
         start = 0x0000000000ULL;
-        limit = start + nv_device_resource_len(device, 1);
+        limit = start + device->func->resource_size(device, 1);

         ret = nvkm_vm_new(device, start, limit--, start, &bar1_lock, &vm);
         if (ret)


@@ -47,8 +47,8 @@
 static inline struct io_mapping *
 fbmem_init(struct nvkm_device *dev)
 {
-        return io_mapping_create_wc(nv_device_resource_start(dev, 1),
-                                    nv_device_resource_len(dev, 1));
+        return io_mapping_create_wc(dev->func->resource_addr(dev, 1),
+                                    dev->func->resource_size(dev, 1));
 }

 static inline void
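fbmem_init() hands back a write-combined io_mapping over BAR1, which the header's other helpers then access through the atomic map/unmap interface. A peek-style dword read could look like the following sketch (not part of this diff; the header's real accessors may differ):

static inline u32
fbmem_peek(struct io_mapping *fb, u32 off)
{
        /* map the containing page write-combined, read, and unmap again */
        u8 __iomem *p = io_mapping_map_atomic_wc(fb, off & PAGE_MASK);
        u32 val = ioread32(p + (off & ~PAGE_MASK));
        io_mapping_unmap_atomic(p);
        return val;
}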


@@ -243,13 +243,13 @@ nv40_instmem_new(struct nvkm_device *device, int index,
         *pimem = &imem->base;

         /* map bar */
-        if (nv_device_resource_len(device, 2))
+        if (device->func->resource_size(device, 2))
                 bar = 2;
         else
                 bar = 3;

-        imem->iomem = ioremap(nv_device_resource_start(device, bar),
-                              nv_device_resource_len(device, bar));
+        imem->iomem = ioremap(device->func->resource_addr(device, bar),
+                              device->func->resource_size(device, bar));
         if (!imem->iomem) {
                 nvkm_error(&imem->base.subdev, "unable to map PRAMIN BAR\n");
                 return -EFAULT;
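The ioremap() of the PRAMIN BAR is undone with iounmap() when the instmem object is destroyed; that path is not in this diff, but the pairing is essentially:

/* destructor counterpart (sketch, not shown in this diff) */
if (imem->iomem)
        iounmap(imem->iomem);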


@@ -81,7 +81,7 @@ nv50_instobj_boot(struct nvkm_memory *memory, struct nvkm_vm *vm)
         ret = nvkm_vm_get(vm, size, 12, NV_MEM_ACCESS_RW, &iobj->bar);
         if (ret == 0) {
-                map = ioremap(nv_device_resource_start(device, 3) +
+                map = ioremap(device->func->resource_addr(device, 3) +
                               (u32)iobj->bar.offset, size);
                 if (map) {
                         nvkm_memory_map(memory, &iobj->bar, 0);