drm/nouveau/imem/nv04-nv40: directly use instmem for vbios/ramfc/ramro
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
parent adb53d2773
commit 5b1ab0c2c7
8 changed files with 91 additions and 99 deletions
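Every hunk below applies the same transformation: the per-FIFO references to instmem's RAMHT/RAMRO/RAMFC (and the nvkm_gpuobj allocations backing VBIOS/RAMFC/RAMRO in the instmem constructors) are dropped, the objects become nvkm_memory, and each user reaches them through the device's instmem subdev at the point of use. A minimal sketch of the new access pattern, condensed from the nv04 channel-constructor hunk; the helper name and reduced argument list are illustrative only, not part of the patch:

/* Illustrative sketch only -- condensed from the nv04_fifo_chan_ctor hunk.
 * RAMFC is now reached through the instmem subdev's nvkm_memory object
 * instead of a gpuobj reference held in struct nv04_fifo.
 */
static void
sketch_write_ramfc(struct nv04_fifo *fifo, struct nv04_fifo_chan *chan,
		   u32 pushbuf_offset)
{
	struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;

	nvkm_kmap(imem->ramfc);                                      /* map for CPU writes */
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, pushbuf_offset);  /* initial DMA put */
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, pushbuf_offset);  /* initial DMA get */
	nvkm_done(imem->ramfc);                                      /* flush and unmap */
}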
@@ -13,10 +13,10 @@ struct nvkm_instmem {

 	const struct nvkm_instmem_func *func;

-	struct nvkm_gpuobj *vbios;
+	struct nvkm_memory *vbios;
 	struct nvkm_ramht *ramht;
-	struct nvkm_gpuobj *ramro;
-	struct nvkm_gpuobj *ramfc;
+	struct nvkm_memory *ramro;
+	struct nvkm_memory *ramfc;
 };

 struct nvkm_instmem_func {
@@ -56,6 +56,7 @@ nv04_fifo_object_attach(struct nvkm_object *parent,
 {
 	struct nv04_fifo *fifo = (void *)parent->engine;
 	struct nv04_fifo_chan *chan = (void *)parent;
+	struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
 	u32 context, chid = chan->base.chid;
 	int ret;

@@ -83,7 +84,7 @@ nv04_fifo_object_attach(struct nvkm_object *parent,
 	context |= chid << 24;

 	mutex_lock(&nv_subdev(fifo)->mutex);
-	ret = nvkm_ramht_insert(fifo->ramht, chid, handle, context);
+	ret = nvkm_ramht_insert(imem->ramht, chid, handle, context);
 	mutex_unlock(&nv_subdev(fifo)->mutex);
 	return ret;
 }
@@ -92,8 +93,9 @@ void
 nv04_fifo_object_detach(struct nvkm_object *parent, int cookie)
 {
 	struct nv04_fifo *fifo = (void *)parent->engine;
+	struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
 	mutex_lock(&nv_subdev(fifo)->mutex);
-	nvkm_ramht_remove(fifo->ramht, cookie);
+	nvkm_ramht_remove(imem->ramht, cookie);
 	mutex_unlock(&nv_subdev(fifo)->mutex);
 }

@@ -115,6 +117,7 @@ nv04_fifo_chan_ctor(struct nvkm_object *parent,
 		struct nv03_channel_dma_v0 v0;
 	} *args = data;
 	struct nv04_fifo *fifo = (void *)engine;
+	struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
 	struct nv04_fifo_chan *chan;
 	int ret;

@@ -142,18 +145,18 @@ nv04_fifo_chan_ctor(struct nvkm_object *parent,
 	nv_parent(chan)->context_attach = nv04_fifo_context_attach;
 	chan->ramfc = chan->base.chid * 32;

-	nvkm_kmap(fifo->ramfc);
-	nvkm_wo32(fifo->ramfc, chan->ramfc + 0x00, args->v0.offset);
-	nvkm_wo32(fifo->ramfc, chan->ramfc + 0x04, args->v0.offset);
-	nvkm_wo32(fifo->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4);
-	nvkm_wo32(fifo->ramfc, chan->ramfc + 0x10,
+	nvkm_kmap(imem->ramfc);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x10,
 		  NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
 		  NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
 #ifdef __BIG_ENDIAN
 		  NV_PFIFO_CACHE1_BIG_ENDIAN |
 #endif
 		  NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
-	nvkm_done(fifo->ramfc);
+	nvkm_done(imem->ramfc);
 	return 0;
 }

@@ -162,13 +165,14 @@ nv04_fifo_chan_dtor(struct nvkm_object *object)
 {
 	struct nv04_fifo *fifo = (void *)object->engine;
 	struct nv04_fifo_chan *chan = (void *)object;
+	struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
 	struct ramfc_desc *c = fifo->ramfc_desc;

-	nvkm_kmap(fifo->ramfc);
+	nvkm_kmap(imem->ramfc);
 	do {
-		nvkm_wo32(fifo->ramfc, chan->ramfc + c->ctxp, 0x00000000);
+		nvkm_wo32(imem->ramfc, chan->ramfc + c->ctxp, 0x00000000);
 	} while ((++c)->bits);
-	nvkm_done(fifo->ramfc);
+	nvkm_done(imem->ramfc);

 	nvkm_fifo_channel_destroy(&chan->base);
 }

@@ -198,8 +202,8 @@ nv04_fifo_chan_fini(struct nvkm_object *object, bool suspend)
 {
 	struct nv04_fifo *fifo = (void *)object->engine;
 	struct nv04_fifo_chan *chan = (void *)object;
-	struct nvkm_gpuobj *fctx = fifo->ramfc;
 	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_memory *fctx = device->imem->ramfc;
 	struct ramfc_desc *c;
 	unsigned long flags;
 	u32 data = chan->ramfc;

@@ -574,8 +578,6 @@ nv04_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	       struct nvkm_oclass *oclass, void *data, u32 size,
 	       struct nvkm_object **pobject)
 {
-	struct nvkm_device *device = (void *)parent;
-	struct nvkm_instmem *imem = device->imem;
 	struct nv04_fifo *fifo;
 	int ret;

@@ -584,10 +586,6 @@ nv04_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	if (ret)
 		return ret;

-	nvkm_ramht_ref(imem->ramht, &fifo->ramht);
-	nvkm_gpuobj_ref(imem->ramro, &fifo->ramro);
-	nvkm_gpuobj_ref(imem->ramfc, &fifo->ramfc);
-
 	nv_subdev(fifo)->unit = 0x00000100;
 	nv_subdev(fifo)->intr = nv04_fifo_intr;
 	nv_engine(fifo)->cclass = &nv04_fifo_cclass;

@@ -602,9 +600,6 @@ void
 nv04_fifo_dtor(struct nvkm_object *object)
 {
 	struct nv04_fifo *fifo = (void *)object;
-	nvkm_gpuobj_ref(NULL, &fifo->ramfc);
-	nvkm_gpuobj_ref(NULL, &fifo->ramro);
-	nvkm_ramht_ref(NULL, &fifo->ramht);
 	nvkm_fifo_destroy(&fifo->base);
 }

@@ -613,6 +608,10 @@ nv04_fifo_init(struct nvkm_object *object)
 {
 	struct nv04_fifo *fifo = (void *)object;
 	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_instmem *imem = device->imem;
+	struct nvkm_ramht *ramht = imem->ramht;
+	struct nvkm_memory *ramro = imem->ramro;
+	struct nvkm_memory *ramfc = imem->ramfc;
 	int ret;

 	ret = nvkm_fifo_init(&fifo->base);

@@ -623,10 +622,10 @@ nv04_fifo_init(struct nvkm_object *object)
 	nvkm_wr32(device, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);

 	nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
-				    ((fifo->ramht->bits - 9) << 16) |
-				    (fifo->ramht->gpuobj.addr >> 8));
-	nvkm_wr32(device, NV03_PFIFO_RAMRO, fifo->ramro->addr >> 8);
-	nvkm_wr32(device, NV03_PFIFO_RAMFC, fifo->ramfc->addr >> 8);
+				    ((ramht->bits - 9) << 16) |
+				    (ramht->gpuobj.addr >> 8));
+	nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8);
+	nvkm_wr32(device, NV03_PFIFO_RAMFC, nvkm_memory_addr(ramfc) >> 8);

 	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.max);

@@ -142,9 +142,6 @@ struct ramfc_desc {
 struct nv04_fifo {
 	struct nvkm_fifo base;
 	struct ramfc_desc *ramfc_desc;
-	struct nvkm_ramht *ramht;
-	struct nvkm_gpuobj *ramro;
-	struct nvkm_gpuobj *ramfc;
 };

 struct nv04_fifo_base {
@@ -25,7 +25,6 @@

 #include <core/client.h>
 #include <core/engctx.h>
-#include <core/ramht.h>
 #include <subdev/instmem.h>

 #include <nvif/class.h>

@@ -59,6 +58,7 @@ nv10_fifo_chan_ctor(struct nvkm_object *parent,
 		struct nv03_channel_dma_v0 v0;
 	} *args = data;
 	struct nv04_fifo *fifo = (void *)engine;
+	struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
 	struct nv04_fifo_chan *chan;
 	int ret;

@@ -86,18 +86,18 @@ nv10_fifo_chan_ctor(struct nvkm_object *parent,
 	nv_parent(chan)->context_attach = nv04_fifo_context_attach;
 	chan->ramfc = chan->base.chid * 32;

-	nvkm_kmap(fifo->ramfc);
-	nvkm_wo32(fifo->ramfc, chan->ramfc + 0x00, args->v0.offset);
-	nvkm_wo32(fifo->ramfc, chan->ramfc + 0x04, args->v0.offset);
-	nvkm_wo32(fifo->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
-	nvkm_wo32(fifo->ramfc, chan->ramfc + 0x14,
+	nvkm_kmap(imem->ramfc);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x14,
 		  NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
 		  NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
 #ifdef __BIG_ENDIAN
 		  NV_PFIFO_CACHE1_BIG_ENDIAN |
 #endif
 		  NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
-	nvkm_done(fifo->ramfc);
+	nvkm_done(imem->ramfc);
 	return 0;
 }

@@ -145,8 +145,6 @@ nv10_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	       struct nvkm_oclass *oclass, void *data, u32 size,
 	       struct nvkm_object **pobject)
 {
-	struct nvkm_device *device = (void *)parent;
-	struct nvkm_instmem *imem = device->imem;
 	struct nv04_fifo *fifo;
 	int ret;

@@ -155,10 +153,6 @@ nv10_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	if (ret)
 		return ret;

-	nvkm_ramht_ref(imem->ramht, &fifo->ramht);
-	nvkm_gpuobj_ref(imem->ramro, &fifo->ramro);
-	nvkm_gpuobj_ref(imem->ramfc, &fifo->ramfc);
-
 	nv_subdev(fifo)->unit = 0x00000100;
 	nv_subdev(fifo)->intr = nv04_fifo_intr;
 	nv_engine(fifo)->cclass = &nv10_fifo_cclass;

@@ -64,6 +64,7 @@ nv17_fifo_chan_ctor(struct nvkm_object *parent,
 		struct nv03_channel_dma_v0 v0;
 	} *args = data;
 	struct nv04_fifo *fifo = (void *)engine;
+	struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
 	struct nv04_fifo_chan *chan;
 	int ret;

@@ -93,18 +94,18 @@ nv17_fifo_chan_ctor(struct nvkm_object *parent,
 	nv_parent(chan)->context_attach = nv04_fifo_context_attach;
 	chan->ramfc = chan->base.chid * 64;

-	nvkm_kmap(fifo->ramfc);
-	nvkm_wo32(fifo->ramfc, chan->ramfc + 0x00, args->v0.offset);
-	nvkm_wo32(fifo->ramfc, chan->ramfc + 0x04, args->v0.offset);
-	nvkm_wo32(fifo->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
-	nvkm_wo32(fifo->ramfc, chan->ramfc + 0x14,
+	nvkm_kmap(imem->ramfc);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x14,
 		  NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
 		  NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
 #ifdef __BIG_ENDIAN
 		  NV_PFIFO_CACHE1_BIG_ENDIAN |
 #endif
 		  NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
-	nvkm_done(fifo->ramfc);
+	nvkm_done(imem->ramfc);
 	return 0;
 }

@@ -152,8 +153,6 @@ nv17_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	       struct nvkm_oclass *oclass, void *data, u32 size,
 	       struct nvkm_object **pobject)
 {
-	struct nvkm_device *device = (void *)parent;
-	struct nvkm_instmem *imem = device->imem;
 	struct nv04_fifo *fifo;
 	int ret;

@@ -162,10 +161,6 @@ nv17_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	if (ret)
 		return ret;

-	nvkm_ramht_ref(imem->ramht, &fifo->ramht);
-	nvkm_gpuobj_ref(imem->ramro, &fifo->ramro);
-	nvkm_gpuobj_ref(imem->ramfc, &fifo->ramfc);
-
 	nv_subdev(fifo)->unit = 0x00000100;
 	nv_subdev(fifo)->intr = nv04_fifo_intr;
 	nv_engine(fifo)->cclass = &nv17_fifo_cclass;

@@ -181,6 +176,10 @@ nv17_fifo_init(struct nvkm_object *object)
 {
 	struct nv04_fifo *fifo = (void *)object;
 	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_instmem *imem = device->imem;
+	struct nvkm_ramht *ramht = imem->ramht;
+	struct nvkm_memory *ramro = imem->ramro;
+	struct nvkm_memory *ramfc = imem->ramfc;
 	int ret;

 	ret = nvkm_fifo_init(&fifo->base);

@@ -191,10 +190,11 @@ nv17_fifo_init(struct nvkm_object *object)
 	nvkm_wr32(device, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);

 	nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
-				    ((fifo->ramht->bits - 9) << 16) |
-				    (fifo->ramht->gpuobj.addr >> 8));
-	nvkm_wr32(device, NV03_PFIFO_RAMRO, fifo->ramro->addr >> 8);
-	nvkm_wr32(device, NV03_PFIFO_RAMFC, fifo->ramfc->addr >> 8 | 0x00010000);
+				    ((ramht->bits - 9) << 16) |
+				    (ramht->gpuobj.addr >> 8));
+	nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8);
+	nvkm_wr32(device, NV03_PFIFO_RAMFC, nvkm_memory_addr(ramfc) >> 8 |
+					    0x00010000);

 	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.max);

@@ -69,6 +69,7 @@ nv40_fifo_object_attach(struct nvkm_object *parent,
 {
 	struct nv04_fifo *fifo = (void *)parent->engine;
 	struct nv04_fifo_chan *chan = (void *)parent;
+	struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
 	u32 context, chid = chan->base.chid;
 	int ret;

@@ -95,7 +96,7 @@ nv40_fifo_object_attach(struct nvkm_object *parent,
 	context |= chid << 23;

 	mutex_lock(&nv_subdev(fifo)->mutex);
-	ret = nvkm_ramht_insert(fifo->ramht, chid, handle, context);
+	ret = nvkm_ramht_insert(imem->ramht, chid, handle, context);
 	mutex_unlock(&nv_subdev(fifo)->mutex);
 	return ret;
 }

@@ -106,6 +107,7 @@ nv40_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *engctx)
 	struct nv04_fifo *fifo = (void *)parent->engine;
 	struct nv04_fifo_chan *chan = (void *)parent;
 	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_instmem *imem = device->imem;
 	unsigned long flags;
 	u32 reg, ctx;

@@ -130,9 +132,9 @@ nv40_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *engctx)

 	if ((nvkm_rd32(device, 0x003204) & fifo->base.max) == chan->base.chid)
 		nvkm_wr32(device, reg, nv_engctx(engctx)->addr);
-	nvkm_kmap(fifo->ramfc);
-	nvkm_wo32(fifo->ramfc, chan->ramfc + ctx, nv_engctx(engctx)->addr);
-	nvkm_done(fifo->ramfc);
+	nvkm_kmap(imem->ramfc);
+	nvkm_wo32(imem->ramfc, chan->ramfc + ctx, nv_engctx(engctx)->addr);
+	nvkm_done(imem->ramfc);

 	nvkm_mask(device, 0x002500, 0x00000001, 0x00000001);
 	spin_unlock_irqrestore(&fifo->base.lock, flags);

@@ -146,6 +148,7 @@ nv40_fifo_context_detach(struct nvkm_object *parent, bool suspend,
 	struct nv04_fifo *fifo = (void *)parent->engine;
 	struct nv04_fifo_chan *chan = (void *)parent;
 	struct nvkm_device *device = fifo->base.engine.subdev.device;
+	struct nvkm_instmem *imem = device->imem;
 	unsigned long flags;
 	u32 reg, ctx;

@@ -169,9 +172,9 @@ nv40_fifo_context_detach(struct nvkm_object *parent, bool suspend,

 	if ((nvkm_rd32(device, 0x003204) & fifo->base.max) == chan->base.chid)
 		nvkm_wr32(device, reg, 0x00000000);
-	nvkm_kmap(fifo->ramfc);
-	nvkm_wo32(fifo->ramfc, chan->ramfc + ctx, 0x00000000);
-	nvkm_done(fifo->ramfc);
+	nvkm_kmap(imem->ramfc);
+	nvkm_wo32(imem->ramfc, chan->ramfc + ctx, 0x00000000);
+	nvkm_done(imem->ramfc);

 	nvkm_mask(device, 0x002500, 0x00000001, 0x00000001);
 	spin_unlock_irqrestore(&fifo->base.lock, flags);

@@ -187,6 +190,7 @@ nv40_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		struct nv03_channel_dma_v0 v0;
 	} *args = data;
 	struct nv04_fifo *fifo = (void *)engine;
+	struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
 	struct nv04_fifo_chan *chan;
 	int ret;

@@ -216,19 +220,19 @@ nv40_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
 	chan->ramfc = chan->base.chid * 128;

-	nvkm_kmap(fifo->ramfc);
-	nvkm_wo32(fifo->ramfc, chan->ramfc + 0x00, args->v0.offset);
-	nvkm_wo32(fifo->ramfc, chan->ramfc + 0x04, args->v0.offset);
-	nvkm_wo32(fifo->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
-	nvkm_wo32(fifo->ramfc, chan->ramfc + 0x18, 0x30000000 |
+	nvkm_kmap(imem->ramfc);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x18, 0x30000000 |
 		  NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
 		  NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
 #ifdef __BIG_ENDIAN
 		  NV_PFIFO_CACHE1_BIG_ENDIAN |
 #endif
 		  NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
-	nvkm_wo32(fifo->ramfc, chan->ramfc + 0x3c, 0x0001ffff);
-	nvkm_done(fifo->ramfc);
+	nvkm_wo32(imem->ramfc, chan->ramfc + 0x3c, 0x0001ffff);
+	nvkm_done(imem->ramfc);
 	return 0;
 }

@@ -276,8 +280,6 @@ nv40_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	       struct nvkm_oclass *oclass, void *data, u32 size,
 	       struct nvkm_object **pobject)
 {
-	struct nvkm_device *device = (void *)parent;
-	struct nvkm_instmem *imem = device->imem;
 	struct nv04_fifo *fifo;
 	int ret;

@@ -286,10 +288,6 @@ nv40_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	if (ret)
 		return ret;

-	nvkm_ramht_ref(imem->ramht, &fifo->ramht);
-	nvkm_gpuobj_ref(imem->ramro, &fifo->ramro);
-	nvkm_gpuobj_ref(imem->ramfc, &fifo->ramfc);
-
 	nv_subdev(fifo)->unit = 0x00000100;
 	nv_subdev(fifo)->intr = nv04_fifo_intr;
 	nv_engine(fifo)->cclass = &nv40_fifo_cclass;

@@ -306,6 +304,10 @@ nv40_fifo_init(struct nvkm_object *object)
 	struct nv04_fifo *fifo = (void *)object;
 	struct nvkm_device *device = fifo->base.engine.subdev.device;
 	struct nvkm_fb *fb = device->fb;
+	struct nvkm_instmem *imem = device->imem;
+	struct nvkm_ramht *ramht = imem->ramht;
+	struct nvkm_memory *ramro = imem->ramro;
+	struct nvkm_memory *ramfc = imem->ramfc;
 	int ret;

 	ret = nvkm_fifo_init(&fifo->base);

@@ -317,9 +319,9 @@ nv40_fifo_init(struct nvkm_object *object)
 	nvkm_wr32(device, 0x002058, 0x00000001);

 	nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
-				    ((fifo->ramht->bits - 9) << 16) |
-				    (fifo->ramht->gpuobj.addr >> 8));
-	nvkm_wr32(device, NV03_PFIFO_RAMRO, fifo->ramro->addr >> 8);
+				    ((ramht->bits - 9) << 16) |
+				    (ramht->gpuobj.addr >> 8));
+	nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8);

 	switch (nv_device(fifo)->chipset) {
 	case 0x47:

@@ -337,8 +339,8 @@ nv40_fifo_init(struct nvkm_object *object)
 	default:
 		nvkm_wr32(device, 0x002230, 0x00000000);
 		nvkm_wr32(device, 0x002220, ((fb->ram->size - 512 * 1024 +
-					     fifo->ramfc->addr) >> 16) |
-					    0x00030000);
+					     nvkm_memory_addr(ramfc)) >> 16) |
+					    0x00030000);
 		break;
 	}

@@ -154,10 +154,10 @@ static void
 nv04_instmem_dtor(struct nvkm_object *object)
 {
 	struct nv04_instmem *imem = (void *)object;
-	nvkm_gpuobj_ref(NULL, &imem->base.ramfc);
-	nvkm_gpuobj_ref(NULL, &imem->base.ramro);
+	nvkm_memory_del(&imem->base.ramfc);
+	nvkm_memory_del(&imem->base.ramro);
 	nvkm_ramht_ref(NULL, &imem->base.ramht);
-	nvkm_gpuobj_ref(NULL, &imem->base.vbios);
+	nvkm_memory_del(&imem->base.vbios);
 	nvkm_mm_fini(&imem->heap);
 	nvkm_instmem_destroy(&imem->base);
 }

@@ -173,6 +173,7 @@ nv04_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		  struct nvkm_oclass *oclass, void *data, u32 size,
 		  struct nvkm_object **pobject)
 {
+	struct nvkm_device *device = (void *)parent;
 	struct nv04_instmem *imem;
 	int ret;

@@ -191,7 +192,7 @@ nv04_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		return ret;

 	/* 0x00000-0x10000: reserve for probable vbios image */
-	ret = nvkm_gpuobj_new(nv_object(imem), NULL, 0x10000, 0, 0,
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x10000, 0, false,
 			      &imem->base.vbios);
 	if (ret)
 		return ret;

@@ -203,14 +204,13 @@ nv04_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		return ret;

 	/* 0x18000-0x18800: reserve for RAMFC (enough for 32 nv30 channels) */
-	ret = nvkm_gpuobj_new(nv_object(imem), NULL, 0x00800, 0,
-			      NVOBJ_FLAG_ZERO_ALLOC,
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x00800, 0, true,
 			      &imem->base.ramfc);
 	if (ret)
 		return ret;

 	/* 0x18800-0x18a00: reserve for RAMRO */
-	ret = nvkm_gpuobj_new(nv_object(imem), NULL, 0x00200, 0, 0,
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x00200, 0, false,
 			      &imem->base.ramro);
 	if (ret)
 		return ret;

@@ -155,10 +155,10 @@ static void
 nv40_instmem_dtor(struct nvkm_object *object)
 {
 	struct nv40_instmem *imem = (void *)object;
-	nvkm_gpuobj_ref(NULL, &imem->base.ramfc);
-	nvkm_gpuobj_ref(NULL, &imem->base.ramro);
+	nvkm_memory_del(&imem->base.ramfc);
+	nvkm_memory_del(&imem->base.ramro);
 	nvkm_ramht_ref(NULL, &imem->base.ramht);
-	nvkm_gpuobj_ref(NULL, &imem->base.vbios);
+	nvkm_memory_del(&imem->base.vbios);
 	nvkm_mm_fini(&imem->heap);
 	if (imem->iomem)
 		iounmap(imem->iomem);

@@ -221,7 +221,7 @@ nv40_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 		return ret;

 	/* 0x00000-0x10000: reserve for probable vbios image */
-	ret = nvkm_gpuobj_new(nv_object(imem), NULL, 0x10000, 0, 0,
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x10000, 0, false,
 			      &imem->base.vbios);
 	if (ret)
 		return ret;

@@ -235,7 +235,7 @@ nv40_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	/* 0x18000-0x18200: reserve for RAMRO
 	 * 0x18200-0x20000: padding
 	 */
-	ret = nvkm_gpuobj_new(nv_object(imem), NULL, 0x08000, 0, 0,
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x08000, 0, false,
 			      &imem->base.ramro);
 	if (ret)
 		return ret;

@@ -243,8 +243,8 @@ nv40_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
 	/* 0x20000-0x21000: reserve for RAMFC
 	 * 0x21000-0x40000: padding and some unknown crap
 	 */
-	ret = nvkm_gpuobj_new(nv_object(imem), NULL, 0x20000, 0,
-			      NVOBJ_FLAG_ZERO_ALLOC, &imem->base.ramfc);
+	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x20000, 0, true,
+			      &imem->base.ramfc);
 	if (ret)
 		return ret;
