drm/nouveau/fifo: switch to gpuobj accessor macros

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
This commit is contained in:
Ben Skeggs 2015-08-20 14:54:14 +10:00
parent 3f532ef1e2
commit 5444e770e3
8 changed files with 218 additions and 156 deletions

View File

@@ -65,14 +65,16 @@ g84_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *object)
}
nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
nv_wo32(base->eng, addr + 0x00, 0x00190000);
nv_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
nv_wo32(base->eng, addr + 0x08, lower_32_bits(start));
nv_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 |
upper_32_bits(start));
nv_wo32(base->eng, addr + 0x10, 0x00000000);
nv_wo32(base->eng, addr + 0x14, 0x00000000);
nvkm_kmap(base->eng);
nvkm_wo32(base->eng, addr + 0x00, 0x00190000);
nvkm_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
nvkm_wo32(base->eng, addr + 0x08, lower_32_bits(start));
nvkm_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 |
upper_32_bits(start));
nvkm_wo32(base->eng, addr + 0x10, 0x00000000);
nvkm_wo32(base->eng, addr + 0x14, 0x00000000);
bar->flush(bar);
nvkm_done(base->eng);
return 0;
}
@@ -119,13 +121,15 @@ g84_fifo_context_detach(struct nvkm_object *parent, bool suspend,
return -EBUSY;
}
nv_wo32(base->eng, addr + 0x00, 0x00000000);
nv_wo32(base->eng, addr + 0x04, 0x00000000);
nv_wo32(base->eng, addr + 0x08, 0x00000000);
nv_wo32(base->eng, addr + 0x0c, 0x00000000);
nv_wo32(base->eng, addr + 0x10, 0x00000000);
nv_wo32(base->eng, addr + 0x14, 0x00000000);
nvkm_kmap(base->eng);
nvkm_wo32(base->eng, addr + 0x00, 0x00000000);
nvkm_wo32(base->eng, addr + 0x04, 0x00000000);
nvkm_wo32(base->eng, addr + 0x08, 0x00000000);
nvkm_wo32(base->eng, addr + 0x0c, 0x00000000);
nvkm_wo32(base->eng, addr + 0x10, 0x00000000);
nvkm_wo32(base->eng, addr + 0x14, 0x00000000);
bar->flush(bar);
nvkm_done(base->eng);
return 0;
}
@@ -216,23 +220,25 @@ g84_fifo_chan_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
nv_parent(chan)->object_attach = g84_fifo_object_attach;
nv_parent(chan)->object_detach = nv50_fifo_object_detach;
nv_wo32(base->ramfc, 0x08, lower_32_bits(args->v0.offset));
nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->v0.offset));
nv_wo32(base->ramfc, 0x10, lower_32_bits(args->v0.offset));
nv_wo32(base->ramfc, 0x14, upper_32_bits(args->v0.offset));
nv_wo32(base->ramfc, 0x3c, 0x003f6078);
nv_wo32(base->ramfc, 0x44, 0x01003fff);
nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
nv_wo32(base->ramfc, 0x4c, 0xffffffff);
nv_wo32(base->ramfc, 0x60, 0x7fffffff);
nv_wo32(base->ramfc, 0x78, 0x00000000);
nv_wo32(base->ramfc, 0x7c, 0x30000001);
nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
(4 << 24) /* SEARCH_FULL */ |
(chan->ramht->gpuobj.node->offset >> 4));
nv_wo32(base->ramfc, 0x88, base->cache->addr >> 10);
nv_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12);
nvkm_kmap(base->ramfc);
nvkm_wo32(base->ramfc, 0x08, lower_32_bits(args->v0.offset));
nvkm_wo32(base->ramfc, 0x0c, upper_32_bits(args->v0.offset));
nvkm_wo32(base->ramfc, 0x10, lower_32_bits(args->v0.offset));
nvkm_wo32(base->ramfc, 0x14, upper_32_bits(args->v0.offset));
nvkm_wo32(base->ramfc, 0x3c, 0x003f6078);
nvkm_wo32(base->ramfc, 0x44, 0x01003fff);
nvkm_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
nvkm_wo32(base->ramfc, 0x4c, 0xffffffff);
nvkm_wo32(base->ramfc, 0x60, 0x7fffffff);
nvkm_wo32(base->ramfc, 0x78, 0x00000000);
nvkm_wo32(base->ramfc, 0x7c, 0x30000001);
nvkm_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
(4 << 24) /* SEARCH_FULL */ |
(chan->ramht->gpuobj.node->offset >> 4));
nvkm_wo32(base->ramfc, 0x88, base->cache->addr >> 10);
nvkm_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12);
bar->flush(bar);
nvkm_done(base->ramfc);
return 0;
}
@@ -294,20 +300,22 @@ g84_fifo_chan_ctor_ind(struct nvkm_object *parent, struct nvkm_object *engine,
ioffset = args->v0.ioffset;
ilength = order_base_2(args->v0.ilength / 8);
nv_wo32(base->ramfc, 0x3c, 0x403f6078);
nv_wo32(base->ramfc, 0x44, 0x01003fff);
nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
nv_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
nv_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
nv_wo32(base->ramfc, 0x60, 0x7fffffff);
nv_wo32(base->ramfc, 0x78, 0x00000000);
nv_wo32(base->ramfc, 0x7c, 0x30000001);
nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
(4 << 24) /* SEARCH_FULL */ |
(chan->ramht->gpuobj.node->offset >> 4));
nv_wo32(base->ramfc, 0x88, base->cache->addr >> 10);
nv_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12);
nvkm_kmap(base->ramfc);
nvkm_wo32(base->ramfc, 0x3c, 0x403f6078);
nvkm_wo32(base->ramfc, 0x44, 0x01003fff);
nvkm_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
nvkm_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
nvkm_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
nvkm_wo32(base->ramfc, 0x60, 0x7fffffff);
nvkm_wo32(base->ramfc, 0x78, 0x00000000);
nvkm_wo32(base->ramfc, 0x7c, 0x30000001);
nvkm_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
(4 << 24) /* SEARCH_FULL */ |
(chan->ramht->gpuobj.node->offset >> 4));
nvkm_wo32(base->ramfc, 0x88, base->cache->addr >> 10);
nvkm_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12);
bar->flush(bar);
nvkm_done(base->ramfc);
return 0;
}

View File

@@ -86,15 +86,17 @@ gf100_fifo_runlist_update(struct gf100_fifo *fifo)
cur = fifo->runlist.mem[fifo->runlist.active];
fifo->runlist.active = !fifo->runlist.active;
nvkm_kmap(cur);
for (i = 0, p = 0; i < 128; i++) {
struct gf100_fifo_chan *chan = (void *)fifo->base.channel[i];
if (chan && chan->state == RUNNING) {
nv_wo32(cur, p + 0, i);
nv_wo32(cur, p + 4, 0x00000004);
nvkm_wo32(cur, p + 0, i);
nvkm_wo32(cur, p + 4, 0x00000004);
p += 8;
}
}
bar->flush(bar);
nvkm_done(cur);
nvkm_wr32(device, 0x002270, cur->addr >> 12);
nvkm_wr32(device, 0x002274, 0x01f00000 | (p >> 3));
@@ -112,6 +114,7 @@ gf100_fifo_context_attach(struct nvkm_object *parent,
{
struct nvkm_bar *bar = nvkm_bar(parent);
struct gf100_fifo_base *base = (void *)parent->parent;
struct nvkm_gpuobj *engn = &base->base.gpuobj;
struct nvkm_engctx *ectx = (void *)object;
u32 addr;
int ret;
@@ -137,9 +140,11 @@ gf100_fifo_context_attach(struct nvkm_object *parent,
nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
}
nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset));
nvkm_kmap(engn);
nvkm_wo32(engn, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
nvkm_wo32(engn, addr + 0x04, upper_32_bits(ectx->vma.offset));
bar->flush(bar);
nvkm_done(engn);
return 0;
}
@@ -150,6 +155,7 @@ gf100_fifo_context_detach(struct nvkm_object *parent, bool suspend,
struct gf100_fifo *fifo = (void *)parent->engine;
struct gf100_fifo_base *base = (void *)parent->parent;
struct gf100_fifo_chan *chan = (void *)parent;
struct nvkm_gpuobj *engn = &base->base.gpuobj;
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_bar *bar = device->bar;
@@ -178,9 +184,11 @@ gf100_fifo_context_detach(struct nvkm_object *parent, bool suspend,
return -EBUSY;
}
nv_wo32(base, addr + 0x00, 0x00000000);
nv_wo32(base, addr + 0x04, 0x00000000);
nvkm_kmap(engn);
nvkm_wo32(engn, addr + 0x00, 0x00000000);
nvkm_wo32(engn, addr + 0x04, 0x00000000);
bar->flush(bar);
nvkm_done(engn);
return 0;
}
@@ -196,6 +204,7 @@ gf100_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct gf100_fifo *fifo = (void *)engine;
struct gf100_fifo_base *base = (void *)parent;
struct gf100_fifo_chan *chan;
struct nvkm_gpuobj *ramfc = &base->base.gpuobj;
u64 usermem, ioffset, ilength;
int ret, i;
@@ -231,26 +240,30 @@ gf100_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
ioffset = args->v0.ioffset;
ilength = order_base_2(args->v0.ilength / 8);
nvkm_kmap(fifo->user.mem);
for (i = 0; i < 0x1000; i += 4)
nv_wo32(fifo->user.mem, usermem + i, 0x00000000);
nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000);
nvkm_done(fifo->user.mem);
nv_wo32(base, 0x08, lower_32_bits(fifo->user.mem->addr + usermem));
nv_wo32(base, 0x0c, upper_32_bits(fifo->user.mem->addr + usermem));
nv_wo32(base, 0x10, 0x0000face);
nv_wo32(base, 0x30, 0xfffff902);
nv_wo32(base, 0x48, lower_32_bits(ioffset));
nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
nv_wo32(base, 0x54, 0x00000002);
nv_wo32(base, 0x84, 0x20400000);
nv_wo32(base, 0x94, 0x30000001);
nv_wo32(base, 0x9c, 0x00000100);
nv_wo32(base, 0xa4, 0x1f1f1f1f);
nv_wo32(base, 0xa8, 0x1f1f1f1f);
nv_wo32(base, 0xac, 0x0000001f);
nv_wo32(base, 0xb8, 0xf8000000);
nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */
nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */
nvkm_kmap(ramfc);
nvkm_wo32(ramfc, 0x08, lower_32_bits(fifo->user.mem->addr + usermem));
nvkm_wo32(ramfc, 0x0c, upper_32_bits(fifo->user.mem->addr + usermem));
nvkm_wo32(ramfc, 0x10, 0x0000face);
nvkm_wo32(ramfc, 0x30, 0xfffff902);
nvkm_wo32(ramfc, 0x48, lower_32_bits(ioffset));
nvkm_wo32(ramfc, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
nvkm_wo32(ramfc, 0x54, 0x00000002);
nvkm_wo32(ramfc, 0x84, 0x20400000);
nvkm_wo32(ramfc, 0x94, 0x30000001);
nvkm_wo32(ramfc, 0x9c, 0x00000100);
nvkm_wo32(ramfc, 0xa4, 0x1f1f1f1f);
nvkm_wo32(ramfc, 0xa8, 0x1f1f1f1f);
nvkm_wo32(ramfc, 0xac, 0x0000001f);
nvkm_wo32(ramfc, 0xb8, 0xf8000000);
nvkm_wo32(ramfc, 0xf8, 0x10003080); /* 0x002310 */
nvkm_wo32(ramfc, 0xfc, 0x10000010); /* 0x002350 */
bar->flush(bar);
nvkm_done(ramfc);
return 0;
}
@@ -341,10 +354,12 @@ gf100_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
if (ret)
return ret;
nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr));
nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr));
nv_wo32(base, 0x0208, 0xffffffff);
nv_wo32(base, 0x020c, 0x000000ff);
nvkm_kmap(&base->base.gpuobj);
nvkm_wo32(&base->base.gpuobj, 0x0200, lower_32_bits(base->pgd->addr));
nvkm_wo32(&base->base.gpuobj, 0x0204, upper_32_bits(base->pgd->addr));
nvkm_wo32(&base->base.gpuobj, 0x0208, 0xffffffff);
nvkm_wo32(&base->base.gpuobj, 0x020c, 0x000000ff);
nvkm_done(&base->base.gpuobj);
ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
if (ret)

View File

@@ -106,15 +106,17 @@ gk104_fifo_runlist_update(struct gk104_fifo *fifo, u32 engine)
cur = engn->runlist[engn->cur_runlist];
engn->cur_runlist = !engn->cur_runlist;
nvkm_kmap(cur);
for (i = 0, p = 0; i < fifo->base.max; i++) {
struct gk104_fifo_chan *chan = (void *)fifo->base.channel[i];
if (chan && chan->state == RUNNING && chan->engine == engine) {
nv_wo32(cur, p + 0, i);
nv_wo32(cur, p + 4, 0x00000000);
nvkm_wo32(cur, p + 0, i);
nvkm_wo32(cur, p + 4, 0x00000000);
p += 8;
}
}
bar->flush(bar);
nvkm_done(cur);
nvkm_wr32(device, 0x002270, cur->addr >> 12);
nvkm_wr32(device, 0x002274, (engine << 20) | (p >> 3));
@@ -132,6 +134,7 @@ gk104_fifo_context_attach(struct nvkm_object *parent,
{
struct nvkm_bar *bar = nvkm_bar(parent);
struct gk104_fifo_base *base = (void *)parent->parent;
struct nvkm_gpuobj *engn = &base->base.gpuobj;
struct nvkm_engctx *ectx = (void *)object;
u32 addr;
int ret;
@@ -161,9 +164,11 @@ gk104_fifo_context_attach(struct nvkm_object *parent,
nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
}
nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
nv_wo32(base, addr + 0x04, upper_32_bits(ectx->vma.offset));
nvkm_kmap(engn);
nvkm_wo32(engn, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
nvkm_wo32(engn, addr + 0x04, upper_32_bits(ectx->vma.offset));
bar->flush(bar);
nvkm_done(engn);
return 0;
}
@@ -195,6 +200,7 @@ gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
struct nvkm_bar *bar = nvkm_bar(parent);
struct gk104_fifo_base *base = (void *)parent->parent;
struct gk104_fifo_chan *chan = (void *)parent;
struct nvkm_gpuobj *engn = &base->base.gpuobj;
u32 addr;
int ret;
@@ -216,9 +222,11 @@ gk104_fifo_context_detach(struct nvkm_object *parent, bool suspend,
return ret;
if (addr) {
nv_wo32(base, addr + 0x00, 0x00000000);
nv_wo32(base, addr + 0x04, 0x00000000);
nvkm_kmap(engn);
nvkm_wo32(engn, addr + 0x00, 0x00000000);
nvkm_wo32(engn, addr + 0x04, 0x00000000);
bar->flush(bar);
nvkm_done(engn);
}
return 0;
@@ -237,6 +245,7 @@ gk104_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
struct gk104_fifo_base *base = (void *)parent;
struct gk104_fifo_chan *chan;
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
struct nvkm_gpuobj *ramfc = &base->base.gpuobj;
u64 usermem, ioffset, ilength;
int ret, i;
@@ -282,24 +291,28 @@ gk104_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
ioffset = args->v0.ioffset;
ilength = order_base_2(args->v0.ilength / 8);
nvkm_kmap(fifo->user.mem);
for (i = 0; i < 0x200; i += 4)
nv_wo32(fifo->user.mem, usermem + i, 0x00000000);
nvkm_wo32(fifo->user.mem, usermem + i, 0x00000000);
nvkm_done(fifo->user.mem);
nv_wo32(base, 0x08, lower_32_bits(fifo->user.mem->addr + usermem));
nv_wo32(base, 0x0c, upper_32_bits(fifo->user.mem->addr + usermem));
nv_wo32(base, 0x10, 0x0000face);
nv_wo32(base, 0x30, 0xfffff902);
nv_wo32(base, 0x48, lower_32_bits(ioffset));
nv_wo32(base, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
nv_wo32(base, 0x84, 0x20400000);
nv_wo32(base, 0x94, 0x30000001);
nv_wo32(base, 0x9c, 0x00000100);
nv_wo32(base, 0xac, 0x0000001f);
nv_wo32(base, 0xe8, chan->base.chid);
nv_wo32(base, 0xb8, 0xf8000000);
nv_wo32(base, 0xf8, 0x10003080); /* 0x002310 */
nv_wo32(base, 0xfc, 0x10000010); /* 0x002350 */
nvkm_kmap(ramfc);
nvkm_wo32(ramfc, 0x08, lower_32_bits(fifo->user.mem->addr + usermem));
nvkm_wo32(ramfc, 0x0c, upper_32_bits(fifo->user.mem->addr + usermem));
nvkm_wo32(ramfc, 0x10, 0x0000face);
nvkm_wo32(ramfc, 0x30, 0xfffff902);
nvkm_wo32(ramfc, 0x48, lower_32_bits(ioffset));
nvkm_wo32(ramfc, 0x4c, upper_32_bits(ioffset) | (ilength << 16));
nvkm_wo32(ramfc, 0x84, 0x20400000);
nvkm_wo32(ramfc, 0x94, 0x30000001);
nvkm_wo32(ramfc, 0x9c, 0x00000100);
nvkm_wo32(ramfc, 0xac, 0x0000001f);
nvkm_wo32(ramfc, 0xe8, chan->base.chid);
nvkm_wo32(ramfc, 0xb8, 0xf8000000);
nvkm_wo32(ramfc, 0xf8, 0x10003080); /* 0x002310 */
nvkm_wo32(ramfc, 0xfc, 0x10000010); /* 0x002350 */
bar->flush(bar);
nvkm_done(ramfc);
return 0;
}
@@ -387,10 +400,12 @@ gk104_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
if (ret)
return ret;
nv_wo32(base, 0x0200, lower_32_bits(base->pgd->addr));
nv_wo32(base, 0x0204, upper_32_bits(base->pgd->addr));
nv_wo32(base, 0x0208, 0xffffffff);
nv_wo32(base, 0x020c, 0x000000ff);
nvkm_kmap(&base->base.gpuobj);
nvkm_wo32(&base->base.gpuobj, 0x0200, lower_32_bits(base->pgd->addr));
nvkm_wo32(&base->base.gpuobj, 0x0204, upper_32_bits(base->pgd->addr));
nvkm_wo32(&base->base.gpuobj, 0x0208, 0xffffffff);
nvkm_wo32(&base->base.gpuobj, 0x020c, 0x000000ff);
nvkm_done(&base->base.gpuobj);
ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
if (ret)

View File

@@ -142,16 +142,18 @@ nv04_fifo_chan_ctor(struct nvkm_object *parent,
nv_parent(chan)->context_attach = nv04_fifo_context_attach;
chan->ramfc = chan->base.chid * 32;
nv_wo32(fifo->ramfc, chan->ramfc + 0x00, args->v0.offset);
nv_wo32(fifo->ramfc, chan->ramfc + 0x04, args->v0.offset);
nv_wo32(fifo->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4);
nv_wo32(fifo->ramfc, chan->ramfc + 0x10,
nvkm_kmap(fifo->ramfc);
nvkm_wo32(fifo->ramfc, chan->ramfc + 0x00, args->v0.offset);
nvkm_wo32(fifo->ramfc, chan->ramfc + 0x04, args->v0.offset);
nvkm_wo32(fifo->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4);
nvkm_wo32(fifo->ramfc, chan->ramfc + 0x10,
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
nvkm_done(fifo->ramfc);
return 0;
}
@@ -162,9 +164,11 @@ nv04_fifo_chan_dtor(struct nvkm_object *object)
struct nv04_fifo_chan *chan = (void *)object;
struct ramfc_desc *c = fifo->ramfc_desc;
nvkm_kmap(fifo->ramfc);
do {
nv_wo32(fifo->ramfc, chan->ramfc + c->ctxp, 0x00000000);
nvkm_wo32(fifo->ramfc, chan->ramfc + c->ctxp, 0x00000000);
} while ((++c)->bits);
nvkm_done(fifo->ramfc);
nvkm_fifo_channel_destroy(&chan->base);
}
@@ -217,8 +221,8 @@ nv04_fifo_chan_fini(struct nvkm_object *object, bool suspend)
u32 rm = ((1ULL << c->bits) - 1) << c->regs;
u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
u32 rv = (nvkm_rd32(device, c->regp) & rm) >> c->regs;
u32 cv = (nv_ro32(fctx, c->ctxp + data) & ~cm);
nv_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
u32 cv = (nvkm_ro32(fctx, c->ctxp + data) & ~cm);
nvkm_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
} while ((++c)->bits);
c = fifo->ramfc_desc;

View File

@@ -86,16 +86,18 @@ nv10_fifo_chan_ctor(struct nvkm_object *parent,
nv_parent(chan)->context_attach = nv04_fifo_context_attach;
chan->ramfc = chan->base.chid * 32;
nv_wo32(fifo->ramfc, chan->ramfc + 0x00, args->v0.offset);
nv_wo32(fifo->ramfc, chan->ramfc + 0x04, args->v0.offset);
nv_wo32(fifo->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
nv_wo32(fifo->ramfc, chan->ramfc + 0x14,
nvkm_kmap(fifo->ramfc);
nvkm_wo32(fifo->ramfc, chan->ramfc + 0x00, args->v0.offset);
nvkm_wo32(fifo->ramfc, chan->ramfc + 0x04, args->v0.offset);
nvkm_wo32(fifo->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
nvkm_wo32(fifo->ramfc, chan->ramfc + 0x14,
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
nvkm_done(fifo->ramfc);
return 0;
}

View File

@@ -93,16 +93,18 @@ nv17_fifo_chan_ctor(struct nvkm_object *parent,
nv_parent(chan)->context_attach = nv04_fifo_context_attach;
chan->ramfc = chan->base.chid * 64;
nv_wo32(fifo->ramfc, chan->ramfc + 0x00, args->v0.offset);
nv_wo32(fifo->ramfc, chan->ramfc + 0x04, args->v0.offset);
nv_wo32(fifo->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
nv_wo32(fifo->ramfc, chan->ramfc + 0x14,
nvkm_kmap(fifo->ramfc);
nvkm_wo32(fifo->ramfc, chan->ramfc + 0x00, args->v0.offset);
nvkm_wo32(fifo->ramfc, chan->ramfc + 0x04, args->v0.offset);
nvkm_wo32(fifo->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
nvkm_wo32(fifo->ramfc, chan->ramfc + 0x14,
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
nvkm_done(fifo->ramfc);
return 0;
}

View File

@@ -130,7 +130,9 @@ nv40_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *engctx)
if ((nvkm_rd32(device, 0x003204) & fifo->base.max) == chan->base.chid)
nvkm_wr32(device, reg, nv_engctx(engctx)->addr);
nv_wo32(fifo->ramfc, chan->ramfc + ctx, nv_engctx(engctx)->addr);
nvkm_kmap(fifo->ramfc);
nvkm_wo32(fifo->ramfc, chan->ramfc + ctx, nv_engctx(engctx)->addr);
nvkm_done(fifo->ramfc);
nvkm_mask(device, 0x002500, 0x00000001, 0x00000001);
spin_unlock_irqrestore(&fifo->base.lock, flags);
@@ -167,7 +169,9 @@ nv40_fifo_context_detach(struct nvkm_object *parent, bool suspend,
if ((nvkm_rd32(device, 0x003204) & fifo->base.max) == chan->base.chid)
nvkm_wr32(device, reg, 0x00000000);
nv_wo32(fifo->ramfc, chan->ramfc + ctx, 0x00000000);
nvkm_kmap(fifo->ramfc);
nvkm_wo32(fifo->ramfc, chan->ramfc + ctx, 0x00000000);
nvkm_done(fifo->ramfc);
nvkm_mask(device, 0x002500, 0x00000001, 0x00000001);
spin_unlock_irqrestore(&fifo->base.lock, flags);
@@ -212,17 +216,19 @@ nv40_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
nv_parent(chan)->object_detach = nv04_fifo_object_detach;
chan->ramfc = chan->base.chid * 128;
nv_wo32(fifo->ramfc, chan->ramfc + 0x00, args->v0.offset);
nv_wo32(fifo->ramfc, chan->ramfc + 0x04, args->v0.offset);
nv_wo32(fifo->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
nv_wo32(fifo->ramfc, chan->ramfc + 0x18, 0x30000000 |
nvkm_kmap(fifo->ramfc);
nvkm_wo32(fifo->ramfc, chan->ramfc + 0x00, args->v0.offset);
nvkm_wo32(fifo->ramfc, chan->ramfc + 0x04, args->v0.offset);
nvkm_wo32(fifo->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4);
nvkm_wo32(fifo->ramfc, chan->ramfc + 0x18, 0x30000000 |
NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
nv_wo32(fifo->ramfc, chan->ramfc + 0x3c, 0x0001ffff);
nvkm_wo32(fifo->ramfc, chan->ramfc + 0x3c, 0x0001ffff);
nvkm_done(fifo->ramfc);
return 0;
}

View File

@@ -49,12 +49,13 @@ nv50_fifo_playlist_update_locked(struct nv50_fifo *fifo)
cur = fifo->playlist[fifo->cur_playlist];
fifo->cur_playlist = !fifo->cur_playlist;
nvkm_kmap(cur);
for (i = fifo->base.min, p = 0; i < fifo->base.max; i++) {
if (nvkm_rd32(device, 0x002600 + (i * 4)) & 0x80000000)
nv_wo32(cur, p++ * 4, i);
nvkm_wo32(cur, p++ * 4, i);
}
bar->flush(bar);
nvkm_done(cur);
nvkm_wr32(device, 0x0032f4, cur->addr >> 12);
nvkm_wr32(device, 0x0032ec, p);
@@ -88,14 +89,17 @@ nv50_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *object)
}
nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
nv_wo32(base->eng, addr + 0x00, 0x00190000);
nv_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
nv_wo32(base->eng, addr + 0x08, lower_32_bits(start));
nv_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 |
upper_32_bits(start));
nv_wo32(base->eng, addr + 0x10, 0x00000000);
nv_wo32(base->eng, addr + 0x14, 0x00000000);
nvkm_kmap(base->eng);
nvkm_wo32(base->eng, addr + 0x00, 0x00190000);
nvkm_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
nvkm_wo32(base->eng, addr + 0x08, lower_32_bits(start));
nvkm_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 |
upper_32_bits(start));
nvkm_wo32(base->eng, addr + 0x10, 0x00000000);
nvkm_wo32(base->eng, addr + 0x14, 0x00000000);
bar->flush(bar);
nvkm_done(base->eng);
return 0;
}
@@ -148,13 +152,15 @@ nv50_fifo_context_detach(struct nvkm_object *parent, bool suspend,
nvkm_wr32(device, 0x00b860, me);
if (ret == 0) {
nv_wo32(base->eng, addr + 0x00, 0x00000000);
nv_wo32(base->eng, addr + 0x04, 0x00000000);
nv_wo32(base->eng, addr + 0x08, 0x00000000);
nv_wo32(base->eng, addr + 0x0c, 0x00000000);
nv_wo32(base->eng, addr + 0x10, 0x00000000);
nv_wo32(base->eng, addr + 0x14, 0x00000000);
nvkm_kmap(base->eng);
nvkm_wo32(base->eng, addr + 0x00, 0x00000000);
nvkm_wo32(base->eng, addr + 0x04, 0x00000000);
nvkm_wo32(base->eng, addr + 0x08, 0x00000000);
nvkm_wo32(base->eng, addr + 0x0c, 0x00000000);
nvkm_wo32(base->eng, addr + 0x10, 0x00000000);
nvkm_wo32(base->eng, addr + 0x14, 0x00000000);
bar->flush(bar);
nvkm_done(base->eng);
}
return ret;
@@ -234,21 +240,23 @@ nv50_fifo_chan_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
if (ret)
return ret;
nv_wo32(base->ramfc, 0x08, lower_32_bits(args->v0.offset));
nv_wo32(base->ramfc, 0x0c, upper_32_bits(args->v0.offset));
nv_wo32(base->ramfc, 0x10, lower_32_bits(args->v0.offset));
nv_wo32(base->ramfc, 0x14, upper_32_bits(args->v0.offset));
nv_wo32(base->ramfc, 0x3c, 0x003f6078);
nv_wo32(base->ramfc, 0x44, 0x01003fff);
nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
nv_wo32(base->ramfc, 0x4c, 0xffffffff);
nv_wo32(base->ramfc, 0x60, 0x7fffffff);
nv_wo32(base->ramfc, 0x78, 0x00000000);
nv_wo32(base->ramfc, 0x7c, 0x30000001);
nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
(4 << 24) /* SEARCH_FULL */ |
(chan->ramht->gpuobj.node->offset >> 4));
nvkm_kmap(base->ramfc);
nvkm_wo32(base->ramfc, 0x08, lower_32_bits(args->v0.offset));
nvkm_wo32(base->ramfc, 0x0c, upper_32_bits(args->v0.offset));
nvkm_wo32(base->ramfc, 0x10, lower_32_bits(args->v0.offset));
nvkm_wo32(base->ramfc, 0x14, upper_32_bits(args->v0.offset));
nvkm_wo32(base->ramfc, 0x3c, 0x003f6078);
nvkm_wo32(base->ramfc, 0x44, 0x01003fff);
nvkm_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
nvkm_wo32(base->ramfc, 0x4c, 0xffffffff);
nvkm_wo32(base->ramfc, 0x60, 0x7fffffff);
nvkm_wo32(base->ramfc, 0x78, 0x00000000);
nvkm_wo32(base->ramfc, 0x7c, 0x30000001);
nvkm_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
(4 << 24) /* SEARCH_FULL */ |
(chan->ramht->gpuobj.node->offset >> 4));
bar->flush(bar);
nvkm_done(base->ramfc);
return 0;
}
@@ -300,18 +308,20 @@ nv50_fifo_chan_ctor_ind(struct nvkm_object *parent, struct nvkm_object *engine,
ioffset = args->v0.ioffset;
ilength = order_base_2(args->v0.ilength / 8);
nv_wo32(base->ramfc, 0x3c, 0x403f6078);
nv_wo32(base->ramfc, 0x44, 0x01003fff);
nv_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
nv_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
nv_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
nv_wo32(base->ramfc, 0x60, 0x7fffffff);
nv_wo32(base->ramfc, 0x78, 0x00000000);
nv_wo32(base->ramfc, 0x7c, 0x30000001);
nv_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
(4 << 24) /* SEARCH_FULL */ |
(chan->ramht->gpuobj.node->offset >> 4));
nvkm_kmap(base->ramfc);
nvkm_wo32(base->ramfc, 0x3c, 0x403f6078);
nvkm_wo32(base->ramfc, 0x44, 0x01003fff);
nvkm_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
nvkm_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
nvkm_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
nvkm_wo32(base->ramfc, 0x60, 0x7fffffff);
nvkm_wo32(base->ramfc, 0x78, 0x00000000);
nvkm_wo32(base->ramfc, 0x7c, 0x30000001);
nvkm_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
(4 << 24) /* SEARCH_FULL */ |
(chan->ramht->gpuobj.node->offset >> 4));
bar->flush(bar);
nvkm_done(base->ramfc);
return 0;
}