mirror of https://gitee.com/openkylin/linux.git
Merge remote branch 'nouveau/drm-nouveau-next' of /ssd/git/drm-nouveau-next into drm-fixes
* 'nouveau/drm-nouveau-next' of /ssd/git/drm-nouveau-next:
  drm/nouveau: fix gpu page faults triggered by plymouthd
  drm/nouveau: greatly simplify mm, killing some bugs in the process
  drm/nvc0: enable protection of system-use-only structures in vm
  drm/nv40: initialise 0x17xx on all chipsets that have it
  drm/nv40: make detection of 0x4097-ful chipsets available everywhere
commit 51fda92223
@@ -160,6 +160,7 @@ enum nouveau_flags {
 #define NVOBJ_FLAG_ZERO_ALLOC (1 << 1)
 #define NVOBJ_FLAG_ZERO_FREE (1 << 2)
 #define NVOBJ_FLAG_VM (1 << 3)
+#define NVOBJ_FLAG_VM_USER (1 << 4)
 
 #define NVOBJ_CINST_GLOBAL 0xdeadbeef
 
@@ -1576,6 +1577,20 @@ nv_match_device(struct drm_device *dev, unsigned device,
                dev->pdev->subsystem_device == sub_device;
 }
 
+/* returns 1 if device is one of the nv4x using the 0x4497 object class,
+ * helpful to determine a number of other hardware features
+ */
+static inline int
+nv44_graph_class(struct drm_device *dev)
+{
+        struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+        if ((dev_priv->chipset & 0xf0) == 0x60)
+                return 1;
+
+        return !(0x0baf & (1 << (dev_priv->chipset & 0x0f)));
+}
+
 /* memory type/access flags, do not match hardware values */
 #define NV_MEM_ACCESS_RO 1
 #define NV_MEM_ACCESS_WO 2
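Aside (not from the patch): the 0x0baf mask in nv44_graph_class() is compact but opaque. The standalone sketch below tabulates what the helper returns for each nv4x chipset id; the ids with a clear mask bit (0x44, 0x46, 0x4a, 0x4c, 0x4e) are exactly the ones the old 0x00005450 mask in nv40_graph.c (removed further down in this merge) selected for the 0x4497 class.

/* Standalone sketch, not from the patch: tabulate nv44_graph_class()
 * over the nv4x chipset ids.  A clear bit in 0x0baf marks a chipset
 * that uses the 0x4497 ("nv44-class") graphics object.
 */
#include <stdio.h>

static int nv44_graph_class(int chipset)
{
        if ((chipset & 0xf0) == 0x60)
                return 1;
        return !(0x0baf & (1 << (chipset & 0x0f)));
}

int main(void)
{
        for (int c = 0x40; c <= 0x4f; c++)
                printf("chipset 0x%02x -> %s\n", c,
                       nv44_graph_class(c) ? "0x4497" : "0x4097");
        return 0;
}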
@@ -352,8 +352,8 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
                       FBINFO_HWACCEL_IMAGEBLIT;
         info->flags |= FBINFO_CAN_FORCE_OUTPUT;
         info->fbops = &nouveau_fbcon_sw_ops;
-        info->fix.smem_start = dev->mode_config.fb_base +
-                               (nvbo->bo.mem.start << PAGE_SHIFT);
+        info->fix.smem_start = nvbo->bo.mem.bus.base +
+                               nvbo->bo.mem.bus.offset;
         info->fix.smem_len = size;
 
         info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
@@ -742,30 +742,24 @@ nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
 {
         struct nouveau_mm *mm = man->priv;
         struct nouveau_mm_node *r;
-        u64 total = 0, ttotal[3] = {}, tused[3] = {}, tfree[3] = {};
-        int i;
+        u32 total = 0, free = 0;
 
         mutex_lock(&mm->mutex);
         list_for_each_entry(r, &mm->nodes, nl_entry) {
-                printk(KERN_DEBUG "%s %s-%d: 0x%010llx 0x%010llx\n",
-                       prefix, r->free ? "free" : "used", r->type,
-                       ((u64)r->offset << 12),
+                printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
+                       prefix, r->type, ((u64)r->offset << 12),
                        (((u64)r->offset + r->length) << 12));
 
                 total += r->length;
-                ttotal[r->type] += r->length;
-                if (r->free)
-                        tfree[r->type] += r->length;
-                else
-                        tused[r->type] += r->length;
+                if (!r->type)
+                        free += r->length;
         }
         mutex_unlock(&mm->mutex);
 
-        printk(KERN_DEBUG "%s total: 0x%010llx\n", prefix, total << 12);
-        for (i = 0; i < 3; i++) {
-                printk(KERN_DEBUG "%s type %d: 0x%010llx, "
-                       "used 0x%010llx, free 0x%010llx\n", prefix,
-                       i, ttotal[i] << 12, tused[i] << 12, tfree[i] << 12);
-        }
+        printk(KERN_DEBUG "%s total: 0x%010llx free: 0x%010llx\n",
+               prefix, (u64)total << 12, (u64)free << 12);
+        printk(KERN_DEBUG "%s block: 0x%08x\n",
+               prefix, mm->block_size << 12);
 }
 
 const struct ttm_mem_type_manager_func nouveau_vram_manager = {
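Aside (not from the patch): nouveau_mm tracks offsets and lengths in 4 KiB units, which is why the debug code above shifts everything left by 12 before printing byte addresses, including mm->block_size in the new output. A trivial sketch of the conversion, with made-up values:

/* Sketch: nouveau_mm offsets/lengths are in 4 KiB units; shifting by 12
 * converts them to byte addresses, as the debug code above does.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t offset = 0x100, length = 0x40;   /* units of 4 KiB */
        printf("node: 0x%010llx 0x%010llx\n",
               (unsigned long long)offset << 12,
               ((unsigned long long)offset + length) << 12);
        return 0;
}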
@@ -48,175 +48,76 @@ region_split(struct nouveau_mm *rmm, struct nouveau_mm_node *a, u32 size)
 
         b->offset = a->offset;
         b->length = size;
-        b->free   = a->free;
         b->type   = a->type;
         a->offset += size;
         a->length -= size;
         list_add_tail(&b->nl_entry, &a->nl_entry);
-        if (b->free)
+        if (b->type == 0)
                 list_add_tail(&b->fl_entry, &a->fl_entry);
         return b;
 }
 
-static struct nouveau_mm_node *
-nouveau_mm_merge(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
-{
-        struct nouveau_mm_node *prev, *next;
-
-        /* try to merge with free adjacent entries of same type */
-        prev = list_entry(this->nl_entry.prev, struct nouveau_mm_node, nl_entry);
-        if (this->nl_entry.prev != &rmm->nodes) {
-                if (prev->free && prev->type == this->type) {
-                        prev->length += this->length;
-                        region_put(rmm, this);
-                        this = prev;
-                }
-        }
-
-        next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry);
-        if (this->nl_entry.next != &rmm->nodes) {
-                if (next->free && next->type == this->type) {
-                        next->offset = this->offset;
-                        next->length += this->length;
-                        region_put(rmm, this);
-                        this = next;
-                }
-        }
-
-        return this;
-}
+#define node(root, dir) ((root)->nl_entry.dir == &rmm->nodes) ? NULL : \
+        list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry)
 
 void
 nouveau_mm_put(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
 {
-        u32 block_s, block_l;
+        struct nouveau_mm_node *prev = node(this, prev);
+        struct nouveau_mm_node *next = node(this, next);
 
-        this->free = true;
         list_add(&this->fl_entry, &rmm->free);
-        this = nouveau_mm_merge(rmm, this);
-
-        /* any entirely free blocks now? we'll want to remove typing
-         * on them now so they can be use for any memory allocation
-         */
-        block_s = roundup(this->offset, rmm->block_size);
-        if (block_s + rmm->block_size > this->offset + this->length)
-                return;
-
-        /* split off any still-typed region at the start */
-        if (block_s != this->offset) {
-                if (!region_split(rmm, this, block_s - this->offset))
-                        return;
-        }
-
-        /* split off the soon-to-be-untyped block(s) */
-        block_l = rounddown(this->length, rmm->block_size);
-        if (block_l != this->length) {
-                this = region_split(rmm, this, block_l);
-                if (!this)
-                        return;
-        }
-
-        /* mark as having no type, and retry merge with any adjacent
-         * untyped blocks
-         */
         this->type = 0;
-        nouveau_mm_merge(rmm, this);
+
+        if (prev && prev->type == 0) {
+                prev->length += this->length;
+                region_put(rmm, this);
+                this = prev;
+        }
+
+        if (next && next->type == 0) {
+                next->offset = this->offset;
+                next->length += this->length;
+                region_put(rmm, this);
+        }
 }
 
 int
 nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
                u32 align, struct nouveau_mm_node **pnode)
 {
-        struct nouveau_mm_node *this, *tmp, *next;
-        u32 splitoff, avail, alloc;
+        struct nouveau_mm_node *prev, *this, *next;
+        u32 min = size_nc ? size_nc : size;
+        u32 align_mask = align - 1;
+        u32 splitoff;
+        u32 s, e;
 
-        list_for_each_entry_safe(this, tmp, &rmm->free, fl_entry) {
-                next = list_entry(this->nl_entry.next, struct nouveau_mm_node, nl_entry);
-                if (this->nl_entry.next == &rmm->nodes)
-                        next = NULL;
+        list_for_each_entry(this, &rmm->free, fl_entry) {
+                e = this->offset + this->length;
+                s = this->offset;
 
-                /* skip wrongly typed blocks */
-                if (this->type && this->type != type)
+                prev = node(this, prev);
+                if (prev && prev->type != type)
+                        s = roundup(s, rmm->block_size);
+
+                next = node(this, next);
+                if (next && next->type != type)
+                        e = rounddown(e, rmm->block_size);
+
+                s = (s + align_mask) & ~align_mask;
+                e &= ~align_mask;
+                if (s > e || e - s < min)
                         continue;
 
-                /* account for alignment */
-                splitoff = this->offset & (align - 1);
-                if (splitoff)
-                        splitoff = align - splitoff;
-
-                if (this->length <= splitoff)
-                        continue;
-
-                /* determine total memory available from this, and
-                 * the next block (if appropriate)
-                 */
-                avail = this->length;
-                if (next && next->free && (!next->type || next->type == type))
-                        avail += next->length;
-
-                avail -= splitoff;
-
-                /* determine allocation size */
-                if (size_nc) {
-                        alloc = min(avail, size);
-                        alloc = rounddown(alloc, size_nc);
-                        if (alloc == 0)
-                                continue;
-                } else {
-                        alloc = size;
-                        if (avail < alloc)
-                                continue;
-                }
-
-                /* untyped block, split off a chunk that's a multiple
-                 * of block_size and type it
-                 */
-                if (!this->type) {
-                        u32 block = roundup(alloc + splitoff, rmm->block_size);
-                        if (this->length < block)
-                                continue;
-
-                        this = region_split(rmm, this, block);
-                        if (!this)
-                                return -ENOMEM;
-
-                        this->type = type;
-                }
-
-                /* stealing memory from adjacent block */
-                if (alloc > this->length) {
-                        u32 amount = alloc - (this->length - splitoff);
-
-                        if (!next->type) {
-                                amount = roundup(amount, rmm->block_size);
-
-                                next = region_split(rmm, next, amount);
-                                if (!next)
-                                        return -ENOMEM;
-
-                                next->type = type;
-                        }
-
-                        this->length += amount;
-                        next->offset += amount;
-                        next->length -= amount;
-                        if (!next->length) {
-                                list_del(&next->nl_entry);
-                                list_del(&next->fl_entry);
-                                kfree(next);
-                        }
-                }
-
-                if (splitoff) {
-                        if (!region_split(rmm, this, splitoff))
-                                return -ENOMEM;
-                }
-
-                this = region_split(rmm, this, alloc);
-                if (this == NULL)
+                splitoff = s - this->offset;
+                if (splitoff && !region_split(rmm, this, splitoff))
                         return -ENOMEM;
 
-                this->free = false;
+                this = region_split(rmm, this, min(size, e - s));
+                if (!this)
+                        return -ENOMEM;
+
+                this->type = type;
                 list_del(&this->fl_entry);
                 *pnode = this;
                 return 0;
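Aside (not from the patch): in the rewritten allocator a node's type field doubles as the free flag (type == 0 means free), and the node() macro walks the neighbour list. The heart of the new nouveau_mm_get() is the window computation: shrink the candidate's [s, e) range to block_size boundaries on any side whose neighbour holds a different type, round s up and e down to the alignment, and allocate only if at least `min` units survive. A standalone sketch of that math, with invented numbers:

/* Standalone sketch of the window-fitting math used by the new
 * nouveau_mm_get().  All quantities are in block units, as in the
 * allocator itself.
 */
#include <stdio.h>
#include <stdint.h>

#define MM_ROUNDUP(x, b)   ((((x) + (b) - 1) / (b)) * (b))
#define MM_ROUNDDOWN(x, b) (((x) / (b)) * (b))

int main(void)
{
        uint32_t offset = 5, length = 100;        /* a free node */
        uint32_t block_size = 8, align = 4, min = 16;

        uint32_t s = offset, e = offset + length;
        s = MM_ROUNDUP(s, block_size);            /* prev has another type */
        e = MM_ROUNDDOWN(e, block_size);          /* next has another type */

        uint32_t align_mask = align - 1;
        s = (s + align_mask) & ~align_mask;
        e &= ~align_mask;

        if (s > e || e - s < min)
                printf("no fit\n");
        else
                printf("fit: [%u, %u), %u units usable\n", s, e, e - s);
        return 0;
}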
@@ -234,7 +135,6 @@ nouveau_mm_init(struct nouveau_mm **prmm, u32 offset, u32 length, u32 block)
         heap = kzalloc(sizeof(*heap), GFP_KERNEL);
         if (!heap)
                 return -ENOMEM;
-        heap->free = true;
         heap->offset = roundup(offset, block);
         heap->length = rounddown(offset + length, block) - heap->offset;
 
@@ -30,9 +30,7 @@ struct nouveau_mm_node {
         struct list_head fl_entry;
         struct list_head rl_entry;
 
-        bool free;
-        int  type;
-
+        u8 type;
         u32 offset;
         u32 length;
 };
@@ -451,8 +451,7 @@ nv40_graph_register(struct drm_device *dev)
         NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */
 
         /* curie */
-        if (dev_priv->chipset >= 0x60 ||
-            0x00005450 & (1 << (dev_priv->chipset & 0x0f)))
+        if (nv44_graph_class(dev))
                 NVOBJ_CLASS(dev, 0x4497, GR);
         else
                 NVOBJ_CLASS(dev, 0x4097, GR);
@@ -117,17 +117,6 @@
  * - get vs count from 0x1540
  */
 
-static int
-nv40_graph_4097(struct drm_device *dev)
-{
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
-
-        if ((dev_priv->chipset & 0xf0) == 0x60)
-                return 0;
-
-        return !!(0x0baf & (1 << dev_priv->chipset));
-}
-
 static int
 nv40_graph_vs_count(struct drm_device *dev)
 {
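Aside (not from the patch): the removed helper shifted by the raw chipset id. `1 << dev_priv->chipset` with chipset in 0x40..0x4f shifts a 32-bit int by 64 or more bits, which is undefined behaviour in C; it gave the intended answer only because common architectures (x86 among them) reduce the shift count modulo 32. The nv44_graph_class() replacement masks with `& 0x0f` first. A sketch showing the masked form of the same test agrees, assuming that modulo-32 behaviour:

/* Sketch: the old nv40_graph_4097() relied on `1 << chipset` with
 * chipset in 0x40..0x4f -- undefined in C, effectively a shift by
 * (chipset % 32) on x86.  The masked form is what the replacement
 * helper uses; the table shows the two agree under that assumption.
 */
#include <stdio.h>

int main(void)
{
        for (int chipset = 0x40; chipset <= 0x4f; chipset++) {
                int old_eff = !!(0x0baf & (1 << (chipset % 32)));
                int masked  = !!(0x0baf & (1 << (chipset & 0x0f)));
                printf("0x%02x: old=%d masked=%d\n",
                       chipset, old_eff, masked);
        }
        return 0;
}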
@@ -219,7 +208,7 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
                 gr_def(ctx, 0x4009dc, 0x80000000);
         } else {
                 cp_ctx(ctx, 0x400840, 20);
-                if (!nv40_graph_4097(ctx->dev)) {
+                if (nv44_graph_class(ctx->dev)) {
                         for (i = 0; i < 8; i++)
                                 gr_def(ctx, 0x400860 + (i * 4), 0x00000001);
                 }
@@ -228,7 +217,7 @@ nv40_graph_construct_general(struct nouveau_grctx *ctx)
                 gr_def(ctx, 0x400888, 0x00000040);
         cp_ctx(ctx, 0x400894, 11);
         gr_def(ctx, 0x400894, 0x00000040);
-        if (nv40_graph_4097(ctx->dev)) {
+        if (!nv44_graph_class(ctx->dev)) {
                 for (i = 0; i < 8; i++)
                         gr_def(ctx, 0x4008a0 + (i * 4), 0x80000000);
         }
@@ -546,7 +535,7 @@ nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
 static void
 nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx)
 {
-        int len = nv40_graph_4097(ctx->dev) ? 0x0684 : 0x0084;
+        int len = nv44_graph_class(ctx->dev) ? 0x0084 : 0x0684;
 
         cp_out (ctx, 0x300000);
         cp_lsr (ctx, len - 4);
@@ -582,11 +571,11 @@ nv40_graph_construct_shader(struct nouveau_grctx *ctx)
         } else {
                 b0_offset = 0x1d40/4; /* 2200 */
                 b1_offset = 0x3f40/4; /* 0b00 : 0a40 */
-                vs_len = nv40_graph_4097(dev) ? 0x4a40/4 : 0x4980/4;
+                vs_len = nv44_graph_class(dev) ? 0x4980/4 : 0x4a40/4;
         }
 
         cp_lsr(ctx, vs_len * vs_nr + 0x300/4);
-        cp_out(ctx, nv40_graph_4097(dev) ? 0x800041 : 0x800029);
+        cp_out(ctx, nv44_graph_class(dev) ? 0x800029 : 0x800041);
 
         offset = ctx->ctxvals_pos;
         ctx->ctxvals_pos += (0x0300/4 + (vs_nr * vs_len));
@@ -6,27 +6,17 @@
 int
 nv40_mc_init(struct drm_device *dev)
 {
-        struct drm_nouveau_private *dev_priv = dev->dev_private;
-        uint32_t tmp;
-
         /* Power up everything, resetting each individual unit will
          * be done later if needed.
          */
         nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
 
-        switch (dev_priv->chipset) {
-        case 0x44:
-        case 0x46: /* G72 */
-        case 0x4e:
-        case 0x4c: /* C51_G7X */
-                tmp = nv_rd32(dev, NV04_PFB_FIFO_DATA);
+        if (nv44_graph_class(dev)) {
+                u32 tmp = nv_rd32(dev, NV04_PFB_FIFO_DATA);
                 nv_wr32(dev, NV40_PMC_1700, tmp);
                 nv_wr32(dev, NV40_PMC_1704, 0);
                 nv_wr32(dev, NV40_PMC_1708, 0);
                 nv_wr32(dev, NV40_PMC_170C, tmp);
-                break;
-        default:
-                break;
         }
 
         return 0;
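Aside (not from the patch): the old switch covered 0x44/0x46/0x4c/0x4e only, while nv44_graph_class() also matches 0x4a and the 0x6x ids, which is the point of the "drm/nv40: initialise 0x17xx on all chipsets that have it" commit in this merge. A sketch comparing the two predicates (whether every matched id shipped as real silicon is a separate question):

/* Sketch: chipsets newly covered by the nv40_mc_init() change. */
#include <stdio.h>

static int old_switch(int chipset)
{
        return chipset == 0x44 || chipset == 0x46 ||
               chipset == 0x4e || chipset == 0x4c;
}

static int nv44_graph_class(int chipset)
{
        if ((chipset & 0xf0) == 0x60)
                return 1;
        return !(0x0baf & (1 << (chipset & 0x0f)));
}

int main(void)
{
        const int ranges[2][2] = { {0x40, 0x4f}, {0x60, 0x6f} };
        for (int r = 0; r < 2; r++)
                for (int c = ranges[r][0]; c <= ranges[r][1]; c++)
                        if (nv44_graph_class(c) && !old_switch(c))
                                printf("0x%02x: newly covered\n", c);
        return 0;
}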
@@ -332,8 +332,11 @@ nv50_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
         gpuobj->vinst = node->vram->offset;
 
         if (gpuobj->flags & NVOBJ_FLAG_VM) {
-                ret = nouveau_vm_get(dev_priv->chan_vm, size, 12,
-                                     NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS,
+                u32 flags = NV_MEM_ACCESS_RW;
+                if (!(gpuobj->flags & NVOBJ_FLAG_VM_USER))
+                        flags |= NV_MEM_ACCESS_SYS;
+
+                ret = nouveau_vm_get(dev_priv->chan_vm, size, 12, flags,
                                      &node->chan_vma);
                 if (ret) {
                         vram->put(dev, &node->vram);
@@ -105,7 +105,8 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
         if (ret)
                 return ret;
 
-        ret = nouveau_gpuobj_new(dev, NULL, 384 * 1024, 4096, NVOBJ_FLAG_VM,
+        ret = nouveau_gpuobj_new(dev, NULL, 384 * 1024, 4096,
+                                 NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER,
                                  &grch->unk418810);
         if (ret)
                 return ret;
@@ -48,8 +48,8 @@ nvc0_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
         phys >>= 8;
 
         phys |= 0x00000001; /* present */
-//      if (vma->access & NV_MEM_ACCESS_SYS)
-//              phys |= 0x00000002;
+        if (vma->access & NV_MEM_ACCESS_SYS)
+                phys |= 0x00000002;
 
         phys |= ((u64)target << 32);
         phys |= ((u64)memtype << 36);
 
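Aside (not from the patch): pulling the pieces together, objects flagged NVOBJ_FLAG_VM_USER are mapped without NV_MEM_ACCESS_SYS (two hunks up), so PTE bit 1 stays clear for them while kernel-only structures get it set — presumably the "protection of system-use-only structures" the commit title refers to. A sketch of how nvc0_vm_addr() appears to pack a PTE, based solely on the operations visible in this hunk:

/* Sketch of the PTE packing visible above; field meanings beyond the
 * bit positions shown are an assumption.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t nvc0_pte(uint64_t phys, uint32_t memtype, uint32_t target,
                         int sys)
{
        uint64_t pte = phys >> 8;
        pte |= 0x00000001;                /* bit 0: present */
        if (sys)
                pte |= 0x00000002;        /* bit 1: NV_MEM_ACCESS_SYS */
        pte |= (uint64_t)target << 32;
        pte |= (uint64_t)memtype << 36;
        return pte;
}

int main(void)
{
        /* made-up physical address and memtype, purely illustrative */
        printf("pte = 0x%016llx\n",
               (unsigned long long)nvc0_pte(0x100000, 0xfe, 0, 1));
        return 0;
}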