mirror of https://gitee.com/openkylin/linux.git
Merge branch 'drm-next-3.19-wip' of git://people.freedesktop.org/~agd5f/linux into drm-next
- More cursor and hotspot handling fixes
- Fix some typos in the new smc fan control code and enable on CI
- VM and CS cleanups

* 'drm-next-3.19-wip' of git://people.freedesktop.org/~agd5f/linux:
  drm/radeon: enable smc fan control on CI
  drm/radeon: use pointers instead of indexes for CS chunks
  drm/radeon: remove duplicates check
  drm/ttm: optionally move duplicates to a separate list
  drm/radeon: check the right ring in radeon_evict_flags()
  drm/radeon: fix copy paste typos in fan control for si/ci
  drm/radeon: Hide cursor on CRTCs used by fbdev (v2)
  drm/radeon: add spinlock for BO_VA status protection (v2)
  drm/radeon: fence PT updates as shared
  drm/radeon: rename radeon_cs_reloc to radeon_bo_list
  drm/radeon: drop the handle from radeon_cs_reloc
  drm/radeon: drop gobj from radeon_cs_reloc
  drm/radeon: fix typo in new fan control registers for SI/CI
  drm/radeon: sync all BOs involved in a CS
  drm/radeon: Move hotspot handling out of radeon_set_cursor
  drm/radeon: Re-show the cursor after a modeset
This commit is contained in:
commit d58e0d9034
@@ -264,7 +264,8 @@ int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
 	if (list_is_singular(&release->bos))
 		return 0;

-	ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos, !no_intr);
+	ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
+				     !no_intr, NULL);
 	if (ret)
 		return ret;

@@ -2039,6 +2039,7 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
 	atombios_crtc_set_base(crtc, x, y, old_fb);
 	atombios_overscan_setup(crtc, mode, adjusted_mode);
 	atombios_scaler_setup(crtc);
+	radeon_cursor_reset(crtc);
 	/* update the hw version fpr dpm */
 	radeon_crtc->hw_mode = *adjusted_mode;

@@ -937,7 +937,7 @@ static void ci_fan_ctrl_set_static_mode(struct radeon_device *rdev, u32 mode)
 	tmp |= TMIN(0);
 	WREG32_SMC(CG_FDO_CTRL2, tmp);

-	tmp = RREG32_SMC(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK;
+	tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
 	tmp |= FDO_PWM_MODE(mode);
 	WREG32_SMC(CG_FDO_CTRL2, tmp);
 }

@@ -1162,7 +1162,7 @@ static int ci_fan_ctrl_set_fan_speed_rpm(struct radeon_device *rdev,
 	tmp |= TARGET_PERIOD(tach_period);
 	WREG32_SMC(CG_TACH_CTRL, tmp);

-	ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
+	ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC_RPM);

 	return 0;
 }

@@ -1178,7 +1178,7 @@ static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev)
 	tmp |= FDO_PWM_MODE(pi->fan_ctrl_default_mode);
 	WREG32_SMC(CG_FDO_CTRL2, tmp);

-	tmp = RREG32_SMC(CG_FDO_CTRL2) & TMIN_MASK;
+	tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK;
 	tmp |= TMIN(pi->t_min);
 	WREG32_SMC(CG_FDO_CTRL2, tmp);
 	pi->fan_ctrl_is_in_default_mode = true;
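Note: the "& MASK" → "& ~MASK" changes above (and their si_dpm.c twins below) are the "copy paste typos in fan control for si/ci" named in the merge log. A read-modify-write of a register field has to clear that field first; masking with the field mask itself would instead discard every other bit in the register. A minimal sketch of the corrected pattern (plain standalone C for illustration, not driver code):

#include <stdint.h>

#define FDO_PWM_MODE(x)    ((uint32_t)(x) << 11)
#define FDO_PWM_MODE_MASK  (7u << 11)

/* Correct read-modify-write: clear only the field, then OR in the new value. */
static uint32_t set_pwm_mode(uint32_t reg, uint32_t mode)
{
    reg &= ~FDO_PWM_MODE_MASK;  /* zero bits 13:11, keep everything else */
    reg |= FDO_PWM_MODE(mode);  /* write the new mode into the field */
    return reg;
}
/* The buggy form, reg & FDO_PWM_MODE_MASK, would keep the stale mode bits
 * and zero everything outside the field instead. */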
@@ -5849,7 +5849,6 @@ int ci_dpm_init(struct radeon_device *rdev)
 		rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;

 	pi->fan_ctrl_is_in_default_mode = true;
-	rdev->pm.dpm.fan.ucode_fan_control = false;

 	return 0;
 }
@@ -213,18 +213,18 @@

 #define CG_FDO_CTRL0                          0xC0300064
 #define FDO_STATIC_DUTY(x)                    ((x) << 0)
-#define FDO_STATIC_DUTY_MASK                  0x0000000F
+#define FDO_STATIC_DUTY_MASK                  0x000000FF
 #define FDO_STATIC_DUTY_SHIFT                 0
 #define CG_FDO_CTRL1                          0xC0300068
 #define FMAX_DUTY100(x)                       ((x) << 0)
-#define FMAX_DUTY100_MASK                     0x0000000F
+#define FMAX_DUTY100_MASK                     0x000000FF
 #define FMAX_DUTY100_SHIFT                    0
 #define CG_FDO_CTRL2                          0xC030006C
 #define TMIN(x)                               ((x) << 0)
-#define TMIN_MASK                             0x0000000F
+#define TMIN_MASK                             0x000000FF
 #define TMIN_SHIFT                            0
 #define FDO_PWM_MODE(x)                       ((x) << 11)
-#define FDO_PWM_MODE_MASK                     (3 << 11)
+#define FDO_PWM_MODE_MASK                     (7 << 11)
 #define FDO_PWM_MODE_SHIFT                    11
 #define TACH_PWM_RESP_RATE(x)                 ((x) << 25)
 #define TACH_PWM_RESP_RATE_MASK               (0x7f << 25)
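The mask corrections in this block line the masks up with the field widths the shift helpers imply: FDO_STATIC_DUTY, FMAX_DUTY100 and TMIN are 8-bit fields (0xFF, not 0xF), and FDO_PWM_MODE at bit 11 is three bits wide (7 << 11, not 3 << 11). A tiny compile-and-run self-check of the idea (assumed values, illustration only):

#include <assert.h>

#define TMIN(x)    ((x) << 0)
#define TMIN_MASK  0x000000FF

int main(void)
{
    /* An 8-bit field must round-trip through its mask unchanged. */
    assert((TMIN(0xAB) & TMIN_MASK) == 0xAB);
    /* The old 0x0000000F mask would have truncated this to 0x0B. */
    return 0;
}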
@@ -35,7 +35,7 @@
 #define MIN(a,b)                   (((a)<(b))?(a):(b))

 int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
-			   struct radeon_cs_reloc **cs_reloc);
+			   struct radeon_bo_list **cs_reloc);
 struct evergreen_cs_track {
 	u32			group_size;
 	u32			nbanks;

@@ -1094,7 +1094,7 @@ static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p,
 static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 {
 	struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
-	struct radeon_cs_reloc *reloc;
+	struct radeon_bo_list *reloc;
 	u32 last_reg;
 	u32 m, i, tmp, *ib;
 	int r;

@@ -1792,7 +1792,7 @@ static bool evergreen_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 static int evergreen_packet3_check(struct radeon_cs_parser *p,
 				   struct radeon_cs_packet *pkt)
 {
-	struct radeon_cs_reloc *reloc;
+	struct radeon_bo_list *reloc;
 	struct evergreen_cs_track *track;
 	volatile u32 *ib;
 	unsigned idx;

@@ -2661,7 +2661,7 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
 			p->track = NULL;
 			return r;
 		}
-	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+	} while (p->idx < p->chunk_ib->length_dw);
 #if 0
 	for (r = 0; r < p->ib.length_dw; r++) {
 		printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);

@@ -2684,8 +2684,8 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
 **/
 int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 {
-	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
-	struct radeon_cs_reloc *src_reloc, *dst_reloc, *dst2_reloc;
+	struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
+	struct radeon_bo_list *src_reloc, *dst_reloc, *dst2_reloc;
 	u32 header, cmd, count, sub_cmd;
 	volatile u32 *ib = p->ib.ptr;
 	u32 idx;

@@ -3100,7 +3100,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
 		DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
 		return -EINVAL;
 	}
-	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+	} while (p->idx < p->chunk_ib->length_dw);
 #if 0
 	for (r = 0; r < p->ib->length_dw; r++) {
 		printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
@@ -1254,7 +1254,7 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
 	int r;
 	u32 tile_flags = 0;
 	u32 tmp;
-	struct radeon_cs_reloc *reloc;
+	struct radeon_bo_list *reloc;
 	u32 value;

 	r = radeon_cs_packet_next_reloc(p, &reloc, 0);

@@ -1293,7 +1293,7 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
 			     int idx)
 {
 	unsigned c, i;
-	struct radeon_cs_reloc *reloc;
+	struct radeon_bo_list *reloc;
 	struct r100_cs_track *track;
 	int r = 0;
 	volatile uint32_t *ib;

@@ -1542,7 +1542,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 			      struct radeon_cs_packet *pkt,
 			      unsigned idx, unsigned reg)
 {
-	struct radeon_cs_reloc *reloc;
+	struct radeon_bo_list *reloc;
 	struct r100_cs_track *track;
 	volatile uint32_t *ib;
 	uint32_t tmp;

@@ -1901,7 +1901,7 @@ int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
 static int r100_packet3_check(struct radeon_cs_parser *p,
 			      struct radeon_cs_packet *pkt)
 {
-	struct radeon_cs_reloc *reloc;
+	struct radeon_bo_list *reloc;
 	struct r100_cs_track *track;
 	unsigned idx;
 	volatile uint32_t *ib;

@@ -2061,7 +2061,7 @@ int r100_cs_parse(struct radeon_cs_parser *p)
 		}
 		if (r)
 			return r;
-	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+	} while (p->idx < p->chunk_ib->length_dw);
 	return 0;
 }

@@ -146,7 +146,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
 		       struct radeon_cs_packet *pkt,
 		       unsigned idx, unsigned reg)
 {
-	struct radeon_cs_reloc *reloc;
+	struct radeon_bo_list *reloc;
 	struct r100_cs_track *track;
 	volatile uint32_t *ib;
 	uint32_t tmp;
@@ -598,7 +598,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 			      struct radeon_cs_packet *pkt,
 			      unsigned idx, unsigned reg)
 {
-	struct radeon_cs_reloc *reloc;
+	struct radeon_bo_list *reloc;
 	struct r100_cs_track *track;
 	volatile uint32_t *ib;
 	uint32_t tmp, tile_flags = 0;

@@ -1142,7 +1142,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
 static int r300_packet3_check(struct radeon_cs_parser *p,
 			      struct radeon_cs_packet *pkt)
 {
-	struct radeon_cs_reloc *reloc;
+	struct radeon_bo_list *reloc;
 	struct r100_cs_track *track;
 	volatile uint32_t *ib;
 	unsigned idx;

@@ -1283,7 +1283,7 @@ int r300_cs_parse(struct radeon_cs_parser *p)
 		if (r) {
 			return r;
 		}
-	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+	} while (p->idx < p->chunk_ib->length_dw);
 	return 0;
 }

@@ -969,7 +969,7 @@ static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
 static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 {
 	struct r600_cs_track *track = (struct r600_cs_track *)p->track;
-	struct radeon_cs_reloc *reloc;
+	struct radeon_bo_list *reloc;
 	u32 m, i, tmp, *ib;
 	int r;

@@ -1626,7 +1626,7 @@ static bool r600_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 static int r600_packet3_check(struct radeon_cs_parser *p,
 			      struct radeon_cs_packet *pkt)
 {
-	struct radeon_cs_reloc *reloc;
+	struct radeon_bo_list *reloc;
 	struct r600_cs_track *track;
 	volatile u32 *ib;
 	unsigned idx;

@@ -2316,7 +2316,7 @@ int r600_cs_parse(struct radeon_cs_parser *p)
 			p->track = NULL;
 			return r;
 		}
-	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+	} while (p->idx < p->chunk_ib->length_dw);
 #if 0
 	for (r = 0; r < p->ib.length_dw; r++) {
 		printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);

@@ -2351,10 +2351,10 @@ static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)

 static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
 {
-	if (p->chunk_relocs_idx == -1) {
+	if (p->chunk_relocs == NULL) {
 		return 0;
 	}
-	p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
+	p->relocs = kzalloc(sizeof(struct radeon_bo_list), GFP_KERNEL);
 	if (p->relocs == NULL) {
 		return -ENOMEM;
 	}

@@ -2398,7 +2398,7 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
 	/* Copy the packet into the IB, the parser will read from the
 	 * input memory (cached) and write to the IB (which can be
 	 * uncached). */
-	ib_chunk = &parser.chunks[parser.chunk_ib_idx];
+	ib_chunk = parser.chunk_ib;
 	parser.ib.length_dw = ib_chunk->length_dw;
 	*l = parser.ib.length_dw;
 	if (copy_from_user(ib, ib_chunk->user_ptr, ib_chunk->length_dw * 4)) {

@@ -2435,24 +2435,24 @@ void r600_cs_legacy_init(void)
 * GPU offset using the provided start.
 **/
 int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
-			   struct radeon_cs_reloc **cs_reloc)
+			   struct radeon_bo_list **cs_reloc)
 {
 	struct radeon_cs_chunk *relocs_chunk;
 	unsigned idx;

 	*cs_reloc = NULL;
-	if (p->chunk_relocs_idx == -1) {
+	if (p->chunk_relocs == NULL) {
 		DRM_ERROR("No relocation chunk !\n");
 		return -EINVAL;
 	}
-	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+	relocs_chunk = p->chunk_relocs;
 	idx = p->dma_reloc_idx;
 	if (idx >= p->nrelocs) {
 		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
 			  idx, p->nrelocs);
 		return -EINVAL;
 	}
-	*cs_reloc = p->relocs_ptr[idx];
+	*cs_reloc = &p->relocs[idx];
 	p->dma_reloc_idx++;
 	return 0;
 }

@@ -2472,8 +2472,8 @@ int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
 **/
 int r600_dma_cs_parse(struct radeon_cs_parser *p)
 {
-	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
-	struct radeon_cs_reloc *src_reloc, *dst_reloc;
+	struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
+	struct radeon_bo_list *src_reloc, *dst_reloc;
 	u32 header, cmd, count, tiled;
 	volatile u32 *ib = p->ib.ptr;
 	u32 idx, idx_value;

@@ -2619,7 +2619,7 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
 		DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
 		return -EINVAL;
 	}
-	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+	} while (p->idx < p->chunk_ib->length_dw);
 #if 0
 	for (r = 0; r < p->ib->length_dw; r++) {
 		printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
@@ -450,6 +450,15 @@ struct radeon_mman {
 #endif
 };

+struct radeon_bo_list {
+	struct radeon_bo		*robj;
+	struct ttm_validate_buffer	tv;
+	uint64_t			gpu_offset;
+	unsigned			prefered_domains;
+	unsigned			allowed_domains;
+	uint32_t			tiling_flags;
+};
+
 /* bo virtual address in a specific vm */
 struct radeon_bo_va {
 	/* protected by bo being reserved */

@@ -920,6 +929,9 @@ struct radeon_vm {

 	struct rb_root		va;

+	/* protecting invalidated and freed */
+	spinlock_t		status_lock;
+
 	/* BOs moved, but not yet updated in the PT */
 	struct list_head	invalidated;

@@ -1044,19 +1056,7 @@ void cayman_dma_fini(struct radeon_device *rdev);
 /*
 * CS.
 */
-struct radeon_cs_reloc {
-	struct drm_gem_object		*gobj;
-	struct radeon_bo		*robj;
-	struct ttm_validate_buffer	tv;
-	uint64_t			gpu_offset;
-	unsigned			prefered_domains;
-	unsigned			allowed_domains;
-	uint32_t			tiling_flags;
-	uint32_t			handle;
-};
-
 struct radeon_cs_chunk {
-	uint32_t		chunk_id;
 	uint32_t		length_dw;
 	uint32_t		*kdata;
 	void __user		*user_ptr;

@@ -1074,16 +1074,15 @@ struct radeon_cs_parser {
 	unsigned		idx;
 	/* relocations */
 	unsigned		nrelocs;
-	struct radeon_cs_reloc	*relocs;
-	struct radeon_cs_reloc	**relocs_ptr;
-	struct radeon_cs_reloc	*vm_bos;
+	struct radeon_bo_list	*relocs;
+	struct radeon_bo_list	*vm_bos;
 	struct list_head	validated;
 	unsigned		dma_reloc_idx;
 	/* indices of various chunks */
-	int			chunk_ib_idx;
-	int			chunk_relocs_idx;
-	int			chunk_flags_idx;
-	int			chunk_const_ib_idx;
+	struct radeon_cs_chunk	*chunk_ib;
+	struct radeon_cs_chunk	*chunk_relocs;
+	struct radeon_cs_chunk	*chunk_flags;
+	struct radeon_cs_chunk	*chunk_const_ib;
 	struct radeon_ib	ib;
 	struct radeon_ib	const_ib;
 	void			*track;

@@ -1097,7 +1096,7 @@ struct radeon_cs_parser {

 static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
 {
-	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
+	struct radeon_cs_chunk *ibc = p->chunk_ib;

 	if (ibc->kdata)
 		return ibc->kdata[idx];

@@ -2975,7 +2974,7 @@ int radeon_vm_manager_init(struct radeon_device *rdev);
 void radeon_vm_manager_fini(struct radeon_device *rdev);
 int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
 void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
-struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
+struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
 					  struct radeon_vm *vm,
 					  struct list_head *head);
 struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,

@@ -3089,7 +3088,7 @@ bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p);
 void radeon_cs_dump_packet(struct radeon_cs_parser *p,
 			   struct radeon_cs_packet *pkt);
 int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
-				struct radeon_cs_reloc **cs_reloc,
+				struct radeon_bo_list **cs_reloc,
 				int nomm);
 int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
 			       uint32_t *vline_start_end,
@@ -77,22 +77,18 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 	struct drm_device *ddev = p->rdev->ddev;
 	struct radeon_cs_chunk *chunk;
 	struct radeon_cs_buckets buckets;
-	unsigned i, j;
-	bool duplicate, need_mmap_lock = false;
+	unsigned i;
+	bool need_mmap_lock = false;
 	int r;

-	if (p->chunk_relocs_idx == -1) {
+	if (p->chunk_relocs == NULL) {
 		return 0;
 	}
-	chunk = &p->chunks[p->chunk_relocs_idx];
+	chunk = p->chunk_relocs;
 	p->dma_reloc_idx = 0;
 	/* FIXME: we assume that each relocs use 4 dwords */
 	p->nrelocs = chunk->length_dw / 4;
-	p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
-	if (p->relocs_ptr == NULL) {
-		return -ENOMEM;
-	}
-	p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
+	p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_bo_list), GFP_KERNEL);
 	if (p->relocs == NULL) {
 		return -ENOMEM;
 	}

@@ -101,31 +97,17 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)

 	for (i = 0; i < p->nrelocs; i++) {
 		struct drm_radeon_cs_reloc *r;
+		struct drm_gem_object *gobj;
 		unsigned priority;

-		duplicate = false;
 		r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
-		for (j = 0; j < i; j++) {
-			if (r->handle == p->relocs[j].handle) {
-				p->relocs_ptr[i] = &p->relocs[j];
-				duplicate = true;
-				break;
-			}
-		}
-		if (duplicate) {
-			p->relocs[i].handle = 0;
-			continue;
-		}
-
-		p->relocs[i].gobj = drm_gem_object_lookup(ddev, p->filp,
-							  r->handle);
-		if (p->relocs[i].gobj == NULL) {
+		gobj = drm_gem_object_lookup(ddev, p->filp, r->handle);
+		if (gobj == NULL) {
 			DRM_ERROR("gem object lookup failed 0x%x\n",
 				  r->handle);
 			return -ENOENT;
 		}
-		p->relocs_ptr[i] = &p->relocs[i];
-		p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
+		p->relocs[i].robj = gem_to_radeon_bo(gobj);

 		/* The userspace buffer priorities are from 0 to 15. A higher
 		 * number means the buffer is more important.

@@ -184,7 +166,6 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)

 		p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
 		p->relocs[i].tv.shared = !r->write_domain;
-		p->relocs[i].handle = r->handle;

 		radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
 				      priority);
@@ -251,22 +232,20 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority

 static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
 {
-	int i, r = 0;
+	struct radeon_bo_list *reloc;
+	int r;

-	for (i = 0; i < p->nrelocs; i++) {
+	list_for_each_entry(reloc, &p->validated, tv.head) {
 		struct reservation_object *resv;

-		if (!p->relocs[i].robj)
-			continue;
-
-		resv = p->relocs[i].robj->tbo.resv;
+		resv = reloc->robj->tbo.resv;
 		r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
-				     p->relocs[i].tv.shared);
+				     reloc->tv.shared);

 		if (r)
-			break;
+			return r;
 	}
-	return r;
+	return 0;
 }

 /* XXX: note that this is called from the legacy UMS CS ioctl as well */
@@ -286,10 +265,10 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 	p->idx = 0;
 	p->ib.sa_bo = NULL;
 	p->const_ib.sa_bo = NULL;
-	p->chunk_ib_idx = -1;
-	p->chunk_relocs_idx = -1;
-	p->chunk_flags_idx = -1;
-	p->chunk_const_ib_idx = -1;
+	p->chunk_ib = NULL;
+	p->chunk_relocs = NULL;
+	p->chunk_flags = NULL;
+	p->chunk_const_ib = NULL;
 	p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
 	if (p->chunks_array == NULL) {
 		return -ENOMEM;

@@ -316,24 +295,23 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 			return -EFAULT;
 		}
 		p->chunks[i].length_dw = user_chunk.length_dw;
-		p->chunks[i].chunk_id = user_chunk.chunk_id;
-		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
-			p->chunk_relocs_idx = i;
+		if (user_chunk.chunk_id == RADEON_CHUNK_ID_RELOCS) {
+			p->chunk_relocs = &p->chunks[i];
 		}
-		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
-			p->chunk_ib_idx = i;
+		if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
+			p->chunk_ib = &p->chunks[i];
 			/* zero length IB isn't useful */
 			if (p->chunks[i].length_dw == 0)
 				return -EINVAL;
 		}
-		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) {
-			p->chunk_const_ib_idx = i;
+		if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB) {
+			p->chunk_const_ib = &p->chunks[i];
 			/* zero length CONST IB isn't useful */
 			if (p->chunks[i].length_dw == 0)
 				return -EINVAL;
 		}
-		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
-			p->chunk_flags_idx = i;
+		if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
+			p->chunk_flags = &p->chunks[i];
 			/* zero length flags aren't useful */
 			if (p->chunks[i].length_dw == 0)
 				return -EINVAL;

@@ -342,10 +320,10 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 		size = p->chunks[i].length_dw;
 		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
 		p->chunks[i].user_ptr = cdata;
-		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB)
+		if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB)
 			continue;

-		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
+		if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
 			if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
 				continue;
 		}

@@ -358,7 +336,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 			if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
 				return -EFAULT;
 			}
-			if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
+			if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
 				p->cs_flags = p->chunks[i].kdata[0];
 				if (p->chunks[i].length_dw > 1)
 					ring = p->chunks[i].kdata[1];

@@ -399,8 +377,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 static int cmp_size_smaller_first(void *priv, struct list_head *a,
 				  struct list_head *b)
 {
-	struct radeon_cs_reloc *la = list_entry(a, struct radeon_cs_reloc, tv.head);
-	struct radeon_cs_reloc *lb = list_entry(b, struct radeon_cs_reloc, tv.head);
+	struct radeon_bo_list *la = list_entry(a, struct radeon_bo_list, tv.head);
+	struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);

 	/* Sort A before B if A is smaller. */
 	return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;

@@ -441,13 +419,15 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo

 	if (parser->relocs != NULL) {
 		for (i = 0; i < parser->nrelocs; i++) {
-			if (parser->relocs[i].gobj)
-				drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
+			struct radeon_bo *bo = parser->relocs[i].robj;
+			if (bo == NULL)
+				continue;
+
+			drm_gem_object_unreference_unlocked(&bo->gem_base);
 		}
 	}
 	kfree(parser->track);
 	kfree(parser->relocs);
-	kfree(parser->relocs_ptr);
 	drm_free_large(parser->vm_bos);
 	for (i = 0; i < parser->nchunks; i++)
 		drm_free_large(parser->chunks[i].kdata);

@@ -462,7 +442,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
 {
 	int r;

-	if (parser->chunk_ib_idx == -1)
+	if (parser->chunk_ib == NULL)
 		return 0;

 	if (parser->cs_flags & RADEON_CS_USE_VM)

@@ -505,9 +485,6 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
 	if (r)
 		return r;

-	radeon_sync_resv(p->rdev, &p->ib.sync, vm->page_directory->tbo.resv,
-			 true);
-
 	r = radeon_vm_clear_freed(rdev, vm);
 	if (r)
 		return r;

@@ -525,10 +502,6 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
 	for (i = 0; i < p->nrelocs; i++) {
 		struct radeon_bo *bo;

-		/* ignore duplicates */
-		if (p->relocs_ptr[i] != &p->relocs[i])
-			continue;
-
 		bo = p->relocs[i].robj;
 		bo_va = radeon_vm_bo_find(vm, bo);
 		if (bo_va == NULL) {

@@ -553,7 +526,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 	struct radeon_vm *vm = &fpriv->vm;
 	int r;

-	if (parser->chunk_ib_idx == -1)
+	if (parser->chunk_ib == NULL)
 		return 0;
 	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
 		return 0;

@@ -587,7 +560,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 	}

 	if ((rdev->family >= CHIP_TAHITI) &&
-	    (parser->chunk_const_ib_idx != -1)) {
+	    (parser->chunk_const_ib != NULL)) {
 		r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
 	} else {
 		r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);

@@ -614,7 +587,7 @@ static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser
 	struct radeon_vm *vm = NULL;
 	int r;

-	if (parser->chunk_ib_idx == -1)
+	if (parser->chunk_ib == NULL)
 		return 0;

 	if (parser->cs_flags & RADEON_CS_USE_VM) {

@@ -622,8 +595,8 @@ static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser
 		vm = &fpriv->vm;

 		if ((rdev->family >= CHIP_TAHITI) &&
-		    (parser->chunk_const_ib_idx != -1)) {
-			ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
+		    (parser->chunk_const_ib != NULL)) {
+			ib_chunk = parser->chunk_const_ib;
 			if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
 				DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
 				return -EINVAL;

@@ -642,13 +615,13 @@ static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser
 			return -EFAULT;
 		}

-		ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+		ib_chunk = parser->chunk_ib;
 		if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
 			DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
 			return -EINVAL;
 		}
 	}
-	ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+	ib_chunk = parser->chunk_ib;

 	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
 			  vm, ib_chunk->length_dw * 4);

@@ -740,7 +713,7 @@ int radeon_cs_packet_parse(struct radeon_cs_parser *p,
 			   struct radeon_cs_packet *pkt,
 			   unsigned idx)
 {
-	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+	struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
 	struct radeon_device *rdev = p->rdev;
 	uint32_t header;

@@ -834,7 +807,7 @@ void radeon_cs_dump_packet(struct radeon_cs_parser *p,
 * GPU offset using the provided start.
 **/
 int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
-				struct radeon_cs_reloc **cs_reloc,
+				struct radeon_bo_list **cs_reloc,
 				int nomm)
 {
 	struct radeon_cs_chunk *relocs_chunk;

@@ -842,12 +815,12 @@ int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
 	unsigned idx;
 	int r;

-	if (p->chunk_relocs_idx == -1) {
+	if (p->chunk_relocs == NULL) {
 		DRM_ERROR("No relocation chunk !\n");
 		return -EINVAL;
 	}
 	*cs_reloc = NULL;
-	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+	relocs_chunk = p->chunk_relocs;
 	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
 	if (r)
 		return r;

@@ -873,6 +846,6 @@ int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
 			(u64)relocs_chunk->kdata[idx + 3] << 32;
 		(*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0];
 	} else
-		*cs_reloc = p->relocs_ptr[(idx / 4)];
+		*cs_reloc = &p->relocs[(idx / 4)];
 	return 0;
 }
@@ -227,11 +227,24 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
 	return ret;
 }

-static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
-			      uint64_t gpu_addr, int hot_x, int hot_y)
+static int radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj)
 {
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 	struct radeon_device *rdev = crtc->dev->dev_private;
+	struct radeon_bo *robj = gem_to_radeon_bo(obj);
+	uint64_t gpu_addr;
+	int ret;
+
+	ret = radeon_bo_reserve(robj, false);
+	if (unlikely(ret != 0))
+		goto fail;
+	/* Only 27 bit offset for legacy cursor */
+	ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
+				       ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
+				       &gpu_addr);
+	radeon_bo_unreserve(robj);
+	if (ret)
+		goto fail;

 	if (ASIC_IS_DCE4(rdev)) {
 		WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,

@@ -253,18 +266,12 @@ static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
 		WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset);
 	}

-	if (hot_x != radeon_crtc->cursor_hot_x ||
-	    hot_y != radeon_crtc->cursor_hot_y) {
-		int x, y;
-
-		x = radeon_crtc->cursor_x + radeon_crtc->cursor_hot_x - hot_x;
-		y = radeon_crtc->cursor_y + radeon_crtc->cursor_hot_y - hot_y;
-
-		radeon_cursor_move_locked(crtc, x, y);
-
-		radeon_crtc->cursor_hot_x = hot_x;
-		radeon_crtc->cursor_hot_y = hot_y;
-	}
+	return 0;
+
+fail:
+	drm_gem_object_unreference_unlocked(obj);
+
+	return ret;
 }

 int radeon_crtc_cursor_set2(struct drm_crtc *crtc,

@@ -276,10 +283,7 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
 			    int32_t hot_y)
 {
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
-	struct radeon_device *rdev = crtc->dev->dev_private;
 	struct drm_gem_object *obj;
-	struct radeon_bo *robj;
-	uint64_t gpu_addr;
 	int ret;

 	if (!handle) {

@@ -301,41 +305,76 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
 		return -ENOENT;
 	}

-	robj = gem_to_radeon_bo(obj);
-	ret = radeon_bo_reserve(robj, false);
-	if (unlikely(ret != 0))
-		goto fail;
-	/* Only 27 bit offset for legacy cursor */
-	ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
-				       ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
-				       &gpu_addr);
-	radeon_bo_unreserve(robj);
-	if (ret)
-		goto fail;
-
 	radeon_crtc->cursor_width = width;
 	radeon_crtc->cursor_height = height;

 	radeon_lock_cursor(crtc, true);
-	radeon_set_cursor(crtc, obj, gpu_addr, hot_x, hot_y);
-	radeon_show_cursor(crtc);
+
+	if (hot_x != radeon_crtc->cursor_hot_x ||
+	    hot_y != radeon_crtc->cursor_hot_y) {
+		int x, y;
+
+		x = radeon_crtc->cursor_x + radeon_crtc->cursor_hot_x - hot_x;
+		y = radeon_crtc->cursor_y + radeon_crtc->cursor_hot_y - hot_y;
+
+		radeon_cursor_move_locked(crtc, x, y);
+
+		radeon_crtc->cursor_hot_x = hot_x;
+		radeon_crtc->cursor_hot_y = hot_y;
+	}
+
+	ret = radeon_set_cursor(crtc, obj);
+
+	if (ret)
+		DRM_ERROR("radeon_set_cursor returned %d, not changing cursor\n",
+			  ret);
+	else
+		radeon_show_cursor(crtc);
+
 	radeon_lock_cursor(crtc, false);

 unpin:
 	if (radeon_crtc->cursor_bo) {
-		robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
+		struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
 		ret = radeon_bo_reserve(robj, false);
 		if (likely(ret == 0)) {
 			radeon_bo_unpin(robj);
 			radeon_bo_unreserve(robj);
 		}
-		drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
+		if (radeon_crtc->cursor_bo != obj)
+			drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
 	}

 	radeon_crtc->cursor_bo = obj;
 	return 0;
-
-fail:
-	drm_gem_object_unreference_unlocked(obj);
-
-	return ret;
 }

+/**
+ * radeon_cursor_reset - Re-set the current cursor, if any.
+ *
+ * @crtc: drm crtc
+ *
+ * If the CRTC passed in currently has a cursor assigned, this function
+ * makes sure it's visible.
+ */
+void radeon_cursor_reset(struct drm_crtc *crtc)
+{
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+	int ret;
+
+	if (radeon_crtc->cursor_bo) {
+		radeon_lock_cursor(crtc, true);
+
+		radeon_cursor_move_locked(crtc, radeon_crtc->cursor_x,
+					  radeon_crtc->cursor_y);
+
+		ret = radeon_set_cursor(crtc, radeon_crtc->cursor_bo);
+		if (ret)
+			DRM_ERROR("radeon_set_cursor returned %d, not showing "
+				  "cursor\n", ret);
+		else
+			radeon_show_cursor(crtc);
+
+		radeon_lock_cursor(crtc, false);
+	}
+}
@@ -48,10 +48,40 @@ struct radeon_fbdev {
 	struct radeon_device *rdev;
 };

+/**
+ * radeon_fb_helper_set_par - Hide cursor on CRTCs used by fbdev.
+ *
+ * @info: fbdev info
+ *
+ * This function hides the cursor on all CRTCs used by fbdev.
+ */
+static int radeon_fb_helper_set_par(struct fb_info *info)
+{
+	int ret;
+
+	ret = drm_fb_helper_set_par(info);
+
+	/* XXX: with universal plane support fbdev will automatically disable
+	 * all non-primary planes (including the cursor)
+	 */
+	if (ret == 0) {
+		struct drm_fb_helper *fb_helper = info->par;
+		int i;
+
+		for (i = 0; i < fb_helper->crtc_count; i++) {
+			struct drm_crtc *crtc = fb_helper->crtc_info[i].mode_set.crtc;
+
+			radeon_crtc_cursor_set2(crtc, NULL, 0, 0, 0, 0, 0);
+		}
+	}
+
+	return ret;
+}
+
 static struct fb_ops radeonfb_ops = {
 	.owner = THIS_MODULE,
 	.fb_check_var = drm_fb_helper_check_var,
-	.fb_set_par = drm_fb_helper_set_par,
+	.fb_set_par = radeon_fb_helper_set_par,
 	.fb_fillrect = cfb_fillrect,
 	.fb_copyarea = cfb_copyarea,
 	.fb_imageblit = cfb_imageblit,
@@ -548,7 +548,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
 				    struct radeon_bo_va *bo_va)
 {
 	struct ttm_validate_buffer tv, *entry;
-	struct radeon_cs_reloc *vm_bos;
+	struct radeon_bo_list *vm_bos;
 	struct ww_acquire_ctx ticket;
 	struct list_head list;
 	unsigned domain;

@@ -564,7 +564,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
 	if (!vm_bos)
 		return;

-	r = ttm_eu_reserve_buffers(&ticket, &list, true);
+	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
 	if (r)
 		goto error_free;

@@ -1054,6 +1054,7 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
 			DRM_ERROR("Mode need scaling but only first crtc can do that.\n");
 		}
 	}
+	radeon_cursor_reset(crtc);
 	return 0;
 }

@@ -818,6 +818,7 @@ extern int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
 				   int32_t hot_y);
 extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
 				   int x, int y);
+extern void radeon_cursor_reset(struct drm_crtc *crtc);

 extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
 				      unsigned int flags,
@@ -502,19 +502,20 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
 			    struct ww_acquire_ctx *ticket,
 			    struct list_head *head, int ring)
 {
-	struct radeon_cs_reloc *lobj;
-	struct radeon_bo *bo;
+	struct radeon_bo_list *lobj;
+	struct list_head duplicates;
 	int r;
 	u64 bytes_moved = 0, initial_bytes_moved;
 	u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);

-	r = ttm_eu_reserve_buffers(ticket, head, true);
+	INIT_LIST_HEAD(&duplicates);
+	r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
 	if (unlikely(r != 0)) {
 		return r;
 	}

 	list_for_each_entry(lobj, head, tv.head) {
-		bo = lobj->robj;
+		struct radeon_bo *bo = lobj->robj;
 		if (!bo->pin_count) {
 			u32 domain = lobj->prefered_domains;
 			u32 allowed = lobj->allowed_domains;

@@ -562,6 +563,12 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
 		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
 		lobj->tiling_flags = bo->tiling_flags;
 	}
+
+	list_for_each_entry(lobj, &duplicates, tv.head) {
+		lobj->gpu_offset = radeon_bo_gpu_offset(lobj->robj);
+		lobj->tiling_flags = lobj->robj->tiling_flags;
+	}
+
 	return 0;
 }

@@ -38,7 +38,7 @@ TRACE_EVENT(radeon_cs,

 	    TP_fast_assign(
 			   __entry->ring = p->ring;
-			   __entry->dw = p->chunks[p->chunk_ib_idx].length_dw;
+			   __entry->dw = p->chunk_ib->length_dw;
 			   __entry->fences = radeon_fence_count_emitted(
 				p->rdev, p->ring);
 			   ),
@@ -196,7 +196,7 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
 	rbo = container_of(bo, struct radeon_bo, tbo);
 	switch (bo->mem.mem_type) {
 	case TTM_PL_VRAM:
-		if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false)
+		if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false)
 			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
 		else if (rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size &&
 			 bo->mem.start < (rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) {
@@ -488,12 +488,12 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
 			       unsigned buf_sizes[], bool *has_msg_cmd)
 {
 	struct radeon_cs_chunk *relocs_chunk;
-	struct radeon_cs_reloc *reloc;
+	struct radeon_bo_list *reloc;
 	unsigned idx, cmd, offset;
 	uint64_t start, end;
 	int r;

-	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+	relocs_chunk = p->chunk_relocs;
 	offset = radeon_get_ib_value(p, data0);
 	idx = radeon_get_ib_value(p, data1);
 	if (idx >= relocs_chunk->length_dw) {

@@ -502,7 +502,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
 		return -EINVAL;
 	}

-	reloc = p->relocs_ptr[(idx / 4)];
+	reloc = &p->relocs[(idx / 4)];
 	start = reloc->gpu_offset;
 	end = start + radeon_bo_size(reloc->robj);
 	start += offset;

@@ -610,13 +610,13 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
 		[0x00000003] = 2048,
 	};

-	if (p->chunks[p->chunk_ib_idx].length_dw % 16) {
+	if (p->chunk_ib->length_dw % 16) {
 		DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
-			  p->chunks[p->chunk_ib_idx].length_dw);
+			  p->chunk_ib->length_dw);
 		return -EINVAL;
 	}

-	if (p->chunk_relocs_idx == -1) {
+	if (p->chunk_relocs == NULL) {
 		DRM_ERROR("No relocation chunk !\n");
 		return -EINVAL;
 	}

@@ -640,7 +640,7 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
 			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
 			return -EINVAL;
 		}
-	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+	} while (p->idx < p->chunk_ib->length_dw);

 	if (!has_msg_cmd) {
 		DRM_ERROR("UVD-IBs need a msg command!\n");
@@ -453,11 +453,11 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
 			unsigned size)
 {
 	struct radeon_cs_chunk *relocs_chunk;
-	struct radeon_cs_reloc *reloc;
+	struct radeon_bo_list *reloc;
 	uint64_t start, end, offset;
 	unsigned idx;

-	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+	relocs_chunk = p->chunk_relocs;
 	offset = radeon_get_ib_value(p, lo);
 	idx = radeon_get_ib_value(p, hi);

@@ -467,7 +467,7 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
 		return -EINVAL;
 	}

-	reloc = p->relocs_ptr[(idx / 4)];
+	reloc = &p->relocs[(idx / 4)];
 	start = reloc->gpu_offset;
 	end = start + radeon_bo_size(reloc->robj);
 	start += offset;

@@ -534,7 +534,7 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
 	uint32_t *size = &tmp;
 	int i, r;

-	while (p->idx < p->chunks[p->chunk_ib_idx].length_dw) {
+	while (p->idx < p->chunk_ib->length_dw) {
 		uint32_t len = radeon_get_ib_value(p, p->idx);
 		uint32_t cmd = radeon_get_ib_value(p, p->idx + 1);

@@ -125,41 +125,37 @@ void radeon_vm_manager_fini(struct radeon_device *rdev)
 * Add the page directory to the list of BOs to
 * validate for command submission (cayman+).
 */
-struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
+struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
 					  struct radeon_vm *vm,
 					  struct list_head *head)
 {
-	struct radeon_cs_reloc *list;
+	struct radeon_bo_list *list;
 	unsigned i, idx;

 	list = drm_malloc_ab(vm->max_pde_used + 2,
-			     sizeof(struct radeon_cs_reloc));
+			     sizeof(struct radeon_bo_list));
 	if (!list)
 		return NULL;

 	/* add the vm page table to the list */
-	list[0].gobj = NULL;
 	list[0].robj = vm->page_directory;
 	list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
 	list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
 	list[0].tv.bo = &vm->page_directory->tbo;
 	list[0].tv.shared = true;
 	list[0].tiling_flags = 0;
-	list[0].handle = 0;
 	list_add(&list[0].tv.head, head);

 	for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
 		if (!vm->page_tables[i].bo)
 			continue;

-		list[idx].gobj = NULL;
 		list[idx].robj = vm->page_tables[i].bo;
 		list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
 		list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
 		list[idx].tv.bo = &list[idx].robj->tbo;
 		list[idx].tv.shared = true;
 		list[idx].tiling_flags = 0;
-		list[idx].handle = 0;
 		list_add(&list[idx++].tv.head, head);
 	}

@@ -491,7 +487,9 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
 		tmp->vm = vm;
 		tmp->addr = bo_va->addr;
 		tmp->bo = radeon_bo_ref(bo_va->bo);
+		spin_lock(&vm->status_lock);
 		list_add(&tmp->vm_status, &vm->freed);
+		spin_unlock(&vm->status_lock);
 	}

 	interval_tree_remove(&bo_va->it, &vm->va);

@@ -802,11 +800,11 @@ static void radeon_vm_frag_ptes(struct radeon_device *rdev,
 *
 * Global and local mutex must be locked!
 */
-static void radeon_vm_update_ptes(struct radeon_device *rdev,
-				  struct radeon_vm *vm,
-				  struct radeon_ib *ib,
-				  uint64_t start, uint64_t end,
-				  uint64_t dst, uint32_t flags)
+static int radeon_vm_update_ptes(struct radeon_device *rdev,
+				 struct radeon_vm *vm,
+				 struct radeon_ib *ib,
+				 uint64_t start, uint64_t end,
+				 uint64_t dst, uint32_t flags)
 {
 	uint64_t mask = RADEON_VM_PTE_COUNT - 1;
 	uint64_t last_pte = ~0, last_dst = ~0;

@@ -819,8 +817,12 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
 		struct radeon_bo *pt = vm->page_tables[pt_idx].bo;
 		unsigned nptes;
 		uint64_t pte;
+		int r;

 		radeon_sync_resv(rdev, &ib->sync, pt->tbo.resv, true);
+		r = reservation_object_reserve_shared(pt->tbo.resv);
+		if (r)
+			return r;

 		if ((addr & ~mask) == (end & ~mask))
 			nptes = end - addr;
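The reservation_object_reserve_shared() call added above pairs with the "fence PT updates as shared" change: before a shared fence can be attached to a page table's reservation object, a slot for it has to be reserved, and that allocation can fail, which is why radeon_vm_update_ptes() now returns int. The general shape, sketched against the 3.19-era reservation API (illustrative, not literal driver code):

#include <linux/reservation.h>

/* Sketch: make room for one more shared fence on a buffer, then add
 * the fence once the GPU work that touches the buffer is submitted. */
static int fence_buffer_shared(struct reservation_object *resv,
                               struct fence *fence)
{
    int r;

    r = reservation_object_reserve_shared(resv); /* may return -ENOMEM */
    if (r)
        return r;

    reservation_object_add_shared_fence(resv, fence);
    return 0;
}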
@@ -854,6 +856,8 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
 					last_pte + 8 * count,
 					last_dst, flags);
 	}
+
+	return 0;
 }

 /**

@@ -878,7 +882,7 @@ static void radeon_vm_fence_pts(struct radeon_vm *vm,
 	end >>= radeon_vm_block_size;

 	for (i = start; i <= end; ++i)
-		radeon_bo_fence(vm->page_tables[i].bo, fence, false);
+		radeon_bo_fence(vm->page_tables[i].bo, fence, true);
 }

 /**

@@ -911,7 +915,9 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
 		return -EINVAL;
 	}

+	spin_lock(&vm->status_lock);
 	list_del_init(&bo_va->vm_status);
+	spin_unlock(&vm->status_lock);

 	bo_va->flags &= ~RADEON_VM_PAGE_VALID;
 	bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;

@@ -987,9 +993,13 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
 		radeon_sync_fence(&ib.sync, vm->ids[i].last_id_use);
 	}

-	radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start,
-			      bo_va->it.last + 1, addr,
-			      radeon_vm_page_flags(bo_va->flags));
+	r = radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start,
+				  bo_va->it.last + 1, addr,
+				  radeon_vm_page_flags(bo_va->flags));
+	if (r) {
+		radeon_ib_free(rdev, &ib);
+		return r;
+	}

 	radeon_asic_vm_pad_ib(rdev, &ib);
 	WARN_ON(ib.length_dw > ndw);

@@ -1022,17 +1032,25 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
 int radeon_vm_clear_freed(struct radeon_device *rdev,
 			  struct radeon_vm *vm)
 {
-	struct radeon_bo_va *bo_va, *tmp;
+	struct radeon_bo_va *bo_va;
 	int r;

-	list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
+	spin_lock(&vm->status_lock);
+	while (!list_empty(&vm->freed)) {
+		bo_va = list_first_entry(&vm->freed,
+					 struct radeon_bo_va, vm_status);
+		spin_unlock(&vm->status_lock);
+
 		r = radeon_vm_bo_update(rdev, bo_va, NULL);
 		radeon_bo_unref(&bo_va->bo);
 		radeon_fence_unref(&bo_va->last_pt_update);
 		kfree(bo_va);
 		if (r)
 			return r;
+
+		spin_lock(&vm->status_lock);
 	}
+	spin_unlock(&vm->status_lock);
 	return 0;

 }
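The rewritten loop above (and its twin in radeon_vm_clear_invalids() below) is the usual drop-the-lock-while-working pattern for the new status_lock: the spinlock cannot be held across radeon_vm_bo_update(), which can sleep, so each entry is taken from the list under the lock, the lock is dropped for the update, and it is retaken before the next list_empty() check. A generic sketch of that shape (hypothetical item type and helper, not radeon code):

#include <linux/list.h>
#include <linux/spinlock.h>

struct item {
    struct list_head head;
};

static void process_item(struct item *it)
{
    /* stand-in for per-item work that may sleep */
}

/* Drain a spinlock-protected list without holding the lock across the
 * (possibly sleeping) per-item work. */
static void drain_list(spinlock_t *lock, struct list_head *list)
{
    struct item *it;

    spin_lock(lock);
    while (!list_empty(list)) {
        it = list_first_entry(list, struct item, head);
        list_del(&it->head);
        spin_unlock(lock);

        process_item(it);

        spin_lock(lock);
    }
    spin_unlock(lock);
}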
@@ -1051,14 +1069,23 @@ int radeon_vm_clear_freed(struct radeon_device *rdev,
 int radeon_vm_clear_invalids(struct radeon_device *rdev,
 			     struct radeon_vm *vm)
 {
-	struct radeon_bo_va *bo_va, *tmp;
+	struct radeon_bo_va *bo_va;
 	int r;

-	list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, vm_status) {
+	spin_lock(&vm->status_lock);
+	while (!list_empty(&vm->invalidated)) {
+		bo_va = list_first_entry(&vm->invalidated,
+					 struct radeon_bo_va, vm_status);
+		spin_unlock(&vm->status_lock);
+
 		r = radeon_vm_bo_update(rdev, bo_va, NULL);
 		if (r)
 			return r;
+
+		spin_lock(&vm->status_lock);
 	}
+	spin_unlock(&vm->status_lock);

 	return 0;
 }

@@ -1081,6 +1108,7 @@ void radeon_vm_bo_rmv(struct radeon_device *rdev,

 	mutex_lock(&vm->mutex);
 	interval_tree_remove(&bo_va->it, &vm->va);
+	spin_lock(&vm->status_lock);
 	list_del(&bo_va->vm_status);

 	if (bo_va->addr) {

@@ -1090,6 +1118,7 @@ void radeon_vm_bo_rmv(struct radeon_device *rdev,
 		radeon_fence_unref(&bo_va->last_pt_update);
 		kfree(bo_va);
 	}
+	spin_unlock(&vm->status_lock);

 	mutex_unlock(&vm->mutex);
 }

@@ -1110,10 +1139,10 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,

 	list_for_each_entry(bo_va, &bo->va, bo_list) {
 		if (bo_va->addr) {
-			mutex_lock(&bo_va->vm->mutex);
+			spin_lock(&bo_va->vm->status_lock);
 			list_del(&bo_va->vm_status);
 			list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
-			mutex_unlock(&bo_va->vm->mutex);
+			spin_unlock(&bo_va->vm->status_lock);
 		}
 	}
 }

@@ -1141,6 +1170,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
 	}
 	mutex_init(&vm->mutex);
 	vm->va = RB_ROOT;
+	spin_lock_init(&vm->status_lock);
 	INIT_LIST_HEAD(&vm->invalidated);
 	INIT_LIST_HEAD(&vm->freed);

@@ -5893,7 +5893,7 @@ static void si_fan_ctrl_set_static_mode(struct radeon_device *rdev, u32 mode)
 	tmp |= TMIN(0);
 	WREG32(CG_FDO_CTRL2, tmp);

-	tmp = RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK;
+	tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
 	tmp |= FDO_PWM_MODE(mode);
 	WREG32(CG_FDO_CTRL2, tmp);
 }

@@ -6098,7 +6098,7 @@ static int si_fan_ctrl_set_fan_speed_rpm(struct radeon_device *rdev,
 	tmp |= TARGET_PERIOD(tach_period);
 	WREG32(CG_TACH_CTRL, tmp);

-	si_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
+	si_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC_RPM);

 	return 0;
 }

@@ -6114,7 +6114,7 @@ static void si_fan_ctrl_set_default_mode(struct radeon_device *rdev)
 	tmp |= FDO_PWM_MODE(si_pi->fan_ctrl_default_mode);
 	WREG32(CG_FDO_CTRL2, tmp);

-	tmp = RREG32(CG_FDO_CTRL2) & TMIN_MASK;
+	tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
 	tmp |= TMIN(si_pi->t_min);
 	WREG32(CG_FDO_CTRL2, tmp);
 	si_pi->fan_ctrl_is_in_default_mode = true;
@@ -208,18 +208,18 @@

 #define CG_FDO_CTRL0                          0x754
 #define FDO_STATIC_DUTY(x)                    ((x) << 0)
-#define FDO_STATIC_DUTY_MASK                  0x0000000F
+#define FDO_STATIC_DUTY_MASK                  0x000000FF
 #define FDO_STATIC_DUTY_SHIFT                 0
 #define CG_FDO_CTRL1                          0x758
 #define FMAX_DUTY100(x)                       ((x) << 0)
-#define FMAX_DUTY100_MASK                     0x0000000F
+#define FMAX_DUTY100_MASK                     0x000000FF
 #define FMAX_DUTY100_SHIFT                    0
 #define CG_FDO_CTRL2                          0x75C
 #define TMIN(x)                               ((x) << 0)
-#define TMIN_MASK                             0x0000000F
+#define TMIN_MASK                             0x000000FF
 #define TMIN_SHIFT                            0
 #define FDO_PWM_MODE(x)                       ((x) << 11)
-#define FDO_PWM_MODE_MASK                     (3 << 11)
+#define FDO_PWM_MODE_MASK                     (7 << 11)
 #define FDO_PWM_MODE_SHIFT                    11
 #define TACH_PWM_RESP_RATE(x)                 ((x) << 25)
 #define TACH_PWM_RESP_RATE_MASK               (0x7f << 25)
@@ -93,7 +93,8 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
 */

 int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
-			   struct list_head *list, bool intr)
+			   struct list_head *list, bool intr,
+			   struct list_head *dups)
 {
 	struct ttm_bo_global *glob;
 	struct ttm_validate_buffer *entry;

@@ -117,6 +118,13 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 			__ttm_bo_unreserve(bo);

 			ret = -EBUSY;
+
+		} else if (ret == -EALREADY && dups) {
+			struct ttm_validate_buffer *safe = entry;
+			entry = list_prev_entry(entry, head);
+			list_del(&safe->head);
+			list_add(&safe->head, dups);
+			continue;
 		}

 		if (!ret) {
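radeon_bo_list_validate() in the radeon_object.c hunk above shows the intended calling convention for the new argument; a caller that wants duplicates segregated rather than fatal does roughly the following (sketch based on that hunk, validation details elided):

#include <linux/list.h>
#include <drm/ttm/ttm_execbuf_util.h>

/* Sketch: with a non-NULL dups list, a buffer that appears twice no
 * longer aborts the whole reservation with -EALREADY; the repeated
 * entry is parked on the side list instead. */
static int reserve_with_dups(struct ww_acquire_ctx *ticket,
                             struct list_head *head)
{
    struct list_head duplicates;
    int r;

    INIT_LIST_HEAD(&duplicates);
    r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
    if (r)
        return r;

    /* ... validate the buffers on head, then copy gpu_offset etc.
     * over to the entries parked on duplicates, as radeon does ... */
    return 0;
}

Callers with no use for the list (qxl and vmwgfx below) simply pass NULL and keep the old -EALREADY behaviour.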
@@ -2487,7 +2487,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	if (unlikely(ret != 0))
 		goto out_err_nores;

-	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes, true);
+	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
+				     true, NULL);
 	if (unlikely(ret != 0))
 		goto out_err;

@@ -2677,7 +2678,8 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
 	query_val.shared = false;
 	list_add_tail(&query_val.head, &validate_list);

-	ret = ttm_eu_reserve_buffers(&ticket, &validate_list, false);
+	ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
+				     false, NULL);
 	if (unlikely(ret != 0)) {
 		vmw_execbuf_unpin_panic(dev_priv);
 		goto out_no_reserve;
@@ -1222,7 +1222,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
 	val_buf->bo = ttm_bo_reference(&res->backup->base);
 	val_buf->shared = false;
 	list_add_tail(&val_buf->head, &val_list);
-	ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible);
+	ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible, NULL);
 	if (unlikely(ret != 0))
 		goto out_no_reserve;

@@ -68,6 +68,7 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
 * non-blocking reserves should be tried.
 * @list: thread private list of ttm_validate_buffer structs.
 * @intr: should the wait be interruptible
+ * @dups: [out] optional list of duplicates.
 *
 * Tries to reserve bos pointed to by the list entries for validation.
 * If the function returns 0, all buffers are marked as "unfenced",

@@ -83,6 +84,11 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
 * calling process receives a signal while waiting. In that case, no
 * buffers on the list will be reserved upon return.
 *
+ * If dups is non NULL all buffers already reserved by the current thread
+ * (e.g. duplicates) are added to this list, otherwise -EALREADY is returned
+ * on the first already reserved buffer and all buffers from the list are
+ * unreserved again.
+ *
 * Buffers reserved by this function should be unreserved by
 * a call to either ttm_eu_backoff_reservation() or
 * ttm_eu_fence_buffer_objects() when command submission is complete or

@@ -90,7 +96,8 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
 */

 extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
-				  struct list_head *list, bool intr);
+				  struct list_head *list, bool intr,
+				  struct list_head *dups);

 /**
 * function ttm_eu_fence_buffer_objects.