Merge branch 'drm-next-4.7' of git://people.freedesktop.org/~agd5f/linux into drm-next

More amdgpu fixes for 4.7.  Highlights:
- enable async pageflips
- UVD fixes for Polaris
- lots of GPUVM fixes
- whitespace and code cleanups
- misc bug fixes

* 'drm-next-4.7' of git://people.freedesktop.org/~agd5f/linux: (32 commits)
  drm/amd/powerplay: rewrite pp_sw_init to make code readable
  drm/amdgpu/dce11: fix audio offset for asics with >7 audio pins
  drm/amdgpu: fix and cleanup user fence handling v2
  drm/amdgpu: move VM fields into job
  drm/amdgpu: move the context from the IBs into the job
  drm/amdgpu: move context switch handling into common code v2
  drm/amdgpu: move preamble IB handling into common code
  drm/amdgpu/gfx7: fix pipeline sync
  amdgpu/uvd: separate context buffer from DPB
  drm/amdgpu: use fence_context to judge ctx switch v2
  drm/amd/amdgpu:  Added more named DRM info messages for debugging
  drm/amd/amdgpu: Add name field to amd_ip_funcs (v2)
  drm/amdgpu: Support DRM_MODE_PAGE_FLIP_ASYNC (v2)
  drm/amdgpu/dce11: don't share PLLs on Polaris
  drm/amdgpu: Drop unused parameter for *get_sleep_divider_id_from_clock
  drm/amdgpu: Simplify calculation in *get_sleep_divider_id_from_clock
  drm/amdgpu: Use max macro in *get_sleep_divider_id_from_clock
  drm/amd/powerplay: Use defined constants for minium engine clock
  drm/amdgpu: add missing licenses on a couple of files
  drm/amdgpu: fetch cu_info once at init
  ...
Dave Airlie 2016-05-12 11:16:55 +10:00
commit 7d02067261
62 changed files with 514 additions and 396 deletions


@ -283,7 +283,8 @@ struct amdgpu_ring_funcs {
int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx); int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
/* command emit functions */ /* command emit functions */
void (*emit_ib)(struct amdgpu_ring *ring, void (*emit_ib)(struct amdgpu_ring *ring,
struct amdgpu_ib *ib); struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch);
void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr, void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
uint64_t seq, unsigned flags); uint64_t seq, unsigned flags);
void (*emit_pipeline_sync)(struct amdgpu_ring *ring); void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
@ -367,13 +368,6 @@ struct amdgpu_fence_driver {
#define AMDGPU_FENCE_FLAG_64BIT (1 << 0) #define AMDGPU_FENCE_FLAG_64BIT (1 << 0)
#define AMDGPU_FENCE_FLAG_INT (1 << 1) #define AMDGPU_FENCE_FLAG_INT (1 << 1)
struct amdgpu_user_fence {
/* write-back bo */
struct amdgpu_bo *bo;
/* write-back address offset to bo start */
uint32_t offset;
};
int amdgpu_fence_driver_init(struct amdgpu_device *adev); int amdgpu_fence_driver_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_fini(struct amdgpu_device *adev); void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev); void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
@ -507,9 +501,10 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
struct drm_file *file_priv); struct drm_file *file_priv);
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns); unsigned long amdgpu_gem_timeout(uint64_t timeout_ns);
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj); struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev, struct drm_gem_object *
struct dma_buf_attachment *attach, amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
struct sg_table *sg); struct dma_buf_attachment *attach,
struct sg_table *sg);
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev, struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gobj, struct drm_gem_object *gobj,
int flags); int flags);
@ -726,6 +721,7 @@ struct amdgpu_flip_work {
unsigned shared_count; unsigned shared_count;
struct fence **shared; struct fence **shared;
struct fence_cb cb; struct fence_cb cb;
bool async;
}; };
@ -738,17 +734,7 @@ struct amdgpu_ib {
uint32_t length_dw; uint32_t length_dw;
uint64_t gpu_addr; uint64_t gpu_addr;
uint32_t *ptr; uint32_t *ptr;
struct amdgpu_user_fence *user;
struct amdgpu_vm *vm;
unsigned vm_id;
uint64_t vm_pd_addr;
struct amdgpu_ctx *ctx;
uint32_t gds_base, gds_size;
uint32_t gws_base, gws_size;
uint32_t oa_base, oa_size;
uint32_t flags; uint32_t flags;
/* resulting sequence number */
uint64_t sequence;
}; };
enum amdgpu_ring_type { enum amdgpu_ring_type {
@ -762,7 +748,7 @@ enum amdgpu_ring_type {
extern const struct amd_sched_backend_ops amdgpu_sched_ops; extern const struct amd_sched_backend_ops amdgpu_sched_ops;
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
struct amdgpu_job **job); struct amdgpu_job **job, struct amdgpu_vm *vm);
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
struct amdgpu_job **job); struct amdgpu_job **job);
@ -777,7 +763,7 @@ struct amdgpu_ring {
struct amdgpu_device *adev; struct amdgpu_device *adev;
const struct amdgpu_ring_funcs *funcs; const struct amdgpu_ring_funcs *funcs;
struct amdgpu_fence_driver fence_drv; struct amdgpu_fence_driver fence_drv;
struct amd_gpu_scheduler sched; struct amd_gpu_scheduler sched;
spinlock_t fence_lock; spinlock_t fence_lock;
struct amdgpu_bo *ring_obj; struct amdgpu_bo *ring_obj;
@ -805,7 +791,7 @@ struct amdgpu_ring {
unsigned wptr_offs; unsigned wptr_offs;
unsigned next_rptr_offs; unsigned next_rptr_offs;
unsigned fence_offs; unsigned fence_offs;
struct amdgpu_ctx *current_ctx; uint64_t current_ctx;
enum amdgpu_ring_type type; enum amdgpu_ring_type type;
char name[16]; char name[16];
unsigned cond_exe_offs; unsigned cond_exe_offs;
@ -885,6 +871,9 @@ struct amdgpu_vm {
/* Scheduler entity for page table updates */ /* Scheduler entity for page table updates */
struct amd_sched_entity entity; struct amd_sched_entity entity;
/* client id */
u64 client_id;
}; };
struct amdgpu_vm_id { struct amdgpu_vm_id {
@ -893,7 +882,7 @@ struct amdgpu_vm_id {
struct amdgpu_sync active; struct amdgpu_sync active;
struct fence *last_flush; struct fence *last_flush;
struct amdgpu_ring *last_user; struct amdgpu_ring *last_user;
atomic_long_t owner; atomic64_t owner;
uint64_t pd_gpu_addr; uint64_t pd_gpu_addr;
/* last flushed PD/PT update */ /* last flushed PD/PT update */
@ -924,6 +913,8 @@ struct amdgpu_vm_manager {
struct amdgpu_ring *vm_pte_rings[AMDGPU_MAX_RINGS]; struct amdgpu_ring *vm_pte_rings[AMDGPU_MAX_RINGS];
unsigned vm_pte_num_rings; unsigned vm_pte_num_rings;
atomic_t vm_pte_next_ring; atomic_t vm_pte_next_ring;
/* client id counter */
atomic64_t client_counter;
}; };
void amdgpu_vm_manager_init(struct amdgpu_device *adev); void amdgpu_vm_manager_init(struct amdgpu_device *adev);
@ -1143,6 +1134,12 @@ struct amdgpu_gca_config {
uint32_t macrotile_mode_array[16]; uint32_t macrotile_mode_array[16];
}; };
struct amdgpu_cu_info {
uint32_t number; /* total active CU number */
uint32_t ao_cu_mask;
uint32_t bitmap[4][4];
};
struct amdgpu_gfx { struct amdgpu_gfx {
struct mutex gpu_clock_mutex; struct mutex gpu_clock_mutex;
struct amdgpu_gca_config config; struct amdgpu_gca_config config;
@ -1175,17 +1172,19 @@ struct amdgpu_gfx {
struct amdgpu_irq_src priv_reg_irq; struct amdgpu_irq_src priv_reg_irq;
struct amdgpu_irq_src priv_inst_irq; struct amdgpu_irq_src priv_inst_irq;
/* gfx status */ /* gfx status */
uint32_t gfx_current_status; uint32_t gfx_current_status;
/* ce ram size*/ /* ce ram size*/
unsigned ce_ram_size; unsigned ce_ram_size;
struct amdgpu_cu_info cu_info;
}; };
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned size, struct amdgpu_ib *ib); unsigned size, struct amdgpu_ib *ib);
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, struct fence *f); void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
struct fence *f);
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
struct amdgpu_ib *ib, struct fence *last_vm_update, struct amdgpu_ib *ib, struct fence *last_vm_update,
struct fence **f); struct amdgpu_job *job, struct fence **f);
int amdgpu_ib_pool_init(struct amdgpu_device *adev); int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev); void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev); int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
@ -1210,7 +1209,7 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring);
struct amdgpu_cs_chunk { struct amdgpu_cs_chunk {
uint32_t chunk_id; uint32_t chunk_id;
uint32_t length_dw; uint32_t length_dw;
uint32_t *kdata; void *kdata;
}; };
struct amdgpu_cs_parser { struct amdgpu_cs_parser {
@ -1241,13 +1240,25 @@ struct amdgpu_cs_parser {
struct amdgpu_job { struct amdgpu_job {
struct amd_sched_job base; struct amd_sched_job base;
struct amdgpu_device *adev; struct amdgpu_device *adev;
struct amdgpu_vm *vm;
struct amdgpu_ring *ring; struct amdgpu_ring *ring;
struct amdgpu_sync sync; struct amdgpu_sync sync;
struct amdgpu_ib *ibs; struct amdgpu_ib *ibs;
struct fence *fence; /* the hw fence */ struct fence *fence; /* the hw fence */
uint32_t num_ibs; uint32_t num_ibs;
void *owner; void *owner;
struct amdgpu_user_fence uf; uint64_t ctx;
unsigned vm_id;
uint64_t vm_pd_addr;
uint32_t gds_base, gds_size;
uint32_t gws_base, gws_size;
uint32_t oa_base, oa_size;
/* user fence handling */
struct amdgpu_bo *uf_bo;
uint32_t uf_offset;
uint64_t uf_sequence;
}; };
#define to_amdgpu_job(sched_job) \ #define to_amdgpu_job(sched_job) \
container_of((sched_job), struct amdgpu_job, base) container_of((sched_job), struct amdgpu_job, base)
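The to_amdgpu_job() helper above is a plain container_of() cast from the scheduler's embedded base job back to the wrapping amdgpu job, which is how the driver recovers the new per-job fields (vm_id, gds/gws/oa, user fence) from a scheduler callback. For reference, a minimal, self-contained user-space sketch of the same embed-and-recover idiom; the struct and function names here are illustrative stand-ins, not driver code:

#include <stddef.h>
#include <stdio.h>

/* Recover the address of the outer struct from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct sched_job {            /* stands in for struct amd_sched_job */
	int prio;
};

struct my_job {               /* stands in for struct amdgpu_job */
	struct sched_job base;    /* embedded scheduler object */
	unsigned vm_id;
};

#define to_my_job(sj) container_of((sj), struct my_job, base)

int main(void)
{
	struct my_job job = { .base = { .prio = 1 }, .vm_id = 7 };
	struct sched_job *handle = &job.base;   /* what the scheduler passes around */

	/* A driver-side callback gets 'handle' and climbs back to the full job. */
	printf("vm_id = %u\n", to_my_job(handle)->vm_id);
	return 0;
}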
@ -1694,7 +1705,7 @@ struct amdgpu_sdma {
struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES]; struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
struct amdgpu_irq_src trap_irq; struct amdgpu_irq_src trap_irq;
struct amdgpu_irq_src illegal_inst_irq; struct amdgpu_irq_src illegal_inst_irq;
int num_instances; int num_instances;
}; };
/* /*
@ -1787,13 +1798,6 @@ struct amdgpu_allowed_register_entry {
bool grbm_indexed; bool grbm_indexed;
}; };
struct amdgpu_cu_info {
uint32_t number; /* total active CU number */
uint32_t ao_cu_mask;
uint32_t bitmap[4][4];
};
/* /*
* ASIC specific functions. * ASIC specific functions.
*/ */
@ -1811,7 +1815,6 @@ struct amdgpu_asic_funcs {
u32 (*get_xclk)(struct amdgpu_device *adev); u32 (*get_xclk)(struct amdgpu_device *adev);
/* get the gpu clock counter */ /* get the gpu clock counter */
uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev); uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
int (*get_cu_info)(struct amdgpu_device *adev, struct amdgpu_cu_info *info);
/* MM block clocks */ /* MM block clocks */
int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk); int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk); int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
@ -1948,11 +1951,11 @@ struct amdgpu_device {
bool shutdown; bool shutdown;
bool need_dma32; bool need_dma32;
bool accel_working; bool accel_working;
struct work_struct reset_work; struct work_struct reset_work;
struct notifier_block acpi_nb; struct notifier_block acpi_nb;
struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS]; struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS];
struct amdgpu_debugfs debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS]; struct amdgpu_debugfs debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
unsigned debugfs_count; unsigned debugfs_count;
#if defined(CONFIG_DEBUG_FS) #if defined(CONFIG_DEBUG_FS)
struct dentry *debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS]; struct dentry *debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
#endif #endif
@ -2203,7 +2206,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev)) #define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l)) #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
#define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v))) #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
#define amdgpu_asic_get_cu_info(adev, info) (adev)->asic_funcs->get_cu_info((adev), (info))
#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid)) #define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags)) #define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count))) #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
@ -2215,7 +2217,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r)) #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r)) #define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r)) #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
#define amdgpu_ring_emit_ib(r, ib) (r)->funcs->emit_ib((r), (ib)) #define amdgpu_ring_emit_ib(r, ib, vm_id, c) (r)->funcs->emit_ib((r), (ib), (vm_id), (c))
#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r)) #define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr)) #define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags)) #define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
@ -2238,7 +2240,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_display_hpd_set_polarity(adev, h) (adev)->mode_info.funcs->hpd_set_polarity((adev), (h)) #define amdgpu_display_hpd_set_polarity(adev, h) (adev)->mode_info.funcs->hpd_set_polarity((adev), (h))
#define amdgpu_display_hpd_get_gpio_reg(adev) (adev)->mode_info.funcs->hpd_get_gpio_reg((adev)) #define amdgpu_display_hpd_get_gpio_reg(adev) (adev)->mode_info.funcs->hpd_get_gpio_reg((adev))
#define amdgpu_display_bandwidth_update(adev) (adev)->mode_info.funcs->bandwidth_update((adev)) #define amdgpu_display_bandwidth_update(adev) (adev)->mode_info.funcs->bandwidth_update((adev))
#define amdgpu_display_page_flip(adev, crtc, base) (adev)->mode_info.funcs->page_flip((adev), (crtc), (base)) #define amdgpu_display_page_flip(adev, crtc, base, async) (adev)->mode_info.funcs->page_flip((adev), (crtc), (base), (async))
#define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos)) #define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos))
#define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c)) #define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c))
#define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r)) #define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))


@ -480,6 +480,7 @@ static int acp_set_powergating_state(void *handle,
} }
const struct amd_ip_funcs acp_ip_funcs = { const struct amd_ip_funcs acp_ip_funcs = {
.name = "acp_ip",
.early_init = acp_early_init, .early_init = acp_early_init,
.late_init = NULL, .late_init = NULL,
.sw_init = acp_sw_init, .sw_init = acp_sw_init,


@ -263,7 +263,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
for (i = 0; i < args->in.bo_number; ++i) { for (i = 0; i < args->in.bo_number; ++i) {
if (copy_from_user(&info[i], uptr, bytes)) if (copy_from_user(&info[i], uptr, bytes))
goto error_free; goto error_free;
uptr += args->in.bo_info_size; uptr += args->in.bo_info_size;
} }
} }
@ -271,7 +271,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
switch (args->in.operation) { switch (args->in.operation) {
case AMDGPU_BO_LIST_OP_CREATE: case AMDGPU_BO_LIST_OP_CREATE:
r = amdgpu_bo_list_create(fpriv, &list, &handle); r = amdgpu_bo_list_create(fpriv, &list, &handle);
if (r) if (r)
goto error_free; goto error_free;
r = amdgpu_bo_list_set(adev, filp, list, info, r = amdgpu_bo_list_set(adev, filp, list, info,
@ -281,7 +281,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
goto error_free; goto error_free;
break; break;
case AMDGPU_BO_LIST_OP_DESTROY: case AMDGPU_BO_LIST_OP_DESTROY:
amdgpu_bo_list_destroy(fpriv, handle); amdgpu_bo_list_destroy(fpriv, handle);
handle = 0; handle = 0;


@ -793,7 +793,6 @@ static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
struct cgs_system_info *sys_info) struct cgs_system_info *sys_info)
{ {
CGS_FUNC_ADEV; CGS_FUNC_ADEV;
struct amdgpu_cu_info cu_info;
if (NULL == sys_info) if (NULL == sys_info)
return -ENODEV; return -ENODEV;
@ -818,8 +817,7 @@ static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
sys_info->value = adev->pg_flags; sys_info->value = adev->pg_flags;
break; break;
case CGS_SYSTEM_INFO_GFX_CU_INFO: case CGS_SYSTEM_INFO_GFX_CU_INFO:
amdgpu_asic_get_cu_info(adev, &cu_info); sys_info->value = adev->gfx.cu_info.number;
sys_info->value = cu_info.number;
break; break;
default: default:
return -ENODEV; return -ENODEV;


@ -87,44 +87,42 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
} }
static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
struct amdgpu_user_fence *uf, struct drm_amdgpu_cs_chunk_fence *data,
struct drm_amdgpu_cs_chunk_fence *fence_data) uint32_t *offset)
{ {
struct drm_gem_object *gobj; struct drm_gem_object *gobj;
uint32_t handle;
handle = fence_data->handle;
gobj = drm_gem_object_lookup(p->adev->ddev, p->filp, gobj = drm_gem_object_lookup(p->adev->ddev, p->filp,
fence_data->handle); data->handle);
if (gobj == NULL) if (gobj == NULL)
return -EINVAL; return -EINVAL;
uf->bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); p->uf_entry.robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
uf->offset = fence_data->offset;
if (amdgpu_ttm_tt_get_usermm(uf->bo->tbo.ttm)) {
drm_gem_object_unreference_unlocked(gobj);
return -EINVAL;
}
p->uf_entry.robj = amdgpu_bo_ref(uf->bo);
p->uf_entry.priority = 0; p->uf_entry.priority = 0;
p->uf_entry.tv.bo = &p->uf_entry.robj->tbo; p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
p->uf_entry.tv.shared = true; p->uf_entry.tv.shared = true;
p->uf_entry.user_pages = NULL; p->uf_entry.user_pages = NULL;
*offset = data->offset;
drm_gem_object_unreference_unlocked(gobj); drm_gem_object_unreference_unlocked(gobj);
if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) {
amdgpu_bo_unref(&p->uf_entry.robj);
return -EINVAL;
}
return 0; return 0;
} }
int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
{ {
struct amdgpu_fpriv *fpriv = p->filp->driver_priv; struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
union drm_amdgpu_cs *cs = data; union drm_amdgpu_cs *cs = data;
uint64_t *chunk_array_user; uint64_t *chunk_array_user;
uint64_t *chunk_array; uint64_t *chunk_array;
struct amdgpu_user_fence uf = {};
unsigned size, num_ibs = 0; unsigned size, num_ibs = 0;
uint32_t uf_offset = 0;
int i; int i;
int ret; int ret;
@ -199,7 +197,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
goto free_partial_kdata; goto free_partial_kdata;
} }
ret = amdgpu_cs_user_fence_chunk(p, &uf, (void *)p->chunks[i].kdata); ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
&uf_offset);
if (ret) if (ret)
goto free_partial_kdata; goto free_partial_kdata;
@ -214,11 +213,14 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
} }
} }
ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job); ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
if (ret) if (ret)
goto free_all_kdata; goto free_all_kdata;
p->job->uf = uf; if (p->uf_entry.robj) {
p->job->uf_bo = amdgpu_bo_ref(p->uf_entry.robj);
p->job->uf_offset = uf_offset;
}
kfree(chunk_array); kfree(chunk_array);
return 0; return 0;
@ -376,7 +378,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
INIT_LIST_HEAD(&duplicates); INIT_LIST_HEAD(&duplicates);
amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd); amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
if (p->job->uf.bo) if (p->uf_entry.robj)
list_add(&p->uf_entry.tv.head, &p->validated); list_add(&p->uf_entry.tv.head, &p->validated);
if (need_mmap_lock) if (need_mmap_lock)
@ -472,6 +474,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
goto error_validate; goto error_validate;
if (p->bo_list) { if (p->bo_list) {
struct amdgpu_bo *gds = p->bo_list->gds_obj;
struct amdgpu_bo *gws = p->bo_list->gws_obj;
struct amdgpu_bo *oa = p->bo_list->oa_obj;
struct amdgpu_vm *vm = &fpriv->vm; struct amdgpu_vm *vm = &fpriv->vm;
unsigned i; unsigned i;
@ -480,6 +485,19 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
p->bo_list->array[i].bo_va = amdgpu_vm_bo_find(vm, bo); p->bo_list->array[i].bo_va = amdgpu_vm_bo_find(vm, bo);
} }
if (gds) {
p->job->gds_base = amdgpu_bo_gpu_offset(gds);
p->job->gds_size = amdgpu_bo_size(gds);
}
if (gws) {
p->job->gws_base = amdgpu_bo_gpu_offset(gws);
p->job->gws_size = amdgpu_bo_size(gws);
}
if (oa) {
p->job->oa_base = amdgpu_bo_gpu_offset(oa);
p->job->oa_size = amdgpu_bo_size(oa);
}
} }
error_validate: error_validate:
@ -740,41 +758,14 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
ib->length_dw = chunk_ib->ib_bytes / 4; ib->length_dw = chunk_ib->ib_bytes / 4;
ib->flags = chunk_ib->flags; ib->flags = chunk_ib->flags;
ib->ctx = parser->ctx;
j++; j++;
} }
/* add GDS resources to first IB */ /* UVD & VCE fw doesn't support user fences */
if (parser->bo_list) { if (parser->job->uf_bo && (
struct amdgpu_bo *gds = parser->bo_list->gds_obj; parser->job->ring->type == AMDGPU_RING_TYPE_UVD ||
struct amdgpu_bo *gws = parser->bo_list->gws_obj; parser->job->ring->type == AMDGPU_RING_TYPE_VCE))
struct amdgpu_bo *oa = parser->bo_list->oa_obj; return -EINVAL;
struct amdgpu_ib *ib = &parser->job->ibs[0];
if (gds) {
ib->gds_base = amdgpu_bo_gpu_offset(gds);
ib->gds_size = amdgpu_bo_size(gds);
}
if (gws) {
ib->gws_base = amdgpu_bo_gpu_offset(gws);
ib->gws_size = amdgpu_bo_size(gws);
}
if (oa) {
ib->oa_base = amdgpu_bo_gpu_offset(oa);
ib->oa_size = amdgpu_bo_size(oa);
}
}
/* wrap the last IB with user fence */
if (parser->job->uf.bo) {
struct amdgpu_ib *ib = &parser->job->ibs[parser->job->num_ibs - 1];
/* UVD & VCE fw doesn't support user fences */
if (parser->job->ring->type == AMDGPU_RING_TYPE_UVD ||
parser->job->ring->type == AMDGPU_RING_TYPE_VCE)
return -EINVAL;
ib->user = &parser->job->uf;
}
return 0; return 0;
} }
@ -839,6 +830,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
union drm_amdgpu_cs *cs) union drm_amdgpu_cs *cs)
{ {
struct amdgpu_ring *ring = p->job->ring; struct amdgpu_ring *ring = p->job->ring;
struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
struct fence *fence; struct fence *fence;
struct amdgpu_job *job; struct amdgpu_job *job;
int r; int r;
@ -847,19 +839,19 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
p->job = NULL; p->job = NULL;
r = amd_sched_job_init(&job->base, &ring->sched, r = amd_sched_job_init(&job->base, &ring->sched,
&p->ctx->rings[ring->idx].entity, entity, amdgpu_job_timeout_func,
amdgpu_job_timeout_func, amdgpu_job_free_func,
amdgpu_job_free_func, p->filp, &fence);
p->filp, &fence);
if (r) { if (r) {
amdgpu_job_free(job); amdgpu_job_free(job);
return r; return r;
} }
job->owner = p->filp; job->owner = p->filp;
job->ctx = entity->fence_context;
p->fence = fence_get(fence); p->fence = fence_get(fence);
cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, fence); cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, fence);
job->ibs[job->num_ibs - 1].sequence = cs->out.handle; job->uf_sequence = cs->out.handle;
trace_amdgpu_cs_ioctl(job); trace_amdgpu_cs_ioctl(job);
amd_sched_entity_push_job(&job->base); amd_sched_entity_push_job(&job->base);


@ -348,7 +348,7 @@ static int amdgpu_doorbell_init(struct amdgpu_device *adev)
adev->doorbell.base = pci_resource_start(adev->pdev, 2); adev->doorbell.base = pci_resource_start(adev->pdev, 2);
adev->doorbell.size = pci_resource_len(adev->pdev, 2); adev->doorbell.size = pci_resource_len(adev->pdev, 2);
adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32), adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
AMDGPU_DOORBELL_MAX_ASSIGNMENT+1); AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
if (adev->doorbell.num_doorbells == 0) if (adev->doorbell.num_doorbells == 0)
return -EINVAL; return -EINVAL;
@ -1196,7 +1196,7 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
if (r == -ENOENT) { if (r == -ENOENT) {
adev->ip_block_status[i].valid = false; adev->ip_block_status[i].valid = false;
} else if (r) { } else if (r) {
DRM_ERROR("early_init %d failed %d\n", i, r); DRM_ERROR("early_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
return r; return r;
} else { } else {
adev->ip_block_status[i].valid = true; adev->ip_block_status[i].valid = true;
@ -1219,7 +1219,7 @@ static int amdgpu_init(struct amdgpu_device *adev)
continue; continue;
r = adev->ip_blocks[i].funcs->sw_init((void *)adev); r = adev->ip_blocks[i].funcs->sw_init((void *)adev);
if (r) { if (r) {
DRM_ERROR("sw_init %d failed %d\n", i, r); DRM_ERROR("sw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
return r; return r;
} }
adev->ip_block_status[i].sw = true; adev->ip_block_status[i].sw = true;
@ -1252,7 +1252,7 @@ static int amdgpu_init(struct amdgpu_device *adev)
continue; continue;
r = adev->ip_blocks[i].funcs->hw_init((void *)adev); r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
if (r) { if (r) {
DRM_ERROR("hw_init %d failed %d\n", i, r); DRM_ERROR("hw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
return r; return r;
} }
adev->ip_block_status[i].hw = true; adev->ip_block_status[i].hw = true;
@ -1272,13 +1272,13 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
AMD_CG_STATE_GATE); AMD_CG_STATE_GATE);
if (r) { if (r) {
DRM_ERROR("set_clockgating_state(gate) %d failed %d\n", i, r); DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
return r; return r;
} }
if (adev->ip_blocks[i].funcs->late_init) { if (adev->ip_blocks[i].funcs->late_init) {
r = adev->ip_blocks[i].funcs->late_init((void *)adev); r = adev->ip_blocks[i].funcs->late_init((void *)adev);
if (r) { if (r) {
DRM_ERROR("late_init %d failed %d\n", i, r); DRM_ERROR("late_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
return r; return r;
} }
} }
@ -1302,13 +1302,13 @@ static int amdgpu_fini(struct amdgpu_device *adev)
r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
AMD_CG_STATE_UNGATE); AMD_CG_STATE_UNGATE);
if (r) { if (r) {
DRM_ERROR("set_clockgating_state(ungate) %d failed %d\n", i, r); DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
return r; return r;
} }
r = adev->ip_blocks[i].funcs->hw_fini((void *)adev); r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
/* XXX handle errors */ /* XXX handle errors */
if (r) { if (r) {
DRM_DEBUG("hw_fini %d failed %d\n", i, r); DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
} }
adev->ip_block_status[i].hw = false; adev->ip_block_status[i].hw = false;
} }
@ -1319,7 +1319,7 @@ static int amdgpu_fini(struct amdgpu_device *adev)
r = adev->ip_blocks[i].funcs->sw_fini((void *)adev); r = adev->ip_blocks[i].funcs->sw_fini((void *)adev);
/* XXX handle errors */ /* XXX handle errors */
if (r) { if (r) {
DRM_DEBUG("sw_fini %d failed %d\n", i, r); DRM_DEBUG("sw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
} }
adev->ip_block_status[i].sw = false; adev->ip_block_status[i].sw = false;
adev->ip_block_status[i].valid = false; adev->ip_block_status[i].valid = false;
@ -1347,14 +1347,14 @@ static int amdgpu_suspend(struct amdgpu_device *adev)
r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
AMD_CG_STATE_UNGATE); AMD_CG_STATE_UNGATE);
if (r) { if (r) {
DRM_ERROR("set_clockgating_state(ungate) %d failed %d\n", i, r); DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
} }
} }
/* XXX handle errors */ /* XXX handle errors */
r = adev->ip_blocks[i].funcs->suspend(adev); r = adev->ip_blocks[i].funcs->suspend(adev);
/* XXX handle errors */ /* XXX handle errors */
if (r) { if (r) {
DRM_ERROR("suspend %d failed %d\n", i, r); DRM_ERROR("suspend of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
} }
} }
@ -1370,7 +1370,7 @@ static int amdgpu_resume(struct amdgpu_device *adev)
continue; continue;
r = adev->ip_blocks[i].funcs->resume(adev); r = adev->ip_blocks[i].funcs->resume(adev);
if (r) { if (r) {
DRM_ERROR("resume %d failed %d\n", i, r); DRM_ERROR("resume of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r);
return r; return r;
} }
} }


@ -132,7 +132,7 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
vblank->linedur_ns / 1000, stat, vpos, hpos); vblank->linedur_ns / 1000, stat, vpos, hpos);
/* Do the flip (mmio) */ /* Do the flip (mmio) */
adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base); adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);
/* Set the flip status */ /* Set the flip status */
amdgpuCrtc->pflip_status = AMDGPU_FLIP_SUBMITTED; amdgpuCrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
@ -197,6 +197,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
work->event = event; work->event = event;
work->adev = adev; work->adev = adev;
work->crtc_id = amdgpu_crtc->crtc_id; work->crtc_id = amdgpu_crtc->crtc_id;
work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
/* schedule unpin of the old buffer */ /* schedule unpin of the old buffer */
old_amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); old_amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);


@ -43,7 +43,7 @@ struct amdgpu_ring;
struct amdgpu_bo; struct amdgpu_bo;
struct amdgpu_gds_asic_info { struct amdgpu_gds_asic_info {
uint32_t total_size; uint32_t total_size;
uint32_t gfx_partition_size; uint32_t gfx_partition_size;
uint32_t cs_partition_size; uint32_t cs_partition_size;
}; };
@ -52,8 +52,8 @@ struct amdgpu_gds {
struct amdgpu_gds_asic_info mem; struct amdgpu_gds_asic_info mem;
struct amdgpu_gds_asic_info gws; struct amdgpu_gds_asic_info gws;
struct amdgpu_gds_asic_info oa; struct amdgpu_gds_asic_info oa;
/* At present, GDS, GWS and OA resources for gfx (graphics) /* At present, GDS, GWS and OA resources for gfx (graphics)
* is always pre-allocated and available for graphics operation. * is always pre-allocated and available for graphics operation.
* Such resource is shared between all gfx clients. * Such resource is shared between all gfx clients.
* TODO: move this operation to user space * TODO: move this operation to user space
* */ * */


@ -74,9 +74,6 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo); ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
} }
ib->vm = vm;
ib->vm_id = 0;
return 0; return 0;
} }
@ -89,7 +86,8 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
* *
* Free an IB (all asics). * Free an IB (all asics).
*/ */
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, struct fence *f) void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
struct fence *f)
{ {
amdgpu_sa_bo_free(adev, &ib->sa_bo, f); amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
} }
@ -117,29 +115,37 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, struct fen
*/ */
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
struct amdgpu_ib *ibs, struct fence *last_vm_update, struct amdgpu_ib *ibs, struct fence *last_vm_update,
struct fence **f) struct amdgpu_job *job, struct fence **f)
{ {
struct amdgpu_device *adev = ring->adev; struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib *ib = &ibs[0]; struct amdgpu_ib *ib = &ibs[0];
struct amdgpu_ctx *ctx, *old_ctx; bool skip_preamble, need_ctx_switch;
unsigned patch_offset = ~0;
struct amdgpu_vm *vm; struct amdgpu_vm *vm;
struct fence *hwf; struct fence *hwf;
unsigned i, patch_offset = ~0; uint64_t ctx;
unsigned i;
int r = 0; int r = 0;
if (num_ibs == 0) if (num_ibs == 0)
return -EINVAL; return -EINVAL;
ctx = ibs->ctx; /* ring tests don't use a job */
vm = ibs->vm; if (job) {
vm = job->vm;
ctx = job->ctx;
} else {
vm = NULL;
ctx = 0;
}
if (!ring->ready) { if (!ring->ready) {
dev_err(adev->dev, "couldn't schedule ib\n"); dev_err(adev->dev, "couldn't schedule ib\n");
return -EINVAL; return -EINVAL;
} }
if (vm && !ibs->vm_id) { if (vm && !job->vm_id) {
dev_err(adev->dev, "VM IB without ID\n"); dev_err(adev->dev, "VM IB without ID\n");
return -EINVAL; return -EINVAL;
} }
@ -154,58 +160,54 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
patch_offset = amdgpu_ring_init_cond_exec(ring); patch_offset = amdgpu_ring_init_cond_exec(ring);
if (vm) { if (vm) {
/* do context switch */ r = amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr,
r = amdgpu_vm_flush(ring, ib->vm_id, ib->vm_pd_addr, job->gds_base, job->gds_size,
ib->gds_base, ib->gds_size, job->gws_base, job->gws_size,
ib->gws_base, ib->gws_size, job->oa_base, job->oa_size);
ib->oa_base, ib->oa_size);
if (r) { if (r) {
amdgpu_ring_undo(ring); amdgpu_ring_undo(ring);
return r; return r;
} }
if (ring->funcs->emit_hdp_flush)
amdgpu_ring_emit_hdp_flush(ring);
} }
if (ring->funcs->emit_hdp_flush)
amdgpu_ring_emit_hdp_flush(ring);
/* always set cond_exec_polling to CONTINUE */ /* always set cond_exec_polling to CONTINUE */
*ring->cond_exe_cpu_addr = 1; *ring->cond_exe_cpu_addr = 1;
old_ctx = ring->current_ctx; skip_preamble = ring->current_ctx == ctx;
need_ctx_switch = ring->current_ctx != ctx;
for (i = 0; i < num_ibs; ++i) { for (i = 0; i < num_ibs; ++i) {
ib = &ibs[i]; ib = &ibs[i];
if (ib->ctx != ctx || ib->vm != vm) { /* drop preamble IBs if we don't have a context switch */
ring->current_ctx = old_ctx; if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && skip_preamble)
if (ib->vm_id) continue;
amdgpu_vm_reset_id(adev, ib->vm_id);
amdgpu_ring_undo(ring); amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0,
return -EINVAL; need_ctx_switch);
} need_ctx_switch = false;
amdgpu_ring_emit_ib(ring, ib);
ring->current_ctx = ctx;
} }
if (vm) { if (ring->funcs->emit_hdp_invalidate)
if (ring->funcs->emit_hdp_invalidate) amdgpu_ring_emit_hdp_invalidate(ring);
amdgpu_ring_emit_hdp_invalidate(ring);
}
r = amdgpu_fence_emit(ring, &hwf); r = amdgpu_fence_emit(ring, &hwf);
if (r) { if (r) {
dev_err(adev->dev, "failed to emit fence (%d)\n", r); dev_err(adev->dev, "failed to emit fence (%d)\n", r);
ring->current_ctx = old_ctx; if (job && job->vm_id)
if (ib->vm_id) amdgpu_vm_reset_id(adev, job->vm_id);
amdgpu_vm_reset_id(adev, ib->vm_id);
amdgpu_ring_undo(ring); amdgpu_ring_undo(ring);
return r; return r;
} }
/* wrap the last IB with fence */ /* wrap the last IB with fence */
if (ib->user) { if (job && job->uf_bo) {
uint64_t addr = amdgpu_bo_gpu_offset(ib->user->bo); uint64_t addr = amdgpu_bo_gpu_offset(job->uf_bo);
addr += ib->user->offset;
amdgpu_ring_emit_fence(ring, addr, ib->sequence, addr += job->uf_offset;
amdgpu_ring_emit_fence(ring, addr, job->uf_sequence,
AMDGPU_FENCE_FLAG_64BIT); AMDGPU_FENCE_FLAG_64BIT);
} }
@ -215,6 +217,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
if (patch_offset != ~0 && ring->funcs->patch_cond_exec) if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
amdgpu_ring_patch_cond_exec(ring, patch_offset); amdgpu_ring_patch_cond_exec(ring, patch_offset);
ring->current_ctx = ctx;
amdgpu_ring_commit(ring); amdgpu_ring_commit(ring);
return 0; return 0;
} }


@ -46,7 +46,7 @@ void amdgpu_job_timeout_func(struct work_struct *work)
} }
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
struct amdgpu_job **job) struct amdgpu_job **job, struct amdgpu_vm *vm)
{ {
size_t size = sizeof(struct amdgpu_job); size_t size = sizeof(struct amdgpu_job);
@ -60,6 +60,7 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
return -ENOMEM; return -ENOMEM;
(*job)->adev = adev; (*job)->adev = adev;
(*job)->vm = vm;
(*job)->ibs = (void *)&(*job)[1]; (*job)->ibs = (void *)&(*job)[1];
(*job)->num_ibs = num_ibs; (*job)->num_ibs = num_ibs;
INIT_WORK(&(*job)->base.work_free_job, amdgpu_job_free_handler); INIT_WORK(&(*job)->base.work_free_job, amdgpu_job_free_handler);
@ -74,7 +75,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
{ {
int r; int r;
r = amdgpu_job_alloc(adev, 1, job); r = amdgpu_job_alloc(adev, 1, job, NULL);
if (r) if (r)
return r; return r;
@ -96,7 +97,7 @@ void amdgpu_job_free(struct amdgpu_job *job)
amdgpu_sa_bo_free(job->adev, &job->ibs[i].sa_bo, f); amdgpu_sa_bo_free(job->adev, &job->ibs[i].sa_bo, f);
fence_put(job->fence); fence_put(job->fence);
amdgpu_bo_unref(&job->uf.bo); amdgpu_bo_unref(&job->uf_bo);
amdgpu_sync_free(&job->sync); amdgpu_sync_free(&job->sync);
if (!job->base.use_sched) if (!job->base.use_sched)
@ -121,14 +122,13 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
return -EINVAL; return -EINVAL;
r = amd_sched_job_init(&job->base, &ring->sched, r = amd_sched_job_init(&job->base, &ring->sched,
entity, entity, amdgpu_job_timeout_func,
amdgpu_job_timeout_func, amdgpu_job_free_func, owner, &fence);
amdgpu_job_free_func,
owner, &fence);
if (r) if (r)
return r; return r;
job->owner = owner; job->owner = owner;
job->ctx = entity->fence_context;
*f = fence_get(fence); *f = fence_get(fence);
amd_sched_entity_push_job(&job->base); amd_sched_entity_push_job(&job->base);
@ -138,27 +138,19 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job) static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
{ {
struct amdgpu_job *job = to_amdgpu_job(sched_job); struct amdgpu_job *job = to_amdgpu_job(sched_job);
struct amdgpu_vm *vm = job->ibs->vm; struct amdgpu_vm *vm = job->vm;
struct fence *fence = amdgpu_sync_get_fence(&job->sync); struct fence *fence = amdgpu_sync_get_fence(&job->sync);
if (fence == NULL && vm && !job->ibs->vm_id) { if (fence == NULL && vm && !job->vm_id) {
struct amdgpu_ring *ring = job->ring; struct amdgpu_ring *ring = job->ring;
unsigned i, vm_id;
uint64_t vm_pd_addr;
int r; int r;
r = amdgpu_vm_grab_id(vm, ring, &job->sync, r = amdgpu_vm_grab_id(vm, ring, &job->sync,
&job->base.s_fence->base, &job->base.s_fence->base,
&vm_id, &vm_pd_addr); &job->vm_id, &job->vm_pd_addr);
if (r) if (r)
DRM_ERROR("Error getting VM ID (%d)\n", r); DRM_ERROR("Error getting VM ID (%d)\n", r);
else {
for (i = 0; i < job->num_ibs; ++i) {
job->ibs[i].vm_id = vm_id;
job->ibs[i].vm_pd_addr = vm_pd_addr;
}
}
fence = amdgpu_sync_get_fence(&job->sync); fence = amdgpu_sync_get_fence(&job->sync);
} }
@ -186,7 +178,7 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
trace_amdgpu_sched_run_job(job); trace_amdgpu_sched_run_job(job);
r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs,
job->sync.last_vm_update, &fence); job->sync.last_vm_update, job, &fence);
if (r) { if (r) {
DRM_ERROR("Error scheduling IBs (%d)\n", r); DRM_ERROR("Error scheduling IBs (%d)\n", r);
goto err; goto err;


@ -427,7 +427,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
} }
case AMDGPU_INFO_DEV_INFO: { case AMDGPU_INFO_DEV_INFO: {
struct drm_amdgpu_info_device dev_info = {}; struct drm_amdgpu_info_device dev_info = {};
struct amdgpu_cu_info cu_info;
dev_info.device_id = dev->pdev->device; dev_info.device_id = dev->pdev->device;
dev_info.chip_rev = adev->rev_id; dev_info.chip_rev = adev->rev_id;
@ -461,11 +460,11 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
AMDGPU_GPU_PAGE_SIZE; AMDGPU_GPU_PAGE_SIZE;
dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE; dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;
amdgpu_asic_get_cu_info(adev, &cu_info); dev_info.cu_active_number = adev->gfx.cu_info.number;
dev_info.cu_active_number = cu_info.number; dev_info.cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
dev_info.cu_ao_mask = cu_info.ao_cu_mask;
dev_info.ce_ram_size = adev->gfx.ce_ram_size; dev_info.ce_ram_size = adev->gfx.ce_ram_size;
memcpy(&dev_info.cu_bitmap[0], &cu_info.bitmap[0], sizeof(cu_info.bitmap)); memcpy(&dev_info.cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
sizeof(adev->gfx.cu_info.bitmap));
dev_info.vram_type = adev->mc.vram_type; dev_info.vram_type = adev->mc.vram_type;
dev_info.vram_bit_width = adev->mc.vram_width; dev_info.vram_bit_width = adev->mc.vram_width;
dev_info.vce_harvest_config = adev->vce.harvest_config; dev_info.vce_harvest_config = adev->vce.harvest_config;


@ -283,7 +283,7 @@ struct amdgpu_display_funcs {
u32 (*hpd_get_gpio_reg)(struct amdgpu_device *adev); u32 (*hpd_get_gpio_reg)(struct amdgpu_device *adev);
/* pageflipping */ /* pageflipping */
void (*page_flip)(struct amdgpu_device *adev, void (*page_flip)(struct amdgpu_device *adev,
int crtc_id, u64 crtc_base); int crtc_id, u64 crtc_base, bool async);
int (*page_flip_get_scanoutpos)(struct amdgpu_device *adev, int crtc, int (*page_flip_get_scanoutpos)(struct amdgpu_device *adev, int crtc,
u32 *vbl, u32 *position); u32 *vbl, u32 *position);
/* display topology setup */ /* display topology setup */
@ -530,7 +530,7 @@ struct amdgpu_framebuffer {
((em) == ATOM_ENCODER_MODE_DP_MST)) ((em) == ATOM_ENCODER_MODE_DP_MST))
/* Driver internal use only flags of amdgpu_get_crtc_scanoutpos() */ /* Driver internal use only flags of amdgpu_get_crtc_scanoutpos() */
#define USE_REAL_VBLANKSTART (1 << 30) #define USE_REAL_VBLANKSTART (1 << 30)
#define GET_DISTANCE_TO_VBLANKSTART (1 << 31) #define GET_DISTANCE_TO_VBLANKSTART (1 << 31)
void amdgpu_link_encoder_connector(struct drm_device *dev); void amdgpu_link_encoder_connector(struct drm_device *dev);


@ -304,6 +304,7 @@ static int amdgpu_pp_soft_reset(void *handle)
} }
const struct amd_ip_funcs amdgpu_pp_ip_funcs = { const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
.name = "amdgpu_powerplay",
.early_init = amdgpu_pp_early_init, .early_init = amdgpu_pp_early_init,
.late_init = amdgpu_pp_late_init, .late_init = amdgpu_pp_late_init,
.sw_init = amdgpu_pp_sw_init, .sw_init = amdgpu_pp_sw_init,


@ -57,9 +57,10 @@ void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
ttm_bo_kunmap(&bo->dma_buf_vmap); ttm_bo_kunmap(&bo->dma_buf_vmap);
} }
struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev, struct drm_gem_object *
struct dma_buf_attachment *attach, amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
struct sg_table *sg) struct dma_buf_attachment *attach,
struct sg_table *sg)
{ {
struct reservation_object *resv = attach->dmabuf->resv; struct reservation_object *resv = attach->dmabuf->resv;
struct amdgpu_device *adev = dev->dev_private; struct amdgpu_device *adev = dev->dev_private;


@ -41,13 +41,15 @@
/* 1 second timeout */ /* 1 second timeout */
#define UVD_IDLE_TIMEOUT_MS 1000 #define UVD_IDLE_TIMEOUT_MS 1000
/* Polaris10/11 firmware version */
#define FW_1_66_16 ((1 << 24) | (66 << 16) | (16 << 8))
/* Firmware Names */ /* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK #ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE "radeon/bonaire_uvd.bin" #define FIRMWARE_BONAIRE "radeon/bonaire_uvd.bin"
#define FIRMWARE_KABINI "radeon/kabini_uvd.bin" #define FIRMWARE_KABINI "radeon/kabini_uvd.bin"
#define FIRMWARE_KAVERI "radeon/kaveri_uvd.bin" #define FIRMWARE_KAVERI "radeon/kaveri_uvd.bin"
#define FIRMWARE_HAWAII "radeon/hawaii_uvd.bin" #define FIRMWARE_HAWAII "radeon/hawaii_uvd.bin"
#define FIRMWARE_MULLINS "radeon/mullins_uvd.bin" #define FIRMWARE_MULLINS "radeon/mullins_uvd.bin"
#endif #endif
#define FIRMWARE_TONGA "amdgpu/tonga_uvd.bin" #define FIRMWARE_TONGA "amdgpu/tonga_uvd.bin"
@ -184,6 +186,12 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) | adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
(family_id << 8)); (family_id << 8));
if ((adev->asic_type == CHIP_POLARIS10 ||
adev->asic_type == CHIP_POLARIS11) &&
(adev->uvd.fw_version < FW_1_66_16))
DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n",
version_major, version_minor);
bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8) bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
+ AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
+ AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles; + AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles;
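The new FW_1_66_16 constant packs a minimum firmware version as (major << 24) | (minor << 16) | (third byte << 8), the same layout used to assemble adev->uvd.fw_version just above, so the Polaris check reduces to an integer comparison. A small stand-alone C sketch of that packing and comparison; the helper name and the "loaded" example value are illustrative, not from the driver:

#include <stdint.h>
#include <stdio.h>

/* Pack a UVD firmware version the way the driver does:
 * major in bits 31..24, minor in bits 23..16, the third field in bits 15..8. */
static uint32_t pack_fw_version(uint32_t major, uint32_t minor, uint32_t rev)
{
	return (major << 24) | (minor << 16) | (rev << 8);
}

int main(void)
{
	const uint32_t fw_1_66_16 = pack_fw_version(1, 66, 16);  /* the new minimum */
	uint32_t loaded = pack_fw_version(1, 64, 16);             /* illustrative, too old */

	if (loaded < fw_1_66_16)
		printf("UVD firmware %u.%u is too old\n",
		       loaded >> 24, (loaded >> 16) & 0xff);
	return 0;
}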
@ -414,7 +422,8 @@ static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
* *
* Peek into the decode message and calculate the necessary buffer sizes. * Peek into the decode message and calculate the necessary buffer sizes.
*/ */
static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[]) static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
unsigned buf_sizes[])
{ {
unsigned stream_type = msg[4]; unsigned stream_type = msg[4];
unsigned width = msg[6]; unsigned width = msg[6];
@ -436,7 +445,6 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
switch (stream_type) { switch (stream_type) {
case 0: /* H264 */ case 0: /* H264 */
case 7: /* H264 Perf */
switch(level) { switch(level) {
case 30: case 30:
num_dpb_buffer = 8100 / fs_in_mb; num_dpb_buffer = 8100 / fs_in_mb;
@ -514,6 +522,54 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64); min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
break; break;
case 7: /* H264 Perf */
switch(level) {
case 30:
num_dpb_buffer = 8100 / fs_in_mb;
break;
case 31:
num_dpb_buffer = 18000 / fs_in_mb;
break;
case 32:
num_dpb_buffer = 20480 / fs_in_mb;
break;
case 41:
num_dpb_buffer = 32768 / fs_in_mb;
break;
case 42:
num_dpb_buffer = 34816 / fs_in_mb;
break;
case 50:
num_dpb_buffer = 110400 / fs_in_mb;
break;
case 51:
num_dpb_buffer = 184320 / fs_in_mb;
break;
default:
num_dpb_buffer = 184320 / fs_in_mb;
break;
}
num_dpb_buffer++;
if (num_dpb_buffer > 17)
num_dpb_buffer = 17;
/* reference picture buffer */
min_dpb_size = image_size * num_dpb_buffer;
if (adev->asic_type < CHIP_POLARIS10){
/* macroblock context buffer */
min_dpb_size +=
width_in_mb * height_in_mb * num_dpb_buffer * 192;
/* IT surface buffer */
min_dpb_size += width_in_mb * height_in_mb * 32;
} else {
/* macroblock context buffer */
min_ctx_size =
width_in_mb * height_in_mb * num_dpb_buffer * 192;
}
break;
case 16: /* H265 */ case 16: /* H265 */
image_size = (ALIGN(width, 16) * ALIGN(height, 16) * 3) / 2; image_size = (ALIGN(width, 16) * ALIGN(height, 16) * 3) / 2;
image_size = ALIGN(image_size, 256); image_size = ALIGN(image_size, 256);
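The new case 7 (H264 Perf) path mirrors the plain H264 sizing, but on Polaris10 and later the macroblock context buffer is reported separately as min_ctx_size instead of being folded into the DPB. A worked example in stand-alone C, following only the formulas shown above; the frame geometry, level and image_size inputs are illustrative placeholders (in the driver they come from the decode message):

#include <stdio.h>

int main(void)
{
	unsigned width_in_mb  = 120;                          /* e.g. 1920 / 16 */
	unsigned height_in_mb = 68;                           /* e.g. 1088 / 16 */
	unsigned fs_in_mb     = width_in_mb * height_in_mb;   /* 8160 macroblocks */
	unsigned image_size   = 1920 * 1088 * 3 / 2;          /* illustrative NV12 frame size */
	unsigned num_dpb_buffer, min_dpb_size, min_ctx_size;

	num_dpb_buffer = 32768 / fs_in_mb + 1;  /* level 4.1 table entry, plus one */
	if (num_dpb_buffer > 17)
		num_dpb_buffer = 17;

	/* reference picture buffer */
	min_dpb_size = image_size * num_dpb_buffer;
	/* Polaris and newer: macroblock context buffer accounted separately */
	min_ctx_size = width_in_mb * height_in_mb * num_dpb_buffer * 192;

	printf("dpb: %u buffers, %u bytes; ctx: %u bytes\n",
	       num_dpb_buffer, min_dpb_size, min_ctx_size);
	return 0;
}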
@ -609,7 +665,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
case 1: case 1:
/* it's a decode msg, calc buffer sizes */ /* it's a decode msg, calc buffer sizes */
r = amdgpu_uvd_cs_msg_decode(msg, ctx->buf_sizes); r = amdgpu_uvd_cs_msg_decode(adev, msg, ctx->buf_sizes);
amdgpu_bo_kunmap(bo); amdgpu_bo_kunmap(bo);
if (r) if (r)
return r; return r;
@ -910,7 +966,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
ib->length_dw = 16; ib->length_dw = 16;
if (direct) { if (direct) {
r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f); r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
job->fence = f; job->fence = f;
if (r) if (r)
goto err_free; goto err_free;


@ -41,9 +41,9 @@
/* Firmware Names */ /* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK #ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE "radeon/bonaire_vce.bin" #define FIRMWARE_BONAIRE "radeon/bonaire_vce.bin"
#define FIRMWARE_KABINI "radeon/kabini_vce.bin" #define FIRMWARE_KABINI "radeon/kabini_vce.bin"
#define FIRMWARE_KAVERI "radeon/kaveri_vce.bin" #define FIRMWARE_KAVERI "radeon/kaveri_vce.bin"
#define FIRMWARE_HAWAII "radeon/hawaii_vce.bin" #define FIRMWARE_HAWAII "radeon/hawaii_vce.bin"
#define FIRMWARE_MULLINS "radeon/mullins_vce.bin" #define FIRMWARE_MULLINS "radeon/mullins_vce.bin"
#endif #endif
#define FIRMWARE_TONGA "amdgpu/tonga_vce.bin" #define FIRMWARE_TONGA "amdgpu/tonga_vce.bin"
@ -436,7 +436,7 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
for (i = ib->length_dw; i < ib_size_dw; ++i) for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0; ib->ptr[i] = 0x0;
r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f); r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
job->fence = f; job->fence = f;
if (r) if (r)
goto err; goto err;
@ -498,7 +498,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
ib->ptr[i] = 0x0; ib->ptr[i] = 0x0;
if (direct) { if (direct) {
r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f); r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
job->fence = f; job->fence = f;
if (r) if (r)
goto err; goto err;
@ -762,7 +762,8 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
* @ib: the IB to execute * @ib: the IB to execute
* *
*/ */
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch)
{ {
amdgpu_ring_write(ring, VCE_CMD_IB); amdgpu_ring_write(ring, VCE_CMD_IB);
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));


@ -34,7 +34,8 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
bool direct, struct fence **fence); bool direct, struct fence **fence);
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp); void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx); int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib); void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch);
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
unsigned flags); unsigned flags);
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring); int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring);


@ -185,7 +185,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
if (!id) if (!id)
continue; continue;
if (atomic_long_read(&id->owner) != (long)vm) if (atomic64_read(&id->owner) != vm->client_id)
continue; continue;
if (pd_addr != id->pd_gpu_addr) if (pd_addr != id->pd_gpu_addr)
@ -261,7 +261,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
list_move_tail(&id->list, &adev->vm_manager.ids_lru); list_move_tail(&id->list, &adev->vm_manager.ids_lru);
id->last_user = ring; id->last_user = ring;
atomic_long_set(&id->owner, (long)vm); atomic64_set(&id->owner, vm->client_id);
vm->ids[ring->idx] = id; vm->ids[ring->idx] = id;
*vm_id = id - adev->vm_manager.ids; *vm_id = id - adev->vm_manager.ids;
@ -300,10 +300,12 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring,
int r; int r;
if (ring->funcs->emit_pipeline_sync && ( if (ring->funcs->emit_pipeline_sync && (
pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed)) pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed ||
ring->type == AMDGPU_RING_TYPE_COMPUTE))
amdgpu_ring_emit_pipeline_sync(ring); amdgpu_ring_emit_pipeline_sync(ring);
if (pd_addr != AMDGPU_VM_NO_FLUSH) { if (ring->funcs->emit_vm_flush &&
pd_addr != AMDGPU_VM_NO_FLUSH) {
struct fence *fence; struct fence *fence;
trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id); trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id);
@ -1386,6 +1388,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
vm->ids[i] = NULL; vm->ids[i] = NULL;
vm->va = RB_ROOT; vm->va = RB_ROOT;
vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
spin_lock_init(&vm->status_lock); spin_lock_init(&vm->status_lock);
INIT_LIST_HEAD(&vm->invalidated); INIT_LIST_HEAD(&vm->invalidated);
INIT_LIST_HEAD(&vm->cleared); INIT_LIST_HEAD(&vm->cleared);
@ -1477,15 +1480,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
amdgpu_bo_unref(&vm->page_directory); amdgpu_bo_unref(&vm->page_directory);
fence_put(vm->page_directory_fence); fence_put(vm->page_directory_fence);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_vm_id *id = vm->ids[i];
if (!id)
continue;
atomic_long_cmpxchg(&id->owner, (long)vm, 0);
}
} }
/** /**
@ -1510,6 +1504,7 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
} }
atomic_set(&adev->vm_manager.vm_pte_next_ring, 0); atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
atomic64_set(&adev->vm_manager.client_counter, 0);
} }
/** /**
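The owner tracking above switches from stashing the struct amdgpu_vm pointer in an atomic_long_t to a per-VM 64-bit client id handed out by a global counter; because ids are never reused, a freed VM can no longer alias a new one, which is why the cmpxchg cleanup loop in amdgpu_vm_fini goes away. A minimal user-space sketch of the same id-allocation idea using C11 atomics; the names here are illustrative stand-ins for adev->vm_manager.client_counter and vm->client_id:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Global, monotonically increasing client id counter (never reused). */
static atomic_uint_least64_t client_counter;

struct vm {
	uint64_t client_id;
};

static void vm_init(struct vm *vm)
{
	/* Equivalent of atomic64_inc_return(): fetch_add returns the old value. */
	vm->client_id = atomic_fetch_add(&client_counter, 1) + 1;
}

int main(void)
{
	struct vm a, b;

	vm_init(&a);
	vm_init(&b);
	/* Ownership checks now compare ids, not possibly recycled pointers. */
	printf("a=%llu b=%llu distinct=%d\n",
	       (unsigned long long)a.client_id,
	       (unsigned long long)b.client_id,
	       a.client_id != b.client_id);
	return 0;
}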


@ -92,7 +92,7 @@
#define ATOM_WS_AND_MASK 0x45 #define ATOM_WS_AND_MASK 0x45
#define ATOM_WS_FB_WINDOW 0x46 #define ATOM_WS_FB_WINDOW 0x46
#define ATOM_WS_ATTRIBUTES 0x47 #define ATOM_WS_ATTRIBUTES 0x47
#define ATOM_WS_REGPTR 0x48 #define ATOM_WS_REGPTR 0x48
#define ATOM_IIO_NOP 0 #define ATOM_IIO_NOP 0
#define ATOM_IIO_START 1 #define ATOM_IIO_START 1


@ -2549,19 +2549,17 @@ static int ci_get_dependency_volt_by_clk(struct amdgpu_device *adev,
return 0; return 0;
} }
static u8 ci_get_sleep_divider_id_from_clock(struct amdgpu_device *adev, static u8 ci_get_sleep_divider_id_from_clock(u32 sclk, u32 min_sclk_in_sr)
u32 sclk, u32 min_sclk_in_sr)
{ {
u32 i; u32 i;
u32 tmp; u32 tmp;
u32 min = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ? u32 min = max(min_sclk_in_sr, (u32)CISLAND_MINIMUM_ENGINE_CLOCK);
min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK;
if (sclk < min) if (sclk < min)
return 0; return 0;
for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) { for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
tmp = sclk / (1 << i); tmp = sclk >> i;
if (tmp >= min || i == 0) if (tmp >= min || i == 0)
break; break;
} }
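The reworked helper drops the unused adev parameter, clamps the floor with max() and replaces the divide-by-power-of-two with a shift; the loop still walks divider ids from the largest downward until sclk >> i stays at or above the minimum engine clock. A stand-alone C sketch of the same search; the two constants are assumed values for CISLAND_MAX_DEEPSLEEP_DIVIDER_ID and CISLAND_MINIMUM_ENGINE_CLOCK, and the clock units in main() are illustrative:

#include <stdint.h>
#include <stdio.h>

#define MAX_DEEPSLEEP_DIVIDER_ID 5     /* assumed value of CISLAND_MAX_DEEPSLEEP_DIVIDER_ID */
#define MINIMUM_ENGINE_CLOCK     2500  /* assumed value of CISLAND_MINIMUM_ENGINE_CLOCK */

#define max(a, b) ((a) > (b) ? (a) : (b))

/* Same logic as the rewritten helper: pick the largest divider id i
 * such that sclk >> i still meets the minimum engine clock. */
static uint8_t get_sleep_divider_id_from_clock(uint32_t sclk, uint32_t min_sclk_in_sr)
{
	uint32_t i;
	uint32_t min = max(min_sclk_in_sr, (uint32_t)MINIMUM_ENGINE_CLOCK);

	if (sclk < min)
		return 0;

	for (i = MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
		if ((sclk >> i) >= min || i == 0)
			break;
	}
	return (uint8_t)i;
}

int main(void)
{
	/* Example: an engine clock well above the minimum picks divider id 4. */
	printf("divider id = %u\n",
	       get_sleep_divider_id_from_clock(60000, MINIMUM_ENGINE_CLOCK));
	return 0;
}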
@ -3358,8 +3356,7 @@ static int ci_populate_single_graphic_level(struct amdgpu_device *adev,
graphic_level->PowerThrottle = 0; graphic_level->PowerThrottle = 0;
if (pi->caps_sclk_ds) if (pi->caps_sclk_ds)
graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(adev, graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(engine_clock,
engine_clock,
CISLAND_MINIMUM_ENGINE_CLOCK); CISLAND_MINIMUM_ENGINE_CLOCK);
graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
@ -6363,7 +6360,7 @@ static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev,
} }
static int ci_dpm_process_interrupt(struct amdgpu_device *adev, static int ci_dpm_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source, struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry) struct amdgpu_iv_entry *entry)
{ {
bool queue_thermal = false; bool queue_thermal = false;
@ -6405,6 +6402,7 @@ static int ci_dpm_set_powergating_state(void *handle,
} }
const struct amd_ip_funcs ci_dpm_ip_funcs = { const struct amd_ip_funcs ci_dpm_ip_funcs = {
.name = "ci_dpm",
.early_init = ci_dpm_early_init, .early_init = ci_dpm_early_init,
.late_init = ci_dpm_late_init, .late_init = ci_dpm_late_init,
.sw_init = ci_dpm_sw_init, .sw_init = ci_dpm_sw_init,
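(Illustrative aside, not part of the diff.) The ci_get_sleep_divider_id_from_clock rework above, and its kv/fiji/tonga counterparts later in this series, all converge on the same arithmetic: clamp the stutter-mode floor with max() and walk down from the largest divider id until the clock shifted right by that id still meets the floor. A standalone sketch of just that computation, using placeholder constants instead of the per-ASIC CISLAND_* values:

#include <stdint.h>
#include <stdio.h>

#define MINIMUM_ENGINE_CLOCK      2500  /* placeholder for the per-ASIC minimum (10 kHz units) */
#define MAX_DEEPSLEEP_DIVIDER_ID     5  /* placeholder for the per-ASIC maximum divider id */

#define max(a, b) ((a) > (b) ? (a) : (b))

/* Largest divider id i such that (sclk >> i) still meets the minimum clock. */
static uint8_t get_sleep_divider_id_from_clock(uint32_t sclk, uint32_t min_sclk_in_sr)
{
	uint32_t min = max(min_sclk_in_sr, (uint32_t)MINIMUM_ENGINE_CLOCK);
	uint32_t i;

	if (sclk < min)
		return 0;

	for (i = MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
		if ((sclk >> i) >= min || i == 0)
			break;
	}
	return (uint8_t)i;
}

int main(void)
{
	/* 60000 (600 MHz in 10 kHz units) against the 2500 floor picks divider id 4. */
	printf("%u\n", get_sleep_divider_id_from_clock(60000, 0));
	return 0;
}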


@ -2007,7 +2007,6 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
.get_xclk = &cik_get_xclk, .get_xclk = &cik_get_xclk,
.set_uvd_clocks = &cik_set_uvd_clocks, .set_uvd_clocks = &cik_set_uvd_clocks,
.set_vce_clocks = &cik_set_vce_clocks, .set_vce_clocks = &cik_set_vce_clocks,
.get_cu_info = &gfx_v7_0_get_cu_info,
/* these should be moved to their own ip modules */ /* these should be moved to their own ip modules */
.get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter, .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
.wait_for_mc_idle = &gmc_v7_0_mc_wait_for_idle, .wait_for_mc_idle = &gmc_v7_0_mc_wait_for_idle,
@ -2233,6 +2232,7 @@ static int cik_common_set_powergating_state(void *handle,
} }
const struct amd_ip_funcs cik_common_ip_funcs = { const struct amd_ip_funcs cik_common_ip_funcs = {
.name = "cik_common",
.early_init = cik_common_early_init, .early_init = cik_common_early_init,
.late_init = NULL, .late_init = NULL,
.sw_init = cik_common_sw_init, .sw_init = cik_common_sw_init,


@ -243,7 +243,7 @@ static void cik_ih_decode_iv(struct amdgpu_device *adev,
/* wptr/rptr are in bytes! */ /* wptr/rptr are in bytes! */
u32 ring_index = adev->irq.ih.rptr >> 2; u32 ring_index = adev->irq.ih.rptr >> 2;
uint32_t dw[4]; uint32_t dw[4];
dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]); dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]); dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]);
dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]); dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
@ -415,6 +415,7 @@ static int cik_ih_set_powergating_state(void *handle,
} }
const struct amd_ip_funcs cik_ih_ip_funcs = { const struct amd_ip_funcs cik_ih_ip_funcs = {
.name = "cik_ih",
.early_init = cik_ih_early_init, .early_init = cik_ih_early_init,
.late_init = NULL, .late_init = NULL,
.sw_init = cik_ih_sw_init, .sw_init = cik_ih_sw_init,


@ -210,9 +210,10 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
* Schedule an IB in the DMA ring (CIK). * Schedule an IB in the DMA ring (CIK).
*/ */
static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring, static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib) struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch)
{ {
u32 extra_bits = ib->vm_id & 0xf; u32 extra_bits = vm_id & 0xf;
u32 next_rptr = ring->wptr + 5; u32 next_rptr = ring->wptr + 5;
while ((next_rptr & 7) != 4) while ((next_rptr & 7) != 4)
@ -643,7 +644,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
ib.ptr[3] = 1; ib.ptr[3] = 1;
ib.ptr[4] = 0xDEADBEEF; ib.ptr[4] = 0xDEADBEEF;
ib.length_dw = 5; ib.length_dw = 5;
r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
if (r) if (r)
goto err1; goto err1;
@ -1223,6 +1224,7 @@ static int cik_sdma_set_powergating_state(void *handle,
} }
const struct amd_ip_funcs cik_sdma_ip_funcs = { const struct amd_ip_funcs cik_sdma_ip_funcs = {
.name = "cik_sdma",
.early_init = cik_sdma_early_init, .early_init = cik_sdma_early_init,
.late_init = NULL, .late_init = NULL,
.sw_init = cik_sdma_sw_init, .sw_init = cik_sdma_sw_init,


@ -190,8 +190,8 @@
# define MACRO_TILE_ASPECT(x) ((x) << 4) # define MACRO_TILE_ASPECT(x) ((x) << 4)
# define NUM_BANKS(x) ((x) << 6) # define NUM_BANKS(x) ((x) << 6)
#define MSG_ENTER_RLC_SAFE_MODE 1 #define MSG_ENTER_RLC_SAFE_MODE 1
#define MSG_EXIT_RLC_SAFE_MODE 0 #define MSG_EXIT_RLC_SAFE_MODE 0
/* /*
* PM4 * PM4


@ -2230,6 +2230,7 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
} }
const struct amd_ip_funcs cz_dpm_ip_funcs = { const struct amd_ip_funcs cz_dpm_ip_funcs = {
.name = "cz_dpm",
.early_init = cz_dpm_early_init, .early_init = cz_dpm_early_init,
.late_init = cz_dpm_late_init, .late_init = cz_dpm_late_init,
.sw_init = cz_dpm_sw_init, .sw_init = cz_dpm_sw_init,


@ -222,7 +222,7 @@ static void cz_ih_decode_iv(struct amdgpu_device *adev,
/* wptr/rptr are in bytes! */ /* wptr/rptr are in bytes! */
u32 ring_index = adev->irq.ih.rptr >> 2; u32 ring_index = adev->irq.ih.rptr >> 2;
uint32_t dw[4]; uint32_t dw[4];
dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]); dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]); dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]);
dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]); dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
@ -396,6 +396,7 @@ static int cz_ih_set_powergating_state(void *handle,
} }
const struct amd_ip_funcs cz_ih_ip_funcs = { const struct amd_ip_funcs cz_ih_ip_funcs = {
.name = "cz_ih",
.early_init = cz_ih_early_init, .early_init = cz_ih_early_init,
.late_init = NULL, .late_init = NULL,
.sw_init = cz_ih_sw_init, .sw_init = cz_ih_sw_init,


@ -77,7 +77,7 @@ struct cz_smu_private_data {
uint8_t driver_buffer_length; uint8_t driver_buffer_length;
uint8_t scratch_buffer_length; uint8_t scratch_buffer_length;
uint16_t toc_entry_used_count; uint16_t toc_entry_used_count;
uint16_t toc_entry_initialize_index; uint16_t toc_entry_initialize_index;
uint16_t toc_entry_power_profiling_index; uint16_t toc_entry_power_profiling_index;
uint16_t toc_entry_aram; uint16_t toc_entry_aram;
uint16_t toc_entry_ih_register_restore_task_index; uint16_t toc_entry_ih_register_restore_task_index;


@ -284,10 +284,16 @@ static void dce_v10_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
* surface base address. * surface base address.
*/ */
static void dce_v10_0_page_flip(struct amdgpu_device *adev, static void dce_v10_0_page_flip(struct amdgpu_device *adev,
int crtc_id, u64 crtc_base) int crtc_id, u64 crtc_base, bool async)
{ {
struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
u32 tmp;
/* flip at hsync for async, default is vsync */
tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
GRPH_SURFACE_UPDATE_H_RETRACE_EN, async ? 1 : 0);
WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
/* update the primary scanout address */ /* update the primary scanout address */
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(crtc_base)); upper_32_bits(crtc_base));
@ -2211,6 +2217,14 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
dce_v10_0_vga_enable(crtc, false); dce_v10_0_vga_enable(crtc, false);
/* Make sure surface address is updated at vertical blank rather than
* horizontal blank
*/
tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0);
WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(fb_location)); upper_32_bits(fb_location));
WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
@ -2261,13 +2275,6 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset, WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
(viewport_w << 16) | viewport_h); (viewport_w << 16) | viewport_h);
/* pageflip setup */
/* make sure flip is at vb rather than hb */
tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0);
WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
/* set pageflip to happen only at start of vblank interval (front porch) */ /* set pageflip to happen only at start of vblank interval (front porch) */
WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3); WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3);
@ -2992,6 +2999,8 @@ static int dce_v10_0_sw_init(void *handle)
adev->ddev->mode_config.funcs = &amdgpu_mode_funcs; adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
adev->ddev->mode_config.async_page_flip = true;
adev->ddev->mode_config.max_width = 16384; adev->ddev->mode_config.max_width = 16384;
adev->ddev->mode_config.max_height = 16384; adev->ddev->mode_config.max_height = 16384;
@ -3490,6 +3499,7 @@ static int dce_v10_0_set_powergating_state(void *handle,
} }
const struct amd_ip_funcs dce_v10_0_ip_funcs = { const struct amd_ip_funcs dce_v10_0_ip_funcs = {
.name = "dce_v10_0",
.early_init = dce_v10_0_early_init, .early_init = dce_v10_0_early_init,
.late_init = NULL, .late_init = NULL,
.sw_init = dce_v10_0_sw_init, .sw_init = dce_v10_0_sw_init,
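(Illustrative aside, not part of the diff.) Setting mode_config.async_page_flip here is what lets the DRM core accept DRM_MODE_PAGE_FLIP_ASYNC on these ASICs. From userspace the request goes through the usual libdrm entry point; a minimal call site, with placeholder crtc/fb ids and no error handling, could look like:

#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Request a page flip that is allowed to happen outside vblank (may tear).
 * crtc_id and fb_id are placeholders for objects set up elsewhere. */
static int queue_async_flip(int fd, uint32_t crtc_id, uint32_t fb_id, void *user_data)
{
	return drmModePageFlip(fd, crtc_id, fb_id,
			       DRM_MODE_PAGE_FLIP_EVENT | DRM_MODE_PAGE_FLIP_ASYNC,
			       user_data);
}

If the kernel rejects the ASYNC flag for a given configuration, callers generally fall back to a vblank-synchronised flip without it.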


@ -302,10 +302,17 @@ static void dce_v11_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
* surface base address. * surface base address.
*/ */
static void dce_v11_0_page_flip(struct amdgpu_device *adev, static void dce_v11_0_page_flip(struct amdgpu_device *adev,
int crtc_id, u64 crtc_base) int crtc_id, u64 crtc_base, bool async)
{ {
struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
u32 tmp;
/* flip at hsync for async, default is vsync */
/* use UPDATE_IMMEDIATE_EN instead for async? */
tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
GRPH_SURFACE_UPDATE_H_RETRACE_EN, async ? 1 : 0);
WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
/* update the scanout addresses */ /* update the scanout addresses */
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(crtc_base)); upper_32_bits(crtc_base));
@ -1595,6 +1602,7 @@ static const u32 pin_offsets[] =
AUD4_REGISTER_OFFSET, AUD4_REGISTER_OFFSET,
AUD5_REGISTER_OFFSET, AUD5_REGISTER_OFFSET,
AUD6_REGISTER_OFFSET, AUD6_REGISTER_OFFSET,
AUD7_REGISTER_OFFSET,
}; };
static int dce_v11_0_audio_init(struct amdgpu_device *adev) static int dce_v11_0_audio_init(struct amdgpu_device *adev)
@ -2185,6 +2193,14 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
dce_v11_0_vga_enable(crtc, false); dce_v11_0_vga_enable(crtc, false);
/* Make sure surface address is updated at vertical blank rather than
* horizontal blank
*/
tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0);
WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(fb_location)); upper_32_bits(fb_location));
WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
@ -2235,13 +2251,6 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset, WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
(viewport_w << 16) | viewport_h); (viewport_w << 16) | viewport_h);
/* pageflip setup */
/* make sure flip is at vb rather than hb */
tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0);
WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
/* set pageflip to happen only at start of vblank interval (front porch) */ /* set pageflip to happen only at start of vblank interval (front porch) */
WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3); WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3);
@ -2419,10 +2428,6 @@ static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc)
if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
return ATOM_DP_DTO; return ATOM_DP_DTO;
/* use the same PPLL for all monitors with the same clock */
pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
if (pll != ATOM_PPLL_INVALID)
return pll;
switch (amdgpu_encoder->encoder_id) { switch (amdgpu_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
@ -3046,6 +3051,8 @@ static int dce_v11_0_sw_init(void *handle)
adev->ddev->mode_config.funcs = &amdgpu_mode_funcs; adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
adev->ddev->mode_config.async_page_flip = true;
adev->ddev->mode_config.max_width = 16384; adev->ddev->mode_config.max_width = 16384;
adev->ddev->mode_config.max_height = 16384; adev->ddev->mode_config.max_height = 16384;
@ -3553,6 +3560,7 @@ static int dce_v11_0_set_powergating_state(void *handle,
} }
const struct amd_ip_funcs dce_v11_0_ip_funcs = { const struct amd_ip_funcs dce_v11_0_ip_funcs = {
.name = "dce_v11_0",
.early_init = dce_v11_0_early_init, .early_init = dce_v11_0_early_init,
.late_init = NULL, .late_init = NULL,
.sw_init = dce_v11_0_sw_init, .sw_init = dce_v11_0_sw_init,


@ -233,10 +233,13 @@ static void dce_v8_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
* surface base address. * surface base address.
*/ */
static void dce_v8_0_page_flip(struct amdgpu_device *adev, static void dce_v8_0_page_flip(struct amdgpu_device *adev,
int crtc_id, u64 crtc_base) int crtc_id, u64 crtc_base, bool async)
{ {
struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
/* flip at hsync for async, default is vsync */
WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
/* update the primary scanout addresses */ /* update the primary scanout addresses */
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(crtc_base)); upper_32_bits(crtc_base));
@ -1999,7 +2002,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
uint32_t fb_format, fb_pitch_pixels; uint32_t fb_format, fb_pitch_pixels;
u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT); u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
u32 pipe_config; u32 pipe_config;
u32 tmp, viewport_w, viewport_h; u32 viewport_w, viewport_h;
int r; int r;
bool bypass_lut = false; bool bypass_lut = false;
@ -2135,6 +2138,11 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
dce_v8_0_vga_enable(crtc, false); dce_v8_0_vga_enable(crtc, false);
/* Make sure surface address is updated at vertical blank rather than
* horizontal blank
*/
WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(fb_location)); upper_32_bits(fb_location));
WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
@ -2182,12 +2190,6 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset, WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
(viewport_w << 16) | viewport_h); (viewport_w << 16) | viewport_h);
/* pageflip setup */
/* make sure flip is at vb rather than hb */
tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
tmp &= ~GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK;
WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
/* set pageflip to happen only at start of vblank interval (front porch) */ /* set pageflip to happen only at start of vblank interval (front porch) */
WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3); WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 3);
@ -2902,6 +2904,8 @@ static int dce_v8_0_sw_init(void *handle)
adev->ddev->mode_config.funcs = &amdgpu_mode_funcs; adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
adev->ddev->mode_config.async_page_flip = true;
adev->ddev->mode_config.max_width = 16384; adev->ddev->mode_config.max_width = 16384;
adev->ddev->mode_config.max_height = 16384; adev->ddev->mode_config.max_height = 16384;
@ -3420,6 +3424,7 @@ static int dce_v8_0_set_powergating_state(void *handle,
} }
const struct amd_ip_funcs dce_v8_0_ip_funcs = { const struct amd_ip_funcs dce_v8_0_ip_funcs = {
.name = "dce_v8_0",
.early_init = dce_v8_0_early_init, .early_init = dce_v8_0_early_init,
.late_init = NULL, .late_init = NULL,
.sw_init = dce_v8_0_sw_init, .sw_init = dce_v8_0_sw_init,


@ -143,6 +143,7 @@ static int fiji_dpm_set_powergating_state(void *handle,
} }
const struct amd_ip_funcs fiji_dpm_ip_funcs = { const struct amd_ip_funcs fiji_dpm_ip_funcs = {
.name = "fiji_dpm",
.early_init = fiji_dpm_early_init, .early_init = fiji_dpm_early_init,
.late_init = NULL, .late_init = NULL,
.sw_init = fiji_dpm_sw_init, .sw_init = fiji_dpm_sw_init,


@ -53,7 +53,6 @@
static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev); static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev); static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev); static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev);
int gfx_v7_0_get_cu_info(struct amdgpu_device *, struct amdgpu_cu_info *);
MODULE_FIRMWARE("radeon/bonaire_pfp.bin"); MODULE_FIRMWARE("radeon/bonaire_pfp.bin");
MODULE_FIRMWARE("radeon/bonaire_me.bin"); MODULE_FIRMWARE("radeon/bonaire_me.bin");
@ -882,6 +881,7 @@ static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev);
static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer); static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev); static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev);
static void gfx_v7_0_init_pg(struct amdgpu_device *adev); static void gfx_v7_0_init_pg(struct amdgpu_device *adev);
static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev);
/* /*
* Core functions * Core functions
@ -1718,6 +1718,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
gfx_v7_0_tiling_mode_table_init(adev); gfx_v7_0_tiling_mode_table_init(adev);
gfx_v7_0_setup_rb(adev); gfx_v7_0_setup_rb(adev);
gfx_v7_0_get_cu_info(adev);
/* set HW defaults for 3D engine */ /* set HW defaults for 3D engine */
WREG32(mmCP_MEQ_THRESHOLDS, WREG32(mmCP_MEQ_THRESHOLDS,
@ -2029,17 +2030,13 @@ static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
* on the gfx ring for execution by the GPU. * on the gfx ring for execution by the GPU.
*/ */
static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
struct amdgpu_ib *ib) struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch)
{ {
bool need_ctx_switch = ring->current_ctx != ib->ctx;
u32 header, control = 0; u32 header, control = 0;
u32 next_rptr = ring->wptr + 5; u32 next_rptr = ring->wptr + 5;
/* drop the CE preamble IB for the same context */ if (ctx_switch)
if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch)
return;
if (need_ctx_switch)
next_rptr += 2; next_rptr += 2;
next_rptr += 4; next_rptr += 4;
@ -2050,7 +2047,7 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, next_rptr); amdgpu_ring_write(ring, next_rptr);
/* insert SWITCH_BUFFER packet before first IB in the ring frame */ /* insert SWITCH_BUFFER packet before first IB in the ring frame */
if (need_ctx_switch) { if (ctx_switch) {
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, 0);
} }
@ -2060,7 +2057,7 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
else else
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
control |= ib->length_dw | (ib->vm_id << 24); control |= ib->length_dw | (vm_id << 24);
amdgpu_ring_write(ring, header); amdgpu_ring_write(ring, header);
amdgpu_ring_write(ring, amdgpu_ring_write(ring,
@ -2073,7 +2070,8 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
} }
static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring, static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
struct amdgpu_ib *ib) struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch)
{ {
u32 header, control = 0; u32 header, control = 0;
u32 next_rptr = ring->wptr + 5; u32 next_rptr = ring->wptr + 5;
@ -2088,7 +2086,7 @@ static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
control |= ib->length_dw | (ib->vm_id << 24); control |= ib->length_dw | (vm_id << 24);
amdgpu_ring_write(ring, header); amdgpu_ring_write(ring, header);
amdgpu_ring_write(ring, amdgpu_ring_write(ring,
@ -2136,7 +2134,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
ib.ptr[2] = 0xDEADBEEF; ib.ptr[2] = 0xDEADBEEF;
ib.length_dw = 3; ib.length_dw = 3;
r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
if (r) if (r)
goto err2; goto err2;
@ -3053,6 +3051,19 @@ static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{ {
int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
uint32_t seq = ring->fence_drv.sync_seq;
uint64_t addr = ring->fence_drv.gpu_addr;
amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
WAIT_REG_MEM_FUNCTION(3) | /* equal */
WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq);
amdgpu_ring_write(ring, 0xffffffff);
amdgpu_ring_write(ring, 4); /* poll interval */
if (usepfp) { if (usepfp) {
/* synce CE with ME to prevent CE fetch CEIB before context switch done */ /* synce CE with ME to prevent CE fetch CEIB before context switch done */
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
@ -3080,18 +3091,6 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr) unsigned vm_id, uint64_t pd_addr)
{ {
int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
uint32_t seq = ring->fence_drv.sync_seq;
uint64_t addr = ring->fence_drv.gpu_addr;
amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
WAIT_REG_MEM_FUNCTION(3) | /* equal */
WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq);
amdgpu_ring_write(ring, 0xffffffff);
amdgpu_ring_write(ring, 4); /* poll interval */
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
@ -3869,18 +3868,13 @@ static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev)
static void gfx_v7_0_init_ao_cu_mask(struct amdgpu_device *adev) static void gfx_v7_0_init_ao_cu_mask(struct amdgpu_device *adev)
{ {
uint32_t tmp, active_cu_number; u32 tmp;
struct amdgpu_cu_info cu_info;
gfx_v7_0_get_cu_info(adev, &cu_info); WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
tmp = cu_info.ao_cu_mask;
active_cu_number = cu_info.number;
WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, tmp);
tmp = RREG32(mmRLC_MAX_PG_CU); tmp = RREG32(mmRLC_MAX_PG_CU);
tmp &= ~RLC_MAX_PG_CU__MAX_POWERED_UP_CU_MASK; tmp &= ~RLC_MAX_PG_CU__MAX_POWERED_UP_CU_MASK;
tmp |= (active_cu_number << RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT); tmp |= (adev->gfx.cu_info.number << RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT);
WREG32(mmRLC_MAX_PG_CU, tmp); WREG32(mmRLC_MAX_PG_CU, tmp);
} }
@ -4898,6 +4892,7 @@ static int gfx_v7_0_set_powergating_state(void *handle,
} }
const struct amd_ip_funcs gfx_v7_0_ip_funcs = { const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
.name = "gfx_v7_0",
.early_init = gfx_v7_0_early_init, .early_init = gfx_v7_0_early_init,
.late_init = gfx_v7_0_late_init, .late_init = gfx_v7_0_late_init,
.sw_init = gfx_v7_0_sw_init, .sw_init = gfx_v7_0_sw_init,
@ -5015,14 +5010,11 @@ static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev)
} }
int gfx_v7_0_get_cu_info(struct amdgpu_device *adev, static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
struct amdgpu_cu_info *cu_info)
{ {
int i, j, k, counter, active_cu_number = 0; int i, j, k, counter, active_cu_number = 0;
u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0; u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
if (!adev || !cu_info)
return -EINVAL;
memset(cu_info, 0, sizeof(*cu_info)); memset(cu_info, 0, sizeof(*cu_info));
@ -5053,6 +5045,4 @@ int gfx_v7_0_get_cu_info(struct amdgpu_device *adev,
cu_info->number = active_cu_number; cu_info->number = active_cu_number;
cu_info->ao_cu_mask = ao_cu_mask; cu_info->ao_cu_mask = ao_cu_mask;
return 0;
} }
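(Illustrative aside, not part of the diff.) For readers following the emit_ib signature change, the control word written after the INDIRECT_BUFFER header is unchanged in substance; it simply takes vm_id from the caller now instead of ib->vm_id. A tiny self-contained illustration of the packing used in the hunks above (the 0xf mask is an assumption for the sketch; the driver relies on vm_id already being small):

#include <stdint.h>
#include <stdio.h>

/* Pack the IB length in dwords into the low bits and the VMID into
 * bits 24+, as in "control |= ib->length_dw | (vm_id << 24)" above. */
static uint32_t ib_control_word(uint32_t length_dw, unsigned vm_id)
{
	return length_dw | ((uint32_t)(vm_id & 0xf) << 24);
}

int main(void)
{
	printf("0x%08x\n", ib_control_word(3, 2)); /* prints 0x02000003 */
	return 0;
}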


@ -32,6 +32,5 @@ void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev);
void gfx_v7_0_rlc_stop(struct amdgpu_device *adev); void gfx_v7_0_rlc_stop(struct amdgpu_device *adev);
uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev); uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev);
void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num); void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num);
int gfx_v7_0_get_cu_info(struct amdgpu_device *adev, struct amdgpu_cu_info *cu_info);
#endif #endif


@ -603,7 +603,7 @@ static const u32 stoney_golden_settings_a11[] =
mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c, mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
mmTA_CNTL_AUX, 0x000f000f, 0x000b0000, mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
mmTCC_CTRL, 0x00100000, 0xf31fff7f, mmTCC_CTRL, 0x00100000, 0xf31fff7f,
mmTCC_EXE_DISABLE, 0x00000002, 0x00000002, mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f1, mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f1,
mmTCP_CHAN_STEER_LO, 0xffffffff, 0x10101010, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x10101010,
@ -636,6 +636,7 @@ static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev); static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev);
static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev); static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev);
static u32 gfx_v8_0_get_csb_size(struct amdgpu_device *adev); static u32 gfx_v8_0_get_csb_size(struct amdgpu_device *adev);
static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev);
static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev) static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
{ {
@ -800,7 +801,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring)
ib.ptr[2] = 0xDEADBEEF; ib.ptr[2] = 0xDEADBEEF;
ib.length_dw = 3; ib.length_dw = 3;
r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
if (r) if (r)
goto err2; goto err2;
@ -1551,7 +1552,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4); ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
/* shedule the ib on the ring */ /* shedule the ib on the ring */
r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
if (r) { if (r) {
DRM_ERROR("amdgpu: ib submit failed (%d).\n", r); DRM_ERROR("amdgpu: ib submit failed (%d).\n", r);
goto fail; goto fail;
@ -3431,6 +3432,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
gfx_v8_0_tiling_mode_table_init(adev); gfx_v8_0_tiling_mode_table_init(adev);
gfx_v8_0_setup_rb(adev); gfx_v8_0_setup_rb(adev);
gfx_v8_0_get_cu_info(adev);
/* XXX SH_MEM regs */ /* XXX SH_MEM regs */
/* where to put LDS, scratch, GPUVM in FSA64 space */ /* where to put LDS, scratch, GPUVM in FSA64 space */
@ -5644,17 +5646,13 @@ static void gfx_v8_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
} }
static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
struct amdgpu_ib *ib) struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch)
{ {
bool need_ctx_switch = ring->current_ctx != ib->ctx;
u32 header, control = 0; u32 header, control = 0;
u32 next_rptr = ring->wptr + 5; u32 next_rptr = ring->wptr + 5;
/* drop the CE preamble IB for the same context */ if (ctx_switch)
if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch)
return;
if (need_ctx_switch)
next_rptr += 2; next_rptr += 2;
next_rptr += 4; next_rptr += 4;
@ -5665,7 +5663,7 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, next_rptr); amdgpu_ring_write(ring, next_rptr);
/* insert SWITCH_BUFFER packet before first IB in the ring frame */ /* insert SWITCH_BUFFER packet before first IB in the ring frame */
if (need_ctx_switch) { if (ctx_switch) {
amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, 0);
} }
@ -5675,7 +5673,7 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
else else
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
control |= ib->length_dw | (ib->vm_id << 24); control |= ib->length_dw | (vm_id << 24);
amdgpu_ring_write(ring, header); amdgpu_ring_write(ring, header);
amdgpu_ring_write(ring, amdgpu_ring_write(ring,
@ -5688,7 +5686,8 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
} }
static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring, static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
struct amdgpu_ib *ib) struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch)
{ {
u32 header, control = 0; u32 header, control = 0;
u32 next_rptr = ring->wptr + 5; u32 next_rptr = ring->wptr + 5;
@ -5704,7 +5703,7 @@ static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
control |= ib->length_dw | (ib->vm_id << 24); control |= ib->length_dw | (vm_id << 24);
amdgpu_ring_write(ring, header); amdgpu_ring_write(ring, header);
amdgpu_ring_write(ring, amdgpu_ring_write(ring,
@ -6064,6 +6063,7 @@ static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev,
} }
const struct amd_ip_funcs gfx_v8_0_ip_funcs = { const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
.name = "gfx_v8_0",
.early_init = gfx_v8_0_early_init, .early_init = gfx_v8_0_early_init,
.late_init = gfx_v8_0_late_init, .late_init = gfx_v8_0_late_init,
.sw_init = gfx_v8_0_sw_init, .sw_init = gfx_v8_0_sw_init,
@ -6212,14 +6212,11 @@ static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev)
return (~data) & mask; return (~data) & mask;
} }
int gfx_v8_0_get_cu_info(struct amdgpu_device *adev, static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
struct amdgpu_cu_info *cu_info)
{ {
int i, j, k, counter, active_cu_number = 0; int i, j, k, counter, active_cu_number = 0;
u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0; u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
if (!adev || !cu_info)
return -EINVAL;
memset(cu_info, 0, sizeof(*cu_info)); memset(cu_info, 0, sizeof(*cu_info));
@ -6250,6 +6247,4 @@ int gfx_v8_0_get_cu_info(struct amdgpu_device *adev,
cu_info->number = active_cu_number; cu_info->number = active_cu_number;
cu_info->ao_cu_mask = ao_cu_mask; cu_info->ao_cu_mask = ao_cu_mask;
return 0;
} }


@ -28,6 +28,5 @@ extern const struct amd_ip_funcs gfx_v8_0_ip_funcs;
uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev); uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev);
void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num); void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num);
int gfx_v8_0_get_cu_info(struct amdgpu_device *adev, struct amdgpu_cu_info *cu_info);
#endif #endif


@ -1261,6 +1261,7 @@ static int gmc_v7_0_set_powergating_state(void *handle,
} }
const struct amd_ip_funcs gmc_v7_0_ip_funcs = { const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
.name = "gmc_v7_0",
.early_init = gmc_v7_0_early_init, .early_init = gmc_v7_0_early_init,
.late_init = gmc_v7_0_late_init, .late_init = gmc_v7_0_late_init,
.sw_init = gmc_v7_0_sw_init, .sw_init = gmc_v7_0_sw_init,


@ -1423,6 +1423,7 @@ static int gmc_v8_0_set_powergating_state(void *handle,
} }
const struct amd_ip_funcs gmc_v8_0_ip_funcs = { const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
.name = "gmc_v8_0",
.early_init = gmc_v8_0_early_init, .early_init = gmc_v8_0_early_init,
.late_init = gmc_v8_0_late_init, .late_init = gmc_v8_0_late_init,
.sw_init = gmc_v8_0_sw_init, .sw_init = gmc_v8_0_sw_init,


@ -157,6 +157,7 @@ static int iceland_dpm_set_powergating_state(void *handle,
} }
const struct amd_ip_funcs iceland_dpm_ip_funcs = { const struct amd_ip_funcs iceland_dpm_ip_funcs = {
.name = "iceland_dpm",
.early_init = iceland_dpm_early_init, .early_init = iceland_dpm_early_init,
.late_init = NULL, .late_init = NULL,
.sw_init = iceland_dpm_sw_init, .sw_init = iceland_dpm_sw_init,


@ -394,6 +394,7 @@ static int iceland_ih_set_powergating_state(void *handle,
} }
const struct amd_ip_funcs iceland_ih_ip_funcs = { const struct amd_ip_funcs iceland_ih_ip_funcs = {
.name = "iceland_ih",
.early_init = iceland_ih_early_init, .early_init = iceland_ih_early_init,
.late_init = NULL, .late_init = NULL,
.sw_init = iceland_ih_sw_init, .sw_init = iceland_ih_sw_init,


@ -135,11 +135,6 @@ static void sumo_take_smu_control(struct amdgpu_device *adev, bool enable)
#endif #endif
} }
static u32 sumo_get_sleep_divider_from_id(u32 id)
{
return 1 << id;
}
static void sumo_construct_sclk_voltage_mapping_table(struct amdgpu_device *adev, static void sumo_construct_sclk_voltage_mapping_table(struct amdgpu_device *adev,
struct sumo_sclk_voltage_mapping_table *sclk_voltage_mapping_table, struct sumo_sclk_voltage_mapping_table *sclk_voltage_mapping_table,
ATOM_AVAILABLE_SCLK_LIST *table) ATOM_AVAILABLE_SCLK_LIST *table)
@ -2176,8 +2171,7 @@ static u8 kv_get_sleep_divider_id_from_clock(struct amdgpu_device *adev,
struct kv_power_info *pi = kv_get_pi(adev); struct kv_power_info *pi = kv_get_pi(adev);
u32 i; u32 i;
u32 temp; u32 temp;
u32 min = (min_sclk_in_sr > KV_MINIMUM_ENGINE_CLOCK) ? u32 min = max(min_sclk_in_sr, (u32)KV_MINIMUM_ENGINE_CLOCK);
min_sclk_in_sr : KV_MINIMUM_ENGINE_CLOCK;
if (sclk < min) if (sclk < min)
return 0; return 0;
@ -2186,7 +2180,7 @@ static u8 kv_get_sleep_divider_id_from_clock(struct amdgpu_device *adev,
return 0; return 0;
for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) { for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
temp = sclk / sumo_get_sleep_divider_from_id(i); temp = sclk >> i;
if (temp >= min) if (temp >= min)
break; break;
} }
@ -3244,6 +3238,7 @@ static int kv_dpm_set_powergating_state(void *handle,
} }
const struct amd_ip_funcs kv_dpm_ip_funcs = { const struct amd_ip_funcs kv_dpm_ip_funcs = {
.name = "kv_dpm",
.early_init = kv_dpm_early_init, .early_init = kv_dpm_early_init,
.late_init = kv_dpm_late_init, .late_init = kv_dpm_late_init,
.sw_init = kv_dpm_sw_init, .sw_init = kv_dpm_sw_init,


@ -242,9 +242,10 @@ static void sdma_v2_4_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
* Schedule an IB in the DMA ring (VI). * Schedule an IB in the DMA ring (VI).
*/ */
static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring, static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib) struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch)
{ {
u32 vmid = ib->vm_id & 0xf; u32 vmid = vm_id & 0xf;
u32 next_rptr = ring->wptr + 5; u32 next_rptr = ring->wptr + 5;
while ((next_rptr & 7) != 2) while ((next_rptr & 7) != 2)
@ -701,7 +702,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP); ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
ib.length_dw = 8; ib.length_dw = 8;
r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
if (r) if (r)
goto err1; goto err1;
@ -1230,6 +1231,7 @@ static int sdma_v2_4_set_powergating_state(void *handle,
} }
const struct amd_ip_funcs sdma_v2_4_ip_funcs = { const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
.name = "sdma_v2_4",
.early_init = sdma_v2_4_early_init, .early_init = sdma_v2_4_early_init,
.late_init = NULL, .late_init = NULL,
.sw_init = sdma_v2_4_sw_init, .sw_init = sdma_v2_4_sw_init,


@ -400,9 +400,10 @@ static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
* Schedule an IB in the DMA ring (VI). * Schedule an IB in the DMA ring (VI).
*/ */
static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring, static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib) struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch)
{ {
u32 vmid = ib->vm_id & 0xf; u32 vmid = vm_id & 0xf;
u32 next_rptr = ring->wptr + 5; u32 next_rptr = ring->wptr + 5;
while ((next_rptr & 7) != 2) while ((next_rptr & 7) != 2)
@ -925,7 +926,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP); ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
ib.length_dw = 8; ib.length_dw = 8;
r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f); r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
if (r) if (r)
goto err1; goto err1;
@ -1542,6 +1543,7 @@ static int sdma_v3_0_set_powergating_state(void *handle,
} }
const struct amd_ip_funcs sdma_v3_0_ip_funcs = { const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
.name = "sdma_v3_0",
.early_init = sdma_v3_0_early_init, .early_init = sdma_v3_0_early_init,
.late_init = NULL, .late_init = NULL,
.sw_init = sdma_v3_0_sw_init, .sw_init = sdma_v3_0_sw_init,


@ -143,6 +143,7 @@ static int tonga_dpm_set_powergating_state(void *handle,
} }
const struct amd_ip_funcs tonga_dpm_ip_funcs = { const struct amd_ip_funcs tonga_dpm_ip_funcs = {
.name = "tonga_dpm",
.early_init = tonga_dpm_early_init, .early_init = tonga_dpm_early_init,
.late_init = NULL, .late_init = NULL,
.sw_init = tonga_dpm_sw_init, .sw_init = tonga_dpm_sw_init,


@ -417,6 +417,7 @@ static int tonga_ih_set_powergating_state(void *handle,
} }
const struct amd_ip_funcs tonga_ih_ip_funcs = { const struct amd_ip_funcs tonga_ih_ip_funcs = {
.name = "tonga_ih",
.early_init = tonga_ih_early_init, .early_init = tonga_ih_early_init,
.late_init = NULL, .late_init = NULL,
.sw_init = tonga_ih_sw_init, .sw_init = tonga_ih_sw_init,


@ -489,7 +489,8 @@ static int uvd_v4_2_ring_test_ring(struct amdgpu_ring *ring)
* Write ring commands to execute the indirect buffer * Write ring commands to execute the indirect buffer
*/ */
static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring, static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib) struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch)
{ {
amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0)); amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
amdgpu_ring_write(ring, ib->gpu_addr); amdgpu_ring_write(ring, ib->gpu_addr);
@ -739,6 +740,7 @@ static int uvd_v4_2_set_powergating_state(void *handle,
} }
const struct amd_ip_funcs uvd_v4_2_ip_funcs = { const struct amd_ip_funcs uvd_v4_2_ip_funcs = {
.name = "uvd_v4_2",
.early_init = uvd_v4_2_early_init, .early_init = uvd_v4_2_early_init,
.late_init = NULL, .late_init = NULL,
.sw_init = uvd_v4_2_sw_init, .sw_init = uvd_v4_2_sw_init,


@ -539,7 +539,8 @@ static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
* Write ring commands to execute the indirect buffer * Write ring commands to execute the indirect buffer
*/ */
static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring, static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib) struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch)
{ {
amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0)); amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
@ -791,6 +792,7 @@ static int uvd_v5_0_set_powergating_state(void *handle,
} }
const struct amd_ip_funcs uvd_v5_0_ip_funcs = { const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
.name = "uvd_v5_0",
.early_init = uvd_v5_0_early_init, .early_init = uvd_v5_0_early_init,
.late_init = NULL, .late_init = NULL,
.sw_init = uvd_v5_0_sw_init, .sw_init = uvd_v5_0_sw_init,


@ -631,7 +631,8 @@ static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
* Write ring commands to execute the indirect buffer * Write ring commands to execute the indirect buffer
*/ */
static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring, static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib) struct amdgpu_ib *ib,
unsigned vm_id, bool ctx_switch)
{ {
amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0)); amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
@ -895,6 +896,7 @@ static int uvd_v6_0_set_powergating_state(void *handle,
} }
const struct amd_ip_funcs uvd_v6_0_ip_funcs = { const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
.name = "uvd_v6_0",
.early_init = uvd_v6_0_early_init, .early_init = uvd_v6_0_early_init,
.late_init = NULL, .late_init = NULL,
.sw_init = uvd_v6_0_sw_init, .sw_init = uvd_v6_0_sw_init,


@ -567,6 +567,7 @@ static int vce_v2_0_set_powergating_state(void *handle,
} }
const struct amd_ip_funcs vce_v2_0_ip_funcs = { const struct amd_ip_funcs vce_v2_0_ip_funcs = {
.name = "vce_v2_0",
.early_init = vce_v2_0_early_init, .early_init = vce_v2_0_early_init,
.late_init = NULL, .late_init = NULL,
.sw_init = vce_v2_0_sw_init, .sw_init = vce_v2_0_sw_init,


@ -40,9 +40,9 @@
#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04 #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04
#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10 #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618
#define VCE_V3_0_FW_SIZE (384 * 1024) #define VCE_V3_0_FW_SIZE (384 * 1024)
#define VCE_V3_0_STACK_SIZE (64 * 1024) #define VCE_V3_0_STACK_SIZE (64 * 1024)
@ -674,6 +674,7 @@ static int vce_v3_0_set_powergating_state(void *handle,
} }
const struct amd_ip_funcs vce_v3_0_ip_funcs = { const struct amd_ip_funcs vce_v3_0_ip_funcs = {
.name = "vce_v3_0",
.early_init = vce_v3_0_early_init, .early_init = vce_v3_0_early_init,
.late_init = NULL, .late_init = NULL,
.sw_init = vce_v3_0_sw_init, .sw_init = vce_v3_0_sw_init,


@ -1118,7 +1118,6 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
.get_xclk = &vi_get_xclk, .get_xclk = &vi_get_xclk,
.set_uvd_clocks = &vi_set_uvd_clocks, .set_uvd_clocks = &vi_set_uvd_clocks,
.set_vce_clocks = &vi_set_vce_clocks, .set_vce_clocks = &vi_set_vce_clocks,
.get_cu_info = &gfx_v8_0_get_cu_info,
/* these should be moved to their own ip modules */ /* these should be moved to their own ip modules */
.get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter, .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
.wait_for_mc_idle = &gmc_v8_0_mc_wait_for_idle, .wait_for_mc_idle = &gmc_v8_0_mc_wait_for_idle,
@ -1193,7 +1192,8 @@ static int vi_common_early_init(void *handle)
adev->external_rev_id = adev->rev_id + 0x50; adev->external_rev_id = adev->rev_id + 0x50;
break; break;
case CHIP_CARRIZO: case CHIP_CARRIZO:
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS | AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_RLC_LS |
AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_CP_LS |
@ -1408,6 +1408,7 @@ static int vi_common_set_powergating_state(void *handle,
} }
const struct amd_ip_funcs vi_common_ip_funcs = { const struct amd_ip_funcs vi_common_ip_funcs = {
.name = "vi_common",
.early_init = vi_common_early_init, .early_init = vi_common_early_init,
.late_init = NULL, .late_init = NULL,
.sw_init = vi_common_sw_init, .sw_init = vi_common_sw_init,


@ -54,7 +54,8 @@
#define AUD3_REGISTER_OFFSET (0x17b4 - 0x17a8) #define AUD3_REGISTER_OFFSET (0x17b4 - 0x17a8)
#define AUD4_REGISTER_OFFSET (0x17b8 - 0x17a8) #define AUD4_REGISTER_OFFSET (0x17b8 - 0x17a8)
#define AUD5_REGISTER_OFFSET (0x17bc - 0x17a8) #define AUD5_REGISTER_OFFSET (0x17bc - 0x17a8)
#define AUD6_REGISTER_OFFSET (0x17c4 - 0x17a8) #define AUD6_REGISTER_OFFSET (0x17c0 - 0x17a8)
#define AUD7_REGISTER_OFFSET (0x17c4 - 0x17a8)
/* hpd instance offsets */ /* hpd instance offsets */
#define HPD0_REGISTER_OFFSET (0x1898 - 0x1898) #define HPD0_REGISTER_OFFSET (0x1898 - 0x1898)
@ -365,7 +366,7 @@
#define VCE_CMD_IB 0x00000002 #define VCE_CMD_IB 0x00000002
#define VCE_CMD_FENCE 0x00000003 #define VCE_CMD_FENCE 0x00000003
#define VCE_CMD_TRAP 0x00000004 #define VCE_CMD_TRAP 0x00000004
#define VCE_CMD_IB_AUTO 0x00000005 #define VCE_CMD_IB_AUTO 0x00000005
#define VCE_CMD_SEMAPHORE 0x00000006 #define VCE_CMD_SEMAPHORE 0x00000006
#endif #endif


@ -143,6 +143,8 @@ enum amd_pm_state_type {
}; };
struct amd_ip_funcs { struct amd_ip_funcs {
/* Name of IP block */
char *name;
/* sets up early driver state (pre sw_init), does not configure hw - Optional */ /* sets up early driver state (pre sw_init), does not configure hw - Optional */
int (*early_init)(void *handle); int (*early_init)(void *handle);
/* sets up late driver/hw state (post hw_init) - Optional */ /* sets up late driver/hw state (post hw_init) - Optional */


@ -68,15 +68,18 @@ static int pp_sw_init(void *handle)
return -EINVAL; return -EINVAL;
ret = hwmgr->pptable_func->pptable_init(hwmgr); ret = hwmgr->pptable_func->pptable_init(hwmgr);
if (ret == 0)
ret = hwmgr->hwmgr_func->backend_init(hwmgr);
if (ret) if (ret)
printk(KERN_ERR "amdgpu: powerplay initialization failed\n"); goto err;
else
printk(KERN_INFO "amdgpu: powerplay initialized\n");
ret = hwmgr->hwmgr_func->backend_init(hwmgr);
if (ret)
goto err;
pr_info("amdgpu: powerplay initialized\n");
return 0;
err:
pr_err("amdgpu: powerplay initialization failed\n");
return ret; return ret;
} }
@ -340,6 +343,7 @@ static int pp_resume(void *handle)
} }
const struct amd_ip_funcs pp_ip_funcs = { const struct amd_ip_funcs pp_ip_funcs = {
.name = "powerplay",
.early_init = pp_early_init, .early_init = pp_early_init,
.late_init = NULL, .late_init = NULL,
.sw_init = pp_sw_init, .sw_init = pp_sw_init,


@ -465,14 +465,14 @@ static int fiji_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
table_info->vdd_dep_on_mclk; table_info->vdd_dep_on_mclk;
PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL, PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
"VDD dependency on SCLK table is missing. \ "VDD dependency on SCLK table is missing. \
This table is mandatory", return -EINVAL); This table is mandatory", return -EINVAL);
PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1, PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
"VDD dependency on SCLK table has to have is missing. \ "VDD dependency on SCLK table has to have is missing. \
This table is mandatory", return -EINVAL); This table is mandatory", return -EINVAL);
PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL, PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
"VDD dependency on MCLK table is missing. \ "VDD dependency on MCLK table is missing. \
This table is mandatory", return -EINVAL); This table is mandatory", return -EINVAL);
PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1, PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
"VDD dependency on MCLK table has to have is missing. \ "VDD dependency on MCLK table has to have is missing. \
@ -1898,16 +1898,16 @@ static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
return 0; return 0;
} }
static uint8_t fiji_get_sleep_divider_id_from_clock(struct pp_hwmgr *hwmgr, static uint8_t fiji_get_sleep_divider_id_from_clock(uint32_t clock,
uint32_t clock, uint32_t clock_insr) uint32_t clock_insr)
{ {
uint8_t i; uint8_t i;
uint32_t temp; uint32_t temp;
uint32_t min = clock_insr > 2500 ? clock_insr : 2500; uint32_t min = max(clock_insr, (uint32_t)FIJI_MINIMUM_ENGINE_CLOCK);
PP_ASSERT_WITH_CODE((clock >= min), "Engine clock can't satisfy stutter requirement!", return 0); PP_ASSERT_WITH_CODE((clock >= min), "Engine clock can't satisfy stutter requirement!", return 0);
for (i = FIJI_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) { for (i = FIJI_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
temp = clock / (1UL << i); temp = clock >> i;
if (temp >= min || i == 0) if (temp >= min || i == 0)
break; break;
@ -1961,7 +1961,7 @@ static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr; data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
level->DeepSleepDivId = fiji_get_sleep_divider_id_from_clock(hwmgr, clock, level->DeepSleepDivId = fiji_get_sleep_divider_id_from_clock(clock,
hwmgr->display_config.min_core_set_clock_in_sr); hwmgr->display_config.min_core_set_clock_in_sr);


@ -1175,11 +1175,11 @@ static int polaris10_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
if (phm_cap_enabled(hwmgr->platformDescriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) if (phm_cap_enabled(hwmgr->platformDescriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep))
level->DeepSleepDivId = PhwFiji_GetSleepDividerIdFromClock(hwmgr, clock, minClocks.engineClockInSR); level->DeepSleepDivId = PhwFiji_GetSleepDividerIdFromClock(hwmgr, clock, minClocks.engineClockInSR);
*/ */
PP_ASSERT_WITH_CODE((clock >= 2500), "Engine clock can't satisfy stutter requirement!", return 0); PP_ASSERT_WITH_CODE((clock >= POLARIS10_MINIMUM_ENGINE_CLOCK), "Engine clock can't satisfy stutter requirement!", return 0);
for (i = POLARIS10_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) { for (i = POLARIS10_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
temp = clock / (1UL << i); temp = clock >> i;
if (temp >= 2500 || i == 0) if (temp >= POLARIS10_MINIMUM_ENGINE_CLOCK || i == 0)
break; break;
} }
@ -2900,14 +2900,14 @@ static int polaris10_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
table_info->vdd_dep_on_mclk; table_info->vdd_dep_on_mclk;
PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL, PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
"VDD dependency on SCLK table is missing. \ "VDD dependency on SCLK table is missing. \
This table is mandatory", return -EINVAL); This table is mandatory", return -EINVAL);
PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1, PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
"VDD dependency on SCLK table has to have is missing. \ "VDD dependency on SCLK table has to have is missing. \
This table is mandatory", return -EINVAL); This table is mandatory", return -EINVAL);
PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL, PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
"VDD dependency on MCLK table is missing. \ "VDD dependency on MCLK table is missing. \
This table is mandatory", return -EINVAL); This table is mandatory", return -EINVAL);
PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1, PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
"VDD dependency on MCLK table has to have is missing. \ "VDD dependency on MCLK table has to have is missing. \
@ -4628,7 +4628,7 @@ int polaris10_upload_mc_firmware(struct pp_hwmgr *hwmgr)
data->need_long_memory_training = true; data->need_long_memory_training = true;
/* /*
* PPMCME_FirmwareDescriptorEntry *pfd = NULL; * PPMCME_FirmwareDescriptorEntry *pfd = NULL;
pfd = &tonga_mcmeFirmware; pfd = &tonga_mcmeFirmware;
if (0 == PHM_READ_FIELD(hwmgr->device, MC_SEQ_SUP_CNTL, RUN)) if (0 == PHM_READ_FIELD(hwmgr->device, MC_SEQ_SUP_CNTL, RUN))
polaris10_load_mc_microcode(hwmgr, pfd->dpmThreshold, polaris10_load_mc_microcode(hwmgr, pfd->dpmThreshold,


@ -1,3 +1,26 @@
/*
* Copyright 2016 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/errno.h> #include <linux/errno.h>
#include "linux/delay.h" #include "linux/delay.h"
#include "hwmgr.h" #include "hwmgr.h"


@ -1041,10 +1041,10 @@ int atomctrl_calculate_voltage_evv_on_sclk(
} }
/** atomctrl_get_voltage_evv_on_sclk gets voltage via call to ATOM COMMAND table. /** atomctrl_get_voltage_evv_on_sclk gets voltage via call to ATOM COMMAND table.
* @param hwmgr input: pointer to hwManager * @param hwmgr input: pointer to hwManager
* @param voltage_type input: type of EVV voltage VDDC or VDDGFX * @param voltage_type input: type of EVV voltage VDDC or VDDGFX
* @param sclk input: in 10Khz unit. DPM state SCLK frequency * @param sclk input: in 10Khz unit. DPM state SCLK frequency
* which is define in PPTable SCLK/VDDC dependence * which is define in PPTable SCLK/VDDC dependence
* table associated with this virtual_voltage_Id * table associated with this virtual_voltage_Id
* @param virtual_voltage_Id input: voltage id which match per voltage DPM state: 0xff01, 0xff02.. 0xff08 * @param virtual_voltage_Id input: voltage id which match per voltage DPM state: 0xff01, 0xff02.. 0xff08
* @param voltage output: real voltage level in unit of mv * @param voltage output: real voltage level in unit of mv


@@ -2416,18 +2416,17 @@ int tonga_calculate_sclk_params(struct pp_hwmgr *hwmgr,
return 0;
}
-static uint8_t tonga_get_sleep_divider_id_from_clock(struct pp_hwmgr *hwmgr,
-uint32_t engine_clock, uint32_t min_engine_clock_in_sr)
+static uint8_t tonga_get_sleep_divider_id_from_clock(uint32_t engine_clock,
+uint32_t min_engine_clock_in_sr)
{
uint32_t i, temp;
-uint32_t min = (min_engine_clock_in_sr > TONGA_MINIMUM_ENGINE_CLOCK) ?
-min_engine_clock_in_sr : TONGA_MINIMUM_ENGINE_CLOCK;
+uint32_t min = max(min_engine_clock_in_sr, (uint32_t)TONGA_MINIMUM_ENGINE_CLOCK);
PP_ASSERT_WITH_CODE((engine_clock >= min),
"Engine clock can't satisfy stutter requirement!", return 0);
for (i = TONGA_MAX_DEEPSLEEP_DIVIDER_ID;; i--) {
-temp = engine_clock / (1 << i);
+temp = engine_clock >> i;
if(temp >= min || i == 0)
break;
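Read on its own, the reworked helper above boils down to the following standalone sketch; the constants are assumed from the Tonga powerplay headers, and this is a paraphrase of the new code rather than a verbatim copy:

/* Hedged paraphrase of the simplified deep-sleep divider selection. */
static uint8_t sleep_divider_id_from_clock(uint32_t engine_clock,
                                           uint32_t min_engine_clock_in_sr)
{
        uint32_t i;
        /* Never let the stutter floor drop below the hardware minimum. */
        uint32_t min = max(min_engine_clock_in_sr,
                           (uint32_t)TONGA_MINIMUM_ENGINE_CLOCK);

        if (engine_clock < min)
                return 0;

        /* Largest divider first: engine_clock >> i must still meet the floor. */
        for (i = TONGA_MAX_DEEPSLEEP_DIVIDER_ID;; i--) {
                if ((engine_clock >> i) >= min || i == 0)
                        break;
        }

        return (uint8_t)i;
}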
@@ -2487,7 +2486,7 @@ static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr, uint32_t
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep))
graphic_level->DeepSleepDivId =
-tonga_get_sleep_divider_id_from_clock(hwmgr, engine_clock,
+tonga_get_sleep_divider_id_from_clock(engine_clock,
data->display_timing.min_clock_insr);
/* Default to slow, highest DPM level will be set to PPSMC_DISPLAY_WATERMARK_LOW later.*/
@@ -2683,7 +2682,7 @@ static int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
struct TONGA_DLL_SPEED_SETTING {
uint16_t Min; /* Minimum Data Rate*/
uint16_t Max; /* Maximum Data Rate*/
uint32_t dll_speed; /* The desired DLL_SPEED setting*/
};
static int tonga_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
@@ -3316,14 +3315,14 @@ static int tonga_set_private_var_based_on_pptale(struct pp_hwmgr *hwmgr)
pptable_info->vdd_dep_on_mclk;
PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
"VDD dependency on SCLK table is missing. \
This table is mandatory", return -1);
PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
"VDD dependency on SCLK table has to have is missing. \
This table is mandatory", return -1);
PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
"VDD dependency on MCLK table is missing. \
This table is mandatory", return -1);
PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
"VDD dependency on MCLK table has to have is missing. \


@@ -74,7 +74,7 @@ struct tonga_power_state {
};
struct _phw_tonga_dpm_level {
bool enabled;
uint32_t value;
uint32_t param1;
};
@@ -237,20 +237,20 @@ struct tonga_hwmgr {
irq_handler_func_t ctf_callback;
void *ctf_context;
phw_tonga_clock_registers clock_registers;
phw_tonga_voltage_smio_registers voltage_smio_registers;
bool is_memory_GDDR5;
uint16_t acpi_vddc;
bool pspp_notify_required; /* Flag to indicate if PSPP notification to SBIOS is required */
uint16_t force_pcie_gen; /* The forced PCI-E speed if not 0xffff */
uint16_t acpi_pcie_gen; /* The PCI-E speed at ACPI time */
uint32_t pcie_gen_cap; /* The PCI-E speed capabilities bitmap from CAIL */
uint32_t pcie_lane_cap; /* The PCI-E lane capabilities bitmap from CAIL */
uint32_t pcie_spc_cap; /* Symbol Per Clock Capabilities from registry */
phw_tonga_leakage_voltage vddc_leakage; /* The Leakage VDDC supported (based on leakage ID).*/
phw_tonga_leakage_voltage vddcgfx_leakage; /* The Leakage VDDC supported (based on leakage ID). */
phw_tonga_leakage_voltage vddci_leakage; /* The Leakage VDDCI supported (based on leakage ID). */
uint32_t mvdd_control;
uint32_t vddc_mask_low;
@@ -263,8 +263,8 @@ struct tonga_hwmgr {
uint32_t mclk_stutter_mode_threshold;
uint32_t mclk_edc_enable_threshold;
uint32_t mclk_edc_wr_enable_threshold;
bool is_uvd_enabled;
bool is_xdma_enabled;
phw_tonga_vbios_boot_state vbios_boot_state;
bool battery_state;


@@ -500,7 +500,7 @@ struct phm_dynamic_state_info {
struct phm_ppm_table *ppm_parameter_table;
struct phm_cac_tdp_table *cac_dtp_table;
struct phm_clock_voltage_dependency_table *vdd_gfx_dependency_on_sclk;
struct phm_vq_budgeting_table *vq_budgeting_table;
};
struct pp_fan_info {


@@ -1,3 +1,25 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
// CZ Ucode Loading Definitions
#ifndef SMU_UCODE_XFER_CZ_H
#define SMU_UCODE_XFER_CZ_H


@@ -74,7 +74,7 @@ struct amd_sched_fence {
struct amd_gpu_scheduler *sched;
spinlock_t lock;
void *owner;
struct amd_sched_job *s_job;
};
struct amd_sched_job {