Merge branch 'drm-next-4.17' of git://people.freedesktop.org/~agd5f/linux into drm-next

- Expose thermal thresholds through hwmon properly
- Rework HDP flushing for rings and CPU
- Improved dual-link DVI handling in DC
- Lots of code clean up
- Additional DC clean up
- Allow scanout from system memory on CZ/BR/ST
- Improved PASID/VM integration
- Expose GPU voltage and power via hwmon
- Initial wattman-like support
- Initial power profiles for use-case optimized performance
- Rework GPUVM TLB flushing
- Rework IP offset handling for SOC15 asics
- Add CRC support in DC
- Fixes for mmhub powergating
- Initial regamma/degamma/CTM support in DC
- ttm cleanups and simplifications
- ttm OOM avoidance fixes

* 'drm-next-4.17' of git://people.freedesktop.org/~agd5f/linux: (348 commits)
  Revert "drm/radeon/pm: autoswitch power state when in balanced mode"
  drm/radeon: use drm_gem_private_object_init
  drm/amdgpu: use drm_gem_private_object_init
  drm/amdgpu: mitigate workaround for i915
  drm/amdgpu: implement amdgpu_gem_map_(attach/detach)
  drm/amdgpu/powerplay/smu7: drop refresh rate checks for mclk switching
  drm/amdgpu/cgs: add refresh rate checking to non-DC display code
  drm/amd/powerplay/smu7: allow mclk switching with no displays
  drm/amd/powerplay/vega10: allow mclk switching with no displays
  drm/amd/powerplay: use PP_CAP macro for disable_mclk_switching_for_frame_lock
  drm/amd/powerplay: remove unused headers
  drm/amdgpu_gem: fix error handling path in amdgpu_gem_va_update_vm
  drm/amdgpu: update the PASID mapping only on demand
  drm/amdgpu: separate PASID mapping from VM flush v2
  drm/amd/display: Fix increment when sampling OTF in DCE
  drm/amd/display: De PQ implementation
  drm/amd/display: Remove unused dm_pp_ interfaces
  drm/amd/display: Add logging for aux DPCD access
  drm/amd/display: Set vsc pack revision when DPCD revision is >= 1.2
  drm/amd/display: provide an interface to query firmware version
  ...
Dave Airlie committed 2018-02-28 11:44:29 +10:00
commit 8bb5b22255
279 files changed, 7955 insertions(+), 14370 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile

@@ -63,7 +63,7 @@ amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
 amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o si_dpm.o si_smc.o
 
 amdgpu-y += \
-	vi.o mxgpu_vi.o nbio_v6_1.o soc15.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o
+	vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o
 
 # add GMC block
 amdgpu-y += \

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -68,6 +68,7 @@
 #include "amdgpu_vce.h"
 #include "amdgpu_vcn.h"
 #include "amdgpu_mn.h"
+#include "amdgpu_gmc.h"
 #include "amdgpu_dm.h"
 #include "amdgpu_virt.h"
 #include "amdgpu_gart.h"
@@ -127,6 +128,7 @@ extern int amdgpu_job_hang_limit;
 extern int amdgpu_lbpw;
 extern int amdgpu_compute_multipipe;
 extern int amdgpu_gpu_recovery;
+extern int amdgpu_emu_mode;
 
 #ifdef CONFIG_DRM_AMDGPU_SI
 extern int amdgpu_si_support;
@@ -318,13 +320,6 @@ struct amdgpu_vm_pte_funcs {
 	void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
 			  uint64_t value, unsigned count,
 			  uint32_t incr);
-
-	/* maximum nums of PTEs/PDEs in a single operation */
-	uint32_t	set_max_nums_pte_pde;
-
-	/* number of dw to reserve per operation */
-	unsigned	set_pte_pde_num_dw;
-
 	/* for linear pte/pde updates without addr mapping */
 	void (*set_pte_pde)(struct amdgpu_ib *ib,
 			    uint64_t pe,
@@ -332,28 +327,6 @@ struct amdgpu_vm_pte_funcs {
 			    uint32_t incr, uint64_t flags);
 };
 
-/* provided by the gmc block */
-struct amdgpu_gart_funcs {
-	/* flush the vm tlb via mmio */
-	void (*flush_gpu_tlb)(struct amdgpu_device *adev,
-			      uint32_t vmid);
-	/* write pte/pde updates using the cpu */
-	int (*set_pte_pde)(struct amdgpu_device *adev,
-			   void *cpu_pt_addr, /* cpu addr of page table */
-			   uint32_t gpu_page_idx, /* pte/pde to update */
-			   uint64_t addr, /* addr to write into pte/pde */
-			   uint64_t flags); /* access flags */
-	/* enable/disable PRT support */
-	void (*set_prt)(struct amdgpu_device *adev, bool enable);
-	/* set pte flags based per asic */
-	uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev,
-				     uint32_t flags);
-	/* get the pde for a given mc addr */
-	void (*get_vm_pde)(struct amdgpu_device *adev, int level,
-			   u64 *dst, u64 *flags);
-	uint32_t (*get_invalidate_req)(unsigned int vmid);
-};
-
 /* provided by the ih block */
 struct amdgpu_ih_funcs {
 	/* ring read/write ptr handling, called from interrupt context */
@@ -418,8 +391,8 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
 struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
 					struct drm_gem_object *gobj,
 					int flags);
-int amdgpu_gem_prime_pin(struct drm_gem_object *obj);
-void amdgpu_gem_prime_unpin(struct drm_gem_object *obj);
+struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
+					       struct dma_buf *dma_buf);
 struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
 void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
 void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
@@ -493,56 +466,6 @@ int amdgpu_mode_dumb_mmap(struct drm_file *filp,
 int amdgpu_fence_slab_init(void);
 void amdgpu_fence_slab_fini(void);
 
-/*
- * VMHUB structures, functions & helpers
- */
-struct amdgpu_vmhub {
-	uint32_t	ctx0_ptb_addr_lo32;
-	uint32_t	ctx0_ptb_addr_hi32;
-	uint32_t	vm_inv_eng0_req;
-	uint32_t	vm_inv_eng0_ack;
-	uint32_t	vm_context0_cntl;
-	uint32_t	vm_l2_pro_fault_status;
-	uint32_t	vm_l2_pro_fault_cntl;
-};
-
-/*
- * GPU MC structures, functions & helpers
- */
-struct amdgpu_mc {
-	resource_size_t		aper_size;
-	resource_size_t		aper_base;
-	resource_size_t		agp_base;
-	/* for some chips with <= 32MB we need to lie
-	 * about vram size near mc fb location */
-	u64			mc_vram_size;
-	u64			visible_vram_size;
-	u64			gart_size;
-	u64			gart_start;
-	u64			gart_end;
-	u64			vram_start;
-	u64			vram_end;
-	unsigned		vram_width;
-	u64			real_vram_size;
-	int			vram_mtrr;
-	u64			mc_mask;
-	const struct firmware	*fw;	/* MC firmware */
-	uint32_t		fw_version;
-	struct amdgpu_irq_src	vm_fault;
-	uint32_t		vram_type;
-	uint32_t		srbm_soft_reset;
-	bool			prt_warning;
-	uint64_t		stolen_size;
-	/* apertures */
-	u64			shared_aperture_start;
-	u64			shared_aperture_end;
-	u64			private_aperture_start;
-	u64			private_aperture_end;
-	/* protects concurrent invalidation */
-	spinlock_t		invalidate_lock;
-	bool			translate_further;
-};
-
 /*
  * GPU doorbell structures, functions & helpers
  */
@@ -1125,8 +1048,9 @@ struct amdgpu_job {
 	void			*owner;
 	uint64_t		fence_ctx; /* the fence_context this job uses */
 	bool			vm_needs_flush;
-	unsigned		vmid;
 	uint64_t		vm_pd_addr;
+	unsigned		vmid;
+	unsigned		pasid;
 	uint32_t		gds_base, gds_size;
 	uint32_t		gws_base, gws_size;
 	uint32_t		oa_base, oa_size;
@@ -1288,6 +1212,11 @@ struct amdgpu_asic_funcs {
 	void (*set_pcie_lanes)(struct amdgpu_device *adev, int lanes);
 	/* get config memsize register */
 	u32 (*get_config_memsize)(struct amdgpu_device *adev);
+	/* flush hdp write queue */
+	void (*flush_hdp)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
+	/* invalidate hdp read cache */
+	void (*invalidate_hdp)(struct amdgpu_device *adev,
+			       struct amdgpu_ring *ring);
 };
 
 /*
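The two new hooks generalize HDP maintenance that used to be ring-only (the removal of `emit_hdp_invalidate` appears further down): given a ring they can emit the operation into its command stream, and with a NULL ring they fall back to an immediate MMIO write. A minimal usage sketch of the wrapper macros added later in this file; the helper name is hypothetical:

	/*
	 * Hypothetical helper; only the amdgpu_asic_*_hdp() macros come from
	 * this patch. Flush HDP after GPU writes the CPU will read, and
	 * invalidate it before the GPU re-reads CPU-written memory. A NULL
	 * ring means "do it now via MMIO" instead of emitting ring commands.
	 */
	static void example_sync_hdp(struct amdgpu_device *adev,
				     struct amdgpu_ring *ring)
	{
		amdgpu_asic_flush_hdp(adev, ring);
		amdgpu_asic_invalidate_hdp(adev, ring);
	}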
@@ -1431,7 +1360,7 @@ struct amdgpu_nbio_funcs {
 	u32 (*get_pcie_data_offset)(struct amdgpu_device *adev);
 	u32 (*get_rev_id)(struct amdgpu_device *adev);
 	void (*mc_access_enable)(struct amdgpu_device *adev, bool enable);
-	void (*hdp_flush)(struct amdgpu_device *adev);
+	void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
 	u32 (*get_memsize)(struct amdgpu_device *adev);
 	void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance,
 				    bool use_doorbell, int doorbell_index);
@@ -1574,7 +1503,7 @@ struct amdgpu_device {
 	struct amdgpu_clock		clock;
 
 	/* MC */
-	struct amdgpu_mc		mc;
+	struct amdgpu_gmc		gmc;
 	struct amdgpu_gart		gart;
 	struct amdgpu_dummy_page	dummy_page;
 	struct amdgpu_vm_manager	vm_manager;
@@ -1726,6 +1655,8 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
 
+int emu_soc_asic_init(struct amdgpu_device *adev);
+
 /*
  * Registers read & write functions.
  */
@@ -1838,13 +1769,17 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
 #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
 #define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
-#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
-#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
-#define amdgpu_gart_get_vm_pde(adev, level, dst, flags) (adev)->gart.gart_funcs->get_vm_pde((adev), (level), (dst), (flags))
+#define amdgpu_asic_flush_hdp(adev, r) (adev)->asic_funcs->flush_hdp((adev), (r))
+#define amdgpu_asic_invalidate_hdp(adev, r) (adev)->asic_funcs->invalidate_hdp((adev), (r))
+#define amdgpu_gmc_flush_gpu_tlb(adev, vmid) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid))
+#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
+#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
+#define amdgpu_gmc_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gmc.gmc_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
+#define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
+#define amdgpu_gmc_get_pte_flags(adev, flags) (adev)->gmc.gmc_funcs->get_vm_pte_flags((adev),(flags))
 #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
 #define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
 #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
-#define amdgpu_vm_get_pte_flags(adev, flags) (adev)->gart.gart_funcs->get_vm_pte_flags((adev),(flags))
 #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
 #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
 #define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
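The new `emit_` variants let the GMC block drop the TLB flush and the VMID-to-PASID mapping update directly into a ring, which is what the "separate PASID mapping from VM flush v2" and "update the PASID mapping only on demand" commits in the shortlog build on. A rough sketch of the call pattern, simplified from what the VM flush path does; the control flow here is illustrative, not the driver's exact code:

	/*
	 * Illustrative only: flush the GPU TLB for a vmid on this ring and,
	 * when it changed, refresh the vmid->pasid mapping in the same stream.
	 */
	static void example_vm_flush(struct amdgpu_ring *ring, unsigned vmid,
				     unsigned pasid, uint64_t pd_addr,
				     bool pasid_mapping_needed)
	{
		amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
		if (pasid_mapping_needed)
			amdgpu_gmc_emit_pasid_mapping(ring, vmid, pasid);
	}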
@@ -1857,11 +1792,11 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
 #define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
 #define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
-#define amdgpu_ring_emit_hdp_invalidate(r) (r)->funcs->emit_hdp_invalidate((r))
 #define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
 #define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
 #define amdgpu_ring_emit_rreg(r, d) (r)->funcs->emit_rreg((r), (d))
 #define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
+#define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m))
 #define amdgpu_ring_emit_tmz(r, b) (r)->funcs->emit_tmz((r), (b))
 #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
 #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
@@ -1871,7 +1806,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
 #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
 #define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
-#define amdgpu_display_vblank_wait(adev, crtc) (adev)->mode_info.funcs->vblank_wait((adev), (crtc))
 #define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l))
 #define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e))
 #define amdgpu_display_hpd_sense(adev, h) (adev)->mode_info.funcs->hpd_sense((adev), (h))
@@ -1894,16 +1828,16 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 			      struct amdgpu_job* job, bool force);
 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
 bool amdgpu_device_need_post(struct amdgpu_device *adev);
-void amdgpu_update_display_priority(struct amdgpu_device *adev);
+void amdgpu_display_update_priority(struct amdgpu_device *adev);
 
 void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
 				  u64 num_vis_bytes);
 void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
 bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
 void amdgpu_device_vram_location(struct amdgpu_device *adev,
-				 struct amdgpu_mc *mc, u64 base);
+				 struct amdgpu_gmc *mc, u64 base);
 void amdgpu_device_gart_location(struct amdgpu_device *adev,
-				 struct amdgpu_mc *mc);
+				 struct amdgpu_gmc *mc);
 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev);
 void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
 int amdgpu_ttm_init(struct amdgpu_device *adev);

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c

@@ -216,8 +216,7 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
 		return -ENOMEM;
 
 	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT,
-			     AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, 0,
-			     &(*mem)->bo);
+			     AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, &(*mem)->bo);
 	if (r) {
 		dev_err(adev->dev,
 			"failed to allocate BO for amdkfd (%d)\n", r);
@@ -281,24 +280,29 @@ void get_local_mem_info(struct kgd_dev *kgd,
 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 	uint64_t address_mask = adev->dev->dma_mask ? ~*adev->dev->dma_mask :
 					     ~((1ULL << 32) - 1);
-	resource_size_t aper_limit = adev->mc.aper_base + adev->mc.aper_size;
+	resource_size_t aper_limit = adev->gmc.aper_base + adev->gmc.aper_size;
 
 	memset(mem_info, 0, sizeof(*mem_info));
-	if (!(adev->mc.aper_base & address_mask || aper_limit & address_mask)) {
-		mem_info->local_mem_size_public = adev->mc.visible_vram_size;
-		mem_info->local_mem_size_private = adev->mc.real_vram_size -
-				adev->mc.visible_vram_size;
+	if (!(adev->gmc.aper_base & address_mask || aper_limit & address_mask)) {
+		mem_info->local_mem_size_public = adev->gmc.visible_vram_size;
+		mem_info->local_mem_size_private = adev->gmc.real_vram_size -
+				adev->gmc.visible_vram_size;
 	} else {
 		mem_info->local_mem_size_public = 0;
-		mem_info->local_mem_size_private = adev->mc.real_vram_size;
+		mem_info->local_mem_size_private = adev->gmc.real_vram_size;
 	}
-	mem_info->vram_width = adev->mc.vram_width;
+	mem_info->vram_width = adev->gmc.vram_width;
 
 	pr_debug("Address base: %pap limit %pap public 0x%llx private 0x%llx\n",
-			&adev->mc.aper_base, &aper_limit,
+			&adev->gmc.aper_base, &aper_limit,
 			mem_info->local_mem_size_public,
 			mem_info->local_mem_size_private);
 
+	if (amdgpu_emu_mode == 1) {
+		mem_info->mem_clk_max = 100;
+		return;
+	}
+
 	if (amdgpu_sriov_vf(adev))
 		mem_info->mem_clk_max = adev->clock.default_mclk / 100;
 	else
@@ -319,6 +323,9 @@ uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 
 	/* the sclk is in quantas of 10kHz */
+	if (amdgpu_emu_mode == 1)
+		return 100;
+
 	if (amdgpu_sriov_vf(adev))
 		return adev->clock.default_sclk / 100;

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c

@@ -568,6 +568,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
 	/* HG _PR3 doesn't seem to work on this A+A weston board */
 	{ 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
 	{ 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
+	{ 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
 	{ 0, 0, 0, 0, 0 },
 };

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c

@@ -81,7 +81,7 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
 
 	n = AMDGPU_BENCHMARK_ITERATIONS;
 	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL,
-			     NULL, 0, &sobj);
+			     NULL, &sobj);
 	if (r) {
 		goto out_cleanup;
 	}
@@ -94,7 +94,7 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
 		goto out_cleanup;
 	}
 	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL,
-			     NULL, 0, &dobj);
+			     NULL, &dobj);
 	if (r) {
 		goto out_cleanup;
 	}

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c

@@ -233,8 +233,10 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
 	for (i = 0; i < list->num_entries; i++) {
 		unsigned priority = list->array[i].priority;
 
-		list_add_tail(&list->array[i].tv.head,
-			      &bucket[priority]);
+		if (!list->array[i].robj->parent)
+			list_add_tail(&list->array[i].tv.head,
+				      &bucket[priority]);
+
 		list->array[i].user_pages = NULL;
 	}

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c

@@ -109,7 +109,7 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
 	*handle = 0;
 
 	ret = amdgpu_bo_create(adev, size, align, true, domain, flags,
-			       NULL, NULL, 0, &obj);
+			       NULL, NULL, &obj);
 	if (ret) {
 		DRM_ERROR("(%d) bo create failed\n", ret);
 		return ret;
@@ -953,6 +953,11 @@ static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
 					(amdgpu_crtc->v_border * 2);
 				mode_info->vblank_time_us = vblank_lines * line_time_us;
 				mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
+				/* we have issues with mclk switching with refresh rates
+				 * over 120 hz on the non-DC code.
+				 */
+				if (mode_info->refresh_rate > 120)
+					mode_info->vblank_time_us = 0;
 				mode_info = NULL;
 			}
 		}
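For background: powerplay compares `vblank_time_us` against the memory clock switch latency to decide whether mclk can be changed without visible corruption, so forcing it to zero effectively vetoes mclk switching for displays above 120 Hz on the non-DC path. A heavily simplified sketch of that decision; the constant and function are illustrative, the real checks live in the per-ASIC hwmgr code:

	/* Hypothetical latency figure; real values are per-ASIC. */
	#define EXAMPLE_MCLK_SWITCH_LATENCY_US	500

	/* With vblank_time_us forced to 0, this always disables switching. */
	static bool example_disable_mclk_switching(uint32_t vblank_time_us)
	{
		return vblank_time_us < EXAMPLE_MCLK_SWITCH_LATENCY_US;
	}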
@@ -1187,6 +1192,18 @@ static int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
 	return amdgpu_cgs_acpi_eval_object(cgs_device, &info);
 }
 
+static int amdgpu_cgs_set_temperature_range(struct cgs_device *cgs_device,
+					    int min_temperature,
+					    int max_temperature)
+{
+	CGS_FUNC_ADEV;
+
+	adev->pm.dpm.thermal.min_temp = min_temperature;
+	adev->pm.dpm.thermal.max_temp = max_temperature;
+
+	return 0;
+}
+
 static const struct cgs_ops amdgpu_cgs_ops = {
 	.alloc_gpu_mem = amdgpu_cgs_alloc_gpu_mem,
 	.free_gpu_mem = amdgpu_cgs_free_gpu_mem,
@@ -1214,6 +1231,7 @@ static const struct cgs_ops amdgpu_cgs_ops = {
 	.enter_safe_mode = amdgpu_cgs_enter_safe_mode,
 	.lock_grbm_idx = amdgpu_cgs_lock_grbm_idx,
 	.register_pp_handle = amdgpu_cgs_register_pp_handle,
+	.set_temperature_range = amdgpu_cgs_set_temperature_range,
 };
 
 static const struct cgs_os_ops amdgpu_cgs_os_ops = {
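The new `set_temperature_range` callback is how the "expose thermal thresholds through hwmon properly" item works: powerplay publishes the thermal controller's limits into `adev->pm.dpm.thermal`, where the hwmon `temp1_min`/`temp1_max` attributes read them back in millidegrees Celsius. A sketch of a hwmgr-side call, assuming the matching `cgs_set_temperature_range()` wrapper on the powerplay side; the values are illustrative:

	/*
	 * Sketch: hand the ASIC's thermal limits to the driver when the
	 * thermal controller starts (range in millidegrees Celsius;
	 * the numbers here are made up for illustration).
	 */
	static int example_start_thermal_controller(struct cgs_device *cgs_device)
	{
		return cgs_set_temperature_range(cgs_device, 0, 99000);
	}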

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c

@@ -877,7 +877,7 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
 	ret = connector_status_disconnected;
 
 	if (amdgpu_connector->ddc_bus)
-		dret = amdgpu_ddc_probe(amdgpu_connector, false);
+		dret = amdgpu_display_ddc_probe(amdgpu_connector, false);
 	if (dret) {
 		amdgpu_connector->detected_by_load = false;
 		amdgpu_connector_free_edid(connector);
@@ -998,7 +998,7 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
 	}
 
 	if (amdgpu_connector->ddc_bus)
-		dret = amdgpu_ddc_probe(amdgpu_connector, false);
+		dret = amdgpu_display_ddc_probe(amdgpu_connector, false);
 	if (dret) {
 		amdgpu_connector->detected_by_load = false;
 		amdgpu_connector_free_edid(connector);
@@ -1401,7 +1401,8 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
 		/* setup ddc on the bridge */
 		amdgpu_atombios_encoder_setup_ext_encoder_ddc(encoder);
 		/* bridge chips are always aux */
-		if (amdgpu_ddc_probe(amdgpu_connector, true)) /* try DDC */
+		/* try DDC */
+		if (amdgpu_display_ddc_probe(amdgpu_connector, true))
 			ret = connector_status_connected;
 		else if (amdgpu_connector->dac_load_detect) { /* try load detection */
 			const struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
@@ -1421,7 +1422,8 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
 				ret = connector_status_connected;
 		} else {
 			/* try non-aux ddc (DP to DVI/HDMI/etc. adapter) */
-			if (amdgpu_ddc_probe(amdgpu_connector, false))
+			if (amdgpu_display_ddc_probe(amdgpu_connector,
+						     false))
 				ret = connector_status_connected;
 		}
 	}

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c

@@ -257,7 +257,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
 		return;
 	}
 
-	total_vram = adev->mc.real_vram_size - adev->vram_pin_size;
+	total_vram = adev->gmc.real_vram_size - adev->vram_pin_size;
 	used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
 	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
 
@@ -302,8 +302,8 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
 	*max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);
 
 	/* Do the same for visible VRAM if half of it is free */
-	if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
-		u64 total_vis_vram = adev->mc.visible_vram_size;
+	if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size) {
+		u64 total_vis_vram = adev->gmc.visible_vram_size;
 		u64 used_vis_vram =
 			amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
 
@@ -359,7 +359,7 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
 	 * to move it. Don't move anything if the threshold is zero.
 	 */
 	if (p->bytes_moved < p->bytes_moved_threshold) {
-		if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
+		if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
 		    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
 			/* And don't move a CPU_ACCESS_REQUIRED BO to limited
 			 * visible VRAM if we've depleted our allowance to do
@@ -381,9 +381,9 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 	p->bytes_moved += ctx.bytes_moved;
-	if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
+	if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
 	    bo->tbo.mem.mem_type == TTM_PL_VRAM &&
-	    bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT)
+	    bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
 		p->bytes_moved_vis += ctx.bytes_moved;
 
 	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
@@ -437,9 +437,9 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
 		/* Good we can try to move this BO somewhere else */
 		amdgpu_ttm_placement_from_domain(bo, other);
 		update_bytes_moved_vis =
-			adev->mc.visible_vram_size < adev->mc.real_vram_size &&
+			adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
 			bo->tbo.mem.mem_type == TTM_PL_VRAM &&
-			bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT;
+			bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT;
 		initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
 		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 		bytes_moved = atomic64_read(&adev->num_bytes_moved) -
@@ -542,7 +542,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 	INIT_LIST_HEAD(&duplicates);
 	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
 
-	if (p->uf_entry.robj)
+	if (p->uf_entry.robj && !p->uf_entry.robj->parent)
 		list_add(&p->uf_entry.tv.head, &p->validated);
 
 	while (1) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

@@ -544,7 +544,7 @@ void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
  * as parameter.
  */
 void amdgpu_device_vram_location(struct amdgpu_device *adev,
-				 struct amdgpu_mc *mc, u64 base)
+				 struct amdgpu_gmc *mc, u64 base)
 {
 	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
 
@@ -570,11 +570,11 @@ void amdgpu_device_vram_location(struct amdgpu_device *adev,
  * FIXME: when reducing GTT size align new size on power of 2.
  */
 void amdgpu_device_gart_location(struct amdgpu_device *adev,
-				 struct amdgpu_mc *mc)
+				 struct amdgpu_gmc *mc)
 {
 	u64 size_af, size_bf;
 
-	size_af = adev->mc.mc_mask - mc->vram_end;
+	size_af = adev->gmc.mc_mask - mc->vram_end;
 	size_bf = mc->vram_start;
 	if (size_bf > size_af) {
 		if (mc->gart_size > size_bf) {
@@ -608,7 +608,7 @@ void amdgpu_device_gart_location(struct amdgpu_device *adev,
  */
 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
 {
-	u64 space_needed = roundup_pow_of_two(adev->mc.real_vram_size);
+	u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size);
 	u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
 	struct pci_bus *root;
 	struct resource *res;
@@ -1036,7 +1036,7 @@ int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
 	if (!ip_block_version)
 		return -EINVAL;
 
-	DRM_DEBUG("add ip block number %d <%s>\n", adev->num_ip_blocks,
-		  ip_block_version->funcs->name);
+	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
+		 ip_block_version->funcs->name);
 
 	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
 
@@ -1310,6 +1310,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 			return r;
 		}
 		adev->ip_blocks[i].status.sw = true;
+
 		/* need to do gmc hw init early so we can allocate gpu mem */
 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
 			r = amdgpu_device_vram_scratch_init(adev);
@@ -1343,8 +1344,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 	for (i = 0; i < adev->num_ip_blocks; i++) {
 		if (!adev->ip_blocks[i].status.sw)
 			continue;
-		/* gmc hw init is done early */
-		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
+		if (adev->ip_blocks[i].status.hw)
 			continue;
 		r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
 		if (r) {
@@ -1378,6 +1378,9 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
 {
 	int i = 0, r;
 
+	if (amdgpu_emu_mode == 1)
+		return 0;
+
 	for (i = 0; i < adev->num_ip_blocks; i++) {
 		if (!adev->ip_blocks[i].status.valid)
 			continue;
@@ -1483,6 +1486,9 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
 		adev->ip_blocks[i].status.hw = false;
 	}
 
+	/* disable all interrupts */
+	amdgpu_irq_disable_all(adev);
+
 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
 		if (!adev->ip_blocks[i].status.sw)
 			continue;
@@ -1701,6 +1707,8 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
 	case CHIP_BONAIRE:
 	case CHIP_HAWAII:
 	case CHIP_KAVERI:
+	case CHIP_KABINI:
+	case CHIP_MULLINS:
 	case CHIP_CARRIZO:
 	case CHIP_STONEY:
 	case CHIP_POLARIS11:
@@ -1711,9 +1719,6 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
 #if defined(CONFIG_DRM_AMD_DC_PRE_VEGA)
 		return amdgpu_dc != 0;
 #endif
-	case CHIP_KABINI:
-	case CHIP_MULLINS:
-		return amdgpu_dc > 0;
 	case CHIP_VEGA10:
 #if defined(CONFIG_DRM_AMD_DC_DCN1_0)
 	case CHIP_RAVEN:
@@ -1768,14 +1773,16 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	adev->flags = flags;
 	adev->asic_type = flags & AMD_ASIC_MASK;
 	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
-	adev->mc.gart_size = 512 * 1024 * 1024;
+	if (amdgpu_emu_mode == 1)
+		adev->usec_timeout *= 2;
+	adev->gmc.gart_size = 512 * 1024 * 1024;
 	adev->accel_working = false;
 	adev->num_rings = 0;
 	adev->mman.buffer_funcs = NULL;
 	adev->mman.buffer_funcs_ring = NULL;
 	adev->vm_manager.vm_pte_funcs = NULL;
 	adev->vm_manager.vm_pte_num_rings = 0;
-	adev->gart.gart_funcs = NULL;
+	adev->gmc.gmc_funcs = NULL;
 	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
 	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
 
@@ -1882,6 +1889,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	if (runtime)
 		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
 
+	if (amdgpu_emu_mode == 1) {
+		/* post the asic on emulation mode */
+		emu_soc_asic_init(adev);
+		goto fence_driver_init;
+	}
+
 	/* Read BIOS */
 	if (!amdgpu_get_bios(adev)) {
 		r = -EINVAL;
@@ -1934,6 +1947,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 		amdgpu_atombios_i2c_init(adev);
 	}
 
+fence_driver_init:
 	/* Fence driver */
 	r = amdgpu_fence_driver_init(adev);
 	if (r) {
@@ -2076,7 +2090,10 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 	/* free i2c buses */
 	if (!amdgpu_device_has_dc_support(adev))
 		amdgpu_i2c_fini(adev);
-	amdgpu_atombios_fini(adev);
+
+	if (amdgpu_emu_mode != 1)
+		amdgpu_atombios_fini(adev);
+
 	kfree(adev->bios);
 	adev->bios = NULL;
 	if (!pci_is_thunderbolt_attached(adev->pdev))
@@ -2284,14 +2301,6 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
 			}
 			drm_modeset_unlock_all(dev);
-		} else {
-			/*
-			 * There is no equivalent atomic helper to turn on
-			 * display, so we defined our own function for this,
-			 * once suspend resume is supported by the atomic
-			 * framework this will be reworked
-			 */
-			amdgpu_dm_display_resume(adev);
 		}
 	}
 
@@ -2726,7 +2735,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 	if (amdgpu_device_has_dc_support(adev)) {
 		if (drm_atomic_helper_resume(adev->ddev, state))
 			dev_info(adev->dev, "drm resume failed:%d\n", r);
-		amdgpu_dm_display_resume(adev);
 	} else {
 		drm_helper_resume_force_mode(adev->ddev);
 	}

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c

@@ -29,6 +29,7 @@
 #include "amdgpu_i2c.h"
 #include "atom.h"
 #include "amdgpu_connectors.h"
+#include "amdgpu_display.h"
 #include <asm/div64.h>
 
 #include <linux/pm_runtime.h>
@@ -36,7 +37,8 @@
 #include <drm/drm_edid.h>
 #include <drm/drm_fb_helper.h>
 
-static void amdgpu_flip_callback(struct dma_fence *f, struct dma_fence_cb *cb)
+static void amdgpu_display_flip_callback(struct dma_fence *f,
+					 struct dma_fence_cb *cb)
 {
 	struct amdgpu_flip_work *work =
 		container_of(cb, struct amdgpu_flip_work, cb);
@@ -45,8 +47,8 @@ static void amdgpu_flip_callback(struct dma_fence *f, struct dma_fence_cb *cb)
 	schedule_work(&work->flip_work.work);
 }
 
-static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
-				     struct dma_fence **f)
+static bool amdgpu_display_flip_handle_fence(struct amdgpu_flip_work *work,
+					     struct dma_fence **f)
 {
 	struct dma_fence *fence= *f;
 
@@ -55,14 +57,15 @@ static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
 	*f = NULL;
 
-	if (!dma_fence_add_callback(fence, &work->cb, amdgpu_flip_callback))
+	if (!dma_fence_add_callback(fence, &work->cb,
+				    amdgpu_display_flip_callback))
 		return true;
 
 	dma_fence_put(fence);
 	return false;
 }
 
-static void amdgpu_flip_work_func(struct work_struct *__work)
+static void amdgpu_display_flip_work_func(struct work_struct *__work)
 {
 	struct delayed_work *delayed_work =
 		container_of(__work, struct delayed_work, work);
@@ -76,20 +79,20 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
 	unsigned i;
 	int vpos, hpos;
 
-	if (amdgpu_flip_handle_fence(work, &work->excl))
+	if (amdgpu_display_flip_handle_fence(work, &work->excl))
 		return;
 
 	for (i = 0; i < work->shared_count; ++i)
-		if (amdgpu_flip_handle_fence(work, &work->shared[i]))
+		if (amdgpu_display_flip_handle_fence(work, &work->shared[i]))
 			return;
 
 	/* Wait until we're out of the vertical blank period before the one
 	 * targeted by the flip
 	 */
 	if (amdgpu_crtc->enabled &&
-	    (amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0,
-					&vpos, &hpos, NULL, NULL,
-					&crtc->hwmode)
+	    (amdgpu_display_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0,
+						&vpos, &hpos, NULL, NULL,
+						&crtc->hwmode)
 	     & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
 	    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
 	    (int)(work->target_vblank -
@@ -117,7 +120,7 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
 /*
  * Handle unpin events outside the interrupt handler proper.
  */
-static void amdgpu_unpin_work_func(struct work_struct *__work)
+static void amdgpu_display_unpin_work_func(struct work_struct *__work)
 {
 	struct amdgpu_flip_work *work =
 		container_of(__work, struct amdgpu_flip_work, unpin_work);
@@ -139,11 +142,11 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
 	kfree(work);
 }
 
-int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
-				 struct drm_framebuffer *fb,
-				 struct drm_pending_vblank_event *event,
-				 uint32_t page_flip_flags, uint32_t target,
-				 struct drm_modeset_acquire_ctx *ctx)
+int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
+					 struct drm_framebuffer *fb,
+					 struct drm_pending_vblank_event *event,
+					 uint32_t page_flip_flags, uint32_t target,
+					 struct drm_modeset_acquire_ctx *ctx)
 {
 	struct drm_device *dev = crtc->dev;
 	struct amdgpu_device *adev = dev->dev_private;
@@ -162,8 +165,8 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
 	if (work == NULL)
 		return -ENOMEM;
 
-	INIT_DELAYED_WORK(&work->flip_work, amdgpu_flip_work_func);
-	INIT_WORK(&work->unpin_work, amdgpu_unpin_work_func);
+	INIT_DELAYED_WORK(&work->flip_work, amdgpu_display_flip_work_func);
+	INIT_WORK(&work->unpin_work, amdgpu_display_unpin_work_func);
 
 	work->event = event;
 	work->adev = adev;
@@ -189,7 +192,7 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
 		goto cleanup;
 	}
 
-	r = amdgpu_bo_pin(new_abo, AMDGPU_GEM_DOMAIN_VRAM, &base);
+	r = amdgpu_bo_pin(new_abo, amdgpu_display_framebuffer_domains(adev), &base);
 	if (unlikely(r != 0)) {
 		DRM_ERROR("failed to pin new abo buffer before flip\n");
 		goto unreserve;
@@ -228,7 +231,7 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
 	/* update crtc fb */
 	crtc->primary->fb = fb;
 	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-	amdgpu_flip_work_func(&work->flip_work.work);
+	amdgpu_display_flip_work_func(&work->flip_work.work);
 	return 0;
 
 pflip_cleanup:
@@ -254,8 +257,8 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
 	return r;
 }
 
-int amdgpu_crtc_set_config(struct drm_mode_set *set,
-			   struct drm_modeset_acquire_ctx *ctx)
+int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
+				   struct drm_modeset_acquire_ctx *ctx)
 {
 	struct drm_device *dev;
 	struct amdgpu_device *adev;
@@ -352,7 +355,7 @@ static const char *hpd_names[6] = {
 	"HPD6",
 };
 
-void amdgpu_print_display_setup(struct drm_device *dev)
+void amdgpu_display_print_display_setup(struct drm_device *dev)
 {
 	struct drm_connector *connector;
 	struct amdgpu_connector *amdgpu_connector;
@@ -429,11 +432,11 @@ void amdgpu_print_display_setup(struct drm_device *dev)
 }
 
 /**
- * amdgpu_ddc_probe
+ * amdgpu_display_ddc_probe
  *
  */
-bool amdgpu_ddc_probe(struct amdgpu_connector *amdgpu_connector,
-		      bool use_aux)
+bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
+			      bool use_aux)
 {
 	u8 out = 0x0;
 	u8 buf[8];
@@ -479,7 +482,7 @@ bool amdgpu_ddc_probe(struct amdgpu_connector *amdgpu_connector,
 	return true;
 }
 
-static void amdgpu_user_framebuffer_destroy(struct drm_framebuffer *fb)
+static void amdgpu_display_user_framebuffer_destroy(struct drm_framebuffer *fb)
 {
 	struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb);
 
@@ -488,9 +491,10 @@ static void amdgpu_user_framebuffer_destroy(struct drm_framebuffer *fb)
 	kfree(amdgpu_fb);
 }
 
-static int amdgpu_user_framebuffer_create_handle(struct drm_framebuffer *fb,
-						 struct drm_file *file_priv,
-						 unsigned int *handle)
+static int amdgpu_display_user_framebuffer_create_handle(
+			struct drm_framebuffer *fb,
+			struct drm_file *file_priv,
+			unsigned int *handle)
 {
 	struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb);
 
@@ -498,15 +502,28 @@ static int amdgpu_user_framebuffer_create_handle(struct drm_framebuffer *fb,
 }
 
 static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
-	.destroy = amdgpu_user_framebuffer_destroy,
-	.create_handle = amdgpu_user_framebuffer_create_handle,
+	.destroy = amdgpu_display_user_framebuffer_destroy,
+	.create_handle = amdgpu_display_user_framebuffer_create_handle,
 };
 
-int
-amdgpu_framebuffer_init(struct drm_device *dev,
-			struct amdgpu_framebuffer *rfb,
-			const struct drm_mode_fb_cmd2 *mode_cmd,
-			struct drm_gem_object *obj)
+uint32_t amdgpu_display_framebuffer_domains(struct amdgpu_device *adev)
+{
+	uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
+
+#if defined(CONFIG_DRM_AMD_DC)
+	if (adev->asic_type >= CHIP_CARRIZO && adev->asic_type < CHIP_RAVEN &&
+	    adev->flags & AMD_IS_APU &&
+	    amdgpu_device_asic_has_dc_support(adev->asic_type))
+		domain |= AMDGPU_GEM_DOMAIN_GTT;
+#endif
+
+	return domain;
+}
+
+int amdgpu_display_framebuffer_init(struct drm_device *dev,
+				    struct amdgpu_framebuffer *rfb,
+				    const struct drm_mode_fb_cmd2 *mode_cmd,
+				    struct drm_gem_object *obj)
 {
 	int ret;
 	rfb->obj = obj;
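`amdgpu_display_framebuffer_domains()` is what enables the "allow scanout from system memory on CZ/BR/ST" item from the shortlog: on those DC-driven APUs the display engine can scan out of GTT, so scanout buffers may be pinned as VRAM|GTT rather than VRAM-only. The page-flip hunk above already uses it; below is a minimal sketch of the same pattern with a hypothetical helper name (note `amdgpu_bo_pin()` still took a `gpu_addr` out-parameter in this era):

	/*
	 * Pin a scanout BO wherever scanout is legal on this ASIC:
	 * VRAM everywhere, VRAM|GTT on CZ/ST-class APUs driven by DC.
	 */
	static int example_pin_scanout_bo(struct amdgpu_device *adev,
					  struct amdgpu_bo *abo, u64 *gpu_addr)
	{
		return amdgpu_bo_pin(abo,
				     amdgpu_display_framebuffer_domains(adev),
				     gpu_addr);
	}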
@@ -520,9 +537,9 @@ amdgpu_framebuffer_init(struct drm_device *dev,
 }
 
 struct drm_framebuffer *
-amdgpu_user_framebuffer_create(struct drm_device *dev,
-			       struct drm_file *file_priv,
-			       const struct drm_mode_fb_cmd2 *mode_cmd)
+amdgpu_display_user_framebuffer_create(struct drm_device *dev,
+				       struct drm_file *file_priv,
+				       const struct drm_mode_fb_cmd2 *mode_cmd)
 {
 	struct drm_gem_object *obj;
 	struct amdgpu_framebuffer *amdgpu_fb;
@@ -547,7 +564,7 @@ amdgpu_user_framebuffer_create(struct drm_device *dev,
 		return ERR_PTR(-ENOMEM);
 	}
 
-	ret = amdgpu_framebuffer_init(dev, amdgpu_fb, mode_cmd, obj);
+	ret = amdgpu_display_framebuffer_init(dev, amdgpu_fb, mode_cmd, obj);
 	if (ret) {
 		kfree(amdgpu_fb);
 		drm_gem_object_put_unlocked(obj);
@@ -558,7 +575,7 @@ amdgpu_user_framebuffer_create(struct drm_device *dev,
 }
 
 const struct drm_mode_config_funcs amdgpu_mode_funcs = {
-	.fb_create = amdgpu_user_framebuffer_create,
+	.fb_create = amdgpu_display_user_framebuffer_create,
 	.output_poll_changed = drm_fb_helper_output_poll_changed,
 };
 
@@ -580,7 +597,7 @@ static const struct drm_prop_enum_list amdgpu_dither_enum_list[] =
 	{ AMDGPU_FMT_DITHER_ENABLE, "on" },
 };
 
-int amdgpu_modeset_create_props(struct amdgpu_device *adev)
+int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
 {
 	int sz;
 
@@ -629,7 +646,7 @@ int amdgpu_modeset_create_props(struct amdgpu_device *adev)
 	return 0;
 }
 
-void amdgpu_update_display_priority(struct amdgpu_device *adev)
+void amdgpu_display_update_priority(struct amdgpu_device *adev)
 {
 	/* adjustment options for the display watermarks */
 	if ((amdgpu_disp_priority == 0) || (amdgpu_disp_priority > 2))
@@ -639,7 +656,7 @@ void amdgpu_update_display_priority(struct amdgpu_device *adev)
 
 }
 
-static bool is_hdtv_mode(const struct drm_display_mode *mode)
+static bool amdgpu_display_is_hdtv_mode(const struct drm_display_mode *mode)
 {
 	/* try and guess if this is a tv or a monitor */
 	if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
@@ -651,9 +668,9 @@ static bool is_hdtv_mode(const struct drm_display_mode *mode)
 		return false;
 }
 
-bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
-				    const struct drm_display_mode *mode,
-				    struct drm_display_mode *adjusted_mode)
+bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
+					    const struct drm_display_mode *mode,
+					    struct drm_display_mode *adjusted_mode)
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_encoder *encoder;
@@ -696,7 +713,7 @@ bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
 		    ((amdgpu_encoder->underscan_type == UNDERSCAN_ON) ||
 		     ((amdgpu_encoder->underscan_type == UNDERSCAN_AUTO) &&
 		      drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) &&
-		      is_hdtv_mode(mode)))) {
+		      amdgpu_display_is_hdtv_mode(mode)))) {
 			if (amdgpu_encoder->underscan_hborder != 0)
 				amdgpu_crtc->h_border = amdgpu_encoder->underscan_hborder;
 			else
@@ -764,10 +781,10 @@ bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
  * unknown small number of scanlines wrt. real scanout position.
  *
  */
-int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
-			       unsigned int flags, int *vpos, int *hpos,
-			       ktime_t *stime, ktime_t *etime,
-			       const struct drm_display_mode *mode)
+int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
+			unsigned int pipe, unsigned int flags, int *vpos,
+			int *hpos, ktime_t *stime, ktime_t *etime,
+			const struct drm_display_mode *mode)
 {
 	u32 vbl = 0, position = 0;
 	int vbl_start, vbl_end, vtotal, ret = 0;
@@ -859,7 +876,7 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
 	return ret;
 }
 
-int amdgpu_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc)
+int amdgpu_display_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc)
 {
 	if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
 		return AMDGPU_CRTC_IRQ_NONE;

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h

@@ -23,9 +23,10 @@
 #ifndef __AMDGPU_DISPLAY_H__
 #define __AMDGPU_DISPLAY_H__
 
+uint32_t amdgpu_display_framebuffer_domains(struct amdgpu_device *adev);
 struct drm_framebuffer *
-amdgpu_user_framebuffer_create(struct drm_device *dev,
-			       struct drm_file *file_priv,
-			       const struct drm_mode_fb_cmd2 *mode_cmd);
+amdgpu_display_user_framebuffer_create(struct drm_device *dev,
+				       struct drm_file *file_priv,
+				       const struct drm_mode_fb_cmd2 *mode_cmd);
 
 #endif

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h

@ -265,9 +265,6 @@ enum amdgpu_pcie_gen {
#define amdgpu_dpm_read_sensor(adev, idx, value, size) \ #define amdgpu_dpm_read_sensor(adev, idx, value, size) \
((adev)->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle, (idx), (value), (size))) ((adev)->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle, (idx), (value), (size)))
#define amdgpu_dpm_get_temperature(adev) \
((adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle))
#define amdgpu_dpm_set_fan_control_mode(adev, m) \ #define amdgpu_dpm_set_fan_control_mode(adev, m) \
((adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m))) ((adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)))
@ -328,8 +325,8 @@ enum amdgpu_pcie_gen {
#define amdgpu_dpm_set_mclk_od(adev, value) \ #define amdgpu_dpm_set_mclk_od(adev, value) \
((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value)) ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
#define amdgpu_dpm_dispatch_task(adev, task_id, input, output) \ #define amdgpu_dpm_dispatch_task(adev, task_id, user_state) \
((adev)->powerplay.pp_funcs->dispatch_tasks)((adev)->powerplay.pp_handle, (task_id), (input), (output)) ((adev)->powerplay.pp_funcs->dispatch_tasks)((adev)->powerplay.pp_handle, (task_id), (user_state))
#define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \ #define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \
((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal))) ((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal)))
@ -366,6 +363,22 @@ enum amdgpu_pcie_gen {
(adev)->powerplay.pp_handle, virtual_addr_low, \ (adev)->powerplay.pp_handle, virtual_addr_low, \
virtual_addr_hi, mc_addr_low, mc_addr_hi, size) virtual_addr_hi, mc_addr_low, mc_addr_hi, size)
#define amdgpu_dpm_get_power_profile_mode(adev, buf) \
((adev)->powerplay.pp_funcs->get_power_profile_mode(\
(adev)->powerplay.pp_handle, buf))
#define amdgpu_dpm_set_power_profile_mode(adev, parameter, size) \
((adev)->powerplay.pp_funcs->set_power_profile_mode(\
(adev)->powerplay.pp_handle, parameter, size))
#define amdgpu_dpm_odn_edit_dpm_table(adev, type, parameter, size) \
((adev)->powerplay.pp_funcs->odn_edit_dpm_table(\
(adev)->powerplay.pp_handle, type, parameter, size))
#define amdgpu_dpm_set_mmhub_powergating_by_smu(adev) \
((adev)->powerplay.pp_funcs->set_mmhub_powergating_by_smu( \
(adev)->powerplay.pp_handle))
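The new wrappers, like the existing ones above them, assume the underlying powerplay callback is populated. A minimal sketch of a guarded caller (example_show_power_profile is a hypothetical name, not part of this series):

/* Hypothetical caller (not part of this series): guard the new
 * wrapper with a callback check before dispatching through it. */
static ssize_t example_show_power_profile(struct amdgpu_device *adev, char *buf)
{
	if (!adev->powerplay.pp_funcs ||
	    !adev->powerplay.pp_funcs->get_power_profile_mode)
		return -EOPNOTSUPP;

	return amdgpu_dpm_get_power_profile_mode(adev, buf);
}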
struct amdgpu_dpm { struct amdgpu_dpm {
struct amdgpu_ps *ps; struct amdgpu_ps *ps;
/* number of valid power states */ /* number of valid power states */

View File

@ -73,9 +73,11 @@
* - 3.21.0 - Add DRM_AMDGPU_FENCE_TO_HANDLE ioctl * - 3.21.0 - Add DRM_AMDGPU_FENCE_TO_HANDLE ioctl
* - 3.22.0 - Add DRM_AMDGPU_SCHED ioctl * - 3.22.0 - Add DRM_AMDGPU_SCHED ioctl
* - 3.23.0 - Add query for VRAM lost counter * - 3.23.0 - Add query for VRAM lost counter
* - 3.24.0 - Add high priority compute support for gfx9
* - 3.25.0 - Add support for sensor query info (stable pstate sclk/mclk).
*/ */
#define KMS_DRIVER_MAJOR 3 #define KMS_DRIVER_MAJOR 3
#define KMS_DRIVER_MINOR 23 #define KMS_DRIVER_MINOR 25
#define KMS_DRIVER_PATCHLEVEL 0 #define KMS_DRIVER_PATCHLEVEL 0
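Userspace that wants the 3.25 additions (the stable-pstate sensor queries below) can gate on the bumped KMS version. A sketch using libdrm's drmGetVersion(); the open DRM file descriptor fd is an assumption here:

/* Userspace sketch using libdrm; "fd" is assumed to be an open
 * amdgpu DRM file descriptor. */
#include <xf86drm.h>

static int has_stable_pstate_query(int fd)
{
	drmVersionPtr ver = drmGetVersion(fd);
	int ok;

	if (!ver)
		return 0;
	ok = ver->version_major > 3 ||
	     (ver->version_major == 3 && ver->version_minor >= 25);
	drmFreeVersion(ver);
	return ok;
}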
int amdgpu_vram_limit = 0; int amdgpu_vram_limit = 0;
@ -119,7 +121,7 @@ uint amdgpu_pg_mask = 0xffffffff;
uint amdgpu_sdma_phase_quantum = 32; uint amdgpu_sdma_phase_quantum = 32;
char *amdgpu_disable_cu = NULL; char *amdgpu_disable_cu = NULL;
char *amdgpu_virtual_display = NULL; char *amdgpu_virtual_display = NULL;
uint amdgpu_pp_feature_mask = 0xffffffff; uint amdgpu_pp_feature_mask = 0x3fff;
int amdgpu_ngg = 0; int amdgpu_ngg = 0;
int amdgpu_prim_buf_per_se = 0; int amdgpu_prim_buf_per_se = 0;
int amdgpu_pos_buf_per_se = 0; int amdgpu_pos_buf_per_se = 0;
@ -129,6 +131,7 @@ int amdgpu_job_hang_limit = 0;
int amdgpu_lbpw = -1; int amdgpu_lbpw = -1;
int amdgpu_compute_multipipe = -1; int amdgpu_compute_multipipe = -1;
int amdgpu_gpu_recovery = -1; /* auto */ int amdgpu_gpu_recovery = -1; /* auto */
int amdgpu_emu_mode = 0;
MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes"); MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@ -284,6 +287,9 @@ module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444);
MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (1 = enable, 0 = disable, -1 = auto)"); MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (1 = enable, 0 = disable, -1 = auto)");
module_param_named(gpu_recovery, amdgpu_gpu_recovery, int, 0444); module_param_named(gpu_recovery, amdgpu_gpu_recovery, int, 0444);
MODULE_PARM_DESC(emu_mode, "Emulation mode, (1 = enable, 0 = disable)");
module_param_named(emu_mode, amdgpu_emu_mode, int, 0444);
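Since the parameter is registered with mode 0444, the current value is readable back from sysfs. A small userspace sketch, assuming the amdgpu module is loaded:

/* Userspace sketch: module_param_named(..., 0444) exposes the value
 * read-only under /sys/module/amdgpu/parameters/emu_mode. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/module/amdgpu/parameters/emu_mode", "r");
	int val;

	if (!f)
		return 1;
	if (fscanf(f, "%d", &val) == 1)
		printf("amdgpu emu_mode = %d\n", val);
	fclose(f);
	return 0;
}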
#ifdef CONFIG_DRM_AMDGPU_SI #ifdef CONFIG_DRM_AMDGPU_SI
#if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE) #if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)
@ -576,6 +582,11 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
struct drm_device *dev; struct drm_device *dev;
unsigned long flags = ent->driver_data; unsigned long flags = ent->driver_data;
int ret, retry = 0; int ret, retry = 0;
bool supports_atomic = false;
if (!amdgpu_virtual_display &&
amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK))
supports_atomic = true;
if ((flags & AMD_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) { if ((flags & AMD_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) {
DRM_INFO("This hardware requires experimental hardware support.\n" DRM_INFO("This hardware requires experimental hardware support.\n"
@ -596,6 +607,13 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
if (ret) if (ret)
return ret; return ret;
/* warn the user if they mix atomic and non-atomic capable GPUs */
if ((kms_driver.driver_features & DRIVER_ATOMIC) && !supports_atomic)
DRM_ERROR("Mixing atomic and non-atomic capable GPUs!\n");
/* support atomic early so the atomic debugfs stuff gets created */
if (supports_atomic)
kms_driver.driver_features |= DRIVER_ATOMIC;
dev = drm_dev_alloc(&kms_driver, &pdev->dev); dev = drm_dev_alloc(&kms_driver, &pdev->dev);
if (IS_ERR(dev)) if (IS_ERR(dev))
return PTR_ERR(dev); return PTR_ERR(dev);
@ -835,8 +853,8 @@ amdgpu_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe,
ktime_t *stime, ktime_t *etime, ktime_t *stime, ktime_t *etime,
const struct drm_display_mode *mode) const struct drm_display_mode *mode)
{ {
return amdgpu_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos, return amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
stime, etime, mode); stime, etime, mode);
} }
static struct drm_driver kms_driver = { static struct drm_driver kms_driver = {
@ -854,9 +872,6 @@ static struct drm_driver kms_driver = {
.disable_vblank = amdgpu_disable_vblank_kms, .disable_vblank = amdgpu_disable_vblank_kms,
.get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos, .get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
.get_scanout_position = amdgpu_get_crtc_scanout_position, .get_scanout_position = amdgpu_get_crtc_scanout_position,
.irq_preinstall = amdgpu_irq_preinstall,
.irq_postinstall = amdgpu_irq_postinstall,
.irq_uninstall = amdgpu_irq_uninstall,
.irq_handler = amdgpu_irq_handler, .irq_handler = amdgpu_irq_handler,
.ioctls = amdgpu_ioctls_kms, .ioctls = amdgpu_ioctls_kms,
.gem_free_object_unlocked = amdgpu_gem_object_free, .gem_free_object_unlocked = amdgpu_gem_object_free,
@ -869,9 +884,7 @@ static struct drm_driver kms_driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle, .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = amdgpu_gem_prime_export, .gem_prime_export = amdgpu_gem_prime_export,
.gem_prime_import = drm_gem_prime_import, .gem_prime_import = amdgpu_gem_prime_import,
.gem_prime_pin = amdgpu_gem_prime_pin,
.gem_prime_unpin = amdgpu_gem_prime_unpin,
.gem_prime_res_obj = amdgpu_gem_prime_res_obj, .gem_prime_res_obj = amdgpu_gem_prime_res_obj,
.gem_prime_get_sg_table = amdgpu_gem_prime_get_sg_table, .gem_prime_get_sg_table = amdgpu_gem_prime_get_sg_table,
.gem_prime_import_sg_table = amdgpu_gem_prime_import_sg_table, .gem_prime_import_sg_table = amdgpu_gem_prime_import_sg_table,

View File

@ -38,6 +38,8 @@
#include <linux/vga_switcheroo.h> #include <linux/vga_switcheroo.h>
#include "amdgpu_display.h"
/* object hierarchy - /* object hierarchy -
this contains a helper + a amdgpu fb this contains a helper + a amdgpu fb
the helper contains a pointer to amdgpu framebuffer baseclass. the helper contains a pointer to amdgpu framebuffer baseclass.
@ -124,7 +126,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
struct drm_gem_object *gobj = NULL; struct drm_gem_object *gobj = NULL;
struct amdgpu_bo *abo = NULL; struct amdgpu_bo *abo = NULL;
bool fb_tiled = false; /* useful for testing */ bool fb_tiled = false; /* useful for testing */
u32 tiling_flags = 0; u32 tiling_flags = 0, domain;
int ret; int ret;
int aligned_size, size; int aligned_size, size;
int height = mode_cmd->height; int height = mode_cmd->height;
@ -135,12 +137,12 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
/* need to align pitch with crtc limits */ /* need to align pitch with crtc limits */
mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, cpp, mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, cpp,
fb_tiled); fb_tiled);
domain = amdgpu_display_framebuffer_domains(adev);
height = ALIGN(mode_cmd->height, 8); height = ALIGN(mode_cmd->height, 8);
size = mode_cmd->pitches[0] * height; size = mode_cmd->pitches[0] * height;
aligned_size = ALIGN(size, PAGE_SIZE); aligned_size = ALIGN(size, PAGE_SIZE);
ret = amdgpu_gem_object_create(adev, aligned_size, 0, ret = amdgpu_gem_object_create(adev, aligned_size, 0, domain,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
AMDGPU_GEM_CREATE_VRAM_CLEARED, AMDGPU_GEM_CREATE_VRAM_CLEARED,
@ -166,7 +168,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
} }
ret = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, NULL); ret = amdgpu_bo_pin(abo, domain, NULL);
if (ret) { if (ret) {
amdgpu_bo_unreserve(abo); amdgpu_bo_unreserve(abo);
goto out_unref; goto out_unref;
@ -225,7 +227,8 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
info->par = rfbdev; info->par = rfbdev;
info->skip_vt_switch = true; info->skip_vt_switch = true;
ret = amdgpu_framebuffer_init(adev->ddev, &rfbdev->rfb, &mode_cmd, gobj); ret = amdgpu_display_framebuffer_init(adev->ddev, &rfbdev->rfb,
&mode_cmd, gobj);
if (ret) { if (ret) {
DRM_ERROR("failed to initialize framebuffer %d\n", ret); DRM_ERROR("failed to initialize framebuffer %d\n", ret);
goto out; goto out;
@ -242,8 +245,8 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
info->fbops = &amdgpufb_ops; info->fbops = &amdgpufb_ops;
tmp = amdgpu_bo_gpu_offset(abo) - adev->mc.vram_start; tmp = amdgpu_bo_gpu_offset(abo) - adev->gmc.vram_start;
info->fix.smem_start = adev->mc.aper_base + tmp; info->fix.smem_start = adev->gmc.aper_base + tmp;
info->fix.smem_len = amdgpu_bo_size(abo); info->fix.smem_len = amdgpu_bo_size(abo);
info->screen_base = amdgpu_bo_kptr(abo); info->screen_base = amdgpu_bo_kptr(abo);
info->screen_size = amdgpu_bo_size(abo); info->screen_size = amdgpu_bo_size(abo);
@ -252,7 +255,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
/* setup aperture base/size for vesafb takeover */ /* setup aperture base/size for vesafb takeover */
info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base; info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base;
info->apertures->ranges[0].size = adev->mc.aper_size; info->apertures->ranges[0].size = adev->gmc.aper_size;
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
@ -262,7 +265,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
} }
DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start); DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
DRM_INFO("vram apper at 0x%lX\n", (unsigned long)adev->mc.aper_base); DRM_INFO("vram apper at 0x%lX\n", (unsigned long)adev->gmc.aper_base);
DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(abo)); DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(abo));
DRM_INFO("fb depth is %d\n", fb->format->depth); DRM_INFO("fb depth is %d\n", fb->format->depth);
DRM_INFO(" pitch is %d\n", fb->pitches[0]); DRM_INFO(" pitch is %d\n", fb->pitches[0]);
@ -319,7 +322,7 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev)
return 0; return 0;
/* select 8 bpp console on low vram cards */ /* select 8 bpp console on low vram cards */
if (adev->mc.real_vram_size <= (32*1024*1024)) if (adev->gmc.real_vram_size <= (32*1024*1024))
bpp_sel = 8; bpp_sel = 8;
rfbdev = kzalloc(sizeof(struct amdgpu_fbdev), GFP_KERNEL); rfbdev = kzalloc(sizeof(struct amdgpu_fbdev), GFP_KERNEL);

View File

@ -120,7 +120,7 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, 0, &adev->gart.robj); NULL, NULL, &adev->gart.robj);
if (r) { if (r) {
return r; return r;
} }
@ -241,13 +241,14 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
continue; continue;
for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) { for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
amdgpu_gart_set_pte_pde(adev, adev->gart.ptr, amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr,
t, page_base, flags); t, page_base, flags);
page_base += AMDGPU_GPU_PAGE_SIZE; page_base += AMDGPU_GPU_PAGE_SIZE;
} }
} }
mb(); mb();
amdgpu_gart_flush_gpu_tlb(adev, 0); amdgpu_asic_flush_hdp(adev, NULL);
amdgpu_gmc_flush_gpu_tlb(adev, 0);
return 0; return 0;
} }
@ -279,7 +280,7 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
for (i = 0; i < pages; i++) { for (i = 0; i < pages; i++) {
page_base = dma_addr[i]; page_base = dma_addr[i];
for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) { for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
amdgpu_gart_set_pte_pde(adev, dst, t, page_base, flags); amdgpu_gmc_set_pte_pde(adev, dst, t, page_base, flags);
page_base += AMDGPU_GPU_PAGE_SIZE; page_base += AMDGPU_GPU_PAGE_SIZE;
} }
} }
@ -329,7 +330,8 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
return r; return r;
mb(); mb();
amdgpu_gart_flush_gpu_tlb(adev, 0); amdgpu_asic_flush_hdp(adev, NULL);
amdgpu_gmc_flush_gpu_tlb(adev, 0);
return 0; return 0;
} }
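The rework replaces the per-ring TLB flush helper with an explicit three-step sequence; condensed into a hypothetical helper for clarity (both calls appear with these signatures in this series):

/* Hypothetical condensation of the sequence used above: order the
 * CPU's PTE writes, flush the HDP cache, then invalidate the TLB. */
static void example_gart_flush(struct amdgpu_device *adev)
{
	mb();                              /* make PTE writes visible */
	amdgpu_asic_flush_hdp(adev, NULL); /* flush host data path cache */
	amdgpu_gmc_flush_gpu_tlb(adev, 0); /* invalidate VMID 0 mappings */
}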
@ -357,8 +359,8 @@ int amdgpu_gart_init(struct amdgpu_device *adev)
if (r) if (r)
return r; return r;
/* Compute table size */ /* Compute table size */
adev->gart.num_cpu_pages = adev->mc.gart_size / PAGE_SIZE; adev->gart.num_cpu_pages = adev->gmc.gart_size / PAGE_SIZE;
adev->gart.num_gpu_pages = adev->mc.gart_size / AMDGPU_GPU_PAGE_SIZE; adev->gart.num_gpu_pages = adev->gmc.gart_size / AMDGPU_GPU_PAGE_SIZE;
DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n", DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
adev->gart.num_cpu_pages, adev->gart.num_gpu_pages); adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);

View File

@ -31,7 +31,6 @@
*/ */
struct amdgpu_device; struct amdgpu_device;
struct amdgpu_bo; struct amdgpu_bo;
struct amdgpu_gart_funcs;
#define AMDGPU_GPU_PAGE_SIZE 4096 #define AMDGPU_GPU_PAGE_SIZE 4096
#define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1) #define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1)
@ -52,8 +51,6 @@ struct amdgpu_gart {
/* Asic default pte flags */ /* Asic default pte flags */
uint64_t gart_pte_flags; uint64_t gart_pte_flags;
const struct amdgpu_gart_funcs *gart_funcs;
}; };
int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev); int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);

View File

@ -60,7 +60,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
retry: retry:
r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain, r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
flags, NULL, resv, 0, &bo); flags, NULL, resv, &bo);
if (r) { if (r) {
if (r != -ERESTARTSYS) { if (r != -ERESTARTSYS) {
if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) { if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
@ -523,12 +523,13 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
goto error; goto error;
if (operation == AMDGPU_VA_OP_MAP || if (operation == AMDGPU_VA_OP_MAP ||
operation == AMDGPU_VA_OP_REPLACE) operation == AMDGPU_VA_OP_REPLACE) {
r = amdgpu_vm_bo_update(adev, bo_va, false); r = amdgpu_vm_bo_update(adev, bo_va, false);
if (r)
goto error;
}
r = amdgpu_vm_update_directories(adev, vm); r = amdgpu_vm_update_directories(adev, vm);
if (r)
goto error;
error: error:
if (r && r != -ERESTARTSYS) if (r && r != -ERESTARTSYS)
@ -634,7 +635,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
if (r) if (r)
goto error_backoff; goto error_backoff;
va_flags = amdgpu_vm_get_pte_flags(adev, args->flags); va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
r = amdgpu_vm_bo_map(adev, bo_va, args->va_address, r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
args->offset_in_bo, args->map_size, args->offset_in_bo, args->map_size,
va_flags); va_flags);
@ -654,7 +655,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
if (r) if (r)
goto error_backoff; goto error_backoff;
va_flags = amdgpu_vm_get_pte_flags(adev, args->flags); va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address, r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
args->offset_in_bo, args->map_size, args->offset_in_bo, args->map_size,
va_flags); va_flags);

View File

@ -0,0 +1,112 @@
/*
* Copyright 2018 Advanced Micro Devices, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*/
#ifndef __AMDGPU_GMC_H__
#define __AMDGPU_GMC_H__
#include <linux/types.h>
#include "amdgpu_irq.h"
struct firmware;
/*
* VMHUB structures, functions & helpers
*/
struct amdgpu_vmhub {
uint32_t ctx0_ptb_addr_lo32;
uint32_t ctx0_ptb_addr_hi32;
uint32_t vm_inv_eng0_req;
uint32_t vm_inv_eng0_ack;
uint32_t vm_context0_cntl;
uint32_t vm_l2_pro_fault_status;
uint32_t vm_l2_pro_fault_cntl;
};
/*
* GPU MC structures, functions & helpers
*/
struct amdgpu_gmc_funcs {
/* flush the vm tlb via mmio */
void (*flush_gpu_tlb)(struct amdgpu_device *adev,
uint32_t vmid);
/* flush the vm tlb via ring */
uint64_t (*emit_flush_gpu_tlb)(struct amdgpu_ring *ring, unsigned vmid,
uint64_t pd_addr);
/* Change the VMID -> PASID mapping */
void (*emit_pasid_mapping)(struct amdgpu_ring *ring, unsigned vmid,
unsigned pasid);
/* write pte/pde updates using the cpu */
int (*set_pte_pde)(struct amdgpu_device *adev,
void *cpu_pt_addr, /* cpu addr of page table */
uint32_t gpu_page_idx, /* pte/pde to update */
uint64_t addr, /* addr to write into pte/pde */
uint64_t flags); /* access flags */
/* enable/disable PRT support */
void (*set_prt)(struct amdgpu_device *adev, bool enable);
/* set the pte flags per asic */
uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev,
uint32_t flags);
/* get the pde for a given mc addr */
void (*get_vm_pde)(struct amdgpu_device *adev, int level,
u64 *dst, u64 *flags);
};
struct amdgpu_gmc {
resource_size_t aper_size;
resource_size_t aper_base;
/* for some chips with <= 32MB we need to lie
* about vram size near mc fb location */
u64 mc_vram_size;
u64 visible_vram_size;
u64 gart_size;
u64 gart_start;
u64 gart_end;
u64 vram_start;
u64 vram_end;
unsigned vram_width;
u64 real_vram_size;
int vram_mtrr;
u64 mc_mask;
const struct firmware *fw; /* MC firmware */
uint32_t fw_version;
struct amdgpu_irq_src vm_fault;
uint32_t vram_type;
uint32_t srbm_soft_reset;
bool prt_warning;
uint64_t stolen_size;
/* apertures */
u64 shared_aperture_start;
u64 shared_aperture_end;
u64 private_aperture_start;
u64 private_aperture_end;
/* protects concurrent invalidation */
spinlock_t invalidate_lock;
bool translate_further;
const struct amdgpu_gmc_funcs *gmc_funcs;
};
#endif
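IP-independent code reaches these hooks through thin wrapper macros; a sketch of the dispatch pattern, with wrapper names inferred from their uses elsewhere in this diff (amdgpu_gart.c, amdgpu_gem.c), not quoted from amdgpu.h:

/* Sketch of the dispatch pattern; the wrapper names are inferred from
 * their uses elsewhere in this diff, not quoted from amdgpu.h. */
#define amdgpu_gmc_flush_gpu_tlb(adev, vmid) \
	((adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid)))
#define amdgpu_gmc_set_pte_pde(adev, pt, idx, addr, flags) \
	((adev)->gmc.gmc_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags)))
#define amdgpu_gmc_get_pte_flags(adev, flags) \
	((adev)->gmc.gmc_funcs->get_vm_pte_flags((adev), (flags)))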

View File

@ -56,7 +56,7 @@ static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
return -ENOMEM; return -ENOMEM;
start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS; start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
size = (adev->mc.gart_size >> PAGE_SHIFT) - start; size = (adev->gmc.gart_size >> PAGE_SHIFT) - start;
drm_mm_init(&mgr->mm, start, size); drm_mm_init(&mgr->mm, start, size);
spin_lock_init(&mgr->lock); spin_lock_init(&mgr->lock);
atomic64_set(&mgr->available, p_size); atomic64_set(&mgr->available, p_size);
@ -75,7 +75,7 @@ static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man) static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
{ {
struct amdgpu_gtt_mgr *mgr = man->priv; struct amdgpu_gtt_mgr *mgr = man->priv;
spin_lock(&mgr->lock);
drm_mm_takedown(&mgr->mm); drm_mm_takedown(&mgr->mm);
spin_unlock(&mgr->lock); spin_unlock(&mgr->lock);
kfree(mgr); kfree(mgr);

View File

@ -184,12 +184,15 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
if (ring->funcs->init_cond_exec) if (ring->funcs->init_cond_exec)
patch_offset = amdgpu_ring_init_cond_exec(ring); patch_offset = amdgpu_ring_init_cond_exec(ring);
if (ring->funcs->emit_hdp_flush
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
&& !(adev->flags & AMD_IS_APU) if (!(adev->flags & AMD_IS_APU))
#endif #endif
) {
amdgpu_ring_emit_hdp_flush(ring); if (ring->funcs->emit_hdp_flush)
amdgpu_ring_emit_hdp_flush(ring);
else
amdgpu_asic_flush_hdp(adev, ring);
}
skip_preamble = ring->current_ctx == fence_ctx; skip_preamble = ring->current_ctx == fence_ctx;
need_ctx_switch = ring->current_ctx != fence_ctx; need_ctx_switch = ring->current_ctx != fence_ctx;
@ -219,12 +222,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
if (ring->funcs->emit_tmz) if (ring->funcs->emit_tmz)
amdgpu_ring_emit_tmz(ring, false); amdgpu_ring_emit_tmz(ring, false);
if (ring->funcs->emit_hdp_invalidate
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
&& !(adev->flags & AMD_IS_APU) if (!(adev->flags & AMD_IS_APU))
#endif #endif
) amdgpu_asic_invalidate_hdp(adev, ring);
amdgpu_ring_emit_hdp_invalidate(ring);
r = amdgpu_fence_emit(ring, f); r = amdgpu_fence_emit(ring, f);
if (r) { if (r) {

View File

@ -40,6 +40,12 @@
*/ */
static DEFINE_IDA(amdgpu_pasid_ida); static DEFINE_IDA(amdgpu_pasid_ida);
/* Helper to free pasid from a fence callback */
struct amdgpu_pasid_cb {
struct dma_fence_cb cb;
unsigned int pasid;
};
/** /**
* amdgpu_pasid_alloc - Allocate a PASID * amdgpu_pasid_alloc - Allocate a PASID
* @bits: Maximum width of the PASID in bits, must be at least 1 * @bits: Maximum width of the PASID in bits, must be at least 1
@ -63,6 +69,9 @@ int amdgpu_pasid_alloc(unsigned int bits)
break; break;
} }
if (pasid >= 0)
trace_amdgpu_pasid_allocated(pasid);
return pasid; return pasid;
} }
@ -72,9 +81,86 @@ int amdgpu_pasid_alloc(unsigned int bits)
*/ */
void amdgpu_pasid_free(unsigned int pasid) void amdgpu_pasid_free(unsigned int pasid)
{ {
trace_amdgpu_pasid_freed(pasid);
ida_simple_remove(&amdgpu_pasid_ida, pasid); ida_simple_remove(&amdgpu_pasid_ida, pasid);
} }
static void amdgpu_pasid_free_cb(struct dma_fence *fence,
struct dma_fence_cb *_cb)
{
struct amdgpu_pasid_cb *cb =
container_of(_cb, struct amdgpu_pasid_cb, cb);
amdgpu_pasid_free(cb->pasid);
dma_fence_put(fence);
kfree(cb);
}
/**
* amdgpu_pasid_free_delayed - free pasid when fences signal
*
* @resv: reservation object with the fences to wait for
* @pasid: pasid to free
*
* Free the pasid only after all the fences in resv are signaled.
*/
void amdgpu_pasid_free_delayed(struct reservation_object *resv,
unsigned int pasid)
{
struct dma_fence *fence, **fences;
struct amdgpu_pasid_cb *cb;
unsigned count;
int r;
r = reservation_object_get_fences_rcu(resv, NULL, &count, &fences);
if (r)
goto fallback;
if (count == 0) {
amdgpu_pasid_free(pasid);
return;
}
if (count == 1) {
fence = fences[0];
kfree(fences);
} else {
uint64_t context = dma_fence_context_alloc(1);
struct dma_fence_array *array;
array = dma_fence_array_create(count, fences, context,
1, false);
if (!array) {
kfree(fences);
goto fallback;
}
fence = &array->base;
}
cb = kmalloc(sizeof(*cb), GFP_KERNEL);
if (!cb) {
/* Last resort when we are OOM */
dma_fence_wait(fence, false);
dma_fence_put(fence);
amdgpu_pasid_free(pasid);
} else {
cb->pasid = pasid;
if (dma_fence_add_callback(fence, &cb->cb,
amdgpu_pasid_free_cb))
amdgpu_pasid_free_cb(fence, &cb->cb);
}
return;
fallback:
/* Not enough memory for the delayed delete; as a last resort,
* block until all the fences have completed.
*/
reservation_object_wait_timeout_rcu(resv, true, false,
MAX_SCHEDULE_TIMEOUT);
amdgpu_pasid_free(pasid);
}
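The intended caller pattern shows up in amdgpu_driver_postclose_kms further down in this diff; in short, keep the page directory BO alive so its reservation object remains valid, then hand the PASID off:

/* Caller pattern (mirrors amdgpu_driver_postclose_kms below): hold a
 * reference on the page directory BO so its reservation object
 * outlives the VM teardown, then free the PASID once every fence on
 * that reservation object has signaled. */
struct amdgpu_bo *pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);
unsigned int pasid = fpriv->vm.pasid;

amdgpu_vm_fini(adev, &fpriv->vm);
if (pasid)
	amdgpu_pasid_free_delayed(pd->tbo.resv, pasid);
amdgpu_bo_unref(&pd);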
/* /*
* VMID manager * VMID manager
* *
@ -96,64 +182,210 @@ bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
atomic_read(&adev->gpu_reset_counter); atomic_read(&adev->gpu_reset_counter);
} }
/* idr_mgr->lock must be held */ /**
static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm, * amdgpu_vmid_grab_idle - grab an idle VMID
struct amdgpu_ring *ring, *
struct amdgpu_sync *sync, * @vm: vm to allocate id for
struct dma_fence *fence, * @ring: ring we want to submit job to
struct amdgpu_job *job) * @sync: sync object where we add dependencies
* @idle: resulting idle VMID
*
* Try to find an idle VMID; if none is idle, add a fence to the sync
* object to wait on. Returns -ENOMEM when we are out of memory.
*/
static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
struct amdgpu_ring *ring,
struct amdgpu_sync *sync,
struct amdgpu_vmid **idle)
{
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->funcs->vmhub;
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
struct dma_fence **fences;
unsigned i;
int r;
if (ring->vmid_wait && !dma_fence_is_signaled(ring->vmid_wait))
return amdgpu_sync_fence(adev, sync, ring->vmid_wait, false);
fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
if (!fences)
return -ENOMEM;
/* Check if we have an idle VMID */
i = 0;
list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, ring);
if (!fences[i])
break;
++i;
}
/* If we can't find an idle VMID to use, wait until one becomes available */
if (&(*idle)->list == &id_mgr->ids_lru) {
u64 fence_context = adev->vm_manager.fence_context + ring->idx;
unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
struct dma_fence_array *array;
unsigned j;
*idle = NULL;
for (j = 0; j < i; ++j)
dma_fence_get(fences[j]);
array = dma_fence_array_create(i, fences, fence_context,
seqno, true);
if (!array) {
for (j = 0; j < i; ++j)
dma_fence_put(fences[j]);
kfree(fences);
return -ENOMEM;
}
r = amdgpu_sync_fence(adev, sync, &array->base, false);
dma_fence_put(ring->vmid_wait);
ring->vmid_wait = &array->base;
return r;
}
kfree(fences);
return 0;
}
/**
* amdgpu_vmid_grab_reserved - try to assign a reserved VMID
*
* @vm: vm to allocate id for
* @ring: ring we want to submit job to
* @sync: sync object where we add dependencies
* @fence: fence protecting ID from reuse
* @job: job that wants to use the VMID
*
* Try to assign a reserved VMID.
*/
static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
struct amdgpu_ring *ring,
struct amdgpu_sync *sync,
struct dma_fence *fence,
struct amdgpu_job *job,
struct amdgpu_vmid **id)
{ {
struct amdgpu_device *adev = ring->adev; struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->funcs->vmhub; unsigned vmhub = ring->funcs->vmhub;
uint64_t fence_context = adev->fence_context + ring->idx; uint64_t fence_context = adev->fence_context + ring->idx;
struct amdgpu_vmid *id = vm->reserved_vmid[vmhub];
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
struct dma_fence *updates = sync->last_vm_update; struct dma_fence *updates = sync->last_vm_update;
int r = 0;
struct dma_fence *flushed, *tmp;
bool needs_flush = vm->use_cpu_for_update; bool needs_flush = vm->use_cpu_for_update;
int r = 0;
*id = vm->reserved_vmid[vmhub];
if (updates && (*id)->flushed_updates &&
updates->context == (*id)->flushed_updates->context &&
!dma_fence_is_later(updates, (*id)->flushed_updates))
updates = NULL;
if ((*id)->owner != vm->entity.fence_context ||
job->vm_pd_addr != (*id)->pd_gpu_addr ||
updates || !(*id)->last_flush ||
((*id)->last_flush->context != fence_context &&
!dma_fence_is_signaled((*id)->last_flush))) {
struct dma_fence *tmp;
flushed = id->flushed_updates;
if ((amdgpu_vmid_had_gpu_reset(adev, id)) ||
(atomic64_read(&id->owner) != vm->entity.fence_context) ||
(job->vm_pd_addr != id->pd_gpu_addr) ||
(updates && (!flushed || updates->context != flushed->context ||
dma_fence_is_later(updates, flushed))) ||
(!id->last_flush || (id->last_flush->context != fence_context &&
!dma_fence_is_signaled(id->last_flush)))) {
needs_flush = true;
/* to prevent one context being starved by another context */ /* to prevent one context being starved by another context */
id->pd_gpu_addr = 0; (*id)->pd_gpu_addr = 0;
tmp = amdgpu_sync_peek_fence(&id->active, ring); tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
if (tmp) { if (tmp) {
*id = NULL;
r = amdgpu_sync_fence(adev, sync, tmp, false); r = amdgpu_sync_fence(adev, sync, tmp, false);
return r; return r;
} }
needs_flush = true;
} }
/* Good, we can use this VMID. Remember this submission as /* Good, we can use this VMID. Remember this submission as
* user of the VMID. * user of the VMID.
*/ */
r = amdgpu_sync_fence(ring->adev, &id->active, fence, false); r = amdgpu_sync_fence(ring->adev, &(*id)->active, fence, false);
if (r) if (r)
goto out; return r;
if (updates && (!flushed || updates->context != flushed->context || if (updates) {
dma_fence_is_later(updates, flushed))) { dma_fence_put((*id)->flushed_updates);
dma_fence_put(id->flushed_updates); (*id)->flushed_updates = dma_fence_get(updates);
id->flushed_updates = dma_fence_get(updates);
} }
id->pd_gpu_addr = job->vm_pd_addr;
atomic64_set(&id->owner, vm->entity.fence_context);
job->vm_needs_flush = needs_flush; job->vm_needs_flush = needs_flush;
if (needs_flush) { return 0;
dma_fence_put(id->last_flush); }
id->last_flush = NULL;
/**
* amdgpu_vmid_grab_used - try to reuse a VMID
*
* @vm: vm to allocate id for
* @ring: ring we want to submit job to
* @sync: sync object where we add dependencies
* @fence: fence protecting ID from reuse
* @job: job that wants to use the VMID
* @id: resulting VMID
*
* Try to reuse a VMID for this submission.
*/
static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
struct amdgpu_ring *ring,
struct amdgpu_sync *sync,
struct dma_fence *fence,
struct amdgpu_job *job,
struct amdgpu_vmid **id)
{
struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->funcs->vmhub;
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
uint64_t fence_context = adev->fence_context + ring->idx;
struct dma_fence *updates = sync->last_vm_update;
int r;
job->vm_needs_flush = vm->use_cpu_for_update;
/* Check if we can use a VMID already assigned to this VM */
list_for_each_entry_reverse((*id), &id_mgr->ids_lru, list) {
bool needs_flush = vm->use_cpu_for_update;
struct dma_fence *flushed;
/* Check all the prerequisites to using this VMID */
if ((*id)->owner != vm->entity.fence_context)
continue;
if ((*id)->pd_gpu_addr != job->vm_pd_addr)
continue;
if (!(*id)->last_flush ||
((*id)->last_flush->context != fence_context &&
!dma_fence_is_signaled((*id)->last_flush)))
needs_flush = true;
flushed = (*id)->flushed_updates;
if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
needs_flush = true;
/* Concurrent flushes are only possible starting with Vega10 */
if (adev->asic_type < CHIP_VEGA10 && needs_flush)
continue;
/* Good, we can use this VMID. Remember this submission as
* user of the VMID.
*/
r = amdgpu_sync_fence(ring->adev, &(*id)->active, fence, false);
if (r)
return r;
if (updates && (!flushed || dma_fence_is_later(updates, flushed))) {
dma_fence_put((*id)->flushed_updates);
(*id)->flushed_updates = dma_fence_get(updates);
}
job->vm_needs_flush |= needs_flush;
return 0;
} }
job->vmid = id - id_mgr->ids;
trace_amdgpu_vm_grab_id(vm, ring, job); *id = NULL;
out: return 0;
return r;
} }
/** /**
@ -163,6 +395,7 @@ static int amdgpu_vmid_grab_reserved_locked(struct amdgpu_vm *vm,
* @ring: ring we want to submit job to * @ring: ring we want to submit job to
* @sync: sync object where we add dependencies * @sync: sync object where we add dependencies
* @fence: fence protecting ID from reuse * @fence: fence protecting ID from reuse
* @job: job that wants to use the VMID
* *
* Allocate an id for the vm, adding fences to the sync obj as necessary. * Allocate an id for the vm, adding fences to the sync obj as necessary.
*/ */
@ -173,135 +406,53 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_device *adev = ring->adev; struct amdgpu_device *adev = ring->adev;
unsigned vmhub = ring->funcs->vmhub; unsigned vmhub = ring->funcs->vmhub;
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
uint64_t fence_context = adev->fence_context + ring->idx; struct amdgpu_vmid *idle = NULL;
struct dma_fence *updates = sync->last_vm_update; struct amdgpu_vmid *id = NULL;
struct amdgpu_vmid *id, *idle;
struct dma_fence **fences;
unsigned i;
int r = 0; int r = 0;
mutex_lock(&id_mgr->lock); mutex_lock(&id_mgr->lock);
if (vm->reserved_vmid[vmhub]) { r = amdgpu_vmid_grab_idle(vm, ring, sync, &idle);
r = amdgpu_vmid_grab_reserved_locked(vm, ring, sync, fence, job); if (r || !idle)
mutex_unlock(&id_mgr->lock);
return r;
}
fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
if (!fences) {
mutex_unlock(&id_mgr->lock);
return -ENOMEM;
}
/* Check if we have an idle VMID */
i = 0;
list_for_each_entry(idle, &id_mgr->ids_lru, list) {
fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
if (!fences[i])
break;
++i;
}
/* If we can't find a idle VMID to use, wait till one becomes available */
if (&idle->list == &id_mgr->ids_lru) {
u64 fence_context = adev->vm_manager.fence_context + ring->idx;
unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
struct dma_fence_array *array;
unsigned j;
for (j = 0; j < i; ++j)
dma_fence_get(fences[j]);
array = dma_fence_array_create(i, fences, fence_context,
seqno, true);
if (!array) {
for (j = 0; j < i; ++j)
dma_fence_put(fences[j]);
kfree(fences);
r = -ENOMEM;
goto error;
}
r = amdgpu_sync_fence(ring->adev, sync, &array->base, false);
dma_fence_put(&array->base);
if (r)
goto error;
mutex_unlock(&id_mgr->lock);
return 0;
}
kfree(fences);
job->vm_needs_flush = vm->use_cpu_for_update;
/* Check if we can use a VMID already assigned to this VM */
list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) {
struct dma_fence *flushed;
bool needs_flush = vm->use_cpu_for_update;
/* Check all the prerequisites to using this VMID */
if (amdgpu_vmid_had_gpu_reset(adev, id))
continue;
if (atomic64_read(&id->owner) != vm->entity.fence_context)
continue;
if (job->vm_pd_addr != id->pd_gpu_addr)
continue;
if (!id->last_flush ||
(id->last_flush->context != fence_context &&
!dma_fence_is_signaled(id->last_flush)))
needs_flush = true;
flushed = id->flushed_updates;
if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
needs_flush = true;
/* Concurrent flushes are only possible starting with Vega10 */
if (adev->asic_type < CHIP_VEGA10 && needs_flush)
continue;
/* Good we can use this VMID. Remember this submission as
* user of the VMID.
*/
r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
if (r)
goto error;
if (updates && (!flushed || dma_fence_is_later(updates, flushed))) {
dma_fence_put(id->flushed_updates);
id->flushed_updates = dma_fence_get(updates);
}
if (needs_flush)
goto needs_flush;
else
goto no_flush_needed;
};
/* Still no ID to use? Then use the idle one found earlier */
id = idle;
/* Remember this submission as user of the VMID */
r = amdgpu_sync_fence(ring->adev, &id->active, fence, false);
if (r)
goto error; goto error;
if (vm->reserved_vmid[vmhub]) {
r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job, &id);
if (r || !id)
goto error;
} else {
r = amdgpu_vmid_grab_used(vm, ring, sync, fence, job, &id);
if (r)
goto error;
if (!id) {
struct dma_fence *updates = sync->last_vm_update;
/* Still no ID to use? Then use the idle one found earlier */
id = idle;
/* Remember this submission as user of the VMID */
r = amdgpu_sync_fence(ring->adev, &id->active,
fence, false);
if (r)
goto error;
dma_fence_put(id->flushed_updates);
id->flushed_updates = dma_fence_get(updates);
job->vm_needs_flush = true;
}
list_move_tail(&id->list, &id_mgr->ids_lru);
}
id->pd_gpu_addr = job->vm_pd_addr; id->pd_gpu_addr = job->vm_pd_addr;
dma_fence_put(id->flushed_updates); id->owner = vm->entity.fence_context;
id->flushed_updates = dma_fence_get(updates);
atomic64_set(&id->owner, vm->entity.fence_context);
needs_flush:
job->vm_needs_flush = true;
dma_fence_put(id->last_flush);
id->last_flush = NULL;
no_flush_needed:
list_move_tail(&id->list, &id_mgr->ids_lru);
if (job->vm_needs_flush) {
dma_fence_put(id->last_flush);
id->last_flush = NULL;
}
job->vmid = id - id_mgr->ids; job->vmid = id - id_mgr->ids;
job->pasid = vm->pasid;
trace_amdgpu_vm_grab_id(vm, ring, job); trace_amdgpu_vm_grab_id(vm, ring, job);
error: error:
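With the split into grab_idle/grab_reserved/grab_used, the top-level flow reduces to the sketch below (locking and error unwinding elided):

/* Condensed top-level flow of the reworked grab path (sketch; locking
 * and error unwinding elided): */
r = amdgpu_vmid_grab_idle(vm, ring, sync, &idle);
if (r || !idle)
	goto error;	/* still waiting on a fence, or out of memory */

if (vm->reserved_vmid[vmhub])
	r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job, &id);
else
	r = amdgpu_vmid_grab_used(vm, ring, sync, fence, job, &id);

if (!r && !id)
	id = idle;	/* nothing reusable: take the idle VMID found first */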
@ -370,13 +521,15 @@ void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub]; struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
struct amdgpu_vmid *id = &id_mgr->ids[vmid]; struct amdgpu_vmid *id = &id_mgr->ids[vmid];
atomic64_set(&id->owner, 0); mutex_lock(&id_mgr->lock);
id->owner = 0;
id->gds_base = 0; id->gds_base = 0;
id->gds_size = 0; id->gds_size = 0;
id->gws_base = 0; id->gws_base = 0;
id->gws_size = 0; id->gws_size = 0;
id->oa_base = 0; id->oa_base = 0;
id->oa_size = 0; id->oa_size = 0;
mutex_unlock(&id_mgr->lock);
} }
/** /**
@ -454,6 +607,7 @@ void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev)
amdgpu_sync_free(&id->active); amdgpu_sync_free(&id->active);
dma_fence_put(id->flushed_updates); dma_fence_put(id->flushed_updates);
dma_fence_put(id->last_flush); dma_fence_put(id->last_flush);
dma_fence_put(id->pasid_mapping);
} }
} }
} }

View File

@ -43,7 +43,7 @@ struct amdgpu_vmid {
struct list_head list; struct list_head list;
struct amdgpu_sync active; struct amdgpu_sync active;
struct dma_fence *last_flush; struct dma_fence *last_flush;
atomic64_t owner; uint64_t owner;
uint64_t pd_gpu_addr; uint64_t pd_gpu_addr;
/* last flushed PD/PT update */ /* last flushed PD/PT update */
@ -57,6 +57,9 @@ struct amdgpu_vmid {
uint32_t gws_size; uint32_t gws_size;
uint32_t oa_base; uint32_t oa_base;
uint32_t oa_size; uint32_t oa_size;
unsigned pasid;
struct dma_fence *pasid_mapping;
}; };
struct amdgpu_vmid_mgr { struct amdgpu_vmid_mgr {
@ -69,6 +72,8 @@ struct amdgpu_vmid_mgr {
int amdgpu_pasid_alloc(unsigned int bits); int amdgpu_pasid_alloc(unsigned int bits);
void amdgpu_pasid_free(unsigned int pasid); void amdgpu_pasid_free(unsigned int pasid);
void amdgpu_pasid_free_delayed(struct reservation_object *resv,
unsigned int pasid);
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev, bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
struct amdgpu_vmid *id); struct amdgpu_vmid *id);

View File

@ -109,7 +109,7 @@ struct amdgpu_iv_entry {
unsigned vmid_src; unsigned vmid_src;
uint64_t timestamp; uint64_t timestamp;
unsigned timestamp_src; unsigned timestamp_src;
unsigned pas_id; unsigned pasid;
unsigned pasid_src; unsigned pasid_src;
unsigned src_data[AMDGPU_IH_SRC_DATA_MAX_SIZE_DW]; unsigned src_data[AMDGPU_IH_SRC_DATA_MAX_SIZE_DW];
const uint32_t *iv_entry; const uint32_t *iv_entry;

View File

@ -92,7 +92,7 @@ static void amdgpu_irq_reset_work_func(struct work_struct *work)
} }
/* Disable *all* interrupts */ /* Disable *all* interrupts */
static void amdgpu_irq_disable_all(struct amdgpu_device *adev) void amdgpu_irq_disable_all(struct amdgpu_device *adev)
{ {
unsigned long irqflags; unsigned long irqflags;
unsigned i, j, k; unsigned i, j, k;
@ -122,55 +122,6 @@ static void amdgpu_irq_disable_all(struct amdgpu_device *adev)
spin_unlock_irqrestore(&adev->irq.lock, irqflags); spin_unlock_irqrestore(&adev->irq.lock, irqflags);
} }
/**
* amdgpu_irq_preinstall - drm irq preinstall callback
*
* @dev: drm dev pointer
*
* Gets the hw ready to enable irqs (all asics).
* This function disables all interrupt sources on the GPU.
*/
void amdgpu_irq_preinstall(struct drm_device *dev)
{
struct amdgpu_device *adev = dev->dev_private;
/* Disable *all* interrupts */
amdgpu_irq_disable_all(adev);
/* Clear bits */
amdgpu_ih_process(adev);
}
/**
* amdgpu_irq_postinstall - drm irq preinstall callback
*
* @dev: drm dev pointer
*
* Handles stuff to be done after enabling irqs (all asics).
* Returns 0 on success.
*/
int amdgpu_irq_postinstall(struct drm_device *dev)
{
dev->max_vblank_count = 0x00ffffff;
return 0;
}
/**
* amdgpu_irq_uninstall - drm irq uninstall callback
*
* @dev: drm dev pointer
*
* This function disables all interrupt sources on the GPU (all asics).
*/
void amdgpu_irq_uninstall(struct drm_device *dev)
{
struct amdgpu_device *adev = dev->dev_private;
if (adev == NULL) {
return;
}
amdgpu_irq_disable_all(adev);
}
/** /**
* amdgpu_irq_handler - irq handler * amdgpu_irq_handler - irq handler
* *
@ -261,6 +212,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
cancel_work_sync(&adev->reset_work); cancel_work_sync(&adev->reset_work);
return r; return r;
} }
adev->ddev->max_vblank_count = 0x00ffffff;
DRM_DEBUG("amdgpu: irq initialized.\n"); DRM_DEBUG("amdgpu: irq initialized.\n");
return 0; return 0;

View File

@ -78,9 +78,7 @@ struct amdgpu_irq {
uint32_t srbm_soft_reset; uint32_t srbm_soft_reset;
}; };
void amdgpu_irq_preinstall(struct drm_device *dev); void amdgpu_irq_disable_all(struct amdgpu_device *adev);
int amdgpu_irq_postinstall(struct drm_device *dev);
void amdgpu_irq_uninstall(struct drm_device *dev);
irqreturn_t amdgpu_irq_handler(int irq, void *arg); irqreturn_t amdgpu_irq_handler(int irq, void *arg);
int amdgpu_irq_init(struct amdgpu_device *adev); int amdgpu_irq_init(struct amdgpu_device *adev);

View File

@ -191,7 +191,7 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
fw_info->feature = 0; fw_info->feature = 0;
break; break;
case AMDGPU_INFO_FW_GMC: case AMDGPU_INFO_FW_GMC:
fw_info->ver = adev->mc.fw_version; fw_info->ver = adev->gmc.fw_version;
fw_info->feature = 0; fw_info->feature = 0;
break; break;
case AMDGPU_INFO_FW_GFX_ME: case AMDGPU_INFO_FW_GFX_ME:
@ -470,9 +470,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
case AMDGPU_INFO_VRAM_GTT: { case AMDGPU_INFO_VRAM_GTT: {
struct drm_amdgpu_info_vram_gtt vram_gtt; struct drm_amdgpu_info_vram_gtt vram_gtt;
vram_gtt.vram_size = adev->mc.real_vram_size; vram_gtt.vram_size = adev->gmc.real_vram_size;
vram_gtt.vram_size -= adev->vram_pin_size; vram_gtt.vram_size -= adev->vram_pin_size;
vram_gtt.vram_cpu_accessible_size = adev->mc.visible_vram_size; vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size;
vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size); vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size);
vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size; vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
vram_gtt.gtt_size *= PAGE_SIZE; vram_gtt.gtt_size *= PAGE_SIZE;
@ -484,17 +484,17 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
struct drm_amdgpu_memory_info mem; struct drm_amdgpu_memory_info mem;
memset(&mem, 0, sizeof(mem)); memset(&mem, 0, sizeof(mem));
mem.vram.total_heap_size = adev->mc.real_vram_size; mem.vram.total_heap_size = adev->gmc.real_vram_size;
mem.vram.usable_heap_size = mem.vram.usable_heap_size =
adev->mc.real_vram_size - adev->vram_pin_size; adev->gmc.real_vram_size - adev->vram_pin_size;
mem.vram.heap_usage = mem.vram.heap_usage =
amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]); amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4; mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
mem.cpu_accessible_vram.total_heap_size = mem.cpu_accessible_vram.total_heap_size =
adev->mc.visible_vram_size; adev->gmc.visible_vram_size;
mem.cpu_accessible_vram.usable_heap_size = mem.cpu_accessible_vram.usable_heap_size =
adev->mc.visible_vram_size - adev->gmc.visible_vram_size -
(adev->vram_pin_size - adev->invisible_pin_size); (adev->vram_pin_size - adev->invisible_pin_size);
mem.cpu_accessible_vram.heap_usage = mem.cpu_accessible_vram.heap_usage =
amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]); amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
@ -580,11 +580,16 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION; dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE; vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
vm_size -= AMDGPU_VA_RESERVED_SIZE;
/* Older VCE FW versions are buggy and can handle only 40 bits */
if (adev->vce.fw_version < AMDGPU_VCE_FW_53_45)
vm_size = min(vm_size, 1ULL << 40);
dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE; dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
dev_info.virtual_address_max = dev_info.virtual_address_max =
min(vm_size, AMDGPU_VA_HOLE_START); min(vm_size, AMDGPU_VA_HOLE_START);
vm_size -= AMDGPU_VA_RESERVED_SIZE;
if (vm_size > AMDGPU_VA_HOLE_START) { if (vm_size > AMDGPU_VA_HOLE_START) {
dev_info.high_va_offset = AMDGPU_VA_HOLE_END; dev_info.high_va_offset = AMDGPU_VA_HOLE_END;
dev_info.high_va_max = AMDGPU_VA_HOLE_END | vm_size; dev_info.high_va_max = AMDGPU_VA_HOLE_END | vm_size;
@ -599,8 +604,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
sizeof(adev->gfx.cu_info.ao_cu_bitmap)); sizeof(adev->gfx.cu_info.ao_cu_bitmap));
memcpy(&dev_info.cu_bitmap[0], &adev->gfx.cu_info.bitmap[0], memcpy(&dev_info.cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
sizeof(adev->gfx.cu_info.bitmap)); sizeof(adev->gfx.cu_info.bitmap));
dev_info.vram_type = adev->mc.vram_type; dev_info.vram_type = adev->gmc.vram_type;
dev_info.vram_bit_width = adev->mc.vram_width; dev_info.vram_bit_width = adev->gmc.vram_width;
dev_info.vce_harvest_config = adev->vce.harvest_config; dev_info.vce_harvest_config = adev->vce.harvest_config;
dev_info.gc_double_offchip_lds_buf = dev_info.gc_double_offchip_lds_buf =
adev->gfx.config.double_offchip_lds_buf; adev->gfx.config.double_offchip_lds_buf;
@ -758,6 +763,24 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
return -EINVAL; return -EINVAL;
} }
break; break;
case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK:
/* get stable pstate sclk in MHz */
if (amdgpu_dpm_read_sensor(adev,
AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
(void *)&ui32, &ui32_size)) {
return -EINVAL;
}
ui32 /= 100;
break;
case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK:
/* get stable pstate mclk in MHz */
if (amdgpu_dpm_read_sensor(adev,
AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK,
(void *)&ui32, &ui32_size)) {
return -EINVAL;
}
ui32 /= 100;
break;
default: default:
DRM_DEBUG_KMS("Invalid request %d\n", DRM_DEBUG_KMS("Invalid request %d\n",
info->sensor_info.type); info->sensor_info.type);
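Userspace reaches the new readings through the sensor INFO ioctl; a sketch via libdrm_amdgpu, where the initialized amdgpu_device_handle is an assumption:

/* Userspace sketch via libdrm_amdgpu; "dev" is assumed to be an
 * initialized amdgpu_device_handle. The kernel divides the raw sensor
 * reading by 100 above, so the values arrive in MHz. */
#include <stdio.h>
#include <stdint.h>
#include <amdgpu.h>
#include <amdgpu_drm.h>

static void print_stable_pstate(amdgpu_device_handle dev)
{
	uint32_t sclk, mclk;

	if (amdgpu_query_sensor_info(dev, AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK,
				     sizeof(sclk), &sclk) ||
	    amdgpu_query_sensor_info(dev, AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK,
				     sizeof(mclk), &mclk))
		return;

	printf("stable pstate: sclk %u MHz, mclk %u MHz\n", sclk, mclk);
}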
@ -805,7 +828,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{ {
struct amdgpu_device *adev = dev->dev_private; struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_fpriv *fpriv; struct amdgpu_fpriv *fpriv;
int r; int r, pasid;
file_priv->driver_priv = NULL; file_priv->driver_priv = NULL;
@ -819,28 +842,25 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
goto out_suspend; goto out_suspend;
} }
r = amdgpu_vm_init(adev, &fpriv->vm, pasid = amdgpu_pasid_alloc(16);
AMDGPU_VM_CONTEXT_GFX, 0); if (pasid < 0) {
if (r) { dev_warn(adev->dev, "No more PASIDs available!");
kfree(fpriv); pasid = 0;
goto out_suspend;
} }
r = amdgpu_vm_init(adev, &fpriv->vm, AMDGPU_VM_CONTEXT_GFX, pasid);
if (r)
goto error_pasid;
fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL); fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
if (!fpriv->prt_va) { if (!fpriv->prt_va) {
r = -ENOMEM; r = -ENOMEM;
amdgpu_vm_fini(adev, &fpriv->vm); goto error_vm;
kfree(fpriv);
goto out_suspend;
} }
if (amdgpu_sriov_vf(adev)) { if (amdgpu_sriov_vf(adev)) {
r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va); r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va);
if (r) { if (r)
amdgpu_vm_fini(adev, &fpriv->vm); goto error_vm;
kfree(fpriv);
goto out_suspend;
}
} }
mutex_init(&fpriv->bo_list_lock); mutex_init(&fpriv->bo_list_lock);
@ -849,6 +869,16 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
amdgpu_ctx_mgr_init(&fpriv->ctx_mgr); amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);
file_priv->driver_priv = fpriv; file_priv->driver_priv = fpriv;
goto out_suspend;
error_vm:
amdgpu_vm_fini(adev, &fpriv->vm);
error_pasid:
if (pasid)
amdgpu_pasid_free(pasid);
kfree(fpriv);
out_suspend: out_suspend:
pm_runtime_mark_last_busy(dev->dev); pm_runtime_mark_last_busy(dev->dev);
@ -871,6 +901,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
struct amdgpu_device *adev = dev->dev_private; struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_fpriv *fpriv = file_priv->driver_priv; struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
struct amdgpu_bo_list *list; struct amdgpu_bo_list *list;
struct amdgpu_bo *pd;
unsigned int pasid;
int handle; int handle;
if (!fpriv) if (!fpriv)
@ -895,7 +927,13 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
amdgpu_bo_unreserve(adev->virt.csa_obj); amdgpu_bo_unreserve(adev->virt.csa_obj);
} }
pasid = fpriv->vm.pasid;
pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);
amdgpu_vm_fini(adev, &fpriv->vm); amdgpu_vm_fini(adev, &fpriv->vm);
if (pasid)
amdgpu_pasid_free_delayed(pd->tbo.resv, pasid);
amdgpu_bo_unref(&pd);
idr_for_each_entry(&fpriv->bo_list_handles, list, handle) idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
amdgpu_bo_list_free(list); amdgpu_bo_list_free(list);
@ -947,11 +985,11 @@ u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
*/ */
do { do {
count = amdgpu_display_vblank_get_counter(adev, pipe); count = amdgpu_display_vblank_get_counter(adev, pipe);
/* Ask amdgpu_get_crtc_scanoutpos to return vpos as /* Ask amdgpu_display_get_crtc_scanoutpos to return
* distance to start of vblank, instead of regular * vpos as distance to start of vblank, instead of
* vertical scanout pos. * regular vertical scanout pos.
*/ */
stat = amdgpu_get_crtc_scanoutpos( stat = amdgpu_display_get_crtc_scanoutpos(
dev, pipe, GET_DISTANCE_TO_VBLANKSTART, dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
&vpos, &hpos, NULL, NULL, &vpos, &hpos, NULL, NULL,
&adev->mode_info.crtcs[pipe]->base.hwmode); &adev->mode_info.crtcs[pipe]->base.hwmode);
@ -992,7 +1030,7 @@ u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe) int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe)
{ {
struct amdgpu_device *adev = dev->dev_private; struct amdgpu_device *adev = dev->dev_private;
int idx = amdgpu_crtc_idx_to_irq_type(adev, pipe); int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);
return amdgpu_irq_get(adev, &adev->crtc_irq, idx); return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
} }
@ -1008,7 +1046,7 @@ int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe)
void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe) void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe)
{ {
struct amdgpu_device *adev = dev->dev_private; struct amdgpu_device *adev = dev->dev_private;
int idx = amdgpu_crtc_idx_to_irq_type(adev, pipe); int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);
amdgpu_irq_put(adev, &adev->crtc_irq, idx); amdgpu_irq_put(adev, &adev->crtc_irq, idx);
} }

View File

@ -267,8 +267,6 @@ struct amdgpu_display_funcs {
void (*bandwidth_update)(struct amdgpu_device *adev); void (*bandwidth_update)(struct amdgpu_device *adev);
/* get frame count */ /* get frame count */
u32 (*vblank_get_counter)(struct amdgpu_device *adev, int crtc); u32 (*vblank_get_counter)(struct amdgpu_device *adev, int crtc);
/* wait for vblank */
void (*vblank_wait)(struct amdgpu_device *adev, int crtc);
/* set backlight level */ /* set backlight level */
void (*backlight_set_level)(struct amdgpu_encoder *amdgpu_encoder, void (*backlight_set_level)(struct amdgpu_encoder *amdgpu_encoder,
u8 level); u8 level);
@ -608,7 +606,7 @@ struct amdgpu_mst_connector {
#define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \ #define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \
((em) == ATOM_ENCODER_MODE_DP_MST)) ((em) == ATOM_ENCODER_MODE_DP_MST))
/* Driver internal use only flags of amdgpu_get_crtc_scanoutpos() */ /* Driver internal use only flags of amdgpu_display_get_crtc_scanoutpos() */
#define DRM_SCANOUTPOS_VALID (1 << 0) #define DRM_SCANOUTPOS_VALID (1 << 0)
#define DRM_SCANOUTPOS_IN_VBLANK (1 << 1) #define DRM_SCANOUTPOS_IN_VBLANK (1 << 1)
#define DRM_SCANOUTPOS_ACCURATE (1 << 2) #define DRM_SCANOUTPOS_ACCURATE (1 << 2)
@ -627,30 +625,31 @@ bool amdgpu_dig_monitor_is_duallink(struct drm_encoder *encoder,
u16 amdgpu_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder); u16 amdgpu_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder);
struct drm_encoder *amdgpu_get_external_encoder(struct drm_encoder *encoder); struct drm_encoder *amdgpu_get_external_encoder(struct drm_encoder *encoder);
bool amdgpu_ddc_probe(struct amdgpu_connector *amdgpu_connector, bool use_aux); bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
bool use_aux);
void amdgpu_encoder_set_active_device(struct drm_encoder *encoder); void amdgpu_encoder_set_active_device(struct drm_encoder *encoder);
int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
unsigned int flags, int *vpos, int *hpos, unsigned int pipe, unsigned int flags, int *vpos,
ktime_t *stime, ktime_t *etime, int *hpos, ktime_t *stime, ktime_t *etime,
const struct drm_display_mode *mode); const struct drm_display_mode *mode);
int amdgpu_framebuffer_init(struct drm_device *dev, int amdgpu_display_framebuffer_init(struct drm_device *dev,
struct amdgpu_framebuffer *rfb, struct amdgpu_framebuffer *rfb,
const struct drm_mode_fb_cmd2 *mode_cmd, const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj); struct drm_gem_object *obj);
int amdgpufb_remove(struct drm_device *dev, struct drm_framebuffer *fb); int amdgpufb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
void amdgpu_enc_destroy(struct drm_encoder *encoder); void amdgpu_enc_destroy(struct drm_encoder *encoder);
void amdgpu_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj); void amdgpu_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj);
bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc, bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode, const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode); struct drm_display_mode *adjusted_mode);
void amdgpu_panel_mode_fixup(struct drm_encoder *encoder, void amdgpu_panel_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode); struct drm_display_mode *adjusted_mode);
int amdgpu_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc); int amdgpu_display_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc);
/* fbdev layer */ /* fbdev layer */
int amdgpu_fbdev_init(struct amdgpu_device *adev); int amdgpu_fbdev_init(struct amdgpu_device *adev);
@ -662,15 +661,15 @@ bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tiled); int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tiled);
/* amdgpu_display.c */ /* amdgpu_display.c */
void amdgpu_print_display_setup(struct drm_device *dev); void amdgpu_display_print_display_setup(struct drm_device *dev);
int amdgpu_modeset_create_props(struct amdgpu_device *adev); int amdgpu_display_modeset_create_props(struct amdgpu_device *adev);
int amdgpu_crtc_set_config(struct drm_mode_set *set, int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
struct drm_modeset_acquire_ctx *ctx); struct drm_modeset_acquire_ctx *ctx);
int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc, int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
struct drm_framebuffer *fb, struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event, struct drm_pending_vblank_event *event,
uint32_t page_flip_flags, uint32_t target, uint32_t page_flip_flags, uint32_t target,
struct drm_modeset_acquire_ctx *ctx); struct drm_modeset_acquire_ctx *ctx);
extern const struct drm_mode_config_funcs amdgpu_mode_funcs; extern const struct drm_mode_config_funcs amdgpu_mode_funcs;
#endif #endif

View File

@ -83,7 +83,7 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
u32 c = 0; u32 c = 0;
if (domain & AMDGPU_GEM_DOMAIN_VRAM) { if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT; unsigned visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
places[c].fpfn = 0; places[c].fpfn = 0;
places[c].lpfn = 0; places[c].lpfn = 0;
@ -103,7 +103,7 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
if (domain & AMDGPU_GEM_DOMAIN_GTT) { if (domain & AMDGPU_GEM_DOMAIN_GTT) {
places[c].fpfn = 0; places[c].fpfn = 0;
if (flags & AMDGPU_GEM_CREATE_SHADOW) if (flags & AMDGPU_GEM_CREATE_SHADOW)
places[c].lpfn = adev->mc.gart_size >> PAGE_SHIFT; places[c].lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
else else
places[c].lpfn = 0; places[c].lpfn = 0;
places[c].flags = TTM_PL_FLAG_TT; places[c].flags = TTM_PL_FLAG_TT;
@ -190,7 +190,7 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
r = amdgpu_bo_create(adev, size, align, true, domain, r = amdgpu_bo_create(adev, size, align, true, domain,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
NULL, NULL, 0, bo_ptr); NULL, NULL, bo_ptr);
if (r) { if (r) {
dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
r); r);
@ -336,7 +336,6 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
bool kernel, u32 domain, u64 flags, bool kernel, u32 domain, u64 flags,
struct sg_table *sg, struct sg_table *sg,
struct reservation_object *resv, struct reservation_object *resv,
uint64_t init_value,
struct amdgpu_bo **bo_ptr) struct amdgpu_bo **bo_ptr)
{ {
struct ttm_operation_ctx ctx = { struct ttm_operation_ctx ctx = {
@ -372,11 +371,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL); bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
if (bo == NULL) if (bo == NULL)
return -ENOMEM; return -ENOMEM;
r = drm_gem_object_init(adev->ddev, &bo->gem_base, size); drm_gem_private_object_init(adev->ddev, &bo->gem_base, size);
if (unlikely(r)) {
kfree(bo);
return r;
}
INIT_LIST_HEAD(&bo->shadow_list); INIT_LIST_HEAD(&bo->shadow_list);
INIT_LIST_HEAD(&bo->va); INIT_LIST_HEAD(&bo->va);
bo->preferred_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM | bo->preferred_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
@ -428,9 +423,9 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
if (unlikely(r != 0)) if (unlikely(r != 0))
return r; return r;
if (adev->mc.visible_vram_size < adev->mc.real_vram_size && if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
bo->tbo.mem.mem_type == TTM_PL_VRAM && bo->tbo.mem.mem_type == TTM_PL_VRAM &&
bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT) bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
ctx.bytes_moved); ctx.bytes_moved);
else else
@ -443,7 +438,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) { bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
struct dma_fence *fence; struct dma_fence *fence;
r = amdgpu_fill_buffer(bo, init_value, bo->tbo.resv, &fence); r = amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
if (unlikely(r)) if (unlikely(r))
goto fail_unreserve; goto fail_unreserve;
@ -484,7 +479,7 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
AMDGPU_GEM_DOMAIN_GTT, AMDGPU_GEM_DOMAIN_GTT,
AMDGPU_GEM_CREATE_CPU_GTT_USWC | AMDGPU_GEM_CREATE_CPU_GTT_USWC |
AMDGPU_GEM_CREATE_SHADOW, AMDGPU_GEM_CREATE_SHADOW,
NULL, bo->tbo.resv, 0, NULL, bo->tbo.resv,
&bo->shadow); &bo->shadow);
if (!r) { if (!r) {
bo->shadow->parent = amdgpu_bo_ref(bo); bo->shadow->parent = amdgpu_bo_ref(bo);
@ -496,22 +491,18 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
return r; return r;
} }
/* init_value will only take effect when flags contains
* AMDGPU_GEM_CREATE_VRAM_CLEARED.
*/
int amdgpu_bo_create(struct amdgpu_device *adev, int amdgpu_bo_create(struct amdgpu_device *adev,
unsigned long size, int byte_align, unsigned long size, int byte_align,
bool kernel, u32 domain, u64 flags, bool kernel, u32 domain, u64 flags,
struct sg_table *sg, struct sg_table *sg,
struct reservation_object *resv, struct reservation_object *resv,
uint64_t init_value,
struct amdgpu_bo **bo_ptr) struct amdgpu_bo **bo_ptr)
{ {
uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW; uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW;
int r; int r;
r = amdgpu_bo_do_create(adev, size, byte_align, kernel, domain, r = amdgpu_bo_do_create(adev, size, byte_align, kernel, domain,
parent_flags, sg, resv, init_value, bo_ptr); parent_flags, sg, resv, bo_ptr);
if (r) if (r)
return r; return r;
@ -832,25 +823,25 @@ static const char *amdgpu_vram_names[] = {
int amdgpu_bo_init(struct amdgpu_device *adev) int amdgpu_bo_init(struct amdgpu_device *adev)
{ {
/* reserve PAT memory space to WC for VRAM */ /* reserve PAT memory space to WC for VRAM */
arch_io_reserve_memtype_wc(adev->mc.aper_base, arch_io_reserve_memtype_wc(adev->gmc.aper_base,
adev->mc.aper_size); adev->gmc.aper_size);
/* Add an MTRR for the VRAM */ /* Add an MTRR for the VRAM */
adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base, adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
adev->mc.aper_size); adev->gmc.aper_size);
DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n", DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
adev->mc.mc_vram_size >> 20, adev->gmc.mc_vram_size >> 20,
(unsigned long long)adev->mc.aper_size >> 20); (unsigned long long)adev->gmc.aper_size >> 20);
DRM_INFO("RAM width %dbits %s\n", DRM_INFO("RAM width %dbits %s\n",
adev->mc.vram_width, amdgpu_vram_names[adev->mc.vram_type]); adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
return amdgpu_ttm_init(adev); return amdgpu_ttm_init(adev);
} }
void amdgpu_bo_fini(struct amdgpu_device *adev) void amdgpu_bo_fini(struct amdgpu_device *adev)
{ {
amdgpu_ttm_fini(adev); amdgpu_ttm_fini(adev);
arch_phys_wc_del(adev->mc.vram_mtrr); arch_phys_wc_del(adev->gmc.vram_mtrr);
arch_io_free_memtype_wc(adev->mc.aper_base, adev->mc.aper_size); arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
} }
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo, int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
@ -980,7 +971,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
size = bo->mem.num_pages << PAGE_SHIFT; size = bo->mem.num_pages << PAGE_SHIFT;
offset = bo->mem.start << PAGE_SHIFT; offset = bo->mem.start << PAGE_SHIFT;
if ((offset + size) <= adev->mc.visible_vram_size) if ((offset + size) <= adev->gmc.visible_vram_size)
return 0; return 0;
/* Can't move a pinned BO to visible VRAM */ /* Can't move a pinned BO to visible VRAM */
@ -1003,7 +994,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
offset = bo->mem.start << PAGE_SHIFT; offset = bo->mem.start << PAGE_SHIFT;
/* this should never happen */ /* this should never happen */
if (bo->mem.mem_type == TTM_PL_VRAM && if (bo->mem.mem_type == TTM_PL_VRAM &&
(offset + size) > adev->mc.visible_vram_size) (offset + size) > adev->gmc.visible_vram_size)
return -EINVAL; return -EINVAL;
return 0; return 0;
@ -206,7 +206,6 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
bool kernel, u32 domain, u64 flags, bool kernel, u32 domain, u64 flags,
struct sg_table *sg, struct sg_table *sg,
struct reservation_object *resv, struct reservation_object *resv,
uint64_t init_value,
struct amdgpu_bo **bo_ptr); struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_reserved(struct amdgpu_device *adev, int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
unsigned long size, int align, unsigned long size, int align,
@ -116,7 +116,7 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
} }
if (adev->powerplay.pp_funcs->dispatch_tasks) { if (adev->powerplay.pp_funcs->dispatch_tasks) {
amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state, NULL); amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state);
} else { } else {
mutex_lock(&adev->pm.mutex); mutex_lock(&adev->pm.mutex);
adev->pm.dpm.user_state = state; adev->pm.dpm.user_state = state;
@ -316,7 +316,7 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
if (state != POWER_STATE_TYPE_INTERNAL_BOOT && if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
state != POWER_STATE_TYPE_DEFAULT) { state != POWER_STATE_TYPE_DEFAULT) {
amdgpu_dpm_dispatch_task(adev, amdgpu_dpm_dispatch_task(adev,
AMD_PP_TASK_ENABLE_USER_STATE, &state, NULL); AMD_PP_TASK_ENABLE_USER_STATE, &state);
adev->pp_force_state_enabled = true; adev->pp_force_state_enabled = true;
} }
} }
@ -360,6 +360,90 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
return count; return count;
} }
static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
int ret;
uint32_t parameter_size = 0;
long parameter[64];
char buf_cpy[128];
char *tmp_str;
char *sub_str;
const char delimiter[3] = {' ', '\n', '\0'};
uint32_t type;
if (count > 127)
return -EINVAL;
if (*buf == 's')
type = PP_OD_EDIT_SCLK_VDDC_TABLE;
else if (*buf == 'm')
type = PP_OD_EDIT_MCLK_VDDC_TABLE;
else if (*buf == 'r')
type = PP_OD_RESTORE_DEFAULT_TABLE;
else if (*buf == 'c')
type = PP_OD_COMMIT_DPM_TABLE;
else
return -EINVAL;
memcpy(buf_cpy, buf, count+1);
tmp_str = buf_cpy;
while (isspace(*++tmp_str));
while (tmp_str[0]) {
sub_str = strsep(&tmp_str, delimiter);
ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
if (ret)
return -EINVAL;
parameter_size++;
while (isspace(*tmp_str))
tmp_str++;
}
if (adev->powerplay.pp_funcs->odn_edit_dpm_table)
ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
parameter, parameter_size);
if (ret)
return -EINVAL;
if (type == PP_OD_COMMIT_DPM_TABLE) {
if (adev->powerplay.pp_funcs->dispatch_tasks) {
amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
return count;
} else {
return -EINVAL;
}
}
return count;
}
static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
uint32_t size = 0;
if (adev->powerplay.pp_funcs->print_clock_levels) {
size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
return size;
} else {
return snprintf(buf, PAGE_SIZE, "\n");
}
}
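The two handlers above define the new pp_od_clk_voltage interface: a write starts with a single command letter ('s' for sclk, 'm' for mclk, 'r' to restore defaults, 'c' to commit) followed by space-separated numbers, and a read dumps the OD_SCLK/OD_MCLK tables. A minimal userspace sketch, assuming a card0 sysfs path and purely illustrative level/clock/voltage values:

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/class/drm/card0/device/pp_od_clk_voltage";
	char line[256];
	FILE *f;

	/* stage a new sclk entry: "<cmd> <level> <clock MHz> <voltage mV>" */
	f = fopen(path, "w");
	if (!f)
		return 1;
	fputs("s 0 500 800\n", f);
	fclose(f);

	/* commit the staged table (the PP_OD_COMMIT_DPM_TABLE path above) */
	f = fopen(path, "w");
	if (!f)
		return 1;
	fputs("c\n", f);
	fclose(f);

	/* read back the OD_SCLK/OD_MCLK tables printed by the driver */
	f = fopen(path, "r");
	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}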
static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev, static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
struct device_attribute *attr, struct device_attribute *attr,
char *buf) char *buf)
@ -530,7 +614,7 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
amdgpu_dpm_set_sclk_od(adev, (uint32_t)value); amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
if (adev->powerplay.pp_funcs->dispatch_tasks) { if (adev->powerplay.pp_funcs->dispatch_tasks) {
amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL); amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
} else { } else {
adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
amdgpu_pm_compute_clocks(adev); amdgpu_pm_compute_clocks(adev);
@ -574,7 +658,7 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
amdgpu_dpm_set_mclk_od(adev, (uint32_t)value); amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
if (adev->powerplay.pp_funcs->dispatch_tasks) { if (adev->powerplay.pp_funcs->dispatch_tasks) {
amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL); amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
} else { } else {
adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
amdgpu_pm_compute_clocks(adev); amdgpu_pm_compute_clocks(adev);
@ -584,6 +668,72 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
return count; return count;
} }
static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
if (adev->powerplay.pp_funcs->get_power_profile_mode)
return amdgpu_dpm_get_power_profile_mode(adev, buf);
return snprintf(buf, PAGE_SIZE, "\n");
}
static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
int ret = 0xff;
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
uint32_t parameter_size = 0;
long parameter[64];
char *sub_str, buf_cpy[128];
char *tmp_str;
uint32_t i = 0;
char tmp[2];
long int profile_mode = 0;
const char delimiter[3] = {' ', '\n', '\0'};
tmp[0] = *(buf);
tmp[1] = '\0';
ret = kstrtol(tmp, 0, &profile_mode);
if (ret)
goto fail;
if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
if (count < 2 || count > 127)
return -EINVAL;
while (isspace(*++buf))
i++;
memcpy(buf_cpy, buf, count-i);
tmp_str = buf_cpy;
while (tmp_str[0]) {
sub_str = strsep(&tmp_str, delimiter);
ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
if (ret) {
count = -EINVAL;
goto fail;
}
parameter_size++;
while (isspace(*tmp_str))
tmp_str++;
}
}
parameter[parameter_size] = profile_mode;
if (adev->powerplay.pp_funcs->set_power_profile_mode)
ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
if (!ret)
return count;
fail:
return -EINVAL;
}
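The companion pp_power_profile_mode file works the same way: the first character selects a profile index, and for PP_SMC_POWER_PROFILE_CUSTOM further parameters follow on the same line. A hedged sketch, again assuming a card0 path and an illustrative mode index:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int set_profile_mode(const char *arg)
{
	int fd = open("/sys/class/drm/card0/device/pp_power_profile_mode",
		      O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, arg, strlen(arg));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	/* "1" picks a predefined mode; a custom mode would append values,
	 * e.g. "5 <param> <param> ...", matching the parser above. */
	return set_profile_mode("1\n");
}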
static ssize_t amdgpu_get_pp_power_profile(struct device *dev, static ssize_t amdgpu_get_pp_power_profile(struct device *dev,
char *buf, struct amd_pp_profile *query) char *buf, struct amd_pp_profile *query)
{ {
@ -772,6 +922,12 @@ static DEVICE_ATTR(pp_gfx_power_profile, S_IRUGO | S_IWUSR,
static DEVICE_ATTR(pp_compute_power_profile, S_IRUGO | S_IWUSR, static DEVICE_ATTR(pp_compute_power_profile, S_IRUGO | S_IWUSR,
amdgpu_get_pp_compute_power_profile, amdgpu_get_pp_compute_power_profile,
amdgpu_set_pp_compute_power_profile); amdgpu_set_pp_compute_power_profile);
static DEVICE_ATTR(pp_power_profile_mode, S_IRUGO | S_IWUSR,
amdgpu_get_pp_power_profile_mode,
amdgpu_set_pp_power_profile_mode);
static DEVICE_ATTR(pp_od_clk_voltage, S_IRUGO | S_IWUSR,
amdgpu_get_pp_od_clk_voltage,
amdgpu_set_pp_od_clk_voltage);
static ssize_t amdgpu_hwmon_show_temp(struct device *dev, static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
struct device_attribute *attr, struct device_attribute *attr,
@ -779,17 +935,23 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
{ {
struct amdgpu_device *adev = dev_get_drvdata(dev); struct amdgpu_device *adev = dev_get_drvdata(dev);
struct drm_device *ddev = adev->ddev; struct drm_device *ddev = adev->ddev;
int temp; int r, temp, size = sizeof(temp);
/* Can't get temperature when the card is off */ /* Can't get temperature when the card is off */
if ((adev->flags & AMD_IS_PX) && if ((adev->flags & AMD_IS_PX) &&
(ddev->switch_power_state != DRM_SWITCH_POWER_ON)) (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL; return -EINVAL;
if (!adev->powerplay.pp_funcs->get_temperature) /* sanity check PP is enabled */
temp = 0; if (!(adev->powerplay.pp_funcs &&
else adev->powerplay.pp_funcs->read_sensor))
temp = amdgpu_dpm_get_temperature(adev); return -EINVAL;
/* get the temperature */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
(void *)&temp, &size);
if (r)
return r;
return snprintf(buf, PAGE_SIZE, "%d\n", temp); return snprintf(buf, PAGE_SIZE, "%d\n", temp);
} }
@ -834,6 +996,11 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
int err; int err;
int value; int value;
/* Can't adjust fan when the card is off */
if ((adev->flags & AMD_IS_PX) &&
(adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
if (!adev->powerplay.pp_funcs->set_fan_control_mode) if (!adev->powerplay.pp_funcs->set_fan_control_mode)
return -EINVAL; return -EINVAL;
@ -868,6 +1035,11 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
int err; int err;
u32 value; u32 value;
/* Can't adjust fan when the card is off */
if ((adev->flags & AMD_IS_PX) &&
(adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
err = kstrtou32(buf, 10, &value); err = kstrtou32(buf, 10, &value);
if (err) if (err)
return err; return err;
@ -891,6 +1063,11 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
int err; int err;
u32 speed = 0; u32 speed = 0;
/* Can't adjust fan when the card is off */
if ((adev->flags & AMD_IS_PX) &&
(adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
if (adev->powerplay.pp_funcs->get_fan_speed_percent) { if (adev->powerplay.pp_funcs->get_fan_speed_percent) {
err = amdgpu_dpm_get_fan_speed_percent(adev, &speed); err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
if (err) if (err)
@ -910,6 +1087,11 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
int err; int err;
u32 speed = 0; u32 speed = 0;
/* Can't adjust fan when the card is off */
if ((adev->flags & AMD_IS_PX) &&
(adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
if (adev->powerplay.pp_funcs->get_fan_speed_rpm) { if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed); err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
if (err) if (err)
@ -919,6 +1101,175 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
return sprintf(buf, "%i\n", speed); return sprintf(buf, "%i\n", speed);
} }
static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
struct drm_device *ddev = adev->ddev;
u32 vddgfx;
int r, size = sizeof(vddgfx);
/* Can't get voltage when the card is off */
if ((adev->flags & AMD_IS_PX) &&
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
/* sanity check PP is enabled */
if (!(adev->powerplay.pp_funcs &&
adev->powerplay.pp_funcs->read_sensor))
return -EINVAL;
/* get the voltage */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
(void *)&vddgfx, &size);
if (r)
return r;
return snprintf(buf, PAGE_SIZE, "%d\n", vddgfx);
}
static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return snprintf(buf, PAGE_SIZE, "vddgfx\n");
}
static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
struct drm_device *ddev = adev->ddev;
u32 vddnb;
int r, size = sizeof(vddnb);
/* only APUs have vddnb */
if  (!(adev->flags & AMD_IS_APU))
return -EINVAL;
/* Can't get voltage when the card is off */
if ((adev->flags & AMD_IS_PX) &&
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
/* sanity check PP is enabled */
if (!(adev->powerplay.pp_funcs &&
adev->powerplay.pp_funcs->read_sensor))
return -EINVAL;
/* get the voltage */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
(void *)&vddnb, &size);
if (r)
return r;
return snprintf(buf, PAGE_SIZE, "%d\n", vddnb);
}
static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return snprintf(buf, PAGE_SIZE, "vddnb\n");
}
static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
struct drm_device *ddev = adev->ddev;
struct pp_gpu_power query = {0};
int r, size = sizeof(query);
unsigned uw;
/* Can't get power when the card is off */
if ((adev->flags & AMD_IS_PX) &&
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
/* sanity check PP is enabled */
if (!(adev->powerplay.pp_funcs &&
adev->powerplay.pp_funcs->read_sensor))
return -EINVAL;
/* get the power */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
(void *)&query, &size);
if (r)
return r;
/* convert to microwatts */
uw = (query.average_gpu_power >> 8) * 1000000;
return snprintf(buf, PAGE_SIZE, "%u\n", uw);
}
static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%i\n", 0);
}
static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
uint32_t limit = 0;
if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
} else {
return snprintf(buf, PAGE_SIZE, "\n");
}
}
static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
uint32_t limit = 0;
if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
} else {
return snprintf(buf, PAGE_SIZE, "\n");
}
}
static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
int err;
u32 value;
err = kstrtou32(buf, 10, &value);
if (err)
return err;
value = value / 1000000; /* convert to Watt */
if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit) {
err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);
if (err)
return err;
} else {
return -EINVAL;
}
return count;
}
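The new hwmon callbacks follow the standard hwmon sysfs units: in*_input reports millivolts, while power1_average and the power1_cap* files use microwatts (the store path above divides by 1000000 to hand watts to the SMU). A sketch of a userspace reader/writer, assuming the device registered as hwmon0:

#include <stdio.h>

int main(void)
{
	const char *base = "/sys/class/hwmon/hwmon0";
	char path[256];
	unsigned int mv = 0, uw = 0;
	FILE *f;

	/* GPU core voltage in millivolts (in0_input / vddgfx) */
	snprintf(path, sizeof(path), "%s/in0_input", base);
	f = fopen(path, "r");
	if (f) {
		if (fscanf(f, "%u", &mv) == 1)
			printf("vddgfx: %u mV\n", mv);
		fclose(f);
	}

	/* average GPU power in microwatts */
	snprintf(path, sizeof(path), "%s/power1_average", base);
	f = fopen(path, "r");
	if (f) {
		if (fscanf(f, "%u", &uw) == 1)
			printf("power: %u.%06u W\n", uw / 1000000, uw % 1000000);
		fclose(f);
	}

	/* request a 150 W cap; the driver converts back to watts */
	snprintf(path, sizeof(path), "%s/power1_cap", base);
	f = fopen(path, "w");
	if (f) {
		fprintf(f, "%u\n", 150 * 1000000);
		fclose(f);
	}
	return 0;
}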
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0); static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0); static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1); static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
@ -927,6 +1278,14 @@ static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0); static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0); static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0); static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
static struct attribute *hwmon_attributes[] = { static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp1_input.dev_attr.attr,
@ -937,6 +1296,14 @@ static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_pwm1_min.dev_attr.attr, &sensor_dev_attr_pwm1_min.dev_attr.attr,
&sensor_dev_attr_pwm1_max.dev_attr.attr, &sensor_dev_attr_pwm1_max.dev_attr.attr,
&sensor_dev_attr_fan1_input.dev_attr.attr, &sensor_dev_attr_fan1_input.dev_attr.attr,
&sensor_dev_attr_in0_input.dev_attr.attr,
&sensor_dev_attr_in0_label.dev_attr.attr,
&sensor_dev_attr_in1_input.dev_attr.attr,
&sensor_dev_attr_in1_label.dev_attr.attr,
&sensor_dev_attr_power1_average.dev_attr.attr,
&sensor_dev_attr_power1_cap_max.dev_attr.attr,
&sensor_dev_attr_power1_cap_min.dev_attr.attr,
&sensor_dev_attr_power1_cap.dev_attr.attr,
NULL NULL
}; };
@ -947,9 +1314,19 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
struct amdgpu_device *adev = dev_get_drvdata(dev); struct amdgpu_device *adev = dev_get_drvdata(dev);
umode_t effective_mode = attr->mode; umode_t effective_mode = attr->mode;
/* no skipping for powerplay */ /* handle non-powerplay limitations */
if (adev->powerplay.cgs_device) if (!adev->powerplay.cgs_device) {
return effective_mode; /* Skip fan attributes if fan is not present */
if (adev->pm.no_fan &&
(attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
return 0;
/* requires powerplay */
if (attr == &sensor_dev_attr_fan1_input.dev_attr.attr)
return 0;
}
/* Skip limit attributes if DPM is not enabled */ /* Skip limit attributes if DPM is not enabled */
if (!adev->pm.dpm_enabled && if (!adev->pm.dpm_enabled &&
@ -961,14 +1338,6 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_pwm1_min.dev_attr.attr)) attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
return 0; return 0;
/* Skip fan attributes if fan is not present */
if (adev->pm.no_fan &&
(attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
return 0;
/* mask fan attributes if we have no bindings for this asic to expose */ /* mask fan attributes if we have no bindings for this asic to expose */
if ((!adev->powerplay.pp_funcs->get_fan_speed_percent && if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */ attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
@ -982,6 +1351,12 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */ attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
effective_mode &= ~S_IWUSR; effective_mode &= ~S_IWUSR;
if ((adev->flags & AMD_IS_APU) &&
(attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
return 0;
/* hide max/min values if we can't both query and manage the fan */ /* hide max/min values if we can't both query and manage the fan */
if ((!adev->powerplay.pp_funcs->set_fan_speed_percent && if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
!adev->powerplay.pp_funcs->get_fan_speed_percent) && !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
@ -989,8 +1364,10 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_pwm1_min.dev_attr.attr)) attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
return 0; return 0;
/* requires powerplay */ /* only APUs have vddnb */
if (attr == &sensor_dev_attr_fan1_input.dev_attr.attr) if (!(adev->flags & AMD_IS_APU) &&
(attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
attr == &sensor_dev_attr_in1_label.dev_attr.attr))
return 0; return 0;
return effective_mode; return effective_mode;
@ -1013,13 +1390,15 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
pm.dpm.thermal.work); pm.dpm.thermal.work);
/* switch to the thermal state */ /* switch to the thermal state */
enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL; enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
int temp, size = sizeof(temp);
if (!adev->pm.dpm_enabled) if (!adev->pm.dpm_enabled)
return; return;
if (adev->powerplay.pp_funcs->get_temperature) { if (adev->powerplay.pp_funcs &&
int temp = amdgpu_dpm_get_temperature(adev); adev->powerplay.pp_funcs->read_sensor &&
!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
(void *)&temp, &size)) {
if (temp < adev->pm.dpm.thermal.min_temp) if (temp < adev->pm.dpm.thermal.min_temp)
/* switch back the user state */ /* switch back the user state */
dpm_state = adev->pm.dpm.user_state; dpm_state = adev->pm.dpm.user_state;
@ -1319,9 +1698,6 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
if (adev->pm.dpm_enabled == 0) if (adev->pm.dpm_enabled == 0)
return 0; return 0;
if (adev->powerplay.pp_funcs->get_temperature == NULL)
return 0;
adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev, adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
DRIVER_NAME, adev, DRIVER_NAME, adev,
hwmon_groups); hwmon_groups);
@ -1405,6 +1781,20 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
return ret; return ret;
} }
ret = device_create_file(adev->dev,
&dev_attr_pp_power_profile_mode);
if (ret) {
DRM_ERROR("failed to create device file "
"pp_power_profile_mode\n");
return ret;
}
ret = device_create_file(adev->dev,
&dev_attr_pp_od_clk_voltage);
if (ret) {
DRM_ERROR("failed to create device file "
"pp_od_clk_voltage\n");
return ret;
}
ret = amdgpu_debugfs_pm_init(adev); ret = amdgpu_debugfs_pm_init(adev);
if (ret) { if (ret) {
DRM_ERROR("Failed to register debugfs file for dpm!\n"); DRM_ERROR("Failed to register debugfs file for dpm!\n");
@ -1440,6 +1830,10 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
&dev_attr_pp_gfx_power_profile); &dev_attr_pp_gfx_power_profile);
device_remove_file(adev->dev, device_remove_file(adev->dev,
&dev_attr_pp_compute_power_profile); &dev_attr_pp_compute_power_profile);
device_remove_file(adev->dev,
&dev_attr_pp_power_profile_mode);
device_remove_file(adev->dev,
&dev_attr_pp_od_clk_voltage);
} }
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
@ -1462,7 +1856,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
} }
if (adev->powerplay.pp_funcs->dispatch_tasks) { if (adev->powerplay.pp_funcs->dispatch_tasks) {
amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL, NULL); amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
} else { } else {
mutex_lock(&adev->pm.mutex); mutex_lock(&adev->pm.mutex);
adev->pm.dpm.new_active_crtcs = 0; adev->pm.dpm.new_active_crtcs = 0;
@ -1512,6 +1906,10 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
seq_printf(m, "\t%u MHz (MCLK)\n", value/100); seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size)) if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
seq_printf(m, "\t%u MHz (SCLK)\n", value/100); seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100);
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100);
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size)) if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
seq_printf(m, "\t%u mV (VDDGFX)\n", value); seq_printf(m, "\t%u mV (VDDGFX)\n", value);
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size)) if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
@ -26,9 +26,12 @@
#include <drm/drmP.h> #include <drm/drmP.h>
#include "amdgpu.h" #include "amdgpu.h"
#include "amdgpu_display.h"
#include <drm/amdgpu_drm.h> #include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h> #include <linux/dma-buf.h>
static const struct dma_buf_ops amdgpu_dmabuf_ops;
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj) struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
{ {
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
@ -103,7 +106,7 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
ww_mutex_lock(&resv->lock, NULL); ww_mutex_lock(&resv->lock, NULL);
ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false, ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false,
AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, 0, &bo); AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
ww_mutex_unlock(&resv->lock); ww_mutex_unlock(&resv->lock);
if (ret) if (ret)
return ERR_PTR(ret); return ERR_PTR(ret);
@ -112,49 +115,72 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
return &bo->gem_base; return &bo->gem_base;
} }
int amdgpu_gem_prime_pin(struct drm_gem_object *obj) static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
struct device *target_dev,
struct dma_buf_attachment *attach)
{ {
struct drm_gem_object *obj = dma_buf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
long ret = 0; long r;
ret = amdgpu_bo_reserve(bo, false); r = drm_gem_map_attach(dma_buf, target_dev, attach);
if (unlikely(ret != 0)) if (r)
return ret; return r;
/* r = amdgpu_bo_reserve(bo, false);
* Wait for all shared fences to complete before we switch to future if (unlikely(r != 0))
* use of exclusive fence on this prime shared bo. goto error_detach;
*/
ret = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
MAX_SCHEDULE_TIMEOUT); if (dma_buf->ops != &amdgpu_dmabuf_ops) {
if (unlikely(ret < 0)) { /*
DRM_DEBUG_PRIME("Fence wait failed: %li\n", ret); * Wait for all shared fences to complete before we switch to future
amdgpu_bo_unreserve(bo); * use of exclusive fence on this prime shared bo.
return ret; */
r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
true, false,
MAX_SCHEDULE_TIMEOUT);
if (unlikely(r < 0)) {
DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
goto error_unreserve;
}
} }
/* pin buffer into GTT */ /* pin buffer into GTT */
ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL); r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
if (likely(ret == 0)) if (r)
goto error_unreserve;
if (dma_buf->ops != &amdgpu_dmabuf_ops)
bo->prime_shared_count++; bo->prime_shared_count++;
error_unreserve:
amdgpu_bo_unreserve(bo); amdgpu_bo_unreserve(bo);
return ret;
error_detach:
if (r)
drm_gem_map_detach(dma_buf, attach);
return r;
} }
void amdgpu_gem_prime_unpin(struct drm_gem_object *obj) static void amdgpu_gem_map_detach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach)
{ {
struct drm_gem_object *obj = dma_buf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
int ret = 0; int ret = 0;
ret = amdgpu_bo_reserve(bo, true); ret = amdgpu_bo_reserve(bo, true);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return; goto error;
amdgpu_bo_unpin(bo); amdgpu_bo_unpin(bo);
if (bo->prime_shared_count) if (dma_buf->ops != &amdgpu_dmabuf_ops && bo->prime_shared_count)
bo->prime_shared_count--; bo->prime_shared_count--;
amdgpu_bo_unreserve(bo); amdgpu_bo_unreserve(bo);
error:
drm_gem_map_detach(dma_buf, attach);
} }
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj) struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
@ -164,6 +190,50 @@ struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
return bo->tbo.resv; return bo->tbo.resv;
} }
static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
enum dma_data_direction direction)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct ttm_operation_ctx ctx = { true, false };
u32 domain = amdgpu_display_framebuffer_domains(adev);
int ret;
bool reads = (direction == DMA_BIDIRECTIONAL ||
direction == DMA_FROM_DEVICE);
if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
return 0;
/* move to gtt */
ret = amdgpu_bo_reserve(bo, false);
if (unlikely(ret != 0))
return ret;
if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}
amdgpu_bo_unreserve(bo);
return ret;
}
static const struct dma_buf_ops amdgpu_dmabuf_ops = {
.attach = amdgpu_gem_map_attach,
.detach = amdgpu_gem_map_detach,
.map_dma_buf = drm_gem_map_dma_buf,
.unmap_dma_buf = drm_gem_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
.begin_cpu_access = amdgpu_gem_begin_cpu_access,
.map = drm_gem_dmabuf_kmap,
.map_atomic = drm_gem_dmabuf_kmap_atomic,
.unmap = drm_gem_dmabuf_kunmap,
.unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
.mmap = drm_gem_dmabuf_mmap,
.vmap = drm_gem_dmabuf_vmap,
.vunmap = drm_gem_dmabuf_vunmap,
};
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev, struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gobj, struct drm_gem_object *gobj,
int flags) int flags)
@ -176,7 +246,30 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
return ERR_PTR(-EPERM); return ERR_PTR(-EPERM);
buf = drm_gem_prime_export(dev, gobj, flags); buf = drm_gem_prime_export(dev, gobj, flags);
if (!IS_ERR(buf)) if (!IS_ERR(buf)) {
buf->file->f_mapping = dev->anon_inode->i_mapping; buf->file->f_mapping = dev->anon_inode->i_mapping;
buf->ops = &amdgpu_dmabuf_ops;
}
return buf; return buf;
} }
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf)
{
struct drm_gem_object *obj;
if (dma_buf->ops == &amdgpu_dmabuf_ops) {
obj = dma_buf->priv;
if (obj->dev == dev) {
/*
* Importing dmabuf exported from our own gem increases
* refcount on gem itself instead of f_count of dmabuf.
*/
drm_gem_object_get(obj);
return obj;
}
}
return drm_gem_prime_import(dev, dma_buf);
}
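The import hook above short-circuits self-imports: when the dma-buf already carries amdgpu_dmabuf_ops and belongs to the same device, the existing GEM object is referenced instead of wrapping its sg-table a second time. A hypothetical libdrm round trip that exercises this path (error handling trimmed; the helpers are libdrm's standard PRIME wrappers):

#include <stdint.h>
#include <xf86drm.h>

int prime_roundtrip(int drm_fd, uint32_t handle)
{
	int dmabuf_fd = -1;
	uint32_t handle2 = 0;

	/* export: GEM handle -> dma-buf fd (via amdgpu_gem_prime_export) */
	if (drmPrimeHandleToFD(drm_fd, handle, DRM_CLOEXEC, &dmabuf_fd))
		return -1;

	/* import on the same device: the driver spots its own ops table
	 * and returns the original object, so the handle is deduplicated */
	if (drmPrimeFDToHandle(drm_fd, dmabuf_fd, &handle2))
		return -1;

	return handle2 == handle ? 0 : 1;
}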
@ -51,29 +51,10 @@ static int psp_sw_init(void *handle)
switch (adev->asic_type) { switch (adev->asic_type) {
case CHIP_VEGA10: case CHIP_VEGA10:
psp->init_microcode = psp_v3_1_init_microcode; psp_v3_1_set_psp_funcs(psp);
psp->bootloader_load_sysdrv = psp_v3_1_bootloader_load_sysdrv;
psp->bootloader_load_sos = psp_v3_1_bootloader_load_sos;
psp->prep_cmd_buf = psp_v3_1_prep_cmd_buf;
psp->ring_init = psp_v3_1_ring_init;
psp->ring_create = psp_v3_1_ring_create;
psp->ring_stop = psp_v3_1_ring_stop;
psp->ring_destroy = psp_v3_1_ring_destroy;
psp->cmd_submit = psp_v3_1_cmd_submit;
psp->compare_sram_data = psp_v3_1_compare_sram_data;
psp->smu_reload_quirk = psp_v3_1_smu_reload_quirk;
psp->mode1_reset = psp_v3_1_mode1_reset;
break; break;
case CHIP_RAVEN: case CHIP_RAVEN:
psp->init_microcode = psp_v10_0_init_microcode; psp_v10_0_set_psp_funcs(psp);
psp->prep_cmd_buf = psp_v10_0_prep_cmd_buf;
psp->ring_init = psp_v10_0_ring_init;
psp->ring_create = psp_v10_0_ring_create;
psp->ring_stop = psp_v10_0_ring_stop;
psp->ring_destroy = psp_v10_0_ring_destroy;
psp->cmd_submit = psp_v10_0_cmd_submit;
psp->compare_sram_data = psp_v10_0_compare_sram_data;
psp->mode1_reset = psp_v10_0_mode1_reset;
break; break;
default: default:
return -EINVAL; return -EINVAL;
@ -512,19 +493,8 @@ static int psp_resume(void *handle)
return ret; return ret;
} }
static bool psp_check_reset(void* handle) int psp_gpu_reset(struct amdgpu_device *adev)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->flags & AMD_IS_APU)
return true;
return false;
}
static int psp_reset(void* handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return psp_mode1_reset(&adev->psp); return psp_mode1_reset(&adev->psp);
} }
@ -571,9 +541,9 @@ const struct amd_ip_funcs psp_ip_funcs = {
.suspend = psp_suspend, .suspend = psp_suspend,
.resume = psp_resume, .resume = psp_resume,
.is_idle = NULL, .is_idle = NULL,
.check_soft_reset = psp_check_reset, .check_soft_reset = NULL,
.wait_for_idle = NULL, .wait_for_idle = NULL,
.soft_reset = psp_reset, .soft_reset = NULL,
.set_clockgating_state = psp_set_clockgating_state, .set_clockgating_state = psp_set_clockgating_state,
.set_powergating_state = psp_set_powergating_state, .set_powergating_state = psp_set_powergating_state,
}; };
@ -33,6 +33,8 @@
#define PSP_ASD_SHARED_MEM_SIZE 0x4000 #define PSP_ASD_SHARED_MEM_SIZE 0x4000
#define PSP_1_MEG 0x100000 #define PSP_1_MEG 0x100000
struct psp_context;
enum psp_ring_type enum psp_ring_type
{ {
PSP_RING_TYPE__INVALID = 0, PSP_RING_TYPE__INVALID = 0,
@ -53,12 +55,8 @@ struct psp_ring
uint32_t ring_size; uint32_t ring_size;
}; };
struct psp_context struct psp_funcs
{ {
struct amdgpu_device *adev;
struct psp_ring km_ring;
struct psp_gfx_cmd_resp *cmd;
int (*init_microcode)(struct psp_context *psp); int (*init_microcode)(struct psp_context *psp);
int (*bootloader_load_sysdrv)(struct psp_context *psp); int (*bootloader_load_sysdrv)(struct psp_context *psp);
int (*bootloader_load_sos)(struct psp_context *psp); int (*bootloader_load_sos)(struct psp_context *psp);
@ -77,6 +75,15 @@ struct psp_context
enum AMDGPU_UCODE_ID ucode_type); enum AMDGPU_UCODE_ID ucode_type);
bool (*smu_reload_quirk)(struct psp_context *psp); bool (*smu_reload_quirk)(struct psp_context *psp);
int (*mode1_reset)(struct psp_context *psp); int (*mode1_reset)(struct psp_context *psp);
};
struct psp_context
{
struct amdgpu_device *adev;
struct psp_ring km_ring;
struct psp_gfx_cmd_resp *cmd;
const struct psp_funcs *funcs;
/* fence buffer */ /* fence buffer */
struct amdgpu_bo *fw_pri_bo; struct amdgpu_bo *fw_pri_bo;
@ -123,25 +130,25 @@ struct amdgpu_psp_funcs {
enum AMDGPU_UCODE_ID); enum AMDGPU_UCODE_ID);
}; };
#define psp_prep_cmd_buf(ucode, type) (psp)->prep_cmd_buf((ucode), (type)) #define psp_prep_cmd_buf(ucode, type) (psp)->funcs->prep_cmd_buf((ucode), (type))
#define psp_ring_init(psp, type) (psp)->ring_init((psp), (type)) #define psp_ring_init(psp, type) (psp)->funcs->ring_init((psp), (type))
#define psp_ring_create(psp, type) (psp)->ring_create((psp), (type)) #define psp_ring_create(psp, type) (psp)->funcs->ring_create((psp), (type))
#define psp_ring_stop(psp, type) (psp)->ring_stop((psp), (type)) #define psp_ring_stop(psp, type) (psp)->funcs->ring_stop((psp), (type))
#define psp_ring_destroy(psp, type) ((psp)->ring_destroy((psp), (type))) #define psp_ring_destroy(psp, type) ((psp)->funcs->ring_destroy((psp), (type)))
#define psp_cmd_submit(psp, ucode, cmd_mc, fence_mc, index) \ #define psp_cmd_submit(psp, ucode, cmd_mc, fence_mc, index) \
(psp)->cmd_submit((psp), (ucode), (cmd_mc), (fence_mc), (index)) (psp)->funcs->cmd_submit((psp), (ucode), (cmd_mc), (fence_mc), (index))
#define psp_compare_sram_data(psp, ucode, type) \ #define psp_compare_sram_data(psp, ucode, type) \
(psp)->compare_sram_data((psp), (ucode), (type)) (psp)->funcs->compare_sram_data((psp), (ucode), (type))
#define psp_init_microcode(psp) \ #define psp_init_microcode(psp) \
((psp)->init_microcode ? (psp)->init_microcode((psp)) : 0) ((psp)->funcs->init_microcode ? (psp)->funcs->init_microcode((psp)) : 0)
#define psp_bootloader_load_sysdrv(psp) \ #define psp_bootloader_load_sysdrv(psp) \
((psp)->bootloader_load_sysdrv ? (psp)->bootloader_load_sysdrv((psp)) : 0) ((psp)->funcs->bootloader_load_sysdrv ? (psp)->funcs->bootloader_load_sysdrv((psp)) : 0)
#define psp_bootloader_load_sos(psp) \ #define psp_bootloader_load_sos(psp) \
((psp)->bootloader_load_sos ? (psp)->bootloader_load_sos((psp)) : 0) ((psp)->funcs->bootloader_load_sos ? (psp)->funcs->bootloader_load_sos((psp)) : 0)
#define psp_smu_reload_quirk(psp) \ #define psp_smu_reload_quirk(psp) \
((psp)->smu_reload_quirk ? (psp)->smu_reload_quirk((psp)) : false) ((psp)->funcs->smu_reload_quirk ? (psp)->funcs->smu_reload_quirk((psp)) : false)
#define psp_mode1_reset(psp) \ #define psp_mode1_reset(psp) \
((psp)->mode1_reset ? (psp)->mode1_reset((psp)) : false) ((psp)->funcs->mode1_reset ? (psp)->funcs->mode1_reset((psp)) : false)
extern const struct amd_ip_funcs psp_ip_funcs; extern const struct amd_ip_funcs psp_ip_funcs;
@ -151,4 +158,6 @@ extern int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
extern const struct amdgpu_ip_block_version psp_v10_0_ip_block; extern const struct amdgpu_ip_block_version psp_v10_0_ip_block;
int psp_gpu_reset(struct amdgpu_device *adev);
#endif #endif
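The psp rework above is the standard kernel vtable refactor: per-ASIC callback assignments collapse into a const struct of function pointers plus one *_set_psp_funcs() hook per chip, with call sites hidden behind the macros. A minimal standalone sketch of the pattern, using placeholder names rather than real psp symbols:

struct ctx;

struct funcs {
	int (*ring_init)(struct ctx *c);
};

struct ctx {
	const struct funcs *funcs;
};

/* per-chip implementation, normally in its own file */
static int chip_a_ring_init(struct ctx *c)
{
	(void)c;
	return 0;
}

static const struct funcs chip_a_funcs = {
	.ring_init = chip_a_ring_init,
};

static void chip_a_set_funcs(struct ctx *c)
{
	c->funcs = &chip_a_funcs;
}

/* call-site macro in the style of psp_init_microcode() above */
#define ctx_ring_init(c) \
	((c)->funcs->ring_init ? (c)->funcs->ring_init(c) : 0)

int demo(void)
{
	struct ctx c = { 0 };

	chip_a_set_funcs(&c);
	return ctx_ring_init(&c);
}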
@ -360,6 +360,9 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
amdgpu_debugfs_ring_fini(ring); amdgpu_debugfs_ring_fini(ring);
dma_fence_put(ring->vmid_wait);
ring->vmid_wait = NULL;
ring->adev->rings[ring->idx] = NULL; ring->adev->rings[ring->idx] = NULL;
} }
@ -128,7 +128,6 @@ struct amdgpu_ring_funcs {
void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vmid, void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vmid,
uint64_t pd_addr); uint64_t pd_addr);
void (*emit_hdp_flush)(struct amdgpu_ring *ring); void (*emit_hdp_flush)(struct amdgpu_ring *ring);
void (*emit_hdp_invalidate)(struct amdgpu_ring *ring);
void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid, void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
uint32_t gds_base, uint32_t gds_size, uint32_t gds_base, uint32_t gds_size,
uint32_t gws_base, uint32_t gws_size, uint32_t gws_base, uint32_t gws_size,
@ -151,6 +150,8 @@ struct amdgpu_ring_funcs {
void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags); void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg); void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg);
void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val); void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
void (*emit_reg_wait)(struct amdgpu_ring *ring, uint32_t reg,
uint32_t val, uint32_t mask);
void (*emit_tmz)(struct amdgpu_ring *ring, bool start); void (*emit_tmz)(struct amdgpu_ring *ring, bool start);
/* priority functions */ /* priority functions */
void (*set_priority) (struct amdgpu_ring *ring, void (*set_priority) (struct amdgpu_ring *ring,
@ -195,6 +196,7 @@ struct amdgpu_ring {
u64 cond_exe_gpu_addr; u64 cond_exe_gpu_addr;
volatile u32 *cond_exe_cpu_addr; volatile u32 *cond_exe_cpu_addr;
unsigned vm_inv_eng; unsigned vm_inv_eng;
struct dma_fence *vmid_wait;
bool has_compute_vm_bug; bool has_compute_vm_bug;
atomic_t num_jobs[DRM_SCHED_PRIORITY_MAX]; atomic_t num_jobs[DRM_SCHED_PRIORITY_MAX];
@ -64,7 +64,7 @@ int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
INIT_LIST_HEAD(&sa_manager->flist[i]); INIT_LIST_HEAD(&sa_manager->flist[i]);
r = amdgpu_bo_create(adev, size, align, true, domain, r = amdgpu_bo_create(adev, size, align, true, domain,
0, NULL, NULL, 0, &sa_manager->bo); 0, NULL, NULL, &sa_manager->bo);
if (r) { if (r) {
dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r); dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
return r; return r;
@ -42,7 +42,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
/* Number of tests = /* Number of tests =
* (Total GTT - IB pool - writeback page - ring buffers) / test size * (Total GTT - IB pool - writeback page - ring buffers) / test size
*/ */
n = adev->mc.gart_size - AMDGPU_IB_POOL_SIZE*64*1024; n = adev->gmc.gart_size - AMDGPU_IB_POOL_SIZE*64*1024;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
if (adev->rings[i]) if (adev->rings[i])
n -= adev->rings[i]->ring_size; n -= adev->rings[i]->ring_size;
@ -61,7 +61,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM, 0, AMDGPU_GEM_DOMAIN_VRAM, 0,
NULL, NULL, 0, &vram_obj); NULL, NULL, &vram_obj);
if (r) { if (r) {
DRM_ERROR("Failed to create VRAM object\n"); DRM_ERROR("Failed to create VRAM object\n");
goto out_cleanup; goto out_cleanup;
@ -82,7 +82,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_GTT, 0, NULL, AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
NULL, 0, gtt_obj + i); NULL, gtt_obj + i);
if (r) { if (r) {
DRM_ERROR("Failed to create GTT object %d\n", i); DRM_ERROR("Failed to create GTT object %d\n", i);
goto out_lclean; goto out_lclean;
@ -142,10 +142,10 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
"0x%16llx/0x%16llx)\n", "0x%16llx/0x%16llx)\n",
i, *vram_start, gart_start, i, *vram_start, gart_start,
(unsigned long long) (unsigned long long)
(gart_addr - adev->mc.gart_start + (gart_addr - adev->gmc.gart_start +
(void*)gart_start - gtt_map), (void*)gart_start - gtt_map),
(unsigned long long) (unsigned long long)
(vram_addr - adev->mc.vram_start + (vram_addr - adev->gmc.vram_start +
(void*)gart_start - gtt_map)); (void*)gart_start - gtt_map));
amdgpu_bo_kunmap(vram_obj); amdgpu_bo_kunmap(vram_obj);
goto out_lclean_unpin; goto out_lclean_unpin;
@ -187,10 +187,10 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
"0x%16llx/0x%16llx)\n", "0x%16llx/0x%16llx)\n",
i, *gart_start, vram_start, i, *gart_start, vram_start,
(unsigned long long) (unsigned long long)
(vram_addr - adev->mc.vram_start + (vram_addr - adev->gmc.vram_start +
(void*)vram_start - vram_map), (void*)vram_start - vram_map),
(unsigned long long) (unsigned long long)
(gart_addr - adev->mc.gart_start + (gart_addr - adev->gmc.gart_start +
(void*)vram_start - vram_map)); (void*)vram_start - vram_map));
amdgpu_bo_kunmap(gtt_obj[i]); amdgpu_bo_kunmap(gtt_obj[i]);
goto out_lclean_unpin; goto out_lclean_unpin;
@ -200,7 +200,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
amdgpu_bo_kunmap(gtt_obj[i]); amdgpu_bo_kunmap(gtt_obj[i]);
DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n", DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
gart_addr - adev->mc.gart_start); gart_addr - adev->gmc.gart_start);
continue; continue;
out_lclean_unpin: out_lclean_unpin:
@ -86,7 +86,7 @@ TRACE_EVENT(amdgpu_iv,
__field(unsigned, vmid_src) __field(unsigned, vmid_src)
__field(uint64_t, timestamp) __field(uint64_t, timestamp)
__field(unsigned, timestamp_src) __field(unsigned, timestamp_src)
__field(unsigned, pas_id) __field(unsigned, pasid)
__array(unsigned, src_data, 4) __array(unsigned, src_data, 4)
), ),
TP_fast_assign( TP_fast_assign(
@ -97,16 +97,16 @@ TRACE_EVENT(amdgpu_iv,
__entry->vmid_src = iv->vmid_src; __entry->vmid_src = iv->vmid_src;
__entry->timestamp = iv->timestamp; __entry->timestamp = iv->timestamp;
__entry->timestamp_src = iv->timestamp_src; __entry->timestamp_src = iv->timestamp_src;
__entry->pas_id = iv->pas_id; __entry->pasid = iv->pasid;
__entry->src_data[0] = iv->src_data[0]; __entry->src_data[0] = iv->src_data[0];
__entry->src_data[1] = iv->src_data[1]; __entry->src_data[1] = iv->src_data[1];
__entry->src_data[2] = iv->src_data[2]; __entry->src_data[2] = iv->src_data[2];
__entry->src_data[3] = iv->src_data[3]; __entry->src_data[3] = iv->src_data[3];
), ),
TP_printk("client_id:%u src_id:%u ring:%u vmid:%u timestamp: %llu pas_id:%u src_data: %08x %08x %08x %08x\n", TP_printk("client_id:%u src_id:%u ring:%u vmid:%u timestamp: %llu pasid:%u src_data: %08x %08x %08x %08x\n",
__entry->client_id, __entry->src_id, __entry->client_id, __entry->src_id,
__entry->ring_id, __entry->vmid, __entry->ring_id, __entry->vmid,
__entry->timestamp, __entry->pas_id, __entry->timestamp, __entry->pasid,
__entry->src_data[0], __entry->src_data[1], __entry->src_data[0], __entry->src_data[1],
__entry->src_data[2], __entry->src_data[3]) __entry->src_data[2], __entry->src_data[3])
); );
@ -217,7 +217,7 @@ TRACE_EVENT(amdgpu_vm_grab_id,
struct amdgpu_job *job), struct amdgpu_job *job),
TP_ARGS(vm, ring, job), TP_ARGS(vm, ring, job),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(struct amdgpu_vm *, vm) __field(u32, pasid)
__field(u32, ring) __field(u32, ring)
__field(u32, vmid) __field(u32, vmid)
__field(u32, vm_hub) __field(u32, vm_hub)
@ -226,15 +226,15 @@ TRACE_EVENT(amdgpu_vm_grab_id,
), ),
TP_fast_assign( TP_fast_assign(
__entry->vm = vm; __entry->pasid = vm->pasid;
__entry->ring = ring->idx; __entry->ring = ring->idx;
__entry->vmid = job->vmid; __entry->vmid = job->vmid;
__entry->vm_hub = ring->funcs->vmhub, __entry->vm_hub = ring->funcs->vmhub,
__entry->pd_addr = job->vm_pd_addr; __entry->pd_addr = job->vm_pd_addr;
__entry->needs_flush = job->vm_needs_flush; __entry->needs_flush = job->vm_needs_flush;
), ),
TP_printk("vm=%p, ring=%u, id=%u, hub=%u, pd_addr=%010Lx needs_flush=%u", TP_printk("pasid=%d, ring=%u, id=%u, hub=%u, pd_addr=%010Lx needs_flush=%u",
__entry->vm, __entry->ring, __entry->vmid, __entry->pasid, __entry->ring, __entry->vmid,
__entry->vm_hub, __entry->pd_addr, __entry->needs_flush) __entry->vm_hub, __entry->pd_addr, __entry->needs_flush)
); );
@ -378,6 +378,28 @@ TRACE_EVENT(amdgpu_vm_flush,
__entry->vm_hub,__entry->pd_addr) __entry->vm_hub,__entry->pd_addr)
); );
DECLARE_EVENT_CLASS(amdgpu_pasid,
TP_PROTO(unsigned pasid),
TP_ARGS(pasid),
TP_STRUCT__entry(
__field(unsigned, pasid)
),
TP_fast_assign(
__entry->pasid = pasid;
),
TP_printk("pasid=%u", __entry->pasid)
);
DEFINE_EVENT(amdgpu_pasid, amdgpu_pasid_allocated,
TP_PROTO(unsigned pasid),
TP_ARGS(pasid)
);
DEFINE_EVENT(amdgpu_pasid, amdgpu_pasid_freed,
TP_PROTO(unsigned pasid),
TP_ARGS(pasid)
);
TRACE_EVENT(amdgpu_bo_list_set, TRACE_EVENT(amdgpu_bo_list_set,
TP_PROTO(struct amdgpu_bo_list *list, struct amdgpu_bo *bo), TP_PROTO(struct amdgpu_bo_list *list, struct amdgpu_bo *bo),
TP_ARGS(list, bo), TP_ARGS(list, bo),
@@ -161,7 +161,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		break;
 	case TTM_PL_TT:
 		man->func = &amdgpu_gtt_mgr_func;
-		man->gpu_offset = adev->mc.gart_start;
+		man->gpu_offset = adev->gmc.gart_start;
 		man->available_caching = TTM_PL_MASK_CACHING;
 		man->default_caching = TTM_PL_FLAG_CACHED;
 		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
@@ -169,7 +169,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 	case TTM_PL_VRAM:
 		/* "On-card" video ram */
 		man->func = &amdgpu_vram_mgr_func;
-		man->gpu_offset = adev->mc.vram_start;
+		man->gpu_offset = adev->gmc.vram_start;
 		man->flags = TTM_MEMTYPE_FLAG_FIXED |
 			     TTM_MEMTYPE_FLAG_MAPPABLE;
 		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
@@ -217,9 +217,9 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 	    adev->mman.buffer_funcs_ring &&
 	    adev->mman.buffer_funcs_ring->ready == false) {
 		amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
-	} else if (adev->mc.visible_vram_size < adev->mc.real_vram_size &&
+	} else if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size &&
 		   !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
-		unsigned fpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
+		unsigned fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
 		struct drm_mm_node *node = bo->mem.mm_node;
 		unsigned long pages_left;
@@ -638,9 +638,9 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
 	case TTM_PL_VRAM:
 		mem->bus.offset = mem->start << PAGE_SHIFT;
 		/* check if it's visible */
-		if ((mem->bus.offset + mem->bus.size) > adev->mc.visible_vram_size)
+		if ((mem->bus.offset + mem->bus.size) > adev->gmc.visible_vram_size)
 			return -EINVAL;
-		mem->bus.base = adev->mc.aper_base;
+		mem->bus.base = adev->gmc.aper_base;
 		mem->bus.is_iomem = true;
 		break;
 	default:
@@ -891,7 +891,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
 	placement.num_busy_placement = 1;
 	placement.busy_placement = &placements;
 	placements.fpfn = 0;
-	placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT;
+	placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
 	placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
 		TTM_PL_FLAG_TT;
@@ -997,9 +997,6 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
 
-	if (ttm->state != tt_unpopulated)
-		return 0;
-
 	if (gtt && gtt->userptr) {
 		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
 		if (!ttm->sg)
@@ -1212,7 +1209,7 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
 	nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset);
 	pos = (nodes->start << PAGE_SHIFT) + offset;
 
-	while (len && pos < adev->mc.mc_vram_size) {
+	while (len && pos < adev->gmc.mc_vram_size) {
 		uint64_t aligned_pos = pos & ~(uint64_t)3;
 		uint32_t bytes = 4 - (pos & 3);
 		uint32_t shift = (pos & 3) * 8;
@@ -1298,7 +1295,7 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
 	struct ttm_operation_ctx ctx = { false, false };
 	int r = 0;
 	int i;
-	u64 vram_size = adev->mc.visible_vram_size;
+	u64 vram_size = adev->gmc.visible_vram_size;
 	u64 offset = adev->fw_vram_usage.start_offset;
 	u64 size = adev->fw_vram_usage.size;
 	struct amdgpu_bo *bo;
@@ -1312,7 +1309,7 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
 		r = amdgpu_bo_create(adev, adev->fw_vram_usage.size,
				     PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
-				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, NULL, NULL, 0,
+				     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, NULL, NULL,
				     &adev->fw_vram_usage.reserved_bo);
 		if (r)
 			goto error_create;
@@ -1387,8 +1384,12 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 		return r;
 	}
 	adev->mman.initialized = true;
+
+	/* We opt to avoid OOM on system pages allocations */
+	adev->mman.bdev.no_retry = true;
+
 	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
-			   adev->mc.real_vram_size >> PAGE_SHIFT);
+			   adev->gmc.real_vram_size >> PAGE_SHIFT);
 	if (r) {
 		DRM_ERROR("Failed initializing VRAM heap.\n");
 		return r;
@@ -1397,11 +1398,11 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 	/* Reduce size of CPU-visible VRAM if requested */
 	vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
 	if (amdgpu_vis_vram_limit > 0 &&
-	    vis_vram_limit <= adev->mc.visible_vram_size)
-		adev->mc.visible_vram_size = vis_vram_limit;
+	    vis_vram_limit <= adev->gmc.visible_vram_size)
+		adev->gmc.visible_vram_size = vis_vram_limit;
 
 	/* Change the size here instead of the init above so only lpfn is affected */
-	amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
+	amdgpu_ttm_set_active_vram_size(adev, adev->gmc.visible_vram_size);
 
 	/*
 	 *The reserved vram for firmware must be pinned to the specified
@@ -1412,21 +1413,21 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 		return r;
 	}
 
-	r = amdgpu_bo_create_kernel(adev, adev->mc.stolen_size, PAGE_SIZE,
+	r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &adev->stolen_vga_memory,
				    NULL, NULL);
 	if (r)
 		return r;
 	DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
-		 (unsigned) (adev->mc.real_vram_size / (1024 * 1024)));
+		 (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
 
 	if (amdgpu_gtt_size == -1) {
 		struct sysinfo si;
 
 		si_meminfo(&si);
 		gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
-			       adev->mc.mc_vram_size),
+			       adev->gmc.mc_vram_size),
			       ((uint64_t)si.totalram * si.mem_unit * 3/4));
 	}
 	else
@@ -1559,7 +1560,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
 	BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
	       AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
 
-	*addr = adev->mc.gart_start;
+	*addr = adev->gmc.gart_start;
 	*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
		AMDGPU_GPU_PAGE_SIZE;
@@ -1677,13 +1678,12 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
 }
 
 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
-		       uint64_t src_data,
+		       uint32_t src_data,
		       struct reservation_object *resv,
		       struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-	uint32_t max_bytes = 8 *
-		adev->vm_manager.vm_pte_funcs->set_max_nums_pte_pde;
+	uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
 	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
 
 	struct drm_mm_node *mm_node;
@@ -1714,9 +1714,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 		num_pages -= mm_node->size;
 		++mm_node;
 	}
-
-	/* num of dwords for each SDMA_OP_PTEPDE cmd */
-	num_dw = num_loops * adev->vm_manager.vm_pte_funcs->set_pte_pde_num_dw;
+	num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
 
 	/* for IB padding */
 	num_dw += 64;
@@ -1741,16 +1739,12 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 		uint32_t byte_count = mm_node->size << PAGE_SHIFT;
 		uint64_t dst_addr;
 
-		WARN_ONCE(byte_count & 0x7, "size should be a multiple of 8");
-
 		dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem);
 		while (byte_count) {
 			uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
 
-			amdgpu_vm_set_pte_pde(adev, &job->ibs[0],
-					      dst_addr, 0,
-					      cur_size_in_bytes >> 3, 0,
-					      src_data);
+			amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
+						dst_addr, cur_size_in_bytes);
 
 			dst_addr += cur_size_in_bytes;
 			byte_count -= cur_size_in_bytes;
@@ -1811,14 +1805,14 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
 	if (size & 0x3 || *pos & 0x3)
 		return -EINVAL;
 
-	if (*pos >= adev->mc.mc_vram_size)
+	if (*pos >= adev->gmc.mc_vram_size)
 		return -ENXIO;
 
 	while (size) {
 		unsigned long flags;
 		uint32_t value;
 
-		if (*pos >= adev->mc.mc_vram_size)
+		if (*pos >= adev->gmc.mc_vram_size)
 			return result;
 
 		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
@@ -1850,14 +1844,14 @@ static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
 	if (size & 0x3 || *pos & 0x3)
 		return -EINVAL;
 
-	if (*pos >= adev->mc.mc_vram_size)
+	if (*pos >= adev->gmc.mc_vram_size)
 		return -ENXIO;
 
 	while (size) {
 		unsigned long flags;
 		uint32_t value;
 
-		if (*pos >= adev->mc.mc_vram_size)
+		if (*pos >= adev->gmc.mc_vram_size)
 			return result;
 
 		r = get_user(value, (uint32_t *)buf);
@@ -2001,9 +1995,9 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
 		if (IS_ERR(ent))
 			return PTR_ERR(ent);
 		if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM)
-			i_size_write(ent->d_inode, adev->mc.mc_vram_size);
+			i_size_write(ent->d_inode, adev->gmc.mc_vram_size);
 		else if (ttm_debugfs_entries[count].domain == TTM_PL_TT)
-			i_size_write(ent->d_inode, adev->mc.gart_size);
+			i_size_write(ent->d_inode, adev->gmc.gart_size);
 		adev->mman.debugfs_entries[count] = ent;
 	}
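
A note on the amdgpu_fill_buffer() hunks above: fills previously piggybacked on the PTE-update SDMA packet (hence the "multiple of 8" WARN_ONCE and the >> 3 entry count) and now go through the engine's native fill packet, sized by fill_max_bytes/fill_num_dw from adev->mman.buffer_funcs. The standalone C sketch below shows only the chunking arithmetic of that loop; the 0x1fffff limit is an assumed per-packet maximum for illustration, not a value taken from this diff.

#include <stdint.h>
#include <stdio.h>

/* Sketch: splitting one fill request into per-packet chunks, as the
 * reworked amdgpu_fill_buffer() loop does. max_bytes stands in for the
 * asic-specific adev->mman.buffer_funcs->fill_max_bytes. */
static unsigned num_fill_packets(uint64_t byte_count, uint32_t max_bytes)
{
	unsigned packets = 0;

	while (byte_count) {
		uint32_t cur = byte_count < max_bytes ? byte_count : max_bytes;

		byte_count -= cur;	/* one amdgpu_emit_fill_buffer() call */
		packets++;
	}
	return packets;
}

int main(void)
{
	printf("%u packets\n", num_fill_packets(1ULL << 24, 0x1fffff));
	return 0;
}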

@@ -86,7 +86,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
			       struct reservation_object *resv,
			       struct dma_fence **f);
 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
-		       uint64_t src_data,
+		       uint32_t src_data,
		       struct reservation_object *resv,
		       struct dma_fence **fence);

@@ -952,37 +952,28 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
 static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
			       bool direct, struct dma_fence **fence)
 {
-	struct ttm_operation_ctx ctx = { true, false };
-	struct ttm_validate_buffer tv;
-	struct ww_acquire_ctx ticket;
-	struct list_head head;
+	struct amdgpu_device *adev = ring->adev;
+	struct dma_fence *f = NULL;
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
-	struct dma_fence *f = NULL;
-	struct amdgpu_device *adev = ring->adev;
-	uint64_t addr;
 	uint32_t data[4];
-	int i, r;
+	uint64_t addr;
+	long r;
+	int i;
 
-	memset(&tv, 0, sizeof(tv));
-	tv.bo = &bo->tbo;
-
-	INIT_LIST_HEAD(&head);
-	list_add(&tv.head, &head);
-
-	r = ttm_eu_reserve_buffers(&ticket, &head, true, NULL);
-	if (r)
-		return r;
+	amdgpu_bo_kunmap(bo);
+	amdgpu_bo_unpin(bo);
 
 	if (!ring->adev->uvd.address_64_bit) {
+		struct ttm_operation_ctx ctx = { true, false };
+
 		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
 		amdgpu_uvd_force_into_uvd_segment(bo);
+		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+		if (r)
+			goto err;
 	}
 
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
-	if (r)
-		goto err;
-
 	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
 	if (r)
 		goto err;
@@ -1014,6 +1005,14 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 	ib->length_dw = 16;
 
 	if (direct) {
+		r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
+							true, false,
+							msecs_to_jiffies(10));
+		if (r == 0)
+			r = -ETIMEDOUT;
+		if (r < 0)
+			goto err_free;
+
 		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
 		job->fence = dma_fence_get(f);
 		if (r)
@@ -1021,17 +1020,23 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 
 		amdgpu_job_free(job);
 	} else {
+		r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
+				     AMDGPU_FENCE_OWNER_UNDEFINED, false);
+		if (r)
+			goto err_free;
+
 		r = amdgpu_job_submit(job, ring, &adev->uvd.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
 		if (r)
 			goto err_free;
 	}
 
-	ttm_eu_fence_buffer_objects(&ticket, &head, f);
+	amdgpu_bo_fence(bo, f, false);
+	amdgpu_bo_unreserve(bo);
+	amdgpu_bo_unref(&bo);
 
 	if (fence)
 		*fence = dma_fence_get(f);
-	amdgpu_bo_unref(&bo);
 	dma_fence_put(f);
 
 	return 0;
@@ -1040,7 +1045,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 	amdgpu_job_free(job);
 
 err:
-	ttm_eu_backoff_reservation(&ticket, &head);
+	amdgpu_bo_unreserve(bo);
+	amdgpu_bo_unref(&bo);
 	return r;
 }
 
@@ -1051,31 +1057,16 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
-	struct amdgpu_bo *bo;
+	struct amdgpu_bo *bo = NULL;
 	uint32_t *msg;
 	int r, i;
 
-	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
-			     AMDGPU_GEM_DOMAIN_VRAM,
-			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
-			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
-			     NULL, NULL, 0, &bo);
+	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
+				      AMDGPU_GEM_DOMAIN_VRAM,
+				      &bo, NULL, (void **)&msg);
 	if (r)
 		return r;
 
-	r = amdgpu_bo_reserve(bo, false);
-	if (r) {
-		amdgpu_bo_unref(&bo);
-		return r;
-	}
-
-	r = amdgpu_bo_kmap(bo, (void **)&msg);
-	if (r) {
-		amdgpu_bo_unreserve(bo);
-		amdgpu_bo_unref(&bo);
-		return r;
-	}
-
 	/* stitch together an UVD create msg */
 	msg[0] = cpu_to_le32(0x00000de4);
 	msg[1] = cpu_to_le32(0x00000000);
@@ -1091,9 +1082,6 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 	for (i = 11; i < 1024; ++i)
 		msg[i] = cpu_to_le32(0x0);
 
-	amdgpu_bo_kunmap(bo);
-	amdgpu_bo_unreserve(bo);
-
 	return amdgpu_uvd_send_msg(ring, bo, true, fence);
 }
 
@@ -1101,31 +1089,16 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       bool direct, struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
-	struct amdgpu_bo *bo;
+	struct amdgpu_bo *bo = NULL;
 	uint32_t *msg;
 	int r, i;
 
-	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
-			     AMDGPU_GEM_DOMAIN_VRAM,
-			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
-			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
-			     NULL, NULL, 0, &bo);
+	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
+				      AMDGPU_GEM_DOMAIN_VRAM,
+				      &bo, NULL, (void **)&msg);
 	if (r)
 		return r;
 
-	r = amdgpu_bo_reserve(bo, false);
-	if (r) {
-		amdgpu_bo_unref(&bo);
-		return r;
-	}
-
-	r = amdgpu_bo_kmap(bo, (void **)&msg);
-	if (r) {
-		amdgpu_bo_unreserve(bo);
-		amdgpu_bo_unref(&bo);
-		return r;
-	}
-
 	/* stitch together an UVD destroy msg */
 	msg[0] = cpu_to_le32(0x00000de4);
 	msg[1] = cpu_to_le32(0x00000002);
@@ -1134,9 +1107,6 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 	for (i = 4; i < 1024; ++i)
 		msg[i] = cpu_to_le32(0x0);
 
-	amdgpu_bo_kunmap(bo);
-	amdgpu_bo_unreserve(bo);
-
 	return amdgpu_uvd_send_msg(ring, bo, direct, fence);
 }
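
For context on the new direct path above: reservation_object_wait_timeout_rcu() returns a positive number of remaining jiffies on success, 0 on timeout and a negative errno on failure, so the code folds 0 into -ETIMEDOUT before the usual error check. A minimal C sketch of that convention, with a stub in place of the real fence wait:

#include <errno.h>
#include <stdio.h>

/* stand-in for reservation_object_wait_timeout_rcu(): 0 == timed out,
 * >0 == jiffies left, <0 == error */
static long wait_fences_stub(int busy)
{
	return busy ? 0 : 1;
}

static int direct_submit(int busy)
{
	long r = wait_fences_stub(busy);

	if (r == 0)
		r = -ETIMEDOUT;
	if (r < 0)
		return (int)r;

	return 0;	/* safe to schedule the IB directly */
}

int main(void)
{
	printf("idle BO: %d\n", direct_submit(0));
	printf("busy BO: %d\n", direct_submit(1));
	return 0;
}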

@@ -30,6 +30,8 @@
 #define AMDGPU_VCE_HARVEST_VCE0 (1 << 0)
 #define AMDGPU_VCE_HARVEST_VCE1 (1 << 1)
 
+#define AMDGPU_VCE_FW_53_45	((53 << 24) | (45 << 16))
+
 struct amdgpu_vce {
 	struct amdgpu_bo	*vcpu_bo;
 	uint64_t		gpu_addr;

@@ -270,34 +270,17 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
 	return r;
 }
 
-static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
-				   bool direct, struct dma_fence **fence)
+static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
+				   struct amdgpu_bo *bo, bool direct,
+				   struct dma_fence **fence)
 {
-	struct ttm_operation_ctx ctx = { true, false };
-	struct ttm_validate_buffer tv;
-	struct ww_acquire_ctx ticket;
-	struct list_head head;
+	struct amdgpu_device *adev = ring->adev;
+	struct dma_fence *f = NULL;
 	struct amdgpu_job *job;
 	struct amdgpu_ib *ib;
-	struct dma_fence *f = NULL;
-	struct amdgpu_device *adev = ring->adev;
 	uint64_t addr;
 	int i, r;
 
-	memset(&tv, 0, sizeof(tv));
-	tv.bo = &bo->tbo;
-
-	INIT_LIST_HEAD(&head);
-	list_add(&tv.head, &head);
-
-	r = ttm_eu_reserve_buffers(&ticket, &head, true, NULL);
-	if (r)
-		return r;
-
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
-	if (r)
-		goto err;
-
 	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
 	if (r)
 		goto err;
@@ -330,11 +313,12 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *b
 		goto err_free;
 	}
 
-	ttm_eu_fence_buffer_objects(&ticket, &head, f);
+	amdgpu_bo_fence(bo, f, false);
+	amdgpu_bo_unreserve(bo);
+	amdgpu_bo_unref(&bo);
 
 	if (fence)
 		*fence = dma_fence_get(f);
-	amdgpu_bo_unref(&bo);
 	dma_fence_put(f);
 
 	return 0;
@@ -343,7 +327,8 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *b
 	amdgpu_job_free(job);
 
 err:
-	ttm_eu_backoff_reservation(&ticket, &head);
+	amdgpu_bo_unreserve(bo);
+	amdgpu_bo_unref(&bo);
 	return r;
 }
 
@@ -351,31 +336,16 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
					 struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
-	struct amdgpu_bo *bo;
+	struct amdgpu_bo *bo = NULL;
 	uint32_t *msg;
 	int r, i;
 
-	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
-			     AMDGPU_GEM_DOMAIN_VRAM,
-			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
-			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
-			     NULL, NULL, 0, &bo);
+	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
+				      AMDGPU_GEM_DOMAIN_VRAM,
+				      &bo, NULL, (void **)&msg);
 	if (r)
 		return r;
 
-	r = amdgpu_bo_reserve(bo, false);
-	if (r) {
-		amdgpu_bo_unref(&bo);
-		return r;
-	}
-
-	r = amdgpu_bo_kmap(bo, (void **)&msg);
-	if (r) {
-		amdgpu_bo_unreserve(bo);
-		amdgpu_bo_unref(&bo);
-		return r;
-	}
-
 	msg[0] = cpu_to_le32(0x00000028);
 	msg[1] = cpu_to_le32(0x00000038);
 	msg[2] = cpu_to_le32(0x00000001);
@@ -393,9 +363,6 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
 	for (i = 14; i < 1024; ++i)
 		msg[i] = cpu_to_le32(0x0);
 
-	amdgpu_bo_kunmap(bo);
-	amdgpu_bo_unreserve(bo);
-
 	return amdgpu_vcn_dec_send_msg(ring, bo, true, fence);
 }
 
@@ -403,31 +370,16 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
					  bool direct, struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
-	struct amdgpu_bo *bo;
+	struct amdgpu_bo *bo = NULL;
 	uint32_t *msg;
 	int r, i;
 
-	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
-			     AMDGPU_GEM_DOMAIN_VRAM,
-			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
-			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
-			     NULL, NULL, 0, &bo);
+	r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
+				      AMDGPU_GEM_DOMAIN_VRAM,
+				      &bo, NULL, (void **)&msg);
 	if (r)
 		return r;
 
-	r = amdgpu_bo_reserve(bo, false);
-	if (r) {
-		amdgpu_bo_unref(&bo);
-		return r;
-	}
-
-	r = amdgpu_bo_kmap(bo, (void **)&msg);
-	if (r) {
-		amdgpu_bo_unreserve(bo);
-		amdgpu_bo_unref(&bo);
-		return r;
-	}
-
 	msg[0] = cpu_to_le32(0x00000028);
 	msg[1] = cpu_to_le32(0x00000018);
 	msg[2] = cpu_to_le32(0x00000000);
@@ -437,9 +389,6 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
 	for (i = 6; i < 1024; ++i)
 		msg[i] = cpu_to_le32(0x0);
 
-	amdgpu_bo_kunmap(bo);
-	amdgpu_bo_unreserve(bo);
-
 	return amdgpu_vcn_dec_send_msg(ring, bo, direct, fence);
 }

@@ -24,6 +24,18 @@
 #include "amdgpu.h"
 
 #define MAX_KIQ_REG_WAIT	100000000 /* in usecs */
 
+uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
+{
+	uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;
+
+	addr -= AMDGPU_VA_RESERVED_SIZE;
+	if (addr >= AMDGPU_VA_HOLE_START)
+		addr |= AMDGPU_VA_HOLE_END;
+
+	return addr;
+}
+
 bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
 {
 	/* By now all MMIO pages except mailbox are blocked */
@@ -55,14 +67,14 @@ void amdgpu_free_static_csa(struct amdgpu_device *adev) {
 
 /*
  * amdgpu_map_static_csa should be called during amdgpu_vm_init
- * it maps virtual address "AMDGPU_VA_RESERVED_SIZE - AMDGPU_CSA_SIZE"
- * to this VM, and each command submission of GFX should use this virtual
- * address within META_DATA init package to support SRIOV gfx preemption.
+ * it maps virtual address amdgpu_csa_vaddr() to this VM, and each command
+ * submission of GFX should use this virtual address within META_DATA init
+ * package to support SRIOV gfx preemption.
  */
 int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			  struct amdgpu_bo_va **bo_va)
 {
+	uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_VA_HOLE_MASK;
 	struct ww_acquire_ctx ticket;
 	struct list_head list;
 	struct amdgpu_bo_list_entry pd;
@@ -90,7 +102,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		return -ENOMEM;
 	}
 
-	r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, AMDGPU_CSA_VADDR,
+	r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr,
				AMDGPU_CSA_SIZE);
 	if (r) {
 		DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
@@ -99,7 +111,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		return r;
 	}
 
-	r = amdgpu_vm_bo_map(adev, *bo_va, AMDGPU_CSA_VADDR, 0, AMDGPU_CSA_SIZE,
+	r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, AMDGPU_CSA_SIZE,
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);
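
Worked example for the new amdgpu_csa_vaddr() above, assuming a Vega10-style 48-bit address space (max_pfn << AMDGPU_GPU_PAGE_SHIFT == 1ULL << 48) and the usual hole constants from amdgpu.h; AMDGPU_VA_HOLE_END below is an assumed value, since only AMDGPU_VA_HOLE_START appears in this series:

#include <stdint.h>
#include <stdio.h>

#define VA_HOLE_START	0x0000800000000000ULL	/* AMDGPU_VA_HOLE_START */
#define VA_HOLE_END	0xffff800000000000ULL	/* assumed AMDGPU_VA_HOLE_END */
#define VA_RESERVED	(1ULL << 20)		/* AMDGPU_VA_RESERVED_SIZE */

int main(void)
{
	uint64_t addr = 1ULL << 48;	/* top of the VA space */

	addr -= VA_RESERVED;
	if (addr >= VA_HOLE_START)
		addr |= VA_HOLE_END;	/* sign extend into the upper half */

	/* prints 0xfffffffffff00000: the CSA sits in the last MB,
	 * expressed as a canonical 64-bit address */
	printf("CSA vaddr: 0x%016llx\n", (unsigned long long)addr);
	return 0;
}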

@@ -251,8 +251,7 @@ struct amdgpu_virt {
 	uint32_t gim_feature;
 };
 
 #define AMDGPU_CSA_SIZE		(8 * 1024)
-#define AMDGPU_CSA_VADDR	(AMDGPU_VA_RESERVED_SIZE - AMDGPU_CSA_SIZE)
 
 #define amdgpu_sriov_enabled(adev) \
 ((adev)->virt.caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV)
@@ -279,6 +278,8 @@ static inline bool is_virtual_machine(void)
 }
 
 struct amdgpu_vm;
+
+uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev);
 bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
 int amdgpu_allocate_static_csa(struct amdgpu_device *adev);
 int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,

@@ -75,7 +75,8 @@ struct amdgpu_pte_update_params {
 	/* indirect buffer to fill with commands */
 	struct amdgpu_ib *ib;
 	/* Function which actually does the update */
-	void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe,
+	void (*func)(struct amdgpu_pte_update_params *params,
+		     struct amdgpu_bo *bo, uint64_t pe,
		     uint64_t addr, unsigned count, uint32_t incr,
		     uint64_t flags);
 	/* The next two are used during VM update by CPU
@@ -256,6 +257,104 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
 	return ready;
 }
 
+/**
+ * amdgpu_vm_clear_bo - initially clear the PDs/PTs
+ *
+ * @adev: amdgpu_device pointer
+ * @bo: BO to clear
+ * @level: level this BO is at
+ *
+ * Root PD needs to be reserved when calling this.
+ */
+static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
+			      struct amdgpu_vm *vm, struct amdgpu_bo *bo,
+			      unsigned level, bool pte_support_ats)
+{
+	struct ttm_operation_ctx ctx = { true, false };
+	struct dma_fence *fence = NULL;
+	unsigned entries, ats_entries;
+	struct amdgpu_ring *ring;
+	struct amdgpu_job *job;
+	uint64_t addr;
+	int r;
+
+	addr = amdgpu_bo_gpu_offset(bo);
+	entries = amdgpu_bo_size(bo) / 8;
+
+	if (pte_support_ats) {
+		if (level == adev->vm_manager.root_level) {
+			ats_entries = amdgpu_vm_level_shift(adev, level);
+			ats_entries += AMDGPU_GPU_PAGE_SHIFT;
+			ats_entries = AMDGPU_VA_HOLE_START >> ats_entries;
+			ats_entries = min(ats_entries, entries);
+			entries -= ats_entries;
+		} else {
+			ats_entries = entries;
+			entries = 0;
+		}
+	} else {
+		ats_entries = 0;
+	}
+
+	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+
+	r = reservation_object_reserve_shared(bo->tbo.resv);
+	if (r)
+		return r;
+
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+	if (r)
+		goto error;
+
+	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
+	if (r)
+		goto error;
+
+	if (ats_entries) {
+		uint64_t ats_value;
+
+		ats_value = AMDGPU_PTE_DEFAULT_ATC;
+		if (level != AMDGPU_VM_PTB)
+			ats_value |= AMDGPU_PDE_PTE;
+
+		amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
+				      ats_entries, 0, ats_value);
+		addr += ats_entries * 8;
+	}
+
+	if (entries)
+		amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
+				      entries, 0, 0);
+
+	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
+
+	WARN_ON(job->ibs[0].length_dw > 64);
+	r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
+			     AMDGPU_FENCE_OWNER_UNDEFINED, false);
+	if (r)
+		goto error_free;
+
+	r = amdgpu_job_submit(job, ring, &vm->entity,
+			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
+	if (r)
+		goto error_free;
+
+	amdgpu_bo_fence(bo, fence, true);
+	dma_fence_put(fence);
+
+	if (bo->shadow)
+		return amdgpu_vm_clear_bo(adev, vm, bo->shadow,
+					  level, pte_support_ats);
+
+	return 0;
+
+error_free:
+	amdgpu_job_free(job);
+
+error:
+	return r;
+}
+
 /**
  * amdgpu_vm_alloc_levels - allocate the PD/PT levels
  *
@@ -270,13 +369,12 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
				  struct amdgpu_vm *vm,
				  struct amdgpu_vm_pt *parent,
				  uint64_t saddr, uint64_t eaddr,
-				  unsigned level)
+				  unsigned level, bool ats)
 {
 	unsigned shift = amdgpu_vm_level_shift(adev, level);
 	unsigned pt_idx, from, to;
-	int r;
 	u64 flags;
-	uint64_t init_value = 0;
+	int r;
 
 	if (!parent->entries) {
 		unsigned num_entries = amdgpu_vm_num_entries(adev, level);
@@ -299,21 +397,13 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
 	saddr = saddr & ((1 << shift) - 1);
 	eaddr = eaddr & ((1 << shift) - 1);
 
-	flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
-		AMDGPU_GEM_CREATE_VRAM_CLEARED;
+	flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
 	if (vm->use_cpu_for_update)
 		flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 	else
 		flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
			  AMDGPU_GEM_CREATE_SHADOW);
 
-	if (vm->pte_support_ats) {
-		init_value = AMDGPU_PTE_DEFAULT_ATC;
-		if (level != AMDGPU_VM_PTB)
-			init_value |= AMDGPU_PDE_PTE;
-	}
-
 	/* walk over the address space and allocate the page tables */
 	for (pt_idx = from; pt_idx <= to; ++pt_idx) {
 		struct reservation_object *resv = vm->root.base.bo->tbo.resv;
@@ -324,15 +414,22 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
 			r = amdgpu_bo_create(adev,
					     amdgpu_vm_bo_size(adev, level),
					     AMDGPU_GPU_PAGE_SIZE, true,
-					     AMDGPU_GEM_DOMAIN_VRAM,
-					     flags,
-					     NULL, resv, init_value, &pt);
+					     AMDGPU_GEM_DOMAIN_VRAM, flags,
+					     NULL, resv, &pt);
 			if (r)
 				return r;
 
+			r = amdgpu_vm_clear_bo(adev, vm, pt, level, ats);
+			if (r) {
+				amdgpu_bo_unref(&pt->shadow);
+				amdgpu_bo_unref(&pt);
+				return r;
+			}
+
 			if (vm->use_cpu_for_update) {
 				r = amdgpu_bo_kmap(pt, NULL);
 				if (r) {
+					amdgpu_bo_unref(&pt->shadow);
					amdgpu_bo_unref(&pt);
					return r;
				}
@@ -356,7 +453,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
 			uint64_t sub_eaddr = (pt_idx == to) ? eaddr :
				((1 << shift) - 1);
 			r = amdgpu_vm_alloc_levels(adev, vm, entry, sub_saddr,
-						   sub_eaddr, level);
+						   sub_eaddr, level, ats);
 			if (r)
 				return r;
 		}
@@ -379,26 +476,29 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
			struct amdgpu_vm *vm,
			uint64_t saddr, uint64_t size)
 {
-	uint64_t last_pfn;
 	uint64_t eaddr;
+	bool ats = false;
 
 	/* validate the parameters */
 	if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
 		return -EINVAL;
 
 	eaddr = saddr + size - 1;
-	last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
-	if (last_pfn >= adev->vm_manager.max_pfn) {
-		dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
-			last_pfn, adev->vm_manager.max_pfn);
-		return -EINVAL;
-	}
+
+	if (vm->pte_support_ats)
+		ats = saddr < AMDGPU_VA_HOLE_START;
 
 	saddr /= AMDGPU_GPU_PAGE_SIZE;
 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
 
+	if (eaddr >= adev->vm_manager.max_pfn) {
+		dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
+			eaddr, adev->vm_manager.max_pfn);
+		return -EINVAL;
+	}
+
 	return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr,
-				      adev->vm_manager.root_level);
+				      adev->vm_manager.root_level, ats);
 }
 
 /**
@@ -465,7 +565,7 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
 
 static bool amdgpu_vm_is_large_bar(struct amdgpu_device *adev)
 {
-	return (adev->mc.real_vram_size == adev->mc.visible_vram_size);
+	return (adev->gmc.real_vram_size == adev->gmc.visible_vram_size);
 }
 
 /**
@@ -491,14 +591,24 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
 		id->oa_base != job->oa_base ||
 		id->oa_size != job->oa_size);
 	bool vm_flush_needed = job->vm_needs_flush;
+	bool pasid_mapping_needed = id->pasid != job->pasid ||
+		!id->pasid_mapping ||
+		!dma_fence_is_signaled(id->pasid_mapping);
+	struct dma_fence *fence = NULL;
 	unsigned patch_offset = 0;
 	int r;
 
 	if (amdgpu_vmid_had_gpu_reset(adev, id)) {
 		gds_switch_needed = true;
 		vm_flush_needed = true;
+		pasid_mapping_needed = true;
 	}
 
+	gds_switch_needed &= !!ring->funcs->emit_gds_switch;
+	vm_flush_needed &= !!ring->funcs->emit_vm_flush;
+	pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
+		ring->funcs->emit_wreg;
+
 	if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
 		return 0;
@@ -508,23 +618,36 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
 	if (need_pipe_sync)
 		amdgpu_ring_emit_pipeline_sync(ring);
 
-	if (ring->funcs->emit_vm_flush && vm_flush_needed) {
-		struct dma_fence *fence;
-
+	if (vm_flush_needed) {
 		trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
 		amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
+	}
 
+	if (pasid_mapping_needed)
+		amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
+
+	if (vm_flush_needed || pasid_mapping_needed) {
 		r = amdgpu_fence_emit(ring, &fence);
 		if (r)
 			return r;
+	}
 
+	if (vm_flush_needed) {
 		mutex_lock(&id_mgr->lock);
 		dma_fence_put(id->last_flush);
-		id->last_flush = fence;
-		id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
+		id->last_flush = dma_fence_get(fence);
+		id->current_gpu_reset_count =
+			atomic_read(&adev->gpu_reset_counter);
 		mutex_unlock(&id_mgr->lock);
 	}
 
+	if (pasid_mapping_needed) {
+		id->pasid = job->pasid;
+		dma_fence_put(id->pasid_mapping);
+		id->pasid_mapping = dma_fence_get(fence);
+	}
+	dma_fence_put(fence);
+
 	if (ring->funcs->emit_gds_switch && gds_switch_needed) {
 		id->gds_base = job->gds_base;
 		id->gds_size = job->gds_size;
@@ -578,6 +701,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
  * amdgpu_vm_do_set_ptes - helper to call the right asic function
  *
  * @params: see amdgpu_pte_update_params definition
+ * @bo: PD/PT to update
  * @pe: addr of the page entry
  * @addr: dst addr to write into pe
  * @count: number of page entries to update
@@ -588,10 +712,12 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
  * to setup the page table using the DMA.
  */
 static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
+				  struct amdgpu_bo *bo,
				  uint64_t pe, uint64_t addr,
				  unsigned count, uint32_t incr,
				  uint64_t flags)
 {
+	pe += amdgpu_bo_gpu_offset(bo);
 	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
 
 	if (count < 3) {
@@ -608,6 +734,7 @@ static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
  * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
  *
  * @params: see amdgpu_pte_update_params definition
+ * @bo: PD/PT to update
  * @pe: addr of the page entry
  * @addr: dst addr to write into pe
  * @count: number of page entries to update
@@ -617,13 +744,14 @@ static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
  * Traces the parameters and calls the DMA function to copy the PTEs.
  */
 static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
+				   struct amdgpu_bo *bo,
				   uint64_t pe, uint64_t addr,
				   unsigned count, uint32_t incr,
				   uint64_t flags)
 {
 	uint64_t src = (params->src + (addr >> 12) * 8);
 
+	pe += amdgpu_bo_gpu_offset(bo);
 	trace_amdgpu_vm_copy_ptes(pe, src, count);
 
 	amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
@@ -657,6 +785,7 @@ static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
  * amdgpu_vm_cpu_set_ptes - helper to update page tables via CPU
  *
  * @params: see amdgpu_pte_update_params definition
+ * @bo: PD/PT to update
  * @pe: kmap addr of the page entry
  * @addr: dst addr to write into pe
  * @count: number of page entries to update
@@ -666,6 +795,7 @@ static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
  * Write count number of PT/PD entries directly.
  */
 static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
+				   struct amdgpu_bo *bo,
				   uint64_t pe, uint64_t addr,
				   unsigned count, uint32_t incr,
				   uint64_t flags)
@@ -673,14 +803,16 @@ static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
 	unsigned int i;
 	uint64_t value;
 
+	pe += (unsigned long)amdgpu_bo_kptr(bo);
+
 	trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
 
 	for (i = 0; i < count; i++) {
 		value = params->pages_addr ?
			amdgpu_vm_map_gart(params->pages_addr, addr) :
			addr;
-		amdgpu_gart_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
-					i, value, flags);
+		amdgpu_gmc_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
+				       i, value, flags);
 		addr += incr;
 	}
 }
@@ -714,8 +846,7 @@ static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
				 struct amdgpu_vm_pt *parent,
				 struct amdgpu_vm_pt *entry)
 {
-	struct amdgpu_bo *bo = entry->base.bo, *shadow = NULL, *pbo;
-	uint64_t pd_addr, shadow_addr = 0;
+	struct amdgpu_bo *bo = parent->base.bo, *pbo;
 	uint64_t pde, pt, flags;
 	unsigned level;
 
@@ -723,29 +854,17 @@ static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
 	if (entry->huge)
 		return;
 
-	if (vm->use_cpu_for_update) {
-		pd_addr = (unsigned long)amdgpu_bo_kptr(parent->base.bo);
-	} else {
-		pd_addr = amdgpu_bo_gpu_offset(parent->base.bo);
-		shadow = parent->base.bo->shadow;
-		if (shadow)
-			shadow_addr = amdgpu_bo_gpu_offset(shadow);
-	}
-
-	for (level = 0, pbo = parent->base.bo->parent; pbo; ++level)
+	for (level = 0, pbo = bo->parent; pbo; ++level)
 		pbo = pbo->parent;
 
 	level += params->adev->vm_manager.root_level;
-	pt = amdgpu_bo_gpu_offset(bo);
+	pt = amdgpu_bo_gpu_offset(entry->base.bo);
 	flags = AMDGPU_PTE_VALID;
-	amdgpu_gart_get_vm_pde(params->adev, level, &pt, &flags);
-	if (shadow) {
-		pde = shadow_addr + (entry - parent->entries) * 8;
-		params->func(params, pde, pt, 1, 0, flags);
-	}
-
-	pde = pd_addr + (entry - parent->entries) * 8;
-	params->func(params, pde, pt, 1, 0, flags);
+	amdgpu_gmc_get_vm_pde(params->adev, level, &pt, &flags);
+	pde = (entry - parent->entries) * 8;
+	if (bo->shadow)
+		params->func(params, bo->shadow, pde, pt, 1, 0, flags);
+	params->func(params, bo, pde, pt, 1, 0, flags);
 }
 
 /*
@@ -856,7 +975,7 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
 	if (vm->use_cpu_for_update) {
 		/* Flush HDP */
 		mb();
-		amdgpu_gart_flush_gpu_tlb(adev, 0);
+		amdgpu_asic_flush_hdp(adev, NULL);
 	} else if (params.ib->length_dw == 0) {
 		amdgpu_job_free(job);
 	} else {
@@ -870,11 +989,6 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
 		amdgpu_ring_pad_ib(ring, params.ib);
 		amdgpu_sync_resv(adev, &job->sync, root->tbo.resv,
				 AMDGPU_FENCE_OWNER_VM, false);
-		if (root->shadow)
-			amdgpu_sync_resv(adev, &job->sync,
-					 root->shadow->tbo.resv,
-					 AMDGPU_FENCE_OWNER_VM, false);
-
 		WARN_ON(params.ib->length_dw > ndw);
 		r = amdgpu_job_submit(job, ring, &vm->entity,
				      AMDGPU_FENCE_OWNER_VM, &fence);
@@ -946,7 +1060,7 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
					unsigned nptes, uint64_t dst,
					uint64_t flags)
 {
-	uint64_t pd_addr, pde;
+	uint64_t pde;
 
 	/* In the case of a mixed PT the PDE must point to it*/
 	if (p->adev->asic_type >= CHIP_VEGA10 && !p->src &&
@@ -967,21 +1081,12 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
 	}
 
 	entry->huge = true;
-	amdgpu_gart_get_vm_pde(p->adev, AMDGPU_VM_PDB0,
-			       &dst, &flags);
+	amdgpu_gmc_get_vm_pde(p->adev, AMDGPU_VM_PDB0, &dst, &flags);
 
-	if (p->func == amdgpu_vm_cpu_set_ptes) {
-		pd_addr = (unsigned long)amdgpu_bo_kptr(parent->base.bo);
-	} else {
-		if (parent->base.bo->shadow) {
-			pd_addr = amdgpu_bo_gpu_offset(parent->base.bo->shadow);
-			pde = pd_addr + (entry - parent->entries) * 8;
-			p->func(p, pde, dst, 1, 0, flags);
-		}
-		pd_addr = amdgpu_bo_gpu_offset(parent->base.bo);
-	}
-	pde = pd_addr + (entry - parent->entries) * 8;
-	p->func(p, pde, dst, 1, 0, flags);
+	pde = (entry - parent->entries) * 8;
+	if (parent->base.bo->shadow)
+		p->func(p, parent->base.bo->shadow, pde, dst, 1, 0, flags);
+	p->func(p, parent->base.bo, pde, dst, 1, 0, flags);
 }
 
 /**
@@ -1007,7 +1112,6 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
 	uint64_t addr, pe_start;
 	struct amdgpu_bo *pt;
 	unsigned nptes;
-	bool use_cpu_update = (params->func == amdgpu_vm_cpu_set_ptes);
 
 	/* walk over the address space and update the page tables */
 	for (addr = start; addr < end; addr += nptes,
@@ -1030,20 +1134,11 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
			continue;
 
 		pt = entry->base.bo;
-		if (use_cpu_update) {
-			pe_start = (unsigned long)amdgpu_bo_kptr(pt);
-		} else {
-			if (pt->shadow) {
-				pe_start = amdgpu_bo_gpu_offset(pt->shadow);
-				pe_start += (addr & mask) * 8;
-				params->func(params, pe_start, dst, nptes,
-					     AMDGPU_GPU_PAGE_SIZE, flags);
-			}
-			pe_start = amdgpu_bo_gpu_offset(pt);
-		}
-
-		pe_start += (addr & mask) * 8;
-		params->func(params, pe_start, dst, nptes,
+		pe_start = (addr & mask) * 8;
+		if (pt->shadow)
+			params->func(params, pt->shadow, pe_start, dst, nptes,
+				     AMDGPU_GPU_PAGE_SIZE, flags);
+		params->func(params, pt, pe_start, dst, nptes,
			     AMDGPU_GPU_PAGE_SIZE, flags);
 	}
 
@@ -1204,11 +1299,10 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	} else {
 		/* set page commands needed */
-		ndw += ncmds * adev->vm_manager.vm_pte_funcs->set_pte_pde_num_dw;
+		ndw += ncmds * 10;
 
 		/* extra commands for begin/end fragments */
-		ndw += 2 * adev->vm_manager.vm_pte_funcs->set_pte_pde_num_dw
-			* adev->vm_manager.fragment_size;
+		ndw += 2 * 10 * adev->vm_manager.fragment_size;
 
 		params.func = amdgpu_vm_do_set_ptes;
 	}
@@ -1457,7 +1551,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	if (vm->use_cpu_for_update) {
 		/* Flush HDP */
 		mb();
-		amdgpu_gart_flush_gpu_tlb(adev, 0);
+		amdgpu_asic_flush_hdp(adev, NULL);
 	}
 
 	spin_lock(&vm->status_lock);
@@ -1485,7 +1579,7 @@ static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
 
 	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
 	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
-	adev->gart.gart_funcs->set_prt(adev, enable);
+	adev->gmc.gmc_funcs->set_prt(adev, enable);
 	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
 }
 
@@ -1494,7 +1588,7 @@ static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
  */
 static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
 {
-	if (!adev->gart.gart_funcs->set_prt)
+	if (!adev->gmc.gmc_funcs->set_prt)
 		return;
 
 	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
@@ -1529,7 +1623,7 @@ static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
 {
 	struct amdgpu_prt_cb *cb;
 
-	if (!adev->gart.gart_funcs->set_prt)
+	if (!adev->gmc.gmc_funcs->set_prt)
 		return;
 
 	cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
@@ -1623,16 +1717,16 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct dma_fence **fence)
 {
 	struct amdgpu_bo_va_mapping *mapping;
+	uint64_t init_pte_value = 0;
 	struct dma_fence *f = NULL;
 	int r;
-	uint64_t init_pte_value = 0;
 
 	while (!list_empty(&vm->freed)) {
 		mapping = list_first_entry(&vm->freed,
			struct amdgpu_bo_va_mapping, list);
 		list_del(&mapping->list);
 
-		if (vm->pte_support_ats)
+		if (vm->pte_support_ats && mapping->start < AMDGPU_VA_HOLE_START)
			init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
 
 		r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm,
@@ -2262,11 +2356,11 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 {
 	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
				   AMDGPU_VM_PTE_COUNT(adev) * 8);
-	uint64_t init_pde_value = 0, flags;
 	unsigned ring_instance;
 	struct amdgpu_ring *ring;
 	struct drm_sched_rq *rq;
 	unsigned long size;
+	uint64_t flags;
 	int r, i;
 
 	vm->va = RB_ROOT_CACHED;
@@ -2295,23 +2389,19 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
						AMDGPU_VM_USE_CPU_FOR_COMPUTE);
 
-		if (adev->asic_type == CHIP_RAVEN) {
+		if (adev->asic_type == CHIP_RAVEN)
			vm->pte_support_ats = true;
-			init_pde_value = AMDGPU_PTE_DEFAULT_ATC
-					| AMDGPU_PDE_PTE;
-		}
-	} else
+	} else {
 		vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
						AMDGPU_VM_USE_CPU_FOR_GFX);
+	}
 	DRM_DEBUG_DRIVER("VM update mode is %s\n",
			 vm->use_cpu_for_update ? "CPU" : "SDMA");
 	WARN_ONCE((vm->use_cpu_for_update & !amdgpu_vm_is_large_bar(adev)),
		  "CPU update of VM recommended only for large BAR system\n");
 	vm->last_update = NULL;
 
-	flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
-			AMDGPU_GEM_CREATE_VRAM_CLEARED;
+	flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
 	if (vm->use_cpu_for_update)
 		flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 	else
@@ -2320,8 +2410,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 
 	size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level);
 	r = amdgpu_bo_create(adev, size, align, true, AMDGPU_GEM_DOMAIN_VRAM,
-			     flags, NULL, NULL, init_pde_value,
-			     &vm->root.base.bo);
+			     flags, NULL, NULL, &vm->root.base.bo);
 	if (r)
 		goto error_free_sched_entity;
 
@@ -2329,6 +2418,12 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	if (r)
 		goto error_free_root;
 
+	r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo,
+			       adev->vm_manager.root_level,
+			       vm->pte_support_ats);
+	if (r)
+		goto error_unreserve;
+
 	vm->root.base.vm = vm;
 	list_add_tail(&vm->root.base.bo_list, &vm->root.base.bo->va);
 	list_add_tail(&vm->root.base.vm_status, &vm->evicted);
@@ -2352,6 +2447,9 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 
 	return 0;
 
+error_unreserve:
+	amdgpu_bo_unreserve(vm->root.base.bo);
+
 error_free_root:
 	amdgpu_bo_unref(&vm->root.base.bo->shadow);
 	amdgpu_bo_unref(&vm->root.base.bo);
@@ -2405,7 +2503,7 @@ static void amdgpu_vm_free_levels(struct amdgpu_device *adev,
 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
 	struct amdgpu_bo_va_mapping *mapping, *tmp;
-	bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt;
+	bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
 	struct amdgpu_bo *root;
 	u64 fault;
 	int i, r;
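
The recurring pattern in the amdgpu_vm.c hunks above is a change of contract for params->func(): callers now hand over the page-table BO plus an offset relative to it, and each backend resolves the base itself (GPU offset for the SDMA paths, kernel mapping for the CPU path), which is what lets the shadow handling collapse into one line. A self-contained C sketch of that callback design, with simplified stand-in types rather than the driver's structs:

#include <stdint.h>
#include <stdio.h>

struct bo {
	uint64_t gpu_base;	/* what amdgpu_bo_gpu_offset() would return */
	uint64_t *cpu_map;	/* what amdgpu_bo_kptr() would return */
};

/* SDMA-style backend: adds the GPU base, like amdgpu_vm_do_set_ptes() */
static void gpu_set_pte(struct bo *bo, uint64_t pe, uint64_t addr)
{
	printf("SDMA write of 0x%llx at GPU addr 0x%llx\n",
	       (unsigned long long)addr,
	       (unsigned long long)(bo->gpu_base + pe));
}

/* CPU-style backend: same relative pe, different base */
static void cpu_set_pte(struct bo *bo, uint64_t pe, uint64_t addr)
{
	bo->cpu_map[pe / 8] = addr;
}

int main(void)
{
	uint64_t page[512] = { 0 };
	struct bo pt = { .gpu_base = 0x100000, .cpu_map = page };
	uint64_t pe = 3 * 8;	/* entry 3, relative to the BO */

	gpu_set_pte(&pt, pe, 0xdeadbeef);
	cpu_set_pte(&pt, pe, 0xdeadbeef);
	printf("entry 3 via CPU path: 0x%llx\n",
	       (unsigned long long)page[3]);
	return 0;
}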

@@ -99,7 +99,7 @@ struct amdgpu_bo_list_entry;
 #define AMDGPU_MMHUB				1
 
 /* hardcode that limit for now */
-#define AMDGPU_VA_RESERVED_SIZE			(8ULL << 20)
+#define AMDGPU_VA_RESERVED_SIZE			(1ULL << 20)
 
 /* VA hole for 48bit addresses on Vega10 */
 #define AMDGPU_VA_HOLE_START			0x0000800000000000ULL

@@ -89,11 +89,11 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
 	uint64_t start = node->start << PAGE_SHIFT;
 	uint64_t end = (node->size + node->start) << PAGE_SHIFT;
 
-	if (start >= adev->mc.visible_vram_size)
+	if (start >= adev->gmc.visible_vram_size)
 		return 0;
 
-	return (end > adev->mc.visible_vram_size ?
-		adev->mc.visible_vram_size : end) - start;
+	return (end > adev->gmc.visible_vram_size ?
+		adev->gmc.visible_vram_size : end) - start;
 }
 
 /**

@ -905,7 +905,7 @@ static bool ci_dpm_vblank_too_short(void *handle)
{ {
struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 vblank_time = amdgpu_dpm_get_vblank_time(adev); u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300; u32 switch_limit = adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;
/* disable mclk switching if the refresh is >120Hz, even if the /* disable mclk switching if the refresh is >120Hz, even if the
* blanking period would allow it * blanking period would allow it
@ -2954,7 +2954,7 @@ static int ci_calculate_mclk_params(struct amdgpu_device *adev,
mpll_ad_func_cntl &= ~MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK; mpll_ad_func_cntl &= ~MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK;
mpll_ad_func_cntl |= (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT); mpll_ad_func_cntl |= (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);
if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) { if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
mpll_dq_func_cntl &= ~(MPLL_DQ_FUNC_CNTL__YCLK_SEL_MASK | mpll_dq_func_cntl &= ~(MPLL_DQ_FUNC_CNTL__YCLK_SEL_MASK |
MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK); MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK);
 	mpll_dq_func_cntl |= (mpll_param.yclk_sel << MPLL_DQ_FUNC_CNTL__YCLK_SEL__SHIFT) |
@@ -3077,7 +3077,7 @@ static int ci_populate_single_memory_level(struct amdgpu_device *adev,
 	    (memory_clock <= pi->mclk_strobe_mode_threshold))
 		memory_level->StrobeEnable = 1;
-	if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
+	if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
 		memory_level->StrobeRatio =
 			ci_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
 		if (pi->mclk_edc_enable_threshold &&
@@ -3752,7 +3752,7 @@ static int ci_init_smc_table(struct amdgpu_device *adev)
 	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
 		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
-	if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
+	if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
 		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
 	if (ulv->supported) {
@@ -4549,12 +4549,12 @@ static int ci_set_mc_special_registers(struct amdgpu_device *adev,
 			for (k = 0; k < table->num_entries; k++) {
 				table->mc_reg_table_entry[k].mc_data[j] =
 					(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
-				if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
+				if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
 					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
 			}
 			j++;
-			if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
+			if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
 				if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
 					return -EINVAL;
 				table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
@@ -6639,9 +6639,10 @@ static int ci_dpm_force_clock_level(void *handle,
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct ci_power_info *pi = ci_get_pi(adev);
-	if (adev->pm.dpm.forced_level & (AMD_DPM_FORCED_LEVEL_AUTO |
-				AMD_DPM_FORCED_LEVEL_LOW |
-				AMD_DPM_FORCED_LEVEL_HIGH))
+	if (adev->pm.dpm.forced_level != AMD_DPM_FORCED_LEVEL_MANUAL)
+		return -EINVAL;
+
+	if (mask == 0)
 		return -EINVAL;
 	switch (type) {
@@ -6662,15 +6663,15 @@ static int ci_dpm_force_clock_level(void *handle,
 	case PP_PCIE:
 	{
 		uint32_t tmp = mask & pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
-		uint32_t level = 0;
-
-		while (tmp >>= 1)
-			level++;
-
-		if (!pi->pcie_dpm_key_disabled)
-			amdgpu_ci_send_msg_to_smc_with_parameter(adev,
-					PPSMC_MSG_PCIeDPM_ForceLevel,
-					level);
+		if (!pi->pcie_dpm_key_disabled) {
+			if (fls(tmp) != ffs(tmp))
+				amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_UnForceLevel);
+			else
+				amdgpu_ci_send_msg_to_smc_with_parameter(adev,
+						PPSMC_MSG_PCIeDPM_ForceLevel,
+						fls(tmp) - 1);
+		}
 		break;
 	}
 	default:
@@ -7029,7 +7030,6 @@ const struct amd_ip_funcs ci_dpm_ip_funcs = {
 };
 const struct amd_pm_funcs ci_dpm_funcs = {
-	.get_temperature = &ci_dpm_get_temp,
 	.pre_set_power_state = &ci_dpm_pre_set_power_state,
 	.set_power_state = &ci_dpm_set_power_state,
 	.post_set_power_state = &ci_dpm_post_set_power_state,
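
The reworked PP_PCIE branch leans on the kernel's fls()/ffs() helpers: for a non-zero mask they disagree exactly when more than one bit is set. A standalone sketch of the same decision (userspace C, with GCC builtins standing in for the kernel helpers; not driver code):

```c
#include <stdio.h>

/* Userspace stand-ins for the kernel's fls()/ffs(). */
static int fls32(unsigned int x) { return x ? 32 - __builtin_clz(x) : 0; }
static int ffs32(unsigned int x) { return __builtin_ffs(x); }

int main(void)
{
	unsigned int masks[] = { 0x1, 0x4, 0x3, 0x5 };
	unsigned int i;

	for (i = 0; i < sizeof(masks) / sizeof(masks[0]); i++) {
		unsigned int tmp = masks[i];

		if (fls32(tmp) != ffs32(tmp))
			/* highest and lowest set bit differ: several levels */
			printf("mask 0x%x: several levels set -> unforce PCIe DPM\n", tmp);
		else
			/* single bit set: force that one level (0-based) */
			printf("mask 0x%x: force PCIe DPM to level %d\n",
			       tmp, fls32(tmp) - 1);
	}
	return 0;
}
```

The mask == 0 case never reaches this code because the function now rejects empty masks up front.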

diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c

@@ -1715,6 +1715,27 @@ static void cik_detect_hw_virtualization(struct amdgpu_device *adev)
 		adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
 }
+static void cik_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
+{
+	if (!ring || !ring->funcs->emit_wreg) {
+		WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
+		RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
+	} else {
+		amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
+	}
+}
+
+static void cik_invalidate_hdp(struct amdgpu_device *adev,
+			       struct amdgpu_ring *ring)
+{
+	if (!ring || !ring->funcs->emit_wreg) {
+		WREG32(mmHDP_DEBUG0, 1);
+		RREG32(mmHDP_DEBUG0);
+	} else {
+		amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
+	}
+}
+
 static const struct amdgpu_asic_funcs cik_asic_funcs =
 {
 	.read_disabled_bios = &cik_read_disabled_bios,
@@ -1726,6 +1747,8 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
 	.set_uvd_clocks = &cik_set_uvd_clocks,
 	.set_vce_clocks = &cik_set_vce_clocks,
 	.get_config_memsize = &cik_get_config_memsize,
+	.flush_hdp = &cik_flush_hdp,
+	.invalidate_hdp = &cik_invalidate_hdp,
 };
 static int cik_common_early_init(void *handle)
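
The two new callbacks encode one pattern: with a ring available, the register write is emitted into the command stream via emit_wreg so it is ordered against the surrounding packets; without one (or on a ring that cannot write registers), the driver falls back to an immediate MMIO write plus a readback that acts as a flush. A hedged sketch of how a caller might drive the pair through the asic function table (the example_ wrapper is illustrative, not taken from this diff):

```c
/* Illustrative only: the callback signatures come from the diff above.
 * Passing ring == NULL selects the CPU/MMIO fallback path. */
static void example_hdp_sync(struct amdgpu_device *adev,
			     struct amdgpu_ring *ring)
{
	adev->asic_funcs->flush_hdp(adev, ring);      /* write-back path  */
	adev->asic_funcs->invalidate_hdp(adev, ring); /* invalidate path  */
}
```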

diff --git a/drivers/gpu/drm/amd/amdgpu/cik.h b/drivers/gpu/drm/amd/amdgpu/cik.h

@@ -24,6 +24,8 @@
 #ifndef __CIK_H__
 #define __CIK_H__
+#define CIK_FLUSH_GPU_TLB_NUM_WREG	3
+
 void cik_srbm_select(struct amdgpu_device *adev,
 		     u32 me, u32 pipe, u32 queue, u32 vmid);
 int cik_set_ip_blocks(struct amdgpu_device *adev);
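
CIK_FLUSH_GPU_TLB_NUM_WREG counts the register writes the generic GMC TLB flush emits on CIK, so each ring backend can size its frame budget as that count times its own per-write dword cost (3 dwords for an SDMA SRBM_WRITE, 5 for a gfx WRITE_DATA packet), as the emit_frame_size updates in the later hunks do. A sketch of that arithmetic (the EXAMPLE_ names are illustrative; the factors come from the diffs below):

```c
/* One TLB flush = CIK_FLUSH_GPU_TLB_NUM_WREG generic register writes,
 * priced at each engine's per-wreg dword cost plus its poll/sync tail. */
#define EXAMPLE_SDMA_VM_FLUSH_DW (CIK_FLUSH_GPU_TLB_NUM_WREG * 3 + 6)
#define EXAMPLE_GFX_VM_FLUSH_DW  (CIK_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + 6)
```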

diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c

@@ -281,7 +281,7 @@ static void cik_ih_decode_iv(struct amdgpu_device *adev,
 	entry->src_data[0] = dw[1] & 0xfffffff;
 	entry->ring_id = dw[2] & 0xff;
 	entry->vmid = (dw[2] >> 8) & 0xff;
-	entry->pas_id = (dw[2] >> 16) & 0xffff;
+	entry->pasid = (dw[2] >> 16) & 0xffff;
 	/* wptr/rptr are in bytes! */
 	adev->irq.ih.rptr += 16;
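
The rename from pas_id to pasid does not change the wire format: the third IV dword still packs ring id, VMID and PASID into fixed bitfields. A small sketch of that layout, with the shifts and masks taken from the decode above:

```c
#include <stdint.h>

/* Field layout of IV dword 2, as decoded by cik_ih_decode_iv(). */
struct iv_fields {
	uint8_t  ring_id;	/* bits  0..7  */
	uint8_t  vmid;		/* bits  8..15 */
	uint16_t pasid;		/* bits 16..31 */
};

static struct iv_fields decode_dw2(uint32_t dw2)
{
	struct iv_fields f = {
		.ring_id = dw2 & 0xff,
		.vmid    = (dw2 >> 8) & 0xff,
		.pasid   = (dw2 >> 16) & 0xffff,
	};
	return f;
}
```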

diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c

@@ -261,13 +261,6 @@ static void cik_sdma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 	amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
 }
-static void cik_sdma_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
-{
-	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
-	amdgpu_ring_write(ring, mmHDP_DEBUG0);
-	amdgpu_ring_write(ring, 1);
-}
-
 /**
  * cik_sdma_ring_emit_fence - emit a fence on the DMA ring
  *
@@ -317,7 +310,7 @@ static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
 	if ((adev->mman.buffer_funcs_ring == sdma0) ||
 	    (adev->mman.buffer_funcs_ring == sdma1))
-		amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
+		amdgpu_ttm_set_active_vram_size(adev, adev->gmc.visible_vram_size);
 	for (i = 0; i < adev->sdma.num_instances; i++) {
 		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
@@ -517,7 +510,7 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
 		}
 		if (adev->mman.buffer_funcs_ring == ring)
-			amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
+			amdgpu_ttm_set_active_vram_size(adev, adev->gmc.real_vram_size);
 	}
 	return 0;
@@ -885,18 +878,7 @@ static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
 	u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) |
 			  SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */
-	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
-	if (vmid < 8) {
-		amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
-	} else {
-		amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8));
-	}
-	amdgpu_ring_write(ring, pd_addr >> 12);
-
-	/* flush TLB */
-	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
-	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
-	amdgpu_ring_write(ring, 1 << vmid);
+	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
 	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
 	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
@@ -906,6 +888,14 @@ static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
 	amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
 }
+static void cik_sdma_ring_emit_wreg(struct amdgpu_ring *ring,
+				    uint32_t reg, uint32_t val)
+{
+	amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+	amdgpu_ring_write(ring, reg);
+	amdgpu_ring_write(ring, val);
+}
+
 static void cik_enable_sdma_mgcg(struct amdgpu_device *adev,
 				 bool enable)
 {
@@ -1279,9 +1269,9 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
 	.set_wptr = cik_sdma_ring_set_wptr,
 	.emit_frame_size =
 		6 + /* cik_sdma_ring_emit_hdp_flush */
-		3 + /* cik_sdma_ring_emit_hdp_invalidate */
+		3 + /* hdp invalidate */
 		6 + /* cik_sdma_ring_emit_pipeline_sync */
-		12 + /* cik_sdma_ring_emit_vm_flush */
+		CIK_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* cik_sdma_ring_emit_vm_flush */
 		9 + 9 + 9, /* cik_sdma_ring_emit_fence x3 for user fence, vm fence */
 	.emit_ib_size = 7 + 4, /* cik_sdma_ring_emit_ib */
 	.emit_ib = cik_sdma_ring_emit_ib,
@@ -1289,11 +1279,11 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
 	.emit_pipeline_sync = cik_sdma_ring_emit_pipeline_sync,
 	.emit_vm_flush = cik_sdma_ring_emit_vm_flush,
 	.emit_hdp_flush = cik_sdma_ring_emit_hdp_flush,
-	.emit_hdp_invalidate = cik_sdma_ring_emit_hdp_invalidate,
 	.test_ring = cik_sdma_ring_test_ring,
 	.test_ib = cik_sdma_ring_test_ib,
 	.insert_nop = cik_sdma_ring_insert_nop,
 	.pad_ib = cik_sdma_ring_pad_ib,
+	.emit_wreg = cik_sdma_ring_emit_wreg,
 };
 static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
@@ -1391,9 +1381,6 @@ static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
 	.copy_pte = cik_sdma_vm_copy_pte,
 	.write_pte = cik_sdma_vm_write_pte,
-	.set_max_nums_pte_pde = 0x1fffff >> 3,
-	.set_pte_pde_num_dw = 10,
-
 	.set_pte_pde = cik_sdma_vm_set_pte_pde,
 };
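
With emit_wreg in place, the dedicated HDP-invalidate helper this patch deletes becomes redundant: the same three-dword SRBM_WRITE sequence now falls out of the generic callback. A sketch of the equivalence, using only symbols from the diff above:

```c
/* The removed cik_sdma_ring_emit_hdp_invalidate() emitted exactly the
 * packet that the generic register-write callback produces for this
 * register/value pair. */
static void example_hdp_invalidate_via_wreg(struct amdgpu_ring *ring)
{
	cik_sdma_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
}
```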

diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c

@@ -260,7 +260,7 @@ static void cz_ih_decode_iv(struct amdgpu_device *adev,
 	entry->src_data[0] = dw[1] & 0xfffffff;
 	entry->ring_id = dw[2] & 0xff;
 	entry->vmid = (dw[2] >> 8) & 0xff;
-	entry->pas_id = (dw[2] >> 16) & 0xffff;
+	entry->pasid = (dw[2] >> 16) & 0xffff;
 	/* wptr/rptr are in bytes! */
 	adev->irq.ih.rptr += 16;

diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c

@@ -190,66 +190,6 @@ static void dce_v10_0_audio_endpt_wreg(struct amdgpu_device *adev,
 	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
 }
-static bool dce_v10_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
-{
-	if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) &
-	    CRTC_V_BLANK_START_END__CRTC_V_BLANK_START_MASK)
-		return true;
-	else
-		return false;
-}
-
-static bool dce_v10_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
-{
-	u32 pos1, pos2;
-
-	pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
-	pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
-
-	if (pos1 != pos2)
-		return true;
-	else
-		return false;
-}
-
-/**
- * dce_v10_0_vblank_wait - vblank wait asic callback.
- *
- * @adev: amdgpu_device pointer
- * @crtc: crtc to wait for vblank on
- *
- * Wait for vblank on the requested crtc (evergreen+).
- */
-static void dce_v10_0_vblank_wait(struct amdgpu_device *adev, int crtc)
-{
-	unsigned i = 100;
-
-	if (crtc >= adev->mode_info.num_crtc)
-		return;
-
-	if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
-		return;
-
-	/* depending on when we hit vblank, we may be close to active; if so,
-	 * wait for another frame.
-	 */
-	while (dce_v10_0_is_in_vblank(adev, crtc)) {
-		if (i++ == 100) {
-			i = 0;
-			if (!dce_v10_0_is_counter_moving(adev, crtc))
-				break;
-		}
-	}
-
-	while (!dce_v10_0_is_in_vblank(adev, crtc)) {
-		if (i++ == 100) {
-			i = 0;
-			if (!dce_v10_0_is_counter_moving(adev, crtc))
-				break;
-		}
-	}
-}
-
 static u32 dce_v10_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
 {
 	if (crtc >= adev->mode_info.num_crtc)
@@ -1205,7 +1145,7 @@ static void dce_v10_0_bandwidth_update(struct amdgpu_device *adev)
 	u32 num_heads = 0, lb_size;
 	int i;
-	amdgpu_update_display_priority(adev);
+	amdgpu_display_update_priority(adev);
 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
 		if (adev->mode_info.crtcs[i]->base.enabled)
@@ -2517,9 +2457,9 @@ static const struct drm_crtc_funcs dce_v10_0_crtc_funcs = {
 	.cursor_set2 = dce_v10_0_crtc_cursor_set2,
 	.cursor_move = dce_v10_0_crtc_cursor_move,
 	.gamma_set = dce_v10_0_crtc_gamma_set,
-	.set_config = amdgpu_crtc_set_config,
+	.set_config = amdgpu_display_crtc_set_config,
 	.destroy = dce_v10_0_crtc_destroy,
-	.page_flip_target = amdgpu_crtc_page_flip_target,
+	.page_flip_target = amdgpu_display_crtc_page_flip_target,
 };
 static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2537,7 +2477,8 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
 		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
 		dce_v10_0_vga_enable(crtc, false);
 		/* Make sure VBLANK and PFLIP interrupts are still enabled */
-		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
+		type = amdgpu_display_crtc_idx_to_irq_type(adev,
+						amdgpu_crtc->crtc_id);
 		amdgpu_irq_update(adev, &adev->crtc_irq, type);
 		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
 		drm_crtc_vblank_on(crtc);
@@ -2676,7 +2617,7 @@ static bool dce_v10_0_crtc_mode_fixup(struct drm_crtc *crtc,
 		amdgpu_crtc->connector = NULL;
 		return false;
 	}
-	if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
+	if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
 		return false;
 	if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
 		return false;
@@ -2824,9 +2765,9 @@ static int dce_v10_0_sw_init(void *handle)
 	adev->ddev->mode_config.preferred_depth = 24;
 	adev->ddev->mode_config.prefer_shadow = 1;
-	adev->ddev->mode_config.fb_base = adev->mc.aper_base;
+	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
 
-	r = amdgpu_modeset_create_props(adev);
+	r = amdgpu_display_modeset_create_props(adev);
 	if (r)
 		return r;
@@ -2841,7 +2782,7 @@ static int dce_v10_0_sw_init(void *handle)
 	}
 	if (amdgpu_atombios_get_connector_info_from_object_table(adev))
-		amdgpu_print_display_setup(adev->ddev);
+		amdgpu_display_print_display_setup(adev->ddev);
 	else
 		return -EINVAL;
@@ -3249,7 +3190,7 @@ static int dce_v10_0_crtc_irq(struct amdgpu_device *adev,
 {
 	unsigned crtc = entry->src_id - 1;
 	uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
-	unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);
+	unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, crtc);
 	switch (entry->src_data[0]) {
 	case 0: /* vblank */
@@ -3601,7 +3542,6 @@ static void dce_v10_0_encoder_add(struct amdgpu_device *adev,
 static const struct amdgpu_display_funcs dce_v10_0_display_funcs = {
 	.bandwidth_update = &dce_v10_0_bandwidth_update,
 	.vblank_get_counter = &dce_v10_0_vblank_get_counter,
-	.vblank_wait = &dce_v10_0_vblank_wait,
 	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
 	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
 	.hpd_sense = &dce_v10_0_hpd_sense,
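
The deleted vblank_wait helper (the same copy is removed from the other DCE variants below) implemented a classic edge-wait: if the CRTC is already inside vblank, the caller may be arbitrarily close to its end, so the code first waits for the current vblank to finish and only then for the next one to start, with the is_counter_moving() check as a bail-out on stalled hardware. The core idea, stripped of the register access (in_vblank() is a stand-in, not a real helper):

```c
#include <stdbool.h>

extern bool in_vblank(void); /* placeholder for the CRTC_STATUS test */

static void wait_for_fresh_vblank(void)
{
	while (in_vblank())	/* drain the vblank we may already be in */
		;
	while (!in_vblank())	/* then catch the start of the next one  */
		;
}
```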

diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c

@@ -207,66 +207,6 @@ static void dce_v11_0_audio_endpt_wreg(struct amdgpu_device *adev,
 	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
 }
-static bool dce_v11_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
-{
-	if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) &
-	    CRTC_V_BLANK_START_END__CRTC_V_BLANK_START_MASK)
-		return true;
-	else
-		return false;
-}
-
-static bool dce_v11_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
-{
-	u32 pos1, pos2;
-
-	pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
-	pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
-
-	if (pos1 != pos2)
-		return true;
-	else
-		return false;
-}
-
-/**
- * dce_v11_0_vblank_wait - vblank wait asic callback.
- *
- * @adev: amdgpu_device pointer
- * @crtc: crtc to wait for vblank on
- *
- * Wait for vblank on the requested crtc (evergreen+).
- */
-static void dce_v11_0_vblank_wait(struct amdgpu_device *adev, int crtc)
-{
-	unsigned i = 100;
-
-	if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
-		return;
-
-	if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
-		return;
-
-	/* depending on when we hit vblank, we may be close to active; if so,
-	 * wait for another frame.
-	 */
-	while (dce_v11_0_is_in_vblank(adev, crtc)) {
-		if (i++ == 100) {
-			i = 0;
-			if (!dce_v11_0_is_counter_moving(adev, crtc))
-				break;
-		}
-	}
-
-	while (!dce_v11_0_is_in_vblank(adev, crtc)) {
-		if (i++ == 100) {
-			i = 0;
-			if (!dce_v11_0_is_counter_moving(adev, crtc))
-				break;
-		}
-	}
-}
-
 static u32 dce_v11_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
 {
 	if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
@@ -1229,7 +1169,7 @@ static void dce_v11_0_bandwidth_update(struct amdgpu_device *adev)
 	u32 num_heads = 0, lb_size;
 	int i;
-	amdgpu_update_display_priority(adev);
+	amdgpu_display_update_priority(adev);
 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
 		if (adev->mode_info.crtcs[i]->base.enabled)
@@ -2592,9 +2532,9 @@ static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = {
 	.cursor_set2 = dce_v11_0_crtc_cursor_set2,
 	.cursor_move = dce_v11_0_crtc_cursor_move,
 	.gamma_set = dce_v11_0_crtc_gamma_set,
-	.set_config = amdgpu_crtc_set_config,
+	.set_config = amdgpu_display_crtc_set_config,
 	.destroy = dce_v11_0_crtc_destroy,
-	.page_flip_target = amdgpu_crtc_page_flip_target,
+	.page_flip_target = amdgpu_display_crtc_page_flip_target,
 };
 static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2612,7 +2552,8 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
 		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
 		dce_v11_0_vga_enable(crtc, false);
 		/* Make sure VBLANK and PFLIP interrupts are still enabled */
-		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
+		type = amdgpu_display_crtc_idx_to_irq_type(adev,
+						amdgpu_crtc->crtc_id);
 		amdgpu_irq_update(adev, &adev->crtc_irq, type);
 		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
 		drm_crtc_vblank_on(crtc);
@@ -2779,7 +2720,7 @@ static bool dce_v11_0_crtc_mode_fixup(struct drm_crtc *crtc,
 		amdgpu_crtc->connector = NULL;
 		return false;
 	}
-	if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
+	if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
 		return false;
 	if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
 		return false;
@@ -2939,9 +2880,9 @@ static int dce_v11_0_sw_init(void *handle)
 	adev->ddev->mode_config.preferred_depth = 24;
 	adev->ddev->mode_config.prefer_shadow = 1;
-	adev->ddev->mode_config.fb_base = adev->mc.aper_base;
+	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
 
-	r = amdgpu_modeset_create_props(adev);
+	r = amdgpu_display_modeset_create_props(adev);
 	if (r)
 		return r;
@@ -2957,7 +2898,7 @@ static int dce_v11_0_sw_init(void *handle)
 	}
 	if (amdgpu_atombios_get_connector_info_from_object_table(adev))
-		amdgpu_print_display_setup(adev->ddev);
+		amdgpu_display_print_display_setup(adev->ddev);
 	else
 		return -EINVAL;
@@ -3368,7 +3309,8 @@ static int dce_v11_0_crtc_irq(struct amdgpu_device *adev,
 {
 	unsigned crtc = entry->src_id - 1;
 	uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
-	unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);
+	unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
+								    crtc);
 	switch (entry->src_data[0]) {
 	case 0: /* vblank */
@@ -3725,7 +3667,6 @@ static void dce_v11_0_encoder_add(struct amdgpu_device *adev,
 static const struct amdgpu_display_funcs dce_v11_0_display_funcs = {
 	.bandwidth_update = &dce_v11_0_bandwidth_update,
 	.vblank_get_counter = &dce_v11_0_vblank_get_counter,
-	.vblank_wait = &dce_v11_0_vblank_wait,
 	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
 	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
 	.hpd_sense = &dce_v11_0_hpd_sense,

diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c

@@ -142,64 +142,6 @@ static void dce_v6_0_audio_endpt_wreg(struct amdgpu_device *adev,
 	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
 }
-static bool dce_v6_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
-{
-	if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) & CRTC_STATUS__CRTC_V_BLANK_MASK)
-		return true;
-	else
-		return false;
-}
-
-static bool dce_v6_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
-{
-	u32 pos1, pos2;
-
-	pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
-	pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
-
-	if (pos1 != pos2)
-		return true;
-	else
-		return false;
-}
-
-/**
- * dce_v6_0_wait_for_vblank - vblank wait asic callback.
- *
- * @crtc: crtc to wait for vblank on
- *
- * Wait for vblank on the requested crtc (evergreen+).
- */
-static void dce_v6_0_vblank_wait(struct amdgpu_device *adev, int crtc)
-{
-	unsigned i = 100;
-
-	if (crtc >= adev->mode_info.num_crtc)
-		return;
-
-	if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
-		return;
-
-	/* depending on when we hit vblank, we may be close to active; if so,
-	 * wait for another frame.
-	 */
-	while (dce_v6_0_is_in_vblank(adev, crtc)) {
-		if (i++ == 100) {
-			i = 0;
-			if (!dce_v6_0_is_counter_moving(adev, crtc))
-				break;
-		}
-	}
-
-	while (!dce_v6_0_is_in_vblank(adev, crtc)) {
-		if (i++ == 100) {
-			i = 0;
-			if (!dce_v6_0_is_counter_moving(adev, crtc))
-				break;
-		}
-	}
-}
-
 static u32 dce_v6_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
 {
 	if (crtc >= adev->mode_info.num_crtc)
@@ -1108,7 +1050,7 @@ static void dce_v6_0_bandwidth_update(struct amdgpu_device *adev)
 	if (!adev->mode_info.mode_config_initialized)
 		return;
-	amdgpu_update_display_priority(adev);
+	amdgpu_display_update_priority(adev);
 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
 		if (adev->mode_info.crtcs[i]->base.enabled)
@@ -2407,9 +2349,9 @@ static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = {
 	.cursor_set2 = dce_v6_0_crtc_cursor_set2,
 	.cursor_move = dce_v6_0_crtc_cursor_move,
 	.gamma_set = dce_v6_0_crtc_gamma_set,
-	.set_config = amdgpu_crtc_set_config,
+	.set_config = amdgpu_display_crtc_set_config,
 	.destroy = dce_v6_0_crtc_destroy,
-	.page_flip_target = amdgpu_crtc_page_flip_target,
+	.page_flip_target = amdgpu_display_crtc_page_flip_target,
 };
 static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2425,7 +2367,8 @@ static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
 		amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
 		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
 		/* Make sure VBLANK and PFLIP interrupts are still enabled */
-		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
+		type = amdgpu_display_crtc_idx_to_irq_type(adev,
+						amdgpu_crtc->crtc_id);
 		amdgpu_irq_update(adev, &adev->crtc_irq, type);
 		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
 		drm_crtc_vblank_on(crtc);
@@ -2562,7 +2505,7 @@ static bool dce_v6_0_crtc_mode_fixup(struct drm_crtc *crtc,
 		amdgpu_crtc->connector = NULL;
 		return false;
 	}
-	if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
+	if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
 		return false;
 	if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
 		return false;
@@ -2693,9 +2636,9 @@ static int dce_v6_0_sw_init(void *handle)
 	adev->ddev->mode_config.max_height = 16384;
 	adev->ddev->mode_config.preferred_depth = 24;
 	adev->ddev->mode_config.prefer_shadow = 1;
-	adev->ddev->mode_config.fb_base = adev->mc.aper_base;
+	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
 
-	r = amdgpu_modeset_create_props(adev);
+	r = amdgpu_display_modeset_create_props(adev);
 	if (r)
 		return r;
@@ -2711,7 +2654,7 @@ static int dce_v6_0_sw_init(void *handle)
 	ret = amdgpu_atombios_get_connector_info_from_object_table(adev);
 	if (ret)
-		amdgpu_print_display_setup(adev->ddev);
+		amdgpu_display_print_display_setup(adev->ddev);
 	else
 		return -EINVAL;
@@ -2966,7 +2909,8 @@ static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
 {
 	unsigned crtc = entry->src_id - 1;
 	uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
-	unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);
+	unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
+								    crtc);
 	switch (entry->src_data[0]) {
 	case 0: /* vblank */
@@ -3407,7 +3351,6 @@ static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
 static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
 	.bandwidth_update = &dce_v6_0_bandwidth_update,
 	.vblank_get_counter = &dce_v6_0_vblank_get_counter,
-	.vblank_wait = &dce_v6_0_vblank_wait,
 	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
 	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
 	.hpd_sense = &dce_v6_0_hpd_sense,

diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c

@@ -140,66 +140,6 @@ static void dce_v8_0_audio_endpt_wreg(struct amdgpu_device *adev,
 	spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
 }
-static bool dce_v8_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
-{
-	if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) &
-	    CRTC_V_BLANK_START_END__CRTC_V_BLANK_START_MASK)
-		return true;
-	else
-		return false;
-}
-
-static bool dce_v8_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
-{
-	u32 pos1, pos2;
-
-	pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
-	pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
-
-	if (pos1 != pos2)
-		return true;
-	else
-		return false;
-}
-
-/**
- * dce_v8_0_vblank_wait - vblank wait asic callback.
- *
- * @adev: amdgpu_device pointer
- * @crtc: crtc to wait for vblank on
- *
- * Wait for vblank on the requested crtc (evergreen+).
- */
-static void dce_v8_0_vblank_wait(struct amdgpu_device *adev, int crtc)
-{
-	unsigned i = 100;
-
-	if (crtc >= adev->mode_info.num_crtc)
-		return;
-
-	if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
-		return;
-
-	/* depending on when we hit vblank, we may be close to active; if so,
-	 * wait for another frame.
-	 */
-	while (dce_v8_0_is_in_vblank(adev, crtc)) {
-		if (i++ == 100) {
-			i = 0;
-			if (!dce_v8_0_is_counter_moving(adev, crtc))
-				break;
-		}
-	}
-
-	while (!dce_v8_0_is_in_vblank(adev, crtc)) {
-		if (i++ == 100) {
-			i = 0;
-			if (!dce_v8_0_is_counter_moving(adev, crtc))
-				break;
-		}
-	}
-}
-
 static u32 dce_v8_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
 {
 	if (crtc >= adev->mode_info.num_crtc)
@@ -1144,7 +1084,7 @@ static void dce_v8_0_bandwidth_update(struct amdgpu_device *adev)
 	u32 num_heads = 0, lb_size;
 	int i;
-	amdgpu_update_display_priority(adev);
+	amdgpu_display_update_priority(adev);
 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
 		if (adev->mode_info.crtcs[i]->base.enabled)
@@ -2421,9 +2361,9 @@ static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = {
 	.cursor_set2 = dce_v8_0_crtc_cursor_set2,
 	.cursor_move = dce_v8_0_crtc_cursor_move,
 	.gamma_set = dce_v8_0_crtc_gamma_set,
-	.set_config = amdgpu_crtc_set_config,
+	.set_config = amdgpu_display_crtc_set_config,
 	.destroy = dce_v8_0_crtc_destroy,
-	.page_flip_target = amdgpu_crtc_page_flip_target,
+	.page_flip_target = amdgpu_display_crtc_page_flip_target,
 };
 static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2441,7 +2381,8 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
 		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
 		dce_v8_0_vga_enable(crtc, false);
 		/* Make sure VBLANK and PFLIP interrupts are still enabled */
-		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
+		type = amdgpu_display_crtc_idx_to_irq_type(adev,
+						amdgpu_crtc->crtc_id);
 		amdgpu_irq_update(adev, &adev->crtc_irq, type);
 		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
 		drm_crtc_vblank_on(crtc);
@@ -2587,7 +2528,7 @@ static bool dce_v8_0_crtc_mode_fixup(struct drm_crtc *crtc,
 		amdgpu_crtc->connector = NULL;
 		return false;
 	}
-	if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
+	if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
 		return false;
 	if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
 		return false;
@@ -2724,9 +2665,9 @@ static int dce_v8_0_sw_init(void *handle)
 	adev->ddev->mode_config.preferred_depth = 24;
 	adev->ddev->mode_config.prefer_shadow = 1;
-	adev->ddev->mode_config.fb_base = adev->mc.aper_base;
+	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
 
-	r = amdgpu_modeset_create_props(adev);
+	r = amdgpu_display_modeset_create_props(adev);
 	if (r)
 		return r;
@@ -2741,7 +2682,7 @@ static int dce_v8_0_sw_init(void *handle)
 	}
 	if (amdgpu_atombios_get_connector_info_from_object_table(adev))
-		amdgpu_print_display_setup(adev->ddev);
+		amdgpu_display_print_display_setup(adev->ddev);
 	else
 		return -EINVAL;
@@ -3063,7 +3004,8 @@ static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
 {
 	unsigned crtc = entry->src_id - 1;
 	uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
-	unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);
+	unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
+								    crtc);
 	switch (entry->src_data[0]) {
 	case 0: /* vblank */
@@ -3491,7 +3433,6 @@ static void dce_v8_0_encoder_add(struct amdgpu_device *adev,
 static const struct amdgpu_display_funcs dce_v8_0_display_funcs = {
 	.bandwidth_update = &dce_v8_0_bandwidth_update,
 	.vblank_get_counter = &dce_v8_0_vblank_get_counter,
-	.vblank_wait = &dce_v8_0_vblank_wait,
 	.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
 	.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
 	.hpd_sense = &dce_v8_0_hpd_sense,

diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c

@@ -48,19 +48,6 @@ static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
 							int crtc,
 							enum amdgpu_interrupt_state state);
-/**
- * dce_virtual_vblank_wait - vblank wait asic callback.
- *
- * @adev: amdgpu_device pointer
- * @crtc: crtc to wait for vblank on
- *
- * Wait for vblank on the requested crtc (evergreen+).
- */
-static void dce_virtual_vblank_wait(struct amdgpu_device *adev, int crtc)
-{
-	return;
-}
-
 static u32 dce_virtual_vblank_get_counter(struct amdgpu_device *adev, int crtc)
 {
 	return 0;
@@ -130,9 +117,9 @@ static const struct drm_crtc_funcs dce_virtual_crtc_funcs = {
 	.cursor_set2 = NULL,
 	.cursor_move = NULL,
 	.gamma_set = dce_virtual_crtc_gamma_set,
-	.set_config = amdgpu_crtc_set_config,
+	.set_config = amdgpu_display_crtc_set_config,
 	.destroy = dce_virtual_crtc_destroy,
-	.page_flip_target = amdgpu_crtc_page_flip_target,
+	.page_flip_target = amdgpu_display_crtc_page_flip_target,
 };
 static void dce_virtual_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -149,7 +136,8 @@ static void dce_virtual_crtc_dpms(struct drm_crtc *crtc, int mode)
 	case DRM_MODE_DPMS_ON:
 		amdgpu_crtc->enabled = true;
 		/* Make sure VBLANK interrupts are still enabled */
-		type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
+		type = amdgpu_display_crtc_idx_to_irq_type(adev,
+						amdgpu_crtc->crtc_id);
 		amdgpu_irq_update(adev, &adev->crtc_irq, type);
 		drm_crtc_vblank_on(crtc);
 		break;
@@ -406,9 +394,9 @@ static int dce_virtual_sw_init(void *handle)
 	adev->ddev->mode_config.preferred_depth = 24;
 	adev->ddev->mode_config.prefer_shadow = 1;
-	adev->ddev->mode_config.fb_base = adev->mc.aper_base;
+	adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
 
-	r = amdgpu_modeset_create_props(adev);
+	r = amdgpu_display_modeset_create_props(adev);
 	if (r)
 		return r;
@@ -653,7 +641,6 @@ static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
 static const struct amdgpu_display_funcs dce_virtual_display_funcs = {
 	.bandwidth_update = &dce_virtual_bandwidth_update,
 	.vblank_get_counter = &dce_virtual_vblank_get_counter,
-	.vblank_wait = &dce_virtual_vblank_wait,
 	.backlight_set_level = NULL,
 	.backlight_get_level = NULL,
 	.hpd_sense = &dce_virtual_hpd_sense,

diff --git a/drivers/gpu/drm/amd/amdgpu/emu_soc.c b/drivers/gpu/drm/amd/amdgpu/emu_soc.c
new file mode 100644

@@ -0,0 +1,33 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "soc15.h"
+
+#include "soc15_common.h"
+#include "soc15_hw_ip.h"
+
+int emu_soc_asic_init(struct amdgpu_device *adev)
+{
+	return 0;
+}
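
emu_soc_asic_init() is deliberately a no-op: on a pre-silicon emulator, the programming normally done at asic init either is not needed or is handled by the emulation environment itself. A hedged sketch of the kind of dispatch this stub enables (the flag and the non-emulation path are assumptions, not shown in this diff):

```c
/* Assumed wiring, for illustration only: an emulation-mode flag picks
 * the stub instead of the real asic init path. */
static int example_real_asic_init(struct amdgpu_device *adev); /* hypothetical */

static int example_asic_init(struct amdgpu_device *adev, bool emu_mode)
{
	if (emu_mode)
		return emu_soc_asic_init(adev); /* no-op on the emulator */

	return example_real_asic_init(adev);    /* normal hardware path */
}
```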

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c

@@ -38,6 +38,7 @@
 #include "dce/dce_6_0_sh_mask.h"
 #include "gca/gfx_7_2_enum.h"
 #include "si_enums.h"
+#include "si.h"
 static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev);
 static void gfx_v6_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -1808,17 +1809,6 @@ static int gfx_v6_0_ring_test_ring(struct amdgpu_ring *ring)
 	return r;
 }
-static void gfx_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
-{
-	/* flush hdp cache */
-	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
-				 WRITE_DATA_DST_SEL(0)));
-	amdgpu_ring_write(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL);
-	amdgpu_ring_write(ring, 0);
-	amdgpu_ring_write(ring, 0x1);
-}
-
 static void gfx_v6_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
 {
 	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
@@ -1826,24 +1816,6 @@ static void gfx_v6_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
 			  EVENT_INDEX(0));
 }
-/**
- * gfx_v6_0_ring_emit_hdp_invalidate - emit an hdp invalidate on the cp
- *
- * @adev: amdgpu_device pointer
- * @ridx: amdgpu ring index
- *
- * Emits an hdp invalidate on the cp.
- */
-static void gfx_v6_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
-{
-	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
-				 WRITE_DATA_DST_SEL(0)));
-	amdgpu_ring_write(ring, mmHDP_DEBUG0);
-	amdgpu_ring_write(ring, 0);
-	amdgpu_ring_write(ring, 0x1);
-}
-
 static void gfx_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
 				     u64 seq, unsigned flags)
 {
@@ -2358,25 +2330,7 @@ static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 {
 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
-	/* write new base address */
-	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
-				 WRITE_DATA_DST_SEL(0)));
-	if (vmid < 8) {
-		amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
-	} else {
-		amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vmid - 8)));
-	}
-	amdgpu_ring_write(ring, 0);
-	amdgpu_ring_write(ring, pd_addr >> 12);
-
-	/* bits 0-15 are the VM contexts0-15 */
-	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
-				 WRITE_DATA_DST_SEL(0)));
-	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
-	amdgpu_ring_write(ring, 0);
-	amdgpu_ring_write(ring, 1 << vmid);
+	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
 	/* wait for the invalidate to complete */
 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
@@ -2401,6 +2355,18 @@ static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 	}
 }
+static void gfx_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
+				    uint32_t reg, uint32_t val)
+{
+	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
+
+	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
+				 WRITE_DATA_DST_SEL(0)));
+	amdgpu_ring_write(ring, reg);
+	amdgpu_ring_write(ring, 0);
+	amdgpu_ring_write(ring, val);
+}
+
 static void gfx_v6_0_rlc_fini(struct amdgpu_device *adev)
 {
@@ -3511,23 +3477,21 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
 	.get_wptr = gfx_v6_0_ring_get_wptr,
 	.set_wptr = gfx_v6_0_ring_set_wptr_gfx,
 	.emit_frame_size =
-		5 + /* gfx_v6_0_ring_emit_hdp_flush */
-		5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
+		5 + 5 + /* hdp flush / invalidate */
 		14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
 		7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */
-		17 + 6 + /* gfx_v6_0_ring_emit_vm_flush */
+		SI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + 6 + /* gfx_v6_0_ring_emit_vm_flush */
 		3 + 2, /* gfx_v6_ring_emit_cntxcntl including vgt flush */
 	.emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
 	.emit_ib = gfx_v6_0_ring_emit_ib,
 	.emit_fence = gfx_v6_0_ring_emit_fence,
 	.emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync,
 	.emit_vm_flush = gfx_v6_0_ring_emit_vm_flush,
-	.emit_hdp_flush = gfx_v6_0_ring_emit_hdp_flush,
-	.emit_hdp_invalidate = gfx_v6_0_ring_emit_hdp_invalidate,
 	.test_ring = gfx_v6_0_ring_test_ring,
 	.test_ib = gfx_v6_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
 	.emit_cntxcntl = gfx_v6_ring_emit_cntxcntl,
+	.emit_wreg = gfx_v6_0_ring_emit_wreg,
 };
 static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
@@ -3538,21 +3502,19 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
 	.get_wptr = gfx_v6_0_ring_get_wptr,
 	.set_wptr = gfx_v6_0_ring_set_wptr_compute,
 	.emit_frame_size =
-		5 + /* gfx_v6_0_ring_emit_hdp_flush */
-		5 + /* gfx_v6_0_ring_emit_hdp_invalidate */
+		5 + 5 + /* hdp flush / invalidate */
 		7 + /* gfx_v6_0_ring_emit_pipeline_sync */
-		17 + /* gfx_v6_0_ring_emit_vm_flush */
+		SI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v6_0_ring_emit_vm_flush */
 		14 + 14 + 14, /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
 	.emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
 	.emit_ib = gfx_v6_0_ring_emit_ib,
 	.emit_fence = gfx_v6_0_ring_emit_fence,
 	.emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync,
 	.emit_vm_flush = gfx_v6_0_ring_emit_vm_flush,
-	.emit_hdp_flush = gfx_v6_0_ring_emit_hdp_flush,
-	.emit_hdp_invalidate = gfx_v6_0_ring_emit_hdp_invalidate,
 	.test_ring = gfx_v6_0_ring_test_ring,
 	.test_ib = gfx_v6_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
+	.emit_wreg = gfx_v6_0_ring_emit_wreg,
 };
 static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev)
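
The new gfx_v6_0_ring_emit_wreg() costs exactly five dwords, which is where the `* 5` factor in the reworked emit_frame_size comes from: a VM flush is now SI_FLUSH_GPU_TLB_NUM_WREG generic register writes, each of the shape below. An annotated copy of the packet (comments are the only addition):

```c
/* One register write on the gfx CP = 5 ring dwords. */
static void annotated_emit_wreg(struct amdgpu_ring *ring,
				uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); /* 1: header, 3 payload dwords follow the control word */
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
				 WRITE_DATA_DST_SEL(0)));        /* 2: control (gfx rings select the PFP engine) */
	amdgpu_ring_write(ring, reg);                            /* 3: register offset (addr lo) */
	amdgpu_ring_write(ring, 0);                              /* 4: addr hi, unused for registers */
	amdgpu_ring_write(ring, val);                            /* 5: value */
}
```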

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c

@@ -1946,7 +1946,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
 		if (i == 0)
 			sh_mem_base = 0;
 		else
-			sh_mem_base = adev->mc.shared_aperture_start >> 48;
+			sh_mem_base = adev->gmc.shared_aperture_start >> 48;
 		cik_srbm_select(adev, 0, 0, 0, i);
 		/* CP and shaders */
 		WREG32(mmSH_MEM_CONFIG, sh_mem_cfg);
@@ -2147,26 +2147,6 @@ static void gfx_v7_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
 			  EVENT_INDEX(0));
 }
-/**
- * gfx_v7_0_ring_emit_hdp_invalidate - emit an hdp invalidate on the cp
- *
- * @adev: amdgpu_device pointer
- * @ridx: amdgpu ring index
- *
- * Emits an hdp invalidate on the cp.
- */
-static void gfx_v7_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
-{
-	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
-				 WRITE_DATA_DST_SEL(0) |
-				 WR_CONFIRM));
-	amdgpu_ring_write(ring, mmHDP_DEBUG0);
-	amdgpu_ring_write(ring, 0);
-	amdgpu_ring_write(ring, 1);
-}
-
 /**
  * gfx_v7_0_ring_emit_fence_gfx - emit a fence on the gfx ring
  *
@@ -3243,26 +3223,7 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 {
 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
-	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
-				 WRITE_DATA_DST_SEL(0)));
-	if (vmid < 8) {
-		amdgpu_ring_write(ring,
-				  (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
-	} else {
-		amdgpu_ring_write(ring,
-				  (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8));
-	}
-	amdgpu_ring_write(ring, 0);
-	amdgpu_ring_write(ring, pd_addr >> 12);
-
-	/* bits 0-15 are the VM contexts0-15 */
-	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
-				 WRITE_DATA_DST_SEL(0)));
-	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
-	amdgpu_ring_write(ring, 0);
-	amdgpu_ring_write(ring, 1 << vmid);
+	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
 	/* wait for the invalidate to complete */
 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
@@ -3289,6 +3250,19 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 	}
 }
+static void gfx_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
+				    uint32_t reg, uint32_t val)
+{
+	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
+
+	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
+	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
+				 WRITE_DATA_DST_SEL(0)));
+	amdgpu_ring_write(ring, reg);
+	amdgpu_ring_write(ring, 0);
+	amdgpu_ring_write(ring, val);
+}
+
 /*
  * RLC
  * The RLC is a multi-purpose microengine that handles a
@@ -5115,10 +5089,10 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
 	.emit_frame_size =
 		20 + /* gfx_v7_0_ring_emit_gds_switch */
 		7 + /* gfx_v7_0_ring_emit_hdp_flush */
-		5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
+		5 + /* hdp invalidate */
 		12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
 		7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
-		17 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
+		CIK_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
 		3 + 4, /* gfx_v7_ring_emit_cntxcntl including vgt flush*/
 	.emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_gfx */
 	.emit_ib = gfx_v7_0_ring_emit_ib_gfx,
@@ -5127,12 +5101,12 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
 	.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
 	.emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
 	.emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
-	.emit_hdp_invalidate = gfx_v7_0_ring_emit_hdp_invalidate,
 	.test_ring = gfx_v7_0_ring_test_ring,
 	.test_ib = gfx_v7_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.emit_cntxcntl = gfx_v7_ring_emit_cntxcntl,
+	.emit_wreg = gfx_v7_0_ring_emit_wreg,
 };
 static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
@@ -5146,9 +5120,9 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
 	.emit_frame_size =
 		20 + /* gfx_v7_0_ring_emit_gds_switch */
 		7 + /* gfx_v7_0_ring_emit_hdp_flush */
-		5 + /* gfx_v7_0_ring_emit_hdp_invalidate */
+		5 + /* hdp invalidate */
 		7 + /* gfx_v7_0_ring_emit_pipeline_sync */
-		17 + /* gfx_v7_0_ring_emit_vm_flush */
+		CIK_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v7_0_ring_emit_vm_flush */
 		7 + 7 + 7, /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
 	.emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_compute */
 	.emit_ib = gfx_v7_0_ring_emit_ib_compute,
@@ -5157,11 +5131,11 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
 	.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
 	.emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
 	.emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
-	.emit_hdp_invalidate = gfx_v7_0_ring_emit_hdp_invalidate,
 	.test_ring = gfx_v7_0_ring_test_ring,
 	.test_ib = gfx_v7_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
+	.emit_wreg = gfx_v7_0_ring_emit_wreg,
 };
 static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
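
The replaced literals in emit_frame_size are this arithmetic spelled out: the budget is now derived from how many register writes the generic GMC flush emits rather than hard-coded. A sketch for the CIK gfx ring, under the assumption that the 6-dword tail is the gfx-only PFP/ME synchronisation from the function above:

```c
/* Worst-case dword budget for the CIK gfx VM flush, matching the
 * emit_frame_size arithmetic above. */
enum {
	WREG_DWORDS     = 5,	/* one PACKET3_WRITE_DATA, as emitted above  */
	WAIT_DWORDS     = 7,	/* the PACKET3_WAIT_REG_MEM poll             */
	PFP_SYNC_DWORDS = 6,	/* gfx-only PFP/ME synchronisation tail     */
	NUM_WREG        = 3,	/* CIK_FLUSH_GPU_TLB_NUM_WREG                */
	VM_FLUSH_DWORDS = NUM_WREG * WREG_DWORDS + WAIT_DWORDS
			  + PFP_SYNC_DWORDS,	/* = 28 dwords */
};
```

Note the new budget (28 dwords) is larger than the old literal 17 + 6: the generic flush path reserves room for additional register writes, such as the PASID mapping updates this series introduces.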

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c

@@ -3796,7 +3796,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
 			tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, ALIGNMENT_MODE,
 					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
 			WREG32(mmSH_MEM_CONFIG, tmp);
-			tmp = adev->mc.shared_aperture_start >> 48;
+			tmp = adev->gmc.shared_aperture_start >> 48;
 			WREG32(mmSH_MEM_BASES, tmp);
 		}
@@ -4847,6 +4847,9 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
 		/* reset MQD to a clean status */
 		if (adev->gfx.mec.mqd_backup[mqd_idx])
 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
+		/* reset ring buffer */
+		ring->wptr = 0;
+		amdgpu_ring_clear_ring(ring);
 	} else {
 		amdgpu_ring_clear_ring(ring);
 	}
@@ -4921,13 +4924,6 @@ static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev)
 	/* Test KCQs */
 	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
 		ring = &adev->gfx.compute_ring[i];
-		if (adev->in_gpu_reset) {
-			/* move reset ring buffer to here to workaround
-			 * compute ring test failed
-			 */
-			ring->wptr = 0;
-			amdgpu_ring_clear_ring(ring);
-		}
 		ring->ready = true;
 		r = amdgpu_ring_test_ring(ring);
 		if (r)
@@ -6230,19 +6226,6 @@ static void gfx_v8_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
 			  EVENT_INDEX(0));
 }
-static void gfx_v8_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
-{
-	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
-				 WRITE_DATA_DST_SEL(0) |
-				 WR_CONFIRM));
-	amdgpu_ring_write(ring, mmHDP_DEBUG0);
-	amdgpu_ring_write(ring, 0);
-	amdgpu_ring_write(ring, 1);
-}
-
 static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 				      struct amdgpu_ib *ib,
 				      unsigned vmid, bool ctx_switch)
@@ -6332,28 +6315,7 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 {
 	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
-	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
-				 WRITE_DATA_DST_SEL(0)) |
-				 WR_CONFIRM);
-	if (vmid < 8) {
-		amdgpu_ring_write(ring,
-				  (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
-	} else {
-		amdgpu_ring_write(ring,
-				  (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8));
-	}
-	amdgpu_ring_write(ring, 0);
-	amdgpu_ring_write(ring, pd_addr >> 12);
-
-	/* bits 0-15 are the VM contexts0-15 */
-	/* invalidate the cache */
-	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
-				 WRITE_DATA_DST_SEL(0)));
-	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
-	amdgpu_ring_write(ring, 0);
-	amdgpu_ring_write(ring, 1 << vmid);
+	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
 	/* wait for the invalidate to complete */
 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
@@ -6617,8 +6579,22 @@ static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
 static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
 				    uint32_t val)
 {
+	uint32_t cmd;
+
+	switch (ring->funcs->type) {
+	case AMDGPU_RING_TYPE_GFX:
+		cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
+		break;
+	case AMDGPU_RING_TYPE_KIQ:
+		cmd = 1 << 16; /* no inc addr */
+		break;
+	default:
+		cmd = WR_CONFIRM;
+		break;
+	}
+
 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-	amdgpu_ring_write(ring, (1 << 16)); /* no inc addr */
+	amdgpu_ring_write(ring, cmd);
 	amdgpu_ring_write(ring, reg);
 	amdgpu_ring_write(ring, 0);
 	amdgpu_ring_write(ring, val);
@@ -6871,7 +6847,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
 	.emit_frame_size = /* maximum 215dw if count 16 IBs in */
 		5 +  /* COND_EXEC */
 		7 +  /* PIPELINE_SYNC */
-		19 + /* VM_FLUSH */
+		VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 9 + /* VM_FLUSH */
 		8 +  /* FENCE for VM_FLUSH */
 		20 + /* GDS switch */
 		4 + /* double SWITCH_BUFFER,
@@ -6893,7 +6869,6 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
 	.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
 	.emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
 	.emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
-	.emit_hdp_invalidate = gfx_v8_0_ring_emit_hdp_invalidate,
 	.test_ring = gfx_v8_0_ring_test_ring,
 	.test_ib = gfx_v8_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
@@ -6902,6 +6877,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
 	.emit_cntxcntl = gfx_v8_ring_emit_cntxcntl,
 	.init_cond_exec = gfx_v8_0_ring_emit_init_cond_exec,
 	.patch_cond_exec = gfx_v8_0_ring_emit_patch_cond_exec,
+	.emit_wreg = gfx_v8_0_ring_emit_wreg,
 };
 static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
@@ -6915,9 +6891,9 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
 	.emit_frame_size =
 		20 + /* gfx_v8_0_ring_emit_gds_switch */
 		7 + /* gfx_v8_0_ring_emit_hdp_flush */
-		5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
+		5 + /* hdp_invalidate */
 		7 + /* gfx_v8_0_ring_emit_pipeline_sync */
-		17 + /* gfx_v8_0_ring_emit_vm_flush */
+		VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v8_0_ring_emit_vm_flush */
 		7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
 	.emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_compute */
 	.emit_ib = gfx_v8_0_ring_emit_ib_compute,
@@ -6926,12 +6902,12 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
 	.emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
 	.emit_gds_switch = gfx_v8_0_ring_emit_gds_switch,
 	.emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
-	.emit_hdp_invalidate = gfx_v8_0_ring_emit_hdp_invalidate,
 	.test_ring = gfx_v8_0_ring_test_ring,
 	.test_ib = gfx_v8_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.set_priority = gfx_v8_0_ring_set_priority_compute,
+	.emit_wreg = gfx_v8_0_ring_emit_wreg,
 };
 static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
@@ -6945,7 +6921,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
 	.emit_frame_size =
 		20 + /* gfx_v8_0_ring_emit_gds_switch */
 		7 + /* gfx_v8_0_ring_emit_hdp_flush */
-		5 + /* gfx_v8_0_ring_emit_hdp_invalidate */
+		5 + /* hdp_invalidate */
 		7 + /* gfx_v8_0_ring_emit_pipeline_sync */
 		17 + /* gfx_v8_0_ring_emit_vm_flush */
7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_kiq x3 for user fence, vm fence */ 7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_kiq x3 for user fence, vm fence */
@ -7151,12 +7127,12 @@ static void gfx_v8_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
} ce_payload = {}; } ce_payload = {};
if (ring->adev->virt.chained_ib_support) { if (ring->adev->virt.chained_ib_support) {
ce_payload_addr = AMDGPU_VA_RESERVED_SIZE - 2 * 4096 + ce_payload_addr = amdgpu_csa_vaddr(ring->adev) +
offsetof(struct vi_gfx_meta_data_chained_ib, ce_payload); offsetof(struct vi_gfx_meta_data_chained_ib, ce_payload);
cnt_ce = (sizeof(ce_payload.chained) >> 2) + 4 - 2; cnt_ce = (sizeof(ce_payload.chained) >> 2) + 4 - 2;
} else { } else {
ce_payload_addr = AMDGPU_VA_RESERVED_SIZE - 2 * 4096 + ce_payload_addr = amdgpu_csa_vaddr(ring->adev) +
offsetof(struct vi_gfx_meta_data, ce_payload); offsetof(struct vi_gfx_meta_data, ce_payload);
cnt_ce = (sizeof(ce_payload.regular) >> 2) + 4 - 2; cnt_ce = (sizeof(ce_payload.regular) >> 2) + 4 - 2;
} }
@ -7179,7 +7155,7 @@ static void gfx_v8_0_ring_emit_de_meta(struct amdgpu_ring *ring)
struct vi_de_ib_state_chained_ib chained; struct vi_de_ib_state_chained_ib chained;
} de_payload = {}; } de_payload = {};
csa_addr = AMDGPU_VA_RESERVED_SIZE - 2 * 4096; csa_addr = amdgpu_csa_vaddr(ring->adev);
gds_addr = csa_addr + 4096; gds_addr = csa_addr + 4096;
if (ring->adev->virt.chained_ib_support) { if (ring->adev->virt.chained_ib_support) {
de_payload.chained.gds_backup_addrlo = lower_32_bits(gds_addr); de_payload.chained.gds_backup_addrlo = lower_32_bits(gds_addr);
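The new frame-size arithmetic above follows directly from the reworked emit_wreg packet: PACKET3(PACKET3_WRITE_DATA, 3) is one header dword plus four payload dwords (cmd, reg, 0, value), so every register write emitted through the callback costs exactly five dwords. A minimal sketch of the accounting, assuming the flush still ends in the 7-dword WAIT_REG_MEM shown in the vm_flush hunk plus, on the gfx ring, a 2-dword PFP_SYNC_ME (that 7 + 2 split is a reading of the packets, not something the diff states):

	/* Sketch only: dword budget for gfx_v8's reworked VM flush. */
	static unsigned gfx_v8_vm_flush_dwords(unsigned num_wreg, bool is_gfx)
	{
		/* num_wreg WRITE_DATA packets of 5 dwords each, then the
		 * invalidate wait (7 dwords) and the PFP sync (2 dwords,
		 * gfx ring only).
		 */
		return num_wreg * 5 + 7 + (is_gfx ? 2 : 0);
	}

With num_wreg = VI_FLUSH_GPU_TLB_NUM_WREG this reproduces the "* 5 + 9" entry for the gfx ring and the "* 5 + 7" entry for the compute ring.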

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1539,7 +1539,7 @@ static void gfx_v9_0_gpu_init(struct amdgpu_device *adev)
 			tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
 					    SH_MEM_ALIGNMENT_MODE_UNALIGNED);
 			WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp);
-			tmp = adev->mc.shared_aperture_start >> 48;
+			tmp = adev->gmc.shared_aperture_start >> 48;
 			WREG32_SOC15(GC, 0, mmSH_MEM_BASES, tmp);
 		}
 	}
@@ -3585,14 +3585,6 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 			      ref_and_mask, ref_and_mask, 0x20);
 }
 
-static void gfx_v9_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
-{
-	struct amdgpu_device *adev = ring->adev;
-
-	gfx_v9_0_write_data_to_reg(ring, 0, true,
-				   SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
-}
-
 static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
 				      struct amdgpu_ib *ib,
 				      unsigned vmid, bool ctx_switch)
@@ -3686,32 +3678,10 @@ static void gfx_v9_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 					unsigned vmid, uint64_t pd_addr)
 {
-	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
-	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
-	uint64_t flags = AMDGPU_PTE_VALID;
-	unsigned eng = ring->vm_inv_eng;
-
-	amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
-	pd_addr |= flags;
-
-	gfx_v9_0_write_data_to_reg(ring, usepfp, true,
-				   hub->ctx0_ptb_addr_lo32 + (2 * vmid),
-				   lower_32_bits(pd_addr));
-
-	gfx_v9_0_write_data_to_reg(ring, usepfp, true,
-				   hub->ctx0_ptb_addr_hi32 + (2 * vmid),
-				   upper_32_bits(pd_addr));
-
-	gfx_v9_0_write_data_to_reg(ring, usepfp, true,
-				   hub->vm_inv_eng0_req + eng, req);
-
-	/* wait for the invalidate to complete */
-	gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, hub->vm_inv_eng0_ack +
-			      eng, 0, 1 << vmid, 1 << vmid, 0x20);
+	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
 
 	/* compute doesn't have PFP */
-	if (usepfp) {
+	if (ring->funcs->type == AMDGPU_RING_TYPE_GFX) {
 		/* sync PFP to ME, otherwise we might get invalid PFP reads */
 		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
 		amdgpu_ring_write(ring, 0x0);
@@ -3735,6 +3705,105 @@ static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
 	return wptr;
 }
 
+static void gfx_v9_0_ring_set_pipe_percent(struct amdgpu_ring *ring,
+					   bool acquire)
+{
+	struct amdgpu_device *adev = ring->adev;
+	int pipe_num, tmp, reg;
+	int pipe_percent = acquire ? SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1;
+
+	pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe;
+
+	/* first me only has 2 entries, GFX and HP3D */
+	if (ring->me > 0)
+		pipe_num -= 2;
+
+	reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX) + pipe_num;
+	tmp = RREG32(reg);
+	tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent);
+	WREG32(reg, tmp);
+}
+
+static void gfx_v9_0_pipe_reserve_resources(struct amdgpu_device *adev,
+					    struct amdgpu_ring *ring,
+					    bool acquire)
+{
+	int i, pipe;
+	bool reserve;
+	struct amdgpu_ring *iring;
+
+	mutex_lock(&adev->gfx.pipe_reserve_mutex);
+	pipe = amdgpu_gfx_queue_to_bit(adev, ring->me, ring->pipe, 0);
+	if (acquire)
+		set_bit(pipe, adev->gfx.pipe_reserve_bitmap);
+	else
+		clear_bit(pipe, adev->gfx.pipe_reserve_bitmap);
+
+	if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) {
+		/* Clear all reservations - everyone reacquires all resources */
+		for (i = 0; i < adev->gfx.num_gfx_rings; ++i)
+			gfx_v9_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i],
+						       true);
+
+		for (i = 0; i < adev->gfx.num_compute_rings; ++i)
+			gfx_v9_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i],
+						       true);
+	} else {
+		/* Lower all pipes without a current reservation */
+		for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
+			iring = &adev->gfx.gfx_ring[i];
+			pipe = amdgpu_gfx_queue_to_bit(adev,
+						       iring->me,
+						       iring->pipe,
+						       0);
+			reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
+			gfx_v9_0_ring_set_pipe_percent(iring, reserve);
+		}
+
+		for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
+			iring = &adev->gfx.compute_ring[i];
+			pipe = amdgpu_gfx_queue_to_bit(adev,
+						       iring->me,
+						       iring->pipe,
+						       0);
+			reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
+			gfx_v9_0_ring_set_pipe_percent(iring, reserve);
+		}
+	}
+
+	mutex_unlock(&adev->gfx.pipe_reserve_mutex);
+}
+
+static void gfx_v9_0_hqd_set_priority(struct amdgpu_device *adev,
+				      struct amdgpu_ring *ring,
+				      bool acquire)
+{
+	uint32_t pipe_priority = acquire ? 0x2 : 0x0;
+	uint32_t queue_priority = acquire ? 0xf : 0x0;
+
+	mutex_lock(&adev->srbm_mutex);
+	soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+
+	WREG32_SOC15(GC, 0, mmCP_HQD_PIPE_PRIORITY, pipe_priority);
+	WREG32_SOC15(GC, 0, mmCP_HQD_QUEUE_PRIORITY, queue_priority);
+
+	soc15_grbm_select(adev, 0, 0, 0, 0);
+	mutex_unlock(&adev->srbm_mutex);
+}
+
+static void gfx_v9_0_ring_set_priority_compute(struct amdgpu_ring *ring,
+					       enum drm_sched_priority priority)
+{
+	struct amdgpu_device *adev = ring->adev;
+	bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW;
+
+	if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
+		return;
+
+	gfx_v9_0_hqd_set_priority(adev, ring, acquire);
+	gfx_v9_0_pipe_reserve_resources(adev, ring, acquire);
+}
+
 static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
@@ -3788,7 +3857,7 @@ static void gfx_v9_0_ring_emit_ce_meta(struct amdgpu_ring *ring)
 	int cnt;
 
 	cnt = (sizeof(ce_payload) >> 2) + 4 - 2;
-	csa_addr = AMDGPU_VA_RESERVED_SIZE - 2 * 4096;
+	csa_addr = amdgpu_csa_vaddr(ring->adev);
 
 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt));
 	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
@@ -3806,7 +3875,7 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
 	uint64_t csa_addr, gds_addr;
 	int cnt;
 
-	csa_addr = AMDGPU_VA_RESERVED_SIZE - 2 * 4096;
+	csa_addr = amdgpu_csa_vaddr(ring->adev);
 	gds_addr = csa_addr + 4096;
 	de_payload.gds_backup_addrlo = lower_32_bits(gds_addr);
 	de_payload.gds_backup_addrhi = upper_32_bits(gds_addr);
@@ -3904,15 +3973,34 @@ static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
 }
 
 static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
 				    uint32_t val)
 {
+	uint32_t cmd = 0;
+
+	switch (ring->funcs->type) {
+	case AMDGPU_RING_TYPE_GFX:
+		cmd = WRITE_DATA_ENGINE_SEL(1) | WR_CONFIRM;
+		break;
+	case AMDGPU_RING_TYPE_KIQ:
+		cmd = (1 << 16); /* no inc addr */
+		break;
+	default:
+		cmd = WR_CONFIRM;
+		break;
+	}
 	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
-	amdgpu_ring_write(ring, (1 << 16)); /* no inc addr */
+	amdgpu_ring_write(ring, cmd);
 	amdgpu_ring_write(ring, reg);
 	amdgpu_ring_write(ring, 0);
 	amdgpu_ring_write(ring, val);
 }
 
+static void gfx_v9_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+					uint32_t val, uint32_t mask)
+{
+	gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20);
+}
+
 static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
 						 enum amdgpu_interrupt_state state)
 {
@@ -4199,7 +4287,9 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
 	.emit_frame_size = /* totally 242 maximum if 16 IBs */
 		5 +  /* COND_EXEC */
 		7 +  /* PIPELINE_SYNC */
-		24 + /* VM_FLUSH */
+		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
+		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
+		2 + /* VM_FLUSH */
 		8 +  /* FENCE for VM_FLUSH */
 		20 + /* GDS switch */
 		4 + /* double SWITCH_BUFFER,
@@ -4221,7 +4311,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
 	.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
 	.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
 	.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
-	.emit_hdp_invalidate = gfx_v9_0_ring_emit_hdp_invalidate,
 	.test_ring = gfx_v9_0_ring_test_ring,
 	.test_ib = gfx_v9_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
@@ -4231,6 +4320,8 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
 	.init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
 	.patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
 	.emit_tmz = gfx_v9_0_ring_emit_tmz,
+	.emit_wreg = gfx_v9_0_ring_emit_wreg,
+	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
 };
 
 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
@@ -4245,9 +4336,11 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
 	.emit_frame_size =
 		20 + /* gfx_v9_0_ring_emit_gds_switch */
 		7 + /* gfx_v9_0_ring_emit_hdp_flush */
-		5 + /* gfx_v9_0_ring_emit_hdp_invalidate */
+		5 + /* hdp invalidate */
 		7 + /* gfx_v9_0_ring_emit_pipeline_sync */
-		24 + /* gfx_v9_0_ring_emit_vm_flush */
+		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
+		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
+		2 + /* gfx_v9_0_ring_emit_vm_flush */
 		8 + 8 + 8, /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
 	.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */
 	.emit_ib = gfx_v9_0_ring_emit_ib_compute,
@@ -4256,11 +4349,13 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
 	.emit_vm_flush = gfx_v9_0_ring_emit_vm_flush,
 	.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
 	.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
-	.emit_hdp_invalidate = gfx_v9_0_ring_emit_hdp_invalidate,
 	.test_ring = gfx_v9_0_ring_test_ring,
 	.test_ib = gfx_v9_0_ring_test_ib,
 	.insert_nop = amdgpu_ring_insert_nop,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
+	.set_priority = gfx_v9_0_ring_set_priority_compute,
+	.emit_wreg = gfx_v9_0_ring_emit_wreg,
+	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
 };
 
 static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
@@ -4275,9 +4370,11 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
 	.emit_frame_size =
 		20 + /* gfx_v9_0_ring_emit_gds_switch */
 		7 + /* gfx_v9_0_ring_emit_hdp_flush */
-		5 + /* gfx_v9_0_ring_emit_hdp_invalidate */
+		5 + /* hdp invalidate */
 		7 + /* gfx_v9_0_ring_emit_pipeline_sync */
-		24 + /* gfx_v9_0_ring_emit_vm_flush */
+		SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
+		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
+		2 + /* gfx_v9_0_ring_emit_vm_flush */
 		8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
 	.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */
 	.emit_ib = gfx_v9_0_ring_emit_ib_compute,
@@ -4288,6 +4385,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.emit_rreg = gfx_v9_0_ring_emit_rreg,
 	.emit_wreg = gfx_v9_0_ring_emit_wreg,
+	.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
 };
 
 static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev)
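The new compute priority path works in two steps: gfx_v9_0_hqd_set_priority() raises the ring's own HQD pipe and queue priority registers, then gfx_v9_0_pipe_reserve_resources() throttles every pipe that holds no reservation by dropping its SPI_WCL_PIPE_PERCENT value. A rough usage sketch, assuming the callback is driven the same way as the existing gfx v8 implementation it mirrors (the helper below is illustrative, not the driver's real entry point, which routes through its ring priority management):

	/* Illustrative caller: boost a compute ring around a
	 * high-priority job, then drop it back to normal.
	 */
	static void boost_ring(struct amdgpu_ring *ring, bool boost)
	{
		if (ring->funcs->set_priority)
			ring->funcs->set_priority(ring,
					boost ? DRM_SCHED_PRIORITY_HIGH_HW
					      : DRM_SCHED_PRIORITY_NORMAL);
	}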

diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -40,7 +40,7 @@ static void gfxhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
 	uint64_t value;
 
 	BUG_ON(adev->gart.table_addr & (~0x0000FFFFFFFFF000ULL));
-	value = adev->gart.table_addr - adev->mc.vram_start
+	value = adev->gart.table_addr - adev->gmc.vram_start
 		+ adev->vm_manager.vram_base_offset;
 	value &= 0x0000FFFFFFFFF000ULL;
 	value |= 0x1; /*valid bit*/
@@ -57,14 +57,14 @@ static void gfxhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
 	gfxhub_v1_0_init_gart_pt_regs(adev);
 
 	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
-		     (u32)(adev->mc.gart_start >> 12));
+		     (u32)(adev->gmc.gart_start >> 12));
 	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
-		     (u32)(adev->mc.gart_start >> 44));
+		     (u32)(adev->gmc.gart_start >> 44));
 
 	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
-		     (u32)(adev->mc.gart_end >> 12));
+		     (u32)(adev->gmc.gart_end >> 12));
 	WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
-		     (u32)(adev->mc.gart_end >> 44));
+		     (u32)(adev->gmc.gart_end >> 44));
 }
 
 static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
@@ -78,12 +78,12 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
 
 	/* Program the system aperture low logical page number. */
 	WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
-		     adev->mc.vram_start >> 18);
+		     adev->gmc.vram_start >> 18);
 	WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
-		     adev->mc.vram_end >> 18);
+		     adev->gmc.vram_end >> 18);
 
 	/* Set default page address. */
-	value = adev->vram_scratch.gpu_addr - adev->mc.vram_start
+	value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
 		+ adev->vm_manager.vram_base_offset;
 	WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
 		     (u32)(value >> 12));
@@ -143,7 +143,7 @@ static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
 	WREG32_SOC15(GC, 0, mmVM_L2_CNTL2, tmp);
 
 	tmp = mmVM_L2_CNTL3_DEFAULT;
-	if (adev->mc.translate_further) {
+	if (adev->gmc.translate_further) {
 		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
 		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
 				    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
@@ -195,7 +195,7 @@ static void gfxhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
 
 	num_level = adev->vm_manager.num_level;
 	block_size = adev->vm_manager.block_size;
-	if (adev->mc.translate_further)
+	if (adev->gmc.translate_further)
 		num_level -= 1;
 	else
 		block_size -= 9;
@@ -257,9 +257,9 @@ int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
 		 * SRIOV driver need to program them
 		 */
 		WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_BASE,
-			     adev->mc.vram_start >> 24);
+			     adev->gmc.vram_start >> 24);
 		WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_TOP,
-			     adev->mc.vram_end >> 24);
+			     adev->gmc.vram_end >> 24);
 	}
 
 	/* GART Enable. */
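Every hunk in this hub file is the mechanical half of the series-wide rename from adev->mc (struct amdgpu_mc) to adev->gmc (struct amdgpu_gmc), matching the new amdgpu_gmc.h header pulled in earlier in the merge. As a reading aid, the fields this file actually touches are summarized below; this is an excerpt reconstructed from the accesses in the diff, not a copy of the real struct definition:

	/* Partial, reconstructed view of struct amdgpu_gmc as used here. */
	struct amdgpu_gmc_excerpt {
		u64	vram_start, vram_end;	/* VRAM range in MC address space */
		u64	gart_start, gart_end;	/* GART range in MC address space */
		bool	translate_further;	/* extra page-table translation mode */
	};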

diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -37,7 +37,7 @@
 #include "dce/dce_6_0_sh_mask.h"
 #include "si_enums.h"
 
-static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev);
+static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev);
 static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
 static int gmc_v6_0_wait_for_idle(void *handle);
 
@@ -137,19 +137,19 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
 		snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
 	else
 		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
-	err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
+	err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
 	if (err)
 		goto out;
 
-	err = amdgpu_ucode_validate(adev->mc.fw);
+	err = amdgpu_ucode_validate(adev->gmc.fw);
 
 out:
 	if (err) {
 		dev_err(adev->dev,
 		       "si_mc: Failed to load firmware \"%s\"\n",
 		       fw_name);
-		release_firmware(adev->mc.fw);
-		adev->mc.fw = NULL;
+		release_firmware(adev->gmc.fw);
+		adev->gmc.fw = NULL;
 	}
 	return err;
 }
@@ -162,20 +162,20 @@ static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
 	int i, regs_size, ucode_size;
 	const struct mc_firmware_header_v1_0 *hdr;
 
-	if (!adev->mc.fw)
+	if (!adev->gmc.fw)
 		return -EINVAL;
 
-	hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
+	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
 
 	amdgpu_ucode_print_mc_hdr(&hdr->header);
 
-	adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
+	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
 	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
 	new_io_mc_regs = (const __le32 *)
-		(adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
+		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
 	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
 	new_fw_data = (const __le32 *)
-		(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 
 	running = RREG32(mmMC_SEQ_SUP_CNTL) & MC_SEQ_SUP_CNTL__RUN_MASK;
@@ -218,12 +218,12 @@ static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
 }
 
 static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
-				       struct amdgpu_mc *mc)
+				       struct amdgpu_gmc *mc)
 {
 	u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
 	base <<= 24;
 
-	amdgpu_device_vram_location(adev, &adev->mc, base);
+	amdgpu_device_vram_location(adev, &adev->gmc, base);
 	amdgpu_device_gart_location(adev, mc);
 }
@@ -260,9 +260,9 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
 	}
 	/* Update configuration */
 	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
-	       adev->mc.vram_start >> 12);
+	       adev->gmc.vram_start >> 12);
 	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
-	       adev->mc.vram_end >> 12);
+	       adev->gmc.vram_end >> 12);
 	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
 	       adev->vram_scratch.gpu_addr >> 12);
 	WREG32(mmMC_VM_AGP_BASE, 0);
@@ -320,56 +320,69 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
 		numchan = 16;
 		break;
 	}
-	adev->mc.vram_width = numchan * chansize;
+	adev->gmc.vram_width = numchan * chansize;
 	/* size in MB on si */
-	adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
-	adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+	adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+	adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
 
 	if (!(adev->flags & AMD_IS_APU)) {
 		r = amdgpu_device_resize_fb_bar(adev);
 		if (r)
 			return r;
 	}
-	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
-	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
-	adev->mc.visible_vram_size = adev->mc.aper_size;
+	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
+	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
+	adev->gmc.visible_vram_size = adev->gmc.aper_size;
 
 	/* set the gart size */
 	if (amdgpu_gart_size == -1) {
 		switch (adev->asic_type) {
 		case CHIP_HAINAN:    /* no MM engines */
 		default:
-			adev->mc.gart_size = 256ULL << 20;
+			adev->gmc.gart_size = 256ULL << 20;
 			break;
 		case CHIP_VERDE:    /* UVD, VCE do not support GPUVM */
 		case CHIP_TAHITI:   /* UVD, VCE do not support GPUVM */
 		case CHIP_PITCAIRN: /* UVD, VCE do not support GPUVM */
 		case CHIP_OLAND:    /* UVD, VCE do not support GPUVM */
-			adev->mc.gart_size = 1024ULL << 20;
+			adev->gmc.gart_size = 1024ULL << 20;
 			break;
 		}
 	} else {
-		adev->mc.gart_size = (u64)amdgpu_gart_size << 20;
+		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
 	}
 
-	gmc_v6_0_vram_gtt_location(adev, &adev->mc);
+	gmc_v6_0_vram_gtt_location(adev, &adev->gmc);
 
 	return 0;
 }
 
-static void gmc_v6_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
-					uint32_t vmid)
+static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid)
 {
-	WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
 	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
 }
 
-static int gmc_v6_0_gart_set_pte_pde(struct amdgpu_device *adev,
-				     void *cpu_pt_addr,
-				     uint32_t gpu_page_idx,
-				     uint64_t addr,
-				     uint64_t flags)
+static uint64_t gmc_v6_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
+					    unsigned vmid, uint64_t pd_addr)
+{
+	uint32_t reg;
+
+	/* write new base address */
+	if (vmid < 8)
+		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
+	else
+		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vmid - 8);
+	amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);
+
+	/* bits 0-15 are the VM contexts0-15 */
+	amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);
+
+	return pd_addr;
+}
+
+static int gmc_v6_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
+				uint32_t gpu_page_idx, uint64_t addr,
+				uint64_t flags)
 {
 	void __iomem *ptr = (void *)cpu_pt_addr;
 	uint64_t value;
@@ -433,9 +446,9 @@ static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
 {
 	u32 tmp;
 
-	if (enable && !adev->mc.prt_warning) {
+	if (enable && !adev->gmc.prt_warning) {
 		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
-		adev->mc.prt_warning = true;
+		adev->gmc.prt_warning = true;
 	}
 
 	tmp = RREG32(mmVM_PRT_CNTL);
@@ -455,7 +468,8 @@ static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
 	if (enable) {
 		uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
-		uint32_t high = adev->vm_manager.max_pfn;
+		uint32_t high = adev->vm_manager.max_pfn -
+			(AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);
 
 		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
 		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
@@ -515,8 +529,8 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
 	       (field << VM_L2_CNTL3__BANK_SELECT__SHIFT) |
 	       (field << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
 	/* setup context0 */
-	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gart_start >> 12);
-	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gart_end >> 12);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
 	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
 	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
 			(u32)(adev->dummy_page.addr >> 12));
@@ -561,9 +575,9 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
 	else
 		gmc_v6_0_set_fault_enable_default(adev, true);
 
-	gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
+	gmc_v6_0_flush_gpu_tlb(adev, 0);
 	dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
-		 (unsigned)(adev->mc.gart_size >> 20),
+		 (unsigned)(adev->gmc.gart_size >> 20),
 		 (unsigned long long)adev->gart.table_addr);
 	adev->gart.ready = true;
 	return 0;
@@ -795,7 +809,7 @@ static int gmc_v6_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	gmc_v6_0_set_gart_funcs(adev);
+	gmc_v6_0_set_gmc_funcs(adev);
 	gmc_v6_0_set_irq_funcs(adev);
 
 	return 0;
@@ -806,7 +820,7 @@ static int gmc_v6_0_late_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
-		return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+		return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
 	else
 		return 0;
 }
@@ -818,26 +832,26 @@ static int gmc_v6_0_sw_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	if (adev->flags & AMD_IS_APU) {
-		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
 	} else {
 		u32 tmp = RREG32(mmMC_SEQ_MISC0);
 		tmp &= MC_SEQ_MISC0__MT__MASK;
-		adev->mc.vram_type = gmc_v6_0_convert_vram_type(tmp);
+		adev->gmc.vram_type = gmc_v6_0_convert_vram_type(tmp);
 	}
 
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->mc.vm_fault);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
 	if (r)
 		return r;
 
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->mc.vm_fault);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
 	if (r)
 		return r;
 
 	amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);
 
-	adev->mc.mc_mask = 0xffffffffffULL;
+	adev->gmc.mc_mask = 0xffffffffffULL;
 
-	adev->mc.stolen_size = 256 * 1024;
+	adev->gmc.stolen_size = 256 * 1024;
 
 	adev->need_dma32 = false;
 	dma_bits = adev->need_dma32 ? 32 : 40;
@@ -902,8 +916,8 @@ static int gmc_v6_0_sw_fini(void *handle)
 	amdgpu_vm_manager_fini(adev);
 	gmc_v6_0_gart_fini(adev);
 	amdgpu_bo_fini(adev);
-	release_firmware(adev->mc.fw);
-	adev->mc.fw = NULL;
+	release_firmware(adev->gmc.fw);
+	adev->gmc.fw = NULL;
 
 	return 0;
 }
@@ -934,7 +948,7 @@ static int gmc_v6_0_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
+	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
 	gmc_v6_0_gart_disable(adev);
 
 	return 0;
@@ -1129,9 +1143,10 @@ static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
 	.set_powergating_state = gmc_v6_0_set_powergating_state,
 };
 
-static const struct amdgpu_gart_funcs gmc_v6_0_gart_funcs = {
-	.flush_gpu_tlb = gmc_v6_0_gart_flush_gpu_tlb,
-	.set_pte_pde = gmc_v6_0_gart_set_pte_pde,
+static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = {
+	.flush_gpu_tlb = gmc_v6_0_flush_gpu_tlb,
+	.emit_flush_gpu_tlb = gmc_v6_0_emit_flush_gpu_tlb,
+	.set_pte_pde = gmc_v6_0_set_pte_pde,
 	.set_prt = gmc_v6_0_set_prt,
 	.get_vm_pde = gmc_v6_0_get_vm_pde,
 	.get_vm_pte_flags = gmc_v6_0_get_vm_pte_flags
@@ -1142,16 +1157,16 @@ static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
 	.process = gmc_v6_0_process_interrupt,
 };
 
-static void gmc_v6_0_set_gart_funcs(struct amdgpu_device *adev)
+static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev)
 {
-	if (adev->gart.gart_funcs == NULL)
-		adev->gart.gart_funcs = &gmc_v6_0_gart_funcs;
+	if (adev->gmc.gmc_funcs == NULL)
+		adev->gmc.gmc_funcs = &gmc_v6_0_gmc_funcs;
 }
 
 static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
 {
-	adev->mc.vm_fault.num_types = 1;
-	adev->mc.vm_fault.funcs = &gmc_v6_0_irq_funcs;
+	adev->gmc.vm_fault.num_types = 1;
+	adev->gmc.vm_fault.funcs = &gmc_v6_0_irq_funcs;
 }
 
 const struct amdgpu_ip_block_version gmc_v6_0_ip_block =
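The structural point of the gmc_v6 rework: a ring no longer hand-rolls its TLB-flush packets. Each GMC version now exposes emit_flush_gpu_tlb, built entirely from amdgpu_ring_emit_wreg() calls, so it works on any ring that implements the new emit_wreg hook, and the ring-side emit_vm_flush simply dispatches through the gmc_funcs table. A sketch of that indirection as this series appears to wire it up (the macro body is an approximation written from the callbacks registered above, not a quote of the header):

	/* Approximate shape of the dispatch the rings now use in place
	 * of open-coded page-table-base and VM_INVALIDATE_REQUEST writes.
	 */
	#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) \
		((r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr)))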

diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -43,7 +43,7 @@
 #include "amdgpu_atombios.h"
 
-static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
+static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev);
 static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
 static int gmc_v7_0_wait_for_idle(void *handle);
 
@@ -152,16 +152,16 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
 	else
 		snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
 
-	err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
+	err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
 	if (err)
 		goto out;
-	err = amdgpu_ucode_validate(adev->mc.fw);
+	err = amdgpu_ucode_validate(adev->gmc.fw);
 
 out:
 	if (err) {
 		pr_err("cik_mc: Failed to load firmware \"%s\"\n", fw_name);
-		release_firmware(adev->mc.fw);
-		adev->mc.fw = NULL;
+		release_firmware(adev->gmc.fw);
+		adev->gmc.fw = NULL;
 	}
 	return err;
 }
@@ -182,19 +182,19 @@ static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
 	u32 running;
 	int i, ucode_size, regs_size;
 
-	if (!adev->mc.fw)
+	if (!adev->gmc.fw)
 		return -EINVAL;
 
-	hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
+	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
 
 	amdgpu_ucode_print_mc_hdr(&hdr->header);
 
-	adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
+	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
 	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
 	io_mc_regs = (const __le32 *)
-		(adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
+		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
 	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
 	fw_data = (const __le32 *)
-		(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 
 	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);
@@ -236,12 +236,12 @@ static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
 }
 
 static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
-				       struct amdgpu_mc *mc)
+				       struct amdgpu_gmc *mc)
 {
 	u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
 	base <<= 24;
 
-	amdgpu_device_vram_location(adev, &adev->mc, base);
+	amdgpu_device_vram_location(adev, &adev->gmc, base);
 	amdgpu_device_gart_location(adev, mc);
 }
@@ -284,9 +284,9 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
 	}
 	/* Update configuration */
 	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
-	       adev->mc.vram_start >> 12);
+	       adev->gmc.vram_start >> 12);
 	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
-	       adev->mc.vram_end >> 12);
+	       adev->gmc.vram_end >> 12);
 	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
 	       adev->vram_scratch.gpu_addr >> 12);
 	WREG32(mmMC_VM_AGP_BASE, 0);
@@ -319,8 +319,8 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
 {
 	int r;
 
-	adev->mc.vram_width = amdgpu_atombios_get_vram_width(adev);
-	if (!adev->mc.vram_width) {
+	adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev);
+	if (!adev->gmc.vram_width) {
 		u32 tmp;
 		int chansize, numchan;
 
@@ -362,38 +362,38 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
 			numchan = 16;
 			break;
 		}
-		adev->mc.vram_width = numchan * chansize;
+		adev->gmc.vram_width = numchan * chansize;
 	}
 	/* size in MB on si */
-	adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
-	adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+	adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+	adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
 
 	if (!(adev->flags & AMD_IS_APU)) {
 		r = amdgpu_device_resize_fb_bar(adev);
 		if (r)
 			return r;
 	}
-	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
-	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
+	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
+	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
 
 #ifdef CONFIG_X86_64
 	if (adev->flags & AMD_IS_APU) {
-		adev->mc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
-		adev->mc.aper_size = adev->mc.real_vram_size;
+		adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
+		adev->gmc.aper_size = adev->gmc.real_vram_size;
 	}
 #endif
 
 	/* In case the PCI BAR is larger than the actual amount of vram */
-	adev->mc.visible_vram_size = adev->mc.aper_size;
-	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
-		adev->mc.visible_vram_size = adev->mc.real_vram_size;
+	adev->gmc.visible_vram_size = adev->gmc.aper_size;
+	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
+		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
 
 	/* set the gart size */
 	if (amdgpu_gart_size == -1) {
 		switch (adev->asic_type) {
 		case CHIP_TOPAZ:    /* no MM engines */
 		default:
-			adev->mc.gart_size = 256ULL << 20;
+			adev->gmc.gart_size = 256ULL << 20;
 			break;
 #ifdef CONFIG_DRM_AMDGPU_CIK
 		case CHIP_BONAIRE: /* UVD, VCE do not support GPUVM */
@@ -401,15 +401,15 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
 		case CHIP_KAVERI:  /* UVD, VCE do not support GPUVM */
 		case CHIP_KABINI:  /* UVD, VCE do not support GPUVM */
 		case CHIP_MULLINS: /* UVD, VCE do not support GPUVM */
-			adev->mc.gart_size = 1024ULL << 20;
+			adev->gmc.gart_size = 1024ULL << 20;
 			break;
 #endif
 		}
 	} else {
-		adev->mc.gart_size = (u64)amdgpu_gart_size << 20;
+		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
 	}
 
-	gmc_v7_0_vram_gtt_location(adev, &adev->mc);
+	gmc_v7_0_vram_gtt_location(adev, &adev->gmc);
 
 	return 0;
 }
@@ -422,25 +422,44 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
  */
 
 /**
- * gmc_v7_0_gart_flush_gpu_tlb - gart tlb flush callback
+ * gmc_v7_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table (CIK).
 */
-static void gmc_v7_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
-					uint32_t vmid)
+static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid)
 {
-	/* flush hdp cache */
-	WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
-
 	/* bits 0-15 are the VM contexts0-15 */
 	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
 }
 
+static uint64_t gmc_v7_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
+					    unsigned vmid, uint64_t pd_addr)
+{
+	uint32_t reg;
+
+	if (vmid < 8)
+		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
+	else
+		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
+	amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);
+
+	/* bits 0-15 are the VM contexts0-15 */
+	amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);
+
+	return pd_addr;
+}
+
+static void gmc_v7_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
+					unsigned pasid)
+{
+	amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
+}
+
 /**
- * gmc_v7_0_gart_set_pte_pde - update the page tables using MMIO
+ * gmc_v7_0_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
@@ -450,11 +469,9 @@ static void gmc_v7_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
 *
 * Update the page tables using the CPU.
 */
-static int gmc_v7_0_gart_set_pte_pde(struct amdgpu_device *adev,
-				     void *cpu_pt_addr,
-				     uint32_t gpu_page_idx,
-				     uint64_t addr,
-				     uint64_t flags)
+static int gmc_v7_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
+				uint32_t gpu_page_idx, uint64_t addr,
+				uint64_t flags)
 {
 	void __iomem *ptr = (void *)cpu_pt_addr;
 	uint64_t value;
@@ -524,9 +541,9 @@ static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable)
 {
 	uint32_t tmp;
 
-	if (enable && !adev->mc.prt_warning) {
+	if (enable && !adev->gmc.prt_warning) {
 		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
-		adev->mc.prt_warning = true;
+		adev->gmc.prt_warning = true;
 	}
 
 	tmp = RREG32(mmVM_PRT_CNTL);
@@ -548,7 +565,8 @@ static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable)
 	if (enable) {
 		uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
-		uint32_t high = adev->vm_manager.max_pfn;
+		uint32_t high = adev->vm_manager.max_pfn -
+			(AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);
 
 		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
 		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
@@ -622,8 +640,8 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field);
 	WREG32(mmVM_L2_CNTL3, tmp);
 	/* setup context0 */
-	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gart_start >> 12);
-	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gart_end >> 12);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
 	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
 	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
 			(u32)(adev->dummy_page.addr >> 12));
@@ -675,9 +693,9 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
 		WREG32(mmCHUB_CONTROL, tmp);
 	}
 
-	gmc_v7_0_gart_flush_gpu_tlb(adev, 0);
+	gmc_v7_0_flush_gpu_tlb(adev, 0);
 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
-		 (unsigned)(adev->mc.gart_size >> 20),
+		 (unsigned)(adev->gmc.gart_size >> 20),
 		 (unsigned long long)adev->gart.table_addr);
 	adev->gart.ready = true;
 	return 0;
@@ -750,21 +768,21 @@ static void gmc_v7_0_gart_fini(struct amdgpu_device *adev)
 *
 * Print human readable fault information (CIK).
 */
-static void gmc_v7_0_vm_decode_fault(struct amdgpu_device *adev,
-				     u32 status, u32 addr, u32 mc_client)
+static void gmc_v7_0_vm_decode_fault(struct amdgpu_device *adev, u32 status,
+				     u32 addr, u32 mc_client, unsigned pasid)
 {
-	u32 mc_id;
 	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
 	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
 					PROTECTIONS);
 	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
 		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
+	u32 mc_id;
 
 	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
 			      MEMORY_CLIENT_ID);
 
-	dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
-	       protections, vmid, addr,
+	dev_err(adev->dev, "VM fault (0x%02x, vmid %d, pasid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
+	       protections, vmid, pasid, addr,
 	       REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
 			     MEMORY_CLIENT_RW) ?
 	       "write" : "read", block, mc_client, mc_id);
@@ -922,16 +940,16 @@ static int gmc_v7_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	gmc_v7_0_set_gart_funcs(adev);
+	gmc_v7_0_set_gmc_funcs(adev);
 	gmc_v7_0_set_irq_funcs(adev);
 
-	adev->mc.shared_aperture_start = 0x2000000000000000ULL;
-	adev->mc.shared_aperture_end =
-		adev->mc.shared_aperture_start + (4ULL << 30) - 1;
-	adev->mc.private_aperture_start =
-		adev->mc.shared_aperture_end + 1;
-	adev->mc.private_aperture_end =
-		adev->mc.private_aperture_start + (4ULL << 30) - 1;
+	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
+	adev->gmc.shared_aperture_end =
+		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
+	adev->gmc.private_aperture_start =
+		adev->gmc.shared_aperture_end + 1;
+	adev->gmc.private_aperture_end =
+		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
 
 	return 0;
 }
@@ -941,7 +959,7 @@ static int gmc_v7_0_late_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
-		return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+		return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
 	else
 		return 0;
 }
@@ -953,18 +971,18 @@ static int gmc_v7_0_sw_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	if (adev->flags & AMD_IS_APU) {
-		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
 	} else {
 		u32 tmp = RREG32(mmMC_SEQ_MISC0);
 		tmp &= MC_SEQ_MISC0__MT__MASK;
-		adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
+		adev->gmc.vram_type = gmc_v7_0_convert_vram_type(tmp);
 	}
 
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->mc.vm_fault);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
 	if (r)
 		return r;
 
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->mc.vm_fault);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
 	if (r)
 		return r;
 
@@ -978,9 +996,9 @@ static int gmc_v7_0_sw_init(void *handle)
 	 * This is the max address of the GPU's
 	 * internal address space.
 	 */
-	adev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
+	adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
 
-	adev->mc.stolen_size = 256 * 1024;
+	adev->gmc.stolen_size = 256 * 1024;
 
 	/* set DMA mask + need_dma32 flags.
 	 * PCIE - can handle 40-bits.
@@ -1051,8 +1069,8 @@ static int gmc_v7_0_sw_fini(void *handle)
 	amdgpu_vm_manager_fini(adev);
 	gmc_v7_0_gart_fini(adev);
 	amdgpu_bo_fini(adev);
-	release_firmware(adev->mc.fw);
-	adev->mc.fw = NULL;
+	release_firmware(adev->gmc.fw);
+	adev->gmc.fw = NULL;
 
 	return 0;
 }
@@ -1085,7 +1103,7 @@ static int gmc_v7_0_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
+	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
 	gmc_v7_0_gart_disable(adev);
 
 	return 0;
@@ -1259,7 +1277,8 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
 			addr);
 		dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
 			status);
-		gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client);
+		gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client,
+					 entry->pasid);
 	}
 
 	return 0;
@@ -1308,9 +1327,11 @@ static const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
 	.set_powergating_state = gmc_v7_0_set_powergating_state,
 };
 
-static const struct amdgpu_gart_funcs gmc_v7_0_gart_funcs = {
-	.flush_gpu_tlb = gmc_v7_0_gart_flush_gpu_tlb,
-	.set_pte_pde = gmc_v7_0_gart_set_pte_pde,
+static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = {
+	.flush_gpu_tlb = gmc_v7_0_flush_gpu_tlb,
+	.emit_flush_gpu_tlb = gmc_v7_0_emit_flush_gpu_tlb,
+	.emit_pasid_mapping = gmc_v7_0_emit_pasid_mapping,
+	.set_pte_pde = gmc_v7_0_set_pte_pde,
 	.set_prt = gmc_v7_0_set_prt,
 	.get_vm_pte_flags = gmc_v7_0_get_vm_pte_flags,
 	.get_vm_pde = gmc_v7_0_get_vm_pde
@@ -1321,16 +1342,16 @@ static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
 	.process = gmc_v7_0_process_interrupt,
 };
 
-static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev)
+static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev)
 {
-	if (adev->gart.gart_funcs == NULL)
-		adev->gart.gart_funcs = &gmc_v7_0_gart_funcs;
+	if (adev->gmc.gmc_funcs == NULL)
+		adev->gmc.gmc_funcs = &gmc_v7_0_gmc_funcs;
 }
 
 static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
 {
-	adev->mc.vm_fault.num_types = 1;
-	adev->mc.vm_fault.funcs = &gmc_v7_0_irq_funcs;
+	adev->gmc.vm_fault.num_types = 1;
+	adev->gmc.vm_fault.funcs = &gmc_v7_0_irq_funcs;
 }
 
 const struct amdgpu_ip_block_version gmc_v7_0_ip_block =
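Two details in the gmc_v7 hunks are easy to miss. First, gmc_v7_0_emit_pasid_mapping() writes the PASID into the interrupt handler's per-VMID lookup table (mmIH_VMID_0_LUT + vmid), which is what lets the fault decoder above report a pasid alongside the vmid. Second, the PRT aperture's high bound now stops short of the reserved VA area instead of running all the way to max_pfn. A worked instance of that bound under assumed numbers (the page size and reservation below are illustrative, not the driver's constants):

	/* Assumed: 40-bit VA, 4 KiB GPU pages, 1 MiB reserved area. */
	enum {
		GPU_PAGE_SHIFT = 12,
		VA_RESERVED    = 1 << 20,
		MAX_PFN        = 1 << (40 - GPU_PAGE_SHIFT),	/* 268435456 */
	};
	/* low  = VA_RESERVED >> GPU_PAGE_SHIFT             =        256 */
	/* high = MAX_PFN - (VA_RESERVED >> GPU_PAGE_SHIFT) =  268435200 */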

diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -45,7 +45,7 @@
 #include "amdgpu_atombios.h"
-static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
+static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev);
 static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
 static int gmc_v8_0_wait_for_idle(void *handle);
@@ -236,16 +236,16 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
 	}
 	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
-	err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
+	err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
 	if (err)
 		goto out;
-	err = amdgpu_ucode_validate(adev->mc.fw);
+	err = amdgpu_ucode_validate(adev->gmc.fw);
 out:
 	if (err) {
 		pr_err("mc: Failed to load firmware \"%s\"\n", fw_name);
-		release_firmware(adev->mc.fw);
-		adev->mc.fw = NULL;
+		release_firmware(adev->gmc.fw);
+		adev->gmc.fw = NULL;
 	}
 	return err;
 }
@@ -274,19 +274,19 @@ static int gmc_v8_0_tonga_mc_load_microcode(struct amdgpu_device *adev)
 	if (amdgpu_sriov_bios(adev))
 		return 0;
-	if (!adev->mc.fw)
+	if (!adev->gmc.fw)
 		return -EINVAL;
-	hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
+	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
 	amdgpu_ucode_print_mc_hdr(&hdr->header);
-	adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
+	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
 	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
 	io_mc_regs = (const __le32 *)
-		(adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
+		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
 	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
 	fw_data = (const __le32 *)
-		(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);
@@ -350,19 +350,19 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
 	if (vbios_version == 0)
 		return 0;
-	if (!adev->mc.fw)
+	if (!adev->gmc.fw)
 		return -EINVAL;
-	hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
+	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
 	amdgpu_ucode_print_mc_hdr(&hdr->header);
-	adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
+	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
 	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
 	io_mc_regs = (const __le32 *)
-		(adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
+		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
 	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
 	fw_data = (const __le32 *)
-		(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
 	data = RREG32(mmMC_SEQ_MISC0);
 	data &= ~(0x40);
@@ -398,7 +398,7 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
 }
 static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
-				       struct amdgpu_mc *mc)
+				       struct amdgpu_gmc *mc)
 {
 	u64 base = 0;
@@ -406,7 +406,7 @@ static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
 		base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
 	base <<= 24;
-	amdgpu_device_vram_location(adev, &adev->mc, base);
+	amdgpu_device_vram_location(adev, &adev->gmc, base);
 	amdgpu_device_gart_location(adev, mc);
 }
@@ -449,18 +449,18 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
 	}
 	/* Update configuration */
 	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
-	       adev->mc.vram_start >> 12);
+	       adev->gmc.vram_start >> 12);
 	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
-	       adev->mc.vram_end >> 12);
+	       adev->gmc.vram_end >> 12);
 	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
 	       adev->vram_scratch.gpu_addr >> 12);
 	if (amdgpu_sriov_vf(adev)) {
-		tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
-		tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
+		tmp = ((adev->gmc.vram_end >> 24) & 0xFFFF) << 16;
+		tmp |= ((adev->gmc.vram_start >> 24) & 0xFFFF);
 		WREG32(mmMC_VM_FB_LOCATION, tmp);
 		/* XXX double check these! */
-		WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
+		WREG32(mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
 		WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
 		WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
 	}
@@ -495,8 +495,8 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
 {
 	int r;
-	adev->mc.vram_width = amdgpu_atombios_get_vram_width(adev);
-	if (!adev->mc.vram_width) {
+	adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev);
+	if (!adev->gmc.vram_width) {
 		u32 tmp;
 		int chansize, numchan;
@@ -538,31 +538,31 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
 			numchan = 16;
 			break;
 		}
-		adev->mc.vram_width = numchan * chansize;
+		adev->gmc.vram_width = numchan * chansize;
 	}
 	/* size in MB on si */
-	adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
-	adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+	adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
+	adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
 	if (!(adev->flags & AMD_IS_APU)) {
 		r = amdgpu_device_resize_fb_bar(adev);
 		if (r)
 			return r;
 	}
-	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
-	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
+	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
+	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
 #ifdef CONFIG_X86_64
 	if (adev->flags & AMD_IS_APU) {
-		adev->mc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
-		adev->mc.aper_size = adev->mc.real_vram_size;
+		adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
+		adev->gmc.aper_size = adev->gmc.real_vram_size;
 	}
 #endif
 	/* In case the PCI BAR is larger than the actual amount of vram */
-	adev->mc.visible_vram_size = adev->mc.aper_size;
-	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
-		adev->mc.visible_vram_size = adev->mc.real_vram_size;
+	adev->gmc.visible_vram_size = adev->gmc.aper_size;
+	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
+		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
 	/* set the gart size */
 	if (amdgpu_gart_size == -1) {
@@ -571,20 +571,20 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
 		case CHIP_POLARIS10: /* all engines support GPUVM */
 		case CHIP_POLARIS12: /* all engines support GPUVM */
 		default:
-			adev->mc.gart_size = 256ULL << 20;
+			adev->gmc.gart_size = 256ULL << 20;
 			break;
 		case CHIP_TONGA:   /* UVD, VCE do not support GPUVM */
 		case CHIP_FIJI:    /* UVD, VCE do not support GPUVM */
 		case CHIP_CARRIZO: /* UVD, VCE do not support GPUVM, DCE SG support */
 		case CHIP_STONEY:  /* UVD does not support GPUVM, DCE SG support */
-			adev->mc.gart_size = 1024ULL << 20;
+			adev->gmc.gart_size = 1024ULL << 20;
 			break;
 		}
 	} else {
-		adev->mc.gart_size = (u64)amdgpu_gart_size << 20;
+		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
 	}
-	gmc_v8_0_vram_gtt_location(adev, &adev->mc);
+	gmc_v8_0_vram_gtt_location(adev, &adev->gmc);
 	return 0;
 }
@@ -597,25 +597,45 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
  */
 /**
- * gmc_v8_0_gart_flush_gpu_tlb - gart tlb flush callback
+ * gmc_v8_0_flush_gpu_tlb - gart tlb flush callback
  *
  * @adev: amdgpu_device pointer
  * @vmid: vm instance to flush
  *
  * Flush the TLB for the requested page table (CIK).
  */
-static void gmc_v8_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
-					uint32_t vmid)
+static void gmc_v8_0_flush_gpu_tlb(struct amdgpu_device *adev,
+				   uint32_t vmid)
 {
-	/* flush hdp cache */
-	WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
 	/* bits 0-15 are the VM contexts0-15 */
 	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
 }
+
+static uint64_t gmc_v8_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
+					    unsigned vmid, uint64_t pd_addr)
+{
+	uint32_t reg;
+
+	if (vmid < 8)
+		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
+	else
+		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
+	amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);
+
+	/* bits 0-15 are the VM contexts0-15 */
+	amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);
+
+	return pd_addr;
+}
+
+static void gmc_v8_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
+					unsigned pasid)
+{
+	amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
+}
 /**
- * gmc_v8_0_gart_set_pte_pde - update the page tables using MMIO
+ * gmc_v8_0_set_pte_pde - update the page tables using MMIO
  *
  * @adev: amdgpu_device pointer
  * @cpu_pt_addr: cpu address of the page table
@@ -625,11 +645,9 @@ static void gmc_v8_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
  *
  * Update the page tables using the CPU.
  */
-static int gmc_v8_0_gart_set_pte_pde(struct amdgpu_device *adev,
-				     void *cpu_pt_addr,
-				     uint32_t gpu_page_idx,
-				     uint64_t addr,
-				     uint64_t flags)
+static int gmc_v8_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
+				uint32_t gpu_page_idx, uint64_t addr,
+				uint64_t flags)
 {
 	void __iomem *ptr = (void *)cpu_pt_addr;
 	uint64_t value;
@@ -723,9 +741,9 @@ static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
 {
 	u32 tmp;
-	if (enable && !adev->mc.prt_warning) {
+	if (enable && !adev->gmc.prt_warning) {
 		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
-		adev->mc.prt_warning = true;
+		adev->gmc.prt_warning = true;
 	}
 	tmp = RREG32(mmVM_PRT_CNTL);
@@ -747,7 +765,8 @@ static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable)
 	if (enable) {
 		uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
-		uint32_t high = adev->vm_manager.max_pfn;
+		uint32_t high = adev->vm_manager.max_pfn -
+			(AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);
 		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
 		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
@@ -837,8 +856,8 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
 	WREG32(mmVM_L2_CNTL4, tmp);
 	/* setup context0 */
-	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gart_start >> 12);
-	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gart_end >> 12);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
+	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
 	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
 	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
 			(u32)(adev->dummy_page.addr >> 12));
@@ -891,9 +910,9 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
 	else
 		gmc_v8_0_set_fault_enable_default(adev, true);
-	gmc_v8_0_gart_flush_gpu_tlb(adev, 0);
+	gmc_v8_0_flush_gpu_tlb(adev, 0);
 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
-		 (unsigned)(adev->mc.gart_size >> 20),
+		 (unsigned)(adev->gmc.gart_size >> 20),
 		 (unsigned long long)adev->gart.table_addr);
 	adev->gart.ready = true;
 	return 0;
@@ -966,21 +985,21 @@ static void gmc_v8_0_gart_fini(struct amdgpu_device *adev)
  *
  * Print human readable fault information (CIK).
  */
-static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev,
-				     u32 status, u32 addr, u32 mc_client)
+static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev, u32 status,
+				     u32 addr, u32 mc_client, unsigned pasid)
 {
-	u32 mc_id;
 	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
 	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
 	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
+	u32 mc_id;
 	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);
-	dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
-	       protections, vmid, addr,
+	dev_err(adev->dev, "VM fault (0x%02x, vmid %d, pasid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
+	       protections, vmid, pasid, addr,
 	       REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			     MEMORY_CLIENT_RW) ?
	       "write" : "read", block, mc_client, mc_id);
@@ -1012,16 +1031,16 @@ static int gmc_v8_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	gmc_v8_0_set_gart_funcs(adev);
+	gmc_v8_0_set_gmc_funcs(adev);
 	gmc_v8_0_set_irq_funcs(adev);
-	adev->mc.shared_aperture_start = 0x2000000000000000ULL;
-	adev->mc.shared_aperture_end =
-		adev->mc.shared_aperture_start + (4ULL << 30) - 1;
-	adev->mc.private_aperture_start =
-		adev->mc.shared_aperture_end + 1;
-	adev->mc.private_aperture_end =
-		adev->mc.private_aperture_start + (4ULL << 30) - 1;
+	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
+	adev->gmc.shared_aperture_end =
+		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
+	adev->gmc.private_aperture_start =
+		adev->gmc.shared_aperture_end + 1;
+	adev->gmc.private_aperture_end =
+		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
 	return 0;
 }
@@ -1031,7 +1050,7 @@ static int gmc_v8_0_late_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
-		return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+		return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
 	else
 		return 0;
 }
@@ -1045,7 +1064,7 @@ static int gmc_v8_0_sw_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	if (adev->flags & AMD_IS_APU) {
-		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
 	} else {
 		u32 tmp;
@@ -1054,14 +1073,14 @@ static int gmc_v8_0_sw_init(void *handle)
 		else
 			tmp = RREG32(mmMC_SEQ_MISC0);
 		tmp &= MC_SEQ_MISC0__MT__MASK;
-		adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
+		adev->gmc.vram_type = gmc_v8_0_convert_vram_type(tmp);
 	}
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->mc.vm_fault);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
 	if (r)
 		return r;
-	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->mc.vm_fault);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
 	if (r)
 		return r;
@@ -1075,9 +1094,9 @@ static int gmc_v8_0_sw_init(void *handle)
 	 * This is the max address of the GPU's
 	 * internal address space.
 	 */
-	adev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
-	adev->mc.stolen_size = 256 * 1024;
+	adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
+	adev->gmc.stolen_size = 256 * 1024;
 	/* set DMA mask + need_dma32 flags.
 	 * PCIE - can handle 40-bits.
@@ -1149,8 +1168,8 @@ static int gmc_v8_0_sw_fini(void *handle)
 	amdgpu_vm_manager_fini(adev);
 	gmc_v8_0_gart_fini(adev);
 	amdgpu_bo_fini(adev);
-	release_firmware(adev->mc.fw);
-	adev->mc.fw = NULL;
+	release_firmware(adev->gmc.fw);
+	adev->gmc.fw = NULL;
 	return 0;
 }
@@ -1191,7 +1210,7 @@ static int gmc_v8_0_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
+	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
 	gmc_v8_0_gart_disable(adev);
 	return 0;
@@ -1271,10 +1290,10 @@ static bool gmc_v8_0_check_soft_reset(void *handle)
						SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
 	}
 	if (srbm_soft_reset) {
-		adev->mc.srbm_soft_reset = srbm_soft_reset;
+		adev->gmc.srbm_soft_reset = srbm_soft_reset;
 		return true;
 	} else {
-		adev->mc.srbm_soft_reset = 0;
+		adev->gmc.srbm_soft_reset = 0;
 		return false;
 	}
 }
@@ -1283,7 +1302,7 @@ static int gmc_v8_0_pre_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	if (!adev->mc.srbm_soft_reset)
+	if (!adev->gmc.srbm_soft_reset)
 		return 0;
 	gmc_v8_0_mc_stop(adev);
@@ -1299,9 +1318,9 @@ static int gmc_v8_0_soft_reset(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 srbm_soft_reset;
-	if (!adev->mc.srbm_soft_reset)
+	if (!adev->gmc.srbm_soft_reset)
 		return 0;
-	srbm_soft_reset = adev->mc.srbm_soft_reset;
+	srbm_soft_reset = adev->gmc.srbm_soft_reset;
 	if (srbm_soft_reset) {
 		u32 tmp;
@@ -1329,7 +1348,7 @@ static int gmc_v8_0_post_soft_reset(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	if (!adev->mc.srbm_soft_reset)
+	if (!adev->gmc.srbm_soft_reset)
 		return 0;
 	gmc_v8_0_mc_resume(adev);
@@ -1410,7 +1429,8 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
 			addr);
 		dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
 			status);
-		gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client);
+		gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client,
+					 entry->pasid);
 	}
 	return 0;
@@ -1642,9 +1662,11 @@ static const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
 	.get_clockgating_state = gmc_v8_0_get_clockgating_state,
 };
-static const struct amdgpu_gart_funcs gmc_v8_0_gart_funcs = {
-	.flush_gpu_tlb = gmc_v8_0_gart_flush_gpu_tlb,
-	.set_pte_pde = gmc_v8_0_gart_set_pte_pde,
+static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
+	.flush_gpu_tlb = gmc_v8_0_flush_gpu_tlb,
+	.emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb,
+	.emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping,
+	.set_pte_pde = gmc_v8_0_set_pte_pde,
 	.set_prt = gmc_v8_0_set_prt,
 	.get_vm_pte_flags = gmc_v8_0_get_vm_pte_flags,
 	.get_vm_pde = gmc_v8_0_get_vm_pde
@@ -1655,16 +1677,16 @@ static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
 	.process = gmc_v8_0_process_interrupt,
 };
-static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev)
+static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev)
 {
-	if (adev->gart.gart_funcs == NULL)
-		adev->gart.gart_funcs = &gmc_v8_0_gart_funcs;
+	if (adev->gmc.gmc_funcs == NULL)
+		adev->gmc.gmc_funcs = &gmc_v8_0_gmc_funcs;
 }
 static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
 {
-	adev->mc.vm_fault.num_types = 1;
-	adev->mc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
+	adev->gmc.vm_fault.num_types = 1;
+	adev->gmc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
 }
 const struct amdgpu_ip_block_version gmc_v8_0_ip_block =
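Alongside the rename, gmc_v8_0 grows ring-side callbacks: emit_flush_gpu_tlb programs the per-VMID page-table base and the invalidate request as ring writes instead of CPU MMIO, and emit_pasid_mapping publishes the vmid-to-pasid mapping that the fault decoder consumes. A sketch of how the two compose during a VM flush, assuming only the callbacks shown above (example_vm_flush is a hypothetical name, not the driver's actual flush path):

	static void example_vm_flush(struct amdgpu_ring *ring, unsigned vmid,
				     uint64_t pd_addr, unsigned pasid)
	{
		struct amdgpu_device *adev = ring->adev;

		/* queue the page-directory switch and TLB invalidate */
		adev->gmc.gmc_funcs->emit_flush_gpu_tlb(ring, vmid, pd_addr);
		/* record vmid -> pasid so faults can name the process */
		adev->gmc.gmc_funcs->emit_pasid_mapping(ring, vmid, pasid);
	}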

drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -34,6 +34,7 @@
 #include "vega10_enum.h"
 #include "mmhub/mmhub_1_0_offset.h"
 #include "athub/athub_1_0_offset.h"
+#include "oss/osssys_4_0_offset.h"
 #include "soc15.h"
 #include "soc15_common.h"
@@ -263,10 +264,10 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
 	if (printk_ratelimit()) {
 		dev_err(adev->dev,
-			"[%s] VMC page fault (src_id:%u ring:%u vmid:%u pas_id:%u)\n",
+			"[%s] VMC page fault (src_id:%u ring:%u vmid:%u pasid:%u)\n",
 			entry->vmid_src ? "mmhub" : "gfxhub",
 			entry->src_id, entry->ring_id, entry->vmid,
-			entry->pas_id);
+			entry->pasid);
 		dev_err(adev->dev, " at page 0x%016llx from %d\n",
 			addr, entry->client_id);
 		if (!amdgpu_sriov_vf(adev))
@@ -285,8 +286,8 @@ static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
 static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
 {
-	adev->mc.vm_fault.num_types = 1;
-	adev->mc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
+	adev->gmc.vm_fault.num_types = 1;
+	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
 }
 static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid)
@@ -316,24 +317,21 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid)
  */
 /**
- * gmc_v9_0_gart_flush_gpu_tlb - gart tlb flush callback
+ * gmc_v9_0_flush_gpu_tlb - gart tlb flush callback
  *
  * @adev: amdgpu_device pointer
  * @vmid: vm instance to flush
  *
 * Flush the TLB for the requested page table.
 */
-static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
-					uint32_t vmid)
+static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
				   uint32_t vmid)
 {
 	/* Use register 17 for GART */
 	const unsigned eng = 17;
 	unsigned i, j;
-	/* flush hdp cache */
-	adev->nbio_funcs->hdp_flush(adev);
-	spin_lock(&adev->mc.invalidate_lock);
+	spin_lock(&adev->gmc.invalidate_lock);
 	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
 		struct amdgpu_vmhub *hub = &adev->vmhub[i];
@@ -366,11 +364,52 @@ static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
 			DRM_ERROR("Timeout waiting for VM flush ACK!\n");
 	}
-	spin_unlock(&adev->mc.invalidate_lock);
+	spin_unlock(&adev->gmc.invalidate_lock);
+}
+
+static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
+					    unsigned vmid, uint64_t pd_addr)
+{
+	struct amdgpu_device *adev = ring->adev;
+	struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
+	uint32_t req = gmc_v9_0_get_invalidate_req(vmid);
+	uint64_t flags = AMDGPU_PTE_VALID;
+	unsigned eng = ring->vm_inv_eng;
+
+	amdgpu_gmc_get_vm_pde(adev, -1, &pd_addr, &flags);
+	pd_addr |= flags;
+
+	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
+			      lower_32_bits(pd_addr));
+
+	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
+			      upper_32_bits(pd_addr));
+
+	amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_req + eng, req);
+
+	/* wait for the invalidate to complete */
+	amdgpu_ring_emit_reg_wait(ring, hub->vm_inv_eng0_ack + eng,
+				  1 << vmid, 1 << vmid);
+
+	return pd_addr;
+}
+
+static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
+					unsigned pasid)
+{
+	struct amdgpu_device *adev = ring->adev;
+	uint32_t reg;
+
+	if (ring->funcs->vmhub == AMDGPU_GFXHUB)
+		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
+	else
+		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
+
+	amdgpu_ring_emit_wreg(ring, reg, pasid);
 }
 /**
- * gmc_v9_0_gart_set_pte_pde - update the page tables using MMIO
+ * gmc_v9_0_set_pte_pde - update the page tables using MMIO
  *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
@@ -380,11 +419,9 @@ static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
  *
 * Update the page tables using the CPU.
 */
-static int gmc_v9_0_gart_set_pte_pde(struct amdgpu_device *adev,
-				     void *cpu_pt_addr,
-				     uint32_t gpu_page_idx,
-				     uint64_t addr,
-				     uint64_t flags)
+static int gmc_v9_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
+				uint32_t gpu_page_idx, uint64_t addr,
+				uint64_t flags)
 {
 	void __iomem *ptr = (void *)cpu_pt_addr;
 	uint64_t value;
@@ -475,10 +512,10 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
 {
 	if (!(*flags & AMDGPU_PDE_PTE))
 		*addr = adev->vm_manager.vram_base_offset + *addr -
-			adev->mc.vram_start;
+			adev->gmc.vram_start;
 	BUG_ON(*addr & 0xFFFF00000000003FULL);
-	if (!adev->mc.translate_further)
+	if (!adev->gmc.translate_further)
 		return;
 	if (level == AMDGPU_VM_PDB1) {
@@ -494,34 +531,35 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
 	}
 }
-static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
-	.flush_gpu_tlb = gmc_v9_0_gart_flush_gpu_tlb,
-	.set_pte_pde = gmc_v9_0_gart_set_pte_pde,
-	.get_invalidate_req = gmc_v9_0_get_invalidate_req,
+static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
+	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
+	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
+	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
+	.set_pte_pde = gmc_v9_0_set_pte_pde,
 	.get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
 	.get_vm_pde = gmc_v9_0_get_vm_pde
 };
-static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev)
+static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
 {
-	if (adev->gart.gart_funcs == NULL)
-		adev->gart.gart_funcs = &gmc_v9_0_gart_funcs;
+	if (adev->gmc.gmc_funcs == NULL)
+		adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
 }
 static int gmc_v9_0_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-	gmc_v9_0_set_gart_funcs(adev);
+	gmc_v9_0_set_gmc_funcs(adev);
 	gmc_v9_0_set_irq_funcs(adev);
-	adev->mc.shared_aperture_start = 0x2000000000000000ULL;
-	adev->mc.shared_aperture_end =
-		adev->mc.shared_aperture_start + (4ULL << 30) - 1;
-	adev->mc.private_aperture_start =
-		adev->mc.shared_aperture_end + 1;
-	adev->mc.private_aperture_end =
-		adev->mc.private_aperture_start + (4ULL << 30) - 1;
+	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
+	adev->gmc.shared_aperture_end =
+		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
+	adev->gmc.private_aperture_start =
+		adev->gmc.shared_aperture_end + 1;
+	adev->gmc.private_aperture_end =
+		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
 	return 0;
 }
@@ -647,16 +685,16 @@ static int gmc_v9_0_late_init(void *handle)
 		}
 	}
-	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
 }
 static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
-				       struct amdgpu_mc *mc)
+				       struct amdgpu_gmc *mc)
 {
 	u64 base = 0;
 	if (!amdgpu_sriov_vf(adev))
 		base = mmhub_v1_0_get_fb_location(adev);
-	amdgpu_device_vram_location(adev, &adev->mc, base);
+	amdgpu_device_vram_location(adev, &adev->gmc, base);
 	amdgpu_device_gart_location(adev, mc);
 	/* base offset of vram pages */
 	if (adev->flags & AMD_IS_APU)
@@ -680,8 +718,9 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
 	int chansize, numchan;
 	int r;
-	adev->mc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
-	if (!adev->mc.vram_width) {
+	if (amdgpu_emu_mode != 1)
+		adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
+	if (!adev->gmc.vram_width) {
 		/* hbm memory channel size */
 		chansize = 128;
@@ -718,43 +757,49 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
 			numchan = 2;
 			break;
 		}
-		adev->mc.vram_width = numchan * chansize;
+		adev->gmc.vram_width = numchan * chansize;
 	}
 	/* size in MB on si */
-	adev->mc.mc_vram_size =
+	adev->gmc.mc_vram_size =
 		adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
-	adev->mc.real_vram_size = adev->mc.mc_vram_size;
+	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
 	if (!(adev->flags & AMD_IS_APU)) {
 		r = amdgpu_device_resize_fb_bar(adev);
 		if (r)
 			return r;
 	}
-	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
-	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
+	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
+	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
+
+#ifdef CONFIG_X86_64
+	if (adev->flags & AMD_IS_APU) {
+		adev->gmc.aper_base = gfxhub_v1_0_get_mc_fb_offset(adev);
+		adev->gmc.aper_size = adev->gmc.real_vram_size;
+	}
+#endif
 	/* In case the PCI BAR is larger than the actual amount of vram */
-	adev->mc.visible_vram_size = adev->mc.aper_size;
-	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
-		adev->mc.visible_vram_size = adev->mc.real_vram_size;
+	adev->gmc.visible_vram_size = adev->gmc.aper_size;
+	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
+		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
 	/* set the gart size */
 	if (amdgpu_gart_size == -1) {
 		switch (adev->asic_type) {
 		case CHIP_VEGA10:  /* all engines support GPUVM */
 		default:
-			adev->mc.gart_size = 256ULL << 20;
+			adev->gmc.gart_size = 256ULL << 20;
 			break;
 		case CHIP_RAVEN:   /* DCE SG support */
-			adev->mc.gart_size = 1024ULL << 20;
+			adev->gmc.gart_size = 1024ULL << 20;
 			break;
 		}
 	} else {
-		adev->mc.gart_size = (u64)amdgpu_gart_size << 20;
+		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
 	}
-	gmc_v9_0_vram_gtt_location(adev, &adev->mc);
+	gmc_v9_0_vram_gtt_location(adev, &adev->gmc);
 	return 0;
 }
@@ -786,23 +831,23 @@ static int gmc_v9_0_sw_init(void *handle)
 	gfxhub_v1_0_init(adev);
 	mmhub_v1_0_init(adev);
-	spin_lock_init(&adev->mc.invalidate_lock);
+	spin_lock_init(&adev->gmc.invalidate_lock);
 	switch (adev->asic_type) {
 	case CHIP_RAVEN:
-		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
 		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
 			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
 		} else {
 			/* vm_size is 128TB + 512GB for legacy 3-level page support */
 			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
-			adev->mc.translate_further =
+			adev->gmc.translate_further =
				adev->vm_manager.num_level > 1;
 		}
 		break;
 	case CHIP_VEGA10:
 		/* XXX Don't know how to get VRAM type yet. */
-		adev->mc.vram_type = AMDGPU_VRAM_TYPE_HBM;
+		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
 		/*
 		 * To fulfill 4-level page support,
 		 * vm size is 256TB (48bit), maximum size of Vega10,
@@ -816,9 +861,9 @@ static int gmc_v9_0_sw_init(void *handle)
 	/* This interrupt is VMC page fault.*/
 	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VMC, 0,
-				&adev->mc.vm_fault);
+				&adev->gmc.vm_fault);
 	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_UTCL2, 0,
-				&adev->mc.vm_fault);
+				&adev->gmc.vm_fault);
 	if (r)
 		return r;
@@ -827,13 +872,13 @@ static int gmc_v9_0_sw_init(void *handle)
 	 * This is the max address of the GPU's
 	 * internal address space.
 	 */
-	adev->mc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
+	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
 	/*
 	 * It needs to reserve 8M stolen memory for vega10
 	 * TODO: Figure out how to avoid that...
 	 */
-	adev->mc.stolen_size = 8 * 1024 * 1024;
+	adev->gmc.stolen_size = 8 * 1024 * 1024;
 	/* set DMA mask + need_dma32 flags.
 	 * PCIE - can handle 44-bits.
@@ -975,7 +1020,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
 	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
 	/* After HDP is initialized, flush HDP.*/
-	adev->nbio_funcs->hdp_flush(adev);
+	adev->nbio_funcs->hdp_flush(adev, NULL);
 	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
 		value = false;
@@ -984,10 +1029,10 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
 	gfxhub_v1_0_set_fault_enable_default(adev, value);
 	mmhub_v1_0_set_fault_enable_default(adev, value);
-	gmc_v9_0_gart_flush_gpu_tlb(adev, 0);
+	gmc_v9_0_flush_gpu_tlb(adev, 0);
 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
-		 (unsigned)(adev->mc.gart_size >> 20),
+		 (unsigned)(adev->gmc.gart_size >> 20),
 		 (unsigned long long)adev->gart.table_addr);
 	adev->gart.ready = true;
 	return 0;
@@ -1038,7 +1083,7 @@ static int gmc_v9_0_hw_fini(void *handle)
 		return 0;
 	}
-	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
+	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
 	gmc_v9_0_gart_disable(adev);
 	return 0;
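On gmc v9 the ring-emitted flush ends with amdgpu_ring_emit_reg_wait on the per-engine ack register. For reference, a sketch of the CPU-side poll that the MMIO path in gmc_v9_0_flush_gpu_tlb performs against the same ack bit; the loop shape is assumed from the usual adev->usec_timeout convention, and the timeout message is the one visible in the hunk above:

	/* sketch: spin until invalidate engine 'eng' acks this vmid */
	for (j = 0; j < adev->usec_timeout; j++) {
		uint32_t tmp = RREG32(hub->vm_inv_eng0_ack + eng);

		if (tmp & (1 << vmid))
			break;
		udelay(1);
	}
	if (j >= adev->usec_timeout)
		DRM_ERROR("Timeout waiting for VM flush ACK!\n");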

drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -260,7 +260,7 @@ static void iceland_ih_decode_iv(struct amdgpu_device *adev,
 	entry->src_data[0] = dw[1] & 0xfffffff;
 	entry->ring_id = dw[2] & 0xff;
 	entry->vmid = (dw[2] >> 8) & 0xff;
-	entry->pas_id = (dw[2] >> 16) & 0xffff;
+	entry->pasid = (dw[2] >> 16) & 0xffff;
 	/* wptr/rptr are in bytes! */
 	adev->irq.ih.rptr += 16;
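A worked example of the bitfield split above, with an invented IV dword: for dw[2] = 0x00050201 the decode yields

	entry->ring_id = dw[2] & 0xff;           /* 0x01 */
	entry->vmid    = (dw[2] >> 8) & 0xff;    /* 0x02 */
	entry->pasid   = (dw[2] >> 16) & 0xffff; /* 0x0005 */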

drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -3319,7 +3319,6 @@ const struct amd_ip_funcs kv_dpm_ip_funcs = {
 };
 const struct amd_pm_funcs kv_dpm_funcs = {
-	.get_temperature = &kv_dpm_get_temp,
 	.pre_set_power_state = &kv_dpm_pre_set_power_state,
 	.set_power_state = &kv_dpm_set_power_state,
 	.post_set_power_state = &kv_dpm_post_set_power_state,

drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -50,7 +50,7 @@ static void mmhub_v1_0_init_gart_pt_regs(struct amdgpu_device *adev)
 	uint64_t value;
 	BUG_ON(adev->gart.table_addr & (~0x0000FFFFFFFFF000ULL));
-	value = adev->gart.table_addr - adev->mc.vram_start +
+	value = adev->gart.table_addr - adev->gmc.vram_start +
 		adev->vm_manager.vram_base_offset;
 	value &= 0x0000FFFFFFFFF000ULL;
 	value |= 0x1; /* valid bit */
@@ -67,14 +67,14 @@ static void mmhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
 	mmhub_v1_0_init_gart_pt_regs(adev);
 	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
-		     (u32)(adev->mc.gart_start >> 12));
+		     (u32)(adev->gmc.gart_start >> 12));
 	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
-		     (u32)(adev->mc.gart_start >> 44));
+		     (u32)(adev->gmc.gart_start >> 44));
 	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
-		     (u32)(adev->mc.gart_end >> 12));
+		     (u32)(adev->gmc.gart_end >> 12));
 	WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
-		     (u32)(adev->mc.gart_end >> 44));
+		     (u32)(adev->gmc.gart_end >> 44));
 }
 static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
@@ -89,12 +89,12 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
 	/* Program the system aperture low logical page number. */
 	WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
-		     adev->mc.vram_start >> 18);
+		     adev->gmc.vram_start >> 18);
 	WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
-		     adev->mc.vram_end >> 18);
+		     adev->gmc.vram_end >> 18);
 	/* Set default page address. */
-	value = adev->vram_scratch.gpu_addr - adev->mc.vram_start +
+	value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
 		adev->vm_manager.vram_base_offset;
 	WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
 		     (u32)(value >> 12));
@@ -155,7 +155,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
 	WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL2, tmp);
-	if (adev->mc.translate_further) {
+	if (adev->gmc.translate_further) {
 		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
 		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
				    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
@@ -207,7 +207,7 @@ static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
 	num_level = adev->vm_manager.num_level;
 	block_size = adev->vm_manager.block_size;
-	if (adev->mc.translate_further)
+	if (adev->gmc.translate_further)
 		num_level -= 1;
 	else
 		block_size -= 9;
@@ -272,21 +272,21 @@ static const struct pctl_data pctl0_data[] = {
 	{0x11, 0x6a684},
 	{0x19, 0xea68e},
 	{0x29, 0xa69e},
-	{0x2b, 0x34a6c0},
-	{0x61, 0x83a707},
-	{0xe6, 0x8a7a4},
-	{0xf0, 0x1a7b8},
-	{0xf3, 0xfa7cc},
-	{0x104, 0x17a7dd},
-	{0x11d, 0xa7dc},
-	{0x11f, 0x12a7f5},
-	{0x133, 0xa808},
-	{0x135, 0x12a810},
-	{0x149, 0x7a82c}
+	{0x2b, 0x0010a6c0},
+	{0x3d, 0x83a707},
+	{0xc2, 0x8a7a4},
+	{0xcc, 0x1a7b8},
+	{0xcf, 0xfa7cc},
+	{0xe0, 0x17a7dd},
+	{0xf9, 0xa7dc},
+	{0xfb, 0x12a7f5},
+	{0x10f, 0xa808},
+	{0x111, 0x12a810},
+	{0x125, 0x7a82c}
 };
 #define PCTL0_DATA_LEN (ARRAY_SIZE(pctl0_data))
-#define PCTL0_RENG_EXEC_END_PTR 0x151
+#define PCTL0_RENG_EXEC_END_PTR 0x12d
 #define PCTL0_STCTRL_REG_SAVE_RANGE0_BASE  0xa640
 #define PCTL0_STCTRL_REG_SAVE_RANGE0_LIMIT 0xa833
@@ -385,10 +385,9 @@ void mmhub_v1_0_initialize_power_gating(struct amdgpu_device *adev)
 	if (amdgpu_sriov_vf(adev))
 		return;
+	/****************** pctl0 **********************/
 	pctl0_misc = RREG32_SOC15(MMHUB, 0, mmPCTL0_MISC);
 	pctl0_reng_execute = RREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_EXECUTE);
-	pctl1_misc = RREG32_SOC15(MMHUB, 0, mmPCTL1_MISC);
-	pctl1_reng_execute = RREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE);
 	/* Light sleep must be disabled before writing to pctl0 registers */
 	pctl0_misc &= ~PCTL0_MISC__RENG_MEM_LS_ENABLE_MASK;
@@ -402,12 +401,13 @@ void mmhub_v1_0_initialize_power_gating(struct amdgpu_device *adev)
 			       pctl0_data[i].data);
 	}
-	/* Set the reng execute end ptr for pctl0 */
-	pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute,
-					PCTL0_RENG_EXECUTE,
-					RENG_EXECUTE_END_PTR,
-					PCTL0_RENG_EXEC_END_PTR);
-	WREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_EXECUTE, pctl0_reng_execute);
+	/* Re-enable light sleep */
+	pctl0_misc |= PCTL0_MISC__RENG_MEM_LS_ENABLE_MASK;
+	WREG32_SOC15(MMHUB, 0, mmPCTL0_MISC, pctl0_misc);
+
+	/****************** pctl1 **********************/
+	pctl1_misc = RREG32_SOC15(MMHUB, 0, mmPCTL1_MISC);
+	pctl1_reng_execute = RREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE);
 	/* Light sleep must be disabled before writing to pctl1 registers */
 	pctl1_misc &= ~PCTL1_MISC__RENG_MEM_LS_ENABLE_MASK;
@@ -421,20 +421,25 @@ void mmhub_v1_0_initialize_power_gating(struct amdgpu_device *adev)
 			       pctl1_data[i].data);
 	}
+	/* Re-enable light sleep */
+	pctl1_misc |= PCTL1_MISC__RENG_MEM_LS_ENABLE_MASK;
+	WREG32_SOC15(MMHUB, 0, mmPCTL1_MISC, pctl1_misc);
+
+	mmhub_v1_0_power_gating_write_save_ranges(adev);
+
+	/* Set the reng execute end ptr for pctl0 */
+	pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute,
+					PCTL0_RENG_EXECUTE,
+					RENG_EXECUTE_END_PTR,
+					PCTL0_RENG_EXEC_END_PTR);
+	WREG32_SOC15(MMHUB, 0, mmPCTL0_RENG_EXECUTE, pctl0_reng_execute);
+
 	/* Set the reng execute end ptr for pctl1 */
 	pctl1_reng_execute = REG_SET_FIELD(pctl1_reng_execute,
					PCTL1_RENG_EXECUTE,
					RENG_EXECUTE_END_PTR,
					PCTL1_RENG_EXEC_END_PTR);
 	WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE, pctl1_reng_execute);
-
-	mmhub_v1_0_power_gating_write_save_ranges(adev);
-
-	/* Re-enable light sleep */
-	pctl0_misc |= PCTL0_MISC__RENG_MEM_LS_ENABLE_MASK;
-	WREG32_SOC15(MMHUB, 0, mmPCTL0_MISC, pctl0_misc);
-	pctl1_misc |= PCTL1_MISC__RENG_MEM_LS_ENABLE_MASK;
-	WREG32_SOC15(MMHUB, 0, mmPCTL1_MISC, pctl1_misc);
 }
 void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
@@ -466,6 +471,9 @@ void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
				RENG_EXECUTE_ON_REG_UPDATE, 1);
 		WREG32_SOC15(MMHUB, 0, mmPCTL1_RENG_EXECUTE, pctl1_reng_execute);
+
+		if (adev->powerplay.pp_funcs->set_mmhub_powergating_by_smu)
+			amdgpu_dpm_set_mmhub_powergating_by_smu(adev);
 	} else {
 		pctl0_reng_execute = REG_SET_FIELD(pctl0_reng_execute,
					PCTL0_RENG_EXECUTE,
@@ -494,9 +502,9 @@ int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
 		 * SRIOV driver need to program them
 		 */
 		WREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_BASE,
-			     adev->mc.vram_start >> 24);
+			     adev->gmc.vram_start >> 24);
 		WREG32_SOC15(MMHUB, 0, mmMC_VM_FB_LOCATION_TOP,
-			     adev->mc.vram_end >> 24);
+			     adev->gmc.vram_end >> 24);
 	}
 	/* GART Enable. */

drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -53,9 +53,16 @@ static void nbio_v6_1_mc_access_enable(struct amdgpu_device *adev, bool enable)
 		WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
 }
-static void nbio_v6_1_hdp_flush(struct amdgpu_device *adev)
+static void nbio_v6_1_hdp_flush(struct amdgpu_device *adev,
+				struct amdgpu_ring *ring)
 {
-	WREG32_SOC15_NO_KIQ(NBIO, 0, mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL, 0);
+	if (!ring || !ring->funcs->emit_wreg)
+		WREG32_SOC15_NO_KIQ(NBIO, 0,
+				    mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL,
+				    0);
+	else
+		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
+			NBIO, 0, mmBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL), 0);
 }
 static u32 nbio_v6_1_get_memsize(struct amdgpu_device *adev)

drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
@@ -53,9 +53,14 @@ static void nbio_v7_0_mc_access_enable(struct amdgpu_device *adev, bool enable)
 		WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
 }
-static void nbio_v7_0_hdp_flush(struct amdgpu_device *adev)
+static void nbio_v7_0_hdp_flush(struct amdgpu_device *adev,
+				struct amdgpu_ring *ring)
 {
-	WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
+	if (!ring || !ring->funcs->emit_wreg)
+		WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);
+	else
+		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
+			NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL), 0);
 }
 static u32 nbio_v7_0_get_memsize(struct amdgpu_device *adev)
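Both nbio variants now take an optional ring, so one callback covers the synchronous and the queued case. Usage sketch (the NULL form appears verbatim in gmc_v9_0_gart_enable above; "ring" stands for any ring whose funcs implement emit_wreg):

	adev->nbio_funcs->hdp_flush(adev, NULL);  /* immediate MMIO write */
	adev->nbio_funcs->hdp_flush(adev, ring);  /* queued via emit_wreg */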

drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -87,7 +87,7 @@ psp_v10_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *
 	return 0;
 }
-int psp_v10_0_init_microcode(struct psp_context *psp)
+static int psp_v10_0_init_microcode(struct psp_context *psp)
 {
 	struct amdgpu_device *adev = psp->adev;
 	const char *chip_name;
@@ -133,7 +133,8 @@ int psp_v10_0_init_microcode(struct psp_context *psp)
 	return err;
 }
-int psp_v10_0_prep_cmd_buf(struct amdgpu_firmware_info *ucode, struct psp_gfx_cmd_resp *cmd)
+static int psp_v10_0_prep_cmd_buf(struct amdgpu_firmware_info *ucode,
+				  struct psp_gfx_cmd_resp *cmd)
 {
 	int ret;
 	uint64_t fw_mem_mc_addr = ucode->mc_addr;
@@ -152,7 +153,8 @@ int psp_v10_0_prep_cmd_buf(struct amdgpu_firmware_info *ucode, struct psp_gfx_cm
 	return ret;
 }
-int psp_v10_0_ring_init(struct psp_context *psp, enum psp_ring_type ring_type)
+static int psp_v10_0_ring_init(struct psp_context *psp,
+			       enum psp_ring_type ring_type)
 {
 	int ret = 0;
 	struct psp_ring *ring;
@@ -177,7 +179,8 @@ int psp_v10_0_ring_init(struct psp_context *psp, enum psp_ring_type ring_type)
 	return 0;
 }
-int psp_v10_0_ring_create(struct psp_context *psp, enum psp_ring_type ring_type)
+static int psp_v10_0_ring_create(struct psp_context *psp,
+				 enum psp_ring_type ring_type)
 {
 	int ret = 0;
 	unsigned int psp_ring_reg = 0;
@@ -208,7 +211,8 @@ int psp_v10_0_ring_create(struct psp_context *psp, enum psp_ring_type ring_type)
 	return ret;
 }
-int psp_v10_0_ring_stop(struct psp_context *psp, enum psp_ring_type ring_type)
+static int psp_v10_0_ring_stop(struct psp_context *psp,
+			       enum psp_ring_type ring_type)
 {
 	int ret = 0;
 	struct psp_ring *ring;
@@ -231,7 +235,8 @@ int psp_v10_0_ring_stop(struct psp_context *psp, enum psp_ring_type ring_type)
 	return ret;
 }
-int psp_v10_0_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type)
+static int psp_v10_0_ring_destroy(struct psp_context *psp,
+				  enum psp_ring_type ring_type)
 {
 	int ret = 0;
 	struct psp_ring *ring = &psp->km_ring;
@@ -248,10 +253,10 @@ int psp_v10_0_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type
 	return ret;
 }
-int psp_v10_0_cmd_submit(struct psp_context *psp,
+static int psp_v10_0_cmd_submit(struct psp_context *psp,
 			 struct amdgpu_firmware_info *ucode,
 			 uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,
 			 int index)
 {
 	unsigned int psp_write_ptr_reg = 0;
 	struct psp_gfx_rb_frame * write_frame = psp->km_ring.ring_mem;
@@ -298,9 +303,9 @@ int psp_v10_0_cmd_submit(struct psp_context *psp,
 static int
 psp_v10_0_sram_map(struct amdgpu_device *adev,
		   unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
		   unsigned int *sram_data_reg_offset,
		   enum AMDGPU_UCODE_ID ucode_id)
 {
 	int ret = 0;
@@ -383,9 +388,9 @@ psp_v10_0_sram_map(struct amdgpu_device *adev,
 	return ret;
 }
-bool psp_v10_0_compare_sram_data(struct psp_context *psp,
+static bool psp_v10_0_compare_sram_data(struct psp_context *psp,
				 struct amdgpu_firmware_info *ucode,
				 enum AMDGPU_UCODE_ID ucode_type)
 {
 	int err = 0;
 	unsigned int fw_sram_reg_val = 0;
@@ -419,8 +424,25 @@ bool psp_v10_0_compare_sram_data(struct psp_context *psp,
 }
-int psp_v10_0_mode1_reset(struct psp_context *psp)
+static int psp_v10_0_mode1_reset(struct psp_context *psp)
 {
 	DRM_INFO("psp mode 1 reset not supported now! \n");
 	return -EINVAL;
 }
+
+static const struct psp_funcs psp_v10_0_funcs = {
+	.init_microcode = psp_v10_0_init_microcode,
+	.prep_cmd_buf = psp_v10_0_prep_cmd_buf,
+	.ring_init = psp_v10_0_ring_init,
+	.ring_create = psp_v10_0_ring_create,
+	.ring_stop = psp_v10_0_ring_stop,
+	.ring_destroy = psp_v10_0_ring_destroy,
+	.cmd_submit = psp_v10_0_cmd_submit,
+	.compare_sram_data = psp_v10_0_compare_sram_data,
+	.mode1_reset = psp_v10_0_mode1_reset,
+};
+
+void psp_v10_0_set_psp_funcs(struct psp_context *psp)
+{
+	psp->funcs = &psp_v10_0_funcs;
+}
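With every entry point now static, psp_v10_0 is reachable only through the psp_funcs table. A sketch of the expected call pattern, assuming the PSP_RING_TYPE__KM enum value from amdgpu_psp.h (example_psp_bringup is a hypothetical helper, not the driver's real init sequence):

	static int example_psp_bringup(struct psp_context *psp)
	{
		int ret;

		psp_v10_0_set_psp_funcs(psp);	/* install the ASIC vtable */
		ret = psp->funcs->init_microcode(psp);
		if (ret)
			return ret;
		return psp->funcs->ring_init(psp, PSP_RING_TYPE__KM);
	}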

drivers/gpu/drm/amd/amdgpu/psp_v10_0.h
@@ -27,24 +27,6 @@
 #include "amdgpu_psp.h"
-extern int psp_v10_0_init_microcode(struct psp_context *psp);
-extern int psp_v10_0_prep_cmd_buf(struct amdgpu_firmware_info *ucode,
-				  struct psp_gfx_cmd_resp *cmd);
-extern int psp_v10_0_ring_init(struct psp_context *psp,
-			       enum psp_ring_type ring_type);
-extern int psp_v10_0_ring_create(struct psp_context *psp,
-				 enum psp_ring_type ring_type);
-extern int psp_v10_0_ring_stop(struct psp_context *psp,
-			       enum psp_ring_type ring_type);
-extern int psp_v10_0_ring_destroy(struct psp_context *psp,
-				  enum psp_ring_type ring_type);
-extern int psp_v10_0_cmd_submit(struct psp_context *psp,
-				struct amdgpu_firmware_info *ucode,
-				uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,
-				int index);
-extern bool psp_v10_0_compare_sram_data(struct psp_context *psp,
-					struct amdgpu_firmware_info *ucode,
-					enum AMDGPU_UCODE_ID ucode_type);
-extern int psp_v10_0_mode1_reset(struct psp_context *psp);
+void psp_v10_0_set_psp_funcs(struct psp_context *psp);
 #endif

drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -93,7 +93,7 @@ psp_v3_1_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *t
 	return 0;
 }
-int psp_v3_1_init_microcode(struct psp_context *psp)
+static int psp_v3_1_init_microcode(struct psp_context *psp)
 {
 	struct amdgpu_device *adev = psp->adev;
 	const char *chip_name;
@@ -161,7 +161,7 @@ int psp_v3_1_init_microcode(struct psp_context *psp)
 	return err;
 }
-int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
+static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
 {
 	int ret;
 	uint32_t psp_gfxdrv_command_reg = 0;
@@ -202,7 +202,7 @@ int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
 	return ret;
 }
-int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
+static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
 {
 	int ret;
 	unsigned int psp_gfxdrv_command_reg = 0;
@@ -243,7 +243,8 @@ int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
 	return ret;
 }
-int psp_v3_1_prep_cmd_buf(struct amdgpu_firmware_info *ucode, struct psp_gfx_cmd_resp *cmd)
+static int psp_v3_1_prep_cmd_buf(struct amdgpu_firmware_info *ucode,
+				 struct psp_gfx_cmd_resp *cmd)
 {
 	int ret;
 	uint64_t fw_mem_mc_addr = ucode->mc_addr;
@@ -262,7 +263,8 @@ int psp_v3_1_prep_cmd_buf(struct amdgpu_firmware_info *ucode, struct psp_gfx_cmd
 	return ret;
 }
-int psp_v3_1_ring_init(struct psp_context *psp, enum psp_ring_type ring_type)
+static int psp_v3_1_ring_init(struct psp_context *psp,
+			      enum psp_ring_type ring_type)
 {
 	int ret = 0;
 	struct psp_ring *ring;
@@ -287,7 +289,8 @@ int psp_v3_1_ring_init(struct psp_context *psp, enum psp_ring_type ring_type)
 	return 0;
 }
-int psp_v3_1_ring_create(struct psp_context *psp, enum psp_ring_type ring_type)
+static int psp_v3_1_ring_create(struct psp_context *psp,
+				enum psp_ring_type ring_type)
 {
 	int ret = 0;
 	unsigned int psp_ring_reg = 0;
@@ -318,7 +321,8 @@ int psp_v3_1_ring_create(struct psp_context *psp, enum psp_ring_type ring_type)
 	return ret;
 }
-int psp_v3_1_ring_stop(struct psp_context *psp, enum psp_ring_type ring_type)
+static int psp_v3_1_ring_stop(struct psp_context *psp,
+			      enum psp_ring_type ring_type)
 {
 	int ret = 0;
 	struct psp_ring *ring;
@@ -341,7 +345,8 @@ int psp_v3_1_ring_stop(struct psp_context *psp, enum psp_ring_type ring_type)
 	return ret;
 }
-int psp_v3_1_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type)
+static int psp_v3_1_ring_destroy(struct psp_context *psp,
+				 enum psp_ring_type ring_type)
 {
 	int ret = 0;
 	struct psp_ring *ring = &psp->km_ring;
@@ -358,10 +363,10 @@ int psp_v3_1_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type)
 	return ret;
 }
-int psp_v3_1_cmd_submit(struct psp_context *psp,
+static int psp_v3_1_cmd_submit(struct psp_context *psp,
 			struct amdgpu_firmware_info *ucode,
 			uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,
 			int index)
 {
 	unsigned int psp_write_ptr_reg = 0;
 	struct psp_gfx_rb_frame * write_frame = psp->km_ring.ring_mem;
@@ -410,9 +415,9 @@ int psp_v3_1_cmd_submit(struct psp_context *psp,
 static int
 psp_v3_1_sram_map(struct amdgpu_device *adev,
		  unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
		  unsigned int *sram_data_reg_offset,
		  enum AMDGPU_UCODE_ID ucode_id)
 {
 	int ret = 0;
@@ -495,9 +500,9 @@ psp_v3_1_sram_map(struct amdgpu_device *adev,
 	return ret;
 }
-bool psp_v3_1_compare_sram_data(struct psp_context *psp,
+static bool psp_v3_1_compare_sram_data(struct psp_context *psp,
				struct amdgpu_firmware_info *ucode,
				enum AMDGPU_UCODE_ID ucode_type)
 {
 	int err = 0;
 	unsigned int fw_sram_reg_val = 0;
@@ -530,7 +535,7 @@ bool psp_v3_1_compare_sram_data(struct psp_context *psp,
 	return true;
 }
-bool psp_v3_1_smu_reload_quirk(struct psp_context *psp)
+static bool psp_v3_1_smu_reload_quirk(struct psp_context *psp)
 {
 	struct amdgpu_device *adev = psp->adev;
 	uint32_t reg;
@@ -541,7 +546,7 @@ bool psp_v3_1_smu_reload_quirk(struct psp_context *psp)
 	return (reg & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) ? true : false;
 }
-int psp_v3_1_mode1_reset(struct psp_context *psp)
+static int psp_v3_1_mode1_reset(struct psp_context *psp)
 {
 	int ret;
 	uint32_t offset;
@@ -574,3 +579,23 @@ int psp_v3_1_mode1_reset(struct psp_context *psp)
 	return 0;
 }
+
+static const struct psp_funcs psp_v3_1_funcs = {
+	.init_microcode = psp_v3_1_init_microcode,
+	.bootloader_load_sysdrv = psp_v3_1_bootloader_load_sysdrv,
+	.bootloader_load_sos = psp_v3_1_bootloader_load_sos,
+	.prep_cmd_buf = psp_v3_1_prep_cmd_buf,
+	.ring_init = psp_v3_1_ring_init,
+	.ring_create = psp_v3_1_ring_create,
+	.ring_stop = psp_v3_1_ring_stop,
+	.ring_destroy = psp_v3_1_ring_destroy,
+	.cmd_submit = psp_v3_1_cmd_submit,
+	.compare_sram_data = psp_v3_1_compare_sram_data,
+	.smu_reload_quirk = psp_v3_1_smu_reload_quirk,
+	.mode1_reset = psp_v3_1_mode1_reset,
+};
+
+void psp_v3_1_set_psp_funcs(struct psp_context *psp)
+{
+	psp->funcs = &psp_v3_1_funcs;
+}

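The psp_v3_1 hunks above turn the exported entry points into a private function table plus a single setter. A minimal standalone sketch of that ops-table pattern follows; all types and names here are illustrative, not the kernel's actual API:

/* Ops-table sketch: the backend keeps its implementation static and
 * exports exactly one setter. Compile as plain C to try it. */
#include <stdio.h>

struct psp_like_context;

struct psp_like_funcs {
    int (*ring_init)(struct psp_like_context *ctx);
    int (*mode1_reset)(struct psp_like_context *ctx);
};

struct psp_like_context {
    const struct psp_like_funcs *funcs;
};

static int backend_ring_init(struct psp_like_context *ctx)
{
    (void)ctx;
    printf("backend ring_init\n");
    return 0;
}

static int backend_mode1_reset(struct psp_like_context *ctx)
{
    (void)ctx;
    printf("backend mode1_reset\n");
    return 0;
}

/* Everything above is static; only the setter is externally visible. */
static const struct psp_like_funcs backend_funcs = {
    .ring_init = backend_ring_init,
    .mode1_reset = backend_mode1_reset,
};

void backend_set_funcs(struct psp_like_context *ctx)
{
    ctx->funcs = &backend_funcs;
}

int main(void)
{
    struct psp_like_context ctx;

    backend_set_funcs(&ctx);
    return ctx.funcs->ring_init(&ctx);
}

This shrinks each header to one declaration and lets callers stay ignorant of which hardware generation is behind the context.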

@@ -32,26 +32,6 @@ enum { PSP_BINARY_ALIGNMENT = 64 };
 enum { PSP_BOOTLOADER_1_MEG_ALIGNMENT = 0x100000 };
 enum { PSP_BOOTLOADER_8_MEM_ALIGNMENT = 0x800000 };
-extern int psp_v3_1_init_microcode(struct psp_context *psp);
-extern int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp);
-extern int psp_v3_1_bootloader_load_sos(struct psp_context *psp);
-extern int psp_v3_1_prep_cmd_buf(struct amdgpu_firmware_info *ucode,
-                                 struct psp_gfx_cmd_resp *cmd);
-extern int psp_v3_1_ring_init(struct psp_context *psp,
-                              enum psp_ring_type ring_type);
-extern int psp_v3_1_ring_create(struct psp_context *psp,
-                                enum psp_ring_type ring_type);
-extern int psp_v3_1_ring_stop(struct psp_context *psp,
-                              enum psp_ring_type ring_type);
-extern int psp_v3_1_ring_destroy(struct psp_context *psp,
-                                 enum psp_ring_type ring_type);
-extern int psp_v3_1_cmd_submit(struct psp_context *psp,
-                               struct amdgpu_firmware_info *ucode,
-                               uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr,
-                               int index);
-extern bool psp_v3_1_compare_sram_data(struct psp_context *psp,
-                                       struct amdgpu_firmware_info *ucode,
-                                       enum AMDGPU_UCODE_ID ucode_type);
-extern bool psp_v3_1_smu_reload_quirk(struct psp_context *psp);
-extern int psp_v3_1_mode1_reset(struct psp_context *psp);
+void psp_v3_1_set_psp_funcs(struct psp_context *psp);
 #endif


@@ -289,13 +289,6 @@ static void sdma_v2_4_ring_emit_hdp_flush(struct amdgpu_ring *ring)
             SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
 }
-static void sdma_v2_4_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
-{
-    amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
-            SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
-    amdgpu_ring_write(ring, mmHDP_DEBUG0);
-    amdgpu_ring_write(ring, 1);
-}
 /**
  * sdma_v2_4_ring_emit_fence - emit a fence on the DMA ring
  *
@@ -346,7 +339,7 @@ static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
     if ((adev->mman.buffer_funcs_ring == sdma0) ||
         (adev->mman.buffer_funcs_ring == sdma1))
-        amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
+        amdgpu_ttm_set_active_vram_size(adev, adev->gmc.visible_vram_size);
     for (i = 0; i < adev->sdma.num_instances; i++) {
         rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
@@ -491,7 +484,7 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
     }
     if (adev->mman.buffer_funcs_ring == ring)
-        amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
+        amdgpu_ttm_set_active_vram_size(adev, adev->gmc.real_vram_size);
     }
     return 0;
@@ -861,20 +854,7 @@ static void sdma_v2_4_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
                                          unsigned vmid, uint64_t pd_addr)
 {
-    amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
-            SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
-    if (vmid < 8) {
-        amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
-    } else {
-        amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8));
-    }
-    amdgpu_ring_write(ring, pd_addr >> 12);
-    /* flush TLB */
-    amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
-            SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
-    amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
-    amdgpu_ring_write(ring, 1 << vmid);
+    amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
     /* wait for flush */
     amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
@@ -888,6 +868,15 @@ static void sdma_v2_4_ring_emit_vm_flush(struct amdgpu_ring *ring,
             SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
 }
+static void sdma_v2_4_ring_emit_wreg(struct amdgpu_ring *ring,
+                                     uint32_t reg, uint32_t val)
+{
+    amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
+            SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
+    amdgpu_ring_write(ring, reg);
+    amdgpu_ring_write(ring, val);
+}
 static int sdma_v2_4_early_init(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1203,9 +1192,9 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
     .set_wptr = sdma_v2_4_ring_set_wptr,
     .emit_frame_size =
         6 + /* sdma_v2_4_ring_emit_hdp_flush */
-        3 + /* sdma_v2_4_ring_emit_hdp_invalidate */
+        3 + /* hdp invalidate */
         6 + /* sdma_v2_4_ring_emit_pipeline_sync */
-        12 + /* sdma_v2_4_ring_emit_vm_flush */
+        VI_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* sdma_v2_4_ring_emit_vm_flush */
         10 + 10 + 10, /* sdma_v2_4_ring_emit_fence x3 for user fence, vm fence */
     .emit_ib_size = 7 + 6, /* sdma_v2_4_ring_emit_ib */
     .emit_ib = sdma_v2_4_ring_emit_ib,
@@ -1213,11 +1202,11 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
     .emit_pipeline_sync = sdma_v2_4_ring_emit_pipeline_sync,
     .emit_vm_flush = sdma_v2_4_ring_emit_vm_flush,
     .emit_hdp_flush = sdma_v2_4_ring_emit_hdp_flush,
-    .emit_hdp_invalidate = sdma_v2_4_ring_emit_hdp_invalidate,
     .test_ring = sdma_v2_4_ring_test_ring,
     .test_ib = sdma_v2_4_ring_test_ib,
     .insert_nop = sdma_v2_4_ring_insert_nop,
     .pad_ib = sdma_v2_4_ring_pad_ib,
+    .emit_wreg = sdma_v2_4_ring_emit_wreg,
 };
 static void sdma_v2_4_set_ring_funcs(struct amdgpu_device *adev)
@@ -1316,9 +1305,6 @@ static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = {
     .copy_pte = sdma_v2_4_vm_copy_pte,
     .write_pte = sdma_v2_4_vm_write_pte,
-    .set_max_nums_pte_pde = 0x1fffff >> 3,
-    .set_pte_pde_num_dw = 10,
     .set_pte_pde = sdma_v2_4_vm_set_pte_pde,
 };

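The sdma_v2_4 change above replaces bespoke HDP-invalidate and VM-flush packet emission with a single emit_wreg callback that common code can drive. A standalone sketch of that abstraction, with illustrative types rather than the amdgpu API:

/* emit_wreg sketch: common code emits a register write through a
 * per-ring callback and no longer knows each engine's packet format. */
#include <stdint.h>
#include <stdio.h>

struct ring;

struct ring_funcs {
    void (*emit_wreg)(struct ring *ring, uint32_t reg, uint32_t val);
};

struct ring {
    const struct ring_funcs *funcs;
};

/* SDMA-style backend: header dword, then register, then value. */
static void sdma_like_emit_wreg(struct ring *ring, uint32_t reg, uint32_t val)
{
    (void)ring;
    printf("SRBM_WRITE hdr, reg=0x%x, val=0x%x\n", reg, val);
}

static const struct ring_funcs sdma_like_funcs = {
    .emit_wreg = sdma_like_emit_wreg,
};

/* Common code, analogous to a generic flush helper: one call site,
 * any engine. */
static void generic_flush_via_wreg(struct ring *ring, uint32_t flush_reg)
{
    ring->funcs->emit_wreg(ring, flush_reg, 1);
}

int main(void)
{
    struct ring r = { .funcs = &sdma_like_funcs };

    generic_flush_via_wreg(&r, 0x1234 /* hypothetical register */);
    return 0;
}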

@@ -460,14 +460,6 @@ static void sdma_v3_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
             SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
 }
-static void sdma_v3_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
-{
-    amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
-            SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
-    amdgpu_ring_write(ring, mmHDP_DEBUG0);
-    amdgpu_ring_write(ring, 1);
-}
 /**
  * sdma_v3_0_ring_emit_fence - emit a fence on the DMA ring
  *
@@ -518,7 +510,7 @@ static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
     if ((adev->mman.buffer_funcs_ring == sdma0) ||
         (adev->mman.buffer_funcs_ring == sdma1))
-        amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
+        amdgpu_ttm_set_active_vram_size(adev, adev->gmc.visible_vram_size);
     for (i = 0; i < adev->sdma.num_instances; i++) {
         rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
@@ -758,7 +750,7 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
     }
     if (adev->mman.buffer_funcs_ring == ring)
-        amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
+        amdgpu_ttm_set_active_vram_size(adev, adev->gmc.real_vram_size);
     }
     return 0;
@@ -1127,20 +1119,7 @@ static void sdma_v3_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
                                          unsigned vmid, uint64_t pd_addr)
 {
-    amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
-            SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
-    if (vmid < 8) {
-        amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
-    } else {
-        amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8));
-    }
-    amdgpu_ring_write(ring, pd_addr >> 12);
-    /* flush TLB */
-    amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
-            SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
-    amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
-    amdgpu_ring_write(ring, 1 << vmid);
+    amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
     /* wait for flush */
     amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
@@ -1154,6 +1133,15 @@ static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
             SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
 }
+static void sdma_v3_0_ring_emit_wreg(struct amdgpu_ring *ring,
+                                     uint32_t reg, uint32_t val)
+{
+    amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
+            SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
+    amdgpu_ring_write(ring, reg);
+    amdgpu_ring_write(ring, val);
+}
 static int sdma_v3_0_early_init(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1637,9 +1625,9 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
     .set_wptr = sdma_v3_0_ring_set_wptr,
     .emit_frame_size =
         6 + /* sdma_v3_0_ring_emit_hdp_flush */
-        3 + /* sdma_v3_0_ring_emit_hdp_invalidate */
+        3 + /* hdp invalidate */
         6 + /* sdma_v3_0_ring_emit_pipeline_sync */
-        12 + /* sdma_v3_0_ring_emit_vm_flush */
+        VI_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* sdma_v3_0_ring_emit_vm_flush */
         10 + 10 + 10, /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */
     .emit_ib_size = 7 + 6, /* sdma_v3_0_ring_emit_ib */
     .emit_ib = sdma_v3_0_ring_emit_ib,
@@ -1647,11 +1635,11 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
     .emit_pipeline_sync = sdma_v3_0_ring_emit_pipeline_sync,
     .emit_vm_flush = sdma_v3_0_ring_emit_vm_flush,
     .emit_hdp_flush = sdma_v3_0_ring_emit_hdp_flush,
-    .emit_hdp_invalidate = sdma_v3_0_ring_emit_hdp_invalidate,
     .test_ring = sdma_v3_0_ring_test_ring,
     .test_ib = sdma_v3_0_ring_test_ib,
     .insert_nop = sdma_v3_0_ring_insert_nop,
     .pad_ib = sdma_v3_0_ring_pad_ib,
+    .emit_wreg = sdma_v3_0_ring_emit_wreg,
 };
 static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -1750,10 +1738,6 @@ static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = {
     .copy_pte = sdma_v3_0_vm_copy_pte,
     .write_pte = sdma_v3_0_vm_write_pte,
-    /* not 0x3fffff due to HW limitation */
-    .set_max_nums_pte_pde = 0x3fffe0 >> 3,
-    .set_pte_pde_num_dw = 10,
     .set_pte_pde = sdma_v3_0_vm_set_pte_pde,
 };


@@ -375,16 +375,6 @@ static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
             SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
 }
-static void sdma_v4_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
-{
-    struct amdgpu_device *adev = ring->adev;
-    amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
-            SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
-    amdgpu_ring_write(ring, SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE));
-    amdgpu_ring_write(ring, 1);
-}
 /**
  * sdma_v4_0_ring_emit_fence - emit a fence on the DMA ring
  *
@@ -440,7 +430,7 @@ static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
     if ((adev->mman.buffer_funcs_ring == sdma0) ||
         (adev->mman.buffer_funcs_ring == sdma1))
-        amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
+        amdgpu_ttm_set_active_vram_size(adev, adev->gmc.visible_vram_size);
     for (i = 0; i < adev->sdma.num_instances; i++) {
         rb_cntl = RREG32(sdma_v4_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
@@ -682,7 +672,7 @@ static int sdma_v4_0_gfx_resume(struct amdgpu_device *adev)
     }
     if (adev->mman.buffer_funcs_ring == ring)
-        amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
+        amdgpu_ttm_set_active_vram_size(adev, adev->gmc.real_vram_size);
     }
@@ -1135,38 +1125,28 @@ static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
                                          unsigned vmid, uint64_t pd_addr)
 {
-    struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-    uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
-    uint64_t flags = AMDGPU_PTE_VALID;
-    unsigned eng = ring->vm_inv_eng;
-    amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
-    pd_addr |= flags;
-    amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
-            SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
-    amdgpu_ring_write(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2);
-    amdgpu_ring_write(ring, lower_32_bits(pd_addr));
-    amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
-            SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
-    amdgpu_ring_write(ring, hub->ctx0_ptb_addr_hi32 + vmid * 2);
-    amdgpu_ring_write(ring, upper_32_bits(pd_addr));
-    /* flush TLB */
-    amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
-            SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
-    amdgpu_ring_write(ring, hub->vm_inv_eng0_req + eng);
-    amdgpu_ring_write(ring, req);
-    /* wait for flush */
+    amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
+}
+static void sdma_v4_0_ring_emit_wreg(struct amdgpu_ring *ring,
+                                     uint32_t reg, uint32_t val)
+{
+    amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
+            SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
+    amdgpu_ring_write(ring, reg);
+    amdgpu_ring_write(ring, val);
+}
+static void sdma_v4_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+                                         uint32_t val, uint32_t mask)
+{
     amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
             SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
             SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */
-    amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
+    amdgpu_ring_write(ring, reg << 2);
     amdgpu_ring_write(ring, 0);
-    amdgpu_ring_write(ring, 1 << vmid); /* reference */
-    amdgpu_ring_write(ring, 1 << vmid); /* mask */
+    amdgpu_ring_write(ring, val); /* reference */
+    amdgpu_ring_write(ring, mask); /* mask */
     amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
             SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
 }
@@ -1592,9 +1572,11 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
     .set_wptr = sdma_v4_0_ring_set_wptr,
     .emit_frame_size =
         6 + /* sdma_v4_0_ring_emit_hdp_flush */
-        3 + /* sdma_v4_0_ring_emit_hdp_invalidate */
+        3 + /* hdp invalidate */
         6 + /* sdma_v4_0_ring_emit_pipeline_sync */
-        18 + /* sdma_v4_0_ring_emit_vm_flush */
+        /* sdma_v4_0_ring_emit_vm_flush */
+        SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
+        SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
         10 + 10 + 10, /* sdma_v4_0_ring_emit_fence x3 for user fence, vm fence */
     .emit_ib_size = 7 + 6, /* sdma_v4_0_ring_emit_ib */
     .emit_ib = sdma_v4_0_ring_emit_ib,
@@ -1602,11 +1584,12 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = {
     .emit_pipeline_sync = sdma_v4_0_ring_emit_pipeline_sync,
     .emit_vm_flush = sdma_v4_0_ring_emit_vm_flush,
     .emit_hdp_flush = sdma_v4_0_ring_emit_hdp_flush,
-    .emit_hdp_invalidate = sdma_v4_0_ring_emit_hdp_invalidate,
     .test_ring = sdma_v4_0_ring_test_ring,
     .test_ib = sdma_v4_0_ring_test_ib,
     .insert_nop = sdma_v4_0_ring_insert_nop,
     .pad_ib = sdma_v4_0_ring_pad_ib,
+    .emit_wreg = sdma_v4_0_ring_emit_wreg,
+    .emit_reg_wait = sdma_v4_0_ring_emit_reg_wait,
 };
 static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
@@ -1705,9 +1688,6 @@ static const struct amdgpu_vm_pte_funcs sdma_v4_0_vm_pte_funcs = {
     .copy_pte = sdma_v4_0_vm_copy_pte,
     .write_pte = sdma_v4_0_vm_write_pte,
-    .set_max_nums_pte_pde = 0x400000 >> 3,
-    .set_pte_pde_num_dw = 10,
     .set_pte_pde = sdma_v4_0_vm_set_pte_pde,
 };

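sdma_v4_0 additionally gains emit_reg_wait, built on the same POLL_REGMEM packet its hdp_flush already uses. A host-side model of the wait semantics that packet encodes; the register file and accessor below are illustrative stand-ins, not amdgpu code:

/* POLL_REGMEM model: poll until (read(reg) & mask) == ref, bounded by
 * a retry count (the real packet also carries a poll interval). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_regs[16];

static uint32_t read_reg(uint32_t reg)
{
    return fake_regs[reg & 15];
}

static bool reg_wait(uint32_t reg, uint32_t ref, uint32_t mask,
                     unsigned int retries)
{
    while (retries--) {
        if ((read_reg(reg) & mask) == ref)
            return true;
        /* hardware would sleep for the encoded interval here */
    }
    return false;
}

int main(void)
{
    fake_regs[3] = 1u << 5; /* pretend VMID 5 acked the flush */
    printf("ack seen: %d\n", reg_wait(3, 1u << 5, 1u << 5, 10));
    return 0;
}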

@@ -1230,6 +1230,27 @@ static void si_detect_hw_virtualization(struct amdgpu_device *adev)
         adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
 }
+static void si_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
+{
+    if (!ring || !ring->funcs->emit_wreg) {
+        WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
+        RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
+    } else {
+        amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
+    }
+}
+static void si_invalidate_hdp(struct amdgpu_device *adev,
+                              struct amdgpu_ring *ring)
+{
+    if (!ring || !ring->funcs->emit_wreg) {
+        WREG32(mmHDP_DEBUG0, 1);
+        RREG32(mmHDP_DEBUG0);
+    } else {
+        amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
+    }
+}
 static const struct amdgpu_asic_funcs si_asic_funcs =
 {
     .read_disabled_bios = &si_read_disabled_bios,
@@ -1241,6 +1262,8 @@ static const struct amdgpu_asic_funcs si_asic_funcs =
     .set_uvd_clocks = &si_set_uvd_clocks,
     .set_vce_clocks = NULL,
     .get_config_memsize = &si_get_config_memsize,
+    .flush_hdp = &si_flush_hdp,
+    .invalidate_hdp = &si_invalidate_hdp,
 };
 static uint32_t si_get_rev_id(struct amdgpu_device *adev)

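si_flush_hdp/si_invalidate_hdp above pick between a direct MMIO write with a read-back (the CPU path, or rings without emit_wreg) and a ring-emitted write. A standalone sketch of that fallback; the mmio array and helpers are stand-ins for WREG32/RREG32, and the register offset is hypothetical:

/* HDP flush fallback sketch: MMIO write + read-back when there is no
 * ring to emit on, otherwise queue the write on the ring. */
#include <stdint.h>
#include <stdio.h>

#define HDP_FLUSH_REG 0x15 /* hypothetical offset */

static volatile uint32_t mmio[32];

struct ring;
struct ring_funcs {
    void (*emit_wreg)(struct ring *r, uint32_t reg, uint32_t val);
};
struct ring {
    const struct ring_funcs *funcs;
};

static void wreg32(uint32_t reg, uint32_t val) { mmio[reg] = val; }
static uint32_t rreg32(uint32_t reg) { return mmio[reg]; }

static void flush_hdp(struct ring *ring)
{
    if (!ring || !ring->funcs->emit_wreg) {
        wreg32(HDP_FLUSH_REG, 1);
        (void)rreg32(HDP_FLUSH_REG); /* read back to post the write */
    } else {
        ring->funcs->emit_wreg(ring, HDP_FLUSH_REG, 1);
    }
}

int main(void)
{
    flush_hdp(NULL); /* CPU path */
    printf("mmio[0x%x] = %u\n", HDP_FLUSH_REG, mmio[HDP_FLUSH_REG]);
    return 0;
}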

@@ -24,6 +24,8 @@
 #ifndef __SI_H__
 #define __SI_H__
+#define SI_FLUSH_GPU_TLB_NUM_WREG	2
 void si_srbm_select(struct amdgpu_device *adev,
                     u32 me, u32 pipe, u32 queue, u32 vmid);
 int si_set_ip_blocks(struct amdgpu_device *adev);


@@ -24,6 +24,7 @@
 #include <drm/drmP.h>
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
+#include "si.h"
 #include "sid.h"
 const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
@@ -74,20 +75,6 @@ static void si_dma_ring_emit_ib(struct amdgpu_ring *ring,
 }
-static void si_dma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
-{
-    amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
-    amdgpu_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL));
-    amdgpu_ring_write(ring, 1);
-}
-static void si_dma_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
-{
-    amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
-    amdgpu_ring_write(ring, (0xf << 16) | (HDP_DEBUG0));
-    amdgpu_ring_write(ring, 1);
-}
 /**
  * si_dma_ring_emit_fence - emit a fence on the DMA ring
  *
@@ -134,7 +121,7 @@ static void si_dma_stop(struct amdgpu_device *adev)
         WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);
         if (adev->mman.buffer_funcs_ring == ring)
-            amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
+            amdgpu_ttm_set_active_vram_size(adev, adev->gmc.visible_vram_size);
         ring->ready = false;
     }
 }
@@ -197,7 +184,7 @@ static int si_dma_start(struct amdgpu_device *adev)
         }
         if (adev->mman.buffer_funcs_ring == ring)
-            amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
+            amdgpu_ttm_set_active_vram_size(adev, adev->gmc.real_vram_size);
     }
     return 0;
@@ -475,17 +462,7 @@ static void si_dma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
 static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring,
                                       unsigned vmid, uint64_t pd_addr)
 {
-    amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
-    if (vmid < 8)
-        amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid));
-    else
-        amdgpu_ring_write(ring, (0xf << 16) | (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vmid - 8)));
-    amdgpu_ring_write(ring, pd_addr >> 12);
-    /* bits 0-7 are the VM contexts0-7 */
-    amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
-    amdgpu_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST));
-    amdgpu_ring_write(ring, 1 << vmid);
+    amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
     /* wait for invalidate to complete */
     amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
@@ -496,6 +473,14 @@ static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring,
     amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
 }
+static void si_dma_ring_emit_wreg(struct amdgpu_ring *ring,
+                                  uint32_t reg, uint32_t val)
+{
+    amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+    amdgpu_ring_write(ring, (0xf << 16) | reg);
+    amdgpu_ring_write(ring, val);
+}
 static int si_dma_early_init(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -772,22 +757,20 @@ static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
     .get_wptr = si_dma_ring_get_wptr,
     .set_wptr = si_dma_ring_set_wptr,
     .emit_frame_size =
-        3 + /* si_dma_ring_emit_hdp_flush */
-        3 + /* si_dma_ring_emit_hdp_invalidate */
+        3 + 3 + /* hdp flush / invalidate */
         6 + /* si_dma_ring_emit_pipeline_sync */
-        12 + /* si_dma_ring_emit_vm_flush */
+        SI_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* si_dma_ring_emit_vm_flush */
         9 + 9 + 9, /* si_dma_ring_emit_fence x3 for user fence, vm fence */
     .emit_ib_size = 7 + 3, /* si_dma_ring_emit_ib */
     .emit_ib = si_dma_ring_emit_ib,
     .emit_fence = si_dma_ring_emit_fence,
     .emit_pipeline_sync = si_dma_ring_emit_pipeline_sync,
     .emit_vm_flush = si_dma_ring_emit_vm_flush,
-    .emit_hdp_flush = si_dma_ring_emit_hdp_flush,
-    .emit_hdp_invalidate = si_dma_ring_emit_hdp_invalidate,
     .test_ring = si_dma_ring_test_ring,
     .test_ib = si_dma_ring_test_ib,
     .insert_nop = amdgpu_ring_insert_nop,
     .pad_ib = si_dma_ring_pad_ib,
+    .emit_wreg = si_dma_ring_emit_wreg,
 };
 static void si_dma_set_ring_funcs(struct amdgpu_device *adev)
@@ -891,9 +874,6 @@ static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
     .copy_pte = si_dma_vm_copy_pte,
     .write_pte = si_dma_vm_write_pte,
-    .set_max_nums_pte_pde = 0xffff8 >> 3,
-    .set_pte_pde_num_dw = 9,
     .set_pte_pde = si_dma_vm_set_pte_pde,
 };


@@ -3064,7 +3064,7 @@ static bool si_dpm_vblank_too_short(void *handle)
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
     u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
     /* we never hit the non-gddr5 limit so disable it */
-    u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0;
+    u32 switch_limit = adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0;
     if (vblank_time < switch_limit)
         return true;
@@ -4350,7 +4350,7 @@ static u8 si_get_strobe_mode_settings(struct amdgpu_device *adev, u32 mclk)
     if (mclk <= pi->mclk_strobe_mode_threshold)
         strobe_mode = true;
-    if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
+    if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
         result = si_get_mclk_frequency_ratio(mclk, strobe_mode);
     else
         result = si_get_ddr3_mclk_frequency_ratio(mclk);
@@ -4937,7 +4937,7 @@ static int si_populate_smc_initial_state(struct amdgpu_device *adev,
     table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);
     table->initialState.levels[0].gen2PCIE = (u8)si_pi->boot_pcie_gen;
-    if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
+    if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
         table->initialState.levels[0].strobeMode =
             si_get_strobe_mode_settings(adev,
                 initial_state->performance_levels[0].mclk);
@@ -5208,7 +5208,7 @@ static int si_init_smc_table(struct amdgpu_device *adev)
     if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
         table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
-    if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
+    if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
         table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
     if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY)
@@ -5385,7 +5385,7 @@ static int si_populate_mclk_value(struct amdgpu_device *adev,
     mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
     mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);
-    if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
+    if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
         mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
         mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
             YCLK_POST_DIV(mpll_param.post_div);
@@ -5397,7 +5397,7 @@ static int si_populate_mclk_value(struct amdgpu_device *adev,
     u32 tmp;
     u32 reference_clock = adev->clock.mpll.reference_freq;
-    if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
+    if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
         freq_nom = memory_clock * 4;
     else
         freq_nom = memory_clock * 2;
@@ -5489,7 +5489,7 @@ static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
         level->mcFlags |= SISLANDS_SMC_MC_PG_EN;
     }
-    if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
+    if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
         if (pl->mclk > pi->mclk_edc_enable_threshold)
             level->mcFlags |= SISLANDS_SMC_MC_EDC_RD_FLAG;
@@ -5860,12 +5860,12 @@ static int si_set_mc_special_registers(struct amdgpu_device *adev,
             table->mc_reg_table_entry[k].mc_data[j] =
                 (temp_reg & 0xffff0000) |
                 (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
-            if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
+            if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
                 table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
         }
         j++;
-        if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
+        if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
             if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
                 return -EINVAL;
             table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD;
@@ -8056,7 +8056,6 @@ const struct amd_ip_funcs si_dpm_ip_funcs = {
 };
 const struct amd_pm_funcs si_dpm_funcs = {
-    .get_temperature = &si_dpm_get_temp,
     .pre_set_power_state = &si_dpm_pre_set_power_state,
     .set_power_state = &si_dpm_set_power_state,
     .post_set_power_state = &si_dpm_post_set_power_state,


@@ -417,12 +417,7 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
     pci_save_state(adev->pdev);
-    for (i = 0; i < AMDGPU_MAX_IP_NUM; i++) {
-        if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP){
-            adev->ip_blocks[i].version->funcs->soft_reset((void *)adev);
-            break;
-        }
-    }
+    psp_gpu_reset(adev);
     pci_restore_state(adev->pdev);
@@ -583,6 +578,21 @@ static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
     return adev->nbio_funcs->get_rev_id(adev);
 }
+static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
+{
+    adev->nbio_funcs->hdp_flush(adev, ring);
+}
+static void soc15_invalidate_hdp(struct amdgpu_device *adev,
+                                 struct amdgpu_ring *ring)
+{
+    if (!ring || !ring->funcs->emit_wreg)
+        WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
+    else
+        amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
+            HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
+}
 static const struct amdgpu_asic_funcs soc15_asic_funcs =
 {
     .read_disabled_bios = &soc15_read_disabled_bios,
@@ -594,6 +604,8 @@ static const struct amdgpu_asic_funcs soc15_asic_funcs =
     .set_uvd_clocks = &soc15_set_uvd_clocks,
     .set_vce_clocks = &soc15_set_vce_clocks,
     .get_config_memsize = &soc15_get_config_memsize,
+    .flush_hdp = &soc15_flush_hdp,
+    .invalidate_hdp = &soc15_invalidate_hdp,
 };
 static int soc15_common_early_init(void *handle)


@@ -27,6 +27,9 @@
 #include "nbio_v6_1.h"
 #include "nbio_v7_0.h"
+#define SOC15_FLUSH_GPU_TLB_NUM_WREG		4
+#define SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT	1
 extern const struct amd_ip_funcs soc15_common_ip_funcs;
 struct soc15_reg_golden {

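The two macros above let each SOC15 ring price a worst-case VM flush in its own packet sizes when computing emit_frame_size. A small worked example using the sdma_v4_0 costs from the earlier hunk (3 dwords per register write, 6 per register wait); the per-packet numbers mirror that hunk rather than any authoritative spec:

/* Frame-size budgeting example: total = 6+3+6 + 4*3 + 1*6 + 30 = 63 dwords. */
#include <stdio.h>

#define SOC15_FLUSH_GPU_TLB_NUM_WREG		4
#define SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT	1

int main(void)
{
    unsigned int frame =
        6 +                                     /* hdp flush */
        3 +                                     /* hdp invalidate */
        6 +                                     /* pipeline sync */
        SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +      /* vm flush: reg writes */
        SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +  /* vm flush: reg wait */
        10 + 10 + 10;                           /* three fences */

    printf("worst-case dwords per frame: %u\n", frame);
    return 0;
}

Keeping the packet counts in one header means a change to the common flush sequence only has to touch the multiplier, not every ring's hand-added total.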

@@ -271,7 +271,7 @@ static void tonga_ih_decode_iv(struct amdgpu_device *adev,
     entry->src_data[0] = dw[1] & 0xfffffff;
     entry->ring_id = dw[2] & 0xff;
     entry->vmid = (dw[2] >> 8) & 0xff;
-    entry->pas_id = (dw[2] >> 16) & 0xffff;
+    entry->pasid = (dw[2] >> 16) & 0xffff;
     /* wptr/rptr are in bytes! */
     adev->irq.ih.rptr += 16;


@@ -463,32 +463,6 @@ static void uvd_v4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
     amdgpu_ring_write(ring, 2);
 }
-/**
- * uvd_v4_2_ring_emit_hdp_flush - emit an hdp flush
- *
- * @ring: amdgpu_ring pointer
- *
- * Emits an hdp flush.
- */
-static void uvd_v4_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
-{
-    amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
-    amdgpu_ring_write(ring, 0);
-}
-/**
- * uvd_v4_2_ring_hdp_invalidate - emit an hdp invalidate
- *
- * @ring: amdgpu_ring pointer
- *
- * Emits an hdp invalidate.
- */
-static void uvd_v4_2_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
-{
-    amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
-    amdgpu_ring_write(ring, 1);
-}
 /**
  * uvd_v4_2_ring_test_ring - register write test
  *
@@ -765,14 +739,10 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
     .set_wptr = uvd_v4_2_ring_set_wptr,
     .parse_cs = amdgpu_uvd_ring_parse_cs,
     .emit_frame_size =
-        2 + /* uvd_v4_2_ring_emit_hdp_flush */
-        2 + /* uvd_v4_2_ring_emit_hdp_invalidate */
         14, /* uvd_v4_2_ring_emit_fence x1 no user fence */
     .emit_ib_size = 4, /* uvd_v4_2_ring_emit_ib */
     .emit_ib = uvd_v4_2_ring_emit_ib,
     .emit_fence = uvd_v4_2_ring_emit_fence,
-    .emit_hdp_flush = uvd_v4_2_ring_emit_hdp_flush,
-    .emit_hdp_invalidate = uvd_v4_2_ring_emit_hdp_invalidate,
     .test_ring = uvd_v4_2_ring_test_ring,
     .test_ib = amdgpu_uvd_ring_test_ib,
     .insert_nop = amdgpu_ring_insert_nop,


@@ -478,32 +478,6 @@ static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
     amdgpu_ring_write(ring, 2);
 }
-/**
- * uvd_v5_0_ring_emit_hdp_flush - emit an hdp flush
- *
- * @ring: amdgpu_ring pointer
- *
- * Emits an hdp flush.
- */
-static void uvd_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
-{
-    amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
-    amdgpu_ring_write(ring, 0);
-}
-/**
- * uvd_v5_0_ring_hdp_invalidate - emit an hdp invalidate
- *
- * @ring: amdgpu_ring pointer
- *
- * Emits an hdp invalidate.
- */
-static void uvd_v5_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
-{
-    amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
-    amdgpu_ring_write(ring, 1);
-}
 /**
  * uvd_v5_0_ring_test_ring - register write test
  *
@@ -873,14 +847,10 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
     .set_wptr = uvd_v5_0_ring_set_wptr,
     .parse_cs = amdgpu_uvd_ring_parse_cs,
     .emit_frame_size =
-        2 + /* uvd_v5_0_ring_emit_hdp_flush */
-        2 + /* uvd_v5_0_ring_emit_hdp_invalidate */
         14, /* uvd_v5_0_ring_emit_fence x1 no user fence */
     .emit_ib_size = 6, /* uvd_v5_0_ring_emit_ib */
     .emit_ib = uvd_v5_0_ring_emit_ib,
     .emit_fence = uvd_v5_0_ring_emit_fence,
-    .emit_hdp_flush = uvd_v5_0_ring_emit_hdp_flush,
-    .emit_hdp_invalidate = uvd_v5_0_ring_emit_hdp_invalidate,
     .test_ring = uvd_v5_0_ring_test_ring,
     .test_ib = amdgpu_uvd_ring_test_ib,
     .insert_nop = amdgpu_ring_insert_nop,


@@ -963,32 +963,6 @@ static void uvd_v6_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
     amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
 }
-/**
- * uvd_v6_0_ring_emit_hdp_flush - emit an hdp flush
- *
- * @ring: amdgpu_ring pointer
- *
- * Emits an hdp flush.
- */
-static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
-{
-    amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
-    amdgpu_ring_write(ring, 0);
-}
-/**
- * uvd_v6_0_ring_hdp_invalidate - emit an hdp invalidate
- *
- * @ring: amdgpu_ring pointer
- *
- * Emits an hdp invalidate.
- */
-static void uvd_v6_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
-{
-    amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
-    amdgpu_ring_write(ring, 1);
-}
 /**
  * uvd_v6_0_ring_test_ring - register write test
  *
@@ -1072,29 +1046,21 @@ static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
     amdgpu_ring_write(ring, ib->length_dw);
 }
-static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
-                                        unsigned vmid, uint64_t pd_addr)
+static void uvd_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
+                                    uint32_t reg, uint32_t val)
 {
-    uint32_t reg;
-    if (vmid < 8)
-        reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
-    else
-        reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
     amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
     amdgpu_ring_write(ring, reg << 2);
     amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
-    amdgpu_ring_write(ring, pd_addr >> 12);
+    amdgpu_ring_write(ring, val);
     amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
     amdgpu_ring_write(ring, 0x8);
-    amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
-    amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
-    amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
-    amdgpu_ring_write(ring, 1 << vmid);
-    amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
-    amdgpu_ring_write(ring, 0x8);
+}
+static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+                                        unsigned vmid, uint64_t pd_addr)
+{
+    amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
     amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
     amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
@@ -1140,7 +1106,7 @@ static void uvd_v6_0_enc_ring_insert_end(struct amdgpu_ring *ring)
 }
 static void uvd_v6_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
                                             unsigned int vmid, uint64_t pd_addr)
 {
     amdgpu_ring_write(ring, HEVC_ENC_CMD_UPDATE_PTB);
     amdgpu_ring_write(ring, vmid);
@@ -1562,21 +1528,19 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
     .set_wptr = uvd_v6_0_ring_set_wptr,
     .parse_cs = amdgpu_uvd_ring_parse_cs,
     .emit_frame_size =
-        2 + /* uvd_v6_0_ring_emit_hdp_flush */
-        2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
+        6 + 6 + /* hdp flush / invalidate */
         10 + /* uvd_v6_0_ring_emit_pipeline_sync */
         14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
     .emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
     .emit_ib = uvd_v6_0_ring_emit_ib,
     .emit_fence = uvd_v6_0_ring_emit_fence,
-    .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
-    .emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
     .test_ring = uvd_v6_0_ring_test_ring,
     .test_ib = amdgpu_uvd_ring_test_ib,
     .insert_nop = amdgpu_ring_insert_nop,
     .pad_ib = amdgpu_ring_generic_pad_ib,
     .begin_use = amdgpu_uvd_ring_begin_use,
     .end_use = amdgpu_uvd_ring_end_use,
+    .emit_wreg = uvd_v6_0_ring_emit_wreg,
 };
 static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
@@ -1588,24 +1552,22 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
     .get_wptr = uvd_v6_0_ring_get_wptr,
     .set_wptr = uvd_v6_0_ring_set_wptr,
     .emit_frame_size =
-        2 + /* uvd_v6_0_ring_emit_hdp_flush */
-        2 + /* uvd_v6_0_ring_emit_hdp_invalidate */
+        6 + 6 + /* hdp flush / invalidate */
         10 + /* uvd_v6_0_ring_emit_pipeline_sync */
-        20 + /* uvd_v6_0_ring_emit_vm_flush */
+        VI_FLUSH_GPU_TLB_NUM_WREG * 6 + 8 + /* uvd_v6_0_ring_emit_vm_flush */
         14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
     .emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
     .emit_ib = uvd_v6_0_ring_emit_ib,
     .emit_fence = uvd_v6_0_ring_emit_fence,
     .emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
     .emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
-    .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
-    .emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
     .test_ring = uvd_v6_0_ring_test_ring,
     .test_ib = amdgpu_uvd_ring_test_ib,
     .insert_nop = amdgpu_ring_insert_nop,
     .pad_ib = amdgpu_ring_generic_pad_ib,
     .begin_use = amdgpu_uvd_ring_begin_use,
     .end_use = amdgpu_uvd_ring_end_use,
+    .emit_wreg = uvd_v6_0_ring_emit_wreg,
 };
 static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = {

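uvd_v6_0_ring_emit_wreg above shows that UVD has no plain register-write packet: the write travels as three GPCOM packets (address, value, then command 0x8). A standalone model of that sequence; the offsets, packet helper, and ring stub below are hypothetical illustrations, not the real register map:

/* UVD-style indirect register write: three mailbox packets per write. */
#include <stdint.h>
#include <stdio.h>

#define GPCOM_VCPU_DATA0 0x03c4 /* hypothetical offsets */
#define GPCOM_VCPU_DATA1 0x03c5
#define GPCOM_VCPU_CMD   0x03c3
#define GPCOM_CMD_WREG   0x8

static void ring_write(uint32_t dw) { printf("dw 0x%08x\n", dw); }
static uint32_t packet0(uint32_t reg) { return reg & 0xffff; }

static void uvd_like_emit_wreg(uint32_t reg, uint32_t val)
{
    ring_write(packet0(GPCOM_VCPU_DATA0));
    ring_write(reg << 2);               /* dword offset -> byte address */
    ring_write(packet0(GPCOM_VCPU_DATA1));
    ring_write(val);
    ring_write(packet0(GPCOM_VCPU_CMD));
    ring_write(GPCOM_CMD_WREG);
}

int main(void)
{
    uvd_like_emit_wreg(0x10, 1);
    return 0;
}

The six dwords per write are why the uvd_v6_0 frame-size entries above multiply VI_FLUSH_GPU_TLB_NUM_WREG by 6 rather than the 3 used on SDMA.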

@ -25,6 +25,7 @@
#include <drm/drmP.h> #include <drm/drmP.h>
#include "amdgpu.h" #include "amdgpu.h"
#include "amdgpu_uvd.h" #include "amdgpu_uvd.h"
#include "soc15.h"
#include "soc15d.h" #include "soc15d.h"
#include "soc15_common.h" #include "soc15_common.h"
#include "mmsch_v1_0.h" #include "mmsch_v1_0.h"
@ -1134,37 +1135,6 @@ static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP); amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
} }
/**
* uvd_v7_0_ring_emit_hdp_flush - emit an hdp flush
*
* @ring: amdgpu_ring pointer
*
* Emits an hdp flush.
*/
static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(NBIF, 0,
mmHDP_MEM_COHERENCY_FLUSH_CNTL), 0));
amdgpu_ring_write(ring, 0);
}
/**
* uvd_v7_0_ring_hdp_invalidate - emit an hdp invalidate
*
* @ring: amdgpu_ring pointer
*
* Emits an hdp invalidate.
*/
static void uvd_v7_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 0));
amdgpu_ring_write(ring, 1);
}
/** /**
* uvd_v7_0_ring_test_ring - register write test * uvd_v7_0_ring_test_ring - register write test
* *
@ -1255,33 +1225,33 @@ static void uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, ib->length_dw); amdgpu_ring_write(ring, ib->length_dw);
} }
static void uvd_v7_0_vm_reg_write(struct amdgpu_ring *ring, static void uvd_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
uint32_t data0, uint32_t data1) uint32_t reg, uint32_t val)
{ {
struct amdgpu_device *adev = ring->adev; struct amdgpu_device *adev = ring->adev;
amdgpu_ring_write(ring, amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0)); PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
amdgpu_ring_write(ring, data0); amdgpu_ring_write(ring, reg << 2);
amdgpu_ring_write(ring, amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0)); PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
amdgpu_ring_write(ring, data1); amdgpu_ring_write(ring, val);
amdgpu_ring_write(ring, amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0)); PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
amdgpu_ring_write(ring, 8); amdgpu_ring_write(ring, 8);
} }
static void uvd_v7_0_vm_reg_wait(struct amdgpu_ring *ring, static void uvd_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
uint32_t data0, uint32_t data1, uint32_t mask) uint32_t val, uint32_t mask)
{ {
struct amdgpu_device *adev = ring->adev; struct amdgpu_device *adev = ring->adev;
amdgpu_ring_write(ring, amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0)); PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
amdgpu_ring_write(ring, data0); amdgpu_ring_write(ring, reg << 2);
amdgpu_ring_write(ring, amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0)); PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
amdgpu_ring_write(ring, data1); amdgpu_ring_write(ring, val);
amdgpu_ring_write(ring, amdgpu_ring_write(ring,
PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0)); PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0));
amdgpu_ring_write(ring, mask); amdgpu_ring_write(ring, mask);
@ -1294,37 +1264,15 @@ static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vmid, uint64_t pd_addr) unsigned vmid, uint64_t pd_addr)
{ {
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
uint64_t flags = AMDGPU_PTE_VALID;
unsigned eng = ring->vm_inv_eng;
uint32_t data0, data1, mask; uint32_t data0, data1, mask;
amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags); pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
pd_addr |= flags;
data0 = (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2; /* wait for reg writes */
data1 = upper_32_bits(pd_addr); data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
uvd_v7_0_vm_reg_write(ring, data0, data1);
data0 = (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2;
data1 = lower_32_bits(pd_addr);
uvd_v7_0_vm_reg_write(ring, data0, data1);
data0 = (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2;
data1 = lower_32_bits(pd_addr); data1 = lower_32_bits(pd_addr);
mask = 0xffffffff; mask = 0xffffffff;
uvd_v7_0_vm_reg_wait(ring, data0, data1, mask); uvd_v7_0_ring_emit_reg_wait(ring, data0, data1, mask);
/* flush TLB */
data0 = (hub->vm_inv_eng0_req + eng) << 2;
data1 = req;
uvd_v7_0_vm_reg_write(ring, data0, data1);
/* wait for flush */
data0 = (hub->vm_inv_eng0_ack + eng) << 2;
data1 = 1 << vmid;
mask = 1 << vmid;
uvd_v7_0_vm_reg_wait(ring, data0, data1, mask);
} }
static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
@ -1342,40 +1290,34 @@ static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, HEVC_ENC_CMD_END); amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
} }
static void uvd_v7_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
uint32_t reg, uint32_t val,
uint32_t mask)
{
amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
amdgpu_ring_write(ring, reg << 2);
amdgpu_ring_write(ring, mask);
amdgpu_ring_write(ring, val);
}
static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring, static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned int vmid, uint64_t pd_addr) unsigned int vmid, uint64_t pd_addr)
{ {
struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub]; struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
uint64_t flags = AMDGPU_PTE_VALID;
unsigned eng = ring->vm_inv_eng;
amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags); pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
pd_addr |= flags;
/* wait for reg writes */
uvd_v7_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2,
lower_32_bits(pd_addr), 0xffffffff);
}
static void uvd_v7_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
uint32_t reg, uint32_t val)
{
amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE); amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2); amdgpu_ring_write(ring, reg << 2);
amdgpu_ring_write(ring, upper_32_bits(pd_addr)); amdgpu_ring_write(ring, val);
amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2);
amdgpu_ring_write(ring, lower_32_bits(pd_addr));
amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2);
amdgpu_ring_write(ring, 0xffffffff);
amdgpu_ring_write(ring, lower_32_bits(pd_addr));
/* flush TLB */
amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
amdgpu_ring_write(ring, (hub->vm_inv_eng0_req + eng) << 2);
amdgpu_ring_write(ring, req);
/* wait for flush */
amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WAIT);
amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
amdgpu_ring_write(ring, 1 << vmid);
amdgpu_ring_write(ring, 1 << vmid);
} }
#if 0 #if 0
@@ -1712,22 +1654,23 @@ static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
 	.get_wptr = uvd_v7_0_ring_get_wptr,
 	.set_wptr = uvd_v7_0_ring_set_wptr,
 	.emit_frame_size =
-		2 + /* uvd_v7_0_ring_emit_hdp_flush */
-		2 + /* uvd_v7_0_ring_emit_hdp_invalidate */
-		34 + /* uvd_v7_0_ring_emit_vm_flush */
+		6 + 6 + /* hdp flush / invalidate */
+		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
+		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+		8 + /* uvd_v7_0_ring_emit_vm_flush */
 		14 + 14, /* uvd_v7_0_ring_emit_fence x2 vm fence */
 	.emit_ib_size = 8, /* uvd_v7_0_ring_emit_ib */
 	.emit_ib = uvd_v7_0_ring_emit_ib,
 	.emit_fence = uvd_v7_0_ring_emit_fence,
 	.emit_vm_flush = uvd_v7_0_ring_emit_vm_flush,
-	.emit_hdp_flush = uvd_v7_0_ring_emit_hdp_flush,
-	.emit_hdp_invalidate = uvd_v7_0_ring_emit_hdp_invalidate,
 	.test_ring = uvd_v7_0_ring_test_ring,
 	.test_ib = amdgpu_uvd_ring_test_ib,
 	.insert_nop = uvd_v7_0_ring_insert_nop,
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_uvd_ring_begin_use,
 	.end_use = amdgpu_uvd_ring_end_use,
+	.emit_wreg = uvd_v7_0_ring_emit_wreg,
+	.emit_reg_wait = uvd_v7_0_ring_emit_reg_wait,
 };
 
 static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
@@ -1740,7 +1683,10 @@ static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
 	.get_wptr = uvd_v7_0_enc_ring_get_wptr,
 	.set_wptr = uvd_v7_0_enc_ring_set_wptr,
 	.emit_frame_size =
-		17 + /* uvd_v7_0_enc_ring_emit_vm_flush */
+		3 + 3 + /* hdp flush / invalidate */
+		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
+		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
+		4 + /* uvd_v7_0_enc_ring_emit_vm_flush */
 		5 + 5 + /* uvd_v7_0_enc_ring_emit_fence x2 vm fence */
 		1, /* uvd_v7_0_enc_ring_insert_end */
 	.emit_ib_size = 5, /* uvd_v7_0_enc_ring_emit_ib */
@@ -1754,6 +1700,8 @@ static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = {
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_uvd_ring_begin_use,
 	.end_use = amdgpu_uvd_ring_end_use,
+	.emit_wreg = uvd_v7_0_enc_ring_emit_wreg,
+	.emit_reg_wait = uvd_v7_0_enc_ring_emit_reg_wait,
 };
 
 static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
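
The uvd_v7_0 hunks above show the pattern the rest of this series repeats: the open-coded GMC9 invalidate is deleted from the engine, the engine keeps only a wreg/reg_wait pair in its own packet format, and the common amdgpu_gmc_emit_flush_gpu_tlb() helper rebuilds the flush from those callbacks. A minimal sketch of the sequence the helper is expected to drive, reconstructed from the removed lines (the helper body is not part of this excerpt, and req/eng stand for the invalidate request and the ring's vm_inv_eng exactly as in the old code, so treat the precise shape as an approximation):

	/* sketch only: generic TLB flush composed from the two new callbacks */
	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2,
			      lower_32_bits(pd_addr));
	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + vmid * 2,
			      upper_32_bits(pd_addr));
	/* flush TLB */
	amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_req + eng, req);
	/* wait for flush */
	amdgpu_ring_emit_reg_wait(ring, hub->vm_inv_eng0_ack + eng,
				  1 << vmid, 1 << vmid);

Each engine's emit_vm_flush then only appends its own wait for the PTB low-word write to land, which is why the functions shrink to a single reg_wait call.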

diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c

@@ -844,7 +844,7 @@ static void vce_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
 }
 
 static void vce_v3_0_emit_vm_flush(struct amdgpu_ring *ring,
 				   unsigned int vmid, uint64_t pd_addr)
 {
 	amdgpu_ring_write(ring, VCE_CMD_UPDATE_PTB);
 	amdgpu_ring_write(ring, vmid);

diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c

@@ -28,6 +28,7 @@
 #include <drm/drmP.h>
 #include "amdgpu.h"
 #include "amdgpu_vce.h"
+#include "soc15.h"
 #include "soc15d.h"
 #include "soc15_common.h"
 #include "mmsch_v1_0.h"
@@ -964,40 +965,33 @@ static void vce_v4_0_ring_insert_end(struct amdgpu_ring *ring)
 	amdgpu_ring_write(ring, VCE_CMD_END);
 }
 
+static void vce_v4_0_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+				   uint32_t val, uint32_t mask)
+{
+	amdgpu_ring_write(ring, VCE_CMD_REG_WAIT);
+	amdgpu_ring_write(ring, reg << 2);
+	amdgpu_ring_write(ring, mask);
+	amdgpu_ring_write(ring, val);
+}
+
 static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
 				   unsigned int vmid, uint64_t pd_addr)
 {
 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
-	uint64_t flags = AMDGPU_PTE_VALID;
-	unsigned eng = ring->vm_inv_eng;
 
-	amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
-	pd_addr |= flags;
+	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
 
+	/* wait for reg writes */
+	vce_v4_0_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2,
+			       lower_32_bits(pd_addr), 0xffffffff);
+}
 
+static void vce_v4_0_emit_wreg(struct amdgpu_ring *ring,
+			       uint32_t reg, uint32_t val)
+{
 	amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
-	amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2);
-	amdgpu_ring_write(ring, upper_32_bits(pd_addr));
-	amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
-	amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2);
-	amdgpu_ring_write(ring, lower_32_bits(pd_addr));
-	amdgpu_ring_write(ring, VCE_CMD_REG_WAIT);
-	amdgpu_ring_write(ring, (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2);
-	amdgpu_ring_write(ring, 0xffffffff);
-	amdgpu_ring_write(ring, lower_32_bits(pd_addr));
-
-	/* flush TLB */
-	amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
-	amdgpu_ring_write(ring, (hub->vm_inv_eng0_req + eng) << 2);
-	amdgpu_ring_write(ring, req);
-
-	/* wait for flush */
-	amdgpu_ring_write(ring, VCE_CMD_REG_WAIT);
-	amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
-	amdgpu_ring_write(ring, 1 << vmid);
-	amdgpu_ring_write(ring, 1 << vmid);
+	amdgpu_ring_write(ring, reg << 2);
+	amdgpu_ring_write(ring, val);
 }
 
 static int vce_v4_0_set_interrupt_state(struct amdgpu_device *adev,
@@ -1069,7 +1063,9 @@ static const struct amdgpu_ring_funcs vce_v4_0_ring_vm_funcs = {
 	.set_wptr = vce_v4_0_ring_set_wptr,
 	.parse_cs = amdgpu_vce_ring_parse_cs_vm,
 	.emit_frame_size =
-		17 + /* vce_v4_0_emit_vm_flush */
+		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
+		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
+		4 + /* vce_v4_0_emit_vm_flush */
 		5 + 5 + /* amdgpu_vce_ring_emit_fence x2 vm fence */
 		1, /* vce_v4_0_ring_insert_end */
 	.emit_ib_size = 5, /* vce_v4_0_ring_emit_ib */
@@ -1083,6 +1079,8 @@ static const struct amdgpu_ring_funcs vce_v4_0_ring_vm_funcs = {
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_vce_ring_begin_use,
 	.end_use = amdgpu_vce_ring_end_use,
+	.emit_wreg = vce_v4_0_emit_wreg,
+	.emit_reg_wait = vce_v4_0_emit_reg_wait,
 };
 
 static void vce_v4_0_set_ring_funcs(struct amdgpu_device *adev)
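
The emit_frame_size accounting deserves one worked example. On the VCE and encode rings a register write costs 3 dwords (command, register, value) and a register wait costs 4 (command, register, mask, value); the SOC15_FLUSH_GPU_TLB_NUM_* constants count how many of each the common flush emits. Assuming 3 writes and 1 wait, which is what the removed open-coded sequence contained:

	SOC15_FLUSH_GPU_TLB_NUM_WREG * 3	/* 3 writes * 3 dw =  9 */
	+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4	/* 1 wait  * 4 dw =  4 */
	+ 4					/* trailing PTB reg_wait */
						/* total         = 17 */

That is exactly the hard-coded 17 being replaced; the rework changes who owns the number, not the worst-case ring usage.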

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c

@@ -25,6 +25,7 @@
 #include <drm/drmP.h>
 #include "amdgpu.h"
 #include "amdgpu_vcn.h"
+#include "soc15.h"
 #include "soc15d.h"
 #include "soc15_common.h"
@@ -808,21 +809,6 @@ static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64
 	amdgpu_ring_write(ring, VCN_DEC_CMD_TRAP << 1);
 }
 
-/**
- * vcn_v1_0_dec_ring_hdp_invalidate - emit an hdp invalidate
- *
- * @ring: amdgpu_ring pointer
- *
- * Emits an hdp invalidate.
- */
-static void vcn_v1_0_dec_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
-{
-	struct amdgpu_device *adev = ring->adev;
-
-	amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 0));
-	amdgpu_ring_write(ring, 1);
-}
-
 /**
  * vcn_v1_0_dec_ring_emit_ib - execute indirect buffer
  *
@@ -852,33 +838,18 @@ static void vcn_v1_0_dec_ring_emit_ib(struct amdgpu_ring *ring,
 	amdgpu_ring_write(ring, ib->length_dw);
 }
 
-static void vcn_v1_0_dec_vm_reg_write(struct amdgpu_ring *ring,
-		uint32_t data0, uint32_t data1)
+static void vcn_v1_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring,
+					    uint32_t reg, uint32_t val,
+					    uint32_t mask)
 {
 	struct amdgpu_device *adev = ring->adev;
 
 	amdgpu_ring_write(ring,
 		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
-	amdgpu_ring_write(ring, data0);
+	amdgpu_ring_write(ring, reg << 2);
 	amdgpu_ring_write(ring,
 		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
-	amdgpu_ring_write(ring, data1);
-	amdgpu_ring_write(ring,
-		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
-	amdgpu_ring_write(ring, VCN_DEC_CMD_WRITE_REG << 1);
-}
-
-static void vcn_v1_0_dec_vm_reg_wait(struct amdgpu_ring *ring,
-		uint32_t data0, uint32_t data1, uint32_t mask)
-{
-	struct amdgpu_device *adev = ring->adev;
-
-	amdgpu_ring_write(ring,
-		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
-	amdgpu_ring_write(ring, data0);
-	amdgpu_ring_write(ring,
-		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
-	amdgpu_ring_write(ring, data1);
+	amdgpu_ring_write(ring, val);
 	amdgpu_ring_write(ring,
 		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0));
 	amdgpu_ring_write(ring, mask);
@@ -888,40 +859,34 @@ static void vcn_v1_0_dec_vm_reg_wait(struct amdgpu_ring *ring,
 }
 
 static void vcn_v1_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
 					    unsigned vmid, uint64_t pd_addr)
 {
 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
-	uint64_t flags = AMDGPU_PTE_VALID;
-	unsigned eng = ring->vm_inv_eng;
 	uint32_t data0, data1, mask;
 
-	amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
-	pd_addr |= flags;
+	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
 
-	data0 = (hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2;
-	data1 = upper_32_bits(pd_addr);
-	vcn_v1_0_dec_vm_reg_write(ring, data0, data1);
-
-	data0 = (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2;
-	data1 = lower_32_bits(pd_addr);
-	vcn_v1_0_dec_vm_reg_write(ring, data0, data1);
-
-	data0 = (hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2;
+	/* wait for register write */
+	data0 = hub->ctx0_ptb_addr_lo32 + vmid * 2;
 	data1 = lower_32_bits(pd_addr);
 	mask = 0xffffffff;
-	vcn_v1_0_dec_vm_reg_wait(ring, data0, data1, mask);
-
-	/* flush TLB */
-	data0 = (hub->vm_inv_eng0_req + eng) << 2;
-	data1 = req;
-	vcn_v1_0_dec_vm_reg_write(ring, data0, data1);
-
-	/* wait for flush */
-	data0 = (hub->vm_inv_eng0_ack + eng) << 2;
-	data1 = 1 << vmid;
-	mask = 1 << vmid;
-	vcn_v1_0_dec_vm_reg_wait(ring, data0, data1, mask);
+	vcn_v1_0_dec_ring_emit_reg_wait(ring, data0, data1, mask);
+}
+
+static void vcn_v1_0_dec_ring_emit_wreg(struct amdgpu_ring *ring,
+					uint32_t reg, uint32_t val)
+{
+	struct amdgpu_device *adev = ring->adev;
+
+	amdgpu_ring_write(ring,
+		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0));
+	amdgpu_ring_write(ring, reg << 2);
+	amdgpu_ring_write(ring,
+		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0));
+	amdgpu_ring_write(ring, val);
+	amdgpu_ring_write(ring,
+		PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0));
+	amdgpu_ring_write(ring, VCN_DEC_CMD_WRITE_REG << 1);
 }
 
 /**
@@ -1020,43 +985,34 @@ static void vcn_v1_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
 	amdgpu_ring_write(ring, ib->length_dw);
 }
 
+static void vcn_v1_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring,
+					    uint32_t reg, uint32_t val,
+					    uint32_t mask)
+{
+	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
+	amdgpu_ring_write(ring, reg << 2);
+	amdgpu_ring_write(ring, mask);
+	amdgpu_ring_write(ring, val);
+}
+
 static void vcn_v1_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
 					    unsigned int vmid, uint64_t pd_addr)
 {
 	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
-	uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vmid);
-	uint64_t flags = AMDGPU_PTE_VALID;
-	unsigned eng = ring->vm_inv_eng;
 
-	amdgpu_gart_get_vm_pde(ring->adev, -1, &pd_addr, &flags);
-	pd_addr |= flags;
+	pd_addr = amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
 
+	/* wait for reg writes */
+	vcn_v1_0_enc_ring_emit_reg_wait(ring, hub->ctx0_ptb_addr_lo32 + vmid * 2,
+					lower_32_bits(pd_addr), 0xffffffff);
+}
 
+static void vcn_v1_0_enc_ring_emit_wreg(struct amdgpu_ring *ring,
+					uint32_t reg, uint32_t val)
+{
 	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
-	amdgpu_ring_write(ring,
-		(hub->ctx0_ptb_addr_hi32 + vmid * 2) << 2);
-	amdgpu_ring_write(ring, upper_32_bits(pd_addr));
-	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
-	amdgpu_ring_write(ring,
-		(hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2);
-	amdgpu_ring_write(ring, lower_32_bits(pd_addr));
-	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
-	amdgpu_ring_write(ring,
-		(hub->ctx0_ptb_addr_lo32 + vmid * 2) << 2);
-	amdgpu_ring_write(ring, 0xffffffff);
-	amdgpu_ring_write(ring, lower_32_bits(pd_addr));
-
-	/* flush TLB */
-	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WRITE);
-	amdgpu_ring_write(ring, (hub->vm_inv_eng0_req + eng) << 2);
-	amdgpu_ring_write(ring, req);
-
-	/* wait for flush */
-	amdgpu_ring_write(ring, VCN_ENC_CMD_REG_WAIT);
-	amdgpu_ring_write(ring, (hub->vm_inv_eng0_ack + eng) << 2);
-	amdgpu_ring_write(ring, 1 << vmid);
-	amdgpu_ring_write(ring, 1 << vmid);
+	amdgpu_ring_write(ring, reg << 2);
+	amdgpu_ring_write(ring, val);
 }
 
 static int vcn_v1_0_set_interrupt_state(struct amdgpu_device *adev,
@@ -1133,15 +1089,16 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
 	.get_wptr = vcn_v1_0_dec_ring_get_wptr,
 	.set_wptr = vcn_v1_0_dec_ring_set_wptr,
 	.emit_frame_size =
-		2 + /* vcn_v1_0_dec_ring_emit_hdp_invalidate */
-		34 + /* vcn_v1_0_dec_ring_emit_vm_flush */
+		6 + 6 + /* hdp invalidate / flush */
+		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
+		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+		8 + /* vcn_v1_0_dec_ring_emit_vm_flush */
 		14 + 14 + /* vcn_v1_0_dec_ring_emit_fence x2 vm fence */
 		6,
 	.emit_ib_size = 8, /* vcn_v1_0_dec_ring_emit_ib */
 	.emit_ib = vcn_v1_0_dec_ring_emit_ib,
 	.emit_fence = vcn_v1_0_dec_ring_emit_fence,
 	.emit_vm_flush = vcn_v1_0_dec_ring_emit_vm_flush,
-	.emit_hdp_invalidate = vcn_v1_0_dec_ring_emit_hdp_invalidate,
 	.test_ring = amdgpu_vcn_dec_ring_test_ring,
 	.test_ib = amdgpu_vcn_dec_ring_test_ib,
 	.insert_nop = vcn_v1_0_ring_insert_nop,
@@ -1150,6 +1107,8 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_vcn_ring_begin_use,
 	.end_use = amdgpu_vcn_ring_end_use,
+	.emit_wreg = vcn_v1_0_dec_ring_emit_wreg,
+	.emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait,
 };
 
 static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
@@ -1162,7 +1121,9 @@ static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
 	.get_wptr = vcn_v1_0_enc_ring_get_wptr,
 	.set_wptr = vcn_v1_0_enc_ring_set_wptr,
 	.emit_frame_size =
-		17 + /* vcn_v1_0_enc_ring_emit_vm_flush */
+		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
+		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
+		4 + /* vcn_v1_0_enc_ring_emit_vm_flush */
 		5 + 5 + /* vcn_v1_0_enc_ring_emit_fence x2 vm fence */
 		1, /* vcn_v1_0_enc_ring_insert_end */
 	.emit_ib_size = 5, /* vcn_v1_0_enc_ring_emit_ib */
@@ -1176,6 +1137,8 @@ static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
 	.pad_ib = amdgpu_ring_generic_pad_ib,
 	.begin_use = amdgpu_vcn_ring_begin_use,
 	.end_use = amdgpu_vcn_ring_end_use,
+	.emit_wreg = vcn_v1_0_enc_ring_emit_wreg,
+	.emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait,
 };
 
 static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev)
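
The decode rings use bigger multipliers because they have no bare register-write packet: as vcn_v1_0_dec_ring_emit_wreg shows, a write is bounced through the UVD_GPCOM_VCPU_DATA0/DATA1/CMD mailbox (6 dwords) and a wait additionally programs GP_SCRATCH8 (8 dwords). Under the same assumption of 3 writes and 1 wait per flush, the TLB-flush portion of both budgets reproduces the constant it replaces:

	dec: SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + 1 * 8 + 8 = 3*6 + 8 + 8 = 34
	enc: SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + 1 * 4 + 4 = 3*3 + 4 + 4 = 17

matching the old hard-coded 34 and 17, so the rework is size-neutral for the flush itself; only the HDP terms grow, since HDP flushes are now emitted through the mailbox as well.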

diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c

@@ -333,7 +333,7 @@ static void vega10_ih_decode_iv(struct amdgpu_device *adev,
 	entry->vmid_src = (dw[0] >> 31);
 	entry->timestamp = dw[1] | ((u64)(dw[2] & 0xffff) << 32);
 	entry->timestamp_src = dw[2] >> 31;
-	entry->pas_id = dw[3] & 0xffff;
+	entry->pasid = dw[3] & 0xffff;
 	entry->pasid_src = dw[3] >> 31;
 	entry->src_data[0] = dw[4];
 	entry->src_data[1] = dw[5];

diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c

@@ -24,7 +24,8 @@
 #include "soc15.h"
 #include "soc15_common.h"
-#include "soc15ip.h"
+#include "soc15_hw_ip.h"
+#include "vega10_ip_offset.h"
 
 int vega10_reg_base_init(struct amdgpu_device *adev)
 {

diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c

@@ -856,6 +856,27 @@ static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
 		>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
 }
 
+static void vi_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
+{
+	if (!ring || !ring->funcs->emit_wreg) {
+		WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
+		RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
+	} else {
+		amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
+	}
+}
+
+static void vi_invalidate_hdp(struct amdgpu_device *adev,
+			      struct amdgpu_ring *ring)
+{
+	if (!ring || !ring->funcs->emit_wreg) {
+		WREG32(mmHDP_DEBUG0, 1);
+		RREG32(mmHDP_DEBUG0);
+	} else {
+		amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
+	}
+}
+
 static const struct amdgpu_asic_funcs vi_asic_funcs =
 {
 	.read_disabled_bios = &vi_read_disabled_bios,
@@ -867,6 +888,8 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
 	.set_uvd_clocks = &vi_set_uvd_clocks,
 	.set_vce_clocks = &vi_set_vce_clocks,
 	.get_config_memsize = &vi_get_config_memsize,
+	.flush_hdp = &vi_flush_hdp,
+	.invalidate_hdp = &vi_invalidate_hdp,
 };
 
 #define CZ_REV_BRISTOL(rev) \
View File

@ -24,6 +24,8 @@
#ifndef __VI_H__ #ifndef __VI_H__
#define __VI_H__ #define __VI_H__
#define VI_FLUSH_GPU_TLB_NUM_WREG 3
void vi_srbm_select(struct amdgpu_device *adev, void vi_srbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid); u32 me, u32 pipe, u32 queue, u32 vmid);
int vi_set_ip_blocks(struct amdgpu_device *adev); int vi_set_ip_blocks(struct amdgpu_device *adev);
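
VI_FLUSH_GPU_TLB_NUM_WREG plays the same role for VI parts that the SOC15_FLUSH_GPU_TLB_NUM_* constants play above: a ring sizes the generic flush as the write count times its own per-write packet cost, e.g. a ring spending 3 dwords per register write would reserve VI_FLUSH_GPU_TLB_NUM_WREG * 3 dwords in emit_frame_size instead of hard-coding the total.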

Some files were not shown because too many files have changed in this diff.