mirror of https://gitee.com/openkylin/linux.git
drm/amdgpu: make function pointers mandatory
We always want those to be set up correctly. Signed-off-by: Christian König <christian.koenig@amd.com> Reviewed-by: Huang Rui <ray.huang@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
parent
0dd1e5bbec
commit
f54b30d70b
|
@ -468,8 +468,7 @@ static const struct amdgpu_ih_funcs cik_ih_funcs = {
|
|||
|
||||
static void cik_ih_set_interrupt_funcs(struct amdgpu_device *adev)
|
||||
{
|
||||
if (adev->irq.ih_funcs == NULL)
|
||||
adev->irq.ih_funcs = &cik_ih_funcs;
|
||||
adev->irq.ih_funcs = &cik_ih_funcs;
|
||||
}
|
||||
|
||||
const struct amdgpu_ip_block_version cik_ih_ip_block =
|
||||
|
|
|
@ -1370,10 +1370,8 @@ static const struct amdgpu_buffer_funcs cik_sdma_buffer_funcs = {
|
|||
|
||||
static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev)
|
||||
{
|
||||
if (adev->mman.buffer_funcs == NULL) {
|
||||
adev->mman.buffer_funcs = &cik_sdma_buffer_funcs;
|
||||
adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
|
||||
}
|
||||
adev->mman.buffer_funcs = &cik_sdma_buffer_funcs;
|
||||
adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
|
||||
}
|
||||
|
||||
static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
|
||||
|
@ -1389,15 +1387,13 @@ static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
|
|||
struct drm_gpu_scheduler *sched;
|
||||
unsigned i;
|
||||
|
||||
if (adev->vm_manager.vm_pte_funcs == NULL) {
|
||||
adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
|
||||
for (i = 0; i < adev->sdma.num_instances; i++) {
|
||||
sched = &adev->sdma.instance[i].ring.sched;
|
||||
adev->vm_manager.vm_pte_rqs[i] =
|
||||
&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
|
||||
}
|
||||
adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
|
||||
adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
|
||||
for (i = 0; i < adev->sdma.num_instances; i++) {
|
||||
sched = &adev->sdma.instance[i].ring.sched;
|
||||
adev->vm_manager.vm_pte_rqs[i] =
|
||||
&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
|
||||
}
|
||||
adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
|
||||
}
|
||||
|
||||
const struct amdgpu_ip_block_version cik_sdma_ip_block =
|
||||
|
|
|
@ -449,8 +449,7 @@ static const struct amdgpu_ih_funcs cz_ih_funcs = {
|
|||
|
||||
static void cz_ih_set_interrupt_funcs(struct amdgpu_device *adev)
|
||||
{
|
||||
if (adev->irq.ih_funcs == NULL)
|
||||
adev->irq.ih_funcs = &cz_ih_funcs;
|
||||
adev->irq.ih_funcs = &cz_ih_funcs;
|
||||
}
|
||||
|
||||
const struct amdgpu_ip_block_version cz_ih_ip_block =
|
||||
|
|
|
@ -3570,8 +3570,7 @@ static const struct amdgpu_display_funcs dce_v10_0_display_funcs = {
|
|||
|
||||
static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev)
|
||||
{
|
||||
if (adev->mode_info.funcs == NULL)
|
||||
adev->mode_info.funcs = &dce_v10_0_display_funcs;
|
||||
adev->mode_info.funcs = &dce_v10_0_display_funcs;
|
||||
}
|
||||
|
||||
static const struct amdgpu_irq_src_funcs dce_v10_0_crtc_irq_funcs = {
|
||||
|
|
|
@ -3702,8 +3702,7 @@ static const struct amdgpu_display_funcs dce_v11_0_display_funcs = {
|
|||
|
||||
static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev)
|
||||
{
|
||||
if (adev->mode_info.funcs == NULL)
|
||||
adev->mode_info.funcs = &dce_v11_0_display_funcs;
|
||||
adev->mode_info.funcs = &dce_v11_0_display_funcs;
|
||||
}
|
||||
|
||||
static const struct amdgpu_irq_src_funcs dce_v11_0_crtc_irq_funcs = {
|
||||
|
|
|
@ -3376,8 +3376,7 @@ static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
|
|||
|
||||
static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev)
|
||||
{
|
||||
if (adev->mode_info.funcs == NULL)
|
||||
adev->mode_info.funcs = &dce_v6_0_display_funcs;
|
||||
adev->mode_info.funcs = &dce_v6_0_display_funcs;
|
||||
}
|
||||
|
||||
static const struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = {
|
||||
|
|
|
@ -3458,8 +3458,7 @@ static const struct amdgpu_display_funcs dce_v8_0_display_funcs = {
|
|||
|
||||
static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev)
|
||||
{
|
||||
if (adev->mode_info.funcs == NULL)
|
||||
adev->mode_info.funcs = &dce_v8_0_display_funcs;
|
||||
adev->mode_info.funcs = &dce_v8_0_display_funcs;
|
||||
}
|
||||
|
||||
static const struct amdgpu_irq_src_funcs dce_v8_0_crtc_irq_funcs = {
|
||||
|
|
|
@ -649,8 +649,7 @@ static const struct amdgpu_display_funcs dce_virtual_display_funcs = {
|
|||
|
||||
static void dce_virtual_set_display_funcs(struct amdgpu_device *adev)
|
||||
{
|
||||
if (adev->mode_info.funcs == NULL)
|
||||
adev->mode_info.funcs = &dce_virtual_display_funcs;
|
||||
adev->mode_info.funcs = &dce_virtual_display_funcs;
|
||||
}
|
||||
|
||||
static int dce_virtual_pageflip(struct amdgpu_device *adev,
|
||||
|
|
|
@ -1180,8 +1180,7 @@ static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
|
|||
|
||||
static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev)
|
||||
{
|
||||
if (adev->gmc.gmc_funcs == NULL)
|
||||
adev->gmc.gmc_funcs = &gmc_v6_0_gmc_funcs;
|
||||
adev->gmc.gmc_funcs = &gmc_v6_0_gmc_funcs;
|
||||
}
|
||||
|
||||
static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
|
||||
|
|
|
@ -1388,8 +1388,7 @@ static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
|
|||
|
||||
static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev)
|
||||
{
|
||||
if (adev->gmc.gmc_funcs == NULL)
|
||||
adev->gmc.gmc_funcs = &gmc_v7_0_gmc_funcs;
|
||||
adev->gmc.gmc_funcs = &gmc_v7_0_gmc_funcs;
|
||||
}
|
||||
|
||||
static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
|
||||
|
|
|
@ -1733,8 +1733,7 @@ static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
|
|||
|
||||
static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev)
|
||||
{
|
||||
if (adev->gmc.gmc_funcs == NULL)
|
||||
adev->gmc.gmc_funcs = &gmc_v8_0_gmc_funcs;
|
||||
adev->gmc.gmc_funcs = &gmc_v8_0_gmc_funcs;
|
||||
}
|
||||
|
||||
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
|
||||
|
|
|
@ -593,8 +593,7 @@ static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
|
|||
|
||||
static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
|
||||
{
|
||||
if (adev->gmc.gmc_funcs == NULL)
|
||||
adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
|
||||
adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
|
||||
}
|
||||
|
||||
static int gmc_v9_0_early_init(void *handle)
|
||||
|
|
|
@ -447,8 +447,7 @@ static const struct amdgpu_ih_funcs iceland_ih_funcs = {
|
|||
|
||||
static void iceland_ih_set_interrupt_funcs(struct amdgpu_device *adev)
|
||||
{
|
||||
if (adev->irq.ih_funcs == NULL)
|
||||
adev->irq.ih_funcs = &iceland_ih_funcs;
|
||||
adev->irq.ih_funcs = &iceland_ih_funcs;
|
||||
}
|
||||
|
||||
const struct amdgpu_ip_block_version iceland_ih_ip_block =
|
||||
|
|
|
@ -1296,10 +1296,8 @@ static const struct amdgpu_buffer_funcs sdma_v2_4_buffer_funcs = {
|
|||
|
||||
static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev)
|
||||
{
|
||||
if (adev->mman.buffer_funcs == NULL) {
|
||||
adev->mman.buffer_funcs = &sdma_v2_4_buffer_funcs;
|
||||
adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
|
||||
}
|
||||
adev->mman.buffer_funcs = &sdma_v2_4_buffer_funcs;
|
||||
adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
|
||||
}
|
||||
|
||||
static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = {
|
||||
|
@ -1315,15 +1313,13 @@ static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev)
|
|||
struct drm_gpu_scheduler *sched;
|
||||
unsigned i;
|
||||
|
||||
if (adev->vm_manager.vm_pte_funcs == NULL) {
|
||||
adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
|
||||
for (i = 0; i < adev->sdma.num_instances; i++) {
|
||||
sched = &adev->sdma.instance[i].ring.sched;
|
||||
adev->vm_manager.vm_pte_rqs[i] =
|
||||
&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
|
||||
}
|
||||
adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
|
||||
adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs;
|
||||
for (i = 0; i < adev->sdma.num_instances; i++) {
|
||||
sched = &adev->sdma.instance[i].ring.sched;
|
||||
adev->vm_manager.vm_pte_rqs[i] =
|
||||
&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
|
||||
}
|
||||
adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
|
||||
}
|
||||
|
||||
const struct amdgpu_ip_block_version sdma_v2_4_ip_block =
|
||||
|
|
|
@ -1736,10 +1736,8 @@ static const struct amdgpu_buffer_funcs sdma_v3_0_buffer_funcs = {
|
|||
|
||||
static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev)
|
||||
{
|
||||
if (adev->mman.buffer_funcs == NULL) {
|
||||
adev->mman.buffer_funcs = &sdma_v3_0_buffer_funcs;
|
||||
adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
|
||||
}
|
||||
adev->mman.buffer_funcs = &sdma_v3_0_buffer_funcs;
|
||||
adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
|
||||
}
|
||||
|
||||
static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = {
|
||||
|
@ -1755,15 +1753,13 @@ static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
|
|||
struct drm_gpu_scheduler *sched;
|
||||
unsigned i;
|
||||
|
||||
if (adev->vm_manager.vm_pte_funcs == NULL) {
|
||||
adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
|
||||
for (i = 0; i < adev->sdma.num_instances; i++) {
|
||||
sched = &adev->sdma.instance[i].ring.sched;
|
||||
adev->vm_manager.vm_pte_rqs[i] =
|
||||
&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
|
||||
}
|
||||
adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
|
||||
adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
|
||||
for (i = 0; i < adev->sdma.num_instances; i++) {
|
||||
sched = &adev->sdma.instance[i].ring.sched;
|
||||
adev->vm_manager.vm_pte_rqs[i] =
|
||||
&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
|
||||
}
|
||||
adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
|
||||
}
|
||||
|
||||
const struct amdgpu_ip_block_version sdma_v3_0_ip_block =
|
||||
|
|
|
@ -1807,10 +1807,8 @@ static const struct amdgpu_buffer_funcs sdma_v4_0_buffer_funcs = {
|
|||
|
||||
static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev)
|
||||
{
|
||||
if (adev->mman.buffer_funcs == NULL) {
|
||||
adev->mman.buffer_funcs = &sdma_v4_0_buffer_funcs;
|
||||
adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
|
||||
}
|
||||
adev->mman.buffer_funcs = &sdma_v4_0_buffer_funcs;
|
||||
adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
|
||||
}
|
||||
|
||||
static const struct amdgpu_vm_pte_funcs sdma_v4_0_vm_pte_funcs = {
|
||||
|
@ -1826,15 +1824,13 @@ static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)
|
|||
struct drm_gpu_scheduler *sched;
|
||||
unsigned i;
|
||||
|
||||
if (adev->vm_manager.vm_pte_funcs == NULL) {
|
||||
adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs;
|
||||
for (i = 0; i < adev->sdma.num_instances; i++) {
|
||||
sched = &adev->sdma.instance[i].ring.sched;
|
||||
adev->vm_manager.vm_pte_rqs[i] =
|
||||
&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
|
||||
}
|
||||
adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
|
||||
adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs;
|
||||
for (i = 0; i < adev->sdma.num_instances; i++) {
|
||||
sched = &adev->sdma.instance[i].ring.sched;
|
||||
adev->vm_manager.vm_pte_rqs[i] =
|
||||
&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
|
||||
}
|
||||
adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
|
||||
}
|
||||
|
||||
const struct amdgpu_ip_block_version sdma_v4_0_ip_block = {
|
||||
|
|
|
@ -863,10 +863,8 @@ static const struct amdgpu_buffer_funcs si_dma_buffer_funcs = {
|
|||
|
||||
static void si_dma_set_buffer_funcs(struct amdgpu_device *adev)
|
||||
{
|
||||
if (adev->mman.buffer_funcs == NULL) {
|
||||
adev->mman.buffer_funcs = &si_dma_buffer_funcs;
|
||||
adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
|
||||
}
|
||||
adev->mman.buffer_funcs = &si_dma_buffer_funcs;
|
||||
adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
|
||||
}
|
||||
|
||||
static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
|
||||
|
@ -882,15 +880,13 @@ static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
|
|||
struct drm_gpu_scheduler *sched;
|
||||
unsigned i;
|
||||
|
||||
if (adev->vm_manager.vm_pte_funcs == NULL) {
|
||||
adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs;
|
||||
for (i = 0; i < adev->sdma.num_instances; i++) {
|
||||
sched = &adev->sdma.instance[i].ring.sched;
|
||||
adev->vm_manager.vm_pte_rqs[i] =
|
||||
&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
|
||||
}
|
||||
adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
|
||||
adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs;
|
||||
for (i = 0; i < adev->sdma.num_instances; i++) {
|
||||
sched = &adev->sdma.instance[i].ring.sched;
|
||||
adev->vm_manager.vm_pte_rqs[i] =
|
||||
&sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
|
||||
}
|
||||
adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
|
||||
}
|
||||
|
||||
const struct amdgpu_ip_block_version si_dma_ip_block =
|
||||
|
|
|
@ -308,8 +308,7 @@ static const struct amdgpu_ih_funcs si_ih_funcs = {
|
|||
|
||||
static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev)
|
||||
{
|
||||
if (adev->irq.ih_funcs == NULL)
|
||||
adev->irq.ih_funcs = &si_ih_funcs;
|
||||
adev->irq.ih_funcs = &si_ih_funcs;
|
||||
}
|
||||
|
||||
const struct amdgpu_ip_block_version si_ih_ip_block =
|
||||
|
|
|
@ -513,8 +513,7 @@ static const struct amdgpu_ih_funcs tonga_ih_funcs = {
|
|||
|
||||
static void tonga_ih_set_interrupt_funcs(struct amdgpu_device *adev)
|
||||
{
|
||||
if (adev->irq.ih_funcs == NULL)
|
||||
adev->irq.ih_funcs = &tonga_ih_funcs;
|
||||
adev->irq.ih_funcs = &tonga_ih_funcs;
|
||||
}
|
||||
|
||||
const struct amdgpu_ip_block_version tonga_ih_ip_block =
|
||||
|
|
|
@ -494,8 +494,7 @@ static const struct amdgpu_ih_funcs vega10_ih_funcs = {
|
|||
|
||||
static void vega10_ih_set_interrupt_funcs(struct amdgpu_device *adev)
|
||||
{
|
||||
if (adev->irq.ih_funcs == NULL)
|
||||
adev->irq.ih_funcs = &vega10_ih_funcs;
|
||||
adev->irq.ih_funcs = &vega10_ih_funcs;
|
||||
}
|
||||
|
||||
const struct amdgpu_ip_block_version vega10_ih_ip_block =
|
||||
|
|
Loading…
Reference in New Issue