drm/amdgpu: Create generic DF struct in adev
The only data fabric information the adev struct currently contains is a function pointer table. In the near future, we will be adding some cached DF information into adev. As such, this patch creates a new amdgpu_df struct for adev. Right now, it only contains the old function pointer table, but new fields will be added soon.

Signed-off-by: Joseph Greathouse <Joseph.Greathouse@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 61e50646f0
commit bdf84a80e0
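
For orientation, here is a minimal sketch of the access-pattern change this patch makes. All structure and field names are taken from the diff below; this is illustrative, not a complete header.

    /* Before this patch: the DF function table hangs directly off adev. */
    adev->df_funcs->sw_init(adev);

    /* After this patch: adev embeds a generic DF container that holds the
     * function table and, going forward, cached DF state such as
     * hash_status. */
    struct amdgpu_df {
            struct amdgpu_df_hash_status    hash_status;
            const struct amdgpu_df_funcs    *funcs;
    };

    adev->df.funcs->sw_init(adev);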
@@ -90,6 +90,7 @@
 #include "amdgpu_mes.h"
 #include "amdgpu_umc.h"
 #include "amdgpu_mmhub.h"
+#include "amdgpu_df.h"
 
 #define MAX_GPU_INSTANCE 16
 
@@ -664,29 +665,6 @@ struct amdgpu_mmio_remap {
 	resource_size_t bus_addr;
 };
 
-struct amdgpu_df_funcs {
-	void (*sw_init)(struct amdgpu_device *adev);
-	void (*sw_fini)(struct amdgpu_device *adev);
-	void (*enable_broadcast_mode)(struct amdgpu_device *adev,
-				      bool enable);
-	u32 (*get_fb_channel_number)(struct amdgpu_device *adev);
-	u32 (*get_hbm_channel_number)(struct amdgpu_device *adev);
-	void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
-						 bool enable);
-	void (*get_clockgating_state)(struct amdgpu_device *adev,
-				      u32 *flags);
-	void (*enable_ecc_force_par_wr_rmw)(struct amdgpu_device *adev,
-					    bool enable);
-	int (*pmc_start)(struct amdgpu_device *adev, uint64_t config,
-			 int is_enable);
-	int (*pmc_stop)(struct amdgpu_device *adev, uint64_t config,
-			int is_disable);
-	void (*pmc_get_count)(struct amdgpu_device *adev, uint64_t config,
-			      uint64_t *count);
-	uint64_t (*get_fica)(struct amdgpu_device *adev, uint32_t ficaa_val);
-	void (*set_fica)(struct amdgpu_device *adev, uint32_t ficaa_val,
-			 uint32_t ficadl_val, uint32_t ficadh_val);
-};
 /* Define the HW IP blocks will be used in driver , add more if necessary */
 enum amd_hw_ip_block_type {
 	GC_HWIP = 1,
@@ -930,6 +908,9 @@ struct amdgpu_device {
 	bool enable_mes;
 	struct amdgpu_mes mes;
 
+	/* df */
+	struct amdgpu_df df;
+
 	struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM];
 	int num_ip_blocks;
 	struct mutex mn_lock;
@@ -943,8 +924,6 @@ struct amdgpu_device {
 	/* soc15 register offset based on ip, instance and segment */
 	uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
 
-	const struct amdgpu_df_funcs *df_funcs;
-
 	/* delayed work_func for deferring clockgating during resume */
 	struct delayed_work delayed_init_work;
 
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_DF_H__
+#define __AMDGPU_DF_H__
+
+struct amdgpu_df_hash_status {
+	bool hash_64k;
+	bool hash_2m;
+	bool hash_1g;
+};
+
+struct amdgpu_df_funcs {
+	void (*sw_init)(struct amdgpu_device *adev);
+	void (*sw_fini)(struct amdgpu_device *adev);
+	void (*enable_broadcast_mode)(struct amdgpu_device *adev,
+				      bool enable);
+	u32 (*get_fb_channel_number)(struct amdgpu_device *adev);
+	u32 (*get_hbm_channel_number)(struct amdgpu_device *adev);
+	void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
+						 bool enable);
+	void (*get_clockgating_state)(struct amdgpu_device *adev,
+				      u32 *flags);
+	void (*enable_ecc_force_par_wr_rmw)(struct amdgpu_device *adev,
+					    bool enable);
+	int (*pmc_start)(struct amdgpu_device *adev, uint64_t config,
+			 int is_enable);
+	int (*pmc_stop)(struct amdgpu_device *adev, uint64_t config,
+			int is_disable);
+	void (*pmc_get_count)(struct amdgpu_device *adev, uint64_t config,
+			      uint64_t *count);
+	uint64_t (*get_fica)(struct amdgpu_device *adev, uint32_t ficaa_val);
+	void (*set_fica)(struct amdgpu_device *adev, uint32_t ficaa_val,
+			 uint32_t ficadl_val, uint32_t ficadh_val);
+};
+
+struct amdgpu_df {
+	struct amdgpu_df_hash_status hash_status;
+	const struct amdgpu_df_funcs *funcs;
+};
+
+#endif /* __AMDGPU_DF_H__ */

@@ -74,9 +74,9 @@ static void amdgpu_perf_start(struct perf_event *event, int flags)
 	switch (pe->pmu_perf_type) {
 	case PERF_TYPE_AMDGPU_DF:
 		if (!(flags & PERF_EF_RELOAD))
-			pe->adev->df_funcs->pmc_start(pe->adev, hwc->conf, 1);
+			pe->adev->df.funcs->pmc_start(pe->adev, hwc->conf, 1);
 
-		pe->adev->df_funcs->pmc_start(pe->adev, hwc->conf, 0);
+		pe->adev->df.funcs->pmc_start(pe->adev, hwc->conf, 0);
 		break;
 	default:
 		break;
@@ -101,7 +101,7 @@ static void amdgpu_perf_read(struct perf_event *event)
 
 	switch (pe->pmu_perf_type) {
 	case PERF_TYPE_AMDGPU_DF:
-		pe->adev->df_funcs->pmc_get_count(pe->adev, hwc->conf,
+		pe->adev->df.funcs->pmc_get_count(pe->adev, hwc->conf,
 						  &count);
 		break;
 	default:
@@ -126,7 +126,7 @@ static void amdgpu_perf_stop(struct perf_event *event, int flags)
 
 	switch (pe->pmu_perf_type) {
 	case PERF_TYPE_AMDGPU_DF:
-		pe->adev->df_funcs->pmc_stop(pe->adev, hwc->conf, 0);
+		pe->adev->df.funcs->pmc_stop(pe->adev, hwc->conf, 0);
 		break;
 	default:
 		break;
@@ -156,7 +156,7 @@ static int amdgpu_perf_add(struct perf_event *event, int flags)
 
 	switch (pe->pmu_perf_type) {
 	case PERF_TYPE_AMDGPU_DF:
-		retval = pe->adev->df_funcs->pmc_start(pe->adev, hwc->conf, 1);
+		retval = pe->adev->df.funcs->pmc_start(pe->adev, hwc->conf, 1);
 		break;
 	default:
 		return 0;
@@ -184,7 +184,7 @@ static void amdgpu_perf_del(struct perf_event *event, int flags)
 
 	switch (pe->pmu_perf_type) {
 	case PERF_TYPE_AMDGPU_DF:
-		pe->adev->df_funcs->pmc_stop(pe->adev, hwc->conf, 1);
+		pe->adev->df.funcs->pmc_stop(pe->adev, hwc->conf, 1);
 		break;
 	default:
 		break;

@@ -146,16 +146,16 @@ static ssize_t amdgpu_xgmi_show_error(struct device *dev,
 	ficaa_pie_ctl_in = AMDGPU_XGMI_SET_FICAA(0x200);
 	ficaa_pie_status_in = AMDGPU_XGMI_SET_FICAA(0x208);
 
-	fica_out = adev->df_funcs->get_fica(adev, ficaa_pie_ctl_in);
+	fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_ctl_in);
 	if (fica_out != 0x1f)
 		pr_err("xGMI error counters not enabled!\n");
 
-	fica_out = adev->df_funcs->get_fica(adev, ficaa_pie_status_in);
+	fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_status_in);
 
 	if ((fica_out & 0xffff) == 2)
 		error_count = ((fica_out >> 62) & 0x1) + (fica_out >> 63);
 
-	adev->df_funcs->set_fica(adev, ficaa_pie_status_in, 0, 0);
+	adev->df.funcs->set_fica(adev, ficaa_pie_status_in, 0, 0);
 
 	return snprintf(buf, PAGE_SIZE, "%d\n", error_count);
 }

@@ -66,7 +66,7 @@ static u32 df_v1_7_get_hbm_channel_number(struct amdgpu_device *adev)
 {
 	int fb_channel_number;
 
-	fb_channel_number = adev->df_funcs->get_fb_channel_number(adev);
+	fb_channel_number = adev->df.funcs->get_fb_channel_number(adev);
 
 	return df_v1_7_channel_number[fb_channel_number];
 }
@@ -77,7 +77,7 @@ static void df_v1_7_update_medium_grain_clock_gating(struct amdgpu_device *adev,
 	u32 tmp;
 
 	/* Put DF on broadcast mode */
-	adev->df_funcs->enable_broadcast_mode(adev, true);
+	adev->df.funcs->enable_broadcast_mode(adev, true);
 
 	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) {
 		tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater);
@@ -92,7 +92,7 @@ static void df_v1_7_update_medium_grain_clock_gating(struct amdgpu_device *adev,
 	}
 
 	/* Exit boradcast mode */
-	adev->df_funcs->enable_broadcast_mode(adev, false);
+	adev->df.funcs->enable_broadcast_mode(adev, false);
 }
 
 static void df_v1_7_get_clockgating_state(struct amdgpu_device *adev,

@@ -311,7 +311,7 @@ static u32 df_v3_6_get_hbm_channel_number(struct amdgpu_device *adev)
 {
 	int fb_channel_number;
 
-	fb_channel_number = adev->df_funcs->get_fb_channel_number(adev);
+	fb_channel_number = adev->df.funcs->get_fb_channel_number(adev);
 	if (fb_channel_number >= ARRAY_SIZE(df_v3_6_channel_number))
 		fb_channel_number = 0;
 
@@ -325,7 +325,7 @@ static void df_v3_6_update_medium_grain_clock_gating(struct amdgpu_device *adev,
 
 	if (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG) {
 		/* Put DF on broadcast mode */
-		adev->df_funcs->enable_broadcast_mode(adev, true);
+		adev->df.funcs->enable_broadcast_mode(adev, true);
 
 		if (enable) {
 			tmp = RREG32_SOC15(DF, 0,
@@ -344,7 +344,7 @@ static void df_v3_6_update_medium_grain_clock_gating(struct amdgpu_device *adev,
 		}
 
 		/* Exit broadcast mode */
-		adev->df_funcs->enable_broadcast_mode(adev, false);
+		adev->df.funcs->enable_broadcast_mode(adev, false);
 	}
 }
 

@@ -817,8 +817,8 @@ static int gmc_v9_0_late_init(void *handle)
 	r = amdgpu_atomfirmware_mem_ecc_supported(adev);
 	if (!r) {
 		DRM_INFO("ECC is not present.\n");
-		if (adev->df_funcs->enable_ecc_force_par_wr_rmw)
-			adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false);
+		if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
+			adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
 	} else {
 		DRM_INFO("ECC is active.\n");
 	}
@@ -1023,7 +1023,7 @@ static int gmc_v9_0_sw_init(void *handle)
 		else
 			chansize = 128;
 
-		numchan = adev->df_funcs->get_hbm_channel_number(adev);
+		numchan = adev->df.funcs->get_hbm_channel_number(adev);
 		adev->gmc.vram_width = numchan * chansize;
 	}
 

@@ -677,9 +677,9 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 	}
 
 	if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
-		adev->df_funcs = &df_v3_6_funcs;
+		adev->df.funcs = &df_v3_6_funcs;
 	else
-		adev->df_funcs = &df_v1_7_funcs;
+		adev->df.funcs = &df_v1_7_funcs;
 
 	adev->rev_id = soc15_get_rev_id(adev);
 	adev->nbio.funcs->detect_hw_virt(adev);
@@ -1247,7 +1247,7 @@ static int soc15_common_sw_init(void *handle)
 	if (amdgpu_sriov_vf(adev))
 		xgpu_ai_mailbox_add_irq_id(adev);
 
-	adev->df_funcs->sw_init(adev);
+	adev->df.funcs->sw_init(adev);
 
 	return 0;
 }
@@ -1257,7 +1257,7 @@ static int soc15_common_sw_fini(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	amdgpu_nbio_ras_fini(adev);
-	adev->df_funcs->sw_fini(adev);
+	adev->df.funcs->sw_fini(adev);
 	return 0;
 }
 
@@ -1478,7 +1478,7 @@ static int soc15_common_set_clockgating_state(void *handle,
 				state == AMD_CG_STATE_GATE ? true : false);
 		soc15_update_rom_medium_grain_clock_gating(adev,
 				state == AMD_CG_STATE_GATE ? true : false);
-		adev->df_funcs->update_medium_grain_clock_gating(adev,
+		adev->df.funcs->update_medium_grain_clock_gating(adev,
 				state == AMD_CG_STATE_GATE ? true : false);
 		break;
 	case CHIP_RAVEN:
@@ -1536,7 +1536,7 @@ static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
 	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
 		*flags |= AMD_CG_SUPPORT_ROM_MGCG;
 
-	adev->df_funcs->get_clockgating_state(adev, flags);
+	adev->df.funcs->get_clockgating_state(adev, flags);
 }
 
 static int soc15_common_set_powergating_state(void *handle,