drm/amdgpu: simplify and cleanup setting the dma mask
Use dma_set_mask_and_coherent to set both masks in one go, and remove the
no longer required fallback, as the kernel now always accepts larger than
required DMA masks. Fail the driver probe if we can't set the DMA mask, as
that means the system can only support a larger mask.

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 03127c58b9
commit 244511f386
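For context, here is a minimal sketch of the pattern every GMC version is converted to below. The helper name example_set_dma_mask and the choice of a 44-bit mask are illustrative only (the patch uses 40 or 44 bits depending on the ASIC); dma_set_mask_and_coherent(), DMA_BIT_MASK() and dev_warn() are the real kernel APIs that appear in the diff. One call now sets both the streaming and the coherent mask, and a failure simply fails the probe instead of retrying with a 32-bit fallback.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/*
 * Hypothetical helper, not part of the patch: shows the new single-call
 * pattern.  dma_set_mask_and_coherent() sets the streaming and the
 * coherent DMA mask together; if it fails, the platform needs a larger
 * mask than the device supports, so the probe just gives up.
 */
static int example_set_dma_mask(struct device *dev)
{
	int r;

	/* replaces the pci_set_dma_mask()/pci_set_consistent_dma_mask() pair */
	r = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(44));
	if (r) {
		dev_warn(dev, "No suitable DMA available\n");
		return r;	/* fail the probe, no 32-bit fallback */
	}
	return 0;
}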
drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -789,7 +789,6 @@ struct amdgpu_device {
 	int				usec_timeout;
 	const struct amdgpu_asic_funcs	*asic_funcs;
 	bool				shutdown;
-	bool				need_dma32;
 	bool				need_swiotlb;
 	bool				accel_working;
 	struct notifier_block		acpi_nb;
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -592,7 +592,6 @@ static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
 static int gmc_v10_0_sw_init(void *handle)
 {
 	int r;
-	int dma_bits;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	gfxhub_v2_0_init(adev);
@@ -642,26 +641,10 @@ static int gmc_v10_0_sw_init(void *handle)
 	else
 		adev->gmc.stolen_size = 9 * 1024 *1024;
 
-	/*
-	 * Set DMA mask + need_dma32 flags.
-	 * PCIE - can handle 44-bits.
-	 * IGP - can handle 44-bits
-	 * PCI - dma32 for legacy pci gart, 44 bits on navi10
-	 */
-	adev->need_dma32 = false;
-	dma_bits = adev->need_dma32 ? 32 : 44;
-
-	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
+	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
 	if (r) {
-		adev->need_dma32 = true;
-		dma_bits = 32;
 		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
-	}
-
-	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
-	if (r) {
-		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
-		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
+		return r;
 	}
 
 	r = gmc_v10_0_mc_init(adev);
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -839,7 +839,6 @@ static unsigned gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev)
 static int gmc_v6_0_sw_init(void *handle)
 {
 	int r;
-	int dma_bits;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	if (adev->flags & AMD_IS_APU) {
@@ -862,20 +861,12 @@ static int gmc_v6_0_sw_init(void *handle)
 
 	adev->gmc.mc_mask = 0xffffffffffULL;
 
-	adev->need_dma32 = false;
-	dma_bits = adev->need_dma32 ? 32 : 40;
-	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
+	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
 	if (r) {
-		adev->need_dma32 = true;
-		dma_bits = 32;
 		dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
+		return r;
 	}
-	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
-	if (r) {
-		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
-		dev_warn(adev->dev, "amdgpu: No coherent DMA available.\n");
-	}
-	adev->need_swiotlb = drm_need_swiotlb(dma_bits);
+	adev->need_swiotlb = drm_need_swiotlb(44);
 
 	r = gmc_v6_0_init_microcode(adev);
 	if (r) {
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -959,7 +959,6 @@ static unsigned gmc_v7_0_get_vbios_fb_size(struct amdgpu_device *adev)
 static int gmc_v7_0_sw_init(void *handle)
 {
 	int r;
-	int dma_bits;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	if (adev->flags & AMD_IS_APU) {
@@ -990,25 +989,12 @@ static int gmc_v7_0_sw_init(void *handle)
 	 */
 	adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
 
-	/* set DMA mask + need_dma32 flags.
-	 * PCIE - can handle 40-bits.
-	 * IGP - can handle 40-bits
-	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
-	 */
-	adev->need_dma32 = false;
-	dma_bits = adev->need_dma32 ? 32 : 40;
-	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
+	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
 	if (r) {
-		adev->need_dma32 = true;
-		dma_bits = 32;
 		pr_warn("amdgpu: No suitable DMA available\n");
+		return r;
 	}
-	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
-	if (r) {
-		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
-		pr_warn("amdgpu: No coherent DMA available\n");
-	}
-	adev->need_swiotlb = drm_need_swiotlb(dma_bits);
+	adev->need_swiotlb = drm_need_swiotlb(40);
 
 	r = gmc_v7_0_init_microcode(adev);
 	if (r) {
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1079,7 +1079,6 @@ static unsigned gmc_v8_0_get_vbios_fb_size(struct amdgpu_device *adev)
 static int gmc_v8_0_sw_init(void *handle)
 {
 	int r;
-	int dma_bits;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	if (adev->flags & AMD_IS_APU) {
@@ -1116,25 +1115,12 @@ static int gmc_v8_0_sw_init(void *handle)
 	 */
 	adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
 
-	/* set DMA mask + need_dma32 flags.
-	 * PCIE - can handle 40-bits.
-	 * IGP - can handle 40-bits
-	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
-	 */
-	adev->need_dma32 = false;
-	dma_bits = adev->need_dma32 ? 32 : 40;
-	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
+	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
 	if (r) {
-		adev->need_dma32 = true;
-		dma_bits = 32;
 		pr_warn("amdgpu: No suitable DMA available\n");
+		return r;
 	}
-	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
-	if (r) {
-		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
-		pr_warn("amdgpu: No coherent DMA available\n");
-	}
-	adev->need_swiotlb = drm_need_swiotlb(dma_bits);
+	adev->need_swiotlb = drm_need_swiotlb(40);
 
 	r = gmc_v8_0_init_microcode(adev);
 	if (r) {
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1090,7 +1090,6 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
 static int gmc_v9_0_sw_init(void *handle)
 {
 	int r;
-	int dma_bits;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	gfxhub_v1_0_init(adev);
@@ -1174,25 +1173,12 @@ static int gmc_v9_0_sw_init(void *handle)
 	 */
 	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
 
-	/* set DMA mask + need_dma32 flags.
-	 * PCIE - can handle 44-bits.
-	 * IGP - can handle 44-bits
-	 * PCI - dma32 for legacy pci gart, 44 bits on vega10
-	 */
-	adev->need_dma32 = false;
-	dma_bits = adev->need_dma32 ? 32 : 44;
-	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
+	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
 	if (r) {
-		adev->need_dma32 = true;
-		dma_bits = 32;
 		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
+		return r;
 	}
-	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
-	if (r) {
-		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
-		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
-	}
-	adev->need_swiotlb = drm_need_swiotlb(dma_bits);
+	adev->need_swiotlb = drm_need_swiotlb(44);
 
 	if (adev->gmc.xgmi.supported) {
 		r = gfxhub_v1_1_get_xgmi_info(adev);