Merge branch 'stable/for-linus-5.10-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb

Pull swiotlb fixes from Konrad Rzeszutek Wilk:
 "Two tiny fixes for issues that make drivers under Xen unhappy under
  certain conditions"

* 'stable/for-linus-5.10-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb:
  swiotlb: remove the tbl_dma_addr argument to swiotlb_tbl_map_single
  swiotlb: fix "x86: Don't panic if can not alloc buffer for swiotlb"
Author: Linus Torvalds
Date:   2020-11-11 14:15:06 -08:00
Commit: 3d5e28bff7

4 changed files with 17 additions and 23 deletions
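For reference, the first fix simplifies the swiotlb_tbl_map_single() calling convention: callers no longer pass the bounce table's DMA address, since the function can derive it from io_tlb_start itself. A caller-side sketch of the change (illustrative only; the real call sites are in the hunks below):

	/* Before: each caller computed the table's DMA address by hand. */
	tlb_addr = swiotlb_tbl_map_single(dev,
			phys_to_dma_unencrypted(dev, io_tlb_start),
			paddr, size, aligned_size, dir, attrs);

	/* After: the helper derives tbl_dma_addr internally. */
	tlb_addr = swiotlb_tbl_map_single(dev, paddr, size,
			aligned_size, dir, attrs);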

--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c

@@ -3818,9 +3818,8 @@ bounce_map_single(struct device *dev, phys_addr_t paddr, size_t size,
 	 * page aligned, we don't need to use a bounce page.
 	 */
 	if (!IS_ALIGNED(paddr | size, VTD_PAGE_SIZE)) {
-		tlb_addr = swiotlb_tbl_map_single(dev,
-				phys_to_dma_unencrypted(dev, io_tlb_start),
-				paddr, size, aligned_size, dir, attrs);
+		tlb_addr = swiotlb_tbl_map_single(dev, paddr, size,
+				aligned_size, dir, attrs);
 		if (tlb_addr == DMA_MAPPING_ERROR) {
 			goto swiotlb_error;
 		} else {

--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c

@@ -395,8 +395,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	 */
 	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
 
-	map = swiotlb_tbl_map_single(dev, virt_to_phys(xen_io_tlb_start),
-				     phys, size, size, dir, attrs);
+	map = swiotlb_tbl_map_single(dev, phys, size, size, dir, attrs);
 	if (map == (phys_addr_t)DMA_MAPPING_ERROR)
 		return DMA_MAPPING_ERROR;
 

--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h

@@ -45,13 +45,9 @@ enum dma_sync_target {
 	SYNC_FOR_DEVICE = 1,
 };
 
-extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
-					  dma_addr_t tbl_dma_addr,
-					  phys_addr_t phys,
-					  size_t mapping_size,
-					  size_t alloc_size,
-					  enum dma_data_direction dir,
-					  unsigned long attrs);
+phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
+		size_t mapping_size, size_t alloc_size,
+		enum dma_data_direction dir, unsigned long attrs);
 
 extern void swiotlb_tbl_unmap_single(struct device *hwdev,
 				     phys_addr_t tlb_addr,

--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c

@@ -229,6 +229,7 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 		io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
 	}
 	io_tlb_index = 0;
+	no_iotlb_memory = false;
 
 	if (verbose)
 		swiotlb_print_info();
@@ -260,9 +261,11 @@ swiotlb_init(int verbose)
 	if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
 		return;
 
-	if (io_tlb_start)
+	if (io_tlb_start) {
 		memblock_free_early(io_tlb_start,
 				    PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
+		io_tlb_start = 0;
+	}
 	pr_warn("Cannot allocate buffer");
 	no_iotlb_memory = true;
 }
@@ -360,6 +363,7 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 		io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
 	}
 	io_tlb_index = 0;
+	no_iotlb_memory = false;
 
 	swiotlb_print_info();
 
@@ -441,14 +445,11 @@ static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
 	}
 }
 
-phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
-				   dma_addr_t tbl_dma_addr,
-				   phys_addr_t orig_addr,
-				   size_t mapping_size,
-				   size_t alloc_size,
-				   enum dma_data_direction dir,
-				   unsigned long attrs)
+phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t orig_addr,
+		size_t mapping_size, size_t alloc_size,
+		enum dma_data_direction dir, unsigned long attrs)
 {
+	dma_addr_t tbl_dma_addr = phys_to_dma_unencrypted(hwdev, io_tlb_start);
 	unsigned long flags;
 	phys_addr_t tlb_addr;
 	unsigned int nslots, stride, index, wrap;
@@ -667,9 +668,8 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
 	trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size,
 			      swiotlb_force);
 
-	swiotlb_addr = swiotlb_tbl_map_single(dev,
-			phys_to_dma_unencrypted(dev, io_tlb_start),
-			paddr, size, size, dir, attrs);
+	swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, size, dir,
+			attrs);
 	if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
 		return DMA_MAPPING_ERROR;
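The second fix repairs the early-boot failure path seen in the kernel/dma/swiotlb.c hunks above: if the memblock allocation fails, swiotlb_init() now clears io_tlb_start so later code cannot touch the freed buffer, and both init-with-table paths reset no_iotlb_memory so a successful late initialization actually clears the "no memory" state instead of leaving swiotlb_tbl_map_single() primed to panic. A rough sketch of the recovery sequence this enables (illustrative ordering under assumed variable names, not verbatim kernel code):

	/* Early boot: the memblock allocation fails. */
	swiotlb_init(1);	/* pr_warn()s, sets no_iotlb_memory = true,
				 * and now also resets io_tlb_start to 0   */

	/* Later, e.g. from a driver: retry with a buffer we allocate. */
	char *vstart = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
	if (vstart && !swiotlb_late_init_with_tbl(vstart, nslabs))
		return 0;	/* success: no_iotlb_memory is false again,
				 * so mapping through swiotlb works        */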