A few small dma-mapping fixes for Linux 4.17-rc3:

 - don't loop to try GFP_DMA allocations if ZONE_DMA is not actually
   enabled (regression in 4.16)
 - don't try to do virt_to_page before we know we actually have a valid
   page in dma_common_mmap
 - a comment fixup related to the above fix

-----BEGIN PGP SIGNATURE-----

iQI/BAABCgApFiEEgdbnc3r/njty3Iq9D55TZVIEUYMFAlrgD/MLHGhjaEBsc3Qu
ZGUACgkQD55TZVIEUYMcZA//S4F6uOyVDF5PNhA15A2m8StATurE38bOExpjJaAC
FLBt150+mZp/XNRIYAvcqbv5NftFmsu1Cq7svSk62V7FcxPpmC2/EYmSGR+QXYAz
PASUjsGkiPkeh+WpCcarfAGKSAPW0GW+I0TIOlU+/r3S5ez3loCoOFicA6syXedt
0Ntm4CLTQRXHC/yZWkAa3SJ+MAcTP5JxMpvnxXa4/gvA4q9Xu9R9I0KqLuQt0kmP
xzti3BqVBI/Or+4EkNzTjYUs5hBtCdOkViHzLVMQp8KadFPgAIN8rVZUQjdSylnF
PPzmB5PiTErQ2DQ0cYU0+9r9y/xUlR5e+C7gGh16RGfvnqcuCPalZmvfv2xPv2jm
4S/ekgkeqBOKiF7E3JetqSy/LDVRmweqgJRQZnbK9MFFz1BY+TBtYLyQMwvmgZK8
PcFDPXNHiS+RklEO++gp5Dh+7spFSP14fDjSSEFA93y7MsGcqJ4cTq1cLKWpuBBo
Bq6JB0lW8ZYx448e3T34pgui1tSN/mSrrJckrmbCuJ/webwEbbxQcHBGih+gSC4z
dziaP9bwr8svSq712xUr/2QNDnL2NVa4e/7F+j3mU59Uhe0ADWuMygpwq0gJVRCP
VMUD0t+OMUla7t41IX+MWVDNjzD1QhtD3gaxA1B+tnCz9DuJDL62RAwfXRzkvzKw
ios=
=/UUj
-----END PGP SIGNATURE-----

Merge tag 'dma-mapping-4.17-3' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping fixes from Christoph Hellwig:
 "A few small dma-mapping fixes for Linux 4.17-rc3:

  - don't loop to try GFP_DMA allocations if ZONE_DMA is not actually
    enabled (regression in 4.16)
  - don't try to do virt_to_page before we know we actually have a valid
    page in dma_common_mmap
  - a comment fixup related to the above fix"

* tag 'dma-mapping-4.17-3' of git://git.infradead.org/users/hch/dma-mapping:
  dma-mapping: postpone cpu addr translation on mmap
  dma-coherent: clarify dma_mmap_from_dev_coherent documentation
  dma-direct: don't retry allocation for no-op GFP_DMA
commit 26ed24e429
@@ -312,8 +312,9 @@ static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem,
  * This checks whether the memory was allocated from the per-device
  * coherent memory pool and if so, maps that memory to the provided vma.
  *
- * Returns 1 if we correctly mapped the memory, or 0 if the caller should
- * proceed with mapping memory from generic pools.
+ * Returns 1 if @vaddr belongs to the device coherent pool and the caller
+ * should return @ret, or 0 if they should proceed with mapping memory from
+ * generic areas.
  */
 int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
 			   void *vaddr, size_t size, int *ret)
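The rewritten comment pins down the calling convention: a return of 1 means the coherent pool owned the buffer and *ret already holds the value to hand back. A minimal sketch of a conforming caller follows; the example_mmap name and the -ENXIO default are illustrative, while the helper call itself mirrors dma_common_mmap() in the hunks below.

/* Illustrative caller of dma_mmap_from_dev_coherent(); the function
 * name and default error value are assumptions, the pattern is the
 * one the clarified comment documents. */
static int example_mmap(struct device *dev, struct vm_area_struct *vma,
			void *cpu_addr, size_t size)
{
	int ret = -ENXIO;

	/* Returns 1 if cpu_addr belongs to the device coherent pool;
	 * in that case *ret must be returned unchanged. */
	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	/* 0 means "not pool memory": map from generic areas instead. */
	return ret;
}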
@@ -226,7 +226,6 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 #ifndef CONFIG_ARCH_NO_COHERENT_DMA_MMAP
 	unsigned long user_count = vma_pages(vma);
 	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
 	unsigned long off = vma->vm_pgoff;
 
 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
@@ -234,12 +233,11 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
 
-	if (off < count && user_count <= (count - off)) {
+	if (off < count && user_count <= (count - off))
 		ret = remap_pfn_range(vma, vma->vm_start,
-				      pfn + off,
+				      page_to_pfn(virt_to_page(cpu_addr)) + off,
 				      user_count << PAGE_SHIFT,
 				      vma->vm_page_prot);
-	}
 #endif	/* !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */
 
 	return ret;
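The point of the reordering above: virt_to_page() is only meaningful for addresses in the kernel linear mapping, while a per-device coherent pool typically lives in ioremap()ed memory. Computing the pfn up front therefore evaluated virt_to_page() on an address it may not be valid for, even when dma_mmap_from_dev_coherent() went on to handle the buffer itself. A condensed sketch of the fixed control flow, simplified from the hunks above with the surrounding declarations elided:

/* Condensed from dma_common_mmap() after the fix: cpu_addr is only
 * translated on the generic path, i.e. once we know it did not come
 * from the per-device coherent pool. */
if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
	return ret;			/* pool memory: never translated */

if (off < count && user_count <= (count - off))
	ret = remap_pfn_range(vma, vma->vm_start,
			      page_to_pfn(virt_to_page(cpu_addr)) + off,
			      user_count << PAGE_SHIFT,
			      vma->vm_page_prot);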
|
@ -84,7 +84,8 @@ void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
|
|||
__free_pages(page, page_order);
|
||||
page = NULL;
|
||||
|
||||
if (dev->coherent_dma_mask < DMA_BIT_MASK(32) &&
|
||||
if (IS_ENABLED(CONFIG_ZONE_DMA) &&
|
||||
dev->coherent_dma_mask < DMA_BIT_MASK(32) &&
|
||||
!(gfp & GFP_DMA)) {
|
||||
gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
|
||||
goto again;
|
||||
|
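IS_ENABLED(CONFIG_ZONE_DMA) evaluates to a compile-time constant 0 when the kernel is built without ZONE_DMA, so the compiler drops the whole retry branch. Without the guard, a failed allocation on such a kernel was retried with GFP_DMA set, which is a no-op there: the second attempt draws from exactly the same zones and returns equally unusable pages. A condensed sketch of the retry logic, simplified from dma_direct_alloc(); the fits_mask() check is a stand-in for the real coherent-mask test:

again:
	page = alloc_pages_node(dev_to_node(dev), gfp, page_order);

	if (page && !fits_mask(dev, page_to_phys(page), size)) {
		/* Allocation landed above the device's coherent mask. */
		__free_pages(page, page_order);
		page = NULL;

		/* Retry in ZONE_DMA only if that zone actually exists;
		 * otherwise GFP_DMA cannot yield lower memory and the
		 * retry would be pointless. */
		if (IS_ENABLED(CONFIG_ZONE_DMA) &&
		    dev->coherent_dma_mask < DMA_BIT_MASK(32) &&
		    !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}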