commit 1d0eeac777

Merge branch 'asm-generic-io' of https://github.com/thierryreding/linux into asm-generic

Pull asm-generic/io.h overhaul from Thierry Reding:

* 'asm-generic-io' of https://github.com/thierryreding/linux:
  arm64: Use include/asm-generic/io.h
  ARM: Use include/asm-generic/io.h
  asm-generic/io.h: Implement generic {read,write}s*()
  asm-generic/io.h: Reconcile I/O accessor overrides
  /dev/mem: Use more consistent data types
  Change xlate_dev_{kmem,mem}_ptr() prototypes
  ARM: ixp4xx: Properly override I/O accessors
  ARM: ixp4xx: Fix build with IXP4XX_INDIRECT_PCI
  ARM: ebsa110: Properly override I/O accessors
  ARC: Remove redundant PCI_IOBASE declaration

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
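For context on the "Reconcile I/O accessor overrides" change: every accessor in asm-generic/io.h is now guarded by an #ifndef and paired with a self-referencing #define, so an architecture that supplies its own accessor before including the generic header automatically suppresses the fallback. A minimal sketch of the pattern, assuming a hypothetical arch_io_barrier() ordering hook that is not part of this series:

    /* hypothetical architecture header, included before <asm-generic/io.h> */
    #define __raw_readb __raw_readb
    static inline u8 __raw_readb(const volatile void __iomem *addr)
    {
            u8 val = *(const volatile u8 __force *)addr;

            arch_io_barrier();      /* hypothetical arch-specific ordering hook */
            return val;
    }

    /* generic fallback in <asm-generic/io.h>, compiled out when overridden */
    #ifndef __raw_readb
    #define __raw_readb __raw_readb
    static inline u8 __raw_readb(const volatile void __iomem *addr)
    {
            return *(const volatile u8 __force *)addr;
    }
    #endif

The self-referencing #define is what makes the #ifndef test in the generic header work; the same scheme is applied to the reads*/writes*, ins*/outs* and ioread*/iowrite* families in the hunks below.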
@@ -13,8 +13,6 @@
#include <asm/byteorder.h>
#include <asm/page.h>

#define PCI_IOBASE ((void __iomem *)0)

extern void __iomem *ioremap(unsigned long physaddr, unsigned long size);
extern void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
                                  unsigned long flags);
@ -47,13 +47,13 @@ extern void atomic_io_modify_relaxed(void __iomem *reg, u32 mask, u32 set);
|
|||
* Generic IO read/write. These perform native-endian accesses. Note
|
||||
* that some architectures will want to re-define __raw_{read,write}w.
|
||||
*/
|
||||
extern void __raw_writesb(void __iomem *addr, const void *data, int bytelen);
|
||||
extern void __raw_writesw(void __iomem *addr, const void *data, int wordlen);
|
||||
extern void __raw_writesl(void __iomem *addr, const void *data, int longlen);
|
||||
void __raw_writesb(volatile void __iomem *addr, const void *data, int bytelen);
|
||||
void __raw_writesw(volatile void __iomem *addr, const void *data, int wordlen);
|
||||
void __raw_writesl(volatile void __iomem *addr, const void *data, int longlen);
|
||||
|
||||
extern void __raw_readsb(const void __iomem *addr, void *data, int bytelen);
|
||||
extern void __raw_readsw(const void __iomem *addr, void *data, int wordlen);
|
||||
extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
|
||||
void __raw_readsb(const volatile void __iomem *addr, void *data, int bytelen);
|
||||
void __raw_readsw(const volatile void __iomem *addr, void *data, int wordlen);
|
||||
void __raw_readsl(const volatile void __iomem *addr, void *data, int longlen);
|
||||
|
||||
#if __LINUX_ARM_ARCH__ < 6
|
||||
/*
|
||||
|
@ -69,6 +69,7 @@ extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
|
|||
* writeback addressing modes as these incur a significant performance
|
||||
* overhead (the address generation must be emulated in software).
|
||||
*/
|
||||
#define __raw_writew __raw_writew
|
||||
static inline void __raw_writew(u16 val, volatile void __iomem *addr)
|
||||
{
|
||||
asm volatile("strh %1, %0"
|
||||
|
@ -76,6 +77,7 @@ static inline void __raw_writew(u16 val, volatile void __iomem *addr)
|
|||
: "r" (val));
|
||||
}
|
||||
|
||||
#define __raw_readw __raw_readw
|
||||
static inline u16 __raw_readw(const volatile void __iomem *addr)
|
||||
{
|
||||
u16 val;
|
||||
|
@ -86,6 +88,7 @@ static inline u16 __raw_readw(const volatile void __iomem *addr)
|
|||
}
|
||||
#endif
|
||||
|
||||
#define __raw_writeb __raw_writeb
|
||||
static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
|
||||
{
|
||||
asm volatile("strb %1, %0"
|
||||
|
@ -93,6 +96,7 @@ static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
|
|||
: "r" (val));
|
||||
}
|
||||
|
||||
#define __raw_writel __raw_writel
|
||||
static inline void __raw_writel(u32 val, volatile void __iomem *addr)
|
||||
{
|
||||
asm volatile("str %1, %0"
|
||||
|
@ -100,6 +104,7 @@ static inline void __raw_writel(u32 val, volatile void __iomem *addr)
|
|||
: "r" (val));
|
||||
}
|
||||
|
||||
#define __raw_readb __raw_readb
|
||||
static inline u8 __raw_readb(const volatile void __iomem *addr)
|
||||
{
|
||||
u8 val;
|
||||
|
@ -109,6 +114,7 @@ static inline u8 __raw_readb(const volatile void __iomem *addr)
|
|||
return val;
|
||||
}
|
||||
|
||||
#define __raw_readl __raw_readl
|
||||
static inline u32 __raw_readl(const volatile void __iomem *addr)
|
||||
{
|
||||
u32 val;
|
||||
|
@ -267,20 +273,6 @@ extern int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr);
|
|||
#define insl(p,d,l) __raw_readsl(__io(p),d,l)
|
||||
#endif
|
||||
|
||||
#define outb_p(val,port) outb((val),(port))
|
||||
#define outw_p(val,port) outw((val),(port))
|
||||
#define outl_p(val,port) outl((val),(port))
|
||||
#define inb_p(port) inb((port))
|
||||
#define inw_p(port) inw((port))
|
||||
#define inl_p(port) inl((port))
|
||||
|
||||
#define outsb_p(port,from,len) outsb(port,from,len)
|
||||
#define outsw_p(port,from,len) outsw(port,from,len)
|
||||
#define outsl_p(port,from,len) outsl(port,from,len)
|
||||
#define insb_p(port,to,len) insb(port,to,len)
|
||||
#define insw_p(port,to,len) insw(port,to,len)
|
||||
#define insl_p(port,to,len) insl(port,to,len)
|
||||
|
||||
/*
|
||||
* String version of IO memory access ops:
|
||||
*/
|
||||
|
@ -347,39 +339,41 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
|
|||
#define iounmap __arm_iounmap
|
||||
|
||||
/*
|
||||
* io{read,write}{8,16,32} macros
|
||||
* io{read,write}{16,32}be() macros
|
||||
*/
|
||||
#ifndef ioread8
|
||||
#define ioread8(p) ({ unsigned int __v = __raw_readb(p); __iormb(); __v; })
|
||||
#define ioread16(p) ({ unsigned int __v = le16_to_cpu((__force __le16)__raw_readw(p)); __iormb(); __v; })
|
||||
#define ioread32(p) ({ unsigned int __v = le32_to_cpu((__force __le32)__raw_readl(p)); __iormb(); __v; })
|
||||
#define ioread16be(p) ({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
|
||||
#define ioread32be(p) ({ __u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
|
||||
|
||||
#define ioread16be(p) ({ unsigned int __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
|
||||
#define ioread32be(p) ({ unsigned int __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
|
||||
|
||||
#define iowrite8(v,p) ({ __iowmb(); __raw_writeb(v, p); })
|
||||
#define iowrite16(v,p) ({ __iowmb(); __raw_writew((__force __u16)cpu_to_le16(v), p); })
|
||||
#define iowrite32(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_le32(v), p); })
|
||||
|
||||
#define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force __u16)cpu_to_be16(v), p); })
|
||||
#define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); })
|
||||
|
||||
#define ioread8_rep(p,d,c) __raw_readsb(p,d,c)
|
||||
#define ioread16_rep(p,d,c) __raw_readsw(p,d,c)
|
||||
#define ioread32_rep(p,d,c) __raw_readsl(p,d,c)
|
||||
|
||||
#define iowrite8_rep(p,s,c) __raw_writesb(p,s,c)
|
||||
#define iowrite16_rep(p,s,c) __raw_writesw(p,s,c)
|
||||
#define iowrite32_rep(p,s,c) __raw_writesl(p,s,c)
|
||||
#define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force __u16)cpu_to_be16(v), p); })
|
||||
#define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); })
|
||||
|
||||
#ifndef ioport_map
|
||||
#define ioport_map ioport_map
|
||||
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
|
||||
#endif
|
||||
#ifndef ioport_unmap
|
||||
#define ioport_unmap ioport_unmap
|
||||
extern void ioport_unmap(void __iomem *addr);
|
||||
#endif
|
||||
|
||||
struct pci_dev;
|
||||
|
||||
#define pci_iounmap pci_iounmap
|
||||
extern void pci_iounmap(struct pci_dev *dev, void __iomem *addr);
|
||||
|
||||
/*
|
||||
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
|
||||
* access
|
||||
*/
|
||||
#define xlate_dev_mem_ptr(p) __va(p)
|
||||
|
||||
/*
|
||||
* Convert a virtual cached pointer to an uncached pointer
|
||||
*/
|
||||
#define xlate_dev_kmem_ptr(p) p
|
||||
|
||||
#include <asm-generic/io.h>
|
||||
|
||||
/*
|
||||
* can the hardware map this into one segment or not, given no other
|
||||
* constraints.
|
||||
|
@ -401,17 +395,6 @@ extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
|
|||
extern int devmem_is_allowed(unsigned long pfn);
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
|
||||
* access
|
||||
*/
|
||||
#define xlate_dev_mem_ptr(p) __va(p)
|
||||
|
||||
/*
|
||||
* Convert a virtual cached pointer to an uncached pointer
|
||||
*/
|
||||
#define xlate_dev_kmem_ptr(p) p
|
||||
|
||||
/*
|
||||
* Register ISA memory and port locations for glibc iopl/inb/outb
|
||||
* emulation.
|
||||
|
|
|
@@ -274,11 +274,13 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
 * translation for translating DMA addresses.  Use the driver
 * DMA support - see dma-mapping.h.
 */
#define virt_to_phys virt_to_phys
static inline phys_addr_t virt_to_phys(const volatile void *x)
{
        return __virt_to_phys((unsigned long)(x));
}

#define phys_to_virt phys_to_virt
static inline void *phys_to_virt(phys_addr_t x)
{
        return (void *)__phys_to_virt(x);

@@ -322,11 +324,13 @@ static inline phys_addr_t __virt_to_idmap(unsigned long x)
#endif

#ifdef CONFIG_VIRT_TO_BUS
#define virt_to_bus virt_to_bus
static inline __deprecated unsigned long virt_to_bus(void *x)
{
        return __virt_to_bus((unsigned long)x);
}

#define bus_to_virt bus_to_virt
static inline __deprecated void *bus_to_virt(unsigned long x)
{
        return (void *)__bus_to_virt(x);
@@ -29,9 +29,9 @@ u8 __readb(const volatile void __iomem *addr);
u16 __readw(const volatile void __iomem *addr);
u32 __readl(const volatile void __iomem *addr);

void __writeb(u8 val, void __iomem *addr);
void __writew(u16 val, void __iomem *addr);
void __writel(u32 val, void __iomem *addr);
void __writeb(u8 val, volatile void __iomem *addr);
void __writew(u16 val, volatile void __iomem *addr);
void __writel(u32 val, volatile void __iomem *addr);

/*
 * Argh, someone forgot the IOCS16 line. We therefore have to handle

@@ -62,20 +62,31 @@ void __writel(u32 val, void __iomem *addr);
#define writew(v,b) __writew(v,b)
#define writel(v,b) __writel(v,b)

#define insb insb
extern void insb(unsigned int port, void *buf, int sz);
#define insw insw
extern void insw(unsigned int port, void *buf, int sz);
#define insl insl
extern void insl(unsigned int port, void *buf, int sz);

#define outsb outsb
extern void outsb(unsigned int port, const void *buf, int sz);
#define outsw outsw
extern void outsw(unsigned int port, const void *buf, int sz);
#define outsl outsl
extern void outsl(unsigned int port, const void *buf, int sz);

/* can't support writesb atm */
extern void writesw(void __iomem *addr, const void *data, int wordlen);
extern void writesl(void __iomem *addr, const void *data, int longlen);
#define writesw writesw
extern void writesw(volatile void __iomem *addr, const void *data, int wordlen);
#define writesl writesl
extern void writesl(volatile void __iomem *addr, const void *data, int longlen);

/* can't support readsb atm */
extern void readsw(const void __iomem *addr, void *data, int wordlen);
extern void readsl(const void __iomem *addr, void *data, int longlen);
#define readsw readsw
extern void readsw(const volatile void __iomem *addr, void *data, int wordlen);

#define readsl readsl
extern void readsl(const volatile void __iomem *addr, void *data, int longlen);

#endif
@@ -102,7 +102,7 @@ EXPORT_SYMBOL(__readb);
EXPORT_SYMBOL(__readw);
EXPORT_SYMBOL(__readl);

void readsw(const void __iomem *addr, void *data, int len)
void readsw(const volatile void __iomem *addr, void *data, int len)
{
        void __iomem *a = __isamem_convert_addr(addr);

@@ -112,7 +112,7 @@ void readsw(const void __iomem *addr, void *data, int len)
}
EXPORT_SYMBOL(readsw);

void readsl(const void __iomem *addr, void *data, int len)
void readsl(const volatile void __iomem *addr, void *data, int len)
{
        void __iomem *a = __isamem_convert_addr(addr);

@@ -122,7 +122,7 @@ void readsl(const void __iomem *addr, void *data, int len)
}
EXPORT_SYMBOL(readsl);

void __writeb(u8 val, void __iomem *addr)
void __writeb(u8 val, volatile void __iomem *addr)
{
        void __iomem *a = __isamem_convert_addr(addr);

@@ -132,7 +132,7 @@ void __writeb(u8 val, void __iomem *addr)
        __raw_writeb(val, a);
}

void __writew(u16 val, void __iomem *addr)
void __writew(u16 val, volatile void __iomem *addr)
{
        void __iomem *a = __isamem_convert_addr(addr);

@@ -142,7 +142,7 @@ void __writew(u16 val, void __iomem *addr)
        __raw_writew(val, a);
}

void __writel(u32 val, void __iomem *addr)
void __writel(u32 val, volatile void __iomem *addr)
{
        void __iomem *a = __isamem_convert_addr(addr);

@@ -157,7 +157,7 @@ EXPORT_SYMBOL(__writeb);
EXPORT_SYMBOL(__writew);
EXPORT_SYMBOL(__writel);

void writesw(void __iomem *addr, const void *data, int len)
void writesw(volatile void __iomem *addr, const void *data, int len)
{
        void __iomem *a = __isamem_convert_addr(addr);

@@ -167,7 +167,7 @@ void writesw(void __iomem *addr, const void *data, int len)
}
EXPORT_SYMBOL(writesw);

void writesl(void __iomem *addr, const void *data, int len)
void writesl(volatile void __iomem *addr, const void *data, int len)
{
        void __iomem *a = __isamem_convert_addr(addr);
@@ -652,7 +652,7 @@ static void __iomem *ixp4xx_ioremap_caller(phys_addr_t addr, size_t size,
        return (void __iomem *)addr;
}

static void ixp4xx_iounmap(void __iomem *addr)
static void ixp4xx_iounmap(volatile void __iomem *addr)
{
        if (!is_pci_memory((__force u32)addr))
                __iounmap(addr);
@ -58,6 +58,10 @@ static inline int is_pci_memory(u32 addr)
|
|||
#define writew(v, p) __indirect_writew(v, p)
|
||||
#define writel(v, p) __indirect_writel(v, p)
|
||||
|
||||
#define writeb_relaxed(v, p) __indirect_writeb(v, p)
|
||||
#define writew_relaxed(v, p) __indirect_writew(v, p)
|
||||
#define writel_relaxed(v, p) __indirect_writel(v, p)
|
||||
|
||||
#define writesb(p, v, l) __indirect_writesb(p, v, l)
|
||||
#define writesw(p, v, l) __indirect_writesw(p, v, l)
|
||||
#define writesl(p, v, l) __indirect_writesl(p, v, l)
|
||||
|
@ -66,6 +70,10 @@ static inline int is_pci_memory(u32 addr)
|
|||
#define readw(p) __indirect_readw(p)
|
||||
#define readl(p) __indirect_readl(p)
|
||||
|
||||
#define readb_relaxed(p) __indirect_readb(p)
|
||||
#define readw_relaxed(p) __indirect_readw(p)
|
||||
#define readl_relaxed(p) __indirect_readl(p)
|
||||
|
||||
#define readsb(p, v, l) __indirect_readsb(p, v, l)
|
||||
#define readsw(p, v, l) __indirect_readsw(p, v, l)
|
||||
#define readsl(p, v, l) __indirect_readsl(p, v, l)
|
||||
|
@ -76,7 +84,7 @@ static inline void __indirect_writeb(u8 value, volatile void __iomem *p)
|
|||
u32 n, byte_enables, data;
|
||||
|
||||
if (!is_pci_memory(addr)) {
|
||||
__raw_writeb(value, addr);
|
||||
__raw_writeb(value, p);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -99,7 +107,7 @@ static inline void __indirect_writew(u16 value, volatile void __iomem *p)
|
|||
u32 n, byte_enables, data;
|
||||
|
||||
if (!is_pci_memory(addr)) {
|
||||
__raw_writew(value, addr);
|
||||
__raw_writew(value, p);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -141,7 +149,7 @@ static inline unsigned char __indirect_readb(const volatile void __iomem *p)
|
|||
u32 n, byte_enables, data;
|
||||
|
||||
if (!is_pci_memory(addr))
|
||||
return __raw_readb(addr);
|
||||
return __raw_readb(p);
|
||||
|
||||
n = addr % 4;
|
||||
byte_enables = (0xf & ~BIT(n)) << IXP4XX_PCI_NP_CBE_BESL;
|
||||
|
@ -164,7 +172,7 @@ static inline unsigned short __indirect_readw(const volatile void __iomem *p)
|
|||
u32 n, byte_enables, data;
|
||||
|
||||
if (!is_pci_memory(addr))
|
||||
return __raw_readw(addr);
|
||||
return __raw_readw(p);
|
||||
|
||||
n = addr % 4;
|
||||
byte_enables = (0xf & ~(BIT(n) | BIT(n+1))) << IXP4XX_PCI_NP_CBE_BESL;
|
||||
|
@ -226,6 +234,7 @@ static inline void __indirect_readsl(const volatile void __iomem *bus_addr,
|
|||
* I/O functions.
|
||||
*/
|
||||
|
||||
#define outb outb
|
||||
static inline void outb(u8 value, u32 addr)
|
||||
{
|
||||
u32 n, byte_enables, data;
|
||||
|
@ -235,12 +244,14 @@ static inline void outb(u8 value, u32 addr)
|
|||
ixp4xx_pci_write(addr, byte_enables | NP_CMD_IOWRITE, data);
|
||||
}
|
||||
|
||||
#define outsb outsb
|
||||
static inline void outsb(u32 io_addr, const u8 *vaddr, u32 count)
|
||||
{
|
||||
while (count--)
|
||||
outb(*vaddr++, io_addr);
|
||||
}
|
||||
|
||||
#define outw outw
|
||||
static inline void outw(u16 value, u32 addr)
|
||||
{
|
||||
u32 n, byte_enables, data;
|
||||
|
@ -250,23 +261,27 @@ static inline void outw(u16 value, u32 addr)
|
|||
ixp4xx_pci_write(addr, byte_enables | NP_CMD_IOWRITE, data);
|
||||
}
|
||||
|
||||
#define outsw outsw
|
||||
static inline void outsw(u32 io_addr, const u16 *vaddr, u32 count)
|
||||
{
|
||||
while (count--)
|
||||
outw(cpu_to_le16(*vaddr++), io_addr);
|
||||
}
|
||||
|
||||
#define outl outl
|
||||
static inline void outl(u32 value, u32 addr)
|
||||
{
|
||||
ixp4xx_pci_write(addr, NP_CMD_IOWRITE, value);
|
||||
}
|
||||
|
||||
#define outsl outsl
|
||||
static inline void outsl(u32 io_addr, const u32 *vaddr, u32 count)
|
||||
{
|
||||
while (count--)
|
||||
outl(cpu_to_le32(*vaddr++), io_addr);
|
||||
}
|
||||
|
||||
#define inb inb
|
||||
static inline u8 inb(u32 addr)
|
||||
{
|
||||
u32 n, byte_enables, data;
|
||||
|
@ -278,12 +293,14 @@ static inline u8 inb(u32 addr)
|
|||
return data >> (8*n);
|
||||
}
|
||||
|
||||
#define insb insb
|
||||
static inline void insb(u32 io_addr, u8 *vaddr, u32 count)
|
||||
{
|
||||
while (count--)
|
||||
*vaddr++ = inb(io_addr);
|
||||
}
|
||||
|
||||
#define inw inw
|
||||
static inline u16 inw(u32 addr)
|
||||
{
|
||||
u32 n, byte_enables, data;
|
||||
|
@ -295,12 +312,14 @@ static inline u16 inw(u32 addr)
|
|||
return data>>(8*n);
|
||||
}
|
||||
|
||||
#define insw insw
|
||||
static inline void insw(u32 io_addr, u16 *vaddr, u32 count)
|
||||
{
|
||||
while (count--)
|
||||
*vaddr++ = le16_to_cpu(inw(io_addr));
|
||||
}
|
||||
|
||||
#define inl inl
|
||||
static inline u32 inl(u32 addr)
|
||||
{
|
||||
u32 data;
|
||||
|
@ -310,6 +329,7 @@ static inline u32 inl(u32 addr)
|
|||
return data;
|
||||
}
|
||||
|
||||
#define insl insl
|
||||
static inline void insl(u32 io_addr, u32 *vaddr, u32 count)
|
||||
{
|
||||
while (count--)
|
||||
|
|
|
@@ -23,7 +23,6 @@ config ARM64
        select GENERIC_CLOCKEVENTS_BROADCAST if SMP
        select GENERIC_CPU_AUTOPROBE
        select GENERIC_EARLY_IOREMAP
        select GENERIC_IOMAP
        select GENERIC_IRQ_PROBE
        select GENERIC_IRQ_SHOW
        select GENERIC_SCHED_CLOCK
@ -34,26 +34,31 @@
|
|||
/*
|
||||
* Generic IO read/write. These perform native-endian accesses.
|
||||
*/
|
||||
#define __raw_writeb __raw_writeb
|
||||
static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
|
||||
{
|
||||
asm volatile("strb %w0, [%1]" : : "r" (val), "r" (addr));
|
||||
}
|
||||
|
||||
#define __raw_writew __raw_writew
|
||||
static inline void __raw_writew(u16 val, volatile void __iomem *addr)
|
||||
{
|
||||
asm volatile("strh %w0, [%1]" : : "r" (val), "r" (addr));
|
||||
}
|
||||
|
||||
#define __raw_writel __raw_writel
|
||||
static inline void __raw_writel(u32 val, volatile void __iomem *addr)
|
||||
{
|
||||
asm volatile("str %w0, [%1]" : : "r" (val), "r" (addr));
|
||||
}
|
||||
|
||||
#define __raw_writeq __raw_writeq
|
||||
static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
|
||||
{
|
||||
asm volatile("str %0, [%1]" : : "r" (val), "r" (addr));
|
||||
}
|
||||
|
||||
#define __raw_readb __raw_readb
|
||||
static inline u8 __raw_readb(const volatile void __iomem *addr)
|
||||
{
|
||||
u8 val;
|
||||
|
@ -61,6 +66,7 @@ static inline u8 __raw_readb(const volatile void __iomem *addr)
|
|||
return val;
|
||||
}
|
||||
|
||||
#define __raw_readw __raw_readw
|
||||
static inline u16 __raw_readw(const volatile void __iomem *addr)
|
||||
{
|
||||
u16 val;
|
||||
|
@ -68,6 +74,7 @@ static inline u16 __raw_readw(const volatile void __iomem *addr)
|
|||
return val;
|
||||
}
|
||||
|
||||
#define __raw_readl __raw_readl
|
||||
static inline u32 __raw_readl(const volatile void __iomem *addr)
|
||||
{
|
||||
u32 val;
|
||||
|
@ -75,6 +82,7 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
|
|||
return val;
|
||||
}
|
||||
|
||||
#define __raw_readq __raw_readq
|
||||
static inline u64 __raw_readq(const volatile void __iomem *addr)
|
||||
{
|
||||
u64 val;
|
||||
|
@ -125,94 +133,6 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
|
|||
#define IO_SPACE_LIMIT (SZ_32M - 1)
|
||||
#define PCI_IOBASE ((void __iomem *)(MODULES_VADDR - SZ_32M))
|
||||
|
||||
static inline u8 inb(unsigned long addr)
|
||||
{
|
||||
return readb(addr + PCI_IOBASE);
|
||||
}
|
||||
|
||||
static inline u16 inw(unsigned long addr)
|
||||
{
|
||||
return readw(addr + PCI_IOBASE);
|
||||
}
|
||||
|
||||
static inline u32 inl(unsigned long addr)
|
||||
{
|
||||
return readl(addr + PCI_IOBASE);
|
||||
}
|
||||
|
||||
static inline void outb(u8 b, unsigned long addr)
|
||||
{
|
||||
writeb(b, addr + PCI_IOBASE);
|
||||
}
|
||||
|
||||
static inline void outw(u16 b, unsigned long addr)
|
||||
{
|
||||
writew(b, addr + PCI_IOBASE);
|
||||
}
|
||||
|
||||
static inline void outl(u32 b, unsigned long addr)
|
||||
{
|
||||
writel(b, addr + PCI_IOBASE);
|
||||
}
|
||||
|
||||
#define inb_p(addr) inb(addr)
|
||||
#define inw_p(addr) inw(addr)
|
||||
#define inl_p(addr) inl(addr)
|
||||
|
||||
#define outb_p(x, addr) outb((x), (addr))
|
||||
#define outw_p(x, addr) outw((x), (addr))
|
||||
#define outl_p(x, addr) outl((x), (addr))
|
||||
|
||||
static inline void insb(unsigned long addr, void *buffer, int count)
|
||||
{
|
||||
u8 *buf = buffer;
|
||||
while (count--)
|
||||
*buf++ = __raw_readb(addr + PCI_IOBASE);
|
||||
}
|
||||
|
||||
static inline void insw(unsigned long addr, void *buffer, int count)
|
||||
{
|
||||
u16 *buf = buffer;
|
||||
while (count--)
|
||||
*buf++ = __raw_readw(addr + PCI_IOBASE);
|
||||
}
|
||||
|
||||
static inline void insl(unsigned long addr, void *buffer, int count)
|
||||
{
|
||||
u32 *buf = buffer;
|
||||
while (count--)
|
||||
*buf++ = __raw_readl(addr + PCI_IOBASE);
|
||||
}
|
||||
|
||||
static inline void outsb(unsigned long addr, const void *buffer, int count)
|
||||
{
|
||||
const u8 *buf = buffer;
|
||||
while (count--)
|
||||
__raw_writeb(*buf++, addr + PCI_IOBASE);
|
||||
}
|
||||
|
||||
static inline void outsw(unsigned long addr, const void *buffer, int count)
|
||||
{
|
||||
const u16 *buf = buffer;
|
||||
while (count--)
|
||||
__raw_writew(*buf++, addr + PCI_IOBASE);
|
||||
}
|
||||
|
||||
static inline void outsl(unsigned long addr, const void *buffer, int count)
|
||||
{
|
||||
const u32 *buf = buffer;
|
||||
while (count--)
|
||||
__raw_writel(*buf++, addr + PCI_IOBASE);
|
||||
}
|
||||
|
||||
#define insb_p(port,to,len) insb(port,to,len)
|
||||
#define insw_p(port,to,len) insw(port,to,len)
|
||||
#define insl_p(port,to,len) insl(port,to,len)
|
||||
|
||||
#define outsb_p(port,from,len) outsb(port,from,len)
|
||||
#define outsw_p(port,from,len) outsw(port,from,len)
|
||||
#define outsl_p(port,from,len) outsl(port,from,len)
|
||||
|
||||
/*
|
||||
* String version of I/O memory access operations.
|
||||
*/
|
||||
|
@ -236,18 +156,14 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
|
|||
#define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
|
||||
#define iounmap __iounmap
|
||||
|
||||
#define ARCH_HAS_IOREMAP_WC
|
||||
#include <asm-generic/iomap.h>
|
||||
|
||||
/*
|
||||
* More restrictive address range checking than the default implementation
|
||||
* (PHYS_OFFSET and PHYS_MASK taken into account).
|
||||
* io{read,write}{16,32}be() macros
|
||||
*/
|
||||
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
|
||||
extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
|
||||
extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
|
||||
#define ioread16be(p) ({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
|
||||
#define ioread32be(p) ({ __u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
|
||||
|
||||
extern int devmem_is_allowed(unsigned long pfn);
|
||||
#define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force __u16)cpu_to_be16(v), p); })
|
||||
#define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); })
|
||||
|
||||
/*
|
||||
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
|
||||
|
@ -260,6 +176,18 @@ extern int devmem_is_allowed(unsigned long pfn);
|
|||
*/
|
||||
#define xlate_dev_kmem_ptr(p) p
|
||||
|
||||
#include <asm-generic/io.h>
|
||||
|
||||
/*
|
||||
* More restrictive address range checking than the default implementation
|
||||
* (PHYS_OFFSET and PHYS_MASK taken into account).
|
||||
*/
|
||||
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
|
||||
extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
|
||||
extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
|
||||
|
||||
extern int devmem_is_allowed(unsigned long pfn);
|
||||
|
||||
struct bio_vec;
|
||||
extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
|
||||
const struct bio_vec *vec2);
|
||||
|
|
|
@@ -120,11 +120,13 @@ extern phys_addr_t memstart_addr;
 * translation for translating DMA addresses.  Use the driver
 * DMA support - see dma-mapping.h.
 */
#define virt_to_phys virt_to_phys
static inline phys_addr_t virt_to_phys(const volatile void *x)
{
        return __virt_to_phys((unsigned long)(x));
}

#define phys_to_virt phys_to_virt
static inline void *phys_to_virt(phys_addr_t x)
{
        return (void *)(__phys_to_virt(x));
@@ -365,15 +365,15 @@ ia64_done_with_exception (struct pt_regs *regs)
}

#define ARCH_HAS_TRANSLATE_MEM_PTR 1
static __inline__ char *
xlate_dev_mem_ptr (unsigned long p)
static __inline__ void *
xlate_dev_mem_ptr(phys_addr_t p)
{
        struct page *page;
        char * ptr;
        void *ptr;

        page = pfn_to_page(p >> PAGE_SHIFT);
        if (PageUncached(page))
                ptr = (char *)p + __IA64_UNCACHED_OFFSET;
                ptr = (void *)p + __IA64_UNCACHED_OFFSET;
        else
                ptr = __va(p);

@@ -383,15 +383,15 @@ xlate_dev_mem_ptr (unsigned long p)
/*
 * Convert a virtual cached kernel memory pointer to an uncached pointer
 */
static __inline__ char *
xlate_dev_kmem_ptr (char * p)
static __inline__ void *
xlate_dev_kmem_ptr(void *p)
{
        struct page *page;
        char * ptr;
        void *ptr;

        page = virt_to_page((unsigned long)p);
        if (PageUncached(page))
                ptr = (char *)__pa(p) + __IA64_UNCACHED_OFFSET;
                ptr = (void *)__pa(p) + __IA64_UNCACHED_OFFSET;
        else
                ptr = p;
@@ -13,9 +13,10 @@
#include <asm/page.h>
#include <asm/pci_io.h>

void *xlate_dev_mem_ptr(unsigned long phys);
#define xlate_dev_mem_ptr xlate_dev_mem_ptr
void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
void *xlate_dev_mem_ptr(phys_addr_t phys);
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);

/*
 * Convert a virtual cached pointer to an uncached pointer
@@ -176,7 +176,7 @@ static int is_swapped(unsigned long addr)
 * For swapped prefix pages a new buffer is returned that contains a copy of
 * the absolute memory. The buffer size is maximum one page large.
 */
void *xlate_dev_mem_ptr(unsigned long addr)
void *xlate_dev_mem_ptr(phys_addr_t addr)
{
        void *bounce = (void *) addr;
        unsigned long size;

@@ -197,7 +197,7 @@ void *xlate_dev_mem_ptr(unsigned long addr)
/*
 * Free converted buffer for /dev/mem access (if necessary)
 */
void unxlate_dev_mem_ptr(unsigned long addr, void *buf)
void unxlate_dev_mem_ptr(phys_addr_t addr, void *buf)
{
        if ((void *) addr != buf)
                free_page((unsigned long) buf);
@@ -310,8 +310,8 @@ BUILDIO(b, b, char)
BUILDIO(w, w, short)
BUILDIO(l, , int)

extern void *xlate_dev_mem_ptr(unsigned long phys);
extern void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
extern void *xlate_dev_mem_ptr(phys_addr_t phys);
extern void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);

extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
                               unsigned long prot_val);
@@ -327,7 +327,7 @@ EXPORT_SYMBOL(iounmap);
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
        void *addr;
        unsigned long start = phys & PAGE_MASK;

@@ -343,7 +343,7 @@ void *xlate_dev_mem_ptr(unsigned long phys)
        return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
        if (page_is_ram(phys >> PAGE_SHIFT))
                return;
@@ -84,9 +84,12 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
}
#endif

void __weak unxlate_dev_mem_ptr(unsigned long phys, void *addr)
#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif

/*
 * This funcion reads the *physical* memory. The f_pos points directly to the

@@ -97,7 +100,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
{
        phys_addr_t p = *ppos;
        ssize_t read, sz;
        char *ptr;
        void *ptr;

        if (p != *ppos)
                return 0;

@@ -400,7 +403,7 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur
                         */
                        kbuf = xlate_dev_kmem_ptr((char *)p);
                        kbuf = xlate_dev_kmem_ptr((void *)p);

                        if (copy_to_user(buf, kbuf, sz))
                                return -EFAULT;

@@ -461,7 +464,7 @@ static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
#endif

        while (count > 0) {
                char *ptr;
                void *ptr;

                sz = size_inside_page(p, count);

@@ -470,7 +473,7 @@ static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
                 * it must also be accessed uncached by the kernel or data
                 * corruption may occur.
                 */
                ptr = xlate_dev_kmem_ptr((char *)p);
                ptr = xlate_dev_kmem_ptr((void *)p);

                copied = copy_from_user(ptr, buf, sz);
                if (copied) {
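The /dev/mem hunks above move xlate_dev_mem_ptr()/unxlate_dev_mem_ptr() from unsigned long to phys_addr_t and put the generic fallbacks behind the same #ifndef scheme. What an architecture override now looks like, sketched from the s390 hunk earlier (declarations only; the bodies live in the architecture's own code):

    /* in the architecture's <asm/io.h>, before <asm-generic/io.h> is included */
    #define xlate_dev_mem_ptr xlate_dev_mem_ptr
    void *xlate_dev_mem_ptr(phys_addr_t phys);

    #define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
    void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);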
@ -12,6 +12,7 @@
|
|||
#define __ASM_GENERIC_IO_H
|
||||
|
||||
#include <asm/page.h> /* I/O is all done through memory accesses */
|
||||
#include <linux/string.h> /* for memset() and memcpy() */
|
||||
#include <linux/types.h>
|
||||
|
||||
#ifdef CONFIG_GENERIC_IOMAP
|
||||
|
@ -24,260 +25,654 @@
|
|||
#define mmiowb() do {} while (0)
|
||||
#endif
|
||||
|
||||
/*****************************************************************************/
|
||||
/*
|
||||
* readX/writeX() are used to access memory mapped devices. On some
|
||||
* architectures the memory mapped IO stuff needs to be accessed
|
||||
* differently. On the simple architectures, we just read/write the
|
||||
* memory location directly.
|
||||
* __raw_{read,write}{b,w,l,q}() access memory in native endianness.
|
||||
*
|
||||
* On some architectures memory mapped IO needs to be accessed differently.
|
||||
* On the simple architectures, we just read/write the memory location
|
||||
* directly.
|
||||
*/
|
||||
|
||||
#ifndef __raw_readb
|
||||
#define __raw_readb __raw_readb
|
||||
static inline u8 __raw_readb(const volatile void __iomem *addr)
|
||||
{
|
||||
return *(const volatile u8 __force *) addr;
|
||||
return *(const volatile u8 __force *)addr;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef __raw_readw
|
||||
#define __raw_readw __raw_readw
|
||||
static inline u16 __raw_readw(const volatile void __iomem *addr)
|
||||
{
|
||||
return *(const volatile u16 __force *) addr;
|
||||
return *(const volatile u16 __force *)addr;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef __raw_readl
|
||||
#define __raw_readl __raw_readl
|
||||
static inline u32 __raw_readl(const volatile void __iomem *addr)
|
||||
{
|
||||
return *(const volatile u32 __force *) addr;
|
||||
return *(const volatile u32 __force *)addr;
|
||||
}
|
||||
#endif
|
||||
|
||||
#define readb __raw_readb
|
||||
#ifdef CONFIG_64BIT
|
||||
#ifndef __raw_readq
|
||||
#define __raw_readq __raw_readq
|
||||
static inline u64 __raw_readq(const volatile void __iomem *addr)
|
||||
{
|
||||
return *(const volatile u64 __force *)addr;
|
||||
}
|
||||
#endif
|
||||
#endif /* CONFIG_64BIT */
|
||||
|
||||
#ifndef __raw_writeb
|
||||
#define __raw_writeb __raw_writeb
|
||||
static inline void __raw_writeb(u8 value, volatile void __iomem *addr)
|
||||
{
|
||||
*(volatile u8 __force *)addr = value;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef __raw_writew
|
||||
#define __raw_writew __raw_writew
|
||||
static inline void __raw_writew(u16 value, volatile void __iomem *addr)
|
||||
{
|
||||
*(volatile u16 __force *)addr = value;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef __raw_writel
|
||||
#define __raw_writel __raw_writel
|
||||
static inline void __raw_writel(u32 value, volatile void __iomem *addr)
|
||||
{
|
||||
*(volatile u32 __force *)addr = value;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_64BIT
|
||||
#ifndef __raw_writeq
|
||||
#define __raw_writeq __raw_writeq
|
||||
static inline void __raw_writeq(u64 value, volatile void __iomem *addr)
|
||||
{
|
||||
*(volatile u64 __force *)addr = value;
|
||||
}
|
||||
#endif
|
||||
#endif /* CONFIG_64BIT */
|
||||
|
||||
/*
|
||||
* {read,write}{b,w,l,q}() access little endian memory and return result in
|
||||
* native endianness.
|
||||
*/
|
||||
|
||||
#ifndef readb
|
||||
#define readb readb
|
||||
static inline u8 readb(const volatile void __iomem *addr)
|
||||
{
|
||||
return __raw_readb(addr);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef readw
|
||||
#define readw readw
|
||||
static inline u16 readw(const volatile void __iomem *addr)
|
||||
{
|
||||
return __le16_to_cpu(__raw_readw(addr));
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef readl
|
||||
#define readl readl
|
||||
static inline u32 readl(const volatile void __iomem *addr)
|
||||
{
|
||||
return __le32_to_cpu(__raw_readl(addr));
|
||||
}
|
||||
|
||||
#ifndef __raw_writeb
|
||||
static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
|
||||
{
|
||||
*(volatile u8 __force *) addr = b;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef __raw_writew
|
||||
static inline void __raw_writew(u16 b, volatile void __iomem *addr)
|
||||
{
|
||||
*(volatile u16 __force *) addr = b;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef __raw_writel
|
||||
static inline void __raw_writel(u32 b, volatile void __iomem *addr)
|
||||
{
|
||||
*(volatile u32 __force *) addr = b;
|
||||
}
|
||||
#endif
|
||||
|
||||
#define writeb __raw_writeb
|
||||
#define writew(b,addr) __raw_writew(__cpu_to_le16(b),addr)
|
||||
#define writel(b,addr) __raw_writel(__cpu_to_le32(b),addr)
|
||||
|
||||
#ifdef CONFIG_64BIT
|
||||
#ifndef __raw_readq
|
||||
static inline u64 __raw_readq(const volatile void __iomem *addr)
|
||||
{
|
||||
return *(const volatile u64 __force *) addr;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef readq
|
||||
#define readq readq
|
||||
static inline u64 readq(const volatile void __iomem *addr)
|
||||
{
|
||||
return __le64_to_cpu(__raw_readq(addr));
|
||||
}
|
||||
|
||||
#ifndef __raw_writeq
|
||||
static inline void __raw_writeq(u64 b, volatile void __iomem *addr)
|
||||
{
|
||||
*(volatile u64 __force *) addr = b;
|
||||
}
|
||||
#endif
|
||||
|
||||
#define writeq(b, addr) __raw_writeq(__cpu_to_le64(b), addr)
|
||||
#endif /* CONFIG_64BIT */
|
||||
|
||||
#ifndef PCI_IOBASE
|
||||
#define PCI_IOBASE ((void __iomem *) 0)
|
||||
#ifndef writeb
|
||||
#define writeb writeb
|
||||
static inline void writeb(u8 value, volatile void __iomem *addr)
|
||||
{
|
||||
__raw_writeb(value, addr);
|
||||
}
|
||||
#endif
|
||||
|
||||
/*****************************************************************************/
|
||||
#ifndef writew
|
||||
#define writew writew
|
||||
static inline void writew(u16 value, volatile void __iomem *addr)
|
||||
{
|
||||
__raw_writew(cpu_to_le16(value), addr);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef writel
|
||||
#define writel writel
|
||||
static inline void writel(u32 value, volatile void __iomem *addr)
|
||||
{
|
||||
__raw_writel(__cpu_to_le32(value), addr);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_64BIT
|
||||
#ifndef writeq
|
||||
#define writeq writeq
|
||||
static inline void writeq(u64 value, volatile void __iomem *addr)
|
||||
{
|
||||
__raw_writeq(__cpu_to_le64(value), addr);
|
||||
}
|
||||
#endif
|
||||
#endif /* CONFIG_64BIT */
|
||||
|
||||
/*
|
||||
* traditional input/output functions
|
||||
* {read,write}s{b,w,l,q}() repeatedly access the same memory address in
|
||||
* native endianness in 8-, 16-, 32- or 64-bit chunks (@count times).
|
||||
*/
|
||||
|
||||
static inline u8 inb(unsigned long addr)
|
||||
{
|
||||
return readb(addr + PCI_IOBASE);
|
||||
}
|
||||
|
||||
static inline u16 inw(unsigned long addr)
|
||||
{
|
||||
return readw(addr + PCI_IOBASE);
|
||||
}
|
||||
|
||||
static inline u32 inl(unsigned long addr)
|
||||
{
|
||||
return readl(addr + PCI_IOBASE);
|
||||
}
|
||||
|
||||
static inline void outb(u8 b, unsigned long addr)
|
||||
{
|
||||
writeb(b, addr + PCI_IOBASE);
|
||||
}
|
||||
|
||||
static inline void outw(u16 b, unsigned long addr)
|
||||
{
|
||||
writew(b, addr + PCI_IOBASE);
|
||||
}
|
||||
|
||||
static inline void outl(u32 b, unsigned long addr)
|
||||
{
|
||||
writel(b, addr + PCI_IOBASE);
|
||||
}
|
||||
|
||||
#define inb_p(addr) inb(addr)
|
||||
#define inw_p(addr) inw(addr)
|
||||
#define inl_p(addr) inl(addr)
|
||||
#define outb_p(x, addr) outb((x), (addr))
|
||||
#define outw_p(x, addr) outw((x), (addr))
|
||||
#define outl_p(x, addr) outl((x), (addr))
|
||||
|
||||
#ifndef insb
|
||||
static inline void insb(unsigned long addr, void *buffer, int count)
|
||||
#ifndef readsb
|
||||
#define readsb readsb
|
||||
static inline void readsb(const volatile void __iomem *addr, void *buffer,
|
||||
unsigned int count)
|
||||
{
|
||||
if (count) {
|
||||
u8 *buf = buffer;
|
||||
|
||||
do {
|
||||
u8 x = __raw_readb(addr + PCI_IOBASE);
|
||||
u8 x = __raw_readb(addr);
|
||||
*buf++ = x;
|
||||
} while (--count);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef insw
|
||||
static inline void insw(unsigned long addr, void *buffer, int count)
|
||||
#ifndef readsw
|
||||
#define readsw readsw
|
||||
static inline void readsw(const volatile void __iomem *addr, void *buffer,
|
||||
unsigned int count)
|
||||
{
|
||||
if (count) {
|
||||
u16 *buf = buffer;
|
||||
|
||||
do {
|
||||
u16 x = __raw_readw(addr + PCI_IOBASE);
|
||||
u16 x = __raw_readw(addr);
|
||||
*buf++ = x;
|
||||
} while (--count);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef insl
|
||||
static inline void insl(unsigned long addr, void *buffer, int count)
|
||||
#ifndef readsl
|
||||
#define readsl readsl
|
||||
static inline void readsl(const volatile void __iomem *addr, void *buffer,
|
||||
unsigned int count)
|
||||
{
|
||||
if (count) {
|
||||
u32 *buf = buffer;
|
||||
|
||||
do {
|
||||
u32 x = __raw_readl(addr + PCI_IOBASE);
|
||||
u32 x = __raw_readl(addr);
|
||||
*buf++ = x;
|
||||
} while (--count);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef outsb
|
||||
static inline void outsb(unsigned long addr, const void *buffer, int count)
|
||||
#ifdef CONFIG_64BIT
|
||||
#ifndef readsq
|
||||
#define readsq readsq
|
||||
static inline void readsq(const volatile void __iomem *addr, void *buffer,
|
||||
unsigned int count)
|
||||
{
|
||||
if (count) {
|
||||
u64 *buf = buffer;
|
||||
|
||||
do {
|
||||
u64 x = __raw_readq(addr);
|
||||
*buf++ = x;
|
||||
} while (--count);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
#endif /* CONFIG_64BIT */
|
||||
|
||||
#ifndef writesb
|
||||
#define writesb writesb
|
||||
static inline void writesb(volatile void __iomem *addr, const void *buffer,
|
||||
unsigned int count)
|
||||
{
|
||||
if (count) {
|
||||
const u8 *buf = buffer;
|
||||
|
||||
do {
|
||||
__raw_writeb(*buf++, addr + PCI_IOBASE);
|
||||
__raw_writeb(*buf++, addr);
|
||||
} while (--count);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef outsw
|
||||
static inline void outsw(unsigned long addr, const void *buffer, int count)
|
||||
#ifndef writesw
|
||||
#define writesw writesw
|
||||
static inline void writesw(volatile void __iomem *addr, const void *buffer,
|
||||
unsigned int count)
|
||||
{
|
||||
if (count) {
|
||||
const u16 *buf = buffer;
|
||||
|
||||
do {
|
||||
__raw_writew(*buf++, addr + PCI_IOBASE);
|
||||
__raw_writew(*buf++, addr);
|
||||
} while (--count);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef outsl
|
||||
static inline void outsl(unsigned long addr, const void *buffer, int count)
|
||||
#ifndef writesl
|
||||
#define writesl writesl
|
||||
static inline void writesl(volatile void __iomem *addr, const void *buffer,
|
||||
unsigned int count)
|
||||
{
|
||||
if (count) {
|
||||
const u32 *buf = buffer;
|
||||
|
||||
do {
|
||||
__raw_writel(*buf++, addr + PCI_IOBASE);
|
||||
__raw_writel(*buf++, addr);
|
||||
} while (--count);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_GENERIC_IOMAP
|
||||
#define ioread8(addr) readb(addr)
|
||||
#define ioread16(addr) readw(addr)
|
||||
#define ioread16be(addr) __be16_to_cpu(__raw_readw(addr))
|
||||
#define ioread32(addr) readl(addr)
|
||||
#define ioread32be(addr) __be32_to_cpu(__raw_readl(addr))
|
||||
#ifdef CONFIG_64BIT
|
||||
#ifndef writesq
|
||||
#define writesq writesq
|
||||
static inline void writesq(volatile void __iomem *addr, const void *buffer,
|
||||
unsigned int count)
|
||||
{
|
||||
if (count) {
|
||||
const u64 *buf = buffer;
|
||||
|
||||
#define iowrite8(v, addr) writeb((v), (addr))
|
||||
#define iowrite16(v, addr) writew((v), (addr))
|
||||
#define iowrite16be(v, addr) __raw_writew(__cpu_to_be16(v), addr)
|
||||
#define iowrite32(v, addr) writel((v), (addr))
|
||||
#define iowrite32be(v, addr) __raw_writel(__cpu_to_be32(v), addr)
|
||||
do {
|
||||
__raw_writeq(*buf++, addr);
|
||||
} while (--count);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
#endif /* CONFIG_64BIT */
|
||||
|
||||
#define ioread8_rep(p, dst, count) \
|
||||
insb((unsigned long) (p), (dst), (count))
|
||||
#define ioread16_rep(p, dst, count) \
|
||||
insw((unsigned long) (p), (dst), (count))
|
||||
#define ioread32_rep(p, dst, count) \
|
||||
insl((unsigned long) (p), (dst), (count))
|
||||
|
||||
#define iowrite8_rep(p, src, count) \
|
||||
outsb((unsigned long) (p), (src), (count))
|
||||
#define iowrite16_rep(p, src, count) \
|
||||
outsw((unsigned long) (p), (src), (count))
|
||||
#define iowrite32_rep(p, src, count) \
|
||||
outsl((unsigned long) (p), (src), (count))
|
||||
#endif /* CONFIG_GENERIC_IOMAP */
|
||||
#ifndef PCI_IOBASE
|
||||
#define PCI_IOBASE ((void __iomem *)0)
|
||||
#endif
|
||||
|
||||
#ifndef IO_SPACE_LIMIT
|
||||
#define IO_SPACE_LIMIT 0xffff
|
||||
#endif
|
||||
|
||||
/*
|
||||
* {in,out}{b,w,l}() access little endian I/O. {in,out}{b,w,l}_p() can be
|
||||
* implemented on hardware that needs an additional delay for I/O accesses to
|
||||
* take effect.
|
||||
*/
|
||||
|
||||
#ifndef inb
|
||||
#define inb inb
|
||||
static inline u8 inb(unsigned long addr)
|
||||
{
|
||||
return readb(PCI_IOBASE + addr);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef inw
|
||||
#define inw inw
|
||||
static inline u16 inw(unsigned long addr)
|
||||
{
|
||||
return readw(PCI_IOBASE + addr);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef inl
|
||||
#define inl inl
|
||||
static inline u32 inl(unsigned long addr)
|
||||
{
|
||||
return readl(PCI_IOBASE + addr);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef outb
|
||||
#define outb outb
|
||||
static inline void outb(u8 value, unsigned long addr)
|
||||
{
|
||||
writeb(value, PCI_IOBASE + addr);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef outw
|
||||
#define outw outw
|
||||
static inline void outw(u16 value, unsigned long addr)
|
||||
{
|
||||
writew(value, PCI_IOBASE + addr);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef outl
|
||||
#define outl outl
|
||||
static inline void outl(u32 value, unsigned long addr)
|
||||
{
|
||||
writel(value, PCI_IOBASE + addr);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef inb_p
|
||||
#define inb_p inb_p
|
||||
static inline u8 inb_p(unsigned long addr)
|
||||
{
|
||||
return inb(addr);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef inw_p
|
||||
#define inw_p inw_p
|
||||
static inline u16 inw_p(unsigned long addr)
|
||||
{
|
||||
return inw(addr);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef inl_p
|
||||
#define inl_p inl_p
|
||||
static inline u32 inl_p(unsigned long addr)
|
||||
{
|
||||
return inl(addr);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef outb_p
|
||||
#define outb_p outb_p
|
||||
static inline void outb_p(u8 value, unsigned long addr)
|
||||
{
|
||||
outb(value, addr);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef outw_p
|
||||
#define outw_p outw_p
|
||||
static inline void outw_p(u16 value, unsigned long addr)
|
||||
{
|
||||
outw(value, addr);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef outl_p
|
||||
#define outl_p outl_p
|
||||
static inline void outl_p(u32 value, unsigned long addr)
|
||||
{
|
||||
outl(value, addr);
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* {in,out}s{b,w,l}{,_p}() are variants of the above that repeatedly access a
|
||||
* single I/O port multiple times.
|
||||
*/
|
||||
|
||||
#ifndef insb
|
||||
#define insb insb
|
||||
static inline void insb(unsigned long addr, void *buffer, unsigned int count)
|
||||
{
|
||||
readsb(PCI_IOBASE + addr, buffer, count);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef insw
|
||||
#define insw insw
|
||||
static inline void insw(unsigned long addr, void *buffer, unsigned int count)
|
||||
{
|
||||
readsw(PCI_IOBASE + addr, buffer, count);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef insl
|
||||
#define insl insl
|
||||
static inline void insl(unsigned long addr, void *buffer, unsigned int count)
|
||||
{
|
||||
readsl(PCI_IOBASE + addr, buffer, count);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef outsb
|
||||
#define outsb outsb
|
||||
static inline void outsb(unsigned long addr, const void *buffer,
|
||||
unsigned int count)
|
||||
{
|
||||
writesb(PCI_IOBASE + addr, buffer, count);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef outsw
|
||||
#define outsw outsw
|
||||
static inline void outsw(unsigned long addr, const void *buffer,
|
||||
unsigned int count)
|
||||
{
|
||||
writesw(PCI_IOBASE + addr, buffer, count);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef outsl
|
||||
#define outsl outsl
|
||||
static inline void outsl(unsigned long addr, const void *buffer,
|
||||
unsigned int count)
|
||||
{
|
||||
writesl(PCI_IOBASE + addr, buffer, count);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef insb_p
|
||||
#define insb_p insb_p
|
||||
static inline void insb_p(unsigned long addr, void *buffer, unsigned int count)
|
||||
{
|
||||
insb(addr, buffer, count);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef insw_p
|
||||
#define insw_p insw_p
|
||||
static inline void insw_p(unsigned long addr, void *buffer, unsigned int count)
|
||||
{
|
||||
insw(addr, buffer, count);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef insl_p
|
||||
#define insl_p insl_p
|
||||
static inline void insl_p(unsigned long addr, void *buffer, unsigned int count)
|
||||
{
|
||||
insl(addr, buffer, count);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef outsb_p
|
||||
#define outsb_p outsb_p
|
||||
static inline void outsb_p(unsigned long addr, const void *buffer,
|
||||
unsigned int count)
|
||||
{
|
||||
outsb(addr, buffer, count);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef outsw_p
|
||||
#define outsw_p outsw_p
|
||||
static inline void outsw_p(unsigned long addr, const void *buffer,
|
||||
unsigned int count)
|
||||
{
|
||||
outsw(addr, buffer, count);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef outsl_p
|
||||
#define outsl_p outsl_p
|
||||
static inline void outsl_p(unsigned long addr, const void *buffer,
|
||||
unsigned int count)
|
||||
{
|
||||
outsl(addr, buffer, count);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_GENERIC_IOMAP
|
||||
#ifndef ioread8
|
||||
#define ioread8 ioread8
|
||||
static inline u8 ioread8(const volatile void __iomem *addr)
|
||||
{
|
||||
return readb(addr);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef ioread16
|
||||
#define ioread16 ioread16
|
||||
static inline u16 ioread16(const volatile void __iomem *addr)
|
||||
{
|
||||
return readw(addr);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef ioread32
|
||||
#define ioread32 ioread32
|
||||
static inline u32 ioread32(const volatile void __iomem *addr)
|
||||
{
|
||||
return readl(addr);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef iowrite8
|
||||
#define iowrite8 iowrite8
|
||||
static inline void iowrite8(u8 value, volatile void __iomem *addr)
|
||||
{
|
||||
writeb(value, addr);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef iowrite16
|
||||
#define iowrite16 iowrite16
|
||||
static inline void iowrite16(u16 value, volatile void __iomem *addr)
|
||||
{
|
||||
writew(value, addr);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef iowrite32
|
||||
#define iowrite32 iowrite32
|
||||
static inline void iowrite32(u32 value, volatile void __iomem *addr)
|
||||
{
|
||||
writel(value, addr);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef ioread16be
|
||||
#define ioread16be ioread16be
|
||||
static inline u16 ioread16be(const volatile void __iomem *addr)
|
||||
{
|
||||
return __be16_to_cpu(__raw_readw(addr));
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef ioread32be
|
||||
#define ioread32be ioread32be
|
||||
static inline u32 ioread32be(const volatile void __iomem *addr)
|
||||
{
|
||||
return __be32_to_cpu(__raw_readl(addr));
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef iowrite16be
|
||||
#define iowrite16be iowrite16be
|
||||
static inline void iowrite16be(u16 value, void volatile __iomem *addr)
|
||||
{
|
||||
__raw_writew(__cpu_to_be16(value), addr);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef iowrite32be
|
||||
#define iowrite32be iowrite32be
|
||||
static inline void iowrite32be(u32 value, volatile void __iomem *addr)
|
||||
{
|
||||
__raw_writel(__cpu_to_be32(value), addr);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef ioread8_rep
|
||||
#define ioread8_rep ioread8_rep
|
||||
static inline void ioread8_rep(const volatile void __iomem *addr, void *buffer,
|
||||
unsigned int count)
|
||||
{
|
||||
readsb(addr, buffer, count);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef ioread16_rep
|
||||
#define ioread16_rep ioread16_rep
|
||||
static inline void ioread16_rep(const volatile void __iomem *addr,
|
||||
void *buffer, unsigned int count)
|
||||
{
|
||||
readsw(addr, buffer, count);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef ioread32_rep
|
||||
#define ioread32_rep ioread32_rep
|
||||
static inline void ioread32_rep(const volatile void __iomem *addr,
|
||||
void *buffer, unsigned int count)
|
||||
{
|
||||
readsl(addr, buffer, count);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef iowrite8_rep
|
||||
#define iowrite8_rep iowrite8_rep
|
||||
static inline void iowrite8_rep(volatile void __iomem *addr,
|
||||
const void *buffer,
|
||||
unsigned int count)
|
||||
{
|
||||
writesb(addr, buffer, count);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef iowrite16_rep
|
||||
#define iowrite16_rep iowrite16_rep
|
||||
static inline void iowrite16_rep(volatile void __iomem *addr,
|
||||
const void *buffer,
|
||||
unsigned int count)
|
||||
{
|
||||
writesw(addr, buffer, count);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef iowrite32_rep
|
||||
#define iowrite32_rep iowrite32_rep
|
||||
static inline void iowrite32_rep(volatile void __iomem *addr,
|
||||
const void *buffer,
|
||||
unsigned int count)
|
||||
{
|
||||
writesl(addr, buffer, count);
|
||||
}
|
||||
#endif
|
||||
#endif /* CONFIG_GENERIC_IOMAP */
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
#include <linux/vmalloc.h>
|
||||
#define __io_virt(x) ((void __force *) (x))
|
||||
#define __io_virt(x) ((void __force *)(x))
|
||||
|
||||
#ifndef CONFIG_GENERIC_IOMAP
|
||||
struct pci_dev;
|
||||
extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
|
||||
|
||||
#ifndef pci_iounmap
|
||||
#define pci_iounmap pci_iounmap
|
||||
static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
|
||||
{
|
||||
}
|
||||
|
@ -289,11 +684,15 @@ static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
|
|||
* These are pretty trivial
|
||||
*/
|
||||
#ifndef virt_to_phys
|
||||
#define virt_to_phys virt_to_phys
|
||||
static inline unsigned long virt_to_phys(volatile void *address)
|
||||
{
|
||||
return __pa((unsigned long)address);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef phys_to_virt
|
||||
#define phys_to_virt phys_to_virt
|
||||
static inline void *phys_to_virt(unsigned long address)
|
||||
{
|
||||
return __va(address);
|
||||
|
@ -306,37 +705,65 @@ static inline void *phys_to_virt(unsigned long address)
|
|||
* This implementation is for the no-MMU case only... if you have an MMU
|
||||
* you'll need to provide your own definitions.
|
||||
*/
|
||||
#ifndef CONFIG_MMU
|
||||
static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
|
||||
{
|
||||
return (void __iomem*) (unsigned long)offset;
|
||||
}
|
||||
|
||||
#define __ioremap(offset, size, flags) ioremap(offset, size)
|
||||
#ifndef CONFIG_MMU
|
||||
#ifndef ioremap
|
||||
#define ioremap ioremap
|
||||
static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
|
||||
{
|
||||
return (void __iomem *)(unsigned long)offset;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef __ioremap
|
||||
#define __ioremap __ioremap
|
||||
static inline void __iomem *__ioremap(phys_addr_t offset, size_t size,
|
||||
unsigned long flags)
|
||||
{
|
||||
return ioremap(offset, size);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef ioremap_nocache
|
||||
#define ioremap_nocache ioremap
|
||||
#define ioremap_nocache ioremap_nocache
|
||||
static inline void __iomem *ioremap_nocache(phys_addr_t offset, size_t size)
|
||||
{
|
||||
return ioremap(offset, size);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef ioremap_wc
|
||||
#define ioremap_wc ioremap_nocache
|
||||
#define ioremap_wc ioremap_wc
|
||||
static inline void __iomem *ioremap_wc(phys_addr_t offset, size_t size)
|
||||
{
|
||||
return ioremap_nocache(offset, size);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef iounmap
|
||||
#define iounmap iounmap
|
||||
static inline void iounmap(void __iomem *addr)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
#endif /* CONFIG_MMU */
|
||||
|
||||
#ifdef CONFIG_HAS_IOPORT_MAP
|
||||
#ifndef CONFIG_GENERIC_IOMAP
|
||||
#ifndef ioport_map
|
||||
#define ioport_map ioport_map
|
||||
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
|
||||
{
|
||||
return PCI_IOBASE + (port & IO_SPACE_LIMIT);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef ioport_unmap
|
||||
#define ioport_unmap ioport_unmap
|
||||
static inline void ioport_unmap(void __iomem *p)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
#else /* CONFIG_GENERIC_IOMAP */
|
||||
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
|
||||
extern void ioport_unmap(void __iomem *p);
|
||||
|
@ -344,35 +771,68 @@ extern void ioport_unmap(void __iomem *p);
|
|||
#endif /* CONFIG_HAS_IOPORT_MAP */
|
||||
|
||||
#ifndef xlate_dev_kmem_ptr
|
||||
#define xlate_dev_kmem_ptr(p) p
|
||||
#define xlate_dev_kmem_ptr xlate_dev_kmem_ptr
|
||||
static inline void *xlate_dev_kmem_ptr(void *addr)
|
||||
{
|
||||
return addr;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef xlate_dev_mem_ptr
|
||||
#define xlate_dev_mem_ptr(p) __va(p)
|
||||
#define xlate_dev_mem_ptr xlate_dev_mem_ptr
|
||||
static inline void *xlate_dev_mem_ptr(phys_addr_t addr)
|
||||
{
|
||||
return __va(addr);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef unxlate_dev_mem_ptr
|
||||
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
|
||||
static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_VIRT_TO_BUS
|
||||
#ifndef virt_to_bus
|
||||
static inline unsigned long virt_to_bus(volatile void *address)
|
||||
static inline unsigned long virt_to_bus(void *address)
|
||||
{
|
||||
return ((unsigned long) address);
|
||||
return (unsigned long)address;
|
||||
}
|
||||
|
||||
static inline void *bus_to_virt(unsigned long address)
|
||||
{
|
||||
return (void *) address;
|
||||
return (void *)address;
|
||||
}
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#ifndef memset_io
|
||||
#define memset_io(a, b, c) memset(__io_virt(a), (b), (c))
|
||||
#define memset_io memset_io
|
||||
static inline void memset_io(volatile void __iomem *addr, int value,
|
||||
size_t size)
|
||||
{
|
||||
memset(__io_virt(addr), value, size);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef memcpy_fromio
|
||||
#define memcpy_fromio(a, b, c) memcpy((a), __io_virt(b), (c))
|
||||
#define memcpy_fromio memcpy_fromio
|
||||
static inline void memcpy_fromio(void *buffer,
|
||||
const volatile void __iomem *addr,
|
||||
size_t size)
|
||||
{
|
||||
memcpy(buffer, __io_virt(addr), size);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef memcpy_toio
|
||||
#define memcpy_toio(a, b, c) memcpy(__io_virt(a), (b), (c))
|
||||
#define memcpy_toio memcpy_toio
|
||||
static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer,
|
||||
size_t size)
|
||||
{
|
||||
memcpy(__io_virt(addr), buffer, size);
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* __KERNEL__ */
|