TOMOYO Linux Cross Reference
Linux/arch/arm64/include/asm/io.h

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/io.h
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_IO_H
#define __ASM_IO_H

#include <linux/types.h>
#include <linux/pgtable.h>

#include <asm/byteorder.h>
#include <asm/barrier.h>
#include <asm/memory.h>
#include <asm/early_ioremap.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>

/*
 * Generic IO read/write.  These perform native-endian accesses.
 */
#define __raw_writeb __raw_writeb
static __always_inline void __raw_writeb(u8 val, volatile void __iomem *addr)
{
        volatile u8 __iomem *ptr = addr;
        asm volatile("strb %w0, %1" : : "rZ" (val), "Qo" (*ptr));
}

#define __raw_writew __raw_writew
static __always_inline void __raw_writew(u16 val, volatile void __iomem *addr)
{
        volatile u16 __iomem *ptr = addr;
        asm volatile("strh %w0, %1" : : "rZ" (val), "Qo" (*ptr));
}

#define __raw_writel __raw_writel
static __always_inline void __raw_writel(u32 val, volatile void __iomem *addr)
{
        volatile u32 __iomem *ptr = addr;
        asm volatile("str %w0, %1" : : "rZ" (val), "Qo" (*ptr));
}

#define __raw_writeq __raw_writeq
static __always_inline void __raw_writeq(u64 val, volatile void __iomem *addr)
{
        volatile u64 __iomem *ptr = addr;
        asm volatile("str %x0, %1" : : "rZ" (val), "Qo" (*ptr));
}

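/*
 * In the asm constraints above, "rZ" lets the compiler pass the zero
 * register (wzr/xzr) when val is a compile-time zero, and the "Qo"
 * memory operand ties the store to the MMIO location itself, so each
 * helper compiles to a single store of the named width.
 */
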
#define __raw_readb __raw_readb
static __always_inline u8 __raw_readb(const volatile void __iomem *addr)
{
        u8 val;
        asm volatile(ALTERNATIVE("ldrb %w0, [%1]",
                                 "ldarb %w0, [%1]",
                                 ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
                     : "=r" (val) : "r" (addr));
        return val;
}

#define __raw_readw __raw_readw
static __always_inline u16 __raw_readw(const volatile void __iomem *addr)
{
        u16 val;

        asm volatile(ALTERNATIVE("ldrh %w0, [%1]",
                                 "ldarh %w0, [%1]",
                                 ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
                     : "=r" (val) : "r" (addr));
        return val;
}

#define __raw_readl __raw_readl
static __always_inline u32 __raw_readl(const volatile void __iomem *addr)
{
        u32 val;
        asm volatile(ALTERNATIVE("ldr %w0, [%1]",
                                 "ldar %w0, [%1]",
                                 ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
                     : "=r" (val) : "r" (addr));
        return val;
}

#define __raw_readq __raw_readq
static __always_inline u64 __raw_readq(const volatile void __iomem *addr)
{
        u64 val;
        asm volatile(ALTERNATIVE("ldr %0, [%1]",
                                 "ldar %0, [%1]",
                                 ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
                     : "=r" (val) : "r" (addr));
        return val;
}
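
/*
 * On CPUs affected by ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE, the plain
 * loads above are patched at boot via the alternatives framework into
 * the load-acquire (ldar*) forms, which enforce the required ordering
 * of device reads on those parts.
 */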

/* IO barriers */
#define __io_ar(v)                                                      \
({                                                                      \
        unsigned long tmp;                                              \
                                                                        \
        dma_rmb();                                                      \
                                                                        \
        /*                                                              \
         * Create a dummy control dependency from the IO read to any    \
         * later instructions. This ensures that a subsequent call to   \
         * udelay() will be ordered due to the ISB in get_cycles().     \
         */                                                             \
        asm volatile("eor       %0, %1, %1\n"                           \
                     "cbnz      %0, ."                                  \
                     : "=r" (tmp) : "r" ((unsigned long)(v))            \
                     : "memory");                                       \
})

#define __io_bw()               dma_wmb()
#define __io_br(v)
#define __io_aw(v)

/* arm64-specific, don't use in portable drivers */
#define __iormb(v)              __io_ar(v)
#define __iowmb()               __io_bw()
#define __iomb()                dma_mb()

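/*
 * Illustrative sketch (not part of the kernel header; the function name
 * is hypothetical): driver code relying on the ordering above. readl()
 * ends in __io_ar(), so the udelay() below (from <linux/delay.h>) cannot
 * begin before the MMIO read has completed.
 */
static inline u32 example_read_then_delay(void __iomem *status_reg)
{
        u32 status = readl(status_reg); /* raw read, then __io_ar() */

        udelay(10);     /* ordered after the read by the control dependency */
        return status;
}
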
/*
 *  I/O port access primitives.
 */
#define arch_has_dev_port()     (1)
#define IO_SPACE_LIMIT          (PCI_IO_SIZE - 1)
#define PCI_IOBASE              ((void __iomem *)PCI_IO_START)

/*
 * String version of I/O memory access operations.
 */
extern void __memcpy_fromio(void *, const volatile void __iomem *, size_t);
extern void __memcpy_toio(volatile void __iomem *, const void *, size_t);
extern void __memset_io(volatile void __iomem *, int, size_t);

#define memset_io(c,v,l)        __memset_io((c),(v),(l))
#define memcpy_fromio(a,c,l)    __memcpy_fromio((a),(c),(l))
#define memcpy_toio(c,a,l)      __memcpy_toio((c),(a),(l))

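/*
 * Illustrative sketch (not part of the kernel header; the name and the
 * 0x100 offset are hypothetical): the string variants move whole buffers
 * between MMIO and normal memory, e.g. reading a device ID block:
 */
static inline void example_read_id_block(void __iomem *regs, u8 *id)
{
        /* copies 16 bytes out of MMIO into a normal kernel buffer */
        memcpy_fromio(id, regs + 0x100, 16);
}
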
/*
 * The ARM64 iowrite implementation is intended to support drivers that want to
 * use write combining. For instance, PCI drivers using write combining with a
 * 64-byte __iowrite64_copy() expect to get a 64-byte MemWr TLP on the PCIe bus.
 *
 * Newer ARM cores have sensitive write-combining buffers, so it is important
 * that the stores be contiguous blocks of store instructions. Normal memcpy
 * approaches have a very low chance of generating write combining.
 *
 * Since this is the only API on ARM64 that should be used with write combining,
 * it also integrates the DGH hint, which is supposed to lower the latency of
 * emitting the large TLP from the CPU.
 */

static __always_inline void
__const_memcpy_toio_aligned32(volatile u32 __iomem *to, const u32 *from,
                              size_t count)
{
        switch (count) {
        case 8:
                asm volatile("str %w0, [%8, #4 * 0]\n"
                             "str %w1, [%8, #4 * 1]\n"
                             "str %w2, [%8, #4 * 2]\n"
                             "str %w3, [%8, #4 * 3]\n"
                             "str %w4, [%8, #4 * 4]\n"
                             "str %w5, [%8, #4 * 5]\n"
                             "str %w6, [%8, #4 * 6]\n"
                             "str %w7, [%8, #4 * 7]\n"
                             :
                             : "rZ"(from[0]), "rZ"(from[1]), "rZ"(from[2]),
                               "rZ"(from[3]), "rZ"(from[4]), "rZ"(from[5]),
                               "rZ"(from[6]), "rZ"(from[7]), "r"(to));
                break;
        case 4:
                asm volatile("str %w0, [%4, #4 * 0]\n"
                             "str %w1, [%4, #4 * 1]\n"
                             "str %w2, [%4, #4 * 2]\n"
                             "str %w3, [%4, #4 * 3]\n"
                             :
                             : "rZ"(from[0]), "rZ"(from[1]), "rZ"(from[2]),
                               "rZ"(from[3]), "r"(to));
                break;
        case 2:
                asm volatile("str %w0, [%2, #4 * 0]\n"
                             "str %w1, [%2, #4 * 1]\n"
                             :
                             : "rZ"(from[0]), "rZ"(from[1]), "r"(to));
                break;
        case 1:
                __raw_writel(*from, to);
                break;
        default:
                BUILD_BUG();
        }
}

void __iowrite32_copy_full(void __iomem *to, const void *from, size_t count);

static __always_inline void
__iowrite32_copy(void __iomem *to, const void *from, size_t count)
{
        if (__builtin_constant_p(count) &&
            (count == 8 || count == 4 || count == 2 || count == 1)) {
                __const_memcpy_toio_aligned32(to, from, count);
                dgh();
        } else {
                __iowrite32_copy_full(to, from, count);
        }
}
#define __iowrite32_copy __iowrite32_copy
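
/*
 * For the constant counts handled above, the copy inlines to a straight
 * run of str instructions followed by dgh(), the Data Gathering Hint,
 * which discourages the CPU from gathering later stores into the same
 * write-combining buffer and so prompts it to emit the TLP promptly.
 * Any other count falls back to the out-of-line __iowrite32_copy_full().
 */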

static __always_inline void
__const_memcpy_toio_aligned64(volatile u64 __iomem *to, const u64 *from,
                              size_t count)
{
        switch (count) {
        case 8:
                asm volatile("str %x0, [%8, #8 * 0]\n"
                             "str %x1, [%8, #8 * 1]\n"
                             "str %x2, [%8, #8 * 2]\n"
                             "str %x3, [%8, #8 * 3]\n"
                             "str %x4, [%8, #8 * 4]\n"
                             "str %x5, [%8, #8 * 5]\n"
                             "str %x6, [%8, #8 * 6]\n"
                             "str %x7, [%8, #8 * 7]\n"
                             :
                             : "rZ"(from[0]), "rZ"(from[1]), "rZ"(from[2]),
                               "rZ"(from[3]), "rZ"(from[4]), "rZ"(from[5]),
                               "rZ"(from[6]), "rZ"(from[7]), "r"(to));
                break;
        case 4:
                asm volatile("str %x0, [%4, #8 * 0]\n"
                             "str %x1, [%4, #8 * 1]\n"
                             "str %x2, [%4, #8 * 2]\n"
                             "str %x3, [%4, #8 * 3]\n"
                             :
                             : "rZ"(from[0]), "rZ"(from[1]), "rZ"(from[2]),
                               "rZ"(from[3]), "r"(to));
                break;
        case 2:
                asm volatile("str %x0, [%2, #8 * 0]\n"
                             "str %x1, [%2, #8 * 1]\n"
                             :
                             : "rZ"(from[0]), "rZ"(from[1]), "r"(to));
                break;
        case 1:
                __raw_writeq(*from, to);
                break;
        default:
                BUILD_BUG();
        }
}

void __iowrite64_copy_full(void __iomem *to, const void *from, size_t count);

static __always_inline void
__iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
        if (__builtin_constant_p(count) &&
            (count == 8 || count == 4 || count == 2 || count == 1)) {
                __const_memcpy_toio_aligned64(to, from, count);
                dgh();
        } else {
                __iowrite64_copy_full(to, from, count);
        }
}
#define __iowrite64_copy __iowrite64_copy

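/*
 * Illustrative sketch (not part of the kernel header; the function name is
 * hypothetical): a driver posting a 64-byte work-queue entry through a
 * write-combining BAR mapping passes a constant count of 8 so that the
 * inlined store block above is selected:
 */
static inline void example_post_wqe(void __iomem *wc_doorbell, const u64 *wqe)
{
        /* eight contiguous 64-bit stores, then DGH: intended as one TLP */
        __iowrite64_copy(wc_doorbell, wqe, 8);
}
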
/*
 * I/O memory mapping functions.
 */

#define ioremap_prot ioremap_prot

#define _PAGE_IOREMAP PROT_DEVICE_nGnRE

#define ioremap_wc(addr, size)  \
        ioremap_prot((addr), (size), PROT_NORMAL_NC)
#define ioremap_np(addr, size)  \
        ioremap_prot((addr), (size), PROT_DEVICE_nGnRnE)

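/*
 * Memory types behind these helpers: plain ioremap() uses Device nGnRE
 * (_PAGE_IOREMAP above), ioremap_np() uses the stricter non-posted
 * Device nGnRnE, and ioremap_wc() uses Normal Non-Cacheable, which is
 * what allows write combining on the mapping.
 */
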
/*
 * io{read,write}{16,32,64}be() macros
 */
#define ioread16be(p)           ({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(__v); __v; })
#define ioread32be(p)           ({ __u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(__v); __v; })
#define ioread64be(p)           ({ __u64 __v = be64_to_cpu((__force __be64)__raw_readq(p)); __iormb(__v); __v; })

#define iowrite16be(v,p)        ({ __iowmb(); __raw_writew((__force __u16)cpu_to_be16(v), p); })
#define iowrite32be(v,p)        ({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); })
#define iowrite64be(v,p)        ({ __iowmb(); __raw_writeq((__force __u64)cpu_to_be64(v), p); })

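/*
 * Illustrative sketch (not part of the kernel header; the 0x10 offset is
 * hypothetical): the helpers above byte-swap around the raw access, so a
 * big-endian device register can be read directly on little-endian arm64:
 */
static inline u32 example_read_be_reg(void __iomem *regs)
{
        return ioread32be(regs + 0x10); /* byte swap + __iormb() barrier */
}
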
#include <asm-generic/io.h>

#define ioremap_cache ioremap_cache
static inline void __iomem *ioremap_cache(phys_addr_t addr, size_t size)
{
        if (pfn_is_map_memory(__phys_to_pfn(addr)))
                return (void __iomem *)__phys_to_virt(addr);

        return ioremap_prot(addr, size, PROT_NORMAL);
}

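/*
 * If the physical address is covered by the kernel linear map, the
 * existing cacheable alias is returned instead of building a second
 * mapping; only non-RAM addresses get a fresh PROT_NORMAL mapping.
 */
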
/*
 * More restrictive address range checking than the default implementation
 * (PHYS_OFFSET and PHYS_MASK taken into account).
 */
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);

extern bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
                                        unsigned long flags);
#define arch_memremap_can_ram_remap arch_memremap_can_ram_remap

#endif  /* __ASM_IO_H */
