
TOMOYO Linux Cross Reference
Linux/kernel/iomem.c


Diff markup

Differences between /kernel/iomem.c (Version linux-6.11.5) and /kernel/iomem.c (Version linux-5.9.16)


--- kernel/iomem.c (linux-5.9.16)
+++ kernel/iomem.c (linux-6.11.5)
@@ -1,167 +1,164 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #include <linux/device.h>
 #include <linux/types.h>
 #include <linux/io.h>
 #include <linux/mm.h>
-
-#ifndef ioremap_cache
-/* temporary while we convert existing ioremap_cache users to memremap */
-__weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
-{
-        return ioremap(offset, size);
-}
-#endif
+#include <linux/ioremap.h>
 
 #ifndef arch_memremap_wb
 static void *arch_memremap_wb(resource_size_t offset, unsigned long size)
 {
+#ifdef ioremap_cache
         return (__force void *)ioremap_cache(offset, size);
+#else
+        return (__force void *)ioremap(offset, size);
+#endif
 }
 #endif
 
 #ifndef arch_memremap_can_ram_remap
 static bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size,
                                         unsigned long flags)
 {
         return true;
 }
 #endif
 
 static void *try_ram_remap(resource_size_t offset, size_t size,
                            unsigned long flags)
 {
         unsigned long pfn = PHYS_PFN(offset);
 
         /* In the simple case just return the existing linear address */
         if (pfn_valid(pfn) && !PageHighMem(pfn_to_page(pfn)) &&
             arch_memremap_can_ram_remap(offset, size, flags))
                 return __va(offset);
 
         return NULL; /* fallback to arch_memremap_wb */
 }
 
 /**
  * memremap() - remap an iomem_resource as cacheable memory
  * @offset: iomem resource start address
  * @size: size of remap
  * @flags: any of MEMREMAP_WB, MEMREMAP_WT, MEMREMAP_WC,
  *                MEMREMAP_ENC, MEMREMAP_DEC
  *
  * memremap() is "ioremap" for cases where it is known that the resource
  * being mapped does not have i/o side effects and the __iomem
  * annotation is not applicable. In the case of multiple flags, the different
  * mapping types will be attempted in the order listed below until one of
  * them succeeds.
  *
  * MEMREMAP_WB - matches the default mapping for System RAM on
  * the architecture.  This is usually a read-allocate write-back cache.
  * Moreover, if MEMREMAP_WB is specified and the requested remap region is RAM
  * memremap() will bypass establishing a new mapping and instead return
  * a pointer into the direct map.
  *
  * MEMREMAP_WT - establish a mapping whereby writes either bypass the
  * cache or are written through to memory and never exist in a
  * cache-dirty state with respect to program visibility.  Attempts to
  * map System RAM with this mapping type will fail.
  *
  * MEMREMAP_WC - establish a writecombine mapping, whereby writes may
  * be coalesced together (e.g. in the CPU's write buffers), but is otherwise
  * uncached. Attempts to map System RAM with this mapping type will fail.
  */
 void *memremap(resource_size_t offset, size_t size, unsigned long flags)
 {
         int is_ram = region_intersects(offset, size,
                                        IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
         void *addr = NULL;
 
         if (!flags)
                 return NULL;
 
         if (is_ram == REGION_MIXED) {
                 WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
                                 &offset, (unsigned long) size);
                 return NULL;
         }
 
         /* Try all mapping types requested until one returns non-NULL */
         if (flags & MEMREMAP_WB) {
                 /*
                  * MEMREMAP_WB is special in that it can be satisfied
                  * from the direct map.  Some archs depend on the
                  * capability of memremap() to autodetect cases where
                  * the requested range is potentially in System RAM.
                  */
                 if (is_ram == REGION_INTERSECTS)
                         addr = try_ram_remap(offset, size, flags);
                 if (!addr)
                         addr = arch_memremap_wb(offset, size);
         }
 
         /*
          * If we don't have a mapping yet and other request flags are
          * present then we will be attempting to establish a new virtual
          * address mapping.  Enforce that this mapping is not aliasing
          * System RAM.
          */
         if (!addr && is_ram == REGION_INTERSECTS && flags != MEMREMAP_WB) {
                 WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
                                 &offset, (unsigned long) size);
                 return NULL;
         }
 
         if (!addr && (flags & MEMREMAP_WT))
                 addr = ioremap_wt(offset, size);
 
         if (!addr && (flags & MEMREMAP_WC))
                 addr = ioremap_wc(offset, size);
 
         return addr;
 }
 EXPORT_SYMBOL(memremap);
 
 void memunmap(void *addr)
 {
         if (is_ioremap_addr(addr))
                 iounmap((void __iomem *) addr);
 }
 EXPORT_SYMBOL(memunmap);
 
 static void devm_memremap_release(struct device *dev, void *res)
 {
         memunmap(*(void **)res);
 }
 
 static int devm_memremap_match(struct device *dev, void *res, void *match_data)
 {
         return *(void **)res == match_data;
 }
 
 void *devm_memremap(struct device *dev, resource_size_t offset,
                 size_t size, unsigned long flags)
 {
         void **ptr, *addr;
 
         ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
                         dev_to_node(dev));
         if (!ptr)
                 return ERR_PTR(-ENOMEM);
 
         addr = memremap(offset, size, flags);
         if (addr) {
                 *ptr = addr;
                 devres_add(dev, ptr);
         } else {
                 devres_free(ptr);
                 return ERR_PTR(-ENXIO);
         }
 
         return addr;
 }
 EXPORT_SYMBOL(devm_memremap);
 
 void devm_memunmap(struct device *dev, void *addr)
 {
         WARN_ON(devres_release(dev, devm_memremap_release,
                                 devm_memremap_match, addr));
 }
 EXPORT_SYMBOL(devm_memunmap);
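Usage note (not part of the file above): a minimal sketch of how a caller might use the memremap()/memunmap() pair and the flag-fallback order described in the kernel-doc. The EXAMPLE_* names and the physical range are made-up placeholders, not real kernel symbols; this is an illustration, not code from either kernel version.

#include <linux/io.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/string.h>

#define EXAMPLE_PHYS_BASE       0x80000000UL    /* placeholder, not a real address */
#define EXAMPLE_SIZE            SZ_4K           /* placeholder size */

static void *example_va;

static int __init example_init(void)
{
        /*
         * Request a write-back mapping first; MEMREMAP_WT is only tried if
         * the WB attempt fails, matching the flag order documented above
         * memremap().  If the range is System RAM, the WB case returns a
         * pointer into the direct map instead of creating a new mapping.
         */
        example_va = memremap(EXAMPLE_PHYS_BASE, EXAMPLE_SIZE,
                              MEMREMAP_WB | MEMREMAP_WT);
        if (!example_va)
                return -ENOMEM;

        /* No __iomem annotation: plain loads and stores are fine here. */
        memset(example_va, 0, EXAMPLE_SIZE);
        return 0;
}

static void __exit example_exit(void)
{
        memunmap(example_va);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");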

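A second sketch, also hypothetical, for the device-managed variant: devm_memremap() ties the mapping to the device via devres, so devm_memremap_release() calls memunmap() automatically when the driver unbinds. The example_probe() function and the resource lookup are illustrative assumptions only.

#include <linux/err.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
        struct resource *res;
        void *shmem;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -ENODEV;

        /*
         * Lifetime follows the device: no explicit memunmap() is needed in
         * the remove path.  On failure an ERR_PTR is returned (-ENOMEM if
         * the devres allocation fails, -ENXIO if memremap() fails).
         */
        shmem = devm_memremap(&pdev->dev, res->start, resource_size(res),
                              MEMREMAP_WB);
        if (IS_ERR(shmem))
                return PTR_ERR(shmem);

        /* shmem can now be used as ordinary, cacheable memory. */
        return 0;
}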