// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Intel Corporation
 * Author: Johannes Berg <johannes@sipsolutions.net>
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/logic_iomem.h>
#include <asm/io.h>

struct logic_iomem_region {
	const struct resource *res;
	const struct logic_iomem_region_ops *ops;
	struct list_head list;
};

struct logic_iomem_area {
	const struct logic_iomem_ops *ops;
	void *priv;
};

#define AREA_SHIFT	24
#define MAX_AREA_SIZE	(1 << AREA_SHIFT)
#define MAX_AREAS	((1U << 31) / MAX_AREA_SIZE)
#define AREA_BITS	((MAX_AREAS - 1) << AREA_SHIFT)
#define AREA_MASK	(MAX_AREA_SIZE - 1)
#ifdef CONFIG_64BIT
#define IOREMAP_BIAS	0xDEAD000000000000UL
#define IOREMAP_MASK	0xFFFFFFFF00000000UL
#else
#define IOREMAP_BIAS	0x80000000UL
#define IOREMAP_MASK	0x80000000UL
#endif

static DEFINE_MUTEX(regions_mtx);
static LIST_HEAD(regions_list);
static struct logic_iomem_area mapped_areas[MAX_AREAS];

int logic_iomem_add_region(struct resource *resource,
			   const struct logic_iomem_region_ops *ops)
{
	struct logic_iomem_region *rreg;
	int err;

	if (WARN_ON(!resource || !ops))
		return -EINVAL;

	if (WARN_ON((resource->flags & IORESOURCE_TYPE_BITS) != IORESOURCE_MEM))
		return -EINVAL;

	rreg = kzalloc(sizeof(*rreg), GFP_KERNEL);
	if (!rreg)
		return -ENOMEM;

	err = request_resource(&iomem_resource, resource);
	if (err) {
		kfree(rreg);
		return -ENOMEM;
	}

	mutex_lock(&regions_mtx);
	rreg->res = resource;
	rreg->ops = ops;
	list_add_tail(&rreg->list, &regions_list);
	mutex_unlock(&regions_mtx);

	return 0;
}
EXPORT_SYMBOL(logic_iomem_add_region);
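
/*
 * Example (illustrative sketch, not part of the kernel sources): an
 * emulation backend registers a fixed MMIO window and supplies a map()
 * callback that hands back its access ops.  The callback prototypes
 * below are inferred from the call sites in this file; the
 * authoritative definitions live in include/linux/logic_iomem.h, and
 * all example_* names and addresses are hypothetical.
 *
 *	static unsigned long example_read(void *priv, unsigned int offset,
 *					  int size)
 *	{
 *		return 0;	// return "size" bytes of the emulated register
 *	}
 *
 *	static void example_write(void *priv, unsigned int offset,
 *				  int size, unsigned long val)
 *	{
 *		// update the emulated register at "offset"
 *	}
 *
 *	static const struct logic_iomem_ops example_ops = {
 *		.read	= example_read,
 *		.write	= example_write,
 *	};
 *
 *	static long example_map(unsigned long offset, size_t size,
 *				const struct logic_iomem_ops **ops,
 *				void **priv)
 *	{
 *		*ops = &example_ops;
 *		*priv = NULL;
 *		return 0;	// offset of this mapping within the new area
 *	}
 *
 *	static const struct logic_iomem_region_ops example_region_ops = {
 *		.map	= example_map,
 *	};
 *
 *	static struct resource example_res = DEFINE_RES_MEM(0x10000000, 0x1000);
 *
 *	// at probe/init time:
 *	//	ret = logic_iomem_add_region(&example_res, &example_region_ops);
 */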

#ifndef CONFIG_INDIRECT_IOMEM_FALLBACK
static void __iomem *real_ioremap(phys_addr_t offset, size_t size)
{
	WARN(1, "invalid ioremap(0x%llx, 0x%zx)\n",
	     (unsigned long long)offset, size);
	return NULL;
}

static void real_iounmap(volatile void __iomem *addr)
{
	WARN(1, "invalid iounmap for addr 0x%llx\n",
	     (unsigned long long)(uintptr_t __force)addr);
}
#endif /* CONFIG_INDIRECT_IOMEM_FALLBACK */

void __iomem *ioremap(phys_addr_t offset, size_t size)
{
	void __iomem *ret = NULL;
	struct logic_iomem_region *rreg, *found = NULL;
	int i;

	mutex_lock(&regions_mtx);
	list_for_each_entry(rreg, &regions_list, list) {
		if (rreg->res->start > offset)
			continue;
		if (rreg->res->end < offset + size - 1)
			continue;
		found = rreg;
		break;
	}

	if (!found)
		goto out;

	for (i = 0; i < MAX_AREAS; i++) {
		long offs;

		if (mapped_areas[i].ops)
			continue;

		offs = rreg->ops->map(offset - found->res->start,
				      size, &mapped_areas[i].ops,
				      &mapped_areas[i].priv);
		if (offs < 0) {
			mapped_areas[i].ops = NULL;
			break;
		}

		if (WARN_ON(!mapped_areas[i].ops)) {
			mapped_areas[i].ops = NULL;
			break;
		}

		ret = (void __iomem *)(IOREMAP_BIAS + (i << AREA_SHIFT) + offs);
		break;
	}
out:
	mutex_unlock(&regions_mtx);
	if (ret)
		return ret;
	return real_ioremap(offset, size);
}
EXPORT_SYMBOL(ioremap);
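
/*
 * The cookie returned by ioremap() above and decoded by get_area()
 * below is laid out as IOREMAP_BIAS | (area index << AREA_SHIFT) |
 * offset.  For example, on 64-bit, area 2 mapped at offset 0x100 yields
 * the fake address 0xDEAD000002000100: get_area() verifies the bias via
 * IOREMAP_MASK, recovers index 2 from AREA_BITS (bits 30..24), and the
 * low AREA_SHIFT bits (AREA_MASK) remain the offset handed to the
 * area's ops.
 */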

static inline struct logic_iomem_area *
get_area(const volatile void __iomem *addr)
{
	unsigned long a = (unsigned long)addr;
	unsigned int idx;

	if (WARN_ON((a & IOREMAP_MASK) != IOREMAP_BIAS))
		return NULL;

	idx = (a & AREA_BITS) >> AREA_SHIFT;

	if (mapped_areas[idx].ops)
		return &mapped_areas[idx];

	return NULL;
}

void iounmap(volatile void __iomem *addr)
{
	struct logic_iomem_area *area = get_area(addr);

	if (!area) {
		real_iounmap(addr);
		return;
	}

	if (area->ops->unmap)
		area->ops->unmap(area->priv);

	mutex_lock(&regions_mtx);
	area->ops = NULL;
	area->priv = NULL;
	mutex_unlock(&regions_mtx);
}
EXPORT_SYMBOL(iounmap);

#ifndef CONFIG_INDIRECT_IOMEM_FALLBACK
#define MAKE_FALLBACK(op, sz)						\
static u##sz real_raw_read ## op(const volatile void __iomem *addr)	\
{									\
	WARN(1, "Invalid read" #op " at address %llx\n",		\
	     (unsigned long long)(uintptr_t __force)addr);		\
	return (u ## sz)~0ULL;						\
}									\
									\
static void real_raw_write ## op(u ## sz val,				\
				 volatile void __iomem *addr)		\
{									\
	WARN(1, "Invalid write" #op " of 0x%llx at address %llx\n",	\
	     (unsigned long long)val,					\
	     (unsigned long long)(uintptr_t __force)addr);		\
}									\

MAKE_FALLBACK(b, 8);
MAKE_FALLBACK(w, 16);
MAKE_FALLBACK(l, 32);
#ifdef CONFIG_64BIT
MAKE_FALLBACK(q, 64);
#endif

static void real_memset_io(volatile void __iomem *addr, int value, size_t size)
{
	WARN(1, "Invalid memset_io at address 0x%llx\n",
	     (unsigned long long)(uintptr_t __force)addr);
}

static void real_memcpy_fromio(void *buffer, const volatile void __iomem *addr,
			       size_t size)
{
	WARN(1, "Invalid memcpy_fromio at address 0x%llx\n",
	     (unsigned long long)(uintptr_t __force)addr);

	memset(buffer, 0xff, size);
}

static void real_memcpy_toio(volatile void __iomem *addr, const void *buffer,
			     size_t size)
{
	WARN(1, "Invalid memcpy_toio at address 0x%llx\n",
	     (unsigned long long)(uintptr_t __force)addr);
}
#endif /* CONFIG_INDIRECT_IOMEM_FALLBACK */

#define MAKE_OP(op, sz)							\
u##sz __raw_read ## op(const volatile void __iomem *addr)		\
{									\
	struct logic_iomem_area *area = get_area(addr);			\
									\
	if (!area)							\
		return real_raw_read ## op(addr);			\
									\
	return (u ## sz) area->ops->read(area->priv,			\
					 (unsigned long)addr & AREA_MASK,\
					 sz / 8);			\
}									\
EXPORT_SYMBOL(__raw_read ## op);					\
									\
void __raw_write ## op(u ## sz val, volatile void __iomem *addr)	\
{									\
	struct logic_iomem_area *area = get_area(addr);			\
									\
	if (!area) {							\
		real_raw_write ## op(val, addr);			\
		return;							\
	}								\
									\
	area->ops->write(area->priv,					\
			 (unsigned long)addr & AREA_MASK,		\
			 sz / 8, val);					\
}									\
EXPORT_SYMBOL(__raw_write ## op)
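
/*
 * The instantiations below generate __raw_readb()/__raw_writeb(),
 * __raw_readw()/__raw_writew() and __raw_readl()/__raw_writel() (plus
 * __raw_readq()/__raw_writeq() on 64-bit).  For example, MAKE_OP(l, 32)
 * expands to __raw_readl()/__raw_writel(), which dispatch to
 * area->ops->read()/->write() with a size of 4 bytes, or fall through
 * to real_raw_readl()/real_raw_writel() (the WARN stubs above, or the
 * fallbacks provided elsewhere when CONFIG_INDIRECT_IOMEM_FALLBACK is
 * enabled) for addresses outside any logic_iomem area.
 */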

MAKE_OP(b, 8);
MAKE_OP(w, 16);
MAKE_OP(l, 32);
#ifdef CONFIG_64BIT
MAKE_OP(q, 64);
#endif

void memset_io(volatile void __iomem *addr, int value, size_t size)
{
	struct logic_iomem_area *area = get_area(addr);
	unsigned long offs, start;

	if (!area) {
		real_memset_io(addr, value, size);
		return;
	}

	start = (unsigned long)addr & AREA_MASK;

	if (area->ops->set) {
		area->ops->set(area->priv, start, value, size);
		return;
	}

	for (offs = 0; offs < size; offs++)
		area->ops->write(area->priv, start + offs, 1, value);
}
EXPORT_SYMBOL(memset_io);

void memcpy_fromio(void *buffer, const volatile void __iomem *addr,
		   size_t size)
{
	struct logic_iomem_area *area = get_area(addr);
	u8 *buf = buffer;
	unsigned long offs, start;

	if (!area) {
		real_memcpy_fromio(buffer, addr, size);
		return;
	}

	start = (unsigned long)addr & AREA_MASK;

	if (area->ops->copy_from) {
		area->ops->copy_from(area->priv, buffer, start, size);
		return;
	}

	for (offs = 0; offs < size; offs++)
		buf[offs] = area->ops->read(area->priv, start + offs, 1);
}
EXPORT_SYMBOL(memcpy_fromio);

void memcpy_toio(volatile void __iomem *addr, const void *buffer, size_t size)
{
	struct logic_iomem_area *area = get_area(addr);
	const u8 *buf = buffer;
	unsigned long offs, start;

	if (!area) {
		real_memcpy_toio(addr, buffer, size);
		return;
	}

	start = (unsigned long)addr & AREA_MASK;

	if (area->ops->copy_to) {
		area->ops->copy_to(area->priv, start, buffer, size);
		return;
	}

	for (offs = 0; offs < size; offs++)
		area->ops->write(area->priv, start + offs, 1, buf[offs]);
}
EXPORT_SYMBOL(memcpy_toio);
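
/*
 * Consumer side (illustrative sketch, hypothetical address and names):
 * once a region covering 0x10000000 has been registered as in the
 * example near the top of this file, ordinary MMIO accesses reach the
 * registered ops, since the generic readl()/writel() wrappers are built
 * on the __raw_* accessors defined above.
 *
 *	void __iomem *regs = ioremap(0x10000000, 0x1000);
 *
 *	if (regs) {
 *		u32 val = readl(regs + 0x10);	// area->ops->read(priv, 0x10, 4)
 *		writel(val | 0x1, regs + 0x10);	// area->ops->write(priv, 0x10, 4, ...)
 *		iounmap(regs);			// calls area->ops->unmap(priv), if set
 *	}
 */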