// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/cache.h>

static inline bool arc_uncached_addr_space(phys_addr_t paddr)
{
	if (is_isa_arcompact()) {
		if (paddr >= ARC_UNCACHED_ADDR_SPACE)
			return true;
	} else if (paddr >= perip_base && paddr <= perip_end) {
		return true;
	}

	return false;
}

void __iomem *ioremap(phys_addr_t paddr, unsigned long size)
{
	/*
	 * If the region is h/w uncached, MMU mapping can be elided as optim
	 * The cast to u32 is fine as this region can only be inside 4GB
	 */
	if (arc_uncached_addr_space(paddr))
		return (void __iomem *)(u32)paddr;

	return ioremap_prot(paddr, size,
			    pgprot_val(pgprot_noncached(PAGE_KERNEL)));
}
EXPORT_SYMBOL(ioremap);

/*
 * ioremap with access flags
 * Cache semantics wise it is same as ioremap - "forced" uncached.
 * However unlike vanilla ioremap which bypasses ARC MMU for addresses in
 * ARC hardware uncached region, this one still goes thru the MMU as caller
 * might need finer access control (R/W/X)
 */
void __iomem *ioremap_prot(phys_addr_t paddr, size_t size,
			   unsigned long flags)
{
	pgprot_t prot = __pgprot(flags);

	/* force uncached */
	return generic_ioremap_prot(paddr, size, pgprot_noncached(prot));
}
EXPORT_SYMBOL(ioremap_prot);

void iounmap(volatile void __iomem *addr)
{
	/* weird double cast to handle phys_addr_t > 32 bits */
	if (arc_uncached_addr_space((phys_addr_t)(u32)addr))
		return;

	generic_iounmap(addr);
}
EXPORT_SYMBOL(iounmap);
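
/*
 * Usage sketch (not part of the file above): how a driver would typically
 * consume the ioremap()/iounmap() API implemented above.  The base address,
 * size, register offset and example_probe() name are hypothetical
 * placeholders chosen for illustration only.
 */
#include <linux/io.h>
#include <linux/errno.h>

#define EXAMPLE_MMIO_BASE	0xf0001000UL	/* hypothetical peripheral base */
#define EXAMPLE_MMIO_SIZE	0x1000UL
#define EXAMPLE_STATUS_REG	0x04		/* hypothetical register offset */

static int example_probe(void)
{
	void __iomem *regs;
	u32 status;

	/* Returns an uncached cookie; may be the physical address itself on ARC */
	regs = ioremap(EXAMPLE_MMIO_BASE, EXAMPLE_MMIO_SIZE);
	if (!regs)
		return -ENOMEM;

	/* Access the mapping only through the MMIO helpers */
	status = readl(regs + EXAMPLE_STATUS_REG);
	writel(status | 0x1, regs + EXAMPLE_STATUS_REG);

	iounmap(regs);
	return 0;
}

/*
 * The old arch/i386/mm/ioremap.c implementation that the ARC file above was
 * shown against in the original side-by-side view follows.
 */
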
/*
 * arch/i386/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>

static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;
	unsigned long pfn;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();
	pfn = phys_addr >> PAGE_SHIFT;
	do {
		if (!pte_none(*pte)) {
			printk("remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, __pgprot(_PAGE_PRESENT | _PAGE_RW |
					_PAGE_DIRTY | _PAGE_ACCESSED | flags)));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}

static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}

static int remap_area_pages(unsigned long address, unsigned long phys_addr,
			    unsigned long size, unsigned long flags)
{
	int error;
	pgd_t * dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	flush_cache_all();
	if (address >= end)
		BUG();
	spin_lock(&init_mm.page_table_lock);
	do {
		pmd_t *pmd;
		pmd = pmd_alloc(&init_mm, dir, address);
		error = -ENOMEM;
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
					 phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	spin_unlock(&init_mm.page_table_lock);
	flush_tlb_all();
	return error;
}

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void * addr;
	struct vm_struct * area;
	unsigned long offset, last_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= 0xA0000 && last_addr < 0x100000)
		return phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if(!PageReserved(page))
				return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
		vunmap(addr);
		return NULL;
	}
	return (void *) (offset + (char *)addr);
}
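
/*
 * Worked example (illustration only, not kernel code): the offset and
 * alignment arithmetic that __ioremap() above applies to a request that is
 * not page-aligned.  Assumes 4 KiB pages; the physical address is a made-up
 * value.  Compiles as a stand-alone user-space program.
 */
#include <assert.h>
#include <stdio.h>

#define EX_PAGE_SIZE	4096UL
#define EX_PAGE_MASK	(~(EX_PAGE_SIZE - 1))
#define EX_PAGE_ALIGN(x)	(((x) + EX_PAGE_SIZE - 1) & EX_PAGE_MASK)

int main(void)
{
	unsigned long phys_addr = 0xfebc1a10UL;	/* hypothetical, mid-page */
	unsigned long size = 0x100UL;
	unsigned long last_addr = phys_addr + size - 1;

	unsigned long offset = phys_addr & ~EX_PAGE_MASK;	/* 0xa10 */
	unsigned long base = phys_addr & EX_PAGE_MASK;		/* 0xfebc1000 */
	unsigned long map_size = EX_PAGE_ALIGN(last_addr + 1) - base;

	assert(offset == 0xa10);
	assert(base == 0xfebc1000UL);
	assert(map_size == EX_PAGE_SIZE);	/* rounded up to one full page */

	/* The caller receives the page-aligned mapping address plus 'offset'. */
	printf("offset=%#lx base=%#lx map_size=%#lx\n", offset, base, map_size);
	return 0;
}
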
/**
 * ioremap_nocache - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void *ioremap_nocache (unsigned long phys_addr, unsigned long size)
{
	unsigned long last_addr;
	void *p = __ioremap(phys_addr, size, _PAGE_PCD);
	if (!p)
		return p;

	/* Guaranteed to be > phys_addr, as per __ioremap() */
	last_addr = phys_addr + size - 1;

	if (last_addr < virt_to_phys(high_memory)) {
		struct page *ppage = virt_to_page(__va(phys_addr));
		unsigned long npages;

		phys_addr &= PAGE_MASK;

		/* This might overflow and become zero.. */
		last_addr = PAGE_ALIGN(last_addr);

		/* .. but that's ok, because modulo-2**n arithmetic will make
		 * the page-aligned "last - first" come out right.
		 */
		npages = (last_addr - phys_addr) >> PAGE_SHIFT;

		if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
			iounmap(p);
			p = NULL;
		}
		global_flush_tlb();
	}

	return p;
}
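
/*
 * Worked example (illustration only, not kernel code): the "modulo-2**n"
 * comment in ioremap_nocache() above.  With 32-bit unsigned arithmetic,
 * PAGE_ALIGN(last_addr) can wrap around to zero, yet the subtraction still
 * yields the right page count.  The range below is a made-up extreme case.
 */
#include <assert.h>
#include <stdint.h>

#define EX_PAGE_SHIFT	12
#define EX_PAGE_SIZE	((uint32_t)1 << EX_PAGE_SHIFT)
#define EX_PAGE_MASK	(~(EX_PAGE_SIZE - 1))
#define EX_PAGE_ALIGN(x)	(((x) + EX_PAGE_SIZE - 1) & EX_PAGE_MASK)

int main(void)
{
	uint32_t phys_addr = 0xfffff000u;	/* hypothetical, page-aligned */
	uint32_t last_addr = 0xffffffffu;	/* last byte of a 4 KiB range */

	uint32_t aligned = EX_PAGE_ALIGN(last_addr);	/* wraps to 0 */
	uint32_t npages = (aligned - phys_addr) >> EX_PAGE_SHIFT;

	assert(aligned == 0);
	assert(npages == 1);			/* still the correct count */
	return 0;
}
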
void iounmap(void *addr)
{
	struct vm_struct *p;
	if (addr <= high_memory)
		return;
	p = remove_vm_area((void *) (PAGE_MASK & (unsigned long) addr));
	if (!p) {
		printk("__iounmap: bad address %p\n", addr);
		return;
	}

	if (p->flags && p->phys_addr < virt_to_phys(high_memory)) {
		change_page_attr(virt_to_page(__va(p->phys_addr)),
				 p->size >> PAGE_SHIFT,
				 PAGE_KERNEL);
		global_flush_tlb();
	}
	kfree(p);
}

void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= 0xA0000 && last_addr < 0x100000)
		return phys_to_virt(phys_addr);

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS)
		return NULL;

	/*
	 * Ok, go for it..
	 */
	idx = FIX_BTMAP_BEGIN;
	while (nrpages > 0) {
		set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
}

void __init bt_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
		return;
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN;
	while (nrpages > 0) {
		clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}
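
/*
 * Usage sketch (illustration only, not from the kernel sources):
 * bt_ioremap()/bt_iounmap() above provide temporary fixmap-based mappings
 * for early boot, before the vmalloc-based ioremap() is usable, e.g. to
 * inspect a firmware table.  The physical address, size and the
 * example_scan_table() helper name are made-up placeholders.
 */
static int __init example_scan_table(void)
{
	void *hdr;

	hdr = bt_ioremap(0x000fd000UL, 0x1000UL);	/* hypothetical table location */
	if (!hdr)
		return -ENOMEM;

	/* ... parse the table through 'hdr' ... */

	bt_iounmap(hdr, 0x1000UL);
	return 0;
}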