/*
 * arch/sh/mm/ioremap.c
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2005 - 2010 Paul Mundt
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file "COPYING" in the main directory of this
 * archive for more details.
 */
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <asm/io_trapped.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/addrspace.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mmu.h>
#include "ioremap.h"

/*
 * On 32-bit SH, we traditionally have the whole physical address space mapped
 * at all times (as MIPS does), so "ioremap()" and "iounmap()" do not need to do
 * anything but place the address in the proper segment. This is true for P1
 * and P2 addresses, as well as some P3 ones. However, most of the P3 addresses
 * and newer cores using extended addressing need to map through page tables, so
 * the ioremap() implementation becomes a bit more complicated.
 */
#ifdef CONFIG_29BIT
static void __iomem *
__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
	phys_addr_t last_addr = offset + size - 1;

	/*
	 * For P1 and P2 space this is trivial, as everything is already
	 * mapped. Uncached access for P1 addresses are done through P2.
	 * In the P3 case or for addresses outside of the 29-bit space,
	 * mapping must be done by the PMB or by using page tables.
	 */
	if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
		u64 flags = pgprot_val(prot);

		/*
		 * Anything using the legacy PTEA space attributes needs
		 * to be kicked down to page table mappings.
		 */
		if (unlikely(flags & _PAGE_PCC_MASK))
			return NULL;
		if (unlikely(flags & _PAGE_CACHABLE))
			return (void __iomem *)P1SEGADDR(offset);

		return (void __iomem *)P2SEGADDR(offset);
	}

	/* P4 above the store queues are always mapped. */
	if (unlikely(offset >= P3_ADDR_MAX))
		return (void __iomem *)P4SEGADDR(offset);

	return NULL;
}
#else
#define __ioremap_29bit(offset, size, prot)		NULL
#endif /* CONFIG_29BIT */
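
/*
 * Illustrative sketch (not part of the original file): on classic 29-bit
 * parts, P1 and P2 are fixed 512MB windows onto the physical address space,
 * P1 cached and P2 uncached, so "remapping" an address in that range is pure
 * arithmetic. The segment bases below (0x80000000 for P1, 0xa0000000 for P2)
 * and the 29-bit mask are the conventional SH values, assumed here only for
 * the illustration; the kernel proper uses the PXSEG()/P1SEGADDR()/
 * P2SEGADDR() helpers from <asm/addrspace.h>, as __ioremap_29bit() does above.
 */
static inline void __iomem *sketch_29bit_remap(unsigned long phys, int cached)
{
	/* pick the cached (P1) or uncached (P2) fixed window */
	unsigned long seg = cached ? 0x80000000UL : 0xa0000000UL;

	/* keep the low 29 bits of the physical address, place them in the window */
	return (void __iomem *)((phys & 0x1fffffffUL) | seg);
}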

void __iomem __ref *ioremap_prot(phys_addr_t phys_addr, size_t size,
				 unsigned long prot)
{
	void __iomem *mapped;
	pgprot_t pgprot = __pgprot(prot);

	mapped = __ioremap_trapped(phys_addr, size);
	if (mapped)
		return mapped;

	mapped = __ioremap_29bit(phys_addr, size, pgprot);
	if (mapped)
		return mapped;

	/*
	 * If we can't yet use the regular approach, go the fixmap route.
	 */
	if (!mem_init_done)
		return ioremap_fixed(phys_addr, size, pgprot);

	/*
	 * First try to remap through the PMB.
	 * PMB entries are all pre-faulted.
	 */
	mapped = pmb_remap_caller(phys_addr, size, pgprot,
				  __builtin_return_address(0));
	if (mapped && !IS_ERR(mapped))
		return mapped;

	return generic_ioremap_prot(phys_addr, size, pgprot);
}
EXPORT_SYMBOL(ioremap_prot);
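
/*
 * Usage sketch (not part of the original file): a driver typically maps its
 * MMIO window once, accesses registers through the readl()/writel() family
 * on the returned cookie, and releases the mapping with iounmap(). The
 * device base address and register offsets below are invented for the
 * example; only ioremap(), readl(), writel() and iounmap() are real kernel
 * interfaces (declared in <linux/io.h>, already included above).
 */
#define EXAMPLE_MMIO_BASE	0xfe200000UL	/* hypothetical device base */
#define EXAMPLE_MMIO_SIZE	0x100
#define EXAMPLE_REG_CTRL	0x00
#define EXAMPLE_REG_STAT	0x04

static int __maybe_unused example_mmio_poke(void)
{
	void __iomem *regs;
	u32 stat;

	regs = ioremap(EXAMPLE_MMIO_BASE, EXAMPLE_MMIO_SIZE);
	if (!regs)
		return -ENOMEM;

	writel(0x1, regs + EXAMPLE_REG_CTRL);	/* hypothetical enable bit */
	stat = readl(regs + EXAMPLE_REG_STAT);
	iounmap(regs);

	return stat ? 0 : -EIO;
}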

/*
 * Simple checks for non-translatable mappings.
 */
static inline int iomapping_nontranslatable(unsigned long offset)
{
#ifdef CONFIG_29BIT
	/*
	 * In 29-bit mode this includes the fixed P1/P2 areas, as well as
	 * parts of P3.
	 */
	if (PXSEG(offset) < P3SEG || offset >= P3_ADDR_MAX)
		return 1;
#endif

	return 0;
}

void iounmap(volatile void __iomem *addr)
{
	unsigned long vaddr = (unsigned long __force)addr;

	/*
	 * Nothing to do if there is no translatable mapping.
	 */
	if (iomapping_nontranslatable(vaddr))
		return;

	/*
	 * There's no VMA if it's from an early fixed mapping.
	 */
	if (iounmap_fixed((void __iomem *)addr) == 0)
		return;

	/*
	 * If the PMB handled it, there's nothing else to do.
	 */
	if (pmb_unmap((void __iomem *)addr) == 0)
		return;

	generic_iounmap(addr);
}
EXPORT_SYMBOL(iounmap);