/*
 * arch/sh/mm/ioremap.c
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2005 - 2010 Paul Mundt
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file "COPYING" in the main directory of this
 * archive for more details.
 */
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <asm/io_trapped.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/addrspace.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mmu.h>
#include "ioremap.h"

/*
 * On 32-bit SH, we traditionally have the whole physical address space mapped
 * at all times (as MIPS does), so "ioremap()" and "iounmap()" do not need to do
 * anything but place the address in the proper segment. This is true for P1
 * and P2 addresses, as well as some P3 ones. However, most of the P3 addresses
 * and newer cores using extended addressing need slightly more care, so
 * the ioremap() implementation becomes a bit more complicated.
 */
#ifdef CONFIG_29BIT
static void __iomem *
__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
        phys_addr_t last_addr = offset + size - 1;

        /*
         * For P1 and P2 space this is trivial, as everything is already
         * mapped. Uncached access for P1 addresses are done through P2.
         * In the P3 case or for addresses outside of the 29-bit space,
         * mapping must be done by the PMB or by using page tables.
         */
        if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
                u64 flags = pgprot_val(prot);

                /*
                 * Anything using the legacy PTEA space attributes needs
                 * to be kicked down to page table mappings.
                 */
                if (unlikely(flags & _PAGE_PCC_MASK))
                        return NULL;
                if (unlikely(flags & _PAGE_CACHABLE))
                        return (void __iomem *)P1SEGADDR(offset);

                return (void __iomem *)P2SEGADDR(offset);
        }

        /* P4 above the store queues are always mapped. */
        if (unlikely(offset >= P3_ADDR_MAX))
                return (void __iomem *)P4SEGADDR(offset);

        return NULL;
}
#else
#define __ioremap_29bit(offset, size, prot)             NULL
#endif /* CONFIG_29BIT */
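/*
 * Editorial sketch, not part of the original file: the P1/P2 segment
 * placement that __ioremap_29bit() performs above, restated as a small
 * standalone C program so the arithmetic is easy to see.  The segment
 * bases and the 29-bit physical mask restate the conventional SH-4
 * layout rather than being taken from this file, and the sample
 * physical address is hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define SH29_PHYS_MASK  0x1fffffffu     /* low 29 bits: physical address */
#define SH29_P1_BASE    0x80000000u     /* P1 segment: cached, untranslated */
#define SH29_P2_BASE    0xa0000000u     /* P2 segment: uncached, untranslated */

int main(void)
{
        uint32_t phys = 0x1f000000u;    /* hypothetical device register block */

        /* A cachable request lands in P1; everything else goes through P2. */
        printf("P1 (cached):   0x%08x\n", SH29_P1_BASE | (phys & SH29_PHYS_MASK));
        printf("P2 (uncached): 0x%08x\n", SH29_P2_BASE | (phys & SH29_PHYS_MASK));
        return 0;
}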
void __iomem __ref *ioremap_prot(phys_addr_t phys_addr, size_t size,
                                 unsigned long prot)
{
        void __iomem *mapped;
        pgprot_t pgprot = __pgprot(prot);

        mapped = __ioremap_trapped(phys_addr, size);
        if (mapped)
                return mapped;

        mapped = __ioremap_29bit(phys_addr, size, pgprot);
        if (mapped)
                return mapped;

        /*
         * If we can't yet use the regular approach, go the fixmap route.
         */
        if (!mem_init_done)
                return ioremap_fixed(phys_addr, size, pgprot);

        /*
         * First try to remap through the PMB.
         * PMB entries are all pre-faulted.
         */
        mapped = pmb_remap_caller(phys_addr, size, pgprot,
                        __builtin_return_address(0));
        if (mapped && !IS_ERR(mapped))
                return mapped;

        return generic_ioremap_prot(phys_addr, size, pgprot);
}
EXPORT_SYMBOL(ioremap_prot);

/*
 * Simple checks for non-translatable mappings.
 */
static inline int iomapping_nontranslatable(unsigned long offset)
{
#ifdef CONFIG_29BIT
        /*
         * In 29-bit mode this includes the fixed P1/P2 areas, and some
         * parts of P3.
         */
        if (PXSEG(offset) < P3SEG || offset >= P3_ADDR_MAX)
                return 1;
#endif

        return 0;
}

void iounmap(volatile void __iomem *addr)
{
        unsigned long vaddr = (unsigned long __force)addr;

        /*
         * Nothing to do if there is no translatable mapping.
         */
        if (iomapping_nontranslatable(vaddr))
                return;

        /*
         * There's no VMA if it's from an early fixed mapping.
         */
        if (iounmap_fixed((void __iomem *)addr) == 0)
                return;

        /*
         * If the PMB handled it, there's nothing else to do.
         */
        if (pmb_unmap((void __iomem *)addr) == 0)
                return;

        generic_iounmap(addr);
}
EXPORT_SYMBOL(iounmap);
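/*
 * Editorial sketch, not part of either file: the usual consumer view of
 * the interface above.  A driver maps a physical register window,
 * accesses it through the __iomem cookie, and unmaps it again; on
 * 29-bit SH the "mapping" may be nothing more than the segment
 * placement shown earlier.  The window base, size, and register offset
 * here are hypothetical.  For comparison, the 2.4-era i386
 * implementation from which this file's header comment was inherited
 * follows below.
 */
static int __maybe_unused example_probe(void)
{
        void __iomem *regs;

        regs = ioremap(0xfe400000, 0x100);      /* hypothetical device window */
        if (!regs)
                return -ENOMEM;

        writew(0x0030, regs + 0x08);            /* hypothetical control register */
        iounmap(regs);
        return 0;
}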
/*
 * arch/i386/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/vmalloc.h>
#include <asm/io.h>
#include <asm/pgalloc.h>

static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
        unsigned long phys_addr, unsigned long flags)
{
        unsigned long end;

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        if (address >= end)
                BUG();
        do {
                if (!pte_none(*pte)) {
                        printk("remap_area_pte: page already exists\n");
                        BUG();
                }
                set_pte(pte, mk_pte_phys(phys_addr, __pgprot(_PAGE_PRESENT | _PAGE_RW |
                                        _PAGE_DIRTY | _PAGE_ACCESSED | flags)));
                address += PAGE_SIZE;
                phys_addr += PAGE_SIZE;
                pte++;
        } while (address && (address < end));
}

static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
        unsigned long phys_addr, unsigned long flags)
{
        unsigned long end;

        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        phys_addr -= address;
        if (address >= end)
                BUG();
        do {
                pte_t * pte = pte_alloc(&init_mm, pmd, address);
                if (!pte)
                        return -ENOMEM;
                remap_area_pte(pte, address, end - address, address + phys_addr, flags);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address && (address < end));
        return 0;
}

static int remap_area_pages(unsigned long address, unsigned long phys_addr,
                            unsigned long size, unsigned long flags)
{
        int error;
        pgd_t * dir;
        unsigned long end = address + size;

        phys_addr -= address;
        dir = pgd_offset(&init_mm, address);
        flush_cache_all();
        if (address >= end)
                BUG();
        spin_lock(&init_mm.page_table_lock);
        do {
                pmd_t *pmd;
                pmd = pmd_alloc(&init_mm, dir, address);
                error = -ENOMEM;
                if (!pmd)
                        break;
                if (remap_area_pmd(pmd, address, end - address,
                                   phys_addr + address, flags))
                        break;
                error = 0;
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));
        spin_unlock(&init_mm.page_table_lock);
        flush_tlb_all();
        return error;
}

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
        void * addr;
        struct vm_struct * area;
        unsigned long offset, last_addr;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
        if (phys_addr >= 0xA0000 && last_addr < 0x100000)
                return phys_to_virt(phys_addr);

        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        if (phys_addr < virt_to_phys(high_memory)) {
                char *t_addr, *t_end;
                struct page *page;

                t_addr = __va(phys_addr);
                t_end = t_addr + (size - 1);

                for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
                        if(!PageReserved(page))
                                return NULL;
        }

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;

        /*
         * Ok, go for it..
         */
        area = get_vm_area(size, VM_IOREMAP);
        if (!area)
                return NULL;
        addr = area->addr;
        if (remap_area_pages(VMALLOC_VMADDR(addr), phys_addr, size, flags)) {
                vfree(addr);
                return NULL;
        }
        return (void *) (offset + (char *)addr);
}

void iounmap(void *addr)
{
        if (addr > high_memory)
                return vfree((void *) (PAGE_MASK & (unsigned long) addr));
}

void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
{
        unsigned long offset, last_addr;
        unsigned int nrpages;
        enum fixed_addresses idx;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
        if (phys_addr >= 0xA0000 && last_addr < 0x100000)
                return phys_to_virt(phys_addr);

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr) - phys_addr;

        /*
         * Mappings have to fit in the FIX_BTMAP area.
         */
        nrpages = size >> PAGE_SHIFT;
        if (nrpages > NR_FIX_BTMAPS)
                return NULL;

        /*
         * Ok, go for it..
         */
        idx = FIX_BTMAP_BEGIN;
        while (nrpages > 0) {
                set_fixmap(idx, phys_addr);
                phys_addr += PAGE_SIZE;
                --idx;
                --nrpages;
        }
        return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
}

void __init bt_iounmap(void *addr, unsigned long size)
{
        unsigned long virt_addr;
        unsigned long offset;
        unsigned int nrpages;
        enum fixed_addresses idx;

        virt_addr = (unsigned long)addr;
        if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
                return;
        offset = virt_addr & ~PAGE_MASK;
        nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

        idx = FIX_BTMAP_BEGIN;
        while (nrpages > 0) {
                __set_fixmap(idx, 0, __pgprot(0));
                --idx;
                --nrpages;
        }
}
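/*
 * Editorial sketch, not part of the original file: the page-alignment
 * bookkeeping that __ioremap() above does before building the mapping,
 * restated as a standalone C program.  Whole pages are mapped and the
 * caller gets back the original sub-page offset, exactly as the NOTE
 * above __ioremap() promises.  The 4 KiB page size and the sample
 * request are assumptions for the example.
 */
#include <stdio.h>

#define EX_PAGE_SIZE    4096ul
#define EX_PAGE_MASK    (~(EX_PAGE_SIZE - 1))
#define EX_PAGE_ALIGN(x)        (((x) + EX_PAGE_SIZE - 1) & EX_PAGE_MASK)

int main(void)
{
        unsigned long phys_addr = 0xfebc1010ul; /* hypothetical, not page aligned */
        unsigned long size = 0x20ul;
        unsigned long last_addr = phys_addr + size - 1;

        unsigned long offset = phys_addr & ~EX_PAGE_MASK;               /* 0x10 */
        unsigned long base = phys_addr & EX_PAGE_MASK;                  /* 0xfebc1000 */
        unsigned long map_size = EX_PAGE_ALIGN(last_addr + 1) - base;   /* one page */

        printf("map 0x%lx..0x%lx, hand back mapping + 0x%lx\n",
               base, base + map_size - 1, offset);
        return 0;
}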