
TOMOYO Linux Cross Reference
Linux/arch/loongarch/mm/ioremap.c


Diff markup

Differences between /arch/loongarch/mm/ioremap.c (Version linux-6.11-rc3) and /arch/i386/mm/ioremap.c (Version linux-2.6.0)


  1 // SPDX-License-Identifier: GPL-2.0            << 
  2 /*                                                  1 /*
  3  * Copyright (C) 2020-2022 Loongson Technology Corporation Limited !!   2  * arch/i386/mm/ioremap.c
                                                   >>   3  *
                                                   >>   4  * Re-map IO memory to kernel address space so that we can access it.
                                                   >>   5  * This is needed for high PCI addresses that aren't mapped in the
                                                   >>   6  * 640k-1MB IO memory area on PC's
                                                   >>   7  *
                                                   >>   8  * (C) Copyright 1995 1996 Linus Torvalds
  4  */                                                 9  */
  5                                                    10 
                                                   >>  11 #include <linux/vmalloc.h>
                                                   >>  12 #include <linux/init.h>
                                                   >>  13 #include <linux/slab.h>
  6 #include <asm/io.h>                                14 #include <asm/io.h>
  7 #include <asm-generic/early_ioremap.h>         !!  15 #include <asm/pgalloc.h>
                                                   >>  16 #include <asm/fixmap.h>
                                                   >>  17 #include <asm/cacheflush.h>
                                                   >>  18 #include <asm/tlbflush.h>
                                                   >>  19 #include <asm/pgtable.h>
  8                                                    20 
  9 void __init __iomem *early_ioremap(u64 phys_addr, unsigned long size) !!  21 static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
                                                   >>  22         unsigned long phys_addr, unsigned long flags)
 10 {                                                  23 {
 11         return ((void __iomem *)TO_CACHE(phys_addr)); !!  24         unsigned long end;
                                                   >>  25         unsigned long pfn;
                                                   >>  26 
                                                   >>  27         address &= ~PMD_MASK;
                                                   >>  28         end = address + size;
                                                   >>  29         if (end > PMD_SIZE)
                                                   >>  30                 end = PMD_SIZE;
                                                   >>  31         if (address >= end)
                                                   >>  32                 BUG();
                                                   >>  33         pfn = phys_addr >> PAGE_SHIFT;
                                                   >>  34         do {
                                                   >>  35                 if (!pte_none(*pte)) {
                                                   >>  36                         printk("remap_area_pte: page already exists\n");
                                                   >>  37                         BUG();
                                                   >>  38                 }
                                                   >>  39                 set_pte(pte, pfn_pte(pfn, __pgprot(_PAGE_PRESENT | _PAGE_RW | 
                                                   >>  40                                         _PAGE_DIRTY | _PAGE_ACCESSED | flags)));
                                                   >>  41                 address += PAGE_SIZE;
                                                   >>  42                 pfn++;
                                                   >>  43                 pte++;
                                                   >>  44         } while (address && (address < end));
 12 }                                                  45 }
 13                                                    46 
 14 void __init early_iounmap(void __iomem *addr, unsigned long size) !!  47 static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
                                                   >>  48         unsigned long phys_addr, unsigned long flags)
 15 {                                                  49 {
                                                   >>  50         unsigned long end;
 16                                                    51 
                                                   >>  52         address &= ~PGDIR_MASK;
                                                   >>  53         end = address + size;
                                                   >>  54         if (end > PGDIR_SIZE)
                                                   >>  55                 end = PGDIR_SIZE;
                                                   >>  56         phys_addr -= address;
                                                   >>  57         if (address >= end)
                                                   >>  58                 BUG();
                                                   >>  59         do {
                                                   >>  60                 pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
                                                   >>  61                 if (!pte)
                                                   >>  62                         return -ENOMEM;
                                                   >>  63                 remap_area_pte(pte, address, end - address, address + phys_addr, flags);
                                                   >>  64                 address = (address + PMD_SIZE) & PMD_MASK;
                                                   >>  65                 pmd++;
                                                   >>  66         } while (address && (address < end));
                                                   >>  67         return 0;
 17 }                                                  68 }
 18                                                    69 
 19 void *early_memremap_ro(resource_size_t phys_addr, unsigned long size) !!  70 static int remap_area_pages(unsigned long address, unsigned long phys_addr,
                                                   >>  71                                  unsigned long size, unsigned long flags)
 20 {                                                  72 {
 21         return early_memremap(phys_addr, size); !!  73         int error;
                                                   >>  74         pgd_t * dir;
                                                   >>  75         unsigned long end = address + size;
                                                   >>  76 
                                                   >>  77         phys_addr -= address;
                                                   >>  78         dir = pgd_offset(&init_mm, address);
                                                   >>  79         flush_cache_all();
                                                   >>  80         if (address >= end)
                                                   >>  81                 BUG();
                                                   >>  82         spin_lock(&init_mm.page_table_lock);
                                                   >>  83         do {
                                                   >>  84                 pmd_t *pmd;
                                                   >>  85                 pmd = pmd_alloc(&init_mm, dir, address);
                                                   >>  86                 error = -ENOMEM;
                                                   >>  87                 if (!pmd)
                                                   >>  88                         break;
                                                   >>  89                 if (remap_area_pmd(pmd, address, end - address,
                                                   >>  90                                          phys_addr + address, flags))
                                                   >>  91                         break;
                                                   >>  92                 error = 0;
                                                   >>  93                 address = (address + PGDIR_SIZE) & PGDIR_MASK;
                                                   >>  94                 dir++;
                                                   >>  95         } while (address && (address < end));
                                                   >>  96         spin_unlock(&init_mm.page_table_lock);
                                                   >>  97         flush_tlb_all();
                                                   >>  98         return error;
                                                   >>  99 }
                                                   >> 100 
                                                   >> 101 /*
                                                   >> 102  * Generic mapping function (not visible outside):
                                                   >> 103  */
                                                   >> 104 
                                                   >> 105 /*
                                                   >> 106  * Remap an arbitrary physical address space into the kernel virtual
                                                   >> 107  * address space. Needed when the kernel wants to access high addresses
                                                   >> 108  * directly.
                                                   >> 109  *
                                                   >> 110  * NOTE! We need to allow non-page-aligned mappings too: we will obviously
                                                   >> 111  * have to convert them into an offset in a page-aligned mapping, but the
                                                   >> 112  * caller shouldn't need to know that small detail.
                                                   >> 113  */
                                                   >> 114 void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
                                                   >> 115 {
                                                   >> 116         void * addr;
                                                   >> 117         struct vm_struct * area;
                                                   >> 118         unsigned long offset, last_addr;
                                                   >> 119 
                                                   >> 120         /* Don't allow wraparound or zero size */
                                                   >> 121         last_addr = phys_addr + size - 1;
                                                   >> 122         if (!size || last_addr < phys_addr)
                                                   >> 123                 return NULL;
                                                   >> 124 
                                                   >> 125         /*
                                                   >> 126          * Don't remap the low PCI/ISA area, it's always mapped..
                                                   >> 127          */
                                                   >> 128         if (phys_addr >= 0xA0000 && last_addr < 0x100000)
                                                   >> 129                 return phys_to_virt(phys_addr);
                                                   >> 130 
                                                   >> 131         /*
                                                   >> 132          * Don't allow anybody to remap normal RAM that we're using..
                                                   >> 133          */
                                                   >> 134         if (phys_addr < virt_to_phys(high_memory)) {
                                                   >> 135                 char *t_addr, *t_end;
                                                   >> 136                 struct page *page;
                                                   >> 137 
                                                   >> 138                 t_addr = __va(phys_addr);
                                                   >> 139                 t_end = t_addr + (size - 1);
                                                   >> 140            
                                                   >> 141                 for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
                                                   >> 142                         if(!PageReserved(page))
                                                   >> 143                                 return NULL;
                                                   >> 144         }
                                                   >> 145 
                                                   >> 146         /*
                                                   >> 147          * Mappings have to be page-aligned
                                                   >> 148          */
                                                   >> 149         offset = phys_addr & ~PAGE_MASK;
                                                   >> 150         phys_addr &= PAGE_MASK;
                                                   >> 151         size = PAGE_ALIGN(last_addr+1) - phys_addr;
                                                   >> 152 
                                                   >> 153         /*
                                                   >> 154          * Ok, go for it..
                                                   >> 155          */
                                                   >> 156         area = get_vm_area(size, VM_IOREMAP);
                                                   >> 157         if (!area)
                                                   >> 158                 return NULL;
                                                   >> 159         area->phys_addr = phys_addr;
                                                   >> 160         addr = area->addr;
                                                   >> 161         if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
                                                   >> 162                 vunmap(addr);
                                                   >> 163                 return NULL;
                                                   >> 164         }
                                                   >> 165         return (void *) (offset + (char *)addr);
 22 }                                                 166 }
 23                                                   167 
 24 void *early_memremap_prot(resource_size_t phys_addr, unsigned long size, !! 168 
 25                     unsigned long prot_val)    !! 169 /**
                                                   >> 170  * ioremap_nocache     -   map bus memory into CPU space
                                                   >> 171  * @offset:    bus address of the memory
                                                   >> 172  * @size:      size of the resource to map
                                                   >> 173  *
                                                   >> 174  * ioremap_nocache performs a platform specific sequence of operations to
                                                   >> 175  * make bus memory CPU accessible via the readb/readw/readl/writeb/
                                                   >> 176  * writew/writel functions and the other mmio helpers. The returned
                                                   >> 177  * address is not guaranteed to be usable directly as a virtual
                                                   >> 178  * address. 
                                                   >> 179  *
                                                   >> 180  * This version of ioremap ensures that the memory is marked uncachable
                                                   >> 181  * on the CPU as well as honouring existing caching rules from things like
                                                   >> 182  * the PCI bus. Note that there are other caches and buffers on many 
                                                   >> 183  * busses. In particular driver authors should read up on PCI writes
                                                   >> 184  *
                                                   >> 185  * It's useful if some control registers are in such an area and
                                                   >> 186  * write combining or read caching is not desirable:
                                                   >> 187  * 
                                                   >> 188  * Must be freed with iounmap.
                                                   >> 189  */
                                                   >> 190 
                                                   >> 191 void *ioremap_nocache (unsigned long phys_addr, unsigned long size)
 26 {                                                 192 {
 27         return early_memremap(phys_addr, size); !! 193         unsigned long last_addr;
                                                   >> 194         void *p = __ioremap(phys_addr, size, _PAGE_PCD);
                                                   >> 195         if (!p) 
                                                   >> 196                 return p; 
                                                   >> 197 
                                                   >> 198         /* Guaranteed to be > phys_addr, as per __ioremap() */
                                                   >> 199         last_addr = phys_addr + size - 1;
                                                   >> 200 
                                                   >> 201         if (last_addr < virt_to_phys(high_memory)) { 
                                                   >> 202                 struct page *ppage = virt_to_page(__va(phys_addr));             
                                                   >> 203                 unsigned long npages;
                                                   >> 204 
                                                   >> 205                 phys_addr &= PAGE_MASK;
                                                   >> 206 
                                                   >> 207                 /* This might overflow and become zero.. */
                                                   >> 208                 last_addr = PAGE_ALIGN(last_addr);
                                                   >> 209 
                                                   >> 210                 /* .. but that's ok, because modulo-2**n arithmetic will make
                                                   >> 211                 * the page-aligned "last - first" come out right.
                                                   >> 212                 */
                                                   >> 213                 npages = (last_addr - phys_addr) >> PAGE_SHIFT;
                                                   >> 214 
                                                   >> 215                 if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) { 
                                                   >> 216                         iounmap(p); 
                                                   >> 217                         p = NULL;
                                                   >> 218                 }
                                                   >> 219                 global_flush_tlb();
                                                   >> 220         }
                                                   >> 221 
                                                   >> 222         return p;                                       
                                                   >> 223 }
                                                   >> 224 
                                                   >> 225 void iounmap(void *addr)
                                                   >> 226 {
                                                   >> 227         struct vm_struct *p;
                                                   >> 228         if (addr <= high_memory) 
                                                   >> 229                 return; 
                                                   >> 230         p = remove_vm_area((void *) (PAGE_MASK & (unsigned long) addr));
                                                   >> 231         if (!p) { 
                                                   >> 232                 printk("__iounmap: bad address %p\n", addr);
                                                   >> 233                 return;
                                                   >> 234         } 
                                                   >> 235 
                                                   >> 236         if (p->flags && p->phys_addr < virt_to_phys(high_memory)) { 
                                                   >> 237                 change_page_attr(virt_to_page(__va(p->phys_addr)),
                                                   >> 238                                  p->size >> PAGE_SHIFT,
                                                   >> 239                                  PAGE_KERNEL);                           
                                                   >> 240                 global_flush_tlb();
                                                   >> 241         } 
                                                   >> 242         kfree(p); 
                                                   >> 243 }
                                                   >> 244 
                                                   >> 245 void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
                                                   >> 246 {
                                                   >> 247         unsigned long offset, last_addr;
                                                   >> 248         unsigned int nrpages;
                                                   >> 249         enum fixed_addresses idx;
                                                   >> 250 
                                                   >> 251         /* Don't allow wraparound or zero size */
                                                   >> 252         last_addr = phys_addr + size - 1;
                                                   >> 253         if (!size || last_addr < phys_addr)
                                                   >> 254                 return NULL;
                                                   >> 255 
                                                   >> 256         /*
                                                   >> 257          * Don't remap the low PCI/ISA area, it's always mapped..
                                                   >> 258          */
                                                   >> 259         if (phys_addr >= 0xA0000 && last_addr < 0x100000)
                                                   >> 260                 return phys_to_virt(phys_addr);
                                                   >> 261 
                                                   >> 262         /*
                                                   >> 263          * Mappings have to be page-aligned
                                                   >> 264          */
                                                   >> 265         offset = phys_addr & ~PAGE_MASK;
                                                   >> 266         phys_addr &= PAGE_MASK;
                                                   >> 267         size = PAGE_ALIGN(last_addr) - phys_addr;
                                                   >> 268 
                                                   >> 269         /*
                                                   >> 270          * Mappings have to fit in the FIX_BTMAP area.
                                                   >> 271          */
                                                   >> 272         nrpages = size >> PAGE_SHIFT;
                                                   >> 273         if (nrpages > NR_FIX_BTMAPS)
                                                   >> 274                 return NULL;
                                                   >> 275 
                                                   >> 276         /*
                                                   >> 277          * Ok, go for it..
                                                   >> 278          */
                                                   >> 279         idx = FIX_BTMAP_BEGIN;
                                                   >> 280         while (nrpages > 0) {
                                                   >> 281                 set_fixmap(idx, phys_addr);
                                                   >> 282                 phys_addr += PAGE_SIZE;
                                                   >> 283                 --idx;
                                                   >> 284                 --nrpages;
                                                   >> 285         }
                                                   >> 286         return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
                                                   >> 287 }
                                                   >> 288 
                                                   >> 289 void __init bt_iounmap(void *addr, unsigned long size)
                                                   >> 290 {
                                                   >> 291         unsigned long virt_addr;
                                                   >> 292         unsigned long offset;
                                                   >> 293         unsigned int nrpages;
                                                   >> 294         enum fixed_addresses idx;
                                                   >> 295 
                                                   >> 296         virt_addr = (unsigned long)addr;
                                                   >> 297         if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
                                                   >> 298                 return;
                                                   >> 299         offset = virt_addr & ~PAGE_MASK;
                                                   >> 300         nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;
                                                   >> 301 
                                                   >> 302         idx = FIX_BTMAP_BEGIN;
                                                   >> 303         while (nrpages > 0) {
                                                   >> 304                 clear_fixmap(idx);
                                                   >> 305                 --idx;
                                                   >> 306                 --nrpages;
                                                   >> 307         }
 28 }                                                 308 }
 29                                                   309 
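
The kerneldoc block in the right column above (ioremap_nocache) describes the usual driver-side pattern: map a bus/physical MMIO range, access it only through the readb/readw/readl/writeb/writew/writel accessors, and release it with iounmap. Below is a minimal sketch of that pattern using the current generic API (ioremap(), which on modern kernels provides the uncached mapping and replaced ioremap_nocache()); the EXAMPLE_* base address, size and register offset are made-up placeholders, not values taken from either file.

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/printk.h>
#include <linux/types.h>

/* Hypothetical MMIO window; a real driver gets this from a PCI BAR or a DT node. */
#define EXAMPLE_DEV_PHYS        0xfe000000UL
#define EXAMPLE_DEV_SIZE        0x1000
#define EXAMPLE_STATUS_REG      0x04

static int example_probe(void)
{
        void __iomem *regs;
        u32 status;

        /* Map the device registers uncached into kernel virtual address space. */
        regs = ioremap(EXAMPLE_DEV_PHYS, EXAMPLE_DEV_SIZE);
        if (!regs)
                return -ENOMEM;

        /* Go through the MMIO accessors; never dereference the pointer directly. */
        status = readl(regs + EXAMPLE_STATUS_REG);
        pr_info("example: status=%#x\n", status);

        /* Every successful ioremap() must be paired with an iounmap(). */
        iounmap(regs);
        return 0;
}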
