
TOMOYO Linux Cross Reference
Linux/arch/loongarch/mm/ioremap.c

Diff markup

Differences between /arch/loongarch/mm/ioremap.c (Architecture loongarch) and /arch/mips/mm/ioremap.c (Architecture mips). The two implementations share essentially no code, so each file is reproduced in full below.


/arch/loongarch/mm/ioremap.c:

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#include <asm/io.h>
#include <asm-generic/early_ioremap.h>

void __init __iomem *early_ioremap(u64 phys_addr, unsigned long size)
{
        return ((void __iomem *)TO_CACHE(phys_addr));
}

void __init early_iounmap(void __iomem *addr, unsigned long size)
{

}

void *early_memremap_ro(resource_size_t phys_addr, unsigned long size)
{
        return early_memremap(phys_addr, size);
}

void *early_memremap_prot(resource_size_t phys_addr, unsigned long size,
                          unsigned long prot_val)
{
        return early_memremap(phys_addr, size);
}

/arch/mips/mm/ioremap.c:

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2001, 2002 Ralf Baechle
 */
#include <linux/export.h>
#include <asm/addrspace.h>
#include <asm/byteorder.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm_types.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <ioremap.h>

#define IS_LOW512(addr) (!((phys_addr_t)(addr) & (phys_addr_t) ~0x1fffffffULL))
#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)

static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
                               void *arg)
{
        unsigned long i;

        for (i = 0; i < nr_pages; i++) {
                if (pfn_valid(start_pfn + i) &&
                    !PageReserved(pfn_to_page(start_pfn + i)))
                        return 1;
        }

        return 0;
}

/*
 * ioremap_prot     -   map bus memory into CPU space
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_prot gives the caller control over cache coherency attributes (CCA)
 */
void __iomem *ioremap_prot(phys_addr_t phys_addr, unsigned long size,
                unsigned long prot_val)
{
        unsigned long flags = prot_val & _CACHE_MASK;
        unsigned long offset, pfn, last_pfn;
        struct vm_struct *area;
        phys_addr_t last_addr;
        unsigned long vaddr;
        void __iomem *cpu_addr;

        cpu_addr = plat_ioremap(phys_addr, size, flags);
        if (cpu_addr)
                return cpu_addr;

        phys_addr = fixup_bigphys_addr(phys_addr, size);

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        /*
         * Map uncached objects in the low 512mb of address space using KSEG1,
         * otherwise map using page tables.
         */
        if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
            flags == _CACHE_UNCACHED)
                return (void __iomem *) CKSEG1ADDR(phys_addr);

        /* Early remaps should use the unmapped regions til' VM is available */
        if (WARN_ON_ONCE(!slab_is_available()))
                return NULL;

        /*
         * Don't allow anybody to remap RAM that may be allocated by the page
         * allocator, since that could lead to races & data clobbering.
         */
        pfn = PFN_DOWN(phys_addr);
        last_pfn = PFN_DOWN(last_addr);
        if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
                                  __ioremap_check_ram) == 1) {
                WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
                          &phys_addr, &last_addr);
                return NULL;
        }

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        /*
         * Ok, go for it..
         */
        area = get_vm_area(size, VM_IOREMAP);
        if (!area)
                return NULL;
        vaddr = (unsigned long)area->addr;

        flags |= _PAGE_GLOBAL | _PAGE_PRESENT | __READABLE | __WRITEABLE;
        if (ioremap_page_range(vaddr, vaddr + size, phys_addr,
                        __pgprot(flags))) {
                free_vm_area(area);
                return NULL;
        }

        return (void __iomem *)(vaddr + offset);
}
EXPORT_SYMBOL(ioremap_prot);

void iounmap(const volatile void __iomem *addr)
{
        if (!plat_iounmap(addr) && !IS_KSEG1(addr))
                vunmap((void *)((unsigned long)addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);
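For context, the sketch below shows how a driver would typically consume the ioremap()/iounmap() pair whose MIPS implementation appears above (on MIPS, ioremap() expands to the ioremap_prot() shown here with an uncached cache attribute). It is a minimal illustration rather than code from the kernel tree: DEMO_PHYS_BASE, DEMO_SIZE, DEMO_REG and demo_map_and_poke() are made-up names, and error handling is reduced to the essentials.

#include <linux/io.h>
#include <linux/errno.h>

/* Hypothetical device parameters, for illustration only. */
#define DEMO_PHYS_BASE  0x1fe00000UL    /* bus address of a register block */
#define DEMO_SIZE       0x100UL         /* size of the region to map */
#define DEMO_REG        0x08            /* offset of one 32-bit register */

static int demo_map_and_poke(void)
{
        void __iomem *regs;

        /*
         * ioremap() ends up in ioremap_prot() above with an uncached CCA;
         * for a low-512MB address such as this one it can return a KSEG1
         * address directly instead of building a page-table mapping.
         */
        regs = ioremap(DEMO_PHYS_BASE, DEMO_SIZE);
        if (!regs)
                return -ENOMEM;

        /* Always go through the MMIO accessors, never a plain dereference. */
        writel(0x1, regs + DEMO_REG);
        (void)readl(regs + DEMO_REG);

        /* iounmap() is a no-op for KSEG1 addresses and vunmap()s the rest. */
        iounmap(regs);
        return 0;
}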
