
TOMOYO Linux Cross Reference
Linux/arch/sh/mm/ioremap.c

Diff markup

Differences between /arch/sh/mm/ioremap.c (Architecture sh) and /arch/mips/mm/ioremap.c (Architecture mips)


Both implementations are reproduced in full below: the SuperH version first, followed by the MIPS version.

/arch/sh/mm/ioremap.c:

/*
 * arch/sh/mm/ioremap.c
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2005 - 2010  Paul Mundt
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file "COPYING" in the main directory of this
 * archive for more details.
 */
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <asm/io_trapped.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/addrspace.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mmu.h>
#include "ioremap.h"

/*
 * On 32-bit SH, we traditionally have the whole physical address space mapped
 * at all times (as MIPS does), so "ioremap()" and "iounmap()" do not need to do
 * anything but place the address in the proper segment. This is true for P1
 * and P2 addresses, as well as some P3 ones. However, most of the P3 addresses
 * and newer cores using extended addressing need to map through page tables, so
 * the ioremap() implementation becomes a bit tricky.
 */
#ifdef CONFIG_29BIT
static void __iomem *
__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
        phys_addr_t last_addr = offset + size - 1;

        /*
         * For P1 and P2 space this is trivial, as everything is already
         * mapped. Uncached access for P1 addresses are done through P2.
         * In the P3 case or for addresses outside of the 29-bit space,
         * mapping must be done by the PMB or by using page tables.
         */
        if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
                u64 flags = pgprot_val(prot);

                /*
                 * Anything using the legacy PTEA space attributes needs
                 * to be kicked down to page table mappings.
                 */
                if (unlikely(flags & _PAGE_PCC_MASK))
                        return NULL;
                if (unlikely(flags & _PAGE_CACHABLE))
                        return (void __iomem *)P1SEGADDR(offset);

                return (void __iomem *)P2SEGADDR(offset);
        }

        /* P4 above the store queues are always mapped. */
        if (unlikely(offset >= P3_ADDR_MAX))
                return (void __iomem *)P4SEGADDR(offset);

        return NULL;
}
#else
#define __ioremap_29bit(offset, size, prot)             NULL
#endif /* CONFIG_29BIT */

void __iomem __ref *ioremap_prot(phys_addr_t phys_addr, size_t size,
                                 unsigned long prot)
{
        void __iomem *mapped;
        pgprot_t pgprot = __pgprot(prot);

        mapped = __ioremap_trapped(phys_addr, size);
        if (mapped)
                return mapped;

        mapped = __ioremap_29bit(phys_addr, size, pgprot);
        if (mapped)
                return mapped;

        /*
         * If we can't yet use the regular approach, go the fixmap route.
         */
        if (!mem_init_done)
                return ioremap_fixed(phys_addr, size, pgprot);

        /*
         * First try to remap through the PMB.
         * PMB entries are all pre-faulted.
         */
        mapped = pmb_remap_caller(phys_addr, size, pgprot,
                        __builtin_return_address(0));
        if (mapped && !IS_ERR(mapped))
                return mapped;

        return generic_ioremap_prot(phys_addr, size, pgprot);
}
EXPORT_SYMBOL(ioremap_prot);

/*
 * Simple checks for non-translatable mappings.
 */
static inline int iomapping_nontranslatable(unsigned long offset)
{
#ifdef CONFIG_29BIT
        /*
         * In 29-bit mode this includes the fixed P1/P2 areas, as well as
         * parts of P3.
         */
        if (PXSEG(offset) < P3SEG || offset >= P3_ADDR_MAX)
                return 1;
#endif

        return 0;
}

void iounmap(volatile void __iomem *addr)
{
        unsigned long vaddr = (unsigned long __force)addr;

        /*
         * Nothing to do if there is no translatable mapping.
         */
        if (iomapping_nontranslatable(vaddr))
                return;

        /*
         * There's no VMA if it's from an early fixed mapping.
         */
        if (iounmap_fixed((void __iomem *)addr) == 0)
                return;

        /*
         * If the PMB handled it, there's nothing else to do.
         */
        if (pmb_unmap((void __iomem *)addr) == 0)
                return;

        generic_iounmap(addr);
}
EXPORT_SYMBOL(iounmap);

/arch/mips/mm/ioremap.c:

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2001, 2002 Ralf Baechle
 */
#include <linux/export.h>
#include <asm/addrspace.h>
#include <asm/byteorder.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm_types.h>
#include <linux/io.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <ioremap.h>

#define IS_LOW512(addr) (!((phys_addr_t)(addr) & (phys_addr_t) ~0x1fffffffULL))
#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)

static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
                               void *arg)
{
        unsigned long i;

        for (i = 0; i < nr_pages; i++) {
                if (pfn_valid(start_pfn + i) &&
                    !PageReserved(pfn_to_page(start_pfn + i)))
                        return 1;
        }

        return 0;
}

/*
 * ioremap_prot     -   map bus memory into CPU space
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_prot gives the caller control over cache coherency attributes (CCA)
 */
void __iomem *ioremap_prot(phys_addr_t phys_addr, unsigned long size,
                unsigned long prot_val)
{
        unsigned long flags = prot_val & _CACHE_MASK;
        unsigned long offset, pfn, last_pfn;
        struct vm_struct *area;
        phys_addr_t last_addr;
        unsigned long vaddr;
        void __iomem *cpu_addr;

        cpu_addr = plat_ioremap(phys_addr, size, flags);
        if (cpu_addr)
                return cpu_addr;

        phys_addr = fixup_bigphys_addr(phys_addr, size);

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        /*
         * Map uncached objects in the low 512mb of address space using KSEG1,
         * otherwise map using page tables.
         */
        if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
            flags == _CACHE_UNCACHED)
                return (void __iomem *) CKSEG1ADDR(phys_addr);

        /* Early remaps should use the unmapped regions til' VM is available */
        if (WARN_ON_ONCE(!slab_is_available()))
                return NULL;

        /*
         * Don't allow anybody to remap RAM that may be allocated by the page
         * allocator, since that could lead to races & data clobbering.
         */
        pfn = PFN_DOWN(phys_addr);
        last_pfn = PFN_DOWN(last_addr);
        if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
                                  __ioremap_check_ram) == 1) {
                WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
                          &phys_addr, &last_addr);
                return NULL;
        }

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        /*
         * Ok, go for it..
         */
        area = get_vm_area(size, VM_IOREMAP);
        if (!area)
                return NULL;
        vaddr = (unsigned long)area->addr;

        flags |= _PAGE_GLOBAL | _PAGE_PRESENT | __READABLE | __WRITEABLE;
        if (ioremap_page_range(vaddr, vaddr + size, phys_addr,
                        __pgprot(flags))) {
                free_vm_area(area);
                return NULL;
        }

        return (void __iomem *)(vaddr + offset);
}
EXPORT_SYMBOL(ioremap_prot);

void iounmap(const volatile void __iomem *addr)
{
        if (!plat_iounmap(addr) && !IS_KSEG1(addr))
                vunmap((void *)((unsigned long)addr & PAGE_MASK));
}
EXPORT_SYMBOL(iounmap);
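Both files back the same driver-facing interface: ioremap()/ioremap_prot() to map a bus address into kernel virtual space, and iounmap() to release the mapping. For orientation only, the sketch below shows how a driver would typically consume that interface; the base address, window size, and register offset are hypothetical example values, not taken from the sources above.

#include <linux/io.h>
#include <linux/types.h>
#include <linux/errno.h>

/* Hypothetical example values, chosen only for illustration. */
#define EXAMPLE_PHYS_BASE       0x1f000000UL    /* bus address of a device */
#define EXAMPLE_REGION_SIZE     0x1000UL        /* size of its register window */
#define EXAMPLE_STATUS_REG      0x04            /* offset of a status register */

static int __maybe_unused example_probe(void)
{
        void __iomem *regs;
        u32 status;

        /*
         * Map the device registers. On sh and mips this may resolve to a
         * fixed segment (P2/KSEG1) or fall back to a page-table mapping,
         * as the implementations above show.
         */
        regs = ioremap(EXAMPLE_PHYS_BASE, EXAMPLE_REGION_SIZE);
        if (!regs)
                return -ENOMEM;

        /* MMIO accesses go through the readl()/writel() accessors. */
        status = readl(regs + EXAMPLE_STATUS_REG);
        writel(status | 0x1, regs + EXAMPLE_STATUS_REG);

        /* Tear the mapping down again; effectively a no-op for fixed segments. */
        iounmap(regs);

        return 0;
}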
