~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/arch/arm/mm/copypage-xsc3.c

Version: ~ [ linux-6.11-rc3 ] ~ [ linux-6.10.4 ] ~ [ linux-6.9.12 ] ~ [ linux-6.8.12 ] ~ [ linux-6.7.12 ] ~ [ linux-6.6.45 ] ~ [ linux-6.5.13 ] ~ [ linux-6.4.16 ] ~ [ linux-6.3.13 ] ~ [ linux-6.2.16 ] ~ [ linux-6.1.104 ] ~ [ linux-6.0.19 ] ~ [ linux-5.19.17 ] ~ [ linux-5.18.19 ] ~ [ linux-5.17.15 ] ~ [ linux-5.16.20 ] ~ [ linux-5.15.164 ] ~ [ linux-5.14.21 ] ~ [ linux-5.13.19 ] ~ [ linux-5.12.19 ] ~ [ linux-5.11.22 ] ~ [ linux-5.10.223 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.281 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.319 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.336 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.337 ] ~ [ linux-4.4.302 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.9 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 // SPDX-License-Identifier: GPL-2.0-only
  2 /*
  3  *  linux/arch/arm/mm/copypage-xsc3.S
  4  *
  5  *  Copyright (C) 2004 Intel Corp.
  6  *
  7  * Adapted for 3rd gen XScale core, no more mini-dcache
  8  * Author: Matt Gilbert (matthew.m.gilbert@intel.com)
  9  */
 10 #include <linux/init.h>
 11 #include <linux/highmem.h>
 12 
 13 /*
 14  * General note:
 15  *  We don't really want write-allocate cache behaviour for these functions
 16  *  since that will just eat through 8K of the cache.
 17  */
 18 
 19 /*
 20  * XSC3 optimised copy_user_highpage
 21  *
 22  * The source page may have some clean entries in the cache already, but we
 23  * can safely ignore them - break_cow() will flush them out of the cache
 24  * if we eventually end up using our copied page.
 25  *
 26  */
/*
 * Copy one PAGE_SIZE page from kfrom to kto using 64-bit ldrd/strd
 * moves.  Each destination cache line is invalidated
 * (mcr p15, 0, %0, c7, c6, 1) just before it is overwritten, so the
 * copy does not allocate (and then dirty-evict) lines for the
 * destination in the data cache — see the "no write-allocate" note
 * at the top of this file.
 */
static void xsc3_mc_copy_user_page(void *kto, const void *kfrom)
{
	int tmp;	/* loop counter; see the "2" matching constraint below */

	/*
	 * Operands:
	 *   %0 = kto   ("+&r": read-write, advanced #8 by each strd)
	 *   %1 = kfrom ("+&r": read-write, advanced #8 by each ldrd)
	 *   %2 = tmp, started at PAGE_SIZE / 64 - 1 via the "2"
	 *        matching input constraint.
	 *
	 * Each pass over the 2: body copies 64 bytes (eight 8-byte
	 * ldrd/strd pairs).  While %2 > 0, bgt 1b loops back through
	 * the pld pair so the next 64 bytes of source are prefetched;
	 * when subs leaves %2 == 0, beq 2b runs one final 64-byte
	 * copy without issuing pld past the end of the source page.
	 * Total: PAGE_SIZE / 64 iterations over the copy body.
	 */
	asm volatile ("\
.arch xscale                                    \n\
	pld	[%1, #0]                        \n\
	pld	[%1, #32]                       \n\
1:	pld	[%1, #64]                       \n\
	pld	[%1, #96]                       \n\
						\n\
2:	ldrd	r2, r3, [%1], #8                \n\
	ldrd	r4, r5, [%1], #8                \n\
	mcr	p15, 0, %0, c7, c6, 1           @ invalidate\n\
	strd	r2, r3, [%0], #8                \n\
	ldrd	r2, r3, [%1], #8                \n\
	strd	r4, r5, [%0], #8                \n\
	ldrd	r4, r5, [%1], #8                \n\
	strd	r2, r3, [%0], #8                \n\
	strd	r4, r5, [%0], #8                \n\
	ldrd	r2, r3, [%1], #8                \n\
	ldrd	r4, r5, [%1], #8                \n\
	mcr	p15, 0, %0, c7, c6, 1           @ invalidate\n\
	strd	r2, r3, [%0], #8                \n\
	ldrd	r2, r3, [%1], #8                \n\
	subs	%2, %2, #1                      \n\
	strd	r4, r5, [%0], #8                \n\
	ldrd	r4, r5, [%1], #8                \n\
	strd	r2, r3, [%0], #8                \n\
	strd	r4, r5, [%0], #8                \n\
	bgt	1b                              \n\
	beq	2b                              "
	: "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
	: "2" (PAGE_SIZE / 64 - 1)
	: "r2", "r3", "r4", "r5");
}
 63 
 64 void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
 65         unsigned long vaddr, struct vm_area_struct *vma)
 66 {
 67         void *kto, *kfrom;
 68 
 69         kto = kmap_atomic(to);
 70         kfrom = kmap_atomic(from);
 71         flush_cache_page(vma, vaddr, page_to_pfn(from));
 72         xsc3_mc_copy_user_page(kto, kfrom);
 73         kunmap_atomic(kfrom);
 74         kunmap_atomic(kto);
 75 }
 76 
 77 /*
 78  * XScale optimised clear_user_page
 79  */
 80 void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 81 {
 82         void *ptr, *kaddr = kmap_atomic(page);
 83         asm volatile ("\
 84 .arch xscale                                    \n\
 85         mov     r1, %2                          \n\
 86         mov     r2, #0                          \n\
 87         mov     r3, #0                          \n\
 88 1:      mcr     p15, 0, %0, c7, c6, 1           @ invalidate line\n\
 89         strd    r2, r3, [%0], #8                \n\
 90         strd    r2, r3, [%0], #8                \n\
 91         strd    r2, r3, [%0], #8                \n\
 92         strd    r2, r3, [%0], #8                \n\
 93         subs    r1, r1, #1                      \n\
 94         bne     1b"
 95         : "=r" (ptr)
 96         : "" (kaddr), "I" (PAGE_SIZE / 32)
 97         : "r1", "r2", "r3");
 98         kunmap_atomic(kaddr);
 99 }
100 
/*
 * Page clear/copy operation table for 3rd-gen XScale (XSC3) cores.
 * NOTE(review): marked __initdata, so the table itself lives in
 * init memory — presumably the processor setup code copies these
 * pointers out before init memory is freed; confirm against the
 * arch boot/proc-setup code.
 */
struct cpu_user_fns xsc3_mc_user_fns __initdata = {
	.cpu_clear_user_highpage = xsc3_mc_clear_user_highpage,
	.cpu_copy_user_highpage = xsc3_mc_copy_user_highpage,
};
105 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

sflogo.php