// SPDX-License-Identifier: GPL-2.0-only
/*
 * User address space access functions.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 * Copyright 2002 Andi Kleen <ak@suse.de>
 */
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/libnvdimm.h>

/*
 * Zero Userspace
 */

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * clean_cache_range - write back a cache range with CLWB
 * @vaddr:	virtual start address
 * @size:	number of bytes to write back
 *
 * Write back a cache range using the CLWB (cache line write back)
 * instruction. Note that @size is internally rounded up to be cache
 * line size aligned.
 */
static void clean_cache_range(void *addr, size_t size)
{
	u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
	unsigned long clflush_mask = x86_clflush_size - 1;
	void *vend = addr + size;
	void *p;

	/* Round the start address down to a cache line boundary and write
	 * back every line that overlaps [addr, addr + size). */
	for (p = (void *)((unsigned long)addr & ~clflush_mask);
	     p < vend; p += x86_clflush_size)
		clwb(p);
}

void arch_wb_cache_pmem(void *addr, size_t size)
{
	clean_cache_range(addr, size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);

long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	unsigned long flushed, dest = (unsigned long) dst;
	long rc;

	/* Open a SMAP window around the user-space access. */
	stac();
	rc = __copy_user_nocache(dst, src, size);
	clac();

	/*
	 * __copy_user_nocache() uses non-temporal stores for the bulk
	 * of the transfer, but we need to manually flush if the
	 * transfer is unaligned. A cached memory copy is used when
	 * destination or size is not naturally aligned. That is:
	 *   - Require 8-byte alignment when size is 8 bytes or larger.
	 *   - Require 4-byte alignment when size is 4 bytes.
	 */
	if (size < 8) {
		if (!IS_ALIGNED(dest, 4) || size != 4)
			clean_cache_range(dst, size);
	} else {
		if (!IS_ALIGNED(dest, 8)) {
			dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
			clean_cache_range(dst, 1);
		}

		flushed = dest - (unsigned long) dst;
		if (size > flushed && !IS_ALIGNED(size - flushed, 8))
			clean_cache_range(dst + size - 1, 1);
	}

	return rc;
}

void __memcpy_flushcache(void *_dst, const void *_src, size_t size)
{
	unsigned long dest = (unsigned long) _dst;
	unsigned long source = (unsigned long) _src;

	/* cache copy and flush to align dest */
	if (!IS_ALIGNED(dest, 8)) {
		size_t len = min_t(size_t, size, ALIGN(dest, 8) - dest);

		memcpy((void *) dest, (void *) source, len);
		clean_cache_range((void *) dest, len);
		dest += len;
		source += len;
		size -= len;
		if (!size)
			return;
	}

	/* 4x8 movnti loop: non-temporal stores bypass the cache */
	while (size >= 32) {
		asm("movq    (%0), %%r8\n"
		    "movq   8(%0), %%r9\n"
		    "movq  16(%0), %%r10\n"
		    "movq  24(%0), %%r11\n"
		    "movnti  %%r8,   (%1)\n"
		    "movnti  %%r9,  8(%1)\n"
		    "movnti %%r10, 16(%1)\n"
		    "movnti %%r11, 24(%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8", "r9", "r10", "r11");
		dest += 32;
		source += 32;
		size -= 32;
	}

	/* 1x8 movnti loop */
	while (size >= 8) {
		asm("movq    (%0), %%r8\n"
		    "movnti  %%r8,   (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 8;
		source += 8;
		size -= 8;
	}

	/* 1x4 movnti loop */
	while (size >= 4) {
		asm("movl    (%0), %%r8d\n"
		    "movnti  %%r8d,   (%1)\n"
		    :: "r" (source), "r" (dest)
		    : "memory", "r8");
		dest += 4;
		source += 4;
		size -= 4;
	}

	/* cache copy for remaining bytes */
	if (size) {
		memcpy((void *) dest, (void *) source, size);
		clean_cache_range((void *) dest, size);
	}
}
EXPORT_SYMBOL_GPL(__memcpy_flushcache);
#endif
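
/*
 * A minimal usage sketch (not compiled, kept under #if 0): how callers on a
 * persistent-memory write path typically reach the helpers above. It assumes
 * the generic memcpy_flushcache() wrapper from <linux/string.h>, which
 * resolves to __memcpy_flushcache() when ARCH_HAS_UACCESS_FLUSHCACHE is
 * selected. pmem_write_block() and pmem_flush_block() are hypothetical names
 * used only for illustration.
 */
#if 0
#include <linux/string.h>
#include <linux/libnvdimm.h>

/* Copy @len bytes to persistent memory without leaving dirty cache lines:
 * the bulk of the copy uses non-temporal stores, and any unaligned head or
 * tail is copied through the cache and written back explicitly. */
static void pmem_write_block(void *pmem_dst, const void *src, size_t len)
{
	memcpy_flushcache(pmem_dst, src, len);
}

/* Write back a range that was updated with ordinary cached stores. */
static void pmem_flush_block(void *pmem_dst, size_t len)
{
	arch_wb_cache_pmem(pmem_dst, len);
}
#endif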