/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  linux/arch/arm/mm/proc-arm920.S: MMU functions for ARM920
 *
 *  Copyright (C) 1999,2000 ARM Limited
 *  Copyright (C) 2000 Deep Blue Solutions Ltd.
 *  hacked for non-paged-MM by Hyok S. Choi, 2003.
 *
 * These are the low level assembler for performing cache and TLB
 * functions on the arm920.
 *
 *  CONFIG_CPU_ARM920_CPU_IDLE -> nohlt
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS	8

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES	64

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 */
#define CACHE_DLIMIT	65536


	.text
/*
 * cpu_arm920_proc_init()
 */
SYM_TYPED_FUNC_START(cpu_arm920_proc_init)
	ret	lr
SYM_FUNC_END(cpu_arm920_proc_init)

/*
 * cpu_arm920_proc_fin()
 */
SYM_TYPED_FUNC_START(cpu_arm920_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1000			@ ...i............
	bic	r0, r0, #0x000e			@ ............wca.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ret	lr
SYM_FUNC_END(cpu_arm920_proc_fin)

/*
 * cpu_arm920_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
	.align	5
	.pushsection	.idmap.text, "ax"
SYM_TYPED_FUNC_START(cpu_arm920_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
#ifdef CONFIG_MMU
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x000f			@ ............wcam
	bic	ip, ip, #0x1100			@ ...i...s........
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	ret	r0
SYM_FUNC_END(cpu_arm920_reset)
	.popsection

/*
 * cpu_arm920_do_idle()
 */
	.align	5
SYM_TYPED_FUNC_START(cpu_arm920_do_idle)
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	ret	lr
SYM_FUNC_END(cpu_arm920_do_idle)

#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
SYM_TYPED_FUNC_START(arm920_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
SYM_FUNC_END(arm920_flush_icache_all)

/*
 *	flush_user_cache_all()
 *
 *	Invalidate all cache entries in a particular address
 *	space.
 */
SYM_FUNC_ALIAS(arm920_flush_user_cache_all, arm920_flush_kern_cache_all)

/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
SYM_TYPED_FUNC_START(arm920_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	mov	r1, #(CACHE_DSEGMENTS - 1) << 5	@ 8 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean+invalidate D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 5
	bcs	1b				@ segments 7 to 0
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
SYM_FUNC_END(arm920_flush_kern_cache_all)

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start	- start address (inclusive)
 *	- end	- end address (exclusive)
 *	- flags	- vm_flags for address space
 */
SYM_TYPED_FUNC_START(arm920_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bhs	__flush_whole_cache

1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
SYM_FUNC_END(arm920_flush_user_cache_range)

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
SYM_TYPED_FUNC_START(arm920_coherent_kern_range)
#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
	b	arm920_coherent_user_range
#endif
SYM_FUNC_END(arm920_coherent_kern_range)

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
SYM_TYPED_FUNC_START(arm920_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	r0, #0
	ret	lr
SYM_FUNC_END(arm920_coherent_user_range)

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
SYM_TYPED_FUNC_START(arm920_flush_kern_dcache_area)
	add	r1, r0, r1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
SYM_FUNC_END(arm920_flush_kern_dcache_area)

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 *
 * (same as v4wb)
 */
arm920_dma_inv_range:
	tst	r0, #CACHE_DLINESIZE - 1
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_clean_range(start, end)
 *
 *	Clean the specified virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 *
 * (same as v4wb)
 */
arm920_dma_clean_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
SYM_TYPED_FUNC_START(arm920_dma_flush_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
SYM_FUNC_END(arm920_dma_flush_range)

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
SYM_TYPED_FUNC_START(arm920_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	arm920_dma_clean_range
	bcs	arm920_dma_inv_range
	b	arm920_dma_flush_range
SYM_FUNC_END(arm920_dma_map_area)

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
SYM_TYPED_FUNC_START(arm920_dma_unmap_area)
	ret	lr
SYM_FUNC_END(arm920_dma_unmap_area)

#endif /* !CONFIG_CPU_DCACHE_WRITETHROUGH */


SYM_TYPED_FUNC_START(cpu_arm920_dcache_clean_area)
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	subs	r1, r1, #CACHE_DLINESIZE
	bhi	1b
	ret	lr
SYM_FUNC_END(cpu_arm920_dcache_clean_area)

/* =============================== PageTable ============================== */

/*
 * cpu_arm920_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
SYM_TYPED_FUNC_START(cpu_arm920_switch_mm)
#ifdef CONFIG_MMU
	mov	ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
#else
@ && 'Clean & Invalidate whole DCache'
@ && Re-written to use Index Ops.
@ && Uses registers r1, r3 and ip

	mov	r1, #(CACHE_DSEGMENTS - 1) << 5	@ 8 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean & invalidate D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 5
	bcs	1b				@ segments 7 to 0
#endif
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	ret	lr
SYM_FUNC_END(cpu_arm920_switch_mm)

/*
 * cpu_arm920_set_pte(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
	.align	5
SYM_TYPED_FUNC_START(cpu_arm920_set_pte_ext)
#ifdef CONFIG_MMU
	armv3_set_pte_ext
	mov	r0, r0
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
#endif
	ret	lr
SYM_FUNC_END(cpu_arm920_set_pte_ext)

/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
.globl	cpu_arm920_suspend_size
.equ	cpu_arm920_suspend_size, 4 * 3
#ifdef CONFIG_ARM_CPU_SUSPEND
SYM_TYPED_FUNC_START(cpu_arm920_do_suspend)
	stmfd	sp!, {r4 - r6, lr}
	mrc	p15, 0, r4, c13, c0, 0	@ PID
	mrc	p15, 0, r5, c3, c0, 0	@ Domain ID
	mrc	p15, 0, r6, c1, c0, 0	@ Control register
	stmia	r0, {r4 - r6}
	ldmfd	sp!, {r4 - r6, pc}
SYM_FUNC_END(cpu_arm920_do_suspend)

SYM_TYPED_FUNC_START(cpu_arm920_do_resume)
	mov	ip, #0
	mcr	p15, 0, ip, c8, c7, 0	@ invalidate I+D TLBs
	mcr	p15, 0, ip, c7, c7, 0	@ invalidate I+D caches
	ldmia	r0, {r4 - r6}
	mcr	p15, 0, r4, c13, c0, 0	@ PID
	mcr	p15, 0, r5, c3, c0, 0	@ Domain ID
	mcr	p15, 0, r1, c2, c0, 0	@ TTB address
	mov	r0, r6			@ control register
	b	cpu_resume_mmu
SYM_FUNC_END(cpu_arm920_do_resume)
#endif

	.type	__arm920_setup, #function
__arm920_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
#ifdef CONFIG_MMU
	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
#endif
	adr	r5, arm920_crval
	ldmia	r5, {r5, r6}
	mrc	p15, 0, r0, c1, c0		@ get control register v4
	bic	r0, r0, r5
	orr	r0, r0, r6
	ret	lr
	.size	__arm920_setup, . - __arm920_setup

	/*
	 *  R
	 * .RVI ZFRS BLDP WCAM
	 * ..11 0001 ..11 0101
	 *
	 */
	.type	arm920_crval, #object
arm920_crval:
	crval	clear=0x00003f3f, mmuset=0x00003135, mmuoff=0x00001130

	__INITDATA
	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions arm920, dabort=v4t_early_abort, pabort=legacy_pabort, suspend=1

	.section ".rodata"

	string	cpu_arch_name, "armv4t"
	string	cpu_elf_name, "v4"
	string	cpu_arm920_name, "ARM920T"

	.align

	.section ".proc.info.init", "a"

	.type	__arm920_proc_info,#object
__arm920_proc_info:
	.long	0x41009200
	.long	0xff00fff0
	.long   PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long   PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	initfn	__arm920_setup, __arm920_proc_info
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
	.long	cpu_arm920_name
	.long	arm920_processor_functions
	.long	v4wbi_tlb_fns
	.long	v4wb_user_fns
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	.long	arm920_cache_fns
#else
	.long	v4wt_cache_fns
#endif
	.size	__arm920_proc_info, . - __arm920_proc_info
Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.