/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/arch/arm/mm/proc-xsc3.S
 *
 * Original Author: Matthew Gilbert
 * Current Maintainer: Lennert Buytenhek <buytenh@wantstofly.org>
 *
 * Copyright 2004 (C) Intel Corp.
 * Copyright 2005 (C) MontaVista Software, Inc.
 *
 * MMU functions for the Intel XScale3 Core (XSC3).  The XSC3 core is
 * an extension to Intel's original XScale core that adds the following
 * features:
 *
 * - ARMv6 Supersections
 * - Low Locality Reference pages (replaces mini-cache)
 * - 36-bit addressing
 * - L2 cache
 * - Cache coherency if chipset supports it
 *
 * Based on original XScale code by Nicolas Pitre.
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * This is the maximum size of an area which will be flushed.  If the
 * area is larger than this, then we flush the whole cache.
 */
#define MAX_AREA_SIZE	32768

/*
 * The cache line size of the L1 I, L1 D and unified L2 cache.
 */
#define CACHELINESIZE	32

/*
 * The size of the L1 D cache.
 */
#define CACHESIZE	32768

/*
 * This macro is used to wait for a CP15 write and is needed when we
 * have to ensure that the last operation to the coprocessor was
 * completed before continuing with operation.
 */
	.macro	cpwait_ret, lr, rd
	mrc	p15, 0, \rd, c2, c0, 0		@ arbitrary read of cp15
	sub	pc, \lr, \rd, LSR #32		@ wait for completion and
						@ flush instruction pipeline
	.endm

/*
 * This macro cleans and invalidates the entire L1 D cache.
 */

	.macro	clean_d_cache rd, rs
	mov	\rd, #0x1f00
	orr	\rd, \rd, #0x00e0
1:	mcr	p15, 0, \rd, c7, c14, 2		@ clean/invalidate L1 D line
	adds	\rd, \rd, #0x40000000
	bcc	1b
	subs	\rd, \rd, #0x20
	bpl	1b
	.endm
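/*
 * Aside on the two macros above (explanatory sketch, not part of the
 * original source).  cpwait_ret relies on two ARM-mode properties: the
 * mrc result creates a dependency that stalls until outstanding CP15
 * operations complete, and a register shift of LSR #32 yields 0, so
 * "sub pc, \lr, \rd, LSR #32" computes pc = lr - 0 while also flushing
 * the already-fetched instructions.  The classic open-coded CPWAIT
 * sequence is equivalent (rX is a placeholder register):
 *
 *	mrc	p15, 0, rX, c2, c0, 0	@ any CP15 read; result unused
 *	mov	rX, rX			@ stall until the read completes
 *	sub	pc, pc, #4		@ branch to next insn, flush pipeline
 *
 * clean_d_cache walks the 32 KiB, 4-way set-associative L1 D cache by
 * set/way: the initial value 0x1fe0 selects set 255, each
 * "adds #0x40000000" steps the way index in bits [31:30] (producing a
 * carry out after way 3), and "subs #0x20" steps down the set index in
 * bits [12:5] until it goes negative.
 */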
	.text

/*
 * cpu_xsc3_proc_init()
 *
 * Nothing too exciting at the moment
 */
SYM_TYPED_FUNC_START(cpu_xsc3_proc_init)
	ret	lr
SYM_FUNC_END(cpu_xsc3_proc_init)

/*
 * cpu_xsc3_proc_fin()
 */
SYM_TYPED_FUNC_START(cpu_xsc3_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1800			@ ...IZ...........
	bic	r0, r0, #0x0006			@ .............CA.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ret	lr
SYM_FUNC_END(cpu_xsc3_proc_fin)

/*
 * cpu_xsc3_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
	.align	5
	.pushsection	.idmap.text, "ax"
SYM_TYPED_FUNC_START(cpu_xsc3_reset)
	mov	r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
	msr	cpsr_c, r1			@ reset CPSR
	mrc	p15, 0, r1, c1, c0, 0		@ ctrl register
	bic	r1, r1, #0x3900			@ ..VIZ..S........
	bic	r1, r1, #0x0086			@ ........B....CA.
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate L1 caches and BTB
	bic	r1, r1, #0x0001			@ ...............M
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	@ CAUTION: MMU turned off from this point.  We count on the pipeline
	@ already containing those two last instructions to survive.
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I and D TLBs
	ret	r0
SYM_FUNC_END(cpu_xsc3_reset)
	.popsection

/*
 * cpu_xsc3_do_idle()
 *
 * Cause the processor to idle
 *
 * For now we do nothing but go to idle mode for every case
 *
 * XScale supports clock switching, but using idle mode support
 * allows external hardware to react to system state changes.
 */
	.align	5

SYM_TYPED_FUNC_START(cpu_xsc3_do_idle)
	mov	r0, #1
	mcr	p14, 0, r0, c7, c0, 0		@ go to idle
	ret	lr
SYM_FUNC_END(cpu_xsc3_do_idle)

/* ================================= CACHE ================================ */

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
SYM_TYPED_FUNC_START(xsc3_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
SYM_FUNC_END(xsc3_flush_icache_all)

/*
 *	flush_user_cache_all()
 *
 *	Invalidate all cache entries in a particular address
 *	space.
 */
SYM_FUNC_ALIAS(xsc3_flush_user_cache_all, xsc3_flush_kern_cache_all)

/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
SYM_TYPED_FUNC_START(xsc3_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	clean_d_cache r0, r1		@ clean, invalidate L1 D cache
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0	@ invalidate L1 I cache and BTB
	mcrne	p15, 0, ip, c7, c10, 4	@ data write barrier
	mcrne	p15, 0, ip, c7, c5, 4	@ prefetch flush
	ret	lr
SYM_FUNC_END(xsc3_flush_kern_cache_all)

/*
 *	flush_user_cache_range(start, end, vm_flags)
 *
 *	Invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start - start address (may not be aligned)
 *	- end	- end address (exclusive, may not be aligned)
 *	- vma	- vma_area_struct describing address space
 */
	.align	5
SYM_TYPED_FUNC_START(xsc3_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #MAX_AREA_SIZE		@ larger than MAX_AREA_SIZE?
	bhs	__flush_whole_cache

1:	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate L1 I line
	mcr	p15, 0, r0, c7, c14, 1		@ clean/invalidate L1 D line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 6		@ invalidate BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ data write barrier
	mcrne	p15, 0, ip, c7, c5, 4		@ prefetch flush
	ret	lr
SYM_FUNC_END(xsc3_flush_user_cache_range)
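/*
 * Aside (illustration only; this macro is hypothetical, is not part of
 * the original source, and is never expanded): the range operations in
 * this file all share one per-line loop shape, differing only in the
 * CP15 CRm of the line operation (c6 = invalidate, c10 = clean,
 * c14 = clean+invalidate).  A minimal sketch:
 */
	.macro	xsc3_dcache_line_loop_example, crm:req
	bic	r0, r0, #CACHELINESIZE - 1	@ align start down to a line
1:	mcr	p15, 0, r0, c7, \crm, 1		@ operate on the line at r0
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1				@ r1 is the exclusive end
	blo	1b
	.endm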
/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the I cache and the D cache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 *
 *	Note: single I-cache line invalidation isn't used here since
 *	it also trashes the mini I-cache used by JTAG debuggers.
 */
SYM_TYPED_FUNC_START(xsc3_coherent_kern_range)
#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
	b	xsc3_coherent_user_range
#endif
SYM_FUNC_END(xsc3_coherent_kern_range)

SYM_TYPED_FUNC_START(xsc3_coherent_user_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean L1 D line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate L1 I cache and BTB
	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
	mcr	p15, 0, r0, c7, c5, 4		@ prefetch flush
	ret	lr
SYM_FUNC_END(xsc3_coherent_user_range)

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache.
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
SYM_TYPED_FUNC_START(xsc3_flush_kern_dcache_area)
	add	r1, r0, r1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean/invalidate L1 D line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate L1 I cache and BTB
	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
	mcr	p15, 0, r0, c7, c5, 4		@ prefetch flush
	ret	lr
SYM_FUNC_END(xsc3_flush_kern_dcache_area)

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
xsc3_dma_inv_range:
	tst	r0, #CACHELINESIZE - 1
	bic	r0, r0, #CACHELINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean L1 D line
	tst	r1, #CACHELINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean L1 D line
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate L1 D line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
	ret	lr

/*
 *	dma_clean_range(start, end)
 *
 *	Clean the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
xsc3_dma_clean_range:
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean L1 D line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
	ret	lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
SYM_TYPED_FUNC_START(xsc3_dma_flush_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean/invalidate L1 D line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
	ret	lr
SYM_FUNC_END(xsc3_dma_flush_range)

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
SYM_TYPED_FUNC_START(xsc3_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	xsc3_dma_clean_range
	bcs	xsc3_dma_inv_range
	b	xsc3_dma_flush_range
SYM_FUNC_END(xsc3_dma_map_area)

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
SYM_TYPED_FUNC_START(xsc3_dma_unmap_area)
	ret	lr
SYM_FUNC_END(xsc3_dma_unmap_area)

SYM_TYPED_FUNC_START(cpu_xsc3_dcache_clean_area)
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean L1 D line
	add	r0, r0, #CACHELINESIZE
	subs	r1, r1, #CACHELINESIZE
	bhi	1b
	ret	lr
SYM_FUNC_END(cpu_xsc3_dcache_clean_area)
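/*
 * Aside (explanatory sketch, not from the original source):
 * xsc3_dma_map_area above dispatches on enum dma_data_direction, where
 * DMA_BIDIRECTIONAL is 0, DMA_TO_DEVICE is 1 and DMA_FROM_DEVICE is 2:
 *
 *	cmp	r2, #DMA_TO_DEVICE
 *	beq	xsc3_dma_clean_range	@ dir == 1: clean to the device
 *	bcs	xsc3_dma_inv_range	@ dir == 2: invalidate from the device
 *	b	xsc3_dma_flush_range	@ dir == 0: clean and invalidate
 */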
/* =============================== PageTable ============================== */

/*
 * cpu_xsc3_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
SYM_TYPED_FUNC_START(cpu_xsc3_switch_mm)
	clean_d_cache r1, r2
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate L1 I cache and BTB
	mcr	p15, 0, ip, c7, c10, 4		@ data write barrier
	mcr	p15, 0, ip, c7, c5, 4		@ prefetch flush
	orr	r0, r0, #0x18			@ cache the page table in L2
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I and D TLBs
	cpwait_ret lr, ip
SYM_FUNC_END(cpu_xsc3_switch_mm)

/*
 * cpu_xsc3_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
cpu_xsc3_mt_table:
	.long	0x00						@ L_PTE_MT_UNCACHED
	.long	PTE_EXT_TEX(1)					@ L_PTE_MT_BUFFERABLE
	.long	PTE_EXT_TEX(5) | PTE_CACHEABLE			@ L_PTE_MT_WRITETHROUGH
	.long	PTE_CACHEABLE | PTE_BUFFERABLE			@ L_PTE_MT_WRITEBACK
	.long	PTE_EXT_TEX(1) | PTE_BUFFERABLE			@ L_PTE_MT_DEV_SHARED
	.long	0x00						@ unused
	.long	0x00						@ L_PTE_MT_MINICACHE (not present)
	.long	PTE_EXT_TEX(5) | PTE_CACHEABLE | PTE_BUFFERABLE	@ L_PTE_MT_WRITEALLOC (not present?)
	.long	0x00						@ unused
	.long	PTE_EXT_TEX(1)					@ L_PTE_MT_DEV_WC
	.long	0x00						@ unused
	.long	PTE_CACHEABLE | PTE_BUFFERABLE			@ L_PTE_MT_DEV_CACHED
	.long	PTE_EXT_TEX(2)					@ L_PTE_MT_DEV_NONSHARED
	.long	0x00						@ unused
	.long	0x00						@ unused
	.long	0x00						@ unused

	.align	5
SYM_TYPED_FUNC_START(cpu_xsc3_set_pte_ext)
	xscale_set_pte_ext_prologue

	tst	r1, #L_PTE_SHARED		@ shared?
	and	r1, r1, #L_PTE_MT_MASK
	adr	ip, cpu_xsc3_mt_table
	ldr	ip, [ip, r1]
	orrne	r2, r2, #PTE_EXT_COHERENT	@ interlock: mask in coherent bit
	bic	r2, r2, #0x0c			@ clear old C,B bits
	orr	r2, r2, ip

	xscale_set_pte_ext_epilogue
	ret	lr
SYM_FUNC_END(cpu_xsc3_set_pte_ext)

	.ltorg
	.align

.globl	cpu_xsc3_suspend_size
.equ	cpu_xsc3_suspend_size, 4 * 6
#ifdef CONFIG_ARM_CPU_SUSPEND
SYM_TYPED_FUNC_START(cpu_xsc3_do_suspend)
	stmfd	sp!, {r4 - r9, lr}
	mrc	p14, 0, r4, c6, c0, 0	@ clock configuration, for turbo mode
	mrc	p15, 0, r5, c15, c1, 0	@ CP access reg
	mrc	p15, 0, r6, c13, c0, 0	@ PID
	mrc	p15, 0, r7, c3, c0, 0	@ domain ID
	mrc	p15, 0, r8, c1, c0, 1	@ auxiliary control reg
	mrc	p15, 0, r9, c1, c0, 0	@ control reg
	bic	r4, r4, #2		@ clear frequency change bit
	stmia	r0, {r4 - r9}		@ store cp regs
	ldmia	sp!, {r4 - r9, pc}
SYM_FUNC_END(cpu_xsc3_do_suspend)

SYM_TYPED_FUNC_START(cpu_xsc3_do_resume)
	ldmia	r0, {r4 - r9}		@ load cp regs
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0	@ invalidate I & D caches, BTB
	mcr	p15, 0, ip, c7, c10, 4	@ drain write (&fill) buffer
	mcr	p15, 0, ip, c7, c5, 4	@ flush prefetch buffer
	mcr	p15, 0, ip, c8, c7, 0	@ invalidate I & D TLBs
	mcr	p14, 0, r4, c6, c0, 0	@ clock configuration, turbo mode.
	mcr	p15, 0, r5, c15, c1, 0	@ CP access reg
	mcr	p15, 0, r6, c13, c0, 0	@ PID
	mcr	p15, 0, r7, c3, c0, 0	@ domain ID
	orr	r1, r1, #0x18		@ cache the page table in L2
	mcr	p15, 0, r1, c2, c0, 0	@ translation table base addr
	mcr	p15, 0, r8, c1, c0, 1	@ auxiliary control reg
	mov	r0, r9			@ control register
	b	cpu_resume_mmu
SYM_FUNC_END(cpu_xsc3_do_resume)
#endif

	.type	__xsc3_setup, #function
__xsc3_setup:
	mov	r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
	msr	cpsr_c, r0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate L1 caches and BTB
	mcr	p15, 0, ip, c7, c10, 4		@ data write barrier
	mcr	p15, 0, ip, c7, c5, 4		@ prefetch flush
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I and D TLBs
	orr	r4, r4, #0x18			@ cache the page table in L2
	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer

	mov	r0, #1 << 6			@ cp6 access for early sched_clock
	mcr	p15, 0, r0, c15, c1, 0		@ write CP access register

	mrc	p15, 0, r0, c1, c0, 1		@ get auxiliary control reg
	and	r0, r0, #2			@ preserve wb/wt setting
	orr	r0, r0, #(1 << 10)		@ enable L2 for LLR cache
	mcr	p15, 0, r0, c1, c0, 1		@ set auxiliary control reg

	adr	r5, xsc3_crval
	ldmia	r5, {r5, r6}

#ifdef CONFIG_CACHE_XSC3L2
	mrc	p15, 1, r0, c0, c0, 1		@ get L2 present information
	ands	r0, r0, #0xf8
	orrne	r6, r6, #(1 << 26)		@ enable L2 if present
#endif

	mrc	p15, 0, r0, c1, c0, 0		@ get control register
	bic	r0, r0, r5			@ clear bits from xsc3_crval
	orr	r0, r0, r6			@ set bits from xsc3_crval
	ret	lr

	.size	__xsc3_setup, . - __xsc3_setup
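/*
 * Aside (sketch, based on the crval macro in proc-macros.S, not part of
 * this file): xsc3_crval below emits the two words that __xsc3_setup
 * loads into r5/r6, the control register bits to clear and the bits to
 * set.  Roughly:
 *
 *	.macro	crval, clear, mmuset, ucset
 * #ifdef CONFIG_MMU
 *	.word	\clear			@ bits to bic from the control reg
 *	.word	\mmuset			@ bits to orr when the MMU is used
 * #else
 *	.word	\clear
 *	.word	\ucset			@ bits to orr for !MMU builds
 * #endif
 *	.endm
 */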
	.type	xsc3_crval, #object
xsc3_crval:
	crval	clear=0x04002202, mmuset=0x00003905, ucset=0x00001900

	__INITDATA

	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions xsc3, dabort=v5t_early_abort, pabort=legacy_pabort, suspend=1

	.section ".rodata"

	string	cpu_arch_name, "armv5te"
	string	cpu_elf_name, "v5"
	string	cpu_xsc3_name, "XScale-V3 based processor"

	.align

	.section ".proc.info.init", "a"

.macro xsc3_proc_info name:req, cpu_val:req, cpu_mask:req
	.type	__\name\()_proc_info,#object
__\name\()_proc_info:
	.long	\cpu_val
	.long	\cpu_mask
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	initfn	__xsc3_setup, __\name\()_proc_info
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_xsc3_name
	.long	xsc3_processor_functions
	.long	v4wbi_tlb_fns
	.long	xsc3_mc_user_fns
	.long	xsc3_cache_fns
	.size	__\name\()_proc_info, . - __\name\()_proc_info
.endm

	xsc3_proc_info xsc3, 0x69056000, 0xffffe000

/* Note: PXA935 changed its implementor ID from Intel to Marvell */
	xsc3_proc_info xsc3_pxa935, 0x56056000, 0xffffe000