/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/proc-xscale.S
 *
 *  Author:     Nicolas Pitre
 *  Created:    November 2000
 *  Copyright:  (C) 2000, 2001 MontaVista Software Inc.
 *
 * MMU functions for the Intel XScale CPUs
 *
 * 2001 Aug 21:
 *      some contributions by Brett Gaines <brett.w.gaines@intel.com>
 *      Copyright 2001 by Intel Corp.
 *
 * 2001 Sep 08:
 *      Completely revisited, many important fixes
 *      Nicolas Pitre <nico@fluxnic.net>
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * This is the maximum size of an area which will be flushed.  If the area
 * is larger than this, then we flush the whole cache.
 */
#define MAX_AREA_SIZE   32768

/*
 * the cache line size of the I and D cache
 */
#define CACHELINESIZE   32

/*
 * the size of the data cache
 */
#define CACHESIZE       32768

/*
 * Virtual address used to allocate the cache when flushed
 *
 * This must be an address range which is _never_ used.  It should
 * apparently have a mapping in the corresponding page table for
 * compatibility with future CPUs that _could_ require it; for now we
 * don't care.
 *
 * This must be aligned on a 2*CACHESIZE boundary.  The code alternates
 * between the two areas each time the clean_d_cache macro is used.
 * Without this the XScale core exhibits cache eviction problems and no one
 * knows why.
 *
 * Reminder: the vector table is located at 0xffff0000-0xffff0fff.
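 * (CLEAN_ADDR + 2 * CACHESIZE = 0xfffe0000 + 0x10000 = 0xffff0000, so the
 * two clean areas end exactly where the vector page begins.)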
 */
#define CLEAN_ADDR      0xfffe0000

/*
 * This macro waits for a CP15 write to take effect and is needed
 * whenever we must ensure that the last operation on the coprocessor
 * has completed before continuing.
 */
        .macro  cpwait, rd
        mrc     p15, 0, \rd, c2, c0, 0          @ arbitrary read of cp15
        mov     \rd, \rd                        @ wait for completion
        sub     pc, pc, #4                      @ flush instruction pipeline
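                                                @ (pc reads as '.' + 8, so pc - 4
                                                @ is the next instruction; the
                                                @ write to pc forces the flush)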
        .endm

        .macro  cpwait_ret, lr, rd
        mrc     p15, 0, \rd, c2, c0, 0          @ arbitrary read of cp15
        sub     pc, \lr, \rd, LSR #32           @ wait for completion and
                                                @ flush instruction pipeline
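                                                @ (\rd, LSR #32 is always 0, so
                                                @ this returns to \lr; using \rd
                                                @ as an operand stalls until the
                                                @ mrc above has completed)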
        .endm

/*
 * This macro cleans the entire dcache using line allocate.
 * The main loop has been unrolled to reduce loop overhead.
 * rd and rs are two scratch registers.
 */
        .macro  clean_d_cache, rd, rs
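        @ flip clean_addr to the other CACHESIZE-sized window (see the
        @ CLEAN_ADDR comment above), then allocate lines through the whole
        @ window, four cache lines per loop iteration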
        ldr     \rs, =clean_addr
        ldr     \rd, [\rs]
        eor     \rd, \rd, #CACHESIZE
        str     \rd, [\rs]
        add     \rs, \rd, #CACHESIZE
1:      mcr     p15, 0, \rd, c7, c2, 5          @ allocate D cache line
        add     \rd, \rd, #CACHELINESIZE
        mcr     p15, 0, \rd, c7, c2, 5          @ allocate D cache line
        add     \rd, \rd, #CACHELINESIZE
        mcr     p15, 0, \rd, c7, c2, 5          @ allocate D cache line
        add     \rd, \rd, #CACHELINESIZE
        mcr     p15, 0, \rd, c7, c2, 5          @ allocate D cache line
        add     \rd, \rd, #CACHELINESIZE
        teq     \rd, \rs
        bne     1b
        .endm

        .data
        .align  2
clean_addr:     .word   CLEAN_ADDR

        .text

/*
 * cpu_xscale_proc_init()
 *
 * Nothing too exciting at the moment
 */
SYM_TYPED_FUNC_START(cpu_xscale_proc_init)
        @ enable write buffer coalescing. Some bootloaders disable it
        mrc     p15, 0, r1, c1, c0, 1
        bic     r1, r1, #1
        mcr     p15, 0, r1, c1, c0, 1
        ret     lr
SYM_FUNC_END(cpu_xscale_proc_init)

/*
 * cpu_xscale_proc_fin()
 */
SYM_TYPED_FUNC_START(cpu_xscale_proc_fin)
        mrc     p15, 0, r0, c1, c0, 0           @ ctrl register
        bic     r0, r0, #0x1800                 @ ...IZ...........
        bic     r0, r0, #0x0006                 @ .............CA.
        mcr     p15, 0, r0, c1, c0, 0           @ disable caches
        ret     lr
SYM_FUNC_END(cpu_xscale_proc_fin)

/*
 * cpu_xscale_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 *
 * Beware PXA270 erratum E7.
 */
        .align  5
        .pushsection    .idmap.text, "ax"
SYM_TYPED_FUNC_START(cpu_xscale_reset)
        mov     r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
        msr     cpsr_c, r1                      @ reset CPSR
        mcr     p15, 0, r1, c10, c4, 1          @ unlock I-TLB
        mcr     p15, 0, r1, c8, c5, 0           @ invalidate I-TLB
        mrc     p15, 0, r1, c1, c0, 0           @ ctrl register
        bic     r1, r1, #0x0086                 @ ........B....CA.
        bic     r1, r1, #0x3900                 @ ..VIZ..S........
        sub     pc, pc, #4                      @ flush pipeline
        @ *** cache line aligned ***
        mcr     p15, 0, r1, c1, c0, 0           @ ctrl register
        bic     r1, r1, #0x0001                 @ ...............M
        mcr     p15, 0, ip, c7, c7, 0           @ invalidate I,D caches & BTB
        mcr     p15, 0, r1, c1, c0, 0           @ ctrl register
        @ CAUTION: the MMU is turned off from this point on.  We rely on the
        @ pipeline already holding the last two instructions so they survive.
        mcr     p15, 0, ip, c8, c7, 0           @ invalidate I & D TLBs
        ret     r0
SYM_FUNC_END(cpu_xscale_reset)
        .popsection

/*
 * cpu_xscale_do_idle()
 *
 * Cause the processor to idle
 *
 * For now we do nothing but enter idle mode in every case.
 *
 * XScale supports clock switching, but using idle mode support
 * allows external hardware to react to system state changes.
 */
        .align  5

SYM_TYPED_FUNC_START(cpu_xscale_do_idle)
        mov     r0, #1
        mcr     p14, 0, r0, c7, c0, 0           @ Go to IDLE
        ret     lr
SYM_FUNC_END(cpu_xscale_do_idle)

/* ================================= CACHE ================================ */

/*
 *      flush_icache_all()
 *
 *      Unconditionally clean and invalidate the entire icache.
 */
SYM_TYPED_FUNC_START(xscale_flush_icache_all)
        mov     r0, #0
        mcr     p15, 0, r0, c7, c5, 0           @ invalidate I cache
        ret     lr
SYM_FUNC_END(xscale_flush_icache_all)

/*
 *      flush_user_cache_all()
 *
 *      Invalidate all cache entries in a particular address
 *      space.
 */
SYM_FUNC_ALIAS(xscale_flush_user_cache_all, xscale_flush_kern_cache_all)

/*
 *      flush_kern_cache_all()
 *
 *      Clean and invalidate the entire cache.
 */
SYM_TYPED_FUNC_START(xscale_flush_kern_cache_all)
        mov     r2, #VM_EXEC
        mov     ip, #0
__flush_whole_cache:
        clean_d_cache r0, r1
        tst     r2, #VM_EXEC
        mcrne   p15, 0, ip, c7, c5, 0           @ Invalidate I cache & BTB
        mcrne   p15, 0, ip, c7, c10, 4          @ Drain Write (& Fill) Buffer
        ret     lr
SYM_FUNC_END(xscale_flush_kern_cache_all)

/*
 *      flush_user_cache_range(start, end, vm_flags)
 *
 *      Invalidate a range of cache entries in the specified
 *      address space.
 *
 *      - start - start address (may not be aligned)
 *      - end   - end address (exclusive, may not be aligned)
 *      - vm_flags - vm_area_struct flags describing address space
 */
        .align  5
SYM_TYPED_FUNC_START(xscale_flush_user_cache_range)
        mov     ip, #0
        sub     r3, r1, r0                      @ calculate total size
        cmp     r3, #MAX_AREA_SIZE
        bhs     __flush_whole_cache

1:      tst     r2, #VM_EXEC
        mcrne   p15, 0, r0, c7, c5, 1           @ Invalidate I cache line
        mcr     p15, 0, r0, c7, c10, 1          @ Clean D cache line
        mcr     p15, 0, r0, c7, c6, 1           @ Invalidate D cache line
        add     r0, r0, #CACHELINESIZE
        cmp     r0, r1
        blo     1b
        tst     r2, #VM_EXEC
        mcrne   p15, 0, ip, c7, c5, 6           @ Invalidate BTB
        mcrne   p15, 0, ip, c7, c10, 4          @ Drain Write (& Fill) Buffer
        ret     lr
SYM_FUNC_END(xscale_flush_user_cache_range)

/*
 *      coherent_kern_range(start, end)
 *
 *      Ensure coherency between the Icache and the Dcache in the
 *      region described by start and end.  If you have non-snooping
 *      Harvard caches, you need to implement this function.
 *
 *      - start  - virtual start address
 *      - end    - virtual end address
 *
 *      Note: single I-cache line invalidation isn't used here since
 *      it also trashes the mini I-cache used by JTAG debuggers.
 */
SYM_TYPED_FUNC_START(xscale_coherent_kern_range)
        bic     r0, r0, #CACHELINESIZE - 1
1:      mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
        add     r0, r0, #CACHELINESIZE
        cmp     r0, r1
        blo     1b
        mov     r0, #0
        mcr     p15, 0, r0, c7, c5, 0           @ Invalidate I cache & BTB
        mcr     p15, 0, r0, c7, c10, 4          @ Drain Write (& Fill) Buffer
        ret     lr
SYM_FUNC_END(xscale_coherent_kern_range)

/*
 *      coherent_user_range(start, end)
 *
 *      Ensure coherency between the Icache and the Dcache in the
 *      region described by start and end.  If you have non-snooping
 *      Harvard caches, you need to implement this function.
 *
 *      - start  - virtual start address
 *      - end    - virtual end address
 */
SYM_TYPED_FUNC_START(xscale_coherent_user_range)
        bic     r0, r0, #CACHELINESIZE - 1
1:      mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
        mcr     p15, 0, r0, c7, c5, 1           @ Invalidate I cache entry
        add     r0, r0, #CACHELINESIZE
        cmp     r0, r1
        blo     1b
        mov     r0, #0
        mcr     p15, 0, r0, c7, c5, 6           @ Invalidate BTB
        mcr     p15, 0, r0, c7, c10, 4          @ Drain Write (& Fill) Buffer
        ret     lr
SYM_FUNC_END(xscale_coherent_user_range)

/*
 *      flush_kern_dcache_area(void *addr, size_t size)
 *
 *      Ensure no D cache aliasing occurs, either with itself or
 *      the I cache
 *
 *      - addr  - kernel address
 *      - size  - region size
 */
SYM_TYPED_FUNC_START(xscale_flush_kern_dcache_area)
        add     r1, r0, r1
1:      mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
        mcr     p15, 0, r0, c7, c6, 1           @ invalidate D entry
        add     r0, r0, #CACHELINESIZE
        cmp     r0, r1
        blo     1b
        mov     r0, #0
        mcr     p15, 0, r0, c7, c5, 0           @ Invalidate I cache & BTB
        mcr     p15, 0, r0, c7, c10, 4          @ Drain Write (& Fill) Buffer
        ret     lr
SYM_FUNC_END(xscale_flush_kern_dcache_area)

/*
 *      dma_inv_range(start, end)
 *
 *      Invalidate (discard) the specified virtual address range.
 *      May not write back any entries.  If 'start' or 'end'
 *      are not cache line aligned, those lines must be written
 *      back.
 *
 *      - start  - virtual start address
 *      - end    - virtual end address
 */
xscale_dma_inv_range:
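        @ write back (clean) any partially covered line at the start and
        @ end of the range before invalidating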
        tst     r0, #CACHELINESIZE - 1
        bic     r0, r0, #CACHELINESIZE - 1
        mcrne   p15, 0, r0, c7, c10, 1          @ clean D entry
        tst     r1, #CACHELINESIZE - 1
        mcrne   p15, 0, r1, c7, c10, 1          @ clean D entry
1:      mcr     p15, 0, r0, c7, c6, 1           @ invalidate D entry
        add     r0, r0, #CACHELINESIZE
        cmp     r0, r1
        blo     1b
        mcr     p15, 0, r0, c7, c10, 4          @ Drain Write (& Fill) Buffer
        ret     lr

/*
 *      dma_clean_range(start, end)
 *
 *      Clean the specified virtual address range.
 *
 *      - start  - virtual start address
 *      - end    - virtual end address
 */
xscale_dma_clean_range:
        bic     r0, r0, #CACHELINESIZE - 1
1:      mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
        add     r0, r0, #CACHELINESIZE
        cmp     r0, r1
        blo     1b
        mcr     p15, 0, r0, c7, c10, 4          @ Drain Write (& Fill) Buffer
        ret     lr

/*
 *      dma_flush_range(start, end)
 *
 *      Clean and invalidate the specified virtual address range.
 *
 *      - start  - virtual start address
 *      - end    - virtual end address
 */
SYM_TYPED_FUNC_START(xscale_dma_flush_range)
        bic     r0, r0, #CACHELINESIZE - 1
1:      mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
        mcr     p15, 0, r0, c7, c6, 1           @ invalidate D entry
        add     r0, r0, #CACHELINESIZE
        cmp     r0, r1
        blo     1b
        mcr     p15, 0, r0, c7, c10, 4          @ Drain Write (& Fill) Buffer
        ret     lr
SYM_FUNC_END(xscale_dma_flush_range)

/*
 *      dma_map_area(start, size, dir)
 *      - start - kernel virtual start address
 *      - size  - size of region
 *      - dir   - DMA direction
 */
SYM_TYPED_FUNC_START(xscale_dma_map_area)
        add     r1, r1, r0
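        @ dispatch on the DMA direction in r2:
        @   DMA_TO_DEVICE (1)     -> clean only (CPU wrote, device will read)
        @   DMA_FROM_DEVICE (2)   -> invalidate only (device will write)
        @   DMA_BIDIRECTIONAL (0) -> clean and invalidate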
        cmp     r2, #DMA_TO_DEVICE
        beq     xscale_dma_clean_range
        bcs     xscale_dma_inv_range
        b       xscale_dma_flush_range
SYM_FUNC_END(xscale_dma_map_area)

/*
 * On stepping A0/A1 of the 80200, invalidating D-cache by line doesn't
 * clear the dirty bits, which means that if we invalidate a dirty line,
 * the dirty data can still be written back to external memory later on.
 *
 * The recommended workaround is to always do a clean D-cache line before
 * doing an invalidate D-cache line, so on the affected processors,
 * dma_inv_range() is implemented as dma_flush_range().
 *
 * See erratum #25 of "Intel 80200 Processor Specification Update",
 * revision January 22, 2003, available at:
 *     http://www.intel.com/design/iio/specupdt/273415.htm
 */

/*
 *      dma_map_area(start, size, dir)
 *      - start - kernel virtual start address
 *      - size  - size of region
 *      - dir   - DMA direction
 */
SYM_TYPED_FUNC_START(xscale_80200_A0_A1_dma_map_area)
        add     r1, r1, r0
        teq     r2, #DMA_TO_DEVICE
        beq     xscale_dma_clean_range
        b       xscale_dma_flush_range
SYM_FUNC_END(xscale_80200_A0_A1_dma_map_area)

/*
 *      dma_unmap_area(start, size, dir)
 *      - start - kernel virtual start address
 *      - size  - size of region
 *      - dir   - DMA direction
 */
SYM_TYPED_FUNC_START(xscale_dma_unmap_area)
        ret     lr
SYM_FUNC_END(xscale_dma_unmap_area)

SYM_TYPED_FUNC_START(cpu_xscale_dcache_clean_area)
1:      mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
        add     r0, r0, #CACHELINESIZE
        subs    r1, r1, #CACHELINESIZE
        bhi     1b
        ret     lr
SYM_FUNC_END(cpu_xscale_dcache_clean_area)

/* =============================== PageTable ============================== */

/*
 * cpu_xscale_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
        .align  5
SYM_TYPED_FUNC_START(cpu_xscale_switch_mm)
        clean_d_cache r1, r2
        mcr     p15, 0, ip, c7, c5, 0           @ Invalidate I cache & BTB
        mcr     p15, 0, ip, c7, c10, 4          @ Drain Write (& Fill) Buffer
        mcr     p15, 0, r0, c2, c0, 0           @ load page table pointer
        mcr     p15, 0, ip, c8, c7, 0           @ invalidate I & D TLBs
        cpwait_ret lr, ip
SYM_FUNC_END(cpu_xscale_switch_mm)

/*
 * cpu_xscale_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 *
 * Erratum 40: must set memory to write-through for user read-only pages.
 */
cpu_xscale_mt_table:
        .long   0x00                                            @ L_PTE_MT_UNCACHED
        .long   PTE_BUFFERABLE                                  @ L_PTE_MT_BUFFERABLE
        .long   PTE_CACHEABLE                                   @ L_PTE_MT_WRITETHROUGH
        .long   PTE_CACHEABLE | PTE_BUFFERABLE                  @ L_PTE_MT_WRITEBACK
        .long   PTE_EXT_TEX(1) | PTE_BUFFERABLE                 @ L_PTE_MT_DEV_SHARED
        .long   0x00                                            @ unused
        .long   PTE_EXT_TEX(1) | PTE_CACHEABLE                  @ L_PTE_MT_MINICACHE
        .long   PTE_EXT_TEX(1) | PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEALLOC
        .long   0x00                                            @ unused
        .long   PTE_BUFFERABLE                                  @ L_PTE_MT_DEV_WC
        .long   0x00                                            @ unused
        .long   PTE_CACHEABLE | PTE_BUFFERABLE                  @ L_PTE_MT_DEV_CACHED
        .long   0x00                                            @ L_PTE_MT_DEV_NONSHARED
        .long   0x00                                            @ unused
        .long   0x00                                            @ unused
        .long   0x00                                            @ unused

        .align  5
SYM_TYPED_FUNC_START(cpu_xscale_set_pte_ext)
        xscale_set_pte_ext_prologue

        @
        @ Erratum 40: must set memory to write-through for user read-only pages
        @
        and     ip, r1, #(L_PTE_MT_MASK | L_PTE_USER | L_PTE_RDONLY) & ~(4 << 2)
        teq     ip, #L_PTE_MT_WRITEBACK | L_PTE_USER | L_PTE_RDONLY
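        @ (clearing 4 << 2 from the mask above makes L_PTE_MT_WRITEALLOC
        @ compare equal to L_PTE_MT_WRITEBACK, so both cacheable write-back
        @ types take the write-through downgrade)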

        moveq   r1, #L_PTE_MT_WRITETHROUGH
        and     r1, r1, #L_PTE_MT_MASK
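        @ the memory type field is already shifted left by 2 bits, so it can
        @ be used directly as the byte offset of a word in cpu_xscale_mt_table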
        adr     ip, cpu_xscale_mt_table
        ldr     ip, [ip, r1]
        bic     r2, r2, #0x0c
        orr     r2, r2, ip

        xscale_set_pte_ext_epilogue
        ret     lr
SYM_FUNC_END(cpu_xscale_set_pte_ext)

        .ltorg
        .align

.globl  cpu_xscale_suspend_size
.equ    cpu_xscale_suspend_size, 4 * 6
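@ six 32-bit words, matching the r4-r9 registers saved by cpu_xscale_do_suspend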
#ifdef CONFIG_ARM_CPU_SUSPEND
SYM_TYPED_FUNC_START(cpu_xscale_do_suspend)
        stmfd   sp!, {r4 - r9, lr}
        mrc     p14, 0, r4, c6, c0, 0   @ clock configuration, for turbo mode
        mrc     p15, 0, r5, c15, c1, 0  @ CP access reg
        mrc     p15, 0, r6, c13, c0, 0  @ PID
        mrc     p15, 0, r7, c3, c0, 0   @ domain ID
        mrc     p15, 0, r8, c1, c0, 1   @ auxiliary control reg
        mrc     p15, 0, r9, c1, c0, 0   @ control reg
        bic     r4, r4, #2              @ clear frequency change bit
        stmia   r0, {r4 - r9}           @ store cp regs
        ldmfd   sp!, {r4 - r9, pc}
SYM_FUNC_END(cpu_xscale_do_suspend)

SYM_TYPED_FUNC_START(cpu_xscale_do_resume)
        ldmia   r0, {r4 - r9}           @ load cp regs
        mov     ip, #0
        mcr     p15, 0, ip, c8, c7, 0   @ invalidate I & D TLBs
        mcr     p15, 0, ip, c7, c7, 0   @ invalidate I & D caches, BTB
        mcr     p14, 0, r4, c6, c0, 0   @ clock configuration, turbo mode.
        mcr     p15, 0, r5, c15, c1, 0  @ CP access reg
        mcr     p15, 0, r6, c13, c0, 0  @ PID
        mcr     p15, 0, r7, c3, c0, 0   @ domain ID
        mcr     p15, 0, r1, c2, c0, 0   @ translation table base addr
        mcr     p15, 0, r8, c1, c0, 1   @ auxiliary control reg
        mov     r0, r9                  @ control register
        b       cpu_resume_mmu
SYM_FUNC_END(cpu_xscale_do_resume)
#endif

        .type   __xscale_setup, #function
__xscale_setup:
        mcr     p15, 0, ip, c7, c7, 0           @ invalidate I, D caches & BTB
        mcr     p15, 0, ip, c7, c10, 4          @ Drain Write (& Fill) Buffer
        mcr     p15, 0, ip, c8, c7, 0           @ invalidate I, D TLBs
        mov     r0, #1 << 6                     @ cp6 for IOP3xx and Bulverde
        orr     r0, r0, #1 << 13                @ It's undefined whether this
        mcr     p15, 0, r0, c15, c1, 0          @ affects USR or SVC modes

        adr     r5, xscale_crval
        ldmia   r5, {r5, r6}
        mrc     p15, 0, r0, c1, c0, 0           @ get control register
        bic     r0, r0, r5
        orr     r0, r0, r6
        ret     lr
        .size   __xscale_setup, . - __xscale_setup

        /*
         *  R
         * .RVI ZFRS BLDP WCAM
         * ..11 1.01 .... .101
         *
         */
        .type   xscale_crval, #object
xscale_crval:
        crval   clear=0x00003b07, mmuset=0x00003905, ucset=0x00001900
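        @ __xscale_setup above loads these two words into r5/r6: bits in
        @ 'clear' are cleared from the control register, then bits in
        @ 'mmuset' are set ('ucset' is the variant used without an MMU)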

        __INITDATA

        @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
        define_processor_functions xscale, dabort=v5t_early_abort, pabort=legacy_pabort, suspend=1

        .section ".rodata"

        string  cpu_arch_name, "armv5te"
        string  cpu_elf_name, "v5"

        string  cpu_80200_A0_A1_name, "XScale-80200 A0/A1"
        string  cpu_80200_name, "XScale-80200"
        string  cpu_80219_name, "XScale-80219"
        string  cpu_8032x_name, "XScale-IOP8032x Family"
        string  cpu_8033x_name, "XScale-IOP8033x Family"
        string  cpu_pxa250_name, "XScale-PXA250"
        string  cpu_pxa210_name, "XScale-PXA210"
        string  cpu_ixp42x_name, "XScale-IXP42x Family"
        string  cpu_ixp43x_name, "XScale-IXP43x Family"
        string  cpu_ixp46x_name, "XScale-IXP46x Family"
        string  cpu_ixp2400_name, "XScale-IXP2400"
        string  cpu_ixp2800_name, "XScale-IXP2800"
        string  cpu_pxa255_name, "XScale-PXA255"
        string  cpu_pxa270_name, "XScale-PXA270"

        .align

        .section ".proc.info.init", "a"

.macro xscale_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache
        .type   __\name\()_proc_info,#object
__\name\()_proc_info:
        .long   \cpu_val
        .long   \cpu_mask
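        @ PMD section-mapping flags for normal memory (cacheable, bufferable)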
        .long   PMD_TYPE_SECT | \
                PMD_SECT_BUFFERABLE | \
                PMD_SECT_CACHEABLE | \
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ
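        @ PMD section-mapping flags for I/O mappings (uncached, unbuffered)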
        .long   PMD_TYPE_SECT | \
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ
        initfn  __xscale_setup, __\name\()_proc_info
        .long   cpu_arch_name
        .long   cpu_elf_name
        .long   HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
        .long   \cpu_name
        .long   xscale_processor_functions
        .long   v4wbi_tlb_fns
        .long   xscale_mc_user_fns
        .ifb \cache
                .long   xscale_cache_fns
        .else
                .long   \cache
        .endif
        .size   __\name\()_proc_info, . - __\name\()_proc_info
.endm

        xscale_proc_info 80200_A0_A1, 0x69052000, 0xfffffffe, cpu_80200_name, \
                cache=xscale_80200_A0_A1_cache_fns
        xscale_proc_info 80200, 0x69052000, 0xfffffff0, cpu_80200_name
        xscale_proc_info 80219, 0x69052e20, 0xffffffe0, cpu_80219_name
        xscale_proc_info 8032x, 0x69052420, 0xfffff7e0, cpu_8032x_name
        xscale_proc_info 8033x, 0x69054010, 0xfffffd30, cpu_8033x_name
        xscale_proc_info pxa250, 0x69052100, 0xfffff7f0, cpu_pxa250_name
        xscale_proc_info pxa210, 0x69052120, 0xfffff3f0, cpu_pxa210_name
        xscale_proc_info ixp2400, 0x69054190, 0xfffffff0, cpu_ixp2400_name
        xscale_proc_info ixp2800, 0x690541a0, 0xfffffff0, cpu_ixp2800_name
        xscale_proc_info ixp42x, 0x690541c0, 0xffffffc0, cpu_ixp42x_name
        xscale_proc_info ixp43x, 0x69054040, 0xfffffff0, cpu_ixp43x_name
        xscale_proc_info ixp46x, 0x69054200, 0xffffff00, cpu_ixp46x_name
        xscale_proc_info pxa255, 0x69052d00, 0xfffffff0, cpu_pxa255_name
        xscale_proc_info pxa270, 0x69054110, 0xfffffff0, cpu_pxa270_name
