~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/arch/arm/mm/proc-xsc3.S

Version: ~ [ linux-6.12-rc7 ] ~ [ linux-6.11.7 ] ~ [ linux-6.10.14 ] ~ [ linux-6.9.12 ] ~ [ linux-6.8.12 ] ~ [ linux-6.7.12 ] ~ [ linux-6.6.60 ] ~ [ linux-6.5.13 ] ~ [ linux-6.4.16 ] ~ [ linux-6.3.13 ] ~ [ linux-6.2.16 ] ~ [ linux-6.1.116 ] ~ [ linux-6.0.19 ] ~ [ linux-5.19.17 ] ~ [ linux-5.18.19 ] ~ [ linux-5.17.15 ] ~ [ linux-5.16.20 ] ~ [ linux-5.15.171 ] ~ [ linux-5.14.21 ] ~ [ linux-5.13.19 ] ~ [ linux-5.12.19 ] ~ [ linux-5.11.22 ] ~ [ linux-5.10.229 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.285 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.323 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.336 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.337 ] ~ [ linux-4.4.302 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.9 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 /* SPDX-License-Identifier: GPL-2.0-only */
  2 /*
  3  * linux/arch/arm/mm/proc-xsc3.S
  4  *
  5  * Original Author: Matthew Gilbert
  6  * Current Maintainer: Lennert Buytenhek <buytenh@wantstofly.org>
  7  *
  8  * Copyright 2004 (C) Intel Corp.
  9  * Copyright 2005 (C) MontaVista Software, Inc.
 10  *
 11  * MMU functions for the Intel XScale3 Core (XSC3).  The XSC3 core is
 12  * an extension to Intel's original XScale core that adds the following
 13  * features:
 14  *
 15  * - ARMv6 Supersections
 16  * - Low Locality Reference pages (replaces mini-cache)
 17  * - 36-bit addressing
 18  * - L2 cache
 19  * - Cache coherency if chipset supports it
 20  *
 21  * Based on original XScale code by Nicolas Pitre.
 22  */
 23 
 24 #include <linux/linkage.h>
 25 #include <linux/init.h>
 26 #include <linux/cfi_types.h>
 27 #include <linux/pgtable.h>
 28 #include <asm/assembler.h>
 29 #include <asm/hwcap.h>
 30 #include <asm/pgtable-hwdef.h>
 31 #include <asm/page.h>
 32 #include <asm/ptrace.h>
 33 #include "proc-macros.S"
 34 
 35 /*
 36  * This is the maximum size of an area which will be flushed.  If the
 37  * area is larger than this, then we flush the whole cache.
 38  */
 39 #define MAX_AREA_SIZE   32768
 40 
 41 /*
 42  * The cache line size of the L1 I, L1 D and unified L2 cache.
 43  */
 44 #define CACHELINESIZE   32
 45 
 46 /*
 47  * The size of the L1 D cache.
 48  */
 49 #define CACHESIZE       32768
 50 
 51 /*
 52  * This macro is used to wait for a CP15 write and is needed when we
 53  * have to ensure that the last operation to the coprocessor was
 54  * completed before continuing with operation.
 55  */
 56         .macro  cpwait_ret, lr, rd
 57         mrc     p15, 0, \rd, c2, c0, 0          @ arbitrary read of cp15
            @ "\rd, LSR #32" evaluates to 0, so this is effectively
            @ "mov pc, \lr".  Using \rd stalls the pipeline until the
            @ mrc result is available, and the write to pc then flushes
            @ it, serializing the preceding cp15 write.
 58         sub     pc, \lr, \rd, LSR #32           @ wait for completion and
 59                                                 @ flush instruction pipeline
 60         .endm
 61 
 62 /*
 63  * This macro cleans and invalidates the entire L1 D cache.
 64  */
 65 
        .macro  clean_d_cache rd, rs
            @ Set/way loop over the whole L1 D cache.  \rd starts at
            @ 0x1fe0 (the highest set index); \rs appears unused here --
            @ presumably kept for interface parity with other proc-*.S
            @ clean_d_cache macros (TODO confirm).
        mov     \rd, #0x1f00
        orr     \rd, \rd, #0x00e0
1:      mcr     p15, 0, \rd, c7, c14, 2         @ clean/invalidate L1 D line
            @ Step the way field held in the top bits of \rd; the add
            @ carries out (C set) once every way of this set is done.
        adds    \rd, \rd, #0x40000000
        bcc     1b
            @ Move to the previous set; stop when the index goes negative.
        subs    \rd, \rd, #0x20
        bpl     1b
        .endm
 75 
 76         .text
 77 
 78 /*
 79  * cpu_xsc3_proc_init()
 80  *
 81  * Nothing too exciting at the moment
 82  */
 83 SYM_TYPED_FUNC_START(cpu_xsc3_proc_init)
            @ No per-CPU initialization is required: simply return.
 84         ret     lr
 85 SYM_FUNC_END(cpu_xsc3_proc_init)
 86 
 87 /*
 88  * cpu_xsc3_proc_fin()
 89  */
 90 SYM_TYPED_FUNC_START(cpu_xsc3_proc_fin)
            @ Shut the caches down ahead of reset/offline: clear the
            @ I (icache), Z (branch prediction), C (dcache) and A
            @ (alignment) control-register bits.  The MMU stays on.
 91         mrc     p15, 0, r0, c1, c0, 0           @ ctrl register
 92         bic     r0, r0, #0x1800                 @ ...IZ...........
 93         bic     r0, r0, #0x0006                 @ .............CA.
 94         mcr     p15, 0, r0, c1, c0, 0           @ disable caches
 95         ret     lr
 96 SYM_FUNC_END(cpu_xsc3_proc_fin)
 97 
 98 /*
 99  * cpu_xsc3_reset(loc)
100  *
101  * Perform a soft reset of the system.  Put the CPU into the
102  * same state as it would be if it had been reset, and branch
103  * to what would be the reset vector.
104  *
105  * loc: location to jump to for soft reset
106  */
107         .align  5
            @ Placed in .idmap.text: this code runs identity-mapped so it
            @ survives the MMU being switched off below.
108         .pushsection    .idmap.text, "ax"
109 SYM_TYPED_FUNC_START(cpu_xsc3_reset)
            @ r0 = address to branch to after the soft reset.
110         mov     r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
111         msr     cpsr_c, r1                      @ reset CPSR
112         mrc     p15, 0, r1, c1, c0, 0           @ ctrl register
113         bic     r1, r1, #0x3900                 @ ..VIZ..S........
114         bic     r1, r1, #0x0086                 @ ........B....CA.
115         mcr     p15, 0, r1, c1, c0, 0           @ ctrl register
116         mcr     p15, 0, ip, c7, c7, 0           @ invalidate L1 caches and BTB
117         bic     r1, r1, #0x0001                 @ ...............M
118         mcr     p15, 0, r1, c1, c0, 0           @ ctrl register
119         @ CAUTION: MMU turned off from this point.  We count on the pipeline
120         @ already containing those two last instructions to survive.
121         mcr     p15, 0, ip, c8, c7, 0           @ invalidate I and D TLBs
122         ret     r0
123 SYM_FUNC_END(cpu_xsc3_reset)
124         .popsection
125 
126 /*
127  * cpu_xsc3_do_idle()
128  *
129  * Cause the processor to idle
130  *
131  * For now we do nothing but go to idle mode for every case
132  *
133  * XScale supports clock switching, but using idle mode support
134  * allows external hardware to react to system state changes.
135  */
136         .align  5
137
138 SYM_TYPED_FUNC_START(cpu_xsc3_do_idle)
            @ Write 1 to the CP14 power-mode register to request idle;
            @ execution resumes here on wakeup.
139         mov     r0, #1
140         mcr     p14, 0, r0, c7, c0, 0           @ go to idle
141         ret     lr
142 SYM_FUNC_END(cpu_xsc3_do_idle)
143 
144 /* ================================= CACHE ================================ */
145 
146 /*
147  *      flush_icache_all()
148  *
149  *      Unconditionally clean and invalidate the entire icache.
150  */
151 SYM_TYPED_FUNC_START(xsc3_flush_icache_all)
            @ r0 = 0 is the required should-be-zero operand for the op.
152         mov     r0, #0
153         mcr     p15, 0, r0, c7, c5, 0           @ invalidate I cache
154         ret     lr
155 SYM_FUNC_END(xsc3_flush_icache_all)
156 
157 /*
158  *      flush_user_cache_all()
159  *
160  *      Invalidate all cache entries in a particular address
161  *      space.
162  */
    @ Implemented by the full-cache flush: no cheaper per-address-space
    @ maintenance operation is used on this core.
163 SYM_FUNC_ALIAS(xsc3_flush_user_cache_all, xsc3_flush_kern_cache_all)
164 
165 /*
166  *      flush_kern_cache_all()
167  *
168  *      Clean and invalidate the entire cache.
169  */
170 SYM_TYPED_FUNC_START(xsc3_flush_kern_cache_all)
171         mov     r2, #VM_EXEC
172         mov     ip, #0
    @ Shared tail, also branched to from xsc3_flush_user_cache_range
    @ for oversized ranges.  Expects ip = 0 and r2 = vm_flags; the
    @ I-cache/BTB maintenance runs only when VM_EXEC is set.
173 __flush_whole_cache:
174         clean_d_cache r0, r1
175         tst     r2, #VM_EXEC
176         mcrne   p15, 0, ip, c7, c5, 0           @ invalidate L1 I cache and BTB
177         mcrne   p15, 0, ip, c7, c10, 4          @ data write barrier
178         mcrne   p15, 0, ip, c7, c5, 4           @ prefetch flush
179         ret     lr
180 SYM_FUNC_END(xsc3_flush_kern_cache_all)
181 
182 /*
183  *      flush_user_cache_range(start, end, vm_flags)
184  *
185  *      Invalidate a range of cache entries in the specified
186  *      address space.
187  *
188  *      - start - start address (may not be aligned)
189  *      - end   - end address (exclusive, may not be aligned)
 190  *      - vm_flags - vm_flags of the VMA (only VM_EXEC is tested)
191  */
192         .align  5
193 SYM_TYPED_FUNC_START(xsc3_flush_user_cache_range)
            @ r0 = start, r1 = end (exclusive), r2 = vm_flags.
194         mov     ip, #0
195         sub     r3, r1, r0                      @ calculate total size
            @ For large ranges a whole-cache flush is cheaper than
            @ per-line maintenance.
196         cmp     r3, #MAX_AREA_SIZE
197         bhs     __flush_whole_cache
198
199 1:      tst     r2, #VM_EXEC
200         mcrne   p15, 0, r0, c7, c5, 1           @ invalidate L1 I line
201         mcr     p15, 0, r0, c7, c14, 1          @ clean/invalidate L1 D line
202         add     r0, r0, #CACHELINESIZE
203         cmp     r0, r1
204         blo     1b
205         tst     r2, #VM_EXEC
206         mcrne   p15, 0, ip, c7, c5, 6           @ invalidate BTB
207         mcrne   p15, 0, ip, c7, c10, 4          @ data write barrier
208         mcrne   p15, 0, ip, c7, c5, 4           @ prefetch flush
209         ret     lr
210 SYM_FUNC_END(xsc3_flush_user_cache_range)
211 
212 /*
213  *      coherent_kern_range(start, end)
214  *
215  *      Ensure coherency between the I cache and the D cache in the
216  *      region described by [start, end).  If you have non-snooping
217  *      Harvard caches, you need to implement this function.
218  *
219  *      - start  - virtual start address
220  *      - end    - virtual end address
221  *
222  *      Note: single I-cache line invalidation isn't used here since
223  *      it also trashes the mini I-cache used by JTAG debuggers.
224  */
225 SYM_TYPED_FUNC_START(xsc3_coherent_kern_range)
            @ Identical to the user variant.  With CFI enabled each
            @ symbol needs its own typed entry so an explicit branch is
            @ required; without CFI we simply fall through.
226 #ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
227         b       xsc3_coherent_user_range
228 #endif
229 SYM_FUNC_END(xsc3_coherent_kern_range)
230 
231 SYM_TYPED_FUNC_START(xsc3_coherent_user_range)
            @ Clean every D line in [start, end), then invalidate the
            @ whole I cache and BTB (single I-line invalidation is
            @ avoided -- see the header comment above).
232         bic     r0, r0, #CACHELINESIZE - 1
233 1:      mcr     p15, 0, r0, c7, c10, 1          @ clean L1 D line
234         add     r0, r0, #CACHELINESIZE
235         cmp     r0, r1
236         blo     1b
            @ r0 = 0 is the mcr operand and is also what the caller
            @ receives in r0 at return (presumably "success" -- confirm
            @ against the cpu_cache_fns contract).
237         mov     r0, #0
238         mcr     p15, 0, r0, c7, c5, 0           @ invalidate L1 I cache and BTB
239         mcr     p15, 0, r0, c7, c10, 4          @ data write barrier
240         mcr     p15, 0, r0, c7, c5, 4           @ prefetch flush
241         ret     lr
242 SYM_FUNC_END(xsc3_coherent_user_range)
243 
244 /*
245  *      flush_kern_dcache_area(void *addr, size_t size)
246  *
247  *      Ensure no D cache aliasing occurs, either with itself or
248  *      the I cache.
249  *
250  *      - addr  - kernel address
251  *      - size  - region size
252  */
253 SYM_TYPED_FUNC_START(xsc3_flush_kern_dcache_area)
            @ r0 = addr, r1 = size; convert r1 into the end address.
254         add     r1, r0, r1
255 1:      mcr     p15, 0, r0, c7, c14, 1          @ clean/invalidate L1 D line
256         add     r0, r0, #CACHELINESIZE
257         cmp     r0, r1
258         blo     1b
259         mov     r0, #0
260         mcr     p15, 0, r0, c7, c5, 0           @ invalidate L1 I cache and BTB
261         mcr     p15, 0, r0, c7, c10, 4          @ data write barrier
262         mcr     p15, 0, r0, c7, c5, 4           @ prefetch flush
263         ret     lr
264 SYM_FUNC_END(xsc3_flush_kern_dcache_area)
265 
266 /*
267  *      dma_inv_range(start, end)
268  *
269  *      Invalidate (discard) the specified virtual address range.
270  *      May not write back any entries.  If 'start' or 'end'
271  *      are not cache line aligned, those lines must be written
272  *      back.
273  *
274  *      - start  - virtual start address
275  *      - end    - virtual end address
276  */
277 xsc3_dma_inv_range:
            @ Partial lines at an unaligned start or end are cleaned
            @ (written back) first, so dirty data sharing those lines
            @ is not lost by the invalidate loop below.
278         tst     r0, #CACHELINESIZE - 1
279         bic     r0, r0, #CACHELINESIZE - 1
280         mcrne   p15, 0, r0, c7, c10, 1          @ clean L1 D line
281         tst     r1, #CACHELINESIZE - 1
282         mcrne   p15, 0, r1, c7, c10, 1          @ clean L1 D line
283 1:      mcr     p15, 0, r0, c7, c6, 1           @ invalidate L1 D line
284         add     r0, r0, #CACHELINESIZE
285         cmp     r0, r1
286         blo     1b
287         mcr     p15, 0, r0, c7, c10, 4          @ data write barrier
288         ret     lr
289 
290 /*
291  *      dma_clean_range(start, end)
292  *
293  *      Clean the specified virtual address range.
294  *
295  *      - start  - virtual start address
296  *      - end    - virtual end address
297  */
298 xsc3_dma_clean_range:
            @ Clean (write back, no invalidate) each line in [r0, r1).
299         bic     r0, r0, #CACHELINESIZE - 1
300 1:      mcr     p15, 0, r0, c7, c10, 1          @ clean L1 D line
301         add     r0, r0, #CACHELINESIZE
302         cmp     r0, r1
303         blo     1b
304         mcr     p15, 0, r0, c7, c10, 4          @ data write barrier
305         ret     lr
306 
307 /*
308  *      dma_flush_range(start, end)
309  *
310  *      Clean and invalidate the specified virtual address range.
311  *
312  *      - start  - virtual start address
313  *      - end    - virtual end address
314  */
315 SYM_TYPED_FUNC_START(xsc3_dma_flush_range)
            @ Clean AND invalidate each line in [r0, r1).
316         bic     r0, r0, #CACHELINESIZE - 1
317 1:      mcr     p15, 0, r0, c7, c14, 1          @ clean/invalidate L1 D line
318         add     r0, r0, #CACHELINESIZE
319         cmp     r0, r1
320         blo     1b
321         mcr     p15, 0, r0, c7, c10, 4          @ data write barrier
322         ret     lr
323 SYM_FUNC_END(xsc3_dma_flush_range)
324 
325 /*
326  *      dma_map_area(start, size, dir)
327  *      - start - kernel virtual start address
328  *      - size  - size of region
329  *      - dir   - DMA direction
330  */
331 SYM_TYPED_FUNC_START(xsc3_dma_map_area)
332         add     r1, r1, r0                      @ r1 = end = start + size
            @ Tail-call the maintenance routine selected by direction:
333         cmp     r2, #DMA_TO_DEVICE
334         beq     xsc3_dma_clean_range            @ dir == DMA_TO_DEVICE: clean only
335         bcs     xsc3_dma_inv_range              @ dir >  DMA_TO_DEVICE: invalidate
336         b       xsc3_dma_flush_range            @ dir <  DMA_TO_DEVICE: clean+inv
337 SYM_FUNC_END(xsc3_dma_map_area)
338 
339 /*
340  *      dma_unmap_area(start, size, dir)
341  *      - start - kernel virtual start address
342  *      - size  - size of region
343  *      - dir   - DMA direction
344  */
345 SYM_TYPED_FUNC_START(xsc3_dma_unmap_area)
            @ Intentionally a no-op: all maintenance is done at map time.
346         ret     lr
347 SYM_FUNC_END(xsc3_dma_unmap_area)
348 
349 SYM_TYPED_FUNC_START(cpu_xsc3_dcache_clean_area)
            @ cpu_xsc3_dcache_clean_area(addr, size):
            @ r0 = addr, r1 = size; clean D lines covering the region.
350 1:      mcr     p15, 0, r0, c7, c10, 1          @ clean L1 D line
351         add     r0, r0, #CACHELINESIZE
352         subs    r1, r1, #CACHELINESIZE
353         bhi     1b
354         ret     lr
355 SYM_FUNC_END(cpu_xsc3_dcache_clean_area)
356 
357 /* =============================== PageTable ============================== */
358 
359 /*
360  * cpu_xsc3_switch_mm(pgd)
361  *
362  * Set the translation base pointer to be as described by pgd.
363  *
364  * pgd: new page tables
365  */
366         .align  5
367 SYM_TYPED_FUNC_START(cpu_xsc3_switch_mm)
            @ r0 = new page-table base.  Write back and invalidate the
            @ D cache first (clobbers r1, r2) so no dirty data relies
            @ on the outgoing translations.
368         clean_d_cache r1, r2
369         mcr     p15, 0, ip, c7, c5, 0           @ invalidate L1 I cache and BTB
370         mcr     p15, 0, ip, c7, c10, 4          @ data write barrier
371         mcr     p15, 0, ip, c7, c5, 4           @ prefetch flush
372         orr     r0, r0, #0x18                   @ cache the page table in L2
373         mcr     p15, 0, r0, c2, c0, 0           @ load page table pointer
374         mcr     p15, 0, ip, c8, c7, 0           @ invalidate I and D TLBs
            @ Serialize the TTB/TLB writes before returning to the caller.
375         cpwait_ret lr, ip
376 SYM_FUNC_END(cpu_xsc3_switch_mm)
377 
378 /*
379  * cpu_xsc3_set_pte_ext(ptep, pte, ext)
380  *
381  * Set a PTE and flush it out
382  */
    @ Memory-type lookup table: maps the pte's L_PTE_MT_* field to the
    @ XSC3 TEX/C/B hardware bits.  Indexed with (pte & L_PTE_MT_MASK) as
    @ a byte offset in cpu_xsc3_set_pte_ext -- presumably the mask value
    @ is already scaled for these 4-byte entries (TODO confirm).
383 cpu_xsc3_mt_table:
384         .long   0x00                                            @ L_PTE_MT_UNCACHED
385         .long   PTE_EXT_TEX(1)                                  @ L_PTE_MT_BUFFERABLE
386         .long   PTE_EXT_TEX(5) | PTE_CACHEABLE                  @ L_PTE_MT_WRITETHROUGH
387         .long   PTE_CACHEABLE | PTE_BUFFERABLE                  @ L_PTE_MT_WRITEBACK
388         .long   PTE_EXT_TEX(1) | PTE_BUFFERABLE                 @ L_PTE_MT_DEV_SHARED
389         .long   0x00                                            @ unused
390         .long   0x00                                            @ L_PTE_MT_MINICACHE (not present)
391         .long   PTE_EXT_TEX(5) | PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEALLOC (not present?)
392         .long   0x00                                            @ unused
393         .long   PTE_EXT_TEX(1)                                  @ L_PTE_MT_DEV_WC
394         .long   0x00                                            @ unused
395         .long   PTE_CACHEABLE | PTE_BUFFERABLE                  @ L_PTE_MT_DEV_CACHED
396         .long   PTE_EXT_TEX(2)                                  @ L_PTE_MT_DEV_NONSHARED
397         .long   0x00                                            @ unused
398         .long   0x00                                            @ unused
399         .long   0x00                                            @ unused
400 
401         .align  5
402 SYM_TYPED_FUNC_START(cpu_xsc3_set_pte_ext)
            @ r0 = ptep, r1 = Linux pte value; the prologue/epilogue
            @ macros (proc-macros.S) build and store the hardware pte,
            @ presumably in r2 as used below -- confirm against the
            @ macro definitions.
403         xscale_set_pte_ext_prologue
404
405         tst     r1, #L_PTE_SHARED               @ shared?
406         and     r1, r1, #L_PTE_MT_MASK
407         adr     ip, cpu_xsc3_mt_table
408         ldr     ip, [ip, r1]
            @ The orrne (condition still from the tst above) is placed
            @ between the ldr and its use to hide the load-use latency.
409         orrne   r2, r2, #PTE_EXT_COHERENT       @ interlock: mask in coherent bit
410         bic     r2, r2, #0x0c                   @ clear old C,B bits
411         orr     r2, r2, ip
412
413         xscale_set_pte_ext_epilogue
414         ret     lr
415 SYM_FUNC_END(cpu_xsc3_set_pte_ext)
416 
417         .ltorg
418         .align
419 
.globl  cpu_xsc3_suspend_size
    @ Six 32-bit words: exactly the r4-r9 register set saved below.
.equ    cpu_xsc3_suspend_size, 4 * 6
#ifdef CONFIG_ARM_CPU_SUSPEND
SYM_TYPED_FUNC_START(cpu_xsc3_do_suspend)
            @ r0 = save area of cpu_xsc3_suspend_size bytes.
        stmfd   sp!, {r4 - r9, lr}
        mrc     p14, 0, r4, c6, c0, 0   @ clock configuration, for turbo mode
        mrc     p15, 0, r5, c15, c1, 0  @ CP access reg
        mrc     p15, 0, r6, c13, c0, 0  @ PID
        mrc     p15, 0, r7, c3, c0, 0   @ domain ID
        mrc     p15, 0, r8, c1, c0, 1   @ auxiliary control reg
        mrc     p15, 0, r9, c1, c0, 0   @ control reg
        bic     r4, r4, #2              @ clear frequency change bit
        stmia   r0, {r4 - r9}           @ store cp regs
        ldmia   sp!, {r4 - r9, pc}
SYM_FUNC_END(cpu_xsc3_do_suspend)

SYM_TYPED_FUNC_START(cpu_xsc3_do_resume)
            @ r0 = save area written by do_suspend; r1 = page-table
            @ base, presumably provided by the generic resume path --
            @ NOTE(review): confirm against cpu_resume.
        ldmia   r0, {r4 - r9}           @ load cp regs
        mov     ip, #0
        mcr     p15, 0, ip, c7, c7, 0   @ invalidate I & D caches, BTB
        mcr     p15, 0, ip, c7, c10, 4  @ drain write (&fill) buffer
        mcr     p15, 0, ip, c7, c5, 4   @ flush prefetch buffer
        mcr     p15, 0, ip, c8, c7, 0   @ invalidate I & D TLBs
        mcr     p14, 0, r4, c6, c0, 0   @ clock configuration, turbo mode.
        mcr     p15, 0, r5, c15, c1, 0  @ CP access reg
        mcr     p15, 0, r6, c13, c0, 0  @ PID
        mcr     p15, 0, r7, c3, c0, 0   @ domain ID
        orr     r1, r1, #0x18           @ cache the page table in L2
        mcr     p15, 0, r1, c2, c0, 0   @ translation table base addr
        mcr     p15, 0, r8, c1, c0, 1   @ auxiliary control reg
            @ The saved control register is handed to cpu_resume_mmu in
            @ r0; it is written there, re-enabling the MMU.
        mov     r0, r9                  @ control register
        b       cpu_resume_mmu
SYM_FUNC_END(cpu_xsc3_do_resume)
#endif
454 
455         .type   __xsc3_setup, #function
    @ Boot-time CPU setup, invoked via the proc_info initfn.  r4 is the
    @ page-table pointer -- presumably set up by the generic head code
    @ before this runs (TODO confirm).  Returns with r0 holding the
    @ control-register value for the caller to write; nothing here
    @ writes c1 itself.
456 __xsc3_setup:
457         mov     r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
458         msr     cpsr_c, r0
459         mcr     p15, 0, ip, c7, c7, 0           @ invalidate L1 caches and BTB
460         mcr     p15, 0, ip, c7, c10, 4          @ data write barrier
461         mcr     p15, 0, ip, c7, c5, 4           @ prefetch flush
462         mcr     p15, 0, ip, c8, c7, 0           @ invalidate I and D TLBs
463         orr     r4, r4, #0x18                   @ cache the page table in L2
464         mcr     p15, 0, r4, c2, c0, 0           @ load page table pointer
465
466         mov     r0, #1 << 6                     @ cp6 access for early sched_clock
467         mcr     p15, 0, r0, c15, c1, 0          @ write CP access register
468
469         mrc     p15, 0, r0, c1, c0, 1           @ get auxiliary control reg
470         and     r0, r0, #2                      @ preserve bit P bit setting
471         orr     r0, r0, #(1 << 10)              @ enable L2 for LLR cache
472         mcr     p15, 0, r0, c1, c0, 1           @ set auxiliary control reg
473
            @ r5/r6 = clear/set masks from the crval record below.
474         adr     r5, xsc3_crval
475         ldmia   r5, {r5, r6}
476
477 #ifdef CONFIG_CACHE_XSC3L2
478         mrc     p15, 1, r0, c0, c0, 1           @ get L2 present information
479         ands    r0, r0, #0xf8
480         orrne   r6, r6, #(1 << 26)              @ enable L2 if present
481 #endif
482
483         mrc     p15, 0, r0, c1, c0, 0           @ get control register
484         bic     r0, r0, r5                      @ ..V. ..R. .... ..A.
485         orr     r0, r0, r6                      @ ..VI Z..S .... .C.M (mmu)
486                                                 @ ...I Z..S .... .... (uc)
487         ret     lr
488
489         .size   __xsc3_setup, . - __xsc3_setup
490 
491         .type   xsc3_crval, #object
    @ Control-register clear/set masks consumed by __xsc3_setup
    @ (loaded pairwise via ldmia r5, {r5, r6} above).
492 xsc3_crval:
493         crval   clear=0x04002202, mmuset=0x00003905, ucset=0x00001900
494 
        __INITDATA

        @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
        define_processor_functions xsc3, dabort=v5t_early_abort, pabort=legacy_pabort, suspend=1

        .section ".rodata"

        @ Identification strings referenced from the proc_info records.
        string  cpu_arch_name, "armv5te"
        string  cpu_elf_name, "v5"
        string  cpu_xsc3_name, "XScale-V3 based processor"

        .align
507 
508         .section ".proc.info.init", "a"
509 
    @ Emits one proc_info record matching CPUs where
    @ (cpu id & cpu_mask) == cpu_val.  Field order presumably mirrors
    @ struct proc_info_list (<asm/procinfo.h>) -- confirm there.
.macro xsc3_proc_info name:req, cpu_val:req, cpu_mask:req
        .type   __\name\()_proc_info,#object
__\name\()_proc_info:
        .long   \cpu_val
        .long   \cpu_mask
            @ Section-mapping flags for normal (cached) memory.
        .long   PMD_TYPE_SECT | \
                PMD_SECT_BUFFERABLE | \
                PMD_SECT_CACHEABLE | \
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ
            @ Section-mapping flags for I/O (uncached) memory.
        .long   PMD_TYPE_SECT | \
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ
        initfn  __xsc3_setup, __\name\()_proc_info
        .long   cpu_arch_name
        .long   cpu_elf_name
        .long   HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
        .long   cpu_xsc3_name
        .long   xsc3_processor_functions
        .long   v4wbi_tlb_fns
        .long   xsc3_mc_user_fns
        .long   xsc3_cache_fns
        .size   __\name\()_proc_info, . - __\name\()_proc_info
.endm
534 
535         xsc3_proc_info xsc3, 0x69056000, 0xffffe000     @ implementor 0x69 = Intel
536
537 /* Note: PXA935 changed its implementor ID from Intel to Marvell */
538         xsc3_proc_info xsc3_pxa935, 0x56056000, 0xffffe000

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

sflogo.php