~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/arch/powerpc/kernel/misc_32.S

Version: ~ [ linux-6.11.5 ] ~ [ linux-6.10.14 ] ~ [ linux-6.9.12 ] ~ [ linux-6.8.12 ] ~ [ linux-6.7.12 ] ~ [ linux-6.6.58 ] ~ [ linux-6.5.13 ] ~ [ linux-6.4.16 ] ~ [ linux-6.3.13 ] ~ [ linux-6.2.16 ] ~ [ linux-6.1.114 ] ~ [ linux-6.0.19 ] ~ [ linux-5.19.17 ] ~ [ linux-5.18.19 ] ~ [ linux-5.17.15 ] ~ [ linux-5.16.20 ] ~ [ linux-5.15.169 ] ~ [ linux-5.14.21 ] ~ [ linux-5.13.19 ] ~ [ linux-5.12.19 ] ~ [ linux-5.11.22 ] ~ [ linux-5.10.228 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.284 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.322 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.336 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.337 ] ~ [ linux-4.4.302 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.9 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 /* SPDX-License-Identifier: GPL-2.0-or-later */
  2 /*
  3  * This file contains miscellaneous low-level functions.
  4  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
  5  *
  6  * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
  7  * and Paul Mackerras.
  8  *
  9  */
 10 
 11 #include <linux/export.h>
 12 #include <linux/sys.h>
 13 #include <asm/unistd.h>
 14 #include <asm/errno.h>
 15 #include <asm/reg.h>
 16 #include <asm/page.h>
 17 #include <asm/cache.h>
 18 #include <asm/cputable.h>
 19 #include <asm/mmu.h>
 20 #include <asm/ppc_asm.h>
 21 #include <asm/thread_info.h>
 22 #include <asm/asm-offsets.h>
 23 #include <asm/processor.h>
 24 #include <asm/bug.h>
 25 #include <asm/ptrace.h>
 26 #include <asm/feature-fixups.h>
 27 
 28         .text
 29 
  30 /*
  31  * This returns the high 64 bits of the product of two 64-bit numbers.
     *
     * In:  A = r3:r4 (MSW:LSW), B = r5:r6 (MSW:LSW)
     * Out: bits 64..127 of A * B in r3:r4
     *
     * cr0 remembers (B.lsw == 0) and cr1 remembers (A.msw == 0) so the
     * partial products involving a zero half can be skipped entirely.
     * Clobbers r0, r7-r10.
  32  */
  33 _GLOBAL(mulhdu)
  34         cmpwi   r6,0            /* cr0 = (B.lsw == 0) */
  35         cmpwi   cr1,r3,0        /* cr1 = (A.msw == 0) */
  36         mr      r10,r4          /* save A.lsw; r4 is overwritten next */
  37         mulhwu  r4,r4,r5        /* r4 = hi32(A.lsw * B.msw) */
  38         beq     1f              /* B.lsw == 0: skip A.lsw * B.lsw term */
  39         mulhwu  r0,r10,r6       /* r0 = hi32(A.lsw * B.lsw) */
  40         mullw   r7,r10,r5       /* r7 = lo32(A.lsw * B.msw) */
  41         addc    r7,r0,r7        /* bits 32..63: only the carry out matters */
  42         addze   r4,r4           /* fold that carry into bits 64..95 */
  43 1:      beqlr   cr1             /* all done if high part of A is 0 */
  44         mullw   r9,r3,r5        /* r9 = lo32(A.msw * B.msw) */
  45         mulhwu  r10,r3,r5       /* r10 = hi32(A.msw * B.msw) */
  46         beq     2f              /* B.lsw == 0: skip A.msw * B.lsw term */
  47         mullw   r0,r3,r6        /* r0 = lo32(A.msw * B.lsw) */
  48         mulhwu  r8,r3,r6        /* r8 = hi32(A.msw * B.lsw) */
  49         addc    r7,r0,r7        /* accumulate bits 32..63 again; carry out */
  50         adde    r4,r4,r8        /* carry-chain into bits 64..95 */
  51         addze   r10,r10         /* ... and into bits 96..127 */
  52 2:      addc    r4,r4,r9        /* result LSW (bits 64..95) */
  53         addze   r3,r10          /* result MSW (bits 96..127) */
  54         blr
 55 
  56 /*
  57  * reloc_got2 runs through the .got2 section adding an offset
  58  * to each entry.
     *
     * r3 = offset to add to every 32-bit .got2 entry.  The routine also
     * adds the difference between its run-time and link-time address to
     * the .got2 pointer itself, so it works before relocations are done.
     * Clobbers r0, r4, r7, r8, r11, CTR.
  59  */
  60 _GLOBAL(reloc_got2)
  61         mflr    r11             /* preserve LR; bcl below clobbers it */
  62         lis     r7,__got2_start@ha
  63         addi    r7,r7,__got2_start@l
  64         lis     r8,__got2_end@ha
  65         addi    r8,r8,__got2_end@l
  66         subf    r8,r7,r8        /* r8 = size of .got2 in bytes */
  67         srwi.   r8,r8,2         /* r8 = number of 4-byte entries */
  68         beqlr                   /* empty section: nothing to do */
  69         mtctr   r8
  70         bcl     20,31,$+4       /* LR = run-time address of 1: below */
  71 1:      mflr    r0
  72         lis     r4,1b@ha        /* r4 = link-time address of 1: */
  73         addi    r4,r4,1b@l
  74         subf    r0,r4,r0        /* r0 = run-time minus link-time delta */
  75         add     r7,r0,r7        /* relocate our pointer into .got2 */
  76 2:      lwz     r0,0(r7)
  77         add     r0,r0,r3        /* add caller-supplied offset to entry */
  78         stw     r0,0(r7)
  79         addi    r7,r7,4
  80         bdnz    2b
  81         mtlr    r11
  82         blr
 83 
  84 /*
  85  * call_setup_cpu - call the setup_cpu function for this cpu
  86  * r3 = data offset, r24 = cpu number
  87  *
  88  * Setup function is called with:
  89  *   r3 = data offset
  90  *   r4 = ptr to CPU spec (relocated)
  91  */
  92 _GLOBAL(call_setup_cpu)
  93         addis   r4,r3,cur_cpu_spec@ha
  94         addi    r4,r4,cur_cpu_spec@l
  95         lwz     r4,0(r4)        /* r4 = cur_cpu_spec (link-time value) */
  96         add     r4,r4,r3        /* relocate the spec pointer by r3 */
  97         lwz     r5,CPU_SPEC_SETUP(r4)
  98         cmpwi   0,r5,0          /* NULL-check the unrelocated pointer */
  99         add     r5,r5,r3        /* relocate setup function address */
 100         beqlr                   /* no cpu_setup hook: just return */
 101         mtctr   r5
 102         bctr                    /* tail-call setup(r3 = offset, r4 = spec) */
103 
104 #if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_PPC_BOOK3S_32)
105 
 106 /* This gets called by via-pmu.c to switch the PLL selection
 107  * on 750fx CPU. This function should really be moved to some
 108  * other place (as most of the cpufreq code in via-pmu should be).
     *
     * r3 selects the PLL: 0 = PLL0, non-zero = PLL1 (drives HID1:PS).
     * External interrupts (MSR:EE) are disabled for the duration.
 109  */
 110 _GLOBAL(low_choose_750fx_pll)
 111         /* Clear MSR:EE */
 112         mfmsr   r7              /* r7 = saved MSR, restored on exit */
 113         rlwinm  r0,r7,0,17,15   /* mask out bit 16 (MSR_EE) */
 114         mtmsr   r0
 115 
 116         /* If switching to PLL1, disable HID0:BTIC */
 117         cmplwi  cr0,r3,0
 118         beq     1f
 119         mfspr   r5,SPRN_HID0
 120         rlwinm  r5,r5,0,27,25   /* clear bit 26 (the BTIC bit) */
 121         sync
 122         mtspr   SPRN_HID0,r5
 123         isync
 124         sync
 125 
 126 1:
 127         /* Calc new HID1 value */
 128         mfspr   r4,SPRN_HID1    /* read current HID1 */
 129         rlwinm  r5,r3,16,15,15  /* build a HID1:PS bit from parameter */
 130         rlwinm  r4,r4,0,16,14   /* clear out HID1:PS from value read */
 131         or      r4,r4,r5
 132         mtspr   SPRN_HID1,r4
 133 
 134 #ifdef CONFIG_SMP
 135         /* Store new HID1 image */
 136         lwz     r6,TASK_CPU(r2) /* r6 = this CPU's number (r2 = current) */
 137         slwi    r6,r6,2         /* word index into nap_save_hid1[] */
 138 #else
 139         li      r6, 0
 140 #endif
 141         addis   r6,r6,nap_save_hid1@ha
 142         stw     r4,nap_save_hid1@l(r6)  /* per-CPU saved HID1 image */
 143 
 144         /* If switching to PLL0, enable HID0:BTIC */
 145         cmplwi  cr0,r3,0
 146         bne     1f
 147         mfspr   r5,SPRN_HID0
 148         ori     r5,r5,HID0_BTIC
 149         sync
 150         mtspr   SPRN_HID0,r5
 151         isync
 152         sync
 153 
 154 1:
 155         /* Return */
 156         mtmsr   r7              /* restore saved MSR */
 157         blr
158 
 159 _GLOBAL(low_choose_7447a_dfs)
        /*
         * Copy the low bit of r3 into HID1 bit 9 — presumably the 7447A
         * DFS (dynamic frequency switch) divider select, going by the
         * function name; confirm against the 7447A manual.  Runs with
         * MSR:EE cleared for the duration.  Clobbers r0, r4.
         */
 160         /* Clear MSR:EE */
 161         mfmsr   r7              /* r7 = saved MSR, restored on exit */
 162         rlwinm  r0,r7,0,17,15   /* mask out bit 16 (MSR_EE) */
 163         mtmsr   r0
 164         
 165         /* Calc new HID1 value */
 166         mfspr   r4,SPRN_HID1
 167         insrwi  r4,r3,1,9       /* insert parameter into bit 9 */
 168         sync
 169         mtspr   SPRN_HID1,r4
 170         sync
 171         isync
 172 
 173         /* Return */
 174         mtmsr   r7              /* restore saved MSR */
 175         blr
176 
177 #endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_PPC_BOOK3S_32 */
178 
179 /*
180  * Copy a whole page.  We use the dcbz instruction on the destination
181  * to reduce memory traffic (it eliminates the unnecessary reads of
182  * the destination into cache).  This requires that the destination
183  * is cacheable.
184  */
/*
 * Copy 16 bytes from 4(r4) to 4(r3) using r6-r9 as scratch.  The final
 * lwzu/stwu update r4 and r3 by 16, so the macro can be repeated
 * back-to-back; both pointers must enter biased by -4 (see copy_page).
 */
 185 #define COPY_16_BYTES           \
 186         lwz     r6,4(r4);       \
 187         lwz     r7,8(r4);       \
 188         lwz     r8,12(r4);      \
 189         lwzu    r9,16(r4);      \
 190         stw     r6,4(r3);       \
 191         stw     r7,8(r3);       \
 192         stw     r8,12(r3);      \
 193         stwu    r9,16(r3)
194 
 195 _GLOBAL(copy_page)
        /*
         * copy_page(r3 = to, r4 = from).  Destination must be cacheable
         * and cache-line aligned (dcbz is used on it).  Two passes:
         * first the bulk of the page, prefetching MAX_COPY_PREFETCH
         * lines ahead of the copy; then one final pass over the last
         * MAX_COPY_PREFETCH lines, which were already prefetched.
         * cr0.eq distinguishes the passes (0 = first, 1 = final).
         */
 196         rlwinm  r5, r3, 0, L1_CACHE_BYTES - 1   /* r5 = to & (line - 1) */
 197         addi    r3,r3,-4                /* bias dest for stwu in COPY_16_BYTES */
 198 
 199 0:      twnei   r5, 0   /* WARN if r3 is not cache aligned */
 200         EMIT_WARN_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
 201 
 202         addi    r4,r4,-4                /* bias source for lwzu */
 203 
 204         li      r5,4                    /* dcbz offset (undoes the -4 bias) */
 205 
 206 #if MAX_COPY_PREFETCH > 1
        /* Prime the prefetch: touch the first MAX_COPY_PREFETCH lines. */
 207         li      r0,MAX_COPY_PREFETCH
 208         li      r11,4
 209         mtctr   r0
 210 11:     dcbt    r11,r4
 211         addi    r11,r11,L1_CACHE_BYTES
 212         bdnz    11b
 213 #else /* MAX_COPY_PREFETCH == 1 */
 214         dcbt    r5,r4
 215         li      r11,L1_CACHE_BYTES+4
 216 #endif /* MAX_COPY_PREFETCH */
 217         li      r0,PAGE_SIZE/L1_CACHE_BYTES - MAX_COPY_PREFETCH
 218         crclr   4*cr0+eq        /* eq = 0: this is the first pass */
 219 2:
 220         mtctr   r0
 221 1:
 222         dcbt    r11,r4          /* prefetch ahead of the copy */
 223         dcbz    r5,r3           /* zero dest line: avoids reading it in */
 224         COPY_16_BYTES
 225 #if L1_CACHE_BYTES >= 32
 226         COPY_16_BYTES
 227 #if L1_CACHE_BYTES >= 64
 228         COPY_16_BYTES
 229         COPY_16_BYTES
 230 #if L1_CACHE_BYTES >= 128
 231         COPY_16_BYTES
 232         COPY_16_BYTES
 233         COPY_16_BYTES
 234         COPY_16_BYTES
 235 #endif
 236 #endif
 237 #endif
 238         bdnz    1b
 239         beqlr                   /* eq set: final pass done, return */
 240         crnot   4*cr0+eq,4*cr0+eq       /* mark the final pass */
 241         li      r0,MAX_COPY_PREFETCH    /* remaining, already-prefetched lines */
 242         li      r11,4
 243         b       2b
 244 EXPORT_SYMBOL(copy_page)
245 
246 /*
247  * Extended precision shifts.
248  *
249  * Updated to be valid for shift counts from 0 to 63 inclusive.
250  * -- Gabriel
251  *
252  * R3/R4 has 64 bit value
253  * R5    has shift count
254  * result in R3/R4
255  *
256  *  ashrdi3: arithmetic right shift (sign propagation)  
257  *  lshrdi3: logical right shift
258  *  ashldi3: left shift
259  */
 260 _GLOBAL(__ashrdi3)
        # Branch-free 64-bit arithmetic shift right of r3:r4 by r5 (0..63).
        # Relies on srw/slw producing 0 for shift amounts 32..63, while
        # sraw saturates to the sign bit for those amounts; the unwanted
        # term is masked to zero by the slw with r8 below.
 261         subfic  r6,r5,32
 262         srw     r4,r4,r5        # LSW = count > 31 ? 0 : LSW >> count
 263         addi    r7,r5,32        # could be xori, or addi with -32
 264         slw     r6,r3,r6        # t1 = count > 31 ? 0 : MSW << (32-count)
 265         rlwinm  r8,r7,0,32      # t3 = (count < 32) ? 32 : 0 (r8 = r7 & 32)
 266         sraw    r7,r3,r7        # t2 = MSW >> (count-32)
 267         or      r4,r4,r6        # LSW |= t1
 268         slw     r7,r7,r8        # t2 = (count < 32) ? 0 : t2
 269         sraw    r3,r3,r5        # MSW = MSW >> count
 270         or      r4,r4,r7        # LSW |= t2
 271         blr
273 
 274 _GLOBAL(__ashldi3)
        # Branch-free 64-bit shift left of r3:r4 by r5 (0..63).  slw/srw
        # yield 0 for shift amounts 32..63, so exactly one of t1/t2 is
        # non-zero for any count and the two terms can simply be OR-ed in.
 275         subfic  r6,r5,32
 276         slw     r3,r3,r5        # MSW = count > 31 ? 0 : MSW << count
 277         addi    r7,r5,32        # could be xori, or addi with -32
 278         srw     r6,r4,r6        # t1 = count > 31 ? 0 : LSW >> (32-count)
 279         slw     r7,r4,r7        # t2 = count < 32 ? 0 : LSW << (count-32)
 280         or      r3,r3,r6        # MSW |= t1
 281         slw     r4,r4,r5        # LSW = LSW << count
 282         or      r3,r3,r7        # MSW |= t2
 283         blr
285 
 286 _GLOBAL(__lshrdi3)
        # Branch-free 64-bit logical shift right of r3:r4 by r5 (0..63).
        # Same scheme as __ashldi3 mirrored: srw/slw give 0 for shift
        # amounts 32..63, so one of t1/t2 is always zero.
 287         subfic  r6,r5,32
 288         srw     r4,r4,r5        # LSW = count > 31 ? 0 : LSW >> count
 289         addi    r7,r5,32        # could be xori, or addi with -32
 290         slw     r6,r3,r6        # t1 = count > 31 ? 0 : MSW << (32-count)
 291         srw     r7,r3,r7        # t2 = count < 32 ? 0 : MSW >> (count-32)
 292         or      r4,r4,r6        # LSW |= t1
 293         srw     r3,r3,r5        # MSW = MSW >> count
 294         or      r4,r4,r7        # LSW |= t2
 295         blr
297 
 298 /*
 299  * 64-bit comparison: __cmpdi2(s64 a, s64 b)
 300  * Returns 0 if a < b, 1 if a == b, 2 if a > b.
     * a = r3:r4, b = r5:r6.  Signed compare of the MSWs, unsigned
     * compare of the LSWs (low words have no sign of their own).
 301  */
 302 _GLOBAL(__cmpdi2)
 303         cmpw    r3,r5           /* signed compare of MSWs */
 304         li      r3,1            /* presume a == b */
 305         bne     1f
 306         cmplw   r4,r6           /* MSWs equal: unsigned compare of LSWs */
 307         beqlr                   /* a == b -> return 1 */
 308 1:      li      r3,0
 309         bltlr                   /* a < b -> return 0 */
 310         li      r3,2            /* a > b -> return 2 */
 311         blr
 313 /*
 314  * 64-bit comparison: __ucmpdi2(u64 a, u64 b)
 315  * Returns 0 if a < b, 1 if a == b, 2 if a > b.
     * a = r3:r4, b = r5:r6.  Both word compares are unsigned, unlike
     * __cmpdi2 above which compares the MSWs as signed.
 316  */
 317 _GLOBAL(__ucmpdi2)
 318         cmplw   r3,r5           /* unsigned compare of MSWs */
 319         li      r3,1            /* presume a == b */
 320         bne     1f
 321         cmplw   r4,r6           /* MSWs equal: unsigned compare of LSWs */
 322         beqlr                   /* a == b -> return 1 */
 323 1:      li      r3,0
 324         bltlr                   /* a < b -> return 0 */
 325         li      r3,2            /* a > b -> return 2 */
 326         blr
328 
 329 _GLOBAL(__bswapdi2)
        # Byte-swap the 64-bit value in r3:r4.  Each 32-bit half is
        # byte-reversed with the classic rotlwi + two rlwimi sequence,
        # and the halves are exchanged (r9/r10 hold the swapped words).
 330         rotlwi  r9,r4,8         # rotate LSW left 8: bytes 1,2,3,0
 331         rotlwi  r10,r3,8        # same for MSW
 332         rlwimi  r9,r4,24,0,7    # insert byte 3 into byte position 0
 333         rlwimi  r10,r3,24,0,7
 334         rlwimi  r9,r4,24,16,23  # insert byte 1 into byte position 2
 335         rlwimi  r10,r3,24,16,23
 336         mr      r3,r9           # result MSW = swapped original LSW
 337         mr      r4,r10          # result LSW = swapped original MSW
 338         blr
340 
341 #ifdef CONFIG_SMP
 342 _GLOBAL(start_secondary_resume)
        /* Re-enter start_secondary on a fresh, empty stack: round r1
         * down to the base of the THREAD_SIZE stack area, then point it
         * at the top minus one minimal frame. */
 343         /* Reset stack */
 344         rlwinm  r1, r1, 0, 0, 31 - THREAD_SHIFT /* clear low THREAD_SHIFT bits */
 345         addi    r1,r1,THREAD_SIZE-STACK_FRAME_MIN_SIZE
 346         li      r3,0
 347         stw     r3,0(r1)                /* Zero the stack frame pointer */
 348         bl      start_secondary
 349         b       .               /* start_secondary is not expected to return */
350 #endif /* CONFIG_SMP */

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

sflogo.php