~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/arch/loongarch/include/asm/fpu.h

Version: ~ [ linux-6.11.5 ] ~ [ linux-6.10.14 ] ~ [ linux-6.9.12 ] ~ [ linux-6.8.12 ] ~ [ linux-6.7.12 ] ~ [ linux-6.6.58 ] ~ [ linux-6.5.13 ] ~ [ linux-6.4.16 ] ~ [ linux-6.3.13 ] ~ [ linux-6.2.16 ] ~ [ linux-6.1.114 ] ~ [ linux-6.0.19 ] ~ [ linux-5.19.17 ] ~ [ linux-5.18.19 ] ~ [ linux-5.17.15 ] ~ [ linux-5.16.20 ] ~ [ linux-5.15.169 ] ~ [ linux-5.14.21 ] ~ [ linux-5.13.19 ] ~ [ linux-5.12.19 ] ~ [ linux-5.11.22 ] ~ [ linux-5.10.228 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.284 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.322 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.336 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.337 ] ~ [ linux-4.4.302 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.9 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 /* SPDX-License-Identifier: GPL-2.0 */
  2 /*
  3  * Author: Huacai Chen <chenhuacai@loongson.cn>
  4  * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
  5  */
  6 #ifndef _ASM_FPU_H
  7 #define _ASM_FPU_H
  8 
  9 #include <linux/sched.h>
 10 #include <linux/sched/task_stack.h>
 11 #include <linux/ptrace.h>
 12 #include <linux/thread_info.h>
 13 #include <linux/bitops.h>
 14 
 15 #include <asm/cpu.h>
 16 #include <asm/cpu-features.h>
 17 #include <asm/current.h>
 18 #include <asm/loongarch.h>
 19 #include <asm/processor.h>
 20 #include <asm/ptrace.h>
 21 
struct sigcontext;

/* Kernel-mode FPU usage is possible only when the CPU has an FPU. */
#define kernel_fpu_available() cpu_has_fpu
extern void kernel_fpu_begin(void);
extern void kernel_fpu_end(void);

/* Low-level scalar-FP context helpers, implemented out of line. */
extern void _init_fpu(unsigned int);
extern void _save_fp(struct loongarch_fpu *);
extern void _restore_fp(struct loongarch_fpu *);

/* Low-level LSX vector context helpers, implemented out of line. */
extern void _save_lsx(struct loongarch_fpu *fpu);
extern void _restore_lsx(struct loongarch_fpu *fpu);
extern void _init_lsx_upper(void);
extern void _restore_lsx_upper(struct loongarch_fpu *fpu);

/* Low-level LASX vector context helpers, implemented out of line. */
extern void _save_lasx(struct loongarch_fpu *fpu);
extern void _restore_lasx(struct loongarch_fpu *fpu);
extern void _init_lasx_upper(void);
extern void _restore_lasx_upper(struct loongarch_fpu *fpu);

/* Forward declarations; real bodies depend on CONFIG_CPU_HAS_LSX below. */
static inline void enable_lsx(void);
static inline void disable_lsx(void);
static inline void save_lsx(struct task_struct *t);
static inline void restore_lsx(struct task_struct *t);

/* Forward declarations; real bodies depend on CONFIG_CPU_HAS_LASX below. */
static inline void enable_lasx(void);
static inline void disable_lasx(void);
static inline void save_lasx(struct task_struct *t);
static inline void restore_lasx(struct task_struct *t);
 51 
/*
 * Mask the FCSR Cause bits according to the Enable bits, observing
 * that Unimplemented is always enabled.
 *
 * The Cause field (FPU_CSR_ALL_X) lies a fixed number of bits above
 * the Enable field (FPU_CSR_ALL_E); shifting the Enable bits left by
 * the difference of the two fields' lowest bit positions (via ffs())
 * lines each Enable bit up with its corresponding Cause bit.
 */
static inline unsigned long mask_fcsr_x(unsigned long fcsr)
{
	return fcsr & ((fcsr & FPU_CSR_ALL_E) <<
			(ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E)));
}
 61 
 62 static inline int is_fp_enabled(void)
 63 {
 64         return (csr_read32(LOONGARCH_CSR_EUEN) & CSR_EUEN_FPEN) ?
 65                 1 : 0;
 66 }
 67 
 68 static inline int is_lsx_enabled(void)
 69 {
 70         if (!cpu_has_lsx)
 71                 return 0;
 72 
 73         return (csr_read32(LOONGARCH_CSR_EUEN) & CSR_EUEN_LSXEN) ?
 74                 1 : 0;
 75 }
 76 
 77 static inline int is_lasx_enabled(void)
 78 {
 79         if (!cpu_has_lasx)
 80                 return 0;
 81 
 82         return (csr_read32(LOONGARCH_CSR_EUEN) & CSR_EUEN_LASXEN) ?
 83                 1 : 0;
 84 }
 85 
/*
 * Report whether any SIMD unit (LSX or LASX) is enabled.  Bitwise '|'
 * is equivalent to '||' here because both operands are 0 or 1; note it
 * evaluates both sides unconditionally.
 */
static inline int is_simd_enabled(void)
{
	return is_lsx_enabled() | is_lasx_enabled();
}
 90 
/* Enable scalar-FP instructions in CSR.EUEN on this CPU. */
#define enable_fpu()		set_csr_euen(CSR_EUEN_FPEN)

/* Disable scalar-FP instructions in CSR.EUEN on this CPU. */
#define disable_fpu()		clear_csr_euen(CSR_EUEN_FPEN)

/* Mark the current thread as no longer owning the FPU context. */
#define clear_fpu_owner()	clear_thread_flag(TIF_USEDFPU)
 96 
/* Does the current thread own the live FPU context (TIF_USEDFPU set)? */
static inline int is_fpu_owner(void)
{
	return test_thread_flag(TIF_USEDFPU);
}
101 
/*
 * Unconditionally take FPU ownership for current: enable the FPU in
 * hardware, flag the thread as using it, and set FPEN in the task's
 * saved EUEN (KSTK_EUEN).
 */
static inline void __own_fpu(void)
{
	enable_fpu();
	set_thread_flag(TIF_USEDFPU);
	KSTK_EUEN(current) |= CSR_EUEN_FPEN;
}
108 
109 static inline void own_fpu_inatomic(int restore)
110 {
111         if (cpu_has_fpu && !is_fpu_owner()) {
112                 __own_fpu();
113                 if (restore)
114                         _restore_fp(&current->thread.fpu);
115         }
116 }
117 
/* Preemption-safe wrapper around own_fpu_inatomic(). */
static inline void own_fpu(int restore)
{
	preempt_disable();
	own_fpu_inatomic(restore);
	preempt_enable();
}
124 
/*
 * Drop FPU/SIMD ownership for @tsk, optionally saving live register
 * state into its thread struct first.  Expected to run with preemption
 * disabled (see lose_fpu()).
 */
static inline void lose_fpu_inatomic(int save, struct task_struct *tsk)
{
	if (is_fpu_owner()) {
		if (!is_simd_enabled()) {
			/* Scalar FP only: save just the FP registers. */
			if (save)
				_save_fp(&tsk->thread.fpu);
			disable_fpu();
		} else {
			/*
			 * SIMD is live: save via the widest enabled unit
			 * only — presumably the wider registers subsume
			 * the narrower state, so one save suffices.
			 */
			if (save) {
				if (!is_lasx_enabled())
					save_lsx(tsk);
				else
					save_lasx(tsk);
			}
			disable_fpu();
			disable_lsx();
			disable_lasx();
			clear_tsk_thread_flag(tsk, TIF_USEDSIMD);
		}
		clear_tsk_thread_flag(tsk, TIF_USEDFPU);
	}
	/* Clear every FP/SIMD enable bit in the task's saved EUEN. */
	KSTK_EUEN(tsk) &= ~(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN);
}
148 
/* Preemption-safe wrapper around lose_fpu_inatomic() for current. */
static inline void lose_fpu(int save)
{
	preempt_disable();
	lose_fpu_inatomic(save, current);
	preempt_enable();
}
155 
/*
 * Make current the FPU owner and initialize the FP registers, seeding
 * FCSR from the task's saved value, then mark FP math as used.
 */
static inline void init_fpu(void)
{
	unsigned int fcsr = current->thread.fpu.fcsr;

	__own_fpu();
	_init_fpu(fcsr);
	set_used_math();
}
164 
165 static inline void save_fp(struct task_struct *tsk)
166 {
167         if (cpu_has_fpu)
168                 _save_fp(&tsk->thread.fpu);
169 }
170 
171 static inline void restore_fp(struct task_struct *tsk)
172 {
173         if (cpu_has_fpu)
174                 _restore_fp(&tsk->thread.fpu);
175 }
176 
/*
 * Save the current task's live FP/SIMD register state into its thread
 * struct, using the widest unit currently enabled in CSR.EUEN.  Does
 * nothing when @tsk is not the current task.
 */
static inline void save_fpu_regs(struct task_struct *tsk)
{
	unsigned int euen;

	if (tsk == current) {
		preempt_disable();

		euen = csr_read32(LOONGARCH_CSR_EUEN);

		/*
		 * The #ifdef blocks chain through the dangling 'else':
		 * each test falls through to the next-narrower unit only
		 * when the wider one is disabled or not configured.
		 */
#ifdef CONFIG_CPU_HAS_LASX
		if (euen & CSR_EUEN_LASXEN)
			_save_lasx(&current->thread.fpu);
		else
#endif
#ifdef CONFIG_CPU_HAS_LSX
		if (euen & CSR_EUEN_LSXEN)
			_save_lsx(&current->thread.fpu);
		else
#endif
		if (euen & CSR_EUEN_FPEN)
			_save_fp(&current->thread.fpu);

		preempt_enable();
	}
}
202 
/* Does the current thread own live SIMD state (TIF_USEDSIMD set)? */
static inline int is_simd_owner(void)
{
	return test_thread_flag(TIF_USEDSIMD);
}
207 
#ifdef CONFIG_CPU_HAS_LSX

/* Set the LSX enable bit in CSR.EUEN (no-op if the CPU lacks LSX). */
static inline void enable_lsx(void)
{
	if (cpu_has_lsx)
		csr_xchg32(CSR_EUEN_LSXEN, CSR_EUEN_LSXEN, LOONGARCH_CSR_EUEN);
}

/* Clear the LSX enable bit in CSR.EUEN (no-op if the CPU lacks LSX). */
static inline void disable_lsx(void)
{
	if (cpu_has_lsx)
		csr_xchg32(0, CSR_EUEN_LSXEN, LOONGARCH_CSR_EUEN);
}

/* Save @t's LSX register state into its thread struct. */
static inline void save_lsx(struct task_struct *t)
{
	if (cpu_has_lsx)
		_save_lsx(&t->thread.fpu);
}

/* Restore @t's LSX register state from its thread struct. */
static inline void restore_lsx(struct task_struct *t)
{
	if (cpu_has_lsx)
		_restore_lsx(&t->thread.fpu);
}

/* Initialize the upper halves of the LSX registers. */
static inline void init_lsx_upper(void)
{
	if (cpu_has_lsx)
		_init_lsx_upper();
}

/* Restore only the upper halves of @t's LSX registers. */
static inline void restore_lsx_upper(struct task_struct *t)
{
	if (cpu_has_lsx)
		_restore_lsx_upper(&t->thread.fpu);
}

#else
/* Stubs when the kernel is built without LSX support. */
static inline void enable_lsx(void) {}
static inline void disable_lsx(void) {}
static inline void save_lsx(struct task_struct *t) {}
static inline void restore_lsx(struct task_struct *t) {}
static inline void init_lsx_upper(void) {}
static inline void restore_lsx_upper(struct task_struct *t) {}
#endif
254 
#ifdef CONFIG_CPU_HAS_LASX

/* Set the LASX enable bit in CSR.EUEN (no-op if the CPU lacks LASX). */
static inline void enable_lasx(void)
{
	/* Stray blank line removed to match the parallel enable_lsx(). */
	if (cpu_has_lasx)
		csr_xchg32(CSR_EUEN_LASXEN, CSR_EUEN_LASXEN, LOONGARCH_CSR_EUEN);
}

/* Clear the LASX enable bit in CSR.EUEN (no-op if the CPU lacks LASX). */
static inline void disable_lasx(void)
{
	if (cpu_has_lasx)
		csr_xchg32(0, CSR_EUEN_LASXEN, LOONGARCH_CSR_EUEN);
}

/* Save @t's LASX register state into its thread struct. */
static inline void save_lasx(struct task_struct *t)
{
	if (cpu_has_lasx)
		_save_lasx(&t->thread.fpu);
}

/* Restore @t's LASX register state from its thread struct. */
static inline void restore_lasx(struct task_struct *t)
{
	if (cpu_has_lasx)
		_restore_lasx(&t->thread.fpu);
}

/* Initialize the upper parts of the LASX registers. */
static inline void init_lasx_upper(void)
{
	if (cpu_has_lasx)
		_init_lasx_upper();
}

/* Restore only the upper parts of @t's LASX registers. */
static inline void restore_lasx_upper(struct task_struct *t)
{
	if (cpu_has_lasx)
		_restore_lasx_upper(&t->thread.fpu);
}

#else
/* Stubs when the kernel is built without LASX support. */
static inline void enable_lasx(void) {}
static inline void disable_lasx(void) {}
static inline void save_lasx(struct task_struct *t) {}
static inline void restore_lasx(struct task_struct *t) {}
static inline void init_lasx_upper(void) {}
static inline void restore_lasx_upper(struct task_struct *t) {}
#endif
302 
303 static inline int thread_lsx_context_live(void)
304 {
305         if (!cpu_has_lsx)
306                 return 0;
307 
308         return test_thread_flag(TIF_LSX_CTX_LIVE);
309 }
310 
311 static inline int thread_lasx_context_live(void)
312 {
313         if (!cpu_has_lasx)
314                 return 0;
315 
316         return test_thread_flag(TIF_LASX_CTX_LIVE);
317 }
318 
319 #endif /* _ASM_FPU_H */
320 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

sflogo.php