Linux/arch/riscv/kernel/traps_misaligned.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Western Digital Corporation or its affiliates.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/perf_event.h>
#include <linux/irq.h>
#include <linux/stringify.h>

#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/csr.h>
#include <asm/entry-common.h>
#include <asm/hwprobe.h>
#include <asm/cpufeature.h>

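/*
 * Instruction decoding: (insn & INSN_MASK_x) == INSN_MATCH_x identifies
 * instruction x. For standard 32-bit encodings the 0x707f mask covers
 * opcode[6:0] and funct3[14:12]; for compressed (RVC) encodings the
 * 0xe003 mask covers op[1:0] and funct3[15:13].
 */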
#define INSN_MATCH_LB                   0x3
#define INSN_MASK_LB                    0x707f
#define INSN_MATCH_LH                   0x1003
#define INSN_MASK_LH                    0x707f
#define INSN_MATCH_LW                   0x2003
#define INSN_MASK_LW                    0x707f
#define INSN_MATCH_LD                   0x3003
#define INSN_MASK_LD                    0x707f
#define INSN_MATCH_LBU                  0x4003
#define INSN_MASK_LBU                   0x707f
#define INSN_MATCH_LHU                  0x5003
#define INSN_MASK_LHU                   0x707f
#define INSN_MATCH_LWU                  0x6003
#define INSN_MASK_LWU                   0x707f
#define INSN_MATCH_SB                   0x23
#define INSN_MASK_SB                    0x707f
#define INSN_MATCH_SH                   0x1023
#define INSN_MASK_SH                    0x707f
#define INSN_MATCH_SW                   0x2023
#define INSN_MASK_SW                    0x707f
#define INSN_MATCH_SD                   0x3023
#define INSN_MASK_SD                    0x707f

#define INSN_MATCH_FLW                  0x2007
#define INSN_MASK_FLW                   0x707f
#define INSN_MATCH_FLD                  0x3007
#define INSN_MASK_FLD                   0x707f
#define INSN_MATCH_FLQ                  0x4007
#define INSN_MASK_FLQ                   0x707f
#define INSN_MATCH_FSW                  0x2027
#define INSN_MASK_FSW                   0x707f
#define INSN_MATCH_FSD                  0x3027
#define INSN_MASK_FSD                   0x707f
#define INSN_MATCH_FSQ                  0x4027
#define INSN_MASK_FSQ                   0x707f

#define INSN_MATCH_C_LD                 0x6000
#define INSN_MASK_C_LD                  0xe003
#define INSN_MATCH_C_SD                 0xe000
#define INSN_MASK_C_SD                  0xe003
#define INSN_MATCH_C_LW                 0x4000
#define INSN_MASK_C_LW                  0xe003
#define INSN_MATCH_C_SW                 0xc000
#define INSN_MASK_C_SW                  0xe003
#define INSN_MATCH_C_LDSP               0x6002
#define INSN_MASK_C_LDSP                0xe003
#define INSN_MATCH_C_SDSP               0xe002
#define INSN_MASK_C_SDSP                0xe003
#define INSN_MATCH_C_LWSP               0x4002
#define INSN_MASK_C_LWSP                0xe003
#define INSN_MATCH_C_SWSP               0xc002
#define INSN_MASK_C_SWSP                0xe003

#define INSN_MATCH_C_FLD                0x2000
#define INSN_MASK_C_FLD                 0xe003
#define INSN_MATCH_C_FLW                0x6000
#define INSN_MASK_C_FLW                 0xe003
#define INSN_MATCH_C_FSD                0xa000
#define INSN_MASK_C_FSD                 0xe003
#define INSN_MATCH_C_FSW                0xe000
#define INSN_MASK_C_FSW                 0xe003
#define INSN_MATCH_C_FLDSP              0x2002
#define INSN_MASK_C_FLDSP               0xe003
#define INSN_MATCH_C_FSDSP              0xa002
#define INSN_MASK_C_FSDSP               0xe003
#define INSN_MATCH_C_FLWSP              0x6002
#define INSN_MASK_C_FLWSP               0xe003
#define INSN_MATCH_C_FSWSP              0xe002
#define INSN_MASK_C_FSWSP               0xe003

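/*
 * Every RVC (16-bit) encoding has insn[1:0] != 0b11; standard 32-bit
 * encodings have insn[1:0] == 0b11, which is all INSN_LEN() needs.
 */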
#define INSN_LEN(insn)                  ((((insn) & 0x3) < 0x3) ? 2 : 4)

#if defined(CONFIG_64BIT)
#define LOG_REGBYTES                    3
#define XLEN                            64
#else
#define LOG_REGBYTES                    2
#define XLEN                            32
#endif
#define REGBYTES                        (1 << LOG_REGBYTES)
#define XLEN_MINUS_16                   ((XLEN) - 16)

#define SH_RD                           7
#define SH_RS1                          15
#define SH_RS2                          20
#define SH_RS2C                         2

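/*
 * RV_X(x, s, n) extracts the n-bit field starting at bit s of x. The
 * RVC_*_IMM() macros reassemble the scrambled immediate bits of
 * compressed loads/stores into a zero-extended byte offset.
 */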
#define RV_X(x, s, n)                   (((x) >> (s)) & ((1 << (n)) - 1))
#define RVC_LW_IMM(x)                   ((RV_X(x, 6, 1) << 2) | \
                                         (RV_X(x, 10, 3) << 3) | \
                                         (RV_X(x, 5, 1) << 6))
#define RVC_LD_IMM(x)                   ((RV_X(x, 10, 3) << 3) | \
                                         (RV_X(x, 5, 2) << 6))
#define RVC_LWSP_IMM(x)                 ((RV_X(x, 4, 3) << 2) | \
                                         (RV_X(x, 12, 1) << 5) | \
                                         (RV_X(x, 2, 2) << 6))
#define RVC_LDSP_IMM(x)                 ((RV_X(x, 5, 2) << 3) | \
                                         (RV_X(x, 12, 1) << 5) | \
                                         (RV_X(x, 2, 3) << 6))
#define RVC_SWSP_IMM(x)                 ((RV_X(x, 9, 4) << 2) | \
                                         (RV_X(x, 7, 2) << 6))
#define RVC_SDSP_IMM(x)                 ((RV_X(x, 10, 3) << 3) | \
                                         (RV_X(x, 7, 3) << 6))
#define RVC_RS1S(insn)                  (8 + RV_X(insn, SH_RD, 3))
#define RVC_RS2S(insn)                  (8 + RV_X(insn, SH_RS2C, 3))
#define RVC_RS2(insn)                   RV_X(insn, SH_RS2C, 5)

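/*
 * GPRs are accessed through their byte offset inside struct pt_regs:
 * REG_OFFSET() turns the 5-bit register field found at bit 'pos' of the
 * instruction into reg# * REGBYTES, and REG_PTR() adds that to the
 * pt_regs base. SHIFT_RIGHT() tolerates a negative count (shifting left
 * instead) so the same expression works when pos < LOG_REGBYTES.
 */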
#define SHIFT_RIGHT(x, y)               \
        ((y) < 0 ? ((x) << -(y)) : ((x) >> (y)))

#define REG_MASK                        \
        ((1 << (5 + LOG_REGBYTES)) - (1 << LOG_REGBYTES))

#define REG_OFFSET(insn, pos)           \
        (SHIFT_RIGHT((insn), (pos) - LOG_REGBYTES) & REG_MASK)

#define REG_PTR(insn, pos, regs)        \
        (ulong *)((ulong)(regs) + REG_OFFSET(insn, pos))

#define GET_RM(insn)                    (((insn) >> 12) & 7)

#define GET_RS1(insn, regs)             (*REG_PTR(insn, SH_RS1, regs))
#define GET_RS2(insn, regs)             (*REG_PTR(insn, SH_RS2, regs))
#define GET_RS1S(insn, regs)            (*REG_PTR(RVC_RS1S(insn), 0, regs))
#define GET_RS2S(insn, regs)            (*REG_PTR(RVC_RS2S(insn), 0, regs))
#define GET_RS2C(insn, regs)            (*REG_PTR(insn, SH_RS2C, regs))
#define GET_SP(regs)                    (*REG_PTR(2, 0, regs))
#define SET_RD(insn, regs, val)         (*REG_PTR(insn, SH_RD, regs) = (val))
#define IMM_I(insn)                     ((s32)(insn) >> 20)
#define IMM_S(insn)                     (((s32)(insn) >> 25 << 5) | \
                                         (s32)(((insn) >> 7) & 0x1f))
#define MASK_FUNCT3                     0x7000

#define GET_PRECISION(insn) (((insn) >> 25) & 3)
#define PRECISION_S 0
#define PRECISION_D 1

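/*
 * The FP register file cannot be addressed through pt_regs, so accesses
 * go through small assembly accessors (get_f32_reg() and friends,
 * implemented in fpu.S). Each wrapper also marks the FP state dirty so
 * that it gets saved on the next context switch.
 */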
#ifdef CONFIG_FPU

#define FP_GET_RD(insn)         (((insn) >> 7) & 0x1F)

extern void put_f32_reg(unsigned long fp_reg, unsigned long value);

static int set_f32_rd(unsigned long insn, struct pt_regs *regs,
                      unsigned long val)
{
        unsigned long fp_reg = FP_GET_RD(insn);

        put_f32_reg(fp_reg, val);
        regs->status |= SR_FS_DIRTY;

        return 0;
}

extern void put_f64_reg(unsigned long fp_reg, unsigned long value);

static int set_f64_rd(unsigned long insn, struct pt_regs *regs, u64 val)
{
        unsigned long fp_reg = FP_GET_RD(insn);
        unsigned long value;

#if __riscv_xlen == 32
        value = (unsigned long) &val;
#else
        value = val;
#endif
        put_f64_reg(fp_reg, value);
        regs->status |= SR_FS_DIRTY;

        return 0;
}

#if __riscv_xlen == 32
extern void get_f64_reg(unsigned long fp_reg, u64 *value);

static u64 get_f64_rs(unsigned long insn, u8 fp_reg_offset,
                      struct pt_regs *regs)
{
        unsigned long fp_reg = (insn >> fp_reg_offset) & 0x1F;
        u64 val;

        get_f64_reg(fp_reg, &val);
        regs->status |= SR_FS_DIRTY;

        return val;
}
#else

extern unsigned long get_f64_reg(unsigned long fp_reg);

static unsigned long get_f64_rs(unsigned long insn, u8 fp_reg_offset,
                                struct pt_regs *regs)
{
        unsigned long fp_reg = (insn >> fp_reg_offset) & 0x1F;
        unsigned long val;

        val = get_f64_reg(fp_reg);
        regs->status |= SR_FS_DIRTY;

        return val;
}

#endif

extern unsigned long get_f32_reg(unsigned long fp_reg);

static unsigned long get_f32_rs(unsigned long insn, u8 fp_reg_offset,
                                struct pt_regs *regs)
{
        unsigned long fp_reg = (insn >> fp_reg_offset) & 0x1F;
        unsigned long val;

        val = get_f32_reg(fp_reg);
        regs->status |= SR_FS_DIRTY;

        return val;
}

#else /* CONFIG_FPU */
static void set_f32_rd(unsigned long insn, struct pt_regs *regs,
                       unsigned long val) {}

static void set_f64_rd(unsigned long insn, struct pt_regs *regs, u64 val) {}

static unsigned long get_f64_rs(unsigned long insn, u8 fp_reg_offset,
                                struct pt_regs *regs)
{
        return 0;
}

static unsigned long get_f32_rs(unsigned long insn, u8 fp_reg_offset,
                                struct pt_regs *regs)
{
        return 0;
}

#endif

#define GET_F64_RS2(insn, regs) (get_f64_rs(insn, 20, regs))
#define GET_F64_RS2C(insn, regs) (get_f64_rs(insn, 2, regs))
#define GET_F64_RS2S(insn, regs) (get_f64_rs(RVC_RS2S(insn), 0, regs))

#define GET_F32_RS2(insn, regs) (get_f32_rs(insn, 20, regs))
#define GET_F32_RS2C(insn, regs) (get_f32_rs(insn, 2, regs))
#define GET_F32_RS2S(insn, regs) (get_f32_rs(RVC_RS2S(insn), 0, regs))

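/*
 * Fetch one instruction parcel of the given type from 'insn_addr':
 * through __get_user() when the fault came from user mode, by direct
 * dereference when it came from kernel mode.
 */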
#define __read_insn(regs, insn, insn_addr, type)        \
({                                                      \
        int __ret;                                      \
                                                        \
        if (user_mode(regs)) {                          \
                __ret = __get_user(insn, (type __user *) insn_addr); \
        } else {                                        \
                insn = *(type *)insn_addr;              \
                __ret = 0;                              \
        }                                               \
                                                        \
        __ret;                                          \
})

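/*
 * Read the trapping instruction at 'epc'. RVC allows 32-bit instructions
 * to sit on any 16-bit boundary, so a 4-byte read from a 2-byte-aligned
 * epc could itself cross a page boundary; fetch 16-bit parcels instead
 * and stop early if the first parcel says the instruction is compressed.
 */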
static inline int get_insn(struct pt_regs *regs, ulong epc, ulong *r_insn)
{
        ulong insn = 0;

        if (epc & 0x2) {
                ulong tmp = 0;

                if (__read_insn(regs, insn, epc, u16))
                        return -EFAULT;
                /*
                 * __get_user() uses a regular "lw", which sign-extends
                 * the loaded value; make sure to clear the higher-order
                 * bits in case we "or" it below with the upper 16-bit
                 * half.
                 */
                insn &= GENMASK(15, 0);
                if ((insn & __INSN_LENGTH_MASK) != __INSN_LENGTH_32) {
                        *r_insn = insn;
                        return 0;
                }
                epc += sizeof(u16);
                if (__read_insn(regs, tmp, epc, u16))
                        return -EFAULT;
                *r_insn = (tmp << 16) | insn;

                return 0;
        } else {
                if (__read_insn(regs, insn, epc, u32))
                        return -EFAULT;
                if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32) {
                        *r_insn = insn;
                        return 0;
                }
                insn &= GENMASK(15, 0);
                *r_insn = insn;

                return 0;
        }
}

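/*
 * Scratch buffer for the emulated access: filled byte by byte from the
 * (possibly misaligned) target address, then read back through the
 * appropriately sized member.
 */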
union reg_data {
        u8 data_bytes[8];
        ulong data_ulong;
        u64 data_u64;
};

static bool unaligned_ctl __read_mostly;

/* sysctl hooks */
int unaligned_enabled __read_mostly = 1;        /* Enabled by default */

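/*
 * Emulate a misaligned load: decode the access width, whether the target
 * is an FP register, and the sign-extension shift, then perform the
 * access as a byte-wise copy and write the result to the destination
 * register. On success, epc is advanced past the emulated instruction.
 */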
int handle_misaligned_load(struct pt_regs *regs)
{
        union reg_data val;
        unsigned long epc = regs->epc;
        unsigned long insn;
        unsigned long addr = regs->badaddr;
        int fp = 0, shift = 0, len = 0;

        perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);

#ifdef CONFIG_RISCV_PROBE_UNALIGNED_ACCESS
        *this_cpu_ptr(&misaligned_access_speed) = RISCV_HWPROBE_MISALIGNED_EMULATED;
#endif

        if (!unaligned_enabled)
                return -1;

        if (user_mode(regs) && (current->thread.align_ctl & PR_UNALIGN_SIGBUS))
                return -1;

        if (get_insn(regs, epc, &insn))
                return -1;

        regs->epc = 0;

        if ((insn & INSN_MASK_LW) == INSN_MATCH_LW) {
                len = 4;
                shift = 8 * (sizeof(unsigned long) - len);
#if defined(CONFIG_64BIT)
        } else if ((insn & INSN_MASK_LD) == INSN_MATCH_LD) {
                len = 8;
                shift = 8 * (sizeof(unsigned long) - len);
        } else if ((insn & INSN_MASK_LWU) == INSN_MATCH_LWU) {
                len = 4;
#endif
        } else if ((insn & INSN_MASK_FLD) == INSN_MATCH_FLD) {
                fp = 1;
                len = 8;
        } else if ((insn & INSN_MASK_FLW) == INSN_MATCH_FLW) {
                fp = 1;
                len = 4;
        } else if ((insn & INSN_MASK_LH) == INSN_MATCH_LH) {
                len = 2;
                shift = 8 * (sizeof(unsigned long) - len);
        } else if ((insn & INSN_MASK_LHU) == INSN_MATCH_LHU) {
                len = 2;
#if defined(CONFIG_64BIT)
        } else if ((insn & INSN_MASK_C_LD) == INSN_MATCH_C_LD) {
                len = 8;
                shift = 8 * (sizeof(unsigned long) - len);
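                /*
                 * Compressed loads keep rd' in the rs2' slot (bits 4:2);
                 * move it into the standard rd field so SET_RD() applies
                 * unchanged, here and in the compressed cases below.
                 */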
                insn = RVC_RS2S(insn) << SH_RD;
        } else if ((insn & INSN_MASK_C_LDSP) == INSN_MATCH_C_LDSP &&
                   ((insn >> SH_RD) & 0x1f)) {
                len = 8;
                shift = 8 * (sizeof(unsigned long) - len);
#endif
        } else if ((insn & INSN_MASK_C_LW) == INSN_MATCH_C_LW) {
                len = 4;
                shift = 8 * (sizeof(unsigned long) - len);
                insn = RVC_RS2S(insn) << SH_RD;
        } else if ((insn & INSN_MASK_C_LWSP) == INSN_MATCH_C_LWSP &&
                   ((insn >> SH_RD) & 0x1f)) {
                len = 4;
                shift = 8 * (sizeof(unsigned long) - len);
        } else if ((insn & INSN_MASK_C_FLD) == INSN_MATCH_C_FLD) {
                fp = 1;
                len = 8;
                insn = RVC_RS2S(insn) << SH_RD;
        } else if ((insn & INSN_MASK_C_FLDSP) == INSN_MATCH_C_FLDSP) {
                fp = 1;
                len = 8;
#if defined(CONFIG_32BIT)
        } else if ((insn & INSN_MASK_C_FLW) == INSN_MATCH_C_FLW) {
                fp = 1;
                len = 4;
                insn = RVC_RS2S(insn) << SH_RD;
        } else if ((insn & INSN_MASK_C_FLWSP) == INSN_MATCH_C_FLWSP) {
                fp = 1;
                len = 4;
#endif
        } else {
                regs->epc = epc;
                return -1;
        }

        if (!IS_ENABLED(CONFIG_FPU) && fp)
                return -EOPNOTSUPP;

        val.data_u64 = 0;
        if (user_mode(regs)) {
                if (raw_copy_from_user(&val, (u8 __user *)addr, len))
                        return -1;
        } else {
                memcpy(&val, (u8 *)addr, len);
        }

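        /*
         * Write back a non-FP load zero- or sign-extended to XLEN bits:
         * shifting left and then arithmetically right by
         * 8 * (sizeof(unsigned long) - len) bits replicates the sign bit
         * of the len-byte value; the zero-extending variants (LHU/LWU)
         * leave shift == 0.
         */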
        if (!fp)
                SET_RD(insn, regs, (long)(val.data_ulong << shift) >> shift);
        else if (len == 8)
                set_f64_rd(insn, regs, val.data_u64);
        else
                set_f32_rd(insn, regs, val.data_ulong);

        regs->epc = epc + INSN_LEN(insn);

        return 0;
}

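/*
 * Emulate a misaligned store: decode the access width and read the
 * source value from the integer or FP register file, then write it out
 * as a byte-wise copy. On success, epc is advanced past the emulated
 * instruction.
 */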
int handle_misaligned_store(struct pt_regs *regs)
{
        union reg_data val;
        unsigned long epc = regs->epc;
        unsigned long insn;
        unsigned long addr = regs->badaddr;
        int len = 0, fp = 0;

        perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);

        if (!unaligned_enabled)
                return -1;

        if (user_mode(regs) && (current->thread.align_ctl & PR_UNALIGN_SIGBUS))
                return -1;

        if (get_insn(regs, epc, &insn))
                return -1;

        regs->epc = 0;

        val.data_ulong = GET_RS2(insn, regs);

        if ((insn & INSN_MASK_SW) == INSN_MATCH_SW) {
                len = 4;
#if defined(CONFIG_64BIT)
        } else if ((insn & INSN_MASK_SD) == INSN_MATCH_SD) {
                len = 8;
#endif
        } else if ((insn & INSN_MASK_FSD) == INSN_MATCH_FSD) {
                fp = 1;
                len = 8;
                val.data_u64 = GET_F64_RS2(insn, regs);
        } else if ((insn & INSN_MASK_FSW) == INSN_MATCH_FSW) {
                fp = 1;
                len = 4;
                val.data_ulong = GET_F32_RS2(insn, regs);
        } else if ((insn & INSN_MASK_SH) == INSN_MATCH_SH) {
                len = 2;
#if defined(CONFIG_64BIT)
        } else if ((insn & INSN_MASK_C_SD) == INSN_MATCH_C_SD) {
                len = 8;
                val.data_ulong = GET_RS2S(insn, regs);
        } else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP) {
                len = 8;
                val.data_ulong = GET_RS2C(insn, regs);
#endif
        } else if ((insn & INSN_MASK_C_SW) == INSN_MATCH_C_SW) {
                len = 4;
                val.data_ulong = GET_RS2S(insn, regs);
        } else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP) {
                len = 4;
                val.data_ulong = GET_RS2C(insn, regs);
        } else if ((insn & INSN_MASK_C_FSD) == INSN_MATCH_C_FSD) {
                fp = 1;
                len = 8;
                val.data_u64 = GET_F64_RS2S(insn, regs);
        } else if ((insn & INSN_MASK_C_FSDSP) == INSN_MATCH_C_FSDSP) {
                fp = 1;
                len = 8;
                val.data_u64 = GET_F64_RS2C(insn, regs);
#if !defined(CONFIG_64BIT)
        } else if ((insn & INSN_MASK_C_FSW) == INSN_MATCH_C_FSW) {
                fp = 1;
                len = 4;
                val.data_ulong = GET_F32_RS2S(insn, regs);
        } else if ((insn & INSN_MASK_C_FSWSP) == INSN_MATCH_C_FSWSP) {
                fp = 1;
                len = 4;
                val.data_ulong = GET_F32_RS2C(insn, regs);
#endif
        } else {
                regs->epc = epc;
                return -1;
        }

        if (!IS_ENABLED(CONFIG_FPU) && fp)
                return -EOPNOTSUPP;

        if (user_mode(regs)) {
                if (raw_copy_to_user((u8 __user *)addr, &val, len))
                        return -1;
        } else {
                memcpy((u8 *)addr, &val, len);
        }

        regs->epc = epc + INSN_LEN(insn);

        return 0;
}

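/*
 * Probe one CPU with a deliberately misaligned load. If the kernel ends
 * up emulating it, handle_misaligned_load() tags this CPU's
 * misaligned_access_speed as EMULATED, which is what we test for below.
 */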
static bool check_unaligned_access_emulated(int cpu)
{
        long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu);
        unsigned long tmp_var, tmp_val;
        bool misaligned_emu_detected;

        *mas_ptr = RISCV_HWPROBE_MISALIGNED_UNKNOWN;

        __asm__ __volatile__ (
                "       "REG_L" %[tmp], 1(%[ptr])\n"
                : [tmp] "=r" (tmp_val) : [ptr] "r" (&tmp_var) : "memory");

        misaligned_emu_detected = (*mas_ptr == RISCV_HWPROBE_MISALIGNED_EMULATED);
        /*
         * If unaligned_ctl is already set, we detected at boot time that
         * all CPUs use emulated misaligned accesses. If that changed when
         * hotplugging a new CPU, this is something we don't handle.
         */
        if (unlikely(unaligned_ctl && !misaligned_emu_detected)) {
                pr_crit("CPU misaligned accesses non homogeneous (expected all emulated)\n");
                while (true)
                        cpu_relax();
        }

        return misaligned_emu_detected;
}

bool check_unaligned_access_emulated_all_cpus(void)
{
        int cpu;

        /*
         * We can only support PR_UNALIGN controls if all CPUs have misaligned
         * accesses emulated since tasks requesting such control can run on any
         * CPU.
         */
        for_each_online_cpu(cpu)
                if (!check_unaligned_access_emulated(cpu))
                        return false;

        unaligned_ctl = true;
        return true;
}

bool unaligned_ctl_available(void)
{
        return unaligned_ctl;
}

