TOMOYO Linux Cross Reference
Linux/tools/testing/selftests/rseq/rseq.h


/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
/*
 * rseq.h
 *
 * (C) Copyright 2016-2018 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#ifndef RSEQ_H
#define RSEQ_H

#include <stdint.h>
#include <stdbool.h>
#include <pthread.h>
#include <signal.h>
#include <sched.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <assert.h>
#include "rseq-abi.h"
#include "compiler.h"

#ifndef rseq_sizeof_field
#define rseq_sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
#endif

#ifndef rseq_offsetofend
#define rseq_offsetofend(TYPE, MEMBER) \
        (offsetof(TYPE, MEMBER) + rseq_sizeof_field(TYPE, MEMBER))
#endif

/*
 * Empty code injection macros, override when testing.
 * It is important to consider that the ASM injection macros need to be
 * fully reentrant (e.g. do not modify the stack).
 */
#ifndef RSEQ_INJECT_ASM
#define RSEQ_INJECT_ASM(n)
#endif

#ifndef RSEQ_INJECT_C
#define RSEQ_INJECT_C(n)
#endif

#ifndef RSEQ_INJECT_INPUT
#define RSEQ_INJECT_INPUT
#endif

#ifndef RSEQ_INJECT_CLOBBER
#define RSEQ_INJECT_CLOBBER
#endif

#ifndef RSEQ_INJECT_FAILED
#define RSEQ_INJECT_FAILED
#endif

#include "rseq-thread-pointer.h"

/* Offset from the thread pointer to the rseq area. */
extern ptrdiff_t rseq_offset;

/*
 * Size of the registered rseq area. 0 if the registration was
 * unsuccessful.
 */
extern unsigned int rseq_size;

/* Flags used during rseq registration. */
extern unsigned int rseq_flags;

enum rseq_mo {
        RSEQ_MO_RELAXED = 0,
        RSEQ_MO_CONSUME = 1,    /* Unused */
        RSEQ_MO_ACQUIRE = 2,    /* Unused */
        RSEQ_MO_RELEASE = 3,
        RSEQ_MO_ACQ_REL = 4,    /* Unused */
        RSEQ_MO_SEQ_CST = 5,    /* Unused */
};

enum rseq_percpu_mode {
        RSEQ_PERCPU_CPU_ID = 0,
        RSEQ_PERCPU_MM_CID = 1,
};

static inline struct rseq_abi *rseq_get_abi(void)
{
        return (struct rseq_abi *) ((uintptr_t) rseq_thread_pointer() + rseq_offset);
}

#define rseq_likely(x)          __builtin_expect(!!(x), 1)
#define rseq_unlikely(x)        __builtin_expect(!!(x), 0)
#define rseq_barrier()          __asm__ __volatile__("" : : : "memory")

#define RSEQ_ACCESS_ONCE(x)     (*(__volatile__  __typeof__(x) *)&(x))
#define RSEQ_WRITE_ONCE(x, v)   __extension__ ({ RSEQ_ACCESS_ONCE(x) = (v); })
#define RSEQ_READ_ONCE(x)       RSEQ_ACCESS_ONCE(x)

#define __rseq_str_1(x) #x
#define __rseq_str(x)           __rseq_str_1(x)

#define rseq_log(fmt, args...)                                                 \
        fprintf(stderr, fmt " (in %s() at " __FILE__ ":" __rseq_str(__LINE__) ")\n", \
                ## args, __func__)

#define rseq_bug(fmt, args...)          \
        do {                            \
                rseq_log(fmt, ##args);  \
                abort();                \
        } while (0)

#if defined(__x86_64__) || defined(__i386__)
#include <rseq-x86.h>
#elif defined(__ARMEL__)
#include <rseq-arm.h>
#elif defined (__AARCH64EL__)
#include <rseq-arm64.h>
#elif defined(__PPC__)
#include <rseq-ppc.h>
#elif defined(__mips__)
#include <rseq-mips.h>
#elif defined(__s390__)
#include <rseq-s390.h>
#elif defined(__riscv)
#include <rseq-riscv.h>
#else
#error unsupported target
#endif

/*
 * Register rseq for the current thread. This needs to be called once
 * by any thread which uses restartable sequences, before they start
 * using restartable sequences, to ensure restartable sequences
 * succeed. A restartable sequence executed from a non-registered
 * thread will always fail.
 */
int rseq_register_current_thread(void);

/*
 * Unregister rseq for current thread.
 */
int rseq_unregister_current_thread(void);
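
/*
 * Illustrative sketch of the registration lifecycle described above; the
 * rseq_example_* helper below is a placeholder, not part of this API, and
 * its error handling is only one possible shape. Each thread that uses
 * restartable sequences registers when it starts and unregisters before it
 * exits.
 */
static inline void *rseq_example_thread_fn(void *arg)
{
        if (rseq_register_current_thread()) {
                fprintf(stderr, "rseq registration failed (errno: %d)\n", errno);
                return NULL;
        }

        /* ... run rseq critical sections here ... */

        if (rseq_unregister_current_thread())
                fprintf(stderr, "rseq unregistration failed (errno: %d)\n", errno);
        return arg;
}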

/*
 * Restartable sequence fallback for reading the current CPU number.
 */
int32_t rseq_fallback_current_cpu(void);

/*
 * Restartable sequence fallback for reading the current node number.
 */
int32_t rseq_fallback_current_node(void);

/*
 * Values returned can be either the current CPU number, -1 (rseq is
 * uninitialized), or -2 (rseq initialization has failed).
 */
static inline int32_t rseq_current_cpu_raw(void)
{
        return RSEQ_ACCESS_ONCE(rseq_get_abi()->cpu_id);
}

/*
 * Returns a possible CPU number, which is typically the current CPU.
 * The returned CPU number can be used to prepare for an rseq critical
 * section, which will confirm whether the cpu number is indeed the
 * current one, and whether rseq is initialized.
 *
 * The CPU number returned by rseq_cpu_start should always be validated
 * by passing it to a rseq asm sequence, or by comparing it to the
 * return value of rseq_current_cpu_raw() if the rseq asm sequence
 * does not need to be invoked.
 */
static inline uint32_t rseq_cpu_start(void)
{
        return RSEQ_ACCESS_ONCE(rseq_get_abi()->cpu_id_start);
}

static inline uint32_t rseq_current_cpu(void)
{
        int32_t cpu;

        cpu = rseq_current_cpu_raw();
        if (rseq_unlikely(cpu < 0))
                cpu = rseq_fallback_current_cpu();
        return cpu;
}
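
/*
 * Illustrative sketch: when the hint from rseq_cpu_start() is not fed into
 * an rseq critical section (which would validate it implicitly), it can be
 * checked against rseq_current_cpu_raw() as described above. The
 * rseq_example_* name is a placeholder.
 */
static inline uint32_t rseq_example_validated_cpu(void)
{
        uint32_t cpu = rseq_cpu_start();

        /* Fall back if the hint is stale or rseq is unavailable. */
        if ((int32_t) cpu != rseq_current_cpu_raw())
                cpu = rseq_current_cpu();
        return cpu;
}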

static inline bool rseq_node_id_available(void)
{
        return (int) rseq_size >= rseq_offsetofend(struct rseq_abi, node_id);
}

/*
 * Current NUMA node number.
 */
static inline uint32_t rseq_current_node_id(void)
{
        assert(rseq_node_id_available());
        return RSEQ_ACCESS_ONCE(rseq_get_abi()->node_id);
}

static inline bool rseq_mm_cid_available(void)
{
        return (int) rseq_size >= rseq_offsetofend(struct rseq_abi, mm_cid);
}

static inline uint32_t rseq_current_mm_cid(void)
{
        return RSEQ_ACCESS_ONCE(rseq_get_abi()->mm_cid);
}

static inline void rseq_clear_rseq_cs(void)
{
        RSEQ_WRITE_ONCE(rseq_get_abi()->rseq_cs.arch.ptr, 0);
}

/*
 * rseq_prepare_unload() should be invoked by each thread executing a rseq
 * critical section at least once between their last critical section and
 * library unload of the library defining the rseq critical section (struct
 * rseq_cs) or the code referred to by the struct rseq_cs start_ip and
 * post_commit_offset fields. This also applies to use of rseq in code
 * generated by JIT: rseq_prepare_unload() should be invoked at least once by
 * each thread executing a rseq critical section before reclaim of the memory
 * holding the struct rseq_cs or reclaim of the code pointed to by struct
 * rseq_cs start_ip and post_commit_offset fields.
 */
static inline void rseq_prepare_unload(void)
{
        rseq_clear_rseq_cs();
}
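
/*
 * Illustrative sketch: a shared object (or JIT-generated code) containing
 * rseq critical sections would have each rseq-using thread call
 * rseq_prepare_unload() before that code or its struct rseq_cs descriptors
 * are reclaimed, e.g. (handle is hypothetical):
 *
 *      rseq_prepare_unload();  // in each thread that ran the critical sections
 *      ...
 *      dlclose(handle);        // the library's code and rseq_cs may now go away
 */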

static inline __attribute__((always_inline))
int rseq_cmpeqv_storev(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
                       intptr_t *v, intptr_t expect,
                       intptr_t newv, int cpu)
{
        if (rseq_mo != RSEQ_MO_RELAXED)
                return -1;
        switch (percpu_mode) {
        case RSEQ_PERCPU_CPU_ID:
                return rseq_cmpeqv_storev_relaxed_cpu_id(v, expect, newv, cpu);
        case RSEQ_PERCPU_MM_CID:
                return rseq_cmpeqv_storev_relaxed_mm_cid(v, expect, newv, cpu);
        }
        return -1;
}
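
/*
 * Illustrative sketch (assumes the thread has registered rseq): a per-CPU
 * lock in the style of the selftests, built on rseq_cmpeqv_storev() by
 * storing 1 over an expected 0 in the current CPU's slot. The
 * rseq_example_* name and the one-word-per-possible-CPU lock_word[] layout
 * are placeholders.
 */
static inline int rseq_example_percpu_lock(intptr_t *lock_word)
{
        for (;;) {
                int cpu = rseq_cpu_start();

                if (rseq_likely(!rseq_cmpeqv_storev(RSEQ_MO_RELAXED,
                                                    RSEQ_PERCPU_CPU_ID,
                                                    &lock_word[cpu], 0, 1, cpu)))
                        return cpu;     /* Lock acquired on this cpu. */
                /* Retry if the word was not 0 or the critical section aborted. */
        }
}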

/*
 * Compare @v against @expectnot. When it does _not_ match, load @v
 * into @load, and store the content of *@v + voffp into @v.
 */
static inline __attribute__((always_inline))
int rseq_cmpnev_storeoffp_load(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
                               intptr_t *v, intptr_t expectnot, long voffp, intptr_t *load,
                               int cpu)
{
        if (rseq_mo != RSEQ_MO_RELAXED)
                return -1;
        switch (percpu_mode) {
        case RSEQ_PERCPU_CPU_ID:
                return rseq_cmpnev_storeoffp_load_relaxed_cpu_id(v, expectnot, voffp, load, cpu);
        case RSEQ_PERCPU_MM_CID:
                return rseq_cmpnev_storeoffp_load_relaxed_mm_cid(v, expectnot, voffp, load, cpu);
        }
        return -1;
}
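
/*
 * Illustrative sketch (assumes the thread has registered rseq): this
 * operation is the building block of a per-CPU list pop. The
 * rseq_example_* node type and the one-head-per-possible-CPU head[] layout
 * are placeholders.
 */
struct rseq_example_node {
        struct rseq_example_node *next;
};

static inline struct rseq_example_node *
rseq_example_percpu_list_pop(struct rseq_example_node **head)
{
        for (;;) {
                struct rseq_example_node *old_head;
                int cpu = rseq_cpu_start();
                int ret = rseq_cmpnev_storeoffp_load(RSEQ_MO_RELAXED,
                                RSEQ_PERCPU_CPU_ID,
                                (intptr_t *) &head[cpu], (intptr_t) NULL,
                                offsetof(struct rseq_example_node, next),
                                (intptr_t *) &old_head, cpu);

                if (rseq_likely(!ret))
                        return old_head;        /* head[cpu] now points to old_head->next. */
                if (ret > 0)
                        return NULL;            /* Head compared equal to NULL: list empty. */
                /* ret < 0: the critical section aborted, retry. */
        }
}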

static inline __attribute__((always_inline))
int rseq_addv(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
              intptr_t *v, intptr_t count, int cpu)
{
        if (rseq_mo != RSEQ_MO_RELAXED)
                return -1;
        switch (percpu_mode) {
        case RSEQ_PERCPU_CPU_ID:
                return rseq_addv_relaxed_cpu_id(v, count, cpu);
        case RSEQ_PERCPU_MM_CID:
                return rseq_addv_relaxed_mm_cid(v, count, cpu);
        }
        return -1;
}
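
/*
 * Illustrative sketch (assumes the thread has registered rseq): a per-CPU
 * counter increment retries until the add lands on the CPU it targeted.
 * The rseq_example_* name and the one-slot-per-possible-CPU counters[]
 * layout are placeholders.
 */
static inline void rseq_example_percpu_counter_inc(intptr_t *counters)
{
        int ret;

        do {
                int cpu = rseq_cpu_start();

                ret = rseq_addv(RSEQ_MO_RELAXED, RSEQ_PERCPU_CPU_ID,
                                &counters[cpu], 1, cpu);
        } while (rseq_unlikely(ret));
}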

#ifdef RSEQ_ARCH_HAS_OFFSET_DEREF_ADDV
/*
 *   pval = *(ptr+off)
 *  *pval += inc;
 */
static inline __attribute__((always_inline))
int rseq_offset_deref_addv(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
                           intptr_t *ptr, long off, intptr_t inc, int cpu)
{
        if (rseq_mo != RSEQ_MO_RELAXED)
                return -1;
        switch (percpu_mode) {
        case RSEQ_PERCPU_CPU_ID:
                return rseq_offset_deref_addv_relaxed_cpu_id(ptr, off, inc, cpu);
        case RSEQ_PERCPU_MM_CID:
                return rseq_offset_deref_addv_relaxed_mm_cid(ptr, off, inc, cpu);
        }
        return -1;
}
#endif

static inline __attribute__((always_inline))
int rseq_cmpeqv_trystorev_storev(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
                                 intptr_t *v, intptr_t expect,
                                 intptr_t *v2, intptr_t newv2,
                                 intptr_t newv, int cpu)
{
        switch (rseq_mo) {
        case RSEQ_MO_RELAXED:
                switch (percpu_mode) {
                case RSEQ_PERCPU_CPU_ID:
                        return rseq_cmpeqv_trystorev_storev_relaxed_cpu_id(v, expect, v2, newv2, newv, cpu);
                case RSEQ_PERCPU_MM_CID:
                        return rseq_cmpeqv_trystorev_storev_relaxed_mm_cid(v, expect, v2, newv2, newv, cpu);
                }
                return -1;
        case RSEQ_MO_RELEASE:
                switch (percpu_mode) {
                case RSEQ_PERCPU_CPU_ID:
                        return rseq_cmpeqv_trystorev_storev_release_cpu_id(v, expect, v2, newv2, newv, cpu);
                case RSEQ_PERCPU_MM_CID:
                        return rseq_cmpeqv_trystorev_storev_release_mm_cid(v, expect, v2, newv2, newv, cpu);
                }
                return -1;
        default:
                return -1;
        }
}

static inline __attribute__((always_inline))
int rseq_cmpeqv_cmpeqv_storev(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
                              intptr_t *v, intptr_t expect,
                              intptr_t *v2, intptr_t expect2,
                              intptr_t newv, int cpu)
{
        if (rseq_mo != RSEQ_MO_RELAXED)
                return -1;
        switch (percpu_mode) {
        case RSEQ_PERCPU_CPU_ID:
                return rseq_cmpeqv_cmpeqv_storev_relaxed_cpu_id(v, expect, v2, expect2, newv, cpu);
        case RSEQ_PERCPU_MM_CID:
                return rseq_cmpeqv_cmpeqv_storev_relaxed_mm_cid(v, expect, v2, expect2, newv, cpu);
        }
        return -1;
}

static inline __attribute__((always_inline))
int rseq_cmpeqv_trymemcpy_storev(enum rseq_mo rseq_mo, enum rseq_percpu_mode percpu_mode,
                                 intptr_t *v, intptr_t expect,
                                 void *dst, void *src, size_t len,
                                 intptr_t newv, int cpu)
{
        switch (rseq_mo) {
        case RSEQ_MO_RELAXED:
                switch (percpu_mode) {
                case RSEQ_PERCPU_CPU_ID:
                        return rseq_cmpeqv_trymemcpy_storev_relaxed_cpu_id(v, expect, dst, src, len, newv, cpu);
                case RSEQ_PERCPU_MM_CID:
                        return rseq_cmpeqv_trymemcpy_storev_relaxed_mm_cid(v, expect, dst, src, len, newv, cpu);
                }
                return -1;
        case RSEQ_MO_RELEASE:
                switch (percpu_mode) {
                case RSEQ_PERCPU_CPU_ID:
                        return rseq_cmpeqv_trymemcpy_storev_release_cpu_id(v, expect, dst, src, len, newv, cpu);
                case RSEQ_PERCPU_MM_CID:
                        return rseq_cmpeqv_trymemcpy_storev_release_mm_cid(v, expect, dst, src, len, newv, cpu);
                }
                return -1;
        default:
                return -1;
        }
}

#endif  /* RSEQ_H */
