TOMOYO Linux Cross Reference
Linux/include/linux/trace_recursion.h

Diff markup

Differences between /include/linux/trace_recursion.h (Version linux-6.12-rc7) and /include/linux/trace_recursion.h (Version linux-5.11.22)


  1 /* SPDX-License-Identifier: GPL-2.0 */              1 /* SPDX-License-Identifier: GPL-2.0 */
  2 #ifndef _LINUX_TRACE_RECURSION_H                    2 #ifndef _LINUX_TRACE_RECURSION_H
  3 #define _LINUX_TRACE_RECURSION_H                    3 #define _LINUX_TRACE_RECURSION_H
  4                                                     4 
  5 #include <linux/interrupt.h>                        5 #include <linux/interrupt.h>
  6 #include <linux/sched.h>                            6 #include <linux/sched.h>
  7                                                     7 
  8 #ifdef CONFIG_TRACING                               8 #ifdef CONFIG_TRACING
  9                                                     9 
 10 /* Only current can touch trace_recursion */       10 /* Only current can touch trace_recursion */
 11                                                    11 
 12 /*                                                 12 /*
 13  * For function tracing recursion:                 13  * For function tracing recursion:
 14  *  The order of these bits are important.         14  *  The order of these bits are important.
 15  *                                                 15  *
 16  *  When function tracing occurs, the followin     16  *  When function tracing occurs, the following steps are made:
 17  *   If arch does not support a ftrace feature     17  *   If arch does not support a ftrace feature:
 18  *    call internal function (uses INTERNAL bi     18  *    call internal function (uses INTERNAL bits) which calls...
                                                   >>  19  *   If callback is registered to the "global" list, the list
                                                   >>  20  *    function is called and recursion checks the GLOBAL bits.
                                                   >>  21  *    then this function calls...
 19  *   The function callback, which can use the      22  *   The function callback, which can use the FTRACE bits to
 20  *    check for recursion.                         23  *    check for recursion.
                                                   >>  24  *
                                                   >>  25  * Now if the arch does not support a feature, and it calls
                                                   >>  26  * the global list function which calls the ftrace callback
                                                   >>  27  * all three of these steps will do a recursion protection.
                                                   >>  28  * There's no reason to do one if the previous caller already
                                                   >>  29  * did. The recursion that we are protecting against will
                                                   >>  30  * go through the same steps again.
                                                   >>  31  *
                                                   >>  32  * To prevent the multiple recursion checks, if a recursion
                                                   >>  33  * bit is set that is higher than the MAX bit of the current
                                                   >>  34  * check, then we know that the check was made by the previous
                                                   >>  35  * caller, and we can skip the current check.
 21  */                                                36  */
 22 enum {                                             37 enum {
 23         /* Function recursion bits */              38         /* Function recursion bits */
 24         TRACE_FTRACE_BIT,                          39         TRACE_FTRACE_BIT,
 25         TRACE_FTRACE_NMI_BIT,                      40         TRACE_FTRACE_NMI_BIT,
 26         TRACE_FTRACE_IRQ_BIT,                      41         TRACE_FTRACE_IRQ_BIT,
 27         TRACE_FTRACE_SIRQ_BIT,                     42         TRACE_FTRACE_SIRQ_BIT,
 28         TRACE_FTRACE_TRANSITION_BIT,           << 
 29                                                    43 
 30         /* Internal use recursion bits */      !!  44         /* INTERNAL_BITs must be greater than FTRACE_BITs */
 31         TRACE_INTERNAL_BIT,                        45         TRACE_INTERNAL_BIT,
 32         TRACE_INTERNAL_NMI_BIT,                    46         TRACE_INTERNAL_NMI_BIT,
 33         TRACE_INTERNAL_IRQ_BIT,                    47         TRACE_INTERNAL_IRQ_BIT,
 34         TRACE_INTERNAL_SIRQ_BIT,                   48         TRACE_INTERNAL_SIRQ_BIT,
 35         TRACE_INTERNAL_TRANSITION_BIT,         << 
 36                                                    49 
 37         TRACE_BRANCH_BIT,                          50         TRACE_BRANCH_BIT,
 38 /*                                                 51 /*
 39  * Abuse of the trace_recursion.                   52  * Abuse of the trace_recursion.
 40  * As we need a way to maintain state if we ar     53  * As we need a way to maintain state if we are tracing the function
 41  * graph in irq because we want to trace a par     54  * graph in irq because we want to trace a particular function that
 42  * was called in irq context but we have irq t     55  * was called in irq context but we have irq tracing off. Since this
 43  * can only be modified by current, we can reu     56  * can only be modified by current, we can reuse trace_recursion.
 44  */                                                57  */
 45         TRACE_IRQ_BIT,                             58         TRACE_IRQ_BIT,
 46                                                    59 
                                                   >>  60         /* Set if the function is in the set_graph_function file */
                                                   >>  61         TRACE_GRAPH_BIT,
                                                   >>  62 
                                                   >>  63         /*
                                                   >>  64          * In the very unlikely case that an interrupt came in
                                                   >>  65          * at a start of graph tracing, and we want to trace
                                                   >>  66          * the function in that interrupt, the depth can be greater
                                                   >>  67          * than zero, because of the preempted start of a previous
                                                   >>  68          * trace. In an even more unlikely case, depth could be 2
                                                   >>  69          * if a softirq interrupted the start of graph tracing,
                                                   >>  70          * followed by an interrupt preempting a start of graph
                                                   >>  71          * tracing in the softirq, and depth can even be 3
                                                   >>  72          * if an NMI came in at the start of an interrupt function
                                                   >>  73          * that preempted a softirq start of a function that
                                                   >>  74          * preempted normal context!!!! Luckily, it can't be
                                                   >>  75          * greater than 3, so the next two bits are a mask
                                                   >>  76          * of what the depth is when we set TRACE_GRAPH_BIT
                                                   >>  77          */
                                                   >>  78 
                                                   >>  79         TRACE_GRAPH_DEPTH_START_BIT,
                                                   >>  80         TRACE_GRAPH_DEPTH_END_BIT,
                                                   >>  81 
                                                   >>  82         /*
                                                   >>  83          * To implement set_graph_notrace, if this bit is set, we ignore
                                                   >>  84          * function graph tracing of called functions, until the return
                                                   >>  85          * function is called to clear it.
                                                   >>  86          */
                                                   >>  87         TRACE_GRAPH_NOTRACE_BIT,
                                                   >>  88 
                                                   >>  89         /*
                                                   >>  90          * When transitioning between context, the preempt_count() may
                                                   >>  91          * not be correct. Allow for a single recursion to cover this case.
                                                   >>  92          */
                                                   >>  93         TRACE_TRANSITION_BIT,
                                                   >>  94 
 47         /* Used to prevent recursion recording     95         /* Used to prevent recursion recording from recursing. */
 48         TRACE_RECORD_RECURSION_BIT,                96         TRACE_RECORD_RECURSION_BIT,
 49 };                                                 97 };
 50                                                    98 
 51 #define trace_recursion_set(bit)        do { (     99 #define trace_recursion_set(bit)        do { (current)->trace_recursion |= (1<<(bit)); } while (0)
 52 #define trace_recursion_clear(bit)      do { (    100 #define trace_recursion_clear(bit)      do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
 53 #define trace_recursion_test(bit)       ((curr    101 #define trace_recursion_test(bit)       ((current)->trace_recursion & (1<<(bit)))
 54                                                   102 
                                                   >> 103 #define trace_recursion_depth() \
                                                   >> 104         (((current)->trace_recursion >> TRACE_GRAPH_DEPTH_START_BIT) & 3)
                                                   >> 105 #define trace_recursion_set_depth(depth) \
                                                   >> 106         do {                                                            \
                                                   >> 107                 current->trace_recursion &=                             \
                                                   >> 108                         ~(3 << TRACE_GRAPH_DEPTH_START_BIT);            \
                                                   >> 109                 current->trace_recursion |=                             \
                                                   >> 110                         ((depth) & 3) << TRACE_GRAPH_DEPTH_START_BIT;   \
                                                   >> 111         } while (0)
                                                   >> 112 
 55 #define TRACE_CONTEXT_BITS      4                 113 #define TRACE_CONTEXT_BITS      4
 56                                                   114 
 57 #define TRACE_FTRACE_START      TRACE_FTRACE_B    115 #define TRACE_FTRACE_START      TRACE_FTRACE_BIT
                                                   >> 116 #define TRACE_FTRACE_MAX        ((1 << (TRACE_FTRACE_START + TRACE_CONTEXT_BITS)) - 1)
 58                                                   117 
 59 #define TRACE_LIST_START        TRACE_INTERNAL    118 #define TRACE_LIST_START        TRACE_INTERNAL_BIT
                                                   >> 119 #define TRACE_LIST_MAX          ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
 60                                                   120 
 61 #define TRACE_CONTEXT_MASK      ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1) !! 121 #define TRACE_CONTEXT_MASK      TRACE_LIST_MAX
 62                                                   122 
 63 /*                                                123 /*
 64  * Used for setting context                       124  * Used for setting context
 65  *  NMI     = 0                                   125  *  NMI     = 0
 66  *  IRQ     = 1                                   126  *  IRQ     = 1
 67  *  SOFTIRQ = 2                                   127  *  SOFTIRQ = 2
 68  *  NORMAL  = 3                                   128  *  NORMAL  = 3
 69  */                                               129  */
 70 enum {                                            130 enum {
 71         TRACE_CTX_NMI,                            131         TRACE_CTX_NMI,
 72         TRACE_CTX_IRQ,                            132         TRACE_CTX_IRQ,
 73         TRACE_CTX_SOFTIRQ,                        133         TRACE_CTX_SOFTIRQ,
 74         TRACE_CTX_NORMAL,                         134         TRACE_CTX_NORMAL,
 75         TRACE_CTX_TRANSITION,                  << 
 76 };                                                135 };
 77                                                   136 
 78 static __always_inline int trace_get_context_b    137 static __always_inline int trace_get_context_bit(void)
 79 {                                                 138 {
 80         unsigned char bit = interrupt_context_level();   !! 139         unsigned long pc = preempt_count();
 81                                                   140 
 82         return TRACE_CTX_NORMAL - bit;         !! 141         if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
                                                   >> 142                 return TRACE_CTX_NORMAL;
                                                   >> 143         else
                                                   >> 144                 return pc & NMI_MASK ? TRACE_CTX_NMI :
                                                   >> 145                         pc & HARDIRQ_MASK ? TRACE_CTX_IRQ : TRACE_CTX_SOFTIRQ;
 83 }                                                 146 }
 84                                                   147 
 85 #ifdef CONFIG_FTRACE_RECORD_RECURSION             148 #ifdef CONFIG_FTRACE_RECORD_RECURSION
 86 extern void ftrace_record_recursion(unsigned l    149 extern void ftrace_record_recursion(unsigned long ip, unsigned long parent_ip);
 87 # define do_ftrace_record_recursion(ip, pip)      150 # define do_ftrace_record_recursion(ip, pip)                            \
 88         do {                                      151         do {                                                            \
 89                 if (!trace_recursion_test(TRAC    152                 if (!trace_recursion_test(TRACE_RECORD_RECURSION_BIT)) { \
 90                         trace_recursion_set(TR    153                         trace_recursion_set(TRACE_RECORD_RECURSION_BIT); \
 91                         ftrace_record_recursio    154                         ftrace_record_recursion(ip, pip);               \
 92                         trace_recursion_clear(    155                         trace_recursion_clear(TRACE_RECORD_RECURSION_BIT); \
 93                 }                                 156                 }                                                       \
 94         } while (0)                               157         } while (0)
 95 #else                                             158 #else
 96 # define do_ftrace_record_recursion(ip, pip)      159 # define do_ftrace_record_recursion(ip, pip)    do { } while (0)
 97 #endif                                            160 #endif
 98                                                   161 
 99 #ifdef CONFIG_FTRACE_VALIDATE_RCU_IS_WATCHING  << 
100 # define trace_warn_on_no_rcu(ip)                                      \  << 
101         ({                                                              \  << 
102                 bool __ret = !rcu_is_watching();                        \  << 
103                 if (__ret && !trace_recursion_test(TRACE_RECORD_RECURSION_BIT)) { \  << 
104                         trace_recursion_set(TRACE_RECORD_RECURSION_BIT); \  << 
105                         WARN_ONCE(true, "RCU not on for: %pS\n", (void *)ip); \  << 
106                         trace_recursion_clear(TRACE_RECORD_RECURSION_BIT); \  << 
107                 }                                                       \  << 
108                 __ret;                                                  \  << 
109         })                                                                 << 
110 #else                                          << 
111 # define trace_warn_on_no_rcu(ip)       false  << 
112 #endif                                         << 
113                                                << 
114 /*                                             << 
115  * Preemption is promised to be disabled when return bit >= 0.         << 
116  */                                            << 
117 static __always_inline int trace_test_and_set_    162 static __always_inline int trace_test_and_set_recursion(unsigned long ip, unsigned long pip,
118                                                         int start)     !! 163                                                         int start, int max)
119 {                                                 164 {
120         unsigned int val = READ_ONCE(current->    165         unsigned int val = READ_ONCE(current->trace_recursion);
121         int bit;                                  166         int bit;
122                                                   167 
123         if (trace_warn_on_no_rcu(ip))          !! 168         /* A previous recursion check was made */
124                 return -1;                     !! 169         if ((val & TRACE_CONTEXT_MASK) > max)
                                                   >> 170                 return 0;
125                                                   171 
126         bit = trace_get_context_bit() + start;    172         bit = trace_get_context_bit() + start;
127         if (unlikely(val & (1 << bit))) {         173         if (unlikely(val & (1 << bit))) {
128                 /*                                174                 /*
 129                  * If an interrupt occurs during a trace, and another trace  !! 175                  * It could be that preempt_count has not been updated during
 130                  * happens in that interrupt but before the preempt_count is !! 176                  * a switch between contexts. Allow for a single recursion.
 131                  * updated to reflect the new interrupt context, then this   << 
 132                  * will think a recursion occurred, and the event will be dropped. << 
 133                  * Let a single instance happen via the TRANSITION_BIT to    << 
 134                  * not drop those events.                                     << 
135                  */                               177                  */
136                 bit = TRACE_CTX_TRANSITION + start;    !! 178                 bit = TRACE_TRANSITION_BIT;
137                 if (val & (1 << bit)) {           179                 if (val & (1 << bit)) {
138                         do_ftrace_record_recur    180                         do_ftrace_record_recursion(ip, pip);
139                         return -1;                181                         return -1;
140                 }                                 182                 }
                                                   >> 183         } else {
                                                   >> 184                 /* Normal check passed, clear the transition to allow it again */
                                                   >> 185                 val &= ~(1 << TRACE_TRANSITION_BIT);
141         }                                         186         }
142                                                   187 
143         val |= 1 << bit;                          188         val |= 1 << bit;
144         current->trace_recursion = val;           189         current->trace_recursion = val;
145         barrier();                                190         barrier();
146                                                   191 
147         preempt_disable_notrace();             !! 192         return bit + 1;
148                                                << 
149         return bit;                            << 
150 }                                                 193 }
151                                                   194 
152 /*                                             << 
153  * Preemption will be enabled (if it was previously disabled).         << 
154  */                                            << 
155 static __always_inline void trace_clear_recurs    195 static __always_inline void trace_clear_recursion(int bit)
156 {                                                 196 {
157         preempt_enable_notrace();              !! 197         if (!bit)
                                                   >> 198                 return;
                                                   >> 199 
158         barrier();                                200         barrier();
                                                   >> 201         bit--;
159         trace_recursion_clear(bit);               202         trace_recursion_clear(bit);
160 }                                                 203 }
161                                                   204 
162 /**                                               205 /**
163  * ftrace_test_recursion_trylock - tests for r    206  * ftrace_test_recursion_trylock - tests for recursion in same context
164  *                                                207  *
165  * Use this for ftrace callbacks. This will de    208  * Use this for ftrace callbacks. This will detect if the function
166  * tracing recursed in the same context (norma    209  * tracing recursed in the same context (normal vs interrupt),
167  *                                                210  *
168  * Returns: -1 if a recursion happened.           211  * Returns: -1 if a recursion happened.
169  *           >= 0 if no recursion.             !! 212  *           >= 0 if no recursion
170  */                                               213  */
171 static __always_inline int ftrace_test_recursi    214 static __always_inline int ftrace_test_recursion_trylock(unsigned long ip,
172                                                   215                                                          unsigned long parent_ip)
173 {                                                 216 {
174         return trace_test_and_set_recursion(ip, parent_ip, TRACE_FTRACE_START); !! 217         return trace_test_and_set_recursion(ip, parent_ip, TRACE_FTRACE_START, TRACE_FTRACE_MAX);
175 }                                                 218 }
176                                                   219 
177 /**                                               220 /**
178  * ftrace_test_recursion_unlock - called when     221  * ftrace_test_recursion_unlock - called when function callback is complete
179  * @bit: The return of a successful ftrace_tes    222  * @bit: The return of a successful ftrace_test_recursion_trylock()
180  *                                                223  *
181  * This is used at the end of a ftrace callbac    224  * This is used at the end of a ftrace callback.
182  */                                               225  */
183 static __always_inline void ftrace_test_recurs    226 static __always_inline void ftrace_test_recursion_unlock(int bit)
184 {                                                 227 {
185         trace_clear_recursion(bit);               228         trace_clear_recursion(bit);
186 }                                                 229 }
187                                                   230 
188 #endif /* CONFIG_TRACING */                       231 #endif /* CONFIG_TRACING */
189 #endif /* _LINUX_TRACE_RECURSION_H */             232 #endif /* _LINUX_TRACE_RECURSION_H */
190                                                   233 
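For context, the kernel-doc shown above positions ftrace_test_recursion_trylock() and
ftrace_test_recursion_unlock() as the recursion guard that an ftrace callback wraps its
body in. A minimal sketch of that calling convention follows; it is not part of the file
being diffed, and the callback name, ops instance and body are hypothetical placeholders:

#include <linux/ftrace.h>

/*
 * Hypothetical ftrace callback: only the trylock/unlock pattern is the
 * point here, the body is a placeholder.
 */
static void my_callback_func(unsigned long ip, unsigned long parent_ip,
                             struct ftrace_ops *op, struct ftrace_regs *fregs)
{
        int bit;

        /* Returns -1 if this context has already recursed into the callback. */
        bit = ftrace_test_recursion_trylock(ip, parent_ip);
        if (bit < 0)
                return;

        /*
         * Callback work goes here. In linux-6.12-rc7 preemption is already
         * disabled at this point (the trylock promises it); in linux-5.11.22
         * it is not, so a callback that needed preemption off had to disable
         * it itself.
         */

        ftrace_test_recursion_unlock(bit);
}

static struct ftrace_ops my_ops = {
        .func = my_callback_func,
};

An ops like this would then be attached with register_ftrace_function(&my_ops).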
