TOMOYO Linux Cross Reference
Linux/include/linux/alloc_tag.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * allocation tagging
 */
#ifndef _LINUX_ALLOC_TAG_H
#define _LINUX_ALLOC_TAG_H

#include <linux/bug.h>
#include <linux/codetag.h>
#include <linux/container_of.h>
#include <linux/preempt.h>
#include <asm/percpu.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/static_key.h>
#include <linux/irqflags.h>

struct alloc_tag_counters {
        u64 bytes;
        u64 calls;
};

/*
 * An instance of this structure is created in a special ELF section at every
 * allocation callsite. At runtime, the special section is treated as an
 * array of these structures. The embedded struct codetag hooks each tag
 * into the codetag framework.
 */
struct alloc_tag {
        struct codetag                  ct;
        struct alloc_tag_counters __percpu      *counters;
} __aligned(8);
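
/*
 * Illustrative sketch (not part of this header): every callsite
 * instrumented via DEFINE_ALLOC_TAG() below contributes one entry to the
 * "alloc_tags" ELF section, so two distinct callsites get two distinct
 * tags and therefore independent per-CPU counters:
 *
 *      void *a = kmalloc(32, GFP_KERNEL);      // callsite 1 -> tag 1
 *      void *b = kmalloc(64, GFP_KERNEL);      // callsite 2 -> tag 2
 *
 * The linker provides start/stop bounds for named sections, so the
 * section can be walked like an array (symbol names assumed here):
 *
 *      extern struct alloc_tag __start_alloc_tags[];
 *      extern struct alloc_tag __stop_alloc_tags[];
 *
 *      for (struct alloc_tag *t = __start_alloc_tags;
 *           t < __stop_alloc_tags; t++)
 *              inspect(t);     // inspect() is a hypothetical consumer
 */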

#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG

#define CODETAG_EMPTY   ((void *)1)

static inline bool is_codetag_empty(union codetag_ref *ref)
{
        return ref->ct == CODETAG_EMPTY;
}

static inline void set_codetag_empty(union codetag_ref *ref)
{
        if (ref)
                ref->ct = CODETAG_EMPTY;
}

#else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */

static inline bool is_codetag_empty(union codetag_ref *ref) { return false; }
static inline void set_codetag_empty(union codetag_ref *ref) {}

#endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
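
/*
 * Usage sketch (assumption, not from this header): CODETAG_EMPTY marks a
 * reference as deliberately untagged, so the debug checks below can tell
 * "intentionally no tag" apart from "tag lost by a bug". A caller that
 * knowingly skips accounting might do:
 *
 *      union codetag_ref *ref = lookup_obj_tag_ref(obj);  // hypothetical lookup
 *      set_codetag_empty(ref);  // silence "alloc_tag was not set" warnings
 *
 * alloc_tag_sub() below then simply clears such a reference without
 * touching any counters.
 */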

#ifdef CONFIG_MEM_ALLOC_PROFILING

struct codetag_bytes {
        struct codetag *ct;
        s64 bytes;
};

size_t alloc_tag_top_users(struct codetag_bytes *tags, size_t count, bool can_sleep);

static inline struct alloc_tag *ct_to_alloc_tag(struct codetag *ct)
{
        return container_of(ct, struct alloc_tag, ct);
}

#ifdef ARCH_NEEDS_WEAK_PER_CPU
/*
 * When percpu variables are required to be defined as weak, static percpu
 * variables can't be used inside a function (see comments for DECLARE_PER_CPU_SECTION).
 * Instead we account all module allocations to a single shared counter.
 */
DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);

#define DEFINE_ALLOC_TAG(_alloc_tag)                                            \
        static struct alloc_tag _alloc_tag __used __aligned(8)                  \
        __section("alloc_tags") = {                                             \
                .ct = CODE_TAG_INIT,                                            \
                .counters = &_shared_alloc_tag };

#else /* ARCH_NEEDS_WEAK_PER_CPU */

#define DEFINE_ALLOC_TAG(_alloc_tag)                                            \
        static DEFINE_PER_CPU(struct alloc_tag_counters, _alloc_tag_cntr);      \
        static struct alloc_tag _alloc_tag __used __aligned(8)                  \
        __section("alloc_tags") = {                                             \
                .ct = CODE_TAG_INIT,                                            \
                .counters = &_alloc_tag_cntr };

#endif /* ARCH_NEEDS_WEAK_PER_CPU */
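
/*
 * Illustrative expansion (not part of this header): in the common
 * (non-weak-percpu) case, DEFINE_ALLOC_TAG(_alloc_tag) expands to roughly
 * the following, giving the callsite its own per-CPU counters:
 *
 *      static DEFINE_PER_CPU(struct alloc_tag_counters, _alloc_tag_cntr);
 *      static struct alloc_tag _alloc_tag __used __aligned(8)
 *      __section("alloc_tags") = {
 *              .ct = CODE_TAG_INIT,
 *              .counters = &_alloc_tag_cntr };
 *
 * CODE_TAG_INIT comes from <linux/codetag.h> and fills in the codetag
 * fields (such as the filename and lineno used by the debug warnings
 * below).
 */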

DECLARE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
                        mem_alloc_profiling_key);

static inline bool mem_alloc_profiling_enabled(void)
{
        return static_branch_maybe(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
                                   &mem_alloc_profiling_key);
}
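
/*
 * Usage sketch (assumption): the static key makes this a patched jump
 * rather than a memory load, so allocator fast paths can gate all tag
 * bookkeeping on it at negligible cost when profiling is off:
 *
 *      static void my_account_alloc(union codetag_ref *ref,
 *                                   struct alloc_tag *tag, size_t bytes)
 *      {
 *              if (!mem_alloc_profiling_enabled())  // usually folds to a nop
 *                      return;
 *              alloc_tag_add(ref, tag, bytes);
 *      }
 *
 * my_account_alloc() is a hypothetical caller, not a kernel API.
 */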

static inline struct alloc_tag_counters alloc_tag_read(struct alloc_tag *tag)
{
        struct alloc_tag_counters v = { 0, 0 };
        struct alloc_tag_counters *counter;
        int cpu;

        for_each_possible_cpu(cpu) {
                counter = per_cpu_ptr(tag->counters, cpu);
                v.bytes += counter->bytes;
                v.calls += counter->calls;
        }

        return v;
}
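
/*
 * Usage sketch (assumption): alloc_tag_read() folds the per-CPU counters
 * into one total, suitable for reporting. The codetag fields used here
 * (filename, lineno) are the same ones the debug warnings below rely on:
 *
 *      static void my_report_tag(struct alloc_tag *tag)  // hypothetical
 *      {
 *              struct alloc_tag_counters c = alloc_tag_read(tag);
 *
 *              pr_info("%s:%u: %llu bytes over %llu calls\n",
 *                      tag->ct.filename, tag->ct.lineno, c.bytes, c.calls);
 *      }
 *
 * Note the sum is not an atomic snapshot: other CPUs may update their
 * counters mid-walk, so the result is approximate while allocations are
 * in flight.
 */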

#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
static inline void alloc_tag_add_check(union codetag_ref *ref, struct alloc_tag *tag)
{
        WARN_ONCE(ref && ref->ct,
                  "alloc_tag was not cleared (got tag for %s:%u)\n",
                  ref->ct->filename, ref->ct->lineno);

        WARN_ONCE(!tag, "current->alloc_tag not set\n");
}

static inline void alloc_tag_sub_check(union codetag_ref *ref)
{
        WARN_ONCE(ref && !ref->ct, "alloc_tag was not set\n");
}
#else
static inline void alloc_tag_add_check(union codetag_ref *ref, struct alloc_tag *tag) {}
static inline void alloc_tag_sub_check(union codetag_ref *ref) {}
#endif

/* Caller should verify both ref and tag to be valid */
static inline void __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
{
        ref->ct = &tag->ct;
        /*
         * We need to increment the call counter every time we have a new
         * allocation or when we split a large allocation into smaller ones.
         * Each new reference for every sub-allocation needs to increment the
         * call counter, because the counter will be decremented when each
         * part is freed.
         */
        this_cpu_inc(tag->counters->calls);
}

static inline void alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag)
{
        alloc_tag_add_check(ref, tag);
        if (!ref || !tag)
                return;

        __alloc_tag_ref_set(ref, tag);
}

static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag, size_t bytes)
{
        alloc_tag_add_check(ref, tag);
        if (!ref || !tag)
                return;

        __alloc_tag_ref_set(ref, tag);
        this_cpu_add(tag->counters->bytes, bytes);
}

static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes)
{
        struct alloc_tag *tag;

        alloc_tag_sub_check(ref);
        if (!ref || !ref->ct)
                return;

        if (is_codetag_empty(ref)) {
                ref->ct = NULL;
                return;
        }

        tag = ct_to_alloc_tag(ref->ct);

        this_cpu_sub(tag->counters->bytes, bytes);
        this_cpu_dec(tag->counters->calls);

        ref->ct = NULL;
}
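
/*
 * Lifecycle sketch (assumption): alloc_tag_add() and alloc_tag_sub() are
 * paired around an allocation's lifetime, so the counters balance back to
 * zero once everything is freed. The split case motivates the
 * per-reference call increment in __alloc_tag_ref_set():
 *
 *      alloc_tag_add(&obj->ref, tag, 4096);  // alloc: calls+1, bytes+4096
 *      ...
 *      alloc_tag_sub(&obj->ref, 4096);       // free:  calls-1, bytes-4096
 *
 *      // Splitting one 4K allocation into two 2K halves takes one extra
 *      // alloc_tag_ref_set() for the new half (calls+1, bytes unchanged),
 *      // so the two later alloc_tag_sub(..., 2048) calls leave both
 *      // calls and bytes at zero.
 *
 * obj->ref stands for a hypothetical union codetag_ref embedded in the
 * allocated object's metadata.
 */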

#define alloc_tag_record(p)     ((p) = current->alloc_tag)

#else /* CONFIG_MEM_ALLOC_PROFILING */

#define DEFINE_ALLOC_TAG(_alloc_tag)
static inline bool mem_alloc_profiling_enabled(void) { return false; }
static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag,
                                 size_t bytes) {}
static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes) {}
#define alloc_tag_record(p)     do {} while (0)

#endif /* CONFIG_MEM_ALLOC_PROFILING */

#define alloc_hooks_tag(_tag, _do_alloc)                                \
({                                                                      \
        struct alloc_tag * __maybe_unused _old = alloc_tag_save(_tag);  \
        typeof(_do_alloc) _res = _do_alloc;                             \
        alloc_tag_restore(_tag, _old);                                  \
        _res;                                                           \
})
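
/*
 * Note (assumption about surrounding code): alloc_tag_save() and
 * alloc_tag_restore() are defined elsewhere (in <linux/sched.h> in this
 * kernel series); they swap the given tag into current->alloc_tag around
 * the allocation, so allocations performed inside _do_alloc are
 * attributed to the outer callsite. Conceptually:
 *
 *      old = current->alloc_tag;       // save
 *      current->alloc_tag = _tag;
 *      res = _do_alloc;                // allocator reads current->alloc_tag
 *      current->alloc_tag = old;       // restore
 */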

#define alloc_hooks(_do_alloc)                                          \
({                                                                      \
        DEFINE_ALLOC_TAG(_alloc_tag);                                   \
        alloc_hooks_tag(&_alloc_tag, _do_alloc);                        \
})
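
/*
 * Usage sketch (assumption, mirroring how allocator headers in this
 * kernel series wrap their entry points): an allocator exposes a
 * _noprof() implementation and defines its public name through
 * alloc_hooks(), so every caller becomes a distinct tagged callsite:
 *
 *      void *my_alloc_noprof(size_t size, gfp_t flags);  // hypothetical
 *      #define my_alloc(...)   alloc_hooks(my_alloc_noprof(__VA_ARGS__))
 *
 * Each expansion of my_alloc() defines its own struct alloc_tag, which
 * is what makes per-callsite attribution possible.
 */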

#endif /* _LINUX_ALLOC_TAG_H */
