1 /* SPDX-License-Identifier: GPL-2.0 */ 2 #undef TRACE_SYSTEM 3 #define TRACE_SYSTEM kmem 4 5 #if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ) 6 #define _TRACE_KMEM_H 7 8 #include <linux/types.h> 9 #include <linux/tracepoint.h> 10 #include <trace/events/mmflags.h> 11 12 TRACE_EVENT(kmem_cache_alloc, 13 14 TP_PROTO(unsigned long call_site, 15 const void *ptr, 16 struct kmem_cache *s, 17 gfp_t gfp_flags, 18 int node), 19 20 TP_ARGS(call_site, ptr, s, gfp_flags, node), 21 22 TP_STRUCT__entry( 23 __field( unsigned long, call_site ) 24 __field( const void *, ptr ) 25 __field( size_t, bytes_req ) 26 __field( size_t, bytes_alloc ) 27 __field( unsigned long, gfp_flags ) 28 __field( int, node ) 29 __field( bool, accounted ) 30 ), 31 32 TP_fast_assign( 33 __entry->call_site = call_site; 34 __entry->ptr = ptr; 35 __entry->bytes_req = s->object_size; 36 __entry->bytes_alloc = s->size; 37 __entry->gfp_flags = (__force unsigned long)gfp_flags; 38 __entry->node = node; 39 __entry->accounted = IS_ENABLED(CONFIG_MEMCG) ? 40 ((gfp_flags & __GFP_ACCOUNT) || 41 (s->flags & SLAB_ACCOUNT)) : false; 42 ), 43 44 TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d accounted=%s", 45 (void *)__entry->call_site, 46 __entry->ptr, 47 __entry->bytes_req, 48 __entry->bytes_alloc, 49 show_gfp_flags(__entry->gfp_flags), 50 __entry->node, 51 __entry->accounted ? "true" : "false") 52 ); 53 54 TRACE_EVENT(kmalloc, 55 56 TP_PROTO(unsigned long call_site, 57 const void *ptr, 58 size_t bytes_req, 59 size_t bytes_alloc, 60 gfp_t gfp_flags, 61 int node), 62 63 TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node), 64 65 TP_STRUCT__entry( 66 __field( unsigned long, call_site ) 67 __field( const void *, ptr ) 68 __field( size_t, bytes_req ) 69 __field( size_t, bytes_alloc ) 70 __field( unsigned long, gfp_flags ) 71 __field( int, node ) 72 ), 73 74 TP_fast_assign( 75 __entry->call_site = call_site; 76 __entry->ptr = ptr; 77 __entry->bytes_req = bytes_req; 78 __entry->bytes_alloc = bytes_alloc; 79 __entry->gfp_flags = (__force unsigned long)gfp_flags; 80 __entry->node = node; 81 ), 82 83 TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d accounted=%s", 84 (void *)__entry->call_site, 85 __entry->ptr, 86 __entry->bytes_req, 87 __entry->bytes_alloc, 88 show_gfp_flags(__entry->gfp_flags), 89 __entry->node, 90 (IS_ENABLED(CONFIG_MEMCG) && 91 (__entry->gfp_flags & (__force unsigned long)__GFP_ACCOUNT)) ? "true" : "false") 92 ); 93 94 TRACE_EVENT(kfree, 95 96 TP_PROTO(unsigned long call_site, const void *ptr), 97 98 TP_ARGS(call_site, ptr), 99 100 TP_STRUCT__entry( 101 __field( unsigned long, call_site ) 102 __field( const void *, ptr ) 103 ), 104 105 TP_fast_assign( 106 __entry->call_site = call_site; 107 __entry->ptr = ptr; 108 ), 109 110 TP_printk("call_site=%pS ptr=%p", 111 (void *)__entry->call_site, __entry->ptr) 112 ); 113 114 TRACE_EVENT(kmem_cache_free, 115 116 TP_PROTO(unsigned long call_site, const void *ptr, const struct kmem_cache *s), 117 118 TP_ARGS(call_site, ptr, s), 119 120 TP_STRUCT__entry( 121 __field( unsigned long, call_site ) 122 __field( const void *, ptr ) 123 __string( name, s->name ) 124 ), 125 126 TP_fast_assign( 127 __entry->call_site = call_site; 128 __entry->ptr = ptr; 129 __assign_str(name); 130 ), 131 132 TP_printk("call_site=%pS ptr=%p name=%s", 133 (void *)__entry->call_site, __entry->ptr, __get_str(name)) 134 ); 135 136 TRACE_EVENT(mm_page_free, 137 138 TP_PROTO(struct page *page, unsigned int order), 139 140 TP_ARGS(page, order), 141 142 TP_STRUCT__entry( 143 __field( unsigned long, pfn ) 144 __field( unsigned int, order ) 145 ), 146 147 TP_fast_assign( 148 __entry->pfn = page_to_pfn(page); 149 __entry->order = order; 150 ), 151 152 TP_printk("page=%p pfn=0x%lx order=%d", 153 pfn_to_page(__entry->pfn), 154 __entry->pfn, 155 __entry->order) 156 ); 157 158 TRACE_EVENT(mm_page_free_batched, 159 160 TP_PROTO(struct page *page), 161 162 TP_ARGS(page), 163 164 TP_STRUCT__entry( 165 __field( unsigned long, pfn ) 166 ), 167 168 TP_fast_assign( 169 __entry->pfn = page_to_pfn(page); 170 ), 171 172 TP_printk("page=%p pfn=0x%lx order=0", 173 pfn_to_page(__entry->pfn), 174 __entry->pfn) 175 ); 176 177 TRACE_EVENT(mm_page_alloc, 178 179 TP_PROTO(struct page *page, unsigned int order, 180 gfp_t gfp_flags, int migratetype), 181 182 TP_ARGS(page, order, gfp_flags, migratetype), 183 184 TP_STRUCT__entry( 185 __field( unsigned long, pfn ) 186 __field( unsigned int, order ) 187 __field( unsigned long, gfp_flags ) 188 __field( int, migratetype ) 189 ), 190 191 TP_fast_assign( 192 __entry->pfn = page ? page_to_pfn(page) : -1UL; 193 __entry->order = order; 194 __entry->gfp_flags = (__force unsigned long)gfp_flags; 195 __entry->migratetype = migratetype; 196 ), 197 198 TP_printk("page=%p pfn=0x%lx order=%d migratetype=%d gfp_flags=%s", 199 __entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL, 200 __entry->pfn != -1UL ? __entry->pfn : 0, 201 __entry->order, 202 __entry->migratetype, 203 show_gfp_flags(__entry->gfp_flags)) 204 ); 205 206 DECLARE_EVENT_CLASS(mm_page, 207 208 TP_PROTO(struct page *page, unsigned int order, int migratetype, 209 int percpu_refill), 210 211 TP_ARGS(page, order, migratetype, percpu_refill), 212 213 TP_STRUCT__entry( 214 __field( unsigned long, pfn ) 215 __field( unsigned int, order ) 216 __field( int, migratetype ) 217 __field( int, percpu_refill ) 218 ), 219 220 TP_fast_assign( 221 __entry->pfn = page ? page_to_pfn(page) : -1UL; 222 __entry->order = order; 223 __entry->migratetype = migratetype; 224 __entry->percpu_refill = percpu_refill; 225 ), 226 227 TP_printk("page=%p pfn=0x%lx order=%u migratetype=%d percpu_refill=%d", 228 __entry->pfn != -1UL ? pfn_to_page(__entry->pfn) : NULL, 229 __entry->pfn != -1UL ? __entry->pfn : 0, 230 __entry->order, 231 __entry->migratetype, 232 __entry->percpu_refill) 233 ); 234 235 DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked, 236 237 TP_PROTO(struct page *page, unsigned int order, int migratetype, 238 int percpu_refill), 239 240 TP_ARGS(page, order, migratetype, percpu_refill) 241 ); 242 243 TRACE_EVENT(mm_page_pcpu_drain, 244 245 TP_PROTO(struct page *page, unsigned int order, int migratetype), 246 247 TP_ARGS(page, order, migratetype), 248 249 TP_STRUCT__entry( 250 __field( unsigned long, pfn ) 251 __field( unsigned int, order ) 252 __field( int, migratetype ) 253 ), 254 255 TP_fast_assign( 256 __entry->pfn = page ? page_to_pfn(page) : -1UL; 257 __entry->order = order; 258 __entry->migratetype = migratetype; 259 ), 260 261 TP_printk("page=%p pfn=0x%lx order=%d migratetype=%d", 262 pfn_to_page(__entry->pfn), __entry->pfn, 263 __entry->order, __entry->migratetype) 264 ); 265 266 TRACE_EVENT(mm_page_alloc_extfrag, 267 268 TP_PROTO(struct page *page, 269 int alloc_order, int fallback_order, 270 int alloc_migratetype, int fallback_migratetype), 271 272 TP_ARGS(page, 273 alloc_order, fallback_order, 274 alloc_migratetype, fallback_migratetype), 275 276 TP_STRUCT__entry( 277 __field( unsigned long, pfn ) 278 __field( int, alloc_order ) 279 __field( int, fallback_order ) 280 __field( int, alloc_migratetype ) 281 __field( int, fallback_migratetype ) 282 __field( int, change_ownership ) 283 ), 284 285 TP_fast_assign( 286 __entry->pfn = page_to_pfn(page); 287 __entry->alloc_order = alloc_order; 288 __entry->fallback_order = fallback_order; 289 __entry->alloc_migratetype = alloc_migratetype; 290 __entry->fallback_migratetype = fallback_migratetype; 291 __entry->change_ownership = (alloc_migratetype == 292 get_pageblock_migratetype(page)); 293 ), 294 295 TP_printk("page=%p pfn=0x%lx alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d", 296 pfn_to_page(__entry->pfn), 297 __entry->pfn, 298 __entry->alloc_order, 299 __entry->fallback_order, 300 pageblock_order, 301 __entry->alloc_migratetype, 302 __entry->fallback_migratetype, 303 __entry->fallback_order < pageblock_order, 304 __entry->change_ownership) 305 ); 306 307 TRACE_EVENT(mm_alloc_contig_migrate_range_info, 308 309 TP_PROTO(unsigned long start, 310 unsigned long end, 311 unsigned long nr_migrated, 312 unsigned long nr_reclaimed, 313 unsigned long nr_mapped, 314 int migratetype), 315 316 TP_ARGS(start, end, nr_migrated, nr_reclaimed, nr_mapped, migratetype), 317 318 TP_STRUCT__entry( 319 __field(unsigned long, start) 320 __field(unsigned long, end) 321 __field(unsigned long, nr_migrated) 322 __field(unsigned long, nr_reclaimed) 323 __field(unsigned long, nr_mapped) 324 __field(int, migratetype) 325 ), 326 327 TP_fast_assign( 328 __entry->start = start; 329 __entry->end = end; 330 __entry->nr_migrated = nr_migrated; 331 __entry->nr_reclaimed = nr_reclaimed; 332 __entry->nr_mapped = nr_mapped; 333 __entry->migratetype = migratetype; 334 ), 335 336 TP_printk("start=0x%lx end=0x%lx migratetype=%d nr_migrated=%lu nr_reclaimed=%lu nr_mapped=%lu", 337 __entry->start, 338 __entry->end, 339 __entry->migratetype, 340 __entry->nr_migrated, 341 __entry->nr_reclaimed, 342 __entry->nr_mapped) 343 ); 344 345 /* 346 * Required for uniquely and securely identifying mm in rss_stat tracepoint. 347 */ 348 #ifndef __PTR_TO_HASHVAL 349 static unsigned int __maybe_unused mm_ptr_to_hash(const void *ptr) 350 { 351 int ret; 352 unsigned long hashval; 353 354 ret = ptr_to_hashval(ptr, &hashval); 355 if (ret) 356 return 0; 357 358 /* The hashed value is only 32-bit */ 359 return (unsigned int)hashval; 360 } 361 #define __PTR_TO_HASHVAL 362 #endif 363 364 #define TRACE_MM_PAGES \ 365 EM(MM_FILEPAGES) \ 366 EM(MM_ANONPAGES) \ 367 EM(MM_SWAPENTS) \ 368 EMe(MM_SHMEMPAGES) 369 370 #undef EM 371 #undef EMe 372 373 #define EM(a) TRACE_DEFINE_ENUM(a); 374 #define EMe(a) TRACE_DEFINE_ENUM(a); 375 376 TRACE_MM_PAGES 377 378 #undef EM 379 #undef EMe 380 381 #define EM(a) { a, #a }, 382 #define EMe(a) { a, #a } 383 384 TRACE_EVENT(rss_stat, 385 386 TP_PROTO(struct mm_struct *mm, 387 int member), 388 389 TP_ARGS(mm, member), 390 391 TP_STRUCT__entry( 392 __field(unsigned int, mm_id) 393 __field(unsigned int, curr) 394 __field(int, member) 395 __field(long, size) 396 ), 397 398 TP_fast_assign( 399 __entry->mm_id = mm_ptr_to_hash(mm); 400 __entry->curr = !!(current->mm == mm); 401 __entry->member = member; 402 __entry->size = (percpu_counter_sum_positive(&mm->rss_stat[member]) 403 << PAGE_SHIFT); 404 ), 405 406 TP_printk("mm_id=%u curr=%d type=%s size=%ldB", 407 __entry->mm_id, 408 __entry->curr, 409 __print_symbolic(__entry->member, TRACE_MM_PAGES), 410 __entry->size) 411 ); 412 #endif /* _TRACE_KMEM_H */ 413 414 /* This part must be outside protection */ 415 #include <trace/define_trace.h> 416
Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.