// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
#include "xdp_metadata.h"
#include "bpf_kfuncs.h"

/* The compiler may be able to detect accesses to uninitialized memory
   in the routines performing out-of-bounds memory accesses and emit
   warnings about them.  This is the case with GCC. */
#if !defined(__clang__)
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#endif

int arr[1];
int unkn_idx;
const volatile bool call_dead_subprog = false;

__noinline long global_bad(void)
{
	return arr[unkn_idx]; /* BOOM */
}

__noinline long global_good(void)
{
	return arr[0];
}

__noinline long global_calls_bad(void)
{
	return global_good() + global_bad() /* does BOOM indirectly */;
}

__noinline long global_calls_good_only(void)
{
	return global_good();
}

__noinline long global_dead(void)
{
	return arr[0] * 2;
}

SEC("?raw_tp")
__success __log_level(2)
/* main prog is validated completely first */
__msg("('global_calls_good_only') is global and assumed valid.")
/* eventually global_good() is transitively validated as well */
__msg("Validating global_good() func")
__msg("('global_good') is safe for any args that match its prototype")
int chained_global_func_calls_success(void)
{
	int sum = 0;

	if (call_dead_subprog)
		sum += global_dead();
	return global_calls_good_only() + sum;
}

SEC("?raw_tp")
__failure __log_level(2)
/* main prog validated successfully first */
__msg("('global_calls_bad') is global and assumed valid.")
/* eventually we validate global_bad() and fail */
__msg("Validating global_bad() func")
__msg("math between map_value pointer and register") /* BOOM */
int chained_global_func_calls_bad(void)
{
	return global_calls_bad();
}

/* Do an out-of-bounds access, forcing the verifier to fail verification
 * if this global func is called.
 */
__noinline int global_unsupp(const int *mem)
{
	if (!mem)
		return 0;
	return mem[100]; /* BOOM */
}

const volatile bool skip_unsupp_global = true;

SEC("?raw_tp")
__success
int guarded_unsupp_global_called(void)
{
	if (!skip_unsupp_global)
		return global_unsupp(NULL);
	return 0;
}

SEC("?raw_tp")
__failure __log_level(2)
__msg("Func#1 ('global_unsupp') is global and assumed valid.")
__msg("Validating global_unsupp() func#1...")
__msg("value is outside of the allowed memory range")
int unguarded_unsupp_global_called(void)
{
	int x = 0;

	return global_unsupp(&x);
}

long stack[128];

__weak int subprog_nullable_ptr_bad(int *p)
{
	return (*p) * 2; /* bad, missing null check */
}

SEC("?raw_tp")
__failure __log_level(2)
__msg("invalid mem access 'mem_or_null'")
int arg_tag_nullable_ptr_fail(void *ctx)
{
	int x = 42;

	return subprog_nullable_ptr_bad(&x);
}

typedef struct {
	int x;
} user_struct_t;

__noinline __weak int subprog_user_anon_mem(user_struct_t *t)
{
	return t ? t->x : 0;
}

SEC("?tracepoint")
__failure __log_level(2)
__msg("invalid bpf_context access")
__msg("Caller passes invalid args into func#1 ('subprog_user_anon_mem')")
int anon_user_mem_invalid(void *ctx)
{
	/* can't pass PTR_TO_CTX as user memory */
	return subprog_user_anon_mem(ctx);
}

SEC("?tracepoint")
__success __log_level(2)
__msg("Func#1 ('subprog_user_anon_mem') is safe for any args that match its prototype")
int anon_user_mem_valid(void *ctx)
{
	user_struct_t t = { .x = 42 };

	return subprog_user_anon_mem(&t);
}

__noinline __weak int subprog_nonnull_ptr_good(int *p1 __arg_nonnull, int *p2 __arg_nonnull)
{
	return (*p1) * (*p2); /* good, no need for NULL checks */
}

int x = 47;

SEC("?raw_tp")
__success __log_level(2)
int arg_tag_nonnull_ptr_good(void *ctx)
{
	int y = 74;

	return subprog_nonnull_ptr_good(&x, &y);
}

/* this global subprog can now be called from many types of entry progs, each
 * with a different context type
 */
__weak int subprog_ctx_tag(void *ctx __arg_ctx)
{
	return bpf_get_stack(ctx, stack, sizeof(stack), 0);
}

__weak int raw_tp_canonical(struct bpf_raw_tracepoint_args *ctx __arg_ctx)
{
	return 0;
}

__weak int raw_tp_u64_array(u64 *ctx __arg_ctx)
{
	return 0;
}

SEC("?raw_tp")
__success __log_level(2)
int arg_tag_ctx_raw_tp(void *ctx)
{
	return subprog_ctx_tag(ctx) + raw_tp_canonical(ctx) + raw_tp_u64_array(ctx);
}

SEC("?raw_tp.w")
__success __log_level(2)
int arg_tag_ctx_raw_tp_writable(void *ctx)
{
	return subprog_ctx_tag(ctx) + raw_tp_canonical(ctx) + raw_tp_u64_array(ctx);
}

SEC("?tp_btf/sys_enter")
__success __log_level(2)
int arg_tag_ctx_raw_tp_btf(void *ctx)
{
	return subprog_ctx_tag(ctx) + raw_tp_canonical(ctx) + raw_tp_u64_array(ctx);
}

struct whatever { };

__weak int tp_whatever(struct whatever *ctx __arg_ctx)
{
	return 0;
}

SEC("?tp")
__success __log_level(2)
int arg_tag_ctx_tp(void *ctx)
{
	return subprog_ctx_tag(ctx) + tp_whatever(ctx);
}

__weak int kprobe_subprog_pt_regs(struct pt_regs *ctx __arg_ctx)
{
	return 0;
}

__weak int kprobe_subprog_typedef(bpf_user_pt_regs_t *ctx __arg_ctx)
{
	return 0;
}

SEC("?kprobe")
__success __log_level(2)
int arg_tag_ctx_kprobe(void *ctx)
{
	return subprog_ctx_tag(ctx) +
	       kprobe_subprog_pt_regs(ctx) +
	       kprobe_subprog_typedef(ctx);
}

__weak int perf_subprog_regs(
#if defined(bpf_target_riscv)
	struct user_regs_struct *ctx __arg_ctx
#elif defined(bpf_target_s390)
	/* user_pt_regs typedef is anonymous struct, so only `void *` works */
	void *ctx __arg_ctx
#elif defined(bpf_target_loongarch) || defined(bpf_target_arm64) || defined(bpf_target_powerpc)
	struct user_pt_regs *ctx __arg_ctx
#else
	struct pt_regs *ctx __arg_ctx
#endif
)
{
	return 0;
}

__weak int perf_subprog_typedef(bpf_user_pt_regs_t *ctx __arg_ctx)
{
	return 0;
}

__weak int perf_subprog_canonical(struct bpf_perf_event_data *ctx __arg_ctx)
{
	return 0;
}

SEC("?perf_event")
__success __log_level(2)
int arg_tag_ctx_perf(void *ctx)
{
	return subprog_ctx_tag(ctx) +
	       perf_subprog_regs(ctx) +
	       perf_subprog_typedef(ctx) +
	       perf_subprog_canonical(ctx);
}

__weak int iter_subprog_void(void *ctx __arg_ctx)
{
	return 0;
}

__weak int iter_subprog_typed(struct bpf_iter__task *ctx __arg_ctx)
{
	return 0;
}

SEC("?iter/task")
__success __log_level(2)
int arg_tag_ctx_iter_task(struct bpf_iter__task *ctx)
{
	return (iter_subprog_void(ctx) + iter_subprog_typed(ctx)) & 1;
}

__weak int tracing_subprog_void(void *ctx __arg_ctx)
{
	return 0;
}

__weak int tracing_subprog_u64(u64 *ctx __arg_ctx)
{
	return 0;
}

int acc;

SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
__success __log_level(2)
int BPF_PROG(arg_tag_ctx_fentry)
{
	acc += tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
	return 0;
}

SEC("?fexit/" SYS_PREFIX "sys_nanosleep")
__success __log_level(2)
int BPF_PROG(arg_tag_ctx_fexit)
{
	acc += tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
	return 0;
}

SEC("?fmod_ret/" SYS_PREFIX "sys_nanosleep")
__success __log_level(2)
int BPF_PROG(arg_tag_ctx_fmod_ret)
{
	return tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
}

SEC("?lsm/bpf")
__success __log_level(2)
int BPF_PROG(arg_tag_ctx_lsm)
{
	return tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
}

SEC("?struct_ops/test_1")
__success __log_level(2)
int BPF_PROG(arg_tag_ctx_struct_ops)
{
	return tracing_subprog_void(ctx) + tracing_subprog_u64(ctx);
}

SEC(".struct_ops")
struct bpf_dummy_ops dummy_1 = {
	.test_1 = (void *)arg_tag_ctx_struct_ops,
};

SEC("?syscall")
__success __log_level(2)
int arg_tag_ctx_syscall(void *ctx)
{
	return tracing_subprog_void(ctx) + tracing_subprog_u64(ctx) + tp_whatever(ctx);
}

__weak int subprog_dynptr(struct bpf_dynptr *dptr)
{
	long *d, t, buf[1] = {};

	d = bpf_dynptr_data(dptr, 0, sizeof(long));
	if (!d)
		return 0;

	t = *d + 1;

	d = bpf_dynptr_slice(dptr, 0, &buf, sizeof(long));
	if (!d)
		return t;

	t = *d + 2;

	return t;
}

SEC("?xdp")
__success __log_level(2)
int arg_tag_dynptr(struct xdp_md *ctx)
{
	struct bpf_dynptr dptr;

	bpf_dynptr_from_xdp(ctx, 0, &dptr);

	return subprog_dynptr(&dptr);
}

char _license[] SEC("license") = "GPL";
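
/*
 * Illustrative sketch (guarded out so this BPF object still builds as-is):
 * roughly how a verifier selftest like this one is driven from user space.
 * The __success, __failure and __msg() annotations above come from
 * bpf_misc.h and are consumed by the selftests' test_loader harness, which
 * loads each SEC("?...") program in turn and matches the verifier log
 * against the expected messages.  The skeleton header and test function
 * names below are assumptions based on the usual prog_tests/ pattern for a
 * file of this name, not something defined in this file.
 */
#if 0 /* user-space side; normally lives in a separate prog_tests/ file */
#include <test_progs.h>
#include "verifier_global_subprogs.skel.h"	/* assumed skeleton name */

void test_verifier_global_subprogs(void)
{
	/* Load every program in the skeleton and check the expected
	 * load verdict and verifier-log messages for each one.
	 */
	RUN_TESTS(verifier_global_subprogs);
}
#endif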