Linux/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c

// SPDX-License-Identifier: GPL-2.0

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

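/* All tests in this file run with __log_level(2), so that the __msg()
 * patterns below can be matched against the verifier's instruction-level
 * log, and with BPF_F_TEST_STATE_FREQ, which makes the verifier create
 * and compare checkpoint states much more often than it normally would.
 */
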
/* Check that precision marks propagate through scalar IDs.
 * Registers r{0,1,2} have the same scalar ID at the moment when r0 is
 * marked to be precise; this mark is immediately propagated to r{1,2}.
 */
SEC("socket")
__success __log_level(2)
__msg("frame0: regs=r0,r1,r2 stack= before 4: (bf) r3 = r10")
__msg("frame0: regs=r0,r1,r2 stack= before 3: (bf) r2 = r0")
__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0")
__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255")
__msg("frame0: regs=r0 stack= before 0: (85) call bpf_ktime_get_ns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void precision_same_state(void)
{
        asm volatile (
        /* r0 = random number up to 0xff */
        "call %[bpf_ktime_get_ns];"
        "r0 &= 0xff;"
        /* tie r0.id == r1.id == r2.id */
        "r1 = r0;"
        "r2 = r0;"
        /* force r0 to be precise, this immediately marks r1 and r2 as
         * precise as well because of shared IDs
         */
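        /* Note: adding an unbounded scalar to a pointer to the stack is
         * what triggers the precision request; the verifier has to know
         * the exact variable offset, so it backtracks and marks r0 (and,
         * via the shared ID, r1 and r2) as precise.
         */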
        "r3 = r10;"
        "r3 += r0;"
        "r0 = 0;"
        "exit;"
        :
        : __imm(bpf_ktime_get_ns)
        : __clobber_all);
}

/* Same as precision_same_state, but the precision mark propagates
 * across the state / parent state boundary.
 */
SEC("socket")
__success __log_level(2)
__msg("frame0: last_idx 6 first_idx 5 subseq_idx -1")
__msg("frame0: regs=r0,r1,r2 stack= before 5: (bf) r3 = r10")
__msg("frame0: parent state regs=r0,r1,r2 stack=:")
__msg("frame0: regs=r0,r1,r2 stack= before 4: (05) goto pc+0")
__msg("frame0: regs=r0,r1,r2 stack= before 3: (bf) r2 = r0")
__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0")
__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255")
__msg("frame0: parent state regs=r0 stack=:")
__msg("frame0: regs=r0 stack= before 0: (85) call bpf_ktime_get_ns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void precision_cross_state(void)
{
        asm volatile (
        /* r0 = random number up to 0xff */
        "call %[bpf_ktime_get_ns];"
        "r0 &= 0xff;"
        /* tie r0.id == r1.id == r2.id */
        "r1 = r0;"
        "r2 = r0;"
        /* force checkpoint */
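        /* (the target of a jump is a prune point where the verifier
         * saves a checkpoint state, so the precision marks for r{0,1,2}
         * have to be propagated from the current state into this parent
         * state)
         */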
        "goto +0;"
        /* force r0 to be precise, this immediately marks r1 and r2 as
         * precise as well because of shared IDs
         */
        "r3 = r10;"
        "r3 += r0;"
        "r0 = 0;"
        "exit;"
        :
        : __imm(bpf_ktime_get_ns)
        : __clobber_all);
}

/* Same as precision_same_state, but with one of the links broken;
 * note that r1 is absent from regs=... in the __msg patterns below.
 */
SEC("socket")
__success __log_level(2)
__msg("frame0: regs=r0,r2 stack= before 5: (bf) r3 = r10")
__msg("frame0: regs=r0,r2 stack= before 4: (b7) r1 = 0")
__msg("frame0: regs=r0,r2 stack= before 3: (bf) r2 = r0")
__msg("frame0: regs=r0 stack= before 2: (bf) r1 = r0")
__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255")
__msg("frame0: regs=r0 stack= before 0: (85) call bpf_ktime_get_ns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void precision_same_state_broken_link(void)
{
        asm volatile (
        /* r0 = random number up to 0xff */
        "call %[bpf_ktime_get_ns];"
        "r0 &= 0xff;"
        /* tie r0.id == r1.id == r2.id */
        "r1 = r0;"
        "r2 = r0;"
        /* break link for r1, this is the only line that differs
         * compared to the previous test
         */
        "r1 = 0;"
        /* force r0 to be precise; because of the shared ID this also
         * marks r2, but not r1, whose link was broken above
         */
        "r3 = r10;"
        "r3 += r0;"
        "r0 = 0;"
        "exit;"
        :
        : __imm(bpf_ktime_get_ns)
        : __clobber_all);
}

/* Same as precision_same_state_broken_link, but with a state /
 * parent state boundary.
 */
SEC("socket")
__success __log_level(2)
__msg("frame0: regs=r0,r2 stack= before 6: (bf) r3 = r10")
__msg("frame0: regs=r0,r2 stack= before 5: (b7) r1 = 0")
__msg("frame0: parent state regs=r0,r2 stack=:")
__msg("frame0: regs=r0,r1,r2 stack= before 4: (05) goto pc+0")
__msg("frame0: regs=r0,r1,r2 stack= before 3: (bf) r2 = r0")
__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0")
__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255")
__msg("frame0: parent state regs=r0 stack=:")
__msg("frame0: regs=r0 stack= before 0: (85) call bpf_ktime_get_ns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void precision_cross_state_broken_link(void)
{
        asm volatile (
        /* r0 = random number up to 0xff */
        "call %[bpf_ktime_get_ns];"
        "r0 &= 0xff;"
        /* tie r0.id == r1.id == r2.id */
        "r1 = r0;"
        "r2 = r0;"
        /* force checkpoint; although the link between r1 and r{0,2} is
         * broken by the next statement, the current precision tracking
         * algorithm cannot react to that and still propagates the mark
         * for r1 to the parent state.
         */
        "goto +0;"
        /* break link for r1, this is the only line that differs
         * compared to precision_cross_state()
         */
        "r1 = 0;"
        /* force r0 to be precise; because of the shared ID this also
         * marks r2 in the current state, while r1 is only marked in the
         * parent state (see above)
         */
        "r3 = r10;"
        "r3 += r0;"
        "r0 = 0;"
        "exit;"
        :
        : __imm(bpf_ktime_get_ns)
        : __clobber_all);
}

/* Check that precision marks propagate through scalar IDs.
 * Use the same scalar ID in multiple stack frames and check that
 * precision information is propagated up the call stack.
 */
SEC("socket")
__success __log_level(2)
__msg("11: (0f) r2 += r1")
/* Current state */
__msg("frame2: last_idx 11 first_idx 10 subseq_idx -1")
__msg("frame2: regs=r1 stack= before 10: (bf) r2 = r10")
__msg("frame2: parent state regs=r1 stack=")
/* frame1.r{6,7} are marked because mark_precise_scalar_ids()
 * looks for all registers with frame2.r1.id in the current state
 */
__msg("frame1: parent state regs=r6,r7 stack=")
__msg("frame0: parent state regs=r6 stack=")
/* Parent state */
__msg("frame2: last_idx 8 first_idx 8 subseq_idx 10")
__msg("frame2: regs=r1 stack= before 8: (85) call pc+1")
/* frame1.r1 is marked because of backtracking of call instruction */
__msg("frame1: parent state regs=r1,r6,r7 stack=")
__msg("frame0: parent state regs=r6 stack=")
/* Parent state */
__msg("frame1: last_idx 7 first_idx 6 subseq_idx 8")
__msg("frame1: regs=r1,r6,r7 stack= before 7: (bf) r7 = r1")
__msg("frame1: regs=r1,r6 stack= before 6: (bf) r6 = r1")
__msg("frame1: parent state regs=r1 stack=")
__msg("frame0: parent state regs=r6 stack=")
/* Parent state */
__msg("frame1: last_idx 4 first_idx 4 subseq_idx 6")
__msg("frame1: regs=r1 stack= before 4: (85) call pc+1")
__msg("frame0: parent state regs=r1,r6 stack=")
/* Parent state */
__msg("frame0: last_idx 3 first_idx 1 subseq_idx 4")
__msg("frame0: regs=r0,r1,r6 stack= before 3: (bf) r6 = r0")
__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0")
__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void precision_many_frames(void)
{
        asm volatile (
        /* r0 = random number up to 0xff */
        "call %[bpf_ktime_get_ns];"
        "r0 &= 0xff;"
        /* tie r0.id == r1.id == r6.id */
        "r1 = r0;"
        "r6 = r0;"
        "call precision_many_frames__foo;"
        "exit;"
        :
        : __imm(bpf_ktime_get_ns)
        : __clobber_all);
}

static __naked __noinline __used
void precision_many_frames__foo(void)
{
        asm volatile (
        /* conflate one of the register numbers (r6) with outer frame,
         * to verify that those are tracked independently
         */
        "r6 = r1;"
        "r7 = r1;"
        "call precision_many_frames__bar;"
        "exit"
        ::: __clobber_all);
}

static __naked __noinline __used
void precision_many_frames__bar(void)
{
        asm volatile (
        /* force r1 to be precise, this immediately marks:
         * - bar frame r1
         * - foo frame r{1,6,7}
         * - main frame r{1,6}
         */
        "r2 = r10;"
        "r2 += r1;"
        "r0 = 0;"
        "exit;"
        ::: __clobber_all);
}

/* Check that scalars with the same IDs are marked precise on stack as
 * well as in registers.
 */
SEC("socket")
__success __log_level(2)
/* foo frame */
__msg("frame1: regs=r1 stack=-8,-16 before 9: (bf) r2 = r10")
__msg("frame1: regs=r1 stack=-8,-16 before 8: (7b) *(u64 *)(r10 -16) = r1")
__msg("frame1: regs=r1 stack=-8 before 7: (7b) *(u64 *)(r10 -8) = r1")
__msg("frame1: regs=r1 stack= before 4: (85) call pc+2")
/* main frame */
__msg("frame0: regs=r0,r1 stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r1")
__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0")
__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void precision_stack(void)
{
        asm volatile (
        /* r0 = random number up to 0xff */
        "call %[bpf_ktime_get_ns];"
        "r0 &= 0xff;"
        /* tie r0.id == r1.id == fp[-8].id */
        "r1 = r0;"
        "*(u64*)(r10 - 8) = r1;"
        "call precision_stack__foo;"
        "r0 = 0;"
        "exit;"
        :
        : __imm(bpf_ktime_get_ns)
        : __clobber_all);
}

static __naked __noinline __used
void precision_stack__foo(void)
{
        asm volatile (
        /* conflate one of the stack slots (fp-8) with the outer frame,
         * to verify that those are tracked independently
         */
        "*(u64*)(r10 - 8) = r1;"
        "*(u64*)(r10 - 16) = r1;"
        /* force r1 to be precise, this immediately marks:
         * - foo frame r1,fp{-8,-16}
         * - main frame r1,fp{-8}
         */
        "r2 = r10;"
        "r2 += r1;"
        "exit"
        ::: __clobber_all);
}

/* Use two separate scalar IDs to check that these are propagated
 * independently.
 */
SEC("socket")
__success __log_level(2)
/* r{6,7} */
__msg("11: (0f) r3 += r7")
__msg("frame0: regs=r6,r7 stack= before 10: (bf) r3 = r10")
/* ... skip some insns ... */
__msg("frame0: regs=r6,r7 stack= before 3: (bf) r7 = r0")
__msg("frame0: regs=r0,r6 stack= before 2: (bf) r6 = r0")
/* r{8,9} */
__msg("12: (0f) r3 += r9")
__msg("frame0: regs=r8,r9 stack= before 11: (0f) r3 += r7")
/* ... skip some insns ... */
__msg("frame0: regs=r8,r9 stack= before 7: (bf) r9 = r0")
__msg("frame0: regs=r0,r8 stack= before 6: (bf) r8 = r0")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void precision_two_ids(void)
{
        asm volatile (
        /* r6 = random number up to 0xff
         * r6.id == r7.id
         */
        "call %[bpf_ktime_get_ns];"
        "r0 &= 0xff;"
        "r6 = r0;"
        "r7 = r0;"
        /* same, but for r{8,9} */
        "call %[bpf_ktime_get_ns];"
        "r0 &= 0xff;"
        "r8 = r0;"
        "r9 = r0;"
        /* clear r0 id */
        "r0 = 0;"
        /* force checkpoint */
        "goto +0;"
        "r3 = r10;"
        /* force r7 to be precise, this also marks r6 */
        "r3 += r7;"
        /* force r9 to be precise, this also marks r8 */
        "r3 += r9;"
        "exit;"
        :
        : __imm(bpf_ktime_get_ns)
        : __clobber_all);
}

/* Verify that check_ids() is used by regsafe() for scalars.
 *
 * r9 = ... some pointer with range X ...
 * r6 = ... unbound scalar ID=a ...
 * r7 = ... unbound scalar ID=b ...
 * if (r6 > r7) goto +1
 * r7 = r6
 * if (r7 > X) goto exit
 * r9 += r6
 * ... access memory using r9 ...
 *
 * The memory access is safe only if r6 is bounded,
 * which is true for one branch and not true for another.
 */
SEC("socket")
__failure __msg("register with unbounded min value")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void check_ids_in_regsafe(void)
{
        asm volatile (
        /* Bump allocated stack */
        "r1 = 0;"
        "*(u64*)(r10 - 8) = r1;"
        /* r9 = pointer to stack */
        "r9 = r10;"
        "r9 += -8;"
        /* r7 = ktime_get_ns() */
        "call %[bpf_ktime_get_ns];"
        "r7 = r0;"
        /* r6 = ktime_get_ns() */
        "call %[bpf_ktime_get_ns];"
        "r6 = r0;"
        /* if r6 > r7 is an unpredictable jump */
        "if r6 > r7 goto l1_%=;"
        "r7 = r6;"
"l1_%=:"
        /* if r7 > 4 ...; transfers range to r6 on one execution path
         * but does not transfer on another
         */
        "if r7 > 4 goto l2_%=;"
        /* Access memory at r9[r6], r6 is not always bounded */
        "r9 += r6;"
        "r0 = *(u8*)(r9 + 0);"
"l2_%=:"
        "r0 = 0;"
        "exit;"
        :
        : __imm(bpf_ktime_get_ns)
        : __clobber_all);
}

/* Similar to check_ids_in_regsafe.
 * Label l0 can be reached in two states:
 *
 *   (1) r6{.id=A}, r7{.id=A}, r8{.id=B}
 *   (2) r6{.id=B}, r7{.id=A}, r8{.id=B}
 *
 * State (2) is not safe, because the "r7 > 4" check does not propagate
 * a range to r6 there. This example would be considered safe without
 * the changes to mark_chain_precision() that track scalar values with
 * equal IDs.
 */
SEC("socket")
__failure __msg("register with unbounded min value")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void check_ids_in_regsafe_2(void)
{
        asm volatile (
        /* Bump allocated stack */
        "r1 = 0;"
        "*(u64*)(r10 - 8) = r1;"
        /* r9 = pointer to stack */
        "r9 = r10;"
        "r9 += -8;"
        /* r8 = ktime_get_ns() */
        "call %[bpf_ktime_get_ns];"
        "r8 = r0;"
        /* r7 = ktime_get_ns() */
        "call %[bpf_ktime_get_ns];"
        "r7 = r0;"
        /* r6 = ktime_get_ns() */
        "call %[bpf_ktime_get_ns];"
        "r6 = r0;"
        /* scratch .id from r0 */
        "r0 = 0;"
        /* if r6 > r7 is an unpredictable jump */
        "if r6 > r7 goto l1_%=;"
        /* tie r6 and r7 .id */
        "r6 = r7;"
"l0_%=:"
        /* if r7 > 4 exit(0) */
        "if r7 > 4 goto l2_%=;"
        /* Access memory at r9[r6] */
        "r9 += r6;"
        "r0 = *(u8*)(r9 + 0);"
"l2_%=:"
        "r0 = 0;"
        "exit;"
"l1_%=:"
        /* tie r6 and r8 .id */
        "r6 = r8;"
        "goto l0_%=;"
        :
        : __imm(bpf_ktime_get_ns)
        : __clobber_all);
}

/* Check that scalar IDs *are not* generated on register to register
 * assignments if the source register is a constant.
 *
 * If such IDs *were* generated, the 'l1' below would be reached in
 * two states:
 *
 *   (1) r3{.id=A}, r4{.id=A}
 *   (2) r3{.id=C}, r4{.id=D}
 *
 * Thus forcing 'if r3 == r4' to be verified twice.
 */
SEC("socket")
__success __log_level(2)
__msg("11: (1d) if r3 == r4 goto pc+0")
__msg("frame 0: propagating r3,r4")
__msg("11: safe")
__msg("processed 15 insns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void no_scalar_id_for_const(void)
{
        asm volatile (
        "call %[bpf_ktime_get_ns];"
        /* unpredictable jump */
        "if r0 > 7 goto l0_%=;"
        /* possibly generate same scalar ids for r3 and r4 */
        "r1 = 0;"
        "r1 = r1;"
        "r3 = r1;"
        "r4 = r1;"
        "goto l1_%=;"
"l0_%=:"
        /* possibly generate different scalar ids for r3 and r4 */
        "r1 = 0;"
        "r2 = 0;"
        "r3 = r1;"
        "r4 = r2;"
"l1_%=:"
        /* predictable jump, marks r3 and r4 precise */
        "if r3 == r4 goto +0;"
        "r0 = 0;"
        "exit;"
        :
        : __imm(bpf_ktime_get_ns)
        : __clobber_all);
}

/* Same as no_scalar_id_for_const() but for 32-bit values */
SEC("socket")
__success __log_level(2)
__msg("11: (1e) if w3 == w4 goto pc+0")
__msg("frame 0: propagating r3,r4")
__msg("11: safe")
__msg("processed 15 insns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void no_scalar_id_for_const32(void)
{
        asm volatile (
        "call %[bpf_ktime_get_ns];"
        /* unpredictable jump */
        "if r0 > 7 goto l0_%=;"
        /* possibly generate same scalar ids for r3 and r4 */
        "w1 = 0;"
        "w1 = w1;"
        "w3 = w1;"
        "w4 = w1;"
        "goto l1_%=;"
"l0_%=:"
        /* possibly generate different scalar ids for r3 and r4 */
        "w1 = 0;"
        "w2 = 0;"
        "w3 = w1;"
        "w4 = w2;"
"l1_%=:"
        /* predictable jump, marks r3 and r4 precise */
        "if w3 == w4 goto +0;"
        "r0 = 0;"
        "exit;"
        :
        : __imm(bpf_ktime_get_ns)
        : __clobber_all);
}

/* Check that unique scalar IDs are ignored when new verifier state is
 * compared to cached verifier state. For this test:
 * - cached state has no id on r1
 * - new state has a unique id on r1
 */
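/* Here "unique" means that the ID is carried by only one register (or
 * stack slot) in the state. Such an ID links nothing together, so the
 * state comparison can treat the register as if it had no ID at all.
 */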
SEC("socket")
__success __log_level(2)
__msg("6: (25) if r6 > 0x7 goto pc+1")
__msg("7: (57) r1 &= 255")
__msg("8: (bf) r2 = r10")
__msg("from 6 to 8: safe")
__msg("processed 12 insns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void ignore_unique_scalar_ids_cur(void)
{
        asm volatile (
        "call %[bpf_ktime_get_ns];"
        "r6 = r0;"
        "call %[bpf_ktime_get_ns];"
        "r0 &= 0xff;"
        /* r1.id == r0.id */
        "r1 = r0;"
        /* make r1.id unique */
        "r0 = 0;"
        "if r6 > 7 goto l0_%=;"
        /* clear r1 id, but keep the range compatible */
        "r1 &= 0xff;"
"l0_%=:"
        /* get here in two states:
         * - first: r1 has no id (cached state)
         * - second: r1 has a unique id (should be considered equivalent)
         */
        "r2 = r10;"
        "r2 += r1;"
        "exit;"
        :
        : __imm(bpf_ktime_get_ns)
        : __clobber_all);
}

/* Check that unique scalar IDs are ignored when new verifier state is
 * compared to cached verifier state. For this test:
 * - cached state has a unique id on r1
 * - new state has no id on r1
 */
SEC("socket")
__success __log_level(2)
__msg("6: (25) if r6 > 0x7 goto pc+1")
__msg("7: (05) goto pc+1")
__msg("9: (bf) r2 = r10")
__msg("9: safe")
__msg("processed 13 insns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void ignore_unique_scalar_ids_old(void)
{
        asm volatile (
        "call %[bpf_ktime_get_ns];"
        "r6 = r0;"
        "call %[bpf_ktime_get_ns];"
        "r0 &= 0xff;"
        /* r1.id == r0.id */
        "r1 = r0;"
        /* make r1.id unique */
        "r0 = 0;"
        "if r6 > 7 goto l1_%=;"
        "goto l0_%=;"
"l1_%=:"
        /* clear r1 id, but keep the range compatible */
        "r1 &= 0xff;"
"l0_%=:"
        /* get here in two states:
         * - first: r1 has a unique id (cached state)
         * - second: r1 has no id (should be considered equivalent)
         */
        "r2 = r10;"
        "r2 += r1;"
        "exit;"
        :
        : __imm(bpf_ktime_get_ns)
        : __clobber_all);
}

/* Check that two different scalar IDs in a verified state can't be
 * mapped to the same scalar ID in the current state.
 */
SEC("socket")
__success __log_level(2)
/* The exit instruction should be reachable from two states;
 * use two matches and "processed .. insns" to ensure this.
 */
__msg("13: (95) exit")
__msg("13: (95) exit")
__msg("processed 18 insns")
__flag(BPF_F_TEST_STATE_FREQ)
__naked void two_old_ids_one_cur_id(void)
{
        asm volatile (
        /* Give unique scalar IDs to r{6,7} */
        "call %[bpf_ktime_get_ns];"
        "r0 &= 0xff;"
        "r6 = r0;"
        "call %[bpf_ktime_get_ns];"
        "r0 &= 0xff;"
        "r7 = r0;"
        "r0 = 0;"
        /* Maybe make r{6,7} IDs identical */
        "if r6 > r7 goto l0_%=;"
        "goto l1_%=;"
"l0_%=:"
        "r6 = r7;"
"l1_%=:"
        /* Mark r{6,7} precise.
         * Get here in two states:
         * - first:  r6{.id=A}, r7{.id=B} (cached state)
         * - second: r6{.id=A}, r7{.id=A}
         * Currently we don't want to consider such states equivalent.
         * Thus "exit;" would be verified twice.
         */
        "r2 = r10;"
        "r2 += r6;"
        "r2 += r7;"
        "exit;"
        :
        : __imm(bpf_ktime_get_ns)
        : __clobber_all);
}

char _license[] SEC("license") = "GPL";
