~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/tools/testing/selftests/bpf/progs/verifier_xadd.c

Version: ~ [ linux-6.11.5 ] ~ [ linux-6.10.14 ] ~ [ linux-6.9.12 ] ~ [ linux-6.8.12 ] ~ [ linux-6.7.12 ] ~ [ linux-6.6.58 ] ~ [ linux-6.5.13 ] ~ [ linux-6.4.16 ] ~ [ linux-6.3.13 ] ~ [ linux-6.2.16 ] ~ [ linux-6.1.114 ] ~ [ linux-6.0.19 ] ~ [ linux-5.19.17 ] ~ [ linux-5.18.19 ] ~ [ linux-5.17.15 ] ~ [ linux-5.16.20 ] ~ [ linux-5.15.169 ] ~ [ linux-5.14.21 ] ~ [ linux-5.13.19 ] ~ [ linux-5.12.19 ] ~ [ linux-5.11.22 ] ~ [ linux-5.10.228 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.284 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.322 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.336 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.337 ] ~ [ linux-4.4.302 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.9 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 // SPDX-License-Identifier: GPL-2.0
  2 /* Converted from tools/testing/selftests/bpf/verifier/xadd.c */
  3 
  4 #include <linux/bpf.h>
  5 #include <bpf/bpf_helpers.h>
  6 #include "bpf_misc.h"
  7 
/* Single-entry hash map with 8-byte (long long) key and value.
 * Serves as the lookup target for the "xadd/w check unaligned map"
 * test below, which performs a misaligned atomic add on the value. */
  8 struct {
  9         __uint(type, BPF_MAP_TYPE_HASH);
 10         __uint(max_entries, 1);
 11         __type(key, long long);
 12         __type(value, long long);
 13 } map_hash_8b SEC(".maps");
 14 
/*
 * Negative test: a 64-bit store initializes the stack slot at fp-8,
 * then a 32-bit atomic add (xadd) targets fp-7, which is not 4-byte
 * aligned.  The verifier must reject the program with the
 * "misaligned stack access off" message asserted by __msg() below.
 * (__naked + verbatim asm: the exact instruction sequence is the test.)
 */
 15 SEC("tc")
 16 __description("xadd/w check unaligned stack")
 17 __failure __msg("misaligned stack access off")
 18 __naked void xadd_w_check_unaligned_stack(void)
 19 {
 20         asm volatile ("                                 \
 21         r0 = 1;                                         \
 22         *(u64*)(r10 - 8) = r0;                          \
 23         lock *(u32 *)(r10 - 7) += w0;                   \
 24         r0 = *(u64*)(r10 - 8);                          \
 25         exit;                                           \
 26 "       ::: __clobber_all);
 27 }
 28 
/*
 * Negative test: after a successful bpf_map_lookup_elem() on
 * map_hash_8b, a 32-bit atomic add targets value pointer r0 at
 * offset +3, which is not 4-byte aligned.  The verifier must reject
 * it with "misaligned value access off" as asserted by __msg().
 * The early "if r0 != 0" / exit pair handles the NULL-lookup path
 * so the verifier sees a non-NULL map value at the atomic op.
 */
 29 SEC("tc")
 30 __description("xadd/w check unaligned map")
 31 __failure __msg("misaligned value access off")
 32 __naked void xadd_w_check_unaligned_map(void)
 33 {
 34         asm volatile ("                                 \
 35         r1 = 0;                                         \
 36         *(u64*)(r10 - 8) = r1;                          \
 37         r2 = r10;                                       \
 38         r2 += -8;                                       \
 39         r1 = %[map_hash_8b] ll;                         \
 40         call %[bpf_map_lookup_elem];                    \
 41         if r0 != 0 goto l0_%=;                          \
 42         exit;                                           \
 43 l0_%=:  r1 = 1;                                         \
 44         lock *(u32 *)(r0 + 3) += w1;                    \
 45         r0 = *(u32*)(r0 + 3);                           \
 46         exit;                                           \
 47 "       :
 48         : __imm(bpf_map_lookup_elem),
 49           __imm_addr(map_hash_8b)
 50         : __clobber_all);
 51 }
 52 
/*
 * Negative test: after the usual data/data_end bounds check on the
 * XDP packet, the program attempts 32-bit atomic adds on the packet
 * pointer r2 at offsets +1 and +2.  Even with BPF_F_ANY_ALIGNMENT
 * set (relaxing alignment enforcement), the verifier must reject
 * atomic ops on packet memory with the message asserted by __msg():
 * "BPF_ATOMIC stores into R2 pkt is not allowed".
 * The out-of-bounds path returns 99 via l1_%= without touching data.
 */
 53 SEC("xdp")
 54 __description("xadd/w check unaligned pkt")
 55 __failure __msg("BPF_ATOMIC stores into R2 pkt is not allowed")
 56 __flag(BPF_F_ANY_ALIGNMENT)
 57 __naked void xadd_w_check_unaligned_pkt(void)
 58 {
 59         asm volatile ("                                 \
 60         r2 = *(u32*)(r1 + %[xdp_md_data]);              \
 61         r3 = *(u32*)(r1 + %[xdp_md_data_end]);          \
 62         r1 = r2;                                        \
 63         r1 += 8;                                        \
 64         if r1 < r3 goto l0_%=;                          \
 65         r0 = 99;                                        \
 66         goto l1_%=;                                     \
 67 l0_%=:  r0 = 1;                                         \
 68         r1 = 0;                                         \
 69         *(u32*)(r2 + 0) = r1;                           \
 70         r1 = 0;                                         \
 71         *(u32*)(r2 + 3) = r1;                           \
 72         lock *(u32 *)(r2 + 1) += w0;                    \
 73         lock *(u32 *)(r2 + 2) += w0;                    \
 74         r0 = *(u32*)(r2 + 1);                           \
 75 l1_%=:  exit;                                           \
 76 "       :
 77         : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
 78           __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end))
 79         : __clobber_all);
 80 }
 81 
/*
 * Positive test (64-bit): store 1 at fp-8, then perform two 64-bit
 * atomic adds of r0 (=1), so the slot holds 3 — matching __retval(3).
 * The r6==r0 and r7==r10 checks verify that the xadd instructions did
 * not mangle the source register (r0) or the frame pointer copy (r10);
 * if either changed, the program returns 42 instead and the test fails.
 */
 82 SEC("tc")
 83 __description("xadd/w check whether src/dst got mangled, 1")
 84 __success __retval(3)
 85 __naked void src_dst_got_mangled_1(void)
 86 {
 87         asm volatile ("                                 \
 88         r0 = 1;                                         \
 89         r6 = r0;                                        \
 90         r7 = r10;                                       \
 91         *(u64*)(r10 - 8) = r0;                          \
 92         lock *(u64 *)(r10 - 8) += r0;                   \
 93         lock *(u64 *)(r10 - 8) += r0;                   \
 94         if r6 != r0 goto l0_%=;                         \
 95         if r7 != r10 goto l0_%=;                        \
 96         r0 = *(u64*)(r10 - 8);                          \
 97         exit;                                           \
 98 l0_%=:  r0 = 42;                                        \
 99         exit;                                           \
100 "       ::: __clobber_all);
101 }
102 
/*
 * Positive test (32-bit variant of the test above): store 1 at fp-8
 * as a u32, then perform two aligned 32-bit atomic adds of w0 (=1),
 * yielding 3 — matching __retval(3).  As in variant 1, the r6==r0 and
 * r7==r10 comparisons detect any mangling of the atomic op's source
 * register or the stack pointer copy; on mismatch the program returns
 * 42, failing the expected-retval check.
 */
103 SEC("tc")
104 __description("xadd/w check whether src/dst got mangled, 2")
105 __success __retval(3)
106 __naked void src_dst_got_mangled_2(void)
107 {
108         asm volatile ("                                 \
109         r0 = 1;                                         \
110         r6 = r0;                                        \
111         r7 = r10;                                       \
112         *(u32*)(r10 - 8) = r0;                          \
113         lock *(u32 *)(r10 - 8) += w0;                   \
114         lock *(u32 *)(r10 - 8) += w0;                   \
115         if r6 != r0 goto l0_%=;                         \
116         if r7 != r10 goto l0_%=;                        \
117         r0 = *(u32*)(r10 - 8);                          \
118         exit;                                           \
119 l0_%=:  r0 = 42;                                        \
120         exit;                                           \
121 "       ::: __clobber_all);
122 }
123 
/* License declaration emitted into the "license" section; the string
 * itself is load-bearing and must not change. */
124 char _license[] SEC("license") = "GPL";
125 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

sflogo.php