// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/meta_access.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

/*
 * Verifier tests for XDP metadata (data_meta) pointer access.
 *
 * Each program loads the meta/data/data_end pointers from struct xdp_md
 * and exercises one bounds-check pattern; the expected verifier verdict
 * is declared by the __success/__failure annotation on each test.
 *
 * NOTE: the asm bodies are verifier test vectors — the exact instruction
 * sequence is what is under test and must not be restructured.
 */

/* Read one byte of meta after proving meta + 8 <= data: accepted. */
SEC("xdp")
__description("meta access, test1")
__success __retval(0)
__naked void meta_access_test1(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[xdp_md_data_meta]);	\
	r3 = *(u32*)(r1 + %[xdp_md_data]);	\
	r0 = r2;				\
	r0 += 8;				\
	if r0 > r3 goto l0_%=;			\
	r0 = *(u8*)(r2 + 0);			\
l0_%=:	r0 = 0;					\
	exit;					\
"	:
	: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
	  __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
	: __clobber_all);
}

/*
 * Load at meta - 8: below the start of the meta area, so the access is
 * rejected (off=-8) even though meta + 8 <= data was proven.
 */
SEC("xdp")
__description("meta access, test2")
__failure __msg("invalid access to packet, off=-8")
__naked void meta_access_test2(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[xdp_md_data_meta]);	\
	r3 = *(u32*)(r1 + %[xdp_md_data]);	\
	r0 = r2;				\
	r0 -= 8;				\
	r4 = r2;				\
	r4 += 8;				\
	if r4 > r3 goto l0_%=;			\
	r0 = *(u8*)(r0 + 0);			\
l0_%=:	r0 = 0;					\
	exit;					\
"	:
	: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
	  __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
	: __clobber_all);
}

/*
 * Same shape as test1 but the bound is taken against data_end instead of
 * data; that does not legitimize a meta read, so it is rejected.
 */
SEC("xdp")
__description("meta access, test3")
__failure __msg("invalid access to packet")
__naked void meta_access_test3(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[xdp_md_data_meta]);	\
	r3 = *(u32*)(r1 + %[xdp_md_data_end]);	\
	r0 = r2;				\
	r0 += 8;				\
	if r0 > r3 goto l0_%=;			\
	r0 = *(u8*)(r2 + 0);			\
l0_%=:	r0 = 0;					\
	exit;					\
"	:
	: __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)),
	  __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
	: __clobber_all);
}

/*
 * Proves data + 8 <= data_end but then reads through the *meta* pointer,
 * which that check says nothing about: rejected.
 */
SEC("xdp")
__description("meta access, test4")
__failure __msg("invalid access to packet")
__naked void meta_access_test4(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[xdp_md_data_meta]);	\
	r3 = *(u32*)(r1 + %[xdp_md_data_end]);	\
	r4 = *(u32*)(r1 + %[xdp_md_data]);	\
	r0 = r4;				\
	r0 += 8;				\
	if r0 > r3 goto l0_%=;			\
	r0 = *(u8*)(r2 + 0);			\
l0_%=:	r0 = 0;					\
	exit;					\
"	:
	: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
	  __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)),
	  __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
	: __clobber_all);
}

/*
 * bpf_xdp_adjust_meta() is called between the bounds check and the load.
 * Helper calls clobber R1-R5, so r3 no longer holds a readable value at
 * the load — the verifier reports "R3 !read_ok".
 */
SEC("xdp")
__description("meta access, test5")
__failure __msg("R3 !read_ok")
__naked void meta_access_test5(void)
{
	asm volatile ("					\
	r3 = *(u32*)(r1 + %[xdp_md_data_meta]);	\
	r4 = *(u32*)(r1 + %[xdp_md_data]);	\
	r0 = r3;				\
	r0 += 8;				\
	if r0 > r4 goto l0_%=;			\
	r2 = -8;				\
	call %[bpf_xdp_adjust_meta];		\
	r0 = *(u8*)(r3 + 0);			\
l0_%=:	r0 = 0;					\
	exit;					\
"	:
	: __imm(bpf_xdp_adjust_meta),
	  __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
	  __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
	: __clobber_all);
}

/*
 * Compares meta + 8 against data + 8, which only proves meta <= data —
 * not enough to read even one byte of meta: rejected.
 */
SEC("xdp")
__description("meta access, test6")
__failure __msg("invalid access to packet")
__naked void meta_access_test6(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[xdp_md_data_meta]);	\
	r3 = *(u32*)(r1 + %[xdp_md_data]);	\
	r0 = r3;				\
	r0 += 8;				\
	r4 = r2;				\
	r4 += 8;				\
	if r4 > r0 goto l0_%=;			\
	r0 = *(u8*)(r2 + 0);			\
l0_%=:	r0 = 0;					\
	exit;					\
"	:
	: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
	  __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
	: __clobber_all);
}

/*
 * Like test6 but the comparison is against r3 (data) itself, proving
 * meta + 8 <= data; the one-byte meta read is then accepted. (r0 is
 * computed but intentionally unused in the branch.)
 */
SEC("xdp")
__description("meta access, test7")
__success __retval(0)
__naked void meta_access_test7(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[xdp_md_data_meta]);	\
	r3 = *(u32*)(r1 + %[xdp_md_data]);	\
	r0 = r3;				\
	r0 += 8;				\
	r4 = r2;				\
	r4 += 8;				\
	if r4 > r3 goto l0_%=;			\
	r0 = *(u8*)(r2 + 0);			\
l0_%=:	r0 = 0;					\
	exit;					\
"	:
	: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
	  __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
	: __clobber_all);
}

/*
 * meta + 0xFFFF checked against data: 0xFFFF is still within the offset
 * range the verifier permits (contrast with test9), so accepted.
 */
SEC("xdp")
__description("meta access, test8")
__success __retval(0)
__naked void meta_access_test8(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[xdp_md_data_meta]);	\
	r3 = *(u32*)(r1 + %[xdp_md_data]);	\
	r4 = r2;				\
	r4 += 0xFFFF;				\
	if r4 > r3 goto l0_%=;			\
	r0 = *(u8*)(r2 + 0);			\
l0_%=:	r0 = 0;					\
	exit;					\
"	:
	: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
	  __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
	: __clobber_all);
}

/*
 * One past test8's offset: meta + 0xFFFF + 1 exceeds the range the
 * verifier allows for packet-pointer arithmetic, so rejected despite the
 * check against data.
 */
SEC("xdp")
__description("meta access, test9")
__failure __msg("invalid access to packet")
__naked void meta_access_test9(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[xdp_md_data_meta]);	\
	r3 = *(u32*)(r1 + %[xdp_md_data]);	\
	r4 = r2;				\
	r4 += 0xFFFF;				\
	r4 += 1;				\
	if r4 > r3 goto l0_%=;			\
	r0 = *(u8*)(r2 + 0);			\
l0_%=:	r0 = 0;					\
	exit;					\
"	:
	: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
	  __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
	: __clobber_all);
}

/*
 * A variable offset (produced via an atomic add on the stack, reloaded,
 * and range-checked to <= 100) is added to the *data* pointer; checking
 * meta + 8 against that shifted data pointer does not justify the meta
 * read: rejected. (r4/data_end is loaded but unused.)
 */
SEC("xdp")
__description("meta access, test10")
__failure __msg("invalid access to packet")
__naked void meta_access_test10(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[xdp_md_data_meta]);	\
	r3 = *(u32*)(r1 + %[xdp_md_data]);	\
	r4 = *(u32*)(r1 + %[xdp_md_data_end]);	\
	r5 = 42;				\
	r6 = 24;				\
	*(u64*)(r10 - 8) = r5;			\
	lock *(u64 *)(r10 - 8) += r6;		\
	r5 = *(u64*)(r10 - 8);			\
	if r5 > 100 goto l0_%=;			\
	r3 += r5;				\
	r5 = r3;				\
	r6 = r2;				\
	r6 += 8;				\
	if r6 > r5 goto l0_%=;			\
	r2 = *(u8*)(r2 + 0);			\
l0_%=:	r0 = 0;					\
	exit;					\
"	:
	: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
	  __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)),
	  __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
	: __clobber_all);
}

/*
 * Same bounded variable offset as test10, but added to the meta pointer
 * itself; meta + 8 (pre-offset copy in r6) is then checked against data,
 * so the read through the shifted meta pointer is accepted.
 */
SEC("xdp")
__description("meta access, test11")
__success __retval(0)
__naked void meta_access_test11(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[xdp_md_data_meta]);	\
	r3 = *(u32*)(r1 + %[xdp_md_data]);	\
	r5 = 42;				\
	r6 = 24;				\
	*(u64*)(r10 - 8) = r5;			\
	lock *(u64 *)(r10 - 8) += r6;		\
	r5 = *(u64*)(r10 - 8);			\
	if r5 > 100 goto l0_%=;			\
	r2 += r5;				\
	r5 = r2;				\
	r6 = r2;				\
	r6 += 8;				\
	if r6 > r3 goto l0_%=;			\
	r5 = *(u8*)(r5 + 0);			\
l0_%=:	r0 = 0;					\
	exit;					\
"	:
	: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
	  __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
	: __clobber_all);
}

/*
 * Two independent, correctly-paired checks: data + 16 vs data_end guards
 * the data read, then meta + 16 vs data guards the meta read: accepted.
 */
SEC("xdp")
__description("meta access, test12")
__success __retval(0)
__naked void meta_access_test12(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[xdp_md_data_meta]);	\
	r3 = *(u32*)(r1 + %[xdp_md_data]);	\
	r4 = *(u32*)(r1 + %[xdp_md_data_end]);	\
	r5 = r3;				\
	r5 += 16;				\
	if r5 > r4 goto l0_%=;			\
	r0 = *(u8*)(r3 + 0);			\
	r5 = r2;				\
	r5 += 16;				\
	if r5 > r3 goto l0_%=;			\
	r0 = *(u8*)(r2 + 0);			\
l0_%=:	r0 = 0;					\
	exit;					\
"	:
	: __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
	  __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)),
	  __imm_const(xdp_md_data_meta, offsetof(struct xdp_md, data_meta))
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";
/*
 * Linux® is a registered trademark of Linus Torvalds in the United States
 * and other countries.
 * TOMOYO® is a registered trademark of NTT DATA CORPORATION.
 */