// SPDX-License-Identifier: GPL-2.0

#define _GNU_SOURCE
#include <err.h>
#include <errno.h>
#include <pthread.h>
#include <setjmp.h>
#include <stdio.h>
#include <string.h>
#include <stdbool.h>
#include <unistd.h>
#include <x86intrin.h>

#include <sys/auxv.h>
#include <sys/mman.h>
#include <sys/shm.h>
#include <sys/ptrace.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <sys/uio.h>

#include "../kselftest.h" /* For __cpuid_count() */

#ifndef __x86_64__
# error This test is 64-bit only
#endif

#define XSAVE_HDR_OFFSET	512
#define XSAVE_HDR_SIZE		64

struct xsave_buffer {
        union {
                struct {
                        char legacy[XSAVE_HDR_OFFSET];
                        char header[XSAVE_HDR_SIZE];
                        char extended[0];
                };
                char bytes[0];
        };
};

static inline void xsave(struct xsave_buffer *xbuf, uint64_t rfbm)
{
        uint32_t rfbm_lo = rfbm;
        uint32_t rfbm_hi = rfbm >> 32;

        asm volatile("xsave (%%rdi)"
                     : : "D" (xbuf), "a" (rfbm_lo), "d" (rfbm_hi)
                     : "memory");
}

static inline void xrstor(struct xsave_buffer *xbuf, uint64_t rfbm)
{
        uint32_t rfbm_lo = rfbm;
        uint32_t rfbm_hi = rfbm >> 32;

        asm volatile("xrstor (%%rdi)"
                     : : "D" (xbuf), "a" (rfbm_lo), "d" (rfbm_hi));
}
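
/*
 * Note: XSAVE and XRSTOR take the requested-feature bitmap (RFBM) in
 * EDX:EAX, which is why the 64-bit mask is split into two 32-bit halves
 * above.  Both instructions also require a 64-byte-aligned XSAVE area;
 * alloc_xbuf() below takes care of that.
 */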

/* err() exits and will not return */
#define fatal_error(msg, ...)	err(1, "[FAIL]\t" msg, ##__VA_ARGS__)

static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
                       int flags)
{
        struct sigaction sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_sigaction = handler;
        sa.sa_flags = SA_SIGINFO | flags;
        sigemptyset(&sa.sa_mask);
        if (sigaction(sig, &sa, 0))
                fatal_error("sigaction");
}

static void clearhandler(int sig)
{
        struct sigaction sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_handler = SIG_DFL;
        sigemptyset(&sa.sa_mask);
        if (sigaction(sig, &sa, 0))
                fatal_error("sigaction");
}

#define XFEATURE_XTILECFG	17
#define XFEATURE_XTILEDATA	18
#define XFEATURE_MASK_XTILECFG	(1 << XFEATURE_XTILECFG)
#define XFEATURE_MASK_XTILEDATA	(1 << XFEATURE_XTILEDATA)
#define XFEATURE_MASK_XTILE	(XFEATURE_MASK_XTILECFG | XFEATURE_MASK_XTILEDATA)

#define CPUID_LEAF1_ECX_XSAVE_MASK	(1 << 26)
#define CPUID_LEAF1_ECX_OSXSAVE_MASK	(1 << 27)

static uint32_t xbuf_size;

static struct {
        uint32_t xbuf_offset;
        uint32_t size;
} xtiledata;

#define CPUID_LEAF_XSTATE		0xd
#define CPUID_SUBLEAF_XSTATE_USER	0x0
#define TILE_CPUID			0x1d
#define TILE_PALETTE_ID			0x1

static void check_cpuid_xtiledata(void)
{
        uint32_t eax, ebx, ecx, edx;

        __cpuid_count(CPUID_LEAF_XSTATE, CPUID_SUBLEAF_XSTATE_USER,
                      eax, ebx, ecx, edx);

        /*
         * EBX enumerates the size (in bytes) required by the XSAVE
         * instruction for an XSAVE area containing all the user state
         * components corresponding to bits currently set in XCR0.
         *
         * Stash that off so it can be used to allocate buffers later.
         */
        xbuf_size = ebx;

        __cpuid_count(CPUID_LEAF_XSTATE, XFEATURE_XTILEDATA,
                      eax, ebx, ecx, edx);
        /*
         * eax: XTILEDATA state component size
         * ebx: XTILEDATA state component offset in user buffer
         */
        if (!eax || !ebx)
                fatal_error("xstate cpuid: invalid tile data size/offset: %d/%d",
                            eax, ebx);

        xtiledata.size        = eax;
        xtiledata.xbuf_offset = ebx;
}

/* The helpers for managing XSAVE buffer and tile states: */

struct xsave_buffer *alloc_xbuf(void)
{
        struct xsave_buffer *xbuf;

        /* XSAVE buffer should be 64B-aligned. */
        xbuf = aligned_alloc(64, xbuf_size);
        if (!xbuf)
                fatal_error("aligned_alloc()");
        return xbuf;
}

static inline void clear_xstate_header(struct xsave_buffer *buffer)
{
        memset(&buffer->header, 0, sizeof(buffer->header));
}

static inline void set_xstatebv(struct xsave_buffer *buffer, uint64_t bv)
{
        /* XSTATE_BV is at the beginning of the header: */
        *(uint64_t *)(&buffer->header) = bv;
}

static void set_rand_tiledata(struct xsave_buffer *xbuf)
{
        int *ptr = (int *)&xbuf->bytes[xtiledata.xbuf_offset];
        int data;
        int i;

        /*
         * Ensure that 'data' is never 0.  This ensures that
         * the registers are never in their initial configuration
         * and thus never tracked as being in the init state.
         */
        data = rand() | 1;

        for (i = 0; i < xtiledata.size / sizeof(int); i++, ptr++)
                *ptr = data;
}

struct xsave_buffer *stashed_xsave;

static void init_stashed_xsave(void)
{
        stashed_xsave = alloc_xbuf();
        if (!stashed_xsave)
                fatal_error("failed to allocate stashed_xsave\n");
        clear_xstate_header(stashed_xsave);
}

static void free_stashed_xsave(void)
{
        free(stashed_xsave);
}

/* See 'struct _fpx_sw_bytes' at sigcontext.h */
#define SW_BYTES_OFFSET		464
/* N.B. The struct's field name varies so read from the offset. */
#define SW_BYTES_BV_OFFSET	(SW_BYTES_OFFSET + 8)

static inline struct _fpx_sw_bytes *get_fpx_sw_bytes(void *buffer)
{
        return (struct _fpx_sw_bytes *)(buffer + SW_BYTES_OFFSET);
}

static inline uint64_t get_fpx_sw_bytes_features(void *buffer)
{
        return *(uint64_t *)(buffer + SW_BYTES_BV_OFFSET);
}

/* Work around printf() being unsafe in signals: */
#define SIGNAL_BUF_LEN 1000
char signal_message_buffer[SIGNAL_BUF_LEN];
void sig_print(char *msg)
{
        int left = SIGNAL_BUF_LEN - strlen(signal_message_buffer) - 1;

        strncat(signal_message_buffer, msg, left);
}

static volatile bool noperm_signaled;
static int noperm_errs;
/*
 * Signal handler for when AMX is used but
 * permission has not been obtained.
 */
static void handle_noperm(int sig, siginfo_t *si, void *ctx_void)
{
        ucontext_t *ctx = (ucontext_t *)ctx_void;
        void *xbuf = ctx->uc_mcontext.fpregs;
        struct _fpx_sw_bytes *sw_bytes;
        uint64_t features;

        /* Reset the signal message buffer: */
        signal_message_buffer[0] = '\0';
        sig_print("\tAt SIGILL handler,\n");

        if (si->si_code != ILL_ILLOPC) {
                noperm_errs++;
                sig_print("[FAIL]\tInvalid signal code.\n");
        } else {
                sig_print("[OK]\tValid signal code (ILL_ILLOPC).\n");
        }

        sw_bytes = get_fpx_sw_bytes(xbuf);
        /*
         * Without permission, the signal XSAVE buffer should not
         * have room for AMX register state (aka. xtiledata).
         * Check that the size does not overlap with where xtiledata
         * will reside.
         *
         * This also implies that no state components *PAST*
         * XTILEDATA (features >= 19) can be present in the buffer.
         */
        if (sw_bytes->xstate_size <= xtiledata.xbuf_offset) {
                sig_print("[OK]\tValid xstate size\n");
        } else {
                noperm_errs++;
                sig_print("[FAIL]\tInvalid xstate size\n");
        }

        features = get_fpx_sw_bytes_features(xbuf);
        /*
         * Without permission, the XTILEDATA feature
         * bit should not be set.
         */
        if ((features & XFEATURE_MASK_XTILEDATA) == 0) {
                sig_print("[OK]\tValid xstate mask\n");
        } else {
                noperm_errs++;
                sig_print("[FAIL]\tInvalid xstate mask\n");
        }

        noperm_signaled = true;
        ctx->uc_mcontext.gregs[REG_RIP] += 3; /* Skip the faulting XRSTOR */
}

/* Return true if XRSTOR is successful; otherwise, false. */
static inline bool xrstor_safe(struct xsave_buffer *xbuf, uint64_t mask)
{
        noperm_signaled = false;
        xrstor(xbuf, mask);

        /* Print any messages produced by the signal code: */
        printf("%s", signal_message_buffer);
        /*
         * Reset the buffer to make sure any future printing
         * only outputs new messages:
         */
        signal_message_buffer[0] = '\0';

        if (noperm_errs)
                fatal_error("saw %d errors in noperm signal handler\n", noperm_errs);

        return !noperm_signaled;
}

/*
 * Use XRSTOR to populate the XTILEDATA registers with
 * random data.
 *
 * Return true if successful; otherwise, false.
 */
static inline bool load_rand_tiledata(struct xsave_buffer *xbuf)
{
        clear_xstate_header(xbuf);
        set_xstatebv(xbuf, XFEATURE_MASK_XTILEDATA);
        set_rand_tiledata(xbuf);
        return xrstor_safe(xbuf, XFEATURE_MASK_XTILEDATA);
}
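
/*
 * Note: XRSTOR initializes any requested state component whose bit is
 * clear in the header's XSTATE_BV.  init_xtiledata() below relies on
 * that: restoring from a buffer with a zeroed header puts XTILEDATA
 * back into its init state.
 */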

/* Return XTILEDATA to its initial configuration. */
static inline void init_xtiledata(void)
{
        clear_xstate_header(stashed_xsave);
        xrstor_safe(stashed_xsave, XFEATURE_MASK_XTILEDATA);
}

enum expected_result { FAIL_EXPECTED, SUCCESS_EXPECTED };
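
/*
 * XTILEDATA is a dynamically-enabled xstate feature: a process must ask
 * for it with arch_prctl(ARCH_REQ_XCOMP_PERM) before the tile registers
 * can be used.  The kernel refuses the request if an installed
 * sigaltstack is too small for the expanded signal frame, which is what
 * the tests below exercise.
 */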

/* arch_prctl() and sigaltstack() test */

#define ARCH_GET_XCOMP_SUPP	0x1021
#define ARCH_GET_XCOMP_PERM	0x1022
#define ARCH_REQ_XCOMP_PERM	0x1023

static void req_xtiledata_perm(void)
{
        syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_PERM, XFEATURE_XTILEDATA);
}

static void validate_req_xcomp_perm(enum expected_result exp)
{
        unsigned long bitmask, expected_bitmask;
        long rc;

        rc = syscall(SYS_arch_prctl, ARCH_GET_XCOMP_PERM, &bitmask);
        if (rc) {
                fatal_error("prctl(ARCH_GET_XCOMP_PERM) error: %ld", rc);
        } else if (!(bitmask & XFEATURE_MASK_XTILECFG)) {
                fatal_error("ARCH_GET_XCOMP_PERM returns XFEATURE_XTILECFG off.");
        }

        rc = syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_PERM, XFEATURE_XTILEDATA);
        if (exp == FAIL_EXPECTED) {
                if (rc) {
                        printf("[OK]\tARCH_REQ_XCOMP_PERM saw expected failure.\n");
                        return;
                }

                fatal_error("ARCH_REQ_XCOMP_PERM saw unexpected success.\n");
        } else if (rc) {
                fatal_error("ARCH_REQ_XCOMP_PERM saw unexpected failure.\n");
        }

        expected_bitmask = bitmask | XFEATURE_MASK_XTILEDATA;

        rc = syscall(SYS_arch_prctl, ARCH_GET_XCOMP_PERM, &bitmask);
        if (rc) {
                fatal_error("prctl(ARCH_GET_XCOMP_PERM) error: %ld", rc);
        } else if (bitmask != expected_bitmask) {
                fatal_error("ARCH_REQ_XCOMP_PERM set a wrong bitmask: %lx, expected: %lx.\n",
                            bitmask, expected_bitmask);
        } else {
                printf("\tARCH_REQ_XCOMP_PERM is successful.\n");
        }
}

static void validate_xcomp_perm(enum expected_result exp)
{
        bool load_success = load_rand_tiledata(stashed_xsave);

        if (exp == FAIL_EXPECTED) {
                if (load_success) {
                        noperm_errs++;
                        printf("[FAIL]\tLoad tiledata succeeded.\n");
                } else {
                        printf("[OK]\tLoad tiledata failed.\n");
                }
        } else if (exp == SUCCESS_EXPECTED) {
                if (load_success) {
                        printf("[OK]\tLoad tiledata succeeded.\n");
                } else {
                        noperm_errs++;
                        printf("[FAIL]\tLoad tiledata failed.\n");
                }
        }
}

#ifndef AT_MINSIGSTKSZ
# define AT_MINSIGSTKSZ	51
#endif

static void *alloc_altstack(unsigned int size)
{
        void *altstack;

        altstack = mmap(NULL, size, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);

        if (altstack == MAP_FAILED)
                fatal_error("mmap() for altstack");

        return altstack;
}

static void setup_altstack(void *addr, unsigned long size, enum expected_result exp)
{
        stack_t ss;
        int rc;

        memset(&ss, 0, sizeof(ss));
        ss.ss_size = size;
        ss.ss_sp = addr;

        rc = sigaltstack(&ss, NULL);

        if (exp == FAIL_EXPECTED) {
                if (rc) {
                        printf("[OK]\tsigaltstack() failed.\n");
                } else {
                        fatal_error("sigaltstack() succeeded unexpectedly.\n");
                }
        } else if (rc) {
                fatal_error("sigaltstack()");
        }
}

static void test_dynamic_sigaltstack(void)
{
        unsigned int small_size, enough_size;
        unsigned long minsigstksz;
        void *altstack;

        minsigstksz = getauxval(AT_MINSIGSTKSZ);
        printf("\tAT_MINSIGSTKSZ = %lu\n", minsigstksz);
        /*
         * getauxval() itself can return 0 for failure or
         * success.  But, in this case, AT_MINSIGSTKSZ
         * will always return a nonzero value if implemented.
         * Just check for 0.
         */
        if (minsigstksz == 0) {
                printf("no support for AT_MINSIGSTKSZ, skipping sigaltstack tests\n");
                return;
        }

        enough_size = minsigstksz * 2;

        altstack = alloc_altstack(enough_size);
        printf("\tAllocate memory for altstack (%u bytes).\n", enough_size);

        /*
         * Try setup_altstack() with a size which can not fit
         * XTILEDATA.  ARCH_REQ_XCOMP_PERM should fail.
         */
        small_size = minsigstksz - xtiledata.size;
        printf("\tAfter sigaltstack() with small size (%u bytes).\n", small_size);
        setup_altstack(altstack, small_size, SUCCESS_EXPECTED);
        validate_req_xcomp_perm(FAIL_EXPECTED);

        /*
         * Try setup_altstack() with a size derived from
         * AT_MINSIGSTKSZ.  It should be more than large enough
         * and thus ARCH_REQ_XCOMP_PERM should succeed.
         */
        printf("\tAfter sigaltstack() with enough size (%u bytes).\n", enough_size);
        setup_altstack(altstack, enough_size, SUCCESS_EXPECTED);
        validate_req_xcomp_perm(SUCCESS_EXPECTED);

        /*
         * Try to coerce setup_altstack() to again accept a
         * too-small altstack.  This ensures that big-enough
         * sigaltstacks can not shrink to a too-small value
         * once XTILEDATA permission is established.
         */
        printf("\tThen, sigaltstack() with small size (%u bytes).\n", small_size);
        setup_altstack(altstack, small_size, FAIL_EXPECTED);
}
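
/*
 * The whole test below runs in a fork()ed child.  XCOMP permission is
 * per-process and is not visible to the parent, so doing the permission
 * dance in a child leaves the main test process untouched; main()
 * requests permission explicitly before the later tests.
 */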

static void test_dynamic_state(void)
{
        pid_t parent, child, grandchild;

        parent = fork();
        if (parent < 0) {
                /* fork() failed */
                fatal_error("fork");
        } else if (parent > 0) {
                int status;
                /* fork() succeeded.  Now in the parent. */

                wait(&status);
                if (!WIFEXITED(status) || WEXITSTATUS(status))
                        fatal_error("arch_prctl test parent exit");
                return;
        }
        /* fork() succeeded.  Now in the child. */

        printf("[RUN]\tCheck ARCH_REQ_XCOMP_PERM around process fork() and sigaltstack() test.\n");

        printf("\tFork a child.\n");
        child = fork();
        if (child < 0) {
                fatal_error("fork");
        } else if (child > 0) {
                int status;

                wait(&status);
                if (!WIFEXITED(status) || WEXITSTATUS(status))
                        fatal_error("arch_prctl test child exit");
                _exit(0);
        }

        /*
         * The permission request should fail without an
         * XTILEDATA-compatible signal stack
         */
        printf("\tTest XCOMP_PERM at child.\n");
        validate_xcomp_perm(FAIL_EXPECTED);

        /*
         * Set up an XTILEDATA-compatible signal stack and
         * also obtain permission to populate XTILEDATA.
         */
        printf("\tTest dynamic sigaltstack at child:\n");
        test_dynamic_sigaltstack();

        /* Ensure that XTILEDATA can be populated. */
        printf("\tTest XCOMP_PERM again at child.\n");
        validate_xcomp_perm(SUCCESS_EXPECTED);

        printf("\tFork a grandchild.\n");
        grandchild = fork();
        if (grandchild < 0) {
                /* fork() failed */
                fatal_error("fork");
        } else if (!grandchild) {
                /* fork() succeeded.  Now in the (grand)child. */
                printf("\tTest XCOMP_PERM at grandchild.\n");

                /*
                 * Ensure that the grandchild inherited
                 * permission and a compatible sigaltstack:
                 */
                validate_xcomp_perm(SUCCESS_EXPECTED);
        } else {
                int status;
                /* fork() succeeded.  Now in the parent. */

                wait(&status);
                if (!WIFEXITED(status) || WEXITSTATUS(status))
                        fatal_error("fork test grandchild");
        }

        _exit(0);
}

static inline int __compare_tiledata_state(struct xsave_buffer *xbuf1, struct xsave_buffer *xbuf2)
{
        return memcmp(&xbuf1->bytes[xtiledata.xbuf_offset],
                      &xbuf2->bytes[xtiledata.xbuf_offset],
                      xtiledata.size);
}

/*
 * Save current register state and compare it to @xbuf1.
 *
 * Returns false if @xbuf1 matches the registers.
 * Returns true  if @xbuf1 differs from the registers.
 */
static inline bool __validate_tiledata_regs(struct xsave_buffer *xbuf1)
{
        struct xsave_buffer *xbuf2;
        int ret;

        xbuf2 = alloc_xbuf();
        if (!xbuf2)
                fatal_error("failed to allocate XSAVE buffer\n");

        xsave(xbuf2, XFEATURE_MASK_XTILEDATA);
        ret = __compare_tiledata_state(xbuf1, xbuf2);

        free(xbuf2);

        if (ret == 0)
                return false;
        return true;
}

static inline void validate_tiledata_regs_same(struct xsave_buffer *xbuf)
{
        int ret = __validate_tiledata_regs(xbuf);

        if (ret != 0)
                fatal_error("TILEDATA registers changed");
}

static inline void validate_tiledata_regs_changed(struct xsave_buffer *xbuf)
{
        int ret = __validate_tiledata_regs(xbuf);

        if (ret == 0)
                fatal_error("TILEDATA registers did not change");
}

/* tiledata inheritance test */

static void test_fork(void)
{
        pid_t child, grandchild;

        child = fork();
        if (child < 0) {
                /* fork() failed */
                fatal_error("fork");
        } else if (child > 0) {
                /* fork() succeeded.  Now in the parent. */
                int status;

                wait(&status);
                if (!WIFEXITED(status) || WEXITSTATUS(status))
                        fatal_error("fork test child");
                return;
        }
        /* fork() succeeded.  Now in the child. */
        printf("[RUN]\tCheck tile data inheritance.\n\tBefore fork(), load tiledata\n");

        load_rand_tiledata(stashed_xsave);

        grandchild = fork();
        if (grandchild < 0) {
                /* fork() failed */
                fatal_error("fork");
        } else if (grandchild > 0) {
                /* fork() succeeded.  Still in the first child. */
                int status;

                wait(&status);
                if (!WIFEXITED(status) || WEXITSTATUS(status))
                        fatal_error("fork test grandchild");
                _exit(0);
        }
        /* fork() succeeded.  Now in the (grand)child. */

        /*
         * TILEDATA registers are not preserved across fork().
         * Ensure that their value has changed:
         */
        validate_tiledata_regs_changed(stashed_xsave);

        _exit(0);
}
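
/*
 * The context-switch test below pins every thread to CPU 0 and chains
 * them in a ring: each thread waits on its own mutex and unlocks the
 * next thread's.  With all threads contending for a single CPU, each
 * hand-off forces a context switch, which must preserve the tile data
 * loaded by each thread.
 */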

/* Context switching test */

static struct _ctxtswtest_cfg {
        unsigned int iterations;
        unsigned int num_threads;
} ctxtswtest_config;

struct futex_info {
        pthread_t thread;
        int nr;
        pthread_mutex_t mutex;
        struct futex_info *next;
};

static void *check_tiledata(void *info)
{
        struct futex_info *finfo = (struct futex_info *)info;
        struct xsave_buffer *xbuf;
        int i;

        xbuf = alloc_xbuf();
        if (!xbuf)
                fatal_error("unable to allocate XSAVE buffer");

        /*
         * Load random data into 'xbuf' and then restore
         * it to the tile registers themselves.
         */
        load_rand_tiledata(xbuf);
        for (i = 0; i < ctxtswtest_config.iterations; i++) {
                pthread_mutex_lock(&finfo->mutex);

                /*
                 * Ensure the register values have not
                 * diverged from those recorded in 'xbuf'.
                 */
                validate_tiledata_regs_same(xbuf);

                /* Load new, random values into xbuf and registers */
                load_rand_tiledata(xbuf);

                /*
                 * The last thread's last unlock will be for
                 * thread 0's mutex.  However, thread 0 will
                 * have already exited the loop and the mutex
                 * will already be unlocked.
                 *
                 * Because this is not an ERRORCHECK mutex,
                 * that inconsistency will be silently ignored.
                 */
                pthread_mutex_unlock(&finfo->next->mutex);
        }

        free(xbuf);
        /*
         * Return this thread's finfo, which is
         * a unique value for this thread.
         */
        return finfo;
}

static int create_threads(int num, struct futex_info *finfo)
{
        int i;

        for (i = 0; i < num; i++) {
                int next_nr;

                finfo[i].nr = i;
                /*
                 * Thread 'i' will wait on this mutex to
                 * be unlocked.  Lock it immediately after
                 * initialization:
                 */
                pthread_mutex_init(&finfo[i].mutex, NULL);
                pthread_mutex_lock(&finfo[i].mutex);

                next_nr = (i + 1) % num;
                finfo[i].next = &finfo[next_nr];

                if (pthread_create(&finfo[i].thread, NULL, check_tiledata, &finfo[i]))
                        fatal_error("pthread_create()");
        }
        return 0;
}

static void affinitize_cpu0(void)
{
        cpu_set_t cpuset;

        CPU_ZERO(&cpuset);
        CPU_SET(0, &cpuset);

        if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0)
                fatal_error("sched_setaffinity to CPU 0");
}

static void test_context_switch(void)
{
        struct futex_info *finfo;
        int i;

        /* Affinitize to one CPU to force context switches */
        affinitize_cpu0();

        req_xtiledata_perm();

        printf("[RUN]\tCheck tiledata context switches, %d iterations, %d threads.\n",
               ctxtswtest_config.iterations,
               ctxtswtest_config.num_threads);

        finfo = malloc(sizeof(*finfo) * ctxtswtest_config.num_threads);
        if (!finfo)
                fatal_error("malloc()");

        create_threads(ctxtswtest_config.num_threads, finfo);

        /*
         * This thread wakes up thread 0
         * Thread 0 will wake up 1
         * Thread 1 will wake up 2
         * ...
         * the last thread will wake up 0
         *
         * ... this will repeat for the configured
         * number of iterations.
         */
        pthread_mutex_unlock(&finfo[0].mutex);

        /* Wait for all the threads to finish: */
        for (i = 0; i < ctxtswtest_config.num_threads; i++) {
                void *thread_retval;
                int rc;

                rc = pthread_join(finfo[i].thread, &thread_retval);

                if (rc)
                        fatal_error("pthread_join() failed for thread %d err: %d\n",
                                    i, rc);

                if (thread_retval != &finfo[i])
                        fatal_error("unexpected thread retval for thread %d: %p\n",
                                    i, thread_retval);
        }

        printf("[OK]\tNo incorrect case was found.\n");

        free(finfo);
}
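
/*
 * Note: the NT_X86_XSTATE regset is exchanged in the non-compacted
 * XSAVE format, so the CPUID-enumerated xtiledata.xbuf_offset applies
 * to the buffer transferred via ptrace() below as well.
 */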

/* Ptrace test */

/*
 * Make sure the ptracee has the expanded kernel buffer on the first
 * use. Then, initialize the state before performing the state
 * injection from the ptracer.
 */
static inline void ptracee_firstuse_tiledata(void)
{
        load_rand_tiledata(stashed_xsave);
        init_xtiledata();
}

/*
 * Ptracer injects the randomized tile data state. It also reads
 * before and after that, which will execute the kernel's state copy
 * functions. So, the tester is advised to double-check any emitted
 * kernel messages.
 */
static void ptracer_inject_tiledata(pid_t target)
{
        struct xsave_buffer *xbuf;
        struct iovec iov;

        xbuf = alloc_xbuf();
        if (!xbuf)
                fatal_error("unable to allocate XSAVE buffer");

        printf("\tRead the init'ed tiledata via ptrace().\n");

        iov.iov_base = xbuf;
        iov.iov_len = xbuf_size;

        memset(stashed_xsave, 0, xbuf_size);

        if (ptrace(PTRACE_GETREGSET, target, (uint32_t)NT_X86_XSTATE, &iov))
                fatal_error("PTRACE_GETREGSET");

        if (!__compare_tiledata_state(stashed_xsave, xbuf))
                printf("[OK]\tThe init'ed tiledata was read from ptracee.\n");
        else
                printf("[FAIL]\tThe init'ed tiledata was not read from ptracee.\n");

        printf("\tInject tiledata via ptrace().\n");

        load_rand_tiledata(xbuf);

        memcpy(&stashed_xsave->bytes[xtiledata.xbuf_offset],
               &xbuf->bytes[xtiledata.xbuf_offset],
               xtiledata.size);

        if (ptrace(PTRACE_SETREGSET, target, (uint32_t)NT_X86_XSTATE, &iov))
                fatal_error("PTRACE_SETREGSET");

        if (ptrace(PTRACE_GETREGSET, target, (uint32_t)NT_X86_XSTATE, &iov))
                fatal_error("PTRACE_GETREGSET");

        if (!__compare_tiledata_state(stashed_xsave, xbuf))
                printf("[OK]\tTiledata was correctly written to ptracee.\n");
        else
                printf("[FAIL]\tTiledata was not correctly written to ptracee.\n");
}

static void test_ptrace(void)
{
        pid_t child;
        int status;

        child = fork();
        if (child < 0) {
                err(1, "fork");
        } else if (!child) {
                if (ptrace(PTRACE_TRACEME, 0, NULL, NULL))
                        err(1, "PTRACE_TRACEME");

                ptracee_firstuse_tiledata();

                raise(SIGTRAP);
                _exit(0);
        }

        do {
                wait(&status);
        } while (WSTOPSIG(status) != SIGTRAP);

        ptracer_inject_tiledata(child);

        ptrace(PTRACE_DETACH, child, NULL, NULL);
        wait(&status);
        if (!WIFEXITED(status) || WEXITSTATUS(status))
                err(1, "ptrace test");
}

int main(void)
{
        unsigned long features;
        long rc;

        rc = syscall(SYS_arch_prctl, ARCH_GET_XCOMP_SUPP, &features);
        if (rc || (features & XFEATURE_MASK_XTILE) != XFEATURE_MASK_XTILE) {
                ksft_print_msg("no AMX support\n");
                return KSFT_SKIP;
        }

        check_cpuid_xtiledata();

        init_stashed_xsave();
        sethandler(SIGILL, handle_noperm, 0);

        test_dynamic_state();

        /* Request permission for the following tests */
        req_xtiledata_perm();

        test_fork();

        ctxtswtest_config.iterations = 10;
        ctxtswtest_config.num_threads = 5;
        test_context_switch();

        test_ptrace();

        clearhandler(SIGILL);
        free_stashed_xsave();

        return 0;
}