TOMOYO Linux Cross Reference
Linux/tools/testing/selftests/sgx/main.c

  1 // SPDX-License-Identifier: GPL-2.0
  2 /*  Copyright(c) 2016-20 Intel Corporation. */
  3 
  4 #include <cpuid.h>
  5 #include <elf.h>
  6 #include <errno.h>
  7 #include <fcntl.h>
  8 #include <stdbool.h>
  9 #include <stdio.h>
 10 #include <stdint.h>
 11 #include <stdlib.h>
 12 #include <string.h>
 13 #include <unistd.h>
 14 #include <sys/ioctl.h>
 15 #include <sys/mman.h>
 16 #include <sys/stat.h>
 17 #include <sys/time.h>
 18 #include <sys/types.h>
 19 #include <sys/auxv.h>
 20 #include "defines.h"
 21 #include "../kselftest_harness.h"
 22 #include "main.h"
 23 
 24 static const uint64_t MAGIC = 0x1122334455667788ULL;
 25 static const uint64_t MAGIC2 = 0x8877665544332211ULL;
 26 vdso_sgx_enter_enclave_t vdso_sgx_enter_enclave;
 27 
 28 /*
 29  * The Security Information (SECINFO) data structure, needed by a few SGX
 30  * instructions (e.g. ENCLU[EACCEPT] and ENCLU[EMODPE]), holds metadata
 31  * about an enclave page. &enum sgx_secinfo_page_state specifies the
 32  * SECINFO flags used for page state.
 33  */
 34 enum sgx_secinfo_page_state {
 35         SGX_SECINFO_PENDING = (1 << 3),
 36         SGX_SECINFO_MODIFIED = (1 << 4),
 37         SGX_SECINFO_PR = (1 << 5),
 38 };
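
/*
 * For example, later in this file a freshly EAUG'd page is EACCEPTed with
 * SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING,
 * while a page trimmed for removal is EACCEPTed with
 * SGX_SECINFO_TRIM | SGX_SECINFO_MODIFIED.
 */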
 39 
 40 struct vdso_symtab {
 41         Elf64_Sym *elf_symtab;
 42         const char *elf_symstrtab;
 43         Elf64_Word *elf_hashtab;
 44 };
 45 
 46 static Elf64_Dyn *vdso_get_dyntab(void *addr)
 47 {
 48         Elf64_Ehdr *ehdr = addr;
 49         Elf64_Phdr *phdrtab = addr + ehdr->e_phoff;
 50         int i;
 51 
 52         for (i = 0; i < ehdr->e_phnum; i++)
 53                 if (phdrtab[i].p_type == PT_DYNAMIC)
 54                         return addr + phdrtab[i].p_offset;
 55 
 56         return NULL;
 57 }
 58 
 59 static void *vdso_get_dyn(void *addr, Elf64_Dyn *dyntab, Elf64_Sxword tag)
 60 {
 61         int i;
 62 
 63         for (i = 0; dyntab[i].d_tag != DT_NULL; i++)
 64                 if (dyntab[i].d_tag == tag)
 65                         return addr + dyntab[i].d_un.d_ptr;
 66 
 67         return NULL;
 68 }
 69 
 70 static bool vdso_get_symtab(void *addr, struct vdso_symtab *symtab)
 71 {
 72         Elf64_Dyn *dyntab = vdso_get_dyntab(addr);
 73 
 74         symtab->elf_symtab = vdso_get_dyn(addr, dyntab, DT_SYMTAB);
 75         if (!symtab->elf_symtab)
 76                 return false;
 77 
 78         symtab->elf_symstrtab = vdso_get_dyn(addr, dyntab, DT_STRTAB);
 79         if (!symtab->elf_symstrtab)
 80                 return false;
 81 
 82         symtab->elf_hashtab = vdso_get_dyn(addr, dyntab, DT_HASH);
 83         if (!symtab->elf_hashtab)
 84                 return false;
 85 
 86         return true;
 87 }
 88 
 89 static inline int sgx2_supported(void)
 90 {
 91         unsigned int eax, ebx, ecx, edx;
 92 
 93         __cpuid_count(SGX_CPUID, 0x0, eax, ebx, ecx, edx);
 94 
 95         return eax & 0x2;
 96 }
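
/*
 * Note: "eax & 0x2" above tests bit 1 of CPUID.(EAX=SGX_CPUID, ECX=0):EAX,
 * which advertises SGX2 support (the dynamic memory management leaves such
 * as EAUG and EMODT used by the tests below).
 */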
 97 
 98 static unsigned long elf_sym_hash(const char *name)
 99 {
100         unsigned long h = 0, high;
101 
102         while (*name) {
103                 h = (h << 4) + *name++;
104                 high = h & 0xf0000000;
105 
106                 if (high)
107                         h ^= high >> 24;
108 
109                 h &= ~high;
110         }
111 
112         return h;
113 }
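
/*
 * Worked example of the classic SysV ELF hash above: elf_sym_hash("ab")
 * evaluates to ((0x61 << 4) + 0x62) = 0x672; the high-nibble fold only
 * takes effect once the intermediate hash grows beyond 28 bits.
 */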
114 
115 static Elf64_Sym *vdso_symtab_get(struct vdso_symtab *symtab, const char *name)
116 {
117         Elf64_Word bucketnum = symtab->elf_hashtab[0];
118         Elf64_Word *buckettab = &symtab->elf_hashtab[2];
119         Elf64_Word *chaintab = &symtab->elf_hashtab[2 + bucketnum];
120         Elf64_Sym *sym;
121         Elf64_Word i;
122 
123         for (i = buckettab[elf_sym_hash(name) % bucketnum]; i != STN_UNDEF;
124              i = chaintab[i]) {
125                 sym = &symtab->elf_symtab[i];
126                 if (!strcmp(name, &symtab->elf_symstrtab[sym->st_name]))
127                         return sym;
128         }
129 
130         return NULL;
131 }
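
/*
 * Layout of the DT_HASH table indexed above: word 0 holds nbucket, word 1
 * holds nchain, the bucket array occupies words [2, 2 + nbucket) and the
 * chain array follows it, hence buckettab starting at index 2 and chaintab
 * at index 2 + bucketnum.
 */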
132 
133 /*
134  * Return the offset in the enclave where the TCS segment can be found.
135  * The first RW segment loaded is the TCS.
136  */
137 static off_t encl_get_tcs_offset(struct encl *encl)
138 {
139         int i;
140 
141         for (i = 0; i < encl->nr_segments; i++) {
142                 struct encl_segment *seg = &encl->segment_tbl[i];
143 
144                 if (i == 0 && seg->prot == (PROT_READ | PROT_WRITE))
145                         return seg->offset;
146         }
147 
148         return -1;
149 }
150 
151 /*
152  * Return the offset in the enclave where the data segment can be found.
153  * The first RW segment loaded is the TCS, skip that to get info on the
154  * data segment.
155  */
156 static off_t encl_get_data_offset(struct encl *encl)
157 {
158         int i;
159 
160         for (i = 1; i < encl->nr_segments; i++) {
161                 struct encl_segment *seg = &encl->segment_tbl[i];
162 
163                 if (seg->prot == (PROT_READ | PROT_WRITE))
164                         return seg->offset;
165         }
166 
167         return -1;
168 }
169 
170 FIXTURE(enclave) {
171         struct encl encl;
172         struct sgx_enclave_run run;
173 };
174 
175 static bool setup_test_encl(unsigned long heap_size, struct encl *encl,
176                             struct __test_metadata *_metadata)
177 {
178         Elf64_Sym *sgx_enter_enclave_sym = NULL;
179         struct vdso_symtab symtab;
180         struct encl_segment *seg;
181         char maps_line[256];
182         FILE *maps_file;
183         unsigned int i;
184         void *addr;
185 
186         if (!encl_load("test_encl.elf", encl, heap_size)) {
187                 encl_delete(encl);
188                 TH_LOG("Failed to load the test enclave.");
189                 return false;
190         }
191 
192         if (!encl_measure(encl))
193                 goto err;
194 
195         if (!encl_build(encl))
196                 goto err;
197 
198         /*
199          * This mmap() of each enclave segment is all an enclave consumer must do.
200          */
201         for (i = 0; i < encl->nr_segments; i++) {
202                 struct encl_segment *seg = &encl->segment_tbl[i];
203 
204                 addr = mmap((void *)encl->encl_base + seg->offset, seg->size,
205                             seg->prot, MAP_SHARED | MAP_FIXED, encl->fd, 0);
206                 EXPECT_NE(addr, MAP_FAILED);
207                 if (addr == MAP_FAILED)
208                         goto err;
209         }
210 
211         /* Get vDSO base address */
212         addr = (void *)getauxval(AT_SYSINFO_EHDR);
213         if (!addr)
214                 goto err;
215 
216         if (!vdso_get_symtab(addr, &symtab))
217                 goto err;
218 
219         sgx_enter_enclave_sym = vdso_symtab_get(&symtab, "__vdso_sgx_enter_enclave");
220         if (!sgx_enter_enclave_sym)
221                 goto err;
222 
223         vdso_sgx_enter_enclave = addr + sgx_enter_enclave_sym->st_value;
224 
225         return true;
226 
227 err:
228         for (i = 0; i < encl->nr_segments; i++) {
229                 seg = &encl->segment_tbl[i];
230 
231                 TH_LOG("0x%016lx 0x%016lx 0x%02x", seg->offset, seg->size, seg->prot);
232         }
233 
234         maps_file = fopen("/proc/self/maps", "r");
235         if (maps_file != NULL)  {
236                 while (fgets(maps_line, sizeof(maps_line), maps_file) != NULL) {
237                         maps_line[strlen(maps_line) - 1] = '\0';
238 
239                         if (strstr(maps_line, "/dev/sgx_enclave"))
240                                 TH_LOG("%s", maps_line);
241                 }
242 
243                 fclose(maps_file);
244         }
245 
246         TH_LOG("Failed to initialize the test enclave.");
247 
248         encl_delete(encl);
249 
250         return false;
251 }
252 
253 FIXTURE_SETUP(enclave)
254 {
255 }
256 
257 FIXTURE_TEARDOWN(enclave)
258 {
259         encl_delete(&self->encl);
260 }
261 
262 #define ENCL_CALL(op, run, clobbered) \
263         ({ \
264                 int ret; \
265                 if ((clobbered)) \
266                         ret = vdso_sgx_enter_enclave((unsigned long)(op), 0, 0, \
267                                                      EENTER, 0, 0, (run)); \
268                 else \
269                         ret = sgx_enter_enclave((void *)(op), NULL, 0, EENTER, NULL, NULL, \
270                                                 (run)); \
271                 ret; \
272         })
273 
274 #define EXPECT_EEXIT(run) \
275         do { \
276                 EXPECT_EQ((run)->function, EEXIT); \
277                 if ((run)->function != EEXIT) \
278                         TH_LOG("0x%02x 0x%02x 0x%016llx", (run)->exception_vector, \
279                                (run)->exception_error_code, (run)->exception_addr); \
280         } while (0)
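
/*
 * Typical call pattern used by the tests below: issue an enclave operation
 * with ENCL_CALL() and verify a clean synchronous exit with EXPECT_EEXIT(),
 * for example:
 *
 *	EXPECT_EQ(ENCL_CALL(&op, &self->run, true), 0);
 *	EXPECT_EEXIT(&self->run);
 */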
281 
282 TEST_F(enclave, unclobbered_vdso)
283 {
284         struct encl_op_get_from_buf get_op;
285         struct encl_op_put_to_buf put_op;
286 
287         ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));
288 
289         memset(&self->run, 0, sizeof(self->run));
290         self->run.tcs = self->encl.encl_base;
291 
292         put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
293         put_op.value = MAGIC;
294 
295         EXPECT_EQ(ENCL_CALL(&put_op, &self->run, false), 0);
296 
297         EXPECT_EEXIT(&self->run);
298         EXPECT_EQ(self->run.user_data, 0);
299 
300         get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
301         get_op.value = 0;
302 
303         EXPECT_EQ(ENCL_CALL(&get_op, &self->run, false), 0);
304 
305         EXPECT_EQ(get_op.value, MAGIC);
306         EXPECT_EEXIT(&self->run);
307         EXPECT_EQ(self->run.user_data, 0);
308 }
309 
310 /*
311  * A section metric is assembled from two CPUID outputs: bits 12-31 of @low
312  * define bits 12-31 of the metric and bits 0-19 of @high define bits 32-51
313  * of the metric.
314  */
315 static unsigned long sgx_calc_section_metric(unsigned int low,
316                                              unsigned int high)
317 {
318         return (low & GENMASK_ULL(31, 12)) +
319                ((high & GENMASK_ULL(19, 0)) << 32);
320 }
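
/*
 * Example: with low = 0x00c00000 and high = 0x00000001 the metric is
 * 0x00c00000 + (0x1 << 32) = 0x100c00000 bytes, i.e. bits 12-31 come
 * from @low and bits 32-51 from @high.
 */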
321 
322 /*
323  * Sum total available physical SGX memory across all EPC sections
324  *
325  * Return: total physical SGX memory available on the system
326  */
327 static unsigned long get_total_epc_mem(void)
328 {
329         unsigned int eax, ebx, ecx, edx;
330         unsigned long total_size = 0;
331         unsigned int type;
332         int section = 0;
333 
334         while (true) {
335                 __cpuid_count(SGX_CPUID, section + SGX_CPUID_EPC, eax, ebx, ecx, edx);
336 
337                 type = eax & SGX_CPUID_EPC_MASK;
338                 if (type == SGX_CPUID_EPC_INVALID)
339                         break;
340 
341                 if (type != SGX_CPUID_EPC_SECTION)
342                         break;
343 
344                 total_size += sgx_calc_section_metric(ecx, edx);
345 
346                 section++;
347         }
348 
349         return total_size;
350 }
351 
352 TEST_F(enclave, unclobbered_vdso_oversubscribed)
353 {
354         struct encl_op_get_from_buf get_op;
355         struct encl_op_put_to_buf put_op;
356         unsigned long total_mem;
357 
358         total_mem = get_total_epc_mem();
359         ASSERT_NE(total_mem, 0);
360         ASSERT_TRUE(setup_test_encl(total_mem, &self->encl, _metadata));
361 
362         memset(&self->run, 0, sizeof(self->run));
363         self->run.tcs = self->encl.encl_base;
364 
365         put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
366         put_op.value = MAGIC;
367 
368         EXPECT_EQ(ENCL_CALL(&put_op, &self->run, false), 0);
369 
370         EXPECT_EEXIT(&self->run);
371         EXPECT_EQ(self->run.user_data, 0);
372 
373         get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
374         get_op.value = 0;
375 
376         EXPECT_EQ(ENCL_CALL(&get_op, &self->run, false), 0);
377 
378         EXPECT_EQ(get_op.value, MAGIC);
379         EXPECT_EEXIT(&self->run);
380         EXPECT_EQ(self->run.user_data, 0);
381 }
382 
383 TEST_F_TIMEOUT(enclave, unclobbered_vdso_oversubscribed_remove, 900)
384 {
385         struct sgx_enclave_remove_pages remove_ioc;
386         struct sgx_enclave_modify_types modt_ioc;
387         struct encl_op_get_from_buf get_op;
388         struct encl_op_eaccept eaccept_op;
389         struct encl_op_put_to_buf put_op;
390         struct encl_segment *heap;
391         unsigned long total_mem;
392         int ret, errno_save;
393         unsigned long addr;
394         unsigned long i;
395 
396         /*
397          * Create enclave with additional heap that is as big as all
398          * available physical SGX memory.
399          */
400         total_mem = get_total_epc_mem();
401         ASSERT_NE(total_mem, 0);
402         TH_LOG("Creating an enclave with %lu bytes heap may take a while ...",
403                total_mem);
404         ASSERT_TRUE(setup_test_encl(total_mem, &self->encl, _metadata));
405 
406         /*
407          * Hardware (SGX2) and kernel support are needed for this test. Start
408          * by checking that the test has a chance of succeeding.
409          */
410         memset(&modt_ioc, 0, sizeof(modt_ioc));
411         ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
412 
413         if (ret == -1) {
414                 if (errno == ENOTTY)
415                         SKIP(return,
416                              "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
417                 else if (errno == ENODEV)
418                         SKIP(return, "System does not support SGX2");
419         }
420 
421         /*
422          * Invalid parameters were provided during sanity check,
423          * expect command to fail.
424          */
425         EXPECT_EQ(ret, -1);
426 
427         /* SGX2 is supported by kernel and hardware, test can proceed. */
428         memset(&self->run, 0, sizeof(self->run));
429         self->run.tcs = self->encl.encl_base;
430 
431         heap = &self->encl.segment_tbl[self->encl.nr_segments - 1];
432 
433         put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
434         put_op.value = MAGIC;
435 
436         EXPECT_EQ(ENCL_CALL(&put_op, &self->run, false), 0);
437 
438         EXPECT_EEXIT(&self->run);
439         EXPECT_EQ(self->run.user_data, 0);
440 
441         get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
442         get_op.value = 0;
443 
444         EXPECT_EQ(ENCL_CALL(&get_op, &self->run, false), 0);
445 
446         EXPECT_EQ(get_op.value, MAGIC);
447         EXPECT_EEXIT(&self->run);
448         EXPECT_EQ(self->run.user_data, 0);
449 
450         /* Trim entire heap. */
451         memset(&modt_ioc, 0, sizeof(modt_ioc));
452 
453         modt_ioc.offset = heap->offset;
454         modt_ioc.length = heap->size;
455         modt_ioc.page_type = SGX_PAGE_TYPE_TRIM;
456 
457         TH_LOG("Changing type of %zd bytes to trimmed may take a while ...",
458                heap->size);
459         ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
460         errno_save = ret == -1 ? errno : 0;
461 
462         EXPECT_EQ(ret, 0);
463         EXPECT_EQ(errno_save, 0);
464         EXPECT_EQ(modt_ioc.result, 0);
465         EXPECT_EQ(modt_ioc.count, heap->size);
466 
467         /* EACCEPT all removed pages. */
468         addr = self->encl.encl_base + heap->offset;
469 
470         eaccept_op.flags = SGX_SECINFO_TRIM | SGX_SECINFO_MODIFIED;
471         eaccept_op.header.type = ENCL_OP_EACCEPT;
472 
473         TH_LOG("Entering enclave to run EACCEPT for each page of %zd bytes may take a while ...",
474                heap->size);
475         for (i = 0; i < heap->size; i += 4096) {
476                 eaccept_op.epc_addr = addr + i;
477                 eaccept_op.ret = 0;
478 
479                 EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
480 
481                 EXPECT_EQ(self->run.exception_vector, 0);
482                 EXPECT_EQ(self->run.exception_error_code, 0);
483                 EXPECT_EQ(self->run.exception_addr, 0);
484                 ASSERT_EQ(eaccept_op.ret, 0);
485                 ASSERT_EQ(self->run.function, EEXIT);
486         }
487 
488         /* Complete page removal. */
489         memset(&remove_ioc, 0, sizeof(remove_ioc));
490 
491         remove_ioc.offset = heap->offset;
492         remove_ioc.length = heap->size;
493 
494         TH_LOG("Removing %zd bytes from enclave may take a while ...",
495                heap->size);
496         ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &remove_ioc);
497         errno_save = ret == -1 ? errno : 0;
498 
499         EXPECT_EQ(ret, 0);
500         EXPECT_EQ(errno_save, 0);
501         EXPECT_EQ(remove_ioc.count, heap->size);
502 }
503 
504 TEST_F(enclave, clobbered_vdso)
505 {
506         struct encl_op_get_from_buf get_op;
507         struct encl_op_put_to_buf put_op;
508 
509         ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));
510 
511         memset(&self->run, 0, sizeof(self->run));
512         self->run.tcs = self->encl.encl_base;
513 
514         put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
515         put_op.value = MAGIC;
516 
517         EXPECT_EQ(ENCL_CALL(&put_op, &self->run, true), 0);
518 
519         EXPECT_EEXIT(&self->run);
520         EXPECT_EQ(self->run.user_data, 0);
521 
522         get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
523         get_op.value = 0;
524 
525         EXPECT_EQ(ENCL_CALL(&get_op, &self->run, true), 0);
526 
527         EXPECT_EQ(get_op.value, MAGIC);
528         EXPECT_EEXIT(&self->run);
529         EXPECT_EQ(self->run.user_data, 0);
530 }
531 
532 static int test_handler(long rdi, long rsi, long rdx, long ursp, long r8, long r9,
533                         struct sgx_enclave_run *run)
534 {
535         run->user_data = 0;
536 
537         return 0;
538 }
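
/*
 * Per the __vdso_sgx_enter_enclave() handler contract, returning 0 (or any
 * value <= 0) makes the vDSO call return that value to its caller instead
 * of re-entering the enclave; the handler above also clears run->user_data
 * so the tests can observe that it ran.
 */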
539 
540 TEST_F(enclave, clobbered_vdso_and_user_function)
541 {
542         struct encl_op_get_from_buf get_op;
543         struct encl_op_put_to_buf put_op;
544 
545         ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));
546 
547         memset(&self->run, 0, sizeof(self->run));
548         self->run.tcs = self->encl.encl_base;
549 
550         self->run.user_handler = (__u64)test_handler;
551         self->run.user_data = 0xdeadbeef;
552 
553         put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
554         put_op.value = MAGIC;
555 
556         EXPECT_EQ(ENCL_CALL(&put_op, &self->run, true), 0);
557 
558         EXPECT_EEXIT(&self->run);
559         EXPECT_EQ(self->run.user_data, 0);
560 
561         get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
562         get_op.value = 0;
563 
564         EXPECT_EQ(ENCL_CALL(&get_op, &self->run, true), 0);
565 
566         EXPECT_EQ(get_op.value, MAGIC);
567         EXPECT_EEXIT(&self->run);
568         EXPECT_EQ(self->run.user_data, 0);
569 }
570 
571 /*
572  * Sanity check that it is possible to enter either of the two hardcoded TCSs.
573  */
574 TEST_F(enclave, tcs_entry)
575 {
576         struct encl_op_header op;
577 
578         ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));
579 
580         memset(&self->run, 0, sizeof(self->run));
581         self->run.tcs = self->encl.encl_base;
582 
583         op.type = ENCL_OP_NOP;
584 
585         EXPECT_EQ(ENCL_CALL(&op, &self->run, true), 0);
586 
587         EXPECT_EEXIT(&self->run);
588         EXPECT_EQ(self->run.exception_vector, 0);
589         EXPECT_EQ(self->run.exception_error_code, 0);
590         EXPECT_EQ(self->run.exception_addr, 0);
591 
592         /* Move to the next TCS. */
593         self->run.tcs = self->encl.encl_base + PAGE_SIZE;
594 
595         EXPECT_EQ(ENCL_CALL(&op, &self->run, true), 0);
596 
597         EXPECT_EEXIT(&self->run);
598         EXPECT_EQ(self->run.exception_vector, 0);
599         EXPECT_EQ(self->run.exception_error_code, 0);
600         EXPECT_EQ(self->run.exception_addr, 0);
601 }
602 
603 /*
604  * Second page of .data segment is used to test changing PTE permissions.
605  * This spans the local encl_buffer within the test enclave.
606  *
607  * 1) Start with a sanity check: a value is written to the target page within
608  *    the enclave and read back to ensure target page can be written to.
609  * 2) Change PTE permissions (RW -> RO) of target page within enclave.
610  * 3) Repeat (1) - this time expecting a regular #PF communicated via the
611  *    vDSO.
612  * 4) Change PTE permissions of target page within enclave back to be RW.
613  * 5) Repeat (1) by resuming enclave, now expected to be possible to write to
614  *    and read from target page within enclave.
615  */
616 TEST_F(enclave, pte_permissions)
617 {
618         struct encl_op_get_from_addr get_addr_op;
619         struct encl_op_put_to_addr put_addr_op;
620         unsigned long data_start;
621         int ret;
622 
623         ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));
624 
625         memset(&self->run, 0, sizeof(self->run));
626         self->run.tcs = self->encl.encl_base;
627 
628         data_start = self->encl.encl_base +
629                      encl_get_data_offset(&self->encl) +
630                      PAGE_SIZE;
631 
632         /*
633          * Sanity check to ensure it is possible to write to page that will
634          * have its permissions manipulated.
635          */
636 
637         /* Write MAGIC to page */
638         put_addr_op.value = MAGIC;
639         put_addr_op.addr = data_start;
640         put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;
641 
642         EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);
643 
644         EXPECT_EEXIT(&self->run);
645         EXPECT_EQ(self->run.exception_vector, 0);
646         EXPECT_EQ(self->run.exception_error_code, 0);
647         EXPECT_EQ(self->run.exception_addr, 0);
648 
649         /*
650          * Read memory that was just written to, confirming that it is the
651          * value previously written (MAGIC).
652          */
653         get_addr_op.value = 0;
654         get_addr_op.addr = data_start;
655         get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;
656 
657         EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);
658 
659         EXPECT_EQ(get_addr_op.value, MAGIC);
660         EXPECT_EEXIT(&self->run);
661         EXPECT_EQ(self->run.exception_vector, 0);
662         EXPECT_EQ(self->run.exception_error_code, 0);
663         EXPECT_EQ(self->run.exception_addr, 0);
664 
665         /* Change PTE permissions of target page within the enclave */
666         ret = mprotect((void *)data_start, PAGE_SIZE, PROT_READ);
667         if (ret)
668                 perror("mprotect");
669 
670         /*
671          * PTE permissions of target page changed to read-only, EPCM
672          * permissions unchanged (EPCM permissions are RW), attempt to
673          * write to the page, expecting a regular #PF.
674          */
675 
676         put_addr_op.value = MAGIC2;
677 
678         EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);
679 
680         EXPECT_EQ(self->run.exception_vector, 14);
681         EXPECT_EQ(self->run.exception_error_code, 0x7);
682         EXPECT_EQ(self->run.exception_addr, data_start);
683 
684         self->run.exception_vector = 0;
685         self->run.exception_error_code = 0;
686         self->run.exception_addr = 0;
687 
688         /*
689          * Change PTE permissions back to enable enclave to write to the
690          * target page and resume enclave - do not expect any exceptions this
691          * time.
692          */
693         ret = mprotect((void *)data_start, PAGE_SIZE, PROT_READ | PROT_WRITE);
694         if (ret)
695                 perror("mprotect");
696 
697         EXPECT_EQ(vdso_sgx_enter_enclave((unsigned long)&put_addr_op, 0,
698                                          0, ERESUME, 0, 0, &self->run),
699                  0);
700 
701         EXPECT_EEXIT(&self->run);
702         EXPECT_EQ(self->run.exception_vector, 0);
703         EXPECT_EQ(self->run.exception_error_code, 0);
704         EXPECT_EQ(self->run.exception_addr, 0);
705 
706         get_addr_op.value = 0;
707 
708         EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);
709 
710         EXPECT_EQ(get_addr_op.value, MAGIC2);
711         EXPECT_EEXIT(&self->run);
712         EXPECT_EQ(self->run.exception_vector, 0);
713         EXPECT_EQ(self->run.exception_error_code, 0);
714         EXPECT_EQ(self->run.exception_addr, 0);
715 }
716 
717 /*
718  * Modifying permissions of TCS page should not be possible.
719  */
720 TEST_F(enclave, tcs_permissions)
721 {
722         struct sgx_enclave_restrict_permissions ioc;
723         int ret, errno_save;
724 
725         ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));
726 
727         memset(&self->run, 0, sizeof(self->run));
728         self->run.tcs = self->encl.encl_base;
729 
730         memset(&ioc, 0, sizeof(ioc));
731 
732         /*
733          * Ensure kernel supports needed ioctl() and system supports needed
734          * commands.
735          */
736 
737         ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS, &ioc);
738         errno_save = ret == -1 ? errno : 0;
739 
740         /*
741          * Invalid parameters were provided during sanity check,
742          * expect command to fail.
743          */
744         ASSERT_EQ(ret, -1);
745 
746         /* ret == -1 */
747         if (errno_save == ENOTTY)
748                 SKIP(return,
749                      "Kernel does not support SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS ioctl()");
750         else if (errno_save == ENODEV)
751                 SKIP(return, "System does not support SGX2");
752 
753         /*
754          * Attempt to make TCS page read-only. This is not allowed and
755          * should be prevented by the kernel.
756          */
757         ioc.offset = encl_get_tcs_offset(&self->encl);
758         ioc.length = PAGE_SIZE;
759         ioc.permissions = SGX_SECINFO_R;
760 
761         ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS, &ioc);
762         errno_save = ret == -1 ? errno : 0;
763 
764         EXPECT_EQ(ret, -1);
765         EXPECT_EQ(errno_save, EINVAL);
766         EXPECT_EQ(ioc.result, 0);
767         EXPECT_EQ(ioc.count, 0);
768 }
769 
770 /*
771  * Enclave page permission test.
772  *
773  * Modify and restore enclave page's EPCM (enclave) permissions from
774  * outside enclave (ENCLS[EMODPR] via kernel) as well as from within
775  * enclave (via ENCLU[EMODPE]). Check for page fault if
776  * VMA allows access but EPCM permissions do not.
777  */
778 TEST_F(enclave, epcm_permissions)
779 {
780         struct sgx_enclave_restrict_permissions restrict_ioc;
781         struct encl_op_get_from_addr get_addr_op;
782         struct encl_op_put_to_addr put_addr_op;
783         struct encl_op_eaccept eaccept_op;
784         struct encl_op_emodpe emodpe_op;
785         unsigned long data_start;
786         int ret, errno_save;
787 
788         ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));
789 
790         memset(&self->run, 0, sizeof(self->run));
791         self->run.tcs = self->encl.encl_base;
792 
793         /*
794          * Ensure kernel supports needed ioctl() and system supports needed
795          * commands.
796          */
797         memset(&restrict_ioc, 0, sizeof(restrict_ioc));
798 
799         ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS,
800                     &restrict_ioc);
801         errno_save = ret == -1 ? errno : 0;
802 
803         /*
804          * Invalid parameters were provided during sanity check,
805          * expect command to fail.
806          */
807         ASSERT_EQ(ret, -1);
808 
809         /* ret == -1 */
810         if (errno_save == ENOTTY)
811                 SKIP(return,
812                      "Kernel does not support SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS ioctl()");
813         else if (errno_save == ENODEV)
814                 SKIP(return, "System does not support SGX2");
815 
816         /*
817          * Page that will have its permissions changed is the second data
818          * page in the .data segment. This forms part of the local encl_buffer
819          * within the enclave.
820          *
821          * At start of test @data_start should have EPCM as well as PTE and
822          * VMA permissions of RW.
823          */
824 
825         data_start = self->encl.encl_base +
826                      encl_get_data_offset(&self->encl) + PAGE_SIZE;
827 
828         /*
829          * Sanity check that page at @data_start is writable before making
830          * any changes to page permissions.
831          *
832          * Start by writing MAGIC to test page.
833          */
834         put_addr_op.value = MAGIC;
835         put_addr_op.addr = data_start;
836         put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;
837 
838         EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);
839 
840         EXPECT_EEXIT(&self->run);
841         EXPECT_EQ(self->run.exception_vector, 0);
842         EXPECT_EQ(self->run.exception_error_code, 0);
843         EXPECT_EQ(self->run.exception_addr, 0);
844 
845         /*
846          * Read memory that was just written to, confirming that
847          * page is writable.
848          */
849         get_addr_op.value = 0;
850         get_addr_op.addr = data_start;
851         get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;
852 
853         EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);
854 
855         EXPECT_EQ(get_addr_op.value, MAGIC);
856         EXPECT_EEXIT(&self->run);
857         EXPECT_EQ(self->run.exception_vector, 0);
858         EXPECT_EQ(self->run.exception_error_code, 0);
859         EXPECT_EQ(self->run.exception_addr, 0);
860 
861         /*
862          * Change EPCM permissions to read-only. Kernel still considers
863          * the page writable.
864          */
865         memset(&restrict_ioc, 0, sizeof(restrict_ioc));
866 
867         restrict_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
868         restrict_ioc.length = PAGE_SIZE;
869         restrict_ioc.permissions = SGX_SECINFO_R;
870 
871         ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS,
872                     &restrict_ioc);
873         errno_save = ret == -1 ? errno : 0;
874 
875         EXPECT_EQ(ret, 0);
876         EXPECT_EQ(errno_save, 0);
877         EXPECT_EQ(restrict_ioc.result, 0);
878         EXPECT_EQ(restrict_ioc.count, 4096);
879 
880         /*
881          * EPCM permissions changed from kernel, need to EACCEPT from enclave.
882          */
883         eaccept_op.epc_addr = data_start;
884         eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_REG | SGX_SECINFO_PR;
885         eaccept_op.ret = 0;
886         eaccept_op.header.type = ENCL_OP_EACCEPT;
887 
888         EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
889 
890         EXPECT_EEXIT(&self->run);
891         EXPECT_EQ(self->run.exception_vector, 0);
892         EXPECT_EQ(self->run.exception_error_code, 0);
893         EXPECT_EQ(self->run.exception_addr, 0);
894         EXPECT_EQ(eaccept_op.ret, 0);
895 
896         /*
897          * EPCM permissions of the page are now read-only, expect a #PF
898          * on EPCM when attempting to write to the page from within the enclave.
899          */
900         put_addr_op.value = MAGIC2;
901 
902         EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);
903 
904         EXPECT_EQ(self->run.function, ERESUME);
905         EXPECT_EQ(self->run.exception_vector, 14);
906         EXPECT_EQ(self->run.exception_error_code, 0x8007);
907         EXPECT_EQ(self->run.exception_addr, data_start);
908 
909         self->run.exception_vector = 0;
910         self->run.exception_error_code = 0;
911         self->run.exception_addr = 0;
912 
913         /*
914          * Received an AEX but cannot return to the enclave at the same
915          * entrypoint; a different TCS is needed from which the EPCM
916          * permissions can be made writable again.
917          */
918         self->run.tcs = self->encl.encl_base + PAGE_SIZE;
919 
920         /*
921          * Enter enclave at new TCS to change EPCM permissions to be
922          * writable again and thus fix the page fault that triggered the
923          * AEX.
924          */
925 
926         emodpe_op.epc_addr = data_start;
927         emodpe_op.flags = SGX_SECINFO_R | SGX_SECINFO_W;
928         emodpe_op.header.type = ENCL_OP_EMODPE;
929 
930         EXPECT_EQ(ENCL_CALL(&emodpe_op, &self->run, true), 0);
931 
932         EXPECT_EEXIT(&self->run);
933         EXPECT_EQ(self->run.exception_vector, 0);
934         EXPECT_EQ(self->run.exception_error_code, 0);
935         EXPECT_EQ(self->run.exception_addr, 0);
936 
937         /*
938          * The wrong EPCM permissions that caused the original fault have
939          * now been fixed via ENCLU[EMODPE]. Return to the main TCS to
940          * resume execution at the faulting instruction; the PTE still
941          * allows writing to the page.
942          */
943         self->run.tcs = self->encl.encl_base;
949 
950         EXPECT_EQ(vdso_sgx_enter_enclave((unsigned long)&put_addr_op, 0, 0,
951                                          ERESUME, 0, 0,
952                                          &self->run),
953                   0);
954 
955         EXPECT_EEXIT(&self->run);
956         EXPECT_EQ(self->run.exception_vector, 0);
957         EXPECT_EQ(self->run.exception_error_code, 0);
958         EXPECT_EQ(self->run.exception_addr, 0);
959 
960         get_addr_op.value = 0;
961 
962         EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);
963 
964         EXPECT_EQ(get_addr_op.value, MAGIC2);
965         EXPECT_EEXIT(&self->run);
966         EXPECT_EQ(self->run.user_data, 0);
967         EXPECT_EQ(self->run.exception_vector, 0);
968         EXPECT_EQ(self->run.exception_error_code, 0);
969         EXPECT_EQ(self->run.exception_addr, 0);
970 }
971 
972 /*
973  * Test the addition of pages to an initialized enclave by writing to
974  * a page that belongs to the enclave's address space but was not added
975  * during enclave creation.
976  */
977 TEST_F(enclave, augment)
978 {
979         struct encl_op_get_from_addr get_addr_op;
980         struct encl_op_put_to_addr put_addr_op;
981         struct encl_op_eaccept eaccept_op;
982         size_t total_size = 0;
983         void *addr;
984         int i;
985 
986         if (!sgx2_supported())
987                 SKIP(return, "SGX2 not supported");
988 
989         ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));
990 
991         memset(&self->run, 0, sizeof(self->run));
992         self->run.tcs = self->encl.encl_base;
993 
994         for (i = 0; i < self->encl.nr_segments; i++) {
995                 struct encl_segment *seg = &self->encl.segment_tbl[i];
996 
997                 total_size += seg->size;
998         }
999 
1000         /*
1001          * Actual enclave size is expected to be larger than the loaded
1002          * test enclave since enclave size must be a power of 2 in bytes
1003          * and test_encl does not consume it all.
1004          */
1005         EXPECT_LT(total_size + PAGE_SIZE, self->encl.encl_size);
1006 
1007         /*
1008          * Create a memory mapping for the page that will be added. The
1009          * new memory mapping is for one page right after all existing
1010          * mappings.
1011          * The kernel will allow a new mapping with any permissions if it
1012          * falls into the enclave's address range but is not backed
1013          * by existing enclave pages.
1014          */
1015         addr = mmap((void *)self->encl.encl_base + total_size, PAGE_SIZE,
1016                     PROT_READ | PROT_WRITE | PROT_EXEC,
1017                     MAP_SHARED | MAP_FIXED, self->encl.fd, 0);
1018         EXPECT_NE(addr, MAP_FAILED);
1019 
1020         self->run.exception_vector = 0;
1021         self->run.exception_error_code = 0;
1022         self->run.exception_addr = 0;
1023 
1024         /*
1025          * Attempt to write to the new page from within enclave.
1026          * Expected to fail since page is not (yet) part of the enclave.
1027          * The first #PF will trigger the addition of the page to the
1028          * enclave, but since the new page needs an EACCEPT from within the
1029          * enclave before it can be used it would not be possible
1030          * to successfully return to the failing instruction. This is the
1031          * cause of the second #PF captured here having the SGX bit set,
1032          * it is from hardware preventing the page from being used.
1033          */
1034         put_addr_op.value = MAGIC;
1035         put_addr_op.addr = (unsigned long)addr;
1036         put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;
1037 
1038         EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);
1039 
1040         EXPECT_EQ(self->run.function, ERESUME);
1041         EXPECT_EQ(self->run.exception_vector, 14);
1042         EXPECT_EQ(self->run.exception_addr, (unsigned long)addr);
1043 
1044         if (self->run.exception_error_code == 0x6) {
1045                 munmap(addr, PAGE_SIZE);
1046                 SKIP(return, "Kernel does not support adding pages to initialized enclave");
1047         }
1048 
1049         EXPECT_EQ(self->run.exception_error_code, 0x8007);
1050 
1051         self->run.exception_vector = 0;
1052         self->run.exception_error_code = 0;
1053         self->run.exception_addr = 0;
1054 
1055         /* Handle AEX by running EACCEPT from new entry point. */
1056         self->run.tcs = self->encl.encl_base + PAGE_SIZE;
1057 
1058         eaccept_op.epc_addr = self->encl.encl_base + total_size;
1059         eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING;
1060         eaccept_op.ret = 0;
1061         eaccept_op.header.type = ENCL_OP_EACCEPT;
1062 
1063         EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
1064 
1065         EXPECT_EEXIT(&self->run);
1066         EXPECT_EQ(self->run.exception_vector, 0);
1067         EXPECT_EQ(self->run.exception_error_code, 0);
1068         EXPECT_EQ(self->run.exception_addr, 0);
1069         EXPECT_EQ(eaccept_op.ret, 0);
1070 
1071         /* Can now return to main TCS to resume execution. */
1072         self->run.tcs = self->encl.encl_base;
1073 
1074         EXPECT_EQ(vdso_sgx_enter_enclave((unsigned long)&put_addr_op, 0, 0,
1075                                          ERESUME, 0, 0,
1076                                          &self->run),
1077                   0);
1078 
1079         EXPECT_EEXIT(&self->run);
1080         EXPECT_EQ(self->run.exception_vector, 0);
1081         EXPECT_EQ(self->run.exception_error_code, 0);
1082         EXPECT_EQ(self->run.exception_addr, 0);
1083 
1084         /*
1085          * Read memory from newly added page that was just written to,
1086          * confirming that data previously written (MAGIC) is present.
1087          */
1088         get_addr_op.value = 0;
1089         get_addr_op.addr = (unsigned long)addr;
1090         get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;
1091 
1092         EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);
1093 
1094         EXPECT_EQ(get_addr_op.value, MAGIC);
1095         EXPECT_EEXIT(&self->run);
1096         EXPECT_EQ(self->run.exception_vector, 0);
1097         EXPECT_EQ(self->run.exception_error_code, 0);
1098         EXPECT_EQ(self->run.exception_addr, 0);
1099 
1100         munmap(addr, PAGE_SIZE);
1101 }
1102 
1103 /*
1104  * Test for the addition of pages to an initialized enclave via a
1105  * pre-emptive run of EACCEPT on the page to be added.
1106  */
1107 TEST_F(enclave, augment_via_eaccept)
1108 {
1109         struct encl_op_get_from_addr get_addr_op;
1110         struct encl_op_put_to_addr put_addr_op;
1111         struct encl_op_eaccept eaccept_op;
1112         size_t total_size = 0;
1113         void *addr;
1114         int i;
1115 
1116         if (!sgx2_supported())
1117                 SKIP(return, "SGX2 not supported");
1118 
1119         ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));
1120 
1121         memset(&self->run, 0, sizeof(self->run));
1122         self->run.tcs = self->encl.encl_base;
1123 
1124         for (i = 0; i < self->encl.nr_segments; i++) {
1125                 struct encl_segment *seg = &self->encl.segment_tbl[i];
1126 
1127                 total_size += seg->size;
1128         }
1129 
1130         /*
1131          * Actual enclave size is expected to be larger than the loaded
1132          * test enclave since enclave size must be a power of 2 in bytes while
1133          * test_encl does not consume it all.
1134          */
1135         EXPECT_LT(total_size + PAGE_SIZE, self->encl.encl_size);
1136 
1137         /*
1138          * mmap() a page at end of existing enclave to be used for dynamic
1139          * EPC page.
1140          *
1141          * The kernel will allow a new mapping with any permissions if it
1142          * falls into the enclave's address range but is not backed
1143          * by existing enclave pages.
1144          */
1145 
1146         addr = mmap((void *)self->encl.encl_base + total_size, PAGE_SIZE,
1147                     PROT_READ | PROT_WRITE | PROT_EXEC, MAP_SHARED | MAP_FIXED,
1148                     self->encl.fd, 0);
1149         EXPECT_NE(addr, MAP_FAILED);
1150 
1151         self->run.exception_vector = 0;
1152         self->run.exception_error_code = 0;
1153         self->run.exception_addr = 0;
1154 
1155         /*
1156          * Run EACCEPT on the new page to trigger the #PF->EAUG->EACCEPT
1157          * (again without a #PF) flow; all of it is transparent to userspace.
1158          */
1159         eaccept_op.epc_addr = self->encl.encl_base + total_size;
1160         eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING;
1161         eaccept_op.ret = 0;
1162         eaccept_op.header.type = ENCL_OP_EACCEPT;
1163 
1164         EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
1165 
1166         if (self->run.exception_vector == 14 &&
1167             self->run.exception_error_code == 4 &&
1168             self->run.exception_addr == self->encl.encl_base + total_size) {
1169                 munmap(addr, PAGE_SIZE);
1170                 SKIP(return, "Kernel does not support adding pages to initialized enclave");
1171         }
1172 
1173         EXPECT_EEXIT(&self->run);
1174         EXPECT_EQ(self->run.exception_vector, 0);
1175         EXPECT_EQ(self->run.exception_error_code, 0);
1176         EXPECT_EQ(self->run.exception_addr, 0);
1177         EXPECT_EQ(eaccept_op.ret, 0);
1178 
1179         /*
1180          * New page should be accessible from within enclave - attempt to
1181          * write to it.
1182          */
1183         put_addr_op.value = MAGIC;
1184         put_addr_op.addr = (unsigned long)addr;
1185         put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;
1186 
1187         EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);
1188 
1189         EXPECT_EEXIT(&self->run);
1190         EXPECT_EQ(self->run.exception_vector, 0);
1191         EXPECT_EQ(self->run.exception_error_code, 0);
1192         EXPECT_EQ(self->run.exception_addr, 0);
1193 
1194         /*
1195          * Read memory from newly added page that was just written to,
1196          * confirming that data previously written (MAGIC) is present.
1197          */
1198         get_addr_op.value = 0;
1199         get_addr_op.addr = (unsigned long)addr;
1200         get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;
1201 
1202         EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);
1203 
1204         EXPECT_EQ(get_addr_op.value, MAGIC);
1205         EXPECT_EEXIT(&self->run);
1206         EXPECT_EQ(self->run.exception_vector, 0);
1207         EXPECT_EQ(self->run.exception_error_code, 0);
1208         EXPECT_EQ(self->run.exception_addr, 0);
1209 
1210         munmap(addr, PAGE_SIZE);
1211 }
1212 
1213 /*
1214  * SGX2 page type modification test in two phases:
1215  * Phase 1:
1216  * Create a new TCS, consisting of three new pages (stack page with regular
1217  * page type, SSA page with regular page type, and TCS page with TCS page
1218  * type) in an initialized enclave and run a simple workload within it.
1219  * Phase 2:
1220  * Remove the three pages added in phase 1, add a new regular page at the
1221  * same address that previously hosted the TCS page and verify that it can
1222  * be modified.
1223  */
1224 TEST_F(enclave, tcs_create)
1225 {
1226         struct encl_op_init_tcs_page init_tcs_page_op;
1227         struct sgx_enclave_remove_pages remove_ioc;
1228         struct encl_op_get_from_addr get_addr_op;
1229         struct sgx_enclave_modify_types modt_ioc;
1230         struct encl_op_put_to_addr put_addr_op;
1231         struct encl_op_get_from_buf get_buf_op;
1232         struct encl_op_put_to_buf put_buf_op;
1233         void *addr, *tcs, *stack_end, *ssa;
1234         struct encl_op_eaccept eaccept_op;
1235         size_t total_size = 0;
1236         uint64_t val_64;
1237         int errno_save;
1238         int ret, i;
1239 
1240         ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl,
1241                                     _metadata));
1242 
1243         memset(&self->run, 0, sizeof(self->run));
1244         self->run.tcs = self->encl.encl_base;
1245 
1246         /*
1247          * Hardware (SGX2) and kernel support are needed for this test. Start
1248          * by checking that the test has a chance of succeeding.
1249          */
1250         memset(&modt_ioc, 0, sizeof(modt_ioc));
1251         ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
1252 
1253         if (ret == -1) {
1254                 if (errno == ENOTTY)
1255                         SKIP(return,
1256                              "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
1257                 else if (errno == ENODEV)
1258                         SKIP(return, "System does not support SGX2");
1259         }
1260 
1261         /*
1262          * Invalid parameters were provided during sanity check,
1263          * expect command to fail.
1264          */
1265         EXPECT_EQ(ret, -1);
1266 
1267         /*
1268          * Add three regular pages via EAUG: one will be the TCS stack, one
1269          * will be the TCS SSA, and one will be the new TCS. The stack and
1270          * SSA will remain regular pages, while the TCS page will need its
1271          * type changed after being populated with the needed data.
1272          */
1273         for (i = 0; i < self->encl.nr_segments; i++) {
1274                 struct encl_segment *seg = &self->encl.segment_tbl[i];
1275 
1276                 total_size += seg->size;
1277         }
1278 
1279         /*
1280          * Actual enclave size is expected to be larger than the loaded
1281          * test enclave since enclave size must be a power of 2 in bytes while
1282          * test_encl does not consume it all.
1283          */
1284         EXPECT_LT(total_size + 3 * PAGE_SIZE, self->encl.encl_size);
1285 
1286         /*
1287          * mmap() three pages at end of existing enclave to be used for the
1288          * three new pages.
1289          */
1290         addr = mmap((void *)self->encl.encl_base + total_size, 3 * PAGE_SIZE,
1291                     PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED,
1292                     self->encl.fd, 0);
1293         EXPECT_NE(addr, MAP_FAILED);
1294 
1295         self->run.exception_vector = 0;
1296         self->run.exception_error_code = 0;
1297         self->run.exception_addr = 0;
1298 
1299         stack_end = (void *)self->encl.encl_base + total_size;
1300         tcs = (void *)self->encl.encl_base + total_size + PAGE_SIZE;
1301         ssa = (void *)self->encl.encl_base + total_size + 2 * PAGE_SIZE;
1302 
1303         /*
1304          * Run EACCEPT on each new page to trigger the
1305          * EACCEPT->(#PF)->EAUG->EACCEPT(again without a #PF) flow.
1306          */
1307 
1308         eaccept_op.epc_addr = (unsigned long)stack_end;
1309         eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING;
1310         eaccept_op.ret = 0;
1311         eaccept_op.header.type = ENCL_OP_EACCEPT;
1312 
1313         EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
1314 
1315         if (self->run.exception_vector == 14 &&
1316             self->run.exception_error_code == 4 &&
1317             self->run.exception_addr == (unsigned long)stack_end) {
1318                 munmap(addr, 3 * PAGE_SIZE);
1319                 SKIP(return, "Kernel does not support adding pages to initialized enclave");
1320         }
1321 
1322         EXPECT_EEXIT(&self->run);
1323         EXPECT_EQ(self->run.exception_vector, 0);
1324         EXPECT_EQ(self->run.exception_error_code, 0);
1325         EXPECT_EQ(self->run.exception_addr, 0);
1326         EXPECT_EQ(eaccept_op.ret, 0);
1327 
1328         eaccept_op.epc_addr = (unsigned long)ssa;
1329 
1330         EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
1331 
1332         EXPECT_EEXIT(&self->run);
1333         EXPECT_EQ(self->run.exception_vector, 0);
1334         EXPECT_EQ(self->run.exception_error_code, 0);
1335         EXPECT_EQ(self->run.exception_addr, 0);
1336         EXPECT_EQ(eaccept_op.ret, 0);
1337 
1338         eaccept_op.epc_addr = (unsigned long)tcs;
1339 
1340         EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
1341 
1342         EXPECT_EEXIT(&self->run);
1343         EXPECT_EQ(self->run.exception_vector, 0);
1344         EXPECT_EQ(self->run.exception_error_code, 0);
1345         EXPECT_EQ(self->run.exception_addr, 0);
1346         EXPECT_EQ(eaccept_op.ret, 0);
1347 
1348         /*
1349          * Three new pages have been added to the enclave. Now populate the
1350          * TCS page with the needed data. This must be done from within the
1351          * enclave, so provide the data-population function with the entry
1352          * point and SSA offset it needs.
1353          */
1354 
1355         /*
1356          * The new TCS will use the "encl_dyn_entry" entrypoint, which
1357          * expects the stack to begin in the page before the TCS page.
1358          */
1359         val_64 = encl_get_entry(&self->encl, "encl_dyn_entry");
1360         EXPECT_NE(val_64, 0);
1361 
1362         init_tcs_page_op.tcs_page = (unsigned long)tcs;
1363         init_tcs_page_op.ssa = (unsigned long)total_size + 2 * PAGE_SIZE;
1364         init_tcs_page_op.entry = val_64;
1365         init_tcs_page_op.header.type = ENCL_OP_INIT_TCS_PAGE;
1366 
1367         EXPECT_EQ(ENCL_CALL(&init_tcs_page_op, &self->run, true), 0);
1368 
1369         EXPECT_EEXIT(&self->run);
1370         EXPECT_EQ(self->run.exception_vector, 0);
1371         EXPECT_EQ(self->run.exception_error_code, 0);
1372         EXPECT_EQ(self->run.exception_addr, 0);
1373 
1374         /* Change TCS page type to TCS. */
1375         memset(&modt_ioc, 0, sizeof(modt_ioc));
1376 
1377         modt_ioc.offset = total_size + PAGE_SIZE;
1378         modt_ioc.length = PAGE_SIZE;
1379         modt_ioc.page_type = SGX_PAGE_TYPE_TCS;
1380 
1381         ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
1382         errno_save = ret == -1 ? errno : 0;
1383 
1384         EXPECT_EQ(ret, 0);
1385         EXPECT_EQ(errno_save, 0);
1386         EXPECT_EQ(modt_ioc.result, 0);
1387         EXPECT_EQ(modt_ioc.count, 4096);
1388 
1389         /* EACCEPT new TCS page from enclave. */
1390         eaccept_op.epc_addr = (unsigned long)tcs;
1391         eaccept_op.flags = SGX_SECINFO_TCS | SGX_SECINFO_MODIFIED;
1392         eaccept_op.ret = 0;
1393         eaccept_op.header.type = ENCL_OP_EACCEPT;
1394 
1395         EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
1396 
1397         EXPECT_EEXIT(&self->run);
1398         EXPECT_EQ(self->run.exception_vector, 0);
1399         EXPECT_EQ(self->run.exception_error_code, 0);
1400         EXPECT_EQ(self->run.exception_addr, 0);
1401         EXPECT_EQ(eaccept_op.ret, 0);
1402 
1403         /* Run workload from new TCS. */
1404         self->run.tcs = (unsigned long)tcs;
1405 
1406         /*
1407          * Simple workload to write to data buffer and read value back.
1408          */
1409         put_buf_op.header.type = ENCL_OP_PUT_TO_BUFFER;
1410         put_buf_op.value = MAGIC;
1411 
1412         EXPECT_EQ(ENCL_CALL(&put_buf_op, &self->run, true), 0);
1413 
1414         EXPECT_EEXIT(&self->run);
1415         EXPECT_EQ(self->run.exception_vector, 0);
1416         EXPECT_EQ(self->run.exception_error_code, 0);
1417         EXPECT_EQ(self->run.exception_addr, 0);
1418 
1419         get_buf_op.header.type = ENCL_OP_GET_FROM_BUFFER;
1420         get_buf_op.value = 0;
1421 
1422         EXPECT_EQ(ENCL_CALL(&get_buf_op, &self->run, true), 0);
1423 
1424         EXPECT_EQ(get_buf_op.value, MAGIC);
1425         EXPECT_EEXIT(&self->run);
1426         EXPECT_EQ(self->run.exception_vector, 0);
1427         EXPECT_EQ(self->run.exception_error_code, 0);
1428         EXPECT_EQ(self->run.exception_addr, 0);
1429 
1430         /*
1431          * Phase 2 of test:
1432          * Remove pages associated with new TCS, create a regular page
1433          * where TCS page used to be and verify it can be used as a regular
1434          * page.
1435          */
1436 
1437         /* Start page removal by requesting change of page type to PT_TRIM. */
1438         memset(&modt_ioc, 0, sizeof(modt_ioc));
1439 
1440         modt_ioc.offset = total_size;
1441         modt_ioc.length = 3 * PAGE_SIZE;
1442         modt_ioc.page_type = SGX_PAGE_TYPE_TRIM;
1443 
1444         ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
1445         errno_save = ret == -1 ? errno : 0;
1446 
1447         EXPECT_EQ(ret, 0);
1448         EXPECT_EQ(errno_save, 0);
1449         EXPECT_EQ(modt_ioc.result, 0);
1450         EXPECT_EQ(modt_ioc.count, 3 * PAGE_SIZE);
1451 
1452         /*
1453          * Enter the enclave via TCS #1 and approve page removal by sending
1454          * EACCEPT for each of the three removed pages.
1455          */
1456         self->run.tcs = self->encl.encl_base;
1457 
1458         eaccept_op.epc_addr = (unsigned long)stack_end;
1459         eaccept_op.flags = SGX_SECINFO_TRIM | SGX_SECINFO_MODIFIED;
1460         eaccept_op.ret = 0;
1461         eaccept_op.header.type = ENCL_OP_EACCEPT;
1462 
1463         EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
1464 
1465         EXPECT_EEXIT(&self->run);
1466         EXPECT_EQ(self->run.exception_vector, 0);
1467         EXPECT_EQ(self->run.exception_error_code, 0);
1468         EXPECT_EQ(self->run.exception_addr, 0);
1469         EXPECT_EQ(eaccept_op.ret, 0);
1470 
1471         eaccept_op.epc_addr = (unsigned long)tcs;
1472         eaccept_op.ret = 0;
1473 
1474         EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
1475 
1476         EXPECT_EEXIT(&self->run);
1477         EXPECT_EQ(self->run.exception_vector, 0);
1478         EXPECT_EQ(self->run.exception_error_code, 0);
1479         EXPECT_EQ(self->run.exception_addr, 0);
1480         EXPECT_EQ(eaccept_op.ret, 0);
1481 
1482         eaccept_op.epc_addr = (unsigned long)ssa;
1483         eaccept_op.ret = 0;
1484 
1485         EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
1486 
1487         EXPECT_EEXIT(&self->run);
1488         EXPECT_EQ(self->run.exception_vector, 0);
1489         EXPECT_EQ(self->run.exception_error_code, 0);
1490         EXPECT_EQ(self->run.exception_addr, 0);
1491         EXPECT_EQ(eaccept_op.ret, 0);
1492 
1493         /* Send final ioctl() to complete page removal. */
1494         memset(&remove_ioc, 0, sizeof(remove_ioc));
1495 
1496         remove_ioc.offset = total_size;
1497         remove_ioc.length = 3 * PAGE_SIZE;
1498 
1499         ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &remove_ioc);
1500         errno_save = ret == -1 ? errno : 0;
1501 
1502         EXPECT_EQ(ret, 0);
1503         EXPECT_EQ(errno_save, 0);
1504         EXPECT_EQ(remove_ioc.count, 3 * PAGE_SIZE);
1505 
1506         /*
1507          * Enter the enclave via TCS #1 and access the location where TCS #3 used
1508          * to be, triggering the dynamic addition of a regular page there.
1509          */
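             /*
              * The EACCEPT below provides that access: the resulting page
              * fault makes the kernel add a pending regular read/write page,
              * and the secinfo flags used here must match those attributes
              * for EACCEPT to succeed.
              */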
1510         eaccept_op.epc_addr = (unsigned long)tcs;
1511         eaccept_op.flags = SGX_SECINFO_R | SGX_SECINFO_W | SGX_SECINFO_REG | SGX_SECINFO_PENDING;
1512         eaccept_op.ret = 0;
1513         eaccept_op.header.type = ENCL_OP_EACCEPT;
1514 
1515         EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
1516 
1517         EXPECT_EEXIT(&self->run);
1518         EXPECT_EQ(self->run.exception_vector, 0);
1519         EXPECT_EQ(self->run.exception_error_code, 0);
1520         EXPECT_EQ(self->run.exception_addr, 0);
1521         EXPECT_EQ(eaccept_op.ret, 0);
1522 
1523         /*
1524          * New page should be accessible from within enclave - write to it.
1525          */
1526         put_addr_op.value = MAGIC;
1527         put_addr_op.addr = (unsigned long)tcs;
1528         put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;
1529 
1530         EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);
1531 
1532         EXPECT_EEXIT(&self->run);
1533         EXPECT_EQ(self->run.exception_vector, 0);
1534         EXPECT_EQ(self->run.exception_error_code, 0);
1535         EXPECT_EQ(self->run.exception_addr, 0);
1536 
1537         /*
1538          * Read memory from newly added page that was just written to,
1539          * confirming that data previously written (MAGIC) is present.
1540          */
1541         get_addr_op.value = 0;
1542         get_addr_op.addr = (unsigned long)tcs;
1543         get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;
1544 
1545         EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);
1546 
1547         EXPECT_EQ(get_addr_op.value, MAGIC);
1548         EXPECT_EEXIT(&self->run);
1549         EXPECT_EQ(self->run.exception_vector, 0);
1550         EXPECT_EQ(self->run.exception_error_code, 0);
1551         EXPECT_EQ(self->run.exception_addr, 0);
1552 
1553         munmap(addr, 3 * PAGE_SIZE);
1554 }
1555 
1556 /*
1557  * Ensure sane behavior when a user requests page removal, does not run
1558  * EACCEPT from within the enclave, but still attempts to finalize the page
1559  * removal with the SGX_IOC_ENCLAVE_REMOVE_PAGES ioctl(). The latter should
1560  * fail because the removal was never EACCEPTed from within the enclave.
1561  */
1562 TEST_F(enclave, remove_added_page_no_eaccept)
1563 {
1564         struct sgx_enclave_remove_pages remove_ioc;
1565         struct encl_op_get_from_addr get_addr_op;
1566         struct sgx_enclave_modify_types modt_ioc;
1567         struct encl_op_put_to_addr put_addr_op;
1568         unsigned long data_start;
1569         int ret, errno_save;
1570 
1571         ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));
1572 
1573         memset(&self->run, 0, sizeof(self->run));
1574         self->run.tcs = self->encl.encl_base;
1575 
1576         /*
1577          * Hardware (SGX2) and kernel support are needed for this test. Start
1578          * by checking that the test has a chance of succeeding.
1579          */
1580         memset(&modt_ioc, 0, sizeof(modt_ioc));
1581         ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
1582 
1583         if (ret == -1) {
1584                 if (errno == ENOTTY)
1585                         SKIP(return,
1586                              "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
1587                 else if (errno == ENODEV)
1588                         SKIP(return, "System does not support SGX2");
1589         }
1590 
1591         /*
1592          * Invalid parameters were provided during the sanity check;
1593          * expect the command to fail.
1594          */
1595         EXPECT_EQ(ret, -1);
1596 
1597         /*
1598          * The page that will be removed is the second data page in the .data
1599          * segment. It forms part of the local encl_buffer within the
1600          * enclave.
1601          */
1602         data_start = self->encl.encl_base +
1603                      encl_get_data_offset(&self->encl) + PAGE_SIZE;
1604 
1605         /*
1606          * Sanity check that the page at @data_start is writable before
1607          * removing it.
1608          *
1609          * Start by writing MAGIC to the test page.
1610          */
1611         put_addr_op.value = MAGIC;
1612         put_addr_op.addr = data_start;
1613         put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;
1614 
1615         EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);
1616 
1617         EXPECT_EEXIT(&self->run);
1618         EXPECT_EQ(self->run.exception_vector, 0);
1619         EXPECT_EQ(self->run.exception_error_code, 0);
1620         EXPECT_EQ(self->run.exception_addr, 0);
1621 
1622         /*
1623          * Read memory that was just written to, confirming that data
1624          * previously written (MAGIC) is present.
1625          */
1626         get_addr_op.value = 0;
1627         get_addr_op.addr = data_start;
1628         get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;
1629 
1630         EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);
1631 
1632         EXPECT_EQ(get_addr_op.value, MAGIC);
1633         EXPECT_EEXIT(&self->run);
1634         EXPECT_EQ(self->run.exception_vector, 0);
1635         EXPECT_EQ(self->run.exception_error_code, 0);
1636         EXPECT_EQ(self->run.exception_addr, 0);
1637 
1638         /* Start page removal by requesting change of page type to PT_TRIM */
1639         memset(&modt_ioc, 0, sizeof(modt_ioc));
1640 
1641         modt_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
1642         modt_ioc.length = PAGE_SIZE;
1643         modt_ioc.page_type = SGX_PAGE_TYPE_TRIM;
1644 
1645         ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
1646         errno_save = ret == -1 ? errno : 0;
1647 
1648         EXPECT_EQ(ret, 0);
1649         EXPECT_EQ(errno_save, 0);
1650         EXPECT_EQ(modt_ioc.result, 0);
1651         EXPECT_EQ(modt_ioc.count, 4096);
1652 
1653         /* Skip EACCEPT */
1654 
1655         /* Send final ioctl() to complete page removal */
1656         memset(&remove_ioc, 0, sizeof(remove_ioc));
1657 
1658         remove_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
1659         remove_ioc.length = PAGE_SIZE;
1660 
1661         ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &remove_ioc);
1662         errno_save = ret == -1 ? errno : 0;
1663 
1664         /* Operation not permitted since EACCEPT was omitted. */
1665         EXPECT_EQ(ret, -1);
1666         EXPECT_EQ(errno_save, EPERM);
1667         EXPECT_EQ(remove_ioc.count, 0);
1668 }
1669 
1670 /*
1671  * Request enclave page removal, but instead of correctly following up with
1672  * EACCEPT, attempt to read the page from within the enclave.
1673  */
1674 TEST_F(enclave, remove_added_page_invalid_access)
1675 {
1676         struct encl_op_get_from_addr get_addr_op;
1677         struct encl_op_put_to_addr put_addr_op;
1678         struct sgx_enclave_modify_types ioc;
1679         unsigned long data_start;
1680         int ret, errno_save;
1681 
1682         ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));
1683 
1684         memset(&self->run, 0, sizeof(self->run));
1685         self->run.tcs = self->encl.encl_base;
1686 
1687         /*
1688          * Hardware (SGX2) and kernel support are needed for this test. Start
1689          * by checking that the test has a chance of succeeding.
1690          */
1691         memset(&ioc, 0, sizeof(ioc));
1692         ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &ioc);
1693 
1694         if (ret == -1) {
1695                 if (errno == ENOTTY)
1696                         SKIP(return,
1697                              "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
1698                 else if (errno == ENODEV)
1699                         SKIP(return, "System does not support SGX2");
1700         }
1701 
1702         /*
1703          * Invalid parameters were provided during the sanity check;
1704          * expect the command to fail.
1705          */
1706         EXPECT_EQ(ret, -1);
1707 
1708         /*
1709          * The page that will be removed is the second data page in the .data
1710          * segment. It forms part of the local encl_buffer within the
1711          * enclave.
1712          */
1713         data_start = self->encl.encl_base +
1714                      encl_get_data_offset(&self->encl) + PAGE_SIZE;
1715 
1716         /*
1717          * Sanity check that the page at @data_start is writable before
1718          * removing it.
1719          *
1720          * Start by writing MAGIC to the test page.
1721          */
1722         put_addr_op.value = MAGIC;
1723         put_addr_op.addr = data_start;
1724         put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;
1725 
1726         EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);
1727 
1728         EXPECT_EEXIT(&self->run);
1729         EXPECT_EQ(self->run.exception_vector, 0);
1730         EXPECT_EQ(self->run.exception_error_code, 0);
1731         EXPECT_EQ(self->run.exception_addr, 0);
1732 
1733         /*
1734          * Read memory that was just written to, confirming that data
1735          * previously written (MAGIC) is present.
1736          */
1737         get_addr_op.value = 0;
1738         get_addr_op.addr = data_start;
1739         get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;
1740 
1741         EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);
1742 
1743         EXPECT_EQ(get_addr_op.value, MAGIC);
1744         EXPECT_EEXIT(&self->run);
1745         EXPECT_EQ(self->run.exception_vector, 0);
1746         EXPECT_EQ(self->run.exception_error_code, 0);
1747         EXPECT_EQ(self->run.exception_addr, 0);
1748 
1749         /* Start page removal by requesting change of page type to PT_TRIM. */
1750         memset(&ioc, 0, sizeof(ioc));
1751 
1752         ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
1753         ioc.length = PAGE_SIZE;
1754         ioc.page_type = SGX_PAGE_TYPE_TRIM;
1755 
1756         ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &ioc);
1757         errno_save = ret == -1 ? errno : 0;
1758 
1759         EXPECT_EQ(ret, 0);
1760         EXPECT_EQ(errno_save, 0);
1761         EXPECT_EQ(ioc.result, 0);
1762         EXPECT_EQ(ioc.count, 4096);
1763 
1764         /*
1765          * Read from the page whose removal was just requested.
1766          */
1767         get_addr_op.value = 0;
1768 
1769         EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);
1770 
1771         /*
1772          * From the kernel's perspective the page is present, but according to
1773          * SGX it must not be accessible, so a #PF with the SGX bit set is
1774          * expected.
1775          */
1776 
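             /*
              * Vector 14 is #PF; error code 0x8005 has the present (bit 0),
              * user-mode (bit 2) and SGX (bit 15) bits set, with the write
              * bit (bit 1) clear, consistent with a faulting read.
              */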
1777         EXPECT_EQ(self->run.function, ERESUME);
1778         EXPECT_EQ(self->run.exception_vector, 14);
1779         EXPECT_EQ(self->run.exception_error_code, 0x8005);
1780         EXPECT_EQ(self->run.exception_addr, data_start);
1781 }
1782 
1783 /*
1784  * Request enclave page removal and correctly follow up with EACCEPT, but do
1785  * not complete the removal with the ioctl(); instead, attempt to read the
1786  * removed page from within the enclave.
1787  */
1788 TEST_F(enclave, remove_added_page_invalid_access_after_eaccept)
1789 {
1790         struct encl_op_get_from_addr get_addr_op;
1791         struct encl_op_put_to_addr put_addr_op;
1792         struct sgx_enclave_modify_types ioc;
1793         struct encl_op_eaccept eaccept_op;
1794         unsigned long data_start;
1795         int ret, errno_save;
1796 
1797         ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));
1798 
1799         memset(&self->run, 0, sizeof(self->run));
1800         self->run.tcs = self->encl.encl_base;
1801 
1802         /*
1803          * Hardware (SGX2) and kernel support are needed for this test. Start
1804          * by checking that the test has a chance of succeeding.
1805          */
1806         memset(&ioc, 0, sizeof(ioc));
1807         ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &ioc);
1808 
1809         if (ret == -1) {
1810                 if (errno == ENOTTY)
1811                         SKIP(return,
1812                              "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
1813                 else if (errno == ENODEV)
1814                         SKIP(return, "System does not support SGX2");
1815         }
1816 
1817         /*
1818          * Invalid parameters were provided during the sanity check;
1819          * expect the command to fail.
1820          */
1821         EXPECT_EQ(ret, -1);
1822 
1823         /*
1824          * The page that will be removed is the second data page in the .data
1825          * segment. It forms part of the local encl_buffer within the
1826          * enclave.
1827          */
1828         data_start = self->encl.encl_base +
1829                      encl_get_data_offset(&self->encl) + PAGE_SIZE;
1830 
1831         /*
1832          * Sanity check that the page at @data_start is writable before
1833          * removing it.
1834          *
1835          * Start by writing MAGIC to the test page.
1836          */
1837         put_addr_op.value = MAGIC;
1838         put_addr_op.addr = data_start;
1839         put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;
1840 
1841         EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);
1842 
1843         EXPECT_EEXIT(&self->run);
1844         EXPECT_EQ(self->run.exception_vector, 0);
1845         EXPECT_EQ(self->run.exception_error_code, 0);
1846         EXPECT_EQ(self->run.exception_addr, 0);
1847 
1848         /*
1849          * Read memory that was just written to, confirming that data
1850          * previously written (MAGIC) is present.
1851          */
1852         get_addr_op.value = 0;
1853         get_addr_op.addr = data_start;
1854         get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;
1855 
1856         EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);
1857 
1858         EXPECT_EQ(get_addr_op.value, MAGIC);
1859         EXPECT_EEXIT(&self->run);
1860         EXPECT_EQ(self->run.exception_vector, 0);
1861         EXPECT_EQ(self->run.exception_error_code, 0);
1862         EXPECT_EQ(self->run.exception_addr, 0);
1863 
1864         /* Start page removal by requesting change of page type to PT_TRIM. */
1865         memset(&ioc, 0, sizeof(ioc));
1866 
1867         ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
1868         ioc.length = PAGE_SIZE;
1869         ioc.page_type = SGX_PAGE_TYPE_TRIM;
1870 
1871         ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &ioc);
1872         errno_save = ret == -1 ? errno : 0;
1873 
1874         EXPECT_EQ(ret, 0);
1875         EXPECT_EQ(errno_save, 0);
1876         EXPECT_EQ(ioc.result, 0);
1877         EXPECT_EQ(ioc.count, 4096);
1878 
1879         eaccept_op.epc_addr = (unsigned long)data_start;
1880         eaccept_op.ret = 0;
1881         eaccept_op.flags = SGX_SECINFO_TRIM | SGX_SECINFO_MODIFIED;
1882         eaccept_op.header.type = ENCL_OP_EACCEPT;
1883 
1884         EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
1885 
1886         EXPECT_EEXIT(&self->run);
1887         EXPECT_EQ(self->run.exception_vector, 0);
1888         EXPECT_EQ(self->run.exception_error_code, 0);
1889         EXPECT_EQ(self->run.exception_addr, 0);
1890         EXPECT_EQ(eaccept_op.ret, 0);
1891 
1892         /* Skip ioctl() to remove page. */
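             /*
              * At this point the page is trimmed and EACCEPTed; only the
              * SGX_IOC_ENCLAVE_REMOVE_PAGES ioctl() remains, and the enclave
              * may no longer access the page.
              */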
1893 
1894         /*
1895          * Read from the page that was trimmed and EACCEPTed but not removed.
1896          */
1897         get_addr_op.value = 0;
1898 
1899         EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);
1900 
1901         /*
1902          * From the kernel's perspective the page is present, but according to
1903          * SGX it must not be accessible, so a #PF with the SGX bit set is
1904          * expected.
1905          */
1906 
1907         EXPECT_EQ(self->run.function, ERESUME);
1908         EXPECT_EQ(self->run.exception_vector, 14);
1909         EXPECT_EQ(self->run.exception_error_code, 0x8005);
1910         EXPECT_EQ(self->run.exception_addr, data_start);
1911 }
1912 
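     /*
      * Remove an enclave page that is never written to or read from within
      * the enclave before its removal: change its type to PT_TRIM, approve
      * with EACCEPT from within the enclave, and complete the removal with
      * SGX_IOC_ENCLAVE_REMOVE_PAGES.
      */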
1913 TEST_F(enclave, remove_untouched_page)
1914 {
1915         struct sgx_enclave_remove_pages remove_ioc;
1916         struct sgx_enclave_modify_types modt_ioc;
1917         struct encl_op_eaccept eaccept_op;
1918         unsigned long data_start;
1919         int ret, errno_save;
1920 
1921         ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));
1922 
1923         /*
1924          * Hardware (SGX2) and kernel support are needed for this test. Start
1925          * by checking that the test has a chance of succeeding.
1926          */
1927         memset(&modt_ioc, 0, sizeof(modt_ioc));
1928         ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
1929 
1930         if (ret == -1) {
1931                 if (errno == ENOTTY)
1932                         SKIP(return,
1933                              "Kernel does not support SGX_IOC_ENCLAVE_MODIFY_TYPES ioctl()");
1934                 else if (errno == ENODEV)
1935                         SKIP(return, "System does not support SGX2");
1936         }
1937 
1938         /*
1939          * Invalid parameters were provided during the sanity check;
1940          * expect the command to fail.
1941          */
1942         EXPECT_EQ(ret, -1);
1943 
1944         /* SGX2 is supported by the kernel and hardware; the test can proceed. */
1945         memset(&self->run, 0, sizeof(self->run));
1946         self->run.tcs = self->encl.encl_base;
1947 
1948         data_start = self->encl.encl_base +
1949                          encl_get_data_offset(&self->encl) + PAGE_SIZE;
1950 
1951         memset(&modt_ioc, 0, sizeof(modt_ioc));
1952 
1953         modt_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
1954         modt_ioc.length = PAGE_SIZE;
1955         modt_ioc.page_type = SGX_PAGE_TYPE_TRIM;
1956         ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_MODIFY_TYPES, &modt_ioc);
1957         errno_save = ret == -1 ? errno : 0;
1958 
1959         EXPECT_EQ(ret, 0);
1960         EXPECT_EQ(errno_save, 0);
1961         EXPECT_EQ(modt_ioc.result, 0);
1962         EXPECT_EQ(modt_ioc.count, 4096);
1963 
1964         /*
1965          * Enter the enclave via TCS #1 and approve the page removal by sending
1966          * EACCEPT for the page being removed.
1967          */
1968 
1969         eaccept_op.epc_addr = data_start;
1970         eaccept_op.flags = SGX_SECINFO_TRIM | SGX_SECINFO_MODIFIED;
1971         eaccept_op.ret = 0;
1972         eaccept_op.header.type = ENCL_OP_EACCEPT;
1973 
1974         EXPECT_EQ(ENCL_CALL(&eaccept_op, &self->run, true), 0);
1975         EXPECT_EEXIT(&self->run);
1976         EXPECT_EQ(self->run.exception_vector, 0);
1977         EXPECT_EQ(self->run.exception_error_code, 0);
1978         EXPECT_EQ(self->run.exception_addr, 0);
1979         EXPECT_EQ(eaccept_op.ret, 0);
1980 
1981         memset(&remove_ioc, 0, sizeof(remove_ioc));
1982 
1983         remove_ioc.offset = encl_get_data_offset(&self->encl) + PAGE_SIZE;
1984         remove_ioc.length = PAGE_SIZE;
1985         ret = ioctl(self->encl.fd, SGX_IOC_ENCLAVE_REMOVE_PAGES, &remove_ioc);
1986         errno_save = ret == -1 ? errno : 0;
1987 
1988         EXPECT_EQ(ret, 0);
1989         EXPECT_EQ(errno_save, 0);
1990         EXPECT_EQ(remove_ioc.count, 4096);
1991 }
1992 
1993 TEST_HARNESS_MAIN
1994 
