
TOMOYO Linux Cross Reference
Linux/tools/testing/selftests/kvm/s390x/tprot.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Test TEST PROTECTION emulation.
 *
 * Copyright IBM Corp. 2021
 */
#include <sys/mman.h>
#include "test_util.h"
#include "kvm_util.h"
#include "kselftest.h"
#include "ucall_common.h"

#define PAGE_SHIFT 12
#define PAGE_SIZE (1 << PAGE_SHIFT)
#define CR0_FETCH_PROTECTION_OVERRIDE   (1UL << (63 - 38))
#define CR0_STORAGE_PROTECTION_OVERRIDE (1UL << (63 - 39))
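/*
 * Control-register bits are numbered from the most significant bit, hence
 * the (63 - n) shifts above. CR0 bit 38 is the fetch-protection-override
 * control (fetch protection is ignored for the first 2048 bytes of the
 * address space) and bit 39 is the storage-protection-override control
 * (key-controlled protection is ignored for pages with storage key 9).
 */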

static __aligned(PAGE_SIZE) uint8_t pages[2][PAGE_SIZE];
static uint8_t *const page_store_prot = pages[0];
static uint8_t *const page_fetch_prot = pages[1];

/* A nonzero return value indicates that the address is not mapped */
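/*
 * LRA (LOAD REAL ADDRESS) sets condition code 0 only if the address can be
 * translated, so SSKE (SET STORAGE KEY EXTENDED) is reached only for mapped
 * pages; otherwise LLILL loads 1 into not_mapped and the storage key is left
 * unchanged. In the key byte, the access-control bits occupy the high nibble
 * and 0x08 is the fetch-protection bit, so 0x10 means key 1 and 0x98 means
 * key 9 with fetch protection enabled.
 */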
static int set_storage_key(void *addr, uint8_t key)
{
        int not_mapped = 0;

        asm volatile (
                       "lra     %[addr], 0(0,%[addr])\n"
                "       jz      0f\n"
                "       llill   %[not_mapped],1\n"
                "       j       1f\n"
                "0:     sske    %[key], %[addr]\n"
                "1:"
                : [addr] "+&a" (addr), [not_mapped] "+r" (not_mapped)
                : [key] "r" (key)
                : "cc"
        );
        return -not_mapped;
}

enum permission {
        READ_WRITE = 0,
        READ = 1,
        RW_PROTECTED = 2,
        TRANSL_UNAVAIL = 3,
};

static enum permission test_protection(void *addr, uint8_t key)
{
        uint64_t mask;

        asm volatile (
                       "tprot   %[addr], 0(%[key])\n"
                "       ipm     %[mask]\n"
                : [mask] "=r" (mask)
                : [addr] "Q" (*(char *)addr),
                  [key] "a" (key)
                : "cc"
        );

        return (enum permission)(mask >> 28);
}
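/*
 * TPROT sets the condition code to 0 (fetching and storing permitted),
 * 1 (only fetching permitted), 2 (neither permitted) or 3 (translation not
 * available), which is exactly the encoding of enum permission above. The
 * access key is specified in bits 56-59 of the second-operand address, which
 * is why the tests below pass values such as 0x10 for access key 1. IPM
 * copies the condition code into bits 34-35 of the result register, so
 * shifting right by 28 leaves it in the low-order bits of the return value.
 */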

enum stage {
        STAGE_INIT_SIMPLE,
        TEST_SIMPLE,
        STAGE_INIT_FETCH_PROT_OVERRIDE,
        TEST_FETCH_PROT_OVERRIDE,
        TEST_STORAGE_PROT_OVERRIDE,
        STAGE_END       /* must be the last entry (it's the number of tests) */
};
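/*
 * The guest and the host step through these stages in lockstep: the guest
 * reports each stage with GUEST_SYNC() and the host waits for it with
 * HOST_SYNC(). Since STAGE_END equals the number of preceding entries, it
 * also serves as the TAP test plan passed to ksft_set_plan().
 */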

struct test {
        enum stage stage;
        void *addr;
        uint8_t key;
        enum permission expected;
} tests[] = {
        /*
         * We perform each test in the array by executing TEST PROTECTION on
         * the specified addr with the specified key and checking whether the
         * returned permissions match the expected value.
         * Both guest and host cooperate to set up the required test conditions.
         * A central condition is that the page targeted by addr has to be
         * DAT-protected in the host mappings in order for KVM to emulate the
         * TEST PROTECTION instruction.
         * Since the page tables are shared, the host uses mprotect to achieve
         * this.
         *
         * Tests resulting in RW_PROTECTED/TRANSL_UNAVAIL will be interpreted
         * by SIE, not KVM, but there is no harm in testing them as well.
         * See Enhanced Suppression-on-Protection Facilities in the
         * Interpretive-Execution Mode.
         */
        /*
         * guest: set storage key of page_store_prot to 1
         *        storage key of page_fetch_prot to 9 and enable
         *        protection for it
         * STAGE_INIT_SIMPLE
         * host: write protect both via mprotect
         */
        /* access key 0 matches any storage key -> RW */
        { TEST_SIMPLE, page_store_prot, 0x00, READ_WRITE },
        /* access key matches storage key -> RW */
        { TEST_SIMPLE, page_store_prot, 0x10, READ_WRITE },
        /* mismatched keys, but no fetch protection -> RO */
        { TEST_SIMPLE, page_store_prot, 0x20, READ },
        /* access key 0 matches any storage key -> RW */
        { TEST_SIMPLE, page_fetch_prot, 0x00, READ_WRITE },
        /* access key matches storage key -> RW */
        { TEST_SIMPLE, page_fetch_prot, 0x90, READ_WRITE },
        /* mismatched keys, fetch protection -> inaccessible */
        { TEST_SIMPLE, page_fetch_prot, 0x10, RW_PROTECTED },
        /* page 0 not mapped yet -> translation not available */
        { TEST_SIMPLE, (void *)0x00, 0x10, TRANSL_UNAVAIL },
        /*
         * host: try to map page 0
         * guest: set storage key of page 0 to 9 and enable fetch protection
         * STAGE_INIT_FETCH_PROT_OVERRIDE
         * host: write protect page 0
         *       enable fetch protection override
         */
        /* mismatched keys, fetch protection, but override applies -> RO */
        { TEST_FETCH_PROT_OVERRIDE, (void *)0x00, 0x10, READ },
        /* mismatched keys, fetch protection, override applies to 0-2048 only -> inaccessible */
        { TEST_FETCH_PROT_OVERRIDE, (void *)2049, 0x10, RW_PROTECTED },
        /*
         * host: enable storage protection override
         */
        /* mismatched keys, but override applies (storage key 9) -> RW */
        { TEST_STORAGE_PROT_OVERRIDE, page_fetch_prot, 0x10, READ_WRITE },
        /* mismatched keys, no fetch protection, override doesn't apply -> RO */
        { TEST_STORAGE_PROT_OVERRIDE, page_store_prot, 0x20, READ },
        /* mismatched keys, but override applies (storage key 9) -> RW */
        { TEST_STORAGE_PROT_OVERRIDE, (void *)2049, 0x10, READ_WRITE },
        /* end marker */
        { STAGE_END, 0, 0, 0 },
};

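/*
 * Run all consecutive tests[] entries that share the stage of the current
 * entry (the STAGE_END sentinel terminates the last group) and return that
 * stage so guest_code() can report it to the host.
 */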
static enum stage perform_next_stage(int *i, bool mapped_0)
{
        enum stage stage = tests[*i].stage;
        enum permission result;
        bool skip;

        for (; tests[*i].stage == stage; (*i)++) {
                /*
                 * Some fetch protection override tests require that page 0
                 * be mapped. However, when the host tries to map that page
                 * via vm_vaddr_alloc, it may happen that some other page
                 * gets mapped instead.
                 * In order to skip these tests, we detect this inside the
                 * guest.
                 */
                skip = tests[*i].addr < (void *)4096 &&
                       tests[*i].expected != TRANSL_UNAVAIL &&
                       !mapped_0;
                if (!skip) {
                        result = test_protection(tests[*i].addr, tests[*i].key);
                        __GUEST_ASSERT(result == tests[*i].expected,
                                       "Wanted %u, got %u, for i = %u",
                                       tests[*i].expected, result, *i);
                }
        }
        return stage;
}

static void guest_code(void)
{
        bool mapped_0;
        int i = 0;

        GUEST_ASSERT_EQ(set_storage_key(page_store_prot, 0x10), 0);
        GUEST_ASSERT_EQ(set_storage_key(page_fetch_prot, 0x98), 0);
        GUEST_SYNC(STAGE_INIT_SIMPLE);
        GUEST_SYNC(perform_next_stage(&i, false));

        /* Fetch-protection override */
        mapped_0 = !set_storage_key((void *)0, 0x98);
        GUEST_SYNC(STAGE_INIT_FETCH_PROT_OVERRIDE);
        GUEST_SYNC(perform_next_stage(&i, mapped_0));

        /* Storage-protection override */
        GUEST_SYNC(perform_next_stage(&i, mapped_0));
}
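/*
 * Whether page 0 is mapped is probed from within the guest: set_storage_key()
 * fails (LRA reports no translation) if vm_vaddr_alloc() on the host did not
 * hand out the page at address 0, and the corresponding fetch-protection
 * override tests are then skipped in perform_next_stage().
 */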

#define HOST_SYNC_NO_TAP(vcpup, stage)                          \
({                                                              \
        struct kvm_vcpu *__vcpu = (vcpup);                      \
        struct ucall uc;                                        \
        int __stage = (stage);                                  \
                                                                \
        vcpu_run(__vcpu);                                       \
        get_ucall(__vcpu, &uc);                                 \
        if (uc.cmd == UCALL_ABORT)                              \
                REPORT_GUEST_ASSERT(uc);                        \
        TEST_ASSERT_EQ(uc.cmd, UCALL_SYNC);                     \
        TEST_ASSERT_EQ(uc.args[1], __stage);                    \
})

#define HOST_SYNC(vcpu, stage)                  \
({                                              \
        HOST_SYNC_NO_TAP(vcpu, stage);          \
        ksft_test_result_pass("" #stage "\n");  \
})
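/*
 * HOST_SYNC_NO_TAP resumes the vCPU and checks that it stopped with a
 * UCALL_SYNC for the expected stage (forwarding a guest assertion failure
 * via REPORT_GUEST_ASSERT). HOST_SYNC additionally records a TAP pass; the
 * NO_TAP variant is used below when the host wants to report a skip instead.
 */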

int main(int argc, char *argv[])
{
        struct kvm_vcpu *vcpu;
        struct kvm_vm *vm;
        struct kvm_run *run;
        vm_vaddr_t guest_0_page;

        ksft_print_header();
        ksft_set_plan(STAGE_END);

        vm = vm_create_with_one_vcpu(&vcpu, guest_code);
        run = vcpu->run;

        HOST_SYNC(vcpu, STAGE_INIT_SIMPLE);
        mprotect(addr_gva2hva(vm, (vm_vaddr_t)pages), PAGE_SIZE * 2, PROT_READ);
        HOST_SYNC(vcpu, TEST_SIMPLE);

        guest_0_page = vm_vaddr_alloc(vm, PAGE_SIZE, 0);
        if (guest_0_page != 0) {
                /* Use NO_TAP so we don't get a PASS print */
                HOST_SYNC_NO_TAP(vcpu, STAGE_INIT_FETCH_PROT_OVERRIDE);
                ksft_test_result_skip("STAGE_INIT_FETCH_PROT_OVERRIDE - "
                                      "Did not allocate page at 0\n");
        } else {
                HOST_SYNC(vcpu, STAGE_INIT_FETCH_PROT_OVERRIDE);
        }
        if (guest_0_page == 0)
                mprotect(addr_gva2hva(vm, (vm_vaddr_t)0), PAGE_SIZE, PROT_READ);
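        /*
         * Writing CR0 through the sync-regs area and flagging it in
         * kvm_dirty_regs makes KVM load the register into the vCPU on the
         * next KVM_RUN, so each override bit takes effect before the guest
         * runs the corresponding tests.
         */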
        run->s.regs.crs[0] |= CR0_FETCH_PROTECTION_OVERRIDE;
        run->kvm_dirty_regs = KVM_SYNC_CRS;
        HOST_SYNC(vcpu, TEST_FETCH_PROT_OVERRIDE);

        run->s.regs.crs[0] |= CR0_STORAGE_PROTECTION_OVERRIDE;
        run->kvm_dirty_regs = KVM_SYNC_CRS;
        HOST_SYNC(vcpu, TEST_STORAGE_PROT_OVERRIDE);

        kvm_vm_free(vm);

        ksft_finished();        /* Print results and exit() accordingly */
}
