
TOMOYO Linux Cross Reference
Linux/tools/testing/selftests/kvm/x86_64/nested_exceptions_test.c

// SPDX-License-Identifier: GPL-2.0-only
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"
#include "svm_util.h"

#define L2_GUEST_STACK_SIZE 256

/*
 * Arbitrary, never shoved into KVM/hardware, just need to avoid conflict with
 * the "real" exceptions used, #SS/#GP/#DF (12/13/8).
 */
#define FAKE_TRIPLE_FAULT_VECTOR        0xaa

/* Arbitrary 32-bit error code injected by this test. */
#define SS_ERROR_CODE 0xdeadbeef

/*
 * Bit '0' is set on Intel if the exception occurs while delivering a previous
 * event/exception.  AMD's wording is ambiguous, but presumably the bit is set
 * if the exception occurs while delivering an external event, e.g. NMI or INTR,
 * but not for exceptions that occur when delivering other exceptions or
 * software interrupts.
 *
 * Note, Intel's name for it, "External event", is misleading and much more
 * aligned with AMD's behavior, but the SDM is quite clear on its behavior.
 */
#define ERROR_CODE_EXT_FLAG     BIT(0)

/*
 * Bit '1' is set if the fault occurred when looking up a descriptor in the
 * IDT, which is the case here as the IDT is empty/NULL.
 */
#define ERROR_CODE_IDT_FLAG     BIT(1)

/*
 * The #GP that occurs when vectoring #SS should show the index into the IDT
 * for #SS, plus have the "IDT flag" set.
 */
#define GP_ERROR_CODE_AMD ((SS_VECTOR * 8) | ERROR_CODE_IDT_FLAG)
#define GP_ERROR_CODE_INTEL ((SS_VECTOR * 8) | ERROR_CODE_IDT_FLAG | ERROR_CODE_EXT_FLAG)

/*
 * Intel and AMD both shove '0' into the error code on #DF, regardless of what
 * led to the double fault.
 */
#define DF_ERROR_CODE 0

#define INTERCEPT_SS            (BIT_ULL(SS_VECTOR))
#define INTERCEPT_SS_DF         (INTERCEPT_SS | BIT_ULL(DF_VECTOR))
#define INTERCEPT_SS_GP_DF      (INTERCEPT_SS_DF | BIT_ULL(GP_VECTOR))

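/*
 * Each L2 "test" is just a stub that uses GUEST_SYNC to report which vector
 * the host is expected to queue/observe next; the interesting work is done by
 * L1 and by host userspace in main().
 */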
static void l2_ss_pending_test(void)
{
        GUEST_SYNC(SS_VECTOR);
}

static void l2_ss_injected_gp_test(void)
{
        GUEST_SYNC(GP_VECTOR);
}

static void l2_ss_injected_df_test(void)
{
        GUEST_SYNC(DF_VECTOR);
}

static void l2_ss_injected_tf_test(void)
{
        GUEST_SYNC(FAKE_TRIPLE_FAULT_VECTOR);
}

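/*
 * Run L2 and, unless the expected "vector" is the fake triple fault, verify
 * that L1 observes an exception VM-Exit for that vector with the expected
 * error code.
 */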
static void svm_run_l2(struct svm_test_data *svm, void *l2_code, int vector,
                       uint32_t error_code)
{
        struct vmcb *vmcb = svm->vmcb;
        struct vmcb_control_area *ctrl = &vmcb->control;

        vmcb->save.rip = (u64)l2_code;
        run_guest(vmcb, svm->vmcb_gpa);

        if (vector == FAKE_TRIPLE_FAULT_VECTOR)
                return;

        GUEST_ASSERT_EQ(ctrl->exit_code, (SVM_EXIT_EXCP_BASE + vector));
        GUEST_ASSERT_EQ(ctrl->exit_info_1, error_code);
}

static void l1_svm_code(struct svm_test_data *svm)
{
        struct vmcb_control_area *ctrl = &svm->vmcb->control;
        unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];

        generic_svm_setup(svm, NULL, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
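        /*
         * Zero L2's IDT limit so that delivering any exception in L2 fails
         * the IDT descriptor lookup, i.e. so that #SS escalates to #GP, then
         * #DF, and finally SHUTDOWN.
         */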
        svm->vmcb->save.idtr.limit = 0;
        ctrl->intercept |= BIT_ULL(INTERCEPT_SHUTDOWN);

        ctrl->intercept_exceptions = INTERCEPT_SS_GP_DF;
        svm_run_l2(svm, l2_ss_pending_test, SS_VECTOR, SS_ERROR_CODE);
        svm_run_l2(svm, l2_ss_injected_gp_test, GP_VECTOR, GP_ERROR_CODE_AMD);

        ctrl->intercept_exceptions = INTERCEPT_SS_DF;
        svm_run_l2(svm, l2_ss_injected_df_test, DF_VECTOR, DF_ERROR_CODE);

        ctrl->intercept_exceptions = INTERCEPT_SS;
        svm_run_l2(svm, l2_ss_injected_tf_test, FAKE_TRIPLE_FAULT_VECTOR, 0);
        GUEST_ASSERT_EQ(ctrl->exit_code, SVM_EXIT_SHUTDOWN);

        GUEST_DONE();
}

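/*
 * VMX flavor of svm_run_l2(): the first (#SS) run uses VMLAUNCH, subsequent
 * runs use VMRESUME.  Unless the expected "vector" is the fake triple fault,
 * verify the exception VM-Exit's vector and error code.
 */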
static void vmx_run_l2(void *l2_code, int vector, uint32_t error_code)
{
        GUEST_ASSERT(!vmwrite(GUEST_RIP, (u64)l2_code));

        GUEST_ASSERT_EQ(vector == SS_VECTOR ? vmlaunch() : vmresume(), 0);

        if (vector == FAKE_TRIPLE_FAULT_VECTOR)
                return;

        GUEST_ASSERT_EQ(vmreadz(VM_EXIT_REASON), EXIT_REASON_EXCEPTION_NMI);
        GUEST_ASSERT_EQ((vmreadz(VM_EXIT_INTR_INFO) & 0xff), vector);
        GUEST_ASSERT_EQ(vmreadz(VM_EXIT_INTR_ERROR_CODE), error_code);
}

static void l1_vmx_code(struct vmx_pages *vmx)
{
        unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];

        GUEST_ASSERT_EQ(prepare_for_vmx_operation(vmx), true);

        GUEST_ASSERT_EQ(load_vmcs(vmx), true);

        prepare_vmcs(vmx, NULL, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
        GUEST_ASSERT_EQ(vmwrite(GUEST_IDTR_LIMIT, 0), 0);

        /*
         * VMX disallows injecting an exception with error_code[31:16] != 0,
         * and hardware will never generate a VM-Exit with bits 31:16 set.
         * KVM should likewise truncate the "bad" userspace value.
         */
        GUEST_ASSERT_EQ(vmwrite(EXCEPTION_BITMAP, INTERCEPT_SS_GP_DF), 0);
        vmx_run_l2(l2_ss_pending_test, SS_VECTOR, (u16)SS_ERROR_CODE);
        vmx_run_l2(l2_ss_injected_gp_test, GP_VECTOR, GP_ERROR_CODE_INTEL);

        GUEST_ASSERT_EQ(vmwrite(EXCEPTION_BITMAP, INTERCEPT_SS_DF), 0);
        vmx_run_l2(l2_ss_injected_df_test, DF_VECTOR, DF_ERROR_CODE);

        GUEST_ASSERT_EQ(vmwrite(EXCEPTION_BITMAP, INTERCEPT_SS), 0);
        vmx_run_l2(l2_ss_injected_tf_test, FAKE_TRIPLE_FAULT_VECTOR, 0);
        GUEST_ASSERT_EQ(vmreadz(VM_EXIT_REASON), EXIT_REASON_TRIPLE_FAULT);

        GUEST_DONE();
}

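/* L1 entry point: run the SVM or VMX flavor of the test based on CPU support. */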
static void __attribute__((__flatten__)) l1_guest_code(void *test_data)
{
        if (this_cpu_has(X86_FEATURE_SVM))
                l1_svm_code(test_data);
        else
                l1_vmx_code(test_data);
}

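/*
 * Consume the ucall from the guest and verify it matches the expected vector;
 * '-1' means the guest is expected to be done (UCALL_DONE).
 */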
static void assert_ucall_vector(struct kvm_vcpu *vcpu, int vector)
{
        struct ucall uc;

        TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

        switch (get_ucall(vcpu, &uc)) {
        case UCALL_SYNC:
                TEST_ASSERT(vector == uc.args[1],
                            "Expected L2 to ask for %d, got %ld", vector, uc.args[1]);
                break;
        case UCALL_DONE:
                TEST_ASSERT(vector == -1,
                            "Expected L2 to ask for %d, L2 says it's done", vector);
                break;
        case UCALL_ABORT:
                REPORT_GUEST_ASSERT(uc);
                break;
        default:
                TEST_FAIL("Expected L2 to ask for %d, got unexpected ucall %lu", vector, uc.cmd);
        }
}

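/*
 * Queue a #SS with the test's error code for the vCPU.  A "pending" exception
 * is still subject to L1's exception intercepts, whereas an "injected"
 * exception is treated as already being delivered and so bypasses
 * interception of the #SS itself.
 */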
static void queue_ss_exception(struct kvm_vcpu *vcpu, bool inject)
{
        struct kvm_vcpu_events events;

        vcpu_events_get(vcpu, &events);

        TEST_ASSERT(!events.exception.pending,
                    "Vector %d unexpectedly pending", events.exception.nr);
        TEST_ASSERT(!events.exception.injected,
                    "Vector %d unexpectedly injected", events.exception.nr);

        events.flags = KVM_VCPUEVENT_VALID_PAYLOAD;
        events.exception.pending = !inject;
        events.exception.injected = inject;
        events.exception.nr = SS_VECTOR;
        events.exception.has_error_code = true;
        events.exception.error_code = SS_ERROR_CODE;
        vcpu_events_set(vcpu, &events);
}

/*
 * Verify KVM_{G,S}ET_EVENTS play nice with pending vs. injected exceptions
 * when an exception is being queued for L2.  Specifically, verify that KVM
 * honors L1 exception intercept controls when a #SS is pending/injected,
 * triggers a #GP on vectoring the #SS, morphs to #DF if #GP isn't intercepted
 * by L1, and finally causes (nested) SHUTDOWN if #DF isn't intercepted by L1.
 */
int main(int argc, char *argv[])
{
        vm_vaddr_t nested_test_data_gva;
        struct kvm_vcpu_events events;
        struct kvm_vcpu *vcpu;
        struct kvm_vm *vm;

        TEST_REQUIRE(kvm_has_cap(KVM_CAP_EXCEPTION_PAYLOAD));
        TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM) || kvm_cpu_has(X86_FEATURE_VMX));

        vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
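        /*
         * Enable exception payloads; queueing and reading back a truly
         * "pending" (not yet injected) #SS via KVM_VCPUEVENT_VALID_PAYLOAD
         * below depends on this capability.
         */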
        vm_enable_cap(vm, KVM_CAP_EXCEPTION_PAYLOAD, -2ul);

        if (kvm_cpu_has(X86_FEATURE_SVM))
                vcpu_alloc_svm(vm, &nested_test_data_gva);
        else
                vcpu_alloc_vmx(vm, &nested_test_data_gva);

        vcpu_args_set(vcpu, 1, nested_test_data_gva);

        /* Run L1 => L2.  L2 should sync and request #SS. */
        vcpu_run(vcpu);
        assert_ucall_vector(vcpu, SS_VECTOR);

        /* Pend #SS and request immediate exit.  #SS should still be pending. */
        queue_ss_exception(vcpu, false);
        vcpu->run->immediate_exit = true;
        vcpu_run_complete_io(vcpu);

        /* Verify the pending event comes back out the same as it went in. */
        vcpu_events_get(vcpu, &events);
        TEST_ASSERT_EQ(events.flags & KVM_VCPUEVENT_VALID_PAYLOAD,
                        KVM_VCPUEVENT_VALID_PAYLOAD);
        TEST_ASSERT_EQ(events.exception.pending, true);
        TEST_ASSERT_EQ(events.exception.nr, SS_VECTOR);
        TEST_ASSERT_EQ(events.exception.has_error_code, true);
        TEST_ASSERT_EQ(events.exception.error_code, SS_ERROR_CODE);

        /*
         * Run for real with the pending #SS, L1 should get a VM-Exit due to
         * #SS interception and re-enter L2 to request #GP (via injected #SS).
         */
        vcpu->run->immediate_exit = false;
        vcpu_run(vcpu);
        assert_ucall_vector(vcpu, GP_VECTOR);

        /*
         * Inject #SS, the #SS should bypass interception and cause #GP, which
         * L1 should intercept before KVM morphs it to #DF.  L1 should then
         * disable #GP interception and run L2 to request #DF (via #SS => #GP).
         */
        queue_ss_exception(vcpu, true);
        vcpu_run(vcpu);
        assert_ucall_vector(vcpu, DF_VECTOR);

        /*
         * Inject #SS, the #SS should bypass interception and cause #GP, which
         * L1 is no longer intercepting, and so should morph to #DF, for which
         * L1 should get a #DF VM-Exit.  L1 should then disable #DF
         * interception and run L2 to request a (fake) triple fault.
         */
        queue_ss_exception(vcpu, true);
        vcpu_run(vcpu);
        assert_ucall_vector(vcpu, FAKE_TRIPLE_FAULT_VECTOR);

        /*
         * Inject #SS yet again.  L1 is not intercepting #GP or #DF, and so
         * should see nested TRIPLE_FAULT / SHUTDOWN.
         */
        queue_ss_exception(vcpu, true);
        vcpu_run(vcpu);
        assert_ucall_vector(vcpu, -1);

        kvm_vm_free(vm);
}
