// SPDX-License-Identifier: GPL-2.0
/*
 * vgic_irq.c - Test userspace injection of IRQs
 *
 * This test validates the injection of IRQs from userspace using various
 * methods (e.g., KVM_IRQ_LINE) and modes (e.g., EOI). The guest "asks" the
 * host to inject a specific intid via a GUEST_SYNC call, and then checks that
 * it received it.
 */
#include <asm/kvm.h>
#include <asm/kvm_para.h>
#include <sys/eventfd.h>
#include <linux/sizes.h>

#include "processor.h"
#include "test_util.h"
#include "kvm_util.h"
#include "gic.h"
#include "gic_v3.h"
#include "vgic.h"

/*
 * Stores the user specified args; it's passed to the guest and to every test
 * function.
 */
struct test_args {
	uint32_t nr_irqs; /* number of KVM supported IRQs. */
	bool eoi_split; /* 1 is eoir+dir, 0 is eoir only */
	bool level_sensitive; /* 1 is level, 0 is edge */
	int kvm_max_routes; /* output of KVM_CAP_IRQ_ROUTING */
	bool kvm_supports_irqfd; /* output of KVM_CAP_IRQFD */
};

/*
 * KVM implements 32 priority levels:
 * 0x00 (highest priority) - 0xF8 (lowest priority), in steps of 8
 *
 * Note that these macros will still be correct in the case that KVM implements
 * more priority levels. Also note that 32 is the minimum for GICv3 and GICv2.
 */
#define KVM_NUM_PRIOS		32
#define KVM_PRIO_SHIFT		3 /* steps of 8 = 1 << 3 */
#define KVM_PRIO_STEPS		(1 << KVM_PRIO_SHIFT) /* 8 */
#define LOWEST_PRIO		(KVM_NUM_PRIOS - 1)
#define CPU_PRIO_MASK		(LOWEST_PRIO << KVM_PRIO_SHIFT)	/* 0xf8 */
#define IRQ_DEFAULT_PRIO	(LOWEST_PRIO - 1)
#define IRQ_DEFAULT_PRIO_REG	(IRQ_DEFAULT_PRIO << KVM_PRIO_SHIFT) /* 0xf0 */

/*
 * The kvm_inject_* utilities are used by the guest to ask the host to inject
 * interrupts (e.g., using the KVM_IRQ_LINE ioctl).
 */

typedef enum {
	KVM_INJECT_EDGE_IRQ_LINE = 1,
	KVM_SET_IRQ_LINE,
	KVM_SET_IRQ_LINE_HIGH,
	KVM_SET_LEVEL_INFO_HIGH,
	KVM_INJECT_IRQFD,
	KVM_WRITE_ISPENDR,
	KVM_WRITE_ISACTIVER,
} kvm_inject_cmd;

/* Payload of the guest->host GUEST_SYNC hypercall describing one injection. */
struct kvm_inject_args {
	kvm_inject_cmd cmd;
	uint32_t first_intid;
	uint32_t num;
	int level;
	bool expect_failure;
};

/* Used on the guest side to perform the hypercall. */
static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid,
		uint32_t num, int level, bool expect_failure);

/* Used on the host side to get the hypercall info. */
static void kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc,
		struct kvm_inject_args *args);

#define _KVM_INJECT_MULTI(cmd, intid, num, expect_failure)		\
	kvm_inject_call(cmd, intid, num, -1 /* not used */, expect_failure)

#define KVM_INJECT_MULTI(cmd, intid, num)				\
	_KVM_INJECT_MULTI(cmd, intid, num, false)

#define _KVM_INJECT(cmd, intid, expect_failure)				\
	_KVM_INJECT_MULTI(cmd, intid, 1, expect_failure)

#define KVM_INJECT(cmd, intid)						\
	_KVM_INJECT_MULTI(cmd, intid, 1, false)

#define KVM_ACTIVATE(cmd, intid)					\
	kvm_inject_call(cmd, intid, 1, 1, false);

struct kvm_inject_desc {
	kvm_inject_cmd cmd;
	/* can inject SGIs, PPIs, and/or SPIs. */
	bool sgi, ppi, spi;
};

static struct kvm_inject_desc inject_edge_fns[] = {
	/*                                      sgi    ppi    spi */
	{ KVM_INJECT_EDGE_IRQ_LINE,		false, false, true },
	{ KVM_INJECT_IRQFD,			false, false, true },
	{ KVM_WRITE_ISPENDR,			true,  false, true },
	{ 0, },
};

static struct kvm_inject_desc inject_level_fns[] = {
	/*                                      sgi    ppi    spi */
	{ KVM_SET_IRQ_LINE_HIGH,		false, true,  true },
	{ KVM_SET_LEVEL_INFO_HIGH,		false, true,  true },
	{ KVM_INJECT_IRQFD,			false, false, true },
	{ KVM_WRITE_ISPENDR,			false, true,  true },
	{ 0, },
};

static struct kvm_inject_desc set_active_fns[] = {
	/*                                      sgi    ppi    spi */
	{ KVM_WRITE_ISACTIVER,			true,  true,  true },
	{ 0, },
};

#define for_each_inject_fn(t, f)					\
	for ((f) = (t); (f)->cmd; (f)++)

/* Skip the irqfd-based injector when KVM_CAP_IRQFD is not available. */
#define for_each_supported_inject_fn(args, t, f)			\
	for_each_inject_fn(t, f)					\
		if ((args)->kvm_supports_irqfd || (f)->cmd != KVM_INJECT_IRQFD)

#define for_each_supported_activate_fn(args, t, f)			\
	for_each_supported_inject_fn((args), (t), (f))

/* Shared between the guest main thread and the IRQ handlers. */
volatile uint64_t irq_handled;
volatile uint32_t irqnr_received[MAX_SPI + 1];

/* Reset the per-test IRQ accounting shared with the IRQ handlers. */
static void reset_stats(void)
{
	int i;

	irq_handled = 0;
	for (i = 0; i <= MAX_SPI; i++)
		irqnr_received[i] = 0;
}

/* Read ICC_AP1R0_EL1 (Group-1 active priorities) with a trailing barrier. */
static uint64_t gic_read_ap1r0(void)
{
	uint64_t reg = read_sysreg_s(SYS_ICC_AP1R0_EL1);

	dsb(sy);
	return reg;
}

/* Write ICC_AP1R0_EL1 (Group-1 active priorities), synchronized with isb. */
static void gic_write_ap1r0(uint64_t val)
{
	write_sysreg_s(val, SYS_ICC_AP1R0_EL1);
	isb();
}

static void guest_set_irq_line(uint32_t intid, uint32_t level);

/*
 * Common IRQ handler body: ack the interrupt, sanity-check its active/pending
 * state, record it in the shared stats, then EOI (and DIR when eoi_split).
 */
static void guest_irq_generic_handler(bool eoi_split, bool level_sensitive)
{
	uint32_t intid = gic_get_and_ack_irq();

	if (intid == IAR_SPURIOUS)
		return;

	GUEST_ASSERT(gic_irq_get_active(intid));

	if (!level_sensitive)
		GUEST_ASSERT(!gic_irq_get_pending(intid));

	/* Level interrupts stay pending until the line is lowered. */
	if (level_sensitive)
		guest_set_irq_line(intid, 0);

	GUEST_ASSERT(intid < MAX_SPI);
	irqnr_received[intid] += 1;
	irq_handled += 1;

	gic_set_eoi(intid);
	GUEST_ASSERT_EQ(gic_read_ap1r0(), 0);
	if (eoi_split)
		gic_set_dir(intid);

	GUEST_ASSERT(!gic_irq_get_active(intid));
	GUEST_ASSERT(!gic_irq_get_pending(intid));
}

/* Guest side of the injection hypercall: marshal args and GUEST_SYNC. */
static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid,
		uint32_t num, int level, bool expect_failure)
{
	struct kvm_inject_args args = {
		.cmd = cmd,
		.first_intid = first_intid,
		.num = num,
		.level = level,
		.expect_failure = expect_failure,
	};
	GUEST_SYNC(&args);
}

#define GUEST_ASSERT_IAR_EMPTY()					\
do {									\
	uint32_t _intid;						\
	_intid = gic_get_and_ack_irq();					\
	GUEST_ASSERT(_intid == 0 || _intid == IAR_SPURIOUS);		\
} while (0)

#define CAT_HELPER(a, b) a ## b
#define CAT(a, b) CAT_HELPER(a, b)
#define PREFIX guest_irq_handler_
#define GUEST_IRQ_HANDLER_NAME(split, lev) CAT(PREFIX, CAT(split, lev))
#define GENERATE_GUEST_IRQ_HANDLER(split, lev)				\
static void CAT(PREFIX, CAT(split, lev))(struct ex_regs *regs)		\
{									\
	guest_irq_generic_handler(split, lev);				\
}

GENERATE_GUEST_IRQ_HANDLER(0, 0);
GENERATE_GUEST_IRQ_HANDLER(0, 1);
GENERATE_GUEST_IRQ_HANDLER(1, 0);
GENERATE_GUEST_IRQ_HANDLER(1, 1);

/* Indexed by [eoi_split][level_sensitive]. */
static void (*guest_irq_handlers[2][2])(struct ex_regs *) = {
	{GUEST_IRQ_HANDLER_NAME(0, 0), GUEST_IRQ_HANDLER_NAME(0, 1),},
	{GUEST_IRQ_HANDLER_NAME(1, 0), GUEST_IRQ_HANDLER_NAME(1, 1),},
};

/* Put every configured IRQ back at the default priority. */
static void reset_priorities(struct test_args *args)
{
	int i;

	for (i = 0; i < args->nr_irqs; i++)
		gic_set_priority(i, IRQ_DEFAULT_PRIO_REG);
}

static void guest_set_irq_line(uint32_t intid, uint32_t level)
{
	kvm_inject_call(KVM_SET_IRQ_LINE, intid, 1, level, false);
}

/* Ask the host to inject an invalid intid and check nothing was delivered. */
static void test_inject_fail(struct test_args *args,
		uint32_t intid, kvm_inject_cmd cmd)
{
	reset_stats();

	_KVM_INJECT(cmd, intid, true);
	/* no IRQ to handle on entry */

	GUEST_ASSERT_EQ(irq_handled, 0);
	GUEST_ASSERT_IAR_EMPTY();
}

/*
 * Inject num IRQs starting at first_intid and wait (via wfi) until the
 * handlers have processed exactly one occurrence of each.
 */
static void guest_inject(struct test_args *args,
		uint32_t first_intid, uint32_t num,
		kvm_inject_cmd cmd)
{
	uint32_t i;

	reset_stats();

	/* Cycle over all priorities to make things more interesting. */
	for (i = first_intid; i < num + first_intid; i++)
		gic_set_priority(i, (i % (KVM_NUM_PRIOS - 1)) << 3);

	/* daifset #2 masks IRQs (PSTATE.I); daifclr #2 unmasks them. */
	asm volatile("msr daifset, #2" : : : "memory");
	KVM_INJECT_MULTI(cmd, first_intid, num);

	while (irq_handled < num) {
		asm volatile("wfi\n"
			     "msr daifclr, #2\n"
			     /* handle IRQ */
			     "msr daifset, #2\n"
			     : : : "memory");
	}
	asm volatile("msr daifclr, #2" : : : "memory");

	GUEST_ASSERT_EQ(irq_handled, num);
	for (i = first_intid; i < num + first_intid; i++)
		GUEST_ASSERT_EQ(irqnr_received[i], 1);
	GUEST_ASSERT_IAR_EMPTY();

	reset_priorities(args);
}

/*
 * Restore the active state of multiple concurrent IRQs (given by
 * concurrent_irqs). This does what a live-migration would do on the
 * destination side assuming there are some active IRQs that were not
 * deactivated yet.
 */
static void guest_restore_active(struct test_args *args,
		uint32_t first_intid, uint32_t num,
		kvm_inject_cmd cmd)
{
	uint32_t prio, intid, ap1r;
	int i;

	/*
	 * Set the priorities of the first (KVM_NUM_PRIOS - 1) IRQs
	 * in descending order, so intid+1 can preempt intid.
	 */
	for (i = 0, prio = (num - 1) * 8; i < num; i++, prio -= 8) {
		GUEST_ASSERT(prio >= 0);
		intid = i + first_intid;
		gic_set_priority(intid, prio);
	}

	/*
	 * In a real migration, KVM would restore all GIC state before running
	 * guest code.
	 */
	for (i = 0; i < num; i++) {
		intid = i + first_intid;
		KVM_ACTIVATE(cmd, intid);
		ap1r = gic_read_ap1r0();
		ap1r |= 1U << i;
		gic_write_ap1r0(ap1r);
	}

	/* This is where the "migration" would occur. */

	/* finish handling the IRQs starting with the highest priority one. */
	for (i = 0; i < num; i++) {
		intid = num - i - 1 + first_intid;
		gic_set_eoi(intid);
		if (args->eoi_split)
			gic_set_dir(intid);
	}

	for (i = 0; i < num; i++)
		GUEST_ASSERT(!gic_irq_get_active(i + first_intid));
	GUEST_ASSERT_EQ(gic_read_ap1r0(), 0);
	GUEST_ASSERT_IAR_EMPTY();
}

/*
 * Polls the IAR until it's not a spurious interrupt.
 *
 * This function should only be used in test_inject_preemption (with IRQs
 * masked).
 */
static uint32_t wait_for_and_activate_irq(void)
{
	uint32_t intid;

	do {
		asm volatile("wfi" : : : "memory");
		intid = gic_get_and_ack_irq();
	} while (intid == IAR_SPURIOUS);

	return intid;
}

/*
 * Inject multiple concurrent IRQs (num IRQs starting at first_intid) and
 * handle them without handling the actual exceptions. This is done by masking
 * interrupts for the whole test.
 */
static void test_inject_preemption(struct test_args *args,
		uint32_t first_intid, int num,
		kvm_inject_cmd cmd)
{
	uint32_t intid, prio, step = KVM_PRIO_STEPS;
	int i;

	/* Set the priorities of the first (KVM_NUM_PRIOS - 1) IRQs
	 * in descending order, so intid+1 can preempt intid.
	 */
	for (i = 0, prio = (num - 1) * step; i < num; i++, prio -= step) {
		GUEST_ASSERT(prio >= 0);
		intid = i + first_intid;
		gic_set_priority(intid, prio);
	}

	local_irq_disable();

	for (i = 0; i < num; i++) {
		uint32_t tmp;
		intid = i + first_intid;
		KVM_INJECT(cmd, intid);
		/* Each successive IRQ will preempt the previous one. */
		tmp = wait_for_and_activate_irq();
		GUEST_ASSERT_EQ(tmp, intid);
		if (args->level_sensitive)
			guest_set_irq_line(intid, 0);
	}

	/* finish handling the IRQs starting with the highest priority one. */
	for (i = 0; i < num; i++) {
		intid = num - i - 1 + first_intid;
		gic_set_eoi(intid);
		if (args->eoi_split)
			gic_set_dir(intid);
	}

	local_irq_enable();

	for (i = 0; i < num; i++)
		GUEST_ASSERT(!gic_irq_get_active(i + first_intid));
	GUEST_ASSERT_EQ(gic_read_ap1r0(), 0);
	GUEST_ASSERT_IAR_EMPTY();

	reset_priorities(args);
}

/* Exercise an injector on every intid class (SGI/PPI/SPI) it supports. */
static void test_injection(struct test_args *args, struct kvm_inject_desc *f)
{
	uint32_t nr_irqs = args->nr_irqs;

	if (f->sgi) {
		guest_inject(args, MIN_SGI, 1, f->cmd);
		guest_inject(args, 0, 16, f->cmd);
	}

	if (f->ppi)
		guest_inject(args, MIN_PPI, 1, f->cmd);

	if (f->spi) {
		guest_inject(args, MIN_SPI, 1, f->cmd);
		guest_inject(args, nr_irqs - 1, 1, f->cmd);
		guest_inject(args, MIN_SPI, nr_irqs - MIN_SPI, f->cmd);
	}
}

/* Try a selection of out-of-range intids and expect each to be rejected. */
static void test_injection_failure(struct test_args *args,
		struct kvm_inject_desc *f)
{
	uint32_t bad_intid[] = { args->nr_irqs, 1020, 1024, 1120, 5120, ~0U, };
	int i;

	for (i = 0; i < ARRAY_SIZE(bad_intid); i++)
		test_inject_fail(args, bad_intid[i], f->cmd);
}

static void test_preemption(struct test_args *args, struct kvm_inject_desc *f)
{
	/*
	 * Test up to 4 levels of preemption. The reason is that KVM doesn't
	 * currently implement the ability to have more than the number-of-LRs
	 * number of concurrently active IRQs. The number of LRs implemented is
	 * IMPLEMENTATION DEFINED, however, it seems that most implement 4.
	 */
	if (f->sgi)
		test_inject_preemption(args, MIN_SGI, 4, f->cmd);

	if (f->ppi)
		test_inject_preemption(args, MIN_PPI, 4, f->cmd);

	if (f->spi)
		test_inject_preemption(args, MIN_SPI, 4, f->cmd);
}

static void test_restore_active(struct test_args *args, struct kvm_inject_desc *f)
{
	/* Test up to 4 active IRQs. Same reason as in test_preemption. */
	if (f->sgi)
		guest_restore_active(args, MIN_SGI, 4, f->cmd);

	if (f->ppi)
		guest_restore_active(args, MIN_PPI, 4, f->cmd);

	if (f->spi)
		guest_restore_active(args, MIN_SPI, 4, f->cmd);
}

/* Guest entry point: set up the GICv3, then run every applicable test. */
static void guest_code(struct test_args *args)
{
	uint32_t i, nr_irqs = args->nr_irqs;
	bool level_sensitive = args->level_sensitive;
	struct kvm_inject_desc *f, *inject_fns;

	gic_init(GIC_V3, 1);

	for (i = 0; i < nr_irqs; i++)
		gic_irq_enable(i);

	/* Only SPIs have configurable edge/level triggering. */
	for (i = MIN_SPI; i < nr_irqs; i++)
		gic_irq_set_config(i, !level_sensitive);

	gic_set_eoi_split(args->eoi_split);

	reset_priorities(args);
	gic_set_priority_mask(CPU_PRIO_MASK);

	inject_fns  = level_sensitive ? inject_level_fns
				      : inject_edge_fns;

	local_irq_enable();

	/* Start the tests. */
	for_each_supported_inject_fn(args, inject_fns, f) {
		test_injection(args, f);
		test_preemption(args, f);
		test_injection_failure(args, f);
	}

	/*
	 * Restore the active state of IRQs. This would happen when live
	 * migrating IRQs in the middle of being handled.
	 */
	for_each_supported_activate_fn(args, set_active_fns, f)
		test_restore_active(args, f);

	GUEST_DONE();
}

/* Host side of KVM_IRQ_LINE injection; verifies EINVAL for bad intids. */
static void kvm_irq_line_check(struct kvm_vm *vm, uint32_t intid, int level,
			struct test_args *test_args, bool expect_failure)
{
	int ret;

	if (!expect_failure) {
		kvm_arm_irq_line(vm, intid, level);
	} else {
		/* The interface doesn't allow larger intid's. */
		if (intid > KVM_ARM_IRQ_NUM_MASK)
			return;

		ret = _kvm_arm_irq_line(vm, intid, level);
		TEST_ASSERT(ret != 0 && errno == EINVAL,
				"Bad intid %i did not cause KVM_IRQ_LINE "
				"error: rc: %i errno: %i", intid, ret, errno);
	}
}

void kvm_irq_set_level_info_check(int gic_fd, uint32_t intid, int level,
			bool expect_failure)
{
	if (!expect_failure) {
		kvm_irq_set_level_info(gic_fd, intid, level);
	} else {
		int ret = _kvm_irq_set_level_info(gic_fd, intid, level);
		/*
		 * The kernel silently fails for invalid SPIs and SGIs (which
		 * are not level-sensitive). It only checks for intid to not
		 * spill over 1U << 10 (the max reserved SPI). Also, callers
		 * are supposed to mask the intid with 0x3ff (1023).
		 */
		if (intid > VGIC_MAX_RESERVED)
			TEST_ASSERT(ret != 0 && errno == EINVAL,
				"Bad intid %i did not cause VGIC_GRP_LEVEL_INFO "
				"error: rc: %i errno: %i", intid, ret, errno);
		else
			TEST_ASSERT(!ret, "KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO "
				"for intid %i failed, rc: %i errno: %i",
				intid, ret, errno);
	}
}

/* Program GSI routes for [intid, intid+num) and check the expected outcome. */
static void kvm_set_gsi_routing_irqchip_check(struct kvm_vm *vm,
		uint32_t intid, uint32_t num, uint32_t kvm_max_routes,
		bool expect_failure)
{
	struct kvm_irq_routing *routing;
	int ret;
	uint64_t i;

	assert(num <= kvm_max_routes && kvm_max_routes <= KVM_MAX_IRQ_ROUTES);

	routing = kvm_gsi_routing_create();
	for (i = intid; i < (uint64_t)intid + num; i++)
		kvm_gsi_routing_irqchip_add(routing, i - MIN_SPI, i - MIN_SPI);

	if (!expect_failure) {
		kvm_gsi_routing_write(vm, routing);
	} else {
		ret = _kvm_gsi_routing_write(vm, routing);
		/* The kernel only checks e->irqchip.pin >= KVM_IRQCHIP_NUM_PINS */
		if (((uint64_t)intid + num - 1 - MIN_SPI) >= KVM_IRQCHIP_NUM_PINS)
			TEST_ASSERT(ret != 0 && errno == EINVAL,
				"Bad intid %u did not cause KVM_SET_GSI_ROUTING "
				"error: rc: %i errno: %i", intid, ret, errno);
		else
			TEST_ASSERT(ret == 0, "KVM_SET_GSI_ROUTING "
				"for intid %i failed, rc: %i errno: %i",
				intid, ret, errno);
	}
}

static void kvm_irq_write_ispendr_check(int gic_fd, uint32_t intid,
					struct kvm_vcpu *vcpu,
					bool expect_failure)
{
	/*
	 * Ignore this when expecting failure as invalid intids will lead to
	 * either trying to inject SGIs when we configured the test to be
	 * level_sensitive (or the reverse), or inject large intids which
	 * will lead to writing above the ISPENDR register space (and we
	 * don't want to do that either).
	 */
	if (!expect_failure)
		kvm_irq_write_ispendr(gic_fd, intid, vcpu);
}

/* Route [intid, intid+num) as GSIs, bind eventfds, and fire each irqfd. */
static void kvm_routing_and_irqfd_check(struct kvm_vm *vm,
		uint32_t intid, uint32_t num, uint32_t kvm_max_routes,
		bool expect_failure)
{
	int fd[MAX_SPI];
	uint64_t val;
	int ret, f;
	uint64_t i;

	/*
	 * There is no way to try injecting an SGI or PPI as the interface
	 * starts counting from the first SPI (above the private ones), so just
	 * exit.
	 */
	if (INTID_IS_SGI(intid) || INTID_IS_PPI(intid))
		return;

	kvm_set_gsi_routing_irqchip_check(vm, intid, num,
			kvm_max_routes, expect_failure);

	/*
	 * If expect_failure, then just try to inject anyway. These
	 * will silently fail. And in any case, the guest will check
	 * that no actual interrupt was injected for those cases.
	 */

	for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
		fd[f] = eventfd(0, 0);
		TEST_ASSERT(fd[f] != -1, __KVM_SYSCALL_ERROR("eventfd()", fd[f]));
	}

	for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
		struct kvm_irqfd irqfd = {
			.fd  = fd[f],
			.gsi = i - MIN_SPI,
		};
		assert(i <= (uint64_t)UINT_MAX);
		vm_ioctl(vm, KVM_IRQFD, &irqfd);
	}

	for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
		val = 1;
		ret = write(fd[f], &val, sizeof(uint64_t));
		TEST_ASSERT(ret == sizeof(uint64_t),
			    __KVM_SYSCALL_ERROR("write()", ret));
	}

	for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++)
		close(fd[f]);
}

/* handles the valid case: intid=0xffffffff num=1 */
#define for_each_intid(first, num, tmp, i)				\
	for ((tmp) = (i) = (first);					\
		(tmp) < (uint64_t)(first) + (uint64_t)(num);		\
		(tmp)++, (i)++)

/* Host dispatcher: execute the injection the guest requested via GUEST_SYNC. */
static void run_guest_cmd(struct kvm_vcpu *vcpu, int gic_fd,
			  struct kvm_inject_args *inject_args,
			  struct test_args *test_args)
{
	kvm_inject_cmd cmd = inject_args->cmd;
	uint32_t intid = inject_args->first_intid;
	uint32_t num = inject_args->num;
	int level = inject_args->level;
	bool expect_failure = inject_args->expect_failure;
	struct kvm_vm *vm = vcpu->vm;
	uint64_t tmp;
	uint32_t i;

	/* handles the valid case: intid=0xffffffff num=1 */
	assert(intid < UINT_MAX - num || num == 1);

	switch (cmd) {
	case KVM_INJECT_EDGE_IRQ_LINE:
		for_each_intid(intid, num, tmp, i)
			kvm_irq_line_check(vm, i, 1, test_args,
					expect_failure);
		for_each_intid(intid, num, tmp, i)
			kvm_irq_line_check(vm, i, 0, test_args,
					expect_failure);
		break;
	case KVM_SET_IRQ_LINE:
		for_each_intid(intid, num, tmp, i)
			kvm_irq_line_check(vm, i, level, test_args,
					expect_failure);
		break;
	case KVM_SET_IRQ_LINE_HIGH:
		for_each_intid(intid, num, tmp, i)
			kvm_irq_line_check(vm, i, 1, test_args,
					expect_failure);
		break;
	case KVM_SET_LEVEL_INFO_HIGH:
		for_each_intid(intid, num, tmp, i)
			kvm_irq_set_level_info_check(gic_fd, i, 1,
					expect_failure);
		break;
	case KVM_INJECT_IRQFD:
		kvm_routing_and_irqfd_check(vm, intid, num,
					test_args->kvm_max_routes,
					expect_failure);
		break;
	case KVM_WRITE_ISPENDR:
		for (i = intid; i < intid + num; i++)
			kvm_irq_write_ispendr_check(gic_fd, i, vcpu,
					expect_failure);
		break;
	case KVM_WRITE_ISACTIVER:
		for (i = intid; i < intid + num; i++)
			kvm_irq_write_isactiver(gic_fd, i, vcpu);
		break;
	default:
		break;
	}
}

/* Copy the guest's kvm_inject_args (pointed to by the ucall) into *args. */
static void kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc,
		struct kvm_inject_args *args)
{
	struct kvm_inject_args *kvm_args_hva;
	vm_vaddr_t kvm_args_gva;

	kvm_args_gva = uc->args[1];
	kvm_args_hva = (struct kvm_inject_args *)addr_gva2hva(vm, kvm_args_gva);
	memcpy(args, kvm_args_hva, sizeof(struct kvm_inject_args));
}

static void print_args(struct test_args *args)
{
	printf("nr-irqs=%d level-sensitive=%d eoi-split=%d\n",
			args->nr_irqs, args->level_sensitive,
			args->eoi_split);
}

/* Set up a one-vCPU VM with a vGICv3 and run the guest/host test loop. */
static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split)
{
	struct ucall uc;
	int gic_fd;
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	struct kvm_inject_args inject_args;
	vm_vaddr_t args_gva;

	struct test_args args = {
		.nr_irqs = nr_irqs,
		.level_sensitive = level_sensitive,
		.eoi_split = eoi_split,
		.kvm_max_routes = kvm_check_cap(KVM_CAP_IRQ_ROUTING),
		.kvm_supports_irqfd = kvm_check_cap(KVM_CAP_IRQFD),
	};

	print_args(&args);

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);

	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(vcpu);

	/* Setup the guest args page (so it gets the args). */
	args_gva = vm_vaddr_alloc_page(vm);
	memcpy(addr_gva2hva(vm, args_gva), &args, sizeof(args));
	vcpu_args_set(vcpu, 1, args_gva);

	gic_fd = vgic_v3_setup(vm, 1, nr_irqs);
	__TEST_REQUIRE(gic_fd >= 0, "Failed to create vgic-v3, skipping");

	vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT,
		guest_irq_handlers[args.eoi_split][args.level_sensitive]);

	while (1) {
		vcpu_run(vcpu);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_SYNC:
			kvm_inject_get_call(vm, &uc, &inject_args);
			run_guest_cmd(vcpu, gic_fd, &inject_args, &args);
			break;
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			break;
		case UCALL_DONE:
			goto done;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}
	}

done:
	close(gic_fd);
	kvm_vm_free(vm);
}

/* Print usage and exit; also used to reject invalid option values. */
static void help(const char *name)
{
	printf(
	"\n"
	"usage: %s [-n num_irqs] [-e eoi_split] [-l level_sensitive]\n", name);
	printf(" -n: specify number of IRQs to setup the vgic with. "
		"It has to be a multiple of 32 and between 64 and 1024.\n");
	printf(" -e: if 1 then EOI is split into a write to DIR on top "
		"of writing EOI.\n");
	printf(" -l: specify whether the IRQs are level-sensitive (1) or not (0).");
	puts("");
	exit(1);
}

int main(int argc, char **argv)
{
	uint32_t nr_irqs = 64;
	bool default_args = true;
	bool level_sensitive = false;
	int opt;
	bool eoi_split = false;

	while ((opt = getopt(argc, argv, "hn:e:l:")) != -1) {
		switch (opt) {
		case 'n':
			nr_irqs = atoi_non_negative("Number of IRQs", optarg);
			if (nr_irqs > 1024 || nr_irqs % 32)
				help(argv[0]);
			break;
		case 'e':
			eoi_split = (bool)atoi_paranoid(optarg);
			default_args = false;
			break;
		case 'l':
			level_sensitive = (bool)atoi_paranoid(optarg);
			default_args = false;
			break;
		case 'h':
		default:
			help(argv[0]);
			break;
		}
	}

	/*
	 * If the user just specified nr_irqs and/or gic_version, then run all
	 * combinations.
	 */
	if (default_args) {
		test_vgic(nr_irqs, false /* level */, false /* eoi_split */);
		test_vgic(nr_irqs, false /* level */, true /* eoi_split */);
		test_vgic(nr_irqs, true /* level */, false /* eoi_split */);
		test_vgic(nr_irqs, true /* level */, true /* eoi_split */);
	} else {
		test_vgic(nr_irqs, level_sensitive, eoi_split);
	}

	return 0;
}
Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.