// SPDX-License-Identifier: GPL-2.0
/*
 * kvm guest debug support
 *
 * Copyright IBM Corp. 2014
 *
 *    Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
 */
#include <linux/kvm_host.h>
#include <linux/errno.h>
#include "kvm-s390.h"
#include "gaccess.h"

/*
 * Extends the address range given by *start and *stop to include the address
 * range starting with estart and the length len. Takes care of overflowing
 * intervals and tries to minimize the overall interval size.
 */
static void extend_address_range(u64 *start, u64 *stop, u64 estart, int len)
{
	u64 estop;

	if (len > 0)
		len--;
	else
		len = 0;

	estop = estart + len;

	/* 0-0 range represents "not set" */
	if ((*start == 0) && (*stop == 0)) {
		*start = estart;
		*stop = estop;
	} else if (*start <= *stop) {
		/* increase the existing range */
		if (estart < *start)
			*start = estart;
		if (estop > *stop)
			*stop = estop;
	} else {
		/* "overflowing" interval, whereby *start > *stop */
		if (estart <= *stop) {
			if (estop > *stop)
				*stop = estop;
		} else if (estop > *start) {
			if (estart < *start)
				*start = estart;
		}
		/* minimize the range */
		else if ((estop - *stop) < (*start - estart))
			*stop = estop;
		else
			*start = estart;
	}
}
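/*
 * Illustrative example (editor's note, not part of the original source;
 * values shortened for readability): with a wrapped range *start = 0xfff0,
 * *stop = 0x000f (covering the top of the address space plus 0x0-0xf),
 * calling extend_address_range() above with estart = 0x20, len = 0x10 takes
 * the "minimize the range" path: moving *stop up to 0x2f costs 0x20, while
 * moving *start down to 0x20 would cost 0xffd0, so only *stop changes and
 * the interval stays as small as possible.
 */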
#define MAX_INST_SIZE 6

static void enable_all_hw_bp(struct kvm_vcpu *vcpu)
{
	unsigned long start, len;
	u64 *cr9 = &vcpu->arch.sie_block->gcr[9];
	u64 *cr10 = &vcpu->arch.sie_block->gcr[10];
	u64 *cr11 = &vcpu->arch.sie_block->gcr[11];
	int i;

	if (vcpu->arch.guestdbg.nr_hw_bp <= 0 ||
	    vcpu->arch.guestdbg.hw_bp_info == NULL)
		return;

	/*
	 * If the guest is not interested in branching events, we can safely
	 * limit them to the PER address range.
	 */
	if (!(*cr9 & PER_EVENT_BRANCH))
		*cr9 |= PER_CONTROL_BRANCH_ADDRESS;
	*cr9 |= PER_EVENT_IFETCH | PER_EVENT_BRANCH;

	for (i = 0; i < vcpu->arch.guestdbg.nr_hw_bp; i++) {
		start = vcpu->arch.guestdbg.hw_bp_info[i].addr;
		len = vcpu->arch.guestdbg.hw_bp_info[i].len;

		/*
		 * The instruction in front of the desired bp has to
		 * report instruction-fetching events
		 */
		if (start < MAX_INST_SIZE) {
			len += start;
			start = 0;
		} else {
			start -= MAX_INST_SIZE;
			len += MAX_INST_SIZE;
		}

		extend_address_range(cr10, cr11, start, len);
	}
}

static void enable_all_hw_wp(struct kvm_vcpu *vcpu)
{
	unsigned long start, len;
	u64 *cr9 = &vcpu->arch.sie_block->gcr[9];
	u64 *cr10 = &vcpu->arch.sie_block->gcr[10];
	u64 *cr11 = &vcpu->arch.sie_block->gcr[11];
	int i;

	if (vcpu->arch.guestdbg.nr_hw_wp <= 0 ||
	    vcpu->arch.guestdbg.hw_wp_info == NULL)
		return;

	/* if host uses storage alteration for special address
	 * spaces, enable all events and give all to the guest */
	if (*cr9 & PER_EVENT_STORE && *cr9 & PER_CONTROL_ALTERATION) {
		*cr9 &= ~PER_CONTROL_ALTERATION;
		*cr10 = 0;
		*cr11 = -1UL;
	} else {
		*cr9 &= ~PER_CONTROL_ALTERATION;
		*cr9 |= PER_EVENT_STORE;

		for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) {
			start = vcpu->arch.guestdbg.hw_wp_info[i].addr;
			len = vcpu->arch.guestdbg.hw_wp_info[i].len;

			extend_address_range(cr10, cr11, start, len);
		}
	}
}

void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu)
{
	vcpu->arch.guestdbg.cr0 = vcpu->arch.sie_block->gcr[0];
	vcpu->arch.guestdbg.cr9 = vcpu->arch.sie_block->gcr[9];
	vcpu->arch.guestdbg.cr10 = vcpu->arch.sie_block->gcr[10];
	vcpu->arch.guestdbg.cr11 = vcpu->arch.sie_block->gcr[11];
}

void kvm_s390_restore_guest_per_regs(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->gcr[0] = vcpu->arch.guestdbg.cr0;
	vcpu->arch.sie_block->gcr[9] = vcpu->arch.guestdbg.cr9;
	vcpu->arch.sie_block->gcr[10] = vcpu->arch.guestdbg.cr10;
	vcpu->arch.sie_block->gcr[11] = vcpu->arch.guestdbg.cr11;
}
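/*
 * Editor's note (summary, not from the original source): the backup and
 * restore helpers above are meant to bracket guest entry while host-side
 * debugging is active - the guest's CR0 and CR9-CR11 are saved, overlaid by
 * kvm_s390_patch_guest_per_regs() below with the PER settings the host
 * debugger needs, and put back once control returns from the guest.
 */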
void kvm_s390_patch_guest_per_regs(struct kvm_vcpu *vcpu)
{
	/*
	 * TODO: if guest psw has per enabled, otherwise 0s!
	 * This reduces the amount of reported events.
	 * Need to intercept all psw changes!
	 */

	if (guestdbg_sstep_enabled(vcpu)) {
		/* disable timer (clock-comparator) interrupts */
		vcpu->arch.sie_block->gcr[0] &= ~CR0_CLOCK_COMPARATOR_SUBMASK;
		vcpu->arch.sie_block->gcr[9] |= PER_EVENT_IFETCH;
		vcpu->arch.sie_block->gcr[10] = 0;
		vcpu->arch.sie_block->gcr[11] = -1UL;
	}

	if (guestdbg_hw_bp_enabled(vcpu)) {
		enable_all_hw_bp(vcpu);
		enable_all_hw_wp(vcpu);
	}

	/* TODO: Instruction-fetching-nullification not allowed for now */
	if (vcpu->arch.sie_block->gcr[9] & PER_EVENT_NULLIFICATION)
		vcpu->arch.sie_block->gcr[9] &= ~PER_EVENT_NULLIFICATION;
}

#define MAX_WP_SIZE 100

static int __import_wp_info(struct kvm_vcpu *vcpu,
			    struct kvm_hw_breakpoint *bp_data,
			    struct kvm_hw_wp_info_arch *wp_info)
{
	int ret = 0;

	wp_info->len = bp_data->len;
	wp_info->addr = bp_data->addr;
	wp_info->phys_addr = bp_data->phys_addr;
	wp_info->old_data = NULL;

	if (wp_info->len < 0 || wp_info->len > MAX_WP_SIZE)
		return -EINVAL;

	wp_info->old_data = kmalloc(bp_data->len, GFP_KERNEL_ACCOUNT);
	if (!wp_info->old_data)
		return -ENOMEM;
	/* try to backup the original value */
	ret = read_guest_abs(vcpu, wp_info->phys_addr, wp_info->old_data,
			     wp_info->len);
	if (ret) {
		kfree(wp_info->old_data);
		wp_info->old_data = NULL;
	}

	return ret;
}

#define MAX_BP_COUNT 50

int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
			    struct kvm_guest_debug *dbg)
{
	int ret = 0, nr_wp = 0, nr_bp = 0, i;
	struct kvm_hw_breakpoint *bp_data = NULL;
	struct kvm_hw_wp_info_arch *wp_info = NULL;
	struct kvm_hw_bp_info_arch *bp_info = NULL;

	if (dbg->arch.nr_hw_bp <= 0 || !dbg->arch.hw_bp)
		return 0;
	else if (dbg->arch.nr_hw_bp > MAX_BP_COUNT)
		return -EINVAL;

	bp_data = memdup_array_user(dbg->arch.hw_bp, dbg->arch.nr_hw_bp,
				    sizeof(*bp_data));
	if (IS_ERR(bp_data))
		return PTR_ERR(bp_data);

	for (i = 0; i < dbg->arch.nr_hw_bp; i++) {
		switch (bp_data[i].type) {
		case KVM_HW_WP_WRITE:
			nr_wp++;
			break;
		case KVM_HW_BP:
			nr_bp++;
			break;
		default:
			break;
		}
	}

	if (nr_wp > 0) {
		wp_info = kmalloc_array(nr_wp,
					sizeof(*wp_info),
					GFP_KERNEL_ACCOUNT);
		if (!wp_info) {
			ret = -ENOMEM;
			goto error;
		}
	}
	if (nr_bp > 0) {
		bp_info = kmalloc_array(nr_bp,
					sizeof(*bp_info),
					GFP_KERNEL_ACCOUNT);
		if (!bp_info) {
			ret = -ENOMEM;
			goto error;
		}
	}

	for (nr_wp = 0, nr_bp = 0, i = 0; i < dbg->arch.nr_hw_bp; i++) {
		switch (bp_data[i].type) {
		case KVM_HW_WP_WRITE:
			ret = __import_wp_info(vcpu, &bp_data[i],
					       &wp_info[nr_wp]);
			if (ret)
				goto error;
			nr_wp++;
			break;
		case KVM_HW_BP:
			bp_info[nr_bp].len = bp_data[i].len;
			bp_info[nr_bp].addr = bp_data[i].addr;
			nr_bp++;
			break;
		}
	}

	vcpu->arch.guestdbg.nr_hw_bp = nr_bp;
	vcpu->arch.guestdbg.hw_bp_info = bp_info;
	vcpu->arch.guestdbg.nr_hw_wp = nr_wp;
	vcpu->arch.guestdbg.hw_wp_info = wp_info;
	/* the temporary copy of the user-space data is no longer needed */
	kfree(bp_data);
	return 0;
error:
	kfree(bp_data);
	kfree(wp_info);
	kfree(bp_info);
	return ret;
}

void kvm_s390_clear_bp_data(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_hw_wp_info_arch *hw_wp_info = NULL;

	for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) {
		hw_wp_info = &vcpu->arch.guestdbg.hw_wp_info[i];
		kfree(hw_wp_info->old_data);
		hw_wp_info->old_data = NULL;
	}
	kfree(vcpu->arch.guestdbg.hw_wp_info);
	vcpu->arch.guestdbg.hw_wp_info = NULL;

	kfree(vcpu->arch.guestdbg.hw_bp_info);
	vcpu->arch.guestdbg.hw_bp_info = NULL;

	vcpu->arch.guestdbg.nr_hw_wp = 0;
	vcpu->arch.guestdbg.nr_hw_bp = 0;
}
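/*
 * Note added for clarity: both the PER address range in cr10/cr11 and a
 * breakpoint range may wrap past the top of the address space. A range
 * [a, b] with a > b is treated as [a, ~0UL] plus [0, b], which is what
 * in_addr_range() below checks.
 */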
static inline int in_addr_range(u64 addr, u64 a, u64 b)
{
	if (a <= b)
		return (addr >= a) && (addr <= b);
	else
		/* "overflowing" interval */
		return (addr >= a) || (addr <= b);
}

#define end_of_range(bp_info) (bp_info->addr + bp_info->len - 1)

static struct kvm_hw_bp_info_arch *find_hw_bp(struct kvm_vcpu *vcpu,
					      unsigned long addr)
{
	struct kvm_hw_bp_info_arch *bp_info = vcpu->arch.guestdbg.hw_bp_info;
	int i;

	if (vcpu->arch.guestdbg.nr_hw_bp == 0)
		return NULL;

	for (i = 0; i < vcpu->arch.guestdbg.nr_hw_bp; i++) {
		/* addr is directly the start or in the range of a bp */
		if (addr == bp_info->addr)
			goto found;
		if (bp_info->len > 0 &&
		    in_addr_range(addr, bp_info->addr, end_of_range(bp_info)))
			goto found;

		bp_info++;
	}

	return NULL;
found:
	return bp_info;
}

static struct kvm_hw_wp_info_arch *any_wp_changed(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_hw_wp_info_arch *wp_info = NULL;
	void *temp = NULL;

	if (vcpu->arch.guestdbg.nr_hw_wp == 0)
		return NULL;

	for (i = 0; i < vcpu->arch.guestdbg.nr_hw_wp; i++) {
		wp_info = &vcpu->arch.guestdbg.hw_wp_info[i];
		if (!wp_info || !wp_info->old_data || wp_info->len <= 0)
			continue;

		temp = kmalloc(wp_info->len, GFP_KERNEL_ACCOUNT);
		if (!temp)
			continue;

		/* refetch the wp data and compare it to the old value */
		if (!read_guest_abs(vcpu, wp_info->phys_addr, temp,
				    wp_info->len)) {
			if (memcmp(temp, wp_info->old_data, wp_info->len)) {
				kfree(temp);
				return wp_info;
			}
		}
		kfree(temp);
		temp = NULL;
	}

	return NULL;
}

void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu)
{
	vcpu->run->exit_reason = KVM_EXIT_DEBUG;
	vcpu->guest_debug &= ~KVM_GUESTDBG_EXIT_PENDING;
}

#define PER_CODE_MASK		(PER_EVENT_MASK >> 24)
#define PER_CODE_BRANCH		(PER_EVENT_BRANCH >> 24)
#define PER_CODE_IFETCH		(PER_EVENT_IFETCH >> 24)
#define PER_CODE_STORE		(PER_EVENT_STORE >> 24)
#define PER_CODE_STORE_REAL	(PER_EVENT_STORE_REAL >> 24)

#define per_bp_event(code) \
		(code & (PER_CODE_IFETCH | PER_CODE_BRANCH))
#define per_write_wp_event(code) \
		(code & (PER_CODE_STORE | PER_CODE_STORE_REAL))

static int debug_exit_required(struct kvm_vcpu *vcpu, u8 perc,
			       unsigned long peraddr)
{
	struct kvm_debug_exit_arch *debug_exit = &vcpu->run->debug.arch;
	struct kvm_hw_wp_info_arch *wp_info = NULL;
	struct kvm_hw_bp_info_arch *bp_info = NULL;
	unsigned long addr = vcpu->arch.sie_block->gpsw.addr;

	if (guestdbg_hw_bp_enabled(vcpu)) {
		if (per_write_wp_event(perc) &&
		    vcpu->arch.guestdbg.nr_hw_wp > 0) {
			wp_info = any_wp_changed(vcpu);
			if (wp_info) {
				debug_exit->addr = wp_info->addr;
				debug_exit->type = KVM_HW_WP_WRITE;
				goto exit_required;
			}
		}
		if (per_bp_event(perc) &&
		    vcpu->arch.guestdbg.nr_hw_bp > 0) {
			bp_info = find_hw_bp(vcpu, addr);
			/* remove duplicate events if PC==PER address */
			if (bp_info && (addr != peraddr)) {
				debug_exit->addr = addr;
				debug_exit->type = KVM_HW_BP;
				vcpu->arch.guestdbg.last_bp = addr;
				goto exit_required;
			}
			/* breakpoint missed */
			bp_info = find_hw_bp(vcpu, peraddr);
			if (bp_info && vcpu->arch.guestdbg.last_bp != peraddr) {
				debug_exit->addr = peraddr;
				debug_exit->type = KVM_HW_BP;
				goto exit_required;
			}
		}
	}
	if (guestdbg_sstep_enabled(vcpu) && per_bp_event(perc)) {
		debug_exit->addr = addr;
		debug_exit->type = KVM_SINGLESTEP;
		goto exit_required;
	}

	return 0;
exit_required:
	return 1;
}
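/*
 * Summary comment added for clarity: per_fetched_addr() determines the
 * address of the instruction a PER instruction-fetching event refers to.
 * If that instruction is EXECUTE or EXECUTE RELATIVE LONG, the event is
 * attributed to the execute target, so the target address is computed.
 * Returns 0 on success, < 0 on error, > 0 if an access exception occurred
 * while reading guest memory.
 */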
static int per_fetched_addr(struct kvm_vcpu *vcpu, unsigned long *addr)
{
	u8 exec_ilen = 0;
	u16 opcode[3];
	int rc;

	if (vcpu->arch.sie_block->icptcode == ICPT_PROGI) {
		/* PER address references the fetched or the execute instr */
		*addr = vcpu->arch.sie_block->peraddr;
		/*
		 * Manually detect if we have an EXECUTE instruction. As
		 * instructions are always 2 byte aligned we can read the
		 * first two bytes unconditionally
		 */
		rc = read_guest_instr(vcpu, *addr, &opcode, 2);
		if (rc)
			return rc;
		if (opcode[0] >> 8 == 0x44)
			exec_ilen = 4;
		if ((opcode[0] & 0xff0f) == 0xc600)
			exec_ilen = 6;
	} else {
		/* instr was suppressed, calculate the responsible instr */
		*addr = __rewind_psw(vcpu->arch.sie_block->gpsw,
				     kvm_s390_get_ilen(vcpu));
		if (vcpu->arch.sie_block->icptstatus & 0x01) {
			exec_ilen = (vcpu->arch.sie_block->icptstatus & 0x60) >> 4;
			if (!exec_ilen)
				exec_ilen = 4;
		}
	}

	if (exec_ilen) {
		/* read the complete EXECUTE instr to detect the fetched addr */
		rc = read_guest_instr(vcpu, *addr, &opcode, exec_ilen);
		if (rc)
			return rc;
		if (exec_ilen == 6) {
			/* EXECUTE RELATIVE LONG - RIL-b format */
			s32 rl = *((s32 *) (opcode + 1));

			/* rl is a _signed_ 32 bit value specifying halfwords */
			*addr += (u64)(s64) rl * 2;
		} else {
			/* EXECUTE - RX-a format */
			u32 base = (opcode[1] & 0xf000) >> 12;
			u32 disp = opcode[1] & 0x0fff;
			u32 index = opcode[0] & 0x000f;

			*addr = base ? vcpu->run->s.regs.gprs[base] : 0;
			*addr += index ? vcpu->run->s.regs.gprs[index] : 0;
			*addr += disp;
		}
		*addr = kvm_s390_logical_to_effective(vcpu, *addr);
	}
	return 0;
}
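/*
 * Illustrative example (editor's note, not part of the original source): for
 * an EXECUTE instruction with X2 = 3, B2 = 5 and D2 = 0x100, and with
 * gprs[5] = 0x2000 and gprs[3] = 0x10, the fetched address computed above is
 * 0x2000 + 0x10 + 0x100 = 0x2110, which is then converted from a logical to
 * an effective address.
 */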
#define guest_per_enabled(vcpu) \
	(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER)

int kvm_s390_handle_per_ifetch_icpt(struct kvm_vcpu *vcpu)
{
	const u64 cr10 = vcpu->arch.sie_block->gcr[10];
	const u64 cr11 = vcpu->arch.sie_block->gcr[11];
	const u8 ilen = kvm_s390_get_ilen(vcpu);
	struct kvm_s390_pgm_info pgm_info = {
		.code = PGM_PER,
		.per_code = PER_CODE_IFETCH,
		.per_address = __rewind_psw(vcpu->arch.sie_block->gpsw, ilen),
	};
	unsigned long fetched_addr;
	int rc;

	/*
	 * The PSW points to the next instruction, therefore the intercepted
	 * instruction generated a PER i-fetch event. PER address therefore
	 * points at the previous PSW address (could be an EXECUTE function).
	 */
	if (!guestdbg_enabled(vcpu))
		return kvm_s390_inject_prog_irq(vcpu, &pgm_info);

	if (debug_exit_required(vcpu, pgm_info.per_code, pgm_info.per_address))
		vcpu->guest_debug |= KVM_GUESTDBG_EXIT_PENDING;

	if (!guest_per_enabled(vcpu) ||
	    !(vcpu->arch.sie_block->gcr[9] & PER_EVENT_IFETCH))
		return 0;

	rc = per_fetched_addr(vcpu, &fetched_addr);
	if (rc < 0)
		return rc;
	if (rc)
		/* instruction-fetching exceptions */
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	if (in_addr_range(fetched_addr, cr10, cr11))
		return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
	return 0;
}

static int filter_guest_per_event(struct kvm_vcpu *vcpu)
{
	const u8 perc = vcpu->arch.sie_block->perc;
	u64 addr = vcpu->arch.sie_block->gpsw.addr;
	u64 cr9 = vcpu->arch.sie_block->gcr[9];
	u64 cr10 = vcpu->arch.sie_block->gcr[10];
	u64 cr11 = vcpu->arch.sie_block->gcr[11];
	/* filter all events, demanded by the guest */
	u8 guest_perc = perc & (cr9 >> 24) & PER_CODE_MASK;
	unsigned long fetched_addr;
	int rc;

	if (!guest_per_enabled(vcpu))
		guest_perc = 0;

	/* filter "successful-branching" events */
	if (guest_perc & PER_CODE_BRANCH &&
	    cr9 & PER_CONTROL_BRANCH_ADDRESS &&
	    !in_addr_range(addr, cr10, cr11))
		guest_perc &= ~PER_CODE_BRANCH;

	/* filter "instruction-fetching" events */
	if (guest_perc & PER_CODE_IFETCH) {
		rc = per_fetched_addr(vcpu, &fetched_addr);
		if (rc < 0)
			return rc;
		/*
		 * Don't inject an irq on exceptions. This would make handling
		 * on icpt code 8 very complex (as PSW was already rewound).
		 */
		if (rc || !in_addr_range(fetched_addr, cr10, cr11))
			guest_perc &= ~PER_CODE_IFETCH;
	}

	/* All other PER events will be given to the guest */
	/* TODO: Check altered address/address space */

	vcpu->arch.sie_block->perc = guest_perc;

	if (!guest_perc)
		vcpu->arch.sie_block->iprcc &= ~PGM_PER;
	return 0;
}

#define pssec(vcpu)		(vcpu->arch.sie_block->gcr[1] & _ASCE_SPACE_SWITCH)
#define hssec(vcpu)		(vcpu->arch.sie_block->gcr[13] & _ASCE_SPACE_SWITCH)
#define old_ssec(vcpu)		((vcpu->arch.sie_block->tecmc >> 31) & 0x1)
#define old_as_is_home(vcpu)	!(vcpu->arch.sie_block->tecmc & 0xffff)
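/*
 * Note added for clarity: the pssec()/hssec() macros above test the
 * space-switch-event control bit in the primary (CR1) and home (CR13)
 * address-space-control elements, while old_ssec()/old_as_is_home() derive
 * the corresponding information for the address space that was in use
 * before the switch from the translation-exception identification saved in
 * the SIE block (tecmc).
 */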
int kvm_s390_handle_per_event(struct kvm_vcpu *vcpu)
{
	int rc, new_as;

	if (debug_exit_required(vcpu, vcpu->arch.sie_block->perc,
				vcpu->arch.sie_block->peraddr))
		vcpu->guest_debug |= KVM_GUESTDBG_EXIT_PENDING;

	rc = filter_guest_per_event(vcpu);
	if (rc)
		return rc;

	/*
	 * Only RP, SAC, SACF, PT, PTI, PR, PC instructions can trigger
	 * a space-switch event. PER events enforce space-switch events
	 * for these instructions. So if no PER event for the guest is left,
	 * we might have to filter the space-switch element out, too.
	 */
	if (vcpu->arch.sie_block->iprcc == PGM_SPACE_SWITCH) {
		vcpu->arch.sie_block->iprcc = 0;
		new_as = psw_bits(vcpu->arch.sie_block->gpsw).as;

		/*
		 * If the AS changed from / to home, we had RP, SAC or SACF
		 * instruction. Check primary and home space-switch-event
		 * controls. (theoretically home -> home produced no event)
		 */
		if (((new_as == PSW_BITS_AS_HOME) ^ old_as_is_home(vcpu)) &&
		    (pssec(vcpu) || hssec(vcpu)))
			vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH;

		/*
		 * PT, PTI, PR, PC instructions operate on primary AS only.
		 * Check if the primary-space-switch-event control was or got
		 * set.
		 */
		if (new_as == PSW_BITS_AS_PRIMARY && !old_as_is_home(vcpu) &&
		    (pssec(vcpu) || old_ssec(vcpu)))
			vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH;
	}
	return 0;
}