
TOMOYO Linux Cross Reference
Linux/tools/testing/selftests/kvm/lib/s390x/processor.c


// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM selftest s390x library code - CPU-related functions (page tables...)
 *
 * Copyright (C) 2019, Red Hat, Inc.
 */

#include "processor.h"
#include "kvm_util.h"

#define PAGES_PER_REGION 4

void virt_arch_pgd_alloc(struct kvm_vm *vm)
{
        vm_paddr_t paddr;

        TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
                    vm->page_size);

        if (vm->pgd_created)
                return;

        paddr = vm_phy_pages_alloc(vm, PAGES_PER_REGION,
                                   KVM_GUEST_PAGE_TABLE_MIN_PADDR,
                                   vm->memslots[MEM_REGION_PT]);
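        /* Fill with 0xff so every region entry starts with the invalid bit set */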
        memset(addr_gpa2hva(vm, paddr), 0xff, PAGES_PER_REGION * vm->page_size);

        vm->pgd = paddr;
        vm->pgd_created = true;
}

/*
 * Allocate 4 pages for a region/segment table (ri < 4), or one page for
 * a page table (ri == 4). Returns a suitable region/segment table entry
 * which points to the freshly allocated pages.
 */
static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri)
{
        uint64_t taddr;

        taddr = vm_phy_pages_alloc(vm, ri < 4 ? PAGES_PER_REGION : 1,
                                   KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
        memset(addr_gpa2hva(vm, taddr), 0xff, PAGES_PER_REGION * vm->page_size);

        return (taddr & REGION_ENTRY_ORIGIN)
                | (((4 - ri) << 2) & REGION_ENTRY_TYPE)
                | ((ri < 4 ? (PAGES_PER_REGION - 1) : 0) & REGION_ENTRY_LENGTH);
}
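
For orientation, a hedged worked example of the entry built above (the table origin 0x6000 is hypothetical; the REGION_ENTRY_* masks from the selftests' s390 processor.h are assumed to follow the usual DAT layout):

    For ri == 1 and taddr == 0x6000:

          (0x6000 & REGION_ENTRY_ORIGIN)                 = 0x6000   table origin
        | (((4 - 1) << 2) & REGION_ENTRY_TYPE)           = 0x000c   region-first-table entry
        | ((PAGES_PER_REGION - 1) & REGION_ENTRY_LENGTH) = 0x0003   pointed-to table spans 4 pages
                                                   entry = 0x600f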

void virt_arch_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa)
{
        int ri, idx;
        uint64_t *entry;

        TEST_ASSERT((gva % vm->page_size) == 0,
                "Virtual address not on page boundary,\n"
                "  vaddr: 0x%lx vm->page_size: 0x%x",
                gva, vm->page_size);
        TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
                (gva >> vm->page_shift)),
                "Invalid virtual address, vaddr: 0x%lx",
                gva);
        TEST_ASSERT((gpa % vm->page_size) == 0,
                "Physical address not on page boundary,\n"
                "  paddr: 0x%lx vm->page_size: 0x%x",
                gpa, vm->page_size);
        TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,
                "Physical address beyond maximum supported,\n"
                "  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
                gpa, vm->max_gfn, vm->page_size);

        /* Walk through region and segment tables */
        entry = addr_gpa2hva(vm, vm->pgd);
        for (ri = 1; ri <= 4; ri++) {
                idx = (gva >> (64 - 11 * ri)) & 0x7ffu;
                if (entry[idx] & REGION_ENTRY_INVALID)
                        entry[idx] = virt_alloc_region(vm, ri);
                entry = addr_gpa2hva(vm, entry[idx] & REGION_ENTRY_ORIGIN);
        }

        /* Fill in page table entry */
        idx = (gva >> 12) & 0x0ffu;             /* page index */
        if (!(entry[idx] & PAGE_INVALID))
                fprintf(stderr,
                        "WARNING: PTE for gpa=0x%"PRIx64" already set!\n", gpa);
        entry[idx] = gpa;
}
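
A minimal standalone sketch of how the walk above slices a 64-bit guest virtual address into four 11-bit table indices, an 8-bit page-table index, and a 12-bit byte offset; the address value is arbitrary:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t gva = 0x0000020000451000UL;    /* arbitrary example address */
        int ri;

        /* Four 11-bit indices: region-first, region-second, region-third, segment */
        for (ri = 1; ri <= 4; ri++)
                printf("level %d index: 0x%llx\n", ri,
                       (unsigned long long)((gva >> (64 - 11 * ri)) & 0x7ff));

        /* 8-bit page-table index and 12-bit byte offset */
        printf("page index: 0x%llx, offset: 0x%llx\n",
               (unsigned long long)((gva >> 12) & 0xff),
               (unsigned long long)(gva & 0xfff));
        return 0;
}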

vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
        int ri, idx;
        uint64_t *entry;

        TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
                    vm->page_size);

        entry = addr_gpa2hva(vm, vm->pgd);
        for (ri = 1; ri <= 4; ri++) {
                idx = (gva >> (64 - 11 * ri)) & 0x7ffu;
                TEST_ASSERT(!(entry[idx] & REGION_ENTRY_INVALID),
                            "No region mapping for vm virtual address 0x%lx",
                            gva);
                entry = addr_gpa2hva(vm, entry[idx] & REGION_ENTRY_ORIGIN);
        }

        idx = (gva >> 12) & 0x0ffu;             /* page index */

        TEST_ASSERT(!(entry[idx] & PAGE_INVALID),
                    "No page mapping for vm virtual address 0x%lx", gva);

        return (entry[idx] & ~0xffful) + (gva & 0xffful);
}
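
As a hedged usage sketch, the translation above is normally reached through the arch-neutral wrappers in the selftests' kvm_util.h (virt_pg_map() and addr_gva2gpa(); both names assumed from that header), for example inside a test that has already called virt_pgd_alloc():

static void map_and_check(struct kvm_vm *vm)
{
        uint64_t gva = 0x1000000;               /* hypothetical, page-aligned */
        vm_paddr_t gpa = 0x200000;              /* hypothetical, page-aligned */

        virt_pg_map(vm, gva, gpa);              /* ends up in virt_arch_pg_map() */
        TEST_ASSERT(addr_gva2gpa(vm, gva + 0x42) == gpa + 0x42,
                    "round trip through the DAT tables failed");
}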

static void virt_dump_ptes(FILE *stream, struct kvm_vm *vm, uint8_t indent,
                           uint64_t ptea_start)
{
        uint64_t *pte, ptea;

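        /* A page table holds 256 PTEs of 8 bytes each, hence the 0x100 * 8 span */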
        for (ptea = ptea_start; ptea < ptea_start + 0x100 * 8; ptea += 8) {
                pte = addr_gpa2hva(vm, ptea);
                if (*pte & PAGE_INVALID)
                        continue;
                fprintf(stream, "%*spte @ 0x%lx: 0x%016lx\n",
                        indent, "", ptea, *pte);
        }
}

static void virt_dump_region(FILE *stream, struct kvm_vm *vm, uint8_t indent,
                             uint64_t reg_tab_addr)
{
        uint64_t addr, *entry;

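        /* A region/segment table spans 4 pages: 2048 entries of 8 bytes each */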
        for (addr = reg_tab_addr; addr < reg_tab_addr + 0x400 * 8; addr += 8) {
                entry = addr_gpa2hva(vm, addr);
                if (*entry & REGION_ENTRY_INVALID)
                        continue;
                fprintf(stream, "%*srt%lde @ 0x%lx: 0x%016lx\n",
                        indent, "", 4 - ((*entry & REGION_ENTRY_TYPE) >> 2),
                        addr, *entry);
                if (*entry & REGION_ENTRY_TYPE) {
                        virt_dump_region(stream, vm, indent + 2,
                                         *entry & REGION_ENTRY_ORIGIN);
                } else {
                        virt_dump_ptes(stream, vm, indent + 2,
                                       *entry & REGION_ENTRY_ORIGIN);
                }
        }
}

void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
        if (!vm->pgd_created)
                return;

        virt_dump_region(stream, vm, indent, vm->pgd);
}

void vcpu_arch_set_entry_point(struct kvm_vcpu *vcpu, void *guest_code)
{
        vcpu->run->psw_addr = (uintptr_t)guest_code;
}

struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
{
        size_t stack_size = DEFAULT_STACK_PGS * getpagesize();
        uint64_t stack_vaddr;
        struct kvm_regs regs;
        struct kvm_sregs sregs;
        struct kvm_vcpu *vcpu;

        TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
                    vm->page_size);

        stack_vaddr = __vm_vaddr_alloc(vm, stack_size,
                                       DEFAULT_GUEST_STACK_VADDR_MIN,
                                       MEM_REGION_DATA);

        vcpu = __vm_vcpu_add(vm, vcpu_id);

        /* Set up guest registers */
        vcpu_regs_get(vcpu, &regs);
        regs.gprs[15] = stack_vaddr + (DEFAULT_STACK_PGS * getpagesize()) - 160;
        vcpu_regs_set(vcpu, &regs);

        vcpu_sregs_get(vcpu, &sregs);
        sregs.crs[0] |= 0x00040000;             /* Enable floating point regs */
        sregs.crs[1] = vm->pgd | 0xf;           /* Primary region table */
        vcpu_sregs_set(vcpu, &sregs);

        vcpu->run->psw_mask = 0x0400000180000000ULL;  /* DAT enabled + 64 bit mode */

        return vcpu;
}
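
A brief, hedged decode of the two magic values above, assuming the standard s390 PSW and ASCE layouts (bits numbered with bit 0 as the most significant):

        psw_mask = 0x0400000180000000
            bit 5        DAT on
            bits 31-32   addressing mode 11 = 64-bit

        crs[1] = vm->pgd | 0xf   (primary ASCE)
            bits 0-51    region-first-table origin (vm->pgd)
            bits 60-61   designation type 3 = region-first table
            bits 62-63   table length 3 = table spans 4 pages (2048 entries)

The "- 160" applied to gprs[15] leaves room for the 160-byte register save area that the s390x ELF ABI expects below the initial stack pointer.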

void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
{
        va_list ap;
        struct kvm_regs regs;
        int i;

        TEST_ASSERT(num >= 1 && num <= 5, "Unsupported number of args,\n"
                    "  num: %u",
                    num);

        va_start(ap, num);
        vcpu_regs_get(vcpu, &regs);

        for (i = 0; i < num; i++)
                regs.gprs[i + 2] = va_arg(ap, uint64_t);

        vcpu_regs_set(vcpu, &regs);
        va_end(ap);
}
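
The values land in guest r2-r6, which the s390x ELF ABI uses for the first five integer arguments, so the guest entry point can take them as ordinary C parameters. A minimal hedged usage sketch (GUEST_ASSERT/GUEST_DONE, vm_create_with_one_vcpu() and vcpu_run() are the usual selftest helpers, assumed here):

static void guest_code(uint64_t a, uint64_t b)  /* arrives in r2 and r3 */
{
        GUEST_ASSERT(a + 1 == b);
        GUEST_DONE();
}

/* host side, after vm_create_with_one_vcpu(&vcpu, guest_code): */
        vcpu_args_set(vcpu, 2, 41ul, 42ul);
        vcpu_run(vcpu);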

void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
{
        fprintf(stream, "%*spstate: psw: 0x%.16llx:0x%.16llx\n",
                indent, "", vcpu->run->psw_mask, vcpu->run->psw_addr);
}

void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
{
}
