
TOMOYO Linux Cross Reference
Linux/arch/x86/xen/p2m.c


// SPDX-License-Identifier: GPL-2.0

/*
 * Xen leaves the responsibility for maintaining p2m mappings to the
 * guests themselves, but it must also access and update the p2m array
 * during suspend/resume when all the pages are reallocated.
 *
 * The logical flat p2m table is mapped to a linear kernel memory area.
 * For accesses by Xen a three-level tree linked via mfns only is set up to
 * allow the address space to be sparse.
 *
 *               Xen
 *                |
 *          p2m_top_mfn
 *              /   \
 * p2m_mid_mfn p2m_mid_mfn
 *         /           /
 *  p2m p2m p2m ...
 *
 * The p2m_mid_mfn pages are mapped by p2m_top_mfn_p.
 *
 * The p2m_top_mfn level is limited to 1 page, so the maximum representable
 * pseudo-physical address space is:
 *  P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE pages
 *
 * P2M_PER_PAGE depends on the architecture, as an mfn is always
 * unsigned long (8 bytes on 64-bit, 4 bytes on 32-bit), leading to
 * 512 and 1024 entries respectively.
 *
 * In short, these structures contain the Machine Frame Number (MFN) of the PFN.
 *
 * However, not all entries are filled with MFNs. Specifically, any leaf,
 * middle, or top-level entry that is void is assumed to be "missing".
 * So (for example)
 *  pfn_to_mfn(0x90909090)=INVALID_P2M_ENTRY.
 * We have a dedicated page p2m_missing with all entries being
 * INVALID_P2M_ENTRY. This page may be referenced multiple times in the p2m
 * list/tree in case there are multiple areas with P2M_PER_PAGE invalid pfns.
 *
 * We also have the possibility of setting 1-1 mappings on certain regions, so
 * that:
 *  pfn_to_mfn(0xc0000)=0xc0000
 *
 * The benefit of this is that for non-RAM regions (think PCI BARs, or
 * ACPI spaces) we can create mappings easily, because we get the PFN
 * value to match the MFN.
 *
 * For this to work efficiently we have one new page p2m_identity. All entries
 * in p2m_identity are set to INVALID_P2M_ENTRY (the Xen toolstack only
 * recognizes that value and real MFNs, no other special values).
 *
 * On lookup we spot that the entry points to p2m_identity and return the
 * identity value instead of dereferencing and returning INVALID_P2M_ENTRY.
 * If the entry points to an allocated page, we just proceed as before and
 * return the PFN. If the PFN has IDENTITY_FRAME_BIT set we unmask that in
 * the appropriate functions (pfn_to_mfn).
 *
 * The reason for having the IDENTITY_FRAME_BIT instead of just returning the
 * PFN is that we could find ourselves where pfn_to_mfn(pfn)==pfn for a
 * non-identity pfn. To protect ourselves against that, we elect to set (and
 * get) the IDENTITY_FRAME_BIT on all identity mapped PFNs.
 */
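
/*
 * Illustrative sketch, not part of the original file: how a
 * pfn_to_mfn()-style helper could unmask IDENTITY_FRAME_BIT, assuming
 * the semantics described above (the real helpers live in
 * arch/x86/include/asm/xen/page.h):
 *
 *	static unsigned long example_pfn_to_mfn(unsigned long pfn)
 *	{
 *		unsigned long mfn = get_phys_to_machine(pfn);
 *
 *		if (mfn != INVALID_P2M_ENTRY && (mfn & IDENTITY_FRAME_BIT))
 *			return mfn & ~IDENTITY_FRAME_BIT;  (equals pfn)
 *		return mfn;  (a real mfn, or INVALID_P2M_ENTRY)
 *	}
 */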

#include <linux/init.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cache.h>
#include <asm/setup.h>
#include <linux/uaccess.h>

#include <asm/xen/page.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <xen/balloon.h>
#include <xen/grant_table.h>

#include "xen-ops.h"

#define P2M_MID_PER_PAGE        (PAGE_SIZE / sizeof(unsigned long *))
#define P2M_TOP_PER_PAGE        (PAGE_SIZE / sizeof(unsigned long **))

#define MAX_P2M_PFN     (P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE)
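
/*
 * Worked example, not part of the original file: on 64-bit with 4 KiB
 * pages, sizeof(unsigned long *) == 8, so P2M_TOP_PER_PAGE and
 * P2M_MID_PER_PAGE are both 512, and P2M_PER_PAGE is 512 as noted in
 * the header comment. MAX_P2M_PFN is then 512 * 512 * 512 == 2^27
 * frames, i.e. 512 GiB of pseudo-physical address space.
 */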

#define PMDS_PER_MID_PAGE       (P2M_MID_PER_PAGE / PTRS_PER_PTE)

unsigned long *xen_p2m_addr __read_mostly;
EXPORT_SYMBOL_GPL(xen_p2m_addr);
unsigned long xen_p2m_size __read_mostly;
EXPORT_SYMBOL_GPL(xen_p2m_size);
unsigned long xen_max_p2m_pfn __read_mostly;
EXPORT_SYMBOL_GPL(xen_max_p2m_pfn);

#ifdef CONFIG_XEN_MEMORY_HOTPLUG_LIMIT
#define P2M_LIMIT CONFIG_XEN_MEMORY_HOTPLUG_LIMIT
#else
#define P2M_LIMIT 0
#endif

static DEFINE_SPINLOCK(p2m_update_lock);

static unsigned long *p2m_mid_missing_mfn;
static unsigned long *p2m_top_mfn;
static unsigned long **p2m_top_mfn_p;
static unsigned long *p2m_missing;
static unsigned long *p2m_identity;
static pte_t *p2m_missing_pte;
static pte_t *p2m_identity_pte;

/*
 * Hint at last populated PFN.
 *
 * Used to set HYPERVISOR_shared_info->arch.max_pfn so the toolstack
 * can avoid scanning the whole P2M (which may be sized to account for
 * hotplugged memory).
 */
static unsigned long xen_p2m_last_pfn;

static inline unsigned p2m_top_index(unsigned long pfn)
{
        BUG_ON(pfn >= MAX_P2M_PFN);
        return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
}

static inline unsigned p2m_mid_index(unsigned long pfn)
{
        return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
}
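
/*
 * Worked example, not part of the original file: with 512 entries per
 * level (64-bit), pfn 0x54321 decomposes as
 *	topidx = 0x54321 / (512 * 512) = 1
 *	mididx = (0x54321 / 512) % 512 = 161
 * and the leaf index within the p2m page is 0x54321 % 512 = 289.
 */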

static void p2m_top_mfn_init(unsigned long *top)
{
        unsigned i;

        for (i = 0; i < P2M_TOP_PER_PAGE; i++)
                top[i] = virt_to_mfn(p2m_mid_missing_mfn);
}

static void p2m_top_mfn_p_init(unsigned long **top)
{
        unsigned i;

        for (i = 0; i < P2M_TOP_PER_PAGE; i++)
                top[i] = p2m_mid_missing_mfn;
}

static void p2m_mid_mfn_init(unsigned long *mid, unsigned long *leaf)
{
        unsigned i;

        for (i = 0; i < P2M_MID_PER_PAGE; i++)
                mid[i] = virt_to_mfn(leaf);
}

static void p2m_init(unsigned long *p2m)
{
        unsigned i;

        for (i = 0; i < P2M_PER_PAGE; i++)
                p2m[i] = INVALID_P2M_ENTRY;
}

static void p2m_init_identity(unsigned long *p2m, unsigned long pfn)
{
        unsigned i;

        for (i = 0; i < P2M_PER_PAGE; i++)
                p2m[i] = IDENTITY_FRAME(pfn + i);
}

static void * __ref alloc_p2m_page(void)
{
        if (unlikely(!slab_is_available())) {
                void *ptr = memblock_alloc(PAGE_SIZE, PAGE_SIZE);

                if (!ptr)
                        panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
                              __func__, PAGE_SIZE, PAGE_SIZE);

                return ptr;
        }

        return (void *)__get_free_page(GFP_KERNEL);
}

static void __ref free_p2m_page(void *p)
{
        if (unlikely(!slab_is_available())) {
                memblock_free(p, PAGE_SIZE);
                return;
        }

        free_page((unsigned long)p);
}
/*
 * Build the parallel p2m_top_mfn and p2m_mid_mfn structures
 *
 * This is called both at boot time, and after resuming from suspend:
 * - At boot time we're called rather early, and must use memblock_alloc()
 *   to allocate memory.
 *
 * - After resume we're called from within stop_machine, but the mfn
 *   tree should already be completely allocated.
 */
void __ref xen_build_mfn_list_list(void)
{
        unsigned long pfn, mfn;
        pte_t *ptep;
        unsigned int level, topidx, mididx;
        unsigned long *mid_mfn_p;

        if (xen_start_info->flags & SIF_VIRT_P2M_4TOOLS)
                return;

        /* Pre-initialize p2m_top_mfn to be completely missing */
        if (p2m_top_mfn == NULL) {
                p2m_mid_missing_mfn = alloc_p2m_page();
                p2m_mid_mfn_init(p2m_mid_missing_mfn, p2m_missing);

                p2m_top_mfn_p = alloc_p2m_page();
                p2m_top_mfn_p_init(p2m_top_mfn_p);

                p2m_top_mfn = alloc_p2m_page();
                p2m_top_mfn_init(p2m_top_mfn);
        } else {
                /* Reinitialise; mfns all change after migration */
                p2m_mid_mfn_init(p2m_mid_missing_mfn, p2m_missing);
        }

        for (pfn = 0; pfn < xen_max_p2m_pfn && pfn < MAX_P2M_PFN;
             pfn += P2M_PER_PAGE) {
                topidx = p2m_top_index(pfn);
                mididx = p2m_mid_index(pfn);

                mid_mfn_p = p2m_top_mfn_p[topidx];
                ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn),
                                      &level);
                BUG_ON(!ptep || level != PG_LEVEL_4K);
                mfn = pte_mfn(*ptep);
                ptep = (pte_t *)((unsigned long)ptep & ~(PAGE_SIZE - 1));
                /*
                 * Don't bother allocating any mfn mid levels if they're
                 * just missing; just update the stored mfn, since all of
                 * them could have changed over a migration.
                 */
                if (ptep == p2m_missing_pte || ptep == p2m_identity_pte) {
                        BUG_ON(mididx);
                        BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
                        p2m_top_mfn[topidx] = virt_to_mfn(p2m_mid_missing_mfn);
                        pfn += (P2M_MID_PER_PAGE - 1) * P2M_PER_PAGE;
                        continue;
                }

                if (mid_mfn_p == p2m_mid_missing_mfn) {
                        mid_mfn_p = alloc_p2m_page();
                        p2m_mid_mfn_init(mid_mfn_p, p2m_missing);

                        p2m_top_mfn_p[topidx] = mid_mfn_p;
                }

                p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
                mid_mfn_p[mididx] = mfn;
        }
}

void xen_setup_mfn_list_list(void)
{
        BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

        if (xen_start_info->flags & SIF_VIRT_P2M_4TOOLS)
                HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list = ~0UL;
        else
                HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
                        virt_to_mfn(p2m_top_mfn);
        HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn;
        HYPERVISOR_shared_info->arch.p2m_generation = 0;
        HYPERVISOR_shared_info->arch.p2m_vaddr = (unsigned long)xen_p2m_addr;
        HYPERVISOR_shared_info->arch.p2m_cr3 =
                xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));
}

/* Set up p2m_top to point to the domain-builder provided p2m pages */
void __init xen_build_dynamic_phys_to_machine(void)
{
        unsigned long pfn;

        xen_p2m_addr = (unsigned long *)xen_start_info->mfn_list;
        xen_p2m_size = ALIGN(xen_start_info->nr_pages, P2M_PER_PAGE);

        for (pfn = xen_start_info->nr_pages; pfn < xen_p2m_size; pfn++)
                xen_p2m_addr[pfn] = INVALID_P2M_ENTRY;

        xen_max_p2m_pfn = xen_p2m_size;
}

#define P2M_TYPE_IDENTITY       0
#define P2M_TYPE_MISSING        1
#define P2M_TYPE_PFN            2
#define P2M_TYPE_UNKNOWN        3

static int xen_p2m_elem_type(unsigned long pfn)
{
        unsigned long mfn;

        if (pfn >= xen_p2m_size)
                return P2M_TYPE_IDENTITY;

        mfn = xen_p2m_addr[pfn];

        if (mfn == INVALID_P2M_ENTRY)
                return P2M_TYPE_MISSING;

        if (mfn & IDENTITY_FRAME_BIT)
                return P2M_TYPE_IDENTITY;

        return P2M_TYPE_PFN;
}

static void __init xen_rebuild_p2m_list(unsigned long *p2m)
{
        unsigned int i, chunk;
        unsigned long pfn;
        unsigned long *mfns;
        pte_t *ptep;
        pmd_t *pmdp;
        int type;

        p2m_missing = alloc_p2m_page();
        p2m_init(p2m_missing);
        p2m_identity = alloc_p2m_page();
        p2m_init(p2m_identity);

        p2m_missing_pte = alloc_p2m_page();
        paravirt_alloc_pte(&init_mm, __pa(p2m_missing_pte) >> PAGE_SHIFT);
        p2m_identity_pte = alloc_p2m_page();
        paravirt_alloc_pte(&init_mm, __pa(p2m_identity_pte) >> PAGE_SHIFT);
        for (i = 0; i < PTRS_PER_PTE; i++) {
                set_pte(p2m_missing_pte + i,
                        pfn_pte(PFN_DOWN(__pa(p2m_missing)), PAGE_KERNEL_RO));
                set_pte(p2m_identity_pte + i,
                        pfn_pte(PFN_DOWN(__pa(p2m_identity)), PAGE_KERNEL_RO));
        }

        for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += chunk) {
                /*
                 * Try to map missing/identity PMDs or p2m-pages if possible.
                 * We have to respect the structure of the mfn_list_list
                 * which will be built just afterwards.
                 * The chunk size to test is one p2m page if we are in the
                 * middle of an mfn_list_list mid page, and the complete mid
                 * page area if we are at index 0 of the mid page. Note that
                 * a mid page might cover more than one PMD, e.g. on 32-bit
                 * PAE kernels.
                 */
                chunk = (pfn & (P2M_PER_PAGE * P2M_MID_PER_PAGE - 1)) ?
                        P2M_PER_PAGE : P2M_PER_PAGE * P2M_MID_PER_PAGE;

                type = xen_p2m_elem_type(pfn);
                i = 0;
                if (type != P2M_TYPE_PFN)
                        for (i = 1; i < chunk; i++)
                                if (xen_p2m_elem_type(pfn + i) != type)
                                        break;
                if (i < chunk)
                        /* Reset to minimal chunk size. */
                        chunk = P2M_PER_PAGE;

                if (type == P2M_TYPE_PFN || i < chunk) {
                        /* Use initial p2m page contents. */
                        mfns = alloc_p2m_page();
                        copy_page(mfns, xen_p2m_addr + pfn);
                        ptep = populate_extra_pte((unsigned long)(p2m + pfn));
                        set_pte(ptep,
                                pfn_pte(PFN_DOWN(__pa(mfns)), PAGE_KERNEL));
                        continue;
                }

                if (chunk == P2M_PER_PAGE) {
                        /* Map complete missing or identity p2m-page. */
                        mfns = (type == P2M_TYPE_MISSING) ?
                                p2m_missing : p2m_identity;
                        ptep = populate_extra_pte((unsigned long)(p2m + pfn));
                        set_pte(ptep,
                                pfn_pte(PFN_DOWN(__pa(mfns)), PAGE_KERNEL_RO));
                        continue;
                }

                /* Complete missing or identity PMD(s) can be mapped. */
                ptep = (type == P2M_TYPE_MISSING) ?
                        p2m_missing_pte : p2m_identity_pte;
                for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
                        pmdp = populate_extra_pmd(
                                (unsigned long)(p2m + pfn) + i * PMD_SIZE);
                        set_pmd(pmdp, __pmd(__pa(ptep) | _KERNPG_TABLE));
                }
        }
}
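
/*
 * Illustrative walk-through of the chunk logic above, not part of the
 * original file: at a pfn aligned to P2M_PER_PAGE * P2M_MID_PER_PAGE
 * (e.g. 0x40000 with 512-entry levels), a region that is uniformly
 * missing or identity gets whole PMDs pointed at the shared read-only
 * p2m_missing_pte/p2m_identity_pte pages. A pfn in the middle of a mid
 * page, or a region of mixed types, falls back to chunk == P2M_PER_PAGE:
 * a single shared read-only p2m page if the chunk is uniform, or a
 * read-write copy of the initial p2m page contents if real MFNs occur.
 */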

void __init xen_vmalloc_p2m_tree(void)
{
        static struct vm_struct vm;
        unsigned long p2m_limit;

        xen_p2m_last_pfn = xen_max_p2m_pfn;

        p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;
        vm.flags = VM_ALLOC;
        vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit),
                        PMD_SIZE * PMDS_PER_MID_PAGE);
        vm_area_register_early(&vm, PMD_SIZE * PMDS_PER_MID_PAGE);
        pr_notice("p2m virtual area at %p, size is %lx\n", vm.addr, vm.size);

        xen_max_p2m_pfn = vm.size / sizeof(unsigned long);

        xen_rebuild_p2m_list(vm.addr);

        xen_p2m_addr = vm.addr;
        xen_p2m_size = xen_max_p2m_pfn;

        xen_inv_extra_mem();
}

unsigned long get_phys_to_machine(unsigned long pfn)
{
        pte_t *ptep;
        unsigned int level;

        if (unlikely(pfn >= xen_p2m_size)) {
                if (pfn < xen_max_p2m_pfn)
                        return xen_chk_extra_mem(pfn);

                return IDENTITY_FRAME(pfn);
        }

        ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn), &level);
        BUG_ON(!ptep || level != PG_LEVEL_4K);

        /*
         * INVALID_P2M_ENTRY is filled into both p2m_*identity and
         * p2m_*missing, so returning INVALID_P2M_ENTRY here would
         * be wrong.
         */
        if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_identity)))
                return IDENTITY_FRAME(pfn);

        return xen_p2m_addr[pfn];
}
EXPORT_SYMBOL_GPL(get_phys_to_machine);
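
/*
 * Illustrative consequence of the p2m_identity check above, not part of
 * the original file: for a pfn whose leaf is the shared p2m_identity
 * page, the stored value is INVALID_P2M_ENTRY, yet get_phys_to_machine()
 * returns IDENTITY_FRAME(pfn), i.e. the pfn itself with
 * IDENTITY_FRAME_BIT set, so callers can tell identity-mapped frames
 * apart from genuinely missing ones.
 */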

/*
 * Allocate new pmd(s). It is checked whether the old pmd is still in place.
 * If not, nothing is changed. This is okay as the only reason for allocating
 * a new pmd is to replace p2m_missing_pte or p2m_identity_pte by an
 * individual pmd.
 */
static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *pte_pg)
{
        pte_t *ptechk;
        pte_t *pte_newpg[PMDS_PER_MID_PAGE];
        pmd_t *pmdp;
        unsigned int level;
        unsigned long flags;
        unsigned long vaddr;
        int i;

        /* Do all allocations first to bail out in error case. */
        for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
                pte_newpg[i] = alloc_p2m_page();
                if (!pte_newpg[i]) {
                        for (i--; i >= 0; i--)
                                free_p2m_page(pte_newpg[i]);

                        return NULL;
                }
        }

        vaddr = addr & ~(PMD_SIZE * PMDS_PER_MID_PAGE - 1);

        for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
                copy_page(pte_newpg[i], pte_pg);
                paravirt_alloc_pte(&init_mm, __pa(pte_newpg[i]) >> PAGE_SHIFT);

                pmdp = lookup_pmd_address(vaddr);
                BUG_ON(!pmdp);

                spin_lock_irqsave(&p2m_update_lock, flags);

                ptechk = lookup_address(vaddr, &level);
                if (ptechk == pte_pg) {
                        HYPERVISOR_shared_info->arch.p2m_generation++;
                        wmb(); /* Tools are synchronizing via p2m_generation. */
                        set_pmd(pmdp,
                                __pmd(__pa(pte_newpg[i]) | _KERNPG_TABLE));
                        wmb(); /* Tools are synchronizing via p2m_generation. */
                        HYPERVISOR_shared_info->arch.p2m_generation++;
                        pte_newpg[i] = NULL;
                }

                spin_unlock_irqrestore(&p2m_update_lock, flags);

                if (pte_newpg[i]) {
                        paravirt_release_pte(__pa(pte_newpg[i]) >> PAGE_SHIFT);
                        free_p2m_page(pte_newpg[i]);
                }

                vaddr += PMD_SIZE;
        }

        return lookup_address(addr, &level);
}

/*
 * Fully allocate the p2m structure for a given pfn.  We need to check
 * that both the top and mid levels are allocated, and make sure the
 * parallel mfn tree is kept in sync.  We may race with other cpus, so
 * the new pages are installed with cmpxchg; if we lose the race then
 * simply free the page we allocated and use the one that's there.
 */
int xen_alloc_p2m_entry(unsigned long pfn)
{
        unsigned topidx;
        unsigned long *top_mfn_p, *mid_mfn;
        pte_t *ptep, *pte_pg;
        unsigned int level;
        unsigned long flags;
        unsigned long addr = (unsigned long)(xen_p2m_addr + pfn);
        unsigned long p2m_pfn;

        ptep = lookup_address(addr, &level);
        BUG_ON(!ptep || level != PG_LEVEL_4K);
        pte_pg = (pte_t *)((unsigned long)ptep & ~(PAGE_SIZE - 1));

        if (pte_pg == p2m_missing_pte || pte_pg == p2m_identity_pte) {
                /* PMD level is missing, allocate a new one */
                ptep = alloc_p2m_pmd(addr, pte_pg);
                if (!ptep)
                        return -ENOMEM;
        }

        if (p2m_top_mfn && pfn < MAX_P2M_PFN) {
                topidx = p2m_top_index(pfn);
                top_mfn_p = &p2m_top_mfn[topidx];
                mid_mfn = READ_ONCE(p2m_top_mfn_p[topidx]);

                BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p);

                if (mid_mfn == p2m_mid_missing_mfn) {
                        /* Separately check the mid mfn level */
                        unsigned long missing_mfn;
                        unsigned long mid_mfn_mfn;

                        mid_mfn = alloc_p2m_page();
                        if (!mid_mfn)
                                return -ENOMEM;

                        p2m_mid_mfn_init(mid_mfn, p2m_missing);

                        missing_mfn = virt_to_mfn(p2m_mid_missing_mfn);
                        mid_mfn_mfn = virt_to_mfn(mid_mfn);
                        /* try_cmpxchg() updates missing_mfn on failure. */
                        if (try_cmpxchg(top_mfn_p, &missing_mfn, mid_mfn_mfn)) {
                                p2m_top_mfn_p[topidx] = mid_mfn;
                        } else {
                                free_p2m_page(mid_mfn);
                                mid_mfn = mfn_to_virt(missing_mfn);
                        }
                }
        } else {
                mid_mfn = NULL;
        }

        p2m_pfn = pte_pfn(READ_ONCE(*ptep));
        if (p2m_pfn == PFN_DOWN(__pa(p2m_identity)) ||
            p2m_pfn == PFN_DOWN(__pa(p2m_missing))) {
                /* p2m leaf page is missing */
                unsigned long *p2m;

                p2m = alloc_p2m_page();
                if (!p2m)
                        return -ENOMEM;

                if (p2m_pfn == PFN_DOWN(__pa(p2m_missing)))
                        p2m_init(p2m);
                else
                        p2m_init_identity(p2m, pfn & ~(P2M_PER_PAGE - 1));

                spin_lock_irqsave(&p2m_update_lock, flags);

                if (pte_pfn(*ptep) == p2m_pfn) {
                        HYPERVISOR_shared_info->arch.p2m_generation++;
                        wmb(); /* Tools are synchronizing via p2m_generation. */
                        set_pte(ptep,
                                pfn_pte(PFN_DOWN(__pa(p2m)), PAGE_KERNEL));
                        wmb(); /* Tools are synchronizing via p2m_generation. */
                        HYPERVISOR_shared_info->arch.p2m_generation++;
                        if (mid_mfn)
                                mid_mfn[p2m_mid_index(pfn)] = virt_to_mfn(p2m);
                        p2m = NULL;
                }

                spin_unlock_irqrestore(&p2m_update_lock, flags);

                if (p2m)
                        free_p2m_page(p2m);
        }

        /* Expanded the p2m? */
        if (pfn >= xen_p2m_last_pfn) {
                xen_p2m_last_pfn = ALIGN(pfn + 1, P2M_PER_PAGE);
                HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn;
        }

        return 0;
}
EXPORT_SYMBOL(xen_alloc_p2m_entry);
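
/*
 * Minimal sketch of the allocate/publish/discard pattern used above,
 * not part of the original file; "slot" and "new" are hypothetical
 * local names:
 *
 *	new = alloc_p2m_page();
 *	old = virt_to_mfn(p2m_mid_missing_mfn);
 *	if (try_cmpxchg(slot, &old, virt_to_mfn(new)))
 *		;	(we won the race; "new" is now published)
 *	else
 *		free_p2m_page(new);	(another cpu won; its page is live)
 */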

unsigned long __init set_phys_range_identity(unsigned long pfn_s,
                                      unsigned long pfn_e)
{
        unsigned long pfn;

        if (unlikely(pfn_s >= xen_p2m_size))
                return 0;

        if (pfn_s > pfn_e)
                return 0;

        if (pfn_e > xen_p2m_size)
                pfn_e = xen_p2m_size;

        for (pfn = pfn_s; pfn < pfn_e; pfn++)
                xen_p2m_addr[pfn] = IDENTITY_FRAME(pfn);

        return pfn - pfn_s;
}

bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
        pte_t *ptep;
        unsigned int level;

        /* Only invalid entries allowed above the highest p2m covered frame. */
        if (unlikely(pfn >= xen_p2m_size))
                return mfn == INVALID_P2M_ENTRY;

        /*
         * The interface requires atomic updates on p2m elements.
         * xen_safe_write_ulong() uses an atomic store via asm().
         */
        if (likely(!xen_safe_write_ulong(xen_p2m_addr + pfn, mfn)))
                return true;

        ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn), &level);
        BUG_ON(!ptep || level != PG_LEVEL_4K);

        if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_missing)))
                return mfn == INVALID_P2M_ENTRY;

        if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_identity)))
                return mfn == IDENTITY_FRAME(pfn);

        return false;
}

bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
        if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
                int ret;

                ret = xen_alloc_p2m_entry(pfn);
                if (ret < 0)
                        return false;

                return __set_phys_to_machine(pfn, mfn);
        }

        return true;
}
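
/*
 * Hypothetical caller sketch, not part of the original file:
 * set_phys_to_machine() expands the p2m on demand via
 * xen_alloc_p2m_entry(), so a ballooning-style user only checks the
 * final result:
 *
 *	if (!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))
 *		(handle failure: the p2m could not be expanded and the
 *		 mapping was not recorded)
 */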

int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
                            struct gnttab_map_grant_ref *kmap_ops,
                            struct page **pages, unsigned int count)
{
        int i, ret = 0;
        pte_t *pte;

        if (xen_feature(XENFEAT_auto_translated_physmap))
                return 0;

        if (kmap_ops) {
                ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
                                                kmap_ops, count);
                if (ret)
                        goto out;
        }

        for (i = 0; i < count; i++) {
                unsigned long mfn, pfn;
                struct gnttab_unmap_grant_ref unmap[2];
                int rc;

                /* Do not add to override if the map failed. */
                if (map_ops[i].status != GNTST_okay ||
                    (kmap_ops && kmap_ops[i].status != GNTST_okay))
                        continue;

                if (map_ops[i].flags & GNTMAP_contains_pte) {
                        pte = (pte_t *)(mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
                                (map_ops[i].host_addr & ~PAGE_MASK));
                        mfn = pte_mfn(*pte);
                } else {
                        mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
                }
                pfn = page_to_pfn(pages[i]);

                WARN(pfn_to_mfn(pfn) != INVALID_P2M_ENTRY, "page must be ballooned");

                if (likely(set_phys_to_machine(pfn, FOREIGN_FRAME(mfn))))
                        continue;

                /*
                 * Signal an error for this slot. This in turn requires
                 * immediate unmapping.
                 */
                map_ops[i].status = GNTST_general_error;
                unmap[0].host_addr = map_ops[i].host_addr;
                unmap[0].handle = map_ops[i].handle;
                map_ops[i].handle = INVALID_GRANT_HANDLE;
                if (map_ops[i].flags & GNTMAP_device_map)
                        unmap[0].dev_bus_addr = map_ops[i].dev_bus_addr;
                else
                        unmap[0].dev_bus_addr = 0;

                if (kmap_ops) {
                        kmap_ops[i].status = GNTST_general_error;
                        unmap[1].host_addr = kmap_ops[i].host_addr;
                        unmap[1].handle = kmap_ops[i].handle;
                        kmap_ops[i].handle = INVALID_GRANT_HANDLE;
                        if (kmap_ops[i].flags & GNTMAP_device_map)
                                unmap[1].dev_bus_addr = kmap_ops[i].dev_bus_addr;
                        else
                                unmap[1].dev_bus_addr = 0;
                }

                /*
                 * Pre-populate both status fields, to be recognizable in
                 * the log message below.
                 */
                unmap[0].status = 1;
                unmap[1].status = 1;

                rc = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
                                               unmap, 1 + !!kmap_ops);
                if (rc || unmap[0].status != GNTST_okay ||
                    unmap[1].status != GNTST_okay)
                        pr_err_once("gnttab unmap failed: rc=%d st0=%d st1=%d\n",
                                    rc, unmap[0].status, unmap[1].status);
        }

out:
        return ret;
}

int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
                              struct gnttab_unmap_grant_ref *kunmap_ops,
                              struct page **pages, unsigned int count)
{
        int i, ret = 0;

        if (xen_feature(XENFEAT_auto_translated_physmap))
                return 0;

        for (i = 0; i < count; i++) {
                unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i]));
                unsigned long pfn = page_to_pfn(pages[i]);

                if (mfn != INVALID_P2M_ENTRY && (mfn & FOREIGN_FRAME_BIT))
                        set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
                else
                        ret = -EINVAL;
        }
        if (kunmap_ops)
                ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
                                                kunmap_ops, count) ?: ret;

        return ret;
}

#ifdef CONFIG_XEN_DEBUG_FS
#include <linux/debugfs.h>
static int p2m_dump_show(struct seq_file *m, void *v)
{
        static const char * const type_name[] = {
                                [P2M_TYPE_IDENTITY] = "identity",
                                [P2M_TYPE_MISSING] = "missing",
                                [P2M_TYPE_PFN] = "pfn",
                                [P2M_TYPE_UNKNOWN] = "abnormal"};
        unsigned long pfn, first_pfn;
        int type, prev_type;

        prev_type = xen_p2m_elem_type(0);
        first_pfn = 0;

        for (pfn = 0; pfn < xen_p2m_size; pfn++) {
                type = xen_p2m_elem_type(pfn);
                if (type != prev_type) {
                        seq_printf(m, " [0x%lx->0x%lx] %s\n", first_pfn, pfn,
                                   type_name[prev_type]);
                        prev_type = type;
                        first_pfn = pfn;
                }
        }
        seq_printf(m, " [0x%lx->0x%lx] %s\n", first_pfn, pfn,
                   type_name[prev_type]);
        return 0;
}

DEFINE_SHOW_ATTRIBUTE(p2m_dump);

static struct dentry *d_mmu_debug;

static int __init xen_p2m_debugfs(void)
{
        struct dentry *d_xen = xen_init_debugfs();

        d_mmu_debug = debugfs_create_dir("mmu", d_xen);

        debugfs_create_file("p2m", 0600, d_mmu_debug, NULL, &p2m_dump_fops);
        return 0;
}
fs_initcall(xen_p2m_debugfs);
#endif /* CONFIG_XEN_DEBUG_FS */
