
TOMOYO Linux Cross Reference
Linux/arch/x86/xen/p2m.c

// SPDX-License-Identifier: GPL-2.0

/*
 * Xen leaves the responsibility for maintaining p2m mappings to the
 * guests themselves, but it must also access and update the p2m array
 * during suspend/resume when all the pages are reallocated.
 *
 * The logical flat p2m table is mapped to a linear kernel memory area.
 * For accesses by Xen a three-level tree, linked via mfns only, is set up
 * to allow the address space to be sparse.
 *
 *               Xen
 *                |
 *          p2m_top_mfn
 *              /   \
 * p2m_mid_mfn p2m_mid_mfn
 *         /           /
 *  p2m p2m p2m ...
 *
 * The p2m_mid_mfn pages are mapped by p2m_top_mfn_p.
 *
 * The p2m_top_mfn level is limited to 1 page, so the maximum representable
 * pseudo-physical address space is:
 *  P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE pages
 *
 * P2M_PER_PAGE depends on the architecture, as an mfn is always
 * unsigned long (8 bytes on 64-bit, 4 bytes on 32-bit), leading to
 * 512 and 1024 entries respectively.
 *
 * In short, these structures hold the Machine Frame Number (MFN) for
 * each PFN.
 *
 * However, not all entries are filled with MFNs. Any leaf, middle, or
 * top entry for which no backing page has been allocated is considered
 * "missing", so (for example)
 *  pfn_to_mfn(0x90909090)=INVALID_P2M_ENTRY.
 * We have a dedicated page p2m_missing with all entries being
 * INVALID_P2M_ENTRY. This page may be referenced multiple times in the p2m
 * list/tree when there are multiple areas with P2M_PER_PAGE invalid pfns.
 *
 * We also have the possibility of setting 1-1 mappings on certain regions,
 * so that:
 *  pfn_to_mfn(0xc0000)=0xc0000
 *
 * The benefit of this is that for non-RAM regions (think PCI BARs, or
 * ACPI spaces) we can create mappings easily, because the PFN value
 * matches the MFN.
 *
 * For this to work efficiently we have one new page p2m_identity. All
 * entries in p2m_identity are set to INVALID_P2M_ENTRY (the Xen toolstack
 * only recognizes that value and real MFNs, no other special values).
 *
 * On lookup we spot that the entry points to p2m_identity and return the
 * identity value instead of dereferencing and returning INVALID_P2M_ENTRY.
 * If the entry points to an allocated page, we just proceed as before and
 * return the PFN. If the PFN has the IDENTITY_FRAME_BIT set we unmask it
 * in the appropriate functions (pfn_to_mfn).
 *
 * The reason for having the IDENTITY_FRAME_BIT instead of just returning
 * the PFN is that we could find ourselves in a situation where
 * pfn_to_mfn(pfn)==pfn for a non-identity pfn. To guard against that we
 * set (and get) the IDENTITY_FRAME_BIT on all identity-mapped PFNs.
 */
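
/*
 * Editor's worked example (not part of the original source): with 4 KiB
 * pages on 64-bit, P2M_PER_PAGE = P2M_MID_PER_PAGE = P2M_TOP_PER_PAGE = 512,
 * so a pfn is decomposed as
 *
 *	topidx = pfn / (512 * 512);	// index into p2m_top
 *	mididx = (pfn / 512) % 512;	// index into a mid page
 *	idx    = pfn % 512;		// index into a leaf p2m page
 *
 * e.g. pfn 0x12345 -> topidx 0, mididx 0x91, idx 0x145. The maximum
 * representable space is 512^3 pages, i.e. 512 GiB.
 */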

#include <linux/init.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/acpi.h>

#include <asm/cache.h>
#include <asm/setup.h>
#include <linux/uaccess.h>

#include <asm/xen/page.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <xen/balloon.h>
#include <xen/grant_table.h>
#include <xen/hvc-console.h>

#include "xen-ops.h"

#define P2M_MID_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long *))
#define P2M_TOP_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long **))

#define MAX_P2M_PFN	(P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE)

#define PMDS_PER_MID_PAGE	(P2M_MID_PER_PAGE / PTRS_PER_PTE)

unsigned long *xen_p2m_addr __read_mostly;
EXPORT_SYMBOL_GPL(xen_p2m_addr);
unsigned long xen_p2m_size __read_mostly;
EXPORT_SYMBOL_GPL(xen_p2m_size);
unsigned long xen_max_p2m_pfn __read_mostly;
EXPORT_SYMBOL_GPL(xen_max_p2m_pfn);

#ifdef CONFIG_XEN_MEMORY_HOTPLUG_LIMIT
#define P2M_LIMIT CONFIG_XEN_MEMORY_HOTPLUG_LIMIT
#else
#define P2M_LIMIT 0
#endif
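
/*
 * Editor's note: P2M_LIMIT is expressed in GiB; xen_vmalloc_p2m_tree()
 * below converts it to a number of pages when sizing the p2m area.
 */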

static DEFINE_SPINLOCK(p2m_update_lock);

static unsigned long *p2m_mid_missing_mfn;
static unsigned long *p2m_top_mfn;
static unsigned long **p2m_top_mfn_p;
static unsigned long *p2m_missing;
static unsigned long *p2m_identity;
static pte_t *p2m_missing_pte;
static pte_t *p2m_identity_pte;

/*
 * Hint at the last populated PFN.
 *
 * Used to set HYPERVISOR_shared_info->arch.max_pfn so the toolstack
 * can avoid scanning the whole P2M (which may be sized to account for
 * hotplugged memory).
 */
static unsigned long xen_p2m_last_pfn;

static inline unsigned p2m_top_index(unsigned long pfn)
{
	BUG_ON(pfn >= MAX_P2M_PFN);
	return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
}

static inline unsigned p2m_mid_index(unsigned long pfn)
{
	return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
}

static void p2m_top_mfn_init(unsigned long *top)
{
	unsigned i;

	for (i = 0; i < P2M_TOP_PER_PAGE; i++)
		top[i] = virt_to_mfn(p2m_mid_missing_mfn);
}

static void p2m_top_mfn_p_init(unsigned long **top)
{
	unsigned i;

	for (i = 0; i < P2M_TOP_PER_PAGE; i++)
		top[i] = p2m_mid_missing_mfn;
}

static void p2m_mid_mfn_init(unsigned long *mid, unsigned long *leaf)
{
	unsigned i;

	for (i = 0; i < P2M_MID_PER_PAGE; i++)
		mid[i] = virt_to_mfn(leaf);
}

static void p2m_init(unsigned long *p2m)
{
	unsigned i;

	for (i = 0; i < P2M_PER_PAGE; i++)
		p2m[i] = INVALID_P2M_ENTRY;
}

static void p2m_init_identity(unsigned long *p2m, unsigned long pfn)
{
	unsigned i;

	for (i = 0; i < P2M_PER_PAGE; i++)
		p2m[i] = IDENTITY_FRAME(pfn + i);
}

static void * __ref alloc_p2m_page(void)
{
	if (unlikely(!slab_is_available())) {
		void *ptr = memblock_alloc(PAGE_SIZE, PAGE_SIZE);

		if (!ptr)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);

		return ptr;
	}

	return (void *)__get_free_page(GFP_KERNEL);
}

static void __ref free_p2m_page(void *p)
{
	if (unlikely(!slab_is_available())) {
		memblock_free(p, PAGE_SIZE);
		return;
	}

	free_page((unsigned long)p);
}

/*
 * Build the parallel p2m_top_mfn and p2m_mid_mfn structures
 *
 * This is called both at boot time, and after resuming from suspend:
 * - At boot time we're called rather early, and must use memblock (via
 *   alloc_p2m_page()) to allocate memory, as the slab allocator is not
 *   yet available.
 *
 * - After resume we're called from within stop_machine, but the mfn
 *   tree should already be completely allocated.
 */
void __ref xen_build_mfn_list_list(void)
{
	unsigned long pfn, mfn;
	pte_t *ptep;
	unsigned int level, topidx, mididx;
	unsigned long *mid_mfn_p;

	if (xen_start_info->flags & SIF_VIRT_P2M_4TOOLS)
		return;

	/* Pre-initialize p2m_top_mfn to be completely missing */
	if (p2m_top_mfn == NULL) {
		p2m_mid_missing_mfn = alloc_p2m_page();
		p2m_mid_mfn_init(p2m_mid_missing_mfn, p2m_missing);

		p2m_top_mfn_p = alloc_p2m_page();
		p2m_top_mfn_p_init(p2m_top_mfn_p);

		p2m_top_mfn = alloc_p2m_page();
		p2m_top_mfn_init(p2m_top_mfn);
	} else {
		/* Reinitialise, mfns all change after migration */
		p2m_mid_mfn_init(p2m_mid_missing_mfn, p2m_missing);
	}

	for (pfn = 0; pfn < xen_max_p2m_pfn && pfn < MAX_P2M_PFN;
	     pfn += P2M_PER_PAGE) {
		topidx = p2m_top_index(pfn);
		mididx = p2m_mid_index(pfn);

		mid_mfn_p = p2m_top_mfn_p[topidx];
		ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn),
				      &level);
		BUG_ON(!ptep || level != PG_LEVEL_4K);
		mfn = pte_mfn(*ptep);
		ptep = (pte_t *)((unsigned long)ptep & ~(PAGE_SIZE - 1));

		/*
		 * Don't bother allocating any mfn mid levels if they're
		 * just missing; just update the stored mfn, since all of
		 * them could have changed over a migration.
		 */
		if (ptep == p2m_missing_pte || ptep == p2m_identity_pte) {
			BUG_ON(mididx);
			BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
			p2m_top_mfn[topidx] = virt_to_mfn(p2m_mid_missing_mfn);
			pfn += (P2M_MID_PER_PAGE - 1) * P2M_PER_PAGE;
			continue;
		}

		if (mid_mfn_p == p2m_mid_missing_mfn) {
			mid_mfn_p = alloc_p2m_page();
			p2m_mid_mfn_init(mid_mfn_p, p2m_missing);

			p2m_top_mfn_p[topidx] = mid_mfn_p;
		}

		p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
		mid_mfn_p[mididx] = mfn;
	}
}

void xen_setup_mfn_list_list(void)
{
	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

	if (xen_start_info->flags & SIF_VIRT_P2M_4TOOLS)
		HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list = ~0UL;
	else
		HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
			virt_to_mfn(p2m_top_mfn);
	HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn;
	HYPERVISOR_shared_info->arch.p2m_generation = 0;
	HYPERVISOR_shared_info->arch.p2m_vaddr = (unsigned long)xen_p2m_addr;
	HYPERVISOR_shared_info->arch.p2m_cr3 =
		xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));
}

/* Set up p2m_top to point to the domain-builder provided p2m pages */
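/*
 * Editor's worked example (illustrative values): with nr_pages = 1000 and
 * P2M_PER_PAGE = 512, xen_p2m_size is rounded up to 1024 and pfns
 * 1000..1023 are marked INVALID_P2M_ENTRY by the loop below.
 */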
void __init xen_build_dynamic_phys_to_machine(void)
{
	unsigned long pfn;

	xen_p2m_addr = (unsigned long *)xen_start_info->mfn_list;
	xen_p2m_size = ALIGN(xen_start_info->nr_pages, P2M_PER_PAGE);

	for (pfn = xen_start_info->nr_pages; pfn < xen_p2m_size; pfn++)
		xen_p2m_addr[pfn] = INVALID_P2M_ENTRY;

	xen_max_p2m_pfn = xen_p2m_size;
}

#define P2M_TYPE_IDENTITY	0
#define P2M_TYPE_MISSING	1
#define P2M_TYPE_PFN		2
#define P2M_TYPE_UNKNOWN	3

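/*
 * Editor's note: classify a single p2m entry. pfns above the covered
 * range are treated as identity by convention, matching the behaviour
 * of get_phys_to_machine() below.
 */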
static int xen_p2m_elem_type(unsigned long pfn)
{
	unsigned long mfn;

	if (pfn >= xen_p2m_size)
		return P2M_TYPE_IDENTITY;

	mfn = xen_p2m_addr[pfn];

	if (mfn == INVALID_P2M_ENTRY)
		return P2M_TYPE_MISSING;

	if (mfn & IDENTITY_FRAME_BIT)
		return P2M_TYPE_IDENTITY;

	return P2M_TYPE_PFN;
}

static void __init xen_rebuild_p2m_list(unsigned long *p2m)
{
	unsigned int i, chunk;
	unsigned long pfn;
	unsigned long *mfns;
	pte_t *ptep;
	pmd_t *pmdp;
	int type;

	p2m_missing = alloc_p2m_page();
	p2m_init(p2m_missing);
	p2m_identity = alloc_p2m_page();
	p2m_init(p2m_identity);

	p2m_missing_pte = alloc_p2m_page();
	paravirt_alloc_pte(&init_mm, __pa(p2m_missing_pte) >> PAGE_SHIFT);
	p2m_identity_pte = alloc_p2m_page();
	paravirt_alloc_pte(&init_mm, __pa(p2m_identity_pte) >> PAGE_SHIFT);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		set_pte(p2m_missing_pte + i,
			pfn_pte(PFN_DOWN(__pa(p2m_missing)), PAGE_KERNEL_RO));
		set_pte(p2m_identity_pte + i,
			pfn_pte(PFN_DOWN(__pa(p2m_identity)), PAGE_KERNEL_RO));
	}

	for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += chunk) {
		/*
		 * Try to map missing/identity PMDs or p2m-pages if possible.
		 * We have to respect the structure of the mfn_list_list
		 * which will be built just afterwards.
		 * The chunk size to test is one p2m page if we are in the
		 * middle of an mfn_list_list mid page, and the complete mid
		 * page area if we are at index 0 of the mid page. Please
		 * note that a mid page might cover more than one PMD, e.g.
		 * on 32-bit PAE kernels.
		 */
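		/*
		 * Editor's example (64-bit, so P2M_PER_PAGE =
		 * P2M_MID_PER_PAGE = 512): at pfn 0x40000 (index 0 of a
		 * mid page) a whole 512 * 512 pfn area is tested; at
		 * pfn 0x40200 only one p2m page (512 pfns) is.
		 */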
		chunk = (pfn & (P2M_PER_PAGE * P2M_MID_PER_PAGE - 1)) ?
			P2M_PER_PAGE : P2M_PER_PAGE * P2M_MID_PER_PAGE;

		type = xen_p2m_elem_type(pfn);
		i = 0;
		if (type != P2M_TYPE_PFN)
			for (i = 1; i < chunk; i++)
				if (xen_p2m_elem_type(pfn + i) != type)
					break;
		if (i < chunk)
			/* Reset to minimal chunk size. */
			chunk = P2M_PER_PAGE;

		if (type == P2M_TYPE_PFN || i < chunk) {
			/* Use initial p2m page contents. */
			mfns = alloc_p2m_page();
			copy_page(mfns, xen_p2m_addr + pfn);
			ptep = populate_extra_pte((unsigned long)(p2m + pfn));
			set_pte(ptep,
				pfn_pte(PFN_DOWN(__pa(mfns)), PAGE_KERNEL));
			continue;
		}

		if (chunk == P2M_PER_PAGE) {
			/* Map complete missing or identity p2m-page. */
			mfns = (type == P2M_TYPE_MISSING) ?
				p2m_missing : p2m_identity;
			ptep = populate_extra_pte((unsigned long)(p2m + pfn));
			set_pte(ptep,
				pfn_pte(PFN_DOWN(__pa(mfns)), PAGE_KERNEL_RO));
			continue;
		}

		/* Complete missing or identity PMD(s) can be mapped. */
		ptep = (type == P2M_TYPE_MISSING) ?
			p2m_missing_pte : p2m_identity_pte;
		for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
			pmdp = populate_extra_pmd(
				(unsigned long)(p2m + pfn) + i * PMD_SIZE);
			set_pmd(pmdp, __pmd(__pa(ptep) | _KERNPG_TABLE));
		}
	}
}
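
/*
 * Editor's note: the p2m area below is aligned to
 * PMD_SIZE * PMDS_PER_MID_PAGE so that each mfn_list_list mid page covers
 * a whole number of PMDs, which is what allows complete missing/identity
 * PMDs to be shared in xen_rebuild_p2m_list() above.
 */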
void __init xen_vmalloc_p2m_tree(void)
{
	static struct vm_struct vm;
	unsigned long p2m_limit;

	xen_p2m_last_pfn = xen_max_p2m_pfn;

	p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;
	vm.flags = VM_ALLOC;
	vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit),
			PMD_SIZE * PMDS_PER_MID_PAGE);
	vm_area_register_early(&vm, PMD_SIZE * PMDS_PER_MID_PAGE);
	pr_notice("p2m virtual area at %p, size is %lx\n", vm.addr, vm.size);

	xen_max_p2m_pfn = vm.size / sizeof(unsigned long);

	xen_rebuild_p2m_list(vm.addr);

	xen_p2m_addr = vm.addr;
	xen_p2m_size = xen_max_p2m_pfn;

	xen_inv_extra_mem();
}

unsigned long get_phys_to_machine(unsigned long pfn)
{
	pte_t *ptep;
	unsigned int level;

	if (unlikely(pfn >= xen_p2m_size)) {
		if (pfn < xen_max_p2m_pfn)
			return xen_chk_extra_mem(pfn);

		return IDENTITY_FRAME(pfn);
	}

	ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn), &level);
	BUG_ON(!ptep || level != PG_LEVEL_4K);

	/*
	 * INVALID_P2M_ENTRY is filled in both p2m_*identity and
	 * p2m_*missing, so returning INVALID_P2M_ENTRY here would be
	 * wrong for an identity entry.
	 */
	if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_identity)))
		return IDENTITY_FRAME(pfn);

	return xen_p2m_addr[pfn];
}
EXPORT_SYMBOL_GPL(get_phys_to_machine);

/*
 * Allocate new pmd(s). It is checked whether the old pmd is still in place.
 * If not, nothing is changed. This is okay as the only reason for allocating
 * a new pmd is to replace p2m_missing_pte or p2m_identity_pte by an
 * individual pmd.
 */
static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *pte_pg)
{
	pte_t *ptechk;
	pte_t *pte_newpg[PMDS_PER_MID_PAGE];
	pmd_t *pmdp;
	unsigned int level;
	unsigned long flags;
	unsigned long vaddr;
	int i;

	/* Do all allocations first to bail out in error case. */
	for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
		pte_newpg[i] = alloc_p2m_page();
		if (!pte_newpg[i]) {
			for (i--; i >= 0; i--)
				free_p2m_page(pte_newpg[i]);

			return NULL;
		}
	}

	vaddr = addr & ~(PMD_SIZE * PMDS_PER_MID_PAGE - 1);

	for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
		copy_page(pte_newpg[i], pte_pg);
		paravirt_alloc_pte(&init_mm, __pa(pte_newpg[i]) >> PAGE_SHIFT);

		pmdp = lookup_pmd_address(vaddr);
		BUG_ON(!pmdp);

		spin_lock_irqsave(&p2m_update_lock, flags);

		ptechk = lookup_address(vaddr, &level);
		if (ptechk == pte_pg) {
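			/*
			 * Editor's note: p2m_generation works like a
			 * seqcount for the toolstack: the first increment
			 * makes it odd (update in progress), the second
			 * makes it even again once the new pmd is visible.
			 */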
			HYPERVISOR_shared_info->arch.p2m_generation++;
			wmb(); /* Tools are synchronizing via p2m_generation. */
			set_pmd(pmdp,
				__pmd(__pa(pte_newpg[i]) | _KERNPG_TABLE));
			wmb(); /* Tools are synchronizing via p2m_generation. */
			HYPERVISOR_shared_info->arch.p2m_generation++;
			pte_newpg[i] = NULL;
		}

		spin_unlock_irqrestore(&p2m_update_lock, flags);

		if (pte_newpg[i]) {
			paravirt_release_pte(__pa(pte_newpg[i]) >> PAGE_SHIFT);
			free_p2m_page(pte_newpg[i]);
		}

		vaddr += PMD_SIZE;
	}

	return lookup_address(addr, &level);
}

/*
 * Fully allocate the p2m structure for a given pfn.  We need to check
 * that both the top and mid levels are allocated, and make sure the
 * parallel mfn tree is kept in sync.  We may race with other cpus, so
 * the new pages are installed with cmpxchg; if we lose the race then
 * simply free the page we allocated and use the one that's there.
 */
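/*
 * Illustrative call pattern (editor's sketch, mirroring
 * set_phys_to_machine() further below):
 *
 *	if (!__set_phys_to_machine(pfn, mfn)) {
 *		if (xen_alloc_p2m_entry(pfn) < 0)
 *			return false;
 *		return __set_phys_to_machine(pfn, mfn);
 *	}
 */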
int xen_alloc_p2m_entry(unsigned long pfn)
{
	unsigned topidx;
	unsigned long *top_mfn_p, *mid_mfn;
	pte_t *ptep, *pte_pg;
	unsigned int level;
	unsigned long flags;
	unsigned long addr = (unsigned long)(xen_p2m_addr + pfn);
	unsigned long p2m_pfn;

	ptep = lookup_address(addr, &level);
	BUG_ON(!ptep || level != PG_LEVEL_4K);
	pte_pg = (pte_t *)((unsigned long)ptep & ~(PAGE_SIZE - 1));

	if (pte_pg == p2m_missing_pte || pte_pg == p2m_identity_pte) {
		/* PMD level is missing, allocate a new one */
		ptep = alloc_p2m_pmd(addr, pte_pg);
		if (!ptep)
			return -ENOMEM;
	}

	if (p2m_top_mfn && pfn < MAX_P2M_PFN) {
		topidx = p2m_top_index(pfn);
		top_mfn_p = &p2m_top_mfn[topidx];
		mid_mfn = READ_ONCE(p2m_top_mfn_p[topidx]);

		BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p);

		if (mid_mfn == p2m_mid_missing_mfn) {
			/* Separately check the mid mfn level */
			unsigned long missing_mfn;
			unsigned long mid_mfn_mfn;

			mid_mfn = alloc_p2m_page();
			if (!mid_mfn)
				return -ENOMEM;

			p2m_mid_mfn_init(mid_mfn, p2m_missing);

			missing_mfn = virt_to_mfn(p2m_mid_missing_mfn);
			mid_mfn_mfn = virt_to_mfn(mid_mfn);
			/* try_cmpxchg() updates missing_mfn on failure. */
			if (try_cmpxchg(top_mfn_p, &missing_mfn, mid_mfn_mfn)) {
				p2m_top_mfn_p[topidx] = mid_mfn;
			} else {
				free_p2m_page(mid_mfn);
				mid_mfn = mfn_to_virt(missing_mfn);
			}
		}
	} else {
		mid_mfn = NULL;
	}

	p2m_pfn = pte_pfn(READ_ONCE(*ptep));
	if (p2m_pfn == PFN_DOWN(__pa(p2m_identity)) ||
	    p2m_pfn == PFN_DOWN(__pa(p2m_missing))) {
		/* p2m leaf page is missing */
		unsigned long *p2m;

		p2m = alloc_p2m_page();
		if (!p2m)
			return -ENOMEM;

		if (p2m_pfn == PFN_DOWN(__pa(p2m_missing)))
			p2m_init(p2m);
		else
			p2m_init_identity(p2m, pfn & ~(P2M_PER_PAGE - 1));

		spin_lock_irqsave(&p2m_update_lock, flags);

		if (pte_pfn(*ptep) == p2m_pfn) {
			HYPERVISOR_shared_info->arch.p2m_generation++;
			wmb(); /* Tools are synchronizing via p2m_generation. */
			set_pte(ptep,
				pfn_pte(PFN_DOWN(__pa(p2m)), PAGE_KERNEL));
			wmb(); /* Tools are synchronizing via p2m_generation. */
			HYPERVISOR_shared_info->arch.p2m_generation++;
			if (mid_mfn)
				mid_mfn[p2m_mid_index(pfn)] = virt_to_mfn(p2m);
			p2m = NULL;
		}

		spin_unlock_irqrestore(&p2m_update_lock, flags);

		if (p2m)
			free_p2m_page(p2m);
	}

	/* Expanded the p2m? */
	if (pfn >= xen_p2m_last_pfn) {
		xen_p2m_last_pfn = ALIGN(pfn + 1, P2M_PER_PAGE);
		HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn;
	}

	return 0;
}
EXPORT_SYMBOL(xen_alloc_p2m_entry);
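
/*
 * Editor's note: typically used at boot to mark non-RAM ranges (e.g. the
 * legacy VGA/BIOS hole) as identity-mapped; returns the number of pfns
 * actually set.
 */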
unsigned long __init set_phys_range_identity(unsigned long pfn_s,
					     unsigned long pfn_e)
{
	unsigned long pfn;

	if (unlikely(pfn_s >= xen_p2m_size))
		return 0;

	if (pfn_s > pfn_e)
		return 0;

	if (pfn_e > xen_p2m_size)
		pfn_e = xen_p2m_size;

	for (pfn = pfn_s; pfn < pfn_e; pfn++)
		xen_p2m_addr[pfn] = IDENTITY_FRAME(pfn);

	return pfn - pfn_s;
}

bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	pte_t *ptep;
	unsigned int level;

	/* Only invalid entries allowed above the highest p2m covered frame. */
	if (unlikely(pfn >= xen_p2m_size))
		return mfn == INVALID_P2M_ENTRY;

	/*
	 * The interface requires atomic updates on p2m elements.
	 * xen_safe_write_ulong() uses an atomic store via asm().
	 */
	if (likely(!xen_safe_write_ulong(xen_p2m_addr + pfn, mfn)))
		return true;

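	/*
	 * Editor's note: the store above faults (and thus fails) when the
	 * leaf is one of the shared read-only pages p2m_missing or
	 * p2m_identity (mapped PAGE_KERNEL_RO in xen_rebuild_p2m_list());
	 * check which one to decide whether the store was semantically a
	 * no-op.
	 */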
	ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn), &level);
	BUG_ON(!ptep || level != PG_LEVEL_4K);

	if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_missing)))
		return mfn == INVALID_P2M_ENTRY;

	if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_identity)))
		return mfn == IDENTITY_FRAME(pfn);

	return false;
}

bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
		int ret;

		ret = xen_alloc_p2m_entry(pfn);
		if (ret < 0)
			return false;

		return __set_phys_to_machine(pfn, mfn);
	}

	return true;
}

int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
			    struct gnttab_map_grant_ref *kmap_ops,
			    struct page **pages, unsigned int count)
{
	int i, ret = 0;
	pte_t *pte;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	if (kmap_ops) {
		ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
						kmap_ops, count);
		if (ret)
			goto out;
	}

	for (i = 0; i < count; i++) {
		unsigned long mfn, pfn;
		struct gnttab_unmap_grant_ref unmap[2];
		int rc;

		/* Do not add to override if the map failed. */
		if (map_ops[i].status != GNTST_okay ||
		    (kmap_ops && kmap_ops[i].status != GNTST_okay))
			continue;

		if (map_ops[i].flags & GNTMAP_contains_pte) {
			pte = (pte_t *)(mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
				(map_ops[i].host_addr & ~PAGE_MASK));
			mfn = pte_mfn(*pte);
		} else {
			mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
		}
		pfn = page_to_pfn(pages[i]);

		WARN(pfn_to_mfn(pfn) != INVALID_P2M_ENTRY, "page must be ballooned");

		if (likely(set_phys_to_machine(pfn, FOREIGN_FRAME(mfn))))
			continue;

		/*
		 * Signal an error for this slot. This in turn requires
		 * immediate unmapping.
		 */
		map_ops[i].status = GNTST_general_error;
		unmap[0].host_addr = map_ops[i].host_addr;
		unmap[0].handle = map_ops[i].handle;
		map_ops[i].handle = INVALID_GRANT_HANDLE;
		if (map_ops[i].flags & GNTMAP_device_map)
			unmap[0].dev_bus_addr = map_ops[i].dev_bus_addr;
		else
			unmap[0].dev_bus_addr = 0;

		if (kmap_ops) {
			kmap_ops[i].status = GNTST_general_error;
			unmap[1].host_addr = kmap_ops[i].host_addr;
			unmap[1].handle = kmap_ops[i].handle;
			kmap_ops[i].handle = INVALID_GRANT_HANDLE;
			if (kmap_ops[i].flags & GNTMAP_device_map)
				unmap[1].dev_bus_addr = kmap_ops[i].dev_bus_addr;
			else
				unmap[1].dev_bus_addr = 0;
		}

		/*
		 * Pre-populate both status fields, to be recognizable in
		 * the log message below.
		 */
		unmap[0].status = 1;
		unmap[1].status = 1;

		rc = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
					       unmap, 1 + !!kmap_ops);
		if (rc || unmap[0].status != GNTST_okay ||
		    unmap[1].status != GNTST_okay)
			pr_err_once("gnttab unmap failed: rc=%d st0=%d st1=%d\n",
				    rc, unmap[0].status, unmap[1].status);
	}

out:
	return ret;
}

int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
			      struct gnttab_unmap_grant_ref *kunmap_ops,
			      struct page **pages, unsigned int count)
{
	int i, ret = 0;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	for (i = 0; i < count; i++) {
		unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i]));
		unsigned long pfn = page_to_pfn(pages[i]);

		if (mfn != INVALID_P2M_ENTRY && (mfn & FOREIGN_FRAME_BIT))
			set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
		else
			ret = -EINVAL;
	}
	if (kunmap_ops)
		ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
						kunmap_ops, count) ?: ret;

	return ret;
}

/* Remapped non-RAM areas */
#define NR_NONRAM_REMAP 4
static struct nonram_remap {
	phys_addr_t maddr;
	phys_addr_t paddr;
	size_t size;
} xen_nonram_remap[NR_NONRAM_REMAP] __ro_after_init;
static unsigned int nr_nonram_remap __ro_after_init;
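
/*
 * Editor's example (hypothetical values): an entry with
 * maddr = 0xfc000000, paddr = 0x80000000, size = 0x4000 means the 16 KiB
 * of machine address space at 0xfc000000 is accessed through
 * guest-physical address 0x80000000.
 */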

/*
 * Do the real remapping of non-RAM regions as specified in the
 * xen_nonram_remap[] array.
 * In case of an error just crash the system.
 */
void __init xen_do_remap_nonram(void)
{
	unsigned int i;
	unsigned int remapped = 0;
	const struct nonram_remap *remap = xen_nonram_remap;
	unsigned long pfn, mfn, end_pfn;

	for (i = 0; i < nr_nonram_remap; i++) {
		end_pfn = PFN_UP(remap->paddr + remap->size);
		pfn = PFN_DOWN(remap->paddr);
		mfn = PFN_DOWN(remap->maddr);
		while (pfn < end_pfn) {
			if (!set_phys_to_machine(pfn, mfn))
				panic("Failed to set p2m mapping for pfn=%lx mfn=%lx\n",
				      pfn, mfn);

			pfn++;
			mfn++;
			remapped++;
		}

		remap++;
	}

	pr_info("Remapped %u non-RAM page(s)\n", remapped);
}

#ifdef CONFIG_ACPI
/*
 * Xen variant of acpi_os_ioremap() taking potentially remapped non-RAM
 * regions into account.
 * Any attempt to map an area crossing a remap boundary will produce a
 * WARN() splat.
 * phys is matched against remap->maddr on input and will be rebased to
 * remap->paddr.
 */
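/*
 * Editor's example: with the hypothetical entry above
 * (maddr = 0xfc000000, paddr = 0x80000000), a request for
 * phys = 0xfc001000 is rebased to 0x80001000 before being handed to
 * x86_acpi_os_ioremap().
 */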
static void __iomem *xen_acpi_os_ioremap(acpi_physical_address phys,
					 acpi_size size)
{
	unsigned int i;
	const struct nonram_remap *remap = xen_nonram_remap;

	for (i = 0; i < nr_nonram_remap; i++) {
		if (phys + size > remap->maddr &&
		    phys < remap->maddr + remap->size) {
			WARN_ON(phys < remap->maddr ||
				phys + size > remap->maddr + remap->size);
			phys += remap->paddr - remap->maddr;
			break;
		}

		remap++;	/* Bug fix: advance to the next remap entry. */
	}

	return x86_acpi_os_ioremap(phys, size);
}
#endif /* CONFIG_ACPI */

/*
 * Add a new non-RAM remap entry.
 * In case no free entry is found, just crash the system.
 */
void __init xen_add_remap_nonram(phys_addr_t maddr, phys_addr_t paddr,
				 unsigned long size)
{
	BUG_ON((maddr & ~PAGE_MASK) != (paddr & ~PAGE_MASK));

	if (nr_nonram_remap == NR_NONRAM_REMAP) {
		xen_raw_console_write("Number of required E820 entry remapping actions exceeds maximum value\n");
		BUG();
	}

#ifdef CONFIG_ACPI
	/* Switch to the Xen acpi_os_ioremap() variant. */
	if (nr_nonram_remap == 0)
		acpi_os_ioremap = xen_acpi_os_ioremap;
#endif

	xen_nonram_remap[nr_nonram_remap].maddr = maddr;
	xen_nonram_remap[nr_nonram_remap].paddr = paddr;
	xen_nonram_remap[nr_nonram_remap].size = size;

	nr_nonram_remap++;
}

#ifdef CONFIG_XEN_DEBUG_FS
#include <linux/debugfs.h>
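
/*
 * Editor's note: the output is a list of maximal same-type pfn ranges,
 * e.g. (illustrative values):
 *  [0x0->0x98] pfn
 *  [0x98->0x100] identity
 *  [0x100->0x20000] pfn
 */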
static int p2m_dump_show(struct seq_file *m, void *v)
{
	static const char * const type_name[] = {
				[P2M_TYPE_IDENTITY] = "identity",
				[P2M_TYPE_MISSING] = "missing",
				[P2M_TYPE_PFN] = "pfn",
				[P2M_TYPE_UNKNOWN] = "abnormal"};
	unsigned long pfn, first_pfn;
	int type, prev_type;

	prev_type = xen_p2m_elem_type(0);
	first_pfn = 0;

	for (pfn = 0; pfn < xen_p2m_size; pfn++) {
		type = xen_p2m_elem_type(pfn);
		if (type != prev_type) {
			seq_printf(m, " [0x%lx->0x%lx] %s\n", first_pfn, pfn,
				   type_name[prev_type]);
			prev_type = type;
			first_pfn = pfn;
		}
	}
	seq_printf(m, " [0x%lx->0x%lx] %s\n", first_pfn, pfn,
		   type_name[prev_type]);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(p2m_dump);

static struct dentry *d_mmu_debug;

static int __init xen_p2m_debugfs(void)
{
	struct dentry *d_xen = xen_init_debugfs();

	d_mmu_debug = debugfs_create_dir("mmu", d_xen);

	debugfs_create_file("p2m", 0600, d_mmu_debug, NULL, &p2m_dump_fops);
	return 0;
}
fs_initcall(xen_p2m_debugfs);
#endif /* CONFIG_XEN_DEBUG_FS */
