Linux/arch/s390/kernel/uv.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Common Ultravisor functions and initialization
 *
 * Copyright IBM Corp. 2019, 2020
 */
#define KMSG_COMPONENT "prot_virt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sizes.h>
#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/uv.h>

/*
 * Stubs so that this file builds and links when KVM, which provides the
 * real gmap implementation, is not enabled.
 */
#if !IS_ENABLED(CONFIG_KVM)
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
        return 0;
}

int gmap_fault(struct gmap *gmap, unsigned long gaddr,
               unsigned int fault_flags)
{
        return 0;
}
#endif

/* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */
int __bootdata_preserved(prot_virt_guest);
EXPORT_SYMBOL(prot_virt_guest);

/*
 * uv_info contains both host and guest information, but it is currently
 * only expected to be used from modules by the KVM module or by PV guest
 * modules.
 *
 * The kernel itself will write these values once in uv_query_info()
 * and then make some of them readable via a sysfs interface.
 */
struct uv_info __bootdata_preserved(uv_info);
EXPORT_SYMBOL(uv_info);

int __bootdata_preserved(prot_virt_host);
EXPORT_SYMBOL(prot_virt_host);

static int __init uv_init(phys_addr_t stor_base, unsigned long stor_len)
{
        struct uv_cb_init uvcb = {
                .header.cmd = UVC_CMD_INIT_UV,
                .header.len = sizeof(uvcb),
                .stor_origin = stor_base,
                .stor_len = stor_len,
        };

        if (uv_call(0, (uint64_t)&uvcb)) {
                pr_err("Ultravisor init failed with rc: 0x%x rrc: 0x%x\n",
                       uvcb.header.rc, uvcb.header.rrc);
                return -1;
        }
        return 0;
}
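
/*
 * Editorial note: every UV call in this file follows the pattern shown in
 * uv_init() above: fill a command-specific control block, set header.cmd
 * and header.len, hand the block's address to uv_call() (or __uv_call()
 * where the raw condition code is needed), then inspect header.rc and
 * header.rrc on failure. A minimal sketch of the pattern, using a
 * hypothetical command constant UVC_CMD_EXAMPLE and control block type
 * struct uv_cb_example (neither exists in this file):
 *
 *      struct uv_cb_example uvcb = {
 *              .header.cmd = UVC_CMD_EXAMPLE,
 *              .header.len = sizeof(uvcb),
 *      };
 *
 *      if (uv_call(0, (u64)&uvcb))
 *              pr_err("UVC 0x%x failed, rc 0x%x rrc 0x%x\n",
 *                     uvcb.header.cmd, uvcb.header.rc, uvcb.header.rrc);
 */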

void __init setup_uv(void)
{
        void *uv_stor_base;

        if (!is_prot_virt_host())
                return;

        uv_stor_base = memblock_alloc_try_nid(
                uv_info.uv_base_stor_len, SZ_1M, SZ_2G,
                MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
        if (!uv_stor_base) {
                pr_warn("Failed to reserve %lu bytes for ultravisor base storage\n",
                        uv_info.uv_base_stor_len);
                goto fail;
        }

        if (uv_init(__pa(uv_stor_base), uv_info.uv_base_stor_len)) {
                memblock_free(uv_stor_base, uv_info.uv_base_stor_len);
                goto fail;
        }

        pr_info("Reserving %luMB as ultravisor base storage\n",
                uv_info.uv_base_stor_len >> 20);
        return;
fail:
        pr_info("Disabling support for protected virtualization\n");
        prot_virt_host = 0;
}

/*
 * Requests the Ultravisor to pin the page in the shared state. This will
 * cause an intercept when the guest attempts to unshare the pinned page.
 */
int uv_pin_shared(unsigned long paddr)
{
        struct uv_cb_cfs uvcb = {
                .header.cmd = UVC_CMD_PIN_PAGE_SHARED,
                .header.len = sizeof(uvcb),
                .paddr = paddr,
        };

        if (uv_call(0, (u64)&uvcb))
                return -EINVAL;
        return 0;
}
EXPORT_SYMBOL_GPL(uv_pin_shared);

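#if 0
/*
 * Illustrative sketch (editorial, not part of the original file): a caller
 * that already holds a folio reference could pin the backing page like
 * this. uv_pin_shared() takes an absolute physical address, hence the
 * folio_to_phys() conversion, as used elsewhere in this file.
 */
static int example_pin_folio(struct folio *folio)
{
        return uv_pin_shared(folio_to_phys(folio));
}
#endif
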
/*
 * Requests the Ultravisor to destroy a guest page and make it
 * accessible to the host. The destroy clears the page instead of
 * exporting.
 *
 * @paddr: Absolute host address of page to be destroyed
 */
static int uv_destroy(unsigned long paddr)
{
        struct uv_cb_cfs uvcb = {
                .header.cmd = UVC_CMD_DESTR_SEC_STOR,
                .header.len = sizeof(uvcb),
                .paddr = paddr
        };

        if (uv_call(0, (u64)&uvcb)) {
                /*
                 * Older firmware uses 107/d as an indication of a non-secure
                 * page. Let us emulate the newer variant (no-op).
                 */
                if (uvcb.header.rc == 0x107 && uvcb.header.rrc == 0xd)
                        return 0;
                return -EINVAL;
        }
        return 0;
}

/*
 * The caller must already hold a reference to the folio.
 */
int uv_destroy_folio(struct folio *folio)
{
        int rc;

        /* See gmap_make_secure(): large folios cannot be secure */
        if (unlikely(folio_test_large(folio)))
                return 0;

        folio_get(folio);
        rc = uv_destroy(folio_to_phys(folio));
        if (!rc)
                clear_bit(PG_arch_1, &folio->flags);
        folio_put(folio);
        return rc;
}

/*
 * The present PTE still indirectly holds a folio reference through the mapping.
 */
int uv_destroy_pte(pte_t pte)
{
        VM_WARN_ON(!pte_present(pte));
        return uv_destroy_folio(pfn_folio(pte_pfn(pte)));
}

/*
 * Requests the Ultravisor to encrypt a guest page and make it
 * accessible to the host for paging (export).
 *
 * @paddr: Absolute host address of page to be exported
 */
static int uv_convert_from_secure(unsigned long paddr)
{
        struct uv_cb_cfs uvcb = {
                .header.cmd = UVC_CMD_CONV_FROM_SEC_STOR,
                .header.len = sizeof(uvcb),
                .paddr = paddr
        };

        if (uv_call(0, (u64)&uvcb))
                return -EINVAL;
        return 0;
}

/*
 * The caller must already hold a reference to the folio.
 */
static int uv_convert_from_secure_folio(struct folio *folio)
{
        int rc;

        /* See gmap_make_secure(): large folios cannot be secure */
        if (unlikely(folio_test_large(folio)))
                return 0;

        folio_get(folio);
        rc = uv_convert_from_secure(folio_to_phys(folio));
        if (!rc)
                clear_bit(PG_arch_1, &folio->flags);
        folio_put(folio);
        return rc;
}

/*
 * The present PTE still indirectly holds a folio reference through the mapping.
 */
int uv_convert_from_secure_pte(pte_t pte)
{
        VM_WARN_ON(!pte_present(pte));
        return uv_convert_from_secure_folio(pfn_folio(pte_pfn(pte)));
}

217 
218 /*
219  * Calculate the expected ref_count for a folio that would otherwise have no
220  * further pins. This was cribbed from similar functions in other places in
221  * the kernel, but with some slight modifications. We know that a secure
222  * folio can not be a large folio, for example.
223  */
224 static int expected_folio_refs(struct folio *folio)
225 {
226         int res;
227 
228         res = folio_mapcount(folio);
229         if (folio_test_swapcache(folio)) {
230                 res++;
231         } else if (folio_mapping(folio)) {
232                 res++;
233                 if (folio->private)
234                         res++;
235         }
236         return res;
237 }
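
/*
 * Worked example (editorial): a small folio mapped by exactly one PTE and
 * sitting in the swap cache contributes folio_mapcount() == 1 plus one
 * swap-cache reference, so expected_folio_refs() returns 2. The refcount
 * freeze in make_folio_secure() below then succeeds only if no further,
 * unexpected references (e.g. GUP pins or pagevecs) exist.
 */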

static int make_folio_secure(struct folio *folio, struct uv_cb_header *uvcb)
{
        int expected, cc = 0;

        if (folio_test_writeback(folio))
                return -EAGAIN;
        expected = expected_folio_refs(folio);
        if (!folio_ref_freeze(folio, expected))
                return -EBUSY;
        set_bit(PG_arch_1, &folio->flags);
        /*
         * If the UVC does not succeed or fail immediately, we don't want to
         * loop for long, or we might get stall notifications.
         * On the other hand, this is a complex scenario and we are holding
         * a lot of locks, so we can't easily sleep and reschedule. We try
         * only once, and if the UVC returned busy or partial completion,
         * we return -EAGAIN and we let the callers deal with it.
         */
        cc = __uv_call(0, (u64)uvcb);
        folio_ref_unfreeze(folio, expected);
        /*
         * Return -ENXIO if the folio was not mapped, -EINVAL for other errors.
         * If busy or partially completed, return -EAGAIN.
         */
        if (cc == UVC_CC_OK)
                return 0;
        else if (cc == UVC_CC_BUSY || cc == UVC_CC_PARTIAL)
                return -EAGAIN;
        return uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
}

/**
 * should_export_before_import - Determine whether an export is needed
 * before an import-like operation
 * @uvcb: the Ultravisor control block of the UVC to be performed
 * @mm: the mm of the process
 *
 * Returns whether an export is needed before every import-like operation.
 * This is needed for shared pages, which don't trigger a secure storage
 * exception when accessed from a different guest.
 *
 * Although considered as one, the Unpin Page UVC is not an actual import,
 * so it is not affected.
 *
 * An export is also unnecessary when there is only one protected VM,
 * because the page cannot belong to the wrong VM in that case (there is
 * no "other VM" it could belong to).
 *
 * Return: true if an export is needed before every import, otherwise false.
 */
static bool should_export_before_import(struct uv_cb_header *uvcb, struct mm_struct *mm)
{
        /*
         * The misc feature indicates, among other things, that importing a
         * shared page from a different protected VM will automatically also
         * transfer its ownership.
         */
        if (uv_has_feature(BIT_UV_FEAT_MISC))
                return false;
        if (uvcb->cmd == UVC_CMD_UNPIN_PAGE_SHARED)
                return false;
        return atomic_read(&mm->context.protected_count) > 1;
}

/*
 * Drain LRU caches: the local one on first invocation and the ones of all
 * CPUs on successive invocations. Returns "true" on the first invocation.
 */
static bool drain_lru(bool *drain_lru_called)
{
        /*
         * If we have tried a local drain and the folio refcount
         * still does not match our expected safe value, try with a
         * system wide drain. This is needed if the pagevecs holding
         * the page are on a different CPU.
         */
        if (*drain_lru_called) {
                lru_add_drain_all();
                /* We give up here, don't retry immediately. */
                return false;
        }
        /*
         * We are here if the folio refcount does not match the
         * expected safe value. The main culprits are usually
         * pagevecs. With lru_add_drain() we drain the pagevecs
         * on the local CPU so that hopefully the refcount will
         * reach the expected safe value.
         */
        lru_add_drain();
        *drain_lru_called = true;
        /* The caller should try again immediately */
        return true;
}

/*
 * Requests the Ultravisor to make a page accessible to a guest.
 * If it is brought in for the first time, it will be cleared. If
 * it has been exported before, it will be decrypted and integrity
 * checked.
 */
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
{
        struct vm_area_struct *vma;
        bool drain_lru_called = false;
        spinlock_t *ptelock;
        unsigned long uaddr;
        struct folio *folio;
        pte_t *ptep;
        int rc;

again:
        rc = -EFAULT;
        mmap_read_lock(gmap->mm);

        uaddr = __gmap_translate(gmap, gaddr);
        if (IS_ERR_VALUE(uaddr))
                goto out;
        vma = vma_lookup(gmap->mm, uaddr);
        if (!vma)
                goto out;
        /*
         * Secure pages cannot be huge, and userspace should not combine
         * both. In case userspace does it anyway, this will result in an
         * -EFAULT for the unpack. The guest thus never reaches secure mode.
         * If userspace plays dirty tricks by mapping huge pages later on,
         * this will result in a segmentation fault.
         */
        if (is_vm_hugetlb_page(vma))
                goto out;

        rc = -ENXIO;
        ptep = get_locked_pte(gmap->mm, uaddr, &ptelock);
        if (!ptep)
                goto out;
        if (pte_present(*ptep) && !(pte_val(*ptep) & _PAGE_INVALID) && pte_write(*ptep)) {
                folio = page_folio(pte_page(*ptep));
                rc = -EAGAIN;
                if (folio_test_large(folio)) {
                        rc = -E2BIG;
                } else if (folio_trylock(folio)) {
                        if (should_export_before_import(uvcb, gmap->mm))
                                uv_convert_from_secure(PFN_PHYS(folio_pfn(folio)));
                        rc = make_folio_secure(folio, uvcb);
                        folio_unlock(folio);
                }

                /*
                 * Once we drop the PTL, the folio may get unmapped and
                 * freed immediately. We need a temporary reference.
                 */
                if (rc == -EAGAIN || rc == -E2BIG)
                        folio_get(folio);
        }
        pte_unmap_unlock(ptep, ptelock);
out:
        mmap_read_unlock(gmap->mm);

        switch (rc) {
        case -E2BIG:
                folio_lock(folio);
                rc = split_folio(folio);
                folio_unlock(folio);
                folio_put(folio);

                switch (rc) {
                case 0:
                        /* Splitting succeeded, try again immediately. */
                        goto again;
                case -EAGAIN:
                        /* Additional folio references. */
                        if (drain_lru(&drain_lru_called))
                                goto again;
                        return -EAGAIN;
                case -EBUSY:
                        /* Unexpected race. */
                        return -EAGAIN;
                }
                WARN_ON_ONCE(1);
                return -ENXIO;
        case -EAGAIN:
                /*
                 * If we are here because the UVC returned busy or partial
                 * completion, this is just a useless check, but it is safe.
                 */
                folio_wait_writeback(folio);
                folio_put(folio);
                return -EAGAIN;
        case -EBUSY:
                /* Additional folio references. */
                if (drain_lru(&drain_lru_called))
                        goto again;
                return -EAGAIN;
        case -ENXIO:
                if (gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE))
                        return -EFAULT;
                return -EAGAIN;
        }
        return rc;
}
EXPORT_SYMBOL_GPL(gmap_make_secure);

int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
{
        struct uv_cb_cts uvcb = {
                .header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
                .header.len = sizeof(uvcb),
                .guest_handle = gmap->guest_handle,
                .gaddr = gaddr,
        };

        return gmap_make_secure(gmap, gaddr, &uvcb);
}
EXPORT_SYMBOL_GPL(gmap_convert_to_secure);

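#if 0
/*
 * Illustrative sketch (editorial, not part of the original file):
 * gmap_make_secure() reports transient conditions as -EAGAIN, so callers
 * are expected to retry. A minimal retry loop might look as follows; real
 * callers (e.g. the KVM unpack path) add their own signal handling on top.
 */
static int example_convert_retry(struct gmap *gmap, unsigned long gaddr)
{
        int rc;

        do {
                rc = gmap_convert_to_secure(gmap, gaddr);
                if (rc == -EAGAIN)
                        cond_resched();
        } while (rc == -EAGAIN);
        return rc;
}
#endif
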
/**
 * gmap_destroy_page - Destroy a guest page.
 * @gmap: the gmap of the guest
 * @gaddr: the guest address to destroy
 *
 * An attempt will be made to destroy the given guest page. If the attempt
 * fails, an attempt is made to export the page. If both attempts fail, an
 * appropriate error is returned.
 */
int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr)
{
        struct vm_area_struct *vma;
        unsigned long uaddr;
        struct folio *folio;
        struct page *page;
        int rc;

        rc = -EFAULT;
        mmap_read_lock(gmap->mm);

        uaddr = __gmap_translate(gmap, gaddr);
        if (IS_ERR_VALUE(uaddr))
                goto out;
        vma = vma_lookup(gmap->mm, uaddr);
        if (!vma)
                goto out;
        /*
         * Huge pages should not be able to become secure
         */
        if (is_vm_hugetlb_page(vma))
                goto out;

        rc = 0;
        /* we take an extra reference here */
        page = follow_page(vma, uaddr, FOLL_WRITE | FOLL_GET);
        if (IS_ERR_OR_NULL(page))
                goto out;
        folio = page_folio(page);
        rc = uv_destroy_folio(folio);
        /*
         * Fault handlers can race; it is possible that two CPUs will fault
         * on the same secure page. One CPU can destroy the page, reboot,
         * re-enter secure mode and import it, while the second CPU was
         * stuck at the beginning of the handler. At some point the second
         * CPU will be able to progress, and it will not be able to destroy
         * the page. In that case we do not want to terminate the process,
         * we instead try to export the page.
         */
        if (rc)
                rc = uv_convert_from_secure_folio(folio);
        folio_put(folio);
out:
        mmap_read_unlock(gmap->mm);
        return rc;
}
EXPORT_SYMBOL_GPL(gmap_destroy_page);

/*
 * To be called with the folio locked or with an extra reference! This will
 * prevent gmap_make_secure from touching the folio concurrently. Having two
 * parallel arch_make_folio_accessible() calls is fine, as the UV calls will
 * become a no-op if the folio is already exported.
 */
int arch_make_folio_accessible(struct folio *folio)
{
        int rc = 0;

        /* See gmap_make_secure(): large folios cannot be secure */
        if (unlikely(folio_test_large(folio)))
                return 0;

        /*
         * PG_arch_1 is used in 2 places:
         * 1. for storage keys of hugetlb folios and KVM
         * 2. As an indication that this small folio might be secure. This can
         *    overindicate, e.g. we set the bit before calling
         *    convert_to_secure.
         * As secure pages are never large folios, both variants can co-exist.
         */
        if (!test_bit(PG_arch_1, &folio->flags))
                return 0;

        rc = uv_pin_shared(folio_to_phys(folio));
        if (!rc) {
                clear_bit(PG_arch_1, &folio->flags);
                return 0;
        }

        rc = uv_convert_from_secure(folio_to_phys(folio));
        if (!rc) {
                clear_bit(PG_arch_1, &folio->flags);
                return 0;
        }

        return rc;
}
EXPORT_SYMBOL_GPL(arch_make_folio_accessible);

int arch_make_page_accessible(struct page *page)
{
        return arch_make_folio_accessible(page_folio(page));
}
EXPORT_SYMBOL_GPL(arch_make_page_accessible);
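
/*
 * Editorial note: arch_make_folio_accessible() backs the generic
 * make-page-accessible hook on s390; core mm code (e.g. the gup and
 * writeback paths) invokes it before page content is accessed by the
 * host, so that secure pages are exported or pinned shared first.
 */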

static ssize_t uv_query_facilities(struct kobject *kobj,
                                   struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lx\n%lx\n%lx\n%lx\n",
                          uv_info.inst_calls_list[0],
                          uv_info.inst_calls_list[1],
                          uv_info.inst_calls_list[2],
                          uv_info.inst_calls_list[3]);
}

static struct kobj_attribute uv_query_facilities_attr =
        __ATTR(facilities, 0444, uv_query_facilities, NULL);

static ssize_t uv_query_supp_se_hdr_ver(struct kobject *kobj,
                                        struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_ver);
}

static struct kobj_attribute uv_query_supp_se_hdr_ver_attr =
        __ATTR(supp_se_hdr_ver, 0444, uv_query_supp_se_hdr_ver, NULL);

static ssize_t uv_query_supp_se_hdr_pcf(struct kobject *kobj,
                                        struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lx\n", uv_info.supp_se_hdr_pcf);
}

static struct kobj_attribute uv_query_supp_se_hdr_pcf_attr =
        __ATTR(supp_se_hdr_pcf, 0444, uv_query_supp_se_hdr_pcf, NULL);

static ssize_t uv_query_dump_cpu_len(struct kobject *kobj,
                                     struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lx\n", uv_info.guest_cpu_stor_len);
}

/* Note: unlike its siblings, this sysfs file name keeps the "uv_query_" prefix. */
static struct kobj_attribute uv_query_dump_cpu_len_attr =
        __ATTR(uv_query_dump_cpu_len, 0444, uv_query_dump_cpu_len, NULL);

static ssize_t uv_query_dump_storage_state_len(struct kobject *kobj,
                                               struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lx\n", uv_info.conf_dump_storage_state_len);
}

static struct kobj_attribute uv_query_dump_storage_state_len_attr =
        __ATTR(dump_storage_state_len, 0444, uv_query_dump_storage_state_len, NULL);

static ssize_t uv_query_dump_finalize_len(struct kobject *kobj,
                                          struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lx\n", uv_info.conf_dump_finalize_len);
}

static struct kobj_attribute uv_query_dump_finalize_len_attr =
        __ATTR(dump_finalize_len, 0444, uv_query_dump_finalize_len, NULL);

static ssize_t uv_query_feature_indications(struct kobject *kobj,
                                            struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lx\n", uv_info.uv_feature_indications);
}

static struct kobj_attribute uv_query_feature_indications_attr =
        __ATTR(feature_indications, 0444, uv_query_feature_indications, NULL);

static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
                                       struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%d\n", uv_info.max_guest_cpu_id + 1);
}

static struct kobj_attribute uv_query_max_guest_cpus_attr =
        __ATTR(max_cpus, 0444, uv_query_max_guest_cpus, NULL);

static ssize_t uv_query_max_guest_vms(struct kobject *kobj,
                                      struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%d\n", uv_info.max_num_sec_conf);
}

static struct kobj_attribute uv_query_max_guest_vms_attr =
        __ATTR(max_guests, 0444, uv_query_max_guest_vms, NULL);

static ssize_t uv_query_max_guest_addr(struct kobject *kobj,
                                       struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lx\n", uv_info.max_sec_stor_addr);
}

static struct kobj_attribute uv_query_max_guest_addr_attr =
        __ATTR(max_address, 0444, uv_query_max_guest_addr, NULL);

static ssize_t uv_query_supp_att_req_hdr_ver(struct kobject *kobj,
                                             struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lx\n", uv_info.supp_att_req_hdr_ver);
}

static struct kobj_attribute uv_query_supp_att_req_hdr_ver_attr =
        __ATTR(supp_att_req_hdr_ver, 0444, uv_query_supp_att_req_hdr_ver, NULL);

static ssize_t uv_query_supp_att_pflags(struct kobject *kobj,
                                        struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lx\n", uv_info.supp_att_pflags);
}

static struct kobj_attribute uv_query_supp_att_pflags_attr =
        __ATTR(supp_att_pflags, 0444, uv_query_supp_att_pflags, NULL);

static ssize_t uv_query_supp_add_secret_req_ver(struct kobject *kobj,
                                                struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lx\n", uv_info.supp_add_secret_req_ver);
}

static struct kobj_attribute uv_query_supp_add_secret_req_ver_attr =
        __ATTR(supp_add_secret_req_ver, 0444, uv_query_supp_add_secret_req_ver, NULL);

static ssize_t uv_query_supp_add_secret_pcf(struct kobject *kobj,
                                            struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lx\n", uv_info.supp_add_secret_pcf);
}

static struct kobj_attribute uv_query_supp_add_secret_pcf_attr =
        __ATTR(supp_add_secret_pcf, 0444, uv_query_supp_add_secret_pcf, NULL);

static ssize_t uv_query_supp_secret_types(struct kobject *kobj,
                                          struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lx\n", uv_info.supp_secret_types);
}

static struct kobj_attribute uv_query_supp_secret_types_attr =
        __ATTR(supp_secret_types, 0444, uv_query_supp_secret_types, NULL);

static ssize_t uv_query_max_secrets(struct kobject *kobj,
                                    struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%d\n", uv_info.max_secrets);
}

static struct kobj_attribute uv_query_max_secrets_attr =
        __ATTR(max_secrets, 0444, uv_query_max_secrets, NULL);

static struct attribute *uv_query_attrs[] = {
        &uv_query_facilities_attr.attr,
        &uv_query_feature_indications_attr.attr,
        &uv_query_max_guest_cpus_attr.attr,
        &uv_query_max_guest_vms_attr.attr,
        &uv_query_max_guest_addr_attr.attr,
        &uv_query_supp_se_hdr_ver_attr.attr,
        &uv_query_supp_se_hdr_pcf_attr.attr,
        &uv_query_dump_storage_state_len_attr.attr,
        &uv_query_dump_finalize_len_attr.attr,
        &uv_query_dump_cpu_len_attr.attr,
        &uv_query_supp_att_req_hdr_ver_attr.attr,
        &uv_query_supp_att_pflags_attr.attr,
        &uv_query_supp_add_secret_req_ver_attr.attr,
        &uv_query_supp_add_secret_pcf_attr.attr,
        &uv_query_supp_secret_types_attr.attr,
        &uv_query_max_secrets_attr.attr,
        NULL,
};

static struct attribute_group uv_query_attr_group = {
        .attrs = uv_query_attrs,
};

static ssize_t uv_is_prot_virt_guest(struct kobject *kobj,
                                     struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%d\n", prot_virt_guest);
}

static ssize_t uv_is_prot_virt_host(struct kobject *kobj,
                                    struct kobj_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%d\n", prot_virt_host);
}

static struct kobj_attribute uv_prot_virt_guest =
        __ATTR(prot_virt_guest, 0444, uv_is_prot_virt_guest, NULL);

static struct kobj_attribute uv_prot_virt_host =
        __ATTR(prot_virt_host, 0444, uv_is_prot_virt_host, NULL);

static const struct attribute *uv_prot_virt_attrs[] = {
        &uv_prot_virt_guest.attr,
        &uv_prot_virt_host.attr,
        NULL,
};

static struct kset *uv_query_kset;
static struct kobject *uv_kobj;

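/*
 * Editorial note: uv_info_init() below registers the following sysfs
 * layout under firmware_kobj when the Ultravisor call facility (facility
 * bit 158) is present:
 *
 *      /sys/firmware/uv/prot_virt_guest
 *      /sys/firmware/uv/prot_virt_host
 *      /sys/firmware/uv/query/<one file per entry in uv_query_attrs[]>
 *
 * For example, reading /sys/firmware/uv/query/max_guests returns
 * uv_info.max_num_sec_conf in decimal.
 */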
static int __init uv_info_init(void)
{
        int rc = -ENOMEM;

        if (!test_facility(158))
                return 0;

        uv_kobj = kobject_create_and_add("uv", firmware_kobj);
        if (!uv_kobj)
                return -ENOMEM;

        rc = sysfs_create_files(uv_kobj, uv_prot_virt_attrs);
        if (rc)
                goto out_kobj;

        uv_query_kset = kset_create_and_add("query", NULL, uv_kobj);
        if (!uv_query_kset) {
                rc = -ENOMEM;
                goto out_ind_files;
        }

        rc = sysfs_create_group(&uv_query_kset->kobj, &uv_query_attr_group);
        if (!rc)
                return 0;

        kset_unregister(uv_query_kset);
out_ind_files:
        sysfs_remove_files(uv_kobj, uv_prot_virt_attrs);
out_kobj:
        kobject_del(uv_kobj);
        kobject_put(uv_kobj);
        return rc;
}
device_initcall(uv_info_init);
