TOMOYO Linux Cross Reference
Linux/arch/x86/kernel/cpu/sgx/main.c

// SPDX-License-Identifier: GPL-2.0
/*  Copyright(c) 2016-20 Intel Corporation. */

#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
#include <linux/miscdevice.h>
#include <linux/node.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/vmalloc.h>
#include <asm/sgx.h>
#include "driver.h"
#include "encl.h"
#include "encls.h"

struct sgx_epc_section sgx_epc_sections[SGX_MAX_EPC_SECTIONS];
static int sgx_nr_epc_sections;
static struct task_struct *ksgxd_tsk;
static DECLARE_WAIT_QUEUE_HEAD(ksgxd_waitq);
static DEFINE_XARRAY(sgx_epc_address_space);

/*
 * These variables are part of the state of the reclaimer, and must be accessed
 * with sgx_reclaimer_lock acquired.
 */
static LIST_HEAD(sgx_active_page_list);
static DEFINE_SPINLOCK(sgx_reclaimer_lock);

static atomic_long_t sgx_nr_free_pages = ATOMIC_LONG_INIT(0);

/* Nodes with one or more EPC sections. */
static nodemask_t sgx_numa_mask;

/*
 * Array with one list_head for each possible NUMA node.  Each
 * list contains all the sgx_epc_section's which are on that
 * node.
 */
static struct sgx_numa_node *sgx_numa_nodes;

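/*
 * EPC pages inherited from a previous kernel across kexec(): every page is
 * queued here by sgx_setup_epc_section() and must be EREMOVE'd by ksgxd
 * before it can be handed to the page allocator.
 */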
static LIST_HEAD(sgx_dirty_page_list);

/*
 * Reset post-kexec EPC pages to the uninitialized state. The pages are removed
 * from the input list and made available to the page allocator. SECS pages
 * that are reached before their child pages fail EREMOVE and are left intact
 * for a later pass.
 *
 * Return 0 when sanitization was successful or the kthread was stopped, and
 * the number of unsanitized pages otherwise.
 */
static unsigned long __sgx_sanitize_pages(struct list_head *dirty_page_list)
{
        unsigned long left_dirty = 0;
        struct sgx_epc_page *page;
        LIST_HEAD(dirty);
        int ret;

        /* dirty_page_list is thread-local, no need for a lock: */
        while (!list_empty(dirty_page_list)) {
                if (kthread_should_stop())
                        return 0;

                page = list_first_entry(dirty_page_list, struct sgx_epc_page, list);

                /*
                 * Checking page->poison without holding the node->lock
                 * is racy, but losing the race (i.e. poison is set just
                 * after the check) just means __eremove() will be uselessly
                 * called for a page that sgx_free_epc_page() will put onto
                 * the node->sgx_poison_page_list later.
                 */
                if (page->poison) {
                        struct sgx_epc_section *section = &sgx_epc_sections[page->section];
                        struct sgx_numa_node *node = section->node;

                        spin_lock(&node->lock);
                        list_move(&page->list, &node->sgx_poison_page_list);
                        spin_unlock(&node->lock);

                        continue;
                }

                ret = __eremove(sgx_get_epc_virt_addr(page));
                if (!ret) {
                        /*
                         * page is now sanitized.  Make it available via the SGX
                         * page allocator:
                         */
                        list_del(&page->list);
                        sgx_free_epc_page(page);
                } else {
                        /* The page is not yet clean - move to the dirty list. */
                        list_move_tail(&page->list, &dirty);
                        left_dirty++;
                }

                cond_resched();
        }

        list_splice(&dirty, dirty_page_list);
        return left_dirty;
}

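/*
 * Return true if no mm mapping the enclave has accessed the page since the
 * last scan, i.e. the page has aged and is a reclaim candidate; return false
 * as soon as any mm reports it as recently accessed.
 */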
static bool sgx_reclaimer_age(struct sgx_epc_page *epc_page)
{
        struct sgx_encl_page *page = epc_page->owner;
        struct sgx_encl *encl = page->encl;
        struct sgx_encl_mm *encl_mm;
        bool ret = true;
        int idx;

        idx = srcu_read_lock(&encl->srcu);

        list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) {
                if (!mmget_not_zero(encl_mm->mm))
                        continue;

                mmap_read_lock(encl_mm->mm);
                ret = !sgx_encl_test_and_clear_young(encl_mm->mm, page);
                mmap_read_unlock(encl_mm->mm);

                mmput_async(encl_mm->mm);

                if (!ret)
                        break;
        }

        srcu_read_unlock(&encl->srcu, idx);

        if (!ret)
                return false;

        return true;
}

static void sgx_reclaimer_block(struct sgx_epc_page *epc_page)
{
        struct sgx_encl_page *page = epc_page->owner;
        unsigned long addr = page->desc & PAGE_MASK;
        struct sgx_encl *encl = page->encl;
        int ret;

        sgx_zap_enclave_ptes(encl, addr);

        mutex_lock(&encl->lock);

        ret = __eblock(sgx_get_epc_virt_addr(epc_page));
        if (encls_failed(ret))
                ENCLS_WARN(ret, "EBLOCK");

        mutex_unlock(&encl->lock);
}

static int __sgx_encl_ewb(struct sgx_epc_page *epc_page, void *va_slot,
                          struct sgx_backing *backing)
{
        struct sgx_pageinfo pginfo;
        int ret;

        pginfo.addr = 0;
        pginfo.secs = 0;

        pginfo.contents = (unsigned long)kmap_local_page(backing->contents);
        pginfo.metadata = (unsigned long)kmap_local_page(backing->pcmd) +
                          backing->pcmd_offset;

        ret = __ewb(&pginfo, sgx_get_epc_virt_addr(epc_page), va_slot);
        set_page_dirty(backing->pcmd);
        set_page_dirty(backing->contents);

        kunmap_local((void *)(unsigned long)(pginfo.metadata -
                                              backing->pcmd_offset));
        kunmap_local((void *)(unsigned long)pginfo.contents);

        return ret;
}

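/*
 * Deliberately empty: the IPI exists only to interrupt the target CPUs.
 * Taking the interrupt forces any logical processor executing inside the
 * enclave to exit it (AEX), which is all that the EWB slow path below needs.
 */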
void sgx_ipi_cb(void *info)
{
}

/*
 * Swap a page out to regular memory after it has been transformed to the
 * blocked state with EBLOCK, which means that no new TLB entries can be
 * created for it.
 *
 * The first attempt just tries to write the page, assuming that some other
 * thread has already reset the tracking counter for threads inside the
 * enclave with ETRACK and the previous thread count has drained to zero.
 * The second attempt calls ETRACK before EWB. If that also fails, all
 * hardware threads are kicked out of the enclave with IPIs before the final
 * EWB, which is then guaranteed to succeed.
 */
static void sgx_encl_ewb(struct sgx_epc_page *epc_page,
                         struct sgx_backing *backing)
{
        struct sgx_encl_page *encl_page = epc_page->owner;
        struct sgx_encl *encl = encl_page->encl;
        struct sgx_va_page *va_page;
        unsigned int va_offset;
        void *va_slot;
        int ret;

        encl_page->desc &= ~SGX_ENCL_PAGE_BEING_RECLAIMED;

        va_page = list_first_entry(&encl->va_pages, struct sgx_va_page,
                                   list);
        va_offset = sgx_alloc_va_slot(va_page);
        va_slot = sgx_get_epc_virt_addr(va_page->epc_page) + va_offset;
        if (sgx_va_page_full(va_page))
                list_move_tail(&va_page->list, &encl->va_pages);

        ret = __sgx_encl_ewb(epc_page, va_slot, backing);
        if (ret == SGX_NOT_TRACKED) {
                ret = __etrack(sgx_get_epc_virt_addr(encl->secs.epc_page));
                if (ret) {
                        if (encls_failed(ret))
                                ENCLS_WARN(ret, "ETRACK");
                }

                ret = __sgx_encl_ewb(epc_page, va_slot, backing);
                if (ret == SGX_NOT_TRACKED) {
                        /*
                         * Slow path, send IPIs to kick cpus out of the
                         * enclave.  Note, it's imperative that the cpu
                         * mask is generated *after* ETRACK, else we'll
                         * miss cpus that entered the enclave between
                         * generating the mask and incrementing epoch.
                         */
                        on_each_cpu_mask(sgx_encl_cpumask(encl),
                                         sgx_ipi_cb, NULL, 1);
                        ret = __sgx_encl_ewb(epc_page, va_slot, backing);
                }
        }

        if (ret) {
                if (encls_failed(ret))
                        ENCLS_WARN(ret, "EWB");

                sgx_free_va_slot(va_page, va_offset);
        } else {
                encl_page->desc |= va_offset;
                encl_page->va_page = va_page;
        }
}

static void sgx_reclaimer_write(struct sgx_epc_page *epc_page,
                                struct sgx_backing *backing)
{
        struct sgx_encl_page *encl_page = epc_page->owner;
        struct sgx_encl *encl = encl_page->encl;
        struct sgx_backing secs_backing;
        int ret;

        mutex_lock(&encl->lock);

        sgx_encl_ewb(epc_page, backing);
        encl_page->epc_page = NULL;
        encl->secs_child_cnt--;
        sgx_encl_put_backing(backing);

        if (!encl->secs_child_cnt && test_bit(SGX_ENCL_INITIALIZED, &encl->flags)) {
                ret = sgx_encl_alloc_backing(encl, PFN_DOWN(encl->size),
                                           &secs_backing);
                if (ret)
                        goto out;

                sgx_encl_ewb(encl->secs.epc_page, &secs_backing);

                sgx_encl_free_epc_page(encl->secs.epc_page);
                encl->secs.epc_page = NULL;

                sgx_encl_put_backing(&secs_backing);
        }

out:
        mutex_unlock(&encl->lock);
}

/*
 * Take a fixed number of pages from the head of the active page pool and
 * reclaim them to the enclave's private shmem files. Skip pages that have
 * been accessed since the last scan, and move them to the tail of the active
 * page pool so that pages get scanned in an LRU-like fashion.
 *
 * Batch-process a chunk of pages (at the moment 16) in order to reduce the
 * number of IPIs and ETRACKs potentially required. sgx_encl_ewb() already
 * amortizes some of that cost across HW threads with its three-stage EWB
 * pipeline (EWB, ETRACK + EWB, and IPI + EWB), but not sufficiently.
 * Reclaiming one page at a time would also be problematic, as it would
 * increase lock contention too much and halt forward progress.
 */
static void sgx_reclaim_pages(void)
{
        struct sgx_epc_page *chunk[SGX_NR_TO_SCAN];
        struct sgx_backing backing[SGX_NR_TO_SCAN];
        struct sgx_encl_page *encl_page;
        struct sgx_epc_page *epc_page;
        pgoff_t page_index;
        int cnt = 0;
        int ret;
        int i;

        spin_lock(&sgx_reclaimer_lock);
        for (i = 0; i < SGX_NR_TO_SCAN; i++) {
                if (list_empty(&sgx_active_page_list))
                        break;

                epc_page = list_first_entry(&sgx_active_page_list,
                                            struct sgx_epc_page, list);
                list_del_init(&epc_page->list);
                encl_page = epc_page->owner;

                if (kref_get_unless_zero(&encl_page->encl->refcount) != 0)
                        chunk[cnt++] = epc_page;
                else
                        /*
                         * The owner is freeing the page. No need to add the
                         * page back to the list of reclaimable pages.
                         */
                        epc_page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED;
        }
        spin_unlock(&sgx_reclaimer_lock);

        for (i = 0; i < cnt; i++) {
                epc_page = chunk[i];
                encl_page = epc_page->owner;

                if (!sgx_reclaimer_age(epc_page))
                        goto skip;

                page_index = PFN_DOWN(encl_page->desc - encl_page->encl->base);

                mutex_lock(&encl_page->encl->lock);
                ret = sgx_encl_alloc_backing(encl_page->encl, page_index, &backing[i]);
                if (ret) {
                        mutex_unlock(&encl_page->encl->lock);
                        goto skip;
                }

                encl_page->desc |= SGX_ENCL_PAGE_BEING_RECLAIMED;
                mutex_unlock(&encl_page->encl->lock);
                continue;

skip:
                spin_lock(&sgx_reclaimer_lock);
                list_add_tail(&epc_page->list, &sgx_active_page_list);
                spin_unlock(&sgx_reclaimer_lock);

                kref_put(&encl_page->encl->refcount, sgx_encl_release);

                chunk[i] = NULL;
        }

        for (i = 0; i < cnt; i++) {
                epc_page = chunk[i];
                if (epc_page)
                        sgx_reclaimer_block(epc_page);
        }

        for (i = 0; i < cnt; i++) {
                epc_page = chunk[i];
                if (!epc_page)
                        continue;

                encl_page = epc_page->owner;
                sgx_reclaimer_write(epc_page, &backing[i]);

                kref_put(&encl_page->encl->refcount, sgx_encl_release);
                epc_page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED;

                sgx_free_epc_page(epc_page);
        }
}

static bool sgx_should_reclaim(unsigned long watermark)
{
        return atomic_long_read(&sgx_nr_free_pages) < watermark &&
               !list_empty(&sgx_active_page_list);
}

/*
 * sgx_reclaim_direct() should be called (without the enclave's mutex held)
 * in locations where SGX memory resources might be low and might be
 * needed in order to make forward progress.
 */
void sgx_reclaim_direct(void)
{
        if (sgx_should_reclaim(SGX_NR_LOW_PAGES))
                sgx_reclaim_pages();
}

static int ksgxd(void *p)
{
        set_freezable();

        /*
         * Sanitize pages in order to recover from kexec(). The 2nd pass is
         * required for SECS pages, whose child pages blocked EREMOVE.
         */
        __sgx_sanitize_pages(&sgx_dirty_page_list);
        WARN_ON(__sgx_sanitize_pages(&sgx_dirty_page_list));

        while (!kthread_should_stop()) {
                if (try_to_freeze())
                        continue;

                wait_event_freezable(ksgxd_waitq,
                                     kthread_should_stop() ||
                                     sgx_should_reclaim(SGX_NR_HIGH_PAGES));

                if (sgx_should_reclaim(SGX_NR_HIGH_PAGES))
                        sgx_reclaim_pages();

                cond_resched();
        }

        return 0;
}

static bool __init sgx_page_reclaimer_init(void)
{
        struct task_struct *tsk;

        tsk = kthread_run(ksgxd, NULL, "ksgxd");
        if (IS_ERR(tsk))
                return false;

        ksgxd_tsk = tsk;

        return true;
}

bool current_is_ksgxd(void)
{
        return current == ksgxd_tsk;
}

static struct sgx_epc_page *__sgx_alloc_epc_page_from_node(int nid)
{
        struct sgx_numa_node *node = &sgx_numa_nodes[nid];
        struct sgx_epc_page *page = NULL;

        spin_lock(&node->lock);

        if (list_empty(&node->free_page_list)) {
                spin_unlock(&node->lock);
                return NULL;
        }

        page = list_first_entry(&node->free_page_list, struct sgx_epc_page, list);
        list_del_init(&page->list);
        page->flags = 0;

        spin_unlock(&node->lock);
        atomic_long_dec(&sgx_nr_free_pages);

        return page;
}

/**
 * __sgx_alloc_epc_page() - Allocate an EPC page
 *
 * Iterate through the NUMA nodes and reserve a free EPC page for the caller.
 * Start from the NUMA node where the caller is executing.
 *
 * Return:
 * - an EPC page:		a free EPC page was available and has been borrowed.
 * - ERR_PTR(-ENOMEM):		out of EPC pages.
 */
struct sgx_epc_page *__sgx_alloc_epc_page(void)
{
        struct sgx_epc_page *page;
        int nid_of_current = numa_node_id();
        int nid_start, nid;

        /*
         * Try local node first. If it doesn't have an EPC section,
         * fall back to the non-local NUMA nodes.
         */
        if (node_isset(nid_of_current, sgx_numa_mask))
                nid_start = nid_of_current;
        else
                nid_start = next_node_in(nid_of_current, sgx_numa_mask);

        nid = nid_start;
        do {
                page = __sgx_alloc_epc_page_from_node(nid);
                if (page)
                        return page;

                nid = next_node_in(nid, sgx_numa_mask);
        } while (nid != nid_start);

        return ERR_PTR(-ENOMEM);
}
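
/*
 * Illustrative example (hypothetical topology): with EPC sections on nodes
 * {0, 2} and the caller running on node 1, node_isset() fails and
 * next_node_in() starts the search at node 2, so the probe order is node 2,
 * then node 0, before giving up with ERR_PTR(-ENOMEM).
 */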

/**
 * sgx_mark_page_reclaimable() - Mark a page as reclaimable
 * @page:	EPC page
 *
 * Mark a page as reclaimable and add it to the active page list. Pages
 * are automatically removed from the active list when freed.
 */
void sgx_mark_page_reclaimable(struct sgx_epc_page *page)
{
        spin_lock(&sgx_reclaimer_lock);
        page->flags |= SGX_EPC_PAGE_RECLAIMER_TRACKED;
        list_add_tail(&page->list, &sgx_active_page_list);
        spin_unlock(&sgx_reclaimer_lock);
}

/**
 * sgx_unmark_page_reclaimable() - Remove a page from the reclaim list
 * @page:	EPC page
 *
 * Clear the reclaimable flag and remove the page from the active page list.
 *
 * Return:
 *   0 on success,
 *   -EBUSY if the page is in the process of being reclaimed
 */
int sgx_unmark_page_reclaimable(struct sgx_epc_page *page)
{
        spin_lock(&sgx_reclaimer_lock);
        if (page->flags & SGX_EPC_PAGE_RECLAIMER_TRACKED) {
                /* An empty ->list means the page is being reclaimed right now. */
                if (list_empty(&page->list)) {
                        spin_unlock(&sgx_reclaimer_lock);
                        return -EBUSY;
                }

                list_del(&page->list);
                page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED;
        }
        spin_unlock(&sgx_reclaimer_lock);

        return 0;
}

/**
 * sgx_alloc_epc_page() - Allocate an EPC page
 * @owner:	the owner of the EPC page
 * @reclaim:	reclaim pages if necessary
 *
 * Iterate through the EPC sections and borrow a free EPC page for the caller.
 * When a page is no longer needed it must be released with
 * sgx_free_epc_page(). If @reclaim is set to true, pages are reclaimed
 * directly when none are free. No mm's can be locked when @reclaim is set to
 * true.
 *
 * Finally, wake up ksgxd when the number of free pages goes below the
 * watermark before returning to the caller.
 *
 * Return:
 *   an EPC page,
 *   -errno on error
 */
struct sgx_epc_page *sgx_alloc_epc_page(void *owner, bool reclaim)
{
        struct sgx_epc_page *page;

        for ( ; ; ) {
                page = __sgx_alloc_epc_page();
                if (!IS_ERR(page)) {
                        page->owner = owner;
                        break;
                }

                if (list_empty(&sgx_active_page_list))
                        return ERR_PTR(-ENOMEM);

                if (!reclaim) {
                        page = ERR_PTR(-EBUSY);
                        break;
                }

                if (signal_pending(current)) {
                        page = ERR_PTR(-ERESTARTSYS);
                        break;
                }

                sgx_reclaim_pages();
                cond_resched();
        }

        if (sgx_should_reclaim(SGX_NR_LOW_PAGES))
                wake_up(&ksgxd_waitq);

        return page;
}
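
/*
 * Illustrative call pattern (hypothetical caller, not part of this file):
 *
 *	epc_page = sgx_alloc_epc_page(encl_page, false);
 *	if (IS_ERR(epc_page))
 *		return PTR_ERR(epc_page);	// -ENOMEM or -EBUSY here
 *
 * With @reclaim == true the call may also return -ERESTARTSYS if a signal
 * arrives while direct reclaim is in progress.
 */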

/**
 * sgx_free_epc_page() - Free an EPC page
 * @page:	an EPC page
 *
 * Put the EPC page back on the list of free pages. It's the caller's
 * responsibility to make sure that the page is in the uninitialized state. In
 * other words, do EREMOVE, EWB or whatever operation is necessary before
 * calling this function.
 */
void sgx_free_epc_page(struct sgx_epc_page *page)
{
        struct sgx_epc_section *section = &sgx_epc_sections[page->section];
        struct sgx_numa_node *node = section->node;

        spin_lock(&node->lock);

        page->owner = NULL;
        if (page->poison)
                list_add(&page->list, &node->sgx_poison_page_list);
        else
                list_add_tail(&page->list, &node->free_page_list);
        page->flags = SGX_EPC_PAGE_IS_FREE;

        spin_unlock(&node->lock);
        atomic_long_inc(&sgx_nr_free_pages);
}

static bool __init sgx_setup_epc_section(u64 phys_addr, u64 size,
                                         unsigned long index,
                                         struct sgx_epc_section *section)
{
        unsigned long nr_pages = size >> PAGE_SHIFT;
        unsigned long i;

        section->virt_addr = memremap(phys_addr, size, MEMREMAP_WB);
        if (!section->virt_addr)
                return false;

        section->pages = vmalloc(nr_pages * sizeof(struct sgx_epc_page));
        if (!section->pages) {
                memunmap(section->virt_addr);
                return false;
        }

        section->phys_addr = phys_addr;
        xa_store_range(&sgx_epc_address_space, section->phys_addr,
                       phys_addr + size - 1, section, GFP_KERNEL);

        for (i = 0; i < nr_pages; i++) {
                section->pages[i].section = index;
                section->pages[i].flags = 0;
                section->pages[i].owner = NULL;
                section->pages[i].poison = 0;
                list_add_tail(&section->pages[i].list, &sgx_dirty_page_list);
        }

        return true;
}

bool arch_is_platform_page(u64 paddr)
{
        return !!xa_load(&sgx_epc_address_space, paddr);
}
EXPORT_SYMBOL_GPL(arch_is_platform_page);

static struct sgx_epc_page *sgx_paddr_to_page(u64 paddr)
{
        struct sgx_epc_section *section;

        section = xa_load(&sgx_epc_address_space, paddr);
        if (!section)
                return NULL;

        return &section->pages[PFN_DOWN(paddr - section->phys_addr)];
}

/*
 * Called in process context to handle a hardware reported
 * error in an SGX EPC page.
 * If the MF_ACTION_REQUIRED bit is set in flags, then the
 * context is the task that consumed the poison data. Otherwise
 * this is called from a kernel thread unrelated to the page.
 */
int arch_memory_failure(unsigned long pfn, int flags)
{
        struct sgx_epc_page *page = sgx_paddr_to_page(pfn << PAGE_SHIFT);
        struct sgx_epc_section *section;
        struct sgx_numa_node *node;

        /*
         * mm/memory-failure.c calls this routine for all errors
         * where there isn't a "struct page" for the address. But that
         * includes other address ranges besides SGX.
         */
        if (!page)
                return -ENXIO;

        /*
         * If poison was consumed synchronously, send a SIGBUS to
         * the task. Hardware has already exited the SGX enclave and
         * will not allow re-entry to an enclave that has a memory
         * error. The signal may help the task understand why the
         * enclave is broken.
         */
        if (flags & MF_ACTION_REQUIRED)
                force_sig(SIGBUS);

        section = &sgx_epc_sections[page->section];
        node = section->node;

        spin_lock(&node->lock);

        /* Already poisoned? Nothing more to do. */
        if (page->poison)
                goto out;

        page->poison = 1;

        /*
         * If the page is on a free list, move it to the per-node
         * poison page list.
         */
        if (page->flags & SGX_EPC_PAGE_IS_FREE) {
                list_move(&page->list, &node->sgx_poison_page_list);
                goto out;
        }

        /*
         * TBD: Add additional plumbing to enable pre-emptive
         * action for asynchronous poison notification. Until
         * then just hope that the poison:
         * a) is not accessed - sgx_free_epc_page() will deal with it
         *    when the user gives it back
         * b) results in a recoverable machine check rather than
         *    a fatal one
         */
out:
        spin_unlock(&node->lock);
        return 0;
}

/*
 * A section metric is concatenated in a way that @low bits 12-31 define the
 * bits 12-31 of the metric and @high bits 0-19 define the bits 32-51 of the
 * metric.
 */
static inline u64 __init sgx_calc_section_metric(u64 low, u64 high)
{
        return (low & GENMASK_ULL(31, 12)) +
               ((high & GENMASK_ULL(19, 0)) << 32);
}
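
/*
 * Worked example (hypothetical register values): low = 0x40001000 and
 * high = 0x00000002 yield
 * (0x40001000 & GENMASK_ULL(31, 12)) + ((0x2 & GENMASK_ULL(19, 0)) << 32)
 * = 0x40001000 + 0x200000000 = 0x240001000.
 */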

#ifdef CONFIG_NUMA
static ssize_t sgx_total_bytes_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%lu\n", sgx_numa_nodes[dev->id].size);
}
static DEVICE_ATTR_RO(sgx_total_bytes);

static umode_t arch_node_attr_is_visible(struct kobject *kobj,
                struct attribute *attr, int idx)
{
        /* Make all x86/ attributes invisible when SGX is not initialized: */
        if (nodes_empty(sgx_numa_mask))
                return 0;

        return attr->mode;
}

static struct attribute *arch_node_dev_attrs[] = {
        &dev_attr_sgx_total_bytes.attr,
        NULL,
};

const struct attribute_group arch_node_dev_group = {
        .name = "x86",
        .attrs = arch_node_dev_attrs,
        .is_visible = arch_node_attr_is_visible,
};

static void __init arch_update_sysfs_visibility(int nid)
{
        struct node *node = node_devices[nid];
        int ret;

        ret = sysfs_update_group(&node->dev.kobj, &arch_node_dev_group);

        if (ret)
                pr_err("sysfs update failed (%d), files may be invisible\n", ret);
}
#else /* !CONFIG_NUMA */
static void __init arch_update_sysfs_visibility(int nid) {}
#endif

static bool __init sgx_page_cache_init(void)
{
        u32 eax, ebx, ecx, edx, type;
        u64 pa, size;
        int nid;
        int i;

        sgx_numa_nodes = kmalloc_array(num_possible_nodes(), sizeof(*sgx_numa_nodes), GFP_KERNEL);
        if (!sgx_numa_nodes)
                return false;

        for (i = 0; i < ARRAY_SIZE(sgx_epc_sections); i++) {
                cpuid_count(SGX_CPUID, i + SGX_CPUID_EPC, &eax, &ebx, &ecx, &edx);

                type = eax & SGX_CPUID_EPC_MASK;
                if (type == SGX_CPUID_EPC_INVALID)
                        break;

                if (type != SGX_CPUID_EPC_SECTION) {
                        pr_err_once("Unknown EPC section type: %u\n", type);
                        break;
                }

                pa   = sgx_calc_section_metric(eax, ebx);
                size = sgx_calc_section_metric(ecx, edx);

                pr_info("EPC section 0x%llx-0x%llx\n", pa, pa + size - 1);

                if (!sgx_setup_epc_section(pa, size, i, &sgx_epc_sections[i])) {
                        pr_err("No free memory for an EPC section\n");
                        break;
                }

                nid = numa_map_to_online_node(phys_to_target_node(pa));
                if (nid == NUMA_NO_NODE) {
                        /* The physical address is already printed above. */
                        pr_warn(FW_BUG "Unable to map EPC section to online node. Fallback to the NUMA node 0.\n");
                        nid = 0;
                }

                if (!node_isset(nid, sgx_numa_mask)) {
                        spin_lock_init(&sgx_numa_nodes[nid].lock);
                        INIT_LIST_HEAD(&sgx_numa_nodes[nid].free_page_list);
                        INIT_LIST_HEAD(&sgx_numa_nodes[nid].sgx_poison_page_list);
                        node_set(nid, sgx_numa_mask);
                        sgx_numa_nodes[nid].size = 0;

                        /* Make SGX-specific node sysfs files visible: */
                        arch_update_sysfs_visibility(nid);
                }

                sgx_epc_sections[i].node = &sgx_numa_nodes[nid];
                sgx_numa_nodes[nid].size += size;

                sgx_nr_epc_sections++;
        }

        if (!sgx_nr_epc_sections) {
                pr_err("There are zero EPC sections.\n");
                return false;
        }

        return true;
}

/*
 * Update the SGX_LEPUBKEYHASH MSRs to the values specified by the caller.
 * The bare-metal driver must update them to the hash of the enclave's signer
 * before EINIT. KVM must update them to the guest's virtual MSR values
 * before doing EINIT on the guest's behalf.
 */
void sgx_update_lepubkeyhash(u64 *lepubkeyhash)
{
        int i;

        WARN_ON_ONCE(preemptible());

        for (i = 0; i < 4; i++)
                wrmsrl(MSR_IA32_SGXLEPUBKEYHASH0 + i, lepubkeyhash[i]);
}

const struct file_operations sgx_provision_fops = {
        .owner			= THIS_MODULE,
};

static struct miscdevice sgx_dev_provision = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = "sgx_provision",
        .nodename = "sgx_provision",
        .fops = &sgx_provision_fops,
};

/**
 * sgx_set_attribute() - Update allowed attributes given file descriptor
 * @allowed_attributes:		Pointer to allowed enclave attributes
 * @attribute_fd:		File descriptor for specific attribute
 *
 * Append the enclave attribute indicated by the file descriptor to the
 * allowed attributes. Currently only SGX_ATTR_PROVISIONKEY, indicated by
 * /dev/sgx_provision, is supported.
 *
 * Return:
 * 0:		SGX_ATTR_PROVISIONKEY was appended to allowed_attributes
 * -EINVAL:	invalid or unsupported file descriptor
 */
int sgx_set_attribute(unsigned long *allowed_attributes,
                      unsigned int attribute_fd)
{
        struct fd f = fdget(attribute_fd);

        if (!f.file)
                return -EINVAL;

        if (f.file->f_op != &sgx_provision_fops) {
                fdput(f);
                return -EINVAL;
        }

        *allowed_attributes |= SGX_ATTR_PROVISIONKEY;

        fdput(f);
        return 0;
}
EXPORT_SYMBOL_GPL(sgx_set_attribute);

static int __init sgx_init(void)
{
        int ret;
        int i;

        if (!cpu_feature_enabled(X86_FEATURE_SGX))
                return -ENODEV;

        if (!sgx_page_cache_init())
                return -ENOMEM;

        if (!sgx_page_reclaimer_init()) {
                ret = -ENOMEM;
                goto err_page_cache;
        }

        ret = misc_register(&sgx_dev_provision);
        if (ret)
                goto err_kthread;

        /*
         * Always try to initialize the native *and* KVM drivers.
         * The KVM driver is less picky than the native one and
         * can function if the native one is not supported on the
         * current system or fails to initialize.
         *
         * Error out only if both fail to initialize.
         */
        ret = sgx_drv_init();

        if (sgx_vepc_init() && ret)
                goto err_provision;

        return 0;

err_provision:
        misc_deregister(&sgx_dev_provision);

err_kthread:
        kthread_stop(ksgxd_tsk);

err_page_cache:
        for (i = 0; i < sgx_nr_epc_sections; i++) {
                vfree(sgx_epc_sections[i].pages);
                memunmap(sgx_epc_sections[i].virt_addr);
        }

        return ret;
}

device_initcall(sgx_init);
