// SPDX-License-Identifier: GPL-2.0-only
/*
 * Memory Encryption Support Common Code
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#include <linux/dma-direct.h>
#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>
#include <linux/cc_platform.h>
#include <linux/mem_encrypt.h>
#include <linux/virtio_anchor.h>

#include <asm/sev.h>

/* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
bool force_dma_unencrypted(struct device *dev)
{
	/*
	 * For SEV, all DMA must be to unencrypted addresses.
	 */
	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return true;

	/*
	 * For SME, all DMA must be to unencrypted addresses if the
	 * device does not support DMA to addresses that include the
	 * encryption mask.
	 */
	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
		u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
		u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
						dev->bus_dma_limit);

		if (dma_dev_mask <= dma_enc_mask)
			return true;
	}

	return false;
}

static void print_mem_encrypt_feature_info(void)
{
	pr_info("Memory Encryption Features active: ");

	switch (cc_vendor) {
	case CC_VENDOR_INTEL:
		pr_cont("Intel TDX\n");
		break;
	case CC_VENDOR_AMD:
		pr_cont("AMD");

		/* Secure Memory Encryption */
		if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
			/*
			 * SME is mutually exclusive with any of the SEV
			 * features below.
			 */
			pr_cont(" SME\n");
			return;
		}

		/* Secure Encrypted Virtualization */
		if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
			pr_cont(" SEV");

		/* Encrypted Register State */
		if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
			pr_cont(" SEV-ES");

		/* Secure Nested Paging */
		if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
			pr_cont(" SEV-SNP");

		pr_cont("\n");

		sev_show_status();

		break;
	default:
		pr_cont("Unknown\n");
	}
}

/* Architecture __weak replacement functions */
void __init mem_encrypt_init(void)
{
	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		return;

	/* Call into SWIOTLB to update the SWIOTLB DMA buffers */
	swiotlb_update_mem_attributes();

	print_mem_encrypt_feature_info();
}

void __init mem_encrypt_setup_arch(void)
{
	phys_addr_t total_mem = memblock_phys_mem_size();
	unsigned long size;

	/*
	 * Do RMP table fixups after the e820 tables have been setup by
	 * e820__memory_setup().
	 */
	if (cc_platform_has(CC_ATTR_HOST_SEV_SNP))
		snp_fixup_e820_tables();

	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return;

	/*
	 * For SEV and TDX, all DMA has to occur via shared/unencrypted pages.
	 * Kernel uses SWIOTLB to make this happen without changing device
	 * drivers. However, depending on the workload being run, the
	 * default 64MB of SWIOTLB may not be enough and SWIOTLB may
	 * run out of buffers for DMA, resulting in I/O errors and/or
	 * performance degradation especially with high I/O workloads.
	 *
	 * Adjust the default size of SWIOTLB using a percentage of guest
	 * memory for SWIOTLB buffers. Also, as the SWIOTLB bounce buffer
	 * memory is allocated from low memory, ensure that the adjusted size
	 * is within the limits of low available memory.
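	 *
	 * As a worked example, a guest with 4GB of memory gets
	 * 4GB * 6% ~= 245MB of SWIOTLB, which the clamp below
	 * (IO_TLB_DEFAULT_SIZE to SZ_1G) leaves unchanged.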
	 *
	 * The percentage of guest memory used here for SWIOTLB buffers
	 * is an approximation of the static adjustment used previously:
	 * 64MB for guests under 1G, and ~128MB to 256MB for guests
	 * between 1G and 4G, i.e. roughly 6% of guest memory.
	 */
	size = total_mem * 6 / 100;
	size = clamp_val(size, IO_TLB_DEFAULT_SIZE, SZ_1G);
	swiotlb_adjust_size(size);

	/* Set restricted memory access for virtio. */
	virtio_set_mem_acc_cb(virtio_require_restricted_mem_acc);
}