// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-map-ops.h>
#include <linux/scatterlist.h>
#include <linux/pfn.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include "direct.h"

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but some use
 * it for entirely different regions. In that case the arch code needs to
 * override the variable below for dma-direct to work properly.
 */
unsigned int zone_dma_bits __ro_after_init = 24;

static inline dma_addr_t phys_to_dma_direct(struct device *dev,
                phys_addr_t phys)
{
        if (force_dma_unencrypted(dev))
                return phys_to_dma_unencrypted(dev, phys);
        return phys_to_dma(dev, phys);
}

static inline struct page *dma_direct_to_page(struct device *dev,
                dma_addr_t dma_addr)
{
        return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr)));
}

u64 dma_direct_get_required_mask(struct device *dev)
{
        phys_addr_t phys = (phys_addr_t)(max_pfn - 1) << PAGE_SHIFT;
        u64 max_dma = phys_to_dma_direct(dev, phys);

        return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}

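/*
 * Worked example for dma_direct_get_required_mask() above (illustrative
 * only, assuming a 1:1 phys-to-DMA translation): with 4 GiB of RAM the
 * highest physical address is 0xffffffff, fls64() returns 32, and
 * (1ULL << 31) * 2 - 1 yields 0xffffffff, i.e. the mask is rounded up to
 * cover the most significant populated address bit.
 */
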
static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 *phys_limit)
{
        u64 dma_limit = min_not_zero(
                dev->coherent_dma_mask,
                dev->bus_dma_limit);

        /*
         * Optimistically try the zone that the physical address mask falls
         * into first.  If that returns memory that isn't actually addressable
         * we will fall back to the next lower zone and try again.
         *
         * Note that GFP_DMA32 and GFP_DMA are no ops without the corresponding
         * zones.
         */
        *phys_limit = dma_to_phys(dev, dma_limit);
        if (*phys_limit <= DMA_BIT_MASK(zone_dma_bits))
                return GFP_DMA;
        if (*phys_limit <= DMA_BIT_MASK(32))
                return GFP_DMA32;
        return 0;
}

bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
        dma_addr_t dma_addr = phys_to_dma_direct(dev, phys);

        if (dma_addr == DMA_MAPPING_ERROR)
                return false;
        return dma_addr + size - 1 <=
                min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
}

static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
{
        if (!force_dma_unencrypted(dev))
                return 0;
        return set_memory_decrypted((unsigned long)vaddr, PFN_UP(size));
}

static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
{
        int ret;

        if (!force_dma_unencrypted(dev))
                return 0;
        ret = set_memory_encrypted((unsigned long)vaddr, PFN_UP(size));
        if (ret)
                pr_warn_ratelimited("leaking DMA memory that can't be re-encrypted\n");
        return ret;
}

static void __dma_direct_free_pages(struct device *dev, struct page *page,
                                    size_t size)
{
        if (swiotlb_free(dev, page, size))
                return;
        dma_free_contiguous(dev, page, size);
}

static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size)
{
        struct page *page = swiotlb_alloc(dev, size);

        if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
                swiotlb_free(dev, page, size);
                return NULL;
        }

        return page;
}

static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
                gfp_t gfp, bool allow_highmem)
{
        int node = dev_to_node(dev);
        struct page *page = NULL;
        u64 phys_limit;

        WARN_ON_ONCE(!PAGE_ALIGNED(size));

        if (is_swiotlb_for_alloc(dev))
                return dma_direct_alloc_swiotlb(dev, size);

        gfp |= dma_direct_optimal_gfp_mask(dev, &phys_limit);
        page = dma_alloc_contiguous(dev, size, gfp);
        if (page) {
                if (!dma_coherent_ok(dev, page_to_phys(page), size) ||
                    (!allow_highmem && PageHighMem(page))) {
                        dma_free_contiguous(dev, page, size);
                        page = NULL;
                }
        }
again:
        if (!page)
                page = alloc_pages_node(node, gfp, get_order(size));
        if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
                dma_free_contiguous(dev, page, size);
                page = NULL;

                if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
                    phys_limit < DMA_BIT_MASK(64) &&
                    !(gfp & (GFP_DMA32 | GFP_DMA))) {
                        gfp |= GFP_DMA32;
                        goto again;
                }

                if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
                        gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
                        goto again;
                }
        }

        return page;
}

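/*
 * Worked example for the zone fallback in __dma_direct_alloc_pages() above
 * (illustrative only, assuming the default zone_dma_bits of 24 and a 1:1
 * phys-to-DMA translation): a device with a 30-bit coherent DMA mask gets
 * GFP_DMA32 from dma_direct_optimal_gfp_mask().  If the page that comes back
 * still fails dma_coherent_ok() - e.g. it sits above the 1 GiB limit - the
 * "again:" loop retries the allocation with GFP_DMA before giving up.
 */
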
/*
 * Check if a potentially blocking operation needs to dip into the atomic
 * pools for the given device/gfp.
 */
static bool dma_direct_use_pool(struct device *dev, gfp_t gfp)
{
        return !gfpflags_allow_blocking(gfp) && !is_swiotlb_for_alloc(dev);
}

static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp)
{
        struct page *page;
        u64 phys_limit;
        void *ret;

        if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_DMA_COHERENT_POOL)))
                return NULL;

        gfp |= dma_direct_optimal_gfp_mask(dev, &phys_limit);
        page = dma_alloc_from_pool(dev, size, &ret, gfp, dma_coherent_ok);
        if (!page)
                return NULL;
        *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
        return ret;
}

static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp)
{
        struct page *page;

        page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
        if (!page)
                return NULL;

        /* remove any dirty cache lines on the kernel alias */
        if (!PageHighMem(page))
                arch_dma_prep_coherent(page, size);

        /* return the page pointer as the opaque cookie */
        *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
        return page;
}

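/*
 * Illustrative sketch: callers reach the no-kernel-mapping path above
 * through dma_alloc_attrs() and get back an opaque struct page cookie
 * rather than a usable virtual address:
 *
 *	cookie = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL,
 *				 DMA_ATTR_NO_KERNEL_MAPPING);
 *
 * The cookie must not be dereferenced; it can only be handed back to
 * dma_free_attrs() with the same attributes.
 */
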
void *dma_direct_alloc(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
        bool remap = false, set_uncached = false;
        struct page *page;
        void *ret;

        size = PAGE_ALIGN(size);
        if (attrs & DMA_ATTR_NO_WARN)
                gfp |= __GFP_NOWARN;

        if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
            !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev))
                return dma_direct_alloc_no_mapping(dev, size, dma_handle, gfp);

        if (!dev_is_dma_coherent(dev)) {
                if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_ALLOC) &&
                    !is_swiotlb_for_alloc(dev))
                        return arch_dma_alloc(dev, size, dma_handle, gfp,
                                              attrs);

                /*
                 * If there is a global pool, always allocate from it for
                 * non-coherent devices.
                 */
                if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL))
                        return dma_alloc_from_global_coherent(dev, size,
                                        dma_handle);

                /*
                 * Otherwise we require the architecture to either be able to
                 * mark arbitrary parts of the kernel direct mapping uncached,
                 * or remap it uncached.
                 */
                set_uncached = IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED);
                remap = IS_ENABLED(CONFIG_DMA_DIRECT_REMAP);
                if (!set_uncached && !remap) {
                        pr_warn_once("coherent DMA allocations not supported on this platform\n");
                        return NULL;
                }
        }

        /*
         * Remapping or decrypting memory may block, allocate the memory from
         * the atomic pools instead if we aren't allowed to block.
         */
        if ((remap || force_dma_unencrypted(dev)) &&
            dma_direct_use_pool(dev, gfp))
                return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

        /* we always manually zero the memory once we are done */
        page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
        if (!page)
                return NULL;

        /*
         * dma_alloc_contiguous can return highmem pages depending on a
         * combination of the cma= arguments and per-arch setup.  These need
         * to be remapped to return a kernel virtual address.
         */
        if (PageHighMem(page)) {
                remap = true;
                set_uncached = false;
        }

        if (remap) {
                pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

                if (force_dma_unencrypted(dev))
                        prot = pgprot_decrypted(prot);

                /* remove any dirty cache lines on the kernel alias */
                arch_dma_prep_coherent(page, size);

                /* create a coherent mapping */
                ret = dma_common_contiguous_remap(page, size, prot,
                                __builtin_return_address(0));
                if (!ret)
                        goto out_free_pages;
        } else {
                ret = page_address(page);
                if (dma_set_decrypted(dev, ret, size))
                        goto out_leak_pages;
        }

        memset(ret, 0, size);

        if (set_uncached) {
                arch_dma_prep_coherent(page, size);
                ret = arch_dma_set_uncached(ret, size);
                if (IS_ERR(ret))
                        goto out_encrypt_pages;
        }

        *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
        return ret;

out_encrypt_pages:
        if (dma_set_encrypted(dev, page_address(page), size))
                return NULL;
out_free_pages:
        __dma_direct_free_pages(dev, page, size);
        return NULL;
out_leak_pages:
        return NULL;
}

void dma_direct_free(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
        unsigned int page_order = get_order(size);

        if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
            !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) {
                /* cpu_addr is a struct page cookie, not a kernel address */
                dma_free_contiguous(dev, cpu_addr, size);
                return;
        }

        if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_ALLOC) &&
            !dev_is_dma_coherent(dev) &&
            !is_swiotlb_for_alloc(dev)) {
                arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
                return;
        }

        if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
            !dev_is_dma_coherent(dev)) {
                if (!dma_release_from_global_coherent(page_order, cpu_addr))
                        WARN_ON_ONCE(1);
                return;
        }

        /* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
        if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
            dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
                return;

        if (is_vmalloc_addr(cpu_addr)) {
                vunmap(cpu_addr);
        } else {
                if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
                        arch_dma_clear_uncached(cpu_addr, size);
                if (dma_set_encrypted(dev, cpu_addr, size))
                        return;
        }

        __dma_direct_free_pages(dev, dma_direct_to_page(dev, dma_addr), size);
}

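/*
 * Illustrative call path (a sketch, not taken from this file): drivers do
 * not call dma_direct_alloc()/dma_direct_free() directly.  They go through
 * the generic API, which dispatches here when the device has no dma_map_ops
 * attached, i.e. no IOMMU in the path:
 *
 *	buf = dma_alloc_coherent(dev, SZ_4K, &dma_handle, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, SZ_4K, buf, dma_handle);
 */
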
struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
                dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
        struct page *page;
        void *ret;

        if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
                return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

        page = __dma_direct_alloc_pages(dev, size, gfp, false);
        if (!page)
                return NULL;

        ret = page_address(page);
        if (dma_set_decrypted(dev, ret, size))
                goto out_leak_pages;
        memset(ret, 0, size);
        *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
        return page;
out_leak_pages:
        return NULL;
}

void dma_direct_free_pages(struct device *dev, size_t size,
                struct page *page, dma_addr_t dma_addr,
                enum dma_data_direction dir)
{
        void *vaddr = page_address(page);

        /* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
        if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
            dma_free_from_pool(dev, vaddr, size))
                return;

        if (dma_set_encrypted(dev, vaddr, size))
                return;
        __dma_direct_free_pages(dev, page, size);
}

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_device(struct device *dev,
                struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

                swiotlb_sync_single_for_device(dev, paddr, sg->length, dir);

                if (!dev_is_dma_coherent(dev))
                        arch_sync_dma_for_device(paddr, sg->length,
                                        dir);
        }
}
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_cpu(struct device *dev,
                struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

                if (!dev_is_dma_coherent(dev))
                        arch_sync_dma_for_cpu(paddr, sg->length, dir);

                swiotlb_sync_single_for_cpu(dev, paddr, sg->length, dir);

                if (dir == DMA_FROM_DEVICE)
                        arch_dma_mark_clean(paddr, sg->length);
        }

        if (!dev_is_dma_coherent(dev))
                arch_sync_dma_for_cpu_all();
}

/*
 * Unmaps segments, except for ones marked as pci_p2pdma which do not
 * require any further action as they contain a bus address.
 */
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
                int nents, enum dma_data_direction dir, unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                if (sg_dma_is_bus_address(sg))
                        sg_dma_unmark_bus_address(sg);
                else
                        dma_direct_unmap_page(dev, sg->dma_address,
                                              sg_dma_len(sg), dir, attrs);
        }
}
#endif

int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
                enum dma_data_direction dir, unsigned long attrs)
{
        struct pci_p2pdma_map_state p2pdma_state = {};
        enum pci_p2pdma_map_type map;
        struct scatterlist *sg;
        int i, ret;

        for_each_sg(sgl, sg, nents, i) {
                if (is_pci_p2pdma_page(sg_page(sg))) {
                        map = pci_p2pdma_map_segment(&p2pdma_state, dev, sg);
                        switch (map) {
                        case PCI_P2PDMA_MAP_BUS_ADDR:
                                continue;
                        case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
                                /*
                                 * Any P2P mapping that traverses the PCI
                                 * host bridge must be mapped with CPU physical
                                 * address and not PCI bus addresses. This is
                                 * done with dma_direct_map_page() below.
                                 */
                                break;
                        default:
                                ret = -EREMOTEIO;
                                goto out_unmap;
                        }
                }

                sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
                                sg->offset, sg->length, dir, attrs);
                if (sg->dma_address == DMA_MAPPING_ERROR) {
                        ret = -EIO;
                        goto out_unmap;
                }
                sg_dma_len(sg) = sg->length;
        }

        return nents;

out_unmap:
        dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
        return ret;
}

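/*
 * Illustrative sketch: the scatterlist helpers above are normally reached
 * through the generic dma_map_sgtable()/dma_unmap_sgtable() API, e.g.:
 *
 *	ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
 *	if (ret)
 *		return ret;
 *	...
 *	dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
 *
 * dma_map_sgtable() returns 0 on success and propagates errors such as the
 * -EIO/-EREMOTEIO values set above.
 */
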
dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        dma_addr_t dma_addr = paddr;

        if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
                dev_err_once(dev,
                             "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
                             &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
                WARN_ON_ONCE(1);
                return DMA_MAPPING_ERROR;
        }

        return dma_addr;
}

int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        struct page *page = dma_direct_to_page(dev, dma_addr);
        int ret;

        ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
        if (!ret)
                sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
        return ret;
}

bool dma_direct_can_mmap(struct device *dev)
{
        return dev_is_dma_coherent(dev) ||
                IS_ENABLED(CONFIG_DMA_NONCOHERENT_MMAP);
}

int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        unsigned long user_count = vma_pages(vma);
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr));
        int ret = -ENXIO;

        vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
        if (force_dma_unencrypted(dev))
                vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

        if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;
        if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
                return ret;

        if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)
                return -ENXIO;
        return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
                        user_count << PAGE_SHIFT, vma->vm_page_prot);
}

int dma_direct_supported(struct device *dev, u64 mask)
{
        u64 min_mask = (max_pfn - 1) << PAGE_SHIFT;

        /*
         * Because 32-bit DMA masks are so common we expect every architecture
         * to be able to satisfy them - either by not supporting more physical
         * memory, or by providing a ZONE_DMA32.  If neither is the case, the
         * architecture needs to use an IOMMU instead of the direct mapping.
         */
        if (mask >= DMA_BIT_MASK(32))
                return 1;

        /*
         * This check needs to be against the actual bit mask value, so use
         * phys_to_dma_unencrypted() here so that the SME encryption mask isn't
         * part of the check.
         */
        if (IS_ENABLED(CONFIG_ZONE_DMA))
                min_mask = min_t(u64, min_mask, DMA_BIT_MASK(zone_dma_bits));
        return mask >= phys_to_dma_unencrypted(dev, min_mask);
}

/*
 * Check whether all RAM resource ranges are covered by the DMA range map.
 * Returns 0 when the range is covered and the walk should continue,
 * 1 if some RAM range can't be covered by dma_range_map.
 */
static int check_ram_in_range_map(unsigned long start_pfn,
                                  unsigned long nr_pages, void *data)
{
        unsigned long end_pfn = start_pfn + nr_pages;
        const struct bus_dma_region *bdr = NULL;
        const struct bus_dma_region *m;
        struct device *dev = data;

        while (start_pfn < end_pfn) {
                for (m = dev->dma_range_map; PFN_DOWN(m->size); m++) {
                        unsigned long cpu_start_pfn = PFN_DOWN(m->cpu_start);

                        if (start_pfn >= cpu_start_pfn &&
                            start_pfn - cpu_start_pfn < PFN_DOWN(m->size)) {
                                bdr = m;
                                break;
                        }
                }
                if (!bdr)
                        return 1;

                start_pfn = PFN_DOWN(bdr->cpu_start) + PFN_DOWN(bdr->size);
        }

        return 0;
}

bool dma_direct_all_ram_mapped(struct device *dev)
{
        if (!dev->dma_range_map)
                return true;
        return !walk_system_ram_range(0, PFN_DOWN(ULONG_MAX) + 1, dev,
                                      check_ram_in_range_map);
}

size_t dma_direct_max_mapping_size(struct device *dev)
{
        /* If SWIOTLB is active, use its maximum mapping size */
        if (is_swiotlb_active(dev) &&
            (dma_addressing_limited(dev) || is_swiotlb_force_bounce(dev)))
                return swiotlb_max_mapping_size(dev);
        return SIZE_MAX;
}

bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr)
{
        return !dev_is_dma_coherent(dev) ||
               swiotlb_find_pool(dev, dma_to_phys(dev, dma_addr));
}

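/*
 * Hypothetical usage sketch: dma_direct_need_sync() backs the generic
 * dma_need_sync() helper, which lets drivers skip per-buffer cache
 * maintenance when the device is cache-coherent and not bounced through
 * swiotlb:
 *
 *	if (dma_need_sync(dev, dma_handle))
 *		dma_sync_single_for_cpu(dev, dma_handle, len,
 *					DMA_FROM_DEVICE);
 */
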
/**
 * dma_direct_set_offset - Assign scalar offset for a single DMA range.
 * @dev:	device pointer; needed to "own" the alloced memory.
 * @cpu_start:	beginning of memory region covered by this offset.
 * @dma_start:	beginning of DMA/PCI region covered by this offset.
 * @size:	size of the region.
 *
 * This is for the simple case of a uniform offset which cannot
 * be discovered by "dma-ranges".
 *
 * It returns -ENOMEM if out of memory, -EINVAL if a map
 * already exists, 0 otherwise.
 *
 * Note: any call to this from a driver is a bug. The mapping needs
 * to be described by the device tree or other firmware interfaces.
 */
int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
                         dma_addr_t dma_start, u64 size)
{
        struct bus_dma_region *map;
        u64 offset = (u64)cpu_start - (u64)dma_start;

        if (dev->dma_range_map) {
                dev_err(dev, "attempt to add DMA range to existing map\n");
                return -EINVAL;
        }

        if (!offset)
                return 0;

        map = kcalloc(2, sizeof(*map), GFP_KERNEL);
        if (!map)
                return -ENOMEM;
        map[0].cpu_start = cpu_start;
        map[0].dma_start = dma_start;
        map[0].size = size;
        dev->dma_range_map = map;
        return 0;
}

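/*
 * Illustrative sketch only: platform glue for a bus whose devices see system
 * RAM at a fixed offset could register the translation roughly like this;
 * the example_* name and the 0x80000000/0x0 addresses are made up:
 *
 *	static int example_setup_bus_offset(struct device *dev)
 *	{
 *		// CPU 0x80000000..0xbfffffff appears as DMA 0x0..0x3fffffff
 *		return dma_direct_set_offset(dev, 0x80000000, 0x0, SZ_1G);
 *	}
 *
 * As the kerneldoc above notes, this belongs in firmware/platform code
 * (e.g. device tree "dma-ranges"), never in an individual driver.
 */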