// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/scatterlist.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-noncoherent.h>
#include <linux/pfn.h>
#include <linux/set_memory.h>

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but
 * some use it for entirely different regions:
 */
#ifndef ARCH_ZONE_DMA_BITS
#define ARCH_ZONE_DMA_BITS 24
#endif

/*
 * For AMD SEV all DMA must be to unencrypted addresses.
 */
static inline bool force_dma_unencrypted(void)
{
	return sev_active();
}

static bool
check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
		const char *caller)
{
	if (unlikely(dev && !dma_capable(dev, dma_addr, size))) {
		if (!dev->dma_mask) {
			dev_err(dev,
				"%s: call on device without dma_mask\n",
				caller);
			return false;
		}

		if (*dev->dma_mask >= DMA_BIT_MASK(32) || dev->bus_dma_mask) {
			dev_err(dev,
				"%s: overflow %pad+%zu of device mask %llx bus mask %llx\n",
				caller, &dma_addr, size,
				*dev->dma_mask, dev->bus_dma_mask);
		}
		return false;
	}
	return true;
}

static inline dma_addr_t phys_to_dma_direct(struct device *dev,
		phys_addr_t phys)
{
	if (force_dma_unencrypted())
		return __phys_to_dma(dev, phys);
	return phys_to_dma(dev, phys);
}

u64 dma_direct_get_required_mask(struct device *dev)
{
	u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT);

	if (dev->bus_dma_mask && dev->bus_dma_mask < max_dma)
		max_dma = dev->bus_dma_mask;

	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}

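/*
 * Pick the smallest GFP zone modifier that can satisfy the given DMA mask
 * (after clamping it to any bus DMA mask) and report the matching physical
 * address limit back through @phys_mask for the retry logic in
 * dma_direct_alloc_pages().
 */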
static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
		u64 *phys_mask)
{
	if (dev->bus_dma_mask && dev->bus_dma_mask < dma_mask)
		dma_mask = dev->bus_dma_mask;

	if (force_dma_unencrypted())
		*phys_mask = __dma_to_phys(dev, dma_mask);
	else
		*phys_mask = dma_to_phys(dev, dma_mask);

	/*
	 * Optimistically try the zone that the physical address mask falls
	 * into first.  If that returns memory that isn't actually addressable
	 * we will fallback to the next lower zone and try again.
	 *
	 * Note that GFP_DMA32 and GFP_DMA are no ops without the corresponding
	 * zones.
	 */
	if (*phys_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
		return GFP_DMA;
	if (*phys_mask <= DMA_BIT_MASK(32))
		return GFP_DMA32;
	return 0;
}

static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
	return phys_to_dma_direct(dev, phys) + size - 1 <=
		min_not_zero(dev->coherent_dma_mask, dev->bus_dma_mask);
}

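/*
 * Allocate zeroed memory the device can reach: try CMA first when the
 * context allows sleeping, then the page allocator, falling back to
 * GFP_DMA32 and GFP_DMA in turn when the pages returned do not satisfy
 * dma_coherent_ok().
 */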
void *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int page_order = get_order(size);
	struct page *page = NULL;
	u64 phys_mask;
	void *ret;

	if (attrs & DMA_ATTR_NO_WARN)
		gfp |= __GFP_NOWARN;

	/* we always manually zero the memory once we are done: */
	gfp &= ~__GFP_ZERO;
	gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
			&phys_mask);
again:
	/* CMA can be used only in the context which permits sleeping */
	if (gfpflags_allow_blocking(gfp)) {
		page = dma_alloc_from_contiguous(dev, count, page_order,
						 gfp & __GFP_NOWARN);
		if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
			dma_release_from_contiguous(dev, page, count);
			page = NULL;
		}
	}
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), gfp, page_order);

	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		__free_pages(page, page_order);
		page = NULL;

		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
		    phys_mask < DMA_BIT_MASK(64) &&
		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
			gfp |= GFP_DMA32;
			goto again;
		}

		if (IS_ENABLED(CONFIG_ZONE_DMA) &&
		    phys_mask < DMA_BIT_MASK(32) && !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}

	if (!page)
		return NULL;
	ret = page_address(page);
	if (force_dma_unencrypted()) {
		set_memory_decrypted((unsigned long)ret, 1 << page_order);
		*dma_handle = __phys_to_dma(dev, page_to_phys(page));
	} else {
		*dma_handle = phys_to_dma(dev, page_to_phys(page));
	}
	memset(ret, 0, size);
	return ret;
}

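/*
 * Illustrative call path (not part of this file): a driver that asks for a
 * coherent buffer via the generic API, e.g.
 *
 *	void *buf = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL, 0);
 *
 * ends up in dma_direct_alloc_pages() above (via dma_direct_alloc() below)
 * whenever dma_direct_ops is the device's dma_map_ops.
 */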
/*
 * NOTE: this function must never look at the dma_addr argument, because we want
 * to be able to use it as a helper for iommu implementations as well.
 */
void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned int page_order = get_order(size);

	if (force_dma_unencrypted())
		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
	if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count))
		free_pages((unsigned long)cpu_addr, page_order);
}

void *dma_direct_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	if (!dev_is_dma_coherent(dev))
		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
	return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
}

void dma_direct_free(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
	if (!dev_is_dma_coherent(dev))
		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
	else
		dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
}

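/*
 * Cache maintenance helpers: for non-coherent devices the architecture
 * code flushes or invalidates the CPU caches around each transfer; fully
 * coherent devices return early without touching the caches.
 */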
static void dma_direct_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	if (dev_is_dma_coherent(dev))
		return;
	arch_sync_dma_for_device(dev, dma_to_phys(dev, addr), size, dir);
}

static void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nents, i)
		arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
}

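/*
 * The CPU-directed syncs and the unmap hooks are only compiled in (and only
 * wired up in dma_direct_ops below) when the architecture implements cache
 * maintenance for the CPU side of a transfer; otherwise unmapping a direct
 * mapping requires no work at all.
 */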
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
static void dma_direct_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	if (dev_is_dma_coherent(dev))
		return;
	arch_sync_dma_for_cpu(dev, dma_to_phys(dev, addr), size, dir);
	arch_sync_dma_for_cpu_all(dev);
}

static void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nents, i)
		arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
	arch_sync_dma_for_cpu_all(dev);
}

static void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
}

static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_direct_sync_sg_for_cpu(dev, sgl, nents, dir);
}
#endif

dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dma_addr = phys_to_dma(dev, phys);

	if (!check_addr(dev, dma_addr, size, __func__))
		return DIRECT_MAPPING_ERROR;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_direct_sync_single_for_device(dev, dma_addr, size, dir);
	return dma_addr;
}

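/*
 * Map each scatterlist element with a simple phys_to_dma() translation.
 * As soon as one element falls outside the device's addressing limits the
 * whole mapping fails and 0 is returned.
 */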
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		BUG_ON(!sg_page(sg));

		sg_dma_address(sg) = phys_to_dma(dev, sg_phys(sg));
		if (!check_addr(dev, sg_dma_address(sg), sg->length, __func__))
			return 0;
		sg_dma_len(sg) = sg->length;
	}

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_direct_sync_sg_for_device(dev, sgl, nents, dir);
	return nents;
}

/*
 * Because 32-bit DMA masks are so common we expect every architecture to be
 * able to satisfy them - either by not supporting more physical memory, or by
 * providing a ZONE_DMA32.  If neither is the case, the architecture needs to
 * use an IOMMU instead of the direct mapping.
 */
int dma_direct_supported(struct device *dev, u64 mask)
{
	u64 min_mask;

	if (IS_ENABLED(CONFIG_ZONE_DMA))
		min_mask = DMA_BIT_MASK(ARCH_ZONE_DMA_BITS);
	else
		min_mask = DMA_BIT_MASK(32);

	min_mask = min_t(u64, min_mask, (max_pfn - 1) << PAGE_SHIFT);

	/*
	 * This check needs to be against the actual bit mask value, so
	 * use __phys_to_dma() here so that the SME encryption mask isn't
	 * part of the check.
	 */
	return mask >= __phys_to_dma(dev, min_mask);
}

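/*
 * Worked example for dma_direct_supported() above (illustrative, assuming a
 * 1:1 physical-to-DMA translation): with CONFIG_ZONE_DMA enabled and more
 * than 16 MB of RAM, min_mask ends up as DMA_BIT_MASK(24) = 0xffffff, so any
 * device mask of 24 bits or more is accepted, while a smaller mask is
 * rejected and the architecture is expected to provide an IOMMU instead.
 */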
int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == DIRECT_MAPPING_ERROR;
}

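/*
 * The dma_map_ops instance that architectures point devices at (via
 * get_arch_dma_ops() or dev->dma_ops) when device DMA addresses are host
 * physical addresses, possibly adjusted by a fixed offset.
 */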
const struct dma_map_ops dma_direct_ops = {
	.alloc			= dma_direct_alloc,
	.free			= dma_direct_free,
	.map_page		= dma_direct_map_page,
	.map_sg			= dma_direct_map_sg,
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE)
	.sync_single_for_device = dma_direct_sync_single_for_device,
	.sync_sg_for_device	= dma_direct_sync_sg_for_device,
#endif
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
	.sync_single_for_cpu	= dma_direct_sync_single_for_cpu,
	.sync_sg_for_cpu	= dma_direct_sync_sg_for_cpu,
	.unmap_page		= dma_direct_unmap_page,
	.unmap_sg		= dma_direct_unmap_sg,
#endif
	.get_required_mask	= dma_direct_get_required_mask,
	.dma_supported		= dma_direct_supported,
	.mapping_error		= dma_direct_mapping_error,
	.cache_sync		= arch_dma_cache_sync,
};
EXPORT_SYMBOL(dma_direct_ops);