// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-map-ops.h>
#include <linux/scatterlist.h>
#include <linux/pfn.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include "direct.h"

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but some use
 * it for entirely different regions. In that case the arch code needs to
 * override the variable below for dma-direct to work properly.
 */
unsigned int zone_dma_bits __ro_after_init = 24;

static inline dma_addr_t phys_to_dma_direct(struct device *dev,
		phys_addr_t phys)
{
	if (force_dma_unencrypted(dev))
		return phys_to_dma_unencrypted(dev, phys);
	return phys_to_dma(dev, phys);
}

static inline struct page *dma_direct_to_page(struct device *dev,
		dma_addr_t dma_addr)
{
	return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr)));
}

u64 dma_direct_get_required_mask(struct device *dev)
{
	phys_addr_t phys = (phys_addr_t)(max_pfn - 1) << PAGE_SHIFT;
	u64 max_dma = phys_to_dma_direct(dev, phys);

	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}

static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 *phys_limit)
{
	u64 dma_limit = min_not_zero(
		dev->coherent_dma_mask,
		dev->bus_dma_limit);

	/*
	 * Optimistically try the zone that the physical address mask falls
	 * into first.  If that returns memory that isn't actually addressable
	 * we will fallback to the next lower zone and try again.
	 *
	 * Note that GFP_DMA32 and GFP_DMA are no ops without the corresponding
	 * zones.
	 */
	*phys_limit = dma_to_phys(dev, dma_limit);
	if (*phys_limit <= DMA_BIT_MASK(zone_dma_bits))
		return GFP_DMA;
	if (*phys_limit <= DMA_BIT_MASK(32))
		return GFP_DMA32;
	return 0;
}

bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
	dma_addr_t dma_addr = phys_to_dma_direct(dev, phys);

	if (dma_addr == DMA_MAPPING_ERROR)
		return false;
	return dma_addr + size - 1 <=
		min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
}

static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
{
	if (!force_dma_unencrypted(dev))
		return 0;
	return set_memory_decrypted((unsigned long)vaddr, PFN_UP(size));
}

static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
{
	int ret;

	if (!force_dma_unencrypted(dev))
		return 0;
	ret = set_memory_encrypted((unsigned long)vaddr, PFN_UP(size));
	if (ret)
		pr_warn_ratelimited("leaking DMA memory that can't be re-encrypted\n");
	return ret;
}

static void __dma_direct_free_pages(struct device *dev, struct page *page,
				    size_t size)
{
	if (swiotlb_free(dev, page, size))
		return;
	dma_free_contiguous(dev, page, size);
}

static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size)
{
	struct page *page = swiotlb_alloc(dev, size);

	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		swiotlb_free(dev, page, size);
		return NULL;
	}

	return page;
}

static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
		gfp_t gfp, bool allow_highmem)
{
	int node = dev_to_node(dev);
	struct page *page = NULL;
	u64 phys_limit;

	WARN_ON_ONCE(!PAGE_ALIGNED(size));

	if (is_swiotlb_for_alloc(dev))
		return dma_direct_alloc_swiotlb(dev, size);

	gfp |= dma_direct_optimal_gfp_mask(dev, &phys_limit);
	page = dma_alloc_contiguous(dev, size, gfp);
	if (page) {
		if (!dma_coherent_ok(dev, page_to_phys(page), size) ||
		    (!allow_highmem && PageHighMem(page))) {
			dma_free_contiguous(dev, page, size);
			page = NULL;
		}
	}
again:
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(size));
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		dma_free_contiguous(dev, page, size);
		page = NULL;

		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
		    phys_limit < DMA_BIT_MASK(64) &&
		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
			gfp |= GFP_DMA32;
			goto again;
		}

		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}

	return page;
}

/*
 * Check if a potentially blocking operations needs to dip into the atomic
 * pools for the given device/gfp.
 */
static bool dma_direct_use_pool(struct device *dev, gfp_t gfp)
{
	return !gfpflags_allow_blocking(gfp) && !is_swiotlb_for_alloc(dev);
}

static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	struct page *page;
	u64 phys_limit;
	void *ret;

	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_DMA_COHERENT_POOL)))
		return NULL;

	gfp |= dma_direct_optimal_gfp_mask(dev, &phys_limit);
	page = dma_alloc_from_pool(dev, size, &ret, gfp, dma_coherent_ok);
	if (!page)
		return NULL;
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return ret;
}

static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	struct page *page;

	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
	if (!page)
		return NULL;

	/* remove any dirty cache lines on the kernel alias */
	if (!PageHighMem(page))
		arch_dma_prep_coherent(page, size);

	/* return the page pointer as the opaque cookie */
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return page;
}

void *dma_direct_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	bool remap = false, set_uncached = false;
	struct page *page;
	void *ret;

	size = PAGE_ALIGN(size);
	if (attrs & DMA_ATTR_NO_WARN)
		gfp |= __GFP_NOWARN;

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev))
		return dma_direct_alloc_no_mapping(dev, size, dma_handle, gfp);

	if (!dev_is_dma_coherent(dev)) {
		if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_ALLOC) &&
		    !is_swiotlb_for_alloc(dev))
			return arch_dma_alloc(dev, size, dma_handle, gfp,
					      attrs);

		/*
		 * If there is a global pool, always allocate from it for
		 * non-coherent devices.
		 */
		if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL))
			return dma_alloc_from_global_coherent(dev, size,
					dma_handle);

		/*
		 * Otherwise we require the architecture to either be able to
		 * mark arbitrary parts of the kernel direct mapping uncached,
		 * or remapped it uncached.
		 */
		set_uncached = IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED);
		remap = IS_ENABLED(CONFIG_DMA_DIRECT_REMAP);
		if (!set_uncached && !remap) {
			pr_warn_once("coherent DMA allocations not supported on this platform\n");
			return NULL;
		}
	}

	/*
	 * Remapping or decrypting memory may block, allocate the memory from
	 * the atomic pools instead if we aren't allowed block.
	 */
	if ((remap || force_dma_unencrypted(dev)) &&
	    dma_direct_use_pool(dev, gfp))
		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

	/* we always manually zero the memory once we are done */
	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
	if (!page)
		return NULL;

	/*
	 * dma_alloc_contiguous can return highmem pages depending on a
	 * combination the cma= arguments and per-arch setup.  These need to be
	 * remapped to return a kernel virtual address.
	 */
	if (PageHighMem(page)) {
		remap = true;
		set_uncached = false;
	}

	if (remap) {
		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

		if (force_dma_unencrypted(dev))
			prot = pgprot_decrypted(prot);

		/* remove any dirty cache lines on the kernel alias */
		arch_dma_prep_coherent(page, size);

		/* create a coherent mapping */
		ret = dma_common_contiguous_remap(page, size, prot,
				__builtin_return_address(0));
		if (!ret)
			goto out_free_pages;
	} else {
		ret = page_address(page);
		if (dma_set_decrypted(dev, ret, size))
			goto out_leak_pages;
	}

	memset(ret, 0, size);

	if (set_uncached) {
		arch_dma_prep_coherent(page, size);
		ret = arch_dma_set_uncached(ret, size);
		if (IS_ERR(ret))
			goto out_encrypt_pages;
	}

	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return ret;

out_encrypt_pages:
	if (dma_set_encrypted(dev, page_address(page), size))
		return NULL;
out_free_pages:
	__dma_direct_free_pages(dev, page, size);
	return NULL;
out_leak_pages:
	return NULL;
}

void dma_direct_free(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int page_order = get_order(size);

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) {
		/* cpu_addr is a struct page cookie, not a kernel address */
		dma_free_contiguous(dev, cpu_addr, size);
		return;
	}

	if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_ALLOC) &&
	    !dev_is_dma_coherent(dev) &&
	    !is_swiotlb_for_alloc(dev)) {
		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
		return;
	}

	if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
	    !dev_is_dma_coherent(dev)) {
		if (!dma_release_from_global_coherent(page_order, cpu_addr))
			WARN_ON_ONCE(1);
		return;
	}

	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
		return;

	if (is_vmalloc_addr(cpu_addr)) {
		vunmap(cpu_addr);
	} else {
		if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED))
			arch_dma_clear_uncached(cpu_addr, size);
		if (dma_set_encrypted(dev, cpu_addr, size))
			return;
	}

	__dma_direct_free_pages(dev, dma_direct_to_page(dev, dma_addr), size);
}

struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page;
	void *ret;

	if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

	page = __dma_direct_alloc_pages(dev, size, gfp, false);
	if (!page)
		return NULL;

	ret = page_address(page);
	if (dma_set_decrypted(dev, ret, size))
		goto out_leak_pages;
	memset(ret, 0, size);
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return page;
out_leak_pages:
	return NULL;
}

void dma_direct_free_pages(struct device *dev, size_t size,
		struct page *page, dma_addr_t dma_addr,
		enum dma_data_direction dir)
{
	void *vaddr = page_address(page);

	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
	    dma_free_from_pool(dev, vaddr, size))
		return;

	if (dma_set_encrypted(dev, vaddr, size))
		return;
	__dma_direct_free_pages(dev, page, size);
}

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		swiotlb_sync_single_for_device(dev, paddr, sg->length, dir);

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_device(paddr, sg->length,
					dir);
	}
}
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_cpu(paddr, sg->length, dir);

		swiotlb_sync_single_for_cpu(dev, paddr, sg->length, dir);

		if (dir == DMA_FROM_DEVICE)
			arch_dma_mark_clean(paddr, sg->length);
	}

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu_all();
}

/*
 * Unmaps segments, except for ones marked as pci_p2pdma which do not
 * require any further action as they contain a bus address.
 */
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		if (sg_dma_is_bus_address(sg))
			sg_dma_unmark_bus_address(sg);
		else
			dma_direct_unmap_page(dev, sg->dma_address,
					      sg_dma_len(sg), dir, attrs);
	}
}
#endif

int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct pci_p2pdma_map_state p2pdma_state = {};
	enum pci_p2pdma_map_type map;
	struct scatterlist *sg;
	int i, ret;

	for_each_sg(sgl, sg, nents, i) {
		if (is_pci_p2pdma_page(sg_page(sg))) {
			map = pci_p2pdma_map_segment(&p2pdma_state, dev, sg);
			switch (map) {
			case PCI_P2PDMA_MAP_BUS_ADDR:
				continue;
			case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
				/*
				 * Any P2P mapping that traverses the PCI
				 * host bridge must be mapped with CPU physical
				 * address and not PCI bus addresses. This is
				 * done with dma_direct_map_page() below.
				 */
				break;
			default:
				ret = -EREMOTEIO;
				goto out_unmap;
			}
		}

		sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR) {
			ret = -EIO;
			goto out_unmap;
		}
		sg_dma_len(sg) = sg->length;
	}

	return nents;

out_unmap:
	dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	return ret;
}

dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t dma_addr = paddr;

	if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
		dev_err_once(dev,
			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		WARN_ON_ONCE(1);
		return DMA_MAPPING_ERROR;
	}

	return dma_addr;
}

int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page = dma_direct_to_page(dev, dma_addr);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

bool dma_direct_can_mmap(struct device *dev)
{
	return dev_is_dma_coherent(dev) ||
		IS_ENABLED(CONFIG_DMA_NONCOHERENT_MMAP);
}

int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr));
	int ret = -ENXIO;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
	if (force_dma_unencrypted(dev))
		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;
	if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
		return ret;

	if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)
		return -ENXIO;
	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
}

int dma_direct_supported(struct device *dev, u64 mask)
{
	u64 min_mask = (max_pfn - 1) << PAGE_SHIFT;

	/*
	 * Because 32-bit DMA masks are so common we expect every architecture
	 * to be able to satisfy them - either by not supporting more physical
	 * memory, or by providing a ZONE_DMA32.  If neither is the case, the
	 * architecture needs to use an IOMMU instead of the direct mapping.
	 */
	if (mask >= DMA_BIT_MASK(32))
		return 1;

	/*
	 * This check needs to be against the actual bit mask value, so use
	 * phys_to_dma_unencrypted() here so that the SME encryption mask isn't
	 * part of the check.
	 */
	if (IS_ENABLED(CONFIG_ZONE_DMA))
		min_mask = min_t(u64, min_mask, DMA_BIT_MASK(zone_dma_bits));
	return mask >= phys_to_dma_unencrypted(dev, min_mask);
}

/*
 * To check whether all ram resource ranges are covered by dma range map
 * Returns 0 when further check is needed
 * Returns 1 if there is some RAM range can't be covered by dma_range_map
 */
static int check_ram_in_range_map(unsigned long start_pfn,
				  unsigned long nr_pages, void *data)
{
	unsigned long end_pfn = start_pfn + nr_pages;
	const struct bus_dma_region *bdr = NULL;
	const struct bus_dma_region *m;
	struct device *dev = data;

	while (start_pfn < end_pfn) {
		for (m = dev->dma_range_map; PFN_DOWN(m->size); m++) {
			unsigned long cpu_start_pfn = PFN_DOWN(m->cpu_start);

			if (start_pfn >= cpu_start_pfn &&
			    start_pfn - cpu_start_pfn < PFN_DOWN(m->size)) {
				bdr = m;
				break;
			}
		}
		if (!bdr)
			return 1;

		start_pfn = PFN_DOWN(bdr->cpu_start) + PFN_DOWN(bdr->size);
	}

	return 0;
}

bool dma_direct_all_ram_mapped(struct device *dev)
{
	if (!dev->dma_range_map)
		return true;
	return !walk_system_ram_range(0, PFN_DOWN(ULONG_MAX) + 1, dev,
				      check_ram_in_range_map);
}

size_t dma_direct_max_mapping_size(struct device *dev)
{
	/* If SWIOTLB is active, use its maximum mapping size */
	if (is_swiotlb_active(dev) &&
	    (dma_addressing_limited(dev) || is_swiotlb_force_bounce(dev)))
		return swiotlb_max_mapping_size(dev);
	return SIZE_MAX;
}

bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return !dev_is_dma_coherent(dev) ||
	       swiotlb_find_pool(dev, dma_to_phys(dev, dma_addr));
}

/**
 * dma_direct_set_offset - Assign scalar offset for a single DMA range.
 * @dev:	device pointer; needed to "own" the alloced memory.
 * @cpu_start:	beginning of memory region covered by this offset.
 * @dma_start:	beginning of DMA/PCI region covered by this offset.
 * @size:	size of the region.
 *
 * This is for the simple case of a uniform offset which cannot
 * be discovered by "dma-ranges".
 *
 * It returns -ENOMEM if out of memory, -EINVAL if a map
 * already exists, 0 otherwise.
 *
 * Note: any call to this from a driver is a bug.  The mapping needs
 * to be described by the device tree or other firmware interfaces.
 */
int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
			 dma_addr_t dma_start, u64 size)
{
	struct bus_dma_region *map;
	u64 offset = (u64)cpu_start - (u64)dma_start;

	if (dev->dma_range_map) {
		dev_err(dev, "attempt to add DMA range to existing map\n");
		return -EINVAL;
	}

	if (!offset)
		return 0;

	map = kcalloc(2, sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;
	map[0].cpu_start = cpu_start;
	map[0].dma_start = dma_start;
	map[0].size = size;
	dev->dma_range_map = map;
	return 0;
}
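/*
 * Editor's illustrative sketch (not part of the kernel source): how
 * platform/bus setup code -- per the kerneldoc above, never a driver --
 * might describe a fixed translation where RAM at CPU address 0x80000000
 * is seen by devices at bus address 0x00000000.  The function name,
 * device, and addresses below are hypothetical; SZ_1G is from
 * <linux/sizes.h>.
 *
 *	static int example_bus_setup_dma(struct device *dev)
 *	{
 *		// CPU 0x80000000 <-> DMA 0x00000000, 1 GiB window
 *		return dma_direct_set_offset(dev, 0x80000000, 0x00000000,
 *					     SZ_1G);
 *	}
 */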