// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-map-ops.h>
#include <linux/scatterlist.h>
#include <linux/pfn.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include "direct.h"

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but some use
 * it for entirely different regions. In that case the arch code needs to
 * override the variable below for dma-direct to work properly.
 */
unsigned int zone_dma_bits __ro_after_init = 24;

static inline dma_addr_t phys_to_dma_direct(struct device *dev,
		phys_addr_t phys)
{
	if (force_dma_unencrypted(dev))
		return phys_to_dma_unencrypted(dev, phys);
	return phys_to_dma(dev, phys);
}

static inline struct page *dma_direct_to_page(struct device *dev,
		dma_addr_t dma_addr)
{
	return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr)));
}

u64 dma_direct_get_required_mask(struct device *dev)
{
	phys_addr_t phys = (phys_addr_t)(max_pfn - 1) << PAGE_SHIFT;
	u64 max_dma = phys_to_dma_direct(dev, phys);

	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}

static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 *phys_limit)
{
	u64 dma_limit = min_not_zero(
			dev->coherent_dma_mask,
			dev->bus_dma_limit);
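	/* a zero ->bus_dma_limit means "no bus limit", hence min_not_zero() above */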

	/*
	 * Optimistically try the zone that the physical address mask falls
	 * into first.  If that returns memory that isn't actually addressable
	 * we will fall back to the next lower zone and try again.
	 *
	 * Note that GFP_DMA32 and GFP_DMA are no ops without the corresponding
	 * zones.
	 */
	*phys_limit = dma_to_phys(dev, dma_limit);
	if (*phys_limit <= DMA_BIT_MASK(zone_dma_bits))
		return GFP_DMA;
	if (*phys_limit <= DMA_BIT_MASK(32))
		return GFP_DMA32;
	return 0;
}

bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
	dma_addr_t dma_addr = phys_to_dma_direct(dev, phys);

	if (dma_addr == DMA_MAPPING_ERROR)
		return false;
	return dma_addr + size - 1 <=
		min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
}

static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
{
	if (!force_dma_unencrypted(dev))
		return 0;
	return set_memory_decrypted((unsigned long)vaddr, PFN_UP(size));
}

static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
{
	int ret;

	if (!force_dma_unencrypted(dev))
		return 0;
	ret = set_memory_encrypted((unsigned long)vaddr, PFN_UP(size));
	if (ret)
		pr_warn_ratelimited("leaking DMA memory that can't be re-encrypted\n");
	return ret;
}

static void __dma_direct_free_pages(struct device *dev, struct page *page,
				    size_t size)
{
	if (swiotlb_free(dev, page, size))
		return;
	dma_free_contiguous(dev, page, size);
}

static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size)
{
	struct page *page = swiotlb_alloc(dev, size);

	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		swiotlb_free(dev, page, size);
		return NULL;
	}

	return page;
}

static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
		gfp_t gfp, bool allow_highmem)
{
	int node = dev_to_node(dev);
	struct page *page = NULL;
	u64 phys_limit;

	WARN_ON_ONCE(!PAGE_ALIGNED(size));

	if (is_swiotlb_for_alloc(dev))
		return dma_direct_alloc_swiotlb(dev, size);

	gfp |= dma_direct_optimal_gfp_mask(dev, &phys_limit);
	page = dma_alloc_contiguous(dev, size, gfp);
	if (page) {
		if (!dma_coherent_ok(dev, page_to_phys(page), size) ||
		    (!allow_highmem && PageHighMem(page))) {
			dma_free_contiguous(dev, page, size);
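			/* fall back to plain page allocation below */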
			page = NULL;
		}
	}
again:
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(size));
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		dma_free_contiguous(dev, page, size);
		page = NULL;

		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
		    phys_limit < DMA_BIT_MASK(64) &&
		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
			gfp |= GFP_DMA32;
			goto again;
		}

		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}

	return page;
}

/*
 * Check if a potentially blocking operation needs to dip into the atomic
 * pools for the given device/gfp.
 */
static bool dma_direct_use_pool(struct device *dev, gfp_t gfp)
{
	return !gfpflags_allow_blocking(gfp) && !is_swiotlb_for_alloc(dev);
}

static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	struct page *page;
	u64 phys_limit;
	void *ret;

	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_DMA_COHERENT_POOL)))
		return NULL;

	gfp |= dma_direct_optimal_gfp_mask(dev, &phys_limit);
	page = dma_alloc_from_pool(dev, size, &ret, gfp, dma_coherent_ok);
	if (!page)
		return NULL;
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return ret;
}

static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	struct page *page;

	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
	if (!page)
		return NULL;

	/* remove any dirty cache lines on the kernel alias */
	if (!PageHighMem(page))
		arch_dma_prep_coherent(page, size);

	/* return the page pointer as the opaque cookie */
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return page;
}

void *dma_direct_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	bool remap = false, set_uncached = false;
	struct page *page;
	void *ret;

	size = PAGE_ALIGN(size);
	if (attrs & DMA_ATTR_NO_WARN)
		gfp |= __GFP_NOWARN;

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev))
		return dma_direct_alloc_no_mapping(dev, size, dma_handle, gfp);

	if (!dev_is_dma_coherent(dev)) {
		if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_ALLOC) &&
		    !is_swiotlb_for_alloc(dev))
			return arch_dma_alloc(dev, size, dma_handle, gfp,
					      attrs);

		/*
		 * If there is a global pool, always allocate from it for
		 * non-coherent devices.
		 */
		if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL))
			return dma_alloc_from_global_coherent(dev, size,
					dma_handle);

		/*
		 * Otherwise we require the architecture to either be able to
		 * mark arbitrary parts of the kernel direct mapping uncached,
		 * or remap it uncached.
		 */
		set_uncached = IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED);
		remap = IS_ENABLED(CONFIG_DMA_DIRECT_REMAP);
		if (!set_uncached && !remap) {
			pr_warn_once("coherent DMA allocations not supported on this platform\n");
			return NULL;
		}
	}

	/*
	 * Remapping or decrypting memory may block, allocate the memory from
	 * the atomic pools instead if we aren't allowed to block.
	 */
	if ((remap || force_dma_unencrypted(dev)) &&
	    dma_direct_use_pool(dev, gfp))
		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

	/* we always manually zero the memory once we are done */
	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
	if (!page)
		return NULL;

	/*
	 * dma_alloc_contiguous can return highmem pages depending on a
	 * combination of the cma= arguments and per-arch setup.  These need
	 * to be remapped to return a kernel virtual address.
	 */
	if (PageHighMem(page)) {
		remap = true;
		set_uncached = false;
	}

	if (remap) {
		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

		if (force_dma_unencrypted(dev))
			prot = pgprot_decrypted(prot);

		/* remove any dirty cache lines on the kernel alias */
		arch_dma_prep_coherent(page, size);

		/* create a coherent mapping */
		ret = dma_common_contiguous_remap(page, size, prot,
				__builtin_return_address(0));
		if (!ret)
			goto out_free_pages;
	} else {
		ret = page_address(page);
		if (dma_set_decrypted(dev, ret, size))
			goto out_leak_pages;
	}

	memset(ret, 0, size);

	if (set_uncached) {
		arch_dma_prep_coherent(page, size);
		ret = arch_dma_set_uncached(ret, size);
		if (IS_ERR(ret))
			goto out_encrypt_pages;
	}

	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return ret;

out_encrypt_pages:
	if (dma_set_encrypted(dev, page_address(page), size))
		return NULL;
out_free_pages:
	__dma_direct_free_pages(dev, page, size);
	return NULL;
out_leak_pages:
	return NULL;
}

void dma_direct_free(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int page_order = get_order(size);

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) {
		/* cpu_addr is a struct page cookie, not a kernel address */
		dma_free_contiguous(dev, cpu_addr, size);
		return;
	}

	if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_ALLOC) &&
	    !dev_is_dma_coherent(dev) &&
	    !is_swiotlb_for_alloc(dev)) {
		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
		return;
	}

	if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
	    !dev_is_dma_coherent(dev)) {
		if (!dma_release_from_global_coherent(page_order, cpu_addr))
			WARN_ON_ONCE(1);
		return;
	}

	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
		return;

	if (is_vmalloc_addr(cpu_addr)) {
		vunmap(cpu_addr);
	} else {
		if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
			arch_dma_clear_uncached(cpu_addr, size);
		if (dma_set_encrypted(dev, cpu_addr, size))
			return;
	}

	__dma_direct_free_pages(dev, dma_direct_to_page(dev, dma_addr), size);
}

struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page;
	void *ret;

	if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

	page = __dma_direct_alloc_pages(dev, size, gfp, false);
	if (!page)
		return NULL;

	ret = page_address(page);
	if (dma_set_decrypted(dev, ret, size))
		goto out_leak_pages;
	memset(ret, 0, size);
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return page;
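	/*
	 * If dma_set_decrypted() failed, the encryption state of the pages is
	 * unknown, so they must not be handed back to the page allocator:
	 * leak them deliberately.
	 */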
out_leak_pages:
	return NULL;
}

void dma_direct_free_pages(struct device *dev, size_t size,
		struct page *page, dma_addr_t dma_addr,
		enum dma_data_direction dir)
{
	void *vaddr = page_address(page);

	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
	    dma_free_from_pool(dev, vaddr, size))
		return;

	if (dma_set_encrypted(dev, vaddr, size))
		return;
	__dma_direct_free_pages(dev, page, size);
}

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		swiotlb_sync_single_for_device(dev, paddr, sg->length, dir);
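		/* the swiotlb sync is a no-op unless paddr is in a bounce buffer */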

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_device(paddr, sg->length,
					dir);
	}
}
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_cpu(paddr, sg->length, dir);

		swiotlb_sync_single_for_cpu(dev, paddr, sg->length, dir);

		if (dir == DMA_FROM_DEVICE)
			arch_dma_mark_clean(paddr, sg->length);
	}

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu_all();
}

/*
 * Unmaps segments, except for ones marked as pci_p2pdma which do not
 * require any further action as they contain a bus address.
 */
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		if (sg_dma_is_bus_address(sg))
			sg_dma_unmark_bus_address(sg);
		else
			dma_direct_unmap_page(dev, sg->dma_address,
					      sg_dma_len(sg), dir, attrs);
	}
}
#endif

int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct pci_p2pdma_map_state p2pdma_state = {};
	enum pci_p2pdma_map_type map;
	struct scatterlist *sg;
	int i, ret;

	for_each_sg(sgl, sg, nents, i) {
		if (is_pci_p2pdma_page(sg_page(sg))) {
			map = pci_p2pdma_map_segment(&p2pdma_state, dev, sg);
			switch (map) {
			case PCI_P2PDMA_MAP_BUS_ADDR:
				continue;
			case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
				/*
				 * Any P2P mapping that traverses the PCI
				 * host bridge must be mapped with CPU physical
				 * address and not PCI bus addresses. This is
				 * done with dma_direct_map_page() below.
				 */
				break;
			default:
				ret = -EREMOTEIO;
				goto out_unmap;
			}
		}

		sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR) {
			ret = -EIO;
			goto out_unmap;
		}
		sg_dma_len(sg) = sg->length;
	}

	return nents;

out_unmap:
	dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	return ret;
}

dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t dma_addr = paddr;

	if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
		dev_err_once(dev,
			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		WARN_ON_ONCE(1);
		return DMA_MAPPING_ERROR;
	}

	return dma_addr;
}

int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page = dma_direct_to_page(dev, dma_addr);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

bool dma_direct_can_mmap(struct device *dev)
{
	return dev_is_dma_coherent(dev) ||
		IS_ENABLED(CONFIG_DMA_NONCOHERENT_MMAP);
}

int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr));
	int ret = -ENXIO;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
	if (force_dma_unencrypted(dev))
		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;
	if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
		return ret;

	if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)
		return -ENXIO;
	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
}

int dma_direct_supported(struct device *dev, u64 mask)
{
	u64 min_mask = (max_pfn - 1) << PAGE_SHIFT;

	/*
	 * Because 32-bit DMA masks are so common we expect every architecture
	 * to be able to satisfy them - either by not supporting more physical
	 * memory, or by providing a ZONE_DMA32.  If neither is the case, the
	 * architecture needs to use an IOMMU instead of the direct mapping.
	 */
	if (mask >= DMA_BIT_MASK(32))
		return 1;

	/*
	 * This check needs to be against the actual bit mask value, so use
	 * phys_to_dma_unencrypted() here so that the SME encryption mask isn't
	 * part of the check.
	 */
	if (IS_ENABLED(CONFIG_ZONE_DMA))
		min_mask = min_t(u64, min_mask, DMA_BIT_MASK(zone_dma_bits));
	return mask >= phys_to_dma_unencrypted(dev, min_mask);
}

/*
 * Check whether all RAM resource ranges are covered by the DMA range map.
 * Returns 0 when a further check is needed,
 * 1 if some RAM range can't be covered by dma_range_map.
 */
static int check_ram_in_range_map(unsigned long start_pfn,
				  unsigned long nr_pages, void *data)
{
	unsigned long end_pfn = start_pfn + nr_pages;
	const struct bus_dma_region *bdr = NULL;
	const struct bus_dma_region *m;
	struct device *dev = data;

	while (start_pfn < end_pfn) {
		for (m = dev->dma_range_map; PFN_DOWN(m->size); m++) {
			unsigned long cpu_start_pfn = PFN_DOWN(m->cpu_start);

			if (start_pfn >= cpu_start_pfn &&
			    start_pfn - cpu_start_pfn < PFN_DOWN(m->size)) {
				bdr = m;
				break;
			}
		}
		if (!bdr)
			return 1;

		start_pfn = PFN_DOWN(bdr->cpu_start) + PFN_DOWN(bdr->size);
	}

	return 0;
}

bool dma_direct_all_ram_mapped(struct device *dev)
{
	if (!dev->dma_range_map)
		return true;
	return !walk_system_ram_range(0, PFN_DOWN(ULONG_MAX) + 1, dev,
				      check_ram_in_range_map);
}

size_t dma_direct_max_mapping_size(struct device *dev)
{
	/* If SWIOTLB is active, use its maximum mapping size */
	if (is_swiotlb_active(dev) &&
	    (dma_addressing_limited(dev) || is_swiotlb_force_bounce(dev)))
		return swiotlb_max_mapping_size(dev);
	return SIZE_MAX;
}

bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return !dev_is_dma_coherent(dev) ||
	       swiotlb_find_pool(dev, dma_to_phys(dev, dma_addr));
}

/**
 * dma_direct_set_offset - Assign scalar offset for a single DMA range.
 * @dev:	device pointer; needed to "own" the allocated memory.
 * @cpu_start:	beginning of memory region covered by this offset.
 * @dma_start:	beginning of DMA/PCI region covered by this offset.
 * @size:	size of the region.
 *
 * This is for the simple case of a uniform offset which cannot
 * be discovered by "dma-ranges".
 *
 * It returns -ENOMEM if out of memory, -EINVAL if a map
 * already exists, 0 otherwise.
 *
 * Note: any call to this from a driver is a bug.  The mapping needs
 * to be described by the device tree or other firmware interfaces.
 */
int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
			 dma_addr_t dma_start, u64 size)
{
	struct bus_dma_region *map;
	u64 offset = (u64)cpu_start - (u64)dma_start;

	if (dev->dma_range_map) {
		dev_err(dev, "attempt to add DMA range to existing map\n");
		return -EINVAL;
	}

	if (!offset)
		return 0;

	map = kcalloc(2, sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;
	map[0].cpu_start = cpu_start;
	map[0].dma_start = dma_start;
	map[0].size = size;
	dev->dma_range_map = map;
	return 0;
}