// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-map-ops.h>
#include <linux/scatterlist.h>
#include <linux/pfn.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include "direct.h"

/*
 * Most architectures use ZONE_DMA for the first 16 Megabytes, but some use
 * it for entirely different regions. In that case the arch code needs to
 * override the variable below for dma-direct to work properly.
 */
unsigned int zone_dma_bits __ro_after_init = 24;

static inline dma_addr_t phys_to_dma_direct(struct device *dev,
		phys_addr_t phys)
{
	if (force_dma_unencrypted(dev))
		return phys_to_dma_unencrypted(dev, phys);
	return phys_to_dma(dev, phys);
}

static inline struct page *dma_direct_to_page(struct device *dev,
		dma_addr_t dma_addr)
{
	return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr)));
}

u64 dma_direct_get_required_mask(struct device *dev)
{
	phys_addr_t phys = (phys_addr_t)(max_pfn - 1) << PAGE_SHIFT;
	u64 max_dma = phys_to_dma_direct(dev, phys);

	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}

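/*
 * Pick the GFP zone flags matching the device's addressing limit: GFP_DMA
 * when the limit fits into zone_dma_bits, GFP_DMA32 when it fits into 32
 * bits, and no extra flags otherwise.  *phys_limit is set to the highest
 * physical address the device (or its bus) can reach.
 */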
static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 *phys_limit)
{
	u64 dma_limit = min_not_zero(
		dev->coherent_dma_mask,
		dev->bus_dma_limit);

	/*
	 * Optimistically try the zone that the physical address mask falls
	 * into first.  If that returns memory that isn't actually addressable
	 * we will fall back to the next lower zone and try again.
	 *
	 * Note that GFP_DMA32 and GFP_DMA are no ops without the corresponding
	 * zones.
	 */
	*phys_limit = dma_to_phys(dev, dma_limit);
	if (*phys_limit <= DMA_BIT_MASK(zone_dma_bits))
		return GFP_DMA;
	if (*phys_limit <= DMA_BIT_MASK(32))
		return GFP_DMA32;
	return 0;
}

bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
	dma_addr_t dma_addr = phys_to_dma_direct(dev, phys);

	if (dma_addr == DMA_MAPPING_ERROR)
		return false;
	return dma_addr + size - 1 <=
		min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
}

static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
{
	if (!force_dma_unencrypted(dev))
		return 0;
	return set_memory_decrypted((unsigned long)vaddr, PFN_UP(size));
}

static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
{
	int ret;

	if (!force_dma_unencrypted(dev))
		return 0;
	ret = set_memory_encrypted((unsigned long)vaddr, PFN_UP(size));
	if (ret)
		pr_warn_ratelimited("leaking DMA memory that can't be re-encrypted\n");
	return ret;
}

static void __dma_direct_free_pages(struct device *dev, struct page *page,
		size_t size)
{
	if (swiotlb_free(dev, page, size))
		return;
	dma_free_contiguous(dev, page, size);
}

static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size)
{
	struct page *page = swiotlb_alloc(dev, size);

	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		swiotlb_free(dev, page, size);
		return NULL;
	}

	return page;
}

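/*
 * Allocate the backing pages for a direct mapping.  swiotlb-only devices are
 * served from the restricted pool; everybody else first tries CMA or the
 * page allocator in the optimal zone, then falls back to ZONE_DMA32 and
 * ZONE_DMA until the result passes dma_coherent_ok().
 */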
static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
		gfp_t gfp, bool allow_highmem)
{
	int node = dev_to_node(dev);
	struct page *page = NULL;
	u64 phys_limit;

	WARN_ON_ONCE(!PAGE_ALIGNED(size));

	if (is_swiotlb_for_alloc(dev))
		return dma_direct_alloc_swiotlb(dev, size);

	gfp |= dma_direct_optimal_gfp_mask(dev, &phys_limit);
	page = dma_alloc_contiguous(dev, size, gfp);
	if (page) {
		if (!dma_coherent_ok(dev, page_to_phys(page), size) ||
		    (!allow_highmem && PageHighMem(page))) {
			dma_free_contiguous(dev, page, size);
			page = NULL;
		}
	}
again:
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(size));
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		dma_free_contiguous(dev, page, size);
		page = NULL;

		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
		    phys_limit < DMA_BIT_MASK(64) &&
		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
			gfp |= GFP_DMA32;
			goto again;
		}

		if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}
	}

	return page;
}

/*
 * Check if a potentially blocking operation needs to dip into the atomic
 * pools for the given device/gfp.
 */
static bool dma_direct_use_pool(struct device *dev, gfp_t gfp)
{
	return !gfpflags_allow_blocking(gfp) && !is_swiotlb_for_alloc(dev);
}

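/*
 * Allocate from the atomic coherent pools.  Used when the caller is not
 * allowed to block (see dma_direct_use_pool()) but setting up the mapping,
 * e.g. remapping or changing the encryption state, could.
 */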
static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	struct page *page;
	u64 phys_limit;
	void *ret;

	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_DMA_COHERENT_POOL)))
		return NULL;

	gfp |= dma_direct_optimal_gfp_mask(dev, &phys_limit);
	page = dma_alloc_from_pool(dev, size, &ret, gfp, dma_coherent_ok);
	if (!page)
		return NULL;
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return ret;
}

static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	struct page *page;

	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
	if (!page)
		return NULL;

	/* remove any dirty cache lines on the kernel alias */
	if (!PageHighMem(page))
		arch_dma_prep_coherent(page, size);

	/* return the page pointer as the opaque cookie */
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return page;
}

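/*
 * Backend for dma_alloc_attrs() when the device uses the direct mapping.
 * The fast path hands out memory from the kernel direct mapping;
 * non-coherent devices are served by arch_dma_alloc(), the global coherent
 * pool, a vmap() based remap or arch_dma_set_uncached(), and the buffer is
 * decrypted when the platform forces unencrypted DMA.
 */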
void *dma_direct_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	bool remap = false, set_uncached = false;
	struct page *page;
	void *ret;

	size = PAGE_ALIGN(size);
	if (attrs & DMA_ATTR_NO_WARN)
		gfp |= __GFP_NOWARN;

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev))
		return dma_direct_alloc_no_mapping(dev, size, dma_handle, gfp);

	if (!dev_is_dma_coherent(dev)) {
		if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_ALLOC) &&
		    !is_swiotlb_for_alloc(dev))
			return arch_dma_alloc(dev, size, dma_handle, gfp,
					attrs);

		/*
		 * If there is a global pool, always allocate from it for
		 * non-coherent devices.
		 */
		if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL))
			return dma_alloc_from_global_coherent(dev, size,
					dma_handle);

		/*
		 * Otherwise we require the architecture to either be able to
		 * mark arbitrary parts of the kernel direct mapping uncached,
		 * or to remap it uncached.
		 */
		set_uncached = IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED);
		remap = IS_ENABLED(CONFIG_DMA_DIRECT_REMAP);
		if (!set_uncached && !remap) {
			pr_warn_once("coherent DMA allocations not supported on this platform.\n");
			return NULL;
		}
	}

	/*
	 * Remapping or decrypting memory may block, allocate the memory from
	 * the atomic pools instead if we aren't allowed to block.
	 */
	if ((remap || force_dma_unencrypted(dev)) &&
	    dma_direct_use_pool(dev, gfp))
		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

	/* we always manually zero the memory once we are done */
	page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO, true);
	if (!page)
		return NULL;

	/*
	 * dma_alloc_contiguous can return highmem pages depending on a
	 * combination of the cma= arguments and per-arch setup.  These need
	 * to be remapped to return a kernel virtual address.
	 */
	if (PageHighMem(page)) {
		remap = true;
		set_uncached = false;
	}

	if (remap) {
		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

		if (force_dma_unencrypted(dev))
			prot = pgprot_decrypted(prot);

		/* remove any dirty cache lines on the kernel alias */
		arch_dma_prep_coherent(page, size);

		/* create a coherent mapping */
		ret = dma_common_contiguous_remap(page, size, prot,
				__builtin_return_address(0));
		if (!ret)
			goto out_free_pages;
	} else {
		ret = page_address(page);
		if (dma_set_decrypted(dev, ret, size))
			goto out_leak_pages;
	}

	memset(ret, 0, size);

	if (set_uncached) {
		arch_dma_prep_coherent(page, size);
		ret = arch_dma_set_uncached(ret, size);
		if (IS_ERR(ret))
			goto out_encrypt_pages;
	}

	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return ret;

out_encrypt_pages:
	if (dma_set_encrypted(dev, page_address(page), size))
		return NULL;
out_free_pages:
	__dma_direct_free_pages(dev, page, size);
	return NULL;
out_leak_pages:
	return NULL;
}

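/*
 * Undo dma_direct_alloc(): each allocation strategy above has a matching
 * teardown here, from the struct page cookie used for
 * DMA_ATTR_NO_KERNEL_MAPPING to the arch, global-pool, atomic-pool, vmap
 * and uncached cases.
 */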
void dma_direct_free(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int page_order = get_order(size);

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
	    !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) {
		/* cpu_addr is a struct page cookie, not a kernel address */
		dma_free_contiguous(dev, cpu_addr, size);
		return;
	}

	if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_ALLOC) &&
	    !dev_is_dma_coherent(dev) &&
	    !is_swiotlb_for_alloc(dev)) {
		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
		return;
	}

	if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
	    !dev_is_dma_coherent(dev)) {
		if (!dma_release_from_global_coherent(page_order, cpu_addr))
			WARN_ON_ONCE(1);
		return;
	}

	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
		return;

	if (is_vmalloc_addr(cpu_addr)) {
		vunmap(cpu_addr);
	} else {
		if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
			arch_dma_clear_uncached(cpu_addr, size);
		if (dma_set_encrypted(dev, cpu_addr, size))
			return;
	}

	__dma_direct_free_pages(dev, dma_direct_to_page(dev, dma_addr), size);
}

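/*
 * Allocation helper for the dma_alloc_pages() family, which always returns
 * page-backed, kernel-mapped memory and thus never needs the remapping or
 * uncached paths of dma_direct_alloc().
 */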
struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page;
	void *ret;

	if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
		return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);

	page = __dma_direct_alloc_pages(dev, size, gfp, false);
	if (!page)
		return NULL;

	ret = page_address(page);
	if (dma_set_decrypted(dev, ret, size))
		goto out_leak_pages;
	memset(ret, 0, size);
	*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
	return page;
out_leak_pages:
	return NULL;
}

void dma_direct_free_pages(struct device *dev, size_t size,
		struct page *page, dma_addr_t dma_addr,
		enum dma_data_direction dir)
{
	void *vaddr = page_address(page);

	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
	    dma_free_from_pool(dev, vaddr, size))
		return;

	if (dma_set_encrypted(dev, vaddr, size))
		return;
	__dma_direct_free_pages(dev, page, size);
}

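/*
 * Scatterlist cache maintenance helpers: sync any swiotlb bounce buffers and
 * call into the arch hooks for non-coherent devices.
 */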
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		swiotlb_sync_single_for_device(dev, paddr, sg->length, dir);

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_device(paddr, sg->length,
					dir);
	}
}
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_cpu(paddr, sg->length, dir);

		swiotlb_sync_single_for_cpu(dev, paddr, sg->length, dir);

		if (dir == DMA_FROM_DEVICE)
			arch_dma_mark_clean(paddr, sg->length);
	}

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu_all();
}

/*
 * Unmaps segments, except for ones marked as pci_p2pdma which do not
 * require any further action as they contain a bus address.
 */
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		if (sg_dma_is_bus_address(sg))
			sg_dma_unmark_bus_address(sg);
		else
			dma_direct_unmap_page(dev, sg->dma_address,
					      sg_dma_len(sg), dir, attrs);
	}
}
#endif

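/*
 * Map a scatterlist for streaming DMA.  PCI P2PDMA segments that can use a
 * bus address are marked and skipped; everything else, including P2P traffic
 * routed through the host bridge, is mapped page by page.  On failure all
 * segments mapped so far are unwound again.
 */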
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct pci_p2pdma_map_state p2pdma_state = {};
	enum pci_p2pdma_map_type map;
	struct scatterlist *sg;
	int i, ret;

	for_each_sg(sgl, sg, nents, i) {
		if (is_pci_p2pdma_page(sg_page(sg))) {
			map = pci_p2pdma_map_segment(&p2pdma_state, dev, sg);
			switch (map) {
			case PCI_P2PDMA_MAP_BUS_ADDR:
				continue;
			case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
				/*
				 * Any P2P mapping that traverses the PCI
				 * host bridge must be mapped with CPU physical
				 * address and not PCI bus addresses. This is
				 * done with dma_direct_map_page() below.
				 */
				break;
			default:
				ret = -EREMOTEIO;
				goto out_unmap;
			}
		}

		sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR) {
			ret = -EIO;
			goto out_unmap;
		}
		sg_dma_len(sg) = sg->length;
	}

	return nents;

out_unmap:
	dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	return ret;
}

dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t dma_addr = paddr;

	if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
		dev_err_once(dev,
			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		WARN_ON_ONCE(1);
		return DMA_MAPPING_ERROR;
	}

	return dma_addr;
}

int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page = dma_direct_to_page(dev, dma_addr);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

bool dma_direct_can_mmap(struct device *dev)
{
	return dev_is_dma_coherent(dev) ||
		IS_ENABLED(CONFIG_DMA_NONCOHERENT_MMAP);
}

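/*
 * Implement mmap() of a coherent buffer into user space: the pages are
 * exposed with remap_pfn_range() using the same pgprot (and decryption) as
 * the kernel mapping.
 */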
int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr));
	int ret = -ENXIO;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
	if (force_dma_unencrypted(dev))
		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;
	if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
		return ret;

	if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)
		return -ENXIO;
	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
}

int dma_direct_supported(struct device *dev, u64 mask)
{
	u64 min_mask = (max_pfn - 1) << PAGE_SHIFT;

	/*
	 * Because 32-bit DMA masks are so common we expect every architecture
	 * to be able to satisfy them - either by not supporting more physical
	 * memory, or by providing a ZONE_DMA32.  If neither is the case, the
	 * architecture needs to use an IOMMU instead of the direct mapping.
	 */
	if (mask >= DMA_BIT_MASK(32))
		return 1;

	/*
	 * This check needs to be against the actual bit mask value, so use
	 * phys_to_dma_unencrypted() here so that the SME encryption mask isn't
	 * part of the check.
	 */
	if (IS_ENABLED(CONFIG_ZONE_DMA))
		min_mask = min_t(u64, min_mask, DMA_BIT_MASK(zone_dma_bits));
	return mask >= phys_to_dma_unencrypted(dev, min_mask);
}

/*
 * Check whether all RAM resource ranges are covered by the dma_range_map.
 * Returns 0 when a further check is needed,
 * Returns 1 if there is some RAM range that can't be covered by dma_range_map.
 */
static int check_ram_in_range_map(unsigned long start_pfn,
				  unsigned long nr_pages, void *data)
{
	unsigned long end_pfn = start_pfn + nr_pages;
	const struct bus_dma_region *bdr = NULL;
	const struct bus_dma_region *m;
	struct device *dev = data;

	while (start_pfn < end_pfn) {
		for (m = dev->dma_range_map; PFN_DOWN(m->size); m++) {
			unsigned long cpu_start_pfn = PFN_DOWN(m->cpu_start);

			if (start_pfn >= cpu_start_pfn &&
			    start_pfn - cpu_start_pfn < PFN_DOWN(m->size)) {
				bdr = m;
				break;
			}
		}
		if (!bdr)
			return 1;

		start_pfn = PFN_DOWN(bdr->cpu_start) + PFN_DOWN(bdr->size);
	}

	return 0;
}

bool dma_direct_all_ram_mapped(struct device *dev)
{
	if (!dev->dma_range_map)
		return true;
	return !walk_system_ram_range(0, PFN_DOWN(ULONG_MAX) + 1, dev,
				      check_ram_in_range_map);
}

size_t dma_direct_max_mapping_size(struct device *dev)
{
	/* If SWIOTLB is active, use its maximum mapping size */
	if (is_swiotlb_active(dev) &&
	    (dma_addressing_limited(dev) || is_swiotlb_force_bounce(dev)))
		return swiotlb_max_mapping_size(dev);
	return SIZE_MAX;
}

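/*
 * A sync is only required for non-coherent devices or when the DMA address
 * was bounced through swiotlb.
 */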
bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return !dev_is_dma_coherent(dev) ||
	       swiotlb_find_pool(dev, dma_to_phys(dev, dma_addr));
}

/**
 * dma_direct_set_offset - Assign scalar offset for a single DMA range.
 * @dev:	device pointer; needed to "own" the alloced memory.
 * @cpu_start:	beginning of memory region covered by this offset.
 * @dma_start:	beginning of DMA/PCI region covered by this offset.
 * @size:	size of the region.
 *
 * This is for the simple case of a uniform offset which cannot
 * be discovered by "dma-ranges".
 *
 * It returns -ENOMEM if out of memory, -EINVAL if a map
 * already exists, 0 otherwise.
 *
 * Note: any call to this from a driver is a bug. The mapping needs
 * to be described by the device tree or other firmware interfaces.
 */
int dma_direct_set_offset(struct device *dev, phys_addr_t cpu_start,
			 dma_addr_t dma_start, u64 size)
{
	struct bus_dma_region *map;
	u64 offset = (u64)cpu_start - (u64)dma_start;

	if (dev->dma_range_map) {
		dev_err(dev, "attempt to add DMA range to existing map\n");
		return -EINVAL;
	}

	if (!offset)
		return 0;

	map = kcalloc(2, sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;
	map[0].cpu_start = cpu_start;
	map[0].dma_start = dma_start;
	map[0].size = size;
	dev->dma_range_map = map;
	return 0;
}
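
/*
 * Illustrative example, not part of the kernel sources: on a hypothetical
 * bus where RAM at CPU physical address 0x80000000 is visible to devices at
 * bus address 0x0, firmware glue code (never a driver) would call
 *
 *	dma_direct_set_offset(dev, 0x80000000, 0x0, SZ_1G);
 *
 * after which phys_to_dma()/dma_to_phys() apply that offset for this device.
 */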