// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>
#include <linux/cma.h>

#include <asm/page.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mach/arch.h>
#include <asm/dma-iommu.h>
#include <asm/mach/map.h>
#include <asm/system_info.h>
#include <asm/xen/xen-ops.h>

#include "dma.h"
#include "mm.h"

struct arm_dma_alloc_args {
	struct device *dev;
	size_t size;
	gfp_t gfp;
	pgprot_t prot;
	const void *caller;
	bool want_vaddr;
	int coherent_flag;
};

struct arm_dma_free_args {
	struct device *dev;
	size_t size;
	void *cpu_addr;
	struct page *page;
	bool want_vaddr;
};

#define NORMAL		0
#define COHERENT	1

struct arm_dma_allocator {
	void *(*alloc)(struct arm_dma_alloc_args *args,
		       struct page **ret_page);
	void (*free)(struct arm_dma_free_args *args);
};

struct arm_dma_buffer {
	struct list_head list;
	void *virt;
	struct arm_dma_allocator *allocator;
};

static LIST_HEAD(arm_dma_bufs);
static DEFINE_SPINLOCK(arm_dma_bufs_lock);

static struct arm_dma_buffer *arm_dma_buffer_find(void *virt)
{
	struct arm_dma_buffer *buf, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&arm_dma_bufs_lock, flags);
	list_for_each_entry(buf, &arm_dma_bufs, list) {
		if (buf->virt == virt) {
			list_del(&buf->list);
			found = buf;
			break;
		}
	}
	spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
	return found;
}

/*
 * The DMA API is built upon the notion of "buffer ownership". A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device. These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches. We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 *
 */

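/*
 * Illustrative sketch (not part of the original file): seen from a driver,
 * the ownership hand-over described above is what the streaming DMA API
 * calls perform. "dev", "buf" and "len" below are placeholders.
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	(device owns the buffer: start the DMA and wait for completion)
 *	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
 *	(CPU owns the buffer again and may safely read the received data)
 */
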
static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
{
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	if (PageHighMem(page)) {
		phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
		phys_addr_t end = base + size;
		while (size > 0) {
			void *ptr = kmap_atomic(page);
			memset(ptr, 0, PAGE_SIZE);
			if (coherent_flag != COHERENT)
				dmac_flush_range(ptr, ptr + PAGE_SIZE);
			kunmap_atomic(ptr);
			page++;
			size -= PAGE_SIZE;
		}
		if (coherent_flag != COHERENT)
			outer_flush_range(base, end);
	} else {
		void *ptr = page_address(page);
		memset(ptr, 0, size);
		if (coherent_flag != COHERENT) {
			dmac_flush_range(ptr, ptr + size);
			outer_flush_range(__pa(ptr), __pa(ptr) + size);
		}
	}
}

/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask. Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size,
				       gfp_t gfp, int coherent_flag)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	__dma_clear_buffer(page, size, coherent_flag);

	return page;
}

/*
 * Free a DMA buffer. 'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller, bool want_vaddr,
				     int coherent_flag, gfp_t gfp);

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				  pgprot_t prot, struct page **ret_page,
				  const void *caller, bool want_vaddr);

#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
static struct gen_pool *atomic_pool __ro_after_init;

static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);
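
/*
 * Illustrative note (not part of the original file): the pool size above
 * can be overridden on the kernel command line; memparse() accepts the
 * usual K/M/G suffixes, so for example
 *
 *	coherent_pool=1M
 *
 * reserves a 1 MiB pool for atomic DMA allocations instead of the
 * default SZ_256K.
 */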

/*
 * Initialise the coherent pool for atomic allocations.
 */
static int __init atomic_pool_init(void)
{
	pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL);
	gfp_t gfp = GFP_KERNEL | GFP_DMA;
	struct page *page;
	void *ptr;

	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!atomic_pool)
		goto out;
	/*
	 * The atomic pool is only used for non-coherent allocations
	 * so we must pass NORMAL for coherent_flag.
	 */
	if (dev_get_cma_area(NULL))
		ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
					      &page, atomic_pool_init, true, NORMAL,
					      GFP_KERNEL);
	else
		ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
					   &page, atomic_pool_init, true);
	if (ptr) {
		int ret;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto destroy_genpool;

		gen_pool_set_algo(atomic_pool,
				  gen_pool_first_fit_order_align,
				  NULL);
		pr_info("DMA: preallocated %zu KiB pool for atomic coherent allocations\n",
			atomic_pool_size / 1024);
		return 0;
	}

destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
	       atomic_pool_size / 1024);
	return -ENOMEM;
}
/*
 * CMA is activated by core_initcall, so we must be called after it.
 */
postcore_initcall(atomic_pool_init);

#ifdef CONFIG_CMA_AREAS
struct dma_contig_early_reserve {
	phys_addr_t base;
	unsigned long size;
};

static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;

static int dma_mmu_remap_num __initdata;

#ifdef CONFIG_DMA_CMA
void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
	dma_mmu_remap[dma_mmu_remap_num].base = base;
	dma_mmu_remap[dma_mmu_remap_num].size = size;
	dma_mmu_remap_num++;
}
#endif

void __init dma_contiguous_remap(void)
{
	int i;
	for (i = 0; i < dma_mmu_remap_num; i++) {
		phys_addr_t start = dma_mmu_remap[i].base;
		phys_addr_t end = start + dma_mmu_remap[i].size;
		struct map_desc map;
		unsigned long addr;

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			continue;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY_DMA_READY;

		/*
		 * Clear previous low-memory mapping to ensure that the
		 * TLB does not see any conflicting entries, then flush
		 * the TLB of the old entries before creating new mappings.
		 *
		 * This ensures that any speculatively loaded TLB entries
		 * (even though they may be rare) can not cause any problems,
		 * and ensures that this code is architecturally compliant.
		 */
		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
		     addr += PMD_SIZE)
			pmd_clear(pmd_off_k(addr));

		flush_tlb_kernel_range(__phys_to_virt(start),
				       __phys_to_virt(end));

		iotable_init(&map, 1);
	}
}
#endif

static int __dma_update_pte(pte_t *pte, unsigned long addr, void *data)
{
	struct page *page = virt_to_page((void *)addr);
	pgprot_t prot = *(pgprot_t *)data;

	set_pte_ext(pte, mk_pte(page, prot), 0);
	return 0;
}

static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned end = start + size;

	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
	flush_tlb_kernel_range(start, end);
}

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				  pgprot_t prot, struct page **ret_page,
				  const void *caller, bool want_vaddr)
{
	struct page *page;
	void *ptr = NULL;
	/*
	 * __alloc_remap_buffer is only called when the device is
	 * non-coherent
	 */
	page = __dma_alloc_buffer(dev, size, gfp, NORMAL);
	if (!page)
		return NULL;
	if (!want_vaddr)
		goto out;

	ptr = dma_common_contiguous_remap(page, size, prot, caller);
	if (!ptr) {
		__dma_free_buffer(page, size);
		return NULL;
	}

 out:
	*ret_page = page;
	return ptr;
}

static void *__alloc_from_pool(size_t size, struct page **ret_page)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
	}

	return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
	return gen_pool_has_addr(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
	if (!__in_atomic_pool(start, size))
		return 0;

	gen_pool_free(atomic_pool, (unsigned long)start, size);

	return 1;
}

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller, bool want_vaddr,
				     int coherent_flag, gfp_t gfp)
{
	unsigned long order = get_order(size);
	size_t count = size >> PAGE_SHIFT;
	struct page *page;
	void *ptr = NULL;

	page = dma_alloc_from_contiguous(dev, count, order, gfp & __GFP_NOWARN);
	if (!page)
		return NULL;

	__dma_clear_buffer(page, size, coherent_flag);

	if (!want_vaddr)
		goto out;

	if (PageHighMem(page)) {
		ptr = dma_common_contiguous_remap(page, size, prot, caller);
		if (!ptr) {
			dma_release_from_contiguous(dev, page, count);
			return NULL;
		}
	} else {
		__dma_remap(page, size, prot);
		ptr = page_address(page);
	}

 out:
	*ret_page = page;
	return ptr;
}

static void __free_from_contiguous(struct device *dev, struct page *page,
				   void *cpu_addr, size_t size, bool want_vaddr)
{
	if (want_vaddr) {
		if (PageHighMem(page))
			dma_common_free_remap(cpu_addr, size);
		else
			__dma_remap(page, size, PAGE_KERNEL);
	}
	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}

static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot)
{
	prot = (attrs & DMA_ATTR_WRITE_COMBINE) ?
			pgprot_writecombine(prot) :
			pgprot_dmacoherent(prot);
	return prot;
}

static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
				   struct page **ret_page)
{
	struct page *page;
	/* __alloc_simple_buffer is only called when the device is coherent */
	page = __dma_alloc_buffer(dev, size, gfp, COHERENT);
	if (!page)
		return NULL;

	*ret_page = page;
	return page_address(page);
}

static void *simple_allocator_alloc(struct arm_dma_alloc_args *args,
				    struct page **ret_page)
{
	return __alloc_simple_buffer(args->dev, args->size, args->gfp,
				     ret_page);
}

static void simple_allocator_free(struct arm_dma_free_args *args)
{
	__dma_free_buffer(args->page, args->size);
}

static struct arm_dma_allocator simple_allocator = {
	.alloc = simple_allocator_alloc,
	.free = simple_allocator_free,
};

static void *cma_allocator_alloc(struct arm_dma_alloc_args *args,
				 struct page **ret_page)
{
	return __alloc_from_contiguous(args->dev, args->size, args->prot,
				       ret_page, args->caller,
				       args->want_vaddr, args->coherent_flag,
				       args->gfp);
}

static void cma_allocator_free(struct arm_dma_free_args *args)
{
	__free_from_contiguous(args->dev, args->page, args->cpu_addr,
			       args->size, args->want_vaddr);
}

static struct arm_dma_allocator cma_allocator = {
	.alloc = cma_allocator_alloc,
	.free = cma_allocator_free,
};

static void *pool_allocator_alloc(struct arm_dma_alloc_args *args,
				  struct page **ret_page)
{
	return __alloc_from_pool(args->size, ret_page);
}

static void pool_allocator_free(struct arm_dma_free_args *args)
{
	__free_from_pool(args->cpu_addr, args->size);
}

static struct arm_dma_allocator pool_allocator = {
	.alloc = pool_allocator_alloc,
	.free = pool_allocator_free,
};

static void *remap_allocator_alloc(struct arm_dma_alloc_args *args,
				   struct page **ret_page)
{
	return __alloc_remap_buffer(args->dev, args->size, args->gfp,
				    args->prot, ret_page, args->caller,
				    args->want_vaddr);
}

static void remap_allocator_free(struct arm_dma_free_args *args)
{
	if (args->want_vaddr)
		dma_common_free_remap(args->cpu_addr, args->size);

	__dma_free_buffer(args->page, args->size);
}

static struct arm_dma_allocator remap_allocator = {
	.alloc = remap_allocator_alloc,
	.free = remap_allocator_free,
};

static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			 gfp_t gfp, pgprot_t prot, bool is_coherent,
			 unsigned long attrs, const void *caller)
{
	u64 mask = min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
	struct page *page = NULL;
	void *addr;
	bool allowblock, cma;
	struct arm_dma_buffer *buf;
	struct arm_dma_alloc_args args = {
		.dev = dev,
		.size = PAGE_ALIGN(size),
		.gfp = gfp,
		.prot = prot,
		.caller = caller,
		.want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
		.coherent_flag = is_coherent ? COHERENT : NORMAL,
	};

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
			size, mask);
		return NULL;
	}
#endif

	buf = kzalloc(sizeof(*buf),
		      gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM));
	if (!buf)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	args.gfp = gfp;

	*handle = DMA_MAPPING_ERROR;
	allowblock = gfpflags_allow_blocking(gfp);
	cma = allowblock ? dev_get_cma_area(dev) : NULL;

	if (cma)
		buf->allocator = &cma_allocator;
	else if (is_coherent)
		buf->allocator = &simple_allocator;
	else if (allowblock)
		buf->allocator = &remap_allocator;
	else
		buf->allocator = &pool_allocator;

	addr = buf->allocator->alloc(&args, &page);

	if (page) {
		unsigned long flags;

		*handle = phys_to_dma(dev, page_to_phys(page));
		buf->virt = args.want_vaddr ? addr : page;

		spin_lock_irqsave(&arm_dma_bufs_lock, flags);
		list_add(&buf->list, &arm_dma_bufs);
		spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
	} else {
		kfree(buf);
	}

	return args.want_vaddr ? addr : page;
}

/*
 * Free a buffer as defined by the above mapping.
 */
static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			   dma_addr_t handle, unsigned long attrs,
			   bool is_coherent)
{
	struct page *page = phys_to_page(dma_to_phys(dev, handle));
	struct arm_dma_buffer *buf;
	struct arm_dma_free_args args = {
		.dev = dev,
		.size = PAGE_ALIGN(size),
		.cpu_addr = cpu_addr,
		.page = page,
		.want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
	};

	buf = arm_dma_buffer_find(cpu_addr);
	if (WARN(!buf, "Freeing invalid buffer %p\n", cpu_addr))
		return;

	buf->allocator->free(&args);
	kfree(buf);
}

static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	unsigned long pfn;
	size_t left = size;

	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages. But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	do {
		size_t len = left;
		void *vaddr;

		page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (len + offset > PAGE_SIZE)
				len = PAGE_SIZE - offset;

			if (cache_is_vipt_nonaliasing()) {
				vaddr = kmap_atomic(page);
				op(vaddr + offset, len, dir);
				kunmap_atomic(vaddr);
			} else {
				vaddr = kmap_high_get(page);
				if (vaddr) {
					op(vaddr + offset, len, dir);
					kunmap_high(page);
				}
			}
		} else {
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr;

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}

static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* in any case, don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE) {
		outer_inv_range(paddr, paddr + size);

		dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
	}

	/*
	 * Mark the D-cache clean for these pages to avoid extra flushing.
	 */
	if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) {
		struct folio *folio = pfn_folio(paddr / PAGE_SIZE);
		size_t offset = offset_in_folio(folio, paddr);

		for (;;) {
			size_t sz = folio_size(folio) - offset;

			if (size < sz)
				break;
			if (!offset)
				set_bit(PG_dcache_clean, &folio->flags);
			offset = 0;
			size -= sz;
			if (!size)
				break;
			folio = folio_next(folio);
		}
	}
}

#ifdef CONFIG_ARM_DMA_USE_IOMMU

static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs)
{
	int prot = 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return prot;
	}
}

/* IOMMU */

static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);

static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
				      size_t size)
{
	unsigned int order = get_order(size);
	unsigned int align = 0;
	unsigned int count, start;
	size_t mapping_size = mapping->bits << PAGE_SHIFT;
	unsigned long flags;
	dma_addr_t iova;
	int i;

	if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
		order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	align = (1 << order) - 1;

	spin_lock_irqsave(&mapping->lock, flags);
	for (i = 0; i < mapping->nr_bitmaps; i++) {
		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
				mapping->bits, 0, count, align);

		if (start > mapping->bits)
			continue;

		bitmap_set(mapping->bitmaps[i], start, count);
		break;
	}

	/*
	 * No unused range found. Try to extend the existing mapping
	 * and perform a second attempt to reserve an IO virtual
	 * address range of size bytes.
	 */
	if (i == mapping->nr_bitmaps) {
		if (extend_iommu_mapping(mapping)) {
			spin_unlock_irqrestore(&mapping->lock, flags);
			return DMA_MAPPING_ERROR;
		}

		start = bitmap_find_next_zero_area(mapping->bitmaps[i],
				mapping->bits, 0, count, align);

		if (start > mapping->bits) {
			spin_unlock_irqrestore(&mapping->lock, flags);
			return DMA_MAPPING_ERROR;
		}

		bitmap_set(mapping->bitmaps[i], start, count);
	}
	spin_unlock_irqrestore(&mapping->lock, flags);

	iova = mapping->base + (mapping_size * i);
	iova += start << PAGE_SHIFT;

	return iova;
}

static inline void __free_iova(struct dma_iommu_mapping *mapping,
			       dma_addr_t addr, size_t size)
{
	unsigned int start, count;
	size_t mapping_size = mapping->bits << PAGE_SHIFT;
	unsigned long flags;
	dma_addr_t bitmap_base;
	u32 bitmap_index;

	if (!size)
		return;

	bitmap_index = (u32) (addr - mapping->base) / (u32) mapping_size;
	BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions);

	bitmap_base = mapping->base + mapping_size * bitmap_index;

	start = (addr - bitmap_base) >> PAGE_SHIFT;

	if (addr + size > bitmap_base + mapping_size) {
		/*
		 * The address range to be freed reaches into the iova
		 * range of the next bitmap. This should not happen as
		 * we don't allow this in __alloc_iova (at the
		 * moment).
		 */
		BUG();
	} else
		count = size >> PAGE_SHIFT;

	spin_lock_irqsave(&mapping->lock, flags);
	bitmap_clear(mapping->bitmaps[bitmap_index], start, count);
	spin_unlock_irqrestore(&mapping->lock, flags);
}

/* We'll try 2M, 1M, 64K, and finally 4K; array must end with 0! */
static const int iommu_order_array[] = { 9, 8, 4, 0 };

static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
					  gfp_t gfp, unsigned long attrs,
					  int coherent_flag)
{
	struct page **pages;
	int count = size >> PAGE_SHIFT;
	int array_size = count * sizeof(struct page *);
	int i = 0;
	int order_idx = 0;

	pages = kvzalloc(array_size, GFP_KERNEL);
	if (!pages)
		return NULL;

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS)
	{
		unsigned long order = get_order(size);
		struct page *page;

		page = dma_alloc_from_contiguous(dev, count, order,
						 gfp & __GFP_NOWARN);
		if (!page)
			goto error;

		__dma_clear_buffer(page, size, coherent_flag);

		for (i = 0; i < count; i++)
			pages[i] = page + i;

		return pages;
	}

	/* Go straight to 4K chunks if caller says it's OK. */
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		order_idx = ARRAY_SIZE(iommu_order_array) - 1;

	/*
	 * IOMMU can map any pages, so highmem can also be used here
	 */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		int j, order;

		order = iommu_order_array[order_idx];

		/* Drop down when we get small */
		if (__fls(count) < order) {
			order_idx++;
			continue;
		}

		if (order) {
			/* See if it's easy to allocate a high-order chunk */
			pages[i] = alloc_pages(gfp | __GFP_NORETRY, order);

			/* Go down a notch at first sign of pressure */
			if (!pages[i]) {
				order_idx++;
				continue;
			}
		} else {
			pages[i] = alloc_pages(gfp, 0);
			if (!pages[i])
				goto error;
		}

		if (order) {
			split_page(pages[i], order);
			j = 1 << order;
			while (--j)
				pages[i + j] = pages[i] + j;
		}

		__dma_clear_buffer(pages[i], PAGE_SIZE << order, coherent_flag);
		i += 1 << order;
		count -= 1 << order;
	}

	return pages;
error:
	while (i--)
		if (pages[i])
			__free_pages(pages[i], 0);
	kvfree(pages);
	return NULL;
}

static int __iommu_free_buffer(struct device *dev, struct page **pages,
			       size_t size, unsigned long attrs)
{
	int count = size >> PAGE_SHIFT;
	int i;

	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
		dma_release_from_contiguous(dev, pages[0], count);
	} else {
		for (i = 0; i < count; i++)
			if (pages[i])
				__free_pages(pages[i], 0);
	}

	kvfree(pages);
	return 0;
}

/*
 * Create a mapping in device IO address space for specified pages
 */
static dma_addr_t
__iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
		       unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t dma_addr, iova;
	int i;

	dma_addr = __alloc_iova(mapping, size);
	if (dma_addr == DMA_MAPPING_ERROR)
		return dma_addr;

	iova = dma_addr;
	for (i = 0; i < count; ) {
		int ret;

		unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
		phys_addr_t phys = page_to_phys(pages[i]);
		unsigned int len, j;

		for (j = i + 1; j < count; j++, next_pfn++)
			if (page_to_pfn(pages[j]) != next_pfn)
				break;

		len = (j - i) << PAGE_SHIFT;
		ret = iommu_map(mapping->domain, iova, phys, len,
				__dma_info_to_prot(DMA_BIDIRECTIONAL, attrs),
				GFP_KERNEL);
		if (ret < 0)
			goto fail;
		iova += len;
		i = j;
	}
	return dma_addr;
fail:
	iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
	__free_iova(mapping, dma_addr, size);
	return DMA_MAPPING_ERROR;
}

static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

	/*
	 * add optional in-page offset from iova to size and align
	 * result to page size
	 */
	size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
	iova &= PAGE_MASK;

	iommu_unmap(mapping->domain, iova, size);
	__free_iova(mapping, iova, size);
	return 0;
}

static struct page **__atomic_get_pages(void *addr)
{
	struct page *page;
	phys_addr_t phys;

	phys = gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr);
	page = phys_to_page(phys);

	return (struct page **)page;
}

static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs)
{
	if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
		return __atomic_get_pages(cpu_addr);

	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return cpu_addr;

	return dma_common_find_pages(cpu_addr);
}

static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
				  dma_addr_t *handle, int coherent_flag,
				  unsigned long attrs)
{
	struct page *page;
	void *addr;

	if (coherent_flag == COHERENT)
		addr = __alloc_simple_buffer(dev, size, gfp, &page);
	else
		addr = __alloc_from_pool(size, &page);
	if (!addr)
		return NULL;

	*handle = __iommu_create_mapping(dev, &page, size, attrs);
	if (*handle == DMA_MAPPING_ERROR)
		goto err_mapping;

	return addr;

err_mapping:
	__free_from_pool(addr, size);
	return NULL;
}

static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
			dma_addr_t handle, size_t size, int coherent_flag)
{
	__iommu_remove_mapping(dev, handle, size);
	if (coherent_flag == COHERENT)
		__dma_free_buffer(virt_to_page(cpu_addr), size);
	else
		__free_from_pool(cpu_addr, size);
}

static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
	    dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
	struct page **pages;
	void *addr = NULL;
	int coherent_flag = dev->dma_coherent ? COHERENT : NORMAL;

	*handle = DMA_MAPPING_ERROR;
	size = PAGE_ALIGN(size);

	if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp))
		return __iommu_alloc_simple(dev, size, gfp, handle,
					    coherent_flag, attrs);

	pages = __iommu_alloc_buffer(dev, size, gfp, attrs, coherent_flag);
	if (!pages)
		return NULL;

	*handle = __iommu_create_mapping(dev, pages, size, attrs);
	if (*handle == DMA_MAPPING_ERROR)
		goto err_buffer;

	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return pages;

	addr = dma_common_pages_remap(pages, size, prot,
				      __builtin_return_address(0));
	if (!addr)
		goto err_mapping;

	return addr;

err_mapping:
	__iommu_remove_mapping(dev, *handle, size);
err_buffer:
	__iommu_free_buffer(dev, pages, size, attrs);
	return NULL;
}

static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size,
		    unsigned long attrs)
{
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int err;

	if (!pages)
		return -ENXIO;

	if (vma->vm_pgoff >= nr_pages)
		return -ENXIO;

	if (!dev->dma_coherent)
		vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);

	err = vm_map_pages(vma, pages, nr_pages);
	if (err)
		pr_err("Remapping memory failed: %d\n", err);

	return err;
}

/*
 * free a page as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
static void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
	dma_addr_t handle, unsigned long attrs)
{
	int coherent_flag = dev->dma_coherent ? COHERENT : NORMAL;
	struct page **pages;
	size = PAGE_ALIGN(size);

	if (coherent_flag == COHERENT || __in_atomic_pool(cpu_addr, size)) {
		__iommu_free_atomic(dev, cpu_addr, handle, size, coherent_flag);
		return;
	}

	pages = __iommu_get_pages(cpu_addr, attrs);
	if (!pages) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}

	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
		dma_common_free_remap(cpu_addr, size);

	__iommu_remove_mapping(dev, handle, size);
	__iommu_free_buffer(dev, pages, size, attrs);
}

static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
				 void *cpu_addr, dma_addr_t dma_addr,
				 size_t size, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);

	if (!pages)
		return -ENXIO;

	return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
					 GFP_KERNEL);
}

/*
 * Map a part of the scatter-gather list into contiguous io address space
 */
static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
			  size_t size, dma_addr_t *handle,
			  enum dma_data_direction dir, unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova, iova_base;
	int ret = 0;
	unsigned int count;
	struct scatterlist *s;
	int prot;

	size = PAGE_ALIGN(size);
	*handle = DMA_MAPPING_ERROR;

	iova_base = iova = __alloc_iova(mapping, size);
	if (iova == DMA_MAPPING_ERROR)
		return -ENOMEM;

	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
		phys_addr_t phys = page_to_phys(sg_page(s));
		unsigned int len = PAGE_ALIGN(s->offset + s->length);

		if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);

		prot = __dma_info_to_prot(dir, attrs);

		ret = iommu_map(mapping->domain, iova, phys, len, prot,
				GFP_KERNEL);
		if (ret < 0)
			goto fail;
		count += len >> PAGE_SHIFT;
		iova += len;
	}
	*handle = iova_base;

	return 0;
fail:
	iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
	__free_iova(mapping, iova_base, size);
	return ret;
}

/**
 * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * The scatter gather list elements are merged together (if possible) and
 * tagged with the appropriate dma address and length. They are obtained via
 * sg_dma_{address,length}.
 */
static int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s = sg, *dma = sg, *start = sg;
	int i, count = 0, ret;
	unsigned int offset = s->offset;
	unsigned int size = s->offset + s->length;
	unsigned int max = dma_get_max_seg_size(dev);

	for (i = 1; i < nents; i++) {
		s = sg_next(s);

		s->dma_length = 0;

		if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
			ret = __map_sg_chunk(dev, start, size,
					     &dma->dma_address, dir, attrs);
			if (ret < 0)
				goto bad_mapping;

			dma->dma_address += offset;
			dma->dma_length = size - offset;

			size = offset = s->offset;
			start = s;
			dma = sg_next(dma);
			count += 1;
		}
		size += s->length;
	}
	ret = __map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs);
	if (ret < 0)
		goto bad_mapping;

	dma->dma_address += offset;
	dma->dma_length = size - offset;

	return count+1;

bad_mapping:
	for_each_sg(sg, s, count, i)
		__iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
	if (ret == -ENOMEM)
		return ret;
	return -EINVAL;
}

/**
 * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations. Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
static void arm_iommu_unmap_sg(struct device *dev,
			       struct scatterlist *sg, int nents,
			       enum dma_data_direction dir,
			       unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_len(s))
			__iommu_remove_mapping(dev, sg_dma_address(s),
					       sg_dma_len(s));
		if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
			__dma_page_dev_to_cpu(sg_page(s), s->offset,
					      s->length, dir);
	}
}

/**
 * arm_iommu_sync_sg_for_cpu
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
static void arm_iommu_sync_sg_for_cpu(struct device *dev,
			struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	if (dev->dma_coherent)
		return;

	for_each_sg(sg, s, nents, i)
		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);

}

/**
 * arm_iommu_sync_sg_for_device
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
static void arm_iommu_sync_sg_for_device(struct device *dev,
			struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	if (dev->dma_coherent)
		return;

	for_each_sg(sg, s, nents, i)
		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
}

/**
 * arm_iommu_map_page
 * @dev: valid struct device pointer
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * IOMMU aware version of arm_dma_map_page()
 */
static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t dma_addr;
	int ret, prot, len = PAGE_ALIGN(size + offset);

	if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		__dma_page_cpu_to_dev(page, offset, size, dir);

	dma_addr = __alloc_iova(mapping, len);
	if (dma_addr == DMA_MAPPING_ERROR)
		return dma_addr;

	prot = __dma_info_to_prot(dir, attrs);

	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len,
			prot, GFP_KERNEL);
	if (ret < 0)
		goto fail;

	return dma_addr + offset;
fail:
	__free_iova(mapping, dma_addr, len);
	return DMA_MAPPING_ERROR;
}

/**
 * arm_iommu_unmap_page
 * @dev: valid struct device pointer
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * IOMMU aware version of arm_dma_unmap_page()
 */
static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page;
	int offset = handle & ~PAGE_MASK;
	int len = PAGE_ALIGN(size + offset);

	if (!iova)
		return;

	if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
		__dma_page_dev_to_cpu(page, offset, size, dir);
	}

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}

/**
 * arm_iommu_map_resource - map a device resource for DMA
 * @dev: valid struct device pointer
 * @phys_addr: physical address of resource
 * @size: size of resource to map
 * @dir: DMA transfer direction
 */
static dma_addr_t arm_iommu_map_resource(struct device *dev,
		phys_addr_t phys_addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t dma_addr;
	int ret, prot;
	phys_addr_t addr = phys_addr & PAGE_MASK;
	unsigned int offset = phys_addr & ~PAGE_MASK;
	size_t len = PAGE_ALIGN(size + offset);

	dma_addr = __alloc_iova(mapping, len);
	if (dma_addr == DMA_MAPPING_ERROR)
		return dma_addr;

	prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;

	ret = iommu_map(mapping->domain, dma_addr, addr, len, prot, GFP_KERNEL);
	if (ret < 0)
		goto fail;

	return dma_addr + offset;
fail:
	__free_iova(mapping, dma_addr, len);
	return DMA_MAPPING_ERROR;
}

/**
 * arm_iommu_unmap_resource - unmap a device DMA resource
 * @dev: valid struct device pointer
 * @dma_handle: DMA address to resource
 * @size: size of resource to map
 * @dir: DMA transfer direction
 */
static void arm_iommu_unmap_resource(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = dma_handle & PAGE_MASK;
	unsigned int offset = dma_handle & ~PAGE_MASK;
	size_t len = PAGE_ALIGN(size + offset);

	if (!iova)
		return;

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}

static void arm_iommu_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page;
	unsigned int offset = handle & ~PAGE_MASK;

	if (dev->dma_coherent || !iova)
		return;

	page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_iommu_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page;
	unsigned int offset = handle & ~PAGE_MASK;

	if (dev->dma_coherent || !iova)
		return;

	page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	__dma_page_cpu_to_dev(page, offset, size, dir);
}

static const struct dma_map_ops iommu_ops = {
	.alloc		= arm_iommu_alloc_attrs,
	.free		= arm_iommu_free_attrs,
	.mmap		= arm_iommu_mmap_attrs,
	.get_sgtable	= arm_iommu_get_sgtable,

	.map_page		= arm_iommu_map_page,
	.unmap_page		= arm_iommu_unmap_page,
	.sync_single_for_cpu	= arm_iommu_sync_single_for_cpu,
	.sync_single_for_device	= arm_iommu_sync_single_for_device,

	.map_sg			= arm_iommu_map_sg,
	.unmap_sg		= arm_iommu_unmap_sg,
	.sync_sg_for_cpu	= arm_iommu_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_iommu_sync_sg_for_device,

	.map_resource		= arm_iommu_map_resource,
	.unmap_resource		= arm_iommu_unmap_resource,
};

/**
 * arm_iommu_create_mapping
 * @bus: pointer to the bus holding the client device (for IOMMU calls)
 * @base: start address of the valid IO address space
 * @size: maximum size of the valid IO address space
 *
 * Creates a mapping structure which holds information about used/unused
 * IO address ranges, which is required to perform memory allocation and
 * mapping with IOMMU aware functions.
 *
 * The client device needs to be attached to the mapping with the
 * arm_iommu_attach_device() function.
 */
struct dma_iommu_mapping *
arm_iommu_create_mapping(const struct bus_type *bus, dma_addr_t base, u64 size)
{
	unsigned int bits = size >> PAGE_SHIFT;
	unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
	struct dma_iommu_mapping *mapping;
	int extensions = 1;
	int err = -ENOMEM;

	/* currently only 32-bit DMA address space is supported */
	if (size > DMA_BIT_MASK(32) + 1)
		return ERR_PTR(-ERANGE);

	if (!bitmap_size)
		return ERR_PTR(-EINVAL);

	if (bitmap_size > PAGE_SIZE) {
		extensions = bitmap_size / PAGE_SIZE;
		bitmap_size = PAGE_SIZE;
	}

	mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
	if (!mapping)
		goto err;

	mapping->bitmap_size = bitmap_size;
	mapping->bitmaps = kcalloc(extensions, sizeof(unsigned long *),
				   GFP_KERNEL);
	if (!mapping->bitmaps)
		goto err2;

	mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL);
	if (!mapping->bitmaps[0])
		goto err3;

	mapping->nr_bitmaps = 1;
	mapping->extensions = extensions;
	mapping->base = base;
	mapping->bits = BITS_PER_BYTE * bitmap_size;

	spin_lock_init(&mapping->lock);

	mapping->domain = iommu_domain_alloc(bus);
	if (!mapping->domain)
		goto err4;

	kref_init(&mapping->kref);
	return mapping;
err4:
	kfree(mapping->bitmaps[0]);
err3:
	kfree(mapping->bitmaps);
err2:
	kfree(mapping);
err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);

static void release_iommu_mapping(struct kref *kref)
{
	int i;
	struct dma_iommu_mapping *mapping =
		container_of(kref, struct dma_iommu_mapping, kref);

	iommu_domain_free(mapping->domain);
	for (i = 0; i < mapping->nr_bitmaps; i++)
		kfree(mapping->bitmaps[i]);
	kfree(mapping->bitmaps);
	kfree(mapping);
}

static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
{
	int next_bitmap;

	if (mapping->nr_bitmaps >= mapping->extensions)
		return -EINVAL;

	next_bitmap = mapping->nr_bitmaps;
	mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size,
						GFP_ATOMIC);
	if (!mapping->bitmaps[next_bitmap])
		return -ENOMEM;

	mapping->nr_bitmaps++;

	return 0;
}

void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
{
	if (mapping)
		kref_put(&mapping->kref, release_iommu_mapping);
}
EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);

static int __arm_iommu_attach_device(struct device *dev,
				     struct dma_iommu_mapping *mapping)
{
	int err;

	err = iommu_attach_device(mapping->domain, dev);
	if (err)
		return err;

	kref_get(&mapping->kref);
	to_dma_iommu_mapping(dev) = mapping;

	pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
	return 0;
}

/**
 * arm_iommu_attach_device
 * @dev: valid struct device pointer
 * @mapping: io address space mapping structure (returned from
 *	arm_iommu_create_mapping)
 *
 * Attaches specified io address space mapping to the provided device.
 * This replaces the dma operations (dma_map_ops pointer) with the
 * IOMMU aware version.
 *
 * More than one client might be attached to the same io address space
 * mapping.
 */
int arm_iommu_attach_device(struct device *dev,
			    struct dma_iommu_mapping *mapping)
{
	int err;

	err = __arm_iommu_attach_device(dev, mapping);
	if (err)
		return err;

	set_dma_ops(dev, &iommu_ops);
	return 0;
}
EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
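
/*
 * Illustrative usage sketch (not part of the original file): a driver or
 * bus layer could carve out a 128 MiB IOMMU DMA window with the two
 * exported helpers above.  "my_dev" is a placeholder device pointer.
 *
 *	struct dma_iommu_mapping *mapping;
 *
 *	mapping = arm_iommu_create_mapping(&platform_bus_type,
 *					   0x80000000, SZ_128M);
 *	if (IS_ERR(mapping))
 *		return PTR_ERR(mapping);
 *
 *	if (arm_iommu_attach_device(my_dev, mapping)) {
 *		arm_iommu_release_mapping(mapping);
 *		return -ENODEV;
 *	}
 *
 * Detaching later with arm_iommu_detach_device() and dropping the
 * reference with arm_iommu_release_mapping() undoes this setup.
 */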

/**
 * arm_iommu_detach_device
 * @dev: valid struct device pointer
 *
 * Detaches the provided device from a previously attached map.
 * This overwrites the dma_ops pointer with appropriate non-IOMMU ops.
 */
void arm_iommu_detach_device(struct device *dev)
{
	struct dma_iommu_mapping *mapping;

	mapping = to_dma_iommu_mapping(dev);
	if (!mapping) {
		dev_warn(dev, "Not attached\n");
		return;
	}

	iommu_detach_device(mapping->domain, dev);
	kref_put(&mapping->kref, release_iommu_mapping);
	to_dma_iommu_mapping(dev) = NULL;
	set_dma_ops(dev, NULL);

	pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
}
EXPORT_SYMBOL_GPL(arm_iommu_detach_device);

static void arm_setup_iommu_dma_ops(struct device *dev)
{
	struct dma_iommu_mapping *mapping;
	u64 dma_base = 0, size = 1ULL << 32;

	if (dev->dma_range_map) {
		dma_base = dma_range_map_min(dev->dma_range_map);
		size = dma_range_map_max(dev->dma_range_map) - dma_base;
	}
	mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
	if (IS_ERR(mapping)) {
		pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
				size, dev_name(dev));
		return;
	}

	if (__arm_iommu_attach_device(dev, mapping)) {
		pr_warn("Failed to attach device %s to IOMMU mapping\n",
				dev_name(dev));
		arm_iommu_release_mapping(mapping);
		return;
	}

	set_dma_ops(dev, &iommu_ops);
}

static void arm_teardown_iommu_dma_ops(struct device *dev)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

	if (!mapping)
		return;

	arm_iommu_detach_device(dev);
	arm_iommu_release_mapping(mapping);
}

#else

static void arm_setup_iommu_dma_ops(struct device *dev)
{
}

static void arm_teardown_iommu_dma_ops(struct device *dev) { }

#endif	/* CONFIG_ARM_DMA_USE_IOMMU */

void arch_setup_dma_ops(struct device *dev, bool coherent)
{
	/*
	 * Due to legacy code that sets the ->dma_coherent flag from a bus
	 * notifier we can't just assign coherent to the ->dma_coherent flag
	 * here, but instead have to make sure we only set but never clear it
	 * for now.
	 */
	if (coherent)
		dev->dma_coherent = true;

	/*
	 * Don't override the dma_ops if they have already been set. Ideally
	 * this should be the only location where dma_ops are set, remove this
	 * check when all other callers of set_dma_ops will have disappeared.
	 */
	if (dev->dma_ops)
		return;

	if (device_iommu_mapped(dev))
		arm_setup_iommu_dma_ops(dev);

	xen_setup_dma_ops(dev);
	dev->archdata.dma_ops_setup = true;
}

void arch_teardown_dma_ops(struct device *dev)
{
	if (!dev->archdata.dma_ops_setup)
		return;

	arm_teardown_iommu_dma_ops(dev);
	/* Let arch_setup_dma_ops() start again from scratch upon re-probe */
	set_dma_ops(dev, NULL);
}

void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	__dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
			      size, dir);
}

void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	__dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
			      size, dir);
}

void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	return __dma_alloc(dev, size, dma_handle, gfp,
			   __get_dma_pgprot(attrs, PAGE_KERNEL), false,
			   attrs, __builtin_return_address(0));
}

void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
		   dma_addr_t dma_handle, unsigned long attrs)
{
	__arm_dma_free(dev, size, cpu_addr, dma_handle, attrs, false);
}