
TOMOYO Linux Cross Reference
Linux/include/linux/dma-mapping.h


Diff markup

Differences between /include/linux/dma-mapping.h (Version linux-6.12-rc7, left column) and /include/linux/dma-mapping.h (Version linux-5.4.285, right column). Lines marked << exist only in linux-6.12-rc7, lines marked >> only in linux-5.4.285, and lines marked !! differ between the two versions; long lines are truncated at the column boundary.


  1 /* SPDX-License-Identifier: GPL-2.0 */              1 /* SPDX-License-Identifier: GPL-2.0 */
  2 #ifndef _LINUX_DMA_MAPPING_H                        2 #ifndef _LINUX_DMA_MAPPING_H
  3 #define _LINUX_DMA_MAPPING_H                        3 #define _LINUX_DMA_MAPPING_H
  4                                                     4 
  5 #include <linux/cache.h>                       << 
  6 #include <linux/sizes.h>                            5 #include <linux/sizes.h>
  7 #include <linux/string.h>                           6 #include <linux/string.h>
  8 #include <linux/device.h>                           7 #include <linux/device.h>
  9 #include <linux/err.h>                              8 #include <linux/err.h>
                                                   >>   9 #include <linux/dma-debug.h>
 10 #include <linux/dma-direction.h>                   10 #include <linux/dma-direction.h>
 11 #include <linux/scatterlist.h>                     11 #include <linux/scatterlist.h>
 12 #include <linux/bug.h>                             12 #include <linux/bug.h>
 13 #include <linux/mem_encrypt.h>                     13 #include <linux/mem_encrypt.h>
 14                                                    14 
 15 /**                                                15 /**
 16  * List of possible attributes associated with     16  * List of possible attributes associated with a DMA mapping. The semantics
 17  * of each attribute should be defined in Docu !!  17  * of each attribute should be defined in Documentation/DMA-attributes.txt.
                                                   >>  18  *
                                                   >>  19  * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
                                                   >>  20  * forces all pending DMA writes to complete.
 18  */                                                21  */
 19                                                !!  22 #define DMA_ATTR_WRITE_BARRIER          (1UL << 0)
 20 /*                                                 23 /*
 21  * DMA_ATTR_WEAK_ORDERING: Specifies that read     24  * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 22  * may be weakly ordered, that is that reads a     25  * may be weakly ordered, that is that reads and writes may pass each other.
 23  */                                                26  */
 24 #define DMA_ATTR_WEAK_ORDERING          (1UL <     27 #define DMA_ATTR_WEAK_ORDERING          (1UL << 1)
 25 /*                                                 28 /*
 26  * DMA_ATTR_WRITE_COMBINE: Specifies that writ     29  * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 27  * buffered to improve performance.                30  * buffered to improve performance.
 28  */                                                31  */
 29 #define DMA_ATTR_WRITE_COMBINE          (1UL <     32 #define DMA_ATTR_WRITE_COMBINE          (1UL << 2)
 30 /*                                                 33 /*
                                                   >>  34  * DMA_ATTR_NON_CONSISTENT: Lets the platform to choose to return either
                                                   >>  35  * consistent or non-consistent memory as it sees fit.
                                                   >>  36  */
                                                   >>  37 #define DMA_ATTR_NON_CONSISTENT         (1UL << 3)
                                                   >>  38 /*
 31  * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platfo     39  * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform to avoid creating a kernel
 32  * virtual mapping for the allocated buffer.       40  * virtual mapping for the allocated buffer.
 33  */                                                41  */
 34 #define DMA_ATTR_NO_KERNEL_MAPPING      (1UL <     42 #define DMA_ATTR_NO_KERNEL_MAPPING      (1UL << 4)
 35 /*                                                 43 /*
 36  * DMA_ATTR_SKIP_CPU_SYNC: Allows platform cod     44  * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 37  * the CPU cache for the given buffer assuming     45  * the CPU cache for the given buffer assuming that it has been already
 38  * transferred to 'device' domain.                 46  * transferred to 'device' domain.
 39  */                                                47  */
 40 #define DMA_ATTR_SKIP_CPU_SYNC          (1UL <     48 #define DMA_ATTR_SKIP_CPU_SYNC          (1UL << 5)
 41 /*                                                 49 /*
 42  * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguou     50  * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 43  * in physical memory.                             51  * in physical memory.
 44  */                                                52  */
 45 #define DMA_ATTR_FORCE_CONTIGUOUS       (1UL <     53 #define DMA_ATTR_FORCE_CONTIGUOUS       (1UL << 6)
 46 /*                                                 54 /*
 47  * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint     55  * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 48  * that it's probably not worth the time to tr     56  * that it's probably not worth the time to try to allocate memory to in a way
 49  * that gives better TLB efficiency.               57  * that gives better TLB efficiency.
 50  */                                                58  */
 51 #define DMA_ATTR_ALLOC_SINGLE_PAGES     (1UL <     59 #define DMA_ATTR_ALLOC_SINGLE_PAGES     (1UL << 7)
 52 /*                                                 60 /*
 53  * DMA_ATTR_NO_WARN: This tells the DMA-mappin     61  * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 54  * allocation failure reports (similarly to __     62  * allocation failure reports (similarly to __GFP_NOWARN).
 55  */                                                63  */
 56 #define DMA_ATTR_NO_WARN        (1UL << 8)         64 #define DMA_ATTR_NO_WARN        (1UL << 8)
 57                                                    65 
 58 /*                                                 66 /*
 59  * DMA_ATTR_PRIVILEGED: used to indicate that      67  * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 60  * accessible at an elevated privilege level (     68  * accessible at an elevated privilege level (and ideally inaccessible or
 61  * at least read-only at lesser-privileged lev     69  * at least read-only at lesser-privileged levels).
 62  */                                                70  */
 63 #define DMA_ATTR_PRIVILEGED             (1UL <     71 #define DMA_ATTR_PRIVILEGED             (1UL << 9)
 64                                                    72 
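
These attribute bits are meant to be OR-ed together into the attrs argument of the *_attrs variants of the allocation and mapping calls below. A minimal driver-side sketch (example_alloc, dev and size are hypothetical, not part of this header):

        /* Allocate a coherent buffer that needs no kernel virtual mapping
         * and whose allocation may fail silently. */
        static void *example_alloc(struct device *dev, size_t size,
                                   dma_addr_t *dma_handle)
        {
                unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN;

                return dma_alloc_attrs(dev, size, dma_handle, GFP_KERNEL, attrs);
        }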
 65 /*                                                 73 /*
 66  * A dma_addr_t can hold any valid DMA or bus  !!  74  * A dma_addr_t can hold any valid DMA or bus address for the platform.
 67  * be given to a device to use as a DMA source !!  75  * It can be given to a device to use as a DMA source or target.  A CPU cannot
 68  * given device and there may be a translation !!  76  * reference a dma_addr_t directly because there may be translation between
 69  * space and the bus address space.            !!  77  * its physical address space and the bus address space.
 70  *                                             !!  78  */
 71  * DMA_MAPPING_ERROR is the magic error code i !!  79 struct dma_map_ops {
 72  * be used directly in drivers, but checked fo !!  80         void* (*alloc)(struct device *dev, size_t size,
 73  * instead.                                    !!  81                                 dma_addr_t *dma_handle, gfp_t gfp,
 74  */                                            !!  82                                 unsigned long attrs);
                                                   >>  83         void (*free)(struct device *dev, size_t size,
                                                   >>  84                               void *vaddr, dma_addr_t dma_handle,
                                                   >>  85                               unsigned long attrs);
                                                   >>  86         int (*mmap)(struct device *, struct vm_area_struct *,
                                                   >>  87                           void *, dma_addr_t, size_t,
                                                   >>  88                           unsigned long attrs);
                                                   >>  89 
                                                   >>  90         int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
                                                   >>  91                            dma_addr_t, size_t, unsigned long attrs);
                                                   >>  92 
                                                   >>  93         dma_addr_t (*map_page)(struct device *dev, struct page *page,
                                                   >>  94                                unsigned long offset, size_t size,
                                                   >>  95                                enum dma_data_direction dir,
                                                   >>  96                                unsigned long attrs);
                                                   >>  97         void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
                                                   >>  98                            size_t size, enum dma_data_direction dir,
                                                   >>  99                            unsigned long attrs);
                                                   >> 100         /*
                                                   >> 101          * map_sg returns 0 on error and a value > 0 on success.
                                                   >> 102          * It should never return a value < 0.
                                                   >> 103          */
                                                   >> 104         int (*map_sg)(struct device *dev, struct scatterlist *sg,
                                                   >> 105                       int nents, enum dma_data_direction dir,
                                                   >> 106                       unsigned long attrs);
                                                   >> 107         void (*unmap_sg)(struct device *dev,
                                                   >> 108                          struct scatterlist *sg, int nents,
                                                   >> 109                          enum dma_data_direction dir,
                                                   >> 110                          unsigned long attrs);
                                                   >> 111         dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
                                                   >> 112                                size_t size, enum dma_data_direction dir,
                                                   >> 113                                unsigned long attrs);
                                                   >> 114         void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
                                                   >> 115                            size_t size, enum dma_data_direction dir,
                                                   >> 116                            unsigned long attrs);
                                                   >> 117         void (*sync_single_for_cpu)(struct device *dev,
                                                   >> 118                                     dma_addr_t dma_handle, size_t size,
                                                   >> 119                                     enum dma_data_direction dir);
                                                   >> 120         void (*sync_single_for_device)(struct device *dev,
                                                   >> 121                                        dma_addr_t dma_handle, size_t size,
                                                   >> 122                                        enum dma_data_direction dir);
                                                   >> 123         void (*sync_sg_for_cpu)(struct device *dev,
                                                   >> 124                                 struct scatterlist *sg, int nents,
                                                   >> 125                                 enum dma_data_direction dir);
                                                   >> 126         void (*sync_sg_for_device)(struct device *dev,
                                                   >> 127                                    struct scatterlist *sg, int nents,
                                                   >> 128                                    enum dma_data_direction dir);
                                                   >> 129         void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
                                                   >> 130                         enum dma_data_direction direction);
                                                   >> 131         int (*dma_supported)(struct device *dev, u64 mask);
                                                   >> 132         u64 (*get_required_mask)(struct device *dev);
                                                   >> 133         size_t (*max_mapping_size)(struct device *dev);
                                                   >> 134         unsigned long (*get_merge_boundary)(struct device *dev);
                                                   >> 135 };
                                                   >> 136 
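The struct dma_map_ops on the right-hand (linux-5.4.285) side is the per-device dispatch table behind the dma_* entry points; it appears only on that side because later kernels moved it out of this header into <linux/dma-map-ops.h>. A skeletal, purely hypothetical table against the 5.4 definition, using a trivial 1:1 address translation:

        /* Hypothetical skeleton; real tables live in arch and IOMMU code. */
        static dma_addr_t my_map_page(struct device *dev, struct page *page,
                                      unsigned long offset, size_t size,
                                      enum dma_data_direction dir,
                                      unsigned long attrs)
        {
                /* pretend bus addresses equal CPU physical addresses */
                return page_to_phys(page) + offset;
        }

        static void my_unmap_page(struct device *dev, dma_addr_t handle,
                                  size_t size, enum dma_data_direction dir,
                                  unsigned long attrs)
        {
                /* nothing to tear down in the 1:1 case */
        }

        static const struct dma_map_ops my_dma_ops = {
                .map_page       = my_map_page,
                .unmap_page     = my_unmap_page,
        };
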
 75 #define DMA_MAPPING_ERROR               (~(dma    137 #define DMA_MAPPING_ERROR               (~(dma_addr_t)0)
 76                                                   138 
                                                   >> 139 extern const struct dma_map_ops dma_virt_ops;
                                                   >> 140 extern const struct dma_map_ops dma_dummy_ops;
                                                   >> 141 
 77 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL :    142 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
 78                                                   143 
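DMA_BIT_MASK(n) expands to a mask with the low n bits set; the (n) == 64 special case avoids the undefined behaviour of shifting a 64-bit constant by 64. Its usual job is declaring a device's addressing limit at probe time, sketched here with the dma_set_mask_and_coherent() helper declared elsewhere in this header (example_probe is hypothetical):

        /* Limit a device to 32-bit DMA addresses. */
        static int example_probe(struct device *dev)
        {
                int ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

                if (ret)
                        dev_err(dev, "32-bit DMA addressing not available\n");
                return ret;
        }
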
 79 #ifdef CONFIG_DMA_API_DEBUG                    !! 144 #define DMA_MASK_NONE   0x0ULL
 80 void debug_dma_mapping_error(struct device *de !! 145 
 81 void debug_dma_map_single(struct device *dev,  !! 146 static inline int valid_dma_direction(int dma_direction)
 82                 unsigned long len);            !! 147 {
                                                   >> 148         return ((dma_direction == DMA_BIDIRECTIONAL) ||
                                                   >> 149                 (dma_direction == DMA_TO_DEVICE) ||
                                                   >> 150                 (dma_direction == DMA_FROM_DEVICE));
                                                   >> 151 }
                                                   >> 152 
                                                   >> 153 #ifdef CONFIG_DMA_DECLARE_COHERENT
                                                   >> 154 /*
                                                   >> 155  * These three functions are only for dma allocator.
                                                   >> 156  * Don't use them in device drivers.
                                                   >> 157  */
                                                   >> 158 int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
                                                   >> 159                                        dma_addr_t *dma_handle, void **ret);
                                                   >> 160 int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
                                                   >> 161 
                                                   >> 162 int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
                                                   >> 163                             void *cpu_addr, size_t size, int *ret);
                                                   >> 164 
                                                   >> 165 void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size, dma_addr_t *dma_handle);
                                                   >> 166 int dma_release_from_global_coherent(int order, void *vaddr);
                                                   >> 167 int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
                                                   >> 168                                   size_t size, int *ret);
                                                   >> 169 
 83 #else                                             170 #else
 84 static inline void debug_dma_mapping_error(str !! 171 #define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
 85                 dma_addr_t dma_addr)           !! 172 #define dma_release_from_dev_coherent(dev, order, vaddr) (0)
                                                   >> 173 #define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
                                                   >> 174 
                                                   >> 175 static inline void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
                                                   >> 176                                                    dma_addr_t *dma_handle)
 86 {                                                 177 {
                                                   >> 178         return NULL;
 87 }                                                 179 }
 88 static inline void debug_dma_map_single(struct !! 180 
 89                 unsigned long len)             !! 181 static inline int dma_release_from_global_coherent(int order, void *vaddr)
                                                   >> 182 {
                                                   >> 183         return 0;
                                                   >> 184 }
                                                   >> 185 
                                                   >> 186 static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
                                                   >> 187                                                 void *cpu_addr, size_t size,
                                                   >> 188                                                 int *ret)
                                                   >> 189 {
                                                   >> 190         return 0;
                                                   >> 191 }
                                                   >> 192 #endif /* CONFIG_DMA_DECLARE_COHERENT */
                                                   >> 193 
                                                   >> 194 static inline bool dma_is_direct(const struct dma_map_ops *ops)
 90 {                                                 195 {
                                                   >> 196         return likely(!ops);
 91 }                                                 197 }
 92 #endif /* CONFIG_DMA_API_DEBUG */              !! 198 
                                                   >> 199 /*
                                                   >> 200  * All the dma_direct_* declarations are here just for the indirect call bypass,
                                                   >> 201  * and must not be used directly drivers!
                                                   >> 202  */
                                                   >> 203 dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
                                                   >> 204                 unsigned long offset, size_t size, enum dma_data_direction dir,
                                                   >> 205                 unsigned long attrs);
                                                   >> 206 int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
                                                   >> 207                 enum dma_data_direction dir, unsigned long attrs);
                                                   >> 208 dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
                                                   >> 209                 size_t size, enum dma_data_direction dir, unsigned long attrs);
                                                   >> 210 
                                                   >> 211 #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
                                                   >> 212     defined(CONFIG_SWIOTLB)
                                                   >> 213 void dma_direct_sync_single_for_device(struct device *dev,
                                                   >> 214                 dma_addr_t addr, size_t size, enum dma_data_direction dir);
                                                   >> 215 void dma_direct_sync_sg_for_device(struct device *dev,
                                                   >> 216                 struct scatterlist *sgl, int nents, enum dma_data_direction dir);
                                                   >> 217 #else
                                                   >> 218 static inline void dma_direct_sync_single_for_device(struct device *dev,
                                                   >> 219                 dma_addr_t addr, size_t size, enum dma_data_direction dir)
                                                   >> 220 {
                                                   >> 221 }
                                                   >> 222 static inline void dma_direct_sync_sg_for_device(struct device *dev,
                                                   >> 223                 struct scatterlist *sgl, int nents, enum dma_data_direction dir)
                                                   >> 224 {
                                                   >> 225 }
                                                   >> 226 #endif
                                                   >> 227 
                                                   >> 228 #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
                                                   >> 229     defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
                                                   >> 230     defined(CONFIG_SWIOTLB)
                                                   >> 231 void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
                                                   >> 232                 size_t size, enum dma_data_direction dir, unsigned long attrs);
                                                   >> 233 void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
                                                   >> 234                 int nents, enum dma_data_direction dir, unsigned long attrs);
                                                   >> 235 void dma_direct_sync_single_for_cpu(struct device *dev,
                                                   >> 236                 dma_addr_t addr, size_t size, enum dma_data_direction dir);
                                                   >> 237 void dma_direct_sync_sg_for_cpu(struct device *dev,
                                                   >> 238                 struct scatterlist *sgl, int nents, enum dma_data_direction dir);
                                                   >> 239 #else
                                                   >> 240 static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
                                                   >> 241                 size_t size, enum dma_data_direction dir, unsigned long attrs)
                                                   >> 242 {
                                                   >> 243 }
                                                   >> 244 static inline void dma_direct_unmap_sg(struct device *dev,
                                                   >> 245                 struct scatterlist *sgl, int nents, enum dma_data_direction dir,
                                                   >> 246                 unsigned long attrs)
                                                   >> 247 {
                                                   >> 248 }
                                                   >> 249 static inline void dma_direct_sync_single_for_cpu(struct device *dev,
                                                   >> 250                 dma_addr_t addr, size_t size, enum dma_data_direction dir)
                                                   >> 251 {
                                                   >> 252 }
                                                   >> 253 static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
                                                   >> 254                 struct scatterlist *sgl, int nents, enum dma_data_direction dir)
                                                   >> 255 {
                                                   >> 256 }
                                                   >> 257 #endif
                                                   >> 258 
                                                   >> 259 size_t dma_direct_max_mapping_size(struct device *dev);
 93                                                   260 
 94 #ifdef CONFIG_HAS_DMA                             261 #ifdef CONFIG_HAS_DMA
                                                   >> 262 #include <asm/dma-mapping.h>
                                                   >> 263 
                                                   >> 264 static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
                                                   >> 265 {
                                                   >> 266         if (dev->dma_ops)
                                                   >> 267                 return dev->dma_ops;
                                                   >> 268         return get_arch_dma_ops(dev->bus);
                                                   >> 269 }
                                                   >> 270 
                                                   >> 271 static inline void set_dma_ops(struct device *dev,
                                                   >> 272                                const struct dma_map_ops *dma_ops)
                                                   >> 273 {
                                                   >> 274         dev->dma_ops = dma_ops;
                                                   >> 275 }
                                                   >> 276 
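On the 5.4.285 side get_dma_ops() prefers the per-device table and falls back to the architecture default, a NULL result meaning the direct-mapped path (see dma_is_direct() above); in linux-6.12-rc7 these helpers likewise live in <linux/dma-map-ops.h>. Bus or platform code would install a custom table roughly as below (hypothetical, reusing my_dma_ops from the earlier sketch):

        /* Route one device's DMA operations through a custom table. */
        static void example_attach(struct device *dev)
        {
                set_dma_ops(dev, &my_dma_ops);
        }
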
                                                   >> 277 static inline dma_addr_t dma_map_page_attrs(struct device *dev,
                                                   >> 278                 struct page *page, size_t offset, size_t size,
                                                   >> 279                 enum dma_data_direction dir, unsigned long attrs)
                                                   >> 280 {
                                                   >> 281         const struct dma_map_ops *ops = get_dma_ops(dev);
                                                   >> 282         dma_addr_t addr;
                                                   >> 283 
                                                   >> 284         BUG_ON(!valid_dma_direction(dir));
                                                   >> 285         if (dma_is_direct(ops))
                                                   >> 286                 addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
                                                   >> 287         else
                                                   >> 288                 addr = ops->map_page(dev, page, offset, size, dir, attrs);
                                                   >> 289         debug_dma_map_page(dev, page, offset, size, dir, addr);
                                                   >> 290 
                                                   >> 291         return addr;
                                                   >> 292 }
                                                   >> 293 
                                                   >> 294 static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
                                                   >> 295                 size_t size, enum dma_data_direction dir, unsigned long attrs)
                                                   >> 296 {
                                                   >> 297         const struct dma_map_ops *ops = get_dma_ops(dev);
                                                   >> 298 
                                                   >> 299         BUG_ON(!valid_dma_direction(dir));
                                                   >> 300         if (dma_is_direct(ops))
                                                   >> 301                 dma_direct_unmap_page(dev, addr, size, dir, attrs);
                                                   >> 302         else if (ops->unmap_page)
                                                   >> 303                 ops->unmap_page(dev, addr, size, dir, attrs);
                                                   >> 304         debug_dma_unmap_page(dev, addr, size, dir);
                                                   >> 305 }
                                                   >> 306 
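This map/unmap pair is the core of the streaming API: the returned handle must be validated with dma_mapping_error() before it reaches hardware, and every successful map needs a matching unmap. A sketch (example_tx and its parameters are assumptions):

        static int example_tx(struct device *dev, struct page *page, size_t len)
        {
                dma_addr_t addr;

                addr = dma_map_page_attrs(dev, page, 0, len, DMA_TO_DEVICE, 0);
                if (dma_mapping_error(dev, addr))
                        return -ENOMEM;

                /* ... hand 'addr' to the device and wait for completion ... */

                dma_unmap_page_attrs(dev, addr, len, DMA_TO_DEVICE, 0);
                return 0;
        }

The familiar dma_map_page()/dma_unmap_page() wrappers defined further down in this header are these calls with attrs fixed to 0.
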
                                                   >> 307 /*
                                                   >> 308  * dma_maps_sg_attrs returns 0 on error and > 0 on success.
                                                   >> 309  * It should never return a value < 0.
                                                   >> 310  */
                                                   >> 311 static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
                                                   >> 312                                    int nents, enum dma_data_direction dir,
                                                   >> 313                                    unsigned long attrs)
                                                   >> 314 {
                                                   >> 315         const struct dma_map_ops *ops = get_dma_ops(dev);
                                                   >> 316         int ents;
                                                   >> 317 
                                                   >> 318         BUG_ON(!valid_dma_direction(dir));
                                                   >> 319         if (dma_is_direct(ops))
                                                   >> 320                 ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
                                                   >> 321         else
                                                   >> 322                 ents = ops->map_sg(dev, sg, nents, dir, attrs);
                                                   >> 323         BUG_ON(ents < 0);
                                                   >> 324         debug_dma_map_sg(dev, sg, nents, ents, dir);
                                                   >> 325 
                                                   >> 326         return ents;
                                                   >> 327 }
                                                   >> 328 
                                                   >> 329 static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
                                                   >> 330                                       int nents, enum dma_data_direction dir,
                                                   >> 331                                       unsigned long attrs)
                                                   >> 332 {
                                                   >> 333         const struct dma_map_ops *ops = get_dma_ops(dev);
                                                   >> 334 
                                                   >> 335         BUG_ON(!valid_dma_direction(dir));
                                                   >> 336         debug_dma_unmap_sg(dev, sg, nents, dir);
                                                   >> 337         if (dma_is_direct(ops))
                                                   >> 338                 dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
                                                   >> 339         else if (ops->unmap_sg)
                                                   >> 340                 ops->unmap_sg(dev, sg, nents, dir, attrs);
                                                   >> 341 }
                                                   >> 342 
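As the comment above notes, dma_map_sg_attrs() returns 0 on failure and otherwise the number of mapped segments, which may be smaller than nents if entries were merged; the unmap call must still be passed the original nents. A sketch assuming a caller-populated sg_table:

        static int example_map_sg(struct device *dev, struct sg_table *sgt)
        {
                struct scatterlist *sg;
                int i, count;

                count = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents,
                                         DMA_BIDIRECTIONAL, 0);
                if (!count)
                        return -ENOMEM;

                for_each_sg(sgt->sgl, sg, count, i) {
                        /* program sg_dma_address(sg) / sg_dma_len(sg) */
                }

                /* unmap with the original entry count, not 'count' */
                dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents,
                                   DMA_BIDIRECTIONAL, 0);
                return 0;
        }
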
                                                   >> 343 static inline dma_addr_t dma_map_resource(struct device *dev,
                                                   >> 344                                           phys_addr_t phys_addr,
                                                   >> 345                                           size_t size,
                                                   >> 346                                           enum dma_data_direction dir,
                                                   >> 347                                           unsigned long attrs)
                                                   >> 348 {
                                                   >> 349         const struct dma_map_ops *ops = get_dma_ops(dev);
                                                   >> 350         dma_addr_t addr = DMA_MAPPING_ERROR;
                                                   >> 351 
                                                   >> 352         BUG_ON(!valid_dma_direction(dir));
                                                   >> 353 
                                                   >> 354         /* Don't allow RAM to be mapped */
                                                   >> 355         if (WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr))))
                                                   >> 356                 return DMA_MAPPING_ERROR;
                                                   >> 357 
                                                   >> 358         if (dma_is_direct(ops))
                                                   >> 359                 addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
                                                   >> 360         else if (ops->map_resource)
                                                   >> 361                 addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
                                                   >> 362 
                                                   >> 363         debug_dma_map_resource(dev, phys_addr, size, dir, addr);
                                                   >> 364         return addr;
                                                   >> 365 }
                                                   >> 366 
                                                   >> 367 static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
                                                   >> 368                                       size_t size, enum dma_data_direction dir,
                                                   >> 369                                       unsigned long attrs)
                                                   >> 370 {
                                                   >> 371         const struct dma_map_ops *ops = get_dma_ops(dev);
                                                   >> 372 
                                                   >> 373         BUG_ON(!valid_dma_direction(dir));
                                                   >> 374         if (!dma_is_direct(ops) && ops->unmap_resource)
                                                   >> 375                 ops->unmap_resource(dev, addr, size, dir, attrs);
                                                   >> 376         debug_dma_unmap_resource(dev, addr, size, dir);
                                                   >> 377 }
                                                   >> 378 
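dma_map_resource() maps a physical address that is explicitly not RAM, hence the pfn_valid() WARN in the 5.4 body above; the typical consumer is a DMA-engine driver targeting another device's MMIO FIFO. A hypothetical sketch:

        static dma_addr_t example_map_fifo(struct device *dma_dev,
                                           phys_addr_t fifo_phys, size_t len)
        {
                dma_addr_t addr;

                addr = dma_map_resource(dma_dev, fifo_phys, len,
                                        DMA_BIDIRECTIONAL, 0);
                if (dma_mapping_error(dma_dev, addr))
                        return DMA_MAPPING_ERROR;
                return addr;    /* released later with dma_unmap_resource() */
        }
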
                                                   >> 379 static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
                                                   >> 380                                            size_t size,
                                                   >> 381                                            enum dma_data_direction dir)
                                                   >> 382 {
                                                   >> 383         const struct dma_map_ops *ops = get_dma_ops(dev);
                                                   >> 384 
                                                   >> 385         BUG_ON(!valid_dma_direction(dir));
                                                   >> 386         if (dma_is_direct(ops))
                                                   >> 387                 dma_direct_sync_single_for_cpu(dev, addr, size, dir);
                                                   >> 388         else if (ops->sync_single_for_cpu)
                                                   >> 389                 ops->sync_single_for_cpu(dev, addr, size, dir);
                                                   >> 390         debug_dma_sync_single_for_cpu(dev, addr, size, dir);
                                                   >> 391 }
                                                   >> 392 
                                                   >> 393 static inline void dma_sync_single_for_device(struct device *dev,
                                                   >> 394                                               dma_addr_t addr, size_t size,
                                                   >> 395                                               enum dma_data_direction dir)
                                                   >> 396 {
                                                   >> 397         const struct dma_map_ops *ops = get_dma_ops(dev);
                                                   >> 398 
                                                   >> 399         BUG_ON(!valid_dma_direction(dir));
                                                   >> 400         if (dma_is_direct(ops))
                                                   >> 401                 dma_direct_sync_single_for_device(dev, addr, size, dir);
                                                   >> 402         else if (ops->sync_single_for_device)
                                                   >> 403                 ops->sync_single_for_device(dev, addr, size, dir);
                                                   >> 404         debug_dma_sync_single_for_device(dev, addr, size, dir);
                                                   >> 405 }
                                                   >> 406 
                                                   >> 407 static inline void
                                                   >> 408 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                                                   >> 409                     int nelems, enum dma_data_direction dir)
                                                   >> 410 {
                                                   >> 411         const struct dma_map_ops *ops = get_dma_ops(dev);
                                                   >> 412 
                                                   >> 413         BUG_ON(!valid_dma_direction(dir));
                                                   >> 414         if (dma_is_direct(ops))
                                                   >> 415                 dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
                                                   >> 416         else if (ops->sync_sg_for_cpu)
                                                   >> 417                 ops->sync_sg_for_cpu(dev, sg, nelems, dir);
                                                   >> 418         debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
                                                   >> 419 }
                                                   >> 420 
                                                   >> 421 static inline void
                                                   >> 422 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                                                   >> 423                        int nelems, enum dma_data_direction dir)
                                                   >> 424 {
                                                   >> 425         const struct dma_map_ops *ops = get_dma_ops(dev);
                                                   >> 426 
                                                   >> 427         BUG_ON(!valid_dma_direction(dir));
                                                   >> 428         if (dma_is_direct(ops))
                                                   >> 429                 dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
                                                   >> 430         else if (ops->sync_sg_for_device)
                                                   >> 431                 ops->sync_sg_for_device(dev, sg, nelems, dir);
                                                   >> 432         debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
                                                   >> 433 
                                                   >> 434 }
                                                   >> 435 
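The sync calls bracket CPU access to a buffer that stays mapped for streaming DMA: ownership moves to the CPU with the *_for_cpu variants and back to the device with *_for_device. Sketch (all names hypothetical):

        static void example_peek_rx(struct device *dev, dma_addr_t addr,
                                    void *buf, size_t len)
        {
                dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);

                /* ... the CPU may now read 'buf' coherently ... */

                dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);
        }
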
 95 static inline int dma_mapping_error(struct dev    436 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 96 {                                                 437 {
 97         debug_dma_mapping_error(dev, dma_addr)    438         debug_dma_mapping_error(dev, dma_addr);
 98                                                   439 
 99         if (unlikely(dma_addr == DMA_MAPPING_E !! 440         if (dma_addr == DMA_MAPPING_ERROR)
100                 return -ENOMEM;                   441                 return -ENOMEM;
101         return 0;                                 442         return 0;
102 }                                                 443 }
103                                                   444 
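Because DMA_MAPPING_ERROR is, per the comment earlier in the file, not meant to be tested directly by drivers, dma_mapping_error() is the one sanctioned check, and it also hooks into CONFIG_DMA_API_DEBUG. Usage sketch, via the dma_map_single() wrapper defined further down in this header (example_check is hypothetical):

        static int example_check(struct device *dev, void *buf, size_t len,
                                 dma_addr_t *out)
        {
                *out = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
                return dma_mapping_error(dev, *out);    /* 0 or -ENOMEM */
        }
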
104 dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,  <<
105                 size_t offset, size_t size, enum dma_data_direction dir,  <<
106                 unsigned long attrs);  <<
107 void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,  <<
108                 enum dma_data_direction dir, unsigned long attrs);  <<
109 unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,  <<
110                 int nents, enum dma_data_direction dir, unsigned long attrs);  <<
111 void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,  <<
112                                       int nents, enum dma_data_direction dir,  <<
113                                       unsigned long attrs);  <<
114 int dma_map_sgtable(struct device *dev, struct sg_table *sgt,  <<
115                 enum dma_data_direction dir, unsigned long attrs);  <<
116 dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,  <<
117                 size_t size, enum dma_data_direction dir, unsigned long attrs);  <<
118 void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,  <<
119                 enum dma_data_direction dir, unsigned long attrs);  <<
120 void *dma_alloc_attrs(struct device *dev, size    445 void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
121                 gfp_t flag, unsigned long attr    446                 gfp_t flag, unsigned long attrs);
122 void dma_free_attrs(struct device *dev, size_t    447 void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
123                 dma_addr_t dma_handle, unsigne    448                 dma_addr_t dma_handle, unsigned long attrs);
124 void *dmam_alloc_attrs(struct device *dev, siz    449 void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
125                 gfp_t gfp, unsigned long attrs    450                 gfp_t gfp, unsigned long attrs);
126 void dmam_free_coherent(struct device *dev, si    451 void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
127                 dma_addr_t dma_handle);           452                 dma_addr_t dma_handle);
                                                   >> 453 void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                                                   >> 454                 enum dma_data_direction dir);
128 int dma_get_sgtable_attrs(struct device *dev,     455 int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
129                 void *cpu_addr, dma_addr_t dma    456                 void *cpu_addr, dma_addr_t dma_addr, size_t size,
130                 unsigned long attrs);             457                 unsigned long attrs);
131 int dma_mmap_attrs(struct device *dev, struct     458 int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
132                 void *cpu_addr, dma_addr_t dma    459                 void *cpu_addr, dma_addr_t dma_addr, size_t size,
133                 unsigned long attrs);             460                 unsigned long attrs);
134 bool dma_can_mmap(struct device *dev);            461 bool dma_can_mmap(struct device *dev);
135 bool dma_pci_p2pdma_supported(struct device *d !! 462 int dma_supported(struct device *dev, u64 mask);
136 int dma_set_mask(struct device *dev, u64 mask)    463 int dma_set_mask(struct device *dev, u64 mask);
137 int dma_set_coherent_mask(struct device *dev,     464 int dma_set_coherent_mask(struct device *dev, u64 mask);
138 u64 dma_get_required_mask(struct device *dev);    465 u64 dma_get_required_mask(struct device *dev);
139 bool dma_addressing_limited(struct device *dev);  <<
140 size_t dma_max_mapping_size(struct device *dev    466 size_t dma_max_mapping_size(struct device *dev);
141 size_t dma_opt_mapping_size(struct device *dev);  <<
142 unsigned long dma_get_merge_boundary(struct de    467 unsigned long dma_get_merge_boundary(struct device *dev);
143 struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,  <<
144                 enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);  <<
145 void dma_free_noncontiguous(struct device *dev, size_t size,  <<
146                 struct sg_table *sgt, enum dma_data_direction dir);  <<
147 void *dma_vmap_noncontiguous(struct device *dev, size_t size,  <<
148                 struct sg_table *sgt);  <<
149 void dma_vunmap_noncontiguous(struct device *dev, void *vaddr);  <<
150 int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,  <<
151                 size_t size, struct sg_table *sgt);  <<
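
The allocation entry points declared above back the classic dma_alloc_coherent()/dma_free_coherent() helpers. A hedged sketch of a long-lived descriptor ring (RING_BYTES and the function names are invented for the example):

        #define RING_BYTES      4096

        static void *example_ring_alloc(struct device *dev, dma_addr_t *ring_dma)
        {
                return dma_alloc_attrs(dev, RING_BYTES, ring_dma, GFP_KERNEL, 0);
        }

        static void example_ring_free(struct device *dev, void *ring,
                                      dma_addr_t ring_dma)
        {
                dma_free_attrs(dev, RING_BYTES, ring, ring_dma, 0);
        }
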
152 #else /* CONFIG_HAS_DMA */                        468 #else /* CONFIG_HAS_DMA */
153 static inline dma_addr_t dma_map_page_attrs(st    469 static inline dma_addr_t dma_map_page_attrs(struct device *dev,
154                 struct page *page, size_t offs    470                 struct page *page, size_t offset, size_t size,
155                 enum dma_data_direction dir, u    471                 enum dma_data_direction dir, unsigned long attrs)
156 {                                                 472 {
157         return DMA_MAPPING_ERROR;                 473         return DMA_MAPPING_ERROR;
158 }                                                 474 }
159 static inline void dma_unmap_page_attrs(struct    475 static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
160                 size_t size, enum dma_data_dir    476                 size_t size, enum dma_data_direction dir, unsigned long attrs)
161 {                                                 477 {
162 }                                                 478 }
163 static inline unsigned int dma_map_sg_attrs(st !! 479 static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
164                 struct scatterlist *sg, int ne !! 480                 int nents, enum dma_data_direction dir, unsigned long attrs)
165                 unsigned long attrs)           << 
166 {                                                 481 {
167         return 0;                                 482         return 0;
168 }                                                 483 }
169 static inline void dma_unmap_sg_attrs(struct d    484 static inline void dma_unmap_sg_attrs(struct device *dev,
170                 struct scatterlist *sg, int ne    485                 struct scatterlist *sg, int nents, enum dma_data_direction dir,
171                 unsigned long attrs)              486                 unsigned long attrs)
172 {                                                 487 {
173 }                                                 488 }
174 static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,  <<
175                 enum dma_data_direction dir, unsigned long attrs)  <<
176 {                                              << 
177         return -EOPNOTSUPP;                    << 
178 }                                              << 
179 static inline dma_addr_t dma_map_resource(stru    489 static inline dma_addr_t dma_map_resource(struct device *dev,
180                 phys_addr_t phys_addr, size_t     490                 phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
181                 unsigned long attrs)              491                 unsigned long attrs)
182 {                                                 492 {
183         return DMA_MAPPING_ERROR;                 493         return DMA_MAPPING_ERROR;
184 }                                                 494 }
185 static inline void dma_unmap_resource(struct d    495 static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
186                 size_t size, enum dma_data_dir    496                 size_t size, enum dma_data_direction dir, unsigned long attrs)
187 {                                                 497 {
188 }                                                 498 }
                                                   >> 499 static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
                                                   >> 500                 size_t size, enum dma_data_direction dir)
                                                   >> 501 {
                                                   >> 502 }
                                                   >> 503 static inline void dma_sync_single_for_device(struct device *dev,
                                                   >> 504                 dma_addr_t addr, size_t size, enum dma_data_direction dir)
                                                   >> 505 {
                                                   >> 506 }
                                                   >> 507 static inline void dma_sync_sg_for_cpu(struct device *dev,
                                                   >> 508                 struct scatterlist *sg, int nelems, enum dma_data_direction dir)
                                                   >> 509 {
                                                   >> 510 }
                                                   >> 511 static inline void dma_sync_sg_for_device(struct device *dev,
                                                   >> 512                 struct scatterlist *sg, int nelems, enum dma_data_direction dir)
                                                   >> 513 {
                                                   >> 514 }
189 static inline int dma_mapping_error(struct dev    515 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
190 {                                                 516 {
191         return -ENOMEM;                           517         return -ENOMEM;
192 }                                                 518 }
193 static inline void *dma_alloc_attrs(struct dev    519 static inline void *dma_alloc_attrs(struct device *dev, size_t size,
194                 dma_addr_t *dma_handle, gfp_t     520                 dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
195 {                                                 521 {
196         return NULL;                              522         return NULL;
197 }                                                 523 }
198 static void dma_free_attrs(struct device *dev,    524 static void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
199                 dma_addr_t dma_handle, unsigne    525                 dma_addr_t dma_handle, unsigned long attrs)
200 {                                                 526 {
201 }                                                 527 }
202 static inline void *dmam_alloc_attrs(struct de    528 static inline void *dmam_alloc_attrs(struct device *dev, size_t size,
203                 dma_addr_t *dma_handle, gfp_t     529                 dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
204 {                                                 530 {
205         return NULL;                              531         return NULL;
206 }                                                 532 }
207 static inline void dmam_free_coherent(struct d    533 static inline void dmam_free_coherent(struct device *dev, size_t size,
208                 void *vaddr, dma_addr_t dma_ha    534                 void *vaddr, dma_addr_t dma_handle)
209 {                                                 535 {
210 }                                                 536 }
                                                   >> 537 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                                                   >> 538                 enum dma_data_direction dir)
                                                   >> 539 {
                                                   >> 540 }
211 static inline int dma_get_sgtable_attrs(struct    541 static inline int dma_get_sgtable_attrs(struct device *dev,
212                 struct sg_table *sgt, void *cp    542                 struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
213                 size_t size, unsigned long att    543                 size_t size, unsigned long attrs)
214 {                                                 544 {
215         return -ENXIO;                            545         return -ENXIO;
216 }                                                 546 }
217 static inline int dma_mmap_attrs(struct device    547 static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
218                 void *cpu_addr, dma_addr_t dma    548                 void *cpu_addr, dma_addr_t dma_addr, size_t size,
219                 unsigned long attrs)              549                 unsigned long attrs)
220 {                                                 550 {
221         return -ENXIO;                            551         return -ENXIO;
222 }                                                 552 }
223 static inline bool dma_can_mmap(struct device     553 static inline bool dma_can_mmap(struct device *dev)
224 {                                                 554 {
225         return false;                             555         return false;
226 }                                                 556 }
227 static inline bool dma_pci_p2pdma_supported(struct device *dev)  !! 557 static inline int dma_supported(struct device *dev, u64 mask)
228 {                                                                    558 {
229         return false;                                            !! 559         return 0;
230 }                                                                    560 }
231 static inline int dma_set_mask(struct device *    561 static inline int dma_set_mask(struct device *dev, u64 mask)
232 {                                                 562 {
233         return -EIO;                              563         return -EIO;
234 }                                                 564 }
235 static inline int dma_set_coherent_mask(struct    565 static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
236 {                                                 566 {
237         return -EIO;                              567         return -EIO;
238 }                                                 568 }
239 static inline u64 dma_get_required_mask(struct    569 static inline u64 dma_get_required_mask(struct device *dev)
240 {                                                 570 {
241         return 0;                                 571         return 0;
242 }                                                 572 }
243 static inline bool dma_addressing_limited(struct device *dev)    <<
244 {                                                                 <<
245         return false;                                             <<
246 }                                                                 <<
247 static inline size_t dma_max_mapping_size(stru    573 static inline size_t dma_max_mapping_size(struct device *dev)
248 {                                                 574 {
249         return 0;                                 575         return 0;
250 }                                                 576 }
251 static inline size_t dma_opt_mapping_size(struct device *dev)    <<
252 {                                                                 <<
253         return 0;                                                 <<
254 }                                                                 <<
255 static inline unsigned long dma_get_merge_boun    577 static inline unsigned long dma_get_merge_boundary(struct device *dev)
256 {                                                 578 {
257         return 0;                                 579         return 0;
258 }                                                 580 }
259 static inline struct sg_table *dma_alloc_noncontiguous(struct device *dev,  <<
260                 size_t size, enum dma_data_direction dir, gfp_t gfp,  <<
261                 unsigned long attrs)                              <<
262 {                                                                 <<
263         return NULL;                                              <<
264 }                                                                 <<
265 static inline void dma_free_noncontiguous(struct device *dev, size_t size,  <<
266                 struct sg_table *sgt, enum dma_data_direction dir)  <<
267 {                                                                 <<
268 }                                                                 <<
269 static inline void *dma_vmap_noncontiguous(struct device *dev, size_t size,  <<
270                 struct sg_table *sgt)                             <<
271 {                                                                 <<
272         return NULL;                                              <<
273 }                                                                 <<
274 static inline void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)  <<
275 {                                                                 <<
276 }                                                                 <<
277 static inline int dma_mmap_noncontiguous(struct device *dev,      <<
278                 struct vm_area_struct *vma, size_t size, struct sg_table *sgt)  <<
279 {                                                                 <<
280         return -EINVAL;                                           <<
281 }                                                                 <<
282 #endif /* CONFIG_HAS_DMA */                       581 #endif /* CONFIG_HAS_DMA */
283                                                   582 
284 #if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC)     <<
285 void __dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,  <<
286                 enum dma_data_direction dir);                     <<
287 void __dma_sync_single_for_device(struct device *dev, dma_addr_t addr,  <<
288                 size_t size, enum dma_data_direction dir);        <<
289 void __dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,  <<
290                 int nelems, enum dma_data_direction dir);         <<
291 void __dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,  <<
292                 int nelems, enum dma_data_direction dir);         <<
293 bool __dma_need_sync(struct device *dev, dma_addr_t dma_addr);    <<
294                                                                   <<
295 static inline bool dma_dev_need_sync(const struct device *dev)    <<
296 {                                                                 <<
297         /* Always call DMA sync operations when debugging is enabled */  <<
298         return !dev->dma_skip_sync || IS_ENABLED(CONFIG_DMA_API_DEBUG);  <<
299 }                                                                 <<
300                                                                   <<
301 static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,  <<
302                 size_t size, enum dma_data_direction dir)         <<
303 {                                                                 <<
304         if (dma_dev_need_sync(dev))                               <<
305                 __dma_sync_single_for_cpu(dev, addr, size, dir);  <<
306 }                                                                 <<
307                                                                   <<
308 static inline void dma_sync_single_for_device(struct device *dev,  <<
309                 dma_addr_t addr, size_t size, enum dma_data_direction dir)  <<
310 {                                                                 <<
311         if (dma_dev_need_sync(dev))                               <<
312                 __dma_sync_single_for_device(dev, addr, size, dir);  <<
313 }                                                                 <<
314                                                                   <<
315 static inline void dma_sync_sg_for_cpu(struct device *dev,        <<
316                 struct scatterlist *sg, int nelems, enum dma_data_direction dir)  <<
317 {                                                                 <<
318         if (dma_dev_need_sync(dev))                               <<
319                 __dma_sync_sg_for_cpu(dev, sg, nelems, dir);      <<
320 }                                                                 <<
321                                                                   <<
322 static inline void dma_sync_sg_for_device(struct device *dev,     <<
323                 struct scatterlist *sg, int nelems, enum dma_data_direction dir)  <<
324 {                                                                 <<
325         if (dma_dev_need_sync(dev))                               <<
326                 __dma_sync_sg_for_device(dev, sg, nelems, dir);   <<
327 }                                                                 <<
328                                                                   <<
329 static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)  <<
330 {                                                                 <<
331         return dma_dev_need_sync(dev) ? __dma_need_sync(dev, dma_addr) : false;  <<
332 }                                                                 <<
333 #else /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */              <<
334 static inline bool dma_dev_need_sync(const struct device *dev)    <<
335 {                                                                 <<
336         return false;                                             <<
337 }                                                                 <<
338 static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,  <<
339                 size_t size, enum dma_data_direction dir)         <<
340 {                                                                 <<
341 }                                                                 <<
342 static inline void dma_sync_single_for_device(struct device *dev,  <<
343                 dma_addr_t addr, size_t size, enum dma_data_direction dir)  <<
344 {                                                                 <<
345 }                                                                 <<
346 static inline void dma_sync_sg_for_cpu(struct device *dev,        <<
347                 struct scatterlist *sg, int nelems, enum dma_data_direction dir)  <<
348 {                                                                 <<
349 }                                                                 <<
350 static inline void dma_sync_sg_for_device(struct device *dev,     <<
351                 struct scatterlist *sg, int nelems, enum dma_data_direction dir)  <<
352 {                                                                 <<
353 }                                                                 <<
354 static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)  <<
355 {                                                                 <<
356         return false;                                             <<
357 }                                                                 <<
358 #endif /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_SYNC */             <<
359                                                << 
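
When a device sets dma_skip_sync, every wrapper above reduces to a cheap flag test. Some hot paths go one step further and cache the dma_need_sync() verdict per buffer; the following is a sketch of that pattern, with a hypothetical my_rx_ring structure and no claim that it matches any particular driver.

struct my_rx_ring {                     /* hypothetical bookkeeping */
        struct device *dev;
        dma_addr_t buf_dma;             /* from a prior streaming map */
        size_t buf_len;
        bool need_sync;                 /* cached dma_need_sync() result */
};

static void my_rx_ring_init_sync(struct my_rx_ring *ring)
{
        /* false for coherent devices, so the hot path can skip syncing */
        ring->need_sync = dma_need_sync(ring->dev, ring->buf_dma);
}

static void my_rx_take_for_cpu(struct my_rx_ring *ring)
{
        if (ring->need_sync)
                dma_sync_single_for_cpu(ring->dev, ring->buf_dma,
                                        ring->buf_len, DMA_FROM_DEVICE);
}
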
360 struct page *dma_alloc_pages(struct device *dev, size_t size,    <<
361                 dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);  <<
362 void dma_free_pages(struct device *dev, size_t size, struct page *page,  <<
363                 dma_addr_t dma_handle, enum dma_data_direction dir);  <<
364 int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,  <<
365                 size_t size, struct page *page);                  <<
366                                                                   <<
367 static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,  <<
368                 dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)  <<
369 {                                                                 <<
370         struct page *page = dma_alloc_pages(dev, size, dma_handle, dir, gfp);  <<
371         return page ? page_address(page) : NULL;                  <<
372 }                                                                 <<
373                                                                   <<
374 static inline void dma_free_noncoherent(struct device *dev, size_t size,  <<
375                 void *vaddr, dma_addr_t dma_handle, enum dma_data_direction dir)  <<
376 {                                                                 <<
377         dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir);  <<
378 }                                                                 <<
379                                                << 
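
As the inline above shows, dma_alloc_noncoherent() is dma_alloc_pages() plus page_address(), so the returned buffer follows streaming ownership rules rather than coherent ones. A minimal sketch, assuming an already-bound device and a made-up function name:

static int demo_noncoherent(struct device *dev)
{
        dma_addr_t dma;
        void *vaddr;

        vaddr = dma_alloc_noncoherent(dev, SZ_4K, &dma, DMA_FROM_DEVICE,
                                      GFP_KERNEL);
        if (!vaddr)
                return -ENOMEM;

        /* ... point the device at "dma" and let it fill the buffer ... */

        /* reclaim ownership before the CPU reads the data */
        dma_sync_single_for_cpu(dev, dma, SZ_4K, DMA_FROM_DEVICE);

        dma_free_noncoherent(dev, SZ_4K, vaddr, dma, DMA_FROM_DEVICE);
        return 0;
}
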
380 static inline dma_addr_t dma_map_single_attrs(    583 static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
381                 size_t size, enum dma_data_dir    584                 size_t size, enum dma_data_direction dir, unsigned long attrs)
382 {                                                 585 {
383         /* DMA must never operate on areas tha    586         /* DMA must never operate on areas that might be remapped. */
384         if (dev_WARN_ONCE(dev, is_vmalloc_addr    587         if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
385                           "rejecting DMA map o    588                           "rejecting DMA map of vmalloc memory\n"))
386                 return DMA_MAPPING_ERROR;         589                 return DMA_MAPPING_ERROR;
387         debug_dma_map_single(dev, ptr, size);     590         debug_dma_map_single(dev, ptr, size);
388         return dma_map_page_attrs(dev, virt_to    591         return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
389                         size, dir, attrs);        592                         size, dir, attrs);
390 }                                                 593 }
391                                                   594 
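
The vmalloc guard above exists because virt_to_page() is only valid for linear-map addresses, such as those kmalloc() returns. A hedged sketch of the resulting dma_map_single() usage pattern, including the dma_mapping_error() check the API mandates; the function itself is illustrative only:

static int demo_map_single(struct device *dev, size_t len)
{
        void *buf = kmalloc(len, GFP_KERNEL);   /* linear map: OK to map */
        dma_addr_t dma;

        if (!buf)
                return -ENOMEM;

        dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, dma)) {      /* never skip this check */
                kfree(buf);
                return -ENOMEM;
        }

        /* ... hand "dma" to the hardware and wait for completion ... */

        dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
        kfree(buf);
        return 0;
}
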
392 static inline void dma_unmap_single_attrs(stru    595 static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
393                 size_t size, enum dma_data_dir    596                 size_t size, enum dma_data_direction dir, unsigned long attrs)
394 {                                                 597 {
395         return dma_unmap_page_attrs(dev, addr,    598         return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
396 }                                                 599 }
397                                                   600 
398 static inline void dma_sync_single_range_for_c    601 static inline void dma_sync_single_range_for_cpu(struct device *dev,
399                 dma_addr_t addr, unsigned long    602                 dma_addr_t addr, unsigned long offset, size_t size,
400                 enum dma_data_direction dir)      603                 enum dma_data_direction dir)
401 {                                                 604 {
402         return dma_sync_single_for_cpu(dev, ad    605         return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
403 }                                                 606 }
404                                                   607 
405 static inline void dma_sync_single_range_for_d    608 static inline void dma_sync_single_range_for_device(struct device *dev,
406                 dma_addr_t addr, unsigned long    609                 dma_addr_t addr, unsigned long offset, size_t size,
407                 enum dma_data_direction dir)      610                 enum dma_data_direction dir)
408 {                                                 611 {
409         return dma_sync_single_for_device(dev,    612         return dma_sync_single_for_device(dev, addr + offset, size, dir);
410 }                                                 613 }
411                                                   614 
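
Because the range variants merely add @offset to the base handle, they suit long-lived mappings where only a slice changes hands. A two-call sketch, assuming dev and frame_dma describe an existing DMA_FROM_DEVICE mapping of at least 256 bytes:

        /* reclaim just the frame header for CPU inspection */
        dma_sync_single_range_for_cpu(dev, frame_dma, 0, 256, DMA_FROM_DEVICE);
        /* ... parse the header ... */
        dma_sync_single_range_for_device(dev, frame_dma, 0, 256, DMA_FROM_DEVICE);
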
412 /**                                               615 /**
                                                   >> 616  * dma_map_sgtable - Map the given buffer for DMA
                                                   >> 617  * @dev:        The device for which to perform the DMA operation
                                                   >> 618  * @sgt:        The sg_table object describing the buffer
                                                   >> 619  * @dir:        DMA direction
                                                   >> 620  * @attrs:      Optional DMA attributes for the map operation
                                                   >> 621  *
                                                   >> 622  * Maps a buffer described by a scatterlist stored in the given sg_table
                                                   >> 623  * object for the @dir DMA operation by the @dev device. After success the
                                                   >> 624  * ownership for the buffer is transferred to the DMA domain.  One has to
                                                   >> 625  * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
                                                   >> 626  * ownership of the buffer back to the CPU domain before touching the
                                                   >> 627  * buffer by the CPU.
                                                   >> 628  *
                                                   >> 629  * Returns 0 on success or -EINVAL on error during mapping the buffer.
                                                   >> 630  */
                                                   >> 631 static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
                                                   >> 632                 enum dma_data_direction dir, unsigned long attrs)
                                                   >> 633 {
                                                   >> 634         int nents;
                                                   >> 635 
                                                   >> 636         nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
                                                   >> 637         if (nents <= 0)
                                                   >> 638                 return -EINVAL;
                                                   >> 639         sgt->nents = nents;
                                                   >> 640         return 0;
                                                   >> 641 }
                                                   >> 642 
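
A hedged end-to-end sketch of the ownership flow described in the kernel-doc above, wrapping a single page in a one-entry sg_table; real callers usually receive their sg_table from dma-buf or the block layer, and demo_sgtable() is invented for illustration:

static int demo_sgtable(struct device *dev, struct page *page)
{
        struct sg_table sgt;
        int ret;

        ret = sg_alloc_table(&sgt, 1, GFP_KERNEL);
        if (ret)
                return ret;
        sg_set_page(sgt.sgl, page, PAGE_SIZE, 0);

        ret = dma_map_sgtable(dev, &sgt, DMA_BIDIRECTIONAL, 0);
        if (ret)
                goto free_table;        /* nothing was mapped */

        /* ... device owns the buffer; program it from sgt.sgl ... */

        dma_unmap_sgtable(dev, &sgt, DMA_BIDIRECTIONAL, 0);
free_table:
        sg_free_table(&sgt);
        return ret;
}
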
                                                   >> 643 /**
413  * dma_unmap_sgtable - Unmap the given buffer     644  * dma_unmap_sgtable - Unmap the given buffer for DMA
414  * @dev:        The device for which to perfor    645  * @dev:        The device for which to perform the DMA operation
415  * @sgt:        The sg_table object describing    646  * @sgt:        The sg_table object describing the buffer
416  * @dir:        DMA direction                     647  * @dir:        DMA direction
417  * @attrs:      Optional DMA attributes for th    648  * @attrs:      Optional DMA attributes for the unmap operation
418  *                                                649  *
419  * Unmaps a buffer described by a scatterlist     650  * Unmaps a buffer described by a scatterlist stored in the given sg_table
420  * object for the @dir DMA operation by the @d    651  * object for the @dir DMA operation by the @dev device. After this function
421  * the ownership of the buffer is transferred     652  * the ownership of the buffer is transferred back to the CPU domain.
422  */                                               653  */
423 static inline void dma_unmap_sgtable(struct de    654 static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
424                 enum dma_data_direction dir, u    655                 enum dma_data_direction dir, unsigned long attrs)
425 {                                                 656 {
426         dma_unmap_sg_attrs(dev, sgt->sgl, sgt-    657         dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
427 }                                                 658 }
428                                                   659 
429 /**                                               660 /**
430  * dma_sync_sgtable_for_cpu - Synchronize the     661  * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access
431  * @dev:        The device for which to perfor    662  * @dev:        The device for which to perform the DMA operation
432  * @sgt:        The sg_table object describing    663  * @sgt:        The sg_table object describing the buffer
433  * @dir:        DMA direction                     664  * @dir:        DMA direction
434  *                                                665  *
435  * Performs the needed cache synchronization a    666  * Performs the needed cache synchronization and moves the ownership of the
436  * buffer back to the CPU domain, so it is saf    667  * buffer back to the CPU domain, so it is safe to perform any access to it
437  * by the CPU. Before doing any further DMA op    668  * by the CPU. Before doing any further DMA operations, one has to transfer
438  * the ownership of the buffer back to the DMA    669  * the ownership of the buffer back to the DMA domain by calling the
439  * dma_sync_sgtable_for_device().                 670  * dma_sync_sgtable_for_device().
440  */                                               671  */
441 static inline void dma_sync_sgtable_for_cpu(st    672 static inline void dma_sync_sgtable_for_cpu(struct device *dev,
442                 struct sg_table *sgt, enum dma    673                 struct sg_table *sgt, enum dma_data_direction dir)
443 {                                                 674 {
444         dma_sync_sg_for_cpu(dev, sgt->sgl, sgt    675         dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
445 }                                                 676 }
446                                                   677 
447 /**                                               678 /**
448  * dma_sync_sgtable_for_device - Synchronize t    679  * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA
449  * @dev:        The device for which to perfor    680  * @dev:        The device for which to perform the DMA operation
450  * @sgt:        The sg_table object describing    681  * @sgt:        The sg_table object describing the buffer
451  * @dir:        DMA direction                     682  * @dir:        DMA direction
452  *                                                683  *
453  * Performs the needed cache synchronization a    684  * Performs the needed cache synchronization and moves the ownership of the
454  * buffer back to the DMA domain, so it is saf    685  * buffer back to the DMA domain, so it is safe to perform the DMA operation.
455  * Once finished, one has to call dma_sync_sgt    686  * Once finished, one has to call dma_sync_sgtable_for_cpu() or
456  * dma_unmap_sgtable().                           687  * dma_unmap_sgtable().
457  */                                               688  */
458 static inline void dma_sync_sgtable_for_device    689 static inline void dma_sync_sgtable_for_device(struct device *dev,
459                 struct sg_table *sgt, enum dma    690                 struct sg_table *sgt, enum dma_data_direction dir)
460 {                                                 691 {
461         dma_sync_sg_for_device(dev, sgt->sgl,     692         dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
462 }                                                 693 }
463                                                   694 
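
Taken together, the two sgtable sync helpers bracket temporary CPU access to a table that stays mapped. A short sketch, assuming sgt was mapped earlier with dma_map_sgtable(dev, sgt, DMA_FROM_DEVICE, 0):

        dma_sync_sgtable_for_cpu(dev, sgt, DMA_FROM_DEVICE);
        /* ... the CPU may now read the pages behind sgt->sgl ... */
        dma_sync_sgtable_for_device(dev, sgt, DMA_FROM_DEVICE);
        /* ... the device may DMA into the buffer again ... */
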
464 #define dma_map_single(d, a, s, r) dma_map_sin    695 #define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
465 #define dma_unmap_single(d, a, s, r) dma_unmap    696 #define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
466 #define dma_map_sg(d, s, n, r) dma_map_sg_attr    697 #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
467 #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_    698 #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
468 #define dma_map_page(d, p, o, s, r) dma_map_pa    699 #define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
469 #define dma_unmap_page(d, a, s, r) dma_unmap_p    700 #define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
470 #define dma_get_sgtable(d, t, v, h, s) dma_get    701 #define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
471 #define dma_mmap_coherent(d, v, c, h, s) dma_m    702 #define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
472                                                   703 
473 bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size);  !! 704 extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
                                                   >> 705                 void *cpu_addr, dma_addr_t dma_addr, size_t size,
                                                   >> 706                 unsigned long attrs);
                                                   >> 707 
                                                   >> 708 struct page **dma_common_find_pages(void *cpu_addr);
                                                   >> 709 void *dma_common_contiguous_remap(struct page *page, size_t size,
                                                   >> 710                         pgprot_t prot, const void *caller);
                                                   >> 711 
                                                   >> 712 void *dma_common_pages_remap(struct page **pages, size_t size,
                                                   >> 713                         pgprot_t prot, const void *caller);
                                                   >> 714 void dma_common_free_remap(void *cpu_addr, size_t size);
                                                   >> 715 
                                                   >> 716 bool dma_in_atomic_pool(void *start, size_t size);
                                                   >> 717 void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags);
                                                   >> 718 bool dma_free_from_pool(void *start, size_t size);
                                                   >> 719 
                                                   >> 720 int
                                                   >> 721 dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr,
                                                   >> 722                 dma_addr_t dma_addr, size_t size, unsigned long attrs);
474                                                   723 
475 static inline void *dma_alloc_coherent(struct     724 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
476                 dma_addr_t *dma_handle, gfp_t     725                 dma_addr_t *dma_handle, gfp_t gfp)
477 {                                                 726 {
                                                   >> 727 
478         return dma_alloc_attrs(dev, size, dma_    728         return dma_alloc_attrs(dev, size, dma_handle, gfp,
479                         (gfp & __GFP_NOWARN) ?    729                         (gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
480 }                                                 730 }
481                                                   731 
482 static inline void dma_free_coherent(struct de    732 static inline void dma_free_coherent(struct device *dev, size_t size,
483                 void *cpu_addr, dma_addr_t dma    733                 void *cpu_addr, dma_addr_t dma_handle)
484 {                                                 734 {
485         return dma_free_attrs(dev, size, cpu_a    735         return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
486 }                                                 736 }
487                                                   737 
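
Coherent allocations need no dma_sync_*() calls, which is why descriptor rings are the textbook use. A probe-time sketch with an invented ring size; dma_alloc_coherent() returns zeroed memory or NULL:

#define MY_RING_BYTES   SZ_4K           /* hypothetical ring size */

static void *demo_ring_alloc(struct device *dev, dma_addr_t *ring_dma)
{
        return dma_alloc_coherent(dev, MY_RING_BYTES, ring_dma, GFP_KERNEL);
}

static void demo_ring_free(struct device *dev, void *ring, dma_addr_t ring_dma)
{
        dma_free_coherent(dev, MY_RING_BYTES, ring, ring_dma);
}
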
488                                                   738 
489 static inline u64 dma_get_mask(struct device *    739 static inline u64 dma_get_mask(struct device *dev)
490 {                                                 740 {
491         if (dev->dma_mask && *dev->dma_mask)      741         if (dev->dma_mask && *dev->dma_mask)
492                 return *dev->dma_mask;            742                 return *dev->dma_mask;
493         return DMA_BIT_MASK(32);                  743         return DMA_BIT_MASK(32);
494 }                                                 744 }
495                                                   745 
496 /*                                                746 /*
497  * Set both the DMA mask and the coherent DMA     747  * Set both the DMA mask and the coherent DMA mask to the same thing.
498  * Note that we don't check the return value f    748  * Note that we don't check the return value from dma_set_coherent_mask()
499  * as the DMA API guarantees that the coherent    749  * as the DMA API guarantees that the coherent DMA mask can be set to
500  * the same or smaller than the streaming DMA     750  * the same or smaller than the streaming DMA mask.
501  */                                               751  */
502 static inline int dma_set_mask_and_coherent(st    752 static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
503 {                                                 753 {
504         int rc = dma_set_mask(dev, mask);         754         int rc = dma_set_mask(dev, mask);
505         if (rc == 0)                              755         if (rc == 0)
506                 dma_set_coherent_mask(dev, mas    756                 dma_set_coherent_mask(dev, mask);
507         return rc;                                757         return rc;
508 }                                                 758 }
509                                                   759 
510 /*                                                760 /*
511  * Similar to the above, except it deals with     761  * Similar to the above, except it deals with the case where the device
512  * does not have dev->dma_mask appropriately s    762  * does not have dev->dma_mask appropriately setup.
513  */                                               763  */
514 static inline int dma_coerce_mask_and_coherent    764 static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
515 {                                                 765 {
516         dev->dma_mask = &dev->coherent_dma_mas    766         dev->dma_mask = &dev->coherent_dma_mask;
517         return dma_set_mask_and_coherent(dev,     767         return dma_set_mask_and_coherent(dev, mask);
518 }                                                 768 }
519                                                   769 
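
Both helpers typically sit at the top of a probe routine: ask for the widest mask the hardware can drive, then fall back once. A hedged sketch of that negotiation:

static int demo_set_masks(struct device *dev)
{
        /* prefer 64-bit addressing, fall back to the 32-bit default */
        int ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));

        if (ret)
                ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
        return ret;     /* -EIO if even 32 bits cannot be supported */
}
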
520 static inline unsigned int dma_get_max_seg_size(struct device *dev)  !! 770 /**
                                                                  >> 771  * dma_addressing_limited - return if the device is addressing limited
                                                                  >> 772  * @dev:        device to check
                                                                  >> 773  *
                                                                  >> 774  * Return %true if the device's DMA mask is too small to address all memory in
                                                                  >> 775  * the system, else %false.  Lack of addressing bits is the prime reason for
                                                                  >> 776  * bounce buffering, but might not be the only one.
                                                                  >> 777  */
                                                                  >> 778 static inline bool dma_addressing_limited(struct device *dev)
521 {                                                                 779 {
522         if (dev->dma_parms && dev->dma_parms->max_segment_size)  !! 780         return min_not_zero(dma_get_mask(dev), dev->bus_dma_mask) <
523                 return dev->dma_parms->max_segment_size;      !! 781                             dma_get_required_mask(dev);
524         return SZ_64K;                                            <<
525 }                                                                 782 }
526                                                   783 
527 static inline void dma_set_max_seg_size(struct device *dev, unsigned int size)  !! 784 #ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
                                                                  >> 785 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                                                                  >> 786                 const struct iommu_ops *iommu, bool coherent);
                                                                  >> 787 #else
                                                                  >> 788 static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
                                                                  >> 789                 u64 size, const struct iommu_ops *iommu, bool coherent)
528 {                                                                 790 {
529         if (WARN_ON_ONCE(!dev->dma_parms))                    <<
530                 return;                                       <<
531         dev->dma_parms->max_segment_size = size;              <<
532 }                                                                 791 }
                                                   >> 792 #endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */
533                                                   793 
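
The dma_parms accessors above publish scatter-gather merging limits to the mapping layer. A two-line sketch with invented limits, assuming the bus code has already allocated dev->dma_parms (the PCI core does), so the WARN_ON_ONCE in the 6.12 setters cannot fire:

        dma_set_max_seg_size(dev, SZ_1M);       /* no sg element above 1 MiB */
        dma_set_seg_boundary(dev, SZ_64K - 1);  /* none may cross 64 KiB */
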
534 static inline unsigned long dma_get_seg_boundary(struct device *dev)  !! 794 #ifdef CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS
                                                                  >> 795 void arch_teardown_dma_ops(struct device *dev);
                                                                  >> 796 #else
                                                                  >> 797 static inline void arch_teardown_dma_ops(struct device *dev)
535 {                                                                 798 {
536         if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)  <<
537                 return dev->dma_parms->segment_boundary_mask;  <<
538         return ULONG_MAX;                                     <<
539 }                                                                 799 }
                                                   >> 800 #endif /* CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS */
540                                                   801 
541 /**                                                           !! 802 static inline unsigned int dma_get_max_seg_size(struct device *dev)
542  * dma_get_seg_boundary_nr_pages - return the segment boundary in "page" units  <<
543  * @dev: device to query the boundary for                     <<
544  * @page_shift: ilog2() of the IOMMU page size                <<
545  *                                                            <<
546  * Return the segment boundary in IOMMU page units (which may be different from  <<
547  * the CPU page size) for the passed in device.               <<
548  *                                                            <<
549  * If @dev is NULL a boundary of U32_MAX is assumed, this case is just for  <<
550  * non-DMA API callers.                                       <<
551  */                                                           <<
552 static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev,  <<
553                 unsigned int page_shift)                      <<
554 {                                                                 803 {
555         if (!dev)                                             !! 804         if (dev->dma_parms && dev->dma_parms->max_segment_size)
556                 return (U32_MAX >> page_shift) + 1;           !! 805                 return dev->dma_parms->max_segment_size;
557         return (dma_get_seg_boundary(dev) >> page_shift) + 1;  !! 806         return SZ_64K;
558 }                                                                 807 }
559                                                   808 
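
As a worked example: for a NULL @dev the helper assumes a boundary mask of U32_MAX, so with @page_shift = 12 it returns (0xffffffff >> 12) + 1 = 0x100000, i.e. the 4 GiB boundary expressed in 4 KiB IOMMU pages.
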
560 static inline void dma_set_seg_boundary(struct device *dev, unsigned long mask)  !! 809 static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
561 {                                                                 810 {
562         if (WARN_ON_ONCE(!dev->dma_parms))                    !! 811         if (dev->dma_parms) {
563                 return;                                       !! 812                 dev->dma_parms->max_segment_size = size;
564         dev->dma_parms->segment_boundary_mask = mask;         !! 813                 return 0;
                                                                  >> 814         }
                                                                  >> 815         return -EIO;
565 }                                                                 816 }
566                                                   817 
567 static inline unsigned int dma_get_min_align_mask(struct device *dev)  !! 818 static inline unsigned long dma_get_seg_boundary(struct device *dev)
568 {                                                                 819 {
569         if (dev->dma_parms)                                   !! 820         if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
570                 return dev->dma_parms->min_align_mask;        !! 821                 return dev->dma_parms->segment_boundary_mask;
571         return 0;                                             !! 822         return DMA_BIT_MASK(32);
572 }                                                                 823 }
573                                                                   824 
574 static inline void dma_set_min_align_mask(struct device *dev,  !! 825 static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
575                 unsigned int min_align_mask)                  <<
576 {                                                                 826 {
577         if (WARN_ON_ONCE(!dev->dma_parms))                    !! 827         if (dev->dma_parms) {
578                 return;                                       !! 828                 dev->dma_parms->segment_boundary_mask = mask;
579         dev->dma_parms->min_align_mask = min_align_mask;      !! 829                 return 0;
                                                                  >> 830         }
                                                                  >> 831         return -EIO;
580 }                                                                 832 }
581                                                   833 
582 #ifndef dma_get_cache_alignment                << 
583 static inline int dma_get_cache_alignment(void    834 static inline int dma_get_cache_alignment(void)
584 {                                                 835 {
585 #ifdef ARCH_HAS_DMA_MINALIGN                   !! 836 #ifdef ARCH_DMA_MINALIGN
586         return ARCH_DMA_MINALIGN;                 837         return ARCH_DMA_MINALIGN;
587 #endif                                            838 #endif
588         return 1;                                 839         return 1;
589 }                                                 840 }
590 #endif                                         !! 841 
                                                   >> 842 #ifdef CONFIG_DMA_DECLARE_COHERENT
                                                   >> 843 int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
                                                   >> 844                                 dma_addr_t device_addr, size_t size);
                                                   >> 845 #else
                                                   >> 846 static inline int
                                                   >> 847 dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
                                                   >> 848                             dma_addr_t device_addr, size_t size)
                                                   >> 849 {
                                                   >> 850         return -ENOSYS;
                                                   >> 851 }
                                                   >> 852 #endif /* CONFIG_DMA_DECLARE_COHERENT */
591                                                   853 
592 static inline void *dmam_alloc_coherent(struct    854 static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
593                 dma_addr_t *dma_handle, gfp_t     855                 dma_addr_t *dma_handle, gfp_t gfp)
594 {                                                 856 {
595         return dmam_alloc_attrs(dev, size, dma    857         return dmam_alloc_attrs(dev, size, dma_handle, gfp,
596                         (gfp & __GFP_NOWARN) ?    858                         (gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
597 }                                                 859 }
598                                                   860 
599 static inline void *dma_alloc_wc(struct device    861 static inline void *dma_alloc_wc(struct device *dev, size_t size,
600                                  dma_addr_t *d    862                                  dma_addr_t *dma_addr, gfp_t gfp)
601 {                                                 863 {
602         unsigned long attrs = DMA_ATTR_WRITE_C    864         unsigned long attrs = DMA_ATTR_WRITE_COMBINE;
603                                                   865 
604         if (gfp & __GFP_NOWARN)                   866         if (gfp & __GFP_NOWARN)
605                 attrs |= DMA_ATTR_NO_WARN;        867                 attrs |= DMA_ATTR_NO_WARN;
606                                                   868 
607         return dma_alloc_attrs(dev, size, dma_    869         return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
608 }                                                 870 }
609                                                   871 
610 static inline void dma_free_wc(struct device *    872 static inline void dma_free_wc(struct device *dev, size_t size,
611                                void *cpu_addr,    873                                void *cpu_addr, dma_addr_t dma_addr)
612 {                                                 874 {
613         return dma_free_attrs(dev, size, cpu_a    875         return dma_free_attrs(dev, size, cpu_addr, dma_addr,
614                               DMA_ATTR_WRITE_C    876                               DMA_ATTR_WRITE_COMBINE);
615 }                                                 877 }
616                                                   878 
617 static inline int dma_mmap_wc(struct device *d    879 static inline int dma_mmap_wc(struct device *dev,
618                               struct vm_area_s    880                               struct vm_area_struct *vma,
619                               void *cpu_addr,     881                               void *cpu_addr, dma_addr_t dma_addr,
620                               size_t size)        882                               size_t size)
621 {                                                 883 {
622         return dma_mmap_attrs(dev, vma, cpu_ad    884         return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
623                               DMA_ATTR_WRITE_C    885                               DMA_ATTR_WRITE_COMBINE);
624 }                                                 886 }
625                                                   887 
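
Write-combined buffers are typically frame-buffer-like regions that userspace maps directly; dma_mmap_wc() reuses the kernel's record of how the buffer was allocated. A sketch with a hypothetical my_fb structure:

struct my_fb {                          /* hypothetical frame buffer */
        struct device *dev;
        void *vaddr;
        dma_addr_t dma;
        size_t size;
};

static int my_fb_alloc(struct my_fb *fb)
{
        fb->vaddr = dma_alloc_wc(fb->dev, fb->size, &fb->dma, GFP_KERNEL);
        return fb->vaddr ? 0 : -ENOMEM;
}

static int my_fb_mmap(struct my_fb *fb, struct vm_area_struct *vma)
{
        return dma_mmap_wc(fb->dev, vma, fb->vaddr, fb->dma, fb->size);
}
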
626 #ifdef CONFIG_NEED_DMA_MAP_STATE                  888 #ifdef CONFIG_NEED_DMA_MAP_STATE
627 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)          889 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
628 #define DEFINE_DMA_UNMAP_LEN(LEN_NAME)            890 #define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
629 #define dma_unmap_addr(PTR, ADDR_NAME)            891 #define dma_unmap_addr(PTR, ADDR_NAME)           ((PTR)->ADDR_NAME)
630 #define dma_unmap_addr_set(PTR, ADDR_NAME, VAL    892 #define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  (((PTR)->ADDR_NAME) = (VAL))
631 #define dma_unmap_len(PTR, LEN_NAME)              893 #define dma_unmap_len(PTR, LEN_NAME)             ((PTR)->LEN_NAME)
632 #define dma_unmap_len_set(PTR, LEN_NAME, VAL)     894 #define dma_unmap_len_set(PTR, LEN_NAME, VAL)    (((PTR)->LEN_NAME) = (VAL))
633 #else                                             895 #else
634 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)          896 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
635 #define DEFINE_DMA_UNMAP_LEN(LEN_NAME)            897 #define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
636 #define dma_unmap_addr(PTR, ADDR_NAME)            898 #define dma_unmap_addr(PTR, ADDR_NAME)           (0)
637 #define dma_unmap_addr_set(PTR, ADDR_NAME, VAL    899 #define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  do { } while (0)
638 #define dma_unmap_len(PTR, LEN_NAME)              900 #define dma_unmap_len(PTR, LEN_NAME)             (0)
639 #define dma_unmap_len_set(PTR, LEN_NAME, VAL)     901 #define dma_unmap_len_set(PTR, LEN_NAME, VAL)    do { } while (0)
640 #endif                                            902 #endif
641                                                   903 
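
These macros let per-descriptor unmap bookkeeping compile to nothing when CONFIG_NEED_DMA_MAP_STATE is unset, while the accessors keep the driver code identical either way. A hedged sketch of the canonical pattern, with a hypothetical my_tx_slot:

struct my_tx_slot {
        DEFINE_DMA_UNMAP_ADDR(addr);    /* dma_addr_t addr, or nothing */
        DEFINE_DMA_UNMAP_LEN(len);      /* __u32 len, or nothing */
};

static void my_tx_slot_fill(struct my_tx_slot *slot, dma_addr_t dma, size_t sz)
{
        dma_unmap_addr_set(slot, addr, dma);
        dma_unmap_len_set(slot, len, sz);
}

static void my_tx_slot_unmap(struct device *dev, struct my_tx_slot *slot)
{
        dma_unmap_single(dev, dma_unmap_addr(slot, addr),
                         dma_unmap_len(slot, len), DMA_TO_DEVICE);
}
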
642 #endif /* _LINUX_DMA_MAPPING_H */              !! 904 #endif
643                                                   905 
