
TOMOYO Linux Cross Reference
Linux/include/linux/dma-mapping.h


Diff markup

Differences between /include/linux/dma-mapping.h (Version linux-6.12-rc7) and /include/linux/dma-mapping.h (Version linux-4.9.337)


  1 /* SPDX-License-Identifier: GPL-2.0 */         << 
  2 #ifndef _LINUX_DMA_MAPPING_H                        1 #ifndef _LINUX_DMA_MAPPING_H
  3 #define _LINUX_DMA_MAPPING_H                        2 #define _LINUX_DMA_MAPPING_H
  4                                                     3 
  5 #include <linux/cache.h>                       << 
  6 #include <linux/sizes.h>                            4 #include <linux/sizes.h>
  7 #include <linux/string.h>                           5 #include <linux/string.h>
  8 #include <linux/device.h>                           6 #include <linux/device.h>
  9 #include <linux/err.h>                              7 #include <linux/err.h>
                                                   >>   8 #include <linux/dma-debug.h>
 10 #include <linux/dma-direction.h>                    9 #include <linux/dma-direction.h>
 11 #include <linux/scatterlist.h>                     10 #include <linux/scatterlist.h>
                                                   >>  11 #include <linux/kmemcheck.h>
 12 #include <linux/bug.h>                             12 #include <linux/bug.h>
 13 #include <linux/mem_encrypt.h>                 << 
 14                                                    13 
 15 /**                                                14 /**
 16  * List of possible attributes associated with     15  * List of possible attributes associated with a DMA mapping. The semantics
 17  * of each attribute should be defined in Docu !!  16  * of each attribute should be defined in Documentation/DMA-attributes.txt.
                                                   >>  17  *
                                                   >>  18  * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
                                                   >>  19  * forces all pending DMA writes to complete.
 18  */                                                20  */
 19                                                !!  21 #define DMA_ATTR_WRITE_BARRIER          (1UL << 0)
 20 /*                                                 22 /*
 21  * DMA_ATTR_WEAK_ORDERING: Specifies that read     23  * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 22  * may be weakly ordered, that is that reads a     24  * may be weakly ordered, that is that reads and writes may pass each other.
 23  */                                                25  */
 24 #define DMA_ATTR_WEAK_ORDERING          (1UL <     26 #define DMA_ATTR_WEAK_ORDERING          (1UL << 1)
 25 /*                                                 27 /*
 26  * DMA_ATTR_WRITE_COMBINE: Specifies that writ     28  * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 27  * buffered to improve performance.                29  * buffered to improve performance.
 28  */                                                30  */
 29 #define DMA_ATTR_WRITE_COMBINE          (1UL <     31 #define DMA_ATTR_WRITE_COMBINE          (1UL << 2)
 30 /*                                                 32 /*
                                                   >>  33  * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
                                                   >>  34  * consistent or non-consistent memory as it sees fit.
                                                   >>  35  */
                                                   >>  36 #define DMA_ATTR_NON_CONSISTENT         (1UL << 3)
                                                   >>  37 /*
 31  * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platfo     38  * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 32  * virtual mapping for the allocated buffer.       39  * virtual mapping for the allocated buffer.
 33  */                                                40  */
 34 #define DMA_ATTR_NO_KERNEL_MAPPING      (1UL <     41 #define DMA_ATTR_NO_KERNEL_MAPPING      (1UL << 4)
 35 /*                                                 42 /*
 36  * DMA_ATTR_SKIP_CPU_SYNC: Allows platform cod     43  * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 37  * the CPU cache for the given buffer assuming     44  * the CPU cache for the given buffer assuming that it has already been
 38  * transferred to 'device' domain.                 45  * transferred to the 'device' domain.
 39  */                                                46  */
 40 #define DMA_ATTR_SKIP_CPU_SYNC          (1UL <     47 #define DMA_ATTR_SKIP_CPU_SYNC          (1UL << 5)
 41 /*                                                 48 /*
 42  * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguou     49  * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 43  * in physical memory.                             50  * in physical memory.
 44  */                                                51  */
 45 #define DMA_ATTR_FORCE_CONTIGUOUS       (1UL <     52 #define DMA_ATTR_FORCE_CONTIGUOUS       (1UL << 6)
 46 /*                                                 53 /*
 47  * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint     54  * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 48  * that it's probably not worth the time to tr     55  * that it's probably not worth the time to try to allocate memory in a way
 49  * that gives better TLB efficiency.               56  * that gives better TLB efficiency.
 50  */                                                57  */
 51 #define DMA_ATTR_ALLOC_SINGLE_PAGES     (1UL <     58 #define DMA_ATTR_ALLOC_SINGLE_PAGES     (1UL << 7)
 52 /*                                                 59 /*
 53  * DMA_ATTR_NO_WARN: This tells the DMA-mappin     60  * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 54  * allocation failure reports (similarly to __     61  * allocation failure reports (similarly to __GFP_NOWARN).
 55  */                                                62  */
 56 #define DMA_ATTR_NO_WARN        (1UL << 8)         63 #define DMA_ATTR_NO_WARN        (1UL << 8)
 57                                                    64 
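As context for the attribute flags above: they are OR'ed into the attrs argument of the *_attrs variants of the mapping API. A minimal sketch, assuming a hypothetical driver that already holds a struct device *dev and has a fallback path if the allocation fails:

    #include <linux/dma-mapping.h>

    /* Allocate a buffer that may be write-combined; suppress the usual
     * allocation-failure warning because the caller can fall back. */
    static void *alloc_wc_buffer(struct device *dev, size_t size,
                                 dma_addr_t *dma_handle)
    {
            return dma_alloc_attrs(dev, size, dma_handle, GFP_KERNEL,
                                   DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_WARN);
    }
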
 58 /*                                                 65 /*
 59  * DMA_ATTR_PRIVILEGED: used to indicate that  !!  66  * A dma_addr_t can hold any valid DMA or bus address for the platform.
 60  * accessible at an elevated privilege level ( !!  67  * It can be given to a device to use as a DMA source or target.  A CPU cannot
 61  * at least read-only at lesser-privileged lev !!  68  * reference a dma_addr_t directly because there may be translation between
 62  */                                            !!  69  * its physical address space and the bus address space.
 63 #define DMA_ATTR_PRIVILEGED             (1UL < !!  70  */
                                                   >>  71 struct dma_map_ops {
                                                   >>  72         void* (*alloc)(struct device *dev, size_t size,
                                                   >>  73                                 dma_addr_t *dma_handle, gfp_t gfp,
                                                   >>  74                                 unsigned long attrs);
                                                   >>  75         void (*free)(struct device *dev, size_t size,
                                                   >>  76                               void *vaddr, dma_addr_t dma_handle,
                                                   >>  77                               unsigned long attrs);
                                                   >>  78         int (*mmap)(struct device *, struct vm_area_struct *,
                                                   >>  79                           void *, dma_addr_t, size_t,
                                                   >>  80                           unsigned long attrs);
                                                   >>  81 
                                                   >>  82         int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
                                                   >>  83                            dma_addr_t, size_t, unsigned long attrs);
                                                   >>  84 
                                                   >>  85         dma_addr_t (*map_page)(struct device *dev, struct page *page,
                                                   >>  86                                unsigned long offset, size_t size,
                                                   >>  87                                enum dma_data_direction dir,
                                                   >>  88                                unsigned long attrs);
                                                   >>  89         void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
                                                   >>  90                            size_t size, enum dma_data_direction dir,
                                                   >>  91                            unsigned long attrs);
                                                   >>  92         /*
                                                   >>  93          * map_sg returns 0 on error and a value > 0 on success.
                                                   >>  94          * It should never return a value < 0.
                                                   >>  95          */
                                                   >>  96         int (*map_sg)(struct device *dev, struct scatterlist *sg,
                                                   >>  97                       int nents, enum dma_data_direction dir,
                                                   >>  98                       unsigned long attrs);
                                                   >>  99         void (*unmap_sg)(struct device *dev,
                                                   >> 100                          struct scatterlist *sg, int nents,
                                                   >> 101                          enum dma_data_direction dir,
                                                   >> 102                          unsigned long attrs);
                                                   >> 103         dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
                                                   >> 104                                size_t size, enum dma_data_direction dir,
                                                   >> 105                                unsigned long attrs);
                                                   >> 106         void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
                                                   >> 107                            size_t size, enum dma_data_direction dir,
                                                   >> 108                            unsigned long attrs);
                                                   >> 109         void (*sync_single_for_cpu)(struct device *dev,
                                                   >> 110                                     dma_addr_t dma_handle, size_t size,
                                                   >> 111                                     enum dma_data_direction dir);
                                                   >> 112         void (*sync_single_for_device)(struct device *dev,
                                                   >> 113                                        dma_addr_t dma_handle, size_t size,
                                                   >> 114                                        enum dma_data_direction dir);
                                                   >> 115         void (*sync_sg_for_cpu)(struct device *dev,
                                                   >> 116                                 struct scatterlist *sg, int nents,
                                                   >> 117                                 enum dma_data_direction dir);
                                                   >> 118         void (*sync_sg_for_device)(struct device *dev,
                                                   >> 119                                    struct scatterlist *sg, int nents,
                                                   >> 120                                    enum dma_data_direction dir);
                                                   >> 121         int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
                                                   >> 122         int (*dma_supported)(struct device *dev, u64 mask);
                                                   >> 123         int (*set_dma_mask)(struct device *dev, u64 mask);
                                                   >> 124 #ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
                                                   >> 125         u64 (*get_required_mask)(struct device *dev);
                                                   >> 126 #endif
                                                   >> 127         int is_phys;
                                                   >> 128 };
 64                                                   129 
 65 /*                                             !! 130 extern struct dma_map_ops dma_noop_ops;
 66  * A dma_addr_t can hold any valid DMA or bus  << 
 67  * be given to a device to use as a DMA source << 
 68  * given device and there may be a translation << 
 69  * space and the bus address space.            << 
 70  *                                             << 
 71  * DMA_MAPPING_ERROR is the magic error code i << 
 72  * be used directly in drivers, but checked fo << 
 73  * instead.                                    << 
 74  */                                            << 
 75 #define DMA_MAPPING_ERROR               (~(dma << 
 76                                                   131 
 77 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL :    132 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
 78                                                   133 
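The ternary in DMA_BIT_MASK exists because shifting 1ULL by 64 is undefined behaviour in C, so the 64-bit case must be special-cased to ~0ULL. The macro is normally consumed at probe time; a hedged, illustrative sketch for a device limited to 32-bit addressing:

    /* In a hypothetical probe(): declare that the device addresses 32 bits.
     * dma_set_mask_and_coherent() sets both streaming and coherent masks. */
    if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
            return -EIO;    /* platform cannot satisfy the mask */
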
 79 #ifdef CONFIG_DMA_API_DEBUG                    !! 134 #define DMA_MASK_NONE   0x0ULL
 80 void debug_dma_mapping_error(struct device *de !! 135 
 81 void debug_dma_map_single(struct device *dev,  !! 136 static inline int valid_dma_direction(int dma_direction)
 82                 unsigned long len);            << 
 83 #else                                          << 
 84 static inline void debug_dma_mapping_error(str << 
 85                 dma_addr_t dma_addr)           << 
 86 {                                                 137 {
                                                   >> 138         return ((dma_direction == DMA_BIDIRECTIONAL) ||
                                                   >> 139                 (dma_direction == DMA_TO_DEVICE) ||
                                                   >> 140                 (dma_direction == DMA_FROM_DEVICE));
 87 }                                                 141 }
 88 static inline void debug_dma_map_single(struct !! 142 
 89                 unsigned long len)             !! 143 static inline int is_device_dma_capable(struct device *dev)
 90 {                                                 144 {
                                                   >> 145         return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
 91 }                                                 146 }
 92 #endif /* CONFIG_DMA_API_DEBUG */              << 
 93                                                   147 
 94 #ifdef CONFIG_HAS_DMA                          !! 148 #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
 95 static inline int dma_mapping_error(struct dev !! 149 /*
 96 {                                              !!  150  * These three functions are only for the DMA allocator.
 97         debug_dma_mapping_error(dev, dma_addr) !! 151  * Don't use them in device drivers.
                                                   >> 152  */
                                                   >> 153 int dma_alloc_from_coherent(struct device *dev, ssize_t size,
                                                   >> 154                                        dma_addr_t *dma_handle, void **ret);
                                                   >> 155 int dma_release_from_coherent(struct device *dev, int order, void *vaddr);
 98                                                   156 
 99         if (unlikely(dma_addr == DMA_MAPPING_E !! 157 int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
100                 return -ENOMEM;                !! 158                             void *cpu_addr, size_t size, int *ret);
101         return 0;                              !! 159 #else
102 }                                              !! 160 #define dma_alloc_from_coherent(dev, size, handle, ret) (0)
                                                   >> 161 #define dma_release_from_coherent(dev, order, vaddr) (0)
                                                   >> 162 #define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
                                                   >> 163 #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
103                                                   164 
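Addresses returned by the mapping routines are only usable after a dma_mapping_error() check, as the DMA_MAPPING_ERROR comment above implies. A minimal sketch of the canonical pattern (buf and len are hypothetical driver state):

    dma_addr_t addr;

    addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
    if (dma_mapping_error(dev, addr))
            return -ENOMEM;         /* never hand addr to the hardware */
    /* ... program the device with addr; after the transfer completes: */
    dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
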
104 dma_addr_t dma_map_page_attrs(struct device *d !! 165 #ifdef CONFIG_HAS_DMA
105                 size_t offset, size_t size, en !! 166 #include <asm/dma-mapping.h>
106                 unsigned long attrs);          !! 167 #else
107 void dma_unmap_page_attrs(struct device *dev,  !! 168 /*
108                 enum dma_data_direction dir, u !! 169  * Define the dma api to allow compilation but not linking of
109 unsigned int dma_map_sg_attrs(struct device *d !! 170  * dma dependent code.  Code that depends on the dma-mapping
110                 int nents, enum dma_data_direc !! 171  * API needs to set 'depends on HAS_DMA' in its Kconfig
111 void dma_unmap_sg_attrs(struct device *dev, st !! 172  */
112                                       int nent !! 173 extern struct dma_map_ops bad_dma_ops;
113                                       unsigned !! 174 static inline struct dma_map_ops *get_dma_ops(struct device *dev)
114 int dma_map_sgtable(struct device *dev, struct << 
115                 enum dma_data_direction dir, u << 
116 dma_addr_t dma_map_resource(struct device *dev << 
117                 size_t size, enum dma_data_dir << 
118 void dma_unmap_resource(struct device *dev, dm << 
119                 enum dma_data_direction dir, u << 
120 void *dma_alloc_attrs(struct device *dev, size << 
121                 gfp_t flag, unsigned long attr << 
122 void dma_free_attrs(struct device *dev, size_t << 
123                 dma_addr_t dma_handle, unsigne << 
124 void *dmam_alloc_attrs(struct device *dev, siz << 
125                 gfp_t gfp, unsigned long attrs << 
126 void dmam_free_coherent(struct device *dev, si << 
127                 dma_addr_t dma_handle);        << 
128 int dma_get_sgtable_attrs(struct device *dev,  << 
129                 void *cpu_addr, dma_addr_t dma << 
130                 unsigned long attrs);          << 
131 int dma_mmap_attrs(struct device *dev, struct  << 
132                 void *cpu_addr, dma_addr_t dma << 
133                 unsigned long attrs);          << 
134 bool dma_can_mmap(struct device *dev);         << 
135 bool dma_pci_p2pdma_supported(struct device *d << 
136 int dma_set_mask(struct device *dev, u64 mask) << 
137 int dma_set_coherent_mask(struct device *dev,  << 
138 u64 dma_get_required_mask(struct device *dev); << 
139 bool dma_addressing_limited(struct device *dev << 
140 size_t dma_max_mapping_size(struct device *dev << 
141 size_t dma_opt_mapping_size(struct device *dev << 
142 unsigned long dma_get_merge_boundary(struct de << 
143 struct sg_table *dma_alloc_noncontiguous(struc << 
144                 enum dma_data_direction dir, g << 
145 void dma_free_noncontiguous(struct device *dev << 
146                 struct sg_table *sgt, enum dma << 
147 void *dma_vmap_noncontiguous(struct device *de << 
148                 struct sg_table *sgt);         << 
149 void dma_vunmap_noncontiguous(struct device *d << 
150 int dma_mmap_noncontiguous(struct device *dev, << 
151                 size_t size, struct sg_table * << 
152 #else /* CONFIG_HAS_DMA */                     << 
153 static inline dma_addr_t dma_map_page_attrs(st << 
154                 struct page *page, size_t offs << 
155                 enum dma_data_direction dir, u << 
156 {                                              << 
157         return DMA_MAPPING_ERROR;              << 
158 }                                              << 
159 static inline void dma_unmap_page_attrs(struct << 
160                 size_t size, enum dma_data_dir << 
161 {                                              << 
162 }                                              << 
163 static inline unsigned int dma_map_sg_attrs(st << 
164                 struct scatterlist *sg, int ne << 
165                 unsigned long attrs)           << 
166 {                                              << 
167         return 0;                              << 
168 }                                              << 
169 static inline void dma_unmap_sg_attrs(struct d << 
170                 struct scatterlist *sg, int ne << 
171                 unsigned long attrs)           << 
172 {                                              << 
173 }                                              << 
174 static inline int dma_map_sgtable(struct devic << 
175                 enum dma_data_direction dir, u << 
176 {                                              << 
177         return -EOPNOTSUPP;                    << 
178 }                                              << 
179 static inline dma_addr_t dma_map_resource(stru << 
180                 phys_addr_t phys_addr, size_t  << 
181                 unsigned long attrs)           << 
182 {                                              << 
183         return DMA_MAPPING_ERROR;              << 
184 }                                              << 
185 static inline void dma_unmap_resource(struct d << 
186                 size_t size, enum dma_data_dir << 
187 {                                              << 
188 }                                              << 
189 static inline int dma_mapping_error(struct dev << 
190 {                                              << 
191         return -ENOMEM;                        << 
192 }                                              << 
193 static inline void *dma_alloc_attrs(struct dev << 
194                 dma_addr_t *dma_handle, gfp_t  << 
195 {                                              << 
196         return NULL;                           << 
197 }                                              << 
198 static void dma_free_attrs(struct device *dev, << 
199                 dma_addr_t dma_handle, unsigne << 
200 {                                              << 
201 }                                              << 
202 static inline void *dmam_alloc_attrs(struct de << 
203                 dma_addr_t *dma_handle, gfp_t  << 
204 {                                              << 
205         return NULL;                           << 
206 }                                              << 
207 static inline void dmam_free_coherent(struct d << 
208                 void *vaddr, dma_addr_t dma_ha << 
209 {                                              << 
210 }                                              << 
211 static inline int dma_get_sgtable_attrs(struct << 
212                 struct sg_table *sgt, void *cp << 
213                 size_t size, unsigned long att << 
214 {                                              << 
215         return -ENXIO;                         << 
216 }                                              << 
217 static inline int dma_mmap_attrs(struct device << 
218                 void *cpu_addr, dma_addr_t dma << 
219                 unsigned long attrs)           << 
220 {                                              << 
221         return -ENXIO;                         << 
222 }                                              << 
223 static inline bool dma_can_mmap(struct device  << 
224 {                                              << 
225         return false;                          << 
226 }                                              << 
227 static inline bool dma_pci_p2pdma_supported(st << 
228 {                                              << 
229         return false;                          << 
230 }                                              << 
231 static inline int dma_set_mask(struct device * << 
232 {                                              << 
233         return -EIO;                           << 
234 }                                              << 
235 static inline int dma_set_coherent_mask(struct << 
236 {                                              << 
237         return -EIO;                           << 
238 }                                              << 
239 static inline u64 dma_get_required_mask(struct << 
240 {                                              << 
241         return 0;                              << 
242 }                                              << 
243 static inline bool dma_addressing_limited(stru << 
244 {                                              << 
245         return false;                          << 
246 }                                              << 
247 static inline size_t dma_max_mapping_size(stru << 
248 {                                              << 
249         return 0;                              << 
250 }                                              << 
251 static inline size_t dma_opt_mapping_size(stru << 
252 {                                              << 
253         return 0;                              << 
254 }                                              << 
255 static inline unsigned long dma_get_merge_boun << 
256 {                                              << 
257         return 0;                              << 
258 }                                              << 
259 static inline struct sg_table *dma_alloc_nonco << 
260                 size_t size, enum dma_data_dir << 
261                 unsigned long attrs)           << 
262 {                                              << 
263         return NULL;                           << 
264 }                                              << 
265 static inline void dma_free_noncontiguous(stru << 
266                 struct sg_table *sgt, enum dma << 
267 {                                                 175 {
                                                   >> 176         return &bad_dma_ops;
268 }                                                 177 }
269 static inline void *dma_vmap_noncontiguous(str !! 178 #endif
270                 struct sg_table *sgt)          !! 179 
271 {                                              !! 180 static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
272         return NULL;                           !! 181                                               size_t size,
                                                   >> 182                                               enum dma_data_direction dir,
                                                   >> 183                                               unsigned long attrs)
                                                   >> 184 {
                                                   >> 185         struct dma_map_ops *ops = get_dma_ops(dev);
                                                   >> 186         dma_addr_t addr;
                                                   >> 187 
                                                   >> 188         kmemcheck_mark_initialized(ptr, size);
                                                   >> 189         BUG_ON(!valid_dma_direction(dir));
                                                   >> 190         addr = ops->map_page(dev, virt_to_page(ptr),
                                                   >> 191                              offset_in_page(ptr), size,
                                                   >> 192                              dir, attrs);
                                                   >> 193         debug_dma_map_page(dev, virt_to_page(ptr),
                                                   >> 194                            offset_in_page(ptr), size,
                                                   >> 195                            dir, addr, true);
                                                   >> 196         return addr;
273 }                                                 197 }
274 static inline void dma_vunmap_noncontiguous(st !! 198 
                                                   >> 199 static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
                                                   >> 200                                           size_t size,
                                                   >> 201                                           enum dma_data_direction dir,
                                                   >> 202                                           unsigned long attrs)
275 {                                                 203 {
                                                   >> 204         struct dma_map_ops *ops = get_dma_ops(dev);
                                                   >> 205 
                                                   >> 206         BUG_ON(!valid_dma_direction(dir));
                                                   >> 207         if (ops->unmap_page)
                                                   >> 208                 ops->unmap_page(dev, addr, size, dir, attrs);
                                                   >> 209         debug_dma_unmap_page(dev, addr, size, dir, true);
276 }                                                 210 }
277 static inline int dma_mmap_noncontiguous(struc !! 211 
278                 struct vm_area_struct *vma, si !! 212 /*
                                                   >> 213  * dma_maps_sg_attrs returns 0 on error and > 0 on success.
                                                   >> 214  * It should never return a value < 0.
                                                   >> 215  */
                                                   >> 216 static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
                                                   >> 217                                    int nents, enum dma_data_direction dir,
                                                   >> 218                                    unsigned long attrs)
279 {                                                 219 {
280         return -EINVAL;                        !! 220         struct dma_map_ops *ops = get_dma_ops(dev);
281 }                                              !! 221         int i, ents;
282 #endif /* CONFIG_HAS_DMA */                    !! 222         struct scatterlist *s;
283                                                   223 
284 #if defined(CONFIG_HAS_DMA) && defined(CONFIG_ !! 224         for_each_sg(sg, s, nents, i)
285 void __dma_sync_single_for_cpu(struct device * !! 225                 kmemcheck_mark_initialized(sg_virt(s), s->length);
286                 enum dma_data_direction dir);  !! 226         BUG_ON(!valid_dma_direction(dir));
287 void __dma_sync_single_for_device(struct devic !! 227         ents = ops->map_sg(dev, sg, nents, dir, attrs);
288                 size_t size, enum dma_data_dir !! 228         BUG_ON(ents < 0);
289 void __dma_sync_sg_for_cpu(struct device *dev, !! 229         debug_dma_map_sg(dev, sg, nents, ents, dir);
290                 int nelems, enum dma_data_dire << 
291 void __dma_sync_sg_for_device(struct device *d << 
292                 int nelems, enum dma_data_dire << 
293 bool __dma_need_sync(struct device *dev, dma_a << 
294                                                   230 
295 static inline bool dma_dev_need_sync(const str !! 231         return ents;
296 {                                              << 
297         /* Always call DMA sync operations whe << 
298         return !dev->dma_skip_sync || IS_ENABL << 
299 }                                                 232 }
300                                                   233 
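Because map_sg may coalesce entries, hardware descriptors must be built from the returned count, while dma_unmap_sg() must still be called with the original nents. A hedged sketch (write_desc() is a hypothetical per-descriptor helper):

    struct scatterlist *s;
    int i, count;

    count = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
    if (count == 0)
            return -EIO;            /* failure is reported as 0, never < 0 */

    for_each_sg(sgl, s, count, i)
            write_desc(i, sg_dma_address(s), sg_dma_len(s));

    /* ... later: unmap with the original nents, not the mapped count */
    dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
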
301 static inline void dma_sync_single_for_cpu(str !! 234 static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
302                 size_t size, enum dma_data_dir !! 235                                       int nents, enum dma_data_direction dir,
                                                   >> 236                                       unsigned long attrs)
303 {                                                 237 {
304         if (dma_dev_need_sync(dev))            !! 238         struct dma_map_ops *ops = get_dma_ops(dev);
305                 __dma_sync_single_for_cpu(dev, << 
306 }                                              << 
307                                                   239 
308 static inline void dma_sync_single_for_device( !! 240         BUG_ON(!valid_dma_direction(dir));
309                 dma_addr_t addr, size_t size,  !! 241         debug_dma_unmap_sg(dev, sg, nents, dir);
310 {                                              !! 242         if (ops->unmap_sg)
311         if (dma_dev_need_sync(dev))            !! 243                 ops->unmap_sg(dev, sg, nents, dir, attrs);
312                 __dma_sync_single_for_device(d << 
313 }                                                 244 }
314                                                   245 
315 static inline void dma_sync_sg_for_cpu(struct  !! 246 static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
316                 struct scatterlist *sg, int ne !! 247                                       size_t offset, size_t size,
                                                   >> 248                                       enum dma_data_direction dir)
317 {                                                 249 {
318         if (dma_dev_need_sync(dev))            !! 250         struct dma_map_ops *ops = get_dma_ops(dev);
319                 __dma_sync_sg_for_cpu(dev, sg, !! 251         dma_addr_t addr;
                                                   >> 252 
                                                   >> 253         kmemcheck_mark_initialized(page_address(page) + offset, size);
                                                   >> 254         BUG_ON(!valid_dma_direction(dir));
                                                   >> 255         addr = ops->map_page(dev, page, offset, size, dir, 0);
                                                   >> 256         debug_dma_map_page(dev, page, offset, size, dir, addr, false);
                                                   >> 257 
                                                   >> 258         return addr;
320 }                                                 259 }
321                                                   260 
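The sync calls transfer ownership of a streaming mapping between device and CPU; the CPU may only touch the buffer between the *_for_cpu and *_for_device calls. A sketch, assuming a hypothetical receive path:

    /* The device has DMA'd into the buffer; claim it for the CPU. */
    dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);
    process_rx(buf, len);           /* hypothetical CPU-side consumer */
    /* Hand the same mapping back to the device for the next transfer. */
    dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);
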
322 static inline void dma_sync_sg_for_device(stru !! 261 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
323                 struct scatterlist *sg, int ne !! 262                                   size_t size, enum dma_data_direction dir)
324 {                                                 263 {
325         if (dma_dev_need_sync(dev))            !! 264         struct dma_map_ops *ops = get_dma_ops(dev);
326                 __dma_sync_sg_for_device(dev,  !! 265 
                                                   >> 266         BUG_ON(!valid_dma_direction(dir));
                                                   >> 267         if (ops->unmap_page)
                                                   >> 268                 ops->unmap_page(dev, addr, size, dir, 0);
                                                   >> 269         debug_dma_unmap_page(dev, addr, size, dir, false);
327 }                                                 270 }
328                                                   271 
329 static inline bool dma_need_sync(struct device !! 272 static inline dma_addr_t dma_map_resource(struct device *dev,
                                                   >> 273                                           phys_addr_t phys_addr,
                                                   >> 274                                           size_t size,
                                                   >> 275                                           enum dma_data_direction dir,
                                                   >> 276                                           unsigned long attrs)
330 {                                                 277 {
331         return dma_dev_need_sync(dev) ? __dma_ !! 278         struct dma_map_ops *ops = get_dma_ops(dev);
                                                   >> 279         dma_addr_t addr;
                                                   >> 280 
                                                   >> 281         BUG_ON(!valid_dma_direction(dir));
                                                   >> 282 
                                                   >> 283         /* Don't allow RAM to be mapped */
                                                   >> 284         BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));
                                                   >> 285 
                                                   >> 286         addr = phys_addr;
                                                   >> 287         if (ops->map_resource)
                                                   >> 288                 addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
                                                   >> 289 
                                                   >> 290         debug_dma_map_resource(dev, phys_addr, size, dir, addr);
                                                   >> 291 
                                                   >> 292         return addr;
332 }                                                 293 }
333 #else /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_S !! 294 
334 static inline bool dma_dev_need_sync(const str !! 295 static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
                                                   >> 296                                       size_t size, enum dma_data_direction dir,
                                                   >> 297                                       unsigned long attrs)
335 {                                                 298 {
336         return false;                          !! 299         struct dma_map_ops *ops = get_dma_ops(dev);
                                                   >> 300 
                                                   >> 301         BUG_ON(!valid_dma_direction(dir));
                                                   >> 302         if (ops->unmap_resource)
                                                   >> 303                 ops->unmap_resource(dev, addr, size, dir, attrs);
                                                   >> 304         debug_dma_unmap_resource(dev, addr, size, dir);
337 }                                                 305 }
                                                   >> 306 
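dma_map_resource() maps a physical MMIO region, never RAM (hence the pfn_valid() BUG_ON on the old side), typically so a DMA engine can target a peripheral FIFO directly. A hedged sketch, where res is assumed to be the peripheral's struct resource:

    dma_addr_t fifo;

    fifo = dma_map_resource(dev, res->start, resource_size(res),
                            DMA_TO_DEVICE, 0);
    if (dma_mapping_error(dev, fifo))
            return -ENXIO;
    /* ... point the DMA engine at fifo; on teardown: */
    dma_unmap_resource(dev, fifo, resource_size(res), DMA_TO_DEVICE, 0);
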
338 static inline void dma_sync_single_for_cpu(str    307 static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
339                 size_t size, enum dma_data_dir !! 308                                            size_t size,
                                                   >> 309                                            enum dma_data_direction dir)
340 {                                                 310 {
                                                   >> 311         struct dma_map_ops *ops = get_dma_ops(dev);
                                                   >> 312 
                                                   >> 313         BUG_ON(!valid_dma_direction(dir));
                                                   >> 314         if (ops->sync_single_for_cpu)
                                                   >> 315                 ops->sync_single_for_cpu(dev, addr, size, dir);
                                                   >> 316         debug_dma_sync_single_for_cpu(dev, addr, size, dir);
341 }                                                 317 }
                                                   >> 318 
342 static inline void dma_sync_single_for_device(    319 static inline void dma_sync_single_for_device(struct device *dev,
343                 dma_addr_t addr, size_t size,  !! 320                                               dma_addr_t addr, size_t size,
                                                   >> 321                                               enum dma_data_direction dir)
344 {                                                 322 {
                                                   >> 323         struct dma_map_ops *ops = get_dma_ops(dev);
                                                   >> 324 
                                                   >> 325         BUG_ON(!valid_dma_direction(dir));
                                                   >> 326         if (ops->sync_single_for_device)
                                                   >> 327                 ops->sync_single_for_device(dev, addr, size, dir);
                                                   >> 328         debug_dma_sync_single_for_device(dev, addr, size, dir);
                                                   >> 329 }
                                                   >> 330 
                                                   >> 331 static inline void dma_sync_single_range_for_cpu(struct device *dev,
                                                   >> 332                                                  dma_addr_t addr,
                                                   >> 333                                                  unsigned long offset,
                                                   >> 334                                                  size_t size,
                                                   >> 335                                                  enum dma_data_direction dir)
                                                   >> 336 {
                                                   >> 337         const struct dma_map_ops *ops = get_dma_ops(dev);
                                                   >> 338 
                                                   >> 339         BUG_ON(!valid_dma_direction(dir));
                                                   >> 340         if (ops->sync_single_for_cpu)
                                                   >> 341                 ops->sync_single_for_cpu(dev, addr + offset, size, dir);
                                                   >> 342         debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
345 }                                                 343 }
346 static inline void dma_sync_sg_for_cpu(struct  !! 344 
347                 struct scatterlist *sg, int ne !! 345 static inline void dma_sync_single_range_for_device(struct device *dev,
                                                   >> 346                                                     dma_addr_t addr,
                                                   >> 347                                                     unsigned long offset,
                                                   >> 348                                                     size_t size,
                                                   >> 349                                                     enum dma_data_direction dir)
348 {                                                 350 {
                                                   >> 351         const struct dma_map_ops *ops = get_dma_ops(dev);
                                                   >> 352 
                                                   >> 353         BUG_ON(!valid_dma_direction(dir));
                                                   >> 354         if (ops->sync_single_for_device)
                                                   >> 355                 ops->sync_single_for_device(dev, addr + offset, size, dir);
                                                   >> 356         debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
349 }                                                 357 }
350 static inline void dma_sync_sg_for_device(stru !! 358 
351                 struct scatterlist *sg, int ne !! 359 static inline void
                                                   >> 360 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                                                   >> 361                     int nelems, enum dma_data_direction dir)
352 {                                                 362 {
                                                   >> 363         struct dma_map_ops *ops = get_dma_ops(dev);
                                                   >> 364 
                                                   >> 365         BUG_ON(!valid_dma_direction(dir));
                                                   >> 366         if (ops->sync_sg_for_cpu)
                                                   >> 367                 ops->sync_sg_for_cpu(dev, sg, nelems, dir);
                                                   >> 368         debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
353 }                                                 369 }
354 static inline bool dma_need_sync(struct device !! 370 
                                                   >> 371 static inline void
                                                   >> 372 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                                                   >> 373                        int nelems, enum dma_data_direction dir)
355 {                                                 374 {
356         return false;                          !! 375         struct dma_map_ops *ops = get_dma_ops(dev);
                                                   >> 376 
                                                   >> 377         BUG_ON(!valid_dma_direction(dir));
                                                   >> 378         if (ops->sync_sg_for_device)
                                                   >> 379                 ops->sync_sg_for_device(dev, sg, nelems, dir);
                                                   >> 380         debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
                                                   >> 381 
357 }                                                 382 }
358 #endif /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_ << 
359                                                   383 
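On the new (6.12) side each sync helper is split into an inline dma_dev_need_sync() test plus an out-of-line __dma_sync_*() body, so fully coherent devices pay only a flag check. dma_need_sync() exposes the same answer per mapping, letting hot paths cache it; a sketch under the assumption of a page_pool-style recycler (the field name is hypothetical):

    /* At mapping time, remember whether this mapping ever needs syncing. */
    pool->needs_sync = dma_need_sync(dev, addr);

    /* In the hot receive path: */
    if (pool->needs_sync)
            dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);
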
360 struct page *dma_alloc_pages(struct device *de !! 384 #define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
361                 dma_addr_t *dma_handle, enum d !! 385 #define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
362 void dma_free_pages(struct device *dev, size_t !! 386 #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
363                 dma_addr_t dma_handle, enum dm !! 387 #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
364 int dma_mmap_pages(struct device *dev, struct  !! 388 
365                 size_t size, struct page *page !! 389 extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
                                                   >> 390                            void *cpu_addr, dma_addr_t dma_addr, size_t size);
366                                                   391 
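The dma_alloc_pages() family on the new side returns non-coherent pages that the caller syncs explicitly, sitting between coherent allocations and streaming mappings. A hedged sketch of an allocation the CPU fills before handing to the device:

    struct page *page;
    dma_addr_t handle;

    page = dma_alloc_pages(dev, size, &handle, DMA_TO_DEVICE, GFP_KERNEL);
    if (!page)
            return -ENOMEM;
    fill_buffer(page_address(page), size);      /* hypothetical producer */
    dma_sync_single_for_device(dev, handle, size, DMA_TO_DEVICE);
    /* ... when finished: */
    dma_free_pages(dev, size, page, handle, DMA_TO_DEVICE);
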
367 static inline void *dma_alloc_noncoherent(stru !! 392 void *dma_common_contiguous_remap(struct page *page, size_t size,
368                 dma_addr_t *dma_handle, enum d !! 393                         unsigned long vm_flags,
369 {                                              !! 394                         pgprot_t prot, const void *caller);
370         struct page *page = dma_alloc_pages(de !! 395 
371         return page ? page_address(page) : NUL !! 396 void *dma_common_pages_remap(struct page **pages, size_t size,
                                                   >> 397                         unsigned long vm_flags, pgprot_t prot,
                                                   >> 398                         const void *caller);
                                                   >> 399 void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
                                                   >> 400 
                                                   >> 401 /**
                                                   >> 402  * dma_mmap_attrs - map a coherent DMA allocation into user space
                                                   >> 403  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
                                                   >> 404  * @vma: vm_area_struct describing requested user mapping
                                                   >> 405  * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
                                                   >> 406  * @handle: device-view address returned from dma_alloc_attrs
                                                   >> 407  * @size: size of memory originally requested in dma_alloc_attrs
                                                   >> 408  * @attrs: attributes of mapping properties requested in dma_alloc_attrs
                                                   >> 409  *
                                                   >> 410  * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
                                                   >> 411  * into user space.  The coherent DMA buffer must not be freed by the
                                                   >> 412  * driver until the user space mapping has been released.
                                                   >> 413  */
                                                   >> 414 static inline int
                                                   >> 415 dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
                                                   >> 416                dma_addr_t dma_addr, size_t size, unsigned long attrs)
                                                   >> 417 {
                                                   >> 418         struct dma_map_ops *ops = get_dma_ops(dev);
                                                   >> 419         BUG_ON(!ops);
                                                   >> 420         if (ops->mmap)
                                                   >> 421                 return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
                                                   >> 422         return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
372 }                                                 423 }
373                                                   424 
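The usual caller of dma_mmap_attrs()/dma_mmap_coherent() is a driver's .mmap file operation, with the caveat from the kerneldoc above that the buffer must outlive the user mapping. A hedged sketch (drv and its fields are hypothetical):

    static int drv_mmap(struct file *file, struct vm_area_struct *vma)
    {
            struct drv_state *drv = file->private_data;

            /* cpu_addr/dma_handle/size came from dma_alloc_coherent(). */
            return dma_mmap_coherent(drv->dev, vma, drv->cpu_addr,
                                     drv->dma_handle, drv->size);
    }
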
374 static inline void dma_free_noncoherent(struct !! 425 #define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
375                 void *vaddr, dma_addr_t dma_ha !! 426 
376 {                                              !! 427 int
377         dma_free_pages(dev, size, virt_to_page !! 428 dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
                                                   >> 429                        void *cpu_addr, dma_addr_t dma_addr, size_t size);
                                                   >> 430 
                                                   >> 431 static inline int
                                                   >> 432 dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
                                                   >> 433                       dma_addr_t dma_addr, size_t size,
                                                   >> 434                       unsigned long attrs)
                                                   >> 435 {
                                                   >> 436         struct dma_map_ops *ops = get_dma_ops(dev);
                                                   >> 437         BUG_ON(!ops);
                                                   >> 438         if (ops->get_sgtable)
                                                   >> 439                 return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
                                                   >> 440                                         attrs);
                                                   >> 441         return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
378 }                                                 442 }
379                                                   443 
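dma_get_sgtable() builds an sg_table describing an existing coherent allocation, most commonly so the buffer can be handed to another device (for example through a dma-buf exporter). A minimal sketch:

    struct sg_table sgt;
    int ret;

    ret = dma_get_sgtable(dev, &sgt, cpu_addr, dma_handle, size);
    if (ret)
            return ret;
    /* ... pass sgt to the consumer; when done: */
    sg_free_table(&sgt);
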
380 static inline dma_addr_t dma_map_single_attrs( !! 444 #define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
381                 size_t size, enum dma_data_dir !! 445 
                                                   >> 446 #ifndef arch_dma_alloc_attrs
                                                   >> 447 #define arch_dma_alloc_attrs(dev, flag) (true)
                                                   >> 448 #endif
                                                   >> 449 
                                                   >> 450 static inline void *dma_alloc_attrs(struct device *dev, size_t size,
                                                   >> 451                                        dma_addr_t *dma_handle, gfp_t flag,
                                                   >> 452                                        unsigned long attrs)
382 {                                                 453 {
383         /* DMA must never operate on areas tha !! 454         struct dma_map_ops *ops = get_dma_ops(dev);
384         if (dev_WARN_ONCE(dev, is_vmalloc_addr !! 455         void *cpu_addr;
385                           "rejecting DMA map o !! 456 
386                 return DMA_MAPPING_ERROR;      !! 457         BUG_ON(!ops);
387         debug_dma_map_single(dev, ptr, size);  !! 458 
388         return dma_map_page_attrs(dev, virt_to !! 459         if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
389                         size, dir, attrs);     !! 460                 return cpu_addr;
                                                   >> 461 
                                                   >> 462         if (!arch_dma_alloc_attrs(&dev, &flag))
                                                   >> 463                 return NULL;
                                                   >> 464         if (!ops->alloc)
                                                   >> 465                 return NULL;
                                                   >> 466 
                                                   >> 467         cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
                                                   >> 468         debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
                                                   >> 469         return cpu_addr;
390 }                                                 470 }
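
A minimal usage sketch for the streaming helpers in the 6.12-rc7 column above; foo_hw_start_tx() and the buffer names are hypothetical, and buf must come from kmalloc() since dma_map_single_attrs() rejects vmalloc memory:

        dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        if (dma_mapping_error(dev, handle))     /* mapping may legitimately fail */
                return -ENOMEM;
        foo_hw_start_tx(dev, handle, len);      /* hypothetical: kick the hardware */
        /* ... once the transfer has completed ... */
        dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
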
391                                                   471 
392 static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, !! 472 static inline void dma_free_attrs(struct device *dev, size_t size,
393                 size_t size, enum dma_data_direction dir, unsigned long attrs) !! 473                                      void *cpu_addr, dma_addr_t dma_handle,
                                                   >> 474                                      unsigned long attrs)
394 {                                                 475 {
395         return dma_unmap_page_attrs(dev, addr, size, dir, attrs);      !! 476         struct dma_map_ops *ops = get_dma_ops(dev);
                                                   >> 477 
                                                   >> 478         BUG_ON(!ops);
                                                   >> 479         WARN_ON(irqs_disabled());
                                                   >> 480 
                                                   >> 481         if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
                                                   >> 482                 return;
                                                   >> 483 
                                                   >> 484         if (!ops->free || !cpu_addr)
                                                   >> 485                 return;
                                                   >> 486 
                                                   >> 487         debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
                                                   >> 488         ops->free(dev, size, cpu_addr, dma_handle, attrs);
396 }                                                 489 }
397                                                   490 
398 static inline void dma_sync_single_range_for_cpu(struct device *dev,   !! 491 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
399                 dma_addr_t addr, unsigned long offset, size_t size,    !! 492                 dma_addr_t *dma_handle, gfp_t flag)
400                 enum dma_data_direction dir)   << 
401 {                                                 493 {
402         return dma_sync_single_for_cpu(dev, addr + offset, size, dir); !! 494         return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
403 }                                                 495 }
404                                                   496 
405 static inline void dma_sync_single_range_for_device(struct device *dev, !! 497 static inline void dma_free_coherent(struct device *dev, size_t size,
406                 dma_addr_t addr, unsigned long offset, size_t size,     !! 498                 void *cpu_addr, dma_addr_t dma_handle)
407                 enum dma_data_direction dir)   << 
408 {                                                 499 {
409         return dma_sync_single_for_device(dev, addr + offset, size, dir); !! 500         return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
410 }                                                 501 }
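
The two range helpers simply add @offset to an existing mapping, so a driver can sync one slice of a larger mapped area instead of the whole thing. A sketch, with off, desc_len, and process_descriptor() hypothetical:

        /* claim bytes [off, off + desc_len) for the CPU */
        dma_sync_single_range_for_cpu(dev, handle, off, desc_len, DMA_FROM_DEVICE);
        process_descriptor(buf + off);          /* hypothetical CPU-side work */
        /* hand the slice back before the device writes it again */
        dma_sync_single_range_for_device(dev, handle, off, desc_len, DMA_FROM_DEVICE);
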
411                                                   502 
412 /**                                            !! 503 static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
413  * dma_unmap_sgtable - Unmap the given buffer for DMA                  !! 504                 dma_addr_t *dma_handle, gfp_t gfp)
414  * @dev:        The device for which to perform the DMA operation      <<
415  * @sgt:        The sg_table object describing the buffer              <<
416  * @dir:        DMA direction                                          <<
417  * @attrs:      Optional DMA attributes for the unmap operation        <<
418  *                                                                     <<
419  * Unmaps a buffer described by a scatterlist stored in the given sg_table <<
420  * object for the @dir DMA operation by the @dev device. After this function <<
421  * the ownership of the buffer is transferred back to the CPU domain.  <<
422  */                                                                    <<
423 static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt, <<
424                 enum dma_data_direction dir, unsigned long attrs)      <<
425 {                                                                         505 {
426         dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs); !! 506         return dma_alloc_attrs(dev, size, dma_handle, gfp,
                                                   >> 507                                DMA_ATTR_NON_CONSISTENT);
427 }                                                 508 }
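
dma_unmap_sgtable() pairs with dma_map_sgtable(), declared earlier in the 6.12-rc7 version of this header; note that the unmap path uses sgt->orig_nents, not the possibly smaller mapped count. A sketch, assuming a pages[] array is already populated:

        struct sg_table sgt;

        if (sg_alloc_table_from_pages(&sgt, pages, n_pages, 0,
                                      n_pages << PAGE_SHIFT, GFP_KERNEL))
                return -ENOMEM;
        if (dma_map_sgtable(dev, &sgt, DMA_BIDIRECTIONAL, 0)) {
                sg_free_table(&sgt);
                return -EIO;
        }
        /* ... program the device from sgt.sgl ... */
        dma_unmap_sgtable(dev, &sgt, DMA_BIDIRECTIONAL, 0);
        sg_free_table(&sgt);
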
428                                                   509 
429 /**                                            !! 510 static inline void dma_free_noncoherent(struct device *dev, size_t size,
430  * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access !! 511                 void *cpu_addr, dma_addr_t dma_handle)
431  * @dev:        The device for which to perform the DMA operation      <<
432  * @sgt:        The sg_table object describing the buffer              <<
433  * @dir:        DMA direction                                          <<
434  *                                                                     <<
435  * Performs the needed cache synchronization and moves the ownership of the <<
436  * buffer back to the CPU domain, so it is safe to perform any access to it <<
437  * by the CPU. Before doing any further DMA operations, one has to transfer <<
438  * the ownership of the buffer back to the DMA domain by calling the   <<
439  * dma_sync_sgtable_for_device().                                      <<
440  */                                                                    <<
441 static inline void dma_sync_sgtable_for_cpu(struct device *dev,        <<
442                 struct sg_table *sgt, enum dma_data_direction dir)     <<
443 {                                                                         512 {
444         dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);      !! 513         dma_free_attrs(dev, size, cpu_addr, dma_handle,
                                                   >> 514                        DMA_ATTR_NON_CONSISTENT);
445 }                                                 515 }
446                                                   516 
447 /**                                            !! 517 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
448  * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA  <<
449  * @dev:        The device for which to perform the DMA operation      <<
450  * @sgt:        The sg_table object describing the buffer              <<
451  * @dir:        DMA direction                                          <<
452  *                                                                     <<
453  * Performs the needed cache synchronization and moves the ownership of the <<
454  * buffer back to the DMA domain, so it is safe to perform the DMA operation. <<
455  * Once finished, one has to call dma_sync_sgtable_for_cpu() or        <<
456  * dma_unmap_sgtable().                                                <<
457  */                                                                    <<
458 static inline void dma_sync_sgtable_for_device(struct device *dev,     <<
459                 struct sg_table *sgt, enum dma_data_direction dir)     <<
460 {                                                                         518 {
461         dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);   !! 519         debug_dma_mapping_error(dev, dma_addr);
462 }                                              << 
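
Together, the two sgtable sync helpers implement the usual ownership ping-pong for a long-lived mapping; consume_buffer() below is hypothetical:

        /* the device has filled the buffer; claim it for the CPU */
        dma_sync_sgtable_for_cpu(dev, sgt, DMA_FROM_DEVICE);
        consume_buffer(sgt);                    /* hypothetical CPU-side use */
        /* return ownership to the device for the next transfer */
        dma_sync_sgtable_for_device(dev, sgt, DMA_FROM_DEVICE);
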
463                                                   520 
464 #define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0) !! 521         if (get_dma_ops(dev)->mapping_error)
465 #define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0) !! 522                 return get_dma_ops(dev)->mapping_error(dev, dma_addr);
466 #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)         <<
467 #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)     <<
468 #define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0) <<
469 #define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0) <<
470 #define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0) <<
471 #define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0) <<
472                                                   523 
473 bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size); !! 524 #ifdef DMA_ERROR_CODE
                                                   >> 525         return dma_addr == DMA_ERROR_CODE;
                                                   >> 526 #else
                                                   >> 527         return 0;
                                                   >> 528 #endif
                                                   >> 529 }
474                                                   530 
475 static inline void *dma_alloc_coherent(struct device *dev, size_t size, !! 531 #ifndef HAVE_ARCH_DMA_SUPPORTED
476                 dma_addr_t *dma_handle, gfp_t gfp)                      !! 532 static inline int dma_supported(struct device *dev, u64 mask)
477 {                                                                          533 {
478         return dma_alloc_attrs(dev, size, dma_handle, gfp,              !! 534         struct dma_map_ops *ops = get_dma_ops(dev);
479                         (gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);   !! 535
                                                   >> 536         if (!ops)
                                                   >> 537                 return 0;
                                                   >> 538         if (!ops->dma_supported)
                                                   >> 539                 return 1;
                                                   >> 540         return ops->dma_supported(dev, mask);
480 }                                                 541 }
                                                   >> 542 #endif
481                                                   543 
482 static inline void dma_free_coherent(struct device *dev, size_t size,  !! 544 #ifndef HAVE_ARCH_DMA_SET_MASK
483                 void *cpu_addr, dma_addr_t dma_handle)                 !! 545 static inline int dma_set_mask(struct device *dev, u64 mask)
484 {                                                                          546 {
485         return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);     !! 547         struct dma_map_ops *ops = get_dma_ops(dev);
486 }                                              !! 548 
                                                   >> 549         if (ops->set_dma_mask)
                                                   >> 550                 return ops->set_dma_mask(dev, mask);
487                                                   551 
                                                   >> 552         if (!dev->dma_mask || !dma_supported(dev, mask))
                                                   >> 553                 return -EIO;
                                                   >> 554         *dev->dma_mask = mask;
                                                   >> 555         return 0;
                                                   >> 556 }
                                                   >> 557 #endif
488                                                   558 
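
Coherent allocations like dma_alloc_coherent()/dma_free_coherent() above need no sync calls: the CPU and the device see a consistent view for the buffer's lifetime. A sketch of the usual descriptor-ring pattern; ring_size and foo_hw_set_ring_base() are hypothetical:

        dma_addr_t ring_dma;
        void *ring;

        ring = dma_alloc_coherent(dev, ring_size, &ring_dma, GFP_KERNEL);
        if (!ring)
                return -ENOMEM;
        foo_hw_set_ring_base(dev, ring_dma);    /* hypothetical: tell the device */
        /* ... no dma_sync_*() calls needed while the ring is in use ... */
        dma_free_coherent(dev, ring_size, ring, ring_dma);
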
489 static inline u64 dma_get_mask(struct device *    559 static inline u64 dma_get_mask(struct device *dev)
490 {                                                 560 {
491         if (dev->dma_mask && *dev->dma_mask)   !! 561         if (dev && dev->dma_mask && *dev->dma_mask)
492                 return *dev->dma_mask;            562                 return *dev->dma_mask;
493         return DMA_BIT_MASK(32);                  563         return DMA_BIT_MASK(32);
494 }                                                 564 }
495                                                   565 
                                                   >> 566 #ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
                                                   >> 567 int dma_set_coherent_mask(struct device *dev, u64 mask);
                                                   >> 568 #else
                                                   >> 569 static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
                                                   >> 570 {
                                                   >> 571         if (!dma_supported(dev, mask))
                                                   >> 572                 return -EIO;
                                                   >> 573         dev->coherent_dma_mask = mask;
                                                   >> 574         return 0;
                                                   >> 575 }
                                                   >> 576 #endif
                                                   >> 577 
496 /*                                                578 /*
497  * Set both the DMA mask and the coherent DMA     579  * Set both the DMA mask and the coherent DMA mask to the same thing.
498  * Note that we don't check the return value f    580  * Note that we don't check the return value from dma_set_coherent_mask()
499  * as the DMA API guarantees that the coherent    581  * as the DMA API guarantees that the coherent DMA mask can be set to
500  * the same or smaller than the streaming DMA     582  * the same or smaller than the streaming DMA mask.
501  */                                               583  */
502 static inline int dma_set_mask_and_coherent(st    584 static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
503 {                                                 585 {
504         int rc = dma_set_mask(dev, mask);         586         int rc = dma_set_mask(dev, mask);
505         if (rc == 0)                              587         if (rc == 0)
506                 dma_set_coherent_mask(dev, mas    588                 dma_set_coherent_mask(dev, mask);
507         return rc;                                589         return rc;
508 }                                                 590 }
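
A common probe-time use of this helper: try 64-bit addressing first and fall back to 32-bit if the platform rejects it (a sketch; error-handling style varies by driver):

        if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
            dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
                dev_err(dev, "no usable DMA addressing mode\n");
                return -EIO;
        }
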
509                                                   591 
510 /*                                                592 /*
511  * Similar to the above, except it deals with     593  * Similar to the above, except it deals with the case where the device
512  * does not have dev->dma_mask appropriately s    594  * does not have dev->dma_mask appropriately setup.
513  */                                               595  */
514 static inline int dma_coerce_mask_and_coherent    596 static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
515 {                                                 597 {
516         dev->dma_mask = &dev->coherent_dma_mas    598         dev->dma_mask = &dev->coherent_dma_mask;
517         return dma_set_mask_and_coherent(dev,     599         return dma_set_mask_and_coherent(dev, mask);
518 }                                                 600 }
519                                                   601 
                                                   >> 602 extern u64 dma_get_required_mask(struct device *dev);
                                                   >> 603 
                                                   >> 604 #ifndef arch_setup_dma_ops
                                                   >> 605 static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
                                                   >> 606                                       u64 size, const struct iommu_ops *iommu,
                                                   >> 607                                       bool coherent) { }
                                                   >> 608 #endif
                                                   >> 609 
                                                   >> 610 #ifndef arch_teardown_dma_ops
                                                   >> 611 static inline void arch_teardown_dma_ops(struct device *dev) { }
                                                   >> 612 #endif
                                                   >> 613 
520 static inline unsigned int dma_get_max_seg_siz    614 static inline unsigned int dma_get_max_seg_size(struct device *dev)
521 {                                                 615 {
522         if (dev->dma_parms && dev->dma_parms->    616         if (dev->dma_parms && dev->dma_parms->max_segment_size)
523                 return dev->dma_parms->max_seg    617                 return dev->dma_parms->max_segment_size;
524         return SZ_64K;                            618         return SZ_64K;
525 }                                                 619 }
526                                                   620 
527 static inline void dma_set_max_seg_size(struct device *dev, unsigned int size) !! 621 static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
528 {                                                 622 {
529         if (WARN_ON_ONCE(!dev->dma_parms))     !! 623         if (dev->dma_parms) {
530                 return;                        !! 624                 dev->dma_parms->max_segment_size = size;
531         dev->dma_parms->max_segment_size = size;                       !! 625                 return 0;
                                                   >> 626         }
                                                   >> 627         return -EIO;
532 }                                                 628 }
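
A driver whose hardware cannot handle scatterlist elements beyond some limit would cap them at probe time; the 4 KiB value below is hypothetical. Note the diff: the 6.12-rc7 version only warns when the bus code has not allocated dev->dma_parms, where 4.9 returned -EIO:

        dma_set_max_seg_size(dev, SZ_4K);       /* hypothetical hardware limit */
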
533                                                   629 
534 static inline unsigned long dma_get_seg_bounda    630 static inline unsigned long dma_get_seg_boundary(struct device *dev)
535 {                                                 631 {
536         if (dev->dma_parms && dev->dma_parms->    632         if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
537                 return dev->dma_parms->segment    633                 return dev->dma_parms->segment_boundary_mask;
538         return ULONG_MAX;                      !! 634         return DMA_BIT_MASK(32);
539 }                                              << 
540                                                << 
541 /**                                            << 
542  * dma_get_seg_boundary_nr_pages - return the segment boundary in "page" units <<
543  * @dev: device to query the boundary for                              <<
544  * @page_shift: ilog() of the IOMMU page size                          <<
545  *                                                                     <<
546  * Return the segment boundary in IOMMU page units (which may be different from <<
547  * the CPU page size) for the passed in device.                        <<
548  *                                                                     <<
549  * If @dev is NULL a boundary of U32_MAX is assumed, this case is just for <<
550  * non-DMA API callers.                                                <<
551  */                                                                    <<
552 static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev, <<
553                 unsigned int page_shift)                               <<
554 {                                                                      <<
555         if (!dev)                                                      <<
556                 return (U32_MAX >> page_shift) + 1;                    <<
557         return (dma_get_seg_boundary(dev) >> page_shift) + 1;          <<
558 }                                                 635 }
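
Worked example for the helper above: with @dev == NULL and 4 KiB IOMMU pages (page_shift = 12),

        (U32_MAX >> 12) + 1 = (0xffffffff >> 12) + 1 = 0x100000

so a DMA segment may span at most 0x100000 IOMMU pages (4 GiB) without crossing a segment boundary.
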
559                                                   636 
560 static inline void dma_set_seg_boundary(struct device *dev, unsigned long mask) !! 637 static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
561 {                                                                          638 {
562         if (WARN_ON_ONCE(!dev->dma_parms))                              !! 639         if (dev->dma_parms) {
563                 return;                                                 !! 640                 dev->dma_parms->segment_boundary_mask = mask;
564         dev->dma_parms->segment_boundary_mask = mask;                   !! 641                 return 0;
                                                   >> 642         }
                                                   >> 643         return -EIO;
565 }                                                 644 }
566                                                   645 
567 static inline unsigned int dma_get_min_align_mask(struct device *dev)  !! 646 #ifndef dma_max_pfn
                                                   >> 647 static inline unsigned long dma_max_pfn(struct device *dev)
568 {                                                 648 {
569         if (dev->dma_parms)                    !! 649         return *dev->dma_mask >> PAGE_SHIFT;
570                 return dev->dma_parms->min_align_mask;                 <<
571         return 0;                              << 
572 }                                                 650 }
                                                   >> 651 #endif
573                                                   652 
574 static inline void dma_set_min_align_mask(struct device *dev,          !! 653 static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
575                 unsigned int min_align_mask)   !! 654                                         dma_addr_t *dma_handle, gfp_t flag)
576 {                                                 655 {
577         if (WARN_ON_ONCE(!dev->dma_parms))     !! 656         void *ret = dma_alloc_coherent(dev, size, dma_handle,
578                 return;                        !! 657                                        flag | __GFP_ZERO);
579         dev->dma_parms->min_align_mask = min_align_mask;               !! 658         return ret;
580 }                                                 659 }
581                                                   660 
582 #ifndef dma_get_cache_alignment                << 
583 static inline int dma_get_cache_alignment(void    661 static inline int dma_get_cache_alignment(void)
584 {                                                 662 {
585 #ifdef ARCH_HAS_DMA_MINALIGN                   !! 663 #ifdef ARCH_DMA_MINALIGN
586         return ARCH_DMA_MINALIGN;                 664         return ARCH_DMA_MINALIGN;
587 #endif                                            665 #endif
588         return 1;                                 666         return 1;
589 }                                                 667 }
590 #endif                                         << 
591                                                   668 
592 static inline void *dmam_alloc_coherent(struct device *dev, size_t size, !! 669 /* flags for the coherent memory api */
593                 dma_addr_t *dma_handle, gfp_t gfp)                       !! 670 #define DMA_MEMORY_MAP                  0x01
                                                   >> 671 #define DMA_MEMORY_IO                   0x02
                                                   >> 672 #define DMA_MEMORY_INCLUDES_CHILDREN    0x04
                                                   >> 673 #define DMA_MEMORY_EXCLUSIVE            0x08
                                                   >> 674 
                                                   >> 675 #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
                                                   >> 676 int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
                                                   >> 677                                 dma_addr_t device_addr, size_t size, int flags);
                                                   >> 678 void dma_release_declared_memory(struct device *dev);
                                                   >> 679 void *dma_mark_declared_memory_occupied(struct device *dev,
                                                   >> 680                                         dma_addr_t device_addr, size_t size);
                                                   >> 681 #else
                                                   >> 682 static inline int
                                                   >> 683 dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
                                                   >> 684                             dma_addr_t device_addr, size_t size, int flags)
594 {                                                 685 {
595         return dmam_alloc_attrs(dev, size, dma_handle, gfp,            !! 686         return 0;
596                         (gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);  <<
597 }                                                 687 }
598                                                   688 
599 static inline void *dma_alloc_wc(struct device *dev, size_t size,      !! 689 static inline void
600                                  dma_addr_t *dma_addr, gfp_t gfp)      !! 690 dma_release_declared_memory(struct device *dev)
601 {                                                                          691 {
602         unsigned long attrs = DMA_ATTR_WRITE_COMBINE;                   !! 692 }
603                                                   693 
604         if (gfp & __GFP_NOWARN)                !! 694 static inline void *
605                 attrs |= DMA_ATTR_NO_WARN;     !! 695 dma_mark_declared_memory_occupied(struct device *dev,
                                                   >> 696                                   dma_addr_t device_addr, size_t size)
                                                   >> 697 {
                                                   >> 698         return ERR_PTR(-EBUSY);
                                                   >> 699 }
                                                   >> 700 #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
                                                   >> 701 
                                                   >> 702 /*
                                                   >> 703  * Managed DMA API
                                                   >> 704  */
                                                   >> 705 extern void *dmam_alloc_coherent(struct device *dev, size_t size,
                                                   >> 706                                  dma_addr_t *dma_handle, gfp_t gfp);
                                                   >> 707 extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
                                                   >> 708                                dma_addr_t dma_handle);
                                                   >> 709 extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
                                                   >> 710                                     dma_addr_t *dma_handle, gfp_t gfp);
                                                   >> 711 extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
                                                   >> 712                                   dma_addr_t dma_handle);
                                                   >> 713 #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
                                                   >> 714 extern int dmam_declare_coherent_memory(struct device *dev,
                                                   >> 715                                         phys_addr_t phys_addr,
                                                   >> 716                                         dma_addr_t device_addr, size_t size,
                                                   >> 717                                         int flags);
                                                   >> 718 extern void dmam_release_declared_memory(struct device *dev);
                                                   >> 719 #else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
                                                   >> 720 static inline int dmam_declare_coherent_memory(struct device *dev,
                                                   >> 721                                 phys_addr_t phys_addr, dma_addr_t device_addr,
                                                   >> 722                                 size_t size, gfp_t gfp)
                                                   >> 723 {
                                                   >> 724         return 0;
                                                   >> 725 }
606                                                   726 
607         return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);       !! 727 static inline void dmam_release_declared_memory(struct device *dev)
                                                   >> 728 {
608 }                                                 729 }
                                                   >> 730 #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
                                                   >> 731 
                                                   >> 732 static inline void *dma_alloc_wc(struct device *dev, size_t size,
                                                   >> 733                                  dma_addr_t *dma_addr, gfp_t gfp)
                                                   >> 734 {
                                                   >> 735         return dma_alloc_attrs(dev, size, dma_addr, gfp,
                                                   >> 736                                DMA_ATTR_WRITE_COMBINE);
                                                   >> 737 }
                                                   >> 738 #ifndef dma_alloc_writecombine
                                                   >> 739 #define dma_alloc_writecombine dma_alloc_wc
                                                   >> 740 #endif
609                                                   741 
610 static inline void dma_free_wc(struct device *    742 static inline void dma_free_wc(struct device *dev, size_t size,
611                                void *cpu_addr,    743                                void *cpu_addr, dma_addr_t dma_addr)
612 {                                                 744 {
613         return dma_free_attrs(dev, size, cpu_a    745         return dma_free_attrs(dev, size, cpu_addr, dma_addr,
614                               DMA_ATTR_WRITE_C    746                               DMA_ATTR_WRITE_COMBINE);
615 }                                                 747 }
                                                   >> 748 #ifndef dma_free_writecombine
                                                   >> 749 #define dma_free_writecombine dma_free_wc
                                                   >> 750 #endif
616                                                   751 
617 static inline int dma_mmap_wc(struct device *d    752 static inline int dma_mmap_wc(struct device *dev,
618                               struct vm_area_s    753                               struct vm_area_struct *vma,
619                               void *cpu_addr,     754                               void *cpu_addr, dma_addr_t dma_addr,
620                               size_t size)        755                               size_t size)
621 {                                                 756 {
622         return dma_mmap_attrs(dev, vma, cpu_ad    757         return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
623                               DMA_ATTR_WRITE_C    758                               DMA_ATTR_WRITE_COMBINE);
624 }                                                 759 }
                                                   >> 760 #ifndef dma_mmap_writecombine
                                                   >> 761 #define dma_mmap_writecombine dma_mmap_wc
                                                   >> 762 #endif
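
The write-combining helpers above are typically used for framebuffer-like memory that the CPU mostly writes. A sketch, with fb_size, fb_dma, and the surrounding mmap() context hypothetical:

        void *fb = dma_alloc_wc(dev, fb_size, &fb_dma, GFP_KERNEL);

        if (!fb)
                return -ENOMEM;
        /* ... later, from the driver's mmap() file operation ... */
        err = dma_mmap_wc(dev, vma, fb, fb_dma, fb_size);
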
625                                                   763 
626 #ifdef CONFIG_NEED_DMA_MAP_STATE               !! 764 #if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
627 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)          765 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
628 #define DEFINE_DMA_UNMAP_LEN(LEN_NAME)            766 #define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
629 #define dma_unmap_addr(PTR, ADDR_NAME)            767 #define dma_unmap_addr(PTR, ADDR_NAME)           ((PTR)->ADDR_NAME)
630 #define dma_unmap_addr_set(PTR, ADDR_NAME, VAL    768 #define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  (((PTR)->ADDR_NAME) = (VAL))
631 #define dma_unmap_len(PTR, LEN_NAME)              769 #define dma_unmap_len(PTR, LEN_NAME)             ((PTR)->LEN_NAME)
632 #define dma_unmap_len_set(PTR, LEN_NAME, VAL)     770 #define dma_unmap_len_set(PTR, LEN_NAME, VAL)    (((PTR)->LEN_NAME) = (VAL))
633 #else                                             771 #else
634 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)          772 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
635 #define DEFINE_DMA_UNMAP_LEN(LEN_NAME)            773 #define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
636 #define dma_unmap_addr(PTR, ADDR_NAME)            774 #define dma_unmap_addr(PTR, ADDR_NAME)           (0)
637 #define dma_unmap_addr_set(PTR, ADDR_NAME, VAL    775 #define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  do { } while (0)
638 #define dma_unmap_len(PTR, LEN_NAME)              776 #define dma_unmap_len(PTR, LEN_NAME)             (0)
639 #define dma_unmap_len_set(PTR, LEN_NAME, VAL)     777 #define dma_unmap_len_set(PTR, LEN_NAME, VAL)    do { } while (0)
640 #endif                                            778 #endif
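
These macros let a driver embed unmap bookkeeping in its own structures; when CONFIG_NEED_DMA_MAP_STATE is unset they compile away to nothing. A sketch with a hypothetical TX descriptor:

        struct foo_tx_desc {                    /* hypothetical driver struct */
                struct sk_buff *skb;
                DEFINE_DMA_UNMAP_ADDR(addr);    /* empty unless map state is needed */
                DEFINE_DMA_UNMAP_LEN(len);
        };

        dma_unmap_addr_set(desc, addr, handle);
        dma_unmap_len_set(desc, len, size);
        /* ... at TX-completion time ... */
        dma_unmap_single(dev, dma_unmap_addr(desc, addr),
                         dma_unmap_len(desc, len), DMA_TO_DEVICE);
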
641                                                   779 
642 #endif /* _LINUX_DMA_MAPPING_H */              !! 780 #endif
643                                                   781 
