TOMOYO Linux Cross Reference
Linux/include/linux/dma-mapping.h

Diff markup

Differences between /include/linux/dma-mapping.h (Version linux-6.12-rc7) and /include/linux/dma-mapping.h (Version linux-6.9.12)
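
A minimal sketch of how the streaming-mapping interfaces declared in this header fit together may help when reading the listing: dma_map_single() produces a bus address, failure is detected with dma_mapping_error() rather than by comparing against DMA_MAPPING_ERROR directly, the dma_sync_single_for_*() helpers pass ownership of the buffer between CPU and device, and dma_unmap_single() releases the mapping. The device pointer, buffer, length and function name below are illustrative placeholders, not taken from this file.

#include <linux/dma-mapping.h>

/* Illustrative only: "dev", "buf", "len" and the function name are placeholders. */
static int example_receive(struct device *dev, void *buf, size_t len)
{
        dma_addr_t dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

        if (dma_mapping_error(dev, dma))
                return -ENOMEM;

        /* ... program the device with "dma" and wait for the transfer ... */

        /* Hand ownership back to the CPU before reading the data. */
        dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
        /* ... read buf ... */
        dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);

        dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
        return 0;
}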


  1 /* SPDX-License-Identifier: GPL-2.0 */              1 /* SPDX-License-Identifier: GPL-2.0 */
  2 #ifndef _LINUX_DMA_MAPPING_H                        2 #ifndef _LINUX_DMA_MAPPING_H
  3 #define _LINUX_DMA_MAPPING_H                        3 #define _LINUX_DMA_MAPPING_H
  4                                                     4 
  5 #include <linux/cache.h>                            5 #include <linux/cache.h>
  6 #include <linux/sizes.h>                            6 #include <linux/sizes.h>
  7 #include <linux/string.h>                           7 #include <linux/string.h>
  8 #include <linux/device.h>                           8 #include <linux/device.h>
  9 #include <linux/err.h>                              9 #include <linux/err.h>
 10 #include <linux/dma-direction.h>                   10 #include <linux/dma-direction.h>
 11 #include <linux/scatterlist.h>                     11 #include <linux/scatterlist.h>
 12 #include <linux/bug.h>                             12 #include <linux/bug.h>
 13 #include <linux/mem_encrypt.h>                     13 #include <linux/mem_encrypt.h>
 14                                                    14 
 15 /**                                                15 /**
 16  * List of possible attributes associated with     16  * List of possible attributes associated with a DMA mapping. The semantics
 17  * of each attribute should be defined in Docu     17  * of each attribute should be defined in Documentation/core-api/dma-attributes.rst.
 18  */                                                18  */
 19                                                    19 
 20 /*                                                 20 /*
 21  * DMA_ATTR_WEAK_ORDERING: Specifies that read     21  * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 22  * may be weakly ordered, that is that reads a     22  * may be weakly ordered, that is that reads and writes may pass each other.
 23  */                                                23  */
 24 #define DMA_ATTR_WEAK_ORDERING          (1UL <     24 #define DMA_ATTR_WEAK_ORDERING          (1UL << 1)
 25 /*                                                 25 /*
 26  * DMA_ATTR_WRITE_COMBINE: Specifies that writ     26  * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 27  * buffered to improve performance.                27  * buffered to improve performance.
 28  */                                                28  */
 29 #define DMA_ATTR_WRITE_COMBINE          (1UL <     29 #define DMA_ATTR_WRITE_COMBINE          (1UL << 2)
 30 /*                                                 30 /*
 31  * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platfo     31  * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 32  * virtual mapping for the allocated buffer.       32  * virtual mapping for the allocated buffer.
 33  */                                                33  */
 34 #define DMA_ATTR_NO_KERNEL_MAPPING      (1UL <     34 #define DMA_ATTR_NO_KERNEL_MAPPING      (1UL << 4)
 35 /*                                                 35 /*
 36  * DMA_ATTR_SKIP_CPU_SYNC: Allows platform cod     36  * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 37  * the CPU cache for the given buffer assuming     37  * the CPU cache for the given buffer assuming that it has already been
 38  * transferred to 'device' domain.                 38  * transferred to 'device' domain.
 39  */                                                39  */
 40 #define DMA_ATTR_SKIP_CPU_SYNC          (1UL <     40 #define DMA_ATTR_SKIP_CPU_SYNC          (1UL << 5)
 41 /*                                                 41 /*
 42  * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguou     42  * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 43  * in physical memory.                             43  * in physical memory.
 44  */                                                44  */
 45 #define DMA_ATTR_FORCE_CONTIGUOUS       (1UL <     45 #define DMA_ATTR_FORCE_CONTIGUOUS       (1UL << 6)
 46 /*                                                 46 /*
 47  * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint     47  * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 48  * that it's probably not worth the time to tr     48  * that it's probably not worth the time to try to allocate memory in a way
 49  * that gives better TLB efficiency.               49  * that gives better TLB efficiency.
 50  */                                                50  */
 51 #define DMA_ATTR_ALLOC_SINGLE_PAGES     (1UL <     51 #define DMA_ATTR_ALLOC_SINGLE_PAGES     (1UL << 7)
 52 /*                                                 52 /*
 53  * DMA_ATTR_NO_WARN: This tells the DMA-mappin     53  * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 54  * allocation failure reports (similarly to __     54  * allocation failure reports (similarly to __GFP_NOWARN).
 55  */                                                55  */
 56 #define DMA_ATTR_NO_WARN        (1UL << 8)         56 #define DMA_ATTR_NO_WARN        (1UL << 8)
 57                                                    57 
 58 /*                                                 58 /*
 59  * DMA_ATTR_PRIVILEGED: used to indicate that      59  * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 60  * accessible at an elevated privilege level (     60  * accessible at an elevated privilege level (and ideally inaccessible or
 61  * at least read-only at lesser-privileged lev     61  * at least read-only at lesser-privileged levels).
 62  */                                                62  */
 63 #define DMA_ATTR_PRIVILEGED             (1UL <     63 #define DMA_ATTR_PRIVILEGED             (1UL << 9)
 64                                                    64 
 65 /*                                                 65 /*
 66  * A dma_addr_t can hold any valid DMA or bus      66  * A dma_addr_t can hold any valid DMA or bus address for the platform.  It can
 67  * be given to a device to use as a DMA source     67  * be given to a device to use as a DMA source or target.  It is specific to a
 68  * given device and there may be a translation     68  * given device and there may be a translation between the CPU physical address
 69  * space and the bus address space.                69  * space and the bus address space.
 70  *                                                 70  *
 71  * DMA_MAPPING_ERROR is the magic error code i     71  * DMA_MAPPING_ERROR is the magic error code if a mapping failed.  It should not
 72  * be used directly in drivers, but checked fo     72  * be used directly in drivers, but checked for using dma_mapping_error()
 73  * instead.                                        73  * instead.
 74  */                                                74  */
 75 #define DMA_MAPPING_ERROR               (~(dma     75 #define DMA_MAPPING_ERROR               (~(dma_addr_t)0)
 76                                                    76 
 77 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL :     77 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
 78                                                    78 
 79 #ifdef CONFIG_DMA_API_DEBUG                        79 #ifdef CONFIG_DMA_API_DEBUG
 80 void debug_dma_mapping_error(struct device *de     80 void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
 81 void debug_dma_map_single(struct device *dev,      81 void debug_dma_map_single(struct device *dev, const void *addr,
 82                 unsigned long len);                82                 unsigned long len);
 83 #else                                              83 #else
 84 static inline void debug_dma_mapping_error(str     84 static inline void debug_dma_mapping_error(struct device *dev,
 85                 dma_addr_t dma_addr)               85                 dma_addr_t dma_addr)
 86 {                                                  86 {
 87 }                                                  87 }
 88 static inline void debug_dma_map_single(struct     88 static inline void debug_dma_map_single(struct device *dev, const void *addr,
 89                 unsigned long len)                 89                 unsigned long len)
 90 {                                                  90 {
 91 }                                                  91 }
 92 #endif /* CONFIG_DMA_API_DEBUG */                  92 #endif /* CONFIG_DMA_API_DEBUG */
 93                                                    93 
 94 #ifdef CONFIG_HAS_DMA                              94 #ifdef CONFIG_HAS_DMA
 95 static inline int dma_mapping_error(struct dev     95 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 96 {                                                  96 {
 97         debug_dma_mapping_error(dev, dma_addr)     97         debug_dma_mapping_error(dev, dma_addr);
 98                                                    98 
 99         if (unlikely(dma_addr == DMA_MAPPING_E     99         if (unlikely(dma_addr == DMA_MAPPING_ERROR))
100                 return -ENOMEM;                   100                 return -ENOMEM;
101         return 0;                                 101         return 0;
102 }                                                 102 }
103                                                   103 
104 dma_addr_t dma_map_page_attrs(struct device *d    104 dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
105                 size_t offset, size_t size, en    105                 size_t offset, size_t size, enum dma_data_direction dir,
106                 unsigned long attrs);             106                 unsigned long attrs);
107 void dma_unmap_page_attrs(struct device *dev,     107 void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
108                 enum dma_data_direction dir, u    108                 enum dma_data_direction dir, unsigned long attrs);
109 unsigned int dma_map_sg_attrs(struct device *d    109 unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
110                 int nents, enum dma_data_direc    110                 int nents, enum dma_data_direction dir, unsigned long attrs);
111 void dma_unmap_sg_attrs(struct device *dev, st    111 void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
112                                       int nent    112                                       int nents, enum dma_data_direction dir,
113                                       unsigned    113                                       unsigned long attrs);
114 int dma_map_sgtable(struct device *dev, struct    114 int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
115                 enum dma_data_direction dir, u    115                 enum dma_data_direction dir, unsigned long attrs);
116 dma_addr_t dma_map_resource(struct device *dev    116 dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
117                 size_t size, enum dma_data_dir    117                 size_t size, enum dma_data_direction dir, unsigned long attrs);
118 void dma_unmap_resource(struct device *dev, dm    118 void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
119                 enum dma_data_direction dir, u    119                 enum dma_data_direction dir, unsigned long attrs);
                                                   >> 120 void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
                                                   >> 121                 enum dma_data_direction dir);
                                                   >> 122 void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
                                                   >> 123                 size_t size, enum dma_data_direction dir);
                                                   >> 124 void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                                                   >> 125                     int nelems, enum dma_data_direction dir);
                                                   >> 126 void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                                                   >> 127                        int nelems, enum dma_data_direction dir);
120 void *dma_alloc_attrs(struct device *dev, size    128 void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
121                 gfp_t flag, unsigned long attr    129                 gfp_t flag, unsigned long attrs);
122 void dma_free_attrs(struct device *dev, size_t    130 void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
123                 dma_addr_t dma_handle, unsigne    131                 dma_addr_t dma_handle, unsigned long attrs);
124 void *dmam_alloc_attrs(struct device *dev, siz    132 void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
125                 gfp_t gfp, unsigned long attrs    133                 gfp_t gfp, unsigned long attrs);
126 void dmam_free_coherent(struct device *dev, si    134 void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
127                 dma_addr_t dma_handle);           135                 dma_addr_t dma_handle);
128 int dma_get_sgtable_attrs(struct device *dev,     136 int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
129                 void *cpu_addr, dma_addr_t dma    137                 void *cpu_addr, dma_addr_t dma_addr, size_t size,
130                 unsigned long attrs);             138                 unsigned long attrs);
131 int dma_mmap_attrs(struct device *dev, struct     139 int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
132                 void *cpu_addr, dma_addr_t dma    140                 void *cpu_addr, dma_addr_t dma_addr, size_t size,
133                 unsigned long attrs);             141                 unsigned long attrs);
134 bool dma_can_mmap(struct device *dev);            142 bool dma_can_mmap(struct device *dev);
135 bool dma_pci_p2pdma_supported(struct device *d    143 bool dma_pci_p2pdma_supported(struct device *dev);
136 int dma_set_mask(struct device *dev, u64 mask)    144 int dma_set_mask(struct device *dev, u64 mask);
137 int dma_set_coherent_mask(struct device *dev,     145 int dma_set_coherent_mask(struct device *dev, u64 mask);
138 u64 dma_get_required_mask(struct device *dev);    146 u64 dma_get_required_mask(struct device *dev);
139 bool dma_addressing_limited(struct device *dev    147 bool dma_addressing_limited(struct device *dev);
140 size_t dma_max_mapping_size(struct device *dev    148 size_t dma_max_mapping_size(struct device *dev);
141 size_t dma_opt_mapping_size(struct device *dev    149 size_t dma_opt_mapping_size(struct device *dev);
                                                   >> 150 bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
142 unsigned long dma_get_merge_boundary(struct de    151 unsigned long dma_get_merge_boundary(struct device *dev);
143 struct sg_table *dma_alloc_noncontiguous(struc    152 struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
144                 enum dma_data_direction dir, g    153                 enum dma_data_direction dir, gfp_t gfp, unsigned long attrs);
145 void dma_free_noncontiguous(struct device *dev    154 void dma_free_noncontiguous(struct device *dev, size_t size,
146                 struct sg_table *sgt, enum dma    155                 struct sg_table *sgt, enum dma_data_direction dir);
147 void *dma_vmap_noncontiguous(struct device *de    156 void *dma_vmap_noncontiguous(struct device *dev, size_t size,
148                 struct sg_table *sgt);            157                 struct sg_table *sgt);
149 void dma_vunmap_noncontiguous(struct device *d    158 void dma_vunmap_noncontiguous(struct device *dev, void *vaddr);
150 int dma_mmap_noncontiguous(struct device *dev,    159 int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
151                 size_t size, struct sg_table *    160                 size_t size, struct sg_table *sgt);
152 #else /* CONFIG_HAS_DMA */                        161 #else /* CONFIG_HAS_DMA */
153 static inline dma_addr_t dma_map_page_attrs(st    162 static inline dma_addr_t dma_map_page_attrs(struct device *dev,
154                 struct page *page, size_t offs    163                 struct page *page, size_t offset, size_t size,
155                 enum dma_data_direction dir, u    164                 enum dma_data_direction dir, unsigned long attrs)
156 {                                                 165 {
157         return DMA_MAPPING_ERROR;                 166         return DMA_MAPPING_ERROR;
158 }                                                 167 }
159 static inline void dma_unmap_page_attrs(struct    168 static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
160                 size_t size, enum dma_data_dir    169                 size_t size, enum dma_data_direction dir, unsigned long attrs)
161 {                                                 170 {
162 }                                                 171 }
163 static inline unsigned int dma_map_sg_attrs(st    172 static inline unsigned int dma_map_sg_attrs(struct device *dev,
164                 struct scatterlist *sg, int ne    173                 struct scatterlist *sg, int nents, enum dma_data_direction dir,
165                 unsigned long attrs)              174                 unsigned long attrs)
166 {                                                 175 {
167         return 0;                                 176         return 0;
168 }                                                 177 }
169 static inline void dma_unmap_sg_attrs(struct d    178 static inline void dma_unmap_sg_attrs(struct device *dev,
170                 struct scatterlist *sg, int ne    179                 struct scatterlist *sg, int nents, enum dma_data_direction dir,
171                 unsigned long attrs)              180                 unsigned long attrs)
172 {                                                 181 {
173 }                                                 182 }
174 static inline int dma_map_sgtable(struct devic    183 static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
175                 enum dma_data_direction dir, u    184                 enum dma_data_direction dir, unsigned long attrs)
176 {                                                 185 {
177         return -EOPNOTSUPP;                       186         return -EOPNOTSUPP;
178 }                                                 187 }
179 static inline dma_addr_t dma_map_resource(stru    188 static inline dma_addr_t dma_map_resource(struct device *dev,
180                 phys_addr_t phys_addr, size_t     189                 phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
181                 unsigned long attrs)              190                 unsigned long attrs)
182 {                                                 191 {
183         return DMA_MAPPING_ERROR;                 192         return DMA_MAPPING_ERROR;
184 }                                                 193 }
185 static inline void dma_unmap_resource(struct d    194 static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
186                 size_t size, enum dma_data_dir    195                 size_t size, enum dma_data_direction dir, unsigned long attrs)
187 {                                                 196 {
188 }                                                 197 }
                                                   >> 198 static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
                                                   >> 199                 size_t size, enum dma_data_direction dir)
                                                   >> 200 {
                                                   >> 201 }
                                                   >> 202 static inline void dma_sync_single_for_device(struct device *dev,
                                                   >> 203                 dma_addr_t addr, size_t size, enum dma_data_direction dir)
                                                   >> 204 {
                                                   >> 205 }
                                                   >> 206 static inline void dma_sync_sg_for_cpu(struct device *dev,
                                                   >> 207                 struct scatterlist *sg, int nelems, enum dma_data_direction dir)
                                                   >> 208 {
                                                   >> 209 }
                                                   >> 210 static inline void dma_sync_sg_for_device(struct device *dev,
                                                   >> 211                 struct scatterlist *sg, int nelems, enum dma_data_direction dir)
                                                   >> 212 {
                                                   >> 213 }
189 static inline int dma_mapping_error(struct dev    214 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
190 {                                                 215 {
191         return -ENOMEM;                           216         return -ENOMEM;
192 }                                                 217 }
193 static inline void *dma_alloc_attrs(struct dev    218 static inline void *dma_alloc_attrs(struct device *dev, size_t size,
194                 dma_addr_t *dma_handle, gfp_t     219                 dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
195 {                                                 220 {
196         return NULL;                              221         return NULL;
197 }                                                 222 }
198 static void dma_free_attrs(struct device *dev,    223 static void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
199                 dma_addr_t dma_handle, unsigne    224                 dma_addr_t dma_handle, unsigned long attrs)
200 {                                                 225 {
201 }                                                 226 }
202 static inline void *dmam_alloc_attrs(struct de    227 static inline void *dmam_alloc_attrs(struct device *dev, size_t size,
203                 dma_addr_t *dma_handle, gfp_t     228                 dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
204 {                                                 229 {
205         return NULL;                              230         return NULL;
206 }                                                 231 }
207 static inline void dmam_free_coherent(struct d    232 static inline void dmam_free_coherent(struct device *dev, size_t size,
208                 void *vaddr, dma_addr_t dma_ha    233                 void *vaddr, dma_addr_t dma_handle)
209 {                                                 234 {
210 }                                                 235 }
211 static inline int dma_get_sgtable_attrs(struct    236 static inline int dma_get_sgtable_attrs(struct device *dev,
212                 struct sg_table *sgt, void *cp    237                 struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
213                 size_t size, unsigned long att    238                 size_t size, unsigned long attrs)
214 {                                                 239 {
215         return -ENXIO;                            240         return -ENXIO;
216 }                                                 241 }
217 static inline int dma_mmap_attrs(struct device    242 static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
218                 void *cpu_addr, dma_addr_t dma    243                 void *cpu_addr, dma_addr_t dma_addr, size_t size,
219                 unsigned long attrs)              244                 unsigned long attrs)
220 {                                                 245 {
221         return -ENXIO;                            246         return -ENXIO;
222 }                                                 247 }
223 static inline bool dma_can_mmap(struct device     248 static inline bool dma_can_mmap(struct device *dev)
224 {                                                 249 {
225         return false;                             250         return false;
226 }                                                 251 }
227 static inline bool dma_pci_p2pdma_supported(st    252 static inline bool dma_pci_p2pdma_supported(struct device *dev)
228 {                                                 253 {
229         return false;                             254         return false;
230 }                                                 255 }
231 static inline int dma_set_mask(struct device *    256 static inline int dma_set_mask(struct device *dev, u64 mask)
232 {                                                 257 {
233         return -EIO;                              258         return -EIO;
234 }                                                 259 }
235 static inline int dma_set_coherent_mask(struct    260 static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
236 {                                                 261 {
237         return -EIO;                              262         return -EIO;
238 }                                                 263 }
239 static inline u64 dma_get_required_mask(struct    264 static inline u64 dma_get_required_mask(struct device *dev)
240 {                                                 265 {
241         return 0;                                 266         return 0;
242 }                                                 267 }
243 static inline bool dma_addressing_limited(stru    268 static inline bool dma_addressing_limited(struct device *dev)
244 {                                                 269 {
245         return false;                             270         return false;
246 }                                                 271 }
247 static inline size_t dma_max_mapping_size(stru    272 static inline size_t dma_max_mapping_size(struct device *dev)
248 {                                                 273 {
249         return 0;                                 274         return 0;
250 }                                                 275 }
251 static inline size_t dma_opt_mapping_size(stru    276 static inline size_t dma_opt_mapping_size(struct device *dev)
252 {                                                 277 {
253         return 0;                                 278         return 0;
254 }                                                 279 }
                                                   >> 280 static inline bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
                                                   >> 281 {
                                                   >> 282         return false;
                                                   >> 283 }
255 static inline unsigned long dma_get_merge_boun    284 static inline unsigned long dma_get_merge_boundary(struct device *dev)
256 {                                                 285 {
257         return 0;                                 286         return 0;
258 }                                                 287 }
259 static inline struct sg_table *dma_alloc_nonco    288 static inline struct sg_table *dma_alloc_noncontiguous(struct device *dev,
260                 size_t size, enum dma_data_dir    289                 size_t size, enum dma_data_direction dir, gfp_t gfp,
261                 unsigned long attrs)              290                 unsigned long attrs)
262 {                                                 291 {
263         return NULL;                              292         return NULL;
264 }                                                 293 }
265 static inline void dma_free_noncontiguous(stru    294 static inline void dma_free_noncontiguous(struct device *dev, size_t size,
266                 struct sg_table *sgt, enum dma    295                 struct sg_table *sgt, enum dma_data_direction dir)
267 {                                                 296 {
268 }                                                 297 }
269 static inline void *dma_vmap_noncontiguous(str    298 static inline void *dma_vmap_noncontiguous(struct device *dev, size_t size,
270                 struct sg_table *sgt)             299                 struct sg_table *sgt)
271 {                                                 300 {
272         return NULL;                              301         return NULL;
273 }                                                 302 }
274 static inline void dma_vunmap_noncontiguous(st    303 static inline void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
275 {                                                 304 {
276 }                                                 305 }
277 static inline int dma_mmap_noncontiguous(struc    306 static inline int dma_mmap_noncontiguous(struct device *dev,
278                 struct vm_area_struct *vma, si    307                 struct vm_area_struct *vma, size_t size, struct sg_table *sgt)
279 {                                                 308 {
280         return -EINVAL;                           309         return -EINVAL;
281 }                                                 310 }
282 #endif /* CONFIG_HAS_DMA */                       311 #endif /* CONFIG_HAS_DMA */
283                                                   312 
284 #if defined(CONFIG_HAS_DMA) && defined(CONFIG_ << 
285 void __dma_sync_single_for_cpu(struct device * << 
286                 enum dma_data_direction dir);  << 
287 void __dma_sync_single_for_device(struct devic << 
288                 size_t size, enum dma_data_dir << 
289 void __dma_sync_sg_for_cpu(struct device *dev, << 
290                 int nelems, enum dma_data_dire << 
291 void __dma_sync_sg_for_device(struct device *d << 
292                 int nelems, enum dma_data_dire << 
293 bool __dma_need_sync(struct device *dev, dma_a << 
294                                                << 
295 static inline bool dma_dev_need_sync(const str << 
296 {                                              << 
297         /* Always call DMA sync operations whe << 
298         return !dev->dma_skip_sync || IS_ENABL << 
299 }                                              << 
300                                                << 
301 static inline void dma_sync_single_for_cpu(str << 
302                 size_t size, enum dma_data_dir << 
303 {                                              << 
304         if (dma_dev_need_sync(dev))            << 
305                 __dma_sync_single_for_cpu(dev, << 
306 }                                              << 
307                                                << 
308 static inline void dma_sync_single_for_device( << 
309                 dma_addr_t addr, size_t size,  << 
310 {                                              << 
311         if (dma_dev_need_sync(dev))            << 
312                 __dma_sync_single_for_device(d << 
313 }                                              << 
314                                                << 
315 static inline void dma_sync_sg_for_cpu(struct  << 
316                 struct scatterlist *sg, int ne << 
317 {                                              << 
318         if (dma_dev_need_sync(dev))            << 
319                 __dma_sync_sg_for_cpu(dev, sg, << 
320 }                                              << 
321                                                << 
322 static inline void dma_sync_sg_for_device(stru << 
323                 struct scatterlist *sg, int ne << 
324 {                                              << 
325         if (dma_dev_need_sync(dev))            << 
326                 __dma_sync_sg_for_device(dev,  << 
327 }                                              << 
328                                                << 
329 static inline bool dma_need_sync(struct device << 
330 {                                              << 
331         return dma_dev_need_sync(dev) ? __dma_ << 
332 }                                              << 
333 #else /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_S << 
334 static inline bool dma_dev_need_sync(const str << 
335 {                                              << 
336         return false;                          << 
337 }                                              << 
338 static inline void dma_sync_single_for_cpu(str << 
339                 size_t size, enum dma_data_dir << 
340 {                                              << 
341 }                                              << 
342 static inline void dma_sync_single_for_device( << 
343                 dma_addr_t addr, size_t size,  << 
344 {                                              << 
345 }                                              << 
346 static inline void dma_sync_sg_for_cpu(struct  << 
347                 struct scatterlist *sg, int ne << 
348 {                                              << 
349 }                                              << 
350 static inline void dma_sync_sg_for_device(stru << 
351                 struct scatterlist *sg, int ne << 
352 {                                              << 
353 }                                              << 
354 static inline bool dma_need_sync(struct device << 
355 {                                              << 
356         return false;                          << 
357 }                                              << 
358 #endif /* !CONFIG_HAS_DMA || !CONFIG_DMA_NEED_ << 
359                                                << 
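
The block above is present only on the linux-6.12-rc7 side: the dma_sync_single/sg_for_{cpu,device} helpers and dma_need_sync(), which linux-6.9.12 declares unconditionally under CONFIG_HAS_DMA (the ">>" lines earlier), become inline wrappers that call double-underscore implementations only when dma_dev_need_sync() reports that the device still needs cache maintenance, and that compile away when CONFIG_DMA_NEED_SYNC is not enabled. A sketch of the wrapper shape, reconstructed from the truncated lines rather than copied verbatim:

/* Sketch only; reconstructed from the truncated 6.12-rc7 hunk above. */
static inline void dma_sync_single_for_cpu(struct device *dev,
                dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
        if (dma_dev_need_sync(dev))
                __dma_sync_single_for_cpu(dev, addr, size, dir);
}
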
360 struct page *dma_alloc_pages(struct device *de    313 struct page *dma_alloc_pages(struct device *dev, size_t size,
361                 dma_addr_t *dma_handle, enum d    314                 dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp);
362 void dma_free_pages(struct device *dev, size_t    315 void dma_free_pages(struct device *dev, size_t size, struct page *page,
363                 dma_addr_t dma_handle, enum dm    316                 dma_addr_t dma_handle, enum dma_data_direction dir);
364 int dma_mmap_pages(struct device *dev, struct     317 int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
365                 size_t size, struct page *page    318                 size_t size, struct page *page);
366                                                   319 
367 static inline void *dma_alloc_noncoherent(stru    320 static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
368                 dma_addr_t *dma_handle, enum d    321                 dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
369 {                                                 322 {
370         struct page *page = dma_alloc_pages(de    323         struct page *page = dma_alloc_pages(dev, size, dma_handle, dir, gfp);
371         return page ? page_address(page) : NUL    324         return page ? page_address(page) : NULL;
372 }                                                 325 }
373                                                   326 
374 static inline void dma_free_noncoherent(struct    327 static inline void dma_free_noncoherent(struct device *dev, size_t size,
375                 void *vaddr, dma_addr_t dma_ha    328                 void *vaddr, dma_addr_t dma_handle, enum dma_data_direction dir)
376 {                                                 329 {
377         dma_free_pages(dev, size, virt_to_page    330         dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir);
378 }                                                 331 }
379                                                   332 
380 static inline dma_addr_t dma_map_single_attrs(    333 static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
381                 size_t size, enum dma_data_dir    334                 size_t size, enum dma_data_direction dir, unsigned long attrs)
382 {                                                 335 {
383         /* DMA must never operate on areas tha    336         /* DMA must never operate on areas that might be remapped. */
384         if (dev_WARN_ONCE(dev, is_vmalloc_addr    337         if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
385                           "rejecting DMA map o    338                           "rejecting DMA map of vmalloc memory\n"))
386                 return DMA_MAPPING_ERROR;         339                 return DMA_MAPPING_ERROR;
387         debug_dma_map_single(dev, ptr, size);     340         debug_dma_map_single(dev, ptr, size);
388         return dma_map_page_attrs(dev, virt_to    341         return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
389                         size, dir, attrs);        342                         size, dir, attrs);
390 }                                                 343 }
391                                                   344 
392 static inline void dma_unmap_single_attrs(stru    345 static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
393                 size_t size, enum dma_data_dir    346                 size_t size, enum dma_data_direction dir, unsigned long attrs)
394 {                                                 347 {
395         return dma_unmap_page_attrs(dev, addr,    348         return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
396 }                                                 349 }
397                                                   350 
398 static inline void dma_sync_single_range_for_c    351 static inline void dma_sync_single_range_for_cpu(struct device *dev,
399                 dma_addr_t addr, unsigned long    352                 dma_addr_t addr, unsigned long offset, size_t size,
400                 enum dma_data_direction dir)      353                 enum dma_data_direction dir)
401 {                                                 354 {
402         return dma_sync_single_for_cpu(dev, ad    355         return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
403 }                                                 356 }
404                                                   357 
405 static inline void dma_sync_single_range_for_d    358 static inline void dma_sync_single_range_for_device(struct device *dev,
406                 dma_addr_t addr, unsigned long    359                 dma_addr_t addr, unsigned long offset, size_t size,
407                 enum dma_data_direction dir)      360                 enum dma_data_direction dir)
408 {                                                 361 {
409         return dma_sync_single_for_device(dev,    362         return dma_sync_single_for_device(dev, addr + offset, size, dir);
410 }                                                 363 }
411                                                   364 
412 /**                                               365 /**
413  * dma_unmap_sgtable - Unmap the given buffer     366  * dma_unmap_sgtable - Unmap the given buffer for DMA
414  * @dev:        The device for which to perfor    367  * @dev:        The device for which to perform the DMA operation
415  * @sgt:        The sg_table object describing    368  * @sgt:        The sg_table object describing the buffer
416  * @dir:        DMA direction                     369  * @dir:        DMA direction
417  * @attrs:      Optional DMA attributes for th    370  * @attrs:      Optional DMA attributes for the unmap operation
418  *                                                371  *
419  * Unmaps a buffer described by a scatterlist     372  * Unmaps a buffer described by a scatterlist stored in the given sg_table
420  * object for the @dir DMA operation by the @d    373  * object for the @dir DMA operation by the @dev device. After this function
421  * the ownership of the buffer is transferred     374  * the ownership of the buffer is transferred back to the CPU domain.
422  */                                               375  */
423 static inline void dma_unmap_sgtable(struct de    376 static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
424                 enum dma_data_direction dir, u    377                 enum dma_data_direction dir, unsigned long attrs)
425 {                                                 378 {
426         dma_unmap_sg_attrs(dev, sgt->sgl, sgt-    379         dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
427 }                                                 380 }
428                                                   381 
429 /**                                               382 /**
430  * dma_sync_sgtable_for_cpu - Synchronize the     383  * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access
431  * @dev:        The device for which to perfor    384  * @dev:        The device for which to perform the DMA operation
432  * @sgt:        The sg_table object describing    385  * @sgt:        The sg_table object describing the buffer
433  * @dir:        DMA direction                     386  * @dir:        DMA direction
434  *                                                387  *
435  * Performs the needed cache synchronization a    388  * Performs the needed cache synchronization and moves the ownership of the
436  * buffer back to the CPU domain, so it is saf    389  * buffer back to the CPU domain, so it is safe to perform any access to it
437  * by the CPU. Before doing any further DMA op    390  * by the CPU. Before doing any further DMA operations, one has to transfer
438  * the ownership of the buffer back to the DMA    391  * the ownership of the buffer back to the DMA domain by calling
439  * dma_sync_sgtable_for_device().                 392  * dma_sync_sgtable_for_device().
440  */                                               393  */
441 static inline void dma_sync_sgtable_for_cpu(st    394 static inline void dma_sync_sgtable_for_cpu(struct device *dev,
442                 struct sg_table *sgt, enum dma    395                 struct sg_table *sgt, enum dma_data_direction dir)
443 {                                                 396 {
444         dma_sync_sg_for_cpu(dev, sgt->sgl, sgt    397         dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
445 }                                                 398 }
446                                                   399 
447 /**                                               400 /**
448  * dma_sync_sgtable_for_device - Synchronize t    401  * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA
449  * @dev:        The device for which to perfor    402  * @dev:        The device for which to perform the DMA operation
450  * @sgt:        The sg_table object describing    403  * @sgt:        The sg_table object describing the buffer
451  * @dir:        DMA direction                     404  * @dir:        DMA direction
452  *                                                405  *
453  * Performs the needed cache synchronization a    406  * Performs the needed cache synchronization and moves the ownership of the
454  * buffer back to the DMA domain, so it is saf    407  * buffer back to the DMA domain, so it is safe to perform the DMA operation.
455  * Once finished, one has to call dma_sync_sgt    408  * Once finished, one has to call dma_sync_sgtable_for_cpu() or
456  * dma_unmap_sgtable().                           409  * dma_unmap_sgtable().
457  */                                               410  */
458 static inline void dma_sync_sgtable_for_device    411 static inline void dma_sync_sgtable_for_device(struct device *dev,
459                 struct sg_table *sgt, enum dma    412                 struct sg_table *sgt, enum dma_data_direction dir)
460 {                                                 413 {
461         dma_sync_sg_for_device(dev, sgt->sgl,     414         dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
462 }                                                 415 }
463                                                   416 
464 #define dma_map_single(d, a, s, r) dma_map_sin    417 #define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
465 #define dma_unmap_single(d, a, s, r) dma_unmap    418 #define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
466 #define dma_map_sg(d, s, n, r) dma_map_sg_attr    419 #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
467 #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_    420 #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
468 #define dma_map_page(d, p, o, s, r) dma_map_pa    421 #define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
469 #define dma_unmap_page(d, a, s, r) dma_unmap_p    422 #define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
470 #define dma_get_sgtable(d, t, v, h, s) dma_get    423 #define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
471 #define dma_mmap_coherent(d, v, c, h, s) dma_m    424 #define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
472                                                   425 
473 bool dma_coherent_ok(struct device *dev, phys_    426 bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size);
474                                                   427 
475 static inline void *dma_alloc_coherent(struct     428 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
476                 dma_addr_t *dma_handle, gfp_t     429                 dma_addr_t *dma_handle, gfp_t gfp)
477 {                                                 430 {
478         return dma_alloc_attrs(dev, size, dma_    431         return dma_alloc_attrs(dev, size, dma_handle, gfp,
479                         (gfp & __GFP_NOWARN) ?    432                         (gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
480 }                                                 433 }
481                                                   434 
482 static inline void dma_free_coherent(struct de    435 static inline void dma_free_coherent(struct device *dev, size_t size,
483                 void *cpu_addr, dma_addr_t dma    436                 void *cpu_addr, dma_addr_t dma_handle)
484 {                                                 437 {
485         return dma_free_attrs(dev, size, cpu_a    438         return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
486 }                                                 439 }
487                                                   440 
488                                                   441 
489 static inline u64 dma_get_mask(struct device *    442 static inline u64 dma_get_mask(struct device *dev)
490 {                                                 443 {
491         if (dev->dma_mask && *dev->dma_mask)      444         if (dev->dma_mask && *dev->dma_mask)
492                 return *dev->dma_mask;            445                 return *dev->dma_mask;
493         return DMA_BIT_MASK(32);                  446         return DMA_BIT_MASK(32);
494 }                                                 447 }
495                                                   448 
496 /*                                                449 /*
497  * Set both the DMA mask and the coherent DMA     450  * Set both the DMA mask and the coherent DMA mask to the same thing.
498  * Note that we don't check the return value f    451  * Note that we don't check the return value from dma_set_coherent_mask()
499  * as the DMA API guarantees that the coherent    452  * as the DMA API guarantees that the coherent DMA mask can be set to
500  * the same or smaller than the streaming DMA     453  * the same or smaller than the streaming DMA mask.
501  */                                               454  */
502 static inline int dma_set_mask_and_coherent(st    455 static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
503 {                                                 456 {
504         int rc = dma_set_mask(dev, mask);         457         int rc = dma_set_mask(dev, mask);
505         if (rc == 0)                              458         if (rc == 0)
506                 dma_set_coherent_mask(dev, mas    459                 dma_set_coherent_mask(dev, mask);
507         return rc;                                460         return rc;
508 }                                                 461 }
509                                                   462 
510 /*                                                463 /*
511  * Similar to the above, except it deals with     464  * Similar to the above, except it deals with the case where the device
512  * does not have dev->dma_mask appropriately s    465  * does not have dev->dma_mask appropriately setup.
513  */                                               466  */
514 static inline int dma_coerce_mask_and_coherent    467 static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
515 {                                                 468 {
516         dev->dma_mask = &dev->coherent_dma_mas    469         dev->dma_mask = &dev->coherent_dma_mask;
517         return dma_set_mask_and_coherent(dev,     470         return dma_set_mask_and_coherent(dev, mask);
518 }                                                 471 }
519                                                   472 
520 static inline unsigned int dma_get_max_seg_siz    473 static inline unsigned int dma_get_max_seg_size(struct device *dev)
521 {                                                 474 {
522         if (dev->dma_parms && dev->dma_parms->    475         if (dev->dma_parms && dev->dma_parms->max_segment_size)
523                 return dev->dma_parms->max_seg    476                 return dev->dma_parms->max_segment_size;
524         return SZ_64K;                            477         return SZ_64K;
525 }                                                 478 }
526                                                   479 
527 static inline void dma_set_max_seg_size(struct !! 480 static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
528 {                                                 481 {
529         if (WARN_ON_ONCE(!dev->dma_parms))     !! 482         if (dev->dma_parms) {
530                 return;                        !! 483                 dev->dma_parms->max_segment_size = size;
531         dev->dma_parms->max_segment_size = siz !! 484                 return 0;
                                                   >> 485         }
                                                   >> 486         return -EIO;
532 }                                                 487 }
533                                                   488 
534 static inline unsigned long dma_get_seg_bounda    489 static inline unsigned long dma_get_seg_boundary(struct device *dev)
535 {                                                 490 {
536         if (dev->dma_parms && dev->dma_parms->    491         if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
537                 return dev->dma_parms->segment    492                 return dev->dma_parms->segment_boundary_mask;
538         return ULONG_MAX;                         493         return ULONG_MAX;
539 }                                                 494 }
540                                                   495 
541 /**                                               496 /**
542  * dma_get_seg_boundary_nr_pages - return the     497  * dma_get_seg_boundary_nr_pages - return the segment boundary in "page" units
 543  * @dev: device to query the boundary for         498  * @dev: device to query the boundary for
544  * @page_shift: ilog() of the IOMMU page size     499  * @page_shift: ilog() of the IOMMU page size
545  *                                                500  *
546  * Return the segment boundary in IOMMU page u    501  * Return the segment boundary in IOMMU page units (which may be different from
547  * the CPU page size) for the passed in device    502  * the CPU page size) for the passed in device.
548  *                                                503  *
549  * If @dev is NULL a boundary of U32_MAX is as    504  * If @dev is NULL a boundary of U32_MAX is assumed, this case is just for
550  * non-DMA API callers.                           505  * non-DMA API callers.
551  */                                               506  */
552 static inline unsigned long dma_get_seg_bounda    507 static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev,
553                 unsigned int page_shift)          508                 unsigned int page_shift)
554 {                                                 509 {
555         if (!dev)                                 510         if (!dev)
556                 return (U32_MAX >> page_shift)    511                 return (U32_MAX >> page_shift) + 1;
557         return (dma_get_seg_boundary(dev) >> p    512         return (dma_get_seg_boundary(dev) >> page_shift) + 1;
558 }                                                 513 }
559                                                   514 
560 static inline void dma_set_seg_boundary(struct !! 515 static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
561 {                                                 516 {
562         if (WARN_ON_ONCE(!dev->dma_parms))     !! 517         if (dev->dma_parms) {
563                 return;                        !! 518                 dev->dma_parms->segment_boundary_mask = mask;
564         dev->dma_parms->segment_boundary_mask  !! 519                 return 0;
                                                   >> 520         }
                                                   >> 521         return -EIO;
565 }                                                 522 }
566                                                   523 
567 static inline unsigned int dma_get_min_align_m    524 static inline unsigned int dma_get_min_align_mask(struct device *dev)
568 {                                                 525 {
569         if (dev->dma_parms)                       526         if (dev->dma_parms)
570                 return dev->dma_parms->min_ali    527                 return dev->dma_parms->min_align_mask;
571         return 0;                                 528         return 0;
572 }                                                 529 }
573                                                   530 
574 static inline void dma_set_min_align_mask(stru !! 531 static inline int dma_set_min_align_mask(struct device *dev,
575                 unsigned int min_align_mask)      532                 unsigned int min_align_mask)
576 {                                                 533 {
577         if (WARN_ON_ONCE(!dev->dma_parms))        534         if (WARN_ON_ONCE(!dev->dma_parms))
578                 return;                        !! 535                 return -EIO;
579         dev->dma_parms->min_align_mask = min_a    536         dev->dma_parms->min_align_mask = min_align_mask;
                                                   >> 537         return 0;
580 }                                                 538 }
581                                                   539 
582 #ifndef dma_get_cache_alignment                   540 #ifndef dma_get_cache_alignment
583 static inline int dma_get_cache_alignment(void    541 static inline int dma_get_cache_alignment(void)
584 {                                                 542 {
585 #ifdef ARCH_HAS_DMA_MINALIGN                      543 #ifdef ARCH_HAS_DMA_MINALIGN
586         return ARCH_DMA_MINALIGN;                 544         return ARCH_DMA_MINALIGN;
587 #endif                                            545 #endif
588         return 1;                                 546         return 1;
589 }                                                 547 }
590 #endif                                            548 #endif
591                                                   549 
592 static inline void *dmam_alloc_coherent(struct    550 static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
593                 dma_addr_t *dma_handle, gfp_t     551                 dma_addr_t *dma_handle, gfp_t gfp)
594 {                                                 552 {
595         return dmam_alloc_attrs(dev, size, dma    553         return dmam_alloc_attrs(dev, size, dma_handle, gfp,
596                         (gfp & __GFP_NOWARN) ?    554                         (gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
597 }                                                 555 }
598                                                   556 
599 static inline void *dma_alloc_wc(struct device    557 static inline void *dma_alloc_wc(struct device *dev, size_t size,
600                                  dma_addr_t *d    558                                  dma_addr_t *dma_addr, gfp_t gfp)
601 {                                                 559 {
602         unsigned long attrs = DMA_ATTR_WRITE_C    560         unsigned long attrs = DMA_ATTR_WRITE_COMBINE;
603                                                   561 
604         if (gfp & __GFP_NOWARN)                   562         if (gfp & __GFP_NOWARN)
605                 attrs |= DMA_ATTR_NO_WARN;        563                 attrs |= DMA_ATTR_NO_WARN;
606                                                   564 
607         return dma_alloc_attrs(dev, size, dma_    565         return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
608 }                                                 566 }
609                                                   567 
610 static inline void dma_free_wc(struct device *    568 static inline void dma_free_wc(struct device *dev, size_t size,
611                                void *cpu_addr,    569                                void *cpu_addr, dma_addr_t dma_addr)
612 {                                                 570 {
613         return dma_free_attrs(dev, size, cpu_a    571         return dma_free_attrs(dev, size, cpu_addr, dma_addr,
614                               DMA_ATTR_WRITE_C    572                               DMA_ATTR_WRITE_COMBINE);
615 }                                                 573 }
616                                                   574 
617 static inline int dma_mmap_wc(struct device *d    575 static inline int dma_mmap_wc(struct device *dev,
618                               struct vm_area_s    576                               struct vm_area_struct *vma,
619                               void *cpu_addr,     577                               void *cpu_addr, dma_addr_t dma_addr,
620                               size_t size)        578                               size_t size)
621 {                                                 579 {
622         return dma_mmap_attrs(dev, vma, cpu_ad    580         return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
623                               DMA_ATTR_WRITE_C    581                               DMA_ATTR_WRITE_COMBINE);
624 }                                                 582 }
625                                                   583 
626 #ifdef CONFIG_NEED_DMA_MAP_STATE                  584 #ifdef CONFIG_NEED_DMA_MAP_STATE
627 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)          585 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
628 #define DEFINE_DMA_UNMAP_LEN(LEN_NAME)            586 #define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
629 #define dma_unmap_addr(PTR, ADDR_NAME)            587 #define dma_unmap_addr(PTR, ADDR_NAME)           ((PTR)->ADDR_NAME)
630 #define dma_unmap_addr_set(PTR, ADDR_NAME, VAL    588 #define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  (((PTR)->ADDR_NAME) = (VAL))
631 #define dma_unmap_len(PTR, LEN_NAME)              589 #define dma_unmap_len(PTR, LEN_NAME)             ((PTR)->LEN_NAME)
632 #define dma_unmap_len_set(PTR, LEN_NAME, VAL)     590 #define dma_unmap_len_set(PTR, LEN_NAME, VAL)    (((PTR)->LEN_NAME) = (VAL))
633 #else                                             591 #else
634 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)          592 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
635 #define DEFINE_DMA_UNMAP_LEN(LEN_NAME)            593 #define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
636 #define dma_unmap_addr(PTR, ADDR_NAME)            594 #define dma_unmap_addr(PTR, ADDR_NAME)           (0)
637 #define dma_unmap_addr_set(PTR, ADDR_NAME, VAL    595 #define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)  do { } while (0)
638 #define dma_unmap_len(PTR, LEN_NAME)              596 #define dma_unmap_len(PTR, LEN_NAME)             (0)
639 #define dma_unmap_len_set(PTR, LEN_NAME, VAL)     597 #define dma_unmap_len_set(PTR, LEN_NAME, VAL)    do { } while (0)
640 #endif                                            598 #endif
641                                                   599 
642 #endif /* _LINUX_DMA_MAPPING_H */                 600 #endif /* _LINUX_DMA_MAPPING_H */
643                                                   601 
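
To round out the allocation side of the interface shown above, a minimal sketch combining dma_set_mask_and_coherent(), dma_alloc_coherent() and dma_free_coherent(); the 48-bit mask, the SZ_4K size and the probe-style function are illustrative assumptions, not taken from this header.

/* Illustrative only: mask, size and function name are placeholders. */
static int example_probe(struct device *dev)
{
        dma_addr_t dma_handle;
        void *vaddr;

        if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)))
                return -EIO;

        vaddr = dma_alloc_coherent(dev, SZ_4K, &dma_handle, GFP_KERNEL);
        if (!vaddr)
                return -ENOMEM;

        /* ... hand dma_handle to the device, access vaddr from the CPU ... */

        dma_free_coherent(dev, SZ_4K, vaddr, dma_handle);
        return 0;
}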
