
TOMOYO Linux Cross Reference
Linux/include/linux/dma-resv.h

Diff markup

Differences between /include/linux/dma-resv.h (Version linux-6.11.5) and /include/linux/dma-resv.h (Version linux-5.11.22)


Shown below as a merged diff: unprefixed lines are common to both versions, lines prefixed "-" appear only in linux-5.11.22, and lines prefixed "+" appear only in linux-6.11.5.

 /*
  * Header file for reservations for dma-buf and ttm
  *
  * Copyright(C) 2011 Linaro Limited. All rights reserved.
  * Copyright (C) 2012-2013 Canonical Ltd
  * Copyright (C) 2012 Texas Instruments
  *
  * Authors:
  * Rob Clark <robdclark@gmail.com>
  * Maarten Lankhorst <maarten.lankhorst@canonical.com>
  * Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  *
  * Based on bo.c which bears the following copyright notice,
  * but is dual licensed:
  *
  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
  * "Software"), to deal in the Software without restriction, including
  * without limitation the rights to use, copy, modify, merge, publish,
  * distribute, sub license, and/or sell copies of the Software, and to
  * permit persons to whom the Software is furnished to do so, subject to
  * the following conditions:
  *
  * The above copyright notice and this permission notice (including the
  * next paragraph) shall be included in all copies or substantial portions
  * of the Software.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  * USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
 #ifndef _LINUX_RESERVATION_H
 #define _LINUX_RESERVATION_H
 
 #include <linux/ww_mutex.h>
 #include <linux/dma-fence.h>
 #include <linux/slab.h>
 #include <linux/seqlock.h>
 #include <linux/rcupdate.h>
 
 extern struct ww_class reservation_ww_class;
 
+struct dma_resv_list;
+
 /**
- * struct dma_resv_list - a list of shared fences
- * @rcu: for internal use
- * @shared_count: table of shared fences
- * @shared_max: for growing shared fence table
- * @shared: shared fence table
- */
-struct dma_resv_list {
-        struct rcu_head rcu;
-        u32 shared_count, shared_max;
-        struct dma_fence __rcu *shared[];
+ * enum dma_resv_usage - how the fences from a dma_resv obj are used
+ *
+ * This enum describes the different use cases for a dma_resv object and
+ * controls which fences are returned when queried.
+ *
+ * An important fact is that there is the order KERNEL<WRITE<READ<BOOKKEEP
+ * when the dma_resv object is asked for fences for one use case the fences
+ * for the lower use case are returned as well.
+ *
+ * For example when asking for WRITE fences then the KERNEL fences are returned
+ * as well. Similar when asked for READ fences then both WRITE and KERNEL
+ * fences are returned as well.
+ *
+ * Already used fences can be promoted in the sense that a fence with
+ * DMA_RESV_USAGE_BOOKKEEP could become DMA_RESV_USAGE_READ by adding it again
+ * with this usage. But fences can never be degraded in the sense that a fence
+ * with DMA_RESV_USAGE_WRITE could become DMA_RESV_USAGE_READ.
+ */
+enum dma_resv_usage {
+        /**
+         * @DMA_RESV_USAGE_KERNEL: For in kernel memory management related use cases.
+         *
+         * This should only be used for things like copying or clearing memory
+         * with a DMA hardware engine for the purpose of kernel memory
+         * management.
+         *
+         * Drivers *always* must wait for those fences before accessing the
+         * resource protected by the dma_resv object. The only exception for
+         * that is when the resource is known to be locked down in place by
+         * pinning it previously.
+         */
+        DMA_RESV_USAGE_KERNEL,
+
+        /**
+         * @DMA_RESV_USAGE_WRITE: Implicit write synchronization.
+         *
+         * This should only be used for userspace command submissions which add
+         * an implicit write dependency.
+         */
+        DMA_RESV_USAGE_WRITE,
+
+        /**
+         * @DMA_RESV_USAGE_READ: Implicit read synchronization.
+         *
+         * This should only be used for userspace command submissions which add
+         * an implicit read dependency.
+         */
+        DMA_RESV_USAGE_READ,
+
+        /**
+         * @DMA_RESV_USAGE_BOOKKEEP: No implicit sync.
+         *
+         * This should be used by submissions which don't want to participate in
+         * any implicit synchronization.
+         *
+         * The most common case are preemption fences, page table updates, TLB
+         * flushes as well as explicit synced user submissions.
+         *
+         * Explicit synced user user submissions can be promoted to
+         * DMA_RESV_USAGE_READ or DMA_RESV_USAGE_WRITE as needed using
+         * dma_buf_import_sync_file() when implicit synchronization should
+         * become necessary after initial adding of the fence.
+         */
+        DMA_RESV_USAGE_BOOKKEEP
 };
 
 /**
+ * dma_resv_usage_rw - helper for implicit sync
+ * @write: true if we create a new implicit sync write
+ *
+ * This returns the implicit synchronization usage for write or read accesses,
+ * see enum dma_resv_usage and &dma_buf.resv.
+ */
+static inline enum dma_resv_usage dma_resv_usage_rw(bool write)
+{
+        /* This looks confusing at first sight, but is indeed correct.
+         *
+         * The rational is that new write operations needs to wait for the
+         * existing read and write operations to finish.
+         * But a new read operation only needs to wait for the existing write
+         * operations to finish.
+         */
+        return write ? DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE;
+}
+
+/**
  * struct dma_resv - a reservation object manages fences for a buffer
- * @lock: update side lock
- * @seq: sequence count for managing RCU read-side synchronization
- * @fence_excl: the exclusive fence, if there is one currently
- * @fence: list of current shared fences
+ *
+ * This is a container for dma_fence objects which needs to handle multiple use
+ * cases.
+ *
+ * One use is to synchronize cross-driver access to a struct dma_buf, either for
+ * dynamic buffer management or just to handle implicit synchronization between
+ * different users of the buffer in userspace. See &dma_buf.resv for a more
+ * in-depth discussion.
+ *
+ * The other major use is to manage access and locking within a driver in a
+ * buffer based memory manager. struct ttm_buffer_object is the canonical
+ * example here, since this is where reservation objects originated from. But
+ * use in drivers is spreading and some drivers also manage struct
+ * drm_gem_object with the same scheme.
  */
 struct dma_resv {
+        /**
+         * @lock:
+         *
+         * Update side lock. Don't use directly, instead use the wrapper
+         * functions like dma_resv_lock() and dma_resv_unlock().
+         *
+         * Drivers which use the reservation object to manage memory dynamically
+         * also use this lock to protect buffer object state like placement,
+         * allocation policies or throughout command submission.
+         */
         struct ww_mutex lock;
-        seqcount_ww_mutex_t seq;
 
-        struct dma_fence __rcu *fence_excl;
-        struct dma_resv_list __rcu *fence;
+        /**
+         * @fences:
+         *
+         * Array of fences which where added to the dma_resv object
+         *
+         * A new fence is added by calling dma_resv_add_fence(). Since this
+         * often needs to be done past the point of no return in command
+         * submission it cannot fail, and therefore sufficient slots need to be
+         * reserved by calling dma_resv_reserve_fences().
+         */
+        struct dma_resv_list __rcu *fences;
 };
 
-#define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
-#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
-
-/**
- * dma_resv_get_list - get the reservation object's
- * shared fence list, with update-side lock held
- * @obj: the reservation object
- *
- * Returns the shared fence list.  Does NOT take references to
- * the fence.  The obj->lock must be held.
- */
-static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj)
-{
-        return rcu_dereference_protected(obj->fence,
-                                         dma_resv_held(obj));
-}
+/**
+ * struct dma_resv_iter - current position into the dma_resv fences
+ *
+ * Don't touch this directly in the driver, use the accessor function instead.
+ *
+ * IMPORTANT
+ *
+ * When using the lockless iterators like dma_resv_iter_next_unlocked() or
+ * dma_resv_for_each_fence_unlocked() beware that the iterator can be restarted.
+ * Code which accumulates statistics or similar needs to check for this with
+ * dma_resv_iter_is_restarted().
+ */
+struct dma_resv_iter {
+        /** @obj: The dma_resv object we iterate over */
+        struct dma_resv *obj;
+
+        /** @usage: Return fences with this usage or lower. */
+        enum dma_resv_usage usage;
+
+        /** @fence: the currently handled fence */
+        struct dma_fence *fence;
+
+        /** @fence_usage: the usage of the current fence */
+        enum dma_resv_usage fence_usage;
+
+        /** @index: index into the shared fences */
+        unsigned int index;
+
+        /** @fences: the shared fences; private, *MUST* not dereference */
+        struct dma_resv_list *fences;
+
+        /** @num_fences: number of fences */
+        unsigned int num_fences;
+
+        /** @is_restarted: true if this is the first returned fence */
+        bool is_restarted;
+};
+
+struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor);
+struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor);
+struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor);
+struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor);
+
+/**
+ * dma_resv_iter_begin - initialize a dma_resv_iter object
+ * @cursor: The dma_resv_iter object to initialize
+ * @obj: The dma_resv object which we want to iterate over
+ * @usage: controls which fences to include, see enum dma_resv_usage.
+ */
+static inline void dma_resv_iter_begin(struct dma_resv_iter *cursor,
+                                       struct dma_resv *obj,
+                                       enum dma_resv_usage usage)
+{
+        cursor->obj = obj;
+        cursor->usage = usage;
+        cursor->fence = NULL;
+}
+
+/**
+ * dma_resv_iter_end - cleanup a dma_resv_iter object
+ * @cursor: the dma_resv_iter object which should be cleaned up
+ *
+ * Make sure that the reference to the fence in the cursor is properly
+ * dropped.
+ */
+static inline void dma_resv_iter_end(struct dma_resv_iter *cursor)
+{
+        dma_fence_put(cursor->fence);
+}
+
+/**
+ * dma_resv_iter_usage - Return the usage of the current fence
+ * @cursor: the cursor of the current position
+ *
+ * Returns the usage of the currently processed fence.
+ */
+static inline enum dma_resv_usage
+dma_resv_iter_usage(struct dma_resv_iter *cursor)
+{
+        return cursor->fence_usage;
+}
+
+/**
+ * dma_resv_iter_is_restarted - test if this is the first fence after a restart
+ * @cursor: the cursor with the current position
+ *
+ * Return true if this is the first fence in an iteration after a restart.
+ */
+static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor)
+{
+        return cursor->is_restarted;
+}
+
+/**
+ * dma_resv_for_each_fence_unlocked - unlocked fence iterator
+ * @cursor: a struct dma_resv_iter pointer
+ * @fence: the current fence
+ *
+ * Iterate over the fences in a struct dma_resv object without holding the
+ * &dma_resv.lock and using RCU instead. The cursor needs to be initialized
+ * with dma_resv_iter_begin() and cleaned up with dma_resv_iter_end(). Inside
+ * the iterator a reference to the dma_fence is held and the RCU lock dropped.
+ *
+ * Beware that the iterator can be restarted when the struct dma_resv for
+ * @cursor is modified. Code which accumulates statistics or similar needs to
+ * check for this with dma_resv_iter_is_restarted(). For this reason prefer the
+ * lock iterator dma_resv_for_each_fence() whenever possible.
+ */
+#define dma_resv_for_each_fence_unlocked(cursor, fence)                 \
+        for (fence = dma_resv_iter_first_unlocked(cursor);              \
+             fence; fence = dma_resv_iter_next_unlocked(cursor))
+
+/**
+ * dma_resv_for_each_fence - fence iterator
+ * @cursor: a struct dma_resv_iter pointer
+ * @obj: a dma_resv object pointer
+ * @usage: controls which fences to return
+ * @fence: the current fence
+ *
+ * Iterate over the fences in a struct dma_resv object while holding the
+ * &dma_resv.lock. @all_fences controls if the shared fences are returned as
+ * well. The cursor initialisation is part of the iterator and the fence stays
+ * valid as long as the lock is held and so no extra reference to the fence is
+ * taken.
+ */
+#define dma_resv_for_each_fence(cursor, obj, usage, fence)      \
+        for (dma_resv_iter_begin(cursor, obj, usage),           \
+             fence = dma_resv_iter_first(cursor); fence;        \
+             fence = dma_resv_iter_next(cursor))
+
+#define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
+#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
+
+#ifdef CONFIG_DEBUG_MUTEXES
+void dma_resv_reset_max_fences(struct dma_resv *obj);
+#else
+static inline void dma_resv_reset_max_fences(struct dma_resv *obj) {}
+#endif
 
 /**
  * dma_resv_lock - lock the reservation object
  * @obj: the reservation object
  * @ctx: the locking context
  *
  * Locks the reservation object for exclusive access and modification. Note,
  * that the lock is only against other writers, readers will run concurrently
  * with a writer under RCU. The seqlock is used to notify readers if they
  * overlap with a writer.
  *
  * As the reservation object may be locked by multiple parties in an
  * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
  * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
  * object may be locked by itself by passing NULL as @ctx.
+ *
+ * When a die situation is indicated by returning -EDEADLK all locks held by
+ * @ctx must be unlocked and then dma_resv_lock_slow() called on @obj.
+ *
+ * Unlocked by calling dma_resv_unlock().
+ *
+ * See also dma_resv_lock_interruptible() for the interruptible variant.
  */
 static inline int dma_resv_lock(struct dma_resv *obj,
                                 struct ww_acquire_ctx *ctx)
 {
         return ww_mutex_lock(&obj->lock, ctx);
 }
 
 /**
  * dma_resv_lock_interruptible - lock the reservation object
  * @obj: the reservation object
  * @ctx: the locking context
  *
  * Locks the reservation object interruptible for exclusive access and
  * modification. Note, that the lock is only against other writers, readers
  * will run concurrently with a writer under RCU. The seqlock is used to
  * notify readers if they overlap with a writer.
  *
  * As the reservation object may be locked by multiple parties in an
  * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
  * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
  * object may be locked by itself by passing NULL as @ctx.
+ *
+ * When a die situation is indicated by returning -EDEADLK all locks held by
+ * @ctx must be unlocked and then dma_resv_lock_slow_interruptible() called on
+ * @obj.
+ *
+ * Unlocked by calling dma_resv_unlock().
  */
 static inline int dma_resv_lock_interruptible(struct dma_resv *obj,
                                               struct ww_acquire_ctx *ctx)
 {
         return ww_mutex_lock_interruptible(&obj->lock, ctx);
 }
 
 /**
  * dma_resv_lock_slow - slowpath lock the reservation object
  * @obj: the reservation object
  * @ctx: the locking context
  *
  * Acquires the reservation object after a die case. This function
  * will sleep until the lock becomes available. See dma_resv_lock() as
  * well.
+ *
+ * See also dma_resv_lock_slow_interruptible() for the interruptible variant.
  */
 static inline void dma_resv_lock_slow(struct dma_resv *obj,
                                       struct ww_acquire_ctx *ctx)
 {
         ww_mutex_lock_slow(&obj->lock, ctx);
 }
 
 /**
  * dma_resv_lock_slow_interruptible - slowpath lock the reservation
  * object, interruptible
  * @obj: the reservation object
  * @ctx: the locking context
  *
  * Acquires the reservation object interruptible after a die case. This function
  * will sleep until the lock becomes available. See
  * dma_resv_lock_interruptible() as well.
  */
 static inline int dma_resv_lock_slow_interruptible(struct dma_resv *obj,
                                                    struct ww_acquire_ctx *ctx)
 {
         return ww_mutex_lock_slow_interruptible(&obj->lock, ctx);
 }
 
 /**
  * dma_resv_trylock - trylock the reservation object
  * @obj: the reservation object
  *
  * Tries to lock the reservation object for exclusive access and modification.
  * Note, that the lock is only against other writers, readers will run
  * concurrently with a writer under RCU. The seqlock is used to notify readers
  * if they overlap with a writer.
  *
  * Also note that since no context is provided, no deadlock protection is
- * possible.
+ * possible, which is also not needed for a trylock.
  *
  * Returns true if the lock was acquired, false otherwise.
  */
 static inline bool __must_check dma_resv_trylock(struct dma_resv *obj)
 {
-        return ww_mutex_trylock(&obj->lock);
+        return ww_mutex_trylock(&obj->lock, NULL);
 }
 
 /**
  * dma_resv_is_locked - is the reservation object locked
  * @obj: the reservation object
  *
  * Returns true if the mutex is locked, false if unlocked.
  */
 static inline bool dma_resv_is_locked(struct dma_resv *obj)
 {
         return ww_mutex_is_locked(&obj->lock);
 }
 
 /**
  * dma_resv_locking_ctx - returns the context used to lock the object
  * @obj: the reservation object
  *
  * Returns the context used to lock a reservation object or NULL if no context
  * was used or the object is not locked at all.
+ *
+ * WARNING: This interface is pretty horrible, but TTM needs it because it
+ * doesn't pass the struct ww_acquire_ctx around.
+ * Everyone else just uses it to check whether the dma-resv is locked, or
+ * not.
  */
 static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj)
 {
         return READ_ONCE(obj->lock.ctx);
 }
 
 /**
  * dma_resv_unlock - unlock the reservation object
  * @obj: the reservation object
  *
  * Unlocks the reservation object following exclusive access.
  */
 static inline void dma_resv_unlock(struct dma_resv *obj)
 {
-#ifdef CONFIG_DEBUG_MUTEXES
-        /* Test shared fence slot reservation */
-        if (rcu_access_pointer(obj->fence)) {
-                struct dma_resv_list *fence = dma_resv_get_list(obj);
-
-                fence->shared_max = fence->shared_count;
-        }
-#endif
+        dma_resv_reset_max_fences(obj);
         ww_mutex_unlock(&obj->lock);
 }
 
-/**
- * dma_resv_get_excl - get the reservation object's
- * exclusive fence, with update-side lock held
- * @obj: the reservation object
- *
- * Returns the exclusive fence (if any).  Does NOT take a
- * reference. Writers must hold obj->lock, readers may only
- * hold a RCU read side lock.
- *
- * RETURNS
- * The exclusive fence or NULL
- */
-static inline struct dma_fence *
-dma_resv_get_excl(struct dma_resv *obj)
-{
-        return rcu_dereference_protected(obj->fence_excl,
-                                         dma_resv_held(obj));
-}
-
-/**
- * dma_resv_get_excl_rcu - get the reservation object's
- * exclusive fence, without lock held.
- * @obj: the reservation object
- *
- * If there is an exclusive fence, this atomically increments it's
- * reference count and returns it.
- *
- * RETURNS
- * The exclusive fence or NULL if none
- */
-static inline struct dma_fence *
-dma_resv_get_excl_rcu(struct dma_resv *obj)
-{
-        struct dma_fence *fence;
-
-        if (!rcu_access_pointer(obj->fence_excl))
-                return NULL;
-
-        rcu_read_lock();
-        fence = dma_fence_get_rcu_safe(&obj->fence_excl);
-        rcu_read_unlock();
-
-        return fence;
-}
-
 void dma_resv_init(struct dma_resv *obj);
 void dma_resv_fini(struct dma_resv *obj);
-int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
-void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
-
-void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
-
-int dma_resv_get_fences_rcu(struct dma_resv *obj,
-                            struct dma_fence **pfence_excl,
-                            unsigned *pshared_count,
-                            struct dma_fence ***pshared);
-
+int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences);
+void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
+                        enum dma_resv_usage usage);
+void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
+                             struct dma_fence *fence,
+                             enum dma_resv_usage usage);
+int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
+                        unsigned int *num_fences, struct dma_fence ***fences);
+int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage,
+                           struct dma_fence **fence);
 int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
-
-long dma_resv_wait_timeout_rcu(struct dma_resv *obj, bool wait_all, bool intr,
-                               unsigned long timeout);
-
-bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all);
+long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
+                           bool intr, unsigned long timeout);
+void dma_resv_set_deadline(struct dma_resv *obj, enum dma_resv_usage usage,
+                           ktime_t deadline);
+bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage);
+void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq);
 
 #endif /* _LINUX_RESERVATION_H */
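The change that dominates this diff is the replacement of the exclusive/shared fence split (dma_resv_add_excl_fence(), dma_resv_add_shared_fence(), dma_resv_reserve_shared()) with a single fence array qualified by enum dma_resv_usage. As a minimal, non-authoritative sketch of the new submission pattern (my_publish_fence() and its parameters are invented for illustration; the dma_resv calls are the ones declared above):

/* Hypothetical driver helper, not part of this header. */
static int my_publish_fence(struct dma_resv *resv, struct dma_fence *fence,
                            bool is_write)
{
        int ret;

        ret = dma_resv_lock(resv, NULL);        /* single object, no ctx */
        if (ret)
                return ret;

        /*
         * Reserve a slot first: dma_resv_add_fence() itself cannot fail,
         * so the allocation happens in dma_resv_reserve_fences().
         */
        ret = dma_resv_reserve_fences(resv, 1);
        if (!ret)
                dma_resv_add_fence(resv, fence,
                                   is_write ? DMA_RESV_USAGE_WRITE
                                            : DMA_RESV_USAGE_READ);

        dma_resv_unlock(resv);
        return ret;
}

For implicit sync the usage would normally be picked with dma_resv_usage_rw() on the query side, per the helper's comment in the diff.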

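The kernel-doc added to dma_resv_lock() spells out the wound/wait contract: when -EDEADLK is returned, every lock held under the ww_acquire_ctx must be dropped and the contended object reacquired via the slowpath. A rough sketch of that dance for two objects (my_lock_pair() is invented; on success the caller holds both locks and must eventually unlock them and call ww_acquire_fini()):

/* Hypothetical helper showing the -EDEADLK backoff described above. */
static int my_lock_pair(struct dma_resv *a, struct dma_resv *b,
                        struct ww_acquire_ctx *ctx)
{
        int ret;

        ww_acquire_init(ctx, &reservation_ww_class);

        ret = dma_resv_lock_interruptible(a, ctx);
        if (ret)
                goto out_fini;

        for (;;) {
                ret = dma_resv_lock_interruptible(b, ctx);
                if (!ret)
                        break;

                dma_resv_unlock(a);
                if (ret != -EDEADLK)    /* e.g. -EINTR */
                        goto out_fini;

                /* Die case: sleep on the contended lock, then retry. */
                ret = dma_resv_lock_slow_interruptible(b, ctx);
                if (ret)
                        goto out_fini;
                swap(a, b);
        }

        ww_acquire_done(ctx);
        return 0;

out_fini:
        ww_acquire_fini(ctx);
        return ret;
}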

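The diff also introduces the fence cursor (struct dma_resv_iter) and its iterators. The lockless variant can restart whenever the reservation object is modified, which is why the comments insist that accumulated state be reset via dma_resv_iter_is_restarted(). A small sketch under those rules (my_count_signaled() is invented; DMA_RESV_USAGE_BOOKKEEP is the highest usage in the enum's order and therefore returns all fences):

/* Hypothetical helper: count signaled fences without taking the lock. */
static unsigned int my_count_signaled(struct dma_resv *resv)
{
        struct dma_resv_iter cursor;
        struct dma_fence *fence;
        unsigned int signaled = 0;

        dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP);
        dma_resv_for_each_fence_unlocked(&cursor, fence) {
                /* A restart means previously counted fences may be stale. */
                if (dma_resv_iter_is_restarted(&cursor))
                        signaled = 0;
                if (dma_fence_is_signaled(fence))
                        signaled++;
        }
        dma_resv_iter_end(&cursor);

        return signaled;
}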