
TOMOYO Linux Cross Reference
Linux/include/linux/dma-resv.h


Diff markup

Differences between /include/linux/dma-resv.h (Version linux-6.11.5) and /include/linux/dma-resv.h (Version linux-5.5.19)


--- include/linux/dma-resv.h	(linux-5.5.19)
+++ include/linux/dma-resv.h	(linux-6.11.5)
 /*
  * Header file for reservations for dma-buf and ttm
  *
  * Copyright(C) 2011 Linaro Limited. All rights reserved.
  * Copyright (C) 2012-2013 Canonical Ltd
  * Copyright (C) 2012 Texas Instruments
  *
  * Authors:
  * Rob Clark <robdclark@gmail.com>
  * Maarten Lankhorst <maarten.lankhorst@canonical.com>
  * Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  *
  * Based on bo.c which bears the following copyright notice,
  * but is dual licensed:
  *
  * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the
  * "Software"), to deal in the Software without restriction, including
  * without limitation the rights to use, copy, modify, merge, publish,
  * distribute, sub license, and/or sell copies of the Software, and to
  * permit persons to whom the Software is furnished to do so, subject to
  * the following conditions:
  *
  * The above copyright notice and this permission notice (including the
  * next paragraph) shall be included in all copies or substantial portions
  * of the Software.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  * USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
 #ifndef _LINUX_RESERVATION_H
 #define _LINUX_RESERVATION_H
 
 #include <linux/ww_mutex.h>
 #include <linux/dma-fence.h>
 #include <linux/slab.h>
 #include <linux/seqlock.h>
 #include <linux/rcupdate.h>
 
 extern struct ww_class reservation_ww_class;
-extern struct lock_class_key reservation_seqcount_class;
-extern const char reservation_seqcount_string[];
+
+struct dma_resv_list;
 
 /**
- * struct dma_resv_list - a list of shared fences
- * @rcu: for internal use
- * @shared_count: table of shared fences
- * @shared_max: for growing shared fence table
- * @shared: shared fence table
- */
-struct dma_resv_list {
-        struct rcu_head rcu;
-        u32 shared_count, shared_max;
-        struct dma_fence __rcu *shared[];
+ * enum dma_resv_usage - how the fences from a dma_resv obj are used
+ *
+ * This enum describes the different use cases for a dma_resv object and
+ * controls which fences are returned when queried.
+ *
+ * An important fact is that there is the order KERNEL<WRITE<READ<BOOKKEEP and
+ * when the dma_resv object is asked for fences for one use case the fences
+ * for the lower use case are returned as well.
+ *
+ * For example when asking for WRITE fences then the KERNEL fences are returned
+ * as well. Similar when asked for READ fences then both WRITE and KERNEL
+ * fences are returned as well.
+ *
+ * Already used fences can be promoted in the sense that a fence with
+ * DMA_RESV_USAGE_BOOKKEEP could become DMA_RESV_USAGE_READ by adding it again
+ * with this usage. But fences can never be degraded in the sense that a fence
+ * with DMA_RESV_USAGE_WRITE could become DMA_RESV_USAGE_READ.
+ */
+enum dma_resv_usage {
+        /**
+         * @DMA_RESV_USAGE_KERNEL: For in kernel memory management only.
+         *
+         * This should only be used for things like copying or clearing memory
+         * with a DMA hardware engine for the purpose of kernel memory
+         * management.
+         *
+         * Drivers *always* must wait for those fences before accessing the
+         * resource protected by the dma_resv object. The only exception for
+         * that is when the resource is known to be locked down in place by
+         * pinning it previously.
+         */
+        DMA_RESV_USAGE_KERNEL,
+
+        /**
+         * @DMA_RESV_USAGE_WRITE: Implicit write synchronization.
+         *
+         * This should only be used for userspace command submissions which add
+         * an implicit write dependency.
+         */
+        DMA_RESV_USAGE_WRITE,
+
+        /**
+         * @DMA_RESV_USAGE_READ: Implicit read synchronization.
+         *
+         * This should only be used for userspace command submissions which add
+         * an implicit read dependency.
+         */
+        DMA_RESV_USAGE_READ,
+
+        /**
+         * @DMA_RESV_USAGE_BOOKKEEP: No implicit sync.
+         *
+         * This should be used by submissions which don't want to participate in
+         * any implicit synchronization.
+         *
+         * The most common case are preemption fences, page table updates, TLB
+         * flushes as well as explicit synced user submissions.
+         *
+         * Explicit synced user user submissions can be promoted to
+         * DMA_RESV_USAGE_READ or DMA_RESV_USAGE_WRITE as needed using
+         * dma_buf_import_sync_file() when implicit synchronization should
+         * become necessary after initial adding of the fence.
+         */
+        DMA_RESV_USAGE_BOOKKEEP
 };
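
The usage classes above are easiest to see from a driver's submission path. Below is a minimal, illustrative sketch (not taken from any in-tree driver; my_driver_publish_fence() and its flags are hypothetical) of classifying the fence a job publishes; it assumes the object is locked and a slot was reserved, as in the reserve/add sketch further down:

        /* Hypothetical command-submission tail: publish the job's fence
         * with a usage class matching what the job does to the buffer.
         */
        static void my_driver_publish_fence(struct dma_resv *resv,
                                            struct dma_fence *fence,
                                            bool writes, bool implicit_sync)
        {
                enum dma_resv_usage usage;

                if (!implicit_sync)
                        usage = DMA_RESV_USAGE_BOOKKEEP; /* opt out of implicit sync */
                else
                        usage = writes ? DMA_RESV_USAGE_WRITE
                                       : DMA_RESV_USAGE_READ;

                dma_resv_add_fence(resv, fence, usage);
        }
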
 
 /**
+ * dma_resv_usage_rw - helper for implicit sync
+ * @write: true if we create a new implicit sync write
+ *
+ * This returns the implicit synchronization usage for write or read accesses,
+ * see enum dma_resv_usage and &dma_buf.resv.
+ */
+static inline enum dma_resv_usage dma_resv_usage_rw(bool write)
+{
+        /* This looks confusing at first sight, but is indeed correct.
+         *
+         * The rational is that new write operations needs to wait for the
+         * existing read and write operations to finish.
+         * But a new read operation only needs to wait for the existing write
+         * operations to finish.
+         */
+        return write ? DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE;
+}
+
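
The helper answers "which existing fences must a new access wait for": a new write waits for READ (which by the ordering above includes WRITE and KERNEL), a new read only waits for WRITE. A hedged caller-side sketch, where resv and write are assumed to come from the surrounding code:

        /* CPU-side example: block until every fence a new access of this
         * kind would have to wait for has signaled.
         */
        long ret = dma_resv_wait_timeout(resv, dma_resv_usage_rw(write),
                                         true, MAX_SCHEDULE_TIMEOUT);
        if (ret < 0)
                return ret;     /* e.g. -ERESTARTSYS */
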
+/**
  * struct dma_resv - a reservation object manages fences for a buffer
- * @lock: update side lock
- * @seq: sequence count for managing RCU read-side synchronization
- * @fence_excl: the exclusive fence, if there is one currently
- * @fence: list of current shared fences
+ *
+ * This is a container for dma_fence objects which needs to handle multiple use
+ * cases.
+ *
+ * One use is to synchronize cross-driver access to a struct dma_buf, either for
+ * dynamic buffer management or just to handle implicit synchronization between
+ * different users of the buffer in userspace. See &dma_buf.resv for a more
+ * in-depth discussion.
+ *
+ * The other major use is to manage access and locking within a driver in a
+ * buffer based memory manager. struct ttm_buffer_object is the canonical
+ * example here, since this is where reservation objects originated. But
+ * use in drivers is spreading and some drivers also manage struct
+ * drm_gem_object with the same scheme.
  */
 struct dma_resv {
+        /**
+         * @lock:
+         *
+         * Update side lock. Don't use directly, instead use the wrapper
+         * functions like dma_resv_lock() and dma_resv_unlock().
+         *
+         * Drivers which use the reservation object to manage memory dynamically
+         * also use this lock to protect buffer object state like placement,
+         * allocation policies or throughout command submission.
+         */
         struct ww_mutex lock;
-        seqcount_t seq;
 
-        struct dma_fence __rcu *fence_excl;
-        struct dma_resv_list __rcu *fence;
+        /**
+         * @fences:
+         *
+         * Array of fences which where added to the dma_resv object
+         *
+         * A new fence is added by calling dma_resv_add_fence(). Since this
+         * often needs to be done past the point of no return in command
+         * submission it cannot fail, and therefore sufficient slots need to be
+         * reserved by calling dma_resv_reserve_fences().
+         */
+        struct dma_resv_list __rcu *fences;
 };
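
The @fences comment implies a two-step pattern: reserve a slot while failure can still be handled, publish the fence afterwards. A minimal sketch using the real 6.x entry points, with resv and fence assumed from context:

        int ret;

        ret = dma_resv_lock(resv, NULL);
        if (ret)
                return ret;

        /* Reserve the slot while the error can still be propagated... */
        ret = dma_resv_reserve_fences(resv, 1);
        if (ret) {
                dma_resv_unlock(resv);
                return ret;
        }

        /* ...so that publishing past the point of no return cannot fail. */
        dma_resv_add_fence(resv, fence, DMA_RESV_USAGE_WRITE);
        dma_resv_unlock(resv);
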
 
-#define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
-#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
-
-/**
- * dma_resv_get_list - get the reservation object's
- * shared fence list, with update-side lock held
- * @obj: the reservation object
- *
- * Returns the shared fence list.  Does NOT take references to
- * the fence.  The obj->lock must be held.
- */
-static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj)
-{
-        return rcu_dereference_protected(obj->fence,
-                                         dma_resv_held(obj));
-}
+/**
+ * struct dma_resv_iter - current position into the dma_resv fences
+ *
+ * Don't touch this directly in the driver, use the accessor function instead.
+ *
+ * IMPORTANT
+ *
+ * When using the lockless iterators like dma_resv_iter_next_unlocked() and
+ * dma_resv_for_each_fence_unlocked() beware that the iterator can be restarted.
+ * Code which accumulates statistics or similar needs to check for this with
+ * dma_resv_iter_is_restarted().
+ */
+struct dma_resv_iter {
+        /** @obj: The dma_resv object we iterate over */
+        struct dma_resv *obj;
+
+        /** @usage: Return fences with this usage or lower. */
+        enum dma_resv_usage usage;
+
+        /** @fence: the currently handled fence */
+        struct dma_fence *fence;
+
+        /** @fence_usage: the usage of the current fence */
+        enum dma_resv_usage fence_usage;
+
+        /** @index: index into the shared fences */
+        unsigned int index;
+
+        /** @fences: the shared fences; private, *MUST* not dereference */
+        struct dma_resv_list *fences;
+
+        /** @num_fences: number of fences */
+        unsigned int num_fences;
+
+        /** @is_restarted: true if this is the first returned fence */
+        bool is_restarted;
+};
+
+struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor);
+struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor);
+struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor);
+struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor);
+
+/**
+ * dma_resv_iter_begin - initialize a dma_resv_iter object
+ * @cursor: The dma_resv_iter object to initialize
+ * @obj: The dma_resv object which we want to iterate over
+ * @usage: controls which fences to include, see enum dma_resv_usage.
+ */
+static inline void dma_resv_iter_begin(struct dma_resv_iter *cursor,
+                                       struct dma_resv *obj,
+                                       enum dma_resv_usage usage)
+{
+        cursor->obj = obj;
+        cursor->usage = usage;
+        cursor->fence = NULL;
+}
+
+/**
+ * dma_resv_iter_end - cleanup a dma_resv_iter object
+ * @cursor: the dma_resv_iter object which should be cleaned up
+ *
+ * Make sure that the reference to the fence in the cursor is properly
+ * dropped.
+ */
+static inline void dma_resv_iter_end(struct dma_resv_iter *cursor)
+{
+        dma_fence_put(cursor->fence);
+}
+
+/**
+ * dma_resv_iter_usage - Return the usage of the current fence
+ * @cursor: the cursor of the current position
+ *
+ * Returns the usage of the currently processed fence.
+ */
+static inline enum dma_resv_usage
+dma_resv_iter_usage(struct dma_resv_iter *cursor)
+{
+        return cursor->fence_usage;
+}
+
+/**
+ * dma_resv_iter_is_restarted - test if this is the first fence after a restart
+ * @cursor: the cursor with the current position
+ *
+ * Return true if this is the first fence in an iteration after a restart.
+ */
+static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor)
+{
+        return cursor->is_restarted;
+}
+
+/**
+ * dma_resv_for_each_fence_unlocked - unlocked fence iterator
+ * @cursor: a struct dma_resv_iter pointer
+ * @fence: the current fence
+ *
+ * Iterate over the fences in a struct dma_resv object without holding the
+ * &dma_resv.lock and using RCU instead. The cursor needs to be initialized
+ * with dma_resv_iter_begin() and cleaned up with dma_resv_iter_end(). Inside
+ * the iterator a reference to the dma_fence is held and the RCU lock dropped.
+ *
+ * Beware that the iterator can be restarted when the struct dma_resv for
+ * @cursor is modified. Code which accumulates statistics or similar needs to
+ * check for this with dma_resv_iter_is_restarted(). For this reason prefer the
+ * lock iterator dma_resv_for_each_fence() whenever possible.
+ */
+#define dma_resv_for_each_fence_unlocked(cursor, fence)                 \
+        for (fence = dma_resv_iter_first_unlocked(cursor);              \
+             fence; fence = dma_resv_iter_next_unlocked(cursor))
+
+/**
+ * dma_resv_for_each_fence - fence iterator
+ * @cursor: a struct dma_resv_iter pointer
+ * @obj: a dma_resv object pointer
+ * @usage: controls which fences to return
+ * @fence: the current fence
+ *
+ * Iterate over the fences in a struct dma_resv object while holding the
+ * &dma_resv.lock. @all_fences controls if the shared fences are returned as
+ * well. The cursor initialisation is part of the iterator and the fence stays
+ * valid as long as the lock is held and so no extra reference to the fence is
+ * taken.
+ */
+#define dma_resv_for_each_fence(cursor, obj, usage, fence)      \
+        for (dma_resv_iter_begin(cursor, obj, usage),           \
+             fence = dma_resv_iter_first(cursor); fence;        \
+             fence = dma_resv_iter_next(cursor))
+
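
Putting the iterator pieces together: an unlocked walk has to be prepared for restarts, per the IMPORTANT note above. A short sketch, with resv assumed from the surrounding code:

        struct dma_resv_iter cursor;
        struct dma_fence *fence;
        unsigned int count = 0;

        dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_READ);
        dma_resv_for_each_fence_unlocked(&cursor, fence) {
                /* The dma_resv may have changed under us: start over. */
                if (dma_resv_iter_is_restarted(&cursor))
                        count = 0;
                ++count;
        }
        dma_resv_iter_end(&cursor);
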
+#define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
+#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
+
+#ifdef CONFIG_DEBUG_MUTEXES
+void dma_resv_reset_max_fences(struct dma_resv *obj);
+#else
+static inline void dma_resv_reset_max_fences(struct dma_resv *obj) {}
+#endif
 
 /**
  * dma_resv_lock - lock the reservation object
  * @obj: the reservation object
  * @ctx: the locking context
  *
  * Locks the reservation object for exclusive access and modification. Note,
  * that the lock is only against other writers, readers will run concurrently
  * with a writer under RCU. The seqlock is used to notify readers if they
  * overlap with a writer.
  *
  * As the reservation object may be locked by multiple parties in an
  * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
  * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
  * object may be locked by itself by passing NULL as @ctx.
+ *
+ * When a die situation is indicated by returning -EDEADLK all locks held by
+ * @ctx must be unlocked and then dma_resv_lock_slow() called on @obj.
+ *
+ * Unlocked by calling dma_resv_unlock().
+ *
+ * See also dma_resv_lock_interruptible() for the interruptible variant.
  */
 static inline int dma_resv_lock(struct dma_resv *obj,
                                 struct ww_acquire_ctx *ctx)
 {
         return ww_mutex_lock(&obj->lock, ctx);
 }
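
The -EDEADLK protocol described above looks roughly like this when locking two reservation objects a and b (a condensed sketch; real code loops until no lock attempt returns -EDEADLK):

        struct ww_acquire_ctx ctx;
        int ret;

        ww_acquire_init(&ctx, &reservation_ww_class);

        dma_resv_lock(a, &ctx); /* first lock in a fresh ctx cannot die */
        ret = dma_resv_lock(b, &ctx);
        if (ret == -EDEADLK) {
                /* Back off completely, take the contended lock on the
                 * slowpath, then retry the remaining locks.
                 */
                dma_resv_unlock(a);
                dma_resv_lock_slow(b, &ctx);
                ret = dma_resv_lock(a, &ctx);
        }
        ww_acquire_done(&ctx);

        /* ... touch both buffers ... */

        dma_resv_unlock(a);
        dma_resv_unlock(b);
        ww_acquire_fini(&ctx);
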
 
 /**
  * dma_resv_lock_interruptible - lock the reservation object
  * @obj: the reservation object
  * @ctx: the locking context
  *
  * Locks the reservation object interruptible for exclusive access and
  * modification. Note, that the lock is only against other writers, readers
  * will run concurrently with a writer under RCU. The seqlock is used to
  * notify readers if they overlap with a writer.
  *
  * As the reservation object may be locked by multiple parties in an
  * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
  * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
  * object may be locked by itself by passing NULL as @ctx.
+ *
+ * When a die situation is indicated by returning -EDEADLK all locks held by
+ * @ctx must be unlocked and then dma_resv_lock_slow_interruptible() called on
+ * @obj.
+ *
+ * Unlocked by calling dma_resv_unlock().
  */
 static inline int dma_resv_lock_interruptible(struct dma_resv *obj,
                                               struct ww_acquire_ctx *ctx)
 {
         return ww_mutex_lock_interruptible(&obj->lock, ctx);
 }
 
 /**
  * dma_resv_lock_slow - slowpath lock the reservation object
  * @obj: the reservation object
  * @ctx: the locking context
  *
  * Acquires the reservation object after a die case. This function
  * will sleep until the lock becomes available. See dma_resv_lock() as
  * well.
+ *
+ * See also dma_resv_lock_slow_interruptible() for the interruptible variant.
  */
 static inline void dma_resv_lock_slow(struct dma_resv *obj,
                                       struct ww_acquire_ctx *ctx)
 {
         ww_mutex_lock_slow(&obj->lock, ctx);
 }
 
 /**
  * dma_resv_lock_slow_interruptible - slowpath lock the reservation
  * object, interruptible
  * @obj: the reservation object
  * @ctx: the locking context
  *
  * Acquires the reservation object interruptible after a die case. This function
  * will sleep until the lock becomes available. See
  * dma_resv_lock_interruptible() as well.
  */
 static inline int dma_resv_lock_slow_interruptible(struct dma_resv *obj,
                                                    struct ww_acquire_ctx *ctx)
 {
         return ww_mutex_lock_slow_interruptible(&obj->lock, ctx);
 }
 
 /**
  * dma_resv_trylock - trylock the reservation object
  * @obj: the reservation object
  *
  * Tries to lock the reservation object for exclusive access and modification.
  * Note, that the lock is only against other writers, readers will run
  * concurrently with a writer under RCU. The seqlock is used to notify readers
  * if they overlap with a writer.
  *
  * Also note that since no context is provided, no deadlock protection is
- * possible.
+ * possible, which is also not needed for a trylock.
  *
  * Returns true if the lock was acquired, false otherwise.
  */
 static inline bool __must_check dma_resv_trylock(struct dma_resv *obj)
 {
-        return ww_mutex_trylock(&obj->lock);
+        return ww_mutex_trylock(&obj->lock, NULL);
 }
 
 /**
  * dma_resv_is_locked - is the reservation object locked
  * @obj: the reservation object
  *
  * Returns true if the mutex is locked, false if unlocked.
  */
 static inline bool dma_resv_is_locked(struct dma_resv *obj)
 {
         return ww_mutex_is_locked(&obj->lock);
 }
 
 /**
  * dma_resv_locking_ctx - returns the context used to lock the object
  * @obj: the reservation object
  *
  * Returns the context used to lock a reservation object or NULL if no context
  * was used or the object is not locked at all.
+ *
+ * WARNING: This interface is pretty horrible, but TTM needs it because it
+ * doesn't pass the struct ww_acquire_ctx around.
+ * Everyone else just uses it to check whether the lock is held or
+ * not.
  */
 static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj)
 {
         return READ_ONCE(obj->lock.ctx);
 }
 
 /**
  * dma_resv_unlock - unlock the reservation object
  * @obj: the reservation object
  *
  * Unlocks the reservation object following exclusive access.
  */
 static inline void dma_resv_unlock(struct dma_resv *obj)
 {
-#ifdef CONFIG_DEBUG_MUTEXES
-        /* Test shared fence slot reservation */
-        if (rcu_access_pointer(obj->fence)) {
-                struct dma_resv_list *fence = dma_resv_get_list(obj);
-
-                fence->shared_max = fence->shared_count;
-        }
-#endif
+        dma_resv_reset_max_fences(obj);
         ww_mutex_unlock(&obj->lock);
 }
 
-/**
- * dma_resv_get_excl - get the reservation object's
- * exclusive fence, with update-side lock held
- * @obj: the reservation object
- *
- * Returns the exclusive fence (if any).  Does NOT take a
- * reference. Writers must hold obj->lock, readers may only
- * hold a RCU read side lock.
- *
- * RETURNS
- * The exclusive fence or NULL
- */
-static inline struct dma_fence *
-dma_resv_get_excl(struct dma_resv *obj)
-{
-        return rcu_dereference_protected(obj->fence_excl,
-                                         dma_resv_held(obj));
-}
-
-/**
- * dma_resv_get_excl_rcu - get the reservation object's
- * exclusive fence, without lock held.
- * @obj: the reservation object
- *
- * If there is an exclusive fence, this atomically increments it's
- * reference count and returns it.
- *
- * RETURNS
- * The exclusive fence or NULL if none
- */
-static inline struct dma_fence *
-dma_resv_get_excl_rcu(struct dma_resv *obj)
-{
-        struct dma_fence *fence;
-
-        if (!rcu_access_pointer(obj->fence_excl))
-                return NULL;
-
-        rcu_read_lock();
-        fence = dma_fence_get_rcu_safe(&obj->fence_excl);
-        rcu_read_unlock();
-
-        return fence;
-}
-
 void dma_resv_init(struct dma_resv *obj);
 void dma_resv_fini(struct dma_resv *obj);
-int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
-void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
-
-void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
-
-int dma_resv_get_fences_rcu(struct dma_resv *obj,
-                            struct dma_fence **pfence_excl,
-                            unsigned *pshared_count,
-                            struct dma_fence ***pshared);
-
+int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences);
+void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
+                        enum dma_resv_usage usage);
+void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
+                             struct dma_fence *fence,
+                             enum dma_resv_usage usage);
+int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
+                        unsigned int *num_fences, struct dma_fence ***fences);
+int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage,
+                           struct dma_fence **fence);
 int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
-
-long dma_resv_wait_timeout_rcu(struct dma_resv *obj, bool wait_all, bool intr,
-                               unsigned long timeout);
-
-bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all);
+long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
+                           bool intr, unsigned long timeout);
+void dma_resv_set_deadline(struct dma_resv *obj, enum dma_resv_usage usage,
+                           ktime_t deadline);
+bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage);
+void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq);
 
 #endif /* _LINUX_RESERVATION_H */
 
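For reference, a caller-side sketch of the 6.x wait/test entry points declared above (function names are real; resv and the 100ms budget are assumptions of the example):

        long ret;

        /* Sleep (interruptibly) until every fence up to and including
         * READ usage has signaled, or the timeout elapses.
         */
        ret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_READ, true,
                                    msecs_to_jiffies(100));
        if (ret == 0)
                return -ETIMEDOUT;      /* timed out */
        if (ret < 0)
                return ret;             /* e.g. -ERESTARTSYS */

        /* Non-blocking check of the same set of fences. */
        if (!dma_resv_test_signaled(resv, DMA_RESV_USAGE_READ))
                return -EBUSY;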
