/*
 * Header file for reservations for dma-buf and ttm
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Copyright (C) 2012-2013 Canonical Ltd
 * Copyright (C) 2012 Texas Instruments
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
 * Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#ifndef _LINUX_RESERVATION_H
#define _LINUX_RESERVATION_H

#include <linux/ww_mutex.h>
#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/seqlock.h>
#include <linux/rcupdate.h>

extern struct ww_class reservation_ww_class;

struct dma_resv_list;

/**
 * enum dma_resv_usage - how the fences from a dma_resv obj are used
 *
 * This enum describes the different use cases for a dma_resv object and
 * controls which fences are returned when queried.
 *
 * An important fact is that there is the order KERNEL<WRITE<READ<BOOKKEEP and
 * when the dma_resv object is asked for fences for one use case the fences
 * for the lower use cases are returned as well.
 *
 * For example when asking for WRITE fences then the KERNEL fences are returned
 * as well. Similarly, when asked for READ fences both the WRITE and KERNEL
 * fences are returned as well.
 *
 * Already used fences can be promoted in the sense that a fence with
 * DMA_RESV_USAGE_BOOKKEEP could become DMA_RESV_USAGE_READ by adding it again
 * with this usage. But fences can never be degraded in the sense that a fence
 * with DMA_RESV_USAGE_WRITE could become DMA_RESV_USAGE_READ.
 */
enum dma_resv_usage {
        /**
         * @DMA_RESV_USAGE_KERNEL: For in kernel memory management only.
         *
         * This should only be used for things like copying or clearing memory
         * with a DMA hardware engine for the purpose of kernel memory
         * management.
         *
         * Drivers must *always* wait for those fences before accessing the
         * resource protected by the dma_resv object. The only exception for
         * that is when the resource is known to be locked down in place by
         * pinning it previously.
         */
        DMA_RESV_USAGE_KERNEL,

        /**
         * @DMA_RESV_USAGE_WRITE: Implicit write synchronization.
         *
         * This should only be used for userspace command submissions which add
         * an implicit write dependency.
         */
        DMA_RESV_USAGE_WRITE,

        /**
         * @DMA_RESV_USAGE_READ: Implicit read synchronization.
         *
         * This should only be used for userspace command submissions which add
         * an implicit read dependency.
         */
        DMA_RESV_USAGE_READ,

        /**
         * @DMA_RESV_USAGE_BOOKKEEP: No implicit synchronization.
         *
         * This should be used by submissions which don't want to participate
         * in any implicit synchronization.
         *
         * The most common cases are preemption fences, page table updates, TLB
         * flushes as well as explicitly synced user submissions.
         *
         * Explicitly synced user submissions can be promoted to
         * DMA_RESV_USAGE_READ or DMA_RESV_USAGE_WRITE as well by using
         * dma_buf_import_sync_file() when implicit synchronization should
         * become necessary after the initial adding of the fence.
         */
        DMA_RESV_USAGE_BOOKKEEP
};
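/*
 * Usage example (editor's illustrative sketch, not part of the original
 * header): because of the KERNEL < WRITE < READ < BOOKKEEP ordering above, a
 * query or wait for a given usage also covers all lower usages. Assuming a
 * valid struct dma_resv *resv and the dma_resv_wait_timeout() helper declared
 * near the end of this header, waiting for implicit write fences therefore
 * also waits for the kernel-internal fences, but not for READ or BOOKKEEP
 * ones:
 *
 *      long ret;
 *
 *      ret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_WRITE, true,
 *                                  MAX_SCHEDULE_TIMEOUT);
 *      if (ret < 0)
 *              return ret;
 */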
/**
 * dma_resv_usage_rw - helper for implicit sync
 * @write: true if we create a new implicit sync write
 *
 * This returns the implicit synchronization usage for write or read accesses,
 * see enum dma_resv_usage and &dma_buf.resv.
 */
static inline enum dma_resv_usage dma_resv_usage_rw(bool write)
{
        /* This looks confusing at first sight, but is indeed correct.
         *
         * The rationale is that new write operations need to wait for the
         * existing read and write operations to finish.
         * But a new read operation only needs to wait for the existing write
         * operations to finish.
         */
        return write ? DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE;
}

/**
 * struct dma_resv - a reservation object manages fences for a buffer
 *
 * This is a container for dma_fence objects which need to handle multiple use
 * cases.
 *
 * One use is to synchronize cross-driver access to a struct dma_buf, either for
 * dynamic buffer management or just to handle implicit synchronization between
 * different users of the buffer in userspace. See &dma_buf.resv for a more
 * in-depth discussion.
 *
 * The other major use is to manage access and locking within a driver in a
 * buffer based memory manager. struct ttm_buffer_object is the canonical
 * example here, since this is where reservation objects originated from. But
 * use in drivers is spreading and some drivers also manage struct
 * drm_gem_object with the same scheme.
 */
struct dma_resv {
        /**
         * @lock:
         *
         * Update side lock. Don't use directly, instead use the wrapper
         * functions like dma_resv_lock() and dma_resv_unlock().
         *
         * Drivers which use the reservation object to manage memory
         * dynamically also use this lock to protect buffer object state like
         * placement, allocation policies or throughout command submission.
         */
        struct ww_mutex lock;

        /**
         * @fences:
         *
         * Array of fences which were added to the dma_resv object.
         *
         * A new fence is added by calling dma_resv_add_fence(). Since this
         * often needs to be done past the point of no return in command
         * submission it cannot fail, and therefore sufficient slots need to be
         * reserved by calling dma_resv_reserve_fences().
         */
        struct dma_resv_list __rcu *fences;
};
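/*
 * Usage example (editor's illustrative sketch, not part of the original
 * header): during command submission a driver typically collects the
 * implicit-sync dependencies of a new job by combining dma_resv_usage_rw()
 * with the locked iterator defined further down in this header. "job",
 * "job_is_write" and driver_job_add_dependency() are hypothetical driver-side
 * names.
 *
 *      struct dma_resv_iter cursor;
 *      struct dma_fence *fence;
 *      int ret;
 *
 *      dma_resv_assert_held(obj);
 *      dma_resv_for_each_fence(&cursor, obj, dma_resv_usage_rw(job_is_write),
 *                              fence) {
 *              ret = driver_job_add_dependency(job, fence);
 *              if (ret)
 *                      return ret;
 *      }
 */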
/**
 * struct dma_resv_iter - current position into the dma_resv fences
 *
 * Don't touch this directly in the driver, use the accessor function instead.
 *
 * IMPORTANT
 *
 * When using the lockless iterators like dma_resv_iter_first_unlocked() and
 * dma_resv_for_each_fence_unlocked() beware that the iterator can be
 * restarted. Code which accumulates statistics or similar needs to check for
 * this with dma_resv_iter_is_restarted().
 */
struct dma_resv_iter {
        /** @obj: The dma_resv object we iterate over */
        struct dma_resv *obj;

        /** @usage: Return fences with this usage or lower. */
        enum dma_resv_usage usage;

        /** @fence: the currently handled fence */
        struct dma_fence *fence;

        /** @fence_usage: the usage of the current fence */
        enum dma_resv_usage fence_usage;

        /** @index: index into the shared fences */
        unsigned int index;

        /** @fences: the shared fences; private, *MUST* not dereference */
        struct dma_resv_list *fences;

        /** @num_fences: number of fences */
        unsigned int num_fences;

        /** @is_restarted: true if this is the first returned fence */
        bool is_restarted;
};
struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor);
struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor);
struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor);
struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor);

/**
 * dma_resv_iter_begin - initialize a dma_resv_iter object
 * @cursor: The dma_resv_iter object to initialize
 * @obj: The dma_resv object which we want to iterate over
 * @usage: controls which fences to include, see enum dma_resv_usage.
 */
static inline void dma_resv_iter_begin(struct dma_resv_iter *cursor,
                                       struct dma_resv *obj,
                                       enum dma_resv_usage usage)
{
        cursor->obj = obj;
        cursor->usage = usage;
        cursor->fence = NULL;
}

/**
 * dma_resv_iter_end - cleanup a dma_resv_iter object
 * @cursor: the dma_resv_iter object which should be cleaned up
 *
 * Make sure that the reference to the fence in the cursor is properly
 * dropped.
 */
static inline void dma_resv_iter_end(struct dma_resv_iter *cursor)
{
        dma_fence_put(cursor->fence);
}

/**
 * dma_resv_iter_usage - Return the usage of the current fence
 * @cursor: the cursor of the current position
 *
 * Returns the usage of the currently processed fence.
 */
static inline enum dma_resv_usage
dma_resv_iter_usage(struct dma_resv_iter *cursor)
{
        return cursor->fence_usage;
}

/**
 * dma_resv_iter_is_restarted - test if this is the first fence after a restart
 * @cursor: the cursor with the current position
 *
 * Return true if this is the first fence in an iteration after a restart.
 */
static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor)
{
        return cursor->is_restarted;
}
/**
 * dma_resv_for_each_fence_unlocked - unlocked fence iterator
 * @cursor: a struct dma_resv_iter pointer
 * @fence: the current fence
 *
 * Iterate over the fences in a struct dma_resv object without holding the
 * &dma_resv.lock and using RCU instead. The cursor needs to be initialized
 * with dma_resv_iter_begin() and cleaned up with dma_resv_iter_end(). Inside
 * the iterator a reference to the dma_fence is held and the RCU lock dropped.
 *
 * Beware that the iterator can be restarted when the struct dma_resv for
 * @cursor is modified. Code which accumulates statistics or similar needs to
 * check for this with dma_resv_iter_is_restarted(). Prefer the locked
 * iterator dma_resv_for_each_fence() whenever possible.
 */
#define dma_resv_for_each_fence_unlocked(cursor, fence)                 \
        for (fence = dma_resv_iter_first_unlocked(cursor);             \
             fence; fence = dma_resv_iter_next_unlocked(cursor))

/**
 * dma_resv_for_each_fence - fence iterator
 * @cursor: a struct dma_resv_iter pointer
 * @obj: a dma_resv object pointer
 * @usage: controls which fences to return
 * @fence: the current fence
 *
 * Iterate over the fences in a struct dma_resv object while holding the
 * &dma_resv.lock. @usage controls which fences are returned, see
 * enum dma_resv_usage. The cursor initialisation is part of the iterator and
 * the fence stays valid as long as the lock is held and so no extra reference
 * to the fence is taken.
 */
#define dma_resv_for_each_fence(cursor, obj, usage, fence)      \
        for (dma_resv_iter_begin(cursor, obj, usage),           \
             fence = dma_resv_iter_first(cursor); fence;        \
             fence = dma_resv_iter_next(cursor))

#define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
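/*
 * Usage example (editor's illustrative sketch, not part of the original
 * header): lockless iteration with proper restart handling. Any state
 * accumulated so far must be thrown away when dma_resv_iter_is_restarted()
 * reports that the object was modified concurrently.
 *
 *      struct dma_resv_iter cursor;
 *      struct dma_fence *fence;
 *      unsigned int signaled = 0;
 *
 *      dma_resv_iter_begin(&cursor, obj, DMA_RESV_USAGE_BOOKKEEP);
 *      dma_resv_for_each_fence_unlocked(&cursor, fence) {
 *              if (dma_resv_iter_is_restarted(&cursor))
 *                      signaled = 0;
 *              if (dma_fence_is_signaled(fence))
 *                      signaled++;
 *      }
 *      dma_resv_iter_end(&cursor);
 */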
#ifdef CONFIG_DEBUG_MUTEXES
void dma_resv_reset_max_fences(struct dma_resv *obj);
#else
static inline void dma_resv_reset_max_fences(struct dma_resv *obj) {}
#endif

/**
 * dma_resv_lock - lock the reservation object
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Locks the reservation object for exclusive access and modification. Note
 * that the lock is only against other writers; readers will run concurrently
 * with a writer under RCU. The seqlock is used to notify readers if they
 * overlap with a writer.
 *
 * As the reservation object may be locked by multiple parties in an
 * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
 * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
 * object may be locked by itself by passing NULL as @ctx.
 *
 * When a die situation is indicated by returning -EDEADLK all locks held by
 * @ctx must be unlocked and then dma_resv_lock_slow() called on @obj.
 *
 * Unlocked by calling dma_resv_unlock().
 *
 * See also dma_resv_lock_interruptible() for the interruptible variant.
 */
static inline int dma_resv_lock(struct dma_resv *obj,
                                struct ww_acquire_ctx *ctx)
{
        return ww_mutex_lock(&obj->lock, ctx);
}

/**
 * dma_resv_lock_interruptible - lock the reservation object
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Locks the reservation object interruptibly for exclusive access and
 * modification. Note that the lock is only against other writers; readers
 * will run concurrently with a writer under RCU. The seqlock is used to
 * notify readers if they overlap with a writer.
 *
 * As the reservation object may be locked by multiple parties in an
 * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
 * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
 * object may be locked by itself by passing NULL as @ctx.
 *
 * When a die situation is indicated by returning -EDEADLK all locks held by
 * @ctx must be unlocked and then dma_resv_lock_slow_interruptible() called on
 * @obj.
 *
 * Unlocked by calling dma_resv_unlock().
 */
static inline int dma_resv_lock_interruptible(struct dma_resv *obj,
                                              struct ww_acquire_ctx *ctx)
{
        return ww_mutex_lock_interruptible(&obj->lock, ctx);
}
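/*
 * Usage example (editor's illustrative sketch, not part of the original
 * header): acquiring two reservation objects "a" and "b" with wound/wait
 * back-off. On -EDEADLK every lock held by the context is dropped and the
 * contended object is re-acquired with dma_resv_lock_slow() (defined below)
 * before retrying. Unlocking both objects and calling ww_acquire_fini() once
 * the work is done is omitted for brevity.
 *
 *      struct ww_acquire_ctx ctx;
 *      struct dma_resv *contended = NULL;
 *      int ret;
 *
 *      ww_acquire_init(&ctx, &reservation_ww_class);
 * retry:
 *      if (contended)
 *              dma_resv_lock_slow(contended, &ctx);
 *      if (contended != a) {
 *              ret = dma_resv_lock(a, &ctx);
 *              if (ret == -EDEADLK) {
 *                      if (contended == b)
 *                              dma_resv_unlock(b);
 *                      contended = a;
 *                      goto retry;
 *              }
 *      }
 *      if (contended != b) {
 *              ret = dma_resv_lock(b, &ctx);
 *              if (ret == -EDEADLK) {
 *                      dma_resv_unlock(a);
 *                      contended = b;
 *                      goto retry;
 *              }
 *      }
 *      ww_acquire_done(&ctx);
 */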
/**
 * dma_resv_lock_slow - slowpath lock the reservation object
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Acquires the reservation object after a die case. This function
 * will sleep until the lock becomes available. See dma_resv_lock() as
 * well.
 *
 * See also dma_resv_lock_slow_interruptible() for the interruptible variant.
 */
static inline void dma_resv_lock_slow(struct dma_resv *obj,
                                      struct ww_acquire_ctx *ctx)
{
        ww_mutex_lock_slow(&obj->lock, ctx);
}

/**
 * dma_resv_lock_slow_interruptible - slowpath lock the reservation
 * object, interruptible
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Acquires the reservation object interruptibly after a die case. This
 * function will sleep until the lock becomes available. See
 * dma_resv_lock_interruptible() as well.
 */
static inline int dma_resv_lock_slow_interruptible(struct dma_resv *obj,
                                                   struct ww_acquire_ctx *ctx)
{
        return ww_mutex_lock_slow_interruptible(&obj->lock, ctx);
}

/**
 * dma_resv_trylock - trylock the reservation object
 * @obj: the reservation object
 *
 * Tries to lock the reservation object for exclusive access and modification.
 * Note that the lock is only against other writers; readers will run
 * concurrently with a writer under RCU. The seqlock is used to notify readers
 * if they overlap with a writer.
 *
 * Also note that since no context is provided, no deadlock protection is
 * possible, which is also not needed for a trylock.
 *
 * Returns true if the lock was acquired, false otherwise.
 */
static inline bool __must_check dma_resv_trylock(struct dma_resv *obj)
{
        return ww_mutex_trylock(&obj->lock, NULL);
}
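/*
 * Usage example (editor's illustrative sketch, not part of the original
 * header): dma_resv_trylock() is typically used on opportunistic paths such
 * as shrinkers or eviction scans where blocking on a contended object is not
 * worth it. "bo" and driver_evict_locked() are hypothetical driver-side
 * names, with the dma_resv embedded as bo->resv.
 *
 *      if (!dma_resv_trylock(&bo->resv))
 *              return false;
 *
 *      if (dma_resv_test_signaled(&bo->resv, DMA_RESV_USAGE_BOOKKEEP))
 *              driver_evict_locked(bo);
 *
 *      dma_resv_unlock(&bo->resv);
 */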
/**
 * dma_resv_is_locked - is the reservation object locked
 * @obj: the reservation object
 *
 * Returns true if the mutex is locked, false if unlocked.
 */
static inline bool dma_resv_is_locked(struct dma_resv *obj)
{
        return ww_mutex_is_locked(&obj->lock);
}

/**
 * dma_resv_locking_ctx - returns the context used to lock the object
 * @obj: the reservation object
 *
 * Returns the context used to lock a reservation object or NULL if no context
 * was used or the object is not locked at all.
 *
 * WARNING: This interface is pretty horrible, but TTM needs it because it
 * doesn't pass the struct ww_acquire_ctx around in some very long callchains.
 * Everyone else just uses it to check whether they're holding a reservation or
 * not.
 */
static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj)
{
        return READ_ONCE(obj->lock.ctx);
}

/**
 * dma_resv_unlock - unlock the reservation object
 * @obj: the reservation object
 *
 * Unlocks the reservation object following exclusive access.
 */
static inline void dma_resv_unlock(struct dma_resv *obj)
{
        dma_resv_reset_max_fences(obj);
        ww_mutex_unlock(&obj->lock);
}
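/*
 * Usage example (editor's illustrative sketch, not part of the original
 * header): publishing a job's fence with dma_resv_reserve_fences() and
 * dma_resv_add_fence(), both declared just below. Slots are reserved while
 * errors can still be handled; adding the fence happens past the point of no
 * return and therefore cannot fail. "job_fence" and "job_is_write" stand in
 * for driver-specific state.
 *
 *      int ret;
 *
 *      dma_resv_lock(obj, NULL);
 *      ret = dma_resv_reserve_fences(obj, 1);
 *      if (ret) {
 *              dma_resv_unlock(obj);
 *              return ret;
 *      }
 *
 *      ... submit the job and obtain job_fence ...
 *
 *      dma_resv_add_fence(obj, job_fence, job_is_write ?
 *                         DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
 *      dma_resv_unlock(obj);
 */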
void dma_resv_init(struct dma_resv *obj);
void dma_resv_fini(struct dma_resv *obj);
int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences);
void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
                        enum dma_resv_usage usage);
void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
                             struct dma_fence *fence,
                             enum dma_resv_usage usage);
int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
                        unsigned int *num_fences, struct dma_fence ***fences);
int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage,
                           struct dma_fence **fence);
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
                           bool intr, unsigned long timeout);
void dma_resv_set_deadline(struct dma_resv *obj, enum dma_resv_usage usage,
                           ktime_t deadline);
bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage);
void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq);

#endif /* _LINUX_RESERVATION_H */