// SPDX-License-Identifier: GPL-2.0-only
/*
 * rtmutex API
 *
 * ww_mutex (wound/wait mutex) API built on top of rtmutexes.
 * rtmutex.c is textually included below with RT_MUTEX_BUILD_MUTEX and
 * WW_RT defined, so the shared rtmutex slowpath code is compiled with
 * ww_mutex (acquire-context) support in this translation unit.
 */
#include <linux/spinlock.h>
#include <linux/export.h>

#define RT_MUTEX_BUILD_MUTEX
#define WW_RT
#include "rtmutex.c"

/*
 * ww_mutex_trylock - try to acquire @lock without blocking
 * @lock:   the wound/wait mutex to acquire
 * @ww_ctx: optional acquire context; NULL means plain trylock semantics
 *
 * Returns 1 if the lock was acquired, 0 if it is contended.
 */
int ww_mutex_trylock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
	struct rt_mutex *rtm = &lock->base;

	/* Without an acquire context this is just an rtmutex trylock. */
	if (!ww_ctx)
		return rt_mutex_trylock(rtm);

	/*
	 * Reset the wounded flag after a kill. No other process can
	 * race and wound us here, since they can't have a valid owner
	 * pointer if we don't have any locks held.
	 */
	if (ww_ctx->acquired == 0)
		ww_ctx->wounded = 0;

	if (__rt_mutex_trylock(&rtm->rtmutex)) {
		/* Publish the context, then annotate for lockdep. */
		ww_mutex_set_context_fastpath(lock, ww_ctx);
		mutex_acquire_nest(&rtm->dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL(ww_mutex_trylock);

/*
 * __ww_rt_mutex_lock - common locking path for the ww_mutex lock variants
 * @lock:   the wound/wait mutex to acquire
 * @ww_ctx: optional acquire context; NULL behaves like a regular mutex lock
 * @state:  task state used while blocking (TASK_UNINTERRUPTIBLE or
 *          TASK_INTERRUPTIBLE, selected by the exported wrappers below)
 * @ip:     caller instruction pointer, forwarded to the lockdep annotations
 *
 * Returns 0 on success, -EALREADY when @ww_ctx already owns @lock, or a
 * negative error code propagated from rt_mutex_slowlock().
 */
static int __sched
__ww_rt_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx,
		   unsigned int state, unsigned long ip)
{
	struct lockdep_map __maybe_unused *nest_lock = NULL;
	struct rt_mutex *rtm = &lock->base;
	int ret;

	might_sleep();

	if (ww_ctx) {
		/* Re-acquiring the same lock within one context is a caller bug. */
		if (unlikely(ww_ctx == READ_ONCE(lock->ctx)))
			return -EALREADY;

		/*
		 * Reset the wounded flag after a kill. No other process can
		 * race and wound us here, since they can't have a valid owner
		 * pointer if we don't have any locks held.
		 */
		if (ww_ctx->acquired == 0)
			ww_ctx->wounded = 0;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
		nest_lock = &ww_ctx->dep_map;
#endif
	}
	/* Annotate the acquisition before attempting it. */
	mutex_acquire_nest(&rtm->dep_map, 0, 0, nest_lock, ip);

	/* Fastpath: uncontended acquire. */
	if (likely(rt_mutex_try_acquire(&rtm->rtmutex))) {
		if (ww_ctx)
			ww_mutex_set_context_fastpath(lock, ww_ctx);
		return 0;
	}

	ret = rt_mutex_slowlock(&rtm->rtmutex, ww_ctx, state);

	/* On failure, undo the lockdep acquisition annotated above. */
	if (ret)
		mutex_release(&rtm->dep_map, ip);
	return ret;
}

/*
 * ww_mutex_lock - acquire @lock, sleeping uninterruptibly on contention
 *
 * Returns 0 on success, or a negative error (e.g. -EALREADY) from
 * __ww_rt_mutex_lock().
 */
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_rt_mutex_lock(lock, ctx, TASK_UNINTERRUPTIBLE, _RET_IP_);
}
EXPORT_SYMBOL(ww_mutex_lock);

/*
 * ww_mutex_lock_interruptible - acquire @lock, interruptible by signals
 *
 * Same as ww_mutex_lock() but sleeps in TASK_INTERRUPTIBLE state.
 */
int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_rt_mutex_lock(lock, ctx, TASK_INTERRUPTIBLE, _RET_IP_);
}
EXPORT_SYMBOL(ww_mutex_lock_interruptible);

/*
 * ww_mutex_unlock - release @lock
 *
 * Clears the ww_mutex context state, drops the lockdep annotation and
 * then releases the underlying rtmutex.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	struct rt_mutex *rtm = &lock->base;

	__ww_mutex_unlock(lock);

	mutex_release(&rtm->dep_map, _RET_IP_);
	__rt_mutex_unlock(&rtm->rtmutex);
}
EXPORT_SYMBOL(ww_mutex_unlock);
Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.