/* SPDX-License-Identifier: GPL-2.0 */
/*
 * RT Mutexes: blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner:
 *
 *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 * This file contains the private data structure and API definitions.
 */

#ifndef __KERNEL_RTMUTEX_COMMON_H
#define __KERNEL_RTMUTEX_COMMON_H

#include <linux/debug_locks.h>
#include <linux/rtmutex.h>
#include <linux/sched/wake_q.h>


/*
 * This is a helper for the struct rt_mutex_waiter below. A waiter goes in two
 * separate trees and they need their own copy of the sort keys because of
 * different locking requirements.
 *
 * @entry:		rbtree node to enqueue into the waiters tree
 * @prio:		Priority of the waiter
 * @deadline:		Deadline of the waiter if applicable
 *
 * See rt_waiter_node_less() and waiter_*_prio().
 */
struct rt_waiter_node {
	struct rb_node	entry;
	int		prio;
	u64		deadline;
};
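
/*
 * A minimal sketch, assuming the ordering conventions of
 * rt_waiter_node_less() in rtmutex.c: a lower numerical @prio wins, and
 * @deadline only breaks ties between deadline-class waiters. The helper
 * name is hypothetical and exists purely to illustrate how the two sort
 * keys interact; it is not part of this API.
 */
static inline bool example_waiter_node_less(struct rt_waiter_node *left,
					    struct rt_waiter_node *right)
{
	/* A lower prio value means a higher priority. */
	if (left->prio < right->prio)
		return true;

	/*
	 * Deadline-class waiters (prio < 0) all share one prio value;
	 * among them the earlier absolute deadline is the more urgent.
	 */
	if (left->prio == right->prio && left->prio < 0)
		return left->deadline < right->deadline;

	return false;
}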

/*
 * This is the control structure for tasks blocked on a rt_mutex,
 * which is allocated on the kernel stack of the blocked task.
 *
 * @tree:		node to enqueue into the mutex waiters tree
 * @pi_tree:		node to enqueue into the mutex owner waiters tree
 * @task:		task reference to the blocked task
 * @lock:		Pointer to the rt_mutex on which the waiter blocks
 * @wake_state:		Wakeup state to use (TASK_NORMAL or TASK_RTLOCK_WAIT)
 * @ww_ctx:		WW context pointer
 *
 * @tree is ordered by @lock->wait_lock
 * @pi_tree is ordered by rt_mutex_owner(@lock)->pi_lock
 */
struct rt_mutex_waiter {
	struct rt_waiter_node	tree;
	struct rt_waiter_node	pi_tree;
	struct task_struct	*task;
	struct rt_mutex_base	*lock;
	unsigned int		wake_state;
	struct ww_acquire_ctx	*ww_ctx;
};

/**
 * rt_wake_q_head - Wrapper around regular wake_q_head to support
 *		    "sleeping" spinlocks on RT
 * @head:		The regular wake_q_head for sleeping lock variants
 * @rtlock_task:	Task pointer for RT lock (spin/rwlock) wakeups
 */
struct rt_wake_q_head {
	struct wake_q_head	head;
	struct task_struct	*rtlock_task;
};

#define DEFINE_RT_WAKE_Q(name)						\
	struct rt_wake_q_head name = {					\
		.head		= WAKE_Q_HEAD_INITIALIZER(name.head),	\
		.rtlock_task	= NULL,					\
	}

/*
 * PI-futex support (proxy locking functions, etc.):
 */
extern void rt_mutex_init_proxy_locked(struct rt_mutex_base *lock,
				       struct task_struct *proxy_owner);
extern void rt_mutex_proxy_unlock(struct rt_mutex_base *lock);
extern int __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
				       struct rt_mutex_waiter *waiter,
				       struct task_struct *task);
extern int rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
				     struct rt_mutex_waiter *waiter,
				     struct task_struct *task);
extern int rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
				    struct hrtimer_sleeper *to,
				    struct rt_mutex_waiter *waiter);
extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock,
					struct rt_mutex_waiter *waiter);

extern int rt_mutex_futex_trylock(struct rt_mutex_base *l);
extern int __rt_mutex_futex_trylock(struct rt_mutex_base *l);
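
/*
 * A minimal sketch of how the proxy-lock helpers above fit together,
 * modeled loosely on the PI-futex requeue path. The function is
 * hypothetical and glosses over the wait_lock and pi_state handling of
 * the real call sites in kernel/futex/.
 */
static inline int example_proxy_lock_sequence(struct rt_mutex_base *lock,
					      struct rt_mutex_waiter *waiter,
					      struct task_struct *task,
					      struct hrtimer_sleeper *to)
{
	int ret;

	/* Enqueue @task as a waiter and kick off the PI chain walk. */
	ret = rt_mutex_start_proxy_lock(lock, waiter, task);
	if (ret)
		return ret;

	/* Block until the lock is acquired or the timeout expires. */
	ret = rt_mutex_wait_proxy_lock(lock, to, waiter);
	if (ret && !rt_mutex_cleanup_proxy_lock(lock, waiter)) {
		/* The cleanup found that the lock was acquired after all. */
		ret = 0;
	}
	return ret;
}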

extern void rt_mutex_futex_unlock(struct rt_mutex_base *lock);
extern bool __rt_mutex_futex_unlock(struct rt_mutex_base *lock,
				    struct rt_wake_q_head *wqh);

extern void rt_mutex_postunlock(struct rt_wake_q_head *wqh);

/*
 * Must be guarded because this header is included from rcu/tree_plugin.h
 * unconditionally.
 */
#ifdef CONFIG_RT_MUTEXES
static inline int rt_mutex_has_waiters(struct rt_mutex_base *lock)
{
	return !RB_EMPTY_ROOT(&lock->waiters.rb_root);
}

/*
 * Lockless speculative check whether @waiter is still the top waiter on
 * @lock. This is solely comparing pointers and not dereferencing the
 * leftmost entry which might be about to vanish.
 */
static inline bool rt_mutex_waiter_is_top_waiter(struct rt_mutex_base *lock,
						 struct rt_mutex_waiter *waiter)
{
	struct rb_node *leftmost = rb_first_cached(&lock->waiters);

	return rb_entry(leftmost, struct rt_mutex_waiter, tree.entry) == waiter;
}

static inline struct rt_mutex_waiter *rt_mutex_top_waiter(struct rt_mutex_base *lock)
{
	struct rb_node *leftmost = rb_first_cached(&lock->waiters);
	struct rt_mutex_waiter *w = NULL;

	lockdep_assert_held(&lock->wait_lock);

	if (leftmost) {
		w = rb_entry(leftmost, struct rt_mutex_waiter, tree.entry);
		BUG_ON(w->lock != lock);
	}
	return w;
}

static inline int task_has_pi_waiters(struct task_struct *p)
{
	return !RB_EMPTY_ROOT(&p->pi_waiters.rb_root);
}

static inline struct rt_mutex_waiter *task_top_pi_waiter(struct task_struct *p)
{
	lockdep_assert_held(&p->pi_lock);

	return rb_entry(p->pi_waiters.rb_leftmost, struct rt_mutex_waiter,
			pi_tree.entry);
}

#define RT_MUTEX_HAS_WAITERS	1UL

static inline struct task_struct *rt_mutex_owner(struct rt_mutex_base *lock)
{
	unsigned long owner = (unsigned long) READ_ONCE(lock->owner);

	return (struct task_struct *) (owner & ~RT_MUTEX_HAS_WAITERS);
}
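
/*
 * A minimal sketch of the owner encoding that rt_mutex_owner() above
 * decodes, modeled on rt_mutex_set_owner() in rtmutex.c: bit 0 of the
 * owner pointer doubles as the "has waiters" flag. The helper name is
 * hypothetical; the real setters live in rtmutex.c.
 */
static inline void example_set_owner(struct rt_mutex_base *lock,
				     struct task_struct *owner)
{
	unsigned long val = (unsigned long)owner;

	/* Preserve the waiters bit when (re)writing the owner field. */
	if (rt_mutex_has_waiters(lock))
		val |= RT_MUTEX_HAS_WAITERS;

	WRITE_ONCE(lock->owner, (struct task_struct *)val);
}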

/*
 * Constants for rt mutex functions which have a selectable deadlock
 * detection.
 *
 * RT_MUTEX_MIN_CHAINWALK:	Stops the lock chain walk when there are
 *				no further PI adjustments to be made.
 *
 * RT_MUTEX_FULL_CHAINWALK:	Invoke deadlock detection with a full
 *				walk of the lock chain.
 */
enum rtmutex_chainwalk {
	RT_MUTEX_MIN_CHAINWALK,
	RT_MUTEX_FULL_CHAINWALK,
};

static inline void __rt_mutex_base_init(struct rt_mutex_base *lock)
{
	raw_spin_lock_init(&lock->wait_lock);
	lock->waiters = RB_ROOT_CACHED;
	lock->owner = NULL;
}

/* Debug functions */
static inline void debug_rt_mutex_unlock(struct rt_mutex_base *lock)
{
	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
		DEBUG_LOCKS_WARN_ON(rt_mutex_owner(lock) != current);
}

static inline void debug_rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
{
	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
		DEBUG_LOCKS_WARN_ON(!rt_mutex_owner(lock));
}

static inline void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
{
	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
		memset(waiter, 0x11, sizeof(*waiter));
}

static inline void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
{
	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
		memset(waiter, 0x22, sizeof(*waiter));
}

static inline void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
{
	debug_rt_mutex_init_waiter(waiter);
	RB_CLEAR_NODE(&waiter->pi_tree.entry);
	RB_CLEAR_NODE(&waiter->tree.entry);
	waiter->wake_state = TASK_NORMAL;
	waiter->task = NULL;
}

static inline void rt_mutex_init_rtlock_waiter(struct rt_mutex_waiter *waiter)
{
	rt_mutex_init_waiter(waiter);
	waiter->wake_state = TASK_RTLOCK_WAIT;
}
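
/*
 * A minimal sketch of the deferred-wakeup pattern that rt_wake_q_head,
 * __rt_mutex_futex_unlock() and rt_mutex_postunlock() above support,
 * modeled loosely on the PI-futex unlock path. The function name is
 * hypothetical; the real call sites live in kernel/futex/.
 */
static inline void example_futex_unlock_pattern(struct rt_mutex_base *lock)
{
	DEFINE_RT_WAKE_Q(wqh);
	bool postunlock;

	raw_spin_lock_irq(&lock->wait_lock);
	/* Queue the top waiter for wakeup and release ownership. */
	postunlock = __rt_mutex_futex_unlock(lock, &wqh);
	raw_spin_unlock_irq(&lock->wait_lock);

	/* Wake after wait_lock is dropped to keep the hold time short. */
	if (postunlock)
		rt_mutex_postunlock(&wqh);
}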

#else /* CONFIG_RT_MUTEXES */
/* Used in rcu/tree_plugin.h */
static inline struct task_struct *rt_mutex_owner(struct rt_mutex_base *lock)
{
	return NULL;
}
#endif /* !CONFIG_RT_MUTEXES */

#endif