/* SPDX-License-Identifier: GPL-2.0 */
/*
 * RT Mutexes: blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner:
 *
 *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 * This file contains the private data structure and API definitions.
 */

#ifndef __KERNEL_RTMUTEX_COMMON_H
#define __KERNEL_RTMUTEX_COMMON_H

#include <linux/debug_locks.h>
#include <linux/rtmutex.h>
#include <linux/sched/wake_q.h>


/*
 * This is a helper for the struct rt_mutex_waiter below. A waiter goes in two
 * separate trees and they need their own copy of the sort keys because of
 * different locking requirements.
 *
 * @entry:		rbtree node to enqueue into the waiters tree
 * @prio:		Priority of the waiter
 * @deadline:		Deadline of the waiter if applicable
 *
 * See rt_waiter_node_less() and waiter_*_prio().
 */
struct rt_waiter_node {
	struct rb_node	entry;
	int		prio;
	u64		deadline;
};

/*
 * This is the control structure for tasks blocked on a rt_mutex,
 * which is allocated on the kernel stack of the blocked task.
 *
 * @tree:		node to enqueue into the mutex waiters tree
 * @pi_tree:		node to enqueue into the mutex owner waiters tree
 * @task:		task reference to the blocked task
 * @lock:		Pointer to the rt_mutex on which the waiter blocks
 * @wake_state:		Wakeup state to use (TASK_NORMAL or TASK_RTLOCK_WAIT)
 * @ww_ctx:		WW context pointer
 *
 * @tree is ordered by @lock->wait_lock
 * @pi_tree is ordered by rt_mutex_owner(@lock)->pi_lock
 */
struct rt_mutex_waiter {
	struct rt_waiter_node	tree;
	struct rt_waiter_node	pi_tree;
	struct task_struct	*task;
	struct rt_mutex_base	*lock;
	unsigned int		wake_state;
	struct ww_acquire_ctx	*ww_ctx;
};

/**
 * rt_wake_q_head - Wrapper around regular wake_q_head to support
 *		    "sleeping" spinlocks on RT
 * @head:		The regular wake_q_head for sleeping lock variants
 * @rtlock_task:	Task pointer for RT lock (spin/rwlock) wakeups
 */
struct rt_wake_q_head {
	struct wake_q_head	head;
	struct task_struct	*rtlock_task;
};

#define DEFINE_RT_WAKE_Q(name)						\
	struct rt_wake_q_head name = {					\
		.head		= WAKE_Q_HEAD_INITIALIZER(name.head),	\
		.rtlock_task	= NULL,					\
	}

/*
 * PI-futex support (proxy locking functions, etc.):
 */
extern void rt_mutex_init_proxy_locked(struct rt_mutex_base *lock,
				       struct task_struct *proxy_owner);
extern void rt_mutex_proxy_unlock(struct rt_mutex_base *lock);
extern int __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
				       struct rt_mutex_waiter *waiter,
				       struct task_struct *task);
extern int rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
				     struct rt_mutex_waiter *waiter,
				     struct task_struct *task);
extern int rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
				    struct hrtimer_sleeper *to,
				    struct rt_mutex_waiter *waiter);
extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock,
					struct rt_mutex_waiter *waiter);

extern int rt_mutex_futex_trylock(struct rt_mutex_base *l);
extern int __rt_mutex_futex_trylock(struct rt_mutex_base *l);

extern void rt_mutex_futex_unlock(struct rt_mutex_base *lock);
extern bool __rt_mutex_futex_unlock(struct rt_mutex_base *lock,
				    struct rt_wake_q_head *wqh);

extern void rt_mutex_postunlock(struct rt_wake_q_head *wqh);

/*
 * Must be guarded because this header is included from rcu/tree_plugin.h
 * unconditionally.
 */
#ifdef CONFIG_RT_MUTEXES
static inline int rt_mutex_has_waiters(struct rt_mutex_base *lock)
{
	return !RB_EMPTY_ROOT(&lock->waiters.rb_root);
}

/*
 * Lockless speculative check whether @waiter is still the top waiter on
 * @lock. This is solely comparing pointers and not dereferencing the
 * leftmost entry which might be about to vanish.
 */
static inline bool rt_mutex_waiter_is_top_waiter(struct rt_mutex_base *lock,
						 struct rt_mutex_waiter *waiter)
{
	struct rb_node *leftmost = rb_first_cached(&lock->waiters);

	return rb_entry(leftmost, struct rt_mutex_waiter, tree.entry) == waiter;
}

static inline struct rt_mutex_waiter *rt_mutex_top_waiter(struct rt_mutex_base *lock)
{
	struct rb_node *leftmost = rb_first_cached(&lock->waiters);
	struct rt_mutex_waiter *w = NULL;

	lockdep_assert_held(&lock->wait_lock);

	if (leftmost) {
		w = rb_entry(leftmost, struct rt_mutex_waiter, tree.entry);
		BUG_ON(w->lock != lock);
	}
	return w;
}

static inline int task_has_pi_waiters(struct task_struct *p)
{
	return !RB_EMPTY_ROOT(&p->pi_waiters.rb_root);
}

static inline struct rt_mutex_waiter *task_top_pi_waiter(struct task_struct *p)
{
	lockdep_assert_held(&p->pi_lock);

	return rb_entry(p->pi_waiters.rb_leftmost, struct rt_mutex_waiter,
			pi_tree.entry);
}

#define RT_MUTEX_HAS_WAITERS	1UL

static inline struct task_struct *rt_mutex_owner(struct rt_mutex_base *lock)
{
	unsigned long owner = (unsigned long) READ_ONCE(lock->owner);

	return (struct task_struct *) (owner & ~RT_MUTEX_HAS_WAITERS);
}

/*
 * Constants for rt mutex functions which have a selectable deadlock
 * detection.
 *
 * RT_MUTEX_MIN_CHAINWALK:	Stops the lock chain walk when there are
 *				no further PI adjustments to be made.
 *
 * RT_MUTEX_FULL_CHAINWALK:	Invoke deadlock detection with a full
 *				walk of the lock chain.
 */
enum rtmutex_chainwalk {
	RT_MUTEX_MIN_CHAINWALK,
	RT_MUTEX_FULL_CHAINWALK,
};

static inline void __rt_mutex_base_init(struct rt_mutex_base *lock)
{
	raw_spin_lock_init(&lock->wait_lock);
	lock->waiters = RB_ROOT_CACHED;
	lock->owner = NULL;
}

/* Debug functions */
static inline void debug_rt_mutex_unlock(struct rt_mutex_base *lock)
{
	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
		DEBUG_LOCKS_WARN_ON(rt_mutex_owner(lock) != current);
}

static inline void debug_rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
{
	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
		DEBUG_LOCKS_WARN_ON(!rt_mutex_owner(lock));
}

static inline void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
{
	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
		memset(waiter, 0x11, sizeof(*waiter));
}

static inline void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
{
	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
		memset(waiter, 0x22, sizeof(*waiter));
}

static inline void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
{
	debug_rt_mutex_init_waiter(waiter);
	RB_CLEAR_NODE(&waiter->pi_tree.entry);
	RB_CLEAR_NODE(&waiter->tree.entry);
	waiter->wake_state = TASK_NORMAL;
	waiter->task = NULL;
}

static inline void rt_mutex_init_rtlock_waiter(struct rt_mutex_waiter *waiter)
{
	rt_mutex_init_waiter(waiter);
	waiter->wake_state = TASK_RTLOCK_WAIT;
}

#else /* CONFIG_RT_MUTEXES */
/* Used in rcu/tree_plugin.h */
static inline struct task_struct *rt_mutex_owner(struct rt_mutex_base *lock)
{
	return NULL;
}
#endif /* !CONFIG_RT_MUTEXES */

#endif
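The RT_MUTEX_HAS_WAITERS encoding consumed by rt_mutex_owner() above packs a status bit into the low bit of the owner task_struct pointer, which works because the pointer is at least 2-byte aligned. The standalone sketch below illustrates the same mask-and-test idea outside the kernel; all names in it (demo_task, demo_lock, demo_owner, demo_has_waiters_flag) are hypothetical and it is only an illustration of the technique, not part of this header or of the kernel API.

/*
 * Standalone illustration of the owner-pointer bit packing used by
 * rt_mutex_owner(). Hypothetical types and names; builds with any C99
 * compiler. Assumes the task object is at least 2-byte aligned so the
 * low pointer bit is free to carry a flag.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_HAS_WAITERS	1UL

struct demo_task { int pid; };

struct demo_lock {
	struct demo_task *owner;	/* low bit doubles as "has waiters" flag */
};

static struct demo_task *demo_owner(struct demo_lock *lock)
{
	uintptr_t owner = (uintptr_t)lock->owner;

	/* Strip the flag bit to recover the real task pointer. */
	return (struct demo_task *)(owner & ~DEMO_HAS_WAITERS);
}

static int demo_has_waiters_flag(struct demo_lock *lock)
{
	return ((uintptr_t)lock->owner & DEMO_HAS_WAITERS) != 0;
}

int main(void)
{
	static struct demo_task t = { .pid = 42 };
	struct demo_lock lock;

	/* Store the owner with the "has waiters" bit OR'ed into the pointer. */
	lock.owner = (struct demo_task *)((uintptr_t)&t | DEMO_HAS_WAITERS);

	printf("owner pid=%d, has_waiters=%d\n",
	       demo_owner(&lock)->pid, demo_has_waiters_flag(&lock));
	return 0;
}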