Linux/kernel/locking/semaphore.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008 Intel Corporation
 * Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This file implements counting semaphores.
 * A counting semaphore may be acquired 'n' times before sleeping.
 * See mutex.c for single-acquisition sleeping locks which enforce
 * rules which allow code to be debugged more easily.
 */

/*
 * Some notes on the implementation:
 *
 * The spinlock controls access to the other members of the semaphore.
 * down_trylock() and up() can be called from interrupt context, so we
 * have to disable interrupts when taking the lock.  It turns out various
 * parts of the kernel expect to be able to use down() on a semaphore in
 * interrupt context when they know it will succeed, so we have to use
 * irqsave variants for down(), down_interruptible() and down_killable()
 * too.
 *
 * The ->count variable represents how many more tasks can acquire this
 * semaphore.  If it's zero, there may be tasks waiting on the wait_list.
 */

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/ftrace.h>
#include <trace/events/lock.h>

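/*
 * Illustrative sketch (not part of semaphore.c): declaring and
 * initialising a counting semaphore.  A semaphore initialised with
 * sema_init(&sem, n) can be taken n times before a further down()
 * sleeps.  The identifiers slot_sem and example_init_slots() are
 * hypothetical.
 */
static struct semaphore slot_sem;

static void example_init_slots(void)
{
        /* Allow up to four tasks to hold slot_sem concurrently. */
        sema_init(&slot_sem, 4);
}
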
static noinline void __down(struct semaphore *sem);
static noinline int __down_interruptible(struct semaphore *sem);
static noinline int __down_killable(struct semaphore *sem);
static noinline int __down_timeout(struct semaphore *sem, long timeout);
static noinline void __up(struct semaphore *sem);

/**
 * down - acquire the semaphore
 * @sem: the semaphore to be acquired
 *
 * Acquires the semaphore.  If no more tasks are allowed to acquire the
 * semaphore, calling this function will put the task to sleep until the
 * semaphore is released.
 *
 * Use of this function is deprecated, please use down_interruptible() or
 * down_killable() instead.
 */
void __sched down(struct semaphore *sem)
{
        unsigned long flags;

        might_sleep();
        raw_spin_lock_irqsave(&sem->lock, flags);
        if (likely(sem->count > 0))
                sem->count--;
        else
                __down(sem);
        raw_spin_unlock_irqrestore(&sem->lock, flags);
}
EXPORT_SYMBOL(down);
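
/*
 * Illustrative sketch (not part of semaphore.c): a caller that must not
 * be interrupted by signals, using the unconditional down() described
 * above; per the kerneldoc, new code would normally prefer
 * down_killable() or down_interruptible().  example_write_config() and
 * slot_sem are hypothetical.
 */
static void example_write_config(void)
{
        down(&slot_sem);        /* wait uninterruptibly for a slot */
        /* ... critical work that must always complete ... */
        up(&slot_sem);
}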

/**
 * down_interruptible - acquire the semaphore unless interrupted
 * @sem: the semaphore to be acquired
 *
 * Attempts to acquire the semaphore.  If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the sleep is interrupted by a signal, this function will return -EINTR.
 * If the semaphore is successfully acquired, this function returns 0.
 */
int __sched down_interruptible(struct semaphore *sem)
{
        unsigned long flags;
        int result = 0;

        might_sleep();
        raw_spin_lock_irqsave(&sem->lock, flags);
        if (likely(sem->count > 0))
                sem->count--;
        else
                result = __down_interruptible(sem);
        raw_spin_unlock_irqrestore(&sem->lock, flags);

        return result;
}
EXPORT_SYMBOL(down_interruptible);
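
/*
 * Illustrative sketch (not part of semaphore.c): the usual pattern for
 * down_interruptible() is to propagate its return value, so that a
 * signal delivered while sleeping aborts the operation with -EINTR.
 * example_do_request() is hypothetical.
 */
static int example_do_request(void)
{
        int err;

        err = down_interruptible(&slot_sem);
        if (err)
                return err;     /* -EINTR: interrupted by a signal */

        /* ... perform the request while holding a slot ... */

        up(&slot_sem);
        return 0;
}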

/**
 * down_killable - acquire the semaphore unless killed
 * @sem: the semaphore to be acquired
 *
 * Attempts to acquire the semaphore.  If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the sleep is interrupted by a fatal signal, this function will return
 * -EINTR.  If the semaphore is successfully acquired, this function returns
 * 0.
 */
int __sched down_killable(struct semaphore *sem)
{
        unsigned long flags;
        int result = 0;

        might_sleep();
        raw_spin_lock_irqsave(&sem->lock, flags);
        if (likely(sem->count > 0))
                sem->count--;
        else
                result = __down_killable(sem);
        raw_spin_unlock_irqrestore(&sem->lock, flags);

        return result;
}
EXPORT_SYMBOL(down_killable);
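
/*
 * Illustrative sketch (not part of semaphore.c): down_killable() sits
 * between down() and down_interruptible(): only a fatal signal wakes the
 * sleeper early, so ordinary signals do not force callers to handle a
 * retry path.  example_do_slow_io() is hypothetical.
 */
static int example_do_slow_io(void)
{
        if (down_killable(&slot_sem))
                return -EINTR;  /* task is being killed; give up */

        /* ... long-running work done on behalf of the task ... */

        up(&slot_sem);
        return 0;
}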

/**
 * down_trylock - try to acquire the semaphore, without waiting
 * @sem: the semaphore to be acquired
 *
 * Try to acquire the semaphore atomically.  Returns 0 if the semaphore has
 * been acquired successfully or 1 if it cannot be acquired.
 *
 * NOTE: This return value is inverted from both spin_trylock and
 * mutex_trylock!  Be careful about this when converting code.
 *
 * Unlike mutex_trylock, this function can be used from interrupt context,
 * and the semaphore can be released by any task or interrupt.
 */
int __sched down_trylock(struct semaphore *sem)
{
        unsigned long flags;
        int count;

        raw_spin_lock_irqsave(&sem->lock, flags);
        count = sem->count - 1;
        if (likely(count >= 0))
                sem->count = count;
        raw_spin_unlock_irqrestore(&sem->lock, flags);

        return (count < 0);
}
EXPORT_SYMBOL(down_trylock);
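
/*
 * Illustrative sketch (not part of semaphore.c): as the NOTE above warns,
 * the return value of down_trylock() is inverted with respect to
 * mutex_trylock() and spin_trylock(): a non-zero result means the
 * semaphore was NOT acquired.  example_poll_slot() is hypothetical.
 */
static bool example_poll_slot(void)
{
        if (down_trylock(&slot_sem))
                return false;   /* returned 1: no slot free, do not block */

        /* ... slot held; usable even from interrupt context ... */

        up(&slot_sem);
        return true;
}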

/**
 * down_timeout - acquire the semaphore within a specified time
 * @sem: the semaphore to be acquired
 * @timeout: how long to wait before failing
 *
 * Attempts to acquire the semaphore.  If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the semaphore is not released within the specified number of jiffies,
 * this function returns -ETIME.  It returns 0 if the semaphore was acquired.
 */
int __sched down_timeout(struct semaphore *sem, long timeout)
{
        unsigned long flags;
        int result = 0;

        might_sleep();
        raw_spin_lock_irqsave(&sem->lock, flags);
        if (likely(sem->count > 0))
                sem->count--;
        else
                result = __down_timeout(sem, timeout);
        raw_spin_unlock_irqrestore(&sem->lock, flags);

        return result;
}
EXPORT_SYMBOL(down_timeout);
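
/*
 * Illustrative sketch (not part of semaphore.c): the timeout is given in
 * jiffies, so callers typically convert from wall-clock units with
 * msecs_to_jiffies() (<linux/jiffies.h>) and check for -ETIME.
 * example_wait_for_slot() and the 500 ms bound are hypothetical.
 */
static int example_wait_for_slot(void)
{
        int err;

        err = down_timeout(&slot_sem, msecs_to_jiffies(500));
        if (err)
                return err;     /* -ETIME: no slot became free in time */

        /* ... slot held ... */

        up(&slot_sem);
        return 0;
}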

/**
 * up - release the semaphore
 * @sem: the semaphore to release
 *
 * Release the semaphore.  Unlike mutexes, up() may be called from any
 * context and even by tasks which have never called down().
 */
void __sched up(struct semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->lock, flags);
        if (likely(list_empty(&sem->wait_list)))
                sem->count++;
        else
                __up(sem);
        raw_spin_unlock_irqrestore(&sem->lock, flags);
}
EXPORT_SYMBOL(up);
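
/*
 * Illustrative sketch (not part of semaphore.c): because up() may be
 * called from any context, even by a task or interrupt that never called
 * down(), a semaphore can serve as a simple signalling device, e.g. an
 * interrupt handler releasing a task sleeping in down().  Requires
 * <linux/interrupt.h>; done_sem, example_irq_handler() and
 * example_wait_for_irq() are hypothetical.
 */
static struct semaphore done_sem;       /* sema_init(&done_sem, 0) at setup */

static irqreturn_t example_irq_handler(int irq, void *dev_id)
{
        up(&done_sem);          /* wake the waiter; this path never downs */
        return IRQ_HANDLED;
}

static void example_wait_for_irq(void)
{
        down(&done_sem);        /* sleeps until the interrupt fires */
}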

/* Functions for the contended case */

struct semaphore_waiter {
        struct list_head list;
        struct task_struct *task;
        bool up;
};

/*
 * Because this function is inlined, the 'state' parameter will be
 * constant, and thus optimised away by the compiler.  Likewise the
 * 'timeout' parameter for the cases without timeouts.
 */
static inline int __sched ___down_common(struct semaphore *sem, long state,
                                                                long timeout)
{
        struct semaphore_waiter waiter;

        list_add_tail(&waiter.list, &sem->wait_list);
        waiter.task = current;
        waiter.up = false;

        for (;;) {
                if (signal_pending_state(state, current))
                        goto interrupted;
                if (unlikely(timeout <= 0))
                        goto timed_out;
                __set_current_state(state);
                raw_spin_unlock_irq(&sem->lock);
                timeout = schedule_timeout(timeout);
                raw_spin_lock_irq(&sem->lock);
                if (waiter.up)
                        return 0;
        }

 timed_out:
        list_del(&waiter.list);
        return -ETIME;

 interrupted:
        list_del(&waiter.list);
        return -EINTR;
}

static inline int __sched __down_common(struct semaphore *sem, long state,
                                        long timeout)
{
        int ret;

        trace_contention_begin(sem, 0);
        ret = ___down_common(sem, state, timeout);
        trace_contention_end(sem, ret);

        return ret;
}

static noinline void __sched __down(struct semaphore *sem)
{
        __down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_interruptible(struct semaphore *sem)
{
        return __down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_killable(struct semaphore *sem)
{
        return __down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_timeout(struct semaphore *sem, long timeout)
{
        return __down_common(sem, TASK_UNINTERRUPTIBLE, timeout);
}

static noinline void __sched __up(struct semaphore *sem)
{
        struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
                                                struct semaphore_waiter, list);
        list_del(&waiter->list);
        waiter->up = true;
        wake_up_process(waiter->task);
}
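
/*
 * Added note (not part of the original file) on the wakeup handoff above:
 * __up() does not increment ->count.  It removes the longest-waiting task
 * from wait_list, sets waiter->up while still holding sem->lock and wakes
 * the task, so the released "slot" is handed directly to that waiter and a
 * concurrent down_trylock() cannot steal it, since ->count stays at zero.
 * The waiter, in turn, only checks waiter.up after retaking sem->lock,
 * which is what prevents a lost wakeup between schedule_timeout()
 * returning and the flag being set.
 */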
