~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/include/linux/spinlock_up.h

Version: ~ [ linux-6.12-rc7 ] ~ [ linux-6.11.7 ] ~ [ linux-6.10.14 ] ~ [ linux-6.9.12 ] ~ [ linux-6.8.12 ] ~ [ linux-6.7.12 ] ~ [ linux-6.6.60 ] ~ [ linux-6.5.13 ] ~ [ linux-6.4.16 ] ~ [ linux-6.3.13 ] ~ [ linux-6.2.16 ] ~ [ linux-6.1.116 ] ~ [ linux-6.0.19 ] ~ [ linux-5.19.17 ] ~ [ linux-5.18.19 ] ~ [ linux-5.17.15 ] ~ [ linux-5.16.20 ] ~ [ linux-5.15.171 ] ~ [ linux-5.14.21 ] ~ [ linux-5.13.19 ] ~ [ linux-5.12.19 ] ~ [ linux-5.11.22 ] ~ [ linux-5.10.229 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.285 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.323 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.336 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.337 ] ~ [ linux-4.4.302 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.12 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

Diff markup

Differences between /include/linux/spinlock_up.h (Version linux-6.12-rc7) and /include/linux/spinlock_up.h (Version linux-4.12.14)


  1 #ifndef __LINUX_SPINLOCK_UP_H                       1 #ifndef __LINUX_SPINLOCK_UP_H
  2 #define __LINUX_SPINLOCK_UP_H                       2 #define __LINUX_SPINLOCK_UP_H
  3                                                     3 
  4 #ifndef __LINUX_INSIDE_SPINLOCK_H              !!   4 #ifndef __LINUX_SPINLOCK_H
  5 # error "please don't include this file directly"      5 # error "please don't include this file directly"
  6 #endif                                              6 #endif
  7                                                     7 
  8 #include <asm/processor.h>      /* for cpu_relax() */      8 #include <asm/processor.h>      /* for cpu_relax() */
  9 #include <asm/barrier.h>                            9 #include <asm/barrier.h>
 10                                                    10 
 11 /*                                                 11 /*
 12  * include/linux/spinlock_up.h - UP-debug version of spinlocks.     12  * include/linux/spinlock_up.h - UP-debug version of spinlocks.
 13  *                                                 13  *
 14  * portions Copyright 2005, Red Hat, Inc., Ingo Molnar     14  * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
 15  * Released under the General Public License (GPL).     15  * Released under the General Public License (GPL).
 16  *                                                 16  *
 17  * In the debug case, 1 means unlocked, 0 means locked. (the values     17  * In the debug case, 1 means unlocked, 0 means locked. (the values
 18  * are inverted, to catch initialization bugs)     18  * are inverted, to catch initialization bugs)
 19  *                                                 19  *
 20  * No atomicity anywhere, we are on UP. However, we still need     20  * No atomicity anywhere, we are on UP. However, we still need
 21  * the compiler barriers, because we do not want the compiler to     21  * the compiler barriers, because we do not want the compiler to
 22  * move potentially faulting instructions (notably user accesses)     22  * move potentially faulting instructions (notably user accesses)
 23  * into the locked sequence, resulting in non-atomic execution.     23  * into the locked sequence, resulting in non-atomic execution.
 24  */                                                24  */
 25                                                    25 
 26 #ifdef CONFIG_DEBUG_SPINLOCK                       26 #ifdef CONFIG_DEBUG_SPINLOCK
 27 #define arch_spin_is_locked(x)          ((x)->slock == 0)     27 #define arch_spin_is_locked(x)          ((x)->slock == 0)
 28                                                    28 
                                                   >>  29 static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
                                                   >>  30 {
                                                   >>  31         smp_cond_load_acquire(&lock->slock, VAL);
                                                   >>  32 }
                                                   >>  33 
 29 static inline void arch_spin_lock(arch_spinlock_t *lock)     34 static inline void arch_spin_lock(arch_spinlock_t *lock)
 30 {                                                  35 {
 31         lock->slock = 0;                           36         lock->slock = 0;
 32         barrier();                                 37         barrier();
 33 }                                                  38 }
 34                                                    39 
                                                   >>  40 static inline void
                                                   >>  41 arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
                                                   >>  42 {
                                                   >>  43         local_irq_save(flags);
                                                   >>  44         lock->slock = 0;
                                                   >>  45         barrier();
                                                   >>  46 }
                                                   >>  47 
 35 static inline int arch_spin_trylock(arch_spinlock_t *lock)     48 static inline int arch_spin_trylock(arch_spinlock_t *lock)
 36 {                                                  49 {
 37         char oldval = lock->slock;                 50         char oldval = lock->slock;
 38                                                    51 
 39         lock->slock = 0;                           52         lock->slock = 0;
 40         barrier();                                 53         barrier();
 41                                                    54 
 42         return oldval > 0;                         55         return oldval > 0;
 43 }                                                  56 }
 44                                                    57 
 45 static inline void arch_spin_unlock(arch_spinlock_t *lock)     58 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 46 {                                                  59 {
 47         barrier();                                 60         barrier();
 48         lock->slock = 1;                           61         lock->slock = 1;
 49 }                                                  62 }
 50                                                    63 
 51 /*                                                 64 /*
 52  * Read-write spinlocks. No debug version.         65  * Read-write spinlocks. No debug version.
 53  */                                                66  */
 54 #define arch_read_lock(lock)            do { barrier(); (void)(lock); } while (0)     67 #define arch_read_lock(lock)            do { barrier(); (void)(lock); } while (0)
 55 #define arch_write_lock(lock)           do { barrier(); (void)(lock); } while (0)     68 #define arch_write_lock(lock)           do { barrier(); (void)(lock); } while (0)
 56 #define arch_read_trylock(lock) ({ barrier(); (void)(lock); 1; })     69 #define arch_read_trylock(lock) ({ barrier(); (void)(lock); 1; })
 57 #define arch_write_trylock(lock)        ({ barrier(); (void)(lock); 1; })     70 #define arch_write_trylock(lock)        ({ barrier(); (void)(lock); 1; })
 58 #define arch_read_unlock(lock)          do { barrier(); (void)(lock); } while (0)     71 #define arch_read_unlock(lock)          do { barrier(); (void)(lock); } while (0)
 59 #define arch_write_unlock(lock) do { barrier(); (void)(lock); } while (0)     72 #define arch_write_unlock(lock) do { barrier(); (void)(lock); } while (0)
 60                                                    73 
 61 #else /* DEBUG_SPINLOCK */                         74 #else /* DEBUG_SPINLOCK */
 62 #define arch_spin_is_locked(lock)       ((void)(lock), 0)     75 #define arch_spin_is_locked(lock)       ((void)(lock), 0)
                                                   >>  76 #define arch_spin_unlock_wait(lock)     do { barrier(); (void)(lock); } while (0)
 63 /* for sched/core.c and kernel_lock.c: */          77 /* for sched/core.c and kernel_lock.c: */
 64 # define arch_spin_lock(lock)           do { barrier(); (void)(lock); } while (0)     78 # define arch_spin_lock(lock)           do { barrier(); (void)(lock); } while (0)
                                                   >>  79 # define arch_spin_lock_flags(lock, flags)      do { barrier(); (void)(lock); } while (0)
 65 # define arch_spin_unlock(lock) do { barrier(); (void)(lock); } while (0)     80 # define arch_spin_unlock(lock) do { barrier(); (void)(lock); } while (0)
 66 # define arch_spin_trylock(lock)        ({ barrier(); (void)(lock); 1; })     81 # define arch_spin_trylock(lock)        ({ barrier(); (void)(lock); 1; })
 67 #endif /* DEBUG_SPINLOCK */                        82 #endif /* DEBUG_SPINLOCK */
 68                                                    83 
 69 #define arch_spin_is_contended(lock)    (((void)(lock), 0))     84 #define arch_spin_is_contended(lock)    (((void)(lock), 0))
                                                   >>  85 
                                                   >>  86 #define arch_read_can_lock(lock)        (((void)(lock), 1))
                                                   >>  87 #define arch_write_can_lock(lock)       (((void)(lock), 1))
 70                                                    88 
 71 #endif /* __LINUX_SPINLOCK_UP_H */                 89 #endif /* __LINUX_SPINLOCK_UP_H */
 72                                                    90 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

sflogo.php