TOMOYO Linux Cross Reference
Linux/arch/sh/include/asm/spinlock-llsc.h

/* SPDX-License-Identifier: GPL-2.0
 *
 * arch/sh/include/asm/spinlock-llsc.h
 *
 * Copyright (C) 2002, 2003 Paul Mundt
 * Copyright (C) 2006, 2007 Akio Idehara
 */
#ifndef __ASM_SH_SPINLOCK_LLSC_H
#define __ASM_SH_SPINLOCK_LLSC_H

#include <asm/barrier.h>
#include <asm/processor.h>
/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere.
 */

#define arch_spin_is_locked(x)          ((x)->lock <= 0)

/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions.  They have a cost.
 */
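/*
 * All primitives in this file are built on the SH-4A "LL/SC" pair:
 * movli.l loads a 32-bit value and opens a link to the address, and
 * movco.l stores back only while that link is still intact, setting
 * the T bit on success and clearing it on failure.  "bf 1b" (branch
 * if T is false) therefore retries the whole load/modify/store
 * sequence whenever another CPU raced with us.  Both instructions
 * operate on r0, hence the "z" register constraint below.
 *
 * The spinlock word itself holds 1 when free and 0 when held.
 */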
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        unsigned long tmp;
        unsigned long oldval;

        __asm__ __volatile__ (
                "1:                                             \n\t"
                "movli.l        @%2, %0 ! arch_spin_lock        \n\t"
                "mov            %0, %1                          \n\t"
                "mov            #0, %0                          \n\t"
                "movco.l        %0, @%2                         \n\t"
                "bf             1b                              \n\t"
                "cmp/pl         %1                              \n\t"
                "bf             1b                              \n\t"
                : "=&z" (tmp), "=&r" (oldval)
                : "r" (&lock->lock)
                : "t", "memory"
        );
}

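/*
 * The lock is released by simply storing 1 back; no LL/SC round trip
 * is needed because the current owner is the only possible writer.
 * mmiowb() keeps MMIO writes done inside the critical section from
 * being reordered past the releasing store.
 */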
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        unsigned long tmp;

        /* This could be optimised with ARCH_HAS_MMIOWB */
        mmiowb();
        __asm__ __volatile__ (
                "mov            #1, %0 ! arch_spin_unlock       \n\t"
                "mov.l          %0, @%1                         \n\t"
                : "=&z" (tmp)
                : "r" (&lock->lock)
                : "t", "memory"
        );
}

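/*
 * Returns non-zero (the old lock value, 1) if the lock was taken, and
 * 0 if it was already held.  The store of 0 is retried until movco.l
 * succeeds, and the trailing synco is a barrier that keeps the
 * critical section from leaking past a successful acquisition.
 */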
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        unsigned long tmp, oldval;

        __asm__ __volatile__ (
                "1:                                             \n\t"
                "movli.l        @%2, %0 ! arch_spin_trylock     \n\t"
                "mov            %0, %1                          \n\t"
                "mov            #0, %0                          \n\t"
                "movco.l        %0, @%2                         \n\t"
                "bf             1b                              \n\t"
                "synco                                          \n\t"
                : "=&z" (tmp), "=&r" (oldval)
                : "r" (&lock->lock)
                : "t", "memory"
        );

        return oldval;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to take an irq-safe write-lock, but readers can take non-irq-safe
 * read-locks.
 */

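/*
 * The rwlock word uses the classic bias scheme: it starts at
 * RW_LOCK_BIAS (a large positive constant), each reader subtracts 1,
 * and a writer subtracts the entire bias.  Readers may proceed while
 * the count is still positive; a writer must observe the full bias,
 * i.e. no readers and no writer present.
 */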
static inline void arch_read_lock(arch_rwlock_t *rw)
{
        unsigned long tmp;

        __asm__ __volatile__ (
                "1:                                             \n\t"
                "movli.l        @%1, %0 ! arch_read_lock        \n\t"
                "cmp/pl         %0                              \n\t"
                "bf             1b                              \n\t"
                "add            #-1, %0                         \n\t"
                "movco.l        %0, @%1                         \n\t"
                "bf             1b                              \n\t"
                : "=&z" (tmp)
                : "r" (&rw->lock)
                : "t", "memory"
        );
}

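/*
 * A reader drops its reference by adding 1 back.  This still needs
 * the LL/SC loop, since other readers may be updating the same word
 * concurrently.
 */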
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned long tmp;

        __asm__ __volatile__ (
                "1:                                             \n\t"
                "movli.l        @%1, %0 ! arch_read_unlock      \n\t"
                "add            #1, %0                          \n\t"
                "movco.l        %0, @%1                         \n\t"
                "bf             1b                              \n\t"
                : "=&z" (tmp)
                : "r" (&rw->lock)
                : "t", "memory"
        );
}

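/*
 * cmp/hs is an unsigned >= compare: the writer spins until the lock
 * word has climbed back to RW_LOCK_BIAS, then claims exclusive
 * ownership by subtracting the whole bias, leaving 0.
 */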
static inline void arch_write_lock(arch_rwlock_t *rw)
{
        unsigned long tmp;

        __asm__ __volatile__ (
                "1:                                             \n\t"
                "movli.l        @%1, %0 ! arch_write_lock       \n\t"
                "cmp/hs         %2, %0                          \n\t"
                "bf             1b                              \n\t"
                "sub            %2, %0                          \n\t"
                "movco.l        %0, @%1                         \n\t"
                "bf             1b                              \n\t"
                : "=&z" (tmp)
                : "r" (&rw->lock), "r" (RW_LOCK_BIAS)
                : "t", "memory"
        );
}

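/*
 * Write unlock restores the full bias with a plain store; as with
 * arch_spin_unlock(), the writer holds exclusive ownership, so no
 * LL/SC round trip is required.
 */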
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        __asm__ __volatile__ (
                "mov.l          %1, @%0 ! arch_write_unlock     \n\t"
                :
                : "r" (&rw->lock), "r" (RW_LOCK_BIAS)
                : "t", "memory"
        );
}

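/*
 * Returns 1 if a read reference was taken, 0 if the lock is
 * write-held.  On failure the "bf 2f" jumps past movco.l entirely,
 * abandoning the reservation without writing anything.
 */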
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        unsigned long tmp, oldval;

        __asm__ __volatile__ (
                "1:                                             \n\t"
                "movli.l        @%2, %0 ! arch_read_trylock     \n\t"
                "mov            %0, %1                          \n\t"
                "cmp/pl         %0                              \n\t"
                "bf             2f                              \n\t"
                "add            #-1, %0                         \n\t"
                "movco.l        %0, @%2                         \n\t"
                "bf             1b                              \n\t"
                "2:                                             \n\t"
                "synco                                          \n\t"
                : "=&z" (tmp), "=&r" (oldval)
                : "r" (&rw->lock)
                : "t", "memory"
        );

        return (oldval > 0);
}

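/*
 * Unlike arch_read_trylock(), the failure path here falls through to
 * movco.l with the value unchanged and loops until the
 * store-conditional succeeds, so the snapshot in oldval is always
 * taken from a successful LL/SC round trip.  Success means the old
 * value held the full bias.
 */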
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        unsigned long tmp, oldval;

        __asm__ __volatile__ (
                "1:                                             \n\t"
                "movli.l        @%2, %0 ! arch_write_trylock    \n\t"
                "mov            %0, %1                          \n\t"
                "cmp/hs         %3, %0                          \n\t"
                "bf             2f                              \n\t"
                "sub            %3, %0                          \n\t"
                "2:                                             \n\t"
                "movco.l        %0, @%2                         \n\t"
                "bf             1b                              \n\t"
                "synco                                          \n\t"
                : "=&z" (tmp), "=&r" (oldval)
                : "r" (&rw->lock), "r" (RW_LOCK_BIAS)
                : "t", "memory"
        );

        return (oldval > (RW_LOCK_BIAS - 1));
}

#endif /* __ASM_SH_SPINLOCK_LLSC_H */
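For context, a minimal sketch of how these primitives end up being exercised. Kernel code never calls arch_spin_lock() directly; it goes through the generic spinlock API, which bottoms out in the routines above on SH-4A. The lock and function names in the snippet are illustrative, not part of this header.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(counter_lock);  /* lock word starts at 1 (unlocked) */
static unsigned long counter;

static void bump_counter(void)
{
        unsigned long flags;

        /* Disables local IRQs, then spins in arch_spin_lock()'s
         * movli.l/movco.l loop until the word is seen > 0 and 0 is
         * swapped in. */
        spin_lock_irqsave(&counter_lock, flags);
        counter++;

        /* Plain store of 1 via arch_spin_unlock(), then IRQs restored. */
        spin_unlock_irqrestore(&counter_lock, flags);
}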
