TOMOYO Linux Cross Reference
Linux/include/linux/page_ref.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGE_REF_H
#define _LINUX_PAGE_REF_H

#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/tracepoint-defs.h>

DECLARE_TRACEPOINT(page_ref_set);
DECLARE_TRACEPOINT(page_ref_mod);
DECLARE_TRACEPOINT(page_ref_mod_and_test);
DECLARE_TRACEPOINT(page_ref_mod_and_return);
DECLARE_TRACEPOINT(page_ref_mod_unless);
DECLARE_TRACEPOINT(page_ref_freeze);
DECLARE_TRACEPOINT(page_ref_unfreeze);

#ifdef CONFIG_DEBUG_PAGE_REF

/*
 * Ideally we would use the trace_<tracepoint>_enabled() helpers, but
 * including <linux/tracepoint.h> here causes header dependency
 * problems.  Instead we test the tracepoint's static key directly,
 * via tracepoint_enabled() from <linux/tracepoint-defs.h>.
 *
 * See trace_##name##_enabled(void) in include/linux/tracepoint.h
 */
#define page_ref_tracepoint_active(t) tracepoint_enabled(t)

extern void __page_ref_set(struct page *page, int v);
extern void __page_ref_mod(struct page *page, int v);
extern void __page_ref_mod_and_test(struct page *page, int v, int ret);
extern void __page_ref_mod_and_return(struct page *page, int v, int ret);
extern void __page_ref_mod_unless(struct page *page, int v, int u);
extern void __page_ref_freeze(struct page *page, int v, int ret);
extern void __page_ref_unfreeze(struct page *page, int v);

#else

#define page_ref_tracepoint_active(t) false

static inline void __page_ref_set(struct page *page, int v)
{
}
static inline void __page_ref_mod(struct page *page, int v)
{
}
static inline void __page_ref_mod_and_test(struct page *page, int v, int ret)
{
}
static inline void __page_ref_mod_and_return(struct page *page, int v, int ret)
{
}
static inline void __page_ref_mod_unless(struct page *page, int v, int u)
{
}
static inline void __page_ref_freeze(struct page *page, int v, int ret)
{
}
static inline void __page_ref_unfreeze(struct page *page, int v)
{
}

#endif /* CONFIG_DEBUG_PAGE_REF */
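
/*
 * Note: with CONFIG_DEBUG_PAGE_REF=y the hooks above fire the page_ref
 * tracepoints declared at the top of this file; once tracing is enabled
 * they appear under the tracefs events/page_ref/ directory.
 */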

static inline int page_ref_count(const struct page *page)
{
	return atomic_read(&page->_refcount);
}

/**
 * folio_ref_count - The reference count on this folio.
 * @folio: The folio.
 *
 * The refcount is usually incremented by calls to folio_get() and
 * decremented by calls to folio_put().  Some typical users of the
 * folio refcount:
 *
 * - Each reference from a page table
 * - The page cache
 * - Filesystem private data
 * - The LRU list
 * - Pipes
 * - Direct IO which references this page in the process address space
 *
 * Return: The number of references to this folio.
 */
static inline int folio_ref_count(const struct folio *folio)
{
	return page_ref_count(&folio->page);
}

static inline int page_count(const struct page *page)
{
	return folio_ref_count(page_folio(page));
}
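
/*
 * Illustrative sketch, not part of the upstream header: a read of the
 * refcount is only a snapshot, so it is mainly useful for assertions
 * such as "am I the sole owner?".  The helper name is hypothetical.
 */
static inline bool example_folio_is_exclusive(const struct folio *folio)
{
	/* One reference left means no page table, LRU, cache or I/O user. */
	return folio_ref_count(folio) == 1;
}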

static inline void set_page_count(struct page *page, int v)
{
	atomic_set(&page->_refcount, v);
	if (page_ref_tracepoint_active(page_ref_set))
		__page_ref_set(page, v);
}

static inline void folio_set_count(struct folio *folio, int v)
{
	set_page_count(&folio->page, v);
}

/*
 * Set up the page count before the page is freed into the page
 * allocator for the first time (boot or memory hotplug).
 */
static inline void init_page_count(struct page *page)
{
	set_page_count(page, 1);
}
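
/*
 * Illustrative sketch, not part of the upstream header: early memmap
 * initialisation gives every new struct page the single reference the
 * allocator expects before the page is handed over.  The loop and the
 * function name are hypothetical simplifications of the real code in mm/.
 */
static inline void example_init_page_range(struct page *start,
					   unsigned long nr_pages)
{
	unsigned long i;

	for (i = 0; i < nr_pages; i++)
		init_page_count(&start[i]);
}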

static inline void page_ref_add(struct page *page, int nr)
{
	atomic_add(nr, &page->_refcount);
	if (page_ref_tracepoint_active(page_ref_mod))
		__page_ref_mod(page, nr);
}

static inline void folio_ref_add(struct folio *folio, int nr)
{
	page_ref_add(&folio->page, nr);
}

static inline void page_ref_sub(struct page *page, int nr)
{
	atomic_sub(nr, &page->_refcount);
	if (page_ref_tracepoint_active(page_ref_mod))
		__page_ref_mod(page, -nr);
}

static inline void folio_ref_sub(struct folio *folio, int nr)
{
	page_ref_sub(&folio->page, nr);
}

static inline int folio_ref_sub_return(struct folio *folio, int nr)
{
	int ret = atomic_sub_return(nr, &folio->_refcount);

	if (page_ref_tracepoint_active(page_ref_mod_and_return))
		__page_ref_mod_and_return(&folio->page, -nr, ret);
	return ret;
}
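
/*
 * Illustrative sketch, not part of the upstream header: a batched
 * pin/unpin pair, e.g. one reference per subpage handed to an I/O
 * path.  The names are hypothetical, and the caller must hold its own
 * reference so the count cannot reach zero while users remain.
 */
static inline void example_pin_users(struct folio *folio, int nr_users)
{
	folio_ref_add(folio, nr_users);
}

static inline int example_unpin_users(struct folio *folio, int nr_users)
{
	/* The remaining count tells the caller if other users persist. */
	return folio_ref_sub_return(folio, nr_users);
}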

static inline void page_ref_inc(struct page *page)
{
	atomic_inc(&page->_refcount);
	if (page_ref_tracepoint_active(page_ref_mod))
		__page_ref_mod(page, 1);
}

static inline void folio_ref_inc(struct folio *folio)
{
	page_ref_inc(&folio->page);
}

static inline void page_ref_dec(struct page *page)
{
	atomic_dec(&page->_refcount);
	if (page_ref_tracepoint_active(page_ref_mod))
		__page_ref_mod(page, -1);
}

static inline void folio_ref_dec(struct folio *folio)
{
	page_ref_dec(&folio->page);
}
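
/*
 * Illustrative sketch, not part of the upstream header: the plain
 * inc/dec pair is safe only when the caller already holds a reference,
 * so neither operation can race with the folio being freed.
 */
static inline void example_borrow_folio(struct folio *folio)
{
	folio_ref_inc(folio);	/* take an extra reference */
	/* ... use the folio ... */
	folio_ref_dec(folio);	/* give it back; the count stays above zero */
}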

static inline int page_ref_sub_and_test(struct page *page, int nr)
{
	int ret = atomic_sub_and_test(nr, &page->_refcount);

	if (page_ref_tracepoint_active(page_ref_mod_and_test))
		__page_ref_mod_and_test(page, -nr, ret);
	return ret;
}

static inline int folio_ref_sub_and_test(struct folio *folio, int nr)
{
	return page_ref_sub_and_test(&folio->page, nr);
}
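
/*
 * Illustrative sketch, not part of the upstream header: dropping a
 * batch of references and catching the final put.  example_free() is a
 * hypothetical stand-in for whatever reclaim the caller performs.
 */
static inline void example_put_many(struct page *page, int nr,
				    void (*example_free)(struct page *))
{
	if (page_ref_sub_and_test(page, nr))
		example_free(page);	/* the count hit zero: last reference */
}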

static inline int page_ref_inc_return(struct page *page)
{
	int ret = atomic_inc_return(&page->_refcount);

	if (page_ref_tracepoint_active(page_ref_mod_and_return))
		__page_ref_mod_and_return(page, 1, ret);
	return ret;
}

static inline int folio_ref_inc_return(struct folio *folio)
{
	return page_ref_inc_return(&folio->page);
}
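
/*
 * Illustrative sketch, not part of the upstream header: the _return
 * variant lets a caller sanity-check the count it just produced.  A
 * result below 2 here would mean the page had no reference before the
 * increment, which a legitimate caller can never observe.
 */
static inline void example_get_checked(struct page *page)
{
	int ret = page_ref_inc_return(page);

	VM_BUG_ON_PAGE(ret < 2, page);
}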

static inline int page_ref_dec_and_test(struct page *page)
{
	int ret = atomic_dec_and_test(&page->_refcount);

	if (page_ref_tracepoint_active(page_ref_mod_and_test))
		__page_ref_mod_and_test(page, -1, ret);
	return ret;
}

static inline int folio_ref_dec_and_test(struct folio *folio)
{
	return page_ref_dec_and_test(&folio->page);
}

static inline int page_ref_dec_return(struct page *page)
{
	int ret = atomic_dec_return(&page->_refcount);

	if (page_ref_tracepoint_active(page_ref_mod_and_return))
		__page_ref_mod_and_return(page, -1, ret);
	return ret;
}

static inline int folio_ref_dec_return(struct folio *folio)
{
	return page_ref_dec_return(&folio->page);
}

static inline bool page_ref_add_unless(struct page *page, int nr, int u)
{
	bool ret = false;

	rcu_read_lock();
	/* avoid writing to the vmemmap area being remapped */
	if (!page_is_fake_head(page) && page_ref_count(page) != u)
		ret = atomic_add_unless(&page->_refcount, nr, u);
	rcu_read_unlock();

	if (page_ref_tracepoint_active(page_ref_mod_unless))
		__page_ref_mod_unless(page, nr, ret);
	return ret;
}

static inline bool folio_ref_add_unless(struct folio *folio, int nr, int u)
{
	return page_ref_add_unless(&folio->page, nr, u);
}

/**
 * folio_try_get - Attempt to increase the refcount on a folio.
 * @folio: The folio.
 *
 * If you do not already have a reference to a folio, you can attempt to
 * get one using this function.  It may fail if, for example, the folio
 * has been freed since you found a pointer to it, or it is frozen for
 * the purposes of splitting or migration.
 *
 * Return: True if the reference count was successfully incremented.
 */
static inline bool folio_try_get(struct folio *folio)
{
	return folio_ref_add_unless(folio, 1, 0);
}

static inline bool folio_ref_try_add(struct folio *folio, int count)
{
	return folio_ref_add_unless(folio, count, 0);
}
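
/*
 * Illustrative sketch, not part of the upstream header: the speculative
 * lookup pattern folio_try_get() exists for.  The name is hypothetical;
 * real users (e.g. the page cache) re-validate the folio after the grab
 * and drop the reference again on a mismatch, since the folio may have
 * been freed and reused between the lookup and the try_get.
 */
static inline struct folio *example_speculative_get(struct folio *folio)
{
	if (!folio_try_get(folio))
		return NULL;	/* freed, or frozen for split/migration */
	/* ... re-check the folio still belongs to this lookup here ... */
	return folio;
}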

static inline int page_ref_freeze(struct page *page, int count)
{
	int ret = likely(atomic_cmpxchg(&page->_refcount, count, 0) == count);

	if (page_ref_tracepoint_active(page_ref_freeze))
		__page_ref_freeze(page, count, ret);
	return ret;
}

static inline int folio_ref_freeze(struct folio *folio, int count)
{
	return page_ref_freeze(&folio->page, count);
}

static inline void page_ref_unfreeze(struct page *page, int count)
{
	VM_BUG_ON_PAGE(page_count(page) != 0, page);
	VM_BUG_ON(count == 0);

	atomic_set_release(&page->_refcount, count);
	if (page_ref_tracepoint_active(page_ref_unfreeze))
		__page_ref_unfreeze(page, count);
}

static inline void folio_ref_unfreeze(struct folio *folio, int count)
{
	page_ref_unfreeze(&folio->page, count);
}
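
/*
 * Illustrative sketch, not part of the upstream header: the
 * freeze/unfreeze bracket used by operations such as splitting and
 * migration.  The function and expected_refs are hypothetical; the
 * caller must itself hold every reference it expects, otherwise the
 * freeze fails.  While frozen, the count is 0 and folio_try_get()
 * cannot succeed, so the folio can be modified exclusively.
 */
static inline bool example_with_folio_frozen(struct folio *folio,
					     int expected_refs)
{
	if (!folio_ref_freeze(folio, expected_refs))
		return false;	/* someone else still holds a reference */
	/* ... exclusive section: no new references can be taken ... */
	folio_ref_unfreeze(folio, expected_refs);
	return true;
}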
#endif /* _LINUX_PAGE_REF_H */
