
TOMOYO Linux Cross Reference
Linux/mm/page_counter.c


Diff markup

Differences between /mm/page_counter.c (Version linux-6.12-rc7) and /mm/page_counter.c (Version linux-6.10.14)


--- mm/page_counter.c (linux-6.10.14)
+++ mm/page_counter.c (linux-6.12-rc7)

 // SPDX-License-Identifier: GPL-2.0
 /*
  * Lockless hierarchical page accounting & limiting
  *
  * Copyright (C) 2014 Red Hat, Inc., Johannes Weiner
  */

 #include <linux/page_counter.h>
 #include <linux/atomic.h>
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <linux/sched.h>
 #include <linux/bug.h>
 #include <asm/page.h>

+static bool track_protection(struct page_counter *c)
+{
+        return c->protection_support;
+}
+
 static void propagate_protected_usage(struct page_counter *c,
                                       unsigned long usage)
 {
         unsigned long protected, old_protected;
         long delta;

         if (!c->parent)
                 return;

         protected = min(usage, READ_ONCE(c->min));
         old_protected = atomic_long_read(&c->min_usage);
         if (protected != old_protected) {
                 old_protected = atomic_long_xchg(&c->min_usage, protected);
                 delta = protected - old_protected;
                 if (delta)
                         atomic_long_add(delta, &c->parent->children_min_usage);
         }

         protected = min(usage, READ_ONCE(c->low));
         old_protected = atomic_long_read(&c->low_usage);
         if (protected != old_protected) {
                 old_protected = atomic_long_xchg(&c->low_usage, protected);
                 delta = protected - old_protected;
                 if (delta)
                         atomic_long_add(delta, &c->parent->children_low_usage);
         }
 }
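Note on the scheme above: each counter caches its currently protected usage, min(usage, min), in min_usage and publishes only the signed change to the parent's children_min_usage, so concurrent siblings can maintain the shared sum without locking. Below is a minimal single-threaded userspace model of that bookkeeping (hypothetical names; the kernel's atomic xchg/add collapse to plain arithmetic here):

#include <stdio.h>

/* Hypothetical, single-threaded model of propagate_protected_usage(). */
struct pc {
        long min;                /* configured protection */
        long min_usage;          /* protected usage published so far */
        long children_min_usage; /* sum of the children's min_usage */
        struct pc *parent;
};

static void propagate(struct pc *c, long usage)
{
        long protected, delta;

        if (!c->parent)
                return;
        protected = usage < c->min ? usage : c->min;
        delta = protected - c->min_usage;  /* xchg + delta in the kernel */
        c->min_usage = protected;
        if (delta)
                c->parent->children_min_usage += delta;
}

int main(void)
{
        struct pc parent = { .min = 0 };
        struct pc child = { .min = 50, .parent = &parent };

        propagate(&child, 30); /* below min: 30 pages protected */
        propagate(&child, 80); /* capped at min: 50 pages protected */
        propagate(&child, 20); /* shrinks again: the delta is negative */
        printf("children_min_usage = %ld\n", parent.children_min_usage); /* 20 */
        return 0;
}

The parent's aggregate ends at 20: it always tracks the sum of the children's currently protected usage, shrinkage included.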

 /**
  * page_counter_cancel - take pages out of the local counter
  * @counter: counter
  * @nr_pages: number of pages to cancel
  */
 void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages)
 {
         long new;

         new = atomic_long_sub_return(nr_pages, &counter->usage);
         /* More uncharges than charges? */
         if (WARN_ONCE(new < 0, "page_counter underflow: %ld nr_pages=%lu\n",
                       new, nr_pages)) {
                 new = 0;
                 atomic_long_set(&counter->usage, new);
         }
-        propagate_protected_usage(counter, new);
+        if (track_protection(counter))
+                propagate_protected_usage(counter, new);
 }

 /**
  * page_counter_charge - hierarchically charge pages
  * @counter: counter
  * @nr_pages: number of pages to charge
  *
  * NOTE: This does not consider any configured counter limits.
  */
 void page_counter_charge(struct page_counter *counter, unsigned long nr_pages)
 {
         struct page_counter *c;
+        bool protection = track_protection(counter);

         for (c = counter; c; c = c->parent) {
                 long new;

                 new = atomic_long_add_return(nr_pages, &c->usage);
-                propagate_protected_usage(c, new);
+                if (protection)
+                        propagate_protected_usage(c, new);
                 /*
                  * This is indeed racy, but we can live with some
                  * inaccuracy in the watermark.
+                 *
+                 * Notably, we have two watermarks to allow for both a globally
+                 * visible peak and one that can be reset at a smaller scope.
+                 *
+                 * Since we reset both watermarks when the global reset occurs,
+                 * we can guarantee that watermark >= local_watermark, so we
+                 * don't need to do both comparisons every time.
+                 *
+                 * On systems with branch predictors, the inner condition should
+                 * be almost free.
                  */
-                if (new > READ_ONCE(c->watermark))
-                        WRITE_ONCE(c->watermark, new);
+                if (new > READ_ONCE(c->local_watermark)) {
+                        WRITE_ONCE(c->local_watermark, new);
+                        if (new > READ_ONCE(c->watermark))
+                                WRITE_ONCE(c->watermark, new);
+                }
         }
 }
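The nested comparison above relies on the invariant watermark >= local_watermark: a global watermark reset clears both values, while a scoped reset only lowers local_watermark. A hypothetical single-threaded sketch of that relationship (the kernel's READ_ONCE/WRITE_ONCE are dropped because nothing here is concurrent):

#include <assert.h>

/* Hypothetical model of the two-watermark scheme. */
struct wm {
        long watermark;       /* all-time peak */
        long local_watermark; /* peak since the last scoped reset */
};

static void charge(struct wm *w, long new_usage)
{
        if (new_usage > w->local_watermark) {
                w->local_watermark = new_usage;
                /* watermark >= local_watermark, so one nested check suffices */
                if (new_usage > w->watermark)
                        w->watermark = new_usage;
        }
}

int main(void)
{
        struct wm w = { 0, 0 };

        charge(&w, 100);
        w.local_watermark = 40;  /* scoped reset to the current usage */
        charge(&w, 60);          /* raises only the local peak */
        assert(w.watermark == 100 && w.local_watermark == 60);
        return 0;
}

After the scoped reset, a charge that stays under the old global peak updates only local_watermark, which is what makes the smaller-scope watermark independently resettable.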

 /**
  * page_counter_try_charge - try to hierarchically charge pages
  * @counter: counter
  * @nr_pages: number of pages to charge
  * @fail: points first counter to hit its limit, if any
  *
  * Returns %true on success, or %false and @fail if the counter or one
  * of its ancestors has hit its configured limit.
  */
 bool page_counter_try_charge(struct page_counter *counter,
                              unsigned long nr_pages,
                              struct page_counter **fail)
 {
         struct page_counter *c;
+        bool protection = track_protection(counter);

         for (c = counter; c; c = c->parent) {
                 long new;
                 /*
                  * Charge speculatively to avoid an expensive CAS.  If
                  * a bigger charge fails, it might falsely lock out a
                  * racing smaller charge and send it into reclaim
                  * early, but the error is limited to the difference
                  * between the two sizes, which is less than 2M/4M in
                  * case of a THP locking out a regular page charge.
                  *
                  * The atomic_long_add_return() implies a full memory
                  * barrier between incrementing the count and reading
                  * the limit.  When racing with page_counter_set_max(),
                  * we either see the new limit or the setter sees the
                  * counter has changed and retries.
                  */
                 new = atomic_long_add_return(nr_pages, &c->usage);
                 if (new > c->max) {
                         atomic_long_sub(nr_pages, &c->usage);
                         /*
                          * This is racy, but we can live with some
                          * inaccuracy in the failcnt which is only used
                          * to report stats.
                          */
                         data_race(c->failcnt++);
                         *fail = c;
                         goto failed;
                 }
-                propagate_protected_usage(c, new);
-                /*
-                 * Just like with failcnt, we can live with some
-                 * inaccuracy in the watermark.
-                 */
-                if (new > READ_ONCE(c->watermark))
-                        WRITE_ONCE(c->watermark, new);
+                if (protection)
+                        propagate_protected_usage(c, new);
+
+                /* see comment on page_counter_charge */
+                if (new > READ_ONCE(c->local_watermark)) {
+                        WRITE_ONCE(c->local_watermark, new);
+                        if (new > READ_ONCE(c->watermark))
+                                WRITE_ONCE(c->watermark, new);
+                }
         }
         return true;

 failed:
         for (c = counter; c != *fail; c = c->parent)
                 page_counter_cancel(c, nr_pages);

         return false;
 }
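On failure, every level below the counter stored in *fail has already been charged speculatively, so the unwind loop walks from the leaf up to (but not including) *fail and cancels each charge. A hypothetical single-threaded userspace model of that charge-then-unwind pattern:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical model of the speculative charge and its unwinding. */
struct ctr {
        long usage, max;
        struct ctr *parent;
};

static bool try_charge(struct ctr *counter, long nr, struct ctr **fail)
{
        struct ctr *c;

        for (c = counter; c; c = c->parent) {
                c->usage += nr;         /* charge speculatively */
                if (c->usage > c->max) {
                        c->usage -= nr; /* back out of the failing level */
                        *fail = c;
                        goto failed;
                }
        }
        return true;
failed:
        for (c = counter; c != *fail; c = c->parent)
                c->usage -= nr;         /* unwind the levels below *fail */
        return false;
}

int main(void)
{
        struct ctr root = { 0, 100, NULL };
        struct ctr leaf = { 0, 1000, &root }; /* the parent limit is tighter */
        struct ctr *fail;

        printf("%d\n", try_charge(&leaf, 150, &fail)); /* 0: root hit its max */
        printf("%ld %ld\n", leaf.usage, root.usage);   /* 0 0: fully unwound */
        return 0;
}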

 /**
  * page_counter_uncharge - hierarchically uncharge pages
  * @counter: counter
  * @nr_pages: number of pages to uncharge
  */
 void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages)
 {
         struct page_counter *c;

         for (c = counter; c; c = c->parent)
                 page_counter_cancel(c, nr_pages);
 }

 /**
  * page_counter_set_max - set the maximum number of pages allowed
  * @counter: counter
  * @nr_pages: limit to set
  *
  * Returns 0 on success, -EBUSY if the current number of pages on the
  * counter already exceeds the specified limit.
  *
  * The caller must serialize invocations on the same counter.
  */
 int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages)
 {
         for (;;) {
                 unsigned long old;
                 long usage;

                 /*
                  * Update the limit while making sure that it's not
                  * below the concurrently-changing counter value.
                  *
                  * The xchg implies two full memory barriers before
                  * and after, so the read-swap-read is ordered and
                  * ensures coherency with page_counter_try_charge():
                  * that function modifies the count before checking
                  * the limit, so if it sees the old limit, we see the
                  * modified counter and retry.
                  */
                 usage = page_counter_read(counter);

                 if (usage > nr_pages)
                         return -EBUSY;

                 old = xchg(&counter->max, nr_pages);

                 if (page_counter_read(counter) <= usage || nr_pages >= old)
                         return 0;

                 counter->max = old;
                 cond_resched();
         }
 }
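In other words, the function publishes the new limit with xchg() and then re-reads the counter: if usage grew past the earlier snapshot while a lower limit was being installed, the old limit is restored and the whole check is retried. A hypothetical single-threaded model of the decision structure (the revert branch can only actually fire against concurrent chargers):

#include <stdio.h>

/* Hypothetical model of page_counter_set_max()'s read-swap-recheck loop. */
struct ctr { long usage, max; };

static int set_max(struct ctr *c, long nr_pages)
{
        for (;;) {
                long usage = c->usage; /* snapshot */
                long old;

                if (usage > nr_pages)
                        return -1;     /* -EBUSY in the kernel */

                old = c->max;          /* xchg(&counter->max, nr_pages) */
                c->max = nr_pages;

                /* Nothing charged meanwhile, or the limit was only raised. */
                if (c->usage <= usage || nr_pages >= old)
                        return 0;

                c->max = old;          /* revert and retry */
        }
}

int main(void)
{
        struct ctr c = { .usage = 50, .max = 200 };

        printf("%d\n", set_max(&c, 100)); /*  0: usage 50 fits under 100 */
        printf("%d\n", set_max(&c, 40));  /* -1: usage already exceeds 40 */
        return 0;
}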

 /**
  * page_counter_set_min - set the amount of protected memory
  * @counter: counter
  * @nr_pages: value to set
  *
  * The caller must serialize invocations on the same counter.
  */
 void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages)
 {
         struct page_counter *c;

         WRITE_ONCE(counter->min, nr_pages);

         for (c = counter; c; c = c->parent)
                 propagate_protected_usage(c, atomic_long_read(&c->usage));
 }

 /**
  * page_counter_set_low - set the amount of protected memory
  * @counter: counter
  * @nr_pages: value to set
  *
  * The caller must serialize invocations on the same counter.
  */
 void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages)
 {
         struct page_counter *c;

         WRITE_ONCE(counter->low, nr_pages);

         for (c = counter; c; c = c->parent)
                 propagate_protected_usage(c, atomic_long_read(&c->usage));
 }

 /**
  * page_counter_memparse - memparse() for page counter limits
  * @buf: string to parse
  * @max: string meaning maximum possible value
  * @nr_pages: returns the result in number of pages
  *
  * Returns -EINVAL, or 0 and @nr_pages on success.  @nr_pages will be
  * limited to %PAGE_COUNTER_MAX.
  */
 int page_counter_memparse(const char *buf, const char *max,
                           unsigned long *nr_pages)
 {
         char *end;
         u64 bytes;

         if (!strcmp(buf, max)) {
                 *nr_pages = PAGE_COUNTER_MAX;
                 return 0;
         }

         bytes = memparse(buf, &end);
         if (*end != '\0')
                 return -EINVAL;

         *nr_pages = min(bytes / PAGE_SIZE, (u64)PAGE_COUNTER_MAX);

         return 0;
 }
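For reference: the string supplied in @max (cgroup interfaces pass "max") selects PAGE_COUNTER_MAX, and anything else goes through memparse() with its usual size suffixes before being converted from bytes to pages. A hypothetical userspace approximation that handles only the K/M/G subset of memparse()'s suffixes:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MODEL_PAGE_SIZE 4096ULL /* assumption: 4K pages */
#define MODEL_COUNTER_MAX (ULLONG_MAX / MODEL_PAGE_SIZE)

static int parse_pages(const char *buf, const char *max,
                       unsigned long long *nr_pages)
{
        char *end;
        unsigned long long bytes;

        if (!strcmp(buf, max)) {
                *nr_pages = MODEL_COUNTER_MAX;
                return 0;
        }

        bytes = strtoull(buf, &end, 0);
        switch (*end) {
        case 'G': case 'g': bytes <<= 10; /* fall through */
        case 'M': case 'm': bytes <<= 10; /* fall through */
        case 'K': case 'k': bytes <<= 10; end++; break;
        case '\0': break;
        default: return -1; /* -EINVAL in the kernel */
        }
        if (*end != '\0')
                return -1;

        *nr_pages = bytes / MODEL_PAGE_SIZE;
        if (*nr_pages > MODEL_COUNTER_MAX)
                *nr_pages = MODEL_COUNTER_MAX;
        return 0;
}

int main(void)
{
        unsigned long long pages;

        if (!parse_pages("512M", "max", &pages))
                printf("512M -> %llu pages\n", pages); /* 131072 with 4K pages */
        if (!parse_pages("max", "max", &pages))
                printf("max  -> %llu pages\n", pages);
        return 0;
}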
+
+
+#ifdef CONFIG_MEMCG
+/*
+ * This function calculates an individual page counter's effective
+ * protection which is derived from its own memory.min/low, its
+ * parent's and siblings' settings, as well as the actual memory
+ * distribution in the tree.
+ *
+ * The following rules apply to the effective protection values:
+ *
+ * 1. At the first level of reclaim, effective protection is equal to
+ *    the declared protection in memory.min and memory.low.
+ *
+ * 2. To enable safe delegation of the protection configuration, at
+ *    subsequent levels the effective protection is capped to the
+ *    parent's effective protection.
+ *
+ * 3. To make complex and dynamic subtrees easier to configure, the
+ *    user is allowed to overcommit the declared protection at a given
+ *    level. If that is the case, the parent's effective protection is
+ *    distributed to the children in proportion to how much protection
+ *    they have declared and how much of it they are utilizing.
+ *
+ *    This makes distribution proportional, but also work-conserving:
+ *    if one counter claims much more protection than it uses memory,
+ *    the unused remainder is available to its siblings.
+ *
+ * 4. Conversely, when the declared protection is undercommitted at a
+ *    given level, the distribution of the larger parental protection
+ *    budget is NOT proportional. A counter's protection from a sibling
+ *    is capped to its own memory.min/low setting.
+ *
+ * 5. However, to allow protecting recursive subtrees from each other
+ *    without having to declare each individual counter's fair share
+ *    of the ancestor's claim to protection, any unutilized -
+ *    "floating" - protection from up the tree is distributed in
+ *    proportion to each counter's *usage*. This makes the protection
+ *    neutral wrt sibling cgroups and lets them compete freely over
+ *    the shared parental protection budget, but it protects the
+ *    subtree as a whole from neighboring subtrees.
+ *
+ * Note that 4. and 5. are not in conflict: 4. is about protecting
+ * against immediate siblings whereas 5. is about protecting against
+ * neighboring subtrees.
+ */
+static unsigned long effective_protection(unsigned long usage,
+                                          unsigned long parent_usage,
+                                          unsigned long setting,
+                                          unsigned long parent_effective,
+                                          unsigned long siblings_protected,
+                                          bool recursive_protection)
+{
+        unsigned long protected;
+        unsigned long ep;
+
+        protected = min(usage, setting);
+        /*
+         * If all cgroups at this level combined claim and use more
+         * protection than what the parent affords them, distribute
+         * shares in proportion to utilization.
+         *
+         * We are using actual utilization rather than the statically
+         * claimed protection in order to be work-conserving: claimed
+         * but unused protection is available to siblings that would
+         * otherwise get a smaller chunk than what they claimed.
+         */
+        if (siblings_protected > parent_effective)
+                return protected * parent_effective / siblings_protected;
+
+        /*
+         * Ok, utilized protection of all children is within what the
+         * parent affords them, so we know whatever this child claims
+         * and utilizes is effectively protected.
+         *
+         * If there is unprotected usage beyond this value, reclaim
+         * will apply pressure in proportion to that amount.
+         *
+         * If there is unutilized protection, the cgroup will be fully
+         * shielded from reclaim, but we do return a smaller value for
+         * protection than what the group could enjoy in theory. This
+         * is okay. With the overcommit distribution above, effective
+         * protection is always dependent on how memory is actually
+         * consumed among the siblings anyway.
+         */
+        ep = protected;
+
+        /*
+         * If the children aren't claiming (all of) the protection
+         * afforded to them by the parent, distribute the remainder in
+         * proportion to the (unprotected) memory of each cgroup. That
+         * way, cgroups that aren't explicitly prioritized wrt each
+         * other compete freely over the allowance, but they are
+         * collectively protected from neighboring trees.
+         *
+         * We're using unprotected memory for the weight so that if
+         * some cgroups DO claim explicit protection, we don't protect
+         * the same bytes twice.
+         *
+         * Check both usage and parent_usage against the respective
+         * protected values. One should imply the other, but they
+         * aren't read atomically - make sure the division is sane.
+         */
+        if (!recursive_protection)
+                return ep;
+
+        if (parent_effective > siblings_protected &&
+            parent_usage > siblings_protected &&
+            usage > protected) {
+                unsigned long unclaimed;
+
+                unclaimed = parent_effective - siblings_protected;
+                unclaimed *= usage - protected;
+                unclaimed /= parent_usage - siblings_protected;
+
+                ep += unclaimed;
+        }
+
+        return ep;
+}
+
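To make the two distribution regimes concrete, here is a hypothetical standalone harness around the same arithmetic, with values chosen purely for illustration:

#include <stdio.h>

/* Hypothetical restatement of effective_protection()'s arithmetic. */
static unsigned long effective(unsigned long usage, unsigned long parent_usage,
                               unsigned long setting,
                               unsigned long parent_effective,
                               unsigned long siblings_protected, int recursive)
{
        unsigned long protected = usage < setting ? usage : setting;
        unsigned long ep;

        /* Rule 3: overcommitted level, scale down in proportion to use. */
        if (siblings_protected > parent_effective)
                return protected * parent_effective / siblings_protected;

        ep = protected; /* Rule 4: undercommit is capped at the own setting. */

        if (!recursive)
                return ep;

        /* Rule 5: hand out unclaimed parental protection by usage share. */
        if (parent_effective > siblings_protected &&
            parent_usage > siblings_protected &&
            usage > protected) {
                unsigned long unclaimed = parent_effective - siblings_protected;

                unclaimed *= usage - protected;
                unclaimed /= parent_usage - siblings_protected;
                ep += unclaimed;
        }
        return ep;
}

int main(void)
{
        /* Overcommit: siblings claim and use 150 of a 100-page budget, so a
         * child using all 60 pages it declared gets 60 * 100 / 150 = 40. */
        printf("%lu\n", effective(60, 150, 60, 100, 150, 0));

        /* Recursive float: the parent affords 100, children declared only 40.
         * A child using 50 with min=20 gets 20 + 60 * 30 / 110 = 36. */
        printf("%lu\n", effective(50, 150, 20, 100, 40, 1));
        return 0;
}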
+/**
+ * page_counter_calculate_protection - check if memory consumption is in the normal range
+ * @root: the top ancestor of the sub-tree being checked
+ * @counter: the page_counter the counter to update
+ * @recursive_protection: Whether to use memory_recursiveprot behavior.
+ *
+ * Calculates elow/emin thresholds for given page_counter.
+ *
+ * WARNING: This function is not stateless! It can only be used as part
+ *          of a top-down tree iteration, not for isolated queries.
+ */
+void page_counter_calculate_protection(struct page_counter *root,
+                                       struct page_counter *counter,
+                                       bool recursive_protection)
+{
+        unsigned long usage, parent_usage;
+        struct page_counter *parent = counter->parent;
+
+        /*
+         * Effective values of the reclaim targets are ignored so they
+         * can be stale. Have a look at mem_cgroup_protection for more
+         * details.
+         * TODO: calculation should be more robust so that we do not need
+         * that special casing.
+         */
+        if (root == counter)
+                return;
+
+        usage = page_counter_read(counter);
+        if (!usage)
+                return;
+
+        if (parent == root) {
+                counter->emin = READ_ONCE(counter->min);
+                counter->elow = READ_ONCE(counter->low);
+                return;
+        }
+
+        parent_usage = page_counter_read(parent);
+
+        WRITE_ONCE(counter->emin, effective_protection(usage, parent_usage,
+                        READ_ONCE(counter->min),
+                        READ_ONCE(parent->emin),
+                        atomic_long_read(&parent->children_min_usage),
+                        recursive_protection));
+
+        WRITE_ONCE(counter->elow, effective_protection(usage, parent_usage,
+                        READ_ONCE(counter->low),
+                        READ_ONCE(parent->elow),
+                        atomic_long_read(&parent->children_low_usage),
+                        recursive_protection));
+}
+#endif /* CONFIG_MEMCG */

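The WARNING above is worth restating: emin/elow are derived from the parent's emin/elow, so every ancestor must have been visited before its descendants. A deliberately oversimplified, hypothetical sketch of why the ordering matters, keeping only the parent==root shortcut and the rule-2 cap and dropping the proportional math:

#include <stdio.h>

/* Hypothetical model: a child's effective min is capped by its parent's
 * effective min, so parents must be computed first (preorder). */
struct node { long min, emin; int parent; /* index into t[], -1 = none */ };

int main(void)
{
        /* t[0] is the reclaim root; the array is in preorder. */
        struct node t[] = {
                { 0,  0, -1 }, /* root */
                { 50, 0,  0 }, /* child declares min=50 */
                { 80, 0,  1 }, /* grandchild declares more than its parent */
        };

        for (int i = 1; i < 3; i++) {
                long cap = t[i].parent == 0 ? t[i].min : t[t[i].parent].emin;

                t[i].emin = t[i].min < cap ? t[i].min : cap;
                printf("node %d: emin = %ld\n", i, t[i].emin);
        }
        return 0; /* the grandchild ends up capped at 50, not 80 */
}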
