
TOMOYO Linux Cross Reference
Linux/mm/page_counter.c

Diff markup

Differences between /mm/page_counter.c (Version linux-6.12-rc7) and /mm/page_counter.c (Version linux-5.3.18)


--- mm/page_counter.c    (linux-5.3.18)
+++ mm/page_counter.c    (linux-6.12-rc7)
 // SPDX-License-Identifier: GPL-2.0
 /*
  * Lockless hierarchical page accounting & limiting
  *
  * Copyright (C) 2014 Red Hat, Inc., Johannes Weiner
  */
 
 #include <linux/page_counter.h>
 #include <linux/atomic.h>
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <linux/sched.h>
 #include <linux/bug.h>
 #include <asm/page.h>
 
+static bool track_protection(struct page_counter *c)
+{
+        return c->protection_support;
+}
+
 static void propagate_protected_usage(struct page_counter *c,
                                       unsigned long usage)
 {
         unsigned long protected, old_protected;
         long delta;
 
         if (!c->parent)
                 return;
 
-        if (c->min || atomic_long_read(&c->min_usage)) {
-                if (usage <= c->min)
-                        protected = usage;
-                else
-                        protected = 0;
-
+        protected = min(usage, READ_ONCE(c->min));
+        old_protected = atomic_long_read(&c->min_usage);
+        if (protected != old_protected) {
                 old_protected = atomic_long_xchg(&c->min_usage, protected);
                 delta = protected - old_protected;
                 if (delta)
                         atomic_long_add(delta, &c->parent->children_min_usage);
         }
 
-        if (c->low || atomic_long_read(&c->low_usage)) {
-                if (usage <= c->low)
-                        protected = usage;
-                else
-                        protected = 0;
-
+        protected = min(usage, READ_ONCE(c->low));
+        old_protected = atomic_long_read(&c->low_usage);
+        if (protected != old_protected) {
                 old_protected = atomic_long_xchg(&c->low_usage, protected);
                 delta = protected - old_protected;
                 if (delta)
                         atomic_long_add(delta, &c->parent->children_low_usage);
         }
 }
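
To see what the rewritten min/low bookkeeping does, here is a minimal single-threaded
userspace model. It is an illustration only, not the kernel interface: the pc_model
struct, its plain long fields and the propagate_min() helper are made up for this
sketch, and only the "min" half of the propagation is shown.

#include <stdio.h>

/* Simplified, single-threaded stand-in for struct page_counter (illustration only). */
struct pc_model {
        long usage;
        long min;                 /* configured memory.min, in pages */
        long min_usage;           /* usage currently counted as protected */
        long children_min_usage;  /* sum of min_usage over the children */
        struct pc_model *parent;
};

/* Mirrors the "min" half of propagate_protected_usage() above. */
static void propagate_min(struct pc_model *c, long usage)
{
        long protected, delta;

        if (!c->parent)
                return;

        protected = usage < c->min ? usage : c->min;    /* min(usage, c->min) */
        delta = protected - c->min_usage;
        c->min_usage = protected;
        if (delta)
                c->parent->children_min_usage += delta;
}

int main(void)
{
        struct pc_model parent = { 0 };
        struct pc_model child = { .min = 100, .parent = &parent };

        /* Usage below the protection: all of it counts as protected. */
        child.usage = 60;
        propagate_min(&child, child.usage);
        printf("children_min_usage = %ld\n", parent.children_min_usage);  /* 60 */

        /* Usage above the protection: only the first 100 pages count. */
        child.usage = 250;
        propagate_min(&child, child.usage);
        printf("children_min_usage = %ld\n", parent.children_min_usage);  /* 100 */

        return 0;
}

Note that with the new protected = min(usage, READ_ONCE(c->min)) form a counter that
uses more than its protection still reports the full protected amount, whereas the old
usage <= c->min test dropped the contribution to zero as soon as usage exceeded the
setting.
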
 
 /**
  * page_counter_cancel - take pages out of the local counter
  * @counter: counter
  * @nr_pages: number of pages to cancel
  */
 void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages)
 {
         long new;
 
         new = atomic_long_sub_return(nr_pages, &counter->usage);
-        propagate_protected_usage(counter, new);
         /* More uncharges than charges? */
-        WARN_ON_ONCE(new < 0);
+        if (WARN_ONCE(new < 0, "page_counter underflow: %ld nr_pages=%lu\n",
+                      new, nr_pages)) {
+                new = 0;
+                atomic_long_set(&counter->usage, new);
+        }
+        if (track_protection(counter))
+                propagate_protected_usage(counter, new);
 }
 
 /**
  * page_counter_charge - hierarchically charge pages
  * @counter: counter
  * @nr_pages: number of pages to charge
  *
  * NOTE: This does not consider any configured counter limits.
  */
 void page_counter_charge(struct page_counter *counter, unsigned long nr_pages)
 {
         struct page_counter *c;
+        bool protection = track_protection(counter);
 
         for (c = counter; c; c = c->parent) {
                 long new;
 
                 new = atomic_long_add_return(nr_pages, &c->usage);
-                propagate_protected_usage(counter, new);
+                if (protection)
+                        propagate_protected_usage(c, new);
                 /*
                  * This is indeed racy, but we can live with some
                  * inaccuracy in the watermark.
+                 *
+                 * Notably, we have two watermarks to allow for both a globally
+                 * visible peak and one that can be reset at a smaller scope.
+                 *
+                 * Since we reset both watermarks when the global reset occurs,
+                 * we can guarantee that watermark >= local_watermark, so we
+                 * don't need to do both comparisons every time.
+                 *
+                 * On systems with branch predictors, the inner condition should
+                 * be almost free.
                  */
-                if (new > c->watermark)
-                        c->watermark = new;
+                if (new > READ_ONCE(c->local_watermark)) {
+                        WRITE_ONCE(c->local_watermark, new);
+                        if (new > READ_ONCE(c->watermark))
+                                WRITE_ONCE(c->watermark, new);
+                }
         }
 }
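
The nested watermark update above can be illustrated with another small userspace
model. Again this is only a sketch with invented names and no atomics; it merely shows
the invariant the comment relies on, watermark >= local_watermark, and that the
globally visible peak survives a reset of the local one.

#include <stdio.h>

/* Simplified, single-threaded model of the two-watermark scheme (illustration only). */
struct pc_model {
        long usage;
        long watermark;        /* all-time peak, cleared only by a global reset */
        long local_watermark;  /* peak since the last local reset */
};

static void charge(struct pc_model *c, long nr_pages)
{
        c->usage += nr_pages;
        if (c->usage > c->local_watermark) {
                c->local_watermark = c->usage;
                if (c->usage > c->watermark)
                        c->watermark = c->usage;
        }
}

int main(void)
{
        struct pc_model c = { 0 };

        charge(&c, 300);
        c.usage -= 250;               /* uncharge */
        c.local_watermark = c.usage;  /* local reset before a new measurement */
        charge(&c, 100);

        /* The global peak is still 300; the local peak only covers the new run. */
        printf("watermark=%ld local_watermark=%ld\n", c.watermark, c.local_watermark);
        return 0;
}
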
 
 /**
  * page_counter_try_charge - try to hierarchically charge pages
  * @counter: counter
  * @nr_pages: number of pages to charge
  * @fail: points first counter to hit its limit, if any
  *
  * Returns %true on success, or %false and @fail if the counter or one
  * of its ancestors has hit its configured limit.
  */
 bool page_counter_try_charge(struct page_counter *counter,
                              unsigned long nr_pages,
                              struct page_counter **fail)
 {
         struct page_counter *c;
+        bool protection = track_protection(counter);
 
         for (c = counter; c; c = c->parent) {
                 long new;
                 /*
                  * Charge speculatively to avoid an expensive CAS.  If
                  * a bigger charge fails, it might falsely lock out a
                  * racing smaller charge and send it into reclaim
                  * early, but the error is limited to the difference
                  * between the two sizes, which is less than 2M/4M in
                  * case of a THP locking out a regular page charge.
                  *
                  * The atomic_long_add_return() implies a full memory
                  * barrier between incrementing the count and reading
-                 * the limit.  When racing with page_counter_limit(),
+                 * the limit.  When racing with page_counter_set_max(),
                  * we either see the new limit or the setter sees the
                  * counter has changed and retries.
                  */
                 new = atomic_long_add_return(nr_pages, &c->usage);
                 if (new > c->max) {
                         atomic_long_sub(nr_pages, &c->usage);
-                        propagate_protected_usage(counter, new);
                         /*
                          * This is racy, but we can live with some
-                         * inaccuracy in the failcnt.
+                         * inaccuracy in the failcnt which is only used
+                         * to report stats.
                          */
-                        c->failcnt++;
+                        data_race(c->failcnt++);
                         *fail = c;
                         goto failed;
                 }
-                propagate_protected_usage(counter, new);
-                /*
-                 * Just like with failcnt, we can live with some
-                 * inaccuracy in the watermark.
-                 */
-                if (new > c->watermark)
-                        c->watermark = new;
+                if (protection)
+                        propagate_protected_usage(c, new);
+
+                /* see comment on page_counter_charge */
+                if (new > READ_ONCE(c->local_watermark)) {
+                        WRITE_ONCE(c->local_watermark, new);
+                        if (new > READ_ONCE(c->watermark))
+                                WRITE_ONCE(c->watermark, new);
+                }
         }
         return true;
 
 failed:
         for (c = counter; c != *fail; c = c->parent)
                 page_counter_cancel(c, nr_pages);
 
         return false;
 }
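
The charge-or-roll-back behaviour of page_counter_try_charge() can be seen in a
similar simplified model. This is a single-threaded sketch with made-up names; the
speculative atomics, the protection propagation and the watermark updates are left
out, and only the all-or-nothing walk over the ancestor chain is shown.

#include <stdbool.h>
#include <stdio.h>

/* Simplified, single-threaded model of the try-charge/rollback flow (illustration only). */
struct pc_model {
        long usage;
        long max;
        struct pc_model *parent;
};

static bool try_charge(struct pc_model *counter, long nr_pages, struct pc_model **fail)
{
        struct pc_model *c;

        for (c = counter; c; c = c->parent) {
                c->usage += nr_pages;
                if (c->usage > c->max) {
                        c->usage -= nr_pages;   /* undo on the level that hit its limit */
                        *fail = c;
                        goto failed;
                }
        }
        return true;

failed:
        /* Unwind the levels below the one that hit its limit. */
        for (c = counter; c != *fail; c = c->parent)
                c->usage -= nr_pages;
        return false;
}

int main(void)
{
        struct pc_model root  = { .max = 512 };
        struct pc_model child = { .max = 1024, .parent = &root };
        struct pc_model *fail = NULL;

        printf("%d\n", try_charge(&child, 400, &fail));  /* 1: fits on every level */
        printf("%d\n", try_charge(&child, 200, &fail));  /* 0: root would reach 600 > 512 */
        printf("child=%ld root=%ld\n", child.usage, root.usage);  /* 400 400: fully rolled back */
        return 0;
}

As in the kernel code, either the whole ancestor chain is charged or nothing remains
charged, and the caller learns through @fail which counter hit its limit.
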
 
 /**
  * page_counter_uncharge - hierarchically uncharge pages
  * @counter: counter
  * @nr_pages: number of pages to uncharge
  */
 void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages)
 {
         struct page_counter *c;
 
         for (c = counter; c; c = c->parent)
                 page_counter_cancel(c, nr_pages);
 }
 
 /**
  * page_counter_set_max - set the maximum number of pages allowed
  * @counter: counter
  * @nr_pages: limit to set
  *
  * Returns 0 on success, -EBUSY if the current number of pages on the
  * counter already exceeds the specified limit.
  *
  * The caller must serialize invocations on the same counter.
  */
 int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages)
 {
         for (;;) {
                 unsigned long old;
                 long usage;
 
                 /*
                  * Update the limit while making sure that it's not
                  * below the concurrently-changing counter value.
                  *
                  * The xchg implies two full memory barriers before
                  * and after, so the read-swap-read is ordered and
                  * ensures coherency with page_counter_try_charge():
                  * that function modifies the count before checking
                  * the limit, so if it sees the old limit, we see the
                  * modified counter and retry.
                  */
-                usage = atomic_long_read(&counter->usage);
+                usage = page_counter_read(counter);
 
                 if (usage > nr_pages)
                         return -EBUSY;
 
                 old = xchg(&counter->max, nr_pages);
 
-                if (atomic_long_read(&counter->usage) <= usage)
+                if (page_counter_read(counter) <= usage)
                         return 0;
 
                 counter->max = old;
                 cond_resched();
         }
 }
 
 /**
  * page_counter_set_min - set the amount of protected memory
  * @counter: counter
  * @nr_pages: value to set
  *
  * The caller must serialize invocations on the same counter.
  */
 void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages)
 {
         struct page_counter *c;
 
-        counter->min = nr_pages;
+        WRITE_ONCE(counter->min, nr_pages);
 
         for (c = counter; c; c = c->parent)
                 propagate_protected_usage(c, atomic_long_read(&c->usage));
 }
 
 /**
  * page_counter_set_low - set the amount of protected memory
  * @counter: counter
  * @nr_pages: value to set
  *
  * The caller must serialize invocations on the same counter.
  */
 void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages)
 {
         struct page_counter *c;
 
-        counter->low = nr_pages;
+        WRITE_ONCE(counter->low, nr_pages);
 
         for (c = counter; c; c = c->parent)
                 propagate_protected_usage(c, atomic_long_read(&c->usage));
 }
 
 /**
  * page_counter_memparse - memparse() for page counter limits
  * @buf: string to parse
  * @max: string meaning maximum possible value
  * @nr_pages: returns the result in number of pages
  *
  * Returns -EINVAL, or 0 and @nr_pages on success.  @nr_pages will be
  * limited to %PAGE_COUNTER_MAX.
  */
 int page_counter_memparse(const char *buf, const char *max,
                           unsigned long *nr_pages)
 {
         char *end;
         u64 bytes;
 
         if (!strcmp(buf, max)) {
                 *nr_pages = PAGE_COUNTER_MAX;
                 return 0;
         }
 
         bytes = memparse(buf, &end);
         if (*end != '\0')
                 return -EINVAL;
 
         *nr_pages = min(bytes / PAGE_SIZE, (u64)PAGE_COUNTER_MAX);
 
         return 0;
 }
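
For illustration, the following userspace snippet mimics what page_counter_memparse()
produces for a typical input. The 4 KiB page size and the hand-rolled suffix handling
are assumptions of this sketch, not the kernel implementation.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        const char *buf = "512M";
        char *end;
        unsigned long long bytes;

        bytes = strtoull(buf, &end, 0);
        switch (*end) {
        case 'G': bytes <<= 10;  /* fall through */
        case 'M': bytes <<= 10;  /* fall through */
        case 'K': bytes <<= 10; end++; break;
        default: break;
        }
        if (*end != '\0')
                return 1;       /* trailing garbage, like the -EINVAL case above */

        printf("%s -> %llu pages\n", buf, bytes / 4096);  /* 512M -> 131072 pages */
        return 0;
}
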
+
+
+#ifdef CONFIG_MEMCG
+/*
+ * This function calculates an individual page counter's effective
+ * protection which is derived from its own memory.min/low, its
+ * parent's and siblings' settings, as well as the actual memory
+ * distribution in the tree.
+ *
+ * The following rules apply to the effective protection values:
+ *
+ * 1. At the first level of reclaim, effective protection is equal to
+ *    the declared protection in memory.min and memory.low.
+ *
+ * 2. To enable safe delegation of the protection configuration, at
+ *    subsequent levels the effective protection is capped to the
+ *    parent's effective protection.
+ *
+ * 3. To make complex and dynamic subtrees easier to configure, the
+ *    user is allowed to overcommit the declared protection at a given
+ *    level. If that is the case, the parent's effective protection is
+ *    distributed to the children in proportion to how much protection
+ *    they have declared and how much of it they are utilizing.
+ *
+ *    This makes distribution proportional, but also work-conserving:
+ *    if one counter claims much more protection than it uses memory,
+ *    the unused remainder is available to its siblings.
+ *
+ * 4. Conversely, when the declared protection is undercommitted at a
+ *    given level, the distribution of the larger parental protection
+ *    budget is NOT proportional. A counter's protection from a sibling
+ *    is capped to its own memory.min/low setting.
+ *
+ * 5. However, to allow protecting recursive subtrees from each other
+ *    without having to declare each individual counter's fixed share
+ *    of the ancestor's claim to protection, any unutilized -
+ *    "floating" - protection from up the tree is distributed in
+ *    proportion to each counter's *usage*. This makes the protection
+ *    neutral wrt sibling cgroups and lets them compete freely over
+ *    the shared parental protection budget, but it protects the
+ *    subtree as a whole from neighboring subtrees.
+ *
+ * Note that 4. and 5. are not in conflict: 4. is about protecting
+ * against immediate siblings whereas 5. is about protecting against
+ * neighboring subtrees.
+ */
+static unsigned long effective_protection(unsigned long usage,
+                                          unsigned long parent_usage,
+                                          unsigned long setting,
+                                          unsigned long parent_effective,
+                                          unsigned long siblings_protected,
+                                          bool recursive_protection)
+{
+        unsigned long protected;
+        unsigned long ep;
+
+        protected = min(usage, setting);
+        /*
+         * If all cgroups at this level combined claim and use more
+         * protection than what the parent affords them, distribute
+         * shares in proportion to utilization.
+         *
+         * We are using actual utilization rather than the statically
+         * claimed protection in order to be work-conserving: claimed
+         * but unused protection is available to siblings that would
+         * otherwise get a smaller chunk than what they claimed.
+         */
+        if (siblings_protected > parent_effective)
+                return protected * parent_effective / siblings_protected;
+
+        /*
+         * Ok, utilized protection of all children is within what the
+         * parent affords them, so we know whatever this child claims
+         * and utilizes is effectively protected.
+         *
+         * If there is unprotected usage beyond this value, reclaim
+         * will apply pressure in proportion to that amount.
+         *
+         * If there is unutilized protection, the cgroup will be fully
+         * shielded from reclaim, but we do return a smaller value for
+         * protection than what the group could enjoy in theory. This
+         * is okay. With the overcommit distribution above, effective
+         * protection is always dependent on how memory is actually
+         * consumed among the siblings anyway.
+         */
+        ep = protected;
+
+        /*
+         * If the children aren't claiming (all of) the protection
+         * afforded to them by the parent, distribute the remainder in
+         * proportion to the (unprotected) memory of each cgroup. That
+         * way, cgroups that aren't explicitly prioritized wrt each
+         * other compete freely over the allowance, but they are
+         * collectively protected from neighboring trees.
+         *
+         * We're using unprotected memory for the weight so that if
+         * some cgroups DO claim explicit protection, we don't protect
+         * the same bytes twice.
+         *
+         * Check both usage and parent_usage against the respective
+         * protected values. One should imply the other, but they
+         * aren't read atomically - make sure we don't see them
+         * underflow.
+         */
+        if (!recursive_protection)
+                return ep;
+
+        if (parent_effective > siblings_protected &&
+            parent_usage > siblings_protected &&
+            usage > protected) {
+                unsigned long unclaimed;
+
+                unclaimed = parent_effective - siblings_protected;
+                unclaimed *= usage - protected;
+                unclaimed /= parent_usage - siblings_protected;
+
+                ep += unclaimed;
+        }
+
+        return ep;
+}
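
The distribution rules from the comment block are easiest to follow with concrete
numbers. The program below is a condensed userspace copy of the arithmetic above, fed
with made-up values: two siblings under a parent whose effective protection is 100
pages, once overcommitted (rule 3) and once undercommitted with recursive protection
enabled (rule 5).

#include <stdbool.h>
#include <stdio.h>

/* Condensed userspace copy of effective_protection(), for plugging in numbers. */
static unsigned long effective_protection(unsigned long usage,
                                          unsigned long parent_usage,
                                          unsigned long setting,
                                          unsigned long parent_effective,
                                          unsigned long siblings_protected,
                                          bool recursive_protection)
{
        unsigned long protected = usage < setting ? usage : setting;
        unsigned long ep;

        /* Rule 3: the level is overcommitted, distribute in proportion to utilization. */
        if (siblings_protected > parent_effective)
                return protected * parent_effective / siblings_protected;

        ep = protected;
        if (!recursive_protection)
                return ep;

        /* Rule 5: hand out the parent's unclaimed protection by unprotected usage. */
        if (parent_effective > siblings_protected &&
            parent_usage > siblings_protected &&
            usage > protected)
                ep += (parent_effective - siblings_protected) *
                      (usage - protected) / (parent_usage - siblings_protected);

        return ep;
}

int main(void)
{
        /*
         * Overcommit: the parent affords 100 pages, siblings A and B together
         * claim and utilize 60 + 80 = 140 pages of protection.
         */
        printf("A: %lu\n", effective_protection(60, 160, 80, 100, 140, false));   /* 42 */
        printf("B: %lu\n", effective_protection(100, 160, 80, 100, 140, false));  /* 57 */

        /*
         * Undercommit with recursive protection: only 20 of the parent's
         * 100 pages are explicitly claimed; the remaining 80 are spread
         * over the children's unprotected usage (100 + 80 pages).
         */
        printf("A: %lu\n", effective_protection(120, 200, 20, 100, 20, true));    /* 20 + 44 = 64 */
        printf("B: %lu\n", effective_protection(80, 200, 0, 100, 20, true));      /* 0 + 35 = 35 */

        return 0;
}

In the overcommitted case the siblings end up with 42 and 57 pages, in proportion to
their utilized protection; in the undercommitted case each child keeps what it claims
and the parent's unclaimed 80 pages are added on top in proportion to unprotected
usage, so in these examples the totals stay within the parent's 100 pages.
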
+
+
+/**
+ * page_counter_calculate_protection - check if memory is protected from reclaim
+ * @root: the top ancestor of the sub-tree being reclaimed
+ * @counter: the page_counter the counter to update
+ * @recursive_protection: Whether to use memory_recursiveprot behavior.
+ *
+ * Calculates elow/emin thresholds for given page_counter.
+ *
+ * WARNING: This function is not stateless! It can only be used as part
+ *          of a top-down tree iteration, not for isolated queries.
+ */
+void page_counter_calculate_protection(struct page_counter *root,
+                                       struct page_counter *counter,
+                                       bool recursive_protection)
+{
+        unsigned long usage, parent_usage;
+        struct page_counter *parent = counter->parent;
+
+        /*
+         * Effective values of the reclaim targets are ignored so they
+         * can be stale. Have a look at mem_cgroup_protection for more
+         * details.
+         * TODO: calculation should be more robust so that we do not need
+         * that special casing.
+         */
+        if (root == counter)
+                return;
+
+        usage = page_counter_read(counter);
+        if (!usage)
+                return;
+
+        if (parent == root) {
+                counter->emin = READ_ONCE(counter->min);
+                counter->elow = READ_ONCE(counter->low);
+                return;
+        }
+
+        parent_usage = page_counter_read(parent);
+
+        WRITE_ONCE(counter->emin, effective_protection(usage, parent_usage,
+                        READ_ONCE(counter->min),
+                        READ_ONCE(parent->emin),
+                        atomic_long_read(&parent->children_min_usage),
+                        recursive_protection));
+
+        WRITE_ONCE(counter->elow, effective_protection(usage, parent_usage,
+                        READ_ONCE(counter->low),
+                        READ_ONCE(parent->elow),
+                        atomic_long_read(&parent->children_low_usage),
+                        recursive_protection));
+}
+#endif /* CONFIG_MEMCG */
 
