
TOMOYO Linux Cross Reference
Linux/mm/page_counter.c

Version: linux-6.12-rc7

Diff markup

Differences between /mm/page_counter.c (Architecture mips) and /mm/page_counter.c (Architecture i386): none. The file is shared by both architectures, so the common source is shown below as a single listing.


// SPDX-License-Identifier: GPL-2.0
/*
 * Lockless hierarchical page accounting & limiting
 *
 * Copyright (C) 2014 Red Hat, Inc., Johannes Weiner
 */

#include <linux/page_counter.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <asm/page.h>

static bool track_protection(struct page_counter *c)
{
        return c->protection_support;
}

static void propagate_protected_usage(struct page_counter *c,
                                      unsigned long usage)
{
        unsigned long protected, old_protected;
        long delta;

        if (!c->parent)
                return;

        protected = min(usage, READ_ONCE(c->min));
        old_protected = atomic_long_read(&c->min_usage);
        if (protected != old_protected) {
                old_protected = atomic_long_xchg(&c->min_usage, protected);
                delta = protected - old_protected;
                if (delta)
                        atomic_long_add(delta, &c->parent->children_min_usage);
        }

        protected = min(usage, READ_ONCE(c->low));
        old_protected = atomic_long_read(&c->low_usage);
        if (protected != old_protected) {
                old_protected = atomic_long_xchg(&c->low_usage, protected);
                delta = protected - old_protected;
                if (delta)
                        atomic_long_add(delta, &c->parent->children_low_usage);
        }
}

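The xchg/delta bookkeeping above can be modeled in userspace. The sketch below is an illustration only, not kernel code (all names are hypothetical); it shows how publishing the clamped value and adding only the difference upward keeps the parent's aggregate equal to the sum of the children's clamped usages as usage moves up and down:

        #include <stdatomic.h>
        #include <stdio.h>

        static atomic_long min_usage;          /* child's last published protected usage */
        static atomic_long children_min_usage; /* parent's running aggregate */

        static void propagate(long usage, long min_setting)
        {
                /* clamp the protection claim to what is actually used */
                long protected = usage < min_setting ? usage : min_setting;
                /* publish the new value and account only the difference upward */
                long old = atomic_exchange(&min_usage, protected);
                long delta = protected - old;

                if (delta)
                        atomic_fetch_add(&children_min_usage, delta);
        }

        int main(void)
        {
                propagate(300, 100); /* usage 300, min 100 -> publishes 100 */
                propagate(50, 100);  /* usage drops to 50 -> publishes 50, delta -50 */
                printf("%ld\n", atomic_load(&children_min_usage)); /* prints 50 */
                return 0;
        }
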
/**
 * page_counter_cancel - take pages out of the local counter
 * @counter: counter
 * @nr_pages: number of pages to cancel
 */
void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages)
{
        long new;

        new = atomic_long_sub_return(nr_pages, &counter->usage);
        /* More uncharges than charges? */
        if (WARN_ONCE(new < 0, "page_counter underflow: %ld nr_pages=%lu\n",
                      new, nr_pages)) {
                new = 0;
                atomic_long_set(&counter->usage, new);
        }
        if (track_protection(counter))
                propagate_protected_usage(counter, new);
}

/**
 * page_counter_charge - hierarchically charge pages
 * @counter: counter
 * @nr_pages: number of pages to charge
 *
 * NOTE: This does not consider any configured counter limits.
 */
void page_counter_charge(struct page_counter *counter, unsigned long nr_pages)
{
        struct page_counter *c;
        bool protection = track_protection(counter);

        for (c = counter; c; c = c->parent) {
                long new;

                new = atomic_long_add_return(nr_pages, &c->usage);
                if (protection)
                        propagate_protected_usage(c, new);
                /*
                 * This is indeed racy, but we can live with some
                 * inaccuracy in the watermark.
                 *
                 * Notably, we have two watermarks to allow for both a globally
                 * visible peak and one that can be reset at a smaller scope.
                 *
                 * Since we reset both watermarks when the global reset occurs,
                 * we can guarantee that watermark >= local_watermark, so we
                 * don't need to do both comparisons every time.
                 *
                 * On systems with branch predictors, the inner condition should
                 * be almost free.
                 */
                if (new > READ_ONCE(c->local_watermark)) {
                        WRITE_ONCE(c->local_watermark, new);
                        if (new > READ_ONCE(c->watermark))
                                WRITE_ONCE(c->watermark, new);
                }
        }
}

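A caller-side sketch of the unconditional charge path (hypothetical kernel context; "child" stands for an already-initialized counter in a hierarchy): the charge propagates to every ancestor regardless of limits and ratchets both watermarks.

        /* hypothetical caller, kernel context assumed */
        page_counter_charge(&child, 512);
        /* child and each ancestor grew by 512 pages, limits ignored */
        WARN_ON(page_counter_read(&child) < 512);
        /* child.watermark and child.local_watermark are now >= 512 */
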
/**
 * page_counter_try_charge - try to hierarchically charge pages
 * @counter: counter
 * @nr_pages: number of pages to charge
 * @fail: points at the first counter to hit its limit, if any
 *
 * Returns %true on success, or %false and @fail if the counter or one
 * of its ancestors has hit its configured limit.
 */
bool page_counter_try_charge(struct page_counter *counter,
                             unsigned long nr_pages,
                             struct page_counter **fail)
{
        struct page_counter *c;
        bool protection = track_protection(counter);

        for (c = counter; c; c = c->parent) {
                long new;
                /*
                 * Charge speculatively to avoid an expensive CAS.  If
                 * a bigger charge fails, it might falsely lock out a
                 * racing smaller charge and send it into reclaim
                 * early, but the error is limited to the difference
                 * between the two sizes, which is less than 2M/4M in
                 * case of a THP locking out a regular page charge.
                 *
                 * The atomic_long_add_return() implies a full memory
                 * barrier between incrementing the count and reading
                 * the limit.  When racing with page_counter_set_max(),
                 * we either see the new limit or the setter sees the
                 * counter has changed and retries.
                 */
                new = atomic_long_add_return(nr_pages, &c->usage);
                if (new > c->max) {
                        atomic_long_sub(nr_pages, &c->usage);
                        /*
                         * This is racy, but we can live with some
                         * inaccuracy in the failcnt which is only used
                         * to report stats.
                         */
                        data_race(c->failcnt++);
                        *fail = c;
                        goto failed;
                }
                if (protection)
                        propagate_protected_usage(c, new);

                /* see comment on page_counter_charge */
                if (new > READ_ONCE(c->local_watermark)) {
                        WRITE_ONCE(c->local_watermark, new);
                        if (new > READ_ONCE(c->watermark))
                                WRITE_ONCE(c->watermark, new);
                }
        }
        return true;

failed:
        for (c = counter; c != *fail; c = c->parent)
                page_counter_cancel(c, nr_pages);

        return false;
}

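The usual calling convention, sketched below (hypothetical caller; the error handling is illustrative): on failure the function has already rolled back the partial charge on the counters below @fail, so the caller only decides how to react to the limit.

        struct page_counter *fail;

        if (!page_counter_try_charge(counter, nr_pages, &fail)) {
                /* @fail is the first ancestor at its limit; e.g. run
                 * reclaim against that hierarchy, or give up: */
                return -ENOMEM;
        }
        /* success: pair with page_counter_uncharge(counter, nr_pages) */
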
/**
 * page_counter_uncharge - hierarchically uncharge pages
 * @counter: counter
 * @nr_pages: number of pages to uncharge
 */
void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages)
{
        struct page_counter *c;

        for (c = counter; c; c = c->parent)
                page_counter_cancel(c, nr_pages);
}

/**
 * page_counter_set_max - set the maximum number of pages allowed
 * @counter: counter
 * @nr_pages: limit to set
 *
 * Returns 0 on success, -EBUSY if the current number of pages on the
 * counter already exceeds the specified limit.
 *
 * The caller must serialize invocations on the same counter.
 */
int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages)
{
        for (;;) {
                unsigned long old;
                long usage;

                /*
                 * Update the limit while making sure that it's not
                 * below the concurrently-changing counter value.
                 *
                 * The xchg implies two full memory barriers before
                 * and after, so the read-swap-read is ordered and
                 * ensures coherency with page_counter_try_charge():
                 * that function modifies the count before checking
                 * the limit, so if it sees the old limit, we see the
                 * modified counter and retry.
                 */
                usage = page_counter_read(counter);

                if (usage > nr_pages)
                        return -EBUSY;

                old = xchg(&counter->max, nr_pages);

                if (page_counter_read(counter) <= usage || nr_pages >= old)
                        return 0;

                counter->max = old;
                cond_resched();
        }
}

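A sketch of how a limit shrink is typically driven (hypothetical caller; reclaim_some_pages() is an illustrative placeholder, not a real helper): -EBUSY means usage already exceeds the requested limit, so the caller must reduce usage first and retry.

        while (page_counter_set_max(counter, new_limit) == -EBUSY) {
                if (!reclaim_some_pages(counter))  /* hypothetical helper */
                        return -EBUSY;             /* could not get below the limit */
        }
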
/**
 * page_counter_set_min - set the amount of protected memory
 * @counter: counter
 * @nr_pages: value to set
 *
 * The caller must serialize invocations on the same counter.
 */
void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages)
{
        struct page_counter *c;

        WRITE_ONCE(counter->min, nr_pages);

        for (c = counter; c; c = c->parent)
                propagate_protected_usage(c, atomic_long_read(&c->usage));
}

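For example (hypothetical caller, 4K pages assumed; SZ_1G from <linux/sizes.h>), reserving 1G of hard protection. Note this only records the setting and the tracked usages; the effective value (emin) is derived later by page_counter_calculate_protection().

        page_counter_set_min(counter, SZ_1G / PAGE_SIZE);  /* 262144 pages at 4K */
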
/**
 * page_counter_set_low - set the amount of protected memory
 * @counter: counter
 * @nr_pages: value to set
 *
 * The caller must serialize invocations on the same counter.
 */
void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages)
{
        struct page_counter *c;

        WRITE_ONCE(counter->low, nr_pages);

        for (c = counter; c; c = c->parent)
                propagate_protected_usage(c, atomic_long_read(&c->usage));
}

/**
 * page_counter_memparse - memparse() for page counter limits
 * @buf: string to parse
 * @max: string meaning maximum possible value
 * @nr_pages: returns the result in number of pages
 *
 * Returns -EINVAL, or 0 and @nr_pages on success.  @nr_pages will be
 * limited to %PAGE_COUNTER_MAX.
 */
int page_counter_memparse(const char *buf, const char *max,
                          unsigned long *nr_pages)
{
        char *end;
        u64 bytes;

        if (!strcmp(buf, max)) {
                *nr_pages = PAGE_COUNTER_MAX;
                return 0;
        }

        bytes = memparse(buf, &end);
        if (*end != '\0')
                return -EINVAL;

        *nr_pages = min(bytes / PAGE_SIZE, (u64)PAGE_COUNTER_MAX);

        return 0;
}

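The semantics in brief, as a hypothetical caller would see them: the literal string passed in @max (e.g. "max") selects PAGE_COUNTER_MAX, anything else is parsed with the usual memparse() K/M/G suffixes, and the byte count is rounded down to whole pages.

        unsigned long pages;

        if (page_counter_memparse("512M", "max", &pages))
                return -EINVAL;
        /* with 4K pages: pages == (512 << 20) / 4096 == 131072 */
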

#ifdef CONFIG_MEMCG
/*
 * This function calculates an individual page counter's effective
 * protection which is derived from its own memory.min/low, its
 * parent's and siblings' settings, as well as the actual memory
 * distribution in the tree.
 *
 * The following rules apply to the effective protection values:
 *
 * 1. At the first level of reclaim, effective protection is equal to
 *    the declared protection in memory.min and memory.low.
 *
 * 2. To enable safe delegation of the protection configuration, at
 *    subsequent levels the effective protection is capped to the
 *    parent's effective protection.
 *
 * 3. To make complex and dynamic subtrees easier to configure, the
 *    user is allowed to overcommit the declared protection at a given
 *    level. If that is the case, the parent's effective protection is
 *    distributed to the children in proportion to how much protection
 *    they have declared and how much of it they are utilizing.
 *
 *    This makes distribution proportional, but also work-conserving:
 *    if one counter claims much more protection than it uses memory,
 *    the unused remainder is available to its siblings.
 *
 * 4. Conversely, when the declared protection is undercommitted at a
 *    given level, the distribution of the larger parental protection
 *    budget is NOT proportional. A counter's protection from a sibling
 *    is capped to its own memory.min/low setting.
 *
 * 5. However, to allow protecting recursive subtrees from each other
 *    without having to declare each individual counter's fixed share
 *    of the ancestor's claim to protection, any unutilized -
 *    "floating" - protection from up the tree is distributed in
 *    proportion to each counter's *usage*. This makes the protection
 *    neutral wrt sibling cgroups and lets them compete freely over
 *    the shared parental protection budget, but it protects the
 *    subtree as a whole from neighboring subtrees.
 *
 * Note that 4. and 5. are not in conflict: 4. is about protecting
 * against immediate siblings whereas 5. is about protecting against
 * neighboring subtrees.
 */
static unsigned long effective_protection(unsigned long usage,
                                          unsigned long parent_usage,
                                          unsigned long setting,
                                          unsigned long parent_effective,
                                          unsigned long siblings_protected,
                                          bool recursive_protection)
{
        unsigned long protected;
        unsigned long ep;

        protected = min(usage, setting);
        /*
         * If all cgroups at this level combined claim and use more
         * protection than what the parent affords them, distribute
         * shares in proportion to utilization.
         *
         * We are using actual utilization rather than the statically
         * claimed protection in order to be work-conserving: claimed
         * but unused protection is available to siblings that would
         * otherwise get a smaller chunk than what they claimed.
         */
        if (siblings_protected > parent_effective)
                return protected * parent_effective / siblings_protected;

        /*
         * Ok, utilized protection of all children is within what the
         * parent affords them, so we know whatever this child claims
         * and utilizes is effectively protected.
         *
         * If there is unprotected usage beyond this value, reclaim
         * will apply pressure in proportion to that amount.
         *
         * If there is unutilized protection, the cgroup will be fully
         * shielded from reclaim, but we do return a smaller value for
         * protection than what the group could enjoy in theory. This
         * is okay. With the overcommit distribution above, effective
         * protection is always dependent on how memory is actually
         * consumed among the siblings anyway.
         */
        ep = protected;

        /*
         * If the children aren't claiming (all of) the protection
         * afforded to them by the parent, distribute the remainder in
         * proportion to the (unprotected) memory of each cgroup. That
         * way, cgroups that aren't explicitly prioritized wrt each
         * other compete freely over the allowance, but they are
         * collectively protected from neighboring trees.
         *
         * We're using unprotected memory for the weight so that if
         * some cgroups DO claim explicit protection, we don't protect
         * the same bytes twice.
         *
         * Check both usage and parent_usage against the respective
         * protected values. One should imply the other, but they
         * aren't read atomically - make sure the division is sane.
         */
        if (!recursive_protection)
                return ep;

        if (parent_effective > siblings_protected &&
            parent_usage > siblings_protected &&
            usage > protected) {
                unsigned long unclaimed;

                unclaimed = parent_effective - siblings_protected;
                unclaimed *= usage - protected;
                unclaimed /= parent_usage - siblings_protected;

                ep += unclaimed;
        }

        return ep;
}

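A worked example of the two distribution rules, as a standalone userspace model (illustration only, not kernel code; it mirrors the arithmetic above). With overcommit (rule 3), two fully-utilized siblings claiming 100 and 300 pages against a parent budget of 200 receive 50 and 150. With recursive protection (rule 5), a child using 100 of a parent's 400 pages picks up a proportional share of an otherwise unclaimed 200-page budget:

        #include <stdio.h>

        /* mirrors effective_protection() above, minus the kernel types */
        static unsigned long eff(unsigned long usage, unsigned long parent_usage,
                                 unsigned long setting, unsigned long parent_effective,
                                 unsigned long siblings_protected, int recursive)
        {
                unsigned long protected = usage < setting ? usage : setting;
                unsigned long ep;

                if (siblings_protected > parent_effective)
                        return protected * parent_effective / siblings_protected;

                ep = protected;
                if (recursive && parent_effective > siblings_protected &&
                    parent_usage > siblings_protected && usage > protected)
                        ep += (parent_effective - siblings_protected) *
                              (usage - protected) / (parent_usage - siblings_protected);

                return ep;
        }

        int main(void)
        {
                /* rule 3: overcommitted claims scale by utilization */
                printf("%lu\n", eff(100, 400, 100, 200, 400, 0)); /* 50  */
                printf("%lu\n", eff(300, 400, 300, 200, 400, 0)); /* 150 */
                /* rule 5: floating protection follows usage */
                printf("%lu\n", eff(100, 400, 0, 200, 0, 1));     /* 50  */
                return 0;
        }
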

/**
 * page_counter_calculate_protection - check if memory consumption is in the normal range
 * @root: the top ancestor of the sub-tree being checked
 * @counter: the page_counter to update
 * @recursive_protection: Whether to use memory_recursiveprot behavior.
 *
 * Calculates elow/emin thresholds for the given page_counter.
 *
 * WARNING: This function is not stateless! It can only be used as part
 * of a top-down tree iteration, not for isolated queries.
 */
void page_counter_calculate_protection(struct page_counter *root,
                                       struct page_counter *counter,
                                       bool recursive_protection)
{
        unsigned long usage, parent_usage;
        struct page_counter *parent = counter->parent;

        /*
         * Effective values of the reclaim targets are ignored so they
         * can be stale. Have a look at mem_cgroup_protection for more
         * details.
         * TODO: calculation should be more robust so that we do not need
         * that special casing.
         */
        if (root == counter)
                return;

        usage = page_counter_read(counter);
        if (!usage)
                return;

        if (parent == root) {
                counter->emin = READ_ONCE(counter->min);
                counter->elow = READ_ONCE(counter->low);
                return;
        }

        parent_usage = page_counter_read(parent);

        WRITE_ONCE(counter->emin, effective_protection(usage, parent_usage,
                        READ_ONCE(counter->min),
                        READ_ONCE(parent->emin),
                        atomic_long_read(&parent->children_min_usage),
                        recursive_protection));

        WRITE_ONCE(counter->elow, effective_protection(usage, parent_usage,
                        READ_ONCE(counter->low),
                        READ_ONCE(parent->elow),
                        atomic_long_read(&parent->children_low_usage),
                        recursive_protection));
}
#endif /* CONFIG_MEMCG */

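Putting the pieces together, a minimal end-to-end sketch (hypothetical kernel context; the three-argument page_counter_init() with a protection_support flag is assumed from <linux/page_counter.h> in this kernel version):

        static struct page_counter parent_pc, child_pc;
        struct page_counter *hot;

        page_counter_init(&parent_pc, NULL, true);      /* root, tracks min/low */
        page_counter_init(&child_pc, &parent_pc, true);

        page_counter_set_max(&parent_pc, 1024);         /* limit the whole tree */
        page_counter_set_low(&child_pc, 256);           /* soft-protect the child */

        if (page_counter_try_charge(&child_pc, 128, &hot)) {
                /* ... use the memory ... */
                page_counter_uncharge(&child_pc, 128);  /* uncharges child and parent */
        }
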
