
TOMOYO Linux Cross Reference
Linux/fs/f2fs/gc.c


  1 // SPDX-License-Identifier: GPL-2.0
  2 /*
  3  * fs/f2fs/gc.c
  4  *
  5  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
  6  *             http://www.samsung.com/
  7  */
  8 #include <linux/fs.h>
  9 #include <linux/module.h>
 10 #include <linux/init.h>
 11 #include <linux/f2fs_fs.h>
 12 #include <linux/kthread.h>
 13 #include <linux/delay.h>
 14 #include <linux/freezer.h>
 15 #include <linux/sched/signal.h>
 16 #include <linux/random.h>
 17 #include <linux/sched/mm.h>
 18 
 19 #include "f2fs.h"
 20 #include "node.h"
 21 #include "segment.h"
 22 #include "gc.h"
 23 #include "iostat.h"
 24 #include <trace/events/f2fs.h>
 25 
 26 static struct kmem_cache *victim_entry_slab;
 27 
 28 static unsigned int count_bits(const unsigned long *addr,
 29                                 unsigned int offset, unsigned int len);
 30 
 31 static int gc_thread_func(void *data)
 32 {
 33         struct f2fs_sb_info *sbi = data;
 34         struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
 35         wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
 36         wait_queue_head_t *fggc_wq = &sbi->gc_thread->fggc_wq;
 37         unsigned int wait_ms;
 38         struct f2fs_gc_control gc_control = {
 39                 .victim_segno = NULL_SEGNO,
 40                 .should_migrate_blocks = false,
 41                 .err_gc_skipped = false };
 42 
 43         wait_ms = gc_th->min_sleep_time;
 44 
 45         set_freezable();
 46         do {
 47                 bool sync_mode, foreground = false;
 48 
 49                 wait_event_freezable_timeout(*wq,
 50                                 kthread_should_stop() ||
 51                                 waitqueue_active(fggc_wq) ||
 52                                 gc_th->gc_wake,
 53                                 msecs_to_jiffies(wait_ms));
 54 
 55                 if (test_opt(sbi, GC_MERGE) && waitqueue_active(fggc_wq))
 56                         foreground = true;
 57 
 58                 /* give it a try one time */
 59                 if (gc_th->gc_wake)
 60                         gc_th->gc_wake = false;
 61 
 62                 if (f2fs_readonly(sbi->sb)) {
 63                         stat_other_skip_bggc_count(sbi);
 64                         continue;
 65                 }
 66                 if (kthread_should_stop())
 67                         break;
 68 
 69                 if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
 70                         increase_sleep_time(gc_th, &wait_ms);
 71                         stat_other_skip_bggc_count(sbi);
 72                         continue;
 73                 }
 74 
 75                 if (time_to_inject(sbi, FAULT_CHECKPOINT))
 76                         f2fs_stop_checkpoint(sbi, false,
 77                                         STOP_CP_REASON_FAULT_INJECT);
 78 
 79                 if (!sb_start_write_trylock(sbi->sb)) {
 80                         stat_other_skip_bggc_count(sbi);
 81                         continue;
 82                 }
 83 
 84                 gc_control.one_time = false;
 85 
 86                 /*
 87                  * [GC triggering condition]
 88                  * 0. GC is not conducted currently.
 89                  * 1. There are enough dirty segments.
 90                  * 2. IO subsystem is idle by checking the # of writeback pages.
 91                  * 3. IO subsystem is idle by checking the # of requests in
 92                  *    bdev's request list.
 93                  *
 94                  * Note) We have to avoid triggering GC too frequently,
 95                  * because some segments can be invalidated soon after
 96                  * by user updates or deletions.
 97                  * So, wait a while to let dirty segments accumulate.
 98                  */
 99                 if (sbi->gc_mode == GC_URGENT_HIGH ||
100                                 sbi->gc_mode == GC_URGENT_MID) {
101                         wait_ms = gc_th->urgent_sleep_time;
102                         f2fs_down_write(&sbi->gc_lock);
103                         goto do_gc;
104                 }
105 
106                 if (foreground) {
107                         f2fs_down_write(&sbi->gc_lock);
108                         goto do_gc;
109                 } else if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
110                         stat_other_skip_bggc_count(sbi);
111                         goto next;
112                 }
113 
114                 if (!is_idle(sbi, GC_TIME)) {
115                         increase_sleep_time(gc_th, &wait_ms);
116                         f2fs_up_write(&sbi->gc_lock);
117                         stat_io_skip_bggc_count(sbi);
118                         goto next;
119                 }
120 
121                 if (f2fs_sb_has_blkzoned(sbi)) {
122                         if (has_enough_free_blocks(sbi, LIMIT_NO_ZONED_GC)) {
123                                 wait_ms = gc_th->no_gc_sleep_time;
124                                 f2fs_up_write(&sbi->gc_lock);
125                                 goto next;
126                         }
127                         if (wait_ms == gc_th->no_gc_sleep_time)
128                                 wait_ms = gc_th->max_sleep_time;
129                 }
130 
131                 if (need_to_boost_gc(sbi)) {
132                         decrease_sleep_time(gc_th, &wait_ms);
133                         if (f2fs_sb_has_blkzoned(sbi))
134                                 gc_control.one_time = true;
135                 } else {
136                         increase_sleep_time(gc_th, &wait_ms);
137                 }
138 do_gc:
139                 stat_inc_gc_call_count(sbi, foreground ?
140                                         FOREGROUND : BACKGROUND);
141 
142                 sync_mode = (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC) ||
143                                 gc_control.one_time;
144 
 145                 /* foreground GC was triggered via f2fs_balance_fs() */
146                 if (foreground)
147                         sync_mode = false;
148 
149                 gc_control.init_gc_type = sync_mode ? FG_GC : BG_GC;
150                 gc_control.no_bg_gc = foreground;
151                 gc_control.nr_free_secs = foreground ? 1 : 0;
152 
153                 /* if return value is not zero, no victim was selected */
154                 if (f2fs_gc(sbi, &gc_control)) {
 155                         /* don't adjust wait_ms for foreground GC */
156                         if (!foreground)
157                                 wait_ms = gc_th->no_gc_sleep_time;
158                 } else {
159                         /* reset wait_ms to default sleep time */
160                         if (wait_ms == gc_th->no_gc_sleep_time)
161                                 wait_ms = gc_th->min_sleep_time;
162                 }
163 
164                 if (foreground)
165                         wake_up_all(&gc_th->fggc_wq);
166 
167                 trace_f2fs_background_gc(sbi->sb, wait_ms,
168                                 prefree_segments(sbi), free_segments(sbi));
169 
170                 /* balancing f2fs's metadata periodically */
171                 f2fs_balance_fs_bg(sbi, true);
172 next:
173                 if (sbi->gc_mode != GC_NORMAL) {
174                         spin_lock(&sbi->gc_remaining_trials_lock);
175                         if (sbi->gc_remaining_trials) {
176                                 sbi->gc_remaining_trials--;
177                                 if (!sbi->gc_remaining_trials)
178                                         sbi->gc_mode = GC_NORMAL;
179                         }
180                         spin_unlock(&sbi->gc_remaining_trials_lock);
181                 }
182                 sb_end_write(sbi->sb);
183 
184         } while (!kthread_should_stop());
185         return 0;
186 }
187 
188 int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
189 {
190         struct f2fs_gc_kthread *gc_th;
191         dev_t dev = sbi->sb->s_bdev->bd_dev;
192 
193         gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
194         if (!gc_th)
195                 return -ENOMEM;
196 
197         gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
198 
199         if (f2fs_sb_has_blkzoned(sbi)) {
200                 gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME_ZONED;
201                 gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME_ZONED;
202                 gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME_ZONED;
203         } else {
204                 gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
205                 gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
206                 gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;
207         }
208 
209         gc_th->gc_wake = false;
210 
211         sbi->gc_thread = gc_th;
212         init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
213         init_waitqueue_head(&sbi->gc_thread->fggc_wq);
214         sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
215                         "f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
216         if (IS_ERR(gc_th->f2fs_gc_task)) {
217                 int err = PTR_ERR(gc_th->f2fs_gc_task);
218 
219                 kfree(gc_th);
220                 sbi->gc_thread = NULL;
221                 return err;
222         }
223 
224         return 0;
225 }
226 
227 void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
228 {
229         struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
230 
231         if (!gc_th)
232                 return;
233         kthread_stop(gc_th->f2fs_gc_task);
234         wake_up_all(&gc_th->fggc_wq);
235         kfree(gc_th);
236         sbi->gc_thread = NULL;
237 }
238 
239 static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
240 {
241         int gc_mode;
242 
243         if (gc_type == BG_GC) {
244                 if (sbi->am.atgc_enabled)
245                         gc_mode = GC_AT;
246                 else
247                         gc_mode = GC_CB;
248         } else {
249                 gc_mode = GC_GREEDY;
250         }
251 
252         switch (sbi->gc_mode) {
253         case GC_IDLE_CB:
254                 gc_mode = GC_CB;
255                 break;
256         case GC_IDLE_GREEDY:
257         case GC_URGENT_HIGH:
258                 gc_mode = GC_GREEDY;
259                 break;
260         case GC_IDLE_AT:
261                 gc_mode = GC_AT;
262                 break;
263         }
264 
265         return gc_mode;
266 }
267 
268 static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
269                         int type, struct victim_sel_policy *p)
270 {
271         struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
272 
273         if (p->alloc_mode == SSR) {
274                 p->gc_mode = GC_GREEDY;
275                 p->dirty_bitmap = dirty_i->dirty_segmap[type];
276                 p->max_search = dirty_i->nr_dirty[type];
277                 p->ofs_unit = 1;
278         } else if (p->alloc_mode == AT_SSR) {
279                 p->gc_mode = GC_GREEDY;
280                 p->dirty_bitmap = dirty_i->dirty_segmap[type];
281                 p->max_search = dirty_i->nr_dirty[type];
282                 p->ofs_unit = 1;
283         } else {
284                 p->gc_mode = select_gc_type(sbi, gc_type);
285                 p->ofs_unit = SEGS_PER_SEC(sbi);
286                 if (__is_large_section(sbi)) {
287                         p->dirty_bitmap = dirty_i->dirty_secmap;
288                         p->max_search = count_bits(p->dirty_bitmap,
289                                                 0, MAIN_SECS(sbi));
290                 } else {
291                         p->dirty_bitmap = dirty_i->dirty_segmap[DIRTY];
292                         p->max_search = dirty_i->nr_dirty[DIRTY];
293                 }
294         }
295 
296         /*
 297          * adjust the candidate range; foreground GC and urgent GC should
 298          * search all dirty segments.
299          */
300         if (gc_type != FG_GC &&
301                         (sbi->gc_mode != GC_URGENT_HIGH) &&
302                         (p->gc_mode != GC_AT && p->alloc_mode != AT_SSR) &&
303                         p->max_search > sbi->max_victim_search)
304                 p->max_search = sbi->max_victim_search;
305 
306         /* let's select beginning hot/small space first. */
307         if (f2fs_need_rand_seg(sbi))
308                 p->offset = get_random_u32_below(MAIN_SECS(sbi) *
309                                                 SEGS_PER_SEC(sbi));
310         else if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
311                 p->offset = 0;
312         else
313                 p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
314 }
315 
316 static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
317                                 struct victim_sel_policy *p)
318 {
319         /* SSR allocates in a segment unit */
320         if (p->alloc_mode == SSR)
321                 return BLKS_PER_SEG(sbi);
322         else if (p->alloc_mode == AT_SSR)
323                 return UINT_MAX;
324 
325         /* LFS */
326         if (p->gc_mode == GC_GREEDY)
327                 return SEGS_TO_BLKS(sbi, 2 * p->ofs_unit);
328         else if (p->gc_mode == GC_CB)
329                 return UINT_MAX;
330         else if (p->gc_mode == GC_AT)
331                 return UINT_MAX;
332         else /* No other gc_mode */
333                 return 0;
334 }
335 
336 static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
337 {
338         struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
339         unsigned int secno;
340 
341         /*
 342          * If the gc_type is FG_GC, we can reuse victim segments that
 343          * were selected by background GC before.
 344          * Those segments are guaranteed to have few valid blocks.
345          */
346         for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
347                 if (sec_usage_check(sbi, secno))
348                         continue;
349                 clear_bit(secno, dirty_i->victim_secmap);
350                 return GET_SEG_FROM_SEC(sbi, secno);
351         }
352         return NULL_SEGNO;
353 }
354 
355 static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
356 {
357         struct sit_info *sit_i = SIT_I(sbi);
358         unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
359         unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
360         unsigned long long mtime = 0;
361         unsigned int vblocks;
362         unsigned char age = 0;
363         unsigned char u;
364         unsigned int i;
365         unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi, segno);
366 
367         for (i = 0; i < usable_segs_per_sec; i++)
368                 mtime += get_seg_entry(sbi, start + i)->mtime;
369         vblocks = get_valid_blocks(sbi, segno, true);
370 
371         mtime = div_u64(mtime, usable_segs_per_sec);
372         vblocks = div_u64(vblocks, usable_segs_per_sec);
373 
374         u = BLKS_TO_SEGS(sbi, vblocks * 100);
375 
 376         /* Handle the case where the system time was changed by the user */
377         if (mtime < sit_i->min_mtime)
378                 sit_i->min_mtime = mtime;
379         if (mtime > sit_i->max_mtime)
380                 sit_i->max_mtime = mtime;
381         if (sit_i->max_mtime != sit_i->min_mtime)
382                 age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
383                                 sit_i->max_mtime - sit_i->min_mtime);
384 
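             /*
              * Classic cost-benefit heuristic: benefit = age * (100 - u) / (100 + u),
              * where u is the utilization (valid blocks as a percentage) and age is
              * normalized to 0..100.  The result is subtracted from UINT_MAX so that
              * a higher benefit (older, emptier section) yields a lower cost for the
              * min-cost victim search in get_gc_cost().
              */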
385         return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
386 }
387 
388 static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
389                         unsigned int segno, struct victim_sel_policy *p)
390 {
391         if (p->alloc_mode == SSR)
392                 return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
393 
394         /* alloc_mode == LFS */
395         if (p->gc_mode == GC_GREEDY)
396                 return get_valid_blocks(sbi, segno, true);
397         else if (p->gc_mode == GC_CB)
398                 return get_cb_cost(sbi, segno);
399 
400         f2fs_bug_on(sbi, 1);
401         return 0;
402 }
403 
404 static unsigned int count_bits(const unsigned long *addr,
405                                 unsigned int offset, unsigned int len)
406 {
407         unsigned int end = offset + len, sum = 0;
408 
409         while (offset < end) {
410                 if (test_bit(offset++, addr))
411                         ++sum;
412         }
413         return sum;
414 }
415 
416 static bool f2fs_check_victim_tree(struct f2fs_sb_info *sbi,
417                                 struct rb_root_cached *root)
418 {
419 #ifdef CONFIG_F2FS_CHECK_FS
420         struct rb_node *cur = rb_first_cached(root), *next;
421         struct victim_entry *cur_ve, *next_ve;
422 
423         while (cur) {
424                 next = rb_next(cur);
425                 if (!next)
426                         return true;
427 
428                 cur_ve = rb_entry(cur, struct victim_entry, rb_node);
429                 next_ve = rb_entry(next, struct victim_entry, rb_node);
430 
431                 if (cur_ve->mtime > next_ve->mtime) {
432                         f2fs_info(sbi, "broken victim_rbtree, "
433                                 "cur_mtime(%llu) next_mtime(%llu)",
434                                 cur_ve->mtime, next_ve->mtime);
435                         return false;
436                 }
437                 cur = next;
438         }
439 #endif
440         return true;
441 }
442 
443 static struct victim_entry *__lookup_victim_entry(struct f2fs_sb_info *sbi,
444                                         unsigned long long mtime)
445 {
446         struct atgc_management *am = &sbi->am;
447         struct rb_node *node = am->root.rb_root.rb_node;
448         struct victim_entry *ve = NULL;
449 
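             /*
              * Walk down the mtime-ordered tree and return the last node visited.
              * No exact match is required: the caller only needs an entry whose
              * mtime is close to @mtime as a starting point for its scan.
              */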
450         while (node) {
451                 ve = rb_entry(node, struct victim_entry, rb_node);
452 
453                 if (mtime < ve->mtime)
454                         node = node->rb_left;
455                 else
456                         node = node->rb_right;
457         }
458         return ve;
459 }
460 
461 static struct victim_entry *__create_victim_entry(struct f2fs_sb_info *sbi,
462                 unsigned long long mtime, unsigned int segno)
463 {
464         struct atgc_management *am = &sbi->am;
465         struct victim_entry *ve;
466 
467         ve =  f2fs_kmem_cache_alloc(victim_entry_slab, GFP_NOFS, true, NULL);
468 
469         ve->mtime = mtime;
470         ve->segno = segno;
471 
472         list_add_tail(&ve->list, &am->victim_list);
473         am->victim_count++;
474 
475         return ve;
476 }
477 
478 static void __insert_victim_entry(struct f2fs_sb_info *sbi,
479                                 unsigned long long mtime, unsigned int segno)
480 {
481         struct atgc_management *am = &sbi->am;
482         struct rb_root_cached *root = &am->root;
483         struct rb_node **p = &root->rb_root.rb_node;
484         struct rb_node *parent = NULL;
485         struct victim_entry *ve;
486         bool left_most = true;
487 
488         /* look up rb tree to find parent node */
489         while (*p) {
490                 parent = *p;
491                 ve = rb_entry(parent, struct victim_entry, rb_node);
492 
493                 if (mtime < ve->mtime) {
494                         p = &(*p)->rb_left;
495                 } else {
496                         p = &(*p)->rb_right;
497                         left_most = false;
498                 }
499         }
500 
501         ve = __create_victim_entry(sbi, mtime, segno);
502 
503         rb_link_node(&ve->rb_node, parent, p);
504         rb_insert_color_cached(&ve->rb_node, root, left_most);
505 }
506 
507 static void add_victim_entry(struct f2fs_sb_info *sbi,
508                                 struct victim_sel_policy *p, unsigned int segno)
509 {
510         struct sit_info *sit_i = SIT_I(sbi);
511         unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
512         unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
513         unsigned long long mtime = 0;
514         unsigned int i;
515 
516         if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
517                 if (p->gc_mode == GC_AT &&
518                         get_valid_blocks(sbi, segno, true) == 0)
519                         return;
520         }
521 
522         for (i = 0; i < SEGS_PER_SEC(sbi); i++)
523                 mtime += get_seg_entry(sbi, start + i)->mtime;
524         mtime = div_u64(mtime, SEGS_PER_SEC(sbi));
525 
 526         /* Handle the case where the system time was changed by the user */
527         if (mtime < sit_i->min_mtime)
528                 sit_i->min_mtime = mtime;
529         if (mtime > sit_i->max_mtime)
530                 sit_i->max_mtime = mtime;
531         if (mtime < sit_i->dirty_min_mtime)
532                 sit_i->dirty_min_mtime = mtime;
533         if (mtime > sit_i->dirty_max_mtime)
534                 sit_i->dirty_max_mtime = mtime;
535 
536         /* don't choose young section as candidate */
537         if (sit_i->dirty_max_mtime - mtime < p->age_threshold)
538                 return;
539 
540         __insert_victim_entry(sbi, mtime, segno);
541 }
542 
543 static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
544                                                 struct victim_sel_policy *p)
545 {
546         struct sit_info *sit_i = SIT_I(sbi);
547         struct atgc_management *am = &sbi->am;
548         struct rb_root_cached *root = &am->root;
549         struct rb_node *node;
550         struct victim_entry *ve;
551         unsigned long long total_time;
552         unsigned long long age, u, accu;
553         unsigned long long max_mtime = sit_i->dirty_max_mtime;
554         unsigned long long min_mtime = sit_i->dirty_min_mtime;
555         unsigned int sec_blocks = CAP_BLKS_PER_SEC(sbi);
556         unsigned int vblocks;
557         unsigned int dirty_threshold = max(am->max_candidate_count,
558                                         am->candidate_ratio *
559                                         am->victim_count / 100);
560         unsigned int age_weight = am->age_weight;
561         unsigned int cost;
562         unsigned int iter = 0;
563 
564         if (max_mtime < min_mtime)
565                 return;
566 
567         max_mtime += 1;
568         total_time = max_mtime - min_mtime;
569 
570         accu = div64_u64(ULLONG_MAX, total_time);
571         accu = min_t(unsigned long long, div_u64(accu, 100),
572                                         DEFAULT_ACCURACY_CLASS);
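             /*
              * accu is a fixed-point scale factor derived from the mtime span and
              * capped at DEFAULT_ACCURACY_CLASS, so that the weighted age and
              * utilization terms computed below stay well under UINT_MAX.
              */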
573 
574         node = rb_first_cached(root);
575 next:
576         ve = rb_entry_safe(node, struct victim_entry, rb_node);
577         if (!ve)
578                 return;
579 
580         if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
581                 goto skip;
582 
583         /* age = 10000 * x% * 60 */
584         age = div64_u64(accu * (max_mtime - ve->mtime), total_time) *
585                                                                 age_weight;
586 
587         vblocks = get_valid_blocks(sbi, ve->segno, true);
588         f2fs_bug_on(sbi, !vblocks || vblocks == sec_blocks);
589 
590         /* u = 10000 * x% * 40 */
591         u = div64_u64(accu * (sec_blocks - vblocks), sec_blocks) *
592                                                         (100 - age_weight);
593 
594         f2fs_bug_on(sbi, age + u >= UINT_MAX);
595 
596         cost = UINT_MAX - (age + u);
597         iter++;
598 
599         if (cost < p->min_cost ||
600                         (cost == p->min_cost && age > p->oldest_age)) {
601                 p->min_cost = cost;
602                 p->oldest_age = age;
603                 p->min_segno = ve->segno;
604         }
605 skip:
606         if (iter < dirty_threshold) {
607                 node = rb_next(node);
608                 goto next;
609         }
610 }
611 
612 /*
613  * select candidates around source section in range of
614  * [target - dirty_threshold, target + dirty_threshold]
615  */
616 static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
617                                                 struct victim_sel_policy *p)
618 {
619         struct sit_info *sit_i = SIT_I(sbi);
620         struct atgc_management *am = &sbi->am;
621         struct victim_entry *ve;
622         unsigned long long age;
623         unsigned long long max_mtime = sit_i->dirty_max_mtime;
624         unsigned long long min_mtime = sit_i->dirty_min_mtime;
625         unsigned int vblocks;
626         unsigned int dirty_threshold = max(am->max_candidate_count,
627                                         am->candidate_ratio *
628                                         am->victim_count / 100);
629         unsigned int cost, iter;
630         int stage = 0;
631 
632         if (max_mtime < min_mtime)
633                 return;
634         max_mtime += 1;
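             /*
              * Two-stage neighbourhood scan around the target age: stage 0 starts
              * at the entry returned by __lookup_victim_entry() and walks toward
              * smaller mtimes (rb_prev); stage 1 restarts from the target and walks
              * toward larger mtimes (rb_next).  Each stage visits at most
              * dirty_threshold candidates.
              */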
635 next_stage:
636         iter = 0;
637         ve = __lookup_victim_entry(sbi, p->age);
638 next_node:
639         if (!ve) {
640                 if (stage++ == 0)
641                         goto next_stage;
642                 return;
643         }
644 
645         if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
646                 goto skip_node;
647 
648         age = max_mtime - ve->mtime;
649 
650         vblocks = get_seg_entry(sbi, ve->segno)->ckpt_valid_blocks;
651         f2fs_bug_on(sbi, !vblocks);
652 
653         /* rare case */
654         if (vblocks == BLKS_PER_SEG(sbi))
655                 goto skip_node;
656 
657         iter++;
658 
659         age = max_mtime - abs(p->age - age);
660         cost = UINT_MAX - vblocks;
661 
662         if (cost < p->min_cost ||
663                         (cost == p->min_cost && age > p->oldest_age)) {
664                 p->min_cost = cost;
665                 p->oldest_age = age;
666                 p->min_segno = ve->segno;
667         }
668 skip_node:
669         if (iter < dirty_threshold) {
670                 ve = rb_entry(stage == 0 ? rb_prev(&ve->rb_node) :
671                                         rb_next(&ve->rb_node),
672                                         struct victim_entry, rb_node);
673                 goto next_node;
674         }
675 
676         if (stage++ == 0)
677                 goto next_stage;
678 }
679 
680 static void lookup_victim_by_age(struct f2fs_sb_info *sbi,
681                                                 struct victim_sel_policy *p)
682 {
683         f2fs_bug_on(sbi, !f2fs_check_victim_tree(sbi, &sbi->am.root));
684 
685         if (p->gc_mode == GC_AT)
686                 atgc_lookup_victim(sbi, p);
687         else if (p->alloc_mode == AT_SSR)
688                 atssr_lookup_victim(sbi, p);
689         else
690                 f2fs_bug_on(sbi, 1);
691 }
692 
693 static void release_victim_entry(struct f2fs_sb_info *sbi)
694 {
695         struct atgc_management *am = &sbi->am;
696         struct victim_entry *ve, *tmp;
697 
698         list_for_each_entry_safe(ve, tmp, &am->victim_list, list) {
699                 list_del(&ve->list);
700                 kmem_cache_free(victim_entry_slab, ve);
701                 am->victim_count--;
702         }
703 
704         am->root = RB_ROOT_CACHED;
705 
706         f2fs_bug_on(sbi, am->victim_count);
707         f2fs_bug_on(sbi, !list_empty(&am->victim_list));
708 }
709 
710 static bool f2fs_pin_section(struct f2fs_sb_info *sbi, unsigned int segno)
711 {
712         struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
713         unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
714 
715         if (!dirty_i->enable_pin_section)
716                 return false;
717         if (!test_and_set_bit(secno, dirty_i->pinned_secmap))
718                 dirty_i->pinned_secmap_cnt++;
719         return true;
720 }
721 
722 static bool f2fs_pinned_section_exists(struct dirty_seglist_info *dirty_i)
723 {
724         return dirty_i->pinned_secmap_cnt;
725 }
726 
727 static bool f2fs_section_is_pinned(struct dirty_seglist_info *dirty_i,
728                                                 unsigned int secno)
729 {
730         return dirty_i->enable_pin_section &&
731                 f2fs_pinned_section_exists(dirty_i) &&
732                 test_bit(secno, dirty_i->pinned_secmap);
733 }
734 
735 static void f2fs_unpin_all_sections(struct f2fs_sb_info *sbi, bool enable)
736 {
737         unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
738 
739         if (f2fs_pinned_section_exists(DIRTY_I(sbi))) {
740                 memset(DIRTY_I(sbi)->pinned_secmap, 0, bitmap_size);
741                 DIRTY_I(sbi)->pinned_secmap_cnt = 0;
742         }
743         DIRTY_I(sbi)->enable_pin_section = enable;
744 }
745 
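      /*
       * Return 0 for non-pinned files, -EBUSY to make background GC skip a
       * pinned file, or -EAGAIN for foreground GC after trying to pin the
       * whole section (falling back to f2fs_pin_file_control() when section
       * pinning is disabled).
       */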
746 static int f2fs_gc_pinned_control(struct inode *inode, int gc_type,
747                                                         unsigned int segno)
748 {
749         if (!f2fs_is_pinned_file(inode))
750                 return 0;
751         if (gc_type != FG_GC)
752                 return -EBUSY;
753         if (!f2fs_pin_section(F2FS_I_SB(inode), segno))
754                 f2fs_pin_file_control(inode, true);
755         return -EAGAIN;
756 }
757 
758 /*
759  * This function is called from two paths.
760  * One is garbage collection and the other is SSR segment selection.
761  * When it is called during GC, it just gets a victim segment
762  * and it does not remove it from dirty seglist.
763  * When it is called from SSR segment selection, it finds a segment
764  * which has minimum valid blocks and removes it from dirty seglist.
765  */
766 int f2fs_get_victim(struct f2fs_sb_info *sbi, unsigned int *result,
767                         int gc_type, int type, char alloc_mode,
768                         unsigned long long age)
769 {
770         struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
771         struct sit_info *sm = SIT_I(sbi);
772         struct victim_sel_policy p;
773         unsigned int secno, last_victim;
774         unsigned int last_segment;
775         unsigned int nsearched;
776         bool is_atgc;
777         int ret = 0;
778 
779         mutex_lock(&dirty_i->seglist_lock);
780         last_segment = MAIN_SECS(sbi) * SEGS_PER_SEC(sbi);
781 
782         p.alloc_mode = alloc_mode;
783         p.age = age;
784         p.age_threshold = sbi->am.age_threshold;
785 
786 retry:
787         select_policy(sbi, gc_type, type, &p);
788         p.min_segno = NULL_SEGNO;
789         p.oldest_age = 0;
790         p.min_cost = get_max_cost(sbi, &p);
791 
792         is_atgc = (p.gc_mode == GC_AT || p.alloc_mode == AT_SSR);
793         nsearched = 0;
794 
795         if (is_atgc)
796                 SIT_I(sbi)->dirty_min_mtime = ULLONG_MAX;
797 
798         if (*result != NULL_SEGNO) {
799                 if (!get_valid_blocks(sbi, *result, false)) {
800                         ret = -ENODATA;
801                         goto out;
802                 }
803 
804                 if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
805                         ret = -EBUSY;
806                 else
807                         p.min_segno = *result;
808                 goto out;
809         }
810 
811         ret = -ENODATA;
812         if (p.max_search == 0)
813                 goto out;
814 
815         if (__is_large_section(sbi) && p.alloc_mode == LFS) {
816                 if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) {
817                         p.min_segno = sbi->next_victim_seg[BG_GC];
818                         *result = p.min_segno;
819                         sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
820                         goto got_result;
821                 }
822                 if (gc_type == FG_GC &&
823                                 sbi->next_victim_seg[FG_GC] != NULL_SEGNO) {
824                         p.min_segno = sbi->next_victim_seg[FG_GC];
825                         *result = p.min_segno;
826                         sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
827                         goto got_result;
828                 }
829         }
830 
831         last_victim = sm->last_victim[p.gc_mode];
832         if (p.alloc_mode == LFS && gc_type == FG_GC) {
833                 p.min_segno = check_bg_victims(sbi);
834                 if (p.min_segno != NULL_SEGNO)
835                         goto got_it;
836         }
837 
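             /*
              * Scan the dirty bitmap in ofs_unit-sized steps starting at p.offset.
              * When the end of the main area is reached, wrap to offset 0 and keep
              * scanning up to the previously recorded last_victim, so consecutive
              * calls resume where the last search stopped.
              */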
838         while (1) {
839                 unsigned long cost, *dirty_bitmap;
840                 unsigned int unit_no, segno;
841 
842                 dirty_bitmap = p.dirty_bitmap;
843                 unit_no = find_next_bit(dirty_bitmap,
844                                 last_segment / p.ofs_unit,
845                                 p.offset / p.ofs_unit);
846                 segno = unit_no * p.ofs_unit;
847                 if (segno >= last_segment) {
848                         if (sm->last_victim[p.gc_mode]) {
849                                 last_segment =
850                                         sm->last_victim[p.gc_mode];
851                                 sm->last_victim[p.gc_mode] = 0;
852                                 p.offset = 0;
853                                 continue;
854                         }
855                         break;
856                 }
857 
858                 p.offset = segno + p.ofs_unit;
859                 nsearched++;
860 
861 #ifdef CONFIG_F2FS_CHECK_FS
862                 /*
 863                  * skip an invalid segno (i.e. one that failed the block
 864                  * validity check during GC) to avoid an endless GC loop in
865                  * such cases.
866                  */
867                 if (test_bit(segno, sm->invalid_segmap))
868                         goto next;
869 #endif
870 
871                 secno = GET_SEC_FROM_SEG(sbi, segno);
872 
873                 if (sec_usage_check(sbi, secno))
874                         goto next;
875 
876                 /* Don't touch checkpointed data */
877                 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
878                         if (p.alloc_mode == LFS) {
879                                 /*
880                                  * LFS is set to find source section during GC.
881                                  * The victim should have no checkpointed data.
882                                  */
883                                 if (get_ckpt_valid_blocks(sbi, segno, true))
884                                         goto next;
885                         } else {
886                                 /*
 887                                  * SSR | AT_SSR are set to find a target segment
 888                                  * for writes, which may be filled with both checkpointed
889                                  * and newly written blocks.
890                                  */
891                                 if (!f2fs_segment_has_free_slot(sbi, segno))
892                                         goto next;
893                         }
894                 }
895 
896                 if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
897                         goto next;
898 
899                 if (gc_type == FG_GC && f2fs_section_is_pinned(dirty_i, secno))
900                         goto next;
901 
902                 if (is_atgc) {
903                         add_victim_entry(sbi, &p, segno);
904                         goto next;
905                 }
906 
907                 cost = get_gc_cost(sbi, segno, &p);
908 
909                 if (p.min_cost > cost) {
910                         p.min_segno = segno;
911                         p.min_cost = cost;
912                 }
913 next:
914                 if (nsearched >= p.max_search) {
915                         if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
916                                 sm->last_victim[p.gc_mode] =
917                                         last_victim + p.ofs_unit;
918                         else
919                                 sm->last_victim[p.gc_mode] = segno + p.ofs_unit;
920                         sm->last_victim[p.gc_mode] %=
921                                 (MAIN_SECS(sbi) * SEGS_PER_SEC(sbi));
922                         break;
923                 }
924         }
925 
926         /* get victim for GC_AT/AT_SSR */
927         if (is_atgc) {
928                 lookup_victim_by_age(sbi, &p);
929                 release_victim_entry(sbi);
930         }
931 
932         if (is_atgc && p.min_segno == NULL_SEGNO &&
933                         sm->elapsed_time < p.age_threshold) {
934                 p.age_threshold = 0;
935                 goto retry;
936         }
937 
938         if (p.min_segno != NULL_SEGNO) {
939 got_it:
940                 *result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
941 got_result:
942                 if (p.alloc_mode == LFS) {
943                         secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
944                         if (gc_type == FG_GC)
945                                 sbi->cur_victim_sec = secno;
946                         else
947                                 set_bit(secno, dirty_i->victim_secmap);
948                 }
949                 ret = 0;
950 
951         }
952 out:
953         if (p.min_segno != NULL_SEGNO)
954                 trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
955                                 sbi->cur_victim_sec,
956                                 prefree_segments(sbi), free_segments(sbi));
957         mutex_unlock(&dirty_i->seglist_lock);
958 
959         return ret;
960 }
961 
962 static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
963 {
964         struct inode_entry *ie;
965 
966         ie = radix_tree_lookup(&gc_list->iroot, ino);
967         if (ie)
968                 return ie->inode;
969         return NULL;
970 }
971 
972 static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
973 {
974         struct inode_entry *new_ie;
975 
976         if (inode == find_gc_inode(gc_list, inode->i_ino)) {
977                 iput(inode);
978                 return;
979         }
980         new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab,
981                                         GFP_NOFS, true, NULL);
982         new_ie->inode = inode;
983 
984         f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
985         list_add_tail(&new_ie->list, &gc_list->ilist);
986 }
987 
988 static void put_gc_inode(struct gc_inode_list *gc_list)
989 {
990         struct inode_entry *ie, *next_ie;
991 
992         list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
993                 radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
994                 iput(ie->inode);
995                 list_del(&ie->list);
996                 kmem_cache_free(f2fs_inode_entry_slab, ie);
997         }
998 }
999 
1000 static int check_valid_map(struct f2fs_sb_info *sbi,
1001                                 unsigned int segno, int offset)
1002 {
1003         struct sit_info *sit_i = SIT_I(sbi);
1004         struct seg_entry *sentry;
1005         int ret;
1006 
1007         down_read(&sit_i->sentry_lock);
1008         sentry = get_seg_entry(sbi, segno);
1009         ret = f2fs_test_bit(offset, sentry->cur_valid_map);
1010         up_read(&sit_i->sentry_lock);
1011         return ret;
1012 }
1013 
1014 /*
 1015  * This function compares the node address recorded in the summary with the
 1016  * one in the NAT. If they match, the node is still valid and is migrated
 1017  * with cold status; otherwise the stale entry is ignored.
1018  */
1019 static int gc_node_segment(struct f2fs_sb_info *sbi,
1020                 struct f2fs_summary *sum, unsigned int segno, int gc_type)
1021 {
1022         struct f2fs_summary *entry;
1023         block_t start_addr;
1024         int off;
1025         int phase = 0;
1026         bool fggc = (gc_type == FG_GC);
1027         int submitted = 0;
1028         unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);
1029 
1030         start_addr = START_BLOCK(sbi, segno);
1031 
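              /*
               * Three passes over this segment's summary entries: phase 0 prefetches
               * the NAT blocks, phase 1 prefetches the node pages, and phase 2
               * re-validates each block and migrates it via f2fs_move_node_page().
               */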
1032 next_step:
1033         entry = sum;
1034 
1035         if (fggc && phase == 2)
1036                 atomic_inc(&sbi->wb_sync_req[NODE]);
1037 
1038         for (off = 0; off < usable_blks_in_seg; off++, entry++) {
1039                 nid_t nid = le32_to_cpu(entry->nid);
1040                 struct page *node_page;
1041                 struct node_info ni;
1042                 int err;
1043 
 1044                 /* stop BG_GC if there are not enough free sections. */
1045                 if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
1046                         return submitted;
1047 
1048                 if (check_valid_map(sbi, segno, off) == 0)
1049                         continue;
1050 
1051                 if (phase == 0) {
1052                         f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
1053                                                         META_NAT, true);
1054                         continue;
1055                 }
1056 
1057                 if (phase == 1) {
1058                         f2fs_ra_node_page(sbi, nid);
1059                         continue;
1060                 }
1061 
1062                 /* phase == 2 */
1063                 node_page = f2fs_get_node_page(sbi, nid);
1064                 if (IS_ERR(node_page))
1065                         continue;
1066 
1067                 /* block may become invalid during f2fs_get_node_page */
1068                 if (check_valid_map(sbi, segno, off) == 0) {
1069                         f2fs_put_page(node_page, 1);
1070                         continue;
1071                 }
1072 
1073                 if (f2fs_get_node_info(sbi, nid, &ni, false)) {
1074                         f2fs_put_page(node_page, 1);
1075                         continue;
1076                 }
1077 
1078                 if (ni.blk_addr != start_addr + off) {
1079                         f2fs_put_page(node_page, 1);
1080                         continue;
1081                 }
1082 
1083                 err = f2fs_move_node_page(node_page, gc_type);
1084                 if (!err && gc_type == FG_GC)
1085                         submitted++;
1086                 stat_inc_node_blk_count(sbi, 1, gc_type);
1087         }
1088 
1089         if (++phase < 3)
1090                 goto next_step;
1091 
1092         if (fggc)
1093                 atomic_dec(&sbi->wb_sync_req[NODE]);
1094         return submitted;
1095 }
1096 
1097 /*
 1098  * Calculate the start block index for the given node offset.
 1099  * Be careful: the caller must pass only offsets of direct node blocks.
 1100  * Passing an offset that points to any other type of node block, such
 1101  * as an indirect or double-indirect node block, is a caller's
1102  * bug.
1103  */
1104 block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
1105 {
1106         unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
1107         unsigned int bidx;
1108 
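              /*
               * node_ofs 0 is the inode itself and offsets 1 and 2 are the two
               * direct node blocks.  Beyond that, indirect and double-indirect node
               * blocks are interleaved with the direct nodes; the constant offsets
               * and 'dec' subtract those non-data node blocks so that bidx counts
               * direct node blocks only.
               */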
1109         if (node_ofs == 0)
1110                 return 0;
1111 
1112         if (node_ofs <= 2) {
1113                 bidx = node_ofs - 1;
1114         } else if (node_ofs <= indirect_blks) {
1115                 int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
1116 
1117                 bidx = node_ofs - 2 - dec;
1118         } else {
1119                 int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
1120 
1121                 bidx = node_ofs - 5 - dec;
1122         }
1123         return bidx * ADDRS_PER_BLOCK(inode) + ADDRS_PER_INODE(inode);
1124 }
1125 
1126 static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
1127                 struct node_info *dni, block_t blkaddr, unsigned int *nofs)
1128 {
1129         struct page *node_page;
1130         nid_t nid;
1131         unsigned int ofs_in_node, max_addrs, base;
1132         block_t source_blkaddr;
1133 
1134         nid = le32_to_cpu(sum->nid);
1135         ofs_in_node = le16_to_cpu(sum->ofs_in_node);
1136 
1137         node_page = f2fs_get_node_page(sbi, nid);
1138         if (IS_ERR(node_page))
1139                 return false;
1140 
1141         if (f2fs_get_node_info(sbi, nid, dni, false)) {
1142                 f2fs_put_page(node_page, 1);
1143                 return false;
1144         }
1145 
1146         if (sum->version != dni->version) {
1147                 f2fs_warn(sbi, "%s: valid data with mismatched node version.",
1148                           __func__);
1149                 set_sbi_flag(sbi, SBI_NEED_FSCK);
1150         }
1151 
1152         if (f2fs_check_nid_range(sbi, dni->ino)) {
1153                 f2fs_put_page(node_page, 1);
1154                 return false;
1155         }
1156 
1157         if (IS_INODE(node_page)) {
1158                 base = offset_in_addr(F2FS_INODE(node_page));
1159                 max_addrs = DEF_ADDRS_PER_INODE;
1160         } else {
1161                 base = 0;
1162                 max_addrs = DEF_ADDRS_PER_BLOCK;
1163         }
1164 
1165         if (base + ofs_in_node >= max_addrs) {
1166                 f2fs_err(sbi, "Inconsistent blkaddr offset: base:%u, ofs_in_node:%u, max:%u, ino:%u, nid:%u",
1167                         base, ofs_in_node, max_addrs, dni->ino, dni->nid);
1168                 f2fs_put_page(node_page, 1);
1169                 return false;
1170         }
1171 
1172         *nofs = ofs_of_node(node_page);
1173         source_blkaddr = data_blkaddr(NULL, node_page, ofs_in_node);
1174         f2fs_put_page(node_page, 1);
1175 
1176         if (source_blkaddr != blkaddr) {
1177 #ifdef CONFIG_F2FS_CHECK_FS
1178                 unsigned int segno = GET_SEGNO(sbi, blkaddr);
1179                 unsigned long offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
1180 
1181                 if (unlikely(check_valid_map(sbi, segno, offset))) {
1182                         if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) {
1183                                 f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u",
1184                                          blkaddr, source_blkaddr, segno);
1185                                 set_sbi_flag(sbi, SBI_NEED_FSCK);
1186                         }
1187                 }
1188 #endif
1189                 return false;
1190         }
1191         return true;
1192 }
1193 
1194 static int ra_data_block(struct inode *inode, pgoff_t index)
1195 {
1196         struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1197         struct address_space *mapping = f2fs_is_cow_file(inode) ?
1198                                 F2FS_I(inode)->atomic_inode->i_mapping : inode->i_mapping;
1199         struct dnode_of_data dn;
1200         struct page *page;
1201         struct f2fs_io_info fio = {
1202                 .sbi = sbi,
1203                 .ino = inode->i_ino,
1204                 .type = DATA,
1205                 .temp = COLD,
1206                 .op = REQ_OP_READ,
1207                 .op_flags = 0,
1208                 .encrypted_page = NULL,
1209                 .in_list = 0,
1210         };
1211         int err;
1212 
1213         page = f2fs_grab_cache_page(mapping, index, true);
1214         if (!page)
1215                 return -ENOMEM;
1216 
1217         if (f2fs_lookup_read_extent_cache_block(inode, index,
1218                                                 &dn.data_blkaddr)) {
1219                 if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
1220                                                 DATA_GENERIC_ENHANCE_READ))) {
1221                         err = -EFSCORRUPTED;
1222                         goto put_page;
1223                 }
1224                 goto got_it;
1225         }
1226 
1227         set_new_dnode(&dn, inode, NULL, NULL, 0);
1228         err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
1229         if (err)
1230                 goto put_page;
1231         f2fs_put_dnode(&dn);
1232 
1233         if (!__is_valid_data_blkaddr(dn.data_blkaddr)) {
1234                 err = -ENOENT;
1235                 goto put_page;
1236         }
1237         if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
1238                                                 DATA_GENERIC_ENHANCE))) {
1239                 err = -EFSCORRUPTED;
1240                 goto put_page;
1241         }
1242 got_it:
1243         /* read page */
1244         fio.page = page;
1245         fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
1246 
1247         /*
 1248          * don't cache encrypted data in the meta inode until the previous
 1249          * dirty data has been written back, to avoid racing between GC and flush.
1250          */
1251         f2fs_wait_on_page_writeback(page, DATA, true, true);
1252 
1253         f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
1254 
1255         fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
1256                                         dn.data_blkaddr,
1257                                         FGP_LOCK | FGP_CREAT, GFP_NOFS);
1258         if (!fio.encrypted_page) {
1259                 err = -ENOMEM;
1260                 goto put_page;
1261         }
1262 
1263         err = f2fs_submit_page_bio(&fio);
1264         if (err)
1265                 goto put_encrypted_page;
1266         f2fs_put_page(fio.encrypted_page, 0);
1267         f2fs_put_page(page, 1);
1268 
1269         f2fs_update_iostat(sbi, inode, FS_DATA_READ_IO, F2FS_BLKSIZE);
1270         f2fs_update_iostat(sbi, NULL, FS_GDATA_READ_IO, F2FS_BLKSIZE);
1271 
1272         return 0;
1273 put_encrypted_page:
1274         f2fs_put_page(fio.encrypted_page, 1);
1275 put_page:
1276         f2fs_put_page(page, 1);
1277         return err;
1278 }
1279 
1280 /*
1281  * Move data block via META_MAPPING while keeping locked data page.
1282  * This can be used to move blocks, aka LBAs, directly on disk.
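       * Steps: read the source block into a META_MAPPING page, allocate a new
       * block address, copy the data, write it out synchronously, and finally
       * repoint the dnode at the new address.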
1283  */
1284 static int move_data_block(struct inode *inode, block_t bidx,
1285                                 int gc_type, unsigned int segno, int off)
1286 {
1287         struct address_space *mapping = f2fs_is_cow_file(inode) ?
1288                                 F2FS_I(inode)->atomic_inode->i_mapping : inode->i_mapping;
1289         struct f2fs_io_info fio = {
1290                 .sbi = F2FS_I_SB(inode),
1291                 .ino = inode->i_ino,
1292                 .type = DATA,
1293                 .temp = COLD,
1294                 .op = REQ_OP_READ,
1295                 .op_flags = 0,
1296                 .encrypted_page = NULL,
1297                 .in_list = 0,
1298         };
1299         struct dnode_of_data dn;
1300         struct f2fs_summary sum;
1301         struct node_info ni;
1302         struct page *page, *mpage;
1303         block_t newaddr;
1304         int err = 0;
1305         bool lfs_mode = f2fs_lfs_mode(fio.sbi);
1306         int type = fio.sbi->am.atgc_enabled && (gc_type == BG_GC) &&
1307                                 (fio.sbi->gc_mode != GC_URGENT_HIGH) ?
1308                                 CURSEG_ALL_DATA_ATGC : CURSEG_COLD_DATA;
1309 
1310         /* do not read out */
1311         page = f2fs_grab_cache_page(mapping, bidx, false);
1312         if (!page)
1313                 return -ENOMEM;
1314 
1315         if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
1316                 err = -ENOENT;
1317                 goto out;
1318         }
1319 
1320         err = f2fs_gc_pinned_control(inode, gc_type, segno);
1321         if (err)
1322                 goto out;
1323 
1324         set_new_dnode(&dn, inode, NULL, NULL, 0);
1325         err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
1326         if (err)
1327                 goto out;
1328 
1329         if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
1330                 ClearPageUptodate(page);
1331                 err = -ENOENT;
1332                 goto put_out;
1333         }
1334 
1335         /*
 1336          * don't cache encrypted data in the meta inode until the previous
 1337          * dirty data has been written back, to avoid racing between GC and flush.
1338          */
1339         f2fs_wait_on_page_writeback(page, DATA, true, true);
1340 
1341         f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
1342 
1343         err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
1344         if (err)
1345                 goto put_out;
1346 
1347         /* read page */
1348         fio.page = page;
1349         fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
1350 
1351         if (lfs_mode)
1352                 f2fs_down_write(&fio.sbi->io_order_lock);
1353 
1354         mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
1355                                         fio.old_blkaddr, false);
1356         if (!mpage) {
1357                 err = -ENOMEM;
1358                 goto up_out;
1359         }
1360 
1361         fio.encrypted_page = mpage;
1362 
1363         /* read source block in mpage */
1364         if (!PageUptodate(mpage)) {
1365                 err = f2fs_submit_page_bio(&fio);
1366                 if (err) {
1367                         f2fs_put_page(mpage, 1);
1368                         goto up_out;
1369                 }
1370 
1371                 f2fs_update_iostat(fio.sbi, inode, FS_DATA_READ_IO,
1372                                                         F2FS_BLKSIZE);
1373                 f2fs_update_iostat(fio.sbi, NULL, FS_GDATA_READ_IO,
1374                                                         F2FS_BLKSIZE);
1375 
1376                 lock_page(mpage);
1377                 if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) ||
1378                                                 !PageUptodate(mpage))) {
1379                         err = -EIO;
1380                         f2fs_put_page(mpage, 1);
1381                         goto up_out;
1382                 }
1383         }
1384 
1385         set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);
1386 
1387         /* allocate block address */
1388         err = f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
1389                                 &sum, type, NULL);
1390         if (err) {
1391                 f2fs_put_page(mpage, 1);
 1392                 /* filesystem should shut down, no need to recover the block */
1393                 goto up_out;
1394         }
1395 
1396         fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
1397                                 newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
1398         if (!fio.encrypted_page) {
1399                 err = -ENOMEM;
1400                 f2fs_put_page(mpage, 1);
1401                 goto recover_block;
1402         }
1403 
1404         /* write target block */
1405         f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
1406         memcpy(page_address(fio.encrypted_page),
1407                                 page_address(mpage), PAGE_SIZE);
1408         f2fs_put_page(mpage, 1);
1409 
1410         f2fs_invalidate_internal_cache(fio.sbi, fio.old_blkaddr);
1411 
1412         set_page_dirty(fio.encrypted_page);
1413         if (clear_page_dirty_for_io(fio.encrypted_page))
1414                 dec_page_count(fio.sbi, F2FS_DIRTY_META);
1415 
1416         set_page_writeback(fio.encrypted_page);
1417 
1418         fio.op = REQ_OP_WRITE;
1419         fio.op_flags = REQ_SYNC;
1420         fio.new_blkaddr = newaddr;
1421         f2fs_submit_page_write(&fio);
1422 
1423         f2fs_update_iostat(fio.sbi, NULL, FS_GC_DATA_IO, F2FS_BLKSIZE);
1424 
1425         f2fs_update_data_blkaddr(&dn, newaddr);
1426         set_inode_flag(inode, FI_APPEND_WRITE);
1427 
1428         f2fs_put_page(fio.encrypted_page, 1);
1429 recover_block:
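             /*
              * the move failed after a new block was allocated: revert the
              * mapping back to the old block address.
              */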
1430         if (err)
1431                 f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
1432                                                         true, true, true);
1433 up_out:
1434         if (lfs_mode)
1435                 f2fs_up_write(&fio.sbi->io_order_lock);
1436 put_out:
1437         f2fs_put_dnode(&dn);
1438 out:
1439         f2fs_put_page(page, 1);
1440         return err;
1441 }
1442 
1443 static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
1444                                                         unsigned int segno, int off)
1445 {
1446         struct page *page;
1447         int err = 0;
1448 
1449         page = f2fs_get_lock_data_page(inode, bidx, true);
1450         if (IS_ERR(page))
1451                 return PTR_ERR(page);
1452 
1453         if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
1454                 err = -ENOENT;
1455                 goto out;
1456         }
1457 
1458         err = f2fs_gc_pinned_control(inode, gc_type, segno);
1459         if (err)
1460                 goto out;
1461 
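             /*
              * Background GC only marks the page dirty and tags it as "gcing"
              * so that regular writeback migrates it later; foreground GC
              * writes the page out synchronously right below.
              */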
1462         if (gc_type == BG_GC) {
1463                 if (folio_test_writeback(page_folio(page))) {
1464                         err = -EAGAIN;
1465                         goto out;
1466                 }
1467                 set_page_dirty(page);
1468                 set_page_private_gcing(page);
1469         } else {
1470                 struct f2fs_io_info fio = {
1471                         .sbi = F2FS_I_SB(inode),
1472                         .ino = inode->i_ino,
1473                         .type = DATA,
1474                         .temp = COLD,
1475                         .op = REQ_OP_WRITE,
1476                         .op_flags = REQ_SYNC,
1477                         .old_blkaddr = NULL_ADDR,
1478                         .page = page,
1479                         .encrypted_page = NULL,
1480                         .need_lock = LOCK_REQ,
1481                         .io_type = FS_GC_DATA_IO,
1482                 };
1483                 bool is_dirty = PageDirty(page);
1484 
1485 retry:
1486                 f2fs_wait_on_page_writeback(page, DATA, true, true);
1487 
1488                 set_page_dirty(page);
1489                 if (clear_page_dirty_for_io(page)) {
1490                         inode_dec_dirty_pages(inode);
1491                         f2fs_remove_dirty_inode(inode);
1492                 }
1493 
1494                 set_page_private_gcing(page);
1495 
1496                 err = f2fs_do_write_data_page(&fio);
1497                 if (err) {
1498                         clear_page_private_gcing(page);
1499                         if (err == -ENOMEM) {
1500                                 memalloc_retry_wait(GFP_NOFS);
1501                                 goto retry;
1502                         }
1503                         if (is_dirty)
1504                                 set_page_dirty(page);
1505                 }
1506         }
1507 out:
1508         f2fs_put_page(page, 1);
1509         return err;
1510 }
1511 
1512 /*
1513  * This function tries to get the parent node of the victim data block and
1514  * checks the data block's validity. If the block is valid, it is copied out
1515  * with cold status and the parent node is updated.
1516  * If the parent node is not valid or the data block address differs,
1517  * the victim data block is ignored.
1518  */
1519 static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
1520                 struct gc_inode_list *gc_list, unsigned int segno, int gc_type,
1521                 bool force_migrate)
1522 {
1523         struct super_block *sb = sbi->sb;
1524         struct f2fs_summary *entry;
1525         block_t start_addr;
1526         int off;
1527         int phase = 0;
1528         int submitted = 0;
1529         unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);
1530 
1531         start_addr = START_BLOCK(sbi, segno);
1532 
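             /*
              * The summary entries are scanned in five passes (phase 0..4):
              * phase 0 readahead of NAT entries, phase 1 readahead of dnode
              * pages, phase 2 readahead of inode pages, phase 3 iget() plus
              * data page readahead, and phase 4 the actual block moves.
              */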
1533 next_step:
1534         entry = sum;
1535 
1536         for (off = 0; off < usable_blks_in_seg; off++, entry++) {
1537                 struct page *data_page;
1538                 struct inode *inode;
1539                 struct node_info dni; /* dnode info for the data */
1540                 unsigned int ofs_in_node, nofs;
1541                 block_t start_bidx;
1542                 nid_t nid = le32_to_cpu(entry->nid);
1543 
1544                 /*
1545                  * stop BG_GC if there are not enough free sections.
1546                  * Or, stop GC if the segment has become fully valid due to a
1547                  * race condition with SSR block allocation.
1548                  */
1549                 if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
1550                         (!force_migrate && get_valid_blocks(sbi, segno, true) ==
1551                                                         CAP_BLKS_PER_SEC(sbi)))
1552                         return submitted;
1553 
1554                 if (check_valid_map(sbi, segno, off) == 0)
1555                         continue;
1556 
1557                 if (phase == 0) {
1558                         f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
1559                                                         META_NAT, true);
1560                         continue;
1561                 }
1562 
1563                 if (phase == 1) {
1564                         f2fs_ra_node_page(sbi, nid);
1565                         continue;
1566                 }
1567 
1568                 /* Get the inode by ino, checking its validity */
1569                 if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
1570                         continue;
1571 
1572                 if (phase == 2) {
1573                         f2fs_ra_node_page(sbi, dni.ino);
1574                         continue;
1575                 }
1576 
1577                 ofs_in_node = le16_to_cpu(entry->ofs_in_node);
1578 
1579                 if (phase == 3) {
1580                         int err;
1581 
1582                         inode = f2fs_iget(sb, dni.ino);
1583                         if (IS_ERR(inode))
1584                                 continue;
1585 
1586                         if (is_bad_inode(inode) ||
1587                                         special_file(inode->i_mode)) {
1588                                 iput(inode);
1589                                 continue;
1590                         }
1591 
1592                         if (f2fs_has_inline_data(inode)) {
1593                                 iput(inode);
1594                                 set_sbi_flag(sbi, SBI_NEED_FSCK);
1595                                 f2fs_err_ratelimited(sbi,
1596                                         "inode %lx has both inline_data flag and "
1597                                         "data block, nid=%u, ofs_in_node=%u",
1598                                         inode->i_ino, dni.nid, ofs_in_node);
1599                                 continue;
1600                         }
1601 
1602                         err = f2fs_gc_pinned_control(inode, gc_type, segno);
1603                         if (err == -EAGAIN) {
1604                                 iput(inode);
1605                                 return submitted;
1606                         }
1607 
1608                         if (!f2fs_down_write_trylock(
1609                                 &F2FS_I(inode)->i_gc_rwsem[WRITE])) {
1610                                 iput(inode);
1611                                 sbi->skipped_gc_rwsem++;
1612                                 continue;
1613                         }
1614 
1615                         start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
1616                                                                 ofs_in_node;
1617 
1618                         if (f2fs_meta_inode_gc_required(inode)) {
1619                                 int err = ra_data_block(inode, start_bidx);
1620 
1621                                 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1622                                 if (err) {
1623                                         iput(inode);
1624                                         continue;
1625                                 }
1626                                 add_gc_inode(gc_list, inode);
1627                                 continue;
1628                         }
1629 
1630                         data_page = f2fs_get_read_data_page(inode, start_bidx,
1631                                                         REQ_RAHEAD, true, NULL);
1632                         f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1633                         if (IS_ERR(data_page)) {
1634                                 iput(inode);
1635                                 continue;
1636                         }
1637 
1638                         f2fs_put_page(data_page, 0);
1639                         add_gc_inode(gc_list, inode);
1640                         continue;
1641                 }
1642 
1643                 /* phase 4 */
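                     /*
                      * Use the inode cached in gc_list during phase 3; data that
                      * must go through the meta inode (e.g. encrypted files) is
                      * moved by move_data_block(), the rest by move_data_page().
                      */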
1644                 inode = find_gc_inode(gc_list, dni.ino);
1645                 if (inode) {
1646                         struct f2fs_inode_info *fi = F2FS_I(inode);
1647                         bool locked = false;
1648                         int err;
1649 
1650                         if (S_ISREG(inode->i_mode)) {
1651                                 if (!f2fs_down_write_trylock(&fi->i_gc_rwsem[WRITE])) {
1652                                         sbi->skipped_gc_rwsem++;
1653                                         continue;
1654                                 }
1655                                 if (!f2fs_down_write_trylock(
1656                                                 &fi->i_gc_rwsem[READ])) {
1657                                         sbi->skipped_gc_rwsem++;
1658                                         f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
1659                                         continue;
1660                                 }
1661                                 locked = true;
1662 
1663                                 /* wait for all in-flight AIO data */
1664                                 inode_dio_wait(inode);
1665                         }
1666 
1667                         start_bidx = f2fs_start_bidx_of_node(nofs, inode)
1668                                                                 + ofs_in_node;
1669                         if (f2fs_meta_inode_gc_required(inode))
1670                                 err = move_data_block(inode, start_bidx,
1671                                                         gc_type, segno, off);
1672                         else
1673                                 err = move_data_page(inode, start_bidx, gc_type,
1674                                                                 segno, off);
1675 
1676                         if (!err && (gc_type == FG_GC ||
1677                                         f2fs_meta_inode_gc_required(inode)))
1678                                 submitted++;
1679 
1680                         if (locked) {
1681                                 f2fs_up_write(&fi->i_gc_rwsem[READ]);
1682                                 f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
1683                         }
1684 
1685                         stat_inc_data_blk_count(sbi, 1, gc_type);
1686                 }
1687         }
1688 
1689         if (++phase < 5)
1690                 goto next_step;
1691 
1692         return submitted;
1693 }
1694 
1695 static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
1696                         int gc_type)
1697 {
1698         struct sit_info *sit_i = SIT_I(sbi);
1699         int ret;
1700 
1701         down_write(&sit_i->sentry_lock);
1702         ret = f2fs_get_victim(sbi, victim, gc_type, NO_CHECK_TYPE, LFS, 0);
1703         up_write(&sit_i->sentry_lock);
1704         return ret;
1705 }
1706 
1707 static int do_garbage_collect(struct f2fs_sb_info *sbi,
1708                                 unsigned int start_segno,
1709                                 struct gc_inode_list *gc_list, int gc_type,
1710                                 bool force_migrate, bool one_time)
1711 {
1712         struct page *sum_page;
1713         struct f2fs_summary_block *sum;
1714         struct blk_plug plug;
1715         unsigned int segno = start_segno;
1716         unsigned int end_segno = start_segno + SEGS_PER_SEC(sbi);
1717         unsigned int sec_end_segno;
1718         int seg_freed = 0, migrated = 0;
1719         unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
1720                                                 SUM_TYPE_DATA : SUM_TYPE_NODE;
1721         unsigned char data_type = (type == SUM_TYPE_DATA) ? DATA : NODE;
1722         int submitted = 0;
1723 
1724         if (__is_large_section(sbi)) {
1725                 sec_end_segno = rounddown(end_segno, SEGS_PER_SEC(sbi));
1726 
1727                 /*
1728                  * zone-capacity can be less than zone-size in zoned devices,
1729                  * resulting in fewer usable segments in the zone than expected;
1730                  * calculate the end segno of the zone that can be garbage
1731                  * collected.
1732                  */
1733                 if (f2fs_sb_has_blkzoned(sbi))
1734                         sec_end_segno -= SEGS_PER_SEC(sbi) -
1735                                         f2fs_usable_segs_in_sec(sbi, segno);
1736 
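                     /*
                      * For background or one-time GC, migrate only a window of
                      * segments per call (boosted on zoned devices when free
                      * blocks run low) instead of the whole section.
                      */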
1737                 if (gc_type == BG_GC || one_time) {
1738                         unsigned int window_granularity =
1739                                 sbi->migration_window_granularity;
1740 
1741                         if (f2fs_sb_has_blkzoned(sbi) &&
1742                                         !has_enough_free_blocks(sbi,
1743                                         LIMIT_BOOST_ZONED_GC))
1744                                 window_granularity *= BOOST_GC_MULTIPLE;
1745 
1746                         end_segno = start_segno + window_granularity;
1747                 }
1748 
1749                 if (end_segno > sec_end_segno)
1750                         end_segno = sec_end_segno;
1751         }
1752 
1753         sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type);
1754 
1755         /* readahead multiple SSA blocks that have contiguous addresses */
1756         if (__is_large_section(sbi))
1757                 f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
1758                                         end_segno - segno, META_SSA, true);
1759 
1760         /* reference all summary pages */
1761         while (segno < end_segno) {
1762                 sum_page = f2fs_get_sum_page(sbi, segno++);
1763                 if (IS_ERR(sum_page)) {
1764                         int err = PTR_ERR(sum_page);
1765 
1766                         end_segno = segno - 1;
1767                         for (segno = start_segno; segno < end_segno; segno++) {
1768                                 sum_page = find_get_page(META_MAPPING(sbi),
1769                                                 GET_SUM_BLOCK(sbi, segno));
1770                                 f2fs_put_page(sum_page, 0);
1771                                 f2fs_put_page(sum_page, 0);
1772                         }
1773                         return err;
1774                 }
1775                 unlock_page(sum_page);
1776         }
1777 
1778         blk_start_plug(&plug);
1779 
1780         for (segno = start_segno; segno < end_segno; segno++) {
1781 
1782                 /* find segment summary of victim */
1783                 sum_page = find_get_page(META_MAPPING(sbi),
1784                                         GET_SUM_BLOCK(sbi, segno));
1785                 f2fs_put_page(sum_page, 0);
1786 
1787                 if (get_valid_blocks(sbi, segno, false) == 0)
1788                         goto freed;
1789                 if (gc_type == BG_GC && __is_large_section(sbi) &&
1790                                 migrated >= sbi->migration_granularity)
1791                         goto skip;
1792                 if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
1793                         goto skip;
1794 
1795                 sum = page_address(sum_page);
1796                 if (type != GET_SUM_TYPE((&sum->footer))) {
1797                         f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT",
1798                                  segno, type, GET_SUM_TYPE((&sum->footer)));
1799                         f2fs_stop_checkpoint(sbi, false,
1800                                 STOP_CP_REASON_CORRUPTED_SUMMARY);
1801                         goto skip;
1802                 }
1803 
1804                 /*
1805                  * this is to avoid deadlock:
1806                  * - lock_page(sum_page)         - f2fs_replace_block
1807                  *  - check_valid_map()            - down_write(sentry_lock)
1808                  *   - down_read(sentry_lock)     - change_curseg()
1809                  *                                  - lock_page(sum_page)
1810                  */
1811                 if (type == SUM_TYPE_NODE)
1812                         submitted += gc_node_segment(sbi, sum->entries, segno,
1813                                                                 gc_type);
1814                 else
1815                         submitted += gc_data_segment(sbi, sum->entries, gc_list,
1816                                                         segno, gc_type,
1817                                                         force_migrate);
1818 
1819                 stat_inc_gc_seg_count(sbi, data_type, gc_type);
1820                 sbi->gc_reclaimed_segs[sbi->gc_mode]++;
1821                 migrated++;
1822 
1823 freed:
1824                 if (gc_type == FG_GC &&
1825                                 get_valid_blocks(sbi, segno, false) == 0)
1826                         seg_freed++;
1827 
1828                 if (__is_large_section(sbi))
1829                         sbi->next_victim_seg[gc_type] =
1830                                 (segno + 1 < sec_end_segno) ?
1831                                         segno + 1 : NULL_SEGNO;
1832 skip:
1833                 f2fs_put_page(sum_page, 0);
1834         }
1835 
1836         if (submitted)
1837                 f2fs_submit_merged_write(sbi, data_type);
1838 
1839         blk_finish_plug(&plug);
1840 
1841         if (migrated)
1842                 stat_inc_gc_sec_count(sbi, data_type, gc_type);
1843 
1844         return seg_freed;
1845 }
1846 
1847 int f2fs_gc(struct f2fs_sb_info *sbi, struct f2fs_gc_control *gc_control)
1848 {
1849         int gc_type = gc_control->init_gc_type;
1850         unsigned int segno = gc_control->victim_segno;
1851         int sec_freed = 0, seg_freed = 0, total_freed = 0, total_sec_freed = 0;
1852         int ret = 0;
1853         struct cp_control cpc;
1854         struct gc_inode_list gc_list = {
1855                 .ilist = LIST_HEAD_INIT(gc_list.ilist),
1856                 .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
1857         };
1858         unsigned int skipped_round = 0, round = 0;
1859         unsigned int upper_secs;
1860 
1861         trace_f2fs_gc_begin(sbi->sb, gc_type, gc_control->no_bg_gc,
1862                                 gc_control->nr_free_secs,
1863                                 get_pages(sbi, F2FS_DIRTY_NODES),
1864                                 get_pages(sbi, F2FS_DIRTY_DENTS),
1865                                 get_pages(sbi, F2FS_DIRTY_IMETA),
1866                                 free_sections(sbi),
1867                                 free_segments(sbi),
1868                                 reserved_segments(sbi),
1869                                 prefree_segments(sbi));
1870 
1871         cpc.reason = __get_cp_reason(sbi);
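             /*
              * Main GC loop: pick a victim, collect it, and jump back to
              * gc_more until enough sections are free or an error/stop
              * condition is hit.
              */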
1872 gc_more:
1873         sbi->skipped_gc_rwsem = 0;
1874         if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
1875                 ret = -EINVAL;
1876                 goto stop;
1877         }
1878         if (unlikely(f2fs_cp_error(sbi))) {
1879                 ret = -EIO;
1880                 goto stop;
1881         }
1882 
1883         /* Let's run FG_GC, if we don't have enough space. */
1884         if (has_not_enough_free_secs(sbi, 0, 0)) {
1885                 gc_type = FG_GC;
1886 
1887                 /*
1888                  * For example, if there are many prefree_segments below the
1889                  * given threshold, we can make them free by checkpoint. Then we
1890                  * secure free segments which no longer need FG_GC.
1891                  */
1892                 if (prefree_segments(sbi)) {
1893                         stat_inc_cp_call_count(sbi, TOTAL_CALL);
1894                         ret = f2fs_write_checkpoint(sbi, &cpc);
1895                         if (ret)
1896                                 goto stop;
1897                         /* Reset due to checkpoint */
1898                         sec_freed = 0;
1899                 }
1900         }
1901 
1902         /* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
1903         if (gc_type == BG_GC && gc_control->no_bg_gc) {
1904                 ret = -EINVAL;
1905                 goto stop;
1906         }
1907 retry:
1908         ret = __get_victim(sbi, &segno, gc_type);
1909         if (ret) {
1910                 /* allow searching for a victim in sections that have pinned data */
1911                 if (ret == -ENODATA && gc_type == FG_GC &&
1912                                 f2fs_pinned_section_exists(DIRTY_I(sbi))) {
1913                         f2fs_unpin_all_sections(sbi, false);
1914                         goto retry;
1915                 }
1916                 goto stop;
1917         }
1918 
1919         seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type,
1920                                 gc_control->should_migrate_blocks,
1921                                 gc_control->one_time);
1922         if (seg_freed < 0)
1923                 goto stop;
1924 
1925         total_freed += seg_freed;
1926 
1927         if (seg_freed == f2fs_usable_segs_in_sec(sbi, segno)) {
1928                 sec_freed++;
1929                 total_sec_freed++;
1930         }
1931 
1932         if (gc_control->one_time)
1933                 goto stop;
1934 
1935         if (gc_type == FG_GC) {
1936                 sbi->cur_victim_sec = NULL_SEGNO;
1937 
1938                 if (has_enough_free_secs(sbi, sec_freed, 0)) {
1939                         if (!gc_control->no_bg_gc &&
1940                             total_sec_freed < gc_control->nr_free_secs)
1941                                 goto go_gc_more;
1942                         goto stop;
1943                 }
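                     /*
                      * If most rounds were skipped because i_gc_rwsem could not
                      * be taken, give up retrying and write a checkpoint instead.
                      */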
1944                 if (sbi->skipped_gc_rwsem)
1945                         skipped_round++;
1946                 round++;
1947                 if (skipped_round > MAX_SKIP_GC_COUNT &&
1948                                 skipped_round * 2 >= round) {
1949                         stat_inc_cp_call_count(sbi, TOTAL_CALL);
1950                         ret = f2fs_write_checkpoint(sbi, &cpc);
1951                         goto stop;
1952                 }
1953         } else if (has_enough_free_secs(sbi, 0, 0)) {
1954                 goto stop;
1955         }
1956 
1957         __get_secs_required(sbi, NULL, &upper_secs, NULL);
1958 
1959         /*
1960          * Write checkpoint to reclaim prefree segments.
1961          * We need three extra sections for the writer's data/node/dentry.
1962          */
1963         if (free_sections(sbi) <= upper_secs + NR_GC_CHECKPOINT_SECS &&
1964                                 prefree_segments(sbi)) {
1965                 stat_inc_cp_call_count(sbi, TOTAL_CALL);
1966                 ret = f2fs_write_checkpoint(sbi, &cpc);
1967                 if (ret)
1968                         goto stop;
1969                 /* Reset due to checkpoint */
1970                 sec_freed = 0;
1971         }
1972 go_gc_more:
1973         segno = NULL_SEGNO;
1974         goto gc_more;
1975 
1976 stop:
1977         SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
1978         SIT_I(sbi)->last_victim[FLUSH_DEVICE] = gc_control->victim_segno;
1979 
1980         if (gc_type == FG_GC)
1981                 f2fs_unpin_all_sections(sbi, true);
1982 
1983         trace_f2fs_gc_end(sbi->sb, ret, total_freed, total_sec_freed,
1984                                 get_pages(sbi, F2FS_DIRTY_NODES),
1985                                 get_pages(sbi, F2FS_DIRTY_DENTS),
1986                                 get_pages(sbi, F2FS_DIRTY_IMETA),
1987                                 free_sections(sbi),
1988                                 free_segments(sbi),
1989                                 reserved_segments(sbi),
1990                                 prefree_segments(sbi));
1991 
1992         f2fs_up_write(&sbi->gc_lock);
1993 
1994         put_gc_inode(&gc_list);
1995 
1996         if (gc_control->err_gc_skipped && !ret)
1997                 ret = total_sec_freed ? 0 : -EAGAIN;
1998         return ret;
1999 }
2000 
2001 int __init f2fs_create_garbage_collection_cache(void)
2002 {
2003         victim_entry_slab = f2fs_kmem_cache_create("f2fs_victim_entry",
2004                                         sizeof(struct victim_entry));
2005         return victim_entry_slab ? 0 : -ENOMEM;
2006 }
2007 
2008 void f2fs_destroy_garbage_collection_cache(void)
2009 {
2010         kmem_cache_destroy(victim_entry_slab);
2011 }
2012 
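     /*
      * ATGC is enabled only when the corresponding mount option is set and the
      * filesystem has aged past DEF_GC_THREAD_AGE_THRESHOLD; the victim tree
      * and default tuning parameters are set up unconditionally.
      */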
2013 static void init_atgc_management(struct f2fs_sb_info *sbi)
2014 {
2015         struct atgc_management *am = &sbi->am;
2016 
2017         if (test_opt(sbi, ATGC) &&
2018                 SIT_I(sbi)->elapsed_time >= DEF_GC_THREAD_AGE_THRESHOLD)
2019                 am->atgc_enabled = true;
2020 
2021         am->root = RB_ROOT_CACHED;
2022         INIT_LIST_HEAD(&am->victim_list);
2023         am->victim_count = 0;
2024 
2025         am->candidate_ratio = DEF_GC_THREAD_CANDIDATE_RATIO;
2026         am->max_candidate_count = DEF_GC_THREAD_MAX_CANDIDATE_COUNT;
2027         am->age_weight = DEF_GC_THREAD_AGE_WEIGHT;
2028         am->age_threshold = DEF_GC_THREAD_AGE_THRESHOLD;
2029 }
2030 
2031 void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
2032 {
2033         sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;
2034 
2035         /* give warm/cold data area from slower device */
2036         if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi))
2037                 SIT_I(sbi)->last_victim[ALLOC_NEXT] =
2038                                 GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
2039 
2040         init_atgc_management(sbi);
2041 }
2042 
2043 int f2fs_gc_range(struct f2fs_sb_info *sbi,
2044                 unsigned int start_seg, unsigned int end_seg,
2045                 bool dry_run, unsigned int dry_run_sections)
2046 {
2047         unsigned int segno;
2048         unsigned int gc_secs = dry_run_sections;
2049 
2050         if (unlikely(f2fs_cp_error(sbi)))
2051                 return -EIO;
2052 
2053         for (segno = start_seg; segno <= end_seg; segno += SEGS_PER_SEC(sbi)) {
2054                 struct gc_inode_list gc_list = {
2055                         .ilist = LIST_HEAD_INIT(gc_list.ilist),
2056                         .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
2057                 };
2058 
2059                 do_garbage_collect(sbi, segno, &gc_list, FG_GC, true, false);
2060                 put_gc_inode(&gc_list);
2061 
2062                 if (!dry_run && get_valid_blocks(sbi, segno, true))
2063                         return -EAGAIN;
2064                 if (dry_run && dry_run_sections &&
2065                     !get_valid_blocks(sbi, segno, true) && --gc_secs == 0)
2066                         break;
2067 
2068                 if (fatal_signal_pending(current))
2069                         return -ERESTARTSYS;
2070         }
2071 
2072         return 0;
2073 }
2074 
2075 static int free_segment_range(struct f2fs_sb_info *sbi,
2076                                 unsigned int secs, bool dry_run)
2077 {
2078         unsigned int next_inuse, start, end;
2079         struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
2080         int gc_mode, gc_type;
2081         int err = 0;
2082         int type;
2083 
2084         /* Force block allocation for GC */
2085         MAIN_SECS(sbi) -= secs;
2086         start = MAIN_SECS(sbi) * SEGS_PER_SEC(sbi);
2087         end = MAIN_SEGS(sbi) - 1;
2088 
2089         mutex_lock(&DIRTY_I(sbi)->seglist_lock);
2090         for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++)
2091                 if (SIT_I(sbi)->last_victim[gc_mode] >= start)
2092                         SIT_I(sbi)->last_victim[gc_mode] = 0;
2093 
2094         for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++)
2095                 if (sbi->next_victim_seg[gc_type] >= start)
2096                         sbi->next_victim_seg[gc_type] = NULL_SEGNO;
2097         mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
2098 
2099         /* Move out cursegs from the target range */
2100         for (type = CURSEG_HOT_DATA; type < NR_CURSEG_PERSIST_TYPE; type++) {
2101                 err = f2fs_allocate_segment_for_resize(sbi, type, start, end);
2102                 if (err)
2103                         goto out;
2104         }
2105 
2106         /* do GC to move out valid blocks in the range */
2107         err = f2fs_gc_range(sbi, start, end, dry_run, 0);
2108         if (err || dry_run)
2109                 goto out;
2110 
2111         stat_inc_cp_call_count(sbi, TOTAL_CALL);
2112         err = f2fs_write_checkpoint(sbi, &cpc);
2113         if (err)
2114                 goto out;
2115 
2116         next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start);
2117         if (next_inuse <= end) {
2118                 f2fs_err(sbi, "segno %u should be free but still inuse!",
2119                          next_inuse);
2120                 f2fs_bug_on(sbi, 1);
2121         }
2122 out:
2123         MAIN_SECS(sbi) += secs;
2124         return err;
2125 }
2126 
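     /*
      * update_sb_metadata() and update_fs_metadata() take a signed section
      * count: f2fs_resize_fs() passes -secs to shrink and the positive value
      * to roll back on failure.
      */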
2127 static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
2128 {
2129         struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
2130         int section_count;
2131         int segment_count;
2132         int segment_count_main;
2133         long long block_count;
2134         int segs = secs * SEGS_PER_SEC(sbi);
2135 
2136         f2fs_down_write(&sbi->sb_lock);
2137 
2138         section_count = le32_to_cpu(raw_sb->section_count);
2139         segment_count = le32_to_cpu(raw_sb->segment_count);
2140         segment_count_main = le32_to_cpu(raw_sb->segment_count_main);
2141         block_count = le64_to_cpu(raw_sb->block_count);
2142 
2143         raw_sb->section_count = cpu_to_le32(section_count + secs);
2144         raw_sb->segment_count = cpu_to_le32(segment_count + segs);
2145         raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
2146         raw_sb->block_count = cpu_to_le64(block_count +
2147                         (long long)SEGS_TO_BLKS(sbi, segs));
2148         if (f2fs_is_multi_device(sbi)) {
2149                 int last_dev = sbi->s_ndevs - 1;
2150                 int dev_segs =
2151                         le32_to_cpu(raw_sb->devs[last_dev].total_segments);
2152 
2153                 raw_sb->devs[last_dev].total_segments =
2154                                                 cpu_to_le32(dev_segs + segs);
2155         }
2156 
2157         f2fs_up_write(&sbi->sb_lock);
2158 }
2159 
2160 static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
2161 {
2162         int segs = secs * SEGS_PER_SEC(sbi);
2163         long long blks = SEGS_TO_BLKS(sbi, segs);
2164         long long user_block_count =
2165                                 le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);
2166 
2167         SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs;
2168         MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
2169         MAIN_SECS(sbi) += secs;
2170         FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
2171         FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
2172         F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks);
2173 
2174         if (f2fs_is_multi_device(sbi)) {
2175                 int last_dev = sbi->s_ndevs - 1;
2176 
2177                 FDEV(last_dev).total_segments =
2178                                 (int)FDEV(last_dev).total_segments + segs;
2179                 FDEV(last_dev).end_blk =
2180                                 (long long)FDEV(last_dev).end_blk + blks;
2181 #ifdef CONFIG_BLK_DEV_ZONED
2182                 FDEV(last_dev).nr_blkz = FDEV(last_dev).nr_blkz +
2183                                         div_u64(blks, sbi->blocks_per_blkz);
2184 #endif
2185         }
2186 }
2187 
2188 int f2fs_resize_fs(struct file *filp, __u64 block_count)
2189 {
2190         struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
2191         __u64 old_block_count, shrunk_blocks;
2192         struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
2193         unsigned int secs;
2194         int err = 0;
2195         __u32 rem;
2196 
2197         old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count);
2198         if (block_count > old_block_count)
2199                 return -EINVAL;
2200 
2201         if (f2fs_is_multi_device(sbi)) {
2202                 int last_dev = sbi->s_ndevs - 1;
2203                 __u64 last_segs = FDEV(last_dev).total_segments;
2204 
2205                 if (block_count + SEGS_TO_BLKS(sbi, last_segs) <=
2206                                                                 old_block_count)
2207                         return -EINVAL;
2208         }
2209 
2210         /* new fs size should align to section size */
2211         div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem);
2212         if (rem)
2213                 return -EINVAL;
2214 
2215         if (block_count == old_block_count)
2216                 return 0;
2217 
2218         if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
2219                 f2fs_err(sbi, "Should run fsck to repair first.");
2220                 return -EFSCORRUPTED;
2221         }
2222 
2223         if (test_opt(sbi, DISABLE_CHECKPOINT)) {
2224                 f2fs_err(sbi, "Checkpoint should be enabled.");
2225                 return -EINVAL;
2226         }
2227 
2228         err = mnt_want_write_file(filp);
2229         if (err)
2230                 return err;
2231 
2232         shrunk_blocks = old_block_count - block_count;
2233         secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));
2234 
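             /*
              * The shrink runs in two passes: a dry-run free_segment_range()
              * under gc_lock and lock_op to check feasibility, then the real
              * pass further below with the filesystem frozen.
              */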
2235         /* stop other GC */
2236         if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
2237                 err = -EAGAIN;
2238                 goto out_drop_write;
2239         }
2240 
2241         /* stop CP to protect MAIN_SEC in free_segment_range */
2242         f2fs_lock_op(sbi);
2243 
2244         spin_lock(&sbi->stat_lock);
2245         if (shrunk_blocks + valid_user_blocks(sbi) +
2246                 sbi->current_reserved_blocks + sbi->unusable_block_count +
2247                 F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
2248                 err = -ENOSPC;
2249         spin_unlock(&sbi->stat_lock);
2250 
2251         if (err)
2252                 goto out_unlock;
2253 
2254         err = free_segment_range(sbi, secs, true);
2255 
2256 out_unlock:
2257         f2fs_unlock_op(sbi);
2258         f2fs_up_write(&sbi->gc_lock);
2259 out_drop_write:
2260         mnt_drop_write_file(filp);
2261         if (err)
2262                 return err;
2263 
2264         err = freeze_super(sbi->sb, FREEZE_HOLDER_USERSPACE);
2265         if (err)
2266                 return err;
2267 
2268         if (f2fs_readonly(sbi->sb)) {
2269                 err = thaw_super(sbi->sb, FREEZE_HOLDER_USERSPACE);
2270                 if (err)
2271                         return err;
2272                 return -EROFS;
2273         }
2274 
2275         f2fs_down_write(&sbi->gc_lock);
2276         f2fs_down_write(&sbi->cp_global_sem);
2277 
2278         spin_lock(&sbi->stat_lock);
2279         if (shrunk_blocks + valid_user_blocks(sbi) +
2280                 sbi->current_reserved_blocks + sbi->unusable_block_count +
2281                 F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
2282                 err = -ENOSPC;
2283         else
2284                 sbi->user_block_count -= shrunk_blocks;
2285         spin_unlock(&sbi->stat_lock);
2286         if (err)
2287                 goto out_err;
2288 
2289         set_sbi_flag(sbi, SBI_IS_RESIZEFS);
2290         err = free_segment_range(sbi, secs, false);
2291         if (err)
2292                 goto recover_out;
2293 
2294         update_sb_metadata(sbi, -secs);
2295 
2296         err = f2fs_commit_super(sbi, false);
2297         if (err) {
2298                 update_sb_metadata(sbi, secs);
2299                 goto recover_out;
2300         }
2301 
2302         update_fs_metadata(sbi, -secs);
2303         clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
2304         set_sbi_flag(sbi, SBI_IS_DIRTY);
2305 
2306         stat_inc_cp_call_count(sbi, TOTAL_CALL);
2307         err = f2fs_write_checkpoint(sbi, &cpc);
2308         if (err) {
2309                 update_fs_metadata(sbi, secs);
2310                 update_sb_metadata(sbi, secs);
2311                 f2fs_commit_super(sbi, false);
2312         }
2313 recover_out:
2314         clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
2315         if (err) {
2316                 set_sbi_flag(sbi, SBI_NEED_FSCK);
2317                 f2fs_err(sbi, "resize_fs failed, should run fsck to repair!");
2318 
2319                 spin_lock(&sbi->stat_lock);
2320                 sbi->user_block_count += shrunk_blocks;
2321                 spin_unlock(&sbi->stat_lock);
2322         }
2323 out_err:
2324         f2fs_up_write(&sbi->cp_global_sem);
2325         f2fs_up_write(&sbi->gc_lock);
2326         thaw_super(sbi->sb, FREEZE_HOLDER_USERSPACE);
2327         return err;
2328 }
2329 
