TOMOYO Linux Cross Reference
Linux/fs/xfs/xfs_discard.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010, 2023 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_trans.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_discard.h"
#include "xfs_error.h"
#include "xfs_extent_busy.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_ag.h"
#include "xfs_health.h"
#include "xfs_rtbitmap.h"

/*
 * Notes on an efficient, low latency fstrim algorithm
 *
 * We need to walk the filesystem free space and issue discards on the free
 * space that meets the search criteria (size and location). We cannot issue
 * discards on extents that might be in use, or are so recently in use they are
 * still marked as busy. To serialise against extent state changes whilst we are
 * gathering extents to trim, we must hold the AGF lock to lock out other
 * allocations and extent free operations that might change extent state.
 *
 * However, we cannot just hold the AGF for the entire AG free space walk whilst
 * we issue discards on each free space that is found. Storage devices can have
 * extremely slow discard implementations (e.g. ceph RBD) and so walking a
 * couple of million free extents and issuing synchronous discards on each
 * extent can take a *long* time. Whilst we are doing this walk, nothing else
 * can access the AGF, and we can stall transactions and hence the log whilst
 * modifications wait for the AGF lock to be released. This can lead to hung
 * tasks kicking the hung task timer and rebooting the system. This is bad.
 *
 * Hence we need to take a leaf from the bulkstat playbook. It takes the AGI
 * lock, gathers a range of inode cluster buffers that are allocated, drops the
 * AGI lock and then reads all the inode cluster buffers and processes them. It
 * loops doing this, using a cursor to keep track of where it is up to in the AG
 * for each iteration to restart the INOBT lookup from.
 *
 * We can't do this exactly with free space - once we drop the AGF lock, the
 * state of the free extent is out of our control and we cannot run a discard
 * safely on it in this situation. Unless, of course, we've marked the free
 * extent as busy and undergoing a discard operation whilst we held the AGF
 * locked.
 *
 * This is exactly how online discard works - free extents are marked busy when
 * they are freed, and once the extent free has been committed to the journal,
 * the busy extent record is marked as "undergoing discard" and the discard is
 * then issued on the free extent. Once the discard completes, the busy extent
 * record is removed and the extent is able to be allocated again.
 *
 * In the context of fstrim, if we find a free extent we need to discard, we
 * don't have to discard it immediately. All we need to do is record that free
 * extent as being busy and under discard, and all the allocation routines will
 * now avoid trying to allocate it. Hence if we mark the extent as busy under
 * the AGF lock, we can safely discard it without holding the AGF lock because
 * nothing will attempt to allocate that free space until the discard completes.
 *
 * This also allows us to issue discards asynchronously like we do with online
 * discard, and so for fast devices fstrim will run much faster as we can have
 * multiple discard operations in flight at once, as well as pipeline the free
 * extent search so that it overlaps with the in-flight discard IO.
 */
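
/*
 * Annotation (editor's sketch, not part of the upstream file): the batched
 * loop described above lives in xfs_trim_perag_extents() below and has
 * roughly this shape:
 *
 *	do {
 *		// AGF held inside: gather up to ~100 free extents, mark
 *		// them busy-under-discard, then drop the AGF lock.
 *		error = xfs_trim_gather_extents(pag, &tcur, extents);
 *
 *		// AGF no longer held: issue the discards asynchronously
 *		// while the next batch is gathered.
 *		error = xfs_discard_extents(mp, extents);
 *	} while (tcur.count != 0);
 */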

struct workqueue_struct *xfs_discard_wq;

static void
xfs_discard_endio_work(
	struct work_struct	*work)
{
	struct xfs_busy_extents	*extents =
		container_of(work, struct xfs_busy_extents, endio_work);

	xfs_extent_busy_clear(extents->mount, &extents->extent_list, false);
	kfree(extents->owner);
}

/*
 * Queue up the actual completion to a thread to avoid IRQ-safe locking for
 * pagb_lock.
 */
static void
xfs_discard_endio(
	struct bio		*bio)
{
	struct xfs_busy_extents	*extents = bio->bi_private;

	INIT_WORK(&extents->endio_work, xfs_discard_endio_work);
	queue_work(xfs_discard_wq, &extents->endio_work);
	bio_put(bio);
}

/*
 * Walk the discard list and issue discards on all the busy extents in the
 * list. We plug and chain the bios so that we only need a single completion
 * call to clear all the busy extents once the discards are complete.
 */
int
xfs_discard_extents(
	struct xfs_mount	*mp,
	struct xfs_busy_extents	*extents)
{
	struct xfs_extent_busy	*busyp;
	struct bio		*bio = NULL;
	struct blk_plug		plug;
	int			error = 0;

	blk_start_plug(&plug);
	list_for_each_entry(busyp, &extents->extent_list, list) {
		trace_xfs_discard_extent(mp, busyp->agno, busyp->bno,
					 busyp->length);

		error = __blkdev_issue_discard(mp->m_ddev_targp->bt_bdev,
				XFS_AGB_TO_DADDR(mp, busyp->agno, busyp->bno),
				XFS_FSB_TO_BB(mp, busyp->length),
				GFP_KERNEL, &bio);
		if (error && error != -EOPNOTSUPP) {
			xfs_info(mp,
	 "discard failed for extent [0x%llx,%u], error %d",
				 (unsigned long long)busyp->bno,
				 busyp->length,
				 error);
			break;
		}
	}

	if (bio) {
		bio->bi_private = extents;
		bio->bi_end_io = xfs_discard_endio;
		submit_bio(bio);
	} else {
		xfs_discard_endio_work(&extents->endio_work);
	}
	blk_finish_plug(&plug);

	return error;
}
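
/*
 * Annotation: __blkdev_issue_discard() chains each discard bio it builds
 * onto *bio, so only the final bio submitted above needs bi_end_io set -
 * its completion runs once every chained discard has finished.  If no bio
 * was built at all (empty list, or the device rejected the discards with
 * -EOPNOTSUPP), the endio work is invoked synchronously instead so the
 * busy extents still get cleared.
 */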

struct xfs_trim_cur {
	xfs_agblock_t	start;	/* AG block to resume the walk from */
	xfs_extlen_t	count;	/* extent length key to resume from; 0 == done */
	xfs_agblock_t	end;	/* last AG block in the trim range */
	xfs_extlen_t	minlen;	/* smallest extent worth discarding */
	bool		by_bno;	/* walk the bnobt rather than the cntbt */
};

static int
xfs_trim_gather_extents(
	struct xfs_perag	*pag,
	struct xfs_trim_cur	*tcur,
	struct xfs_busy_extents	*extents)
{
	struct xfs_mount	*mp = pag->pag_mount;
	struct xfs_trans	*tp;
	struct xfs_btree_cur	*cur;
	struct xfs_buf		*agbp;
	int			error;
	int			i;
	int			batch = 100;

	/*
	 * Force out the log.  This means any transactions that might have freed
	 * space before we take the AGF buffer lock are now on disk, and the
	 * volatile disk cache is flushed.
	 */
	xfs_log_force(mp, XFS_LOG_SYNC);

	error = xfs_trans_alloc_empty(mp, &tp);
	if (error)
		return error;

	error = xfs_alloc_read_agf(pag, tp, 0, &agbp);
	if (error)
		goto out_trans_cancel;

	if (tcur->by_bno) {
		/* sub-AG discard request always starts at tcur->start */
		cur = xfs_bnobt_init_cursor(mp, tp, agbp, pag);
		error = xfs_alloc_lookup_le(cur, tcur->start, 0, &i);
		if (!error && !i)
			error = xfs_alloc_lookup_ge(cur, tcur->start, 0, &i);
	} else if (tcur->start == 0) {
		/* first time through a by-len starts with max length */
		cur = xfs_cntbt_init_cursor(mp, tp, agbp, pag);
		error = xfs_alloc_lookup_ge(cur, 0, tcur->count, &i);
	} else {
		/* nth time through a by-len starts where we left off */
		cur = xfs_cntbt_init_cursor(mp, tp, agbp, pag);
		error = xfs_alloc_lookup_le(cur, tcur->start, tcur->count, &i);
	}
	if (error)
		goto out_del_cursor;
	if (i == 0) {
		/* nothing of that length left in the AG, we are done */
		tcur->count = 0;
		goto out_del_cursor;
	}

	/*
	 * Loop until we are done with all extents that are large
	 * enough to be worth discarding or we hit batch limits.
	 */
	while (i) {
		xfs_agblock_t	fbno;
		xfs_extlen_t	flen;

		error = xfs_alloc_get_rec(cur, &fbno, &flen, &i);
		if (error)
			break;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			xfs_btree_mark_sick(cur);
			error = -EFSCORRUPTED;
			break;
		}

		if (--batch <= 0) {
			/*
			 * Update the cursor to point at this extent so we
			 * restart the next batch from this extent.
			 */
			tcur->start = fbno;
			tcur->count = flen;
			break;
		}

		/*
		 * If the extent is entirely outside of the range we are
		 * supposed to trim, skip it.  Do not bother to trim down
		 * partially overlapping ranges for now.
		 */
		if (fbno + flen < tcur->start) {
			trace_xfs_discard_exclude(mp, pag->pag_agno, fbno, flen);
			goto next_extent;
		}
		if (fbno > tcur->end) {
			trace_xfs_discard_exclude(mp, pag->pag_agno, fbno, flen);
			if (tcur->by_bno) {
				tcur->count = 0;
				break;
			}
			goto next_extent;
		}

		/* Trim the extent returned to the range we want. */
		if (fbno < tcur->start) {
			flen -= tcur->start - fbno;
			fbno = tcur->start;
		}
		if (fbno + flen > tcur->end + 1)
			flen = tcur->end - fbno + 1;

		/* Too small?  Give up. */
		if (flen < tcur->minlen) {
			trace_xfs_discard_toosmall(mp, pag->pag_agno, fbno, flen);
			if (tcur->by_bno)
				goto next_extent;
			tcur->count = 0;
			break;
		}

		/*
		 * If any blocks in the range are still busy, skip the
		 * discard and try again the next time.
		 */
		if (xfs_extent_busy_search(mp, pag, fbno, flen)) {
			trace_xfs_discard_busy(mp, pag->pag_agno, fbno, flen);
			goto next_extent;
		}

		xfs_extent_busy_insert_discard(pag, fbno, flen,
				&extents->extent_list);
next_extent:
		if (tcur->by_bno)
			error = xfs_btree_increment(cur, 0, &i);
		else
			error = xfs_btree_decrement(cur, 0, &i);
		if (error)
			break;

		/*
		 * If there are no more records in the tree, we are done. Set
		 * the cursor count to 0 to indicate to the caller that there
		 * are no more extents to search.
		 */
		if (i == 0)
			tcur->count = 0;
	}

	/*
	 * If there was an error, release all the gathered busy extents because
	 * we aren't going to issue a discard on them any more.
	 */
	if (error)
		xfs_extent_busy_clear(mp, &extents->extent_list, false);
out_del_cursor:
	xfs_btree_del_cursor(cur, error);
out_trans_cancel:
	xfs_trans_cancel(tp);
	return error;
}
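
/*
 * Annotation: tcur is the restart cursor for the caller's gather/discard
 * loop.  On return, tcur->count == 0 means the walk of this AG is
 * complete; otherwise tcur->start/tcur->count identify the free space
 * record to resume from once this batch of extents has been discarded.
 */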

static bool
xfs_trim_should_stop(void)
{
	return fatal_signal_pending(current) || freezing(current);
}

/*
 * Iterate the free list gathering extents and discarding them. We need a
 * cursor for the repeated iteration of the gather/discard loop, so use the
 * longest extent we found in the last batch as the key to start the next.
 */
static int
xfs_trim_perag_extents(
	struct xfs_perag	*pag,
	xfs_agblock_t		start,
	xfs_agblock_t		end,
	xfs_extlen_t		minlen)
{
	struct xfs_trim_cur	tcur = {
		.start		= start,
		.count		= pag->pagf_longest,
		.end		= end,
		.minlen		= minlen,
	};
	int			error = 0;

	if (start != 0 || end != pag->block_count)
		tcur.by_bno = true;

	do {
		struct xfs_busy_extents *extents;

		extents = kzalloc(sizeof(*extents), GFP_KERNEL);
		if (!extents) {
			error = -ENOMEM;
			break;
		}

		extents->mount = pag->pag_mount;
		extents->owner = extents;
		INIT_LIST_HEAD(&extents->extent_list);

		error = xfs_trim_gather_extents(pag, &tcur, extents);
		if (error) {
			kfree(extents);
			break;
		}

		/*
		 * We hand the extent list to the discard function here so the
		 * discarded extents can be removed from the busy extent list.
		 * This allows the discards to run asynchronously with
		 * gathering the next round of extents to discard.
		 *
		 * However, we must ensure that we do not reference the extent
		 * list after this function call, as it may have been freed by
		 * the time control returns to us.
		 */
		error = xfs_discard_extents(pag->pag_mount, extents);
		if (error)
			break;

		if (xfs_trim_should_stop())
			break;

	} while (tcur.count != 0);

	return error;
}
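
/*
 * Annotation: extents->owner points back at the allocation itself, so
 * xfs_discard_endio_work() both clears the busy extents and kfree()s the
 * whole xfs_busy_extents structure when the chained discard bio
 * completes - hence the warning above about not touching the extent list
 * after xfs_discard_extents() returns.
 */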

static int
xfs_trim_datadev_extents(
	struct xfs_mount	*mp,
	xfs_daddr_t		start,
	xfs_daddr_t		end,
	xfs_extlen_t		minlen)
{
	xfs_agnumber_t		start_agno, end_agno;
	xfs_agblock_t		start_agbno, end_agbno;
	xfs_daddr_t		ddev_end;
	struct xfs_perag	*pag;
	int			last_error = 0, error;

	ddev_end = min_t(xfs_daddr_t, end,
			 XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks) - 1);

	start_agno = xfs_daddr_to_agno(mp, start);
	start_agbno = xfs_daddr_to_agbno(mp, start);
	end_agno = xfs_daddr_to_agno(mp, ddev_end);
	end_agbno = xfs_daddr_to_agbno(mp, ddev_end);

	for_each_perag_range(mp, start_agno, end_agno, pag) {
		xfs_agblock_t	agend = pag->block_count;

		if (start_agno == end_agno)
			agend = end_agbno;
		error = xfs_trim_perag_extents(pag, start_agbno, agend, minlen);
		if (error)
			last_error = error;

		if (xfs_trim_should_stop()) {
			xfs_perag_rele(pag);
			break;
		}
		start_agbno = 0;
	}

	return last_error;
}

#ifdef CONFIG_XFS_RT
struct xfs_trim_rtdev {
	/* list of rt extents to free */
	struct list_head	extent_list;

	/* minimum length that caller allows us to trim */
	xfs_rtblock_t		minlen_fsb;

	/* restart point for the rtbitmap walk */
	xfs_rtxnum_t		restart_rtx;

	/* stopping point for the current rtbitmap walk */
	xfs_rtxnum_t		stop_rtx;
};

struct xfs_rtx_busy {
	struct list_head	list;
	xfs_rtblock_t		bno;
	xfs_rtblock_t		length;
};

static void
xfs_discard_free_rtdev_extents(
	struct xfs_trim_rtdev	*tr)
{
	struct xfs_rtx_busy	*busyp, *n;

	list_for_each_entry_safe(busyp, n, &tr->extent_list, list) {
		list_del_init(&busyp->list);
		kfree(busyp);
	}
}

/*
 * Walk the discard list and issue discards on all the extents in the list.
 * We plug and chain the bios so that we only need a single synchronous wait
 * once all the discards have been issued, rather than waiting on each
 * extent in turn.
 */
static int
xfs_discard_rtdev_extents(
	struct xfs_mount	*mp,
	struct xfs_trim_rtdev	*tr)
{
	struct block_device	*bdev = mp->m_rtdev_targp->bt_bdev;
	struct xfs_rtx_busy	*busyp;
	struct bio		*bio = NULL;
	struct blk_plug		plug;
	xfs_rtblock_t		start = NULLRTBLOCK, length = 0;
	int			error = 0;

	blk_start_plug(&plug);
	list_for_each_entry(busyp, &tr->extent_list, list) {
		if (start == NULLRTBLOCK)
			start = busyp->bno;
		length += busyp->length;

		trace_xfs_discard_rtextent(mp, busyp->bno, busyp->length);

		error = __blkdev_issue_discard(bdev,
				XFS_FSB_TO_BB(mp, busyp->bno),
				XFS_FSB_TO_BB(mp, busyp->length),
				GFP_NOFS, &bio);
		if (error)
			break;
	}
	xfs_discard_free_rtdev_extents(tr);

	if (bio) {
		error = submit_bio_wait(bio);
		if (error == -EOPNOTSUPP)
			error = 0;
		if (error)
			xfs_info(mp,
	 "discard failed for rtextent [0x%llx,%llu], error %d",
				 (unsigned long long)start,
				 (unsigned long long)length,
				 error);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return error;
}
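
/*
 * Annotation: unlike the data device path, this waits for the discards
 * with submit_bio_wait(), and the caller keeps the rtbitmap lock held
 * across the call.  Realtime extents are not tracked in the busy extent
 * tree, so the free space cannot be handed back until the discard has
 * completed.
 */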

static int
xfs_trim_gather_rtextent(
	struct xfs_mount		*mp,
	struct xfs_trans		*tp,
	const struct xfs_rtalloc_rec	*rec,
	void				*priv)
{
	struct xfs_trim_rtdev		*tr = priv;
	struct xfs_rtx_busy		*busyp;
	xfs_rtblock_t			rbno, rlen;

	if (rec->ar_startext > tr->stop_rtx) {
		/*
		 * If we've scanned a large number of rtbitmap blocks, update
		 * the cursor to point at this extent so we restart the next
		 * batch from this extent.
		 */
		tr->restart_rtx = rec->ar_startext;
		return -ECANCELED;
	}

	rbno = xfs_rtx_to_rtb(mp, rec->ar_startext);
	rlen = xfs_rtx_to_rtb(mp, rec->ar_extcount);

	/* Ignore too small. */
	if (rlen < tr->minlen_fsb) {
		trace_xfs_discard_rttoosmall(mp, rbno, rlen);
		return 0;
	}

	busyp = kzalloc(sizeof(struct xfs_rtx_busy), GFP_KERNEL);
	if (!busyp)
		return -ENOMEM;

	busyp->bno = rbno;
	busyp->length = rlen;
	INIT_LIST_HEAD(&busyp->list);
	list_add_tail(&busyp->list, &tr->extent_list);

	tr->restart_rtx = rec->ar_startext + rec->ar_extcount;
	return 0;
}

static int
xfs_trim_rtdev_extents(
	struct xfs_mount	*mp,
	xfs_daddr_t		start,
	xfs_daddr_t		end,
	xfs_daddr_t		minlen)
{
	struct xfs_rtalloc_rec	low = { };
	struct xfs_rtalloc_rec	high = { };
	struct xfs_trim_rtdev	tr = {
		.minlen_fsb	= XFS_BB_TO_FSB(mp, minlen),
	};
	struct xfs_trans	*tp;
	xfs_daddr_t		rtdev_daddr;
	int			error;

	INIT_LIST_HEAD(&tr.extent_list);

	/* Shift the start and end downwards to match the rt device. */
	rtdev_daddr = XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
	if (start > rtdev_daddr)
		start -= rtdev_daddr;
	else
		start = 0;

	if (end <= rtdev_daddr)
		return 0;
	end -= rtdev_daddr;

	error = xfs_trans_alloc_empty(mp, &tp);
	if (error)
		return error;

	end = min_t(xfs_daddr_t, end,
			XFS_FSB_TO_BB(mp, mp->m_sb.sb_rblocks) - 1);

	/* Convert the rt blocks to rt extents */
	low.ar_startext = xfs_rtb_to_rtxup(mp, XFS_BB_TO_FSB(mp, start));
	high.ar_startext = xfs_rtb_to_rtx(mp, XFS_BB_TO_FSBT(mp, end));

	/*
	 * Walk the free ranges between low and high.  The query_range function
	 * trims the extents returned.
	 */
	do {
		tr.stop_rtx = low.ar_startext + (mp->m_sb.sb_blocksize * NBBY);
		xfs_rtbitmap_lock_shared(mp, XFS_RBMLOCK_BITMAP);
		error = xfs_rtalloc_query_range(mp, tp, &low, &high,
				xfs_trim_gather_rtextent, &tr);

		if (error == -ECANCELED)
			error = 0;
		if (error) {
			xfs_rtbitmap_unlock_shared(mp, XFS_RBMLOCK_BITMAP);
			xfs_discard_free_rtdev_extents(&tr);
			break;
		}

		if (list_empty(&tr.extent_list)) {
			xfs_rtbitmap_unlock_shared(mp, XFS_RBMLOCK_BITMAP);
			break;
		}

		error = xfs_discard_rtdev_extents(mp, &tr);
		xfs_rtbitmap_unlock_shared(mp, XFS_RBMLOCK_BITMAP);
		if (error)
			break;

		low.ar_startext = tr.restart_rtx;
	} while (!xfs_trim_should_stop() && low.ar_startext <= high.ar_startext);

	xfs_trans_cancel(tp);
	return error;
}
#else
# define xfs_trim_rtdev_extents(...)	(-EOPNOTSUPP)
#endif /* CONFIG_XFS_RT */

/*
 * Trim a range of the filesystem.
 *
 * Note: the parameters passed from userspace are byte ranges into the
 * filesystem which do not match the format we use for filesystem block
 * addressing. FSB addressing is sparse (AGNO|AGBNO), while the incoming format
 * is a linear address range. Hence we need to use DADDR based conversions and
 * comparisons for determining the correct offset and regions to trim.
 *
 * The realtime device is mapped into the FITRIM "address space" immediately
 * after the data device.
 */
int
xfs_ioc_trim(
	struct xfs_mount		*mp,
	struct fstrim_range __user	*urange)
{
	unsigned int		granularity =
		bdev_discard_granularity(mp->m_ddev_targp->bt_bdev);
	struct block_device	*rt_bdev = NULL;
	struct fstrim_range	range;
	xfs_daddr_t		start, end;
	xfs_extlen_t		minlen;
	xfs_rfsblock_t		max_blocks;
	int			error, last_error = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (mp->m_rtdev_targp &&
	    bdev_max_discard_sectors(mp->m_rtdev_targp->bt_bdev))
		rt_bdev = mp->m_rtdev_targp->bt_bdev;
	if (!bdev_max_discard_sectors(mp->m_ddev_targp->bt_bdev) && !rt_bdev)
		return -EOPNOTSUPP;

	if (rt_bdev)
		granularity = max(granularity,
				  bdev_discard_granularity(rt_bdev));

	/*
	 * We haven't recovered the log, so we cannot use our bnobt-guided
	 * storage zapping commands.
	 */
	if (xfs_has_norecovery(mp))
		return -EROFS;

	if (copy_from_user(&range, urange, sizeof(range)))
		return -EFAULT;

	range.minlen = max_t(u64, granularity, range.minlen);
	minlen = XFS_B_TO_FSB(mp, range.minlen);

	/*
	 * Truncating down the len isn't actually quite correct, but using
	 * BBTOB would mean we trivially get overflows for values
	 * of ULLONG_MAX or slightly lower.  And ULLONG_MAX is the default
	 * used by the fstrim application.  In the end it really doesn't
	 * matter as trimming blocks is an advisory interface.
	 */
	max_blocks = mp->m_sb.sb_dblocks + mp->m_sb.sb_rblocks;
	if (range.start >= XFS_FSB_TO_B(mp, max_blocks) ||
	    range.minlen > XFS_FSB_TO_B(mp, mp->m_ag_max_usable) ||
	    range.len < mp->m_sb.sb_blocksize)
		return -EINVAL;

	start = BTOBB(range.start);
	end = start + BTOBBT(range.len) - 1;

	if (bdev_max_discard_sectors(mp->m_ddev_targp->bt_bdev)) {
		error = xfs_trim_datadev_extents(mp, start, end, minlen);
		if (error)
			last_error = error;
	}

	if (rt_bdev && !xfs_trim_should_stop()) {
		error = xfs_trim_rtdev_extents(mp, start, end, minlen);
		if (error)
			last_error = error;
	}

	if (last_error)
		return last_error;

	range.len = min_t(unsigned long long, range.len,
			  XFS_FSB_TO_B(mp, max_blocks));
	if (copy_to_user(urange, &range, sizeof(range)))
		return -EFAULT;
	return 0;
}

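For context, a minimal userspace caller of the FITRIM ioctl that xfs_ioc_trim()
implements might look like the sketch below. It assumes a mounted filesystem
path in argv[1] and CAP_SYS_ADMIN (per the capable() check above); fstrim(8)
is the real consumer of this interface and drives it with the same structure.

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>		/* FITRIM, struct fstrim_range */

	int
	main(int argc, char **argv)
	{
		/* Defaults matching fstrim: whole device, no minimum length. */
		struct fstrim_range	range = {
			.start	= 0,
			.len	= UINT64_MAX,
			.minlen	= 0,
		};
		int			fd;

		if (argc != 2) {
			fprintf(stderr, "usage: %s <mountpoint>\n", argv[0]);
			return 1;
		}
		fd = open(argv[1], O_RDONLY);
		if (fd < 0 || ioctl(fd, FITRIM, &range) < 0) {
			perror("FITRIM");
			return 1;
		}
		/* On return the kernel has clamped range.len (see the end of
		 * xfs_ioc_trim() above). */
		printf("trimmed up to %llu bytes\n",
				(unsigned long long)range.len);
		return 0;
	}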
