
TOMOYO Linux Cross Reference
Linux/fs/xfs/xfs_fsops.c


Diff markup

Differences between /fs/xfs/xfs_fsops.c (Architecture m68k) and /fs/xfs/xfs_fsops.c (Architecture sparc): the two copies are identical, so the file is shown once below.


// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_error.h"
#include "xfs_alloc.h"
#include "xfs_fsops.h"
#include "xfs_trans_space.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_trace.h"

/*
 * Write new AG headers to disk. Non-transactional, but need to be
 * written and completed prior to the growfs transaction being logged.
 * To do this, we use a delayed write buffer list and wait for
 * submission and IO completion of the list as a whole. This allows the
 * IO subsystem to merge all the AG headers in a single AG into a single
 * IO and hide most of the latency of the IO from us.
 *
 * This also means that if we get an error whilst building the buffer
 * list to write, we can cancel the entire list without having written
 * anything.
 */
static int
xfs_resizefs_init_new_ags(
	struct xfs_trans	*tp,
	struct aghdr_init_data	*id,
	xfs_agnumber_t		oagcount,
	xfs_agnumber_t		nagcount,
	xfs_rfsblock_t		delta,
	struct xfs_perag	*last_pag,
	bool			*lastag_extended)
{
	struct xfs_mount	*mp = tp->t_mountp;
	xfs_rfsblock_t		nb = mp->m_sb.sb_dblocks + delta;
	int			error;

	*lastag_extended = false;

	INIT_LIST_HEAD(&id->buffer_list);
	for (id->agno = nagcount - 1;
	     id->agno >= oagcount;
	     id->agno--, delta -= id->agsize) {

		if (id->agno == nagcount - 1)
			id->agsize = nb - (id->agno *
					(xfs_rfsblock_t)mp->m_sb.sb_agblocks);
		else
			id->agsize = mp->m_sb.sb_agblocks;

		error = xfs_ag_init_headers(mp, id);
		if (error) {
			xfs_buf_delwri_cancel(&id->buffer_list);
			return error;
		}
	}

	error = xfs_buf_delwri_submit(&id->buffer_list);
	if (error)
		return error;

	if (delta) {
		*lastag_extended = true;
		error = xfs_ag_extend_space(last_pag, tp, delta);
	}
	return error;
}

/*
 * growfs operations
 */
static int
xfs_growfs_data_private(
	struct xfs_mount	*mp,		/* mount point for filesystem */
	struct xfs_growfs_data	*in)		/* growfs data input struct */
{
	xfs_agnumber_t		oagcount = mp->m_sb.sb_agcount;
	struct xfs_buf		*bp;
	int			error;
	xfs_agnumber_t		nagcount;
	xfs_agnumber_t		nagimax = 0;
	xfs_rfsblock_t		nb, nb_div, nb_mod;
	int64_t			delta;
	bool			lastag_extended = false;
	struct xfs_trans	*tp;
	struct aghdr_init_data	id = {};
	struct xfs_perag	*last_pag;

	nb = in->newblocks;
	error = xfs_sb_validate_fsb_count(&mp->m_sb, nb);
	if (error)
		return error;

	if (nb > mp->m_sb.sb_dblocks) {
		error = xfs_buf_read_uncached(mp->m_ddev_targp,
				XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1),
				XFS_FSS_TO_BB(mp, 1), 0, &bp, NULL);
		if (error)
			return error;
		xfs_buf_relse(bp);
	}

	nb_div = nb;
	nb_mod = do_div(nb_div, mp->m_sb.sb_agblocks);
	if (nb_mod && nb_mod >= XFS_MIN_AG_BLOCKS)
		nb_div++;
	else if (nb_mod)
		nb = nb_div * mp->m_sb.sb_agblocks;

	if (nb_div > XFS_MAX_AGNUMBER + 1) {
		nb_div = XFS_MAX_AGNUMBER + 1;
		nb = nb_div * mp->m_sb.sb_agblocks;
	}
	nagcount = nb_div;
	delta = nb - mp->m_sb.sb_dblocks;
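	/*
	 * Worked example of the AG sizing above (illustrative, made-up
	 * numbers): with sb_agblocks = 100000 and nb = 250050, do_div()
	 * leaves nb_div = 2 and nb_mod = 50050.  Since the remainder is at
	 * least XFS_MIN_AG_BLOCKS, nb_div is bumped to 3 and the last AG
	 * becomes a short AG of 50050 blocks.  Had the remainder been
	 * smaller than XFS_MIN_AG_BLOCKS (say nb = 200010), nb would
	 * instead have been truncated to 200000, i.e. two full AGs.
	 */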
	/*
	 * Reject filesystems with a single AG because they are not
	 * supported, and reject a shrink operation that would cause a
	 * filesystem to become unsupported.
	 */
	if (delta < 0 && nagcount < 2)
		return -EINVAL;

	/* No work to do */
	if (delta == 0)
		return 0;

	/* TODO: shrinking the entire AGs hasn't yet completed */
	if (nagcount < oagcount)
		return -EINVAL;

	/* allocate the new per-ag structures */
	error = xfs_initialize_perag(mp, oagcount, nagcount, nb, &nagimax);
	if (error)
		return error;

	if (delta > 0)
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growdata,
				XFS_GROWFS_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
				&tp);
	else
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growdata, -delta, 0,
				0, &tp);
	if (error)
		goto out_free_unused_perag;

	last_pag = xfs_perag_get(mp, oagcount - 1);
	if (delta > 0) {
		error = xfs_resizefs_init_new_ags(tp, &id, oagcount, nagcount,
				delta, last_pag, &lastag_extended);
	} else {
		xfs_warn_mount(mp, XFS_OPSTATE_WARNED_SHRINK,
	"EXPERIMENTAL online shrink feature in use. Use at your own risk!");

		error = xfs_ag_shrink_space(last_pag, &tp, -delta);
	}
	xfs_perag_put(last_pag);
	if (error)
		goto out_trans_cancel;

	/*
	 * Update changed superblock fields transactionally. These are not
	 * seen by the rest of the world until the transaction commit applies
	 * them atomically to the superblock.
	 */
	if (nagcount > oagcount)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_AGCOUNT, nagcount - oagcount);
	if (delta)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_DBLOCKS, delta);
	if (id.nfree)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, id.nfree);

	/*
	 * Sync sb counters now to reflect the updated values. This is
	 * particularly important for shrink because the write verifier
	 * will fail if sb_fdblocks is ever larger than sb_dblocks.
	 */
	if (xfs_has_lazysbcount(mp))
		xfs_log_sb(tp);

	xfs_trans_set_sync(tp);
	error = xfs_trans_commit(tp);
	if (error)
		return error;

	/* New allocation groups fully initialized, so update mount struct */
	if (nagimax)
		mp->m_maxagi = nagimax;
	xfs_set_low_space_thresholds(mp);
	mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);

	if (delta > 0) {
		/*
		 * If we expanded the last AG, free the per-AG reservation
		 * so we can reinitialize it with the new size.
		 */
		if (lastag_extended) {
			struct xfs_perag	*pag;

			pag = xfs_perag_get(mp, id.agno);
			xfs_ag_resv_free(pag);
			xfs_perag_put(pag);
		}
		/*
		 * Reserve AG metadata blocks. ENOSPC here does not mean there
		 * was a growfs failure, just that there still isn't space for
		 * new user data after the grow has been run.
		 */
		error = xfs_fs_reserve_ag_blocks(mp);
		if (error == -ENOSPC)
			error = 0;
	}
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
out_free_unused_perag:
	if (nagcount > oagcount)
		xfs_free_perag_range(mp, oagcount, nagcount);
	return error;
}

static int
xfs_growfs_log_private(
	struct xfs_mount	*mp,	/* mount point for filesystem */
	struct xfs_growfs_log	*in)	/* growfs log input struct */
{
	xfs_extlen_t		nb;

	nb = in->newblocks;
	if (nb < XFS_MIN_LOG_BLOCKS || nb < XFS_B_TO_FSB(mp, XFS_MIN_LOG_BYTES))
		return -EINVAL;
	if (nb == mp->m_sb.sb_logblocks &&
	    in->isint == (mp->m_sb.sb_logstart != 0))
		return -EINVAL;
	/*
	 * Moving the log is hard, need new interfaces to sync
	 * the log first, hold off all activity while moving it.
	 * Can have shorter or longer log in the same space,
	 * or transform internal to external log or vice versa.
	 */
	return -ENOSYS;
}

static int
xfs_growfs_imaxpct(
	struct xfs_mount	*mp,
	__u32			imaxpct)
{
	struct xfs_trans	*tp;
	int			dpct;
	int			error;

	if (imaxpct > 100)
		return -EINVAL;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growdata,
			XFS_GROWFS_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;

	dpct = imaxpct - mp->m_sb.sb_imax_pct;
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_IMAXPCT, dpct);
	xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp);
}

/*
 * protected versions of growfs function acquire and release locks on the mount
 * point - exported through ioctls: XFS_IOC_FSGROWFSDATA, XFS_IOC_FSGROWFSLOG,
 * XFS_IOC_FSGROWFSRT
 */
int
xfs_growfs_data(
	struct xfs_mount	*mp,
	struct xfs_growfs_data	*in)
{
	int			error = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!mutex_trylock(&mp->m_growlock))
		return -EWOULDBLOCK;

	/* update imaxpct separately to the physical grow of the filesystem */
	if (in->imaxpct != mp->m_sb.sb_imax_pct) {
		error = xfs_growfs_imaxpct(mp, in->imaxpct);
		if (error)
			goto out_error;
	}

	if (in->newblocks != mp->m_sb.sb_dblocks) {
		error = xfs_growfs_data_private(mp, in);
		if (error)
			goto out_error;
	}

	/* Post growfs calculations needed to reflect new state in operations */
	if (mp->m_sb.sb_imax_pct) {
		uint64_t icount = mp->m_sb.sb_dblocks * mp->m_sb.sb_imax_pct;
		do_div(icount, 100);
		M_IGEO(mp)->maxicount = XFS_FSB_TO_INO(mp, icount);
	} else
		M_IGEO(mp)->maxicount = 0;

	/* Update secondary superblocks now the physical grow has completed */
	error = xfs_update_secondary_sbs(mp);

out_error:
	/*
	 * Increment the generation unconditionally, the error could be from
	 * updating the secondary superblocks, in which case the new size
	 * is live already.
	 */
	mp->m_generation++;
	mutex_unlock(&mp->m_growlock);
	return error;
}

int
xfs_growfs_log(
	xfs_mount_t		*mp,
	struct xfs_growfs_log	*in)
{
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!mutex_trylock(&mp->m_growlock))
		return -EWOULDBLOCK;
	error = xfs_growfs_log_private(mp, in);
	mutex_unlock(&mp->m_growlock);
	return error;
}

/*
 * Reserve the requested number of blocks if available. Otherwise return
 * as many as possible to satisfy the request. The actual number
 * reserved are returned in outval.
 */
int
xfs_reserve_blocks(
	struct xfs_mount	*mp,
	uint64_t		request)
{
	int64_t			lcounter, delta;
	int64_t			fdblks_delta = 0;
	int64_t			free;
	int			error = 0;

	/*
	 * With per-cpu counters, this becomes an interesting problem. we need
	 * to work out if we are freeing or allocation blocks first, then we can
	 * do the modification as necessary.
	 *
	 * We do this under the m_sb_lock so that if we are near ENOSPC, we will
	 * hold out any changes while we work out what to do. This means that
	 * the amount of free space can change while we do this, so we need to
	 * retry if we end up trying to reserve more space than is available.
	 */
	spin_lock(&mp->m_sb_lock);

	/*
	 * If our previous reservation was larger than the current value,
	 * then move any unused blocks back to the free pool. Modify the resblks
	 * counters directly since we shouldn't have any problems unreserving
	 * space.
	 */
	if (mp->m_resblks > request) {
		lcounter = mp->m_resblks_avail - request;
		if (lcounter > 0) {		/* release unused blocks */
			fdblks_delta = lcounter;
			mp->m_resblks_avail -= lcounter;
		}
		mp->m_resblks = request;
		if (fdblks_delta) {
			spin_unlock(&mp->m_sb_lock);
			xfs_add_fdblocks(mp, fdblks_delta);
			spin_lock(&mp->m_sb_lock);
		}

		goto out;
	}

	/*
	 * If the request is larger than the current reservation, reserve the
	 * blocks before we update the reserve counters. Sample m_fdblocks and
	 * perform a partial reservation if the request exceeds free space.
	 *
	 * The code below estimates how many blocks it can request from
	 * fdblocks to stash in the reserve pool.  This is a classic TOCTOU
	 * race since fdblocks updates are not always coordinated via
	 * m_sb_lock.  Set the reserve size even if there's not enough free
	 * space to fill it because mod_fdblocks will refill an undersized
	 * reserve when it can.
	 */
	free = percpu_counter_sum(&mp->m_fdblocks) -
						xfs_fdblocks_unavailable(mp);
	delta = request - mp->m_resblks;
	mp->m_resblks = request;
	if (delta > 0 && free > 0) {
		/*
		 * We'll either succeed in getting space from the free block
		 * count or we'll get an ENOSPC.  Don't set the reserved flag
		 * here - we don't want to reserve the extra reserve blocks
		 * from the reserve.
		 *
		 * The desired reserve size can change after we drop the lock.
		 * Use mod_fdblocks to put the space into the reserve or into
		 * fdblocks as appropriate.
		 */
		fdblks_delta = min(free, delta);
		spin_unlock(&mp->m_sb_lock);
		error = xfs_dec_fdblocks(mp, fdblks_delta, 0);
		if (!error)
			xfs_add_fdblocks(mp, fdblks_delta);
		spin_lock(&mp->m_sb_lock);
	}
out:
	spin_unlock(&mp->m_sb_lock);
	return error;
}

int
xfs_fs_goingdown(
	xfs_mount_t	*mp,
	uint32_t	inflags)
{
	switch (inflags) {
	case XFS_FSOP_GOING_FLAGS_DEFAULT: {
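		/*
		 * Default shutdown path: freeze the block device first so the
		 * filesystem is synced and quiesced, force the shutdown while
		 * it is frozen, then thaw it again.  If the freeze fails, no
		 * shutdown is performed for this flag and we return 0.
		 */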
		if (!bdev_freeze(mp->m_super->s_bdev)) {
			xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
			bdev_thaw(mp->m_super->s_bdev);
		}
		break;
	}
	case XFS_FSOP_GOING_FLAGS_LOGFLUSH:
		xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
		break;
	case XFS_FSOP_GOING_FLAGS_NOLOGFLUSH:
		xfs_force_shutdown(mp,
				SHUTDOWN_FORCE_UMOUNT | SHUTDOWN_LOG_IO_ERROR);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * Force a shutdown of the filesystem instantly while keeping the filesystem
 * consistent. We don't do an unmount here; just shutdown the shop, make sure
 * that absolutely nothing persistent happens to this filesystem after this
 * point.
 *
 * The shutdown state change is atomic, resulting in the first and only the
 * first shutdown call processing the shutdown. This means we only shutdown the
 * log once as it requires, and we don't spam the logs when multiple concurrent
 * shutdowns race to set the shutdown flags.
 */
void
xfs_do_force_shutdown(
	struct xfs_mount *mp,
	uint32_t	flags,
	char		*fname,
	int		lnnum)
{
	int		tag;
	const char	*why;


	if (xfs_set_shutdown(mp)) {
		xlog_shutdown_wait(mp->m_log);
		return;
	}
	if (mp->m_sb_bp)
		mp->m_sb_bp->b_flags |= XBF_DONE;

	if (flags & SHUTDOWN_FORCE_UMOUNT)
		xfs_alert(mp, "User initiated shutdown received.");

	if (xlog_force_shutdown(mp->m_log, flags)) {
		tag = XFS_PTAG_SHUTDOWN_LOGERROR;
		why = "Log I/O Error";
	} else if (flags & SHUTDOWN_CORRUPT_INCORE) {
		tag = XFS_PTAG_SHUTDOWN_CORRUPT;
		why = "Corruption of in-memory data";
	} else if (flags & SHUTDOWN_CORRUPT_ONDISK) {
		tag = XFS_PTAG_SHUTDOWN_CORRUPT;
		why = "Corruption of on-disk metadata";
	} else if (flags & SHUTDOWN_DEVICE_REMOVED) {
		tag = XFS_PTAG_SHUTDOWN_IOERROR;
		why = "Block device removal";
	} else {
		tag = XFS_PTAG_SHUTDOWN_IOERROR;
		why = "Metadata I/O Error";
	}

	trace_xfs_force_shutdown(mp, tag, flags, fname, lnnum);

	xfs_alert_tag(mp, tag,
"%s (0x%x) detected at %pS (%s:%d).  Shutting down filesystem.",
			why, flags, __return_address, fname, lnnum);
	xfs_alert(mp,
		"Please unmount the filesystem and rectify the problem(s)");
	if (xfs_error_level >= XFS_ERRLEVEL_HIGH)
		xfs_stack_trace();
}

/*
 * Reserve free space for per-AG metadata.
 */
int
xfs_fs_reserve_ag_blocks(
	struct xfs_mount	*mp)
{
	xfs_agnumber_t		agno;
	struct xfs_perag	*pag;
	int			error = 0;
	int			err2;

	mp->m_finobt_nores = false;
	for_each_perag(mp, agno, pag) {
		err2 = xfs_ag_resv_init(pag, NULL);
		if (err2 && !error)
			error = err2;
	}

	if (error && error != -ENOSPC) {
		xfs_warn(mp,
	"Error %d reserving per-AG metadata reserve pool.", error);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	}

	return error;
}

/*
 * Free space reserved for per-AG metadata.
 */
void
xfs_fs_unreserve_ag_blocks(
	struct xfs_mount	*mp)
{
	xfs_agnumber_t		agno;
	struct xfs_perag	*pag;

	for_each_perag(mp, agno, pag)
		xfs_ag_resv_free(pag);
}
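The comment above xfs_growfs_data() notes that these entry points are exported through the XFS_IOC_FSGROWFSDATA, XFS_IOC_FSGROWFSLOG and XFS_IOC_FSGROWFSRT ioctls. The userspace sketch below is illustrative only and is not part of xfs_fsops.c: it assumes an xfsprogs development header (here <xfs/xfs.h>) that provides struct xfs_growfs_data and XFS_IOC_FSGROWFSDATA, and a hypothetical XFS mount point /mnt/scratch. In practice xfs_growfs(8) issues this ioctl for you.

/* Illustrative userspace sketch; requires CAP_SYS_ADMIN on the kernel side. */
#include <xfs/xfs.h>		/* assumed xfsprogs header: struct xfs_growfs_data, XFS_IOC_FSGROWFSDATA */
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	struct xfs_growfs_data	in;
	int			fd;

	/* Hypothetical mount point; any fd on the XFS filesystem will do. */
	fd = open("/mnt/scratch", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Grow the data section to 2^26 filesystem blocks; keep the common
	 * imaxpct default of 25 (illustrative values). */
	in.newblocks = 1ULL << 26;
	in.imaxpct = 25;

	/* Reaches xfs_growfs_data(), which takes m_growlock and calls
	 * xfs_growfs_data_private() when newblocks differs from sb_dblocks. */
	if (ioctl(fd, XFS_IOC_FSGROWFSDATA, &in) < 0)
		perror("XFS_IOC_FSGROWFSDATA");
	return 0;
}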
