Linux/fs/xfs/xfs_log_cil.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2010 Red Hat, Inc. All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_extent_busy.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_trace.h"
#include "xfs_discard.h"

/*
 * Allocate a new ticket. Failing to get a new ticket makes it really hard to
 * recover, so we don't allow failure here. Also, we allocate in a context that
 * we don't want to be issuing transactions from, so we need to tell the
 * allocation code this as well.
 *
 * We don't reserve any space for the ticket - we are going to steal whatever
 * space we require from transactions as they commit. To ensure we reserve all
 * the space required, we need to set the current reservation of the ticket to
 * zero so that we know to steal the initial transaction overhead from the
 * first transaction commit.
 */
static struct xlog_ticket *
xlog_cil_ticket_alloc(
        struct xlog     *log)
{
        struct xlog_ticket *tic;

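        /*
         * Zero reservation length, single use: all the space this ticket
         * tracks is stolen from transactions as they commit (see above).
         */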
        tic = xlog_ticket_alloc(log, 0, 1, 0);

        /*
         * set the current reservation to zero so we know to steal the basic
         * transaction overhead reservation from the first transaction commit.
         */
        tic->t_curr_res = 0;
        tic->t_iclog_hdrs = 0;
        return tic;
}

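/*
 * Set the count of iclog headers the CIL can consume: the blocking space
 * limit divided by the usable payload of each iclog (the iclog size less the
 * iclog header size).
 */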
static inline void
xlog_cil_set_iclog_hdr_count(struct xfs_cil *cil)
{
        struct xlog     *log = cil->xc_log;

        atomic_set(&cil->xc_iclog_hdrs,
                   (XLOG_CIL_BLOCKING_SPACE_LIMIT(log) /
                        (log->l_iclog_size - log->l_iclog_hsize)));
}

/*
 * Check if the current log item was first committed in this sequence.
 * We can't rely on just the log item being in the CIL, we have to check
 * the recorded commit sequence number.
 *
 * Note: for this to be used in a non-racy manner, it has to be called with
 * CIL flushing locked out. As a result, it should only be used during the
 * transaction commit process when deciding what to format into the item.
 */
static bool
xlog_item_in_current_chkpt(
        struct xfs_cil          *cil,
        struct xfs_log_item     *lip)
{
        if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
                return false;

        /*
         * li_seq is written on the first commit of a log item to record the
         * first checkpoint it is written to. Hence if it is different to the
         * current sequence, we're in a new checkpoint.
         */
        return lip->li_seq == READ_ONCE(cil->xc_current_sequence);
}

bool
xfs_log_item_in_current_chkpt(
        struct xfs_log_item *lip)
{
        return xlog_item_in_current_chkpt(lip->li_log->l_cilp, lip);
}

/*
 * Unavoidable forward declaration - xlog_cil_push_work() calls
 * xlog_cil_ctx_alloc() itself.
 */
static void xlog_cil_push_work(struct work_struct *work);

static struct xfs_cil_ctx *
xlog_cil_ctx_alloc(void)
{
        struct xfs_cil_ctx      *ctx;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL | __GFP_NOFAIL);
        INIT_LIST_HEAD(&ctx->committing);
        INIT_LIST_HEAD(&ctx->busy_extents.extent_list);
        INIT_LIST_HEAD(&ctx->log_items);
        INIT_LIST_HEAD(&ctx->lv_chain);
        INIT_WORK(&ctx->push_work, xlog_cil_push_work);
        return ctx;
}

/*
 * Aggregate the CIL per cpu structures into global counts, lists, etc and
 * clear the percpu state ready for the next context to use. This is called
 * from the push code with the context lock held exclusively, hence nothing else
 * will be accessing or modifying the per-cpu counters.
 */
static void
xlog_cil_push_pcp_aggregate(
        struct xfs_cil          *cil,
        struct xfs_cil_ctx      *ctx)
{
        struct xlog_cil_pcp     *cilpcp;
        int                     cpu;

        for_each_cpu(cpu, &ctx->cil_pcpmask) {
                cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);

                ctx->ticket->t_curr_res += cilpcp->space_reserved;
                cilpcp->space_reserved = 0;

                if (!list_empty(&cilpcp->busy_extents)) {
                        list_splice_init(&cilpcp->busy_extents,
                                        &ctx->busy_extents.extent_list);
                }
                if (!list_empty(&cilpcp->log_items))
                        list_splice_init(&cilpcp->log_items, &ctx->log_items);

                /*
                 * We're in the middle of switching cil contexts.  Reset the
                 * counter we use to detect when the current context is nearing
                 * full.
                 */
                cilpcp->space_used = 0;
        }
}

/*
 * Aggregate the CIL per-cpu space used counters into the global atomic value.
 * This is called when the per-cpu counter aggregation will first pass the soft
 * limit threshold so we can switch to atomic counter aggregation for accurate
 * detection of hard limit traversal.
 */
static void
xlog_cil_insert_pcp_aggregate(
        struct xfs_cil          *cil,
        struct xfs_cil_ctx      *ctx)
{
        struct xlog_cil_pcp     *cilpcp;
        int                     cpu;
        int                     count = 0;

        /* Trigger atomic updates then aggregate only for the first caller */
        if (!test_and_clear_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags))
                return;

        /*
         * We can race with other cpus setting cil_pcpmask.  However, we've
         * atomically cleared PCP_SPACE which forces other threads to add to
         * the global space used count.  cil_pcpmask is a superset of cilpcp
         * structures that could have a nonzero space_used.
         */
        for_each_cpu(cpu, &ctx->cil_pcpmask) {
                int     old, prev;

                cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
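                /*
                 * Atomically exchange the per-cpu counter for zero. If
                 * another CPU modifies the counter between the read and the
                 * cmpxchg, retry so that no concurrent update is lost.
                 */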
                do {
                        old = cilpcp->space_used;
                        prev = cmpxchg(&cilpcp->space_used, old, 0);
                } while (old != prev);
                count += old;
        }
        atomic_add(count, &ctx->space_used);
}

static void
xlog_cil_ctx_switch(
        struct xfs_cil          *cil,
        struct xfs_cil_ctx      *ctx)
{
        xlog_cil_set_iclog_hdr_count(cil);
        set_bit(XLOG_CIL_EMPTY, &cil->xc_flags);
        set_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags);
        ctx->sequence = ++cil->xc_current_sequence;
        ctx->cil = cil;
        cil->xc_ctx = ctx;
}

/*
 * After the first stage of log recovery is done, we know where the head and
 * tail of the log are. We need this log initialisation done before we can
 * initialise the first CIL checkpoint context.
 *
 * Here we allocate a log ticket to track space usage during a CIL push.  This
 * ticket is passed to xlog_write() directly so that we don't slowly leak log
 * space by failing to account for space used by log headers and additional
 * region headers for split regions.
 */
void
xlog_cil_init_post_recovery(
        struct xlog     *log)
{
        log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
        log->l_cilp->xc_ctx->sequence = 1;
        xlog_cil_set_iclog_hdr_count(log->l_cilp);
}

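/*
 * Space needed for the log vector header plus its iovec array, rounded up to
 * 64-bit alignment so that the data area that follows it starts naturally
 * aligned.
 */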
static inline int
xlog_cil_iovec_space(
        uint    niovecs)
{
        return round_up((sizeof(struct xfs_log_vec) +
                                        niovecs * sizeof(struct xfs_log_iovec)),
                        sizeof(uint64_t));
}

/*
 * Allocate or pin log vector buffers for CIL insertion.
 *
 * The CIL currently uses disposable buffers for copying a snapshot of the
 * modified items into the log during a push. The biggest problem with this is
 * the requirement to allocate the disposable buffer during the commit if:
 *      a) it does not exist; or
 *      b) it is too small
 *
 * If we do this allocation within xlog_cil_insert_format_items(), it is done
 * under the xc_ctx_lock, which means that a CIL push cannot occur during
 * the memory allocation. This means that we have a potential deadlock situation
 * under low memory conditions when we have lots of dirty metadata pinned in
 * the CIL and we need a CIL commit to occur to free memory.
 *
 * To avoid this, we need to move the memory allocation outside the
 * xc_ctx_lock, but because the log vector buffers are disposable, that opens
 * up a TOCTOU race condition w.r.t. the CIL committing and removing the log
 * vector buffers between the check and the formatting of the item into the
 * log vector buffer within the xc_ctx_lock.
 *
 * Because the log vector buffer needs to be unchanged during the CIL push
 * process, we cannot share the buffer between the transaction commit (which
 * modifies the buffer) and the CIL push context that is writing the changes
 * into the log. This means skipping preallocation of buffer space is
 * unreliable, but we most definitely do not want to be allocating and freeing
 * buffers unnecessarily during commits when overwrites can be done safely.
 *
 * The simplest solution to this problem is to allocate a shadow buffer when a
 * log item is committed for the second time, and then to only use this buffer
 * if necessary. The buffer can remain attached to the log item until such time
 * it is needed, and this is the buffer that is reallocated to match the size of
 * the incoming modification. Then during the formatting of the item we can swap
 * the active buffer with the new one if we can't reuse the existing buffer. We
 * don't free the old buffer as it may be reused on the next modification if
 * its size is right, otherwise we'll free and reallocate it at that point.
 *
 * This function builds a vector for the changes in each log item in the
 * transaction. It then works out the length of the buffer needed for each log
 * item, allocates them and attaches the vector to the log item in preparation
 * for the formatting step which occurs under the xc_ctx_lock.
 *
 * While this means the memory footprint goes up, it avoids the repeated
 * alloc/free pattern that repeated modifications of an item would otherwise
 * cause, and hence minimises the CPU overhead of such behaviour.
 */
static void
xlog_cil_alloc_shadow_bufs(
        struct xlog             *log,
        struct xfs_trans        *tp)
{
        struct xfs_log_item     *lip;

        list_for_each_entry(lip, &tp->t_items, li_trans) {
                struct xfs_log_vec *lv;
                int     niovecs = 0;
                int     nbytes = 0;
                int     buf_size;
                bool    ordered = false;

                /* Skip items which aren't dirty in this transaction. */
                if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
                        continue;

                /* get number of vecs and size of data to be stored */
                lip->li_ops->iop_size(lip, &niovecs, &nbytes);

                /*
                 * Ordered items need to be tracked but we do not wish to write
                 * them. We need a logvec to track the object, but we do not
                 * need an iovec or buffer to be allocated for copying data.
                 */
                if (niovecs == XFS_LOG_VEC_ORDERED) {
                        ordered = true;
                        niovecs = 0;
                        nbytes = 0;
                }

                /*
                 * We 64-bit align the length of each iovec so that the start of
                 * the next one is naturally aligned.  We'll need to account for
                 * that slack space here.
                 *
                 * We also add the xlog_op_header to each region when
                 * formatting, but that's not accounted to the size of the item
                 * at this point. Hence we'll need an additional number of bytes
                 * for each vector to hold an opheader.
                 *
                 * Then round nbytes up to 64-bit alignment so that the initial
                 * buffer alignment is easy to calculate and verify.
                 */
                nbytes += niovecs *
                        (sizeof(uint64_t) + sizeof(struct xlog_op_header));
                nbytes = round_up(nbytes, sizeof(uint64_t));

                /*
                 * The data buffer needs to start 64-bit aligned, so round up
                 * that space to ensure we can align it appropriately and not
                 * overrun the buffer.
                 */
                buf_size = nbytes + xlog_cil_iovec_space(niovecs);
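
                /*
                 * The resulting shadow buffer layout is:
                 *
                 *   [ xfs_log_vec | iovec array  | data area (nbytes) ]
                 *   |<- xlog_cil_iovec_space() ->|
                 */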

                /*
                 * if we have no shadow buffer, or it is too small, we need to
                 * reallocate it.
                 */
                if (!lip->li_lv_shadow ||
                    buf_size > lip->li_lv_shadow->lv_size) {
                        /*
                         * We free and allocate here as a realloc would copy
                         * unnecessary data. We don't use kvzalloc() for the
                         * same reason - we don't need to zero the data area in
                         * the buffer, only the log vector header and the iovec
                         * storage.
                         */
                        kvfree(lip->li_lv_shadow);
                        lv = xlog_kvmalloc(buf_size);

                        memset(lv, 0, xlog_cil_iovec_space(niovecs));

                        INIT_LIST_HEAD(&lv->lv_list);
                        lv->lv_item = lip;
                        lv->lv_size = buf_size;
                        if (ordered)
                                lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
                        else
                                lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1];
                        lip->li_lv_shadow = lv;
                } else {
                        /* same or smaller, optimise common overwrite case */
                        lv = lip->li_lv_shadow;
                        if (ordered)
                                lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
                        else
                                lv->lv_buf_len = 0;
                        lv->lv_bytes = 0;
                }

                /* Ensure the lv is set up according to ->iop_size */
                lv->lv_niovecs = niovecs;

                /* The allocated data region lies beyond the iovec region */
                lv->lv_buf = (char *)lv + xlog_cil_iovec_space(niovecs);
        }
}

/*
 * Prepare the log item for insertion into the CIL. Calculate the difference in
 * log space it will consume, and if it is a new item pin it as well.
 */
STATIC void
xfs_cil_prepare_item(
        struct xlog             *log,
        struct xfs_log_vec      *lv,
        struct xfs_log_vec      *old_lv,
        int                     *diff_len)
{
        /* Account for the new LV being passed in */
        if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED)
                *diff_len += lv->lv_bytes;

        /*
         * If there is no old LV, this is the first time we've seen the item in
         * this CIL context and so we need to pin it. If we are replacing the
         * old_lv, then remove the space it accounts for and make it the shadow
         * buffer for later freeing. In both cases we are now switching to the
         * shadow buffer, so update the pointer to it appropriately.
         */
        if (!old_lv) {
                if (lv->lv_item->li_ops->iop_pin)
                        lv->lv_item->li_ops->iop_pin(lv->lv_item);
                lv->lv_item->li_lv_shadow = NULL;
        } else if (old_lv != lv) {
                ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED);

                *diff_len -= old_lv->lv_bytes;
                lv->lv_item->li_lv_shadow = old_lv;
        }

        /* attach new log vector to log item */
        lv->lv_item->li_lv = lv;

        /*
         * If this is the first time the item is being committed to the
         * CIL, store the sequence number on the log item so we can
         * tell in future commits whether this is the first checkpoint
         * the item is being committed into.
         */
        if (!lv->lv_item->li_seq)
                lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
}

/*
 * Format a log item into a flat buffer.
 *
 * For delayed logging, we need to hold a formatted buffer containing all the
 * changes on the log item. This enables us to relog the item in memory and
 * write it out asynchronously without needing to relock the object that was
 * modified at the time it gets written into the iclog.
 *
 * This function takes the prepared log vectors attached to each log item, and
 * formats the changes into the log vector buffer. The buffer it uses is
 * dependent on the current state of the vector in the CIL - the shadow lv is
 * guaranteed to be large enough for the current modification, but we will only
 * use that if we can't reuse the existing lv. If we can't reuse the existing
 * lv, then simply swap it out for the shadow lv. We don't free it - that is
 * done lazily either by the next modification or the freeing of the log item.
 *
 * We don't set up region headers during this process; we simply copy the
 * regions into the flat buffer. We can do this because we still have to do a
 * formatting step to write the regions into the iclog buffer.  Writing the
 * ophdrs during the iclog write means that we can support splitting large
 * regions across iclog boundaries without needing a change in the format of
 * the item/region encapsulation.
 *
 * Hence what we need to do now is rewrite the vector array to point to the
 * copied region inside the buffer we just allocated. This allows us to format
 * the regions into the iclog as though they are being formatted directly out
 * of the objects themselves.
 */
static void
xlog_cil_insert_format_items(
        struct xlog             *log,
        struct xfs_trans        *tp,
        int                     *diff_len)
{
        struct xfs_log_item     *lip;

        /* Bail out if we didn't find a log item.  */
        if (list_empty(&tp->t_items)) {
                ASSERT(0);
                return;
        }

        list_for_each_entry(lip, &tp->t_items, li_trans) {
                struct xfs_log_vec *lv;
                struct xfs_log_vec *old_lv = NULL;
                struct xfs_log_vec *shadow;
                bool    ordered = false;

                /* Skip items which aren't dirty in this transaction. */
                if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
                        continue;

                /*
                 * The formatting size information is already attached to
                 * the shadow lv on the log item.
                 */
                shadow = lip->li_lv_shadow;
                if (shadow->lv_buf_len == XFS_LOG_VEC_ORDERED)
                        ordered = true;

                /* Skip items that do not have any vectors for writing */
                if (!shadow->lv_niovecs && !ordered)
                        continue;

                /* compare to existing item size */
                old_lv = lip->li_lv;
                if (lip->li_lv && shadow->lv_size <= lip->li_lv->lv_size) {
                        /* same or smaller, optimise common overwrite case */
                        lv = lip->li_lv;

                        if (ordered)
                                goto insert;

                        /*
                         * set the item up as though it is a new insertion so
                         * that the space reservation accounting is correct.
                         */
                        *diff_len -= lv->lv_bytes;

                        /* Ensure the lv is set up according to ->iop_size */
                        lv->lv_niovecs = shadow->lv_niovecs;

                        /* reset the lv buffer information for new formatting */
                        lv->lv_buf_len = 0;
                        lv->lv_bytes = 0;
                        lv->lv_buf = (char *)lv +
                                        xlog_cil_iovec_space(lv->lv_niovecs);
                } else {
                        /* switch to shadow buffer! */
                        lv = shadow;
                        lv->lv_item = lip;
                        if (ordered) {
                                /* track as an ordered logvec */
                                ASSERT(lip->li_lv == NULL);
                                goto insert;
                        }
                }

                ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t)));
                lip->li_ops->iop_format(lip, lv);
insert:
                xfs_cil_prepare_item(log, lv, old_lv, diff_len);
        }
}

/*
 * The use of lockless waitqueue_active() requires that the caller has
 * serialised itself against the wakeup call in xlog_cil_push_work(). That
 * can be done by either holding the push lock or the context lock.
 */
static inline bool
xlog_cil_over_hard_limit(
        struct xlog     *log,
        int32_t         space_used)
{
        if (waitqueue_active(&log->l_cilp->xc_push_wait))
                return true;
        if (space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log))
                return true;
        return false;
}

/*
 * Insert the log items into the CIL and calculate the difference in space
 * consumed by the item. Add the space to the checkpoint ticket and calculate
 * if the change requires additional log metadata. If it does, take that space
 * as well. Remove the amount of space we added to the checkpoint ticket from
 * the current transaction ticket so that the accounting works out correctly.
 */
static void
xlog_cil_insert_items(
        struct xlog             *log,
        struct xfs_trans        *tp,
        uint32_t                released_space)
{
        struct xfs_cil          *cil = log->l_cilp;
        struct xfs_cil_ctx      *ctx = cil->xc_ctx;
        struct xfs_log_item     *lip;
        int                     len = 0;
        int                     iovhdr_res = 0, split_res = 0, ctx_res = 0;
        int                     space_used;
        int                     order;
        unsigned int            cpu_nr;
        struct xlog_cil_pcp     *cilpcp;

        ASSERT(tp);

        /*
         * We can do this safely because the context can't checkpoint until we
         * are done so it doesn't matter exactly how we update the CIL.
         */
        xlog_cil_insert_format_items(log, tp, &len);

        /*
         * Subtract the space released by intent cancelation from the space we
         * consumed so that we remove it from the CIL space and add it back to
         * the current transaction reservation context.
         */
        len -= released_space;

        /*
         * Grab the per-cpu pointer for the CIL before we start any accounting.
         * That ensures that we are running with pre-emption disabled and so we
         * can't be scheduled away between split sample/update operations that
         * are done without outside locking to serialise them.
         */
        cpu_nr = get_cpu();
        cilpcp = this_cpu_ptr(cil->xc_pcp);

        /* Tell the future push that there was work added by this CPU. */
        if (!cpumask_test_cpu(cpu_nr, &ctx->cil_pcpmask))
                cpumask_test_and_set_cpu(cpu_nr, &ctx->cil_pcpmask);

        /*
         * We need to take the CIL checkpoint unit reservation on the first
         * commit into the CIL. Test the XLOG_CIL_EMPTY bit first so we don't
         * unnecessarily do an atomic op in the fast path here. We can clear the
         * XLOG_CIL_EMPTY bit as we are under the xc_ctx_lock here and that
         * needs to be held exclusively to reset the XLOG_CIL_EMPTY bit.
         */
        if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags) &&
            test_and_clear_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
                ctx_res = ctx->ticket->t_unit_res;

        /*
         * Check if we need to steal iclog headers. atomic_read() is not a
         * locked atomic operation, so we can check the value before we do any
         * real atomic ops in the fast path. If we've already taken the CIL unit
         * reservation from this commit, we've already got one iclog header
         * space reserved so we have to account for that otherwise we risk
         * overrunning the reservation on this ticket.
         *
         * If the CIL is already at the hard limit, we might need more header
         * space than originally reserved. So steal more header space from every
         * commit that occurs once we are over the hard limit to ensure the CIL
         * push won't run out of reservation space.
         *
         * This can steal more than we need, but that's OK.
         *
         * The cil->xc_ctx_lock provides the serialisation necessary for safely
         * calling xlog_cil_over_hard_limit() in this context.
         */
        space_used = atomic_read(&ctx->space_used) + cilpcp->space_used + len;
        if (atomic_read(&cil->xc_iclog_hdrs) > 0 ||
            xlog_cil_over_hard_limit(log, space_used)) {
                split_res = log->l_iclog_hsize +
                                        sizeof(struct xlog_op_header);
                if (ctx_res)
                        ctx_res += split_res * (tp->t_ticket->t_iclog_hdrs - 1);
                else
                        ctx_res = split_res * tp->t_ticket->t_iclog_hdrs;
                atomic_sub(tp->t_ticket->t_iclog_hdrs, &cil->xc_iclog_hdrs);
        }
        cilpcp->space_reserved += ctx_res;

        /*
         * Accurately account when over the soft limit, otherwise fold the
         * percpu count into the global count if over the per-cpu threshold.
         */
        if (!test_bit(XLOG_CIL_PCP_SPACE, &cil->xc_flags)) {
                atomic_add(len, &ctx->space_used);
        } else if (cilpcp->space_used + len >
                        (XLOG_CIL_SPACE_LIMIT(log) / num_online_cpus())) {
                space_used = atomic_add_return(cilpcp->space_used + len,
                                                &ctx->space_used);
                cilpcp->space_used = 0;

                /*
                 * If we just transitioned over the soft limit, we need to
                 * transition to the global atomic counter.
                 */
                if (space_used >= XLOG_CIL_SPACE_LIMIT(log))
                        xlog_cil_insert_pcp_aggregate(cil, ctx);
        } else {
                cilpcp->space_used += len;
        }
        /* attach the transaction to the CIL if it has any busy extents */
        if (!list_empty(&tp->t_busy))
                list_splice_init(&tp->t_busy, &cilpcp->busy_extents);

        /*
         * Now update the order of everything modified in the transaction
         * and insert items into the CIL if they aren't already there.
         * We do this here so we only need to take the CIL lock once during
         * the transaction commit.
         */
        order = atomic_inc_return(&ctx->order_id);
        list_for_each_entry(lip, &tp->t_items, li_trans) {
                /* Skip items which aren't dirty in this transaction. */
                if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
                        continue;

                lip->li_order_id = order;
                if (!list_empty(&lip->li_cil))
                        continue;
                list_add_tail(&lip->li_cil, &cilpcp->log_items);
        }
        put_cpu();

        /*
         * If we've overrun the reservation, dump the tx details before we move
         * the log items. Shutdown is imminent...
         */
        tp->t_ticket->t_curr_res -= ctx_res + len;
        if (WARN_ON(tp->t_ticket->t_curr_res < 0)) {
                xfs_warn(log->l_mp, "Transaction log reservation overrun:");
                xfs_warn(log->l_mp,
                         "  log items: %d bytes (iov hdrs: %d bytes)",
                         len, iovhdr_res);
                xfs_warn(log->l_mp, "  split region headers: %d bytes",
                         split_res);
                xfs_warn(log->l_mp, "  ctx ticket: %d bytes", ctx_res);
                xlog_print_trans(tp);
                xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
        }
}

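/*
 * Insert a batch of log items into the AIL at the given commit LSN and then
 * unpin them.
 */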
static inline void
xlog_cil_ail_insert_batch(
        struct xfs_ail          *ailp,
        struct xfs_ail_cursor   *cur,
        struct xfs_log_item     **log_items,
        int                     nr_items,
        xfs_lsn_t               commit_lsn)
{
        int     i;

        spin_lock(&ailp->ail_lock);
        /* xfs_trans_ail_update_bulk drops ailp->ail_lock */
        xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);

        for (i = 0; i < nr_items; i++) {
                struct xfs_log_item *lip = log_items[i];

                if (lip->li_ops->iop_unpin)
                        lip->li_ops->iop_unpin(lip, 0);
        }
}

/*
 * Take the checkpoint's log vector chain of items and insert the attached log
 * items into the AIL. This uses bulk insertion techniques to minimise AIL lock
 * traffic.
 *
 * The AIL tracks log items via the start record LSN of the checkpoint,
 * not the commit record LSN. This is because we can pipeline multiple
 * checkpoints, and so the start record of checkpoint N+1 can be
 * written before the commit record of checkpoint N. i.e:
 *
 *   start N                    commit N
 *      +-------------+------------+----------------+
 *                start N+1                     commit N+1
 *
 * The tail of the log cannot be moved to the LSN of commit N when all
 * the items of that checkpoint are written back, because then the
 * start record for N+1 is no longer in the active portion of the log
 * and recovery will fail/corrupt the filesystem.
 *
 * Hence when all the log items in checkpoint N are written back, the
 * tail of the log must now only move as far forwards as the start LSN
 * of checkpoint N+1.
 *
 * If we are called with the aborted flag set, it is because a log write during
 * a CIL checkpoint commit has failed. In this case, all the items in the
 * checkpoint have already gone through iop_committed and iop_committing, which
 * means that checkpoint commit abort handling is treated exactly the same as an
 * iclog write error even though we haven't started any IO yet. Hence in this
 * case all we need to do is iop_committed processing, followed by an
 * iop_unpin(aborted) call.
 *
 * The AIL cursor is used to optimise the insert process. If commit_lsn is not
 * at the end of the AIL, the insert cursor avoids the need to walk the AIL to
 * find the insertion point on every xfs_log_item_batch_insert() call. This
 * saves a lot of needless list walking and is a net win, even though it
 * slightly increases the amount of AIL lock traffic to set it up and tear it
 * down.
 */
static void
xlog_cil_ail_insert(
        struct xfs_cil_ctx      *ctx,
        bool                    aborted)
{
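/* Batching amortises each AIL lock acquisition over many item insertions. */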
#define LOG_ITEM_BATCH_SIZE     32
        struct xfs_ail          *ailp = ctx->cil->xc_log->l_ailp;
        struct xfs_log_item     *log_items[LOG_ITEM_BATCH_SIZE];
        struct xfs_log_vec      *lv;
        struct xfs_ail_cursor   cur;
        xfs_lsn_t               old_head;
        int                     i = 0;

        /*
         * Update the AIL head LSN with the commit record LSN of this
         * checkpoint. As iclogs are always completed in order, this should
         * always be the same (as iclogs can contain multiple commit records) or
         * higher LSN than the current head. We do this before insertion of the
         * items so that log space checks during insertion will reflect the
         * space that this checkpoint has already consumed.  We call
         * xfs_ail_update_finish() so that tail space and space-based wakeups
         * will be recalculated appropriately.
         */
        ASSERT(XFS_LSN_CMP(ctx->commit_lsn, ailp->ail_head_lsn) >= 0 ||
                        aborted);
        spin_lock(&ailp->ail_lock);
        xfs_trans_ail_cursor_last(ailp, &cur, ctx->start_lsn);
        old_head = ailp->ail_head_lsn;
        ailp->ail_head_lsn = ctx->commit_lsn;
        /* xfs_ail_update_finish() drops the ail_lock */
        xfs_ail_update_finish(ailp, NULLCOMMITLSN);

        /*
         * We move the AIL head forwards to account for the space used in the
         * log before we remove that space from the grant heads. This prevents a
         * transient condition where reservation space appears to become
         * available on return, only for it to disappear again immediately as
         * the AIL head update accounts in the log tail space.
         */
        smp_wmb();      /* paired with smp_rmb in xlog_grant_space_left */
        xlog_grant_return_space(ailp->ail_log, old_head, ailp->ail_head_lsn);

        /* unpin all the log items */
        list_for_each_entry(lv, &ctx->lv_chain, lv_list) {
                struct xfs_log_item     *lip = lv->lv_item;
                xfs_lsn_t               item_lsn;

                if (aborted)
                        set_bit(XFS_LI_ABORTED, &lip->li_flags);

                if (lip->li_ops->flags & XFS_ITEM_RELEASE_WHEN_COMMITTED) {
                        lip->li_ops->iop_release(lip);
                        continue;
                }

                if (lip->li_ops->iop_committed)
                        item_lsn = lip->li_ops->iop_committed(lip,
                                        ctx->start_lsn);
                else
                        item_lsn = ctx->start_lsn;

                /* item_lsn of -1 means the item needs no further processing */
                if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
                        continue;

                /*
                 * if we are aborting the operation, no point in inserting the
                 * object into the AIL as we are in a shutdown situation.
                 */
                if (aborted) {
                        ASSERT(xlog_is_shutdown(ailp->ail_log));
                        if (lip->li_ops->iop_unpin)
                                lip->li_ops->iop_unpin(lip, 1);
                        continue;
                }

                if (item_lsn != ctx->start_lsn) {

                        /*
                         * Not a bulk update option due to unusual item_lsn.
                         * Push into AIL immediately, rechecking the lsn once
                         * we have the ail lock. Then unpin the item. This does
                         * not affect the AIL cursor the bulk insert path is
                         * using.
                         */
                        spin_lock(&ailp->ail_lock);
                        if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
                                xfs_trans_ail_update(ailp, lip, item_lsn);
                        else
                                spin_unlock(&ailp->ail_lock);
                        if (lip->li_ops->iop_unpin)
                                lip->li_ops->iop_unpin(lip, 0);
                        continue;
                }

                /* Item is a candidate for bulk AIL insert.  */
                log_items[i++] = lv->lv_item;
                if (i >= LOG_ITEM_BATCH_SIZE) {
                        xlog_cil_ail_insert_batch(ailp, &cur, log_items,
                                        LOG_ITEM_BATCH_SIZE, ctx->start_lsn);
                        i = 0;
                }
        }

        /* make sure we insert the remainder! */
        if (i)
                xlog_cil_ail_insert_batch(ailp, &cur, log_items, i,
                                ctx->start_lsn);

        spin_lock(&ailp->ail_lock);
        xfs_trans_ail_cursor_done(&cur);
        spin_unlock(&ailp->ail_lock);
}

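/* Free all the log vector buffers on a checkpoint's log vector chain. */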
static void
xlog_cil_free_logvec(
        struct list_head        *lv_chain)
{
        struct xfs_log_vec      *lv;

        while (!list_empty(lv_chain)) {
                lv = list_first_entry(lv_chain, struct xfs_log_vec, lv_list);
                list_del_init(&lv->lv_list);
                kvfree(lv);
        }
}

/*
 * Mark all items committed and clear busy extents. We free the log vector
 * chains in a separate pass so that we unpin the log items as quickly as
 * possible.
 */
static void
xlog_cil_committed(
        struct xfs_cil_ctx      *ctx)
{
        struct xfs_mount        *mp = ctx->cil->xc_log->l_mp;
        bool                    abort = xlog_is_shutdown(ctx->cil->xc_log);

        /*
         * If the I/O failed, we're aborting the commit and already shutdown.
         * Wake any commit waiters before aborting the log items so we don't
         * block async log pushers on callbacks. Async log pushers explicitly do
         * not wait on log force completion because they may be holding locks
         * required to unpin items.
         */
        if (abort) {
                spin_lock(&ctx->cil->xc_push_lock);
                wake_up_all(&ctx->cil->xc_start_wait);
                wake_up_all(&ctx->cil->xc_commit_wait);
                spin_unlock(&ctx->cil->xc_push_lock);
        }

        xlog_cil_ail_insert(ctx, abort);

        xfs_extent_busy_sort(&ctx->busy_extents.extent_list);
        xfs_extent_busy_clear(mp, &ctx->busy_extents.extent_list,
                              xfs_has_discard(mp) && !abort);

        spin_lock(&ctx->cil->xc_push_lock);
        list_del(&ctx->committing);
        spin_unlock(&ctx->cil->xc_push_lock);

        xlog_cil_free_logvec(&ctx->lv_chain);

        if (!list_empty(&ctx->busy_extents.extent_list)) {
                ctx->busy_extents.mount = mp;
                ctx->busy_extents.owner = ctx;
                xfs_discard_extents(mp, &ctx->busy_extents);
                return;
        }

        kfree(ctx);
}

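/* Run committed processing for each checkpoint context on the given list. */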
void
xlog_cil_process_committed(
        struct list_head        *list)
{
        struct xfs_cil_ctx      *ctx;

        while ((ctx = list_first_entry_or_null(list,
                        struct xfs_cil_ctx, iclog_entry))) {
                list_del(&ctx->iclog_entry);
                xlog_cil_committed(ctx);
        }
}

/*
 * Record the LSN of the iclog we were just granted space to start writing into.
 * If the context doesn't have a start_lsn recorded, then this iclog will
 * contain the start record for the checkpoint. Otherwise this write contains
 * the commit record for the checkpoint.
 */
void
xlog_cil_set_ctx_write_state(
        struct xfs_cil_ctx      *ctx,
        struct xlog_in_core     *iclog)
{
        struct xfs_cil          *cil = ctx->cil;
        xfs_lsn_t               lsn = be64_to_cpu(iclog->ic_header.h_lsn);

        ASSERT(!ctx->commit_lsn);
        if (!ctx->start_lsn) {
                spin_lock(&cil->xc_push_lock);
                /*
                 * The LSN we need to pass to the log items on transaction
                 * commit is the LSN reported by the first log vector write, not
                 * the commit lsn. If we use the commit record lsn then we can
                 * move the grant write head beyond the tail LSN and overwrite
                 * it.
                 */
                ctx->start_lsn = lsn;
                wake_up_all(&cil->xc_start_wait);
                spin_unlock(&cil->xc_push_lock);

                /*
                 * Make sure the metadata we are about to overwrite in the log
                 * has been flushed to stable storage before this iclog is
                 * issued.
                 */
                spin_lock(&cil->xc_log->l_icloglock);
                iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
                spin_unlock(&cil->xc_log->l_icloglock);
                return;
        }

        /*
         * Take a reference to the iclog for the context so that we still hold
         * it when xlog_write is done and has released it. This means the
         * context controls when the iclog is released for IO.
         */
        atomic_inc(&iclog->ic_refcnt);

        /*
         * xlog_state_get_iclog_space() guarantees there is enough space in the
         * iclog for an entire commit record, so we can attach the context
         * callbacks now.  This needs to be done before we make the commit_lsn
         * visible to waiters so that checkpoints with commit records in the
         * same iclog order their IO completion callbacks in the same order that
         * the commit records appear in the iclog.
         */
        spin_lock(&cil->xc_log->l_icloglock);
        list_add_tail(&ctx->iclog_entry, &iclog->ic_callbacks);
        spin_unlock(&cil->xc_log->l_icloglock);

        /*
         * Now we can record the commit LSN and wake anyone waiting for this
         * sequence to have the ordered commit record assigned to a physical
         * location in the log.
         */
        spin_lock(&cil->xc_push_lock);
        ctx->commit_iclog = iclog;
        ctx->commit_lsn = lsn;
        wake_up_all(&cil->xc_commit_wait);
        spin_unlock(&cil->xc_push_lock);
}


/*
 * Ensure that the order of log writes follows checkpoint sequence order. This
 * relies on the context LSN being zero until the log write has guaranteed the
 * LSN that the log write will start at via xlog_state_get_iclog_space().
 */
enum _record_type {
        _START_RECORD,
        _COMMIT_RECORD,
};

static int
xlog_cil_order_write(
        struct xfs_cil          *cil,
        xfs_csn_t               sequence,
        enum _record_type       record)
{
        struct xfs_cil_ctx      *ctx;

restart:
        spin_lock(&cil->xc_push_lock);
        list_for_each_entry(ctx, &cil->xc_committing, committing) {
                /*
                 * Avoid getting stuck in this loop because we were woken by the
                 * shutdown, but then went back to sleep once already in the
                 * shutdown state.
                 */
                if (xlog_is_shutdown(cil->xc_log)) {
                        spin_unlock(&cil->xc_push_lock);
                        return -EIO;
                }

                /*
                 * Higher sequences will wait for this one so skip them.
                 * Don't wait for our own sequence, either.
                 */
                if (ctx->sequence >= sequence)
                        continue;

                /* Wait until the LSN for the record has been recorded. */
                switch (record) {
                case _START_RECORD:
                        if (!ctx->start_lsn) {
                                xlog_wait(&cil->xc_start_wait, &cil->xc_push_lock);
                                goto restart;
                        }
                        break;
                case _COMMIT_RECORD:
                        if (!ctx->commit_lsn) {
                                xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
                                goto restart;
                        }
                        break;
                }
        }
        spin_unlock(&cil->xc_push_lock);
        return 0;
}

/*
 * Write out the log vector change now attached to the CIL context. This will
 * write a start record that needs to be strictly ordered in ascending CIL
 * sequence order so that log recovery will always use in-order start LSNs when
 * replaying checkpoints.
 */
static int
xlog_cil_write_chain(
        struct xfs_cil_ctx      *ctx,
        uint32_t                chain_len)
{
        struct xlog             *log = ctx->cil->xc_log;
        int                     error;

        error = xlog_cil_order_write(ctx->cil, ctx->sequence, _START_RECORD);
        if (error)
                return error;
        return xlog_write(log, ctx, &ctx->lv_chain, ctx->ticket, chain_len);
}

/*
 * Write out the commit record of a checkpoint transaction to close off a
 * running log write. These commit records are strictly ordered in ascending CIL
 * sequence order so that log recovery will always replay the checkpoints in the
 * correct order.
 */
static int
xlog_cil_write_commit_record(
        struct xfs_cil_ctx      *ctx)
{
        struct xlog             *log = ctx->cil->xc_log;
        struct xlog_op_header   ophdr = {
                .oh_clientid = XFS_TRANSACTION,
                .oh_tid = cpu_to_be32(ctx->ticket->t_tid),
                .oh_flags = XLOG_COMMIT_TRANS,
        };
        struct xfs_log_iovec    reg = {
                .i_addr = &ophdr,
                .i_len = sizeof(struct xlog_op_header),
                .i_type = XLOG_REG_TYPE_COMMIT,
        };
        struct xfs_log_vec      vec = {
                .lv_niovecs = 1,
                .lv_iovecp = &reg,
        };
        int                     error;
        LIST_HEAD(lv_chain);
        list_add(&vec.lv_list, &lv_chain);

        if (xlog_is_shutdown(log))
                return -EIO;

        error = xlog_cil_order_write(ctx->cil, ctx->sequence, _COMMIT_RECORD);
        if (error)
                return error;

        /* account for space used by record data */
        ctx->ticket->t_curr_res -= reg.i_len;
        error = xlog_write(log, ctx, &lv_chain, ctx->ticket, reg.i_len);
        if (error)
                xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
        return error;
}

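/*
 * The checkpoint transaction header: an opheader for the start record, an
 * opheader wrapping the transaction header itself, and the two iovecs that
 * describe those regions to xlog_write().
 */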
struct xlog_cil_trans_hdr {
        struct xlog_op_header   oph[2];
        struct xfs_trans_header thdr;
        struct xfs_log_iovec    lhdr[2];
};

/*
 * Build a checkpoint transaction header to begin the journal transaction.  We
 * need to account for the space used by the transaction header here as it is
 * not accounted for in xlog_write().
 *
 * This is the only place we write a transaction header, so we also build the
 * log opheaders that indicate the start of a log transaction and wrap the
 * transaction header. We keep the start record in its own log vector rather
 * than compacting them into a single region as this ends up making the logic
 * in xlog_write() for handling empty opheaders for start, commit and unmount
 * records much simpler.
 */
static void
xlog_cil_build_trans_hdr(
        struct xfs_cil_ctx      *ctx,
        struct xlog_cil_trans_hdr *hdr,
        struct xfs_log_vec      *lvhdr,
        int                     num_iovecs)
{
        struct xlog_ticket      *tic = ctx->ticket;
        __be32                  tid = cpu_to_be32(tic->t_tid);

        memset(hdr, 0, sizeof(*hdr));

        /* Log start record */
        hdr->oph[0].oh_tid = tid;
        hdr->oph[0].oh_clientid = XFS_TRANSACTION;
        hdr->oph[0].oh_flags = XLOG_START_TRANS;

        /* log iovec region pointer */
        hdr->lhdr[0].i_addr = &hdr->oph[0];
        hdr->lhdr[0].i_len = sizeof(struct xlog_op_header);
        hdr->lhdr[0].i_type = XLOG_REG_TYPE_LRHEADER;

        /* log opheader */
        hdr->oph[1].oh_tid = tid;
        hdr->oph[1].oh_clientid = XFS_TRANSACTION;
        hdr->oph[1].oh_len = cpu_to_be32(sizeof(struct xfs_trans_header));

        /* transaction header in host byte order format */
        hdr->thdr.th_magic = XFS_TRANS_HEADER_MAGIC;
        hdr->thdr.th_type = XFS_TRANS_CHECKPOINT;
        hdr->thdr.th_tid = tic->t_tid;
        hdr->thdr.th_num_items = num_iovecs;

        /* log iovec region pointer */
        hdr->lhdr[1].i_addr = &hdr->oph[1];
        hdr->lhdr[1].i_len = sizeof(struct xlog_op_header) +
                                sizeof(struct xfs_trans_header);
        hdr->lhdr[1].i_type = XLOG_REG_TYPE_TRANSHDR;

        lvhdr->lv_niovecs = 2;
        lvhdr->lv_iovecp = &hdr->lhdr[0];
        lvhdr->lv_bytes = hdr->lhdr[0].i_len + hdr->lhdr[1].i_len;

        tic->t_curr_res -= lvhdr->lv_bytes;
}

/*
 * CIL item reordering compare function. We want to order in ascending ID order,
 * but we want to leave items with the same ID in the order they were added to
 * the list. This is important for operations like reflink where we log four
 * order-dependent intents in a single transaction when we overwrite an existing
 * shared extent with a new shared extent. i.e. BUI(unmap), CUI(drop),
 * CUI(inc), BUI(remap)...
 *
 * list_sort() is a stable sort, so returning 0 for items with equal order IDs
 * is what preserves their insertion order.
 */
static int
xlog_cil_order_cmp(
        void                    *priv,
        const struct list_head  *a,
        const struct list_head  *b)
{
        struct xfs_log_vec      *l1 = container_of(a, struct xfs_log_vec, lv_list);
        struct xfs_log_vec      *l2 = container_of(b, struct xfs_log_vec, lv_list);

        return l1->lv_order_id > l2->lv_order_id;
}

/*
 * Pull all the log vectors off the items in the CIL, and remove the items from
 * the CIL. We don't need the CIL lock here because it's only needed on the
 * transaction commit side which is currently locked out by the flush lock.
 *
 * If a log item is marked with a whiteout, we do not need to write it to the
 * journal and so we just move it to the whiteout list for the caller to
 * dispose of appropriately.
 */
static void
xlog_cil_build_lv_chain(
        struct xfs_cil_ctx      *ctx,
        struct list_head        *whiteouts,
        uint32_t                *num_iovecs,
        uint32_t                *num_bytes)
{
        while (!list_empty(&ctx->log_items)) {
                struct xfs_log_item     *item;
                struct xfs_log_vec      *lv;

                item = list_first_entry(&ctx->log_items,
                                        struct xfs_log_item, li_cil);

                if (test_bit(XFS_LI_WHITEOUT, &item->li_flags)) {
                        list_move(&item->li_cil, whiteouts);
                        trace_xfs_cil_whiteout_skip(item);
                        continue;
                }

                lv = item->li_lv;
                lv->lv_order_id = item->li_order_id;

                /* we don't write ordered log vectors */
                if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED)
                        *num_bytes += lv->lv_bytes;
                *num_iovecs += lv->lv_niovecs;
                list_add_tail(&lv->lv_list, &ctx->lv_chain);

                list_del_init(&item->li_cil);
                item->li_order_id = 0;
                item->li_lv = NULL;
        }
}

static void
xlog_cil_cleanup_whiteouts(
        struct list_head        *whiteouts)
{
        while (!list_empty(whiteouts)) {
                struct xfs_log_item *item = list_first_entry(whiteouts,
                                                struct xfs_log_item, li_cil);
                list_del_init(&item->li_cil);
                trace_xfs_cil_whiteout_unpin(item);
                item->li_ops->iop_unpin(item, 1);
        }
}

1279 /*
1280  * Push the Committed Item List to the log.
1281  *
1282  * If the current sequence is the same as xc_push_seq we need to do a flush. If
1283  * xc_push_seq is less than the current sequence, then it has already been
1284  * flushed and we don't need to do anything - the caller will wait for it to
1285  * complete if necessary.
1286  *
1287  * xc_push_seq is checked unlocked against the sequence number for a match.
1288  * Hence we can allow log forces to run racily and not issue pushes for the
1289  * same sequence twice.  If we get a race between multiple pushes for the same
1290  * sequence they will block on the first one and then abort, hence avoiding
1291  * needless pushes.
1292  *
1293  * This runs from a workqueue so it does not inherit any specific memory
1294  * allocation context. However, we do not want to block on memory reclaim
1295  * recursing back into the filesystem because this push may have been triggered
1296  * by memory reclaim itself. Hence we really need to run under full GFP_NOFS
1297  * constraints here.
1298  */
1299 static void
1300 xlog_cil_push_work(
1301         struct work_struct      *work)
1302 {
1303         unsigned int            nofs_flags = memalloc_nofs_save();
1304         struct xfs_cil_ctx      *ctx =
1305                 container_of(work, struct xfs_cil_ctx, push_work);
1306         struct xfs_cil          *cil = ctx->cil;
1307         struct xlog             *log = cil->xc_log;
1308         struct xfs_cil_ctx      *new_ctx;
1309         int                     num_iovecs = 0;
1310         int                     num_bytes = 0;
1311         int                     error = 0;
1312         struct xlog_cil_trans_hdr thdr;
1313         struct xfs_log_vec      lvhdr = {};
1314         xfs_csn_t               push_seq;
1315         bool                    push_commit_stable;
1316         LIST_HEAD               (whiteouts);
1317         struct xlog_ticket      *ticket;
1318 
1319         new_ctx = xlog_cil_ctx_alloc();
1320         new_ctx->ticket = xlog_cil_ticket_alloc(log);
1321 
1322         down_write(&cil->xc_ctx_lock);
1323 
1324         spin_lock(&cil->xc_push_lock);
1325         push_seq = cil->xc_push_seq;
1326         ASSERT(push_seq <= ctx->sequence);
1327         push_commit_stable = cil->xc_push_commit_stable;
1328         cil->xc_push_commit_stable = false;
1329 
1330         /*
1331          * As we are about to switch to a new, empty CIL context, we no longer
1332          * need to throttle tasks on CIL space overruns. Wake any waiters that
1333          * the hard push throttle may have caught so they can start committing
1334          * to the new context. The cil->xc_push_lock provides the serialisation
1335          * necessary for safely using the lockless waitqueue_active() check in
1336          * this context.
1337          */
1338         if (waitqueue_active(&cil->xc_push_wait))
1339                 wake_up_all(&cil->xc_push_wait);
1340 
1341         xlog_cil_push_pcp_aggregate(cil, ctx);
1342 
1343         /*
1344          * Check if we've anything to push. If there is nothing, then we don't
1345          * move on to a new sequence number and so we have to be able to push
1346          * this sequence again later.
1347          */
1348         if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) {
1349                 cil->xc_push_seq = 0;
1350                 spin_unlock(&cil->xc_push_lock);
1351                 goto out_skip;
1352         }
1353 
1354 
1355         /* check for a previously pushed sequence */
1356         if (push_seq < ctx->sequence) {
1357                 spin_unlock(&cil->xc_push_lock);
1358                 goto out_skip;
1359         }
1360 
1361         /*
1362          * We are now going to push this context, so add it to the committing
1363          * list before we do anything else. This ensures that anyone waiting on
1364          * this push can easily detect the difference between a "push in
1365          * progress" and "CIL is empty, nothing to do".
1366          *
1367          * IOWs, a wait loop can now check for:
1368          *      the current sequence not being found on the committing list;
1369          *      an empty CIL; and
1370          *      an unchanged sequence number
1371          * to detect a push that had nothing to do and therefore does not need
1372          * waiting on. If the CIL is not empty, we get put on the committing
1373          * list before emptying the CIL and bumping the sequence number. Hence
1374          * an empty CIL and an unchanged sequence number means we jumped out
1375          * above after doing nothing.
1376          *
1377          * Hence the waiter will either find the commit sequence on the
1378          * committing list or the sequence number will be unchanged and the CIL
1379          * still dirty. In that latter case, the push has not yet started, and
1380          * so the waiter will have to continue trying to check the CIL
1381          * committing list until it is found. In extreme cases of delay, the
1382          * sequence may fully commit between the attempts the waiter makes to wait
1383          * on the commit sequence.
1384          */
1385         list_add(&ctx->committing, &cil->xc_committing);
1386         spin_unlock(&cil->xc_push_lock);
1387 
1388         xlog_cil_build_lv_chain(ctx, &whiteouts, &num_iovecs, &num_bytes);
1389 
1390         /*
1391          * Switch the contexts so we can drop the context lock and move out
1392          * of a shared context. We can't just go straight to the commit record,
1393          * though - we need to synchronise with previous and future commits so
1394          * that the commit records are correctly ordered in the log to ensure
1395          * that we process items during log IO completion in the correct order.
1396          *
1397          * For example, if we get an EFI in one checkpoint and the EFD in the
1398          * next (e.g. due to log forces), we do not want the checkpoint with
1399          * the EFD to be committed before the checkpoint with the EFI.  Hence
1400          * we must strictly order the commit records of the checkpoints so
1401          * that: a) the checkpoint callbacks are attached to the iclogs in the
1402          * correct order; and b) the checkpoints are replayed in correct order
1403          * in log recovery.
1404          *
1405          * Hence we need to add this context to the committing context list so
1406          * that higher sequences will wait for us to write out a commit record
1407          * before they do.
1408          *
1409          * xfs_log_force_seq requires us to mirror the new sequence into the cil
1410          * structure atomically with the addition of this sequence to the
1411          * committing list. This also ensures that we can do unlocked checks
1412          * against the current sequence in log forces without risking
1413          * dereferencing a freed context pointer.
1414          */
1415         spin_lock(&cil->xc_push_lock);
1416         xlog_cil_ctx_switch(cil, new_ctx);
1417         spin_unlock(&cil->xc_push_lock);
1418         up_write(&cil->xc_ctx_lock);
1419 
1420         /*
1421          * Sort the log vector chain before we add the transaction headers.
1422          * This ensures we always have the transaction headers at the start
1423          * of the chain.
1424          */
1425         list_sort(NULL, &ctx->lv_chain, xlog_cil_order_cmp);
1426 
1427         /*
1428          * Build a checkpoint transaction header and write it to the log to
1429          * begin the transaction. We need to account for the space used by the
1430          * transaction header here as it is not accounted for in xlog_write().
1431          * Add the lvhdr to the head of the lv chain we pass to xlog_write() so
1432          * it gets written into the iclog first.
1433          */
1434         xlog_cil_build_trans_hdr(ctx, &thdr, &lvhdr, num_iovecs);
1435         num_bytes += lvhdr.lv_bytes;
1436         list_add(&lvhdr.lv_list, &ctx->lv_chain);
1437 
1438         /*
1439          * Take the lvhdr back off the lv_chain immediately after calling
1440          * xlog_cil_write_chain() as it should not be passed to log IO
1441          * completion.
1442          */
1443         error = xlog_cil_write_chain(ctx, num_bytes);
1444         list_del(&lvhdr.lv_list);
1445         if (error)
1446                 goto out_abort_free_ticket;
1447 
1448         error = xlog_cil_write_commit_record(ctx);
1449         if (error)
1450                 goto out_abort_free_ticket;
1451 
1452         /*
1453          * Grab the ticket from the ctx so we can ungrant it after releasing the
1454          * commit_iclog. The ctx may be freed by the time we return from
1455          * releasing the commit_iclog (i.e. checkpoint has been completed and
1456          * callback run) so we can't reference the ctx after the call to
1457          * xlog_state_release_iclog().
1458          */
1459         ticket = ctx->ticket;
1460 
1461         /*
1462          * If the checkpoint spans multiple iclogs, wait for all previous iclogs
1463          * to complete before we submit the commit_iclog. We can't use state
1464          * checks for this - ACTIVE can be either a past completed iclog or a
1465          * future iclog being filled, while WANT_SYNC through SYNC_DONE can be a
1466          * past or future iclog awaiting IO or ordered IO completion to be run.
1467          * In the latter case, if it's a future iclog and we wait on it, then we
1468          * will hang because it won't get processed through to ic_force_wait
1469          * wakeup until this commit_iclog is written to disk.  Hence we use the
1470          * iclog header lsn and compare it to the commit lsn to determine if we
1471          * need to wait on iclogs or not.
1472          */
1473         spin_lock(&log->l_icloglock);
1474         if (ctx->start_lsn != ctx->commit_lsn) {
1475                 xfs_lsn_t       plsn;
1476 
1477                 plsn = be64_to_cpu(ctx->commit_iclog->ic_prev->ic_header.h_lsn);
1478                 if (plsn && XFS_LSN_CMP(plsn, ctx->commit_lsn) < 0) {
1479                         /*
1480                          * Waiting on ic_force_wait orders the completion of
1481                          * iclogs older than ic_prev. Hence we only need to wait
1482                          * on the most recent older iclog here.
1483                          */
1484                         xlog_wait_on_iclog(ctx->commit_iclog->ic_prev);
1485                         spin_lock(&log->l_icloglock);
1486                 }
1487 
1488                 /*
1489                  * We need to issue a pre-flush so that the ordering for this
1490                  * checkpoint is correctly preserved down to stable storage.
1491                  */
1492                 ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FLUSH;
1493         }
1494 
1495         /*
1496          * The commit iclog must be written to stable storage to guarantee
1497          * journal IO vs metadata writeback IO is correctly ordered on stable
1498          * storage.
1499          *
1500          * If the push caller needs the commit to be immediately stable and the
1501          * commit_iclog is not yet marked as XLOG_STATE_WANT_SYNC to indicate it
1502          * will be written when released, switch its state to WANT_SYNC right
1503          * now.
1504          */
1505         ctx->commit_iclog->ic_flags |= XLOG_ICL_NEED_FUA;
1506         if (push_commit_stable &&
1507             ctx->commit_iclog->ic_state == XLOG_STATE_ACTIVE)
1508                 xlog_state_switch_iclogs(log, ctx->commit_iclog, 0);
1509 
1510         xlog_state_release_iclog(log, ctx->commit_iclog, ticket);
1511 
1512         /* Not safe to reference ctx now! */
1513 
1514         spin_unlock(&log->l_icloglock);
1515         xlog_cil_cleanup_whiteouts(&whiteouts);
1516         xfs_log_ticket_ungrant(log, ticket);
1517         memalloc_nofs_restore(nofs_flags);
1518         return;
1519 
1520 out_skip:
1521         up_write(&cil->xc_ctx_lock);
1522         xfs_log_ticket_put(new_ctx->ticket);
1523         kfree(new_ctx);
1524         memalloc_nofs_restore(nofs_flags);
1525         return;
1526 
1527 out_abort_free_ticket:
1528         ASSERT(xlog_is_shutdown(log));
1529         xlog_cil_cleanup_whiteouts(&whiteouts);
1530         if (!ctx->commit_iclog) {
1531                 xfs_log_ticket_ungrant(log, ctx->ticket);
1532                 xlog_cil_committed(ctx);
1533                 memalloc_nofs_restore(nofs_flags);
1534                 return;
1535         }
1536         spin_lock(&log->l_icloglock);
1537         ticket = ctx->ticket;
1538         xlog_state_release_iclog(log, ctx->commit_iclog, ticket);
1539         /* Not safe to reference ctx now! */
1540         spin_unlock(&log->l_icloglock);
1541         xfs_log_ticket_ungrant(log, ticket);
1542         memalloc_nofs_restore(nofs_flags);
1543 }
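/*
 * A userspace sketch of the multi-iclog ordering decision made near the end
 * of xlog_cil_push_work(). The LSNs here are plain integers rather than the
 * kernel's cycle/block encoding, and the helpers are stand-ins; the shape of
 * the test is the point: a checkpoint confined to one iclog needs no extra
 * ordering, while one that spans iclogs must wait for any strictly older
 * iclog still in flight and must pre-flush before the commit record.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_ctx {
        long    start_lsn;
        long    commit_lsn;
        long    prev_iclog_lsn; /* 0 if the previous iclog carries nothing */
};

static void toy_order_commit_record(const struct toy_ctx *ctx,
                                    bool *wait_on_prev, bool *need_flush)
{
        *wait_on_prev = false;
        *need_flush = false;

        if (ctx->start_lsn == ctx->commit_lsn)
                return;         /* single iclog: ordering comes for free */

        /* only wait on iclogs strictly older than the commit record */
        if (ctx->prev_iclog_lsn && ctx->prev_iclog_lsn < ctx->commit_lsn)
                *wait_on_prev = true;
        *need_flush = true;     /* preserve ordering down to stable media */
}

int main(void)
{
        const struct toy_ctx ctx = {
                .start_lsn = 100, .commit_lsn = 300, .prev_iclog_lsn = 200,
        };
        bool wait, flush;

        toy_order_commit_record(&ctx, &wait, &flush);
        printf("wait=%d flush=%d\n", wait, flush);      /* wait=1 flush=1 */
        return 0;
}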
1544 
1545 /*
1546  * We need to push the CIL every so often so we don't cache more than we can fit in
1547  * the log. The limit really is that a checkpoint can't be more than half the
1548  * log (the current checkpoint is not allowed to overwrite the previous
1549  * checkpoint), but commit latency and memory usage limit this to a smaller
1550  * size.
1551  */
1552 static void
1553 xlog_cil_push_background(
1554         struct xlog     *log)
1555 {
1556         struct xfs_cil  *cil = log->l_cilp;
1557         int             space_used = atomic_read(&cil->xc_ctx->space_used);
1558 
1559         /*
1560          * The CIL won't be empty because we are called while holding the
1561          * context lock so whatever we added to the CIL will still be there.
1562          */
1563         ASSERT(!test_bit(XLOG_CIL_EMPTY, &cil->xc_flags));
1564 
1565         /*
1566          * We are done if either:
1567          * - we haven't used up all the space available yet; or
1568          * - we've already queued up a push, and
1569          *   we're not over the hard limit, and
1570          *   nothing has been over the hard limit.
1571          *
1572          * If so, we don't need to take the push lock as there's nothing to do.
1573          */
1574         if (space_used < XLOG_CIL_SPACE_LIMIT(log) ||
1575             (cil->xc_push_seq == cil->xc_current_sequence &&
1576              space_used < XLOG_CIL_BLOCKING_SPACE_LIMIT(log) &&
1577              !waitqueue_active(&cil->xc_push_wait))) {
1578                 up_read(&cil->xc_ctx_lock);
1579                 return;
1580         }
1581 
1582         spin_lock(&cil->xc_push_lock);
1583         if (cil->xc_push_seq < cil->xc_current_sequence) {
1584                 cil->xc_push_seq = cil->xc_current_sequence;
1585                 queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work);
1586         }
1587 
1588         /*
1589          * Drop the context lock now, we can't hold that if we need to sleep
1590          * because we are over the blocking threshold. The push_lock is still
1591          * held, so blocking threshold sleep/wakeup is still correctly
1592          * serialised here.
1593          */
1594         up_read(&cil->xc_ctx_lock);
1595 
1596         /*
1597          * If we are well over the space limit, throttle the work that is being
1598          * done until the push work on this context has begun. Enforce the hard
1599          * throttle on all transaction commits once it has been activated, even
1600          * if the committing transactions have resulted in the space usage
1601          * dipping back down under the hard limit.
1602          *
1603          * The cil->xc_push_lock provides the serialisation necessary for safely
1604          * calling xlog_cil_over_hard_limit() in this context.
1605          */
1606         if (xlog_cil_over_hard_limit(log, space_used)) {
1607                 trace_xfs_log_cil_wait(log, cil->xc_ctx->ticket);
1608                 ASSERT(space_used < log->l_logsize);
1609                 xlog_wait(&cil->xc_push_wait, &cil->xc_push_lock);
1610                 return;
1611         }
1612 
1613         spin_unlock(&cil->xc_push_lock);
1614 
1615 }
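/*
 * A userspace model of the two-watermark policy implemented above. The real
 * limits (XLOG_CIL_SPACE_LIMIT() and XLOG_CIL_BLOCKING_SPACE_LIMIT()) are
 * derived from the log size; the fractions below are made up purely for
 * illustration. Under the soft limit nothing happens, between the limits a
 * background push is queued, and at or over the hard limit committers are
 * throttled until the push starts.
 */
#include <stdio.h>

enum toy_action { TOY_NONE, TOY_QUEUE_PUSH, TOY_QUEUE_AND_THROTTLE };

static enum toy_action toy_push_background(long space_used, long log_size)
{
        long soft = log_size / 8;       /* illustrative, not the kernel ratio */
        long hard = log_size / 4;       /* illustrative, not the kernel ratio */

        if (space_used < soft)
                return TOY_NONE;
        if (space_used < hard)
                return TOY_QUEUE_PUSH;
        return TOY_QUEUE_AND_THROTTLE;
}

int main(void)
{
        long log_size = 1 << 20;

        printf("%d %d %d\n",
               toy_push_background(log_size / 16, log_size),    /* 0 */
               toy_push_background(log_size / 6,  log_size),    /* 1 */
               toy_push_background(log_size / 3,  log_size));   /* 2 */
        return 0;
}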
1616 
1617 /*
1618  * xlog_cil_push_now() is used to trigger an immediate CIL push to the sequence
1619  * number that is passed. When it returns, the work will be queued for
1620  * @push_seq, but it won't be completed.
1621  *
1622  * If the caller is performing a synchronous force, we will flush the workqueue
1623  * to get previously queued work moving so as to minimise the time the caller
1624  * spends waiting for all outstanding pushes to complete. The caller is
1625  * expected to do the required waiting for push_seq to complete.
1626  *
1627  * If the caller is performing an async push, we need to ensure that the
1628  * checkpoint is fully flushed out of the iclogs when we finish the push. If we
1629  * don't do this, then the commit record may remain sitting in memory in an
1630  * ACTIVE iclog. This then requires another full log force to push to disk,
1631  * which defeats the purpose of having an async, non-blocking CIL force
1632  * mechanism. Hence in this case we need to pass a flag to the push work to
1633  * indicate it needs to flush the commit record itself.
1634  */
1635 static void
1636 xlog_cil_push_now(
1637         struct xlog     *log,
1638         xfs_lsn_t       push_seq,
1639         bool            async)
1640 {
1641         struct xfs_cil  *cil = log->l_cilp;
1642 
1643         if (!cil)
1644                 return;
1645 
1646         ASSERT(push_seq && push_seq <= cil->xc_current_sequence);
1647 
1648         /* start on any pending background push to minimise wait time on it */
1649         if (!async)
1650                 flush_workqueue(cil->xc_push_wq);
1651 
1652         spin_lock(&cil->xc_push_lock);
1653 
1654         /*
1655          * If this is an async flush request, we always need to set the
1656          * xc_push_commit_stable flag even if something else has already queued
1657          * a push. The flush caller is asking for the CIL to be on stable
1658          * storage when the next push completes, so regardless of who has queued
1659          * the push, the flush requires stable semantics from it.
1660          */
1661         cil->xc_push_commit_stable = async;
1662 
1663         /*
1664          * If the CIL is empty or we've already pushed the sequence then
1665          * there's no more work that we need to do.
1666          */
1667         if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags) ||
1668             push_seq <= cil->xc_push_seq) {
1669                 spin_unlock(&cil->xc_push_lock);
1670                 return;
1671         }
1672 
1673         cil->xc_push_seq = push_seq;
1674         queue_work(cil->xc_push_wq, &cil->xc_ctx->push_work);
1675         spin_unlock(&cil->xc_push_lock);
1676 }
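/*
 * A sketch of the sync/async split in xlog_cil_push_now(), with simple
 * flags standing in for flush_workqueue() and queue_work(). A synchronous
 * force drains previously queued pushes to shorten its own wait; an
 * asynchronous force instead records that the eventual push must write the
 * commit record to stable storage, because no follow-up log force will do
 * it. The stable flag is set unconditionally, mirroring the code above: it
 * applies to whichever push ends up running.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_cil {
        long    push_seq;
        bool    empty;
        bool    push_commit_stable;
        bool    queued;
};

static void toy_push_now(struct toy_cil *cil, long seq, bool async)
{
        /* a synchronous force would drain the push workqueue here */

        cil->push_commit_stable = async;

        if (cil->empty || seq <= cil->push_seq)
                return;                 /* already queued, or nothing to do */

        cil->push_seq = seq;
        cil->queued = true;             /* stands in for queue_work() */
}

int main(void)
{
        struct toy_cil cil = { 0 };

        toy_push_now(&cil, 5, true);    /* async: queue and demand stable */
        printf("queued=%d stable=%d seq=%ld\n",
               cil.queued, cil.push_commit_stable, cil.push_seq);
        /* prints: queued=1 stable=1 seq=5 */
        return 0;
}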
1677 
1678 bool
1679 xlog_cil_empty(
1680         struct xlog     *log)
1681 {
1682         struct xfs_cil  *cil = log->l_cilp;
1683         bool            empty = false;
1684 
1685         spin_lock(&cil->xc_push_lock);
1686         if (test_bit(XLOG_CIL_EMPTY, &cil->xc_flags))
1687                 empty = true;
1688         spin_unlock(&cil->xc_push_lock);
1689         return empty;
1690 }
1691 
1692 /*
1693  * If there are intent done items in this transaction and the related intent was
1694  * committed in the current (same) CIL checkpoint, we don't need to write either
1695  * the intent or intent done item to the journal as the change will be
1696  * journalled atomically within this checkpoint. As we cannot remove items from
1697  * the CIL here, mark the related intent with a whiteout so that the CIL push
1698  * can remove it rather than writing it to the journal. Then remove the intent
1699  * done item from the current transaction and release it so it doesn't get put
1700  * into the CIL at all.
1701  */
1702 static uint32_t
1703 xlog_cil_process_intents(
1704         struct xfs_cil          *cil,
1705         struct xfs_trans        *tp)
1706 {
1707         struct xfs_log_item     *lip, *ilip, *next;
1708         uint32_t                len = 0;
1709 
1710         list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
1711                 if (!(lip->li_ops->flags & XFS_ITEM_INTENT_DONE))
1712                         continue;
1713 
1714                 ilip = lip->li_ops->iop_intent(lip);
1715                 if (!ilip || !xlog_item_in_current_chkpt(cil, ilip))
1716                         continue;
1717                 set_bit(XFS_LI_WHITEOUT, &ilip->li_flags);
1718                 trace_xfs_cil_whiteout_mark(ilip);
1719                 len += ilip->li_lv->lv_bytes;
1720                 kvfree(ilip->li_lv);
1721                 ilip->li_lv = NULL;
1722 
1723                 xfs_trans_del_item(lip);
1724                 lip->li_ops->iop_release(lip);
1725         }
1726         return len;
1727 }
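/*
 * A userspace model of the intent cancellation above. The structures are
 * stand-ins for xfs_log_item and its ops vector, and the "intent is in the
 * current checkpoint" test is reduced to a precomputed flag. The shape is
 * the point: when an intent-done item's intent lives in the same checkpoint,
 * the intent is marked as a whiteout (so the push discards it) and the done
 * item is dropped from the transaction entirely, releasing the journal
 * space both items would otherwise have consumed.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_item {
        bool            is_done;        /* models XFS_ITEM_INTENT_DONE */
        bool            whiteout;       /* models XFS_LI_WHITEOUT */
        bool            in_trans;       /* still attached to the transaction */
        int             lv_bytes;
        struct toy_item *intent;        /* done item -> its intent, or NULL */
        bool            intent_in_chkpt;
};

static int toy_process_intents(struct toy_item *items, int n)
{
        int released = 0;

        for (int i = 0; i < n; i++) {
                struct toy_item *done = &items[i];

                if (!done->is_done || !done->intent || !done->intent_in_chkpt)
                        continue;

                done->intent->whiteout = true;      /* push will skip it */
                released += done->intent->lv_bytes; /* space comes back */
                done->in_trans = false;  /* done item never enters the CIL */
        }
        return released;
}

int main(void)
{
        struct toy_item items[2] = {
                [0] = { .lv_bytes = 96, .in_trans = true },     /* the intent */
                [1] = { .is_done = true, .in_trans = true,
                        .intent = &items[0], .intent_in_chkpt = true },
        };

        printf("released=%d whiteout=%d done_in_trans=%d\n",
               toy_process_intents(items, 2),
               items[0].whiteout, items[1].in_trans);
        /* prints: released=96 whiteout=1 done_in_trans=0 */
        return 0;
}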
1728 
1729 /*
1730  * Commit a transaction with the given vector to the Committed Item List.
1731  *
1732  * To do this, we need to format the item, pin it in memory if required and
1733  * account for the space used by the transaction. Once we have done that we
1734  * need to release the unused reservation for the transaction, attach the
1735  * transaction to the checkpoint context so we carry the busy extents through
1736  * to checkpoint completion, and then unlock all the items in the transaction.
1737  *
1738  * Called with the context lock already held in read mode to lock out
1739  * background commit, returns without it held once background commits are
1740  * allowed again.
1741  */
1742 void
1743 xlog_cil_commit(
1744         struct xlog             *log,
1745         struct xfs_trans        *tp,
1746         xfs_csn_t               *commit_seq,
1747         bool                    regrant)
1748 {
1749         struct xfs_cil          *cil = log->l_cilp;
1750         struct xfs_log_item     *lip, *next;
1751         uint32_t                released_space = 0;
1752 
1753         /*
1754          * Do all necessary memory allocation before we lock the CIL.
1755          * This ensures the allocation does not deadlock with a CIL
1756          * push in memory reclaim (e.g. from kswapd).
1757          */
1758         xlog_cil_alloc_shadow_bufs(log, tp);
1759 
1760         /* lock out background commit */
1761         down_read(&cil->xc_ctx_lock);
1762 
1763         if (tp->t_flags & XFS_TRANS_HAS_INTENT_DONE)
1764                 released_space = xlog_cil_process_intents(cil, tp);
1765 
1766         xlog_cil_insert_items(log, tp, released_space);
1767 
1768         if (regrant && !xlog_is_shutdown(log))
1769                 xfs_log_ticket_regrant(log, tp->t_ticket);
1770         else
1771                 xfs_log_ticket_ungrant(log, tp->t_ticket);
1772         tp->t_ticket = NULL;
1773         xfs_trans_unreserve_and_mod_sb(tp);
1774 
1775         /*
1776          * Once all the items of the transaction have been copied to the CIL,
1777          * the items can be unlocked and possibly freed.
1778          *
1779          * This needs to be done before we drop the CIL context lock because we
1780          * have to update state in the log items and unlock them before they go
1781          * to disk. If we don't, then the CIL checkpoint can race with us and
1782          * we can run checkpoint completion before we've updated and unlocked
1783          * the log items. This affects (at least) processing of stale buffers,
1784          * inodes and EFIs.
1785          */
1786         trace_xfs_trans_commit_items(tp, _RET_IP_);
1787         list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
1788                 xfs_trans_del_item(lip);
1789                 if (lip->li_ops->iop_committing)
1790                         lip->li_ops->iop_committing(lip, cil->xc_ctx->sequence);
1791         }
1792         if (commit_seq)
1793                 *commit_seq = cil->xc_ctx->sequence;
1794 
1795         /* xlog_cil_push_background() releases cil->xc_ctx_lock */
1796         xlog_cil_push_background(log);
1797 }
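/*
 * xlog_cil_commit() takes xc_ctx_lock for read and relies on
 * xlog_cil_push_background() to release it on every path. A minimal pthread
 * sketch of that asymmetric hand-off (a userspace stand-in, not kernel
 * code): the commit path holds the lock across item insertion so the
 * context cannot be switched underneath it, and the callee decides when it
 * is safe to let go.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t toy_ctx_lock = PTHREAD_RWLOCK_INITIALIZER;

/* releases toy_ctx_lock on every path, like xlog_cil_push_background() */
static void toy_push_background(int space_used, int soft_limit)
{
        if (space_used < soft_limit) {
                pthread_rwlock_unlock(&toy_ctx_lock);
                return;
        }
        printf("queueing background push\n");
        pthread_rwlock_unlock(&toy_ctx_lock);
}

static void toy_commit(int space_used)
{
        pthread_rwlock_rdlock(&toy_ctx_lock);   /* lock out context switch */
        /* ... insert the transaction's items into the context here ... */
        toy_push_background(space_used, 100);   /* drops toy_ctx_lock */
}

int main(void)
{
        toy_commit(42);         /* under the limit: quiet release */
        toy_commit(400);        /* over the limit: queues a push */
        return 0;
}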
1798 
1799 /*
1800  * Flush the CIL to stable storage but don't wait for it to complete. This
1801  * requires the CIL push to ensure the commit record for the push hits the disk,
1802  * but otherwise is no different to a push done from a log force.
1803  */
1804 void
1805 xlog_cil_flush(
1806         struct xlog     *log)
1807 {
1808         xfs_csn_t       seq = log->l_cilp->xc_current_sequence;
1809 
1810         trace_xfs_log_force(log->l_mp, seq, _RET_IP_);
1811         xlog_cil_push_now(log, seq, true);
1812 
1813         /*
1814          * If the CIL is empty, make sure that any previous checkpoint that may
1815          * still be in an active iclog is pushed to stable storage.
1816          */
1817         if (test_bit(XLOG_CIL_EMPTY, &log->l_cilp->xc_flags))
1818                 xfs_log_force(log->l_mp, 0);
1819 }
1820 
1821 /*
1822  * Conditionally push the CIL based on the sequence passed in.
1823  *
1824  * We only need to push if we haven't already pushed the sequence number given.
1825  * Hence the only time we will trigger a push here is if the push sequence is
1826  * the same as the current context.
1827  *
1828  * We return the current commit lsn to allow the callers to determine if an
1829  * iclog flush is necessary following this call.
1830  */
1831 xfs_lsn_t
1832 xlog_cil_force_seq(
1833         struct xlog     *log,
1834         xfs_csn_t       sequence)
1835 {
1836         struct xfs_cil          *cil = log->l_cilp;
1837         struct xfs_cil_ctx      *ctx;
1838         xfs_lsn_t               commit_lsn = NULLCOMMITLSN;
1839 
1840         ASSERT(sequence <= cil->xc_current_sequence);
1841 
1842         if (!sequence)
1843                 sequence = cil->xc_current_sequence;
1844         trace_xfs_log_force(log->l_mp, sequence, _RET_IP_);
1845 
1846         /*
1847          * check to see if we need to force out the current context.
1848          * xlog_cil_push() handles racing pushes for the same sequence,
1849          * so no need to deal with it here.
1850          */
1851 restart:
1852         xlog_cil_push_now(log, sequence, false);
1853 
1854         /*
1855          * See if we can find a previous sequence still committing.
1856          * We need to wait for all previous sequence commits to complete
1857          * before allowing the force of push_seq to go ahead. Hence block
1858          * on commits for those as well.
1859          */
1860         spin_lock(&cil->xc_push_lock);
1861         list_for_each_entry(ctx, &cil->xc_committing, committing) {
1862                 /*
1863                  * Avoid getting stuck in this loop because we were woken by the
1864                  * shutdown, but then went back to sleep once already in the
1865                  * shutdown state.
1866                  */
1867                 if (xlog_is_shutdown(log))
1868                         goto out_shutdown;
1869                 if (ctx->sequence > sequence)
1870                         continue;
1871                 if (!ctx->commit_lsn) {
1872                         /*
1873                          * It is still being pushed! Wait for the push to
1874                          * complete, then start again from the beginning.
1875                          */
1876                         XFS_STATS_INC(log->l_mp, xs_log_force_sleep);
1877                         xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
1878                         goto restart;
1879                 }
1880                 if (ctx->sequence != sequence)
1881                         continue;
1882                 /* found it! */
1883                 commit_lsn = ctx->commit_lsn;
1884         }
1885 
1886         /*
1887          * The call to xlog_cil_push_now() executes the push in the background.
1888          * Hence by the time we have got here, our sequence may not have been
1889          * pushed yet. This is true if the current sequence still matches the
1890          * push sequence after the above wait loop and the CIL still contains
1891          * dirty objects. This is guaranteed by the push code first adding the
1892          * context to the committing list before emptying the CIL.
1893          *
1894          * Hence if we don't find the context in the committing list and the
1895          * current sequence number is unchanged then the CIL contents are
1896          * significant.  If the CIL is empty, it means there was nothing to push
1897          * and that means there is nothing to wait for. If the CIL is not empty,
1898          * it means we haven't yet started the push, because if it had started
1899          * we would have found the context on the committing list.
1900          */
1901         if (sequence == cil->xc_current_sequence &&
1902             !test_bit(XLOG_CIL_EMPTY, &cil->xc_flags)) {
1903                 spin_unlock(&cil->xc_push_lock);
1904                 goto restart;
1905         }
1906 
1907         spin_unlock(&cil->xc_push_lock);
1908         return commit_lsn;
1909 
1910         /*
1911          * We detected a shutdown in progress. We need to trigger the log force
1912          * to pass through its iclog state machine error handling, even though
1913          * we are already in a shutdown state. Hence we can't return
1914          * NULLCOMMITLSN here as that has special meaning to log forces (i.e.
1915          * LSN is already stable), so we return a zero LSN instead.
1916          */
1917 out_shutdown:
1918         spin_unlock(&cil->xc_push_lock);
1919         return 0;
1920 }
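/*
 * A single-threaded sketch of the restart pattern in xlog_cil_force_seq().
 * The real code sleeps on xc_commit_wait and rescans the committing list
 * from the top after every wakeup, because the list may have changed while
 * it slept. Here the sleep is simulated by completing the pending context
 * in place; the scan/sleep/restart structure is the point. Types and
 * values are illustrative stand-ins.
 */
#include <stdio.h>

struct toy_ctx {
        long    sequence;
        long    commit_lsn;     /* 0 means the commit record isn't written */
};

static long toy_force_seq(struct toy_ctx *list, int n, long sequence)
{
        long commit_lsn = -1;   /* stands in for NULLCOMMITLSN */

restart:
        for (int i = 0; i < n; i++) {
                struct toy_ctx *ctx = &list[i];

                if (ctx->sequence > sequence)
                        continue;       /* newer than the forced sequence */
                if (!ctx->commit_lsn) {
                        /* would sleep on xc_commit_wait; simulate progress */
                        ctx->commit_lsn = 1000 + ctx->sequence;
                        goto restart;
                }
                if (ctx->sequence == sequence)
                        commit_lsn = ctx->commit_lsn;
        }
        return commit_lsn;
}

int main(void)
{
        struct toy_ctx list[] = { { 7, 0 }, { 8, 0 } };

        printf("commit lsn %ld\n", toy_force_seq(list, 2, 8));
        /* prints: commit lsn 1008 */
        return 0;
}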
1921 
1922 /*
1923  * Perform initial CIL structure initialisation.
1924  */
1925 int
1926 xlog_cil_init(
1927         struct xlog             *log)
1928 {
1929         struct xfs_cil          *cil;
1930         struct xfs_cil_ctx      *ctx;
1931         struct xlog_cil_pcp     *cilpcp;
1932         int                     cpu;
1933 
1934         cil = kzalloc(sizeof(*cil), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
1935         if (!cil)
1936                 return -ENOMEM;
1937         /*
1938          * Limit the CIL pipeline depth to 4 concurrent work items to bound the
1939          * concurrency the log spinlocks will be exposed to.
1940          */
1941         cil->xc_push_wq = alloc_workqueue("xfs-cil/%s",
1942                         XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_UNBOUND),
1943                         4, log->l_mp->m_super->s_id);
1944         if (!cil->xc_push_wq)
1945                 goto out_destroy_cil;
1946 
1947         cil->xc_log = log;
1948         cil->xc_pcp = alloc_percpu(struct xlog_cil_pcp);
1949         if (!cil->xc_pcp)
1950                 goto out_destroy_wq;
1951 
1952         for_each_possible_cpu(cpu) {
1953                 cilpcp = per_cpu_ptr(cil->xc_pcp, cpu);
1954                 INIT_LIST_HEAD(&cilpcp->busy_extents);
1955                 INIT_LIST_HEAD(&cilpcp->log_items);
1956         }
1957 
1958         INIT_LIST_HEAD(&cil->xc_committing);
1959         spin_lock_init(&cil->xc_push_lock);
1960         init_waitqueue_head(&cil->xc_push_wait);
1961         init_rwsem(&cil->xc_ctx_lock);
1962         init_waitqueue_head(&cil->xc_start_wait);
1963         init_waitqueue_head(&cil->xc_commit_wait);
1964         log->l_cilp = cil;
1965 
1966         ctx = xlog_cil_ctx_alloc();
1967         xlog_cil_ctx_switch(cil, ctx);
1968         return 0;
1969 
1970 out_destroy_wq:
1971         destroy_workqueue(cil->xc_push_wq);
1972 out_destroy_cil:
1973         kfree(cil);
1974         return -ENOMEM;
1975 }
1976 
1977 void
1978 xlog_cil_destroy(
1979         struct xlog     *log)
1980 {
1981         struct xfs_cil  *cil = log->l_cilp;
1982 
1983         if (cil->xc_ctx) {
1984                 if (cil->xc_ctx->ticket)
1985                         xfs_log_ticket_put(cil->xc_ctx->ticket);
1986                 kfree(cil->xc_ctx);
1987         }
1988 
1989         ASSERT(test_bit(XLOG_CIL_EMPTY, &cil->xc_flags));
1990         free_percpu(cil->xc_pcp);
1991         destroy_workqueue(cil->xc_push_wq);
1992         kfree(cil);
1993 }
1994 
1995 