TOMOYO Linux Cross Reference
Linux/fs/nilfs2/btnode.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * NILFS B-tree node cache
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Originally written by Seiji Kihara.
 * Fully revised by Ryusuke Konishi for stabilization and simplification.
 *
 */

#include <linux/types.h>
#include <linux/buffer_head.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include "nilfs.h"
#include "mdt.h"
#include "dat.h"
#include "page.h"
#include "btnode.h"


/**
 * nilfs_init_btnc_inode - initialize B-tree node cache inode
 * @btnc_inode: inode to be initialized
 *
 * nilfs_init_btnc_inode() sets up an inode for B-tree node cache.
 */
void nilfs_init_btnc_inode(struct inode *btnc_inode)
{
        struct nilfs_inode_info *ii = NILFS_I(btnc_inode);

        btnc_inode->i_mode = S_IFREG;
        ii->i_flags = 0;
        memset(&ii->i_bmap_data, 0, sizeof(struct nilfs_bmap));
        mapping_set_gfp_mask(btnc_inode->i_mapping, GFP_NOFS);
}
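
/**
 * nilfs_btnode_cache_clear - drop all pages from a B-tree node cache
 * @btnc: address space of the B-tree node cache to be cleared
 *
 * Invalidates and then truncates every page held in the cache.
 */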
void nilfs_btnode_cache_clear(struct address_space *btnc)
{
        invalidate_mapping_pages(btnc, 0, -1);
        truncate_inode_pages(btnc, 0);
}
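
/**
 * nilfs_btnode_create_block - create a new node block in the B-tree node cache
 * @btnc: address space of the B-tree node cache
 * @blocknr: block address (key) of the new block
 *
 * Grabs a buffer for @blocknr and returns it zeroed, mapped, and up to date.
 * Returns ERR_PTR(-ENOMEM) if the buffer cannot be allocated, or
 * ERR_PTR(-EIO) if the buffer is unexpectedly already in use, which points
 * to metadata inconsistency.
 */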
struct buffer_head *
nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
{
        struct inode *inode = btnc->host;
        struct buffer_head *bh;

        bh = nilfs_grab_buffer(inode, btnc, blocknr, BIT(BH_NILFS_Node));
        if (unlikely(!bh))
                return ERR_PTR(-ENOMEM);

        if (unlikely(buffer_mapped(bh) || buffer_uptodate(bh) ||
                     buffer_dirty(bh))) {
                /*
                 * The block buffer at the specified new address was already
                 * in use.  This can happen if it is a virtual block number
                 * and has been reallocated due to corruption of the bitmap
                 * used to manage its allocation state (if not, the buffer
                 * clearing of an abandoned b-tree node is missing somewhere).
                 */
                nilfs_error(inode->i_sb,
                            "state inconsistency probably due to duplicate use of b-tree node block address %llu (ino=%lu)",
                            (unsigned long long)blocknr, inode->i_ino);
                goto failed;
        }
        memset(bh->b_data, 0, i_blocksize(inode));
        bh->b_bdev = inode->i_sb->s_bdev;
        bh->b_blocknr = blocknr;
        set_buffer_mapped(bh);
        set_buffer_uptodate(bh);

        folio_unlock(bh->b_folio);
        folio_put(bh->b_folio);
        return bh;

failed:
        folio_unlock(bh->b_folio);
        folio_put(bh->b_folio);
        brelse(bh);
        return ERR_PTR(-EIO);
}
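
/**
 * nilfs_btnode_submit_block - submit a read request for a B-tree node block
 * @btnc: address space of the B-tree node cache
 * @blocknr: block address (key) of the block; a virtual block number unless
 *           the cache belongs to the DAT
 * @pblocknr: physical block number, or 0 if @blocknr still needs translation
 * @opf: I/O operation flags (REQ_OP_READ, possibly with REQ_RAHEAD)
 * @pbh: place to store the buffer head of the block
 * @submit_ptr: physical block number of the request submitted last time
 *
 * Returns 0 if a read request was submitted, -EEXIST (internal code) if the
 * buffer was already up to date or dirty, -EBUSY (internal code) if a
 * readahead request was abandoned, or another negative error code on
 * failure (for example -ENOMEM or a DAT translation error).
 */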
int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
                              sector_t pblocknr, blk_opf_t opf,
                              struct buffer_head **pbh, sector_t *submit_ptr)
{
        struct buffer_head *bh;
        struct inode *inode = btnc->host;
        struct folio *folio;
        int err;

        bh = nilfs_grab_buffer(inode, btnc, blocknr, BIT(BH_NILFS_Node));
        if (unlikely(!bh))
                return -ENOMEM;

        err = -EEXIST; /* internal code */
        folio = bh->b_folio;

        if (buffer_uptodate(bh) || buffer_dirty(bh))
                goto found;

        if (pblocknr == 0) {
                pblocknr = blocknr;
                if (inode->i_ino != NILFS_DAT_INO) {
                        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

                        /* blocknr is a virtual block number */
                        err = nilfs_dat_translate(nilfs->ns_dat, blocknr,
                                                  &pblocknr);
                        if (unlikely(err)) {
                                brelse(bh);
                                goto out_locked;
                        }
                }
        }
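
        /*
         * Issue readahead only if the block is physically contiguous with
         * the previously submitted one and the buffer lock can be taken
         * without blocking; otherwise abandon the readahead request.
         */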
        if (opf & REQ_RAHEAD) {
                if (pblocknr != *submit_ptr + 1 || !trylock_buffer(bh)) {
                        err = -EBUSY; /* internal code */
                        brelse(bh);
                        goto out_locked;
                }
        } else { /* opf == REQ_OP_READ */
                lock_buffer(bh);
        }
        if (buffer_uptodate(bh)) {
                unlock_buffer(bh);
                err = -EEXIST; /* internal code */
                goto found;
        }
        set_buffer_mapped(bh);
        bh->b_bdev = inode->i_sb->s_bdev;
        bh->b_blocknr = pblocknr; /* set block address for read */
        bh->b_end_io = end_buffer_read_sync;
        get_bh(bh);
        submit_bh(opf, bh);
        bh->b_blocknr = blocknr; /* set back to the given block address */
        *submit_ptr = pblocknr;
        err = 0;
found:
        *pbh = bh;

out_locked:
        folio_unlock(folio);
        folio_put(folio);
        return err;
}

/**
 * nilfs_btnode_delete - delete B-tree node buffer
 * @bh: buffer to be deleted
 *
 * nilfs_btnode_delete() invalidates the specified buffer and deletes the
 * folio containing it if the folio is no longer busy.
 */
void nilfs_btnode_delete(struct buffer_head *bh)
{
        struct address_space *mapping;
        struct folio *folio = bh->b_folio;
        pgoff_t index = folio->index;
        int still_dirty;

        folio_get(folio);
        folio_lock(folio);
        folio_wait_writeback(folio);

        nilfs_forget_buffer(bh);
        still_dirty = folio_test_dirty(folio);
        mapping = folio->mapping;
        folio_unlock(folio);
        folio_put(folio);

        if (!still_dirty && mapping)
                invalidate_inode_pages2_range(mapping, index, index);
}

/**
 * nilfs_btnode_prepare_change_key
 *  prepare to move the contents of the block at the old key to the new key.
 *  The old buffer will not be removed, but it may be reused for the new
 *  buffer.  This function may return -ENOMEM on memory allocation failure
 *  or -EIO on disk read error.
 */
int nilfs_btnode_prepare_change_key(struct address_space *btnc,
                                    struct nilfs_btnode_chkey_ctxt *ctxt)
{
        struct buffer_head *obh, *nbh;
        struct inode *inode = btnc->host;
        __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
        int err;

        if (oldkey == newkey)
                return 0;

        obh = ctxt->bh;
        ctxt->newbh = NULL;
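
        /*
         * If the block size equals the page size, first try to move the
         * existing folio to the new index in the page cache; otherwise, or
         * if that fails, fall back to allocating a new block and copying
         * the contents.
         */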
        if (inode->i_blkbits == PAGE_SHIFT) {
                struct folio *ofolio = obh->b_folio;
                folio_lock(ofolio);
retry:
                /* BUG_ON(oldkey != obh->b_folio->index); */
                if (unlikely(oldkey != ofolio->index))
                        NILFS_FOLIO_BUG(ofolio,
                                       "invalid oldkey %lld (newkey=%lld)",
                                       (unsigned long long)oldkey,
                                       (unsigned long long)newkey);

                xa_lock_irq(&btnc->i_pages);
                err = __xa_insert(&btnc->i_pages, newkey, ofolio, GFP_NOFS);
                xa_unlock_irq(&btnc->i_pages);
                /*
                 * Note: folio->index will not change to newkey until
                 * nilfs_btnode_commit_change_key() is called.  The folio
                 * lock is held to protect the folio while it is in this
                 * intermediate state.
                 */
                if (!err)
                        return 0;
                else if (err != -EBUSY)
                        goto failed_unlock;

                err = invalidate_inode_pages2_range(btnc, newkey, newkey);
                if (!err)
                        goto retry;
                /* fallback to copy mode */
                folio_unlock(ofolio);
        }

        nbh = nilfs_btnode_create_block(btnc, newkey);
        if (IS_ERR(nbh))
                return PTR_ERR(nbh);

        BUG_ON(nbh == obh);
        ctxt->newbh = nbh;
        return 0;

 failed_unlock:
        folio_unlock(obh->b_folio);
        return err;
}

/**
 * nilfs_btnode_commit_change_key
 *  commit the change_key operation prepared by prepare_change_key().
 */
void nilfs_btnode_commit_change_key(struct address_space *btnc,
                                    struct nilfs_btnode_chkey_ctxt *ctxt)
{
        struct buffer_head *obh = ctxt->bh, *nbh = ctxt->newbh;
        __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
        struct folio *ofolio;

        if (oldkey == newkey)
                return;

        if (nbh == NULL) {      /* blocksize == pagesize */
                ofolio = obh->b_folio;
                if (unlikely(oldkey != ofolio->index))
                        NILFS_FOLIO_BUG(ofolio,
                                       "invalid oldkey %lld (newkey=%lld)",
                                       (unsigned long long)oldkey,
                                       (unsigned long long)newkey);
                mark_buffer_dirty(obh);
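
                /*
                 * The entry at the new key was inserted by the prepare
                 * phase; remove the old entry and set the dirty tag on the
                 * new one.
                 */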
                xa_lock_irq(&btnc->i_pages);
                __xa_erase(&btnc->i_pages, oldkey);
                __xa_set_mark(&btnc->i_pages, newkey, PAGECACHE_TAG_DIRTY);
                xa_unlock_irq(&btnc->i_pages);

                ofolio->index = obh->b_blocknr = newkey;
                folio_unlock(ofolio);
        } else {
                nilfs_copy_buffer(nbh, obh);
                mark_buffer_dirty(nbh);

                nbh->b_blocknr = newkey;
                ctxt->bh = nbh;
                nilfs_btnode_delete(obh); /* will decrement bh->b_count */
        }
}

/**
 * nilfs_btnode_abort_change_key
 *  abort the change_key operation prepared by prepare_change_key().
 */
void nilfs_btnode_abort_change_key(struct address_space *btnc,
                                   struct nilfs_btnode_chkey_ctxt *ctxt)
{
        struct buffer_head *nbh = ctxt->newbh;
        __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;

        if (oldkey == newkey)
                return;

        if (nbh == NULL) {      /* blocksize == pagesize */
                xa_erase_irq(&btnc->i_pages, newkey);
                folio_unlock(ctxt->bh->b_folio);
        } else {
                /*
                 * When canceling a buffer that a prepare operation has
                 * allocated to copy a node block to another location, use
                 * nilfs_btnode_delete() to initialize and release the buffer
                 * so that the buffer flags will not be in an inconsistent
                 * state when it is reallocated.
                 */
                nilfs_btnode_delete(nbh);
        }
}
