Linux/fs/jfs/jfs_metapage.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *   Copyright (C) International Business Machines Corp., 2000-2005
 *   Portions Copyright (C) Christoph Hellwig, 2001-2002
 */

#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/seq_file.h>
#include <linux/writeback.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
#include "jfs_txnmgr.h"
#include "jfs_debug.h"

#ifdef CONFIG_JFS_STATISTICS
static struct {
        uint    pagealloc;      /* # of page allocations */
        uint    pagefree;       /* # of page frees */
        uint    lockwait;       /* # of sleeping lock_metapage() calls */
} mpStat;
#endif

#define metapage_locked(mp) test_bit(META_locked, &(mp)->flag)
#define trylock_metapage(mp) test_and_set_bit_lock(META_locked, &(mp)->flag)

static inline void unlock_metapage(struct metapage *mp)
{
        clear_bit_unlock(META_locked, &mp->flag);
        wake_up(&mp->wait);
}

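/*
 * Slow path for lock_metapage(): sleep until META_locked is clear.  The
 * folio lock is dropped while we sleep and retaken afterwards, so that
 * the current holder, who may need the folio lock to finish its work,
 * can make progress.
 */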
static inline void __lock_metapage(struct metapage *mp)
{
        DECLARE_WAITQUEUE(wait, current);
        INCREMENT(mpStat.lockwait);
        add_wait_queue_exclusive(&mp->wait, &wait);
        do {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (metapage_locked(mp)) {
                        folio_unlock(mp->folio);
                        io_schedule();
                        folio_lock(mp->folio);
                }
        } while (trylock_metapage(mp));
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&mp->wait, &wait);
}

/*
 * Must have mp->folio locked
 */
static inline void lock_metapage(struct metapage *mp)
{
        if (trylock_metapage(mp))
                __lock_metapage(mp);
}

#define METAPOOL_MIN_PAGES 32
static struct kmem_cache *metapage_cache;
static mempool_t *metapage_mempool;

#define MPS_PER_PAGE (PAGE_SIZE >> L2PSIZE)

#if MPS_PER_PAGE > 1

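/*
 * PSIZE < PAGE_SIZE: several metapages share one folio.  The anchor,
 * attached as folio->private, tracks the metapages in the folio
 * (mp_count, mp[]), the number of I/O segments in flight (io_count),
 * and the first I/O error seen (status).
 */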
struct meta_anchor {
        int mp_count;
        atomic_t io_count;
        blk_status_t status;
        struct metapage *mp[MPS_PER_PAGE];
};

static inline struct metapage *folio_to_mp(struct folio *folio, int offset)
{
        struct meta_anchor *anchor = folio->private;

        if (!anchor)
                return NULL;
        return anchor->mp[offset >> L2PSIZE];
}

static inline int insert_metapage(struct folio *folio, struct metapage *mp)
{
        struct meta_anchor *a;
        int index;
        int l2mp_blocks;        /* log2 blocks per metapage */

        a = folio->private;
        if (!a) {
                a = kzalloc(sizeof(struct meta_anchor), GFP_NOFS);
                if (!a)
                        return -ENOMEM;
                folio_attach_private(folio, a);
                kmap(&folio->page);
        }

        if (mp) {
                l2mp_blocks = L2PSIZE - folio->mapping->host->i_blkbits;
                index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);
                a->mp_count++;
                a->mp[index] = mp;
        }

        return 0;
}

static inline void remove_metapage(struct folio *folio, struct metapage *mp)
{
        struct meta_anchor *a = folio->private;
        int l2mp_blocks = L2PSIZE - folio->mapping->host->i_blkbits;
        int index;

        index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);

        BUG_ON(a->mp[index] != mp);

        a->mp[index] = NULL;
        if (--a->mp_count == 0) {
                kfree(a);
                folio_detach_private(folio);
                kunmap(&folio->page);
        }
}

static inline void inc_io(struct folio *folio)
{
        struct meta_anchor *anchor = folio->private;

        atomic_inc(&anchor->io_count);
}

static inline void dec_io(struct folio *folio, blk_status_t status,
                void (*handler)(struct folio *, blk_status_t))
{
        struct meta_anchor *anchor = folio->private;

        if (anchor->status == BLK_STS_OK)
                anchor->status = status;

        if (atomic_dec_and_test(&anchor->io_count))
                handler(folio, anchor->status);
}

#else
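/*
 * PSIZE == PAGE_SIZE: exactly one metapage per folio, stored directly
 * in folio->private, so no anchor or I/O counting is needed.
 */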
static inline struct metapage *folio_to_mp(struct folio *folio, int offset)
{
        return folio->private;
}

static inline int insert_metapage(struct folio *folio, struct metapage *mp)
{
        if (mp) {
                folio_attach_private(folio, mp);
                kmap(&folio->page);
        }
        return 0;
}

static inline void remove_metapage(struct folio *folio, struct metapage *mp)
{
        folio_detach_private(folio);
        kunmap(&folio->page);
}

#define inc_io(folio) do {} while (0)
#define dec_io(folio, status, handler) handler(folio, status)

#endif

static inline struct metapage *alloc_metapage(gfp_t gfp_mask)
{
        struct metapage *mp = mempool_alloc(metapage_mempool, gfp_mask);

        if (mp) {
                mp->lid = 0;
                mp->lsn = 0;
                mp->data = NULL;
                mp->clsn = 0;
                mp->log = NULL;
                init_waitqueue_head(&mp->wait);
        }
        return mp;
}

static inline void free_metapage(struct metapage *mp)
{
        mempool_free(mp, metapage_mempool);
}

int __init metapage_init(void)
{
        /*
         * Allocate the metapage structures
         */
        metapage_cache = kmem_cache_create("jfs_mp", sizeof(struct metapage),
                                           0, 0, NULL);
        if (metapage_cache == NULL)
                return -ENOMEM;

        metapage_mempool = mempool_create_slab_pool(METAPOOL_MIN_PAGES,
                                                    metapage_cache);

        if (metapage_mempool == NULL) {
                kmem_cache_destroy(metapage_cache);
                return -ENOMEM;
        }

        return 0;
}

void metapage_exit(void)
{
        mempool_destroy(metapage_mempool);
        kmem_cache_destroy(metapage_cache);
}

static inline void drop_metapage(struct folio *folio, struct metapage *mp)
{
        if (mp->count || mp->nohomeok || test_bit(META_dirty, &mp->flag) ||
            test_bit(META_io, &mp->flag))
                return;
        remove_metapage(folio, mp);
        INCREMENT(mpStat.pagefree);
        free_metapage(mp);
}

/*
 * Metapage address space operations
 */

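/*
 * Resolve a logical block in @inode to a physical block via xtLookup(),
 * clamping *len to i_size and to the contiguous extent found.  When
 * i_ino is 0 (the block device's direct inode) there is no extent map
 * and lblock is returned unchanged.
 */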
static sector_t metapage_get_blocks(struct inode *inode, sector_t lblock,
                                    int *len)
{
        int rc = 0;
        int xflag;
        s64 xaddr;
        sector_t file_blocks = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
                               inode->i_blkbits;

        if (lblock >= file_blocks)
                return 0;
        if (lblock + *len > file_blocks)
                *len = file_blocks - lblock;

        if (inode->i_ino) {
                rc = xtLookup(inode, (s64)lblock, *len, &xflag, &xaddr, len, 0);
                if ((rc == 0) && *len)
                        lblock = (sector_t)xaddr;
                else
                        lblock = 0;
        } /* else no mapping */

        return lblock;
}

static void last_read_complete(struct folio *folio, blk_status_t status)
{
        if (status)
                printk(KERN_ERR "Read error %d at %#llx\n", status,
                                folio_pos(folio));

        folio_end_read(folio, status == 0);
}

static void metapage_read_end_io(struct bio *bio)
{
        struct folio *folio = bio->bi_private;

        dec_io(folio, bio->bi_status, last_read_complete);
        bio_put(bio);
}

static void remove_from_logsync(struct metapage *mp)
{
        struct jfs_log *log = mp->log;
        unsigned long flags;

        /*
         * This can race.  Recheck that log hasn't been set to null, and
         * after acquiring the logsync lock, recheck lsn.
         */
        if (!log)
                return;

        LOGSYNC_LOCK(log, flags);
        if (mp->lsn) {
                mp->log = NULL;
                mp->lsn = 0;
                mp->clsn = 0;
                log->count--;
                list_del(&mp->synclist);
        }
        LOGSYNC_UNLOCK(log, flags);
}

static void last_write_complete(struct folio *folio, blk_status_t status)
{
        struct metapage *mp;
        unsigned int offset;

        if (status) {
                int err = blk_status_to_errno(status);
                printk(KERN_ERR "metapage_write_end_io: I/O error\n");
                mapping_set_error(folio->mapping, err);
        }

        for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
                mp = folio_to_mp(folio, offset);
                if (mp && test_bit(META_io, &mp->flag)) {
                        if (mp->lsn)
                                remove_from_logsync(mp);
                        clear_bit(META_io, &mp->flag);
                }
                /*
                 * I'd like to call drop_metapage here, but I don't think it's
                 * safe unless I have the page locked
                 */
        }
        folio_end_writeback(folio);
}

static void metapage_write_end_io(struct bio *bio)
{
        struct folio *folio = bio->bi_private;

        BUG_ON(!folio->private);

        dec_io(folio, bio->bi_status, last_write_complete);
        bio_put(bio);
}

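/*
 * Write back the dirty metapages in a folio, walking it in PSIZE steps
 * and coalescing runs that are contiguous both in memory and on disk
 * into a single bio.  Metapages pinned by the journal (nohomeok without
 * META_forcewrite) stay dirty and redirty the folio; if the journal
 * isn't already being written, a flush is kicked so the folio can't be
 * blocked indefinitely.
 */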
static int metapage_write_folio(struct folio *folio,
                struct writeback_control *wbc, void *unused)
{
        struct bio *bio = NULL;
        int block_offset;       /* block offset of mp within page */
        struct inode *inode = folio->mapping->host;
        int blocks_per_mp = JFS_SBI(inode->i_sb)->nbperpage;
        int len;
        int xlen;
        struct metapage *mp;
        int redirty = 0;
        sector_t lblock;
        int nr_underway = 0;
        sector_t pblock;
        sector_t next_block = 0;
        sector_t page_start;
        unsigned long bio_bytes = 0;
        unsigned long bio_offset = 0;
        int offset;
        int bad_blocks = 0;

        page_start = folio_pos(folio) >> inode->i_blkbits;
        BUG_ON(!folio_test_locked(folio));
        BUG_ON(folio_test_writeback(folio));
        folio_start_writeback(folio);

        for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
                mp = folio_to_mp(folio, offset);

                if (!mp || !test_bit(META_dirty, &mp->flag))
                        continue;

                if (mp->nohomeok && !test_bit(META_forcewrite, &mp->flag)) {
                        redirty = 1;
                        /*
                         * Make sure this page isn't blocked indefinitely.
                         * If the journal isn't undergoing I/O, push it
                         */
                        if (mp->log && !(mp->log->cflag & logGC_PAGEOUT))
                                jfs_flush_journal(mp->log, 0);
                        continue;
                }

                clear_bit(META_dirty, &mp->flag);
                set_bit(META_io, &mp->flag);
                block_offset = offset >> inode->i_blkbits;
                lblock = page_start + block_offset;
                if (bio) {
                        if (xlen && lblock == next_block) {
                                /* Contiguous, in memory & on disk */
                                len = min(xlen, blocks_per_mp);
                                xlen -= len;
                                bio_bytes += len << inode->i_blkbits;
                                continue;
                        }
                        /* Not contiguous */
                        bio_add_folio_nofail(bio, folio, bio_bytes, bio_offset);
                        /*
                         * Increment counter before submitting i/o to keep
                         * count from hitting zero before we're through
                         */
                        inc_io(folio);
                        if (!bio->bi_iter.bi_size)
                                goto dump_bio;
                        submit_bio(bio);
                        nr_underway++;
                        bio = NULL;
                } else
                        inc_io(folio);
                xlen = (folio_size(folio) - offset) >> inode->i_blkbits;
                pblock = metapage_get_blocks(inode, lblock, &xlen);
                if (!pblock) {
                        printk(KERN_ERR "JFS: metapage_get_blocks failed\n");
                        /*
                         * We already called inc_io(), but can't cancel it
                         * with dec_io() until we're done with the page
                         */
                        bad_blocks++;
                        continue;
                }
                len = min(xlen, (int)JFS_SBI(inode->i_sb)->nbperpage);

                bio = bio_alloc(inode->i_sb->s_bdev, 1, REQ_OP_WRITE, GFP_NOFS);
                bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9);
                bio->bi_end_io = metapage_write_end_io;
                bio->bi_private = folio;

                /* Don't call bio_add_page yet, we may add to this vec */
                bio_offset = offset;
                bio_bytes = len << inode->i_blkbits;

                xlen -= len;
                next_block = lblock + len;
        }
        if (bio) {
                bio_add_folio_nofail(bio, folio, bio_bytes, bio_offset);
                if (!bio->bi_iter.bi_size)
                        goto dump_bio;

                submit_bio(bio);
                nr_underway++;
        }
        if (redirty)
                folio_redirty_for_writepage(wbc, folio);

        folio_unlock(folio);

        if (bad_blocks)
                goto err_out;

        if (nr_underway == 0)
                folio_end_writeback(folio);

        return 0;
dump_bio:
        print_hex_dump(KERN_ERR, "JFS: dump of bio: ", DUMP_PREFIX_ADDRESS, 16,
                       4, bio, sizeof(*bio), 0);
        bio_put(bio);
        folio_unlock(folio);
        dec_io(folio, BLK_STS_OK, last_write_complete);
err_out:
        while (bad_blocks--)
                dec_io(folio, BLK_STS_OK, last_write_complete);
        return -EIO;
}

static int metapage_writepages(struct address_space *mapping,
                struct writeback_control *wbc)
{
        struct blk_plug plug;
        int err;

        blk_start_plug(&plug);
        err = write_cache_pages(mapping, wbc, metapage_write_folio, NULL);
        blk_finish_plug(&plug);

        return err;
}

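/*
 * Read a folio, issuing one bio per contiguous extent reported by
 * metapage_get_blocks().  Unmapped blocks are simply skipped.  The
 * completion path (dec_io() -> last_read_complete()) unlocks the folio
 * once the last bio finishes.
 */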
static int metapage_read_folio(struct file *fp, struct folio *folio)
{
        struct inode *inode = folio->mapping->host;
        struct bio *bio = NULL;
        int block_offset;
        int blocks_per_page = i_blocks_per_folio(inode, folio);
        sector_t page_start;    /* address of page in fs blocks */
        sector_t pblock;
        int xlen;
        unsigned int len;
        int offset;

        BUG_ON(!folio_test_locked(folio));
        page_start = folio_pos(folio) >> inode->i_blkbits;

        block_offset = 0;
        while (block_offset < blocks_per_page) {
                xlen = blocks_per_page - block_offset;
                pblock = metapage_get_blocks(inode, page_start + block_offset,
                                             &xlen);
                if (pblock) {
                        if (!folio->private)
                                insert_metapage(folio, NULL);
                        inc_io(folio);
                        if (bio)
                                submit_bio(bio);

                        bio = bio_alloc(inode->i_sb->s_bdev, 1, REQ_OP_READ,
                                        GFP_NOFS);
                        bio->bi_iter.bi_sector =
                                pblock << (inode->i_blkbits - 9);
                        bio->bi_end_io = metapage_read_end_io;
                        bio->bi_private = folio;
                        len = xlen << inode->i_blkbits;
                        offset = block_offset << inode->i_blkbits;
                        bio_add_folio_nofail(bio, folio, len, offset);
                        block_offset += xlen;
                } else
                        block_offset++;
        }
        if (bio)
                submit_bio(bio);
        else
                folio_unlock(folio);

        return 0;
}

static bool metapage_release_folio(struct folio *folio, gfp_t gfp_mask)
{
        struct metapage *mp;
        bool ret = true;
        int offset;

        for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
                mp = folio_to_mp(folio, offset);

                if (!mp)
                        continue;

                jfs_info("metapage_release_folio: mp = 0x%p", mp);
                if (mp->count || mp->nohomeok ||
                    test_bit(META_dirty, &mp->flag)) {
                        jfs_info("count = %ld, nohomeok = %d", mp->count,
                                 mp->nohomeok);
                        ret = false;
                        continue;
                }
                if (mp->lsn)
                        remove_from_logsync(mp);
                remove_metapage(folio, mp);
                INCREMENT(mpStat.pagefree);
                free_metapage(mp);
        }
        return ret;
}

static void metapage_invalidate_folio(struct folio *folio, size_t offset,
                                    size_t length)
{
        BUG_ON(offset || length < folio_size(folio));

        BUG_ON(folio_test_writeback(folio));

        metapage_release_folio(folio, 0);
}

const struct address_space_operations jfs_metapage_aops = {
        .read_folio     = metapage_read_folio,
        .writepages     = metapage_writepages,
        .release_folio  = metapage_release_folio,
        .invalidate_folio = metapage_invalidate_folio,
        .dirty_folio    = filemap_dirty_folio,
};

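/*
 * Find or create the metapage covering @lblock.  @absolute selects the
 * block device's direct mapping instead of the inode's own; @new means
 * the caller will initialize the contents, so when a metapage fills the
 * whole page it is grabbed and zeroed rather than read from disk.
 * Returns with the metapage locked and its reference count raised.
 */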
struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
                                unsigned int size, int absolute,
                                unsigned long new)
{
        int l2BlocksPerPage;
        int l2bsize;
        struct address_space *mapping;
        struct metapage *mp = NULL;
        struct folio *folio;
        unsigned long page_index;
        unsigned long page_offset;

        jfs_info("__get_metapage: ino = %ld, lblock = 0x%lx, abs=%d",
                 inode->i_ino, lblock, absolute);

        l2bsize = inode->i_blkbits;
        l2BlocksPerPage = PAGE_SHIFT - l2bsize;
        page_index = lblock >> l2BlocksPerPage;
        page_offset = (lblock - (page_index << l2BlocksPerPage)) << l2bsize;
        if ((page_offset + size) > PAGE_SIZE) {
                jfs_err("MetaData crosses page boundary!!");
                jfs_err("lblock = %lx, size = %d", lblock, size);
                dump_stack();
                return NULL;
        }
        if (absolute)
                mapping = JFS_SBI(inode->i_sb)->direct_inode->i_mapping;
        else {
                /*
                 * If an nfs client tries to read an inode that is larger
                 * than any existing inodes, we may try to read past the
                 * end of the inode map
                 */
                if ((lblock << inode->i_blkbits) >= inode->i_size)
                        return NULL;
                mapping = inode->i_mapping;
        }

        if (new && (PSIZE == PAGE_SIZE)) {
                folio = filemap_grab_folio(mapping, page_index);
                if (IS_ERR(folio)) {
                        jfs_err("filemap_grab_folio failed!");
                        return NULL;
                }
                folio_mark_uptodate(folio);
        } else {
                folio = read_mapping_folio(mapping, page_index, NULL);
                if (IS_ERR(folio)) {
                        jfs_err("read_mapping_folio failed!");
                        return NULL;
                }
                folio_lock(folio);
        }

        mp = folio_to_mp(folio, page_offset);
        if (mp) {
                if (mp->logical_size != size) {
                        jfs_error(inode->i_sb,
                                  "get_mp->logical_size != size\n");
                        jfs_err("logical_size = %d, size = %d",
                                mp->logical_size, size);
                        dump_stack();
                        goto unlock;
                }
                mp->count++;
                lock_metapage(mp);
                if (test_bit(META_discard, &mp->flag)) {
                        if (!new) {
                                jfs_error(inode->i_sb,
                                          "using a discarded metapage\n");
                                discard_metapage(mp);
                                goto unlock;
                        }
                        clear_bit(META_discard, &mp->flag);
                }
        } else {
                INCREMENT(mpStat.pagealloc);
                mp = alloc_metapage(GFP_NOFS);
                if (!mp)
                        goto unlock;
                mp->folio = folio;
                mp->sb = inode->i_sb;
                mp->flag = 0;
                mp->xflag = COMMIT_PAGE;
                mp->count = 1;
                mp->nohomeok = 0;
                mp->logical_size = size;
                mp->data = folio_address(folio) + page_offset;
                mp->index = lblock;
                if (unlikely(insert_metapage(folio, mp))) {
                        free_metapage(mp);
                        goto unlock;
                }
                lock_metapage(mp);
        }

        if (new) {
                jfs_info("zeroing mp = 0x%p", mp);
                memset(mp->data, 0, PSIZE);
        }

        folio_unlock(folio);
        jfs_info("__get_metapage: returning = 0x%p data = 0x%p", mp, mp->data);
        return mp;

unlock:
        folio_unlock(folio);
        return NULL;
}

void grab_metapage(struct metapage *mp)
{
        jfs_info("grab_metapage: mp = 0x%p", mp);
        folio_get(mp->folio);
        folio_lock(mp->folio);
        mp->count++;
        lock_metapage(mp);
        folio_unlock(mp->folio);
}

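/*
 * Write one locked, dirty folio synchronously and wait for the I/O to
 * complete: essentially a folio-based version of the old
 * write_one_page().  The folio is unlocked on return.
 */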
static int metapage_write_one(struct folio *folio)
{
        struct address_space *mapping = folio->mapping;
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = folio_nr_pages(folio),
        };
        int ret = 0;

        BUG_ON(!folio_test_locked(folio));

        folio_wait_writeback(folio);

        if (folio_clear_dirty_for_io(folio)) {
                folio_get(folio);
                ret = metapage_write_folio(folio, &wbc, NULL);
                if (ret == 0)
                        folio_wait_writeback(folio);
                folio_put(folio);
        } else {
                folio_unlock(folio);
        }

        if (!ret)
                ret = filemap_check_errors(mapping);
        return ret;
}

void force_metapage(struct metapage *mp)
{
        struct folio *folio = mp->folio;

        jfs_info("force_metapage: mp = 0x%p", mp);
        set_bit(META_forcewrite, &mp->flag);
        clear_bit(META_sync, &mp->flag);
        folio_get(folio);
        folio_lock(folio);
        folio_mark_dirty(folio);
        if (metapage_write_one(folio))
                jfs_error(mp->sb, "metapage_write_one() failed\n");
        clear_bit(META_forcewrite, &mp->flag);
        folio_put(folio);
}

void hold_metapage(struct metapage *mp)
{
        folio_lock(mp->folio);
}

void put_metapage(struct metapage *mp)
{
        if (mp->count || mp->nohomeok) {
                /* Someone else will release this */
                folio_unlock(mp->folio);
                return;
        }
        folio_get(mp->folio);
        mp->count++;
        lock_metapage(mp);
        folio_unlock(mp->folio);
        release_metapage(mp);
}

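/*
 * Drop a reference to a metapage.  On the last release, a dirty
 * metapage marks its folio dirty (writing it out immediately if
 * META_sync is set), a clean one is removed from the logsync list,
 * and drop_metapage() frees the structure if nothing else pins it.
 */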
void release_metapage(struct metapage *mp)
{
        struct folio *folio = mp->folio;

        jfs_info("release_metapage: mp = 0x%p, flag = 0x%lx", mp, mp->flag);

        folio_lock(folio);
        unlock_metapage(mp);

        assert(mp->count);
        if (--mp->count || mp->nohomeok) {
                folio_unlock(folio);
                folio_put(folio);
                return;
        }

        if (test_bit(META_dirty, &mp->flag)) {
                folio_mark_dirty(folio);
                if (test_bit(META_sync, &mp->flag)) {
                        clear_bit(META_sync, &mp->flag);
                        if (metapage_write_one(folio))
                                jfs_error(mp->sb, "metapage_write_one() failed\n");
                        folio_lock(folio);
                }
        } else if (mp->lsn)     /* discard_metapage doesn't remove it */
                remove_from_logsync(mp);

        /* Try to keep metapages from using up too much memory */
        drop_metapage(folio, mp);

        folio_unlock(folio);
        folio_put(folio);
}

void __invalidate_metapages(struct inode *ip, s64 addr, int len)
{
        sector_t lblock;
        int l2BlocksPerPage = PAGE_SHIFT - ip->i_blkbits;
        int BlocksPerPage = 1 << l2BlocksPerPage;
        /* All callers are interested in block device's mapping */
        struct address_space *mapping =
                JFS_SBI(ip->i_sb)->direct_inode->i_mapping;
        struct metapage *mp;
        unsigned int offset;

        /*
         * Mark metapages to discard.  They will eventually be
         * released, but should not be written.
         */
        for (lblock = addr & ~(BlocksPerPage - 1); lblock < addr + len;
             lblock += BlocksPerPage) {
                struct folio *folio = filemap_lock_folio(mapping,
                                lblock >> l2BlocksPerPage);
                if (IS_ERR(folio))
                        continue;
                for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
                        mp = folio_to_mp(folio, offset);
                        if (!mp)
                                continue;
                        if (mp->index < addr)
                                continue;
                        if (mp->index >= addr + len)
                                break;

                        clear_bit(META_dirty, &mp->flag);
                        set_bit(META_discard, &mp->flag);
                        if (mp->lsn)
                                remove_from_logsync(mp);
                }
                folio_unlock(folio);
                folio_put(folio);
        }
}

#ifdef CONFIG_JFS_STATISTICS
int jfs_mpstat_proc_show(struct seq_file *m, void *v)
{
        seq_printf(m,
                       "JFS Metapage statistics\n"
                       "=======================\n"
                       "page allocations = %d\n"
                       "page frees = %d\n"
                       "lock waits = %d\n",
                       mpStat.pagealloc,
                       mpStat.pagefree,
                       mpStat.lockwait);
        return 0;
}
#endif