Linux/fs/xfs/xfs_aops.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_reflink.h"
#include "xfs_errortag.h"
#include "xfs_error.h"

struct xfs_writepage_ctx {
        struct iomap_writepage_ctx ctx;
        unsigned int            data_seq;
        unsigned int            cow_seq;
};

static inline struct xfs_writepage_ctx *
XFS_WPC(struct iomap_writepage_ctx *ctx)
{
        return container_of(ctx, struct xfs_writepage_ctx, ctx);
}
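
/*
 * iomap hands the embedded iomap_writepage_ctx back into our callbacks,
 * and XFS_WPC() uses container_of() to recover the enclosing XFS context
 * where the per-writeback fork sequence numbers live.  A minimal sketch
 * of the pattern, with a hypothetical callback name:
 *
 *        static int example_map_blocks(struct iomap_writepage_ctx *wpc,
 *                        struct inode *inode, loff_t offset, unsigned int len)
 *        {
 *                unsigned int data_seq = XFS_WPC(wpc)->data_seq;
 *                ...
 *        }
 */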

/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct iomap_ioend *ioend)
{
        return ioend->io_offset + ioend->io_size >
                XFS_I(ioend->io_inode)->i_disk_size;
}
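
/*
 * "Fast and loose" because i_disk_size is sampled without taking the
 * ILOCK: a false positive merely costs a spurious trip through
 * xfs_setfilesize(), which rechecks the size under XFS_ILOCK_EXCL via
 * xfs_new_eof() before logging anything.
 */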

/*
 * Update on-disk file size now that data has been written to disk.
 */
int
xfs_setfilesize(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
        size_t                  size)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_trans        *tp;
        xfs_fsize_t             isize;
        int                     error;

        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
        if (error)
                return error;

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        isize = xfs_new_eof(ip, offset + size);
        if (!isize) {
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
                xfs_trans_cancel(tp);
                return 0;
        }

        trace_xfs_setfilesize(ip, offset, size);

        ip->i_disk_size = isize;
        xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

        return xfs_trans_commit(tp);
}
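
/*
 * A worked example of the flow above, assuming the in-core i_size is large
 * enough: with i_disk_size at 8192, a completed write of 4096 bytes at
 * offset 8192 makes xfs_new_eof() return 12288 and the inode core is
 * logged; if a racing completion already pushed i_disk_size to 16384,
 * xfs_new_eof() returns 0 and the transaction is cancelled.  Note that
 * passing XFS_ILOCK_EXCL to xfs_trans_ijoin() makes the commit and cancel
 * paths drop the ILOCK for us.
 */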

/*
 * Transactional I/O write completion: update the on-disk file size and
 * convert unwritten extents or remap COW extents as needed.
 */
STATIC void
xfs_end_ioend(
        struct iomap_ioend      *ioend)
{
        struct xfs_inode        *ip = XFS_I(ioend->io_inode);
        struct xfs_mount        *mp = ip->i_mount;
        xfs_off_t               offset = ioend->io_offset;
        size_t                  size = ioend->io_size;
        unsigned int            nofs_flag;
        int                     error;

        /*
         * We can allocate memory here while doing writeback on behalf of
         * memory reclaim.  To avoid memory allocation deadlocks set the
         * task-wide nofs context for the following operations.
         */
        nofs_flag = memalloc_nofs_save();

        /*
         * Just clean up the in-memory structures if the fs has been shut down.
         */
        if (xfs_is_shutdown(mp)) {
                error = -EIO;
                goto done;
        }

        /*
         * Clean up all COW blocks and underlying data fork delalloc blocks on
         * I/O error. The delalloc punch is required because this ioend was
         * mapped to blocks in the COW fork and the associated pages are no
         * longer dirty. If we don't remove delalloc blocks here, they become
         * stale and can corrupt free space accounting on unmount.
         */
        error = blk_status_to_errno(ioend->io_bio.bi_status);
        if (unlikely(error)) {
                if (ioend->io_flags & IOMAP_F_SHARED) {
                        xfs_reflink_cancel_cow_range(ip, offset, size, true);
                        xfs_bmap_punch_delalloc_range(ip, offset,
                                        offset + size);
                }
                goto done;
        }

        /*
         * Success: commit the COW or unwritten blocks if needed.
         */
        if (ioend->io_flags & IOMAP_F_SHARED)
                error = xfs_reflink_end_cow(ip, offset, size);
        else if (ioend->io_type == IOMAP_UNWRITTEN)
                error = xfs_iomap_write_unwritten(ip, offset, size, false);

        if (!error && xfs_ioend_is_append(ioend))
                error = xfs_setfilesize(ip, ioend->io_offset, ioend->io_size);
done:
        iomap_finish_ioends(ioend, error);
        memalloc_nofs_restore(nofs_flag);
}
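
/*
 * Note that iomap_finish_ioends() completes not just the passed-in ioend
 * but every ioend previously merged into it, so the same completion status
 * is applied to all folios covered by the merged range.
 */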

/*
 * Finish all pending IO completions that require transactional modifications.
 *
 * We try to merge physically and logically contiguous ioends before completion
 * to minimise the number of transactions we need to perform during IO
 * completion.  Both unwritten extent conversion and COW remapping need to
 * iterate and modify one physical extent at a time, so we gain nothing by
 * merging physically discontiguous extents here.
 *
 * The ioend chain we process here can be unbounded in length, and we may have
 * to perform significant amounts of work on each ioend to complete it.  Hence
 * we have to be careful about holding the CPU for too long in this loop.
 */
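
/*
 * For illustration: ioends for file ranges [0,1M) and [1M,2M) that sit in
 * one physically contiguous unwritten extent can be merged and typically
 * converted with a single transaction, while the same two ranges backed by
 * discontiguous physical extents would still need one conversion each, so
 * merging them would buy nothing.
 */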
void
xfs_end_io(
        struct work_struct      *work)
{
        struct xfs_inode        *ip =
                container_of(work, struct xfs_inode, i_ioend_work);
        struct iomap_ioend      *ioend;
        struct list_head        tmp;
        unsigned long           flags;

        spin_lock_irqsave(&ip->i_ioend_lock, flags);
        list_replace_init(&ip->i_ioend_list, &tmp);
        spin_unlock_irqrestore(&ip->i_ioend_lock, flags);

        iomap_sort_ioends(&tmp);
        while ((ioend = list_first_entry_or_null(&tmp, struct iomap_ioend,
                        io_list))) {
                list_del_init(&ioend->io_list);
                iomap_ioend_try_merge(ioend, &tmp);
                xfs_end_ioend(ioend);
                cond_resched();
        }
}

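/*
 * Completion handler installed by xfs_prepare_ioend() for ioends that need
 * transactional work.  Bio completion may run in interrupt context, hence
 * the irqsave locking; the real work is deferred to xfs_end_io() on the
 * m_unwritten_workqueue, and the work item only needs to be queued when
 * the per-inode ioend list transitions from empty to non-empty.
 */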
STATIC void
xfs_end_bio(
        struct bio              *bio)
{
        struct iomap_ioend      *ioend = iomap_ioend_from_bio(bio);
        struct xfs_inode        *ip = XFS_I(ioend->io_inode);
        unsigned long           flags;

        spin_lock_irqsave(&ip->i_ioend_lock, flags);
        if (list_empty(&ip->i_ioend_list))
                WARN_ON_ONCE(!queue_work(ip->i_mount->m_unwritten_workqueue,
                                         &ip->i_ioend_work));
        list_add_tail(&ioend->io_list, &ip->i_ioend_list);
        spin_unlock_irqrestore(&ip->i_ioend_lock, flags);
}

/*
 * Fast revalidation of the cached writeback mapping. Return true if the current
 * mapping is valid, false otherwise.
 */
static bool
xfs_imap_valid(
        struct iomap_writepage_ctx      *wpc,
        struct xfs_inode                *ip,
        loff_t                          offset)
{
        if (offset < wpc->iomap.offset ||
            offset >= wpc->iomap.offset + wpc->iomap.length)
                return false;
        /*
         * If this is a COW mapping, it is sufficient to check that the mapping
         * covers the offset. Be careful to check this first because the caller
         * can revalidate a COW mapping without updating the data seqno.
         */
        if (wpc->iomap.flags & IOMAP_F_SHARED)
                return true;

        /*
         * This is not a COW mapping. Check the sequence number of the data fork
         * because concurrent changes could have invalidated the extent. Check
         * the COW fork because concurrent changes since the last time we
         * checked (and found nothing at this offset) could have added
         * overlapping blocks.
         */
        if (XFS_WPC(wpc)->data_seq != READ_ONCE(ip->i_df.if_seq)) {
                trace_xfs_wb_data_iomap_invalid(ip, &wpc->iomap,
                                XFS_WPC(wpc)->data_seq, XFS_DATA_FORK);
                return false;
        }
        if (xfs_inode_has_cow_data(ip) &&
            XFS_WPC(wpc)->cow_seq != READ_ONCE(ip->i_cowfp->if_seq)) {
                trace_xfs_wb_cow_iomap_invalid(ip, &wpc->iomap,
                                XFS_WPC(wpc)->cow_seq, XFS_COW_FORK);
                return false;
        }
        return true;
}
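
/*
 * For example: a concurrent buffered write that adds a delalloc extent to
 * the data fork bumps ip->i_df.if_seq under the ILOCK, so a mapping cached
 * with the old data_seq is rejected above and xfs_map_blocks() has to look
 * the range up again.
 */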
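
/*
 * Find (and, for delalloc ranges, allocate) the block mapping backing the
 * writeback range that starts at @offset, caching the result in wpc->iomap
 * so that subsequent blocks covered by the same mapping can short-circuit
 * through xfs_imap_valid().
 */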
static int
xfs_map_blocks(
        struct iomap_writepage_ctx *wpc,
        struct inode            *inode,
        loff_t                  offset,
        unsigned int            len)
{
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        ssize_t                 count = i_blocksize(inode);
        xfs_fileoff_t           offset_fsb = XFS_B_TO_FSBT(mp, offset);
        xfs_fileoff_t           end_fsb = XFS_B_TO_FSB(mp, offset + count);
        xfs_fileoff_t           cow_fsb;
        int                     whichfork;
        struct xfs_bmbt_irec    imap;
        struct xfs_iext_cursor  icur;
        int                     retries = 0;
        int                     error = 0;
        unsigned int            *seq;

        if (xfs_is_shutdown(mp))
                return -EIO;

        XFS_ERRORTAG_DELAY(mp, XFS_ERRTAG_WB_DELAY_MS);

        /*
         * COW fork blocks can overlap data fork blocks even if the blocks
         * aren't shared.  COW I/O always takes precedence, so we must always
         * check for overlap on reflink inodes unless the mapping is already a
         * COW one, or the COW fork hasn't changed from the last time we looked
         * at it.
         *
         * It's safe to check the COW fork if_seq here without the ILOCK because
         * we've indirectly protected against concurrent updates: writeback has
         * the page locked, which prevents concurrent invalidations by reflink
         * and directio and prevents concurrent buffered writes to the same
         * page.  Changes to if_seq always happen under i_lock, which protects
         * against concurrent updates and provides a memory barrier on the way
         * out that ensures that we always see the current value.
         */
        if (xfs_imap_valid(wpc, ip, offset))
                return 0;

        /*
         * If we don't have a valid map, now it's time to get a new one for this
         * offset.  This will convert delayed allocations (including COW ones)
         * into real extents.  If we return without a valid map, it means we
         * landed in a hole and we skip the block.
         */
retry:
        cow_fsb = NULLFILEOFF;
        whichfork = XFS_DATA_FORK;
        xfs_ilock(ip, XFS_ILOCK_SHARED);
        ASSERT(!xfs_need_iread_extents(&ip->i_df));

        /*
         * Check if this offset is covered by a COW extent, and if so use it
         * directly instead of looking up anything in the data fork.
         */
        if (xfs_inode_has_cow_data(ip) &&
            xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &imap))
                cow_fsb = imap.br_startoff;
        if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
                XFS_WPC(wpc)->cow_seq = READ_ONCE(ip->i_cowfp->if_seq);
                xfs_iunlock(ip, XFS_ILOCK_SHARED);

                whichfork = XFS_COW_FORK;
                goto allocate_blocks;
        }

        /*
         * No COW extent overlap. Revalidate now that we may have updated
         * ->cow_seq. If the data mapping is still valid, we're done.
         */
        if (xfs_imap_valid(wpc, ip, offset)) {
                xfs_iunlock(ip, XFS_ILOCK_SHARED);
                return 0;
        }

        /*
         * If we don't have a valid map, now it's time to get a new one for this
         * offset.  This will convert delayed allocations (including COW ones)
         * into real extents.
         */
        if (!xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap))
                imap.br_startoff = end_fsb;     /* fake a hole past EOF */
        XFS_WPC(wpc)->data_seq = READ_ONCE(ip->i_df.if_seq);
        xfs_iunlock(ip, XFS_ILOCK_SHARED);

        /* landed in a hole or beyond EOF? */
        if (imap.br_startoff > offset_fsb) {
                imap.br_blockcount = imap.br_startoff - offset_fsb;
                imap.br_startoff = offset_fsb;
                imap.br_startblock = HOLESTARTBLOCK;
                imap.br_state = XFS_EXT_NORM;
        }

        /*
         * Truncate to the next COW extent if there is one.  This is the only
         * opportunity to do this because we can skip COW fork lookups for the
         * subsequent blocks in the mapping; however, the requirement to treat
         * the COW range separately remains.
         */
        if (cow_fsb != NULLFILEOFF &&
            cow_fsb < imap.br_startoff + imap.br_blockcount)
                imap.br_blockcount = cow_fsb - imap.br_startoff;

        /* got a delalloc extent? */
        if (imap.br_startblock != HOLESTARTBLOCK &&
            isnullstartblock(imap.br_startblock))
                goto allocate_blocks;

        xfs_bmbt_to_iomap(ip, &wpc->iomap, &imap, 0, 0, XFS_WPC(wpc)->data_seq);
        trace_xfs_map_blocks_found(ip, offset, count, whichfork, &imap);
        return 0;
allocate_blocks:
        /*
         * Convert a delalloc extent to a real one. The current page is held
         * locked so nothing could have removed the block backing offset_fsb,
         * although it could have moved from the COW to the data fork by another
         * thread.
         */
        if (whichfork == XFS_COW_FORK)
                seq = &XFS_WPC(wpc)->cow_seq;
        else
                seq = &XFS_WPC(wpc)->data_seq;

        error = xfs_bmapi_convert_delalloc(ip, whichfork, offset,
                                &wpc->iomap, seq);
        if (error) {
                /*
                 * If we failed to find the extent in the COW fork we might have
                 * raced with a COW to data fork conversion or truncate.
                 * Restart the lookup to catch the extent in the data fork for
                 * the former case, but prevent additional retries to avoid
                 * looping forever for the latter case.
                 */
                if (error == -EAGAIN && whichfork == XFS_COW_FORK && !retries++)
                        goto retry;
                ASSERT(error != -EAGAIN);
                return error;
        }

        /*
         * Due to merging the returned real extent might be larger than the
         * original delalloc one.  Trim the returned extent to the next COW
         * boundary again to force a re-lookup.
         */
        if (whichfork != XFS_COW_FORK && cow_fsb != NULLFILEOFF) {
                loff_t          cow_offset = XFS_FSB_TO_B(mp, cow_fsb);

                if (cow_offset < wpc->iomap.offset + wpc->iomap.length)
                        wpc->iomap.length = cow_offset - wpc->iomap.offset;
        }

        ASSERT(wpc->iomap.offset <= offset);
        ASSERT(wpc->iomap.offset + wpc->iomap.length > offset);
        trace_xfs_map_blocks_alloc(ip, offset, count, whichfork, &imap);
        return 0;
}

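/*
 * Called before an ioend is submitted: convert any CoW extents in the
 * range to regular ones up front, and route ioends whose completion may
 * need a transaction (on-disk size update, unwritten conversion, COW
 * remap) to the completion workqueue via xfs_end_bio().
 */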
static int
xfs_prepare_ioend(
        struct iomap_ioend      *ioend,
        int                     status)
{
        unsigned int            nofs_flag;

        /*
         * We can allocate memory here while doing writeback on behalf of
         * memory reclaim.  To avoid memory allocation deadlocks set the
         * task-wide nofs context for the following operations.
         */
        nofs_flag = memalloc_nofs_save();

        /* Convert CoW extents to regular */
        if (!status && (ioend->io_flags & IOMAP_F_SHARED)) {
                status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
                                ioend->io_offset, ioend->io_size);
        }

        memalloc_nofs_restore(nofs_flag);

        /* send ioends that might require a transaction to the completion wq */
        if (xfs_ioend_is_append(ioend) || ioend->io_type == IOMAP_UNWRITTEN ||
            (ioend->io_flags & IOMAP_F_SHARED))
                ioend->io_bio.bi_end_io = xfs_end_bio;
        return status;
}

/*
 * If the folio has delalloc blocks on it, the caller is asking us to punch
 * them out. If we don't, we can leave a stale delalloc mapping covered by a
 * clean page that needs to be dirtied again before the delalloc mapping can
 * be converted. This stale delalloc mapping can trip up a later direct I/O
 * read operation on the same region.
 *
 * We prevent this by truncating away the delalloc regions on the folio.
 * Because they are delalloc, we can do this without needing a transaction.
 * Indeed, if we get ENOSPC errors, we have to be able to do this truncation
 * without a transaction as there is no space left for block reservation
 * (typically why we see an ENOSPC in writeback).
 */
static void
xfs_discard_folio(
        struct folio            *folio,
        loff_t                  pos)
{
        struct xfs_inode        *ip = XFS_I(folio->mapping->host);
        struct xfs_mount        *mp = ip->i_mount;

        if (xfs_is_shutdown(mp))
                return;

        xfs_alert_ratelimited(mp,
                "page discard on page "PTR_FMT", inode 0x%llx, pos %llu.",
                        folio, ip->i_ino, pos);

        /*
         * The end of the punch range is always the offset of the first
         * byte of the next folio. Hence the end offset is only dependent on the
         * folio itself and not the start offset that is passed in.
         */
        xfs_bmap_punch_delalloc_range(ip, pos,
                                folio_pos(folio) + folio_size(folio));
}

static const struct iomap_writeback_ops xfs_writeback_ops = {
        .map_blocks             = xfs_map_blocks,
        .prepare_ioend          = xfs_prepare_ioend,
        .discard_folio          = xfs_discard_folio,
};

STATIC int
xfs_vm_writepages(
        struct address_space    *mapping,
        struct writeback_control *wbc)
{
        struct xfs_writepage_ctx wpc = { };

        xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
        return iomap_writepages(mapping, wbc, &wpc.ctx, &xfs_writeback_ops);
}

STATIC int
xfs_dax_writepages(
        struct address_space    *mapping,
        struct writeback_control *wbc)
{
        struct xfs_inode        *ip = XFS_I(mapping->host);

        xfs_iflags_clear(ip, XFS_ITRUNCATED);
        return dax_writeback_mapping_range(mapping,
                        xfs_inode_buftarg(ip)->bt_daxdev, wbc);
}

STATIC sector_t
xfs_vm_bmap(
        struct address_space    *mapping,
        sector_t                block)
{
        struct xfs_inode        *ip = XFS_I(mapping->host);

        trace_xfs_vm_bmap(ip);

        /*
         * The swap code (ab-)uses ->bmap to get a block mapping and then
         * bypasses the file system for actual I/O.  We really can't allow
         * that on reflink inodes, so we have to skip out here.  And yes,
         * 0 is the magic code for a bmap error.
         *
         * Since we don't pass back blockdev info, we can't return bmap
         * information for rt files either.
         */
        if (xfs_is_cow_inode(ip) || XFS_IS_REALTIME_INODE(ip))
                return 0;
        return iomap_bmap(mapping, block, &xfs_read_iomap_ops);
}

STATIC int
xfs_vm_read_folio(
        struct file             *unused,
        struct folio            *folio)
{
        return iomap_read_folio(folio, &xfs_read_iomap_ops);
}

STATIC void
xfs_vm_readahead(
        struct readahead_control        *rac)
{
        iomap_readahead(rac, &xfs_read_iomap_ops);
}

static int
xfs_iomap_swapfile_activate(
        struct swap_info_struct         *sis,
        struct file                     *swap_file,
        sector_t                        *span)
{
        sis->bdev = xfs_inode_buftarg(XFS_I(file_inode(swap_file)))->bt_bdev;
        return iomap_swapfile_activate(sis, swap_file, span,
                        &xfs_read_iomap_ops);
}

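/*
 * Address space operations for regular XFS files.  All buffered read and
 * writeback paths go through the generic iomap helpers above; DAX inodes
 * use the much smaller xfs_dax_aops table below instead.
 */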
const struct address_space_operations xfs_address_space_operations = {
        .read_folio             = xfs_vm_read_folio,
        .readahead              = xfs_vm_readahead,
        .writepages             = xfs_vm_writepages,
        .dirty_folio            = iomap_dirty_folio,
        .release_folio          = iomap_release_folio,
        .invalidate_folio       = iomap_invalidate_folio,
        .bmap                   = xfs_vm_bmap,
        .migrate_folio          = filemap_migrate_folio,
        .is_partially_uptodate  = iomap_is_partially_uptodate,
        .error_remove_folio     = generic_error_remove_folio,
        .swap_activate          = xfs_iomap_swapfile_activate,
};

const struct address_space_operations xfs_dax_aops = {
        .writepages             = xfs_dax_writepages,
        .dirty_folio            = noop_dirty_folio,
        .swap_activate          = xfs_iomap_swapfile_activate,
};
