
TOMOYO Linux Cross Reference
Linux/fs/ext4/page-io.c


// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/page-io.c
 *
 * This contains the new page_io functions for ext4
 *
 * Written by Theodore Ts'o, 2010.
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

static struct kmem_cache *io_end_cachep;
static struct kmem_cache *io_end_vec_cachep;

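/*
 * Set up the slab caches used below for io_end and io_end_vec structures.
 * Returns 0 on success or -ENOMEM if a cache could not be created.
 */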
int __init ext4_init_pageio(void)
{
        io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
        if (io_end_cachep == NULL)
                return -ENOMEM;

        io_end_vec_cachep = KMEM_CACHE(ext4_io_end_vec, 0);
        if (io_end_vec_cachep == NULL) {
                kmem_cache_destroy(io_end_cachep);
                return -ENOMEM;
        }
        return 0;
}

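/* Tear down the slab caches created by ext4_init_pageio(). */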
void ext4_exit_pageio(void)
{
        kmem_cache_destroy(io_end_cachep);
        kmem_cache_destroy(io_end_vec_cachep);
}

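/*
 * Allocate a new io_end_vec and append it to the io_end's list of ranges
 * awaiting unwritten extent conversion.
 */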
struct ext4_io_end_vec *ext4_alloc_io_end_vec(ext4_io_end_t *io_end)
{
        struct ext4_io_end_vec *io_end_vec;

        io_end_vec = kmem_cache_zalloc(io_end_vec_cachep, GFP_NOFS);
        if (!io_end_vec)
                return ERR_PTR(-ENOMEM);
        INIT_LIST_HEAD(&io_end_vec->list);
        list_add_tail(&io_end_vec->list, &io_end->list_vec);
        return io_end_vec;
}

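/* Free all io_end_vec structures still attached to this io_end. */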
static void ext4_free_io_end_vec(ext4_io_end_t *io_end)
{
        struct ext4_io_end_vec *io_end_vec, *tmp;

        if (list_empty(&io_end->list_vec))
                return;
        list_for_each_entry_safe(io_end_vec, tmp, &io_end->list_vec, list) {
                list_del(&io_end_vec->list);
                kmem_cache_free(io_end_vec_cachep, io_end_vec);
        }
}

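/* Return the most recently added io_end_vec; the list must not be empty. */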
struct ext4_io_end_vec *ext4_last_io_end_vec(ext4_io_end_t *io_end)
{
        BUG_ON(list_empty(&io_end->list_vec));
        return list_last_entry(&io_end->list_vec, struct ext4_io_end_vec, list);
}

/*
 * Print a buffer I/O error message compatible with the one in fs/buffer.c.
 * This provides compatibility with dmesg scrapers that look for a specific
 * buffer I/O error message.  We really need a unified error reporting
 * structure to userspace a la Digital Unix's uerf system, but it's
 * probably not going to happen in my lifetime, due to LKML politics...
 */
static void buffer_io_error(struct buffer_head *bh)
{
        printk_ratelimited(KERN_ERR "Buffer I/O error on device %pg, logical block %llu\n",
                           bh->b_bdev,
                           (unsigned long long)bh->b_blocknr);
}

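/*
 * Finish page writeback for the folios covered by @bio: clear the
 * async_write flag on the buffers this bio wrote, note any I/O error on
 * them, and end folio writeback once no other bio is still writing to the
 * folio. For fscrypt bounce folios, writeback is ended on the pagecache
 * folio and the bounce page is freed.
 */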
static void ext4_finish_bio(struct bio *bio)
{
        struct folio_iter fi;

        bio_for_each_folio_all(fi, bio) {
                struct folio *folio = fi.folio;
                struct folio *io_folio = NULL;
                struct buffer_head *bh, *head;
                size_t bio_start = fi.offset;
                size_t bio_end = bio_start + fi.length;
                unsigned under_io = 0;
                unsigned long flags;

                if (fscrypt_is_bounce_folio(folio)) {
                        io_folio = folio;
                        folio = fscrypt_pagecache_folio(folio);
                }

                if (bio->bi_status) {
                        int err = blk_status_to_errno(bio->bi_status);
                        mapping_set_error(folio->mapping, err);
                }
                bh = head = folio_buffers(folio);
                /*
                 * We check all buffers in the folio under b_uptodate_lock
                 * to avoid races with other end io clearing async_write flags
                 */
                spin_lock_irqsave(&head->b_uptodate_lock, flags);
                do {
                        if (bh_offset(bh) < bio_start ||
                            bh_offset(bh) + bh->b_size > bio_end) {
                                if (buffer_async_write(bh))
                                        under_io++;
                                continue;
                        }
                        clear_buffer_async_write(bh);
                        if (bio->bi_status) {
                                set_buffer_write_io_error(bh);
                                buffer_io_error(bh);
                        }
                } while ((bh = bh->b_this_page) != head);
                spin_unlock_irqrestore(&head->b_uptodate_lock, flags);
                if (!under_io) {
                        fscrypt_free_bounce_page(&io_folio->page);
                        folio_end_writeback(folio);
                }
        }
}

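/*
 * Free an io_end whose last reference has been dropped: finish and release
 * any bios deferred onto it, free its io_end_vec list and the io_end itself.
 */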
static void ext4_release_io_end(ext4_io_end_t *io_end)
{
        struct bio *bio, *next_bio;

        BUG_ON(!list_empty(&io_end->list));
        BUG_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
        WARN_ON(io_end->handle);

        for (bio = io_end->bio; bio; bio = next_bio) {
                next_bio = bio->bi_private;
                ext4_finish_bio(bio);
                bio_put(bio);
        }
        ext4_free_io_end_vec(io_end);
        kmem_cache_free(io_end_cachep, io_end);
}

/*
 * Check a range of space and convert unwritten extents to written. Note that
 * we are protected from truncate touching the same part of the extent tree
 * by the fact that the truncate code waits for all DIO to finish (thus
 * exclusion from direct IO is achieved) and also waits for PageWriteback
 * bits. Thus we cannot get to ext4_ext_truncate() before all IOs overlapping
 * that range are completed (happens from ext4_free_ioend()).
 */
static int ext4_end_io_end(ext4_io_end_t *io_end)
{
        struct inode *inode = io_end->inode;
        handle_t *handle = io_end->handle;
        int ret = 0;

        ext4_debug("ext4_end_io_end: io_end 0x%p from inode %lu, list->next 0x%p, "
                   "list->prev 0x%p\n",
                   io_end, inode->i_ino, io_end->list.next, io_end->list.prev);

        io_end->handle = NULL;  /* Following call will use up the handle */
        ret = ext4_convert_unwritten_io_end_vec(handle, io_end);
        if (ret < 0 && !ext4_forced_shutdown(inode->i_sb)) {
                ext4_msg(inode->i_sb, KERN_EMERG,
                         "failed to convert unwritten extents to written "
                         "extents -- potential data loss!  "
                         "(inode %lu, error %d)", inode->i_ino, ret);
        }
        ext4_clear_io_unwritten_flag(io_end);
        ext4_release_io_end(io_end);
        return ret;
}

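/* Dump an inode's list of completed io_ends (EXT4FS_DEBUG builds only). */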
static void dump_completed_IO(struct inode *inode, struct list_head *head)
{
#ifdef  EXT4FS_DEBUG
        struct list_head *cur, *before, *after;
        ext4_io_end_t *io_end, *io_end0, *io_end1;

        if (list_empty(head))
                return;

        ext4_debug("Dump inode %lu completed io list\n", inode->i_ino);
        list_for_each_entry(io_end, head, list) {
                cur = &io_end->list;
                before = cur->prev;
                io_end0 = container_of(before, ext4_io_end_t, list);
                after = cur->next;
                io_end1 = container_of(after, ext4_io_end_t, list);

                ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
                            io_end, inode->i_ino, io_end0, io_end1);
        }
#endif
}

/* Add the io_end to per-inode completed end_io list. */
static void ext4_add_complete_io(ext4_io_end_t *io_end)
{
        struct ext4_inode_info *ei = EXT4_I(io_end->inode);
        struct ext4_sb_info *sbi = EXT4_SB(io_end->inode->i_sb);
        struct workqueue_struct *wq;
        unsigned long flags;

        /* Only reserved conversions from writeback should enter here */
        WARN_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
        WARN_ON(!io_end->handle && sbi->s_journal);
        spin_lock_irqsave(&ei->i_completed_io_lock, flags);
        wq = sbi->rsv_conversion_wq;
        if (list_empty(&ei->i_rsv_conversion_list))
                queue_work(wq, &ei->i_rsv_conversion_work);
        list_add_tail(&io_end->list, &ei->i_rsv_conversion_list);
        spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
}

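/*
 * Detach the list of completed io_ends from the inode under
 * i_completed_io_lock and convert the unwritten extents of each entry,
 * returning the first error encountered.
 */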
static int ext4_do_flush_completed_IO(struct inode *inode,
                                      struct list_head *head)
{
        ext4_io_end_t *io_end;
        struct list_head unwritten;
        unsigned long flags;
        struct ext4_inode_info *ei = EXT4_I(inode);
        int err, ret = 0;

        spin_lock_irqsave(&ei->i_completed_io_lock, flags);
        dump_completed_IO(inode, head);
        list_replace_init(head, &unwritten);
        spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);

        while (!list_empty(&unwritten)) {
                io_end = list_entry(unwritten.next, ext4_io_end_t, list);
                BUG_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
                list_del_init(&io_end->list);

                err = ext4_end_io_end(io_end);
                if (unlikely(!ret && err))
                        ret = err;
        }
        return ret;
}

/*
 * Work on completed IO to convert unwritten extents to written extents.
 */
void ext4_end_io_rsv_work(struct work_struct *work)
{
        struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info,
                                                  i_rsv_conversion_work);
        ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_rsv_conversion_list);
}

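/* Allocate and initialize an io_end structure holding a single reference. */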
ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
{
        ext4_io_end_t *io_end = kmem_cache_zalloc(io_end_cachep, flags);

        if (io_end) {
                io_end->inode = inode;
                INIT_LIST_HEAD(&io_end->list);
                INIT_LIST_HEAD(&io_end->list_vec);
                refcount_set(&io_end->count, 1);
        }
        return io_end;
}

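/*
 * Drop a reference to an io_end. If this was the last reference and
 * unwritten extents still need conversion, defer the conversion to the
 * reserved-conversion workqueue rather than doing it here (this path is
 * reachable from bio completion context).
 */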
void ext4_put_io_end_defer(ext4_io_end_t *io_end)
{
        if (refcount_dec_and_test(&io_end->count)) {
                if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) ||
                                list_empty(&io_end->list_vec)) {
                        ext4_release_io_end(io_end);
                        return;
                }
                ext4_add_complete_io(io_end);
        }
}

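/*
 * Drop a reference to an io_end, performing any pending unwritten extent
 * conversion synchronously before releasing it.
 */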
int ext4_put_io_end(ext4_io_end_t *io_end)
{
        int err = 0;

        if (refcount_dec_and_test(&io_end->count)) {
                if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
                        err = ext4_convert_unwritten_io_end_vec(io_end->handle,
                                                                io_end);
                        io_end->handle = NULL;
                        ext4_clear_io_unwritten_flag(io_end);
                }
                ext4_release_io_end(io_end);
        }
        return err;
}

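/* Take an extra reference to an io_end. */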
ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
{
        refcount_inc(&io_end->count);
        return io_end;
}

/* BIO completion function for page writeback */
static void ext4_end_bio(struct bio *bio)
{
        ext4_io_end_t *io_end = bio->bi_private;
        sector_t bi_sector = bio->bi_iter.bi_sector;

        if (WARN_ONCE(!io_end, "io_end is NULL: %pg: sector %Lu len %u err %d\n",
                      bio->bi_bdev,
                      (long long) bio->bi_iter.bi_sector,
                      (unsigned) bio_sectors(bio),
                      bio->bi_status)) {
                ext4_finish_bio(bio);
                bio_put(bio);
                return;
        }
        bio->bi_end_io = NULL;

        if (bio->bi_status) {
                struct inode *inode = io_end->inode;

                ext4_warning(inode->i_sb, "I/O error %d writing to inode %lu "
                             "(starting block %llu)",
                             bio->bi_status, inode->i_ino,
                             (unsigned long long)
                             bi_sector >> (inode->i_blkbits - 9));
                mapping_set_error(inode->i_mapping,
                                blk_status_to_errno(bio->bi_status));
        }

        if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
                /*
                 * Link bio into list hanging from io_end. We have to do it
                 * atomically as bio completions can be racing against each
                 * other.
                 */
                bio->bi_private = xchg(&io_end->bio, bio);
                ext4_put_io_end_defer(io_end);
        } else {
                /*
                 * Drop io_end reference early. Inode can get freed once
                 * we finish the bio.
                 */
                ext4_put_io_end_defer(io_end);
                ext4_finish_bio(bio);
                bio_put(bio);
        }
}

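/*
 * Submit the bio accumulated in @io, if any, marking it REQ_SYNC for
 * WB_SYNC_ALL writeback, and reset io->io_bio.
 */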
void ext4_io_submit(struct ext4_io_submit *io)
{
        struct bio *bio = io->io_bio;

        if (bio) {
                if (io->io_wbc->sync_mode == WB_SYNC_ALL)
                        io->io_bio->bi_opf |= REQ_SYNC;
                submit_bio(io->io_bio);
        }
        io->io_bio = NULL;
}

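/* Initialize an ext4_io_submit context for one writeback pass. */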
void ext4_io_submit_init(struct ext4_io_submit *io,
                         struct writeback_control *wbc)
{
        io->io_wbc = wbc;
        io->io_bio = NULL;
        io->io_end = NULL;
}

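/*
 * Allocate a new bio for @bh's block device, attach the fscrypt context and
 * a reference to the current io_end, and make it the bio that @io
 * accumulates buffers into.
 */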
static void io_submit_init_bio(struct ext4_io_submit *io,
                               struct buffer_head *bh)
{
        struct bio *bio;

        /*
         * bio_alloc will _always_ be able to allocate a bio if
         * __GFP_DIRECT_RECLAIM is set, see comments for bio_alloc_bioset().
         */
        bio = bio_alloc(bh->b_bdev, BIO_MAX_VECS, REQ_OP_WRITE, GFP_NOIO);
        fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
        bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
        bio->bi_end_io = ext4_end_bio;
        bio->bi_private = ext4_get_io_end(io->io_end);
        io->io_bio = bio;
        io->io_next_block = bh->b_blocknr;
        wbc_init_bio(io->io_wbc, bio);
}

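/*
 * Add @bh to the bio being built in @io, first submitting the current bio
 * and starting a new one if the buffer is not contiguous with it, cannot be
 * merged for fscrypt reasons, or the bio is already full.
 */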
static void io_submit_add_bh(struct ext4_io_submit *io,
                             struct inode *inode,
                             struct folio *folio,
                             struct folio *io_folio,
                             struct buffer_head *bh)
{
        if (io->io_bio && (bh->b_blocknr != io->io_next_block ||
                           !fscrypt_mergeable_bio_bh(io->io_bio, bh))) {
submit_and_retry:
                ext4_io_submit(io);
        }
        if (io->io_bio == NULL)
                io_submit_init_bio(io, bh);
        if (!bio_add_folio(io->io_bio, io_folio, bh->b_size, bh_offset(bh)))
                goto submit_and_retry;
        wbc_account_cgroup_owner(io->io_wbc, &folio->page, bh->b_size);
        io->io_next_block++;
}

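/*
 * Write out the first @len bytes of @folio. Writable buffers are marked
 * async_write and added to the bio(s) being built in @io; buffers that
 * cannot be written now (delayed, unwritten or unmapped) cause the folio to
 * be redirtied instead. For inodes using fscrypt filesystem-layer
 * encryption, data is first encrypted into a bounce folio. The final bio
 * stays in @io until the caller submits it with ext4_io_submit().
 */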
int ext4_bio_write_folio(struct ext4_io_submit *io, struct folio *folio,
                size_t len)
{
        struct folio *io_folio = folio;
        struct inode *inode = folio->mapping->host;
        unsigned block_start;
        struct buffer_head *bh, *head;
        int ret = 0;
        int nr_to_submit = 0;
        struct writeback_control *wbc = io->io_wbc;
        bool keep_towrite = false;

        BUG_ON(!folio_test_locked(folio));
        BUG_ON(folio_test_writeback(folio));

        /*
         * Comments copied from block_write_full_folio:
         *
         * The folio straddles i_size.  It must be zeroed out on each and every
         * writepage invocation because it may be mmapped.  "A file is mapped
         * in multiples of the page size.  For a file that is not a multiple of
         * the page size, the remaining memory is zeroed when mapped, and
         * writes to that region are not written out to the file."
         */
        if (len < folio_size(folio))
                folio_zero_segment(folio, len, folio_size(folio));
        /*
         * In the first loop we prepare and mark buffers to submit. We have to
         * mark all buffers in the folio before submitting so that
         * folio_end_writeback() cannot be called from ext4_end_bio() when IO
         * on the first buffer finishes and we are still working on submitting
         * the second buffer.
         */
        bh = head = folio_buffers(folio);
        do {
                block_start = bh_offset(bh);
                if (block_start >= len) {
                        clear_buffer_dirty(bh);
                        set_buffer_uptodate(bh);
                        continue;
                }
                if (!buffer_dirty(bh) || buffer_delay(bh) ||
                    !buffer_mapped(bh) || buffer_unwritten(bh)) {
                        /* A hole? We can safely clear the dirty bit */
                        if (!buffer_mapped(bh))
                                clear_buffer_dirty(bh);
                        /*
                         * Keeping dirty some buffer we cannot write? Make sure
                         * to redirty the folio and keep TOWRITE tag so that
                         * racing WB_SYNC_ALL writeback does not skip the folio.
                         * This happens e.g. when doing writeout for
                         * transaction commit or when journalled data is not
                         * yet committed.
                         */
                        if (buffer_dirty(bh) ||
                            (buffer_jbd(bh) && buffer_jbddirty(bh))) {
                                if (!folio_test_dirty(folio))
                                        folio_redirty_for_writepage(wbc, folio);
                                keep_towrite = true;
                        }
                        continue;
                }
                if (buffer_new(bh))
                        clear_buffer_new(bh);
                set_buffer_async_write(bh);
                clear_buffer_dirty(bh);
                nr_to_submit++;
        } while ((bh = bh->b_this_page) != head);

        /* Nothing to submit? Just unlock the folio... */
        if (!nr_to_submit)
                return 0;

        bh = head = folio_buffers(folio);

        /*
         * If any blocks are being written to an encrypted file, encrypt them
         * into a bounce page.  For simplicity, just encrypt until the last
         * block which might be needed.  This may cause some unneeded blocks
         * (e.g. holes) to be unnecessarily encrypted, but this is rare and
         * can't happen in the common case of blocksize == PAGE_SIZE.
         */
        if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
                gfp_t gfp_flags = GFP_NOFS;
                unsigned int enc_bytes = round_up(len, i_blocksize(inode));
                struct page *bounce_page;

                /*
                 * Since bounce page allocation uses a mempool, we can only use
                 * a waiting mask (i.e. request guaranteed allocation) on the
                 * first page of the bio.  Otherwise it can deadlock.
                 */
                if (io->io_bio)
                        gfp_flags = GFP_NOWAIT | __GFP_NOWARN;
        retry_encrypt:
                bounce_page = fscrypt_encrypt_pagecache_blocks(&folio->page,
                                        enc_bytes, 0, gfp_flags);
                if (IS_ERR(bounce_page)) {
                        ret = PTR_ERR(bounce_page);
                        if (ret == -ENOMEM &&
                            (io->io_bio || wbc->sync_mode == WB_SYNC_ALL)) {
                                gfp_t new_gfp_flags = GFP_NOFS;
                                if (io->io_bio)
                                        ext4_io_submit(io);
                                else
                                        new_gfp_flags |= __GFP_NOFAIL;
                                memalloc_retry_wait(gfp_flags);
                                gfp_flags = new_gfp_flags;
                                goto retry_encrypt;
                        }

                        printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
                        folio_redirty_for_writepage(wbc, folio);
                        do {
                                if (buffer_async_write(bh)) {
                                        clear_buffer_async_write(bh);
                                        set_buffer_dirty(bh);
                                }
                                bh = bh->b_this_page;
                        } while (bh != head);

                        return ret;
                }
                io_folio = page_folio(bounce_page);
        }

        __folio_start_writeback(folio, keep_towrite);

        /* Now submit buffers to write */
        do {
                if (!buffer_async_write(bh))
                        continue;
                io_submit_add_bh(io, inode, folio, io_folio, bh);
        } while ((bh = bh->b_this_page) != head);

        return 0;
}
