~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

TOMOYO Linux Cross Reference
Linux/fs/ntfs3/file.c

Version: ~ [ linux-6.11.5 ] ~ [ linux-6.10.14 ] ~ [ linux-6.9.12 ] ~ [ linux-6.8.12 ] ~ [ linux-6.7.12 ] ~ [ linux-6.6.58 ] ~ [ linux-6.5.13 ] ~ [ linux-6.4.16 ] ~ [ linux-6.3.13 ] ~ [ linux-6.2.16 ] ~ [ linux-6.1.114 ] ~ [ linux-6.0.19 ] ~ [ linux-5.19.17 ] ~ [ linux-5.18.19 ] ~ [ linux-5.17.15 ] ~ [ linux-5.16.20 ] ~ [ linux-5.15.169 ] ~ [ linux-5.14.21 ] ~ [ linux-5.13.19 ] ~ [ linux-5.12.19 ] ~ [ linux-5.11.22 ] ~ [ linux-5.10.228 ] ~ [ linux-5.9.16 ] ~ [ linux-5.8.18 ] ~ [ linux-5.7.19 ] ~ [ linux-5.6.19 ] ~ [ linux-5.5.19 ] ~ [ linux-5.4.284 ] ~ [ linux-5.3.18 ] ~ [ linux-5.2.21 ] ~ [ linux-5.1.21 ] ~ [ linux-5.0.21 ] ~ [ linux-4.20.17 ] ~ [ linux-4.19.322 ] ~ [ linux-4.18.20 ] ~ [ linux-4.17.19 ] ~ [ linux-4.16.18 ] ~ [ linux-4.15.18 ] ~ [ linux-4.14.336 ] ~ [ linux-4.13.16 ] ~ [ linux-4.12.14 ] ~ [ linux-4.11.12 ] ~ [ linux-4.10.17 ] ~ [ linux-4.9.337 ] ~ [ linux-4.4.302 ] ~ [ linux-3.10.108 ] ~ [ linux-2.6.32.71 ] ~ [ linux-2.6.0 ] ~ [ linux-2.4.37.11 ] ~ [ unix-v6-master ] ~ [ ccs-tools-1.8.9 ] ~ [ policy-sample ] ~
Architecture: ~ [ i386 ] ~ [ alpha ] ~ [ m68k ] ~ [ mips ] ~ [ ppc ] ~ [ sparc ] ~ [ sparc64 ] ~

  1 // SPDX-License-Identifier: GPL-2.0
  2 /*
  3  *
  4  * Copyright (C) 2019-2021 Paragon Software GmbH, All rights reserved.
  5  *
  6  *  Regular file handling primitives for NTFS-based filesystems.
  7  *
  8  */
  9 
 10 #include <linux/backing-dev.h>
 11 #include <linux/blkdev.h>
 12 #include <linux/buffer_head.h>
 13 #include <linux/compat.h>
 14 #include <linux/falloc.h>
 15 #include <linux/fiemap.h>
 16 #include <linux/fileattr.h>
 17 
 18 #include "debug.h"
 19 #include "ntfs.h"
 20 #include "ntfs_fs.h"
 21 
 22 static int ntfs_ioctl_fitrim(struct ntfs_sb_info *sbi, unsigned long arg)
 23 {
 24         struct fstrim_range __user *user_range;
 25         struct fstrim_range range;
 26         struct block_device *dev;
 27         int err;
 28 
 29         if (!capable(CAP_SYS_ADMIN))
 30                 return -EPERM;
 31 
 32         dev = sbi->sb->s_bdev;
 33         if (!bdev_max_discard_sectors(dev))
 34                 return -EOPNOTSUPP;
 35 
 36         user_range = (struct fstrim_range __user *)arg;
 37         if (copy_from_user(&range, user_range, sizeof(range)))
 38                 return -EFAULT;
 39 
 40         range.minlen = max_t(u32, range.minlen, bdev_discard_granularity(dev));
 41 
 42         err = ntfs_trim_fs(sbi, &range);
 43         if (err < 0)
 44                 return err;
 45 
 46         if (copy_to_user(user_range, &range, sizeof(range)))
 47                 return -EFAULT;
 48 
 49         return 0;
 50 }
 51 
 52 /*
 53  * ntfs_fileattr_get - inode_operations::fileattr_get
 54  */
 55 int ntfs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
 56 {
 57         struct inode *inode = d_inode(dentry);
 58         struct ntfs_inode *ni = ntfs_i(inode);
 59         u32 flags = 0;
 60 
 61         if (inode->i_flags & S_IMMUTABLE)
 62                 flags |= FS_IMMUTABLE_FL;
 63 
 64         if (inode->i_flags & S_APPEND)
 65                 flags |= FS_APPEND_FL;
 66 
 67         if (is_compressed(ni))
 68                 flags |= FS_COMPR_FL;
 69 
 70         if (is_encrypted(ni))
 71                 flags |= FS_ENCRYPT_FL;
 72 
 73         fileattr_fill_flags(fa, flags);
 74 
 75         return 0;
 76 }
 77 
 78 /*
 79  * ntfs_fileattr_set - inode_operations::fileattr_set
 80  */
 81 int ntfs_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry,
 82                       struct fileattr *fa)
 83 {
 84         struct inode *inode = d_inode(dentry);
 85         u32 flags = fa->flags;
 86         unsigned int new_fl = 0;
 87 
 88         if (fileattr_has_fsx(fa))
 89                 return -EOPNOTSUPP;
 90 
 91         if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL))
 92                 return -EOPNOTSUPP;
 93 
 94         if (flags & FS_IMMUTABLE_FL)
 95                 new_fl |= S_IMMUTABLE;
 96 
 97         if (flags & FS_APPEND_FL)
 98                 new_fl |= S_APPEND;
 99 
100         inode_set_flags(inode, new_fl, S_IMMUTABLE | S_APPEND);
101 
102         inode_set_ctime_current(inode);
103         mark_inode_dirty(inode);
104 
105         return 0;
106 }
107 
108 /*
109  * ntfs_ioctl - file_operations::unlocked_ioctl
110  */
111 long ntfs_ioctl(struct file *filp, u32 cmd, unsigned long arg)
112 {
113         struct inode *inode = file_inode(filp);
114         struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
115 
116         switch (cmd) {
117         case FITRIM:
118                 return ntfs_ioctl_fitrim(sbi, arg);
119         }
120         return -ENOTTY; /* Inappropriate ioctl for device. */
121 }
122 
123 #ifdef CONFIG_COMPAT
124 long ntfs_compat_ioctl(struct file *filp, u32 cmd, unsigned long arg)
125 
126 {
127         return ntfs_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
128 }
129 #endif
130 
131 /*
132  * ntfs_getattr - inode_operations::getattr
133  */
134 int ntfs_getattr(struct mnt_idmap *idmap, const struct path *path,
135                  struct kstat *stat, u32 request_mask, u32 flags)
136 {
137         struct inode *inode = d_inode(path->dentry);
138         struct ntfs_inode *ni = ntfs_i(inode);
139 
140         stat->result_mask |= STATX_BTIME;
141         stat->btime = ni->i_crtime;
142         stat->blksize = ni->mi.sbi->cluster_size; /* 512, 1K, ..., 2M */
143 
144         if (inode->i_flags & S_IMMUTABLE)
145                 stat->attributes |= STATX_ATTR_IMMUTABLE;
146 
147         if (inode->i_flags & S_APPEND)
148                 stat->attributes |= STATX_ATTR_APPEND;
149 
150         if (is_compressed(ni))
151                 stat->attributes |= STATX_ATTR_COMPRESSED;
152 
153         if (is_encrypted(ni))
154                 stat->attributes |= STATX_ATTR_ENCRYPTED;
155 
156         stat->attributes_mask |= STATX_ATTR_COMPRESSED | STATX_ATTR_ENCRYPTED |
157                                  STATX_ATTR_IMMUTABLE | STATX_ATTR_APPEND;
158 
159         generic_fillattr(idmap, request_mask, inode, stat);
160 
161         return 0;
162 }
163 
/*
 * ntfs_extend_initialized_size - Zero the range [@valid, @new_valid).
 *
 * Extends the "initialized size" of @ni by writing zeroes through the
 * page cache, page by page, so reads of the gap return zeroes rather
 * than stale on-disk data. Sparse holes are skipped (they already read
 * as zero). On failure, i_valid is rolled back to @valid so it never
 * claims uninitialized data.
 */
static int ntfs_extend_initialized_size(struct file *file,
					struct ntfs_inode *ni,
					const loff_t valid,
					const loff_t new_valid)
{
	struct inode *inode = &ni->vfs_inode;
	struct address_space *mapping = inode->i_mapping;
	struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
	loff_t pos = valid;
	int err;

	/* Resident data lives in the MFT record; just bump the valid size. */
	if (is_resident(ni)) {
		ni->i_valid = new_valid;
		return 0;
	}

	WARN_ON(is_compressed(ni));
	WARN_ON(valid >= new_valid);

	for (;;) {
		u32 zerofrom, len;
		struct page *page;
		u8 bits;
		CLST vcn, lcn, clen;

		if (is_sparsed(ni)) {
			bits = sbi->cluster_bits;
			vcn = pos >> bits;

			err = attr_data_get_block(ni, vcn, 1, &lcn, &clen, NULL,
						  false);
			if (err)
				goto out;

			/* Hole: reads already return zeroes, skip the run. */
			if (lcn == SPARSE_LCN) {
				pos = ((loff_t)clen + vcn) << bits;
				ni->i_valid = pos;
				goto next;
			}
		}

		/* Zero from 'pos' up to the end of this page (or new_valid). */
		zerofrom = pos & (PAGE_SIZE - 1);
		len = PAGE_SIZE - zerofrom;

		if (pos + len > new_valid)
			len = new_valid - pos;

		err = ntfs_write_begin(file, mapping, pos, len, &page, NULL);
		if (err)
			goto out;

		zero_user_segment(page, zerofrom, PAGE_SIZE);

		/* This function in any case puts page. */
		err = ntfs_write_end(file, mapping, pos, len, len, page, NULL);
		if (err < 0)
			goto out;
		pos += len;

next:
		if (pos >= new_valid)
			break;

		/* Keep dirty pages under control during long zeroing runs. */
		balance_dirty_pages_ratelimited(mapping);
		cond_resched();
	}

	return 0;

out:
	/* Roll back: i_valid must not cover uninitialized data. */
	ni->i_valid = valid;
	ntfs_inode_warn(inode, "failed to extend initialized size to %llx.",
			new_valid);
	return err;
}
239 
240 /*
241  * ntfs_zero_range - Helper function for punch_hole.
242  *
243  * It zeroes a range [vbo, vbo_to).
244  */
static int ntfs_zero_range(struct inode *inode, u64 vbo, u64 vbo_to)
{
	int err = 0;
	struct address_space *mapping = inode->i_mapping;
	u32 blocksize = i_blocksize(inode);
	pgoff_t idx = vbo >> PAGE_SHIFT;
	u32 from = vbo & (PAGE_SIZE - 1); /* offset of vbo in the first page */
	pgoff_t idx_end = (vbo_to + PAGE_SIZE - 1) >> PAGE_SHIFT;
	loff_t page_off;
	struct buffer_head *head, *bh;
	u32 bh_next, bh_off, to;
	sector_t iblock;
	struct folio *folio;
	bool dirty = false;

	/* Walk one folio at a time; 'from' is nonzero only for the first. */
	for (; idx < idx_end; idx += 1, from = 0) {
		page_off = (loff_t)idx << PAGE_SHIFT;
		/* 'to' is the end of the zeroed span within this folio. */
		to = (page_off + PAGE_SIZE) > vbo_to ? (vbo_to - page_off) :
						       PAGE_SIZE;
		iblock = page_off >> inode->i_blkbits;

		folio = __filemap_get_folio(
			mapping, idx, FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
			mapping_gfp_constraint(mapping, ~__GFP_FS));
		if (IS_ERR(folio))
			return PTR_ERR(folio);

		head = folio_buffers(folio);
		if (!head)
			head = create_empty_buffers(folio, blocksize, 0);

		/* Map and dirty every buffer that overlaps [from, to). */
		bh = head;
		bh_off = 0;
		do {
			bh_next = bh_off + blocksize;

			/* Skip buffers entirely outside the zeroed span. */
			if (bh_next <= from || bh_off >= to)
				continue;

			if (!buffer_mapped(bh)) {
				ntfs_get_block(inode, iblock, bh, 0);
				/* Unmapped? It's a hole - nothing to do. */
				if (!buffer_mapped(bh))
					continue;
			}

			/* Ok, it's mapped. Make sure it's up-to-date. */
			if (folio_test_uptodate(folio))
				set_buffer_uptodate(bh);
			else if (bh_read(bh, 0) < 0) {
				err = -EIO;
				folio_unlock(folio);
				folio_put(folio);
				goto out;
			}

			mark_buffer_dirty(bh);
		} while (bh_off = bh_next, iblock += 1,
			 head != (bh = bh->b_this_page));

		folio_zero_segment(folio, from, to);
		dirty = true;

		folio_unlock(folio);
		folio_put(folio);
		cond_resched();
	}
out:
	/* Only dirty the inode if at least one folio was actually zeroed. */
	if (dirty)
		mark_inode_dirty(inode);
	return err;
}
317 
318 /*
319  * ntfs_file_mmap - file_operations::mmap
320  */
static int ntfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	struct ntfs_inode *ni = ntfs_i(inode);
	/* Byte offset in the file where this mapping starts. */
	u64 from = ((u64)vma->vm_pgoff << PAGE_SHIFT);
	bool rw = vma->vm_flags & VM_WRITE;
	int err;

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	/* Encrypted, deduplicated and compressed-writable files cannot be mapped. */
	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "mmap encrypted not supported");
		return -EOPNOTSUPP;
	}

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "mmap deduplicated not supported");
		return -EOPNOTSUPP;
	}

	if (is_compressed(ni) && rw) {
		ntfs_inode_warn(inode, "mmap(write) compressed not supported");
		return -EOPNOTSUPP;
	}

	if (rw) {
		/* Mapped range, clamped to the current file size. */
		u64 to = min_t(loff_t, i_size_read(inode),
			       from + vma->vm_end - vma->vm_start);

		if (is_sparsed(ni)) {
			/* Allocate clusters for rw map. */
			struct ntfs_sb_info *sbi = inode->i_sb->s_fs_info;
			CLST lcn, len;
			CLST vcn = from >> sbi->cluster_bits;
			CLST end = bytes_to_cluster(sbi, to);
			bool new;

			for (; vcn < end; vcn += len) {
				err = attr_data_get_block(ni, vcn, 1, &lcn,
							  &len, &new, true);
				if (err)
					goto out;
			}
		}

		/* Zero the gap up to 'to' so faults never expose stale data. */
		if (ni->i_valid < to) {
			inode_lock(inode);
			err = ntfs_extend_initialized_size(file, ni,
							   ni->i_valid, to);
			inode_unlock(inode);
			if (err)
				goto out;
		}
	}

	err = generic_file_mmap(file, vma);
out:
	return err;
}
381 
/*
 * ntfs_extend - Prepare the inode for a write at [@pos, @pos + @count).
 *
 * Grows i_size when the write extends the file, zeroes the gap between
 * the old valid size and @pos (unless compressed), and preallocates
 * clusters for sparse files. @file == NULL (the setattr path, see
 * ntfs3_setattr) skips the valid-size extension and preallocation.
 * Honors O_SYNC/IS_SYNC by writing back the affected range.
 */
static int ntfs_extend(struct inode *inode, loff_t pos, size_t count,
		       struct file *file)
{
	struct ntfs_inode *ni = ntfs_i(inode);
	struct address_space *mapping = inode->i_mapping;
	loff_t end = pos + count;
	bool extend_init = file && pos > ni->i_valid;
	int err;

	/* Nothing grows and nothing needs zeroing: done. */
	if (end <= inode->i_size && !extend_init)
		return 0;

	/* Mark rw ntfs as dirty. It will be cleared at umount. */
	ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_DIRTY);

	if (end > inode->i_size) {
		err = ntfs_set_size(inode, end);
		if (err)
			goto out;
	}

	if (extend_init && !is_compressed(ni)) {
		err = ntfs_extend_initialized_size(file, ni, ni->i_valid, pos);
		if (err)
			goto out;
	} else {
		err = 0;
	}

	if (file && is_sparsed(ni)) {
		/*
		 * This code optimizes large writes to sparse file.
		 * TODO: merge this fragment with fallocate fragment.
		 */
		struct ntfs_sb_info *sbi = ni->mi.sbi;
		CLST vcn = pos >> sbi->cluster_bits;
		CLST cend = bytes_to_cluster(sbi, end);
		CLST cend_v = bytes_to_cluster(sbi, ni->i_valid);
		CLST lcn, clen;
		bool new;

		if (cend_v > cend)
			cend_v = cend;

		/*
		 * Allocate and zero new clusters.
		 * Zeroing these clusters may be too long.
		 */
		for (; vcn < cend_v; vcn += clen) {
			err = attr_data_get_block(ni, vcn, cend_v - vcn, &lcn,
						  &clen, &new, true);
			if (err)
				goto out;
		}
		/*
		 * Allocate but not zero new clusters.
		 */
		for (; vcn < cend; vcn += clen) {
			err = attr_data_get_block(ni, vcn, cend - vcn, &lcn,
						  &clen, &new, false);
			if (err)
				goto out;
		}
	}

	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	mark_inode_dirty(inode);

	if (IS_SYNC(inode)) {
		int err2;

		/* Flush data, metadata buffers and the inode itself. */
		err = filemap_fdatawrite_range(mapping, pos, end - 1);
		err2 = sync_mapping_buffers(mapping);
		if (!err)
			err = err2;
		err2 = write_inode_now(inode, 1);
		if (!err)
			err = err2;
		if (!err)
			err = filemap_fdatawait_range(mapping, pos, end - 1);
	}

out:
	return err;
}
467 
/*
 * ntfs_truncate - Set a regular file's size to @new_size.
 *
 * Used on the shrink path of ntfs3_setattr (see newsize <= oldsize
 * there). Zeroes the tail of the last block, shrinks the page cache
 * and the on-disk ATTR_DATA attribute, and clamps the valid size.
 */
static int ntfs_truncate(struct inode *inode, loff_t new_size)
{
	struct super_block *sb = inode->i_sb;
	struct ntfs_inode *ni = ntfs_i(inode);
	int err, dirty = 0;
	u64 new_valid;

	if (!S_ISREG(inode->i_mode))
		return 0;

	if (is_compressed(ni)) {
		/* Compressed data: no partial-block zeroing, just clamp. */
		if (ni->i_valid > new_size)
			ni->i_valid = new_size;
	} else {
		/* Zero the remainder of the block containing new_size. */
		err = block_truncate_page(inode->i_mapping, new_size,
					  ntfs_get_block);
		if (err)
			return err;
	}

	new_valid = ntfs_up_block(sb, min_t(u64, ni->i_valid, new_size));

	truncate_setsize(inode, new_size);

	ni_lock(ni);

	/* Resize ATTR_DATA under the run lock. */
	down_write(&ni->file.run_lock);
	err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size,
			    &new_valid, ni->mi.sbi->options->prealloc, NULL);
	up_write(&ni->file.run_lock);

	if (new_valid < ni->i_valid)
		ni->i_valid = new_valid;

	ni_unlock(ni);

	ni->std_fa |= FILE_ATTRIBUTE_ARCHIVE;
	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	if (!IS_DIRSYNC(inode)) {
		dirty = 1;
	} else {
		/* Synchronous inode: write it out now instead of dirtying. */
		err = ntfs_sync_inode(inode);
		if (err)
			return err;
	}

	if (dirty)
		mark_inode_dirty(inode);

	/*ntfs_flush_inodes(inode->i_sb, inode, NULL);*/

	return 0;
}
521 
/*
 * ntfs_fallocate - file_operations::fallocate
 *
 * Preallocate space for a file. This implements ntfs's fallocate file
 * operation, which gets called from the sys_fallocate system call. User
 * space requests 'len' bytes at 'vbo'. If FALLOC_FL_KEEP_SIZE is set
 * we just allocate clusters without zeroing them out. Otherwise we
 * allocate and zero out clusters via an expanding truncate.
 */
static long ntfs_fallocate(struct file *file, int mode, loff_t vbo, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	struct super_block *sb = inode->i_sb;
	struct ntfs_sb_info *sbi = sb->s_fs_info;
	struct ntfs_inode *ni = ntfs_i(inode);
	loff_t end = vbo + len;
	/* Round down to both cluster and page granularity for invalidation. */
	loff_t vbo_down = round_down(vbo, max_t(unsigned long,
						sbi->cluster_size, PAGE_SIZE));
	bool is_supported_holes = is_sparsed(ni) || is_compressed(ni);
	loff_t i_size, new_size;
	bool map_locked;
	int err;

	/* No support for dir. */
	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	/*
	 * vfs_fallocate checks all possible combinations of mode.
	 * Do additional checks here before ntfs_set_state(dirty).
	 */
	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (!is_supported_holes)
			return -EOPNOTSUPP;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		if (!is_supported_holes)
			return -EOPNOTSUPP;
	} else if (mode &
		   ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
		     FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)) {
		ntfs_inode_warn(inode, "fallocate(0x%x) is not supported",
				mode);
		return -EOPNOTSUPP;
	}

	ntfs_set_state(sbi, NTFS_DIRTY_DIRTY);

	inode_lock(inode);
	i_size = inode->i_size;
	new_size = max(end, i_size);
	map_locked = false;

	if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
		/* Should never be here, see ntfs_file_open. */
		err = -EOPNOTSUPP;
		goto out;
	}

	/* Destructive modes must exclude DIO and page-cache lookups. */
	if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
		    FALLOC_FL_INSERT_RANGE)) {
		inode_dio_wait(inode);
		filemap_invalidate_lock(mapping);
		map_locked = true;
	}

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		u32 frame_size;
		loff_t mask, vbo_a, end_a, tmp;

		/* Flush dirty pages before dropping them from the cache. */
		err = filemap_write_and_wait_range(mapping, vbo_down,
						   LLONG_MAX);
		if (err)
			goto out;

		truncate_pagecache(inode, vbo_down);

		ni_lock(ni);
		err = attr_punch_hole(ni, vbo, len, &frame_size);
		ni_unlock(ni);
		if (!err)
			goto ok;

		if (err != E_NTFS_NOTALIGNED)
			goto out;

		/* Process not aligned punch. */
		err = 0;
		mask = frame_size - 1;
		/* Align the inner part up/down to the frame size. */
		vbo_a = (vbo + mask) & ~mask;
		end_a = end & ~mask;

		/* Zero the unaligned head of the range by hand. */
		tmp = min(vbo_a, end);
		if (tmp > vbo) {
			err = ntfs_zero_range(inode, vbo, tmp);
			if (err)
				goto out;
		}

		/* Zero the unaligned tail of the range by hand. */
		if (vbo < end_a && end_a < end) {
			err = ntfs_zero_range(inode, end_a, end);
			if (err)
				goto out;
		}

		/* Aligned punch_hole */
		if (end_a > vbo_a) {
			ni_lock(ni);
			err = attr_punch_hole(ni, vbo_a, end_a - vbo_a, NULL);
			ni_unlock(ni);
			if (err)
				goto out;
		}
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		/*
		 * Write tail of the last page before removed range since
		 * it will get removed from the page cache below.
		 */
		err = filemap_write_and_wait_range(mapping, vbo_down, vbo);
		if (err)
			goto out;

		/*
		 * Write data that will be shifted to preserve them
		 * when discarding page cache below.
		 */
		err = filemap_write_and_wait_range(mapping, end, LLONG_MAX);
		if (err)
			goto out;

		truncate_pagecache(inode, vbo_down);

		ni_lock(ni);
		err = attr_collapse_range(ni, vbo, len);
		ni_unlock(ni);
		if (err)
			goto out;
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		/* Check new size. */
		err = inode_newsize_ok(inode, new_size);
		if (err)
			goto out;

		/* Write out all dirty pages. */
		err = filemap_write_and_wait_range(mapping, vbo_down,
						   LLONG_MAX);
		if (err)
			goto out;
		truncate_pagecache(inode, vbo_down);

		ni_lock(ni);
		err = attr_insert_range(ni, vbo, len);
		ni_unlock(ni);
		if (err)
			goto out;
	} else {
		/* Plain allocation (with or without FALLOC_FL_KEEP_SIZE). */
		u8 cluster_bits = sbi->cluster_bits;

		/* Be sure file is non resident. */
		if (is_resident(ni)) {
			ni_lock(ni);
			err = attr_force_nonresident(ni);
			ni_unlock(ni);
			if (err)
				goto out;
		}

		/* generic/213: expected -ENOSPC instead of -EFBIG. */
		if (!is_supported_holes) {
			loff_t to_alloc = new_size - inode_get_bytes(inode);

			if (to_alloc > 0 &&
			    (to_alloc >> cluster_bits) >
				    wnd_zeroes(&sbi->used.bitmap)) {
				err = -ENOSPC;
				goto out;
			}
		}

		err = inode_newsize_ok(inode, new_size);
		if (err)
			goto out;

		if (new_size > i_size) {
			/*
			 * Allocate clusters, do not change 'valid' size.
			 */
			err = ntfs_set_size(inode, new_size);
			if (err)
				goto out;
		}

		if (is_supported_holes) {
			CLST vcn = vbo >> cluster_bits;
			CLST cend = bytes_to_cluster(sbi, end);
			CLST cend_v = bytes_to_cluster(sbi, ni->i_valid);
			CLST lcn, clen;
			bool new;

			if (cend_v > cend)
				cend_v = cend;

			/*
			 * Allocate and zero new clusters.
			 * Zeroing these clusters may be too long.
			 */
			for (; vcn < cend_v; vcn += clen) {
				err = attr_data_get_block(ni, vcn, cend_v - vcn,
							  &lcn, &clen, &new,
							  true);
				if (err)
					goto out;
			}
			/*
			 * Allocate but not zero new clusters.
			 */
			for (; vcn < cend; vcn += clen) {
				err = attr_data_get_block(ni, vcn, cend - vcn,
							  &lcn, &clen, &new,
							  false);
				if (err)
					goto out;
			}
		}

		if (mode & FALLOC_FL_KEEP_SIZE) {
			ni_lock(ni);
			/* True - Keep preallocated. */
			err = attr_set_size(ni, ATTR_DATA, NULL, 0,
					    &ni->file.run, i_size, &ni->i_valid,
					    true, NULL);
			ni_unlock(ni);
			if (err)
				goto out;
		} else if (new_size > i_size) {
			i_size_write(inode, new_size);
		}
	}

ok:
	err = file_modified(file);
	if (err)
		goto out;

out:
	if (map_locked)
		filemap_invalidate_unlock(mapping);

	if (!err) {
		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
		mark_inode_dirty(inode);
	}

	inode_unlock(inode);
	return err;
}
780 
781 /*
782  * ntfs3_setattr - inode_operations::setattr
783  */
int ntfs3_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
		  struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct ntfs_inode *ni = ntfs_i(inode);
	u32 ia_valid = attr->ia_valid;
	umode_t mode = inode->i_mode; /* remember the pre-setattr mode */
	int err;

	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	err = setattr_prepare(idmap, dentry, attr);
	if (err)
		goto out;

	if (ia_valid & ATTR_SIZE) {
		loff_t newsize, oldsize;

		if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
			/* Should never be here, see ntfs_file_open(). */
			err = -EOPNOTSUPP;
			goto out;
		}
		inode_dio_wait(inode);
		oldsize = i_size_read(inode);
		newsize = attr->ia_size;

		/* Shrink via truncate, grow via extend (no valid-size change). */
		if (newsize <= oldsize)
			err = ntfs_truncate(inode, newsize);
		else
			err = ntfs_extend(inode, newsize, 0, NULL);

		if (err)
			goto out;

		ni->ni_flags |= NI_FLAG_UPDATE_PARENT;
		i_size_write(inode, newsize);
	}

	setattr_copy(idmap, inode, attr);

	if (mode != inode->i_mode) {
		err = ntfs_acl_chmod(idmap, dentry);
		if (err)
			goto out;

		/* Linux 'w' -> Windows 'ro'. */
		if (0222 & inode->i_mode)
			ni->std_fa &= ~FILE_ATTRIBUTE_READONLY;
		else
			ni->std_fa |= FILE_ATTRIBUTE_READONLY;
	}

	/* Persist ownership/mode for WSL interoperability. */
	if (ia_valid & (ATTR_UID | ATTR_GID | ATTR_MODE))
		ntfs_save_wsl_perm(inode, NULL);
	mark_inode_dirty(inode);
out:
	return err;
}
844 
845 static ssize_t ntfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
846 {
847         struct file *file = iocb->ki_filp;
848         struct inode *inode = file_inode(file);
849         struct ntfs_inode *ni = ntfs_i(inode);
850 
851         if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
852                 return -EIO;
853 
854         if (is_encrypted(ni)) {
855                 ntfs_inode_warn(inode, "encrypted i/o not supported");
856                 return -EOPNOTSUPP;
857         }
858 
859         if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
860                 ntfs_inode_warn(inode, "direct i/o + compressed not supported");
861                 return -EOPNOTSUPP;
862         }
863 
864 #ifndef CONFIG_NTFS3_LZX_XPRESS
865         if (ni->ni_flags & NI_FLAG_COMPRESSED_MASK) {
866                 ntfs_inode_warn(
867                         inode,
868                         "activate CONFIG_NTFS3_LZX_XPRESS to read external compressed files");
869                 return -EOPNOTSUPP;
870         }
871 #endif
872 
873         if (is_dedup(ni)) {
874                 ntfs_inode_warn(inode, "read deduplicated not supported");
875                 return -EOPNOTSUPP;
876         }
877 
878         return generic_file_read_iter(iocb, iter);
879 }
880 
/*
 * ntfs_file_splice_read - file_operations::splice_read
 *
 * Same capability screening as ntfs_file_read_iter(), then the data
 * is spliced straight out of the page cache.
 */
static ssize_t ntfs_file_splice_read(struct file *in, loff_t *ppos,
				     struct pipe_inode_info *pipe, size_t len,
				     unsigned int flags)
{
	struct inode *inode = file_inode(in);
	struct ntfs_inode *ni = ntfs_i(inode);

	/* No i/o at all after a forced shutdown of the volume. */
	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	/* Reading encrypted streams is not implemented. */
	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "encrypted i/o not supported");
		return -EOPNOTSUPP;
	}

#ifndef CONFIG_NTFS3_LZX_XPRESS
	/* Externally (LZX/Xpress) compressed files need the optional decoder. */
	if (ni->ni_flags & NI_FLAG_COMPRESSED_MASK) {
		ntfs_inode_warn(
			inode,
			"activate CONFIG_NTFS3_LZX_XPRESS to read external compressed files");
		return -EOPNOTSUPP;
	}
#endif

	/* Deduplicated streams are not supported either. */
	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "read deduplicated not supported");
		return -EOPNOTSUPP;
	}

	return filemap_splice_read(in, ppos, pipe, len, flags);
}
912 
913 /*
914  * ntfs_get_frame_pages
915  *
916  * Return: Array of locked pages.
917  */
918 static int ntfs_get_frame_pages(struct address_space *mapping, pgoff_t index,
919                                 struct page **pages, u32 pages_per_frame,
920                                 bool *frame_uptodate)
921 {
922         gfp_t gfp_mask = mapping_gfp_mask(mapping);
923         u32 npages;
924 
925         *frame_uptodate = true;
926 
927         for (npages = 0; npages < pages_per_frame; npages++, index++) {
928                 struct folio *folio;
929 
930                 folio = __filemap_get_folio(mapping, index,
931                                             FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
932                                             gfp_mask);
933                 if (IS_ERR(folio)) {
934                         while (npages--) {
935                                 folio = page_folio(pages[npages]);
936                                 folio_unlock(folio);
937                                 folio_put(folio);
938                         }
939 
940                         return -ENOMEM;
941                 }
942 
943                 if (!folio_test_uptodate(folio))
944                         *frame_uptodate = false;
945 
946                 pages[npages] = &folio->page;
947         }
948 
949         return 0;
950 }
951 
/*
 * ntfs_compress_write - Helper for ntfs_file_write_iter() (compressed files).
 *
 * Writes to an LZNT-compressed stream one full compression frame at a
 * time: each touched frame is brought into the page cache, user data is
 * copied over it, and the frame is rewritten via ni_write_frame().
 *
 * Return: number of bytes written, or a negative errno if nothing could
 * be written.
 */
static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from)
{
	int err;
	struct file *file = iocb->ki_filp;
	size_t count = iov_iter_count(from);
	loff_t pos = iocb->ki_pos;
	struct inode *inode = file_inode(file);
	loff_t i_size = i_size_read(inode);
	struct address_space *mapping = inode->i_mapping;
	struct ntfs_inode *ni = ntfs_i(inode);
	u64 valid = ni->i_valid;
	struct ntfs_sb_info *sbi = ni->mi.sbi;
	struct page *page, **pages = NULL;
	size_t written = 0;
	/* A compression frame covers 2^NTFS_LZNT_CUNIT clusters. */
	u8 frame_bits = NTFS_LZNT_CUNIT + sbi->cluster_bits;
	u32 frame_size = 1u << frame_bits;
	u32 pages_per_frame = frame_size >> PAGE_SHIFT;
	u32 ip, off;
	CLST frame;
	u64 frame_vbo;
	pgoff_t index;
	bool frame_uptodate;

	if (frame_size < PAGE_SIZE) {
		/*
		 * frame_size == 8K if cluster 512
		 * frame_size == 64K if cluster 4096
		 */
		ntfs_inode_warn(inode, "page size is bigger than frame size");
		return -EOPNOTSUPP;
	}

	/* Scratch array holding one frame's worth of page pointers. */
	pages = kmalloc_array(pages_per_frame, sizeof(struct page *), GFP_NOFS);
	if (!pages)
		return -ENOMEM;

	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	/* Zero range [valid : pos). */
	while (valid < pos) {
		CLST lcn, clen;

		frame = valid >> frame_bits;
		frame_vbo = valid & ~(frame_size - 1);
		off = valid & (frame_size - 1);

		/* Map the frame's first cluster to see whether it is sparse. */
		err = attr_data_get_block(ni, frame << NTFS_LZNT_CUNIT, 1, &lcn,
					  &clen, NULL, false);
		if (err)
			goto out;

		if (lcn == SPARSE_LCN) {
			/* Nothing allocated here; just advance the valid size. */
			ni->i_valid = valid =
				frame_vbo + ((u64)clen << sbi->cluster_bits);
			continue;
		}

		/* Load full frame. */
		err = ntfs_get_frame_pages(mapping, frame_vbo >> PAGE_SHIFT,
					   pages, pages_per_frame,
					   &frame_uptodate);
		if (err)
			goto out;

		/* Partially valid frame: read the old contents in first. */
		if (!frame_uptodate && off) {
			err = ni_read_frame(ni, frame_vbo, pages,
					    pages_per_frame);
			if (err) {
				for (ip = 0; ip < pages_per_frame; ip++) {
					page = pages[ip];
					unlock_page(page);
					put_page(page);
				}
				goto out;
			}
		}

		/* Zero everything from 'valid' to the end of the frame. */
		ip = off >> PAGE_SHIFT;
		off = offset_in_page(valid);
		for (; ip < pages_per_frame; ip++, off = 0) {
			page = pages[ip];
			zero_user_segment(page, off, PAGE_SIZE);
			flush_dcache_page(page);
			SetPageUptodate(page);
		}

		ni_lock(ni);
		err = ni_write_frame(ni, pages, pages_per_frame);
		ni_unlock(ni);

		for (ip = 0; ip < pages_per_frame; ip++) {
			page = pages[ip];
			SetPageUptodate(page);
			unlock_page(page);
			put_page(page);
		}

		if (err)
			goto out;

		ni->i_valid = valid = frame_vbo + frame_size;
	}

	/* Copy user data [pos : pos + count). */
	while (count) {
		size_t copied, bytes;

		/* Clamp this iteration to the remainder of the current frame. */
		off = pos & (frame_size - 1);
		bytes = frame_size - off;
		if (bytes > count)
			bytes = count;

		frame_vbo = pos & ~(frame_size - 1);
		index = frame_vbo >> PAGE_SHIFT;

		/* Fault user pages in now; copies below run atomically. */
		if (unlikely(fault_in_iov_iter_readable(from, bytes))) {
			err = -EFAULT;
			goto out;
		}

		/* Load full frame. */
		err = ntfs_get_frame_pages(mapping, index, pages,
					   pages_per_frame, &frame_uptodate);
		if (err)
			goto out;

		if (!frame_uptodate) {
			loff_t to = pos + bytes;

			/*
			 * Read the existing frame first unless the write
			 * fully overwrites it (or extends past i_size).
			 */
			if (off || (to < i_size && (to & (frame_size - 1)))) {
				err = ni_read_frame(ni, frame_vbo, pages,
						    pages_per_frame);
				if (err) {
					for (ip = 0; ip < pages_per_frame;
					     ip++) {
						page = pages[ip];
						unlock_page(page);
						put_page(page);
					}
					goto out;
				}
			}
		}

		WARN_ON(!bytes);
		copied = 0;
		ip = off >> PAGE_SHIFT;
		off = offset_in_page(pos);

		/* Copy user data to pages. */
		for (;;) {
			size_t cp, tail = PAGE_SIZE - off;

			page = pages[ip];
			cp = copy_page_from_iter_atomic(page, off,
							min(tail, bytes), from);
			flush_dcache_page(page);

			copied += cp;
			bytes -= cp;
			/* Done, or short copy (faulted despite prefault). */
			if (!bytes || !cp)
				break;

			if (cp < tail) {
				off += cp;
			} else {
				ip++;
				off = 0;
			}
		}

		ni_lock(ni);
		err = ni_write_frame(ni, pages, pages_per_frame);
		ni_unlock(ni);

		for (ip = 0; ip < pages_per_frame; ip++) {
			page = pages[ip];
			ClearPageDirty(page);
			SetPageUptodate(page);
			unlock_page(page);
			put_page(page);
		}

		if (err)
			goto out;

		/*
		 * We can loop for a long time in here. Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();

		pos += copied;
		written += copied;

		count = iov_iter_count(from);
	}

out:
	kfree(pages);

	if (err < 0)
		return err;

	/* Success: advance file position, valid size and i_size. */
	iocb->ki_pos += written;
	if (iocb->ki_pos > ni->i_valid)
		ni->i_valid = iocb->ki_pos;
	if (iocb->ki_pos > i_size)
		i_size_write(inode, iocb->ki_pos);

	return written;
}
1174 
/*
 * ntfs_file_write_iter - file_operations::write_iter
 *
 * Validate the request, extend the file if needed, then dispatch to
 * the compressed-write helper or the generic buffered write path.
 */
static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	ssize_t ret;
	int err;
	struct ntfs_inode *ni = ntfs_i(inode);

	/* No i/o after a forced shutdown of the volume. */
	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	/* Writing encrypted streams is not implemented. */
	if (is_encrypted(ni)) {
		ntfs_inode_warn(inode, "encrypted i/o not supported");
		return -EOPNOTSUPP;
	}

	/* Direct i/o cannot be combined with transparent compression. */
	if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
		ntfs_inode_warn(inode, "direct i/o + compressed not supported");
		return -EOPNOTSUPP;
	}

	if (is_dedup(ni)) {
		ntfs_inode_warn(inode, "write into deduplicated not supported");
		return -EOPNOTSUPP;
	}

	/* Honor IOCB_NOWAIT: do not sleep waiting for the inode lock. */
	if (!inode_trylock(inode)) {
		if (iocb->ki_flags & IOCB_NOWAIT)
			return -EAGAIN;
		inode_lock(inode);
	}

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto out;

	/* Strip suid/sgid and update timestamps before writing. */
	err = file_modified(iocb->ki_filp);
	if (err) {
		ret = err;
		goto out;
	}

	if (WARN_ON(ni->ni_flags & NI_FLAG_COMPRESSED_MASK)) {
		/* Should never be here, see ntfs_file_open(). */
		ret = -EOPNOTSUPP;
		goto out;
	}

	/* Make sure the range [ki_pos, ki_pos + ret) is backed on disk. */
	ret = ntfs_extend(inode, iocb->ki_pos, ret, file);
	if (ret)
		goto out;

	ret = is_compressed(ni) ? ntfs_compress_write(iocb, from) :
				  __generic_file_write_iter(iocb, from);

out:
	inode_unlock(inode);

	/* For O_SYNC/O_DSYNC writes, flush what was just written. */
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);

	return ret;
}
1241 
/*
 * ntfs_file_open - file_operations::open
 *
 * Reject open modes the driver cannot service and, when configured,
 * decompress externally compressed files that are opened for writing.
 */
int ntfs_file_open(struct inode *inode, struct file *file)
{
	struct ntfs_inode *ni = ntfs_i(inode);

	/* No opens after a forced shutdown of the volume. */
	if (unlikely(ntfs3_forced_shutdown(inode->i_sb)))
		return -EIO;

	/* O_DIRECT is incompatible with compressed/encrypted streams. */
	if (unlikely((is_compressed(ni) || is_encrypted(ni)) &&
		     (file->f_flags & O_DIRECT))) {
		return -EOPNOTSUPP;
	}

	/* Decompress "external compressed" file if opened for rw. */
	if ((ni->ni_flags & NI_FLAG_COMPRESSED_MASK) &&
	    (file->f_flags & (O_WRONLY | O_RDWR | O_TRUNC))) {
#ifdef CONFIG_NTFS3_LZX_XPRESS
		/* One-time decompression; writes then use the normal path. */
		int err = ni_decompress_file(ni);

		if (err)
			return err;
#else
		ntfs_inode_warn(
			inode,
			"activate CONFIG_NTFS3_LZX_XPRESS to write external compressed files");
		return -EOPNOTSUPP;
#endif
	}

	return generic_file_open(inode, file);
}
1275 
1276 /*
1277  * ntfs_file_release - file_operations::release
1278  */
1279 static int ntfs_file_release(struct inode *inode, struct file *file)
1280 {
1281         struct ntfs_inode *ni = ntfs_i(inode);
1282         struct ntfs_sb_info *sbi = ni->mi.sbi;
1283         int err = 0;
1284 
1285         /* If we are last writer on the inode, drop the block reservation. */
1286         if (sbi->options->prealloc &&
1287             ((file->f_mode & FMODE_WRITE) &&
1288              atomic_read(&inode->i_writecount) == 1)) {
1289                 ni_lock(ni);
1290                 down_write(&ni->file.run_lock);
1291 
1292                 err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run,
1293                                     i_size_read(inode), &ni->i_valid, false,
1294                                     NULL);
1295 
1296                 up_write(&ni->file.run_lock);
1297                 ni_unlock(ni);
1298         }
1299         return err;
1300 }
1301 
1302 /*
1303  * ntfs_fiemap - inode_operations::fiemap
1304  */
1305 int ntfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
1306                 __u64 start, __u64 len)
1307 {
1308         int err;
1309         struct ntfs_inode *ni = ntfs_i(inode);
1310 
1311         err = fiemap_prep(inode, fieinfo, start, &len, ~FIEMAP_FLAG_XATTR);
1312         if (err)
1313                 return err;
1314 
1315         ni_lock(ni);
1316 
1317         err = ni_fiemap(ni, fieinfo, start, len);
1318 
1319         ni_unlock(ni);
1320 
1321         return err;
1322 }
1323 
// clang-format off
/* Inode operations for regular ntfs3 files. */
const struct inode_operations ntfs_file_inode_operations = {
	.getattr	= ntfs_getattr,
	.setattr	= ntfs3_setattr,
	.listxattr	= ntfs_listxattr,
	.get_acl	= ntfs_get_acl,
	.set_acl	= ntfs_set_acl,
	.fiemap		= ntfs_fiemap,
	.fileattr_get	= ntfs_fileattr_get,
	.fileattr_set	= ntfs_fileattr_set,
};

/* File operations for regular ntfs3 files. */
const struct file_operations ntfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= ntfs_file_read_iter,
	.write_iter	= ntfs_file_write_iter,
	.unlocked_ioctl = ntfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ntfs_compat_ioctl,
#endif
	.splice_read	= ntfs_file_splice_read,
	.mmap		= ntfs_file_mmap,
	.open		= ntfs_file_open,
	.fsync		= generic_file_fsync,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ntfs_fallocate,
	.release	= ntfs_file_release,
};

#if IS_ENABLED(CONFIG_NTFS_FS)
/*
 * Reduced operation set (read paths only — no write_iter, mmap,
 * ioctl or fallocate) used when serving the legacy "ntfs" type.
 */
const struct file_operations ntfs_legacy_file_operations = {
	.llseek		= generic_file_llseek,
	.read_iter	= ntfs_file_read_iter,
	.splice_read	= ntfs_file_splice_read,
	.open		= ntfs_file_open,
	.release	= ntfs_file_release,
};
#endif
// clang-format on
1363 

~ [ source navigation ] ~ [ diff markup ] ~ [ identifier search ] ~

kernel.org | git.kernel.org | LWN.net | Project Home | SVN repository | Mail admin

Linux® is a registered trademark of Linus Torvalds in the United States and other countries.
TOMOYO® is a registered trademark of NTT DATA CORPORATION.

sflogo.php