1 // SPDX-License-Identifier: GPL-2.0 1 2 /* 3 * linux/fs/stat.c 4 * 5 * Copyright (C) 1991, 1992 Linus Torvalds 6 */ 7 8 #include <linux/blkdev.h> 9 #include <linux/export.h> 10 #include <linux/mm.h> 11 #include <linux/errno.h> 12 #include <linux/file.h> 13 #include <linux/highuid.h> 14 #include <linux/fs.h> 15 #include <linux/namei.h> 16 #include <linux/security.h> 17 #include <linux/cred.h> 18 #include <linux/syscalls.h> 19 #include <linux/pagemap.h> 20 #include <linux/compat.h> 21 #include <linux/iversion.h> 22 23 #include <linux/uaccess.h> 24 #include <asm/unistd.h> 25 26 #include "internal.h" 27 #include "mount.h" 28 29 /** 30 * generic_fillattr - Fill in the basic attrib 31 * @idmap: idmap of the mount the 32 * @request_mask: statx request_mask 33 * @inode: Inode to use as the so 34 * @stat: Where to fill in the a 35 * 36 * Fill in the basic attributes in the kstat s 37 * found on the VFS inode structure. This is 38 * operation is supplied. 39 * 40 * If the inode has been found through an idma 41 * the vfsmount must be passed through @idmap. 42 * take care to map the inode according to @id 43 * uid and gid filds. 
On non-idmapped mounts o 44 * performed on the raw inode simply pass @nop 45 */ 46 void generic_fillattr(struct mnt_idmap *idmap, 47 struct inode *inode, str 48 { 49 vfsuid_t vfsuid = i_uid_into_vfsuid(id 50 vfsgid_t vfsgid = i_gid_into_vfsgid(id 51 52 stat->dev = inode->i_sb->s_dev; 53 stat->ino = inode->i_ino; 54 stat->mode = inode->i_mode; 55 stat->nlink = inode->i_nlink; 56 stat->uid = vfsuid_into_kuid(vfsuid); 57 stat->gid = vfsgid_into_kgid(vfsgid); 58 stat->rdev = inode->i_rdev; 59 stat->size = i_size_read(inode); 60 stat->atime = inode_get_atime(inode); 61 stat->mtime = inode_get_mtime(inode); 62 stat->ctime = inode_get_ctime(inode); 63 stat->blksize = i_blocksize(inode); 64 stat->blocks = inode->i_blocks; 65 66 if ((request_mask & STATX_CHANGE_COOKI 67 stat->result_mask |= STATX_CHA 68 stat->change_cookie = inode_qu 69 } 70 71 } 72 EXPORT_SYMBOL(generic_fillattr); 73 74 /** 75 * generic_fill_statx_attr - Fill in the statx 76 * @inode: Inode to use as the source 77 * @stat: Where to fill in the attribute 78 * 79 * Fill in the STATX_ATTR_* flags in the kstat 80 * inode that are published on i_flags and enf 81 */ 82 void generic_fill_statx_attr(struct inode *ino 83 { 84 if (inode->i_flags & S_IMMUTABLE) 85 stat->attributes |= STATX_ATTR 86 if (inode->i_flags & S_APPEND) 87 stat->attributes |= STATX_ATTR 88 stat->attributes_mask |= KSTAT_ATTR_VF 89 } 90 EXPORT_SYMBOL(generic_fill_statx_attr); 91 92 /** 93 * generic_fill_statx_atomic_writes - Fill in 94 * @stat: Where to fill in the attribute 95 * @unit_min: Minimum supported atomic write 96 * @unit_max: Maximum supported atomic write 97 * 98 * Fill in the STATX{_ATTR}_WRITE_ATOMIC flags 99 * atomic write unit_min and unit_max values. 
100 */ 101 void generic_fill_statx_atomic_writes(struct k 102 unsigned 103 unsigned 104 { 105 /* Confirm that the request type is kn 106 stat->result_mask |= STATX_WRITE_ATOMI 107 108 /* Confirm that the file attribute typ 109 stat->attributes_mask |= STATX_ATTR_WR 110 111 if (unit_min) { 112 stat->atomic_write_unit_min = 113 stat->atomic_write_unit_max = 114 /* Initially only allow 1x seg 115 stat->atomic_write_segments_ma 116 117 /* Confirm atomic writes are a 118 stat->attributes |= STATX_ATTR 119 } 120 } 121 EXPORT_SYMBOL_GPL(generic_fill_statx_atomic_wr 122 123 /** 124 * vfs_getattr_nosec - getattr without securit 125 * @path: file to get attributes from 126 * @stat: structure to return attributes in 127 * @request_mask: STATX_xxx flags indicating w 128 * @query_flags: Query mode (AT_STATX_SYNC_TYP 129 * 130 * Get attributes without calling security_ino 131 * 132 * Currently the only caller other than vfs_ge 133 * filehandle lookup code, which uses only the 134 * attributes to any user. Any other code pro 135 */ 136 int vfs_getattr_nosec(const struct path *path, 137 u32 request_mask, unsign 138 { 139 struct mnt_idmap *idmap; 140 struct inode *inode = d_backing_inode( 141 142 memset(stat, 0, sizeof(*stat)); 143 stat->result_mask |= STATX_BASIC_STATS 144 query_flags &= AT_STATX_SYNC_TYPE; 145 146 /* allow the fs to override these if i 147 /* SB_NOATIME means filesystem supplie 148 if (inode->i_sb->s_flags & SB_NOATIME) 149 stat->result_mask &= ~STATX_AT 150 151 /* 152 * Note: If you add another clause to 153 * update attributes_mask below. 
154 */ 155 if (IS_AUTOMOUNT(inode)) 156 stat->attributes |= STATX_ATTR 157 158 if (IS_DAX(inode)) 159 stat->attributes |= STATX_ATTR 160 161 stat->attributes_mask |= (STATX_ATTR_A 162 STATX_ATTR_D 163 164 idmap = mnt_idmap(path->mnt); 165 if (inode->i_op->getattr) 166 return inode->i_op->getattr(id 167 re 168 qu 169 170 generic_fillattr(idmap, request_mask, 171 return 0; 172 } 173 EXPORT_SYMBOL(vfs_getattr_nosec); 174 175 /* 176 * vfs_getattr - Get the enhanced basic attrib 177 * @path: The file of interest 178 * @stat: Where to return the statistics 179 * @request_mask: STATX_xxx flags indicating w 180 * @query_flags: Query mode (AT_STATX_SYNC_TYP 181 * 182 * Ask the filesystem for a file's attributes. 183 * request_mask and query_flags to indicate wh 184 * 185 * If the file is remote, the filesystem can b 186 * from the backing store by passing AT_STATX_ 187 * suppress the update by passing AT_STATX_DON 188 * 189 * Bits must have been set in request_mask to 190 * caller wants retrieving. Any such attribut 191 * anyway, but the value may be approximate, a 192 * synchronised with the server. 193 * 194 * 0 will be returned on success, and a -ve er 195 */ 196 int vfs_getattr(const struct path *path, struc 197 u32 request_mask, unsigned int 198 { 199 int retval; 200 201 if (WARN_ON_ONCE(query_flags & AT_GETA 202 return -EPERM; 203 204 retval = security_inode_getattr(path); 205 if (retval) 206 return retval; 207 return vfs_getattr_nosec(path, stat, r 208 } 209 EXPORT_SYMBOL(vfs_getattr); 210 211 /** 212 * vfs_fstat - Get the basic attributes by fil 213 * @fd: The file descriptor referring to the f 214 * @stat: The result structure to fill in. 
215 * 216 * This function is a wrapper around vfs_getat 217 * that it uses a file descriptor to determine 218 * 219 * 0 will be returned on success, and a -ve er 220 */ 221 int vfs_fstat(int fd, struct kstat *stat) 222 { 223 struct fd f; 224 int error; 225 226 f = fdget_raw(fd); 227 if (!f.file) 228 return -EBADF; 229 error = vfs_getattr(&f.file->f_path, s 230 fdput(f); 231 return error; 232 } 233 234 int getname_statx_lookup_flags(int flags) 235 { 236 int lookup_flags = 0; 237 238 if (!(flags & AT_SYMLINK_NOFOLLOW)) 239 lookup_flags |= LOOKUP_FOLLOW; 240 if (!(flags & AT_NO_AUTOMOUNT)) 241 lookup_flags |= LOOKUP_AUTOMOU 242 if (flags & AT_EMPTY_PATH) 243 lookup_flags |= LOOKUP_EMPTY; 244 245 return lookup_flags; 246 } 247 248 static int vfs_statx_path(struct path *path, i 249 u32 request_mask) 250 { 251 int error = vfs_getattr(path, stat, re 252 253 if (request_mask & STATX_MNT_ID_UNIQUE 254 stat->mnt_id = real_mount(path 255 stat->result_mask |= STATX_MNT 256 } else { 257 stat->mnt_id = real_mount(path 258 stat->result_mask |= STATX_MNT 259 } 260 261 if (path_mounted(path)) 262 stat->attributes |= STATX_ATTR 263 stat->attributes_mask |= STATX_ATTR_MO 264 265 /* 266 * If this is a block device inode, ov 267 * attributes with the block device sp 268 * obtained from the bdev backing inod 269 */ 270 if (S_ISBLK(stat->mode)) 271 bdev_statx(path, stat, request 272 273 return error; 274 } 275 276 static int vfs_statx_fd(int fd, int flags, str 277 u32 request_mask) 278 { 279 CLASS(fd_raw, f)(fd); 280 if (!f.file) 281 return -EBADF; 282 return vfs_statx_path(&f.file->f_path, 283 } 284 285 /** 286 * vfs_statx - Get basic and extra attributes 287 * @dfd: A file descriptor representing the ba 288 * @filename: The name of the file of interest 289 * @flags: Flags to control the query 290 * @stat: The result structure to fill in. 
291 * @request_mask: STATX_xxx flags indicating w 292 * 293 * This function is a wrapper around vfs_getat 294 * that it uses a filename and base directory 295 * Additionally, the use of AT_SYMLINK_NOFOLLO 296 * at the given name from being referenced. 297 * 298 * 0 will be returned on success, and a -ve er 299 */ 300 static int vfs_statx(int dfd, struct filename 301 struct kstat *stat, u32 request_ 302 { 303 struct path path; 304 unsigned int lookup_flags = getname_st 305 int error; 306 307 if (flags & ~(AT_SYMLINK_NOFOLLOW | AT 308 AT_STATX_SYNC_TYPE)) 309 return -EINVAL; 310 311 retry: 312 error = filename_lookup(dfd, filename, 313 if (error) 314 return error; 315 error = vfs_statx_path(&path, flags, s 316 path_put(&path); 317 if (retry_estale(error, lookup_flags)) 318 lookup_flags |= LOOKUP_REVAL; 319 goto retry; 320 } 321 return error; 322 } 323 324 int vfs_fstatat(int dfd, const char __user *fi 325 struct kstat *st 326 { 327 int ret; 328 int statx_flags = flags | AT_NO_AUTOMO 329 struct filename *name; 330 331 /* 332 * Work around glibc turning fstat() i 333 * 334 * If AT_EMPTY_PATH is set, we expect 335 * empty path, and avoid doing all the 336 */ 337 if (flags == AT_EMPTY_PATH && vfs_empt 338 return vfs_fstat(dfd, stat); 339 340 name = getname_flags(filename, getname 341 ret = vfs_statx(dfd, name, statx_flags 342 putname(name); 343 344 return ret; 345 } 346 347 #ifdef __ARCH_WANT_OLD_STAT 348 349 /* 350 * For backward compatibility? Maybe this sho 351 * into arch/i386 instead? 352 */ 353 static int cp_old_stat(struct kstat *stat, str 354 { 355 static int warncount = 5; 356 struct __old_kernel_stat tmp; 357 358 if (warncount > 0) { 359 warncount--; 360 printk(KERN_WARNING "VFS: Warn 361 current->comm); 362 } else if (warncount < 0) { 363 /* it's laughable, but... 
*/ 364 warncount = 0; 365 } 366 367 memset(&tmp, 0, sizeof(struct __old_ke 368 tmp.st_dev = old_encode_dev(stat->dev) 369 tmp.st_ino = stat->ino; 370 if (sizeof(tmp.st_ino) < sizeof(stat-> 371 return -EOVERFLOW; 372 tmp.st_mode = stat->mode; 373 tmp.st_nlink = stat->nlink; 374 if (tmp.st_nlink != stat->nlink) 375 return -EOVERFLOW; 376 SET_UID(tmp.st_uid, from_kuid_munged(c 377 SET_GID(tmp.st_gid, from_kgid_munged(c 378 tmp.st_rdev = old_encode_dev(stat->rde 379 #if BITS_PER_LONG == 32 380 if (stat->size > MAX_NON_LFS) 381 return -EOVERFLOW; 382 #endif 383 tmp.st_size = stat->size; 384 tmp.st_atime = stat->atime.tv_sec; 385 tmp.st_mtime = stat->mtime.tv_sec; 386 tmp.st_ctime = stat->ctime.tv_sec; 387 return copy_to_user(statbuf,&tmp,sizeo 388 } 389 390 SYSCALL_DEFINE2(stat, const char __user *, fil 391 struct __old_kernel_stat __use 392 { 393 struct kstat stat; 394 int error; 395 396 error = vfs_stat(filename, &stat); 397 if (error) 398 return error; 399 400 return cp_old_stat(&stat, statbuf); 401 } 402 403 SYSCALL_DEFINE2(lstat, const char __user *, fi 404 struct __old_kernel_stat __use 405 { 406 struct kstat stat; 407 int error; 408 409 error = vfs_lstat(filename, &stat); 410 if (error) 411 return error; 412 413 return cp_old_stat(&stat, statbuf); 414 } 415 416 SYSCALL_DEFINE2(fstat, unsigned int, fd, struc 417 { 418 struct kstat stat; 419 int error = vfs_fstat(fd, &stat); 420 421 if (!error) 422 error = cp_old_stat(&stat, sta 423 424 return error; 425 } 426 427 #endif /* __ARCH_WANT_OLD_STAT */ 428 429 #ifdef __ARCH_WANT_NEW_STAT 430 431 #ifndef INIT_STRUCT_STAT_PADDING 432 # define INIT_STRUCT_STAT_PADDING(st) memset( 433 #endif 434 435 static int cp_new_stat(struct kstat *stat, str 436 { 437 struct stat tmp; 438 439 if (sizeof(tmp.st_dev) < 4 && !old_val 440 return -EOVERFLOW; 441 if (sizeof(tmp.st_rdev) < 4 && !old_va 442 return -EOVERFLOW; 443 #if BITS_PER_LONG == 32 444 if (stat->size > MAX_NON_LFS) 445 return -EOVERFLOW; 446 #endif 447 448 
INIT_STRUCT_STAT_PADDING(tmp); 449 tmp.st_dev = new_encode_dev(stat->dev) 450 tmp.st_ino = stat->ino; 451 if (sizeof(tmp.st_ino) < sizeof(stat-> 452 return -EOVERFLOW; 453 tmp.st_mode = stat->mode; 454 tmp.st_nlink = stat->nlink; 455 if (tmp.st_nlink != stat->nlink) 456 return -EOVERFLOW; 457 SET_UID(tmp.st_uid, from_kuid_munged(c 458 SET_GID(tmp.st_gid, from_kgid_munged(c 459 tmp.st_rdev = new_encode_dev(stat->rde 460 tmp.st_size = stat->size; 461 tmp.st_atime = stat->atime.tv_sec; 462 tmp.st_mtime = stat->mtime.tv_sec; 463 tmp.st_ctime = stat->ctime.tv_sec; 464 #ifdef STAT_HAVE_NSEC 465 tmp.st_atime_nsec = stat->atime.tv_nse 466 tmp.st_mtime_nsec = stat->mtime.tv_nse 467 tmp.st_ctime_nsec = stat->ctime.tv_nse 468 #endif 469 tmp.st_blocks = stat->blocks; 470 tmp.st_blksize = stat->blksize; 471 return copy_to_user(statbuf,&tmp,sizeo 472 } 473 474 SYSCALL_DEFINE2(newstat, const char __user *, 475 struct stat __user *, statbuf) 476 { 477 struct kstat stat; 478 int error = vfs_stat(filename, &stat); 479 480 if (error) 481 return error; 482 return cp_new_stat(&stat, statbuf); 483 } 484 485 SYSCALL_DEFINE2(newlstat, const char __user *, 486 struct stat __user *, statbuf) 487 { 488 struct kstat stat; 489 int error; 490 491 error = vfs_lstat(filename, &stat); 492 if (error) 493 return error; 494 495 return cp_new_stat(&stat, statbuf); 496 } 497 498 #if !defined(__ARCH_WANT_STAT64) || defined(__ 499 SYSCALL_DEFINE4(newfstatat, int, dfd, const ch 500 struct stat __user *, statbuf, 501 { 502 struct kstat stat; 503 int error; 504 505 error = vfs_fstatat(dfd, filename, &st 506 if (error) 507 return error; 508 return cp_new_stat(&stat, statbuf); 509 } 510 #endif 511 512 SYSCALL_DEFINE2(newfstat, unsigned int, fd, st 513 { 514 struct kstat stat; 515 int error = vfs_fstat(fd, &stat); 516 517 if (!error) 518 error = cp_new_stat(&stat, sta 519 520 return error; 521 } 522 #endif 523 524 static int do_readlinkat(int dfd, const char _ 525 char __user *buf, int 526 { 527 struct path 
path; 528 struct filename *name; 529 int error; 530 unsigned int lookup_flags = LOOKUP_EMP 531 532 if (bufsiz <= 0) 533 return -EINVAL; 534 535 retry: 536 name = getname_flags(pathname, lookup_ 537 error = filename_lookup(dfd, name, loo 538 if (unlikely(error)) { 539 putname(name); 540 return error; 541 } 542 543 /* 544 * AFS mountpoints allow readlink(2) b 545 */ 546 if (d_is_symlink(path.dentry) || 547 d_backing_inode(path.dentry)->i_op 548 error = security_inode_readlin 549 if (!error) { 550 touch_atime(&path); 551 error = vfs_readlink(p 552 } 553 } else { 554 error = (name->name[0] == '\0' 555 } 556 path_put(&path); 557 putname(name); 558 if (retry_estale(error, lookup_flags)) 559 lookup_flags |= LOOKUP_REVAL; 560 goto retry; 561 } 562 return error; 563 } 564 565 SYSCALL_DEFINE4(readlinkat, int, dfd, const ch 566 char __user *, buf, int, bufsi 567 { 568 return do_readlinkat(dfd, pathname, bu 569 } 570 571 SYSCALL_DEFINE3(readlink, const char __user *, 572 int, bufsiz) 573 { 574 return do_readlinkat(AT_FDCWD, path, b 575 } 576 577 578 /* ---------- LFS-64 ----------- */ 579 #if defined(__ARCH_WANT_STAT64) || defined(__A 580 581 #ifndef INIT_STRUCT_STAT64_PADDING 582 # define INIT_STRUCT_STAT64_PADDING(st) memse 583 #endif 584 585 static long cp_new_stat64(struct kstat *stat, 586 { 587 struct stat64 tmp; 588 589 INIT_STRUCT_STAT64_PADDING(tmp); 590 #ifdef CONFIG_MIPS 591 /* mips has weird padding, so we don't 592 tmp.st_dev = new_encode_dev(stat->dev) 593 tmp.st_rdev = new_encode_dev(stat->rde 594 #else 595 tmp.st_dev = huge_encode_dev(stat->dev 596 tmp.st_rdev = huge_encode_dev(stat->rd 597 #endif 598 tmp.st_ino = stat->ino; 599 if (sizeof(tmp.st_ino) < sizeof(stat-> 600 return -EOVERFLOW; 601 #ifdef STAT64_HAS_BROKEN_ST_INO 602 tmp.__st_ino = stat->ino; 603 #endif 604 tmp.st_mode = stat->mode; 605 tmp.st_nlink = stat->nlink; 606 tmp.st_uid = from_kuid_munged(current_ 607 tmp.st_gid = from_kgid_munged(current_ 608 tmp.st_atime = stat->atime.tv_sec; 609 
tmp.st_atime_nsec = stat->atime.tv_nse 610 tmp.st_mtime = stat->mtime.tv_sec; 611 tmp.st_mtime_nsec = stat->mtime.tv_nse 612 tmp.st_ctime = stat->ctime.tv_sec; 613 tmp.st_ctime_nsec = stat->ctime.tv_nse 614 tmp.st_size = stat->size; 615 tmp.st_blocks = stat->blocks; 616 tmp.st_blksize = stat->blksize; 617 return copy_to_user(statbuf,&tmp,sizeo 618 } 619 620 SYSCALL_DEFINE2(stat64, const char __user *, f 621 struct stat64 __user *, statbu 622 { 623 struct kstat stat; 624 int error = vfs_stat(filename, &stat); 625 626 if (!error) 627 error = cp_new_stat64(&stat, s 628 629 return error; 630 } 631 632 SYSCALL_DEFINE2(lstat64, const char __user *, 633 struct stat64 __user *, statbu 634 { 635 struct kstat stat; 636 int error = vfs_lstat(filename, &stat) 637 638 if (!error) 639 error = cp_new_stat64(&stat, s 640 641 return error; 642 } 643 644 SYSCALL_DEFINE2(fstat64, unsigned long, fd, st 645 { 646 struct kstat stat; 647 int error = vfs_fstat(fd, &stat); 648 649 if (!error) 650 error = cp_new_stat64(&stat, s 651 652 return error; 653 } 654 655 SYSCALL_DEFINE4(fstatat64, int, dfd, const cha 656 struct stat64 __user *, statbu 657 { 658 struct kstat stat; 659 int error; 660 661 error = vfs_fstatat(dfd, filename, &st 662 if (error) 663 return error; 664 return cp_new_stat64(&stat, statbuf); 665 } 666 #endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_CO 667 668 static noinline_for_stack int 669 cp_statx(const struct kstat *stat, struct stat 670 { 671 struct statx tmp; 672 673 memset(&tmp, 0, sizeof(tmp)); 674 675 /* STATX_CHANGE_COOKIE is kernel-only 676 tmp.stx_mask = stat->result_mask & ~ST 677 tmp.stx_blksize = stat->blksize; 678 /* STATX_ATTR_CHANGE_MONOTONIC is kern 679 tmp.stx_attributes = stat->attributes 680 tmp.stx_nlink = stat->nlink; 681 tmp.stx_uid = from_kuid_munged(current 682 tmp.stx_gid = from_kgid_munged(current 683 tmp.stx_mode = stat->mode; 684 tmp.stx_ino = stat->ino; 685 tmp.stx_size = stat->size; 686 tmp.stx_blocks = stat->blocks; 687 tmp.stx_attributes_mask = 
stat->attrib 688 tmp.stx_atime.tv_sec = stat->atime.tv_ 689 tmp.stx_atime.tv_nsec = stat->atime.tv 690 tmp.stx_btime.tv_sec = stat->btime.tv_ 691 tmp.stx_btime.tv_nsec = stat->btime.tv 692 tmp.stx_ctime.tv_sec = stat->ctime.tv_ 693 tmp.stx_ctime.tv_nsec = stat->ctime.tv 694 tmp.stx_mtime.tv_sec = stat->mtime.tv_ 695 tmp.stx_mtime.tv_nsec = stat->mtime.tv 696 tmp.stx_rdev_major = MAJOR(stat->rdev) 697 tmp.stx_rdev_minor = MINOR(stat->rdev) 698 tmp.stx_dev_major = MAJOR(stat->dev); 699 tmp.stx_dev_minor = MINOR(stat->dev); 700 tmp.stx_mnt_id = stat->mnt_id; 701 tmp.stx_dio_mem_align = stat->dio_mem_ 702 tmp.stx_dio_offset_align = stat->dio_o 703 tmp.stx_subvol = stat->subvol; 704 tmp.stx_atomic_write_unit_min = stat-> 705 tmp.stx_atomic_write_unit_max = stat-> 706 tmp.stx_atomic_write_segments_max = st 707 708 return copy_to_user(buffer, &tmp, size 709 } 710 711 int do_statx(int dfd, struct filename *filenam 712 unsigned int mask, struct statx _ 713 { 714 struct kstat stat; 715 int error; 716 717 if (mask & STATX__RESERVED) 718 return -EINVAL; 719 if ((flags & AT_STATX_SYNC_TYPE) == AT 720 return -EINVAL; 721 722 /* 723 * STATX_CHANGE_COOKIE is kernel-only 724 * from userland. 725 */ 726 mask &= ~STATX_CHANGE_COOKIE; 727 728 error = vfs_statx(dfd, filename, flags 729 if (error) 730 return error; 731 732 return cp_statx(&stat, buffer); 733 } 734 735 int do_statx_fd(int fd, unsigned int flags, un 736 struct statx __user *buffer) 737 { 738 struct kstat stat; 739 int error; 740 741 if (mask & STATX__RESERVED) 742 return -EINVAL; 743 if ((flags & AT_STATX_SYNC_TYPE) == AT 744 return -EINVAL; 745 746 /* 747 * STATX_CHANGE_COOKIE is kernel-only 748 * from userland. 
749 */ 750 mask &= ~STATX_CHANGE_COOKIE; 751 752 error = vfs_statx_fd(fd, flags, &stat, 753 if (error) 754 return error; 755 756 return cp_statx(&stat, buffer); 757 } 758 759 /** 760 * sys_statx - System call to get enhanced sta 761 * @dfd: Base directory to pathwalk from *or* 762 * @filename: File to stat or either NULL or " 763 * @flags: AT_* flags to control pathwalk. 764 * @mask: Parts of statx struct actually requi 765 * @buffer: Result buffer. 766 * 767 * Note that fstat() can be emulated by settin 768 * supplying "" (or preferably NULL) as the fi 769 * in the flags. 770 */ 771 SYSCALL_DEFINE5(statx, 772 int, dfd, const char __user *, 773 unsigned int, mask, 774 struct statx __user *, buffer) 775 { 776 int ret; 777 unsigned lflags; 778 struct filename *name; 779 780 /* 781 * Short-circuit handling of NULL and 782 * 783 * For a NULL path we require and acce 784 * (possibly |'d with AT_STATX flags). 785 * 786 * However, glibc on 32-bit architectu 787 * with the "" pathname and AT_NO_AUTO 788 * Supporting this results in the ugli 789 */ 790 lflags = flags & ~(AT_NO_AUTOMOUNT | A 791 if (lflags == AT_EMPTY_PATH && vfs_emp 792 return do_statx_fd(dfd, flags 793 794 name = getname_flags(filename, getname 795 ret = do_statx(dfd, name, flags, mask, 796 putname(name); 797 798 return ret; 799 } 800 801 #if defined(CONFIG_COMPAT) && defined(__ARCH_W 802 static int cp_compat_stat(struct kstat *stat, 803 { 804 struct compat_stat tmp; 805 806 if (sizeof(tmp.st_dev) < 4 && !old_val 807 return -EOVERFLOW; 808 if (sizeof(tmp.st_rdev) < 4 && !old_va 809 return -EOVERFLOW; 810 811 memset(&tmp, 0, sizeof(tmp)); 812 tmp.st_dev = new_encode_dev(stat->dev) 813 tmp.st_ino = stat->ino; 814 if (sizeof(tmp.st_ino) < sizeof(stat-> 815 return -EOVERFLOW; 816 tmp.st_mode = stat->mode; 817 tmp.st_nlink = stat->nlink; 818 if (tmp.st_nlink != stat->nlink) 819 return -EOVERFLOW; 820 SET_UID(tmp.st_uid, from_kuid_munged(c 821 SET_GID(tmp.st_gid, from_kgid_munged(c 822 tmp.st_rdev = 
new_encode_dev(stat->rde 823 if ((u64) stat->size > MAX_NON_LFS) 824 return -EOVERFLOW; 825 tmp.st_size = stat->size; 826 tmp.st_atime = stat->atime.tv_sec; 827 tmp.st_atime_nsec = stat->atime.tv_nse 828 tmp.st_mtime = stat->mtime.tv_sec; 829 tmp.st_mtime_nsec = stat->mtime.tv_nse 830 tmp.st_ctime = stat->ctime.tv_sec; 831 tmp.st_ctime_nsec = stat->ctime.tv_nse 832 tmp.st_blocks = stat->blocks; 833 tmp.st_blksize = stat->blksize; 834 return copy_to_user(ubuf, &tmp, sizeof 835 } 836 837 COMPAT_SYSCALL_DEFINE2(newstat, const char __u 838 struct compat_stat __us 839 { 840 struct kstat stat; 841 int error; 842 843 error = vfs_stat(filename, &stat); 844 if (error) 845 return error; 846 return cp_compat_stat(&stat, statbuf); 847 } 848 849 COMPAT_SYSCALL_DEFINE2(newlstat, const char __ 850 struct compat_stat __us 851 { 852 struct kstat stat; 853 int error; 854 855 error = vfs_lstat(filename, &stat); 856 if (error) 857 return error; 858 return cp_compat_stat(&stat, statbuf); 859 } 860 861 #ifndef __ARCH_WANT_STAT64 862 COMPAT_SYSCALL_DEFINE4(newfstatat, unsigned in 863 const char __user *, fi 864 struct compat_stat __us 865 { 866 struct kstat stat; 867 int error; 868 869 error = vfs_fstatat(dfd, filename, &st 870 if (error) 871 return error; 872 return cp_compat_stat(&stat, statbuf); 873 } 874 #endif 875 876 COMPAT_SYSCALL_DEFINE2(newfstat, unsigned int, 877 struct compat_stat __us 878 { 879 struct kstat stat; 880 int error = vfs_fstat(fd, &stat); 881 882 if (!error) 883 error = cp_compat_stat(&stat, 884 return error; 885 } 886 #endif 887 888 /* Caller is here responsible for sufficient l 889 void __inode_add_bytes(struct inode *inode, lo 890 { 891 inode->i_blocks += bytes >> 9; 892 bytes &= 511; 893 inode->i_bytes += bytes; 894 if (inode->i_bytes >= 512) { 895 inode->i_blocks++; 896 inode->i_bytes -= 512; 897 } 898 } 899 EXPORT_SYMBOL(__inode_add_bytes); 900 901 void inode_add_bytes(struct inode *inode, loff 902 { 903 spin_lock(&inode->i_lock); 904 
__inode_add_bytes(inode, bytes); 905 spin_unlock(&inode->i_lock); 906 } 907 908 EXPORT_SYMBOL(inode_add_bytes); 909 910 void __inode_sub_bytes(struct inode *inode, lo 911 { 912 inode->i_blocks -= bytes >> 9; 913 bytes &= 511; 914 if (inode->i_bytes < bytes) { 915 inode->i_blocks--; 916 inode->i_bytes += 512; 917 } 918 inode->i_bytes -= bytes; 919 } 920 921 EXPORT_SYMBOL(__inode_sub_bytes); 922 923 void inode_sub_bytes(struct inode *inode, loff 924 { 925 spin_lock(&inode->i_lock); 926 __inode_sub_bytes(inode, bytes); 927 spin_unlock(&inode->i_lock); 928 } 929 930 EXPORT_SYMBOL(inode_sub_bytes); 931 932 loff_t inode_get_bytes(struct inode *inode) 933 { 934 loff_t ret; 935 936 spin_lock(&inode->i_lock); 937 ret = __inode_get_bytes(inode); 938 spin_unlock(&inode->i_lock); 939 return ret; 940 } 941 942 EXPORT_SYMBOL(inode_get_bytes); 943 944 void inode_set_bytes(struct inode *inode, loff 945 { 946 /* Caller is here responsible for suff 947 * (ie. inode->i_lock) */ 948 inode->i_blocks = bytes >> 9; 949 inode->i_bytes = bytes & 511; 950 } 951 952 EXPORT_SYMBOL(inode_set_bytes); 953
/*
 * Linux® is a registered trademark of Linus Torvalds in the United States and
 * other countries.
 * TOMOYO® is a registered trademark of NTT DATA CORPORATION.
 */