// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/stat.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/blkdev.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/cred.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/compat.h>
#include <linux/iversion.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>

#include "internal.h"
#include "mount.h"

/**
 * generic_fillattr - Fill in the basic attributes from the inode struct
 * @idmap:		idmap of the mount the inode was found from
 * @request_mask:	statx request_mask
 * @inode:		Inode to use as the source
 * @stat:		Where to fill in the attributes
 *
 * Fill in the basic attributes in the kstat structure from data that's to be
 * found on the VFS inode structure.  This is the default if no getattr inode
 * operation is supplied.
 *
 * If the inode has been found through an idmapped mount the idmap of
 * the vfsmount must be passed through @idmap. This function will then
 * take care to map the inode according to @idmap before filling in the
 * uid and gid fields. On non-idmapped mounts or if permission checking is to
 * be performed on the raw inode simply pass @nop_mnt_idmap.
 */
void generic_fillattr(struct mnt_idmap *idmap, u32 request_mask,
		      struct inode *inode, struct kstat *stat)
{
	vfsuid_t vfsuid = i_uid_into_vfsuid(idmap, inode);
	vfsgid_t vfsgid = i_gid_into_vfsgid(idmap, inode);

	stat->dev = inode->i_sb->s_dev;
	stat->ino = inode->i_ino;
	stat->mode = inode->i_mode;
	stat->nlink = inode->i_nlink;
	stat->uid = vfsuid_into_kuid(vfsuid);
	stat->gid = vfsgid_into_kgid(vfsgid);
	stat->rdev = inode->i_rdev;
	stat->size = i_size_read(inode);
	stat->atime = inode_get_atime(inode);
	stat->mtime = inode_get_mtime(inode);
	stat->ctime = inode_get_ctime(inode);
	stat->blksize = i_blocksize(inode);
	stat->blocks = inode->i_blocks;

	if ((request_mask & STATX_CHANGE_COOKIE) && IS_I_VERSION(inode)) {
		stat->result_mask |= STATX_CHANGE_COOKIE;
		stat->change_cookie = inode_query_iversion(inode);
	}
}
EXPORT_SYMBOL(generic_fillattr);
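/*
 * Hedged sketch (not part of this file): a minimal ->getattr() method for a
 * simple filesystem might defer entirely to generic_fillattr() and
 * generic_fill_statx_attr(). "examplefs" and the function name below are
 * hypothetical; the signature follows the inode_operations::getattr
 * prototype used by vfs_getattr_nosec() further down.
 */
#if 0
static int examplefs_getattr(struct mnt_idmap *idmap,
			     const struct path *path, struct kstat *stat,
			     u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);

	/* Report only what the VFS already tracks on the inode. */
	generic_fillattr(idmap, request_mask, inode, stat);
	generic_fill_statx_attr(inode, stat);
	return 0;
}
#endif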
/**
 * generic_fill_statx_attr - Fill in the statx attributes from the inode flags
 * @inode:	Inode to use as the source
 * @stat:	Where to fill in the attribute flags
 *
 * Fill in the STATX_ATTR_* flags in the kstat structure for properties of the
 * inode that are published on i_flags and enforced by the VFS.
 */
void generic_fill_statx_attr(struct inode *inode, struct kstat *stat)
{
	if (inode->i_flags & S_IMMUTABLE)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (inode->i_flags & S_APPEND)
		stat->attributes |= STATX_ATTR_APPEND;
	stat->attributes_mask |= KSTAT_ATTR_VFS_FLAGS;
}
EXPORT_SYMBOL(generic_fill_statx_attr);

/**
 * generic_fill_statx_atomic_writes - Fill in atomic writes statx attributes
 * @stat:	Where to fill in the attribute flags
 * @unit_min:	Minimum supported atomic write length
 * @unit_max:	Maximum supported atomic write length
 *
 * Fill in the STATX{_ATTR}_WRITE_ATOMIC flags in the kstat structure from
 * atomic write unit_min and unit_max values.
 */
void generic_fill_statx_atomic_writes(struct kstat *stat,
				      unsigned int unit_min,
				      unsigned int unit_max)
{
	/* Confirm that the request type is known */
	stat->result_mask |= STATX_WRITE_ATOMIC;

	/* Confirm that the file attribute type is known */
	stat->attributes_mask |= STATX_ATTR_WRITE_ATOMIC;

	if (unit_min) {
		stat->atomic_write_unit_min = unit_min;
		stat->atomic_write_unit_max = unit_max;
		/* Initially only allow 1x segment */
		stat->atomic_write_segments_max = 1;

		/* Confirm atomic writes are actually supported */
		stat->attributes |= STATX_ATTR_WRITE_ATOMIC;
	}
}
EXPORT_SYMBOL_GPL(generic_fill_statx_atomic_writes);

/**
 * vfs_getattr_nosec - getattr without security checks
 * @path: file to get attributes from
 * @stat: structure to return attributes in
 * @request_mask: STATX_xxx flags indicating what the caller wants
 * @query_flags: Query mode (AT_STATX_SYNC_TYPE)
 *
 * Get attributes without calling security_inode_getattr.
 *
 * Currently the only caller other than vfs_getattr is internal to the
 * filehandle lookup code, which uses only the inode number and returns no
 * attributes to any user.  Any other code probably wants vfs_getattr.
 */
int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
		      u32 request_mask, unsigned int query_flags)
{
	struct mnt_idmap *idmap;
	struct inode *inode = d_backing_inode(path->dentry);

	memset(stat, 0, sizeof(*stat));
	stat->result_mask |= STATX_BASIC_STATS;
	query_flags &= AT_STATX_SYNC_TYPE;

	/* allow the fs to override these if it really wants to */
	/* SB_NOATIME means filesystem supplies dummy atime value */
	if (inode->i_sb->s_flags & SB_NOATIME)
		stat->result_mask &= ~STATX_ATIME;

	/*
	 * Note: If you add another clause to set an attribute flag, please
	 * update attributes_mask below.
	 */
	if (IS_AUTOMOUNT(inode))
		stat->attributes |= STATX_ATTR_AUTOMOUNT;

	if (IS_DAX(inode))
		stat->attributes |= STATX_ATTR_DAX;

	stat->attributes_mask |= (STATX_ATTR_AUTOMOUNT |
				  STATX_ATTR_DAX);

	idmap = mnt_idmap(path->mnt);
	if (inode->i_op->getattr)
		return inode->i_op->getattr(idmap, path, stat,
					    request_mask,
					    query_flags | AT_GETATTR_NOSEC);

	generic_fillattr(idmap, request_mask, inode, stat);
	return 0;
}
EXPORT_SYMBOL(vfs_getattr_nosec);

/*
 * vfs_getattr - Get the enhanced basic attributes of a file
 * @path: The file of interest
 * @stat: Where to return the statistics
 * @request_mask: STATX_xxx flags indicating what the caller wants
 * @query_flags: Query mode (AT_STATX_SYNC_TYPE)
 *
 * Ask the filesystem for a file's attributes.  The caller must use
 * request_mask and query_flags to indicate what they want.
 *
 * If the file is remote, the filesystem can be forced to update the attributes
 * from the backing store by passing AT_STATX_FORCE_SYNC in query_flags or can
 * suppress the update by passing AT_STATX_DONT_SYNC.
 *
 * Bits must have been set in request_mask to indicate which attributes the
 * caller wants retrieving.  Any such attribute not requested may be returned
 * anyway, but the value may be approximate, and, if remote, may not have been
 * synchronised with the server.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
int vfs_getattr(const struct path *path, struct kstat *stat,
		u32 request_mask, unsigned int query_flags)
{
	int retval;

	if (WARN_ON_ONCE(query_flags & AT_GETATTR_NOSEC))
		return -EPERM;

	retval = security_inode_getattr(path);
	if (retval)
		return retval;
	return vfs_getattr_nosec(path, stat, request_mask, query_flags);
}
EXPORT_SYMBOL(vfs_getattr);
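/*
 * Hedged sketch (not part of this file): in-kernel callers typically request
 * only the fields they need via request_mask. The helper name below is
 * hypothetical; it fetches just the size of an already-resolved path.
 */
#if 0
static int example_path_size(const struct path *path, loff_t *size)
{
	struct kstat stat;
	int error = vfs_getattr(path, &stat, STATX_SIZE, AT_STATX_SYNC_AS_STAT);

	if (!error)
		*size = stat.size;	/* may be approximate if not requested */
	return error;
}
#endif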
/**
 * vfs_fstat - Get the basic attributes by file descriptor
 * @fd: The file descriptor referring to the file of interest
 * @stat: The result structure to fill in.
 *
 * This function is a wrapper around vfs_getattr().  The main difference is
 * that it uses a file descriptor to determine the file location.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
int vfs_fstat(int fd, struct kstat *stat)
{
	struct fd f;
	int error;

	f = fdget_raw(fd);
	if (!fd_file(f))
		return -EBADF;
	error = vfs_getattr(&fd_file(f)->f_path, stat, STATX_BASIC_STATS, 0);
	fdput(f);
	return error;
}

int getname_statx_lookup_flags(int flags)
{
	int lookup_flags = 0;

	if (!(flags & AT_SYMLINK_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;
	if (!(flags & AT_NO_AUTOMOUNT))
		lookup_flags |= LOOKUP_AUTOMOUNT;
	if (flags & AT_EMPTY_PATH)
		lookup_flags |= LOOKUP_EMPTY;

	return lookup_flags;
}

static int vfs_statx_path(struct path *path, int flags, struct kstat *stat,
			  u32 request_mask)
{
	int error = vfs_getattr(path, stat, request_mask, flags);

	if (request_mask & STATX_MNT_ID_UNIQUE) {
		stat->mnt_id = real_mount(path->mnt)->mnt_id_unique;
		stat->result_mask |= STATX_MNT_ID_UNIQUE;
	} else {
		stat->mnt_id = real_mount(path->mnt)->mnt_id;
		stat->result_mask |= STATX_MNT_ID;
	}

	if (path_mounted(path))
		stat->attributes |= STATX_ATTR_MOUNT_ROOT;
	stat->attributes_mask |= STATX_ATTR_MOUNT_ROOT;

	/*
	 * If this is a block device inode, override the filesystem
	 * attributes with the block device specific parameters that need to be
	 * obtained from the bdev backing inode.
	 */
	if (S_ISBLK(stat->mode))
		bdev_statx(path, stat, request_mask);

	return error;
}

static int vfs_statx_fd(int fd, int flags, struct kstat *stat,
			u32 request_mask)
{
	CLASS(fd_raw, f)(fd);
	if (!fd_file(f))
		return -EBADF;
	return vfs_statx_path(&fd_file(f)->f_path, flags, stat, request_mask);
}

/**
 * vfs_statx - Get basic and extra attributes by filename
 * @dfd: A file descriptor representing the base dir for a relative filename
 * @filename: The name of the file of interest
 * @flags: Flags to control the query
 * @stat: The result structure to fill in.
 * @request_mask: STATX_xxx flags indicating what the caller wants
 *
 * This function is a wrapper around vfs_getattr().  The main difference is
 * that it uses a filename and base directory to determine the file location.
 * Additionally, the use of AT_SYMLINK_NOFOLLOW in flags will prevent a symlink
 * at the given name from being referenced.
 *
 * 0 will be returned on success, and a -ve error code if unsuccessful.
 */
static int vfs_statx(int dfd, struct filename *filename, int flags,
		     struct kstat *stat, u32 request_mask)
{
	struct path path;
	unsigned int lookup_flags = getname_statx_lookup_flags(flags);
	int error;

	if (flags & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT | AT_EMPTY_PATH |
		      AT_STATX_SYNC_TYPE))
		return -EINVAL;

retry:
	error = filename_lookup(dfd, filename, lookup_flags, &path, NULL);
	if (error)
		return error;
	error = vfs_statx_path(&path, flags, stat, request_mask);
	path_put(&path);
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
	return error;
}

int vfs_fstatat(int dfd, const char __user *filename,
		struct kstat *stat, int flags)
{
	int ret;
	int statx_flags = flags | AT_NO_AUTOMOUNT;
	struct filename *name;

	/*
	 * Work around glibc turning fstat() into fstatat(AT_EMPTY_PATH)
	 *
	 * If AT_EMPTY_PATH is set, we expect the common case to be that
	 * empty path, and avoid doing all the extra pathname work.
	 */
	if (flags == AT_EMPTY_PATH && vfs_empty_path(dfd, filename))
		return vfs_fstat(dfd, stat);

	name = getname_flags(filename, getname_statx_lookup_flags(statx_flags));
	ret = vfs_statx(dfd, name, statx_flags, stat, STATX_BASIC_STATS);
	putname(name);

	return ret;
}

#ifdef __ARCH_WANT_OLD_STAT

/*
 * For backward compatibility?  Maybe this should be moved
 * into arch/i386 instead?
 */
static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * statbuf)
{
	static int warncount = 5;
	struct __old_kernel_stat tmp;

	if (warncount > 0) {
		warncount--;
		printk(KERN_WARNING "VFS: Warning: %s using old stat() call. Recompile your binary.\n",
			current->comm);
	} else if (warncount < 0) {
		/* it's laughable, but... */
		warncount = 0;
	}

	memset(&tmp, 0, sizeof(struct __old_kernel_stat));
	tmp.st_dev = old_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = old_encode_dev(stat->rdev);
#if BITS_PER_LONG == 32
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
	return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(stat, const char __user *, filename,
		struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_stat(filename, &stat);
	if (error)
		return error;

	return cp_old_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(lstat, const char __user *, filename,
		struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;

	return cp_old_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_old_stat(&stat, statbuf);

	return error;
}

#endif /* __ARCH_WANT_OLD_STAT */

#ifdef __ARCH_WANT_NEW_STAT

#ifndef INIT_STRUCT_STAT_PADDING
#  define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
#endif

static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
{
	struct stat tmp;

	if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
		return -EOVERFLOW;
	if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
		return -EOVERFLOW;
#if BITS_PER_LONG == 32
	if (stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
#endif

	INIT_STRUCT_STAT_PADDING(tmp);
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = new_encode_dev(stat->rdev);
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_ctime = stat->ctime.tv_sec;
#ifdef STAT_HAVE_NSEC
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
#endif
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(newstat, const char __user *, filename,
		struct stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_stat(filename, &stat);

	if (error)
		return error;
	return cp_new_stat(&stat, statbuf);
}

SYSCALL_DEFINE2(newlstat, const char __user *, filename,
		struct stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;

	return cp_new_stat(&stat, statbuf);
}

#if !defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_SYS_NEWFSTATAT)
SYSCALL_DEFINE4(newfstatat, int, dfd, const char __user *, filename,
		struct stat __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_new_stat(&stat, statbuf);
}
#endif

SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_new_stat(&stat, statbuf);

	return error;
}
#endif

static int do_readlinkat(int dfd, const char __user *pathname,
			 char __user *buf, int bufsiz)
{
	struct path path;
	struct filename *name;
	int error;
	unsigned int lookup_flags = LOOKUP_EMPTY;

	if (bufsiz <= 0)
		return -EINVAL;

retry:
	name = getname_flags(pathname, lookup_flags);
	error = filename_lookup(dfd, name, lookup_flags, &path, NULL);
	if (unlikely(error)) {
		putname(name);
		return error;
	}

	/*
	 * AFS mountpoints allow readlink(2) but are not symlinks
	 */
	if (d_is_symlink(path.dentry) ||
	    d_backing_inode(path.dentry)->i_op->readlink) {
		error = security_inode_readlink(path.dentry);
		if (!error) {
			touch_atime(&path);
			error = vfs_readlink(path.dentry, buf, bufsiz);
		}
	} else {
		error = (name->name[0] == '\0') ? -ENOENT : -EINVAL;
	}
	path_put(&path);
	putname(name);
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
	return error;
}

SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
		char __user *, buf, int, bufsiz)
{
	return do_readlinkat(dfd, pathname, buf, bufsiz);
}

SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf,
		int, bufsiz)
{
	return do_readlinkat(AT_FDCWD, path, buf, bufsiz);
}
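/*
 * Usage sketch (userspace, not kernel code): readlink(2)/readlinkat(2) as
 * implemented above return the number of bytes placed in the buffer and do
 * not NUL-terminate it, so callers terminate the string themselves. A
 * minimal, hedged example:
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char target[4096];
 *		ssize_t n = readlink("/proc/self/exe", target, sizeof(target) - 1);
 *
 *		if (n < 0)
 *			return 1;
 *		target[n] = '\0';	// the syscall does not add the NUL
 *		printf("%s\n", target);
 *		return 0;
 *	}
 */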
/* ---------- LFS-64 ----------- */
#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)

#ifndef INIT_STRUCT_STAT64_PADDING
#  define INIT_STRUCT_STAT64_PADDING(st) memset(&st, 0, sizeof(st))
#endif

static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf)
{
	struct stat64 tmp;

	INIT_STRUCT_STAT64_PADDING(tmp);
#ifdef CONFIG_MIPS
	/* mips has weird padding, so we don't get 64 bits there */
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_rdev = new_encode_dev(stat->rdev);
#else
	tmp.st_dev = huge_encode_dev(stat->dev);
	tmp.st_rdev = huge_encode_dev(stat->rdev);
#endif
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
#ifdef STAT64_HAS_BROKEN_ST_INO
	tmp.__st_ino = stat->ino;
#endif
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid);
	tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid);
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
	tmp.st_size = stat->size;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(statbuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(stat64, const char __user *, filename,
		struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_stat(filename, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE2(lstat64, const char __user *, filename,
		struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_lstat(filename, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE2(fstat64, unsigned long, fd, struct stat64 __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_new_stat64(&stat, statbuf);

	return error;
}

SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
		struct stat64 __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_new_stat64(&stat, statbuf);
}
#endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */

static noinline_for_stack int
cp_statx(const struct kstat *stat, struct statx __user *buffer)
{
	struct statx tmp;

	memset(&tmp, 0, sizeof(tmp));

	/* STATX_CHANGE_COOKIE is kernel-only for now */
	tmp.stx_mask = stat->result_mask & ~STATX_CHANGE_COOKIE;
	tmp.stx_blksize = stat->blksize;
	/* STATX_ATTR_CHANGE_MONOTONIC is kernel-only for now */
	tmp.stx_attributes = stat->attributes & ~STATX_ATTR_CHANGE_MONOTONIC;
	tmp.stx_nlink = stat->nlink;
	tmp.stx_uid = from_kuid_munged(current_user_ns(), stat->uid);
	tmp.stx_gid = from_kgid_munged(current_user_ns(), stat->gid);
	tmp.stx_mode = stat->mode;
	tmp.stx_ino = stat->ino;
	tmp.stx_size = stat->size;
	tmp.stx_blocks = stat->blocks;
	tmp.stx_attributes_mask = stat->attributes_mask;
	tmp.stx_atime.tv_sec = stat->atime.tv_sec;
	tmp.stx_atime.tv_nsec = stat->atime.tv_nsec;
	tmp.stx_btime.tv_sec = stat->btime.tv_sec;
	tmp.stx_btime.tv_nsec = stat->btime.tv_nsec;
	tmp.stx_ctime.tv_sec = stat->ctime.tv_sec;
	tmp.stx_ctime.tv_nsec = stat->ctime.tv_nsec;
	tmp.stx_mtime.tv_sec = stat->mtime.tv_sec;
	tmp.stx_mtime.tv_nsec = stat->mtime.tv_nsec;
	tmp.stx_rdev_major = MAJOR(stat->rdev);
	tmp.stx_rdev_minor = MINOR(stat->rdev);
	tmp.stx_dev_major = MAJOR(stat->dev);
	tmp.stx_dev_minor = MINOR(stat->dev);
	tmp.stx_mnt_id = stat->mnt_id;
	tmp.stx_dio_mem_align = stat->dio_mem_align;
	tmp.stx_dio_offset_align = stat->dio_offset_align;
	tmp.stx_subvol = stat->subvol;
	tmp.stx_atomic_write_unit_min = stat->atomic_write_unit_min;
	tmp.stx_atomic_write_unit_max = stat->atomic_write_unit_max;
	tmp.stx_atomic_write_segments_max = stat->atomic_write_segments_max;

	return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

int do_statx(int dfd, struct filename *filename, unsigned int flags,
	     unsigned int mask, struct statx __user *buffer)
{
	struct kstat stat;
	int error;

	if (mask & STATX__RESERVED)
		return -EINVAL;
	if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
		return -EINVAL;

	/*
	 * STATX_CHANGE_COOKIE is kernel-only for now. Ignore requests
	 * from userland.
	 */
	mask &= ~STATX_CHANGE_COOKIE;

	error = vfs_statx(dfd, filename, flags, &stat, mask);
	if (error)
		return error;

	return cp_statx(&stat, buffer);
}

int do_statx_fd(int fd, unsigned int flags, unsigned int mask,
		struct statx __user *buffer)
{
	struct kstat stat;
	int error;

	if (mask & STATX__RESERVED)
		return -EINVAL;
	if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
		return -EINVAL;

	/*
	 * STATX_CHANGE_COOKIE is kernel-only for now. Ignore requests
	 * from userland.
	 */
	mask &= ~STATX_CHANGE_COOKIE;

	error = vfs_statx_fd(fd, flags, &stat, mask);
	if (error)
		return error;

	return cp_statx(&stat, buffer);
}

/**
 * sys_statx - System call to get enhanced stats
 * @dfd: Base directory to pathwalk from *or* fd to stat.
 * @filename: File to stat or either NULL or "" with AT_EMPTY_PATH
 * @flags: AT_* flags to control pathwalk.
 * @mask: Parts of statx struct actually required.
 * @buffer: Result buffer.
 *
 * Note that fstat() can be emulated by setting dfd to the fd of interest,
 * supplying "" (or preferably NULL) as the filename and setting AT_EMPTY_PATH
 * in the flags.
 */
SYSCALL_DEFINE5(statx,
		int, dfd, const char __user *, filename, unsigned, flags,
		unsigned int, mask,
		struct statx __user *, buffer)
{
	int ret;
	unsigned lflags;
	struct filename *name;

	/*
	 * Short-circuit handling of NULL and "" paths.
	 *
	 * For a NULL path we require and accept only the AT_EMPTY_PATH flag
	 * (possibly |'d with AT_STATX flags).
	 *
	 * However, glibc on 32-bit architectures implements fstatat as statx
	 * with the "" pathname and AT_NO_AUTOMOUNT | AT_EMPTY_PATH flags.
	 * Supporting this results in the uglification below.
	 */
	lflags = flags & ~(AT_NO_AUTOMOUNT | AT_STATX_SYNC_TYPE);
	if (lflags == AT_EMPTY_PATH && vfs_empty_path(dfd, filename))
		return do_statx_fd(dfd, flags & ~AT_NO_AUTOMOUNT, mask, buffer);

	name = getname_flags(filename, getname_statx_lookup_flags(flags));
	ret = do_statx(dfd, name, flags, mask, buffer);
	putname(name);

	return ret;
}
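/*
 * Usage sketch (userspace, not kernel code): emulating fstat() with statx(2)
 * as described in the kernel-doc above, by passing the fd of interest as dfd,
 * an empty path and AT_EMPTY_PATH. Assumes the glibc statx() wrapper
 * (glibc >= 2.28); error handling is kept minimal.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/stat.h>
 *
 *	int main(void)
 *	{
 *		struct statx stx;
 *		int fd = open("/etc/hostname", O_RDONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		if (statx(fd, "", AT_EMPTY_PATH, STATX_SIZE | STATX_MTIME, &stx))
 *			return 1;
 *		printf("size=%llu\n", (unsigned long long)stx.stx_size);
 *		return 0;
 *	}
 */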
#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_STAT)
static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
{
	struct compat_stat tmp;

	if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
		return -EOVERFLOW;
	if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
		return -EOVERFLOW;

	memset(&tmp, 0, sizeof(tmp));
	tmp.st_dev = new_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
		return -EOVERFLOW;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = stat->nlink;
	if (tmp.st_nlink != stat->nlink)
		return -EOVERFLOW;
	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
	tmp.st_rdev = new_encode_dev(stat->rdev);
	if ((u64) stat->size > MAX_NON_LFS)
		return -EOVERFLOW;
	tmp.st_size = stat->size;
	tmp.st_atime = stat->atime.tv_sec;
	tmp.st_atime_nsec = stat->atime.tv_nsec;
	tmp.st_mtime = stat->mtime.tv_sec;
	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
	tmp.st_ctime = stat->ctime.tv_sec;
	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
	tmp.st_blocks = stat->blocks;
	tmp.st_blksize = stat->blksize;
	return copy_to_user(ubuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}

COMPAT_SYSCALL_DEFINE2(newstat, const char __user *, filename,
		       struct compat_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_stat(filename, &stat);
	if (error)
		return error;
	return cp_compat_stat(&stat, statbuf);
}

COMPAT_SYSCALL_DEFINE2(newlstat, const char __user *, filename,
		       struct compat_stat __user *, statbuf)
{
	struct kstat stat;
	int error;

	error = vfs_lstat(filename, &stat);
	if (error)
		return error;
	return cp_compat_stat(&stat, statbuf);
}

#ifndef __ARCH_WANT_STAT64
COMPAT_SYSCALL_DEFINE4(newfstatat, unsigned int, dfd,
		       const char __user *, filename,
		       struct compat_stat __user *, statbuf, int, flag)
{
	struct kstat stat;
	int error;

	error = vfs_fstatat(dfd, filename, &stat, flag);
	if (error)
		return error;
	return cp_compat_stat(&stat, statbuf);
}
#endif

COMPAT_SYSCALL_DEFINE2(newfstat, unsigned int, fd,
		       struct compat_stat __user *, statbuf)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		error = cp_compat_stat(&stat, statbuf);
	return error;
}
#endif

/* Caller is here responsible for sufficient locking (ie. inode->i_lock) */
void __inode_add_bytes(struct inode *inode, loff_t bytes)
{
	inode->i_blocks += bytes >> 9;
	bytes &= 511;
	inode->i_bytes += bytes;
	if (inode->i_bytes >= 512) {
		inode->i_blocks++;
		inode->i_bytes -= 512;
	}
}
EXPORT_SYMBOL(__inode_add_bytes);

void inode_add_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	__inode_add_bytes(inode, bytes);
	spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(inode_add_bytes);
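/*
 * Worked example (illustrative only) of the split accounting above:
 * starting from i_blocks = 2 and i_bytes = 500, two additions give
 *
 *	__inode_add_bytes(inode, 1536);	// i_blocks = 5, i_bytes = 500
 *	__inode_add_bytes(inode, 100);	// 500 + 100 >= 512, so carry:
 *					// i_blocks = 6, i_bytes = 88
 *
 * i.e. sizes are kept as whole 512-byte blocks in i_blocks plus a sub-block
 * remainder in i_bytes, matching inode_set_bytes() below.
 */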
void __inode_sub_bytes(struct inode *inode, loff_t bytes)
{
	inode->i_blocks -= bytes >> 9;
	bytes &= 511;
	if (inode->i_bytes < bytes) {
		inode->i_blocks--;
		inode->i_bytes += 512;
	}
	inode->i_bytes -= bytes;
}

EXPORT_SYMBOL(__inode_sub_bytes);

void inode_sub_bytes(struct inode *inode, loff_t bytes)
{
	spin_lock(&inode->i_lock);
	__inode_sub_bytes(inode, bytes);
	spin_unlock(&inode->i_lock);
}

EXPORT_SYMBOL(inode_sub_bytes);

loff_t inode_get_bytes(struct inode *inode)
{
	loff_t ret;

	spin_lock(&inode->i_lock);
	ret = __inode_get_bytes(inode);
	spin_unlock(&inode->i_lock);
	return ret;
}

EXPORT_SYMBOL(inode_get_bytes);

void inode_set_bytes(struct inode *inode, loff_t bytes)
{
	/* Caller is here responsible for sufficient locking
	 * (ie. inode->i_lock) */
	inode->i_blocks = bytes >> 9;
	inode->i_bytes = bytes & 511;
}

EXPORT_SYMBOL(inode_set_bytes);