/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/khugepaged.h>

#include <asm/tlbflush.h> /* for arch/microblaze update_mmu_cache() */

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <uapi/linux/memfd.h>
#include <linux/userfaultfd_k.h>
#include <linux/rmap.h>

#include <linux/uaccess.h>
#include <asm/pgtable.h>

#include "internal.h"

#define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
#define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)
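/*
 * Worked example for the two macros above (illustrative only, assuming
 * PAGE_SIZE == 4096 and PAGE_SHIFT == 12): a 10000-byte object is
 * page-aligned up to 12288 bytes, so VM_ACCT(10000) == 3 pages; and
 * since i_blocks counts 512-byte units, BLOCKS_PER_PAGE == 8, so those
 * 3 pages appear as 24 blocks.
 */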
/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

/*
 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
 * inode->i_private (with i_mutex making sure that it has only one user at
 * a time): we would prefer not to enlarge the shmem inode just for that.
 */
struct shmem_falloc {
	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
	pgoff_t start;		/* start of range currently being fallocated */
	pgoff_t next;		/* the next page offset to be fallocated */
	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
#endif

static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
static int shmem_replace_page(struct page **pagep, gfp_t gfp,
				struct shmem_inode_info *info, pgoff_t index);
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
		struct page **pagep, enum sgp_type sgp,
		gfp_t gfp, struct vm_area_struct *vma,
		struct vm_fault *vmf, int *fault_type);

int shmem_getpage(struct inode *inode, pgoff_t index,
		struct page **pagep, enum sgp_type sgp)
{
	return shmem_getpage_gfp(inode, index, pagep, sgp,
		mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
}
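/*
 * Usage note: shmem_getpage() is the convenience wrapper for callers with
 * no vma or fault context to pass down, e.g. the partial-page reads with
 * SGP_READ in shmem_undo_range() below; everything else goes through
 * shmem_getpage_gfp() with full control.
 */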
static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

static inline int shmem_reacct_size(unsigned long flags,
		loff_t oldsize, loff_t newsize)
{
	if (!(flags & VM_NORESERVE)) {
		if (VM_ACCT(newsize) > VM_ACCT(oldsize))
			return security_vm_enough_memory_mm(current->mm,
					VM_ACCT(newsize) - VM_ACCT(oldsize));
		else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
			vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
	}
	return 0;
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow large sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags, long pages)
{
	if (!(flags & VM_NORESERVE))
		return 0;

	return security_vm_enough_memory_mm(current->mm,
			pages * VM_ACCT(PAGE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
}
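/*
 * Illustrative contrast between the two schemes (a sketch of the intent
 * described above, not of every configuration): a 1G SysV shm segment is
 * pre-accounted in full by shmem_acct_size() at setup time, whereas a 1G
 * sparse tmpfs file with a single page written is only charged for that
 * one page, via shmem_acct_block() on the VM_NORESERVE path.
 */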
static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;
static struct file_system_type shmem_fs_type;

bool vma_is_shmem(struct vm_area_struct *vma)
{
	return vma->vm_ops == &shmem_vm_ops;
}

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

static int shmem_reserve_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}
	return 0;
}
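/*
 * Sketch of the limit enforced above: with a bounded mount such as
 *	mount -t tmpfs -o nr_inodes=100 tmpfs /mnt
 * max_inodes is non-zero, and creating a 101st live inode fails with
 * -ENOSPC; an unbounded mount (nr_inodes=0) skips the counting entirely.
 */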
static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks)
			percpu_counter_add(&sbinfo->used_blocks, -freed);
		info->alloced -= freed;
		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
		shmem_unacct_blocks(info->flags, freed);
	}
}
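/*
 * Numeric sketch of the invariant above: if info->alloced == 10 and
 * info->swapped == 2, but reclaim has dropped three clean hole pages so
 * nrpages == 5, then freed == 10 - 2 - 5 == 3, and alloced, i_blocks and
 * the filesystem-wide used_blocks counter are all walked back by 3 pages.
 */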
bool shmem_charge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	unsigned long flags;

	if (shmem_acct_block(info->flags, pages))
		return false;
	spin_lock_irqsave(&info->lock, flags);
	info->alloced += pages;
	inode->i_blocks += pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);
	inode->i_mapping->nrpages += pages;

	if (!sbinfo->max_blocks)
		return true;
	if (percpu_counter_compare(&sbinfo->used_blocks,
				sbinfo->max_blocks - pages) > 0) {
		inode->i_mapping->nrpages -= pages;
		spin_lock_irqsave(&info->lock, flags);
		info->alloced -= pages;
		shmem_recalc_inode(inode);
		spin_unlock_irqrestore(&info->lock, flags);
		shmem_unacct_blocks(info->flags, pages);
		return false;
	}
	percpu_counter_add(&sbinfo->used_blocks, pages);
	return true;
}

void shmem_uncharge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	unsigned long flags;

	spin_lock_irqsave(&info->lock, flags);
	info->alloced -= pages;
	inode->i_blocks -= pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);

	if (sbinfo->max_blocks)
		percpu_counter_sub(&sbinfo->used_blocks, pages);
	shmem_unacct_blocks(info->flags, pages);
}

/*
 * Replace item expected in radix tree by a new item, while holding tree lock.
 */
static int shmem_radix_tree_replace(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	struct radix_tree_node *node;
	void **pslot;
	void *item;

	VM_BUG_ON(!expected);
	VM_BUG_ON(!replacement);
	item = __radix_tree_lookup(&mapping->page_tree, index, &node, &pslot);
	if (!item)
		return -ENOENT;
	if (item != expected)
		return -ENOENT;
	__radix_tree_replace(&mapping->page_tree, node, pslot,
			     replacement, NULL, NULL);
	return 0;
}

/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking page is not enough: by the time a SwapCache page is locked, it
 * might be reused, and again be SwapCache, using the same swap as before.
 */
static bool shmem_confirm_swap(struct address_space *mapping,
			       pgoff_t index, swp_entry_t swap)
{
	void *item;

	rcu_read_lock();
	item = radix_tree_lookup(&mapping->page_tree, index);
	rcu_read_unlock();
	return item == swp_to_radix_entry(swap);
}

/*
 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
 *
 * SHMEM_HUGE_NEVER:
 *	disables huge pages for the mount;
 * SHMEM_HUGE_ALWAYS:
 *	enables huge pages for the mount;
 * SHMEM_HUGE_WITHIN_SIZE:
 *	only allocate huge pages if the page will be fully within i_size,
 *	also respect fadvise()/madvise() hints;
 * SHMEM_HUGE_ADVISE:
 *	only allocate huge pages if requested with fadvise()/madvise();
 */

#define SHMEM_HUGE_NEVER	0
#define SHMEM_HUGE_ALWAYS	1
#define SHMEM_HUGE_WITHIN_SIZE	2
#define SHMEM_HUGE_ADVISE	3

/*
 * Special values.
 * Only can be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
 *
 * SHMEM_HUGE_DENY:
 *	disables huge on shm_mnt and all mounts, for emergency use;
 * SHMEM_HUGE_FORCE:
 *	enables huge on shm_mnt and all mounts, w/o needing option, for testing;
 *
 */
#define SHMEM_HUGE_DENY		(-1)
#define SHMEM_HUGE_FORCE	(-2)

#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
/* ifdef here to avoid bloating shmem.o when not necessary */

int shmem_huge __read_mostly;
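/*
 * Example (per the tmpfs documentation, not code in this excerpt): the
 * per-mount policy above is chosen at mount time, e.g.
 *	mount -t tmpfs -o huge=within_size tmpfs /mnt
 * selects SHMEM_HUGE_WITHIN_SIZE, while writing "deny" or "force" to
 * /sys/kernel/mm/transparent_hugepage/shmem_enabled installs the
 * emergency overrides in shmem_huge.
 */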
#if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
static int shmem_parse_huge(const char *str)
{
	if (!strcmp(str, "never"))
		return SHMEM_HUGE_NEVER;
	if (!strcmp(str, "always"))
		return SHMEM_HUGE_ALWAYS;
	if (!strcmp(str, "within_size"))
		return SHMEM_HUGE_WITHIN_SIZE;
	if (!strcmp(str, "advise"))
		return SHMEM_HUGE_ADVISE;
	if (!strcmp(str, "deny"))
		return SHMEM_HUGE_DENY;
	if (!strcmp(str, "force"))
		return SHMEM_HUGE_FORCE;
	return -EINVAL;
}

static const char *shmem_format_huge(int huge)
{
	switch (huge) {
	case SHMEM_HUGE_NEVER:
		return "never";
	case SHMEM_HUGE_ALWAYS:
		return "always";
	case SHMEM_HUGE_WITHIN_SIZE:
		return "within_size";
	case SHMEM_HUGE_ADVISE:
		return "advise";
	case SHMEM_HUGE_DENY:
		return "deny";
	case SHMEM_HUGE_FORCE:
		return "force";
	default:
		VM_BUG_ON(1);
		return "bad_val";
	}
}
#endif
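/*
 * Round-trip sketch for the helpers above: shmem_parse_huge("within_size")
 * returns SHMEM_HUGE_WITHIN_SIZE, which shmem_format_huge() maps back to
 * "within_size" when showing mount options; any unknown string parses to
 * -EINVAL.
 */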
static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	LIST_HEAD(list), *pos, *next;
	LIST_HEAD(to_remove);
	struct inode *inode;
	struct shmem_inode_info *info;
	struct page *page;
	unsigned long batch = sc ? sc->nr_to_scan : 128;
	int removed = 0, split = 0;

	if (list_empty(&sbinfo->shrinklist))
		return SHRINK_STOP;

	spin_lock(&sbinfo->shrinklist_lock);
	list_for_each_safe(pos, next, &sbinfo->shrinklist) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);

		/* pin the inode */
		inode = igrab(&info->vfs_inode);

		/* inode is about to be evicted */
		if (!inode) {
			list_del_init(&info->shrinklist);
			removed++;
			goto next;
		}

		/* Check if there's anything to gain */
		if (round_up(inode->i_size, PAGE_SIZE) ==
				round_up(inode->i_size, HPAGE_PMD_SIZE)) {
			list_move(&info->shrinklist, &to_remove);
			removed++;
			goto next;
		}

		list_move(&info->shrinklist, &list);
next:
		if (!--batch)
			break;
	}
	spin_unlock(&sbinfo->shrinklist_lock);

	list_for_each_safe(pos, next, &to_remove) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;
		list_del_init(&info->shrinklist);
		iput(inode);
	}

	list_for_each_safe(pos, next, &list) {
		int ret;

		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;

		if (nr_to_split && split >= nr_to_split) {
			iput(inode);
			continue;
		}

		page = find_lock_page(inode->i_mapping,
				(inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT);
		if (!page)
			goto drop;

		if (!PageTransHuge(page)) {
			unlock_page(page);
			put_page(page);
			goto drop;
		}

		ret = split_huge_page(page);
		unlock_page(page);
		put_page(page);

		if (ret) {
			/* split failed: leave it on the list */
			iput(inode);
			continue;
		}

		split++;
drop:
		list_del_init(&info->shrinklist);
		removed++;
		iput(inode);
	}

	spin_lock(&sbinfo->shrinklist_lock);
	list_splice_tail(&list, &sbinfo->shrinklist);
	sbinfo->shrinklist_len -= removed;
	spin_unlock(&sbinfo->shrinklist_lock);

	return split;
}

static long shmem_unused_huge_scan(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (!READ_ONCE(sbinfo->shrinklist_len))
		return SHRINK_STOP;

	return shmem_unused_huge_shrink(sbinfo, sc, 0);
}

static long shmem_unused_huge_count(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	return READ_ONCE(sbinfo->shrinklist_len);
}
#else /* !CONFIG_TRANSPARENT_HUGE_PAGECACHE */

#define shmem_huge SHMEM_HUGE_DENY

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */
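/*
 * Usage note (a sketch; the wiring lives outside this excerpt): the pair
 * shmem_unused_huge_count()/shmem_unused_huge_scan() is intended for the
 * superblock shrinker hooks, so huge pages straddling i_size are only
 * split back under memory pressure rather than eagerly at truncate time.
 */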
/*
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct page *page,
				   struct address_space *mapping,
				   pgoff_t index, void *expected)
{
	int error, nr = hpage_nr_pages(page);

	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(index != round_down(index, nr), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
	VM_BUG_ON(expected && PageTransHuge(page));

	page_ref_add(page, nr);
	page->mapping = mapping;
	page->index = index;

	spin_lock_irq(&mapping->tree_lock);
	if (PageTransHuge(page)) {
		void __rcu **results;
		pgoff_t idx;
		int i;

		error = 0;
		if (radix_tree_gang_lookup_slot(&mapping->page_tree,
					&results, &idx, index, 1) &&
				idx < index + HPAGE_PMD_NR) {
			error = -EEXIST;
		}

		if (!error) {
			for (i = 0; i < HPAGE_PMD_NR; i++) {
				error = radix_tree_insert(&mapping->page_tree,
						index + i, page + i);
				VM_BUG_ON(error);
			}
			count_vm_event(THP_FILE_ALLOC);
		}
	} else if (!expected) {
		error = radix_tree_insert(&mapping->page_tree, index, page);
	} else {
		error = shmem_radix_tree_replace(mapping, index, expected,
								 page);
	}

	if (!error) {
		mapping->nrpages += nr;
		if (PageTransHuge(page))
			__inc_node_page_state(page, NR_SHMEM_THPS);
		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
		__mod_node_page_state(page_pgdat(page), NR_SHMEM, nr);
		spin_unlock_irq(&mapping->tree_lock);
	} else {
		page->mapping = NULL;
		spin_unlock_irq(&mapping->tree_lock);
		page_ref_sub(page, nr);
	}
	return error;
}

/*
 * Like delete_from_page_cache, but substitutes swap for page.
 */
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
{
	struct address_space *mapping = page->mapping;
	int error;

	VM_BUG_ON_PAGE(PageCompound(page), page);

	spin_lock_irq(&mapping->tree_lock);
	error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
	page->mapping = NULL;
	mapping->nrpages--;
	__dec_node_page_state(page, NR_FILE_PAGES);
	__dec_node_page_state(page, NR_SHMEM);
	spin_unlock_irq(&mapping->tree_lock);
	put_page(page);
	BUG_ON(error);
}

/*
 * Remove swap entry from radix tree, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
{
	void *old;

	spin_lock_irq(&mapping->tree_lock);
	old = radix_tree_delete_item(&mapping->page_tree, index, radswap);
	spin_unlock_irq(&mapping->tree_lock);
	if (old != radswap)
		return -ENOENT;
	free_swap_and_cache(radix_to_swp_entry(radswap));
	return 0;
}

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given offsets are swapped out.
 *
 * This is safe to call without i_mutex or mapping->tree_lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_partial_swap_usage(struct address_space *mapping,
						pgoff_t start, pgoff_t end)
{
	struct radix_tree_iter iter;
	void **slot;
	struct page *page;
	unsigned long swapped = 0;

	rcu_read_lock();

	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
		if (iter.index >= end)
			break;

		page = radix_tree_deref_slot(slot);

		if (radix_tree_deref_retry(page)) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}

		if (radix_tree_exceptional_entry(page))
			swapped++;

		if (need_resched()) {
			slot = radix_tree_iter_resume(slot, &iter);
			cond_resched_rcu();
		}
	}

	rcu_read_unlock();

	return swapped << PAGE_SHIFT;
}
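/*
 * Units sketch for the helper above: swapped counts radix-tree
 * exceptional (swap) entries, i.e. whole pages, and the result is bytes;
 * with 4K pages, three swapped-out pages report 3 << 12 == 12288.
 */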
/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given vma are swapped out.
 *
 * This is safe to call without i_mutex or mapping->tree_lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_swap_usage(struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct address_space *mapping = inode->i_mapping;
	unsigned long swapped;

	/* Be careful as we don't hold info->lock */
	swapped = READ_ONCE(info->swapped);

	/*
	 * The easier cases are when the shmem object has nothing in swap, or
	 * the vma maps it whole. Then we can simply use the stats that we
	 * already track.
	 */
	if (!swapped)
		return 0;

	if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
		return swapped << PAGE_SHIFT;

	/* Here comes the more involved part */
	return shmem_partial_swap_usage(mapping,
			linear_page_index(vma, vma->vm_start),
			linear_page_index(vma, vma->vm_end));
}

/*
 * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	pgoff_t index = 0;

	pagevec_init(&pvec, 0);
	/*
	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
	 */
	while (!mapping_unevictable(mapping)) {
		/*
		 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
		 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
		 */
		pvec.nr = find_get_entries(mapping, index,
					   PAGEVEC_SIZE, pvec.pages, indices);
		if (!pvec.nr)
			break;
		index = indices[pvec.nr - 1] + 1;
		pagevec_remove_exceptionals(&pvec);
		check_move_unevictable_pages(pvec.pages, pvec.nr);
		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Remove range of pages and swap entries from radix tree, and free them.
 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
 */
static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
								 bool unfalloc)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	pgoff_t end = (lend + 1) >> PAGE_SHIFT;
	unsigned int partial_start = lstart & (PAGE_SIZE - 1);
	unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	long nr_swaps_freed = 0;
	pgoff_t index;
	int i;

	if (lend == -1)
		end = -1;	/* unsigned, so actually very big */
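	/*
	 * Boundary arithmetic sketch (assuming 4K pages): punching the
	 * hole lstart == 1000, lend == 8191 gives start == 1, end == 2,
	 * partial_start == 1000 and partial_end == 0 -- so whole page 1
	 * is removed below, and only bytes 1000..4095 of page 0 are
	 * zeroed by the partial_start pass.
	 */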
	pagevec_init(&pvec, 0);
	index = start;
	while (index < end) {
		pvec.nr = find_get_entries(mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE),
			pvec.pages, indices);
		if (!pvec.nr)
			break;
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				if (unfalloc)
					continue;
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}

			VM_BUG_ON_PAGE(page_to_pgoff(page) != index, page);

			if (!trylock_page(page))
				continue;

			if (PageTransTail(page)) {
				/* Middle of THP: zero out the page */
				clear_highpage(page);
				unlock_page(page);
				continue;
			} else if (PageTransHuge(page)) {
				if (index == round_down(end, HPAGE_PMD_NR)) {
					/*
					 * Range ends in the middle of THP:
					 * zero out the page
					 */
					clear_highpage(page);
					unlock_page(page);
					continue;
				}
				index += HPAGE_PMD_NR - 1;
				i += HPAGE_PMD_NR - 1;
			}

			if (!unfalloc || !PageUptodate(page)) {
				VM_BUG_ON_PAGE(PageTail(page), page);
				if (page_mapping(page) == mapping) {
					VM_BUG_ON_PAGE(PageWriteback(page), page);
					truncate_inode_page(mapping, page);
				}
			}
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}

	if (partial_start) {
		struct page *page = NULL;
		shmem_getpage(inode, start - 1, &page, SGP_READ);
		if (page) {
			unsigned int top = PAGE_SIZE;
			if (start > end) {
				top = partial_end;
				partial_end = 0;
			}
			zero_user_segment(page, partial_start, top);
			set_page_dirty(page);
			unlock_page(page);
			put_page(page);
		}
	}
	if (partial_end) {
		struct page *page = NULL;
		shmem_getpage(inode, end, &page, SGP_READ);
		if (page) {
			zero_user_segment(page, 0, partial_end);
			set_page_dirty(page);
			unlock_page(page);
			put_page(page);
		}
	}
	if (start >= end)
		return;

	index = start;
	while (index < end) {
		cond_resched();

		pvec.nr = find_get_entries(mapping, index,
				min(end - index, (pgoff_t)PAGEVEC_SIZE),
				pvec.pages, indices);
		if (!pvec.nr) {
			/* If all gone or hole-punch or unfalloc, we're done */
			if (index == start || end != -1)
				break;
			/* But if truncating, restart to make sure all gone */
			index = start;
			continue;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				if (unfalloc)
					continue;
				if (shmem_free_swap(mapping, index, page)) {
					/* Swap was replaced by page: retry */
					index--;
					break;
				}
				nr_swaps_freed++;
				continue;
			}

			lock_page(page);

			if (PageTransTail(page)) {
				/* Middle of THP: zero out the page */
				clear_highpage(page);
				unlock_page(page);
				/*
				 * Partial thp truncate due to 'start' in
				 * middle of THP: don't need to look on these
				 * pages again on !pvec.nr restart.
				 */
				if (index != round_down(end, HPAGE_PMD_NR))
					start++;
				continue;
			} else if (PageTransHuge(page)) {
				if (index == round_down(end, HPAGE_PMD_NR)) {
					/*
					 * Range ends in the middle of THP:
					 * zero out the page
					 */
					clear_highpage(page);
					unlock_page(page);
					continue;
				}
				index += HPAGE_PMD_NR - 1;
				i += HPAGE_PMD_NR - 1;
			}

			if (!unfalloc || !PageUptodate(page)) {
				VM_BUG_ON_PAGE(PageTail(page), page);
				if (page_mapping(page) == mapping) {
					VM_BUG_ON_PAGE(PageWriteback(page), page);
					truncate_inode_page(mapping, page);
				} else {
					/* Page was replaced by swap: retry */
					unlock_page(page);
					index--;
					break;
				}
			}
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		index++;
	}

	spin_lock_irq(&info->lock);
	info->swapped -= nr_swaps_freed;
	shmem_recalc_inode(inode);
	spin_unlock_irq(&info->lock);
}

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	shmem_undo_range(inode, lstart, lend, false);
	inode->i_ctime = inode->i_mtime = current_time(inode);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);
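/*
 * Example callers (a sketch): eviction below uses the full-range form
 * shmem_truncate_range(inode, 0, (loff_t)-1), while hole punching via
 * fallocate(FALLOC_FL_PUNCH_HOLE) arrives here with both ends inside
 * i_size, exercising the partial-page zeroing in shmem_undo_range().
 */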
962 static int shmem_getattr(const struct path *path, struct kstat *stat, 1151 const struct path *p << 1152 u32 request_mask, un 963 u32 request_mask, unsigned int query_flags) 1153 { 964 { 1154 struct inode *inode = path->dentry->d 965 struct inode *inode = path->dentry->d_inode; 1155 struct shmem_inode_info *info = SHMEM 966 struct shmem_inode_info *info = SHMEM_I(inode); 1156 967 1157 if (info->alloced - info->swapped != !! 968 if (info->alloced - info->swapped != inode->i_mapping->nrpages) { 1158 shmem_recalc_inode(inode, 0, !! 969 spin_lock_irq(&info->lock); 1159 !! 970 shmem_recalc_inode(inode); 1160 if (info->fsflags & FS_APPEND_FL) !! 971 spin_unlock_irq(&info->lock); 1161 stat->attributes |= STATX_ATT << 1162 if (info->fsflags & FS_IMMUTABLE_FL) << 1163 stat->attributes |= STATX_ATT << 1164 if (info->fsflags & FS_NODUMP_FL) << 1165 stat->attributes |= STATX_ATT << 1166 stat->attributes_mask |= (STATX_ATTR_ << 1167 STATX_ATTR_IMMUTABLE << 1168 STATX_ATTR_NODUMP); << 1169 inode_lock_shared(inode); << 1170 generic_fillattr(idmap, request_mask, << 1171 inode_unlock_shared(inode); << 1172 << 1173 if (shmem_huge_global_enabled(inode, << 1174 stat->blksize = HPAGE_PMD_SIZ << 1175 << 1176 if (request_mask & STATX_BTIME) { << 1177 stat->result_mask |= STATX_BT << 1178 stat->btime.tv_sec = info->i_ << 1179 stat->btime.tv_nsec = info->i << 1180 } 972 } 1181 !! 973 generic_fillattr(inode, stat); 1182 return 0; 974 return 0; 1183 } 975 } 1184 976 1185 static int shmem_setattr(struct mnt_idmap *id !! 977 static int shmem_setattr(struct dentry *dentry, struct iattr *attr) 1186 struct dentry *dentr << 1187 { 978 { 1188 struct inode *inode = d_inode(dentry) 979 struct inode *inode = d_inode(dentry); 1189 struct shmem_inode_info *info = SHMEM 980 struct shmem_inode_info *info = SHMEM_I(inode); >> 981 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 1190 int error; 982 int error; 1191 bool update_mtime = false; << 1192 bool update_ctime = true; << 1193 983 1194 error = setattr_prepare(idmap, dentry !! 984 error = setattr_prepare(dentry, attr); 1195 if (error) 985 if (error) 1196 return error; 986 return error; 1197 987 1198 if ((info->seals & F_SEAL_EXEC) && (a << 1199 if ((inode->i_mode ^ attr->ia << 1200 return -EPERM; << 1201 } << 1202 } << 1203 << 1204 if (S_ISREG(inode->i_mode) && (attr-> 988 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { 1205 loff_t oldsize = inode->i_siz 989 loff_t oldsize = inode->i_size; 1206 loff_t newsize = attr->ia_siz 990 loff_t newsize = attr->ia_size; 1207 991 1208 /* protected by i_rwsem */ !! 992 /* protected by i_mutex */ 1209 if ((newsize < oldsize && (in 993 if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) || 1210 (newsize > oldsize && (in 994 (newsize > oldsize && (info->seals & F_SEAL_GROW))) 1211 return -EPERM; 995 return -EPERM; 1212 996 1213 if (newsize != oldsize) { 997 if (newsize != oldsize) { 1214 error = shmem_reacct_ 998 error = shmem_reacct_size(SHMEM_I(inode)->flags, 1215 oldsi 999 oldsize, newsize); 1216 if (error) 1000 if (error) 1217 return error; 1001 return error; 1218 i_size_write(inode, n 1002 i_size_write(inode, newsize); 1219 update_mtime = true; !! 
1003 inode->i_ctime = inode->i_mtime = current_time(inode); 1220 } else { << 1221 update_ctime = false; << 1222 } 1004 } 1223 if (newsize <= oldsize) { 1005 if (newsize <= oldsize) { 1224 loff_t holebegin = ro 1006 loff_t holebegin = round_up(newsize, PAGE_SIZE); 1225 if (oldsize > holebeg 1007 if (oldsize > holebegin) 1226 unmap_mapping 1008 unmap_mapping_range(inode->i_mapping, 1227 1009 holebegin, 0, 1); 1228 if (info->alloced) 1010 if (info->alloced) 1229 shmem_truncat 1011 shmem_truncate_range(inode, 1230 1012 newsize, (loff_t)-1); 1231 /* unmap again to rem 1013 /* unmap again to remove racily COWed private pages */ 1232 if (oldsize > holebeg 1014 if (oldsize > holebegin) 1233 unmap_mapping 1015 unmap_mapping_range(inode->i_mapping, 1234 1016 holebegin, 0, 1); 1235 } << 1236 } << 1237 1017 1238 if (is_quota_modification(idmap, inod !! 1018 /* 1239 error = dquot_initialize(inod !! 1019 * Part of the huge page can be beyond i_size: subject 1240 if (error) !! 1020 * to shrink under memory pressure. 1241 return error; !! 1021 */ 1242 } !! 1022 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) { 1243 !! 1023 spin_lock(&sbinfo->shrinklist_lock); 1244 /* Transfer quota accounting */ !! 1024 /* 1245 if (i_uid_needs_update(idmap, attr, i !! 1025 * _careful to defend against unlocked access to 1246 i_gid_needs_update(idmap, attr, i !! 1026 * ->shrink_list in shmem_unused_huge_shrink() 1247 error = dquot_transfer(idmap, !! 1027 */ 1248 if (error) !! 1028 if (list_empty_careful(&info->shrinklist)) { 1249 return error; !! 1029 list_add_tail(&info->shrinklist, >> 1030 &sbinfo->shrinklist); >> 1031 sbinfo->shrinklist_len++; >> 1032 } >> 1033 spin_unlock(&sbinfo->shrinklist_lock); >> 1034 } >> 1035 } 1250 } 1036 } 1251 1037 1252 setattr_copy(idmap, inode, attr); !! 1038 setattr_copy(inode, attr); 1253 if (attr->ia_valid & ATTR_MODE) 1039 if (attr->ia_valid & ATTR_MODE) 1254 error = posix_acl_chmod(idmap !! 1040 error = posix_acl_chmod(inode, inode->i_mode); 1255 if (!error && update_ctime) { << 1256 inode_set_ctime_current(inode << 1257 if (update_mtime) << 1258 inode_set_mtime_to_ts << 1259 inode_inc_iversion(inode); << 1260 } << 1261 return error; 1041 return error; 1262 } 1042 } 1263 1043 1264 static void shmem_evict_inode(struct inode *i 1044 static void shmem_evict_inode(struct inode *inode) 1265 { 1045 { 1266 struct shmem_inode_info *info = SHMEM 1046 struct shmem_inode_info *info = SHMEM_I(inode); 1267 struct shmem_sb_info *sbinfo = SHMEM_ 1047 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 1268 size_t freed = 0; << 1269 1048 1270 if (shmem_mapping(inode->i_mapping)) !! 1049 if (inode->i_mapping->a_ops == &shmem_aops) { 1271 shmem_unacct_size(info->flags 1050 shmem_unacct_size(info->flags, inode->i_size); 1272 inode->i_size = 0; 1051 inode->i_size = 0; 1273 mapping_set_exiting(inode->i_ << 1274 shmem_truncate_range(inode, 0 1052 shmem_truncate_range(inode, 0, (loff_t)-1); 1275 if (!list_empty(&info->shrink 1053 if (!list_empty(&info->shrinklist)) { 1276 spin_lock(&sbinfo->sh 1054 spin_lock(&sbinfo->shrinklist_lock); 1277 if (!list_empty(&info 1055 if (!list_empty(&info->shrinklist)) { 1278 list_del_init 1056 list_del_init(&info->shrinklist); 1279 sbinfo->shrin 1057 sbinfo->shrinklist_len--; 1280 } 1058 } 1281 spin_unlock(&sbinfo-> 1059 spin_unlock(&sbinfo->shrinklist_lock); 1282 } 1060 } 1283 while (!list_empty(&info->swa !! 
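The F_SEAL_SHRINK/F_SEAL_GROW test near the top of shmem_setattr() is all that memfd sealing needs here: a sealed resize direction fails with EPERM before any truncation or unmapping starts. A small sketch of the user-visible contract (name and sizes are illustrative):

/* Hedged sketch: once F_SEAL_SHRINK is set, shrinking fails with
 * EPERM straight out of shmem_setattr(); growing is still fine. */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = memfd_create("sealed", MFD_ALLOW_SEALING);

	ftruncate(fd, 8192);			/* start at 8K */
	fcntl(fd, F_ADD_SEALS, F_SEAL_SHRINK);

	if (ftruncate(fd, 4096) < 0 && errno == EPERM)
		puts("shrink blocked by F_SEAL_SHRINK");
	if (ftruncate(fd, 16384) == 0)
		puts("grow still allowed (no F_SEAL_GROW)");
	return 0;
}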
1061 if (!list_empty(&info->swaplist)) { 1284 /* Wait while shmem_u << 1285 wait_var_event(&info- << 1286 !atomi << 1287 mutex_lock(&shmem_swa 1062 mutex_lock(&shmem_swaplist_mutex); 1288 /* ...but beware of t !! 1063 list_del_init(&info->swaplist); 1289 if (!atomic_read(&inf << 1290 list_del_init << 1291 mutex_unlock(&shmem_s 1064 mutex_unlock(&shmem_swaplist_mutex); 1292 } 1065 } 1293 } 1066 } 1294 1067 1295 simple_xattrs_free(&info->xattrs, sbi !! 1068 simple_xattrs_free(&info->xattrs); 1296 shmem_free_inode(inode->i_sb, freed); << 1297 WARN_ON(inode->i_blocks); 1069 WARN_ON(inode->i_blocks); >> 1070 shmem_free_inode(inode->i_sb); 1298 clear_inode(inode); 1071 clear_inode(inode); 1299 #ifdef CONFIG_TMPFS_QUOTA << 1300 dquot_free_inode(inode); << 1301 dquot_drop(inode); << 1302 #endif << 1303 } 1072 } 1304 1073 1305 static int shmem_find_swap_entries(struct add !! 1074 static unsigned long find_swap_entry(struct radix_tree_root *root, void *item) 1306 pgoff_t st !! 1075 { 1307 pgoff_t *i !! 1076 struct radix_tree_iter iter; 1308 { !! 1077 void **slot; 1309 XA_STATE(xas, &mapping->i_pages, star !! 1078 unsigned long found = -1; 1310 struct folio *folio; !! 1079 unsigned int checked = 0; 1311 swp_entry_t entry; << 1312 1080 1313 rcu_read_lock(); 1081 rcu_read_lock(); 1314 xas_for_each(&xas, folio, ULONG_MAX) !! 1082 radix_tree_for_each_slot(slot, root, &iter, 0) { 1315 if (xas_retry(&xas, folio)) !! 1083 if (*slot == item) { 1316 continue; !! 1084 found = iter.index; 1317 << 1318 if (!xa_is_value(folio)) << 1319 continue; << 1320 << 1321 entry = radix_to_swp_entry(fo << 1322 /* << 1323 * swapin error entries can b << 1324 * deliberately ignored here << 1325 */ << 1326 if (swp_type(entry) != type) << 1327 continue; << 1328 << 1329 indices[folio_batch_count(fba << 1330 if (!folio_batch_add(fbatch, << 1331 break; 1085 break; 1332 << 1333 if (need_resched()) { << 1334 xas_pause(&xas); << 1335 cond_resched_rcu(); << 1336 } 1086 } >> 1087 checked++; >> 1088 if ((checked % 4096) != 0) >> 1089 continue; >> 1090 slot = radix_tree_iter_resume(slot, &iter); >> 1091 cond_resched_rcu(); 1337 } 1092 } 1338 rcu_read_unlock(); << 1339 1093 1340 return xas.xa_index; !! 1094 rcu_read_unlock(); >> 1095 return found; 1341 } 1096 } 1342 1097 1343 /* 1098 /* 1344 * Move the swapped pages for an inode to pag !! 1099 * If swap found in inode, free it and move page from swapcache to filecache. 1345 * of pages swapped in, or the error in case << 1346 */ 1100 */ 1347 static int shmem_unuse_swap_entries(struct in !! 1101 static int shmem_unuse_inode(struct shmem_inode_info *info, 1348 struct folio_batch *fbatch, p !! 1102 swp_entry_t swap, struct page **pagep) 1349 { 1103 { 1350 int i = 0; !! 1104 struct address_space *mapping = info->vfs_inode.i_mapping; 1351 int ret = 0; !! 1105 void *radswap; >> 1106 pgoff_t index; >> 1107 gfp_t gfp; 1352 int error = 0; 1108 int error = 0; 1353 struct address_space *mapping = inode << 1354 1109 1355 for (i = 0; i < folio_batch_count(fba !! 1110 radswap = swp_to_radix_entry(swap); 1356 struct folio *folio = fbatch- !! 1111 index = find_swap_entry(&mapping->page_tree, radswap); >> 1112 if (index == -1) >> 1113 return -EAGAIN; /* tell shmem_unuse we found nothing */ 1357 1114 1358 if (!xa_is_value(folio)) !! 1115 /* 1359 continue; !! 1116 * Move _head_ to start search for next from here. 1360 error = shmem_swapin_folio(in !! 1117 * But be careful: shmem_evict_inode checks list_empty without taking 1361 mappi !! 
1118 * mutex, and there's an instant in list_move_tail when info->swaplist 1362 if (error == 0) { !! 1119 * would appear empty, if it were the only one on shmem_swaplist. 1363 folio_unlock(folio); !! 1120 */ 1364 folio_put(folio); !! 1121 if (shmem_swaplist.next != &info->swaplist) 1365 ret++; !! 1122 list_move_tail(&shmem_swaplist, &info->swaplist); 1366 } << 1367 if (error == -ENOMEM) << 1368 break; << 1369 error = 0; << 1370 } << 1371 return error ? error : ret; << 1372 } << 1373 1123 1374 /* !! 1124 gfp = mapping_gfp_mask(mapping); 1375 * If swap found in inode, free it and move p !! 1125 if (shmem_should_replace_page(*pagep, gfp)) { 1376 */ !! 1126 mutex_unlock(&shmem_swaplist_mutex); 1377 static int shmem_unuse_inode(struct inode *in !! 1127 error = shmem_replace_page(pagep, gfp, info, index); 1378 { !! 1128 mutex_lock(&shmem_swaplist_mutex); 1379 struct address_space *mapping = inode !! 1129 /* 1380 pgoff_t start = 0; !! 1130 * We needed to drop mutex to make that restrictive page 1381 struct folio_batch fbatch; !! 1131 * allocation, but the inode might have been freed while we 1382 pgoff_t indices[PAGEVEC_SIZE]; !! 1132 * dropped it: although a racing shmem_evict_inode() cannot 1383 int ret = 0; !! 1133 * complete without emptying the radix_tree, our page lock >> 1134 * on this swapcache page is not enough to prevent that - >> 1135 * free_swap_and_cache() of our swap entry will only >> 1136 * trylock_page(), removing swap from radix_tree whatever. >> 1137 * >> 1138 * We must not proceed to shmem_add_to_page_cache() if the >> 1139 * inode has been freed, but of course we cannot rely on >> 1140 * inode or mapping or info to check that. However, we can >> 1141 * safely check if our swap entry is still in use (and here >> 1142 * it can't have got reused for another page): if it's still >> 1143 * in use, then the inode cannot have been freed yet, and we >> 1144 * can safely proceed (if it's no longer in use, that tells >> 1145 * nothing about the inode, but we don't need to unuse swap). >> 1146 */ >> 1147 if (!page_swapcount(*pagep)) >> 1148 error = -ENOENT; >> 1149 } 1384 1150 1385 do { !! 1151 /* 1386 folio_batch_init(&fbatch); !! 1152 * We rely on shmem_swaplist_mutex, not only to protect the swaplist, 1387 shmem_find_swap_entries(mappi !! 1153 * but also to hold up shmem_evict_inode(): so inode cannot be freed 1388 if (folio_batch_count(&fbatch !! 1154 * beneath us (pagelock doesn't help until the page is in pagecache). 1389 ret = 0; !! 1155 */ 1390 break; !! 1156 if (!error) >> 1157 error = shmem_add_to_page_cache(*pagep, mapping, index, >> 1158 radswap); >> 1159 if (error != -ENOMEM) { >> 1160 /* >> 1161 * Truncation and eviction use free_swap_and_cache(), which >> 1162 * only does trylock page: if we raced, best clean up here. >> 1163 */ >> 1164 delete_from_swap_cache(*pagep); >> 1165 set_page_dirty(*pagep); >> 1166 if (!error) { >> 1167 spin_lock_irq(&info->lock); >> 1168 info->swapped--; >> 1169 spin_unlock_irq(&info->lock); >> 1170 swap_free(swap); 1391 } 1171 } 1392 !! 1172 } 1393 ret = shmem_unuse_swap_entrie !! 1173 return error; 1394 if (ret < 0) << 1395 break; << 1396 << 1397 start = indices[folio_batch_c << 1398 } while (true); << 1399 << 1400 return ret; << 1401 } 1174 } 1402 1175 1403 /* 1176 /* 1404 * Read all the shared memory data that resid !! 1177 * Search through swapped inodes to find and replace swap by page. 1405 * device 'type' back into memory, so the swa << 1406 * unused. << 1407 */ 1178 */ 1408 int shmem_unuse(unsigned int type) !! 
1179 int shmem_unuse(swp_entry_t swap, struct page *page) 1409 { 1180 { 1410 struct shmem_inode_info *info, *next; !! 1181 struct list_head *this, *next; >> 1182 struct shmem_inode_info *info; >> 1183 struct mem_cgroup *memcg; 1411 int error = 0; 1184 int error = 0; 1412 1185 1413 if (list_empty(&shmem_swaplist)) !! 1186 /* 1414 return 0; !! 1187 * There's a faint possibility that swap page was replaced before >> 1188 * caller locked it: caller will come back later with the right page. >> 1189 */ >> 1190 if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val)) >> 1191 goto out; >> 1192 >> 1193 /* >> 1194 * Charge page using GFP_KERNEL while we can wait, before taking >> 1195 * the shmem_swaplist_mutex which might hold up shmem_writepage(). >> 1196 * Charged back to the user (not to caller) when swap account is used. >> 1197 */ >> 1198 error = mem_cgroup_try_charge(page, current->mm, GFP_KERNEL, &memcg, >> 1199 false); >> 1200 if (error) >> 1201 goto out; >> 1202 /* No radix_tree_preload: swap entry keeps a place for page in tree */ >> 1203 error = -EAGAIN; 1415 1204 1416 mutex_lock(&shmem_swaplist_mutex); 1205 mutex_lock(&shmem_swaplist_mutex); 1417 list_for_each_entry_safe(info, next, !! 1206 list_for_each_safe(this, next, &shmem_swaplist) { 1418 if (!info->swapped) { !! 1207 info = list_entry(this, struct shmem_inode_info, swaplist); >> 1208 if (info->swapped) >> 1209 error = shmem_unuse_inode(info, swap, &page); >> 1210 else 1419 list_del_init(&info-> 1211 list_del_init(&info->swaplist); 1420 continue; << 1421 } << 1422 /* << 1423 * Drop the swaplist mutex wh << 1424 * but before doing so, make << 1425 * remove placeholder inode f << 1426 * (igrab() would protect fro << 1427 */ << 1428 atomic_inc(&info->stop_evicti << 1429 mutex_unlock(&shmem_swaplist_ << 1430 << 1431 error = shmem_unuse_inode(&in << 1432 cond_resched(); 1212 cond_resched(); 1433 !! 1213 if (error != -EAGAIN) 1434 mutex_lock(&shmem_swaplist_mu << 1435 next = list_next_entry(info, << 1436 if (!info->swapped) << 1437 list_del_init(&info-> << 1438 if (atomic_dec_and_test(&info << 1439 wake_up_var(&info->st << 1440 if (error) << 1441 break; 1214 break; >> 1215 /* found nothing in this: move on to search the next */ 1442 } 1216 } 1443 mutex_unlock(&shmem_swaplist_mutex); 1217 mutex_unlock(&shmem_swaplist_mutex); 1444 1218 >> 1219 if (error) { >> 1220 if (error != -ENOMEM) >> 1221 error = 0; >> 1222 mem_cgroup_cancel_charge(page, memcg, false); >> 1223 } else >> 1224 mem_cgroup_commit_charge(page, memcg, true, false); >> 1225 out: >> 1226 unlock_page(page); >> 1227 put_page(page); 1445 return error; 1228 return error; 1446 } 1229 } 1447 1230 1448 /* 1231 /* 1449 * Move the page from the page cache to the s 1232 * Move the page from the page cache to the swap cache. 1450 */ 1233 */ 1451 static int shmem_writepage(struct page *page, 1234 static int shmem_writepage(struct page *page, struct writeback_control *wbc) 1452 { 1235 { 1453 struct folio *folio = page_folio(page !! 1236 struct shmem_inode_info *info; 1454 struct address_space *mapping = folio !! 1237 struct address_space *mapping; 1455 struct inode *inode = mapping->host; !! 1238 struct inode *inode; 1456 struct shmem_inode_info *info = SHMEM << 1457 struct shmem_sb_info *sbinfo = SHMEM_ << 1458 swp_entry_t swap; 1239 swp_entry_t swap; 1459 pgoff_t index; 1240 pgoff_t index; 1460 int nr_pages; !! 1241 1461 bool split = false; !! 
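Both versions of shmem_unuse() above are driven from the same place: swapoff(2) reaches try_to_unuse(), which asks shmem to walk shmem_swaplist and pull its swapped-out pages back into the page cache (per swap page in the old code, per swap device in the new). From user space the whole mechanism is one privileged call; a hedged sketch, with an illustrative swapfile path and CAP_SYS_ADMIN assumed:

/* Hedged sketch: swapoff(2) is what ultimately makes the kernel run
 * shmem_unuse(); needs root and an active swap file at this path. */
#include <stdio.h>
#include <sys/swap.h>

int main(void)
{
	const char *path = "/swapfile";		/* illustrative */

	if (swapoff(path) != 0) {
		perror("swapoff");
		return 1;
	}
	printf("%s disabled; shmem pages read back from swap\n", path);
	return 0;
}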
1242 VM_BUG_ON_PAGE(PageCompound(page), page); >> 1243 BUG_ON(!PageLocked(page)); >> 1244 mapping = page->mapping; >> 1245 index = page->index; >> 1246 inode = mapping->host; >> 1247 info = SHMEM_I(inode); >> 1248 if (info->flags & VM_LOCKED) >> 1249 goto redirty; >> 1250 if (!total_swap_pages) >> 1251 goto redirty; 1462 1252 1463 /* 1253 /* 1464 * Our capabilities prevent regular w 1254 * Our capabilities prevent regular writeback or sync from ever calling 1465 * shmem_writepage; but a stacking fi 1255 * shmem_writepage; but a stacking filesystem might use ->writepage of 1466 * its underlying filesystem, in whic 1256 * its underlying filesystem, in which case tmpfs should write out to 1467 * swap only in response to memory pr 1257 * swap only in response to memory pressure, and not for the writeback 1468 * threads or sync. 1258 * threads or sync. 1469 */ 1259 */ 1470 if (WARN_ON_ONCE(!wbc->for_reclaim)) !! 1260 if (!wbc->for_reclaim) { >> 1261 WARN_ON_ONCE(1); /* Still happens? Tell us about it! */ 1471 goto redirty; 1262 goto redirty; 1472 << 1473 if (WARN_ON_ONCE((info->flags & VM_LO << 1474 goto redirty; << 1475 << 1476 if (!total_swap_pages) << 1477 goto redirty; << 1478 << 1479 /* << 1480 * If CONFIG_THP_SWAP is not enabled, << 1481 * split when swapping. << 1482 * << 1483 * And shrinkage of pages beyond i_si << 1484 * swapout of a large folio crossing << 1485 * (unless fallocate has been used to << 1486 */ << 1487 if (folio_test_large(folio)) { << 1488 index = shmem_fallocend(inode << 1489 DIV_ROUND_UP(i_size_r << 1490 if ((index > folio->index && << 1491 !IS_ENABLED(CONFIG_THP_SW << 1492 split = true; << 1493 } << 1494 << 1495 if (split) { << 1496 try_split: << 1497 /* Ensure the subpages are st << 1498 folio_test_set_dirty(folio); << 1499 if (split_huge_page_to_list_t << 1500 goto redirty; << 1501 folio = page_folio(page); << 1502 folio_clear_dirty(folio); << 1503 } 1263 } 1504 1264 1505 index = folio->index; << 1506 nr_pages = folio_nr_pages(folio); << 1507 << 1508 /* 1265 /* 1509 * This is somewhat ridiculous, but w 1266 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC 1510 * value into swapfile.c, the only wa 1267 * value into swapfile.c, the only way we can correctly account for a 1511 * fallocated folio arriving here is !! 1268 * fallocated page arriving here is now to initialize it and write it. 1512 * 1269 * 1513 * That's okay for a folio already fa !! 1270 * That's okay for a page already fallocated earlier, but if we have 1514 * not yet completed the fallocation, 1271 * not yet completed the fallocation, then (a) we want to keep track 1515 * of this folio in case we have to u !! 1272 * of this page in case we have to undo it, and (b) it may not be a 1516 * good idea to continue anyway, once 1273 * good idea to continue anyway, once we're pushing into swap. So 1517 * reactivate the folio, and let shme !! 1274 * reactivate the page, and let shmem_fallocate() quit when too many. 1518 */ 1275 */ 1519 if (!folio_test_uptodate(folio)) { !! 
1276 if (!PageUptodate(page)) { 1520 if (inode->i_private) { 1277 if (inode->i_private) { 1521 struct shmem_falloc * 1278 struct shmem_falloc *shmem_falloc; 1522 spin_lock(&inode->i_l 1279 spin_lock(&inode->i_lock); 1523 shmem_falloc = inode- 1280 shmem_falloc = inode->i_private; 1524 if (shmem_falloc && 1281 if (shmem_falloc && 1525 !shmem_falloc->wa 1282 !shmem_falloc->waitq && 1526 index >= shmem_fa 1283 index >= shmem_falloc->start && 1527 index < shmem_fal 1284 index < shmem_falloc->next) 1528 shmem_falloc- 1285 shmem_falloc->nr_unswapped++; 1529 else 1286 else 1530 shmem_falloc 1287 shmem_falloc = NULL; 1531 spin_unlock(&inode->i 1288 spin_unlock(&inode->i_lock); 1532 if (shmem_falloc) 1289 if (shmem_falloc) 1533 goto redirty; 1290 goto redirty; 1534 } 1291 } 1535 folio_zero_range(folio, 0, fo !! 1292 clear_highpage(page); 1536 flush_dcache_folio(folio); !! 1293 flush_dcache_page(page); 1537 folio_mark_uptodate(folio); !! 1294 SetPageUptodate(page); 1538 } 1295 } 1539 1296 1540 swap = folio_alloc_swap(folio); !! 1297 swap = get_swap_page(); 1541 if (!swap.val) { !! 1298 if (!swap.val) 1542 if (nr_pages > 1) << 1543 goto try_split; << 1544 << 1545 goto redirty; 1299 goto redirty; 1546 } !! 1300 >> 1301 if (mem_cgroup_try_charge_swap(page, swap)) >> 1302 goto free_swap; 1547 1303 1548 /* 1304 /* 1549 * Add inode to shmem_unuse()'s list 1305 * Add inode to shmem_unuse()'s list of swapped-out inodes, 1550 * if it's not already there. Do it !! 1306 * if it's not already there. Do it now before the page is 1551 * moved to swap cache, when its page 1307 * moved to swap cache, when its pagelock no longer protects 1552 * the inode from eviction. But don' 1308 * the inode from eviction. But don't unlock the mutex until 1553 * we've incremented swapped, because 1309 * we've incremented swapped, because shmem_unuse_inode() will 1554 * prune a !swapped inode from the sw 1310 * prune a !swapped inode from the swaplist under this mutex. 1555 */ 1311 */ 1556 mutex_lock(&shmem_swaplist_mutex); 1312 mutex_lock(&shmem_swaplist_mutex); 1557 if (list_empty(&info->swaplist)) 1313 if (list_empty(&info->swaplist)) 1558 list_add(&info->swaplist, &sh !! 1314 list_add_tail(&info->swaplist, &shmem_swaplist); >> 1315 >> 1316 if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) { >> 1317 spin_lock_irq(&info->lock); >> 1318 shmem_recalc_inode(inode); >> 1319 info->swapped++; >> 1320 spin_unlock_irq(&info->lock); 1559 1321 1560 if (add_to_swap_cache(folio, swap, !! 1322 swap_shmem_alloc(swap); 1561 __GFP_HIGH | __GFP_NO !! 1323 shmem_delete_from_page_cache(page, swp_to_radix_entry(swap)); 1562 NULL) == 0) { << 1563 shmem_recalc_inode(inode, 0, << 1564 swap_shmem_alloc(swap, nr_pag << 1565 shmem_delete_from_page_cache( << 1566 1324 1567 mutex_unlock(&shmem_swaplist_ 1325 mutex_unlock(&shmem_swaplist_mutex); 1568 BUG_ON(folio_mapped(folio)); !! 1326 BUG_ON(page_mapped(page)); 1569 return swap_writepage(&folio- !! 1327 swap_writepage(page, wbc); >> 1328 return 0; 1570 } 1329 } 1571 1330 1572 mutex_unlock(&shmem_swaplist_mutex); 1331 mutex_unlock(&shmem_swaplist_mutex); 1573 put_swap_folio(folio, swap); !! 1332 free_swap: >> 1333 swapcache_free(swap); 1574 redirty: 1334 redirty: 1575 folio_mark_dirty(folio); !! 1335 set_page_dirty(page); 1576 if (wbc->for_reclaim) 1336 if (wbc->for_reclaim) 1577 return AOP_WRITEPAGE_ACTIVATE !! 1337 return AOP_WRITEPAGE_ACTIVATE; /* Return with page locked */ 1578 folio_unlock(folio); !! 
1338 unlock_page(page); 1579 return 0; 1339 return 0; 1580 } 1340 } 1581 1341 1582 #if defined(CONFIG_NUMA) && defined(CONFIG_TM 1342 #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS) 1583 static void shmem_show_mpol(struct seq_file * 1343 static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) 1584 { 1344 { 1585 char buffer[64]; 1345 char buffer[64]; 1586 1346 1587 if (!mpol || mpol->mode == MPOL_DEFAU 1347 if (!mpol || mpol->mode == MPOL_DEFAULT) 1588 return; /* show nothi 1348 return; /* show nothing */ 1589 1349 1590 mpol_to_str(buffer, sizeof(buffer), m 1350 mpol_to_str(buffer, sizeof(buffer), mpol); 1591 1351 1592 seq_printf(seq, ",mpol=%s", buffer); 1352 seq_printf(seq, ",mpol=%s", buffer); 1593 } 1353 } 1594 1354 1595 static struct mempolicy *shmem_get_sbmpol(str 1355 static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) 1596 { 1356 { 1597 struct mempolicy *mpol = NULL; 1357 struct mempolicy *mpol = NULL; 1598 if (sbinfo->mpol) { 1358 if (sbinfo->mpol) { 1599 raw_spin_lock(&sbinfo->stat_l !! 1359 spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */ 1600 mpol = sbinfo->mpol; 1360 mpol = sbinfo->mpol; 1601 mpol_get(mpol); 1361 mpol_get(mpol); 1602 raw_spin_unlock(&sbinfo->stat !! 1362 spin_unlock(&sbinfo->stat_lock); 1603 } 1363 } 1604 return mpol; 1364 return mpol; 1605 } 1365 } 1606 #else /* !CONFIG_NUMA || !CONFIG_TMPFS */ 1366 #else /* !CONFIG_NUMA || !CONFIG_TMPFS */ 1607 static inline void shmem_show_mpol(struct seq 1367 static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) 1608 { 1368 { 1609 } 1369 } 1610 static inline struct mempolicy *shmem_get_sbm 1370 static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) 1611 { 1371 { 1612 return NULL; 1372 return NULL; 1613 } 1373 } 1614 #endif /* CONFIG_NUMA && CONFIG_TMPFS */ 1374 #endif /* CONFIG_NUMA && CONFIG_TMPFS */ >> 1375 #ifndef CONFIG_NUMA >> 1376 #define vm_policy vm_private_data >> 1377 #endif 1615 1378 1616 static struct mempolicy *shmem_get_pgoff_poli !! 1379 static void shmem_pseudo_vma_init(struct vm_area_struct *vma, 1617 pgoff_t index, unsign !! 1380 struct shmem_inode_info *info, pgoff_t index) 1618 << 1619 static struct folio *shmem_swapin_cluster(swp << 1620 struct shmem_inode_in << 1621 { 1381 { 1622 struct mempolicy *mpol; !! 1382 /* Create a pseudo vma that just contains the policy */ 1623 pgoff_t ilx; !! 1383 vma->vm_start = 0; 1624 struct folio *folio; !! 1384 /* Bias interleave by inode number to distribute better across nodes */ 1625 !! 1385 vma->vm_pgoff = index + info->vfs_inode.i_ino; 1626 mpol = shmem_get_pgoff_policy(info, i !! 1386 vma->vm_ops = NULL; 1627 folio = swap_cluster_readahead(swap, !! 1387 vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index); 1628 mpol_cond_put(mpol); << 1629 << 1630 return folio; << 1631 } 1388 } 1632 1389 1633 /* !! 1390 static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma) 1634 * Make sure huge_gfp is always more limited << 1635 * Some of the flags set permissions, while o << 1636 */ << 1637 static gfp_t limit_gfp_mask(gfp_t huge_gfp, g << 1638 { 1391 { 1639 gfp_t allowflags = __GFP_IO | __GFP_F !! 1392 /* Drop reference taken by mpol_shared_policy_lookup() */ 1640 gfp_t denyflags = __GFP_NOWARN | __GF !! 
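shmem_show_mpol() above is what prints the ",mpol=%s" token when tmpfs mount options are listed, and shmem_get_sbmpol() feeds the same superblock policy into the allocation helpers that follow. The policy is chosen at mount time; a hedged sketch on a NUMA-enabled kernel, with an illustrative mount point and CAP_SYS_ADMIN assumed:

/* Hedged sketch: mount tmpfs with a NUMA policy; shmem_show_mpol()
 * then reports ",mpol=interleave" for this mount in /proc/mounts. */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	if (mount("tmpfs", "/mnt/numa-tmp", "tmpfs", 0,
		  "size=64m,mpol=interleave") != 0) {
		perror("mount");
		return 1;
	}
	puts("see /proc/mounts for the mpol= option");
	return 0;
}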
1393 mpol_cond_put(vma->vm_policy); 1641 gfp_t zoneflags = limit_gfp & GFP_ZON << 1642 gfp_t result = huge_gfp & ~(allowflag << 1643 << 1644 /* Allow allocations only from the or << 1645 result |= zoneflags; << 1646 << 1647 /* << 1648 * Minimize the result gfp by taking << 1649 * and the intersection of the allow << 1650 */ << 1651 result |= (limit_gfp & denyflags); << 1652 result |= (huge_gfp & limit_gfp) & al << 1653 << 1654 return result; << 1655 } 1394 } 1656 1395 1657 #ifdef CONFIG_TRANSPARENT_HUGEPAGE !! 1396 static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp, 1658 unsigned long shmem_allowable_huge_orders(str !! 1397 struct shmem_inode_info *info, pgoff_t index) 1659 struct vm_are << 1660 loff_t write_ << 1661 { 1398 { 1662 unsigned long mask = READ_ONCE(huge_s !! 1399 struct vm_area_struct pvma; 1663 unsigned long within_size_orders = RE !! 1400 struct page *page; 1664 unsigned long vm_flags = vma ? vma->v << 1665 bool global_huge; << 1666 loff_t i_size; << 1667 int order; << 1668 << 1669 if (thp_disabled_by_hw() || (vma && v << 1670 return 0; << 1671 << 1672 global_huge = shmem_huge_global_enabl << 1673 shmem << 1674 if (!vma || !vma_is_anon_shmem(vma)) << 1675 /* << 1676 * For tmpfs, we now only sup << 1677 * is enabled, otherwise fall << 1678 */ << 1679 return global_huge ? BIT(HPAG << 1680 } << 1681 << 1682 /* << 1683 * Following the 'deny' semantics of << 1684 * option off from all mounts. << 1685 */ << 1686 if (shmem_huge == SHMEM_HUGE_DENY) << 1687 return 0; << 1688 << 1689 /* << 1690 * Only allow inherit orders if the t << 1691 * means non-PMD sized THP can not ov << 1692 */ << 1693 if (shmem_huge == SHMEM_HUGE_FORCE) << 1694 return READ_ONCE(huge_shmem_o << 1695 << 1696 /* Allow mTHP that will be fully with << 1697 order = highest_order(within_size_ord << 1698 while (within_size_orders) { << 1699 index = round_up(index + 1, o << 1700 i_size = round_up(i_size_read << 1701 if (i_size >> PAGE_SHIFT >= i << 1702 mask |= within_size_o << 1703 break; << 1704 } << 1705 << 1706 order = next_order(&within_si << 1707 } << 1708 << 1709 if (vm_flags & VM_HUGEPAGE) << 1710 mask |= READ_ONCE(huge_shmem_ << 1711 1401 1712 if (global_huge) !! 1402 shmem_pseudo_vma_init(&pvma, info, index); 1713 mask |= READ_ONCE(huge_shmem_ !! 1403 page = swapin_readahead(swap, gfp, &pvma, 0); >> 1404 shmem_pseudo_vma_destroy(&pvma); 1714 1405 1715 return THP_ORDERS_ALL_FILE_DEFAULT & !! 1406 return page; 1716 } 1407 } 1717 1408 1718 static unsigned long shmem_suitable_orders(st !! 1409 static struct page *shmem_alloc_hugepage(gfp_t gfp, 1719 st !! 1410 struct shmem_inode_info *info, pgoff_t index) 1720 un << 1721 { 1411 { 1722 struct vm_area_struct *vma = vmf ? vm !! 1412 struct vm_area_struct pvma; 1723 pgoff_t aligned_index; !! 1413 struct inode *inode = &info->vfs_inode; 1724 unsigned long pages; !! 1414 struct address_space *mapping = inode->i_mapping; 1725 int order; !! 1415 pgoff_t idx, hindex; >> 1416 void __rcu **results; >> 1417 struct page *page; 1726 1418 1727 if (vma) { !! 1419 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) 1728 orders = thp_vma_suitable_ord !! 1420 return NULL; 1729 if (!orders) << 1730 return 0; << 1731 } << 1732 1421 1733 /* Find the highest order that can ad !! 1422 hindex = round_down(index, HPAGE_PMD_NR); 1734 order = highest_order(orders); !! 1423 rcu_read_lock(); 1735 while (orders) { !! 1424 if (radix_tree_gang_lookup_slot(&mapping->page_tree, &results, &idx, 1736 pages = 1UL << order; !! 
1425 hindex, 1) && idx < hindex + HPAGE_PMD_NR) { 1737 aligned_index = round_down(in !! 1426 rcu_read_unlock(); 1738 /* !! 1427 return NULL; 1739 * Check for conflict before << 1740 * Conflict might be that a h << 1741 * and added to page cache by << 1742 * is already at least one sm << 1743 * Be careful to retry when a << 1744 * Elsewhere -EEXIST would be << 1745 */ << 1746 if (!xa_find(&mapping->i_page << 1747 aligned_index + << 1748 break; << 1749 order = next_order(&orders, o << 1750 } 1428 } >> 1429 rcu_read_unlock(); 1751 1430 1752 return orders; !! 1431 shmem_pseudo_vma_init(&pvma, info, hindex); 1753 } !! 1432 page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN, 1754 #else !! 1433 HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true); 1755 static unsigned long shmem_suitable_orders(st !! 1434 shmem_pseudo_vma_destroy(&pvma); 1756 st !! 1435 if (page) 1757 un !! 1436 prep_transhuge_page(page); 1758 { !! 1437 return page; 1759 return 0; << 1760 } 1438 } 1761 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ << 1762 1439 1763 static struct folio *shmem_alloc_folio(gfp_t !! 1440 static struct page *shmem_alloc_page(gfp_t gfp, 1764 struct shmem_inode_info *info !! 1441 struct shmem_inode_info *info, pgoff_t index) 1765 { 1442 { 1766 struct mempolicy *mpol; !! 1443 struct vm_area_struct pvma; 1767 pgoff_t ilx; !! 1444 struct page *page; 1768 struct folio *folio; << 1769 1445 1770 mpol = shmem_get_pgoff_policy(info, i !! 1446 shmem_pseudo_vma_init(&pvma, info, index); 1771 folio = folio_alloc_mpol(gfp, order, !! 1447 page = alloc_page_vma(gfp, &pvma, 0); 1772 mpol_cond_put(mpol); !! 1448 shmem_pseudo_vma_destroy(&pvma); 1773 1449 1774 return folio; !! 1450 return page; 1775 } 1451 } 1776 1452 1777 static struct folio *shmem_alloc_and_add_foli !! 1453 static struct page *shmem_alloc_and_acct_page(gfp_t gfp, 1778 gfp_t gfp, struct inode *inod !! 1454 struct shmem_inode_info *info, struct shmem_sb_info *sbinfo, 1779 struct mm_struct *fault_mm, u !! 1455 pgoff_t index, bool huge) 1780 { 1456 { 1781 struct address_space *mapping = inode !! 1457 struct page *page; 1782 struct shmem_inode_info *info = SHMEM !! 1458 int nr; 1783 unsigned long suitable_orders = 0; !! 1459 int err = -ENOSPC; 1784 struct folio *folio = NULL; << 1785 long pages; << 1786 int error, order; << 1787 << 1788 if (!IS_ENABLED(CONFIG_TRANSPARENT_HU << 1789 orders = 0; << 1790 << 1791 if (orders > 0) { << 1792 suitable_orders = shmem_suita << 1793 << 1794 << 1795 order = highest_order(suitabl << 1796 while (suitable_orders) { << 1797 pages = 1UL << order; << 1798 index = round_down(in << 1799 folio = shmem_alloc_f << 1800 if (folio) << 1801 goto allocate << 1802 << 1803 if (pages == HPAGE_PM << 1804 count_vm_even << 1805 count_mthp_stat(order << 1806 order = next_order(&s << 1807 } << 1808 } else { << 1809 pages = 1; << 1810 folio = shmem_alloc_folio(gfp << 1811 } << 1812 if (!folio) << 1813 return ERR_PTR(-ENOMEM); << 1814 1460 1815 allocated: !! 1461 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) 1816 __folio_set_locked(folio); !! 1462 huge = false; 1817 __folio_set_swapbacked(folio); !! 1463 nr = huge ? HPAGE_PMD_NR : 1; 1818 1464 1819 gfp &= GFP_RECLAIM_MASK; !! 1465 if (shmem_acct_block(info->flags, nr)) 1820 error = mem_cgroup_charge(folio, faul !! 1466 goto failed; 1821 if (error) { !! 1467 if (sbinfo->max_blocks) { 1822 if (xa_find(&mapping->i_pages !! 1468 if (percpu_counter_compare(&sbinfo->used_blocks, 1823 index + pages !! 1469 sbinfo->max_blocks - nr) > 0) 1824 error = -EEXIST; !! 
1470 goto unacct; 1825 } else if (pages > 1) { !! 1471 percpu_counter_add(&sbinfo->used_blocks, nr); 1826 if (pages == HPAGE_PM << 1827 count_vm_even << 1828 count_vm_even << 1829 } << 1830 count_mthp_stat(folio << 1831 count_mthp_stat(folio << 1832 } << 1833 goto unlock; << 1834 } 1472 } 1835 1473 1836 error = shmem_add_to_page_cache(folio !! 1474 if (huge) 1837 if (error) !! 1475 page = shmem_alloc_hugepage(gfp, info, index); 1838 goto unlock; !! 1476 else 1839 !! 1477 page = shmem_alloc_page(gfp, info, index); 1840 error = shmem_inode_acct_blocks(inode !! 1478 if (page) { 1841 if (error) { !! 1479 __SetPageLocked(page); 1842 struct shmem_sb_info *sbinfo !! 1480 __SetPageSwapBacked(page); 1843 long freed; !! 1481 return page; 1844 /* << 1845 * Try to reclaim some space << 1846 * large folios beyond i_size << 1847 */ << 1848 shmem_unused_huge_shrink(sbin << 1849 /* << 1850 * And do a shmem_recalc_inod << 1851 * except our folio is there << 1852 */ << 1853 spin_lock(&info->lock); << 1854 freed = pages + info->alloced << 1855 READ_ONCE(mapping->nr << 1856 if (freed > 0) << 1857 info->alloced -= free << 1858 spin_unlock(&info->lock); << 1859 if (freed > 0) << 1860 shmem_inode_unacct_bl << 1861 error = shmem_inode_acct_bloc << 1862 if (error) { << 1863 filemap_remove_folio( << 1864 goto unlock; << 1865 } << 1866 } 1482 } 1867 1483 1868 shmem_recalc_inode(inode, pages, 0); !! 1484 err = -ENOMEM; 1869 folio_add_lru(folio); !! 1485 if (sbinfo->max_blocks) 1870 return folio; !! 1486 percpu_counter_add(&sbinfo->used_blocks, -nr); 1871 !! 1487 unacct: 1872 unlock: !! 1488 shmem_unacct_blocks(info->flags, nr); 1873 folio_unlock(folio); !! 1489 failed: 1874 folio_put(folio); !! 1490 return ERR_PTR(err); 1875 return ERR_PTR(error); << 1876 } 1491 } 1877 1492 1878 /* 1493 /* 1879 * When a page is moved from swapcache to shm 1494 * When a page is moved from swapcache to shmem filecache (either by the 1880 * usual swapin of shmem_get_folio_gfp(), or !! 1495 * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of 1881 * shmem_unuse_inode()), it may have been rea 1496 * shmem_unuse_inode()), it may have been read in earlier from swap, in 1882 * ignorance of the mapping it belongs to. I 1497 * ignorance of the mapping it belongs to. If that mapping has special 1883 * constraints (like the gma500 GEM driver, w 1498 * constraints (like the gma500 GEM driver, which requires RAM below 4GB), 1884 * we may need to copy to a suitable page bef 1499 * we may need to copy to a suitable page before moving to filecache. 1885 * 1500 * 1886 * In a future release, this may well be exte 1501 * In a future release, this may well be extended to respect cpuset and 1887 * NUMA mempolicy, and applied also to anonym 1502 * NUMA mempolicy, and applied also to anonymous pages in do_swap_page(); 1888 * but for now it is a simple matter of zone. 1503 * but for now it is a simple matter of zone. 1889 */ 1504 */ 1890 static bool shmem_should_replace_folio(struct !! 1505 static bool shmem_should_replace_page(struct page *page, gfp_t gfp) 1891 { 1506 { 1892 return folio_zonenum(folio) > gfp_zon !! 1507 return page_zonenum(page) > gfp_zone(gfp); 1893 } 1508 } 1894 1509 1895 static int shmem_replace_folio(struct folio * !! 1510 static int shmem_replace_page(struct page **pagep, gfp_t gfp, 1896 struct shmem_ !! 1511 struct shmem_inode_info *info, pgoff_t index) 1897 struct vm_are !! 1512 { 1898 { !! 1513 struct page *oldpage, *newpage; 1899 struct folio *new, *old = *foliop; !! 
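This accounting (shmem_acct_block() and the sbinfo->used_blocks percpu counter in the old code, shmem_inode_acct_blocks() in the new) is what turns a tmpfs "size=" mount limit into -ENOSPC at write time. A hedged sketch of the user-visible effect; the mount point is illustrative and assumed to be tmpfs mounted with size=1m:

/* Hedged sketch: keep writing until the used_blocks accounting
 * refuses another page and write(2) fails with ENOSPC. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* assumes: mount -t tmpfs -o size=1m tmpfs /mnt/tiny */
	int fd = open("/mnt/tiny/fill", O_CREAT | O_WRONLY, 0600);
	char page[4096];
	ssize_t n;

	memset(page, 'x', sizeof(page));
	do {
		n = write(fd, page, sizeof(page));
	} while (n == (ssize_t)sizeof(page));

	if (n < 0 && errno == ENOSPC)
		puts("size= limit reached: ENOSPC from block accounting");
	return 0;
}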
1514 struct address_space *swap_mapping; 1900 swp_entry_t entry = old->swap; !! 1515 pgoff_t swap_index; 1901 struct address_space *swap_mapping = !! 1516 int error; 1902 pgoff_t swap_index = swap_cache_index !! 1517 1903 XA_STATE(xas, &swap_mapping->i_pages, !! 1518 oldpage = *pagep; 1904 int nr_pages = folio_nr_pages(old); !! 1519 swap_index = page_private(oldpage); 1905 int error = 0, i; !! 1520 swap_mapping = page_mapping(oldpage); 1906 1521 1907 /* 1522 /* 1908 * We have arrived here because our z 1523 * We have arrived here because our zones are constrained, so don't 1909 * limit chance of success by further 1524 * limit chance of success by further cpuset and node constraints. 1910 */ 1525 */ 1911 gfp &= ~GFP_CONSTRAINT_MASK; 1526 gfp &= ~GFP_CONSTRAINT_MASK; 1912 #ifdef CONFIG_TRANSPARENT_HUGEPAGE !! 1527 newpage = shmem_alloc_page(gfp, info, index); 1913 if (nr_pages > 1) { !! 1528 if (!newpage) 1914 gfp_t huge_gfp = vma_thp_gfp_ << 1915 << 1916 gfp = limit_gfp_mask(huge_gfp << 1917 } << 1918 #endif << 1919 << 1920 new = shmem_alloc_folio(gfp, folio_or << 1921 if (!new) << 1922 return -ENOMEM; 1529 return -ENOMEM; 1923 1530 1924 folio_ref_add(new, nr_pages); !! 1531 get_page(newpage); 1925 folio_copy(new, old); !! 1532 copy_highpage(newpage, oldpage); 1926 flush_dcache_folio(new); !! 1533 flush_dcache_page(newpage); 1927 !! 1534 1928 __folio_set_locked(new); !! 1535 __SetPageLocked(newpage); 1929 __folio_set_swapbacked(new); !! 1536 __SetPageSwapBacked(newpage); 1930 folio_mark_uptodate(new); !! 1537 SetPageUptodate(newpage); 1931 new->swap = entry; !! 1538 set_page_private(newpage, swap_index); 1932 folio_set_swapcache(new); !! 1539 SetPageSwapCache(newpage); 1933 << 1934 /* Swap cache still stores N entries << 1935 xa_lock_irq(&swap_mapping->i_pages); << 1936 for (i = 0; i < nr_pages; i++) { << 1937 void *item = xas_load(&xas); << 1938 << 1939 if (item != old) { << 1940 error = -ENOENT; << 1941 break; << 1942 } << 1943 1540 1944 xas_store(&xas, new); !! 1541 /* 1945 xas_next(&xas); !! 1542 * Our caller will very soon move newpage out of swapcache, but it's 1946 } !! 1543 * a nice clean interface for us to replace oldpage by newpage there. >> 1544 */ >> 1545 spin_lock_irq(&swap_mapping->tree_lock); >> 1546 error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage, >> 1547 newpage); 1947 if (!error) { 1548 if (!error) { 1948 mem_cgroup_replace_folio(old, !! 1549 __inc_node_page_state(newpage, NR_FILE_PAGES); 1949 __lruvec_stat_mod_folio(new, !! 1550 __dec_node_page_state(oldpage, NR_FILE_PAGES); 1950 __lruvec_stat_mod_folio(new, << 1951 __lruvec_stat_mod_folio(old, << 1952 __lruvec_stat_mod_folio(old, << 1953 } 1551 } 1954 xa_unlock_irq(&swap_mapping->i_pages) !! 1552 spin_unlock_irq(&swap_mapping->tree_lock); 1955 1553 1956 if (unlikely(error)) { 1554 if (unlikely(error)) { 1957 /* 1555 /* 1958 * Is this possible? I think !! 1556 * Is this possible? I think not, now that our callers check 1959 * check both the swapcache f !! 1557 * both PageSwapCache and page_private after getting page lock; 1960 * after getting the folio lo !! 1558 * but be defensive. Reverse old to newpage for clear and free. 1961 * Reverse old to newpage for << 1962 */ 1559 */ 1963 old = new; !! 1560 oldpage = newpage; 1964 } else { 1561 } else { 1965 folio_add_lru(new); !! 1562 mem_cgroup_migrate(oldpage, newpage); 1966 *foliop = new; !! 1563 lru_cache_add_anon(newpage); >> 1564 *pagep = newpage; 1967 } 1565 } 1968 1566 1969 folio_clear_swapcache(old); !! 
1567 ClearPageSwapCache(oldpage); 1970 old->private = NULL; !! 1568 set_page_private(oldpage, 0); 1971 1569 1972 folio_unlock(old); !! 1570 unlock_page(oldpage); 1973 /* !! 1571 put_page(oldpage); 1974 * The old folio are removed from swa !! 1572 put_page(oldpage); 1975 * reference, as well as one temporar << 1976 * cache. << 1977 */ << 1978 folio_put_refs(old, nr_pages + 1); << 1979 return error; 1573 return error; 1980 } 1574 } 1981 1575 1982 static void shmem_set_folio_swapin_error(stru !! 1576 /* 1983 stru !! 1577 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate 1984 { !! 1578 * 1985 struct address_space *mapping = inode !! 1579 * If we allocate a new one we do not mark it dirty. That's up to the 1986 swp_entry_t swapin_error; !! 1580 * vm. If we swap it in we mark it dirty since we also free the swap 1987 void *old; !! 1581 * entry since a page cannot live in both the swap and page cache. 1988 int nr_pages; !! 1582 * 1989 !! 1583 * fault_mm and fault_type are only supplied by shmem_fault: 1990 swapin_error = make_poisoned_swp_entr !! 1584 * otherwise they are NULL. 1991 old = xa_cmpxchg_irq(&mapping->i_page !! 1585 */ 1992 swp_to_radix_ent !! 1586 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index, 1993 swp_to_radix_ent !! 1587 struct page **pagep, enum sgp_type sgp, gfp_t gfp, 1994 if (old != swp_to_radix_entry(swap)) !! 1588 struct vm_area_struct *vma, struct vm_fault *vmf, int *fault_type) 1995 return; << 1996 << 1997 nr_pages = folio_nr_pages(folio); << 1998 folio_wait_writeback(folio); << 1999 delete_from_swap_cache(folio); << 2000 /* << 2001 * Don't treat swapin error folio as << 2002 * won't be 0 when inode is released << 2003 * in shmem_evict_inode(). << 2004 */ << 2005 shmem_recalc_inode(inode, -nr_pages, << 2006 swap_free_nr(swap, nr_pages); << 2007 } << 2008 << 2009 static int shmem_split_large_entry(struct ino << 2010 swp_entry_ << 2011 { 1589 { 2012 struct address_space *mapping = inode 1590 struct address_space *mapping = inode->i_mapping; 2013 XA_STATE_ORDER(xas, &mapping->i_pages !! 1591 struct shmem_inode_info *info = SHMEM_I(inode); 2014 void *alloced_shadow = NULL; !! 1592 struct shmem_sb_info *sbinfo; 2015 int alloced_order = 0, i; !! 1593 struct mm_struct *charge_mm; >> 1594 struct mem_cgroup *memcg; >> 1595 struct page *page; >> 1596 swp_entry_t swap; >> 1597 enum sgp_type sgp_huge = sgp; >> 1598 pgoff_t hindex = index; >> 1599 int error; >> 1600 int once = 0; >> 1601 int alloced = 0; 2016 1602 2017 /* Convert user data gfp flags to xar !! 1603 if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT)) 2018 gfp &= GFP_RECLAIM_MASK; !! 1604 return -EFBIG; >> 1605 if (sgp == SGP_NOHUGE || sgp == SGP_HUGE) >> 1606 sgp = SGP_CACHE; >> 1607 repeat: >> 1608 swap.val = 0; >> 1609 page = find_lock_entry(mapping, index); >> 1610 if (radix_tree_exceptional_entry(page)) { >> 1611 swap = radix_to_swp_entry(page); >> 1612 page = NULL; >> 1613 } 2019 1614 2020 for (;;) { !! 1615 if (sgp <= SGP_CACHE && 2021 int order = -1, split_order = !! 1616 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) { 2022 void *old = NULL; !! 1617 error = -EINVAL; >> 1618 goto unlock; >> 1619 } 2023 1620 2024 xas_lock_irq(&xas); !! 1621 if (page && sgp == SGP_WRITE) 2025 old = xas_load(&xas); !! 1622 mark_page_accessed(page); 2026 if (!xa_is_value(old) || swp_ << 2027 xas_set_err(&xas, -EE << 2028 goto unlock; << 2029 } << 2030 1623 2031 order = xas_get_order(&xas); !! 1624 /* fallocated page? 
*/ >> 1625 if (page && !PageUptodate(page)) { >> 1626 if (sgp != SGP_READ) >> 1627 goto clear; >> 1628 unlock_page(page); >> 1629 put_page(page); >> 1630 page = NULL; >> 1631 } >> 1632 if (page || (sgp == SGP_READ && !swap.val)) { >> 1633 *pagep = page; >> 1634 return 0; >> 1635 } 2032 1636 2033 /* Swap entry may have change !! 1637 /* 2034 if (alloced_order && !! 1638 * Fast cache lookup did not find it: 2035 (old != alloced_shadow || !! 1639 * bring it back from swap or allocate. 2036 xas_destroy(&xas); !! 1640 */ 2037 alloced_order = 0; !! 1641 sbinfo = SHMEM_SB(inode->i_sb); 2038 } !! 1642 charge_mm = vma ? vma->vm_mm : current->mm; 2039 1643 2040 /* Try to split large swap en !! 1644 if (swap.val) { 2041 if (order > 0) { !! 1645 /* Look it up and read it in.. */ 2042 if (!alloced_order) { !! 1646 page = lookup_swap_cache(swap); 2043 split_order = !! 1647 if (!page) { 2044 goto unlock; !! 1648 /* Or update major stats only when swapin succeeds?? */ >> 1649 if (fault_type) { >> 1650 *fault_type |= VM_FAULT_MAJOR; >> 1651 count_vm_event(PGMAJFAULT); >> 1652 mem_cgroup_count_vm_event(charge_mm, >> 1653 PGMAJFAULT); 2045 } 1654 } 2046 xas_split(&xas, old, !! 1655 /* Here we actually start the io */ 2047 !! 1656 page = shmem_swapin(swap, gfp, info, index); 2048 /* !! 1657 if (!page) { 2049 * Re-set the swap en !! 1658 error = -ENOMEM; 2050 * offset of the orig !! 1659 goto failed; 2051 */ << 2052 for (i = 0; i < 1 << << 2053 pgoff_t align << 2054 swp_entry_t t << 2055 << 2056 tmp = swp_ent << 2057 __xa_store(&m << 2058 sw << 2059 } 1660 } 2060 } 1661 } 2061 1662 2062 unlock: !! 1663 /* We have to do this with page locked to prevent races */ 2063 xas_unlock_irq(&xas); !! 1664 lock_page(page); 2064 !! 1665 if (!PageSwapCache(page) || page_private(page) != swap.val || 2065 /* split needed, alloc here a !! 1666 !shmem_confirm_swap(mapping, index, swap)) { 2066 if (split_order) { !! 1667 error = -EEXIST; /* try again */ 2067 xas_split_alloc(&xas, !! 1668 goto unlock; 2068 if (xas_error(&xas)) << 2069 goto error; << 2070 alloced_shadow = old; << 2071 alloced_order = split << 2072 xas_reset(&xas); << 2073 continue; << 2074 } << 2075 << 2076 if (!xas_nomem(&xas, gfp)) << 2077 break; << 2078 } << 2079 << 2080 error: << 2081 if (xas_error(&xas)) << 2082 return xas_error(&xas); << 2083 << 2084 return alloced_order; << 2085 } << 2086 << 2087 /* << 2088 * Swap in the folio pointed to by *foliop. << 2089 * Caller has to make sure that *foliop conta << 2090 * Returns 0 and the folio in foliop if succe << 2091 * error code and NULL in *foliop. << 2092 */ << 2093 static int shmem_swapin_folio(struct inode *i << 2094 struct folio **f << 2095 gfp_t gfp, struc << 2096 vm_fault_t *faul << 2097 { << 2098 struct address_space *mapping = inode << 2099 struct mm_struct *fault_mm = vma ? vm << 2100 struct shmem_inode_info *info = SHMEM << 2101 struct swap_info_struct *si; << 2102 struct folio *folio = NULL; << 2103 swp_entry_t swap; << 2104 int error, nr_pages; << 2105 << 2106 VM_BUG_ON(!*foliop || !xa_is_value(*f << 2107 swap = radix_to_swp_entry(*foliop); << 2108 *foliop = NULL; << 2109 << 2110 if (is_poisoned_swp_entry(swap)) << 2111 return -EIO; << 2112 << 2113 si = get_swap_device(swap); << 2114 if (!si) { << 2115 if (!shmem_confirm_swap(mappi << 2116 return -EEXIST; << 2117 else << 2118 return -EINVAL; << 2119 } << 2120 << 2121 /* Look it up and read it in.. 
*/ << 2122 folio = swap_cache_get_folio(swap, NU << 2123 if (!folio) { << 2124 int split_order; << 2125 << 2126 /* Or update major stats only << 2127 if (fault_type) { << 2128 *fault_type |= VM_FAU << 2129 count_vm_event(PGMAJF << 2130 count_memcg_event_mm( << 2131 } 1669 } 2132 !! 1670 if (!PageUptodate(page)) { 2133 /* !! 1671 error = -EIO; 2134 * Now swap device can only s << 2135 * should split the large swa << 2136 * if necessary. << 2137 */ << 2138 split_order = shmem_split_lar << 2139 if (split_order < 0) { << 2140 error = split_order; << 2141 goto failed; 1672 goto failed; 2142 } 1673 } >> 1674 wait_on_page_writeback(page); 2143 1675 2144 /* !! 1676 if (shmem_should_replace_page(page, gfp)) { 2145 * If the large swap entry ha !! 1677 error = shmem_replace_page(&page, gfp, info, index); 2146 * necessary to recalculate t !! 1678 if (error) 2147 * the old order alignment. !! 1679 goto failed; 2148 */ << 2149 if (split_order > 0) { << 2150 pgoff_t offset = inde << 2151 << 2152 swap = swp_entry(swp_ << 2153 } 1680 } 2154 1681 2155 /* Here we actually start the !! 1682 error = mem_cgroup_try_charge(page, charge_mm, gfp, &memcg, 2156 folio = shmem_swapin_cluster( !! 1683 false); 2157 if (!folio) { !! 1684 if (!error) { 2158 error = -ENOMEM; !! 1685 error = shmem_add_to_page_cache(page, mapping, index, 2159 goto failed; !! 1686 swp_to_radix_entry(swap)); >> 1687 /* >> 1688 * We already confirmed swap under page lock, and make >> 1689 * no memory allocation here, so usually no possibility >> 1690 * of error; but free_swap_and_cache() only trylocks a >> 1691 * page, so it is just possible that the entry has been >> 1692 * truncated or holepunched since swap was confirmed. >> 1693 * shmem_undo_range() will have done some of the >> 1694 * unaccounting, now delete_from_swap_cache() will do >> 1695 * the rest. >> 1696 * Reset swap.val? No, leave it so "failed" goes back to >> 1697 * "repeat": reading a hole and writing should succeed. >> 1698 */ >> 1699 if (error) { >> 1700 mem_cgroup_cancel_charge(page, memcg, false); >> 1701 delete_from_swap_cache(page); >> 1702 } 2160 } 1703 } 2161 } << 2162 << 2163 /* We have to do this with folio lock << 2164 folio_lock(folio); << 2165 if (!folio_test_swapcache(folio) || << 2166 folio->swap.val != swap.val || << 2167 !shmem_confirm_swap(mapping, inde << 2168 error = -EEXIST; << 2169 goto unlock; << 2170 } << 2171 if (!folio_test_uptodate(folio)) { << 2172 error = -EIO; << 2173 goto failed; << 2174 } << 2175 folio_wait_writeback(folio); << 2176 nr_pages = folio_nr_pages(folio); << 2177 << 2178 /* << 2179 * Some architectures may have to res << 2180 * folio after reading from swap. 
<< 2181 */ << 2182 arch_swap_restore(folio_swap(swap, fo << 2183 << 2184 if (shmem_should_replace_folio(folio, << 2185 error = shmem_replace_folio(& << 2186 if (error) 1704 if (error) 2187 goto failed; 1705 goto failed; 2188 } << 2189 << 2190 error = shmem_add_to_page_cache(folio << 2191 round << 2192 swp_t << 2193 if (error) << 2194 goto failed; << 2195 << 2196 shmem_recalc_inode(inode, 0, -nr_page << 2197 << 2198 if (sgp == SGP_WRITE) << 2199 folio_mark_accessed(folio); << 2200 << 2201 delete_from_swap_cache(folio); << 2202 folio_mark_dirty(folio); << 2203 swap_free_nr(swap, nr_pages); << 2204 put_swap_device(si); << 2205 << 2206 *foliop = folio; << 2207 return 0; << 2208 failed: << 2209 if (!shmem_confirm_swap(mapping, inde << 2210 error = -EEXIST; << 2211 if (error == -EIO) << 2212 shmem_set_folio_swapin_error( << 2213 unlock: << 2214 if (folio) { << 2215 folio_unlock(folio); << 2216 folio_put(folio); << 2217 } << 2218 put_swap_device(si); << 2219 1706 2220 return error; !! 1707 mem_cgroup_commit_charge(page, memcg, true, false); 2221 } << 2222 << 2223 /* << 2224 * shmem_get_folio_gfp - find page in cache, << 2225 * << 2226 * If we allocate a new one we do not mark it << 2227 * vm. If we swap it in we mark it dirty sinc << 2228 * entry since a page cannot live in both the << 2229 * << 2230 * vmf and fault_type are only supplied by sh << 2231 */ << 2232 static int shmem_get_folio_gfp(struct inode * << 2233 loff_t write_end, struct foli << 2234 gfp_t gfp, struct vm_fault *v << 2235 { << 2236 struct vm_area_struct *vma = vmf ? vm << 2237 struct mm_struct *fault_mm; << 2238 struct folio *folio; << 2239 int error; << 2240 bool alloced; << 2241 unsigned long orders = 0; << 2242 1708 2243 if (WARN_ON_ONCE(!shmem_mapping(inode !! 1709 spin_lock_irq(&info->lock); 2244 return -EINVAL; !! 1710 info->swapped--; >> 1711 shmem_recalc_inode(inode); >> 1712 spin_unlock_irq(&info->lock); 2245 1713 2246 if (index > (MAX_LFS_FILESIZE >> PAGE !! 1714 if (sgp == SGP_WRITE) 2247 return -EFBIG; !! 1715 mark_page_accessed(page); 2248 repeat: << 2249 if (sgp <= SGP_CACHE && << 2250 ((loff_t)index << PAGE_SHIFT) >= << 2251 return -EINVAL; << 2252 << 2253 alloced = false; << 2254 fault_mm = vma ? vma->vm_mm : NULL; << 2255 << 2256 folio = filemap_get_entry(inode->i_ma << 2257 if (folio && vma && userfaultfd_minor << 2258 if (!xa_is_value(folio)) << 2259 folio_put(folio); << 2260 *fault_type = handle_userfaul << 2261 return 0; << 2262 } << 2263 << 2264 if (xa_is_value(folio)) { << 2265 error = shmem_swapin_folio(in << 2266 sg << 2267 if (error == -EEXIST) << 2268 goto repeat; << 2269 << 2270 *foliop = folio; << 2271 return error; << 2272 } << 2273 1716 2274 if (folio) { !! 1717 delete_from_swap_cache(page); 2275 folio_lock(folio); !! 1718 set_page_dirty(page); >> 1719 swap_free(swap); 2276 1720 2277 /* Has the folio been truncat !! 1721 } else { 2278 if (unlikely(folio->mapping ! !! 1722 if (vma && userfaultfd_missing(vma)) { 2279 folio_unlock(folio); !! 1723 *fault_type = handle_userfault(vmf, VM_UFFD_MISSING); 2280 folio_put(folio); !! 1724 return 0; 2281 goto repeat; << 2282 } 1725 } 2283 if (sgp == SGP_WRITE) << 2284 folio_mark_accessed(f << 2285 if (folio_test_uptodate(folio << 2286 goto out; << 2287 /* fallocated folio */ << 2288 if (sgp != SGP_READ) << 2289 goto clear; << 2290 folio_unlock(folio); << 2291 folio_put(folio); << 2292 } << 2293 1726 2294 /* !! 1727 /* shmem_symlink() */ 2295 * SGP_READ: succeed on hole, with NU !! 
1728 if (mapping->a_ops != &shmem_aops) 2296 * SGP_NOALLOC: fail on hole, with NU !! 1729 goto alloc_nohuge; 2297 */ !! 1730 if (shmem_huge == SHMEM_HUGE_DENY || sgp_huge == SGP_NOHUGE) 2298 *foliop = NULL; !! 1731 goto alloc_nohuge; 2299 if (sgp == SGP_READ) !! 1732 if (shmem_huge == SHMEM_HUGE_FORCE) 2300 return 0; !! 1733 goto alloc_huge; 2301 if (sgp == SGP_NOALLOC) !! 1734 switch (sbinfo->huge) { 2302 return -ENOENT; !! 1735 loff_t i_size; >> 1736 pgoff_t off; >> 1737 case SHMEM_HUGE_NEVER: >> 1738 goto alloc_nohuge; >> 1739 case SHMEM_HUGE_WITHIN_SIZE: >> 1740 off = round_up(index, HPAGE_PMD_NR); >> 1741 i_size = round_up(i_size_read(inode), PAGE_SIZE); >> 1742 if (i_size >= HPAGE_PMD_SIZE && >> 1743 i_size >> PAGE_SHIFT >= off) >> 1744 goto alloc_huge; >> 1745 /* fallthrough */ >> 1746 case SHMEM_HUGE_ADVISE: >> 1747 if (sgp_huge == SGP_HUGE) >> 1748 goto alloc_huge; >> 1749 /* TODO: implement fadvise() hints */ >> 1750 goto alloc_nohuge; >> 1751 } >> 1752 >> 1753 alloc_huge: >> 1754 page = shmem_alloc_and_acct_page(gfp, info, sbinfo, >> 1755 index, true); >> 1756 if (IS_ERR(page)) { >> 1757 alloc_nohuge: page = shmem_alloc_and_acct_page(gfp, info, sbinfo, >> 1758 index, false); >> 1759 } >> 1760 if (IS_ERR(page)) { >> 1761 int retry = 5; >> 1762 error = PTR_ERR(page); >> 1763 page = NULL; >> 1764 if (error != -ENOSPC) >> 1765 goto failed; >> 1766 /* >> 1767 * Try to reclaim some spece by splitting a huge page >> 1768 * beyond i_size on the filesystem. >> 1769 */ >> 1770 while (retry--) { >> 1771 int ret; >> 1772 ret = shmem_unused_huge_shrink(sbinfo, NULL, 1); >> 1773 if (ret == SHRINK_STOP) >> 1774 break; >> 1775 if (ret) >> 1776 goto alloc_nohuge; >> 1777 } >> 1778 goto failed; >> 1779 } 2303 1780 2304 /* !! 1781 if (PageTransHuge(page)) 2305 * Fast cache lookup and swap lookup !! 1782 hindex = round_down(index, HPAGE_PMD_NR); 2306 */ !! 1783 else >> 1784 hindex = index; 2307 1785 2308 if (vma && userfaultfd_missing(vma)) !! 1786 if (sgp == SGP_WRITE) 2309 *fault_type = handle_userfaul !! 1787 __SetPageReferenced(page); 2310 return 0; << 2311 } << 2312 1788 2313 /* Find hugepage orders that are allo !! 1789 error = mem_cgroup_try_charge(page, charge_mm, gfp, &memcg, 2314 orders = shmem_allowable_huge_orders( !! 1790 PageTransHuge(page)); 2315 if (orders > 0) { !! 1791 if (error) 2316 gfp_t huge_gfp; !! 1792 goto unacct; 2317 !! 1793 error = radix_tree_maybe_preload_order(gfp & GFP_RECLAIM_MASK, 2318 huge_gfp = vma_thp_gfp_mask(v !! 1794 compound_order(page)); 2319 huge_gfp = limit_gfp_mask(hug !! 1795 if (!error) { 2320 folio = shmem_alloc_and_add_f !! 1796 error = shmem_add_to_page_cache(page, mapping, hindex, 2321 inode, index, !! 1797 NULL); 2322 if (!IS_ERR(folio)) { !! 1798 radix_tree_preload_end(); 2323 if (folio_test_pmd_ma !! 1799 } 2324 count_vm_even !! 1800 if (error) { 2325 count_mthp_stat(folio !! 1801 mem_cgroup_cancel_charge(page, memcg, 2326 goto alloced; !! 1802 PageTransHuge(page)); 2327 } !! 1803 goto unacct; 2328 if (PTR_ERR(folio) == -EEXIST !! 1804 } 2329 goto repeat; !! 1805 mem_cgroup_commit_charge(page, memcg, false, 2330 } !! 1806 PageTransHuge(page)); 2331 !! 1807 lru_cache_add_anon(page); 2332 folio = shmem_alloc_and_add_folio(vmf !! 1808 2333 if (IS_ERR(folio)) { !! 1809 spin_lock_irq(&info->lock); 2334 error = PTR_ERR(folio); !! 1810 info->alloced += 1 << compound_order(page); 2335 if (error == -EEXIST) !! 1811 inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page); 2336 goto repeat; !! 1812 shmem_recalc_inode(inode); 2337 folio = NULL; !! 
1813 spin_unlock_irq(&info->lock); 2338 goto unlock; !! 1814 alloced = true; 2339 } !! 1815 >> 1816 if (PageTransHuge(page) && >> 1817 DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) < >> 1818 hindex + HPAGE_PMD_NR - 1) { >> 1819 /* >> 1820 * Part of the huge page is beyond i_size: subject >> 1821 * to shrink under memory pressure. >> 1822 */ >> 1823 spin_lock(&sbinfo->shrinklist_lock); >> 1824 /* >> 1825 * _careful to defend against unlocked access to >> 1826 * ->shrink_list in shmem_unused_huge_shrink() >> 1827 */ >> 1828 if (list_empty_careful(&info->shrinklist)) { >> 1829 list_add_tail(&info->shrinklist, >> 1830 &sbinfo->shrinklist); >> 1831 sbinfo->shrinklist_len++; >> 1832 } >> 1833 spin_unlock(&sbinfo->shrinklist_lock); >> 1834 } 2340 1835 2341 alloced: << 2342 alloced = true; << 2343 if (folio_test_large(folio) && << 2344 DIV_ROUND_UP(i_size_read(inode), << 2345 folio << 2346 struct shmem_sb_info *sbinfo << 2347 struct shmem_inode_info *info << 2348 /* 1836 /* 2349 * Part of the large folio is !! 1837 * Let SGP_FALLOC use the SGP_WRITE optimization on a new page. 2350 * to shrink under memory pre << 2351 */ 1838 */ 2352 spin_lock(&sbinfo->shrinklist !! 1839 if (sgp == SGP_FALLOC) >> 1840 sgp = SGP_WRITE; >> 1841 clear: 2353 /* 1842 /* 2354 * _careful to defend against !! 1843 * Let SGP_WRITE caller clear ends if write does not fill page; 2355 * ->shrink_list in shmem_unu !! 1844 * but SGP_FALLOC on a page fallocated earlier must initialize >> 1845 * it now, lest undo on failure cancel our earlier guarantee. 2356 */ 1846 */ 2357 if (list_empty_careful(&info- !! 1847 if (sgp != SGP_WRITE && !PageUptodate(page)) { 2358 list_add_tail(&info-> !! 1848 struct page *head = compound_head(page); 2359 &sbinfo !! 1849 int i; 2360 sbinfo->shrinklist_le !! 1850 >> 1851 for (i = 0; i < (1 << compound_order(head)); i++) { >> 1852 clear_highpage(head + i); >> 1853 flush_dcache_page(head + i); >> 1854 } >> 1855 SetPageUptodate(head); 2361 } 1856 } 2362 spin_unlock(&sbinfo->shrinkli << 2363 } << 2364 << 2365 if (sgp == SGP_WRITE) << 2366 folio_set_referenced(folio); << 2367 /* << 2368 * Let SGP_FALLOC use the SGP_WRITE o << 2369 */ << 2370 if (sgp == SGP_FALLOC) << 2371 sgp = SGP_WRITE; << 2372 clear: << 2373 /* << 2374 * Let SGP_WRITE caller clear ends if << 2375 * but SGP_FALLOC on a folio fallocat << 2376 * it now, lest undo on failure cance << 2377 */ << 2378 if (sgp != SGP_WRITE && !folio_test_u << 2379 long i, n = folio_nr_pages(fo << 2380 << 2381 for (i = 0; i < n; i++) << 2382 clear_highpage(folio_ << 2383 flush_dcache_folio(folio); << 2384 folio_mark_uptodate(folio); << 2385 } 1857 } 2386 1858 2387 /* Perhaps the file has been truncate 1859 /* Perhaps the file has been truncated since we checked */ 2388 if (sgp <= SGP_CACHE && 1860 if (sgp <= SGP_CACHE && 2389 ((loff_t)index << PAGE_SHIFT) >= 1861 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) { >> 1862 if (alloced) { >> 1863 ClearPageDirty(page); >> 1864 delete_from_page_cache(page); >> 1865 spin_lock_irq(&info->lock); >> 1866 shmem_recalc_inode(inode); >> 1867 spin_unlock_irq(&info->lock); >> 1868 } 2390 error = -EINVAL; 1869 error = -EINVAL; 2391 goto unlock; 1870 goto unlock; 2392 } 1871 } 2393 out: !! 1872 *pagep = page + index - hindex; 2394 *foliop = folio; << 2395 return 0; 1873 return 0; 2396 1874 2397 /* 1875 /* 2398 * Error recovery. 1876 * Error recovery. 
2399 */ 1877 */ >> 1878 unacct: >> 1879 if (sbinfo->max_blocks) >> 1880 percpu_counter_sub(&sbinfo->used_blocks, >> 1881 1 << compound_order(page)); >> 1882 shmem_unacct_blocks(info->flags, 1 << compound_order(page)); >> 1883 >> 1884 if (PageTransHuge(page)) { >> 1885 unlock_page(page); >> 1886 put_page(page); >> 1887 goto alloc_nohuge; >> 1888 } >> 1889 failed: >> 1890 if (swap.val && !shmem_confirm_swap(mapping, index, swap)) >> 1891 error = -EEXIST; 2400 unlock: 1892 unlock: 2401 if (alloced) !! 1893 if (page) { 2402 filemap_remove_folio(folio); !! 1894 unlock_page(page); 2403 shmem_recalc_inode(inode, 0, 0); !! 1895 put_page(page); 2404 if (folio) { !! 1896 } 2405 folio_unlock(folio); !! 1897 if (error == -ENOSPC && !once++) { 2406 folio_put(folio); !! 1898 spin_lock_irq(&info->lock); >> 1899 shmem_recalc_inode(inode); >> 1900 spin_unlock_irq(&info->lock); >> 1901 goto repeat; 2407 } 1902 } >> 1903 if (error == -EEXIST) /* from above or from radix_tree_insert */ >> 1904 goto repeat; 2408 return error; 1905 return error; 2409 } 1906 } 2410 1907 2411 /** << 2412 * shmem_get_folio - find, and lock a shmem f << 2413 * @inode: inode to search << 2414 * @index: the page index. << 2415 * @write_end: end of a write, could extend << 2416 * @foliop: pointer to the folio if found << 2417 * @sgp: SGP_* flags to control behavi << 2418 * << 2419 * Looks up the page cache entry at @inode & << 2420 * present, it is returned locked with an inc << 2421 * << 2422 * If the caller modifies data in the folio, << 2423 * before unlocking the folio to ensure that << 2424 * There is no need to reserve space before c << 2425 * << 2426 * When no folio is found, the behavior depen << 2427 * - for SGP_READ, *@foliop is %NULL and 0 i << 2428 * - for SGP_NOALLOC, *@foliop is %NULL and << 2429 * - for all other flags a new folio is allo << 2430 * page cache and returned locked in @foli << 2431 * << 2432 * Context: May sleep. << 2433 * Return: 0 if successful, else a negative e << 2434 */ << 2435 int shmem_get_folio(struct inode *inode, pgof << 2436 struct folio **foliop, en << 2437 { << 2438 return shmem_get_folio_gfp(inode, ind << 2439 mapping_gfp_mask(inod << 2440 } << 2441 EXPORT_SYMBOL_GPL(shmem_get_folio); << 2442 << 2443 /* 1908 /* 2444 * This is like autoremove_wake_function, but 1909 * This is like autoremove_wake_function, but it removes the wait queue 2445 * entry unconditionally - even if something 1910 * entry unconditionally - even if something else had already woken the 2446 * target. 1911 * target. 2447 */ 1912 */ 2448 static int synchronous_wake_function(wait_que !! 1913 static int synchronous_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key) 2449 unsigned int mode, in << 2450 { 1914 { 2451 int ret = default_wake_function(wait, 1915 int ret = default_wake_function(wait, mode, sync, key); 2452 list_del_init(&wait->entry); !! 1916 list_del_init(&wait->task_list); 2453 return ret; 1917 return ret; 2454 } 1918 } 2455 1919 2456 /* !! 
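
/*
 * Illustrative sketch, not part of shmem.c: a minimal in-kernel caller
 * of the shmem_get_folio() API documented above. The folio comes back
 * locked with its refcount raised; with SGP_READ a hole succeeds with
 * *foliop left NULL, so that case must be handled before touching the
 * folio. The function name is hypothetical; 0 is passed for write_end
 * since no write is extending the file.
 */
static int shmem_example_peek(struct inode *inode, pgoff_t index)
{
	struct folio *folio;
	int err;

	err = shmem_get_folio(inode, index, 0, &folio, SGP_READ);
	if (err)
		return err;
	if (!folio)
		return 0;	/* hole: nothing is allocated for SGP_READ */
	/* ... read the contents; a writer would folio_mark_dirty() ... */
	folio_unlock(folio);
	folio_put(folio);
	return 0;
}
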
1920 static int shmem_fault(struct vm_fault *vmf) 2457 * Trinity finds that probing a hole which tm << 2458 * prevent the hole-punch from ever completin << 2459 * locks writers out with its hold on i_rwsem << 2460 * faulting pages into the hole while it's be << 2461 * shmem_undo_range() does remove the additio << 2462 * keep up, as each new page needs its own un << 2463 * and the i_mmap tree grows ever slower to s << 2464 * << 2465 * It does not matter if we sometimes reach t << 2466 * hole-punch begins, so that one fault then << 2467 * we just need to make racing faults a rare << 2468 * << 2469 * The implementation below would be much sim << 2470 * standard mutex or completion: but we canno << 2471 * and bloating every shmem inode for this un << 2472 */ << 2473 static vm_fault_t shmem_falloc_wait(struct vm << 2474 { 1921 { 2475 struct shmem_falloc *shmem_falloc; !! 1922 struct vm_area_struct *vma = vmf->vma; 2476 struct file *fpin = NULL; !! 1923 struct inode *inode = file_inode(vma->vm_file); 2477 vm_fault_t ret = 0; << 2478 << 2479 spin_lock(&inode->i_lock); << 2480 shmem_falloc = inode->i_private; << 2481 if (shmem_falloc && << 2482 shmem_falloc->waitq && << 2483 vmf->pgoff >= shmem_falloc->start << 2484 vmf->pgoff < shmem_falloc->next) << 2485 wait_queue_head_t *shmem_fall << 2486 DEFINE_WAIT_FUNC(shmem_fault_ << 2487 << 2488 ret = VM_FAULT_NOPAGE; << 2489 fpin = maybe_unlock_mmap_for_ << 2490 shmem_falloc_waitq = shmem_fa << 2491 prepare_to_wait(shmem_falloc_ << 2492 TASK_UNINTERR << 2493 spin_unlock(&inode->i_lock); << 2494 schedule(); << 2495 << 2496 /* << 2497 * shmem_falloc_waitq points << 2498 * stack of the hole-punching << 2499 * is usually invalid by the << 2500 * finish_wait() does not der << 2501 * though i_lock needed lest << 2502 */ << 2503 spin_lock(&inode->i_lock); << 2504 finish_wait(shmem_falloc_wait << 2505 } << 2506 spin_unlock(&inode->i_lock); << 2507 if (fpin) { << 2508 fput(fpin); << 2509 ret = VM_FAULT_RETRY; << 2510 } << 2511 return ret; << 2512 } << 2513 << 2514 static vm_fault_t shmem_fault(struct vm_fault << 2515 { << 2516 struct inode *inode = file_inode(vmf- << 2517 gfp_t gfp = mapping_gfp_mask(inode->i 1924 gfp_t gfp = mapping_gfp_mask(inode->i_mapping); 2518 struct folio *folio = NULL; !! 1925 enum sgp_type sgp; 2519 vm_fault_t ret = 0; !! 1926 int error; 2520 int err; !! 1927 int ret = VM_FAULT_LOCKED; 2521 1928 2522 /* 1929 /* 2523 * Trinity finds that probing a hole 1930 * Trinity finds that probing a hole which tmpfs is punching can 2524 * prevent the hole-punch from ever c !! 1931 * prevent the hole-punch from ever completing: which in turn >> 1932 * locks writers out with its hold on i_mutex. So refrain from >> 1933 * faulting pages into the hole while it's being punched. Although >> 1934 * shmem_undo_range() does remove the additions, it may be unable to >> 1935 * keep up, as each new page needs its own unmap_mapping_range() call, >> 1936 * and the i_mmap tree grows ever slower to scan if new vmas are added. >> 1937 * >> 1938 * It does not matter if we sometimes reach this check just before the >> 1939 * hole-punch begins, so that one fault then races with the punch: >> 1940 * we just need to make racing faults a rare case. >> 1941 * >> 1942 * The implementation below would be much simpler if we just used a >> 1943 * standard mutex or completion: but we cannot take i_mutex in fault, >> 1944 * and bloating every shmem inode for this unlikely case would be sad. 
2525 */ 1945 */ 2526 if (unlikely(inode->i_private)) { 1946 if (unlikely(inode->i_private)) { 2527 ret = shmem_falloc_wait(vmf, !! 1947 struct shmem_falloc *shmem_falloc; 2528 if (ret) !! 1948 >> 1949 spin_lock(&inode->i_lock); >> 1950 shmem_falloc = inode->i_private; >> 1951 if (shmem_falloc && >> 1952 shmem_falloc->waitq && >> 1953 vmf->pgoff >= shmem_falloc->start && >> 1954 vmf->pgoff < shmem_falloc->next) { >> 1955 wait_queue_head_t *shmem_falloc_waitq; >> 1956 DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function); >> 1957 >> 1958 ret = VM_FAULT_NOPAGE; >> 1959 if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) && >> 1960 !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) { >> 1961 /* It's polite to up mmap_sem if we can */ >> 1962 up_read(&vma->vm_mm->mmap_sem); >> 1963 ret = VM_FAULT_RETRY; >> 1964 } >> 1965 >> 1966 shmem_falloc_waitq = shmem_falloc->waitq; >> 1967 prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait, >> 1968 TASK_UNINTERRUPTIBLE); >> 1969 spin_unlock(&inode->i_lock); >> 1970 schedule(); >> 1971 >> 1972 /* >> 1973 * shmem_falloc_waitq points into the shmem_fallocate() >> 1974 * stack of the hole-punching task: shmem_falloc_waitq >> 1975 * is usually invalid by the time we reach here, but >> 1976 * finish_wait() does not dereference it in that case; >> 1977 * though i_lock needed lest racing with wake_up_all(). >> 1978 */ >> 1979 spin_lock(&inode->i_lock); >> 1980 finish_wait(shmem_falloc_waitq, &shmem_fault_wait); >> 1981 spin_unlock(&inode->i_lock); 2529 return ret; 1982 return ret; >> 1983 } >> 1984 spin_unlock(&inode->i_lock); 2530 } 1985 } 2531 1986 2532 WARN_ON_ONCE(vmf->page != NULL); !! 1987 sgp = SGP_CACHE; 2533 err = shmem_get_folio_gfp(inode, vmf- !! 1988 if (vma->vm_flags & VM_HUGEPAGE) 2534 gfp, vmf, & !! 1989 sgp = SGP_HUGE; 2535 if (err) !! 1990 else if (vma->vm_flags & VM_NOHUGEPAGE) 2536 return vmf_error(err); !! 1991 sgp = SGP_NOHUGE; 2537 if (folio) { !! 1992 2538 vmf->page = folio_file_page(f !! 1993 error = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp, 2539 ret |= VM_FAULT_LOCKED; !! 1994 gfp, vma, vmf, &ret); 2540 } !! 1995 if (error) >> 1996 return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS); 2541 return ret; 1997 return ret; 2542 } 1998 } 2543 1999 2544 unsigned long shmem_get_unmapped_area(struct 2000 unsigned long shmem_get_unmapped_area(struct file *file, 2545 unsigne 2001 unsigned long uaddr, unsigned long len, 2546 unsigne 2002 unsigned long pgoff, unsigned long flags) 2547 { 2003 { >> 2004 unsigned long (*get_area)(struct file *, >> 2005 unsigned long, unsigned long, unsigned long, unsigned long); 2548 unsigned long addr; 2006 unsigned long addr; 2549 unsigned long offset; 2007 unsigned long offset; 2550 unsigned long inflated_len; 2008 unsigned long inflated_len; 2551 unsigned long inflated_addr; 2009 unsigned long inflated_addr; 2552 unsigned long inflated_offset; 2010 unsigned long inflated_offset; 2553 unsigned long hpage_size; << 2554 2011 2555 if (len > TASK_SIZE) 2012 if (len > TASK_SIZE) 2556 return -ENOMEM; 2013 return -ENOMEM; 2557 2014 2558 addr = mm_get_unmapped_area(current-> !! 2015 get_area = current->mm->get_unmapped_area; 2559 flags); !! 2016 addr = get_area(file, uaddr, len, pgoff, flags); 2560 2017 2561 if (!IS_ENABLED(CONFIG_TRANSPARENT_HU !! 
2018 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) 2562 return addr; 2019 return addr; 2563 if (IS_ERR_VALUE(addr)) 2020 if (IS_ERR_VALUE(addr)) 2564 return addr; 2021 return addr; 2565 if (addr & ~PAGE_MASK) 2022 if (addr & ~PAGE_MASK) 2566 return addr; 2023 return addr; 2567 if (addr > TASK_SIZE - len) 2024 if (addr > TASK_SIZE - len) 2568 return addr; 2025 return addr; 2569 2026 2570 if (shmem_huge == SHMEM_HUGE_DENY) 2027 if (shmem_huge == SHMEM_HUGE_DENY) 2571 return addr; 2028 return addr; >> 2029 if (len < HPAGE_PMD_SIZE) >> 2030 return addr; 2572 if (flags & MAP_FIXED) 2031 if (flags & MAP_FIXED) 2573 return addr; 2032 return addr; 2574 /* 2033 /* 2575 * Our priority is to support MAP_SHA 2034 * Our priority is to support MAP_SHARED mapped hugely; 2576 * and support MAP_PRIVATE mapped hug 2035 * and support MAP_PRIVATE mapped hugely too, until it is COWed. 2577 * But if caller specified an address !! 2036 * But if caller specified an address hint, respect that as before. 2578 * successfully, respect that as befo << 2579 */ 2037 */ 2580 if (uaddr == addr) !! 2038 if (uaddr) 2581 return addr; 2039 return addr; 2582 2040 2583 hpage_size = HPAGE_PMD_SIZE; << 2584 if (shmem_huge != SHMEM_HUGE_FORCE) { 2041 if (shmem_huge != SHMEM_HUGE_FORCE) { 2585 struct super_block *sb; 2042 struct super_block *sb; 2586 unsigned long __maybe_unused << 2587 int order = 0; << 2588 2043 2589 if (file) { 2044 if (file) { 2590 VM_BUG_ON(file->f_op 2045 VM_BUG_ON(file->f_op != &shmem_file_operations); 2591 sb = file_inode(file) 2046 sb = file_inode(file)->i_sb; 2592 } else { 2047 } else { 2593 /* 2048 /* 2594 * Called directly fr 2049 * Called directly from mm/mmap.c, or drivers/char/mem.c 2595 * for "/dev/zero", t 2050 * for "/dev/zero", to create a shared anonymous object. 2596 */ 2051 */ 2597 if (IS_ERR(shm_mnt)) 2052 if (IS_ERR(shm_mnt)) 2598 return addr; 2053 return addr; 2599 sb = shm_mnt->mnt_sb; 2054 sb = shm_mnt->mnt_sb; 2600 << 2601 /* << 2602 * Find the highest m << 2603 * provide a suitable << 2604 */ << 2605 #ifdef CONFIG_TRANSPARENT_HUGEPAGE << 2606 hpage_orders = READ_O << 2607 hpage_orders |= READ_ << 2608 hpage_orders |= READ_ << 2609 if (SHMEM_SB(sb)->hug << 2610 hpage_orders << 2611 << 2612 if (hpage_orders > 0) << 2613 order = highe << 2614 hpage_size = << 2615 } << 2616 #endif << 2617 } 2055 } 2618 if (SHMEM_SB(sb)->huge == SHM !! 2056 if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER) 2619 return addr; 2057 return addr; 2620 } 2058 } 2621 2059 2622 if (len < hpage_size) !! 2060 offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1); 2623 return addr; !! 2061 if (offset && offset + len < 2 * HPAGE_PMD_SIZE) 2624 << 2625 offset = (pgoff << PAGE_SHIFT) & (hpa << 2626 if (offset && offset + len < 2 * hpag << 2627 return addr; 2062 return addr; 2628 if ((addr & (hpage_size - 1)) == offs !! 2063 if ((addr & (HPAGE_PMD_SIZE-1)) == offset) 2629 return addr; 2064 return addr; 2630 2065 2631 inflated_len = len + hpage_size - PAG !! 2066 inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE; 2632 if (inflated_len > TASK_SIZE) 2067 if (inflated_len > TASK_SIZE) 2633 return addr; 2068 return addr; 2634 if (inflated_len < len) 2069 if (inflated_len < len) 2635 return addr; 2070 return addr; 2636 2071 2637 inflated_addr = mm_get_unmapped_area( !! 
2072 inflated_addr = get_area(NULL, 0, inflated_len, 0, flags); 2638 << 2639 if (IS_ERR_VALUE(inflated_addr)) 2073 if (IS_ERR_VALUE(inflated_addr)) 2640 return addr; 2074 return addr; 2641 if (inflated_addr & ~PAGE_MASK) 2075 if (inflated_addr & ~PAGE_MASK) 2642 return addr; 2076 return addr; 2643 2077 2644 inflated_offset = inflated_addr & (hp !! 2078 inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1); 2645 inflated_addr += offset - inflated_of 2079 inflated_addr += offset - inflated_offset; 2646 if (inflated_offset > offset) 2080 if (inflated_offset > offset) 2647 inflated_addr += hpage_size; !! 2081 inflated_addr += HPAGE_PMD_SIZE; 2648 2082 2649 if (inflated_addr > TASK_SIZE - len) 2083 if (inflated_addr > TASK_SIZE - len) 2650 return addr; 2084 return addr; 2651 return inflated_addr; 2085 return inflated_addr; 2652 } 2086 } 2653 2087 2654 #ifdef CONFIG_NUMA 2088 #ifdef CONFIG_NUMA 2655 static int shmem_set_policy(struct vm_area_st 2089 static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol) 2656 { 2090 { 2657 struct inode *inode = file_inode(vma- 2091 struct inode *inode = file_inode(vma->vm_file); 2658 return mpol_set_shared_policy(&SHMEM_ 2092 return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol); 2659 } 2093 } 2660 2094 2661 static struct mempolicy *shmem_get_policy(str 2095 static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma, 2662 uns !! 2096 unsigned long addr) 2663 { 2097 { 2664 struct inode *inode = file_inode(vma- 2098 struct inode *inode = file_inode(vma->vm_file); 2665 pgoff_t index; 2099 pgoff_t index; 2666 2100 2667 /* << 2668 * Bias interleave by inode number to << 2669 * but this interface is independent << 2670 * supplies only that bias, letting c << 2671 * by page order, as in shmem_get_pgo << 2672 */ << 2673 *ilx = inode->i_ino; << 2674 index = ((addr - vma->vm_start) >> PA 2101 index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; 2675 return mpol_shared_policy_lookup(&SHM 2102 return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index); 2676 } 2103 } >> 2104 #endif 2677 2105 2678 static struct mempolicy *shmem_get_pgoff_poli !! 2106 int shmem_lock(struct file *file, int lock, struct user_struct *user) 2679 pgoff_t index, unsign << 2680 { << 2681 struct mempolicy *mpol; << 2682 << 2683 /* Bias interleave by inode number to << 2684 *ilx = info->vfs_inode.i_ino + (index << 2685 << 2686 mpol = mpol_shared_policy_lookup(&inf << 2687 return mpol ? mpol : get_task_policy( << 2688 } << 2689 #else << 2690 static struct mempolicy *shmem_get_pgoff_poli << 2691 pgoff_t index, unsign << 2692 { << 2693 *ilx = 0; << 2694 return NULL; << 2695 } << 2696 #endif /* CONFIG_NUMA */ << 2697 << 2698 int shmem_lock(struct file *file, int lock, s << 2699 { 2107 { 2700 struct inode *inode = file_inode(file 2108 struct inode *inode = file_inode(file); 2701 struct shmem_inode_info *info = SHMEM 2109 struct shmem_inode_info *info = SHMEM_I(inode); 2702 int retval = -ENOMEM; 2110 int retval = -ENOMEM; 2703 2111 2704 /* !! 2112 spin_lock_irq(&info->lock); 2705 * What serializes the accesses to in << 2706 * ipc_lock_object() when called from << 2707 * no serialization needed when calle << 2708 */ << 2709 if (lock && !(info->flags & VM_LOCKED 2113 if (lock && !(info->flags & VM_LOCKED)) { 2710 if (!user_shm_lock(inode->i_s !! 
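
/*
 * Illustrative sketch, not part of shmem.c: the address "inflation"
 * performed by shmem_get_unmapped_area() above. To obtain an address
 * with (addr & (align - 1)) == offset, the code asks for an extra
 * align - PAGE_SIZE bytes and then slides the start forward within the
 * oversized range; it may never slide backwards, hence the final
 * correction. E.g. with align == 2MB and offset == 0, a raw address of
 * 0x7f1234567000 becomes 0x7f1234600000, the next 2MB boundary, which
 * still leaves len usable bytes. The helper name is hypothetical.
 */
static unsigned long align_within_inflated_area(unsigned long raw_addr,
						unsigned long align,
						unsigned long offset)
{
	unsigned long raw_offset = raw_addr & (align - 1);
	unsigned long addr = raw_addr + offset - raw_offset;

	if (raw_offset > offset)	/* would have moved backwards */
		addr += align;
	return addr;
}
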
2114 if (!user_shm_lock(inode->i_size, user)) 2711 goto out_nomem; 2115 goto out_nomem; 2712 info->flags |= VM_LOCKED; 2116 info->flags |= VM_LOCKED; 2713 mapping_set_unevictable(file- 2117 mapping_set_unevictable(file->f_mapping); 2714 } 2118 } 2715 if (!lock && (info->flags & VM_LOCKED !! 2119 if (!lock && (info->flags & VM_LOCKED) && user) { 2716 user_shm_unlock(inode->i_size !! 2120 user_shm_unlock(inode->i_size, user); 2717 info->flags &= ~VM_LOCKED; 2121 info->flags &= ~VM_LOCKED; 2718 mapping_clear_unevictable(fil 2122 mapping_clear_unevictable(file->f_mapping); 2719 } 2123 } 2720 retval = 0; 2124 retval = 0; 2721 2125 2722 out_nomem: 2126 out_nomem: >> 2127 spin_unlock_irq(&info->lock); 2723 return retval; 2128 return retval; 2724 } 2129 } 2725 2130 2726 static int shmem_mmap(struct file *file, stru 2131 static int shmem_mmap(struct file *file, struct vm_area_struct *vma) 2727 { 2132 { 2728 struct inode *inode = file_inode(file << 2729 struct shmem_inode_info *info = SHMEM << 2730 int ret; << 2731 << 2732 ret = seal_check_write(info->seals, v << 2733 if (ret) << 2734 return ret; << 2735 << 2736 file_accessed(file); 2133 file_accessed(file); 2737 /* This is anonymous shared memory if !! 2134 vma->vm_ops = &shmem_vm_ops; 2738 if (inode->i_nlink) !! 2135 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && 2739 vma->vm_ops = &shmem_vm_ops; !! 2136 ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) < 2740 else !! 2137 (vma->vm_end & HPAGE_PMD_MASK)) { 2741 vma->vm_ops = &shmem_anon_vm_ !! 2138 khugepaged_enter(vma, vma->vm_flags); >> 2139 } 2742 return 0; 2140 return 0; 2743 } 2141 } 2744 2142 2745 static int shmem_file_open(struct inode *inod !! 2143 static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir, 2746 { !! 2144 umode_t mode, dev_t dev, unsigned long flags) 2747 file->f_mode |= FMODE_CAN_ODIRECT; << 2748 return generic_file_open(inode, file) << 2749 } << 2750 << 2751 #ifdef CONFIG_TMPFS_XATTR << 2752 static int shmem_initxattrs(struct inode *, c << 2753 << 2754 /* << 2755 * chattr's fsflags are unrelated to extended << 2756 * but tmpfs has chosen to enable them under << 2757 */ << 2758 static void shmem_set_inode_flags(struct inod << 2759 { << 2760 unsigned int i_flags = 0; << 2761 << 2762 if (fsflags & FS_NOATIME_FL) << 2763 i_flags |= S_NOATIME; << 2764 if (fsflags & FS_APPEND_FL) << 2765 i_flags |= S_APPEND; << 2766 if (fsflags & FS_IMMUTABLE_FL) << 2767 i_flags |= S_IMMUTABLE; << 2768 /* << 2769 * But FS_NODUMP_FL does not require << 2770 */ << 2771 inode_set_flags(inode, i_flags, S_NOA << 2772 } << 2773 #else << 2774 static void shmem_set_inode_flags(struct inod << 2775 { << 2776 } << 2777 #define shmem_initxattrs NULL << 2778 #endif << 2779 << 2780 static struct offset_ctx *shmem_get_offset_ct << 2781 { << 2782 return &SHMEM_I(inode)->dir_offsets; << 2783 } << 2784 << 2785 static struct inode *__shmem_get_inode(struct << 2786 << 2787 << 2788 << 2789 { 2145 { 2790 struct inode *inode; 2146 struct inode *inode; 2791 struct shmem_inode_info *info; 2147 struct shmem_inode_info *info; 2792 struct shmem_sb_info *sbinfo = SHMEM_ 2148 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 2793 ino_t ino; << 2794 int err; << 2795 2149 2796 err = shmem_reserve_inode(sb, &ino); !! 2150 if (shmem_reserve_inode(sb)) 2797 if (err) !! 2151 return NULL; 2798 return ERR_PTR(err); << 2799 2152 2800 inode = new_inode(sb); 2153 inode = new_inode(sb); 2801 if (!inode) { !! 2154 if (inode) { 2802 shmem_free_inode(sb, 0); !! 
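
/*
 * Illustrative userspace sketch, not kernel code: shmem_lock() above is
 * what backs shmctl(SHM_LOCK) for SysV shared memory, marking the
 * segment's pages unevictable so they are never written out to swap.
 * Requires CAP_IPC_LOCK or a sufficient RLIMIT_MEMLOCK.
 */
#include <sys/ipc.h>
#include <sys/shm.h>

static int example_lock_segment(int shmid)
{
	return shmctl(shmid, SHM_LOCK, (struct shmid_ds *)0);
}
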
2155 inode->i_ino = get_next_ino(); 2803 return ERR_PTR(-ENOSPC); !! 2156 inode_init_owner(inode, dir, mode); 2804 } !! 2157 inode->i_blocks = 0; 2805 !! 2158 inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); 2806 inode->i_ino = ino; !! 2159 inode->i_generation = get_seconds(); 2807 inode_init_owner(idmap, inode, dir, m !! 2160 info = SHMEM_I(inode); 2808 inode->i_blocks = 0; !! 2161 memset(info, 0, (char *)inode - (char *)info); 2809 simple_inode_init_ts(inode); !! 2162 spin_lock_init(&info->lock); 2810 inode->i_generation = get_random_u32( !! 2163 info->seals = F_SEAL_SEAL; 2811 info = SHMEM_I(inode); !! 2164 info->flags = flags & VM_NORESERVE; 2812 memset(info, 0, (char *)inode - (char !! 2165 INIT_LIST_HEAD(&info->shrinklist); 2813 spin_lock_init(&info->lock); !! 2166 INIT_LIST_HEAD(&info->swaplist); 2814 atomic_set(&info->stop_eviction, 0); !! 2167 simple_xattrs_init(&info->xattrs); 2815 info->seals = F_SEAL_SEAL; !! 2168 cache_no_acl(inode); 2816 info->flags = flags & VM_NORESERVE; !! 2169 2817 info->i_crtime = inode_get_mtime(inod !! 2170 switch (mode & S_IFMT) { 2818 info->fsflags = (dir == NULL) ? 0 : !! 2171 default: 2819 SHMEM_I(dir)->fsflags & SHMEM !! 2172 inode->i_op = &shmem_special_inode_operations; 2820 if (info->fsflags) !! 2173 init_special_inode(inode, mode, dev); 2821 shmem_set_inode_flags(inode, !! 2174 break; 2822 INIT_LIST_HEAD(&info->shrinklist); !! 2175 case S_IFREG: 2823 INIT_LIST_HEAD(&info->swaplist); !! 2176 inode->i_mapping->a_ops = &shmem_aops; 2824 simple_xattrs_init(&info->xattrs); !! 2177 inode->i_op = &shmem_inode_operations; 2825 cache_no_acl(inode); !! 2178 inode->i_fop = &shmem_file_operations; 2826 if (sbinfo->noswap) !! 2179 mpol_shared_policy_init(&info->policy, 2827 mapping_set_unevictable(inode !! 2180 shmem_get_sbmpol(sbinfo)); 2828 mapping_set_large_folios(inode->i_map !! 2181 break; 2829 !! 2182 case S_IFDIR: 2830 switch (mode & S_IFMT) { !! 2183 inc_nlink(inode); 2831 default: !! 2184 /* Some things misbehave if size == 0 on a directory */ 2832 inode->i_op = &shmem_special_ !! 2185 inode->i_size = 2 * BOGO_DIRENT_SIZE; 2833 init_special_inode(inode, mod !! 2186 inode->i_op = &shmem_dir_inode_operations; 2834 break; !! 2187 inode->i_fop = &simple_dir_operations; 2835 case S_IFREG: !! 2188 break; 2836 inode->i_mapping->a_ops = &sh !! 2189 case S_IFLNK: 2837 inode->i_op = &shmem_inode_op !! 2190 /* 2838 inode->i_fop = &shmem_file_op !! 2191 * Must not load anything in the rbtree, 2839 mpol_shared_policy_init(&info !! 2192 * mpol_free_shared_policy will not be called. 2840 shme !! 2193 */ 2841 break; !! 2194 mpol_shared_policy_init(&info->policy, NULL); 2842 case S_IFDIR: !! 2195 break; 2843 inc_nlink(inode); !! 2196 } 2844 /* Some things misbehave if s !! 2197 } else 2845 inode->i_size = 2 * BOGO_DIRE !! 2198 shmem_free_inode(sb); 2846 inode->i_op = &shmem_dir_inod << 2847 inode->i_fop = &simple_offset << 2848 simple_offset_init(shmem_get_ << 2849 break; << 2850 case S_IFLNK: << 2851 /* << 2852 * Must not load anything in << 2853 * mpol_free_shared_policy wi << 2854 */ << 2855 mpol_shared_policy_init(&info << 2856 break; << 2857 } << 2858 << 2859 lockdep_annotate_inode_mutex_key(inod << 2860 return inode; 2199 return inode; 2861 } 2200 } 2862 2201 2863 #ifdef CONFIG_TMPFS_QUOTA !! 
2202 bool shmem_mapping(struct address_space *mapping) 2864 static struct inode *shmem_get_inode(struct m << 2865 struct s << 2866 umode_t << 2867 { << 2868 int err; << 2869 struct inode *inode; << 2870 << 2871 inode = __shmem_get_inode(idmap, sb, << 2872 if (IS_ERR(inode)) << 2873 return inode; << 2874 << 2875 err = dquot_initialize(inode); << 2876 if (err) << 2877 goto errout; << 2878 << 2879 err = dquot_alloc_inode(inode); << 2880 if (err) { << 2881 dquot_drop(inode); << 2882 goto errout; << 2883 } << 2884 return inode; << 2885 << 2886 errout: << 2887 inode->i_flags |= S_NOQUOTA; << 2888 iput(inode); << 2889 return ERR_PTR(err); << 2890 } << 2891 #else << 2892 static inline struct inode *shmem_get_inode(s << 2893 struct s << 2894 umode_t << 2895 { 2203 { 2896 return __shmem_get_inode(idmap, sb, d !! 2204 return mapping->a_ops == &shmem_aops; 2897 } 2205 } 2898 #endif /* CONFIG_TMPFS_QUOTA */ << 2899 2206 2900 #ifdef CONFIG_USERFAULTFD !! 2207 int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, 2901 int shmem_mfill_atomic_pte(pmd_t *dst_pmd, !! 2208 pmd_t *dst_pmd, 2902 struct vm_area_str 2209 struct vm_area_struct *dst_vma, 2903 unsigned long dst_ 2210 unsigned long dst_addr, 2904 unsigned long src_ 2211 unsigned long src_addr, 2905 uffd_flags_t flags !! 2212 struct page **pagep) 2906 struct folio **fol << 2907 { 2213 { 2908 struct inode *inode = file_inode(dst_ 2214 struct inode *inode = file_inode(dst_vma->vm_file); 2909 struct shmem_inode_info *info = SHMEM 2215 struct shmem_inode_info *info = SHMEM_I(inode); >> 2216 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 2910 struct address_space *mapping = inode 2217 struct address_space *mapping = inode->i_mapping; 2911 gfp_t gfp = mapping_gfp_mask(mapping) 2218 gfp_t gfp = mapping_gfp_mask(mapping); 2912 pgoff_t pgoff = linear_page_index(dst 2219 pgoff_t pgoff = linear_page_index(dst_vma, dst_addr); >> 2220 struct mem_cgroup *memcg; >> 2221 spinlock_t *ptl; 2913 void *page_kaddr; 2222 void *page_kaddr; 2914 struct folio *folio; !! 2223 struct page *page; >> 2224 pte_t _dst_pte, *dst_pte; 2915 int ret; 2225 int ret; 2916 pgoff_t max_off; << 2917 << 2918 if (shmem_inode_acct_blocks(inode, 1) << 2919 /* << 2920 * We may have got a page, re << 2921 * and now we find ourselves << 2922 * avoid a BUG_ON in our call << 2923 */ << 2924 if (unlikely(*foliop)) { << 2925 folio_put(*foliop); << 2926 *foliop = NULL; << 2927 } << 2928 return -ENOMEM; << 2929 } << 2930 2226 2931 if (!*foliop) { !! 2227 ret = -ENOMEM; 2932 ret = -ENOMEM; !! 2228 if (shmem_acct_block(info->flags, 1)) 2933 folio = shmem_alloc_folio(gfp !! 2229 goto out; 2934 if (!folio) !! 2230 if (sbinfo->max_blocks) { >> 2231 if (percpu_counter_compare(&sbinfo->used_blocks, >> 2232 sbinfo->max_blocks) >= 0) 2935 goto out_unacct_block 2233 goto out_unacct_blocks; >> 2234 percpu_counter_inc(&sbinfo->used_blocks); >> 2235 } 2936 2236 2937 if (uffd_flags_mode_is(flags, !! 2237 if (!*pagep) { 2938 page_kaddr = kmap_loc !! 2238 page = shmem_alloc_page(gfp, info, pgoff); 2939 /* !! 2239 if (!page) 2940 * The read mmap_lock !! 2240 goto out_dec_used_blocks; 2941 * mmap_lock being re !! 2241 2942 * possible if a writ !! 2242 page_kaddr = kmap_atomic(page); 2943 * !! 2243 ret = copy_from_user(page_kaddr, (const void __user *)src_addr, 2944 * process A thread 1 !! 2244 PAGE_SIZE); 2945 * process A thread 2 !! 2245 kunmap_atomic(page_kaddr); 2946 * process B thread 1 !! 2246 2947 * process B thread 2 !! 2247 /* fallback to copy_from_user outside mmap_sem */ 2948 * process A thread 1 !! 
2248 if (unlikely(ret)) { 2949 * process B thread 1 !! 2249 *pagep = page; 2950 * !! 2250 if (sbinfo->max_blocks) 2951 * Disable page fault !! 2251 percpu_counter_add(&sbinfo->used_blocks, -1); 2952 * and retry the copy !! 2252 shmem_unacct_blocks(info->flags, 1); 2953 */ !! 2253 /* don't free the page */ 2954 pagefault_disable(); !! 2254 return -EFAULT; 2955 ret = copy_from_user( << 2956 << 2957 << 2958 pagefault_enable(); << 2959 kunmap_local(page_kad << 2960 << 2961 /* fallback to copy_f << 2962 if (unlikely(ret)) { << 2963 *foliop = fol << 2964 ret = -ENOENT << 2965 /* don't free << 2966 goto out_unac << 2967 } << 2968 << 2969 flush_dcache_folio(fo << 2970 } else { /* ZE << 2971 clear_user_highpage(& << 2972 } 2255 } 2973 } else { 2256 } else { 2974 folio = *foliop; !! 2257 page = *pagep; 2975 VM_BUG_ON_FOLIO(folio_test_la !! 2258 *pagep = NULL; 2976 *foliop = NULL; << 2977 } 2259 } 2978 2260 2979 VM_BUG_ON(folio_test_locked(folio)); !! 2261 VM_BUG_ON(PageLocked(page) || PageSwapBacked(page)); 2980 VM_BUG_ON(folio_test_swapbacked(folio !! 2262 __SetPageLocked(page); 2981 __folio_set_locked(folio); !! 2263 __SetPageSwapBacked(page); 2982 __folio_set_swapbacked(folio); !! 2264 __SetPageUptodate(page); 2983 __folio_mark_uptodate(folio); << 2984 << 2985 ret = -EFAULT; << 2986 max_off = DIV_ROUND_UP(i_size_read(in << 2987 if (unlikely(pgoff >= max_off)) << 2988 goto out_release; << 2989 2265 2990 ret = mem_cgroup_charge(folio, dst_vm !! 2266 ret = mem_cgroup_try_charge(page, dst_mm, gfp, &memcg, false); 2991 if (ret) << 2992 goto out_release; << 2993 ret = shmem_add_to_page_cache(folio, << 2994 if (ret) 2267 if (ret) 2995 goto out_release; 2268 goto out_release; 2996 2269 2997 ret = mfill_atomic_install_pte(dst_pm !! 2270 ret = radix_tree_maybe_preload(gfp & GFP_RECLAIM_MASK); 2998 &folio !! 2271 if (!ret) { >> 2272 ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL); >> 2273 radix_tree_preload_end(); >> 2274 } 2999 if (ret) 2275 if (ret) 3000 goto out_delete_from_cache; !! 2276 goto out_release_uncharge; 3001 2277 3002 shmem_recalc_inode(inode, 1, 0); !! 2278 mem_cgroup_commit_charge(page, memcg, false, false); 3003 folio_unlock(folio); !! 2279 3004 return 0; !! 2280 _dst_pte = mk_pte(page, dst_vma->vm_page_prot); 3005 out_delete_from_cache: !! 2281 if (dst_vma->vm_flags & VM_WRITE) 3006 filemap_remove_folio(folio); !! 2282 _dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte)); >> 2283 >> 2284 ret = -EEXIST; >> 2285 dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl); >> 2286 if (!pte_none(*dst_pte)) >> 2287 goto out_release_uncharge_unlock; >> 2288 >> 2289 lru_cache_add_anon(page); >> 2290 >> 2291 spin_lock(&info->lock); >> 2292 info->alloced++; >> 2293 inode->i_blocks += BLOCKS_PER_PAGE; >> 2294 shmem_recalc_inode(inode); >> 2295 spin_unlock(&info->lock); >> 2296 >> 2297 inc_mm_counter(dst_mm, mm_counter_file(page)); >> 2298 page_add_file_rmap(page, false); >> 2299 set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte); >> 2300 >> 2301 /* No need to invalidate - it was non-present before */ >> 2302 update_mmu_cache(dst_vma, dst_addr, dst_pte); >> 2303 unlock_page(page); >> 2304 pte_unmap_unlock(dst_pte, ptl); >> 2305 ret = 0; >> 2306 out: >> 2307 return ret; >> 2308 out_release_uncharge_unlock: >> 2309 pte_unmap_unlock(dst_pte, ptl); >> 2310 out_release_uncharge: >> 2311 mem_cgroup_cancel_charge(page, memcg, false); 3007 out_release: 2312 out_release: 3008 folio_unlock(folio); !! 2313 unlock_page(page); 3009 folio_put(folio); !! 
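
/*
 * Illustrative userspace sketch, not kernel code: the ioctl sequence
 * that ends in the shmem_mcopy_atomic_pte()/shmem_mfill_atomic_pte()
 * path above. A fault-handling thread resolves a missing page in a
 * registered tmpfs mapping by handing the kernel a filled source page.
 * Registration and event reading are elided; error checks omitted.
 */
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

static void example_resolve_missing(void *src_page, unsigned long fault_addr,
				    unsigned long page_size)
{
	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	struct uffdio_api api = { .api = UFFD_API };
	struct uffdio_copy copy;

	ioctl(uffd, UFFDIO_API, &api);
	/* ... UFFDIO_REGISTER the range, poll/read the fault event ... */
	copy.dst = fault_addr & ~(page_size - 1);
	copy.src = (unsigned long)src_page;
	copy.len = page_size;
	copy.mode = 0;
	ioctl(uffd, UFFDIO_COPY, &copy);	/* reaches the shmem path */
	close(uffd);
}
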
2314 put_page(page); >> 2315 out_dec_used_blocks: >> 2316 if (sbinfo->max_blocks) >> 2317 percpu_counter_add(&sbinfo->used_blocks, -1); 3010 out_unacct_blocks: 2318 out_unacct_blocks: 3011 shmem_inode_unacct_blocks(inode, 1); !! 2319 shmem_unacct_blocks(info->flags, 1); 3012 return ret; !! 2320 goto out; 3013 } 2321 } 3014 #endif /* CONFIG_USERFAULTFD */ << 3015 2322 3016 #ifdef CONFIG_TMPFS 2323 #ifdef CONFIG_TMPFS 3017 static const struct inode_operations shmem_sy 2324 static const struct inode_operations shmem_symlink_inode_operations; 3018 static const struct inode_operations shmem_sh 2325 static const struct inode_operations shmem_short_symlink_operations; 3019 2326 >> 2327 #ifdef CONFIG_TMPFS_XATTR >> 2328 static int shmem_initxattrs(struct inode *, const struct xattr *, void *); >> 2329 #else >> 2330 #define shmem_initxattrs NULL >> 2331 #endif >> 2332 3020 static int 2333 static int 3021 shmem_write_begin(struct file *file, struct a 2334 shmem_write_begin(struct file *file, struct address_space *mapping, 3022 loff_t pos, unsigned !! 2335 loff_t pos, unsigned len, unsigned flags, 3023 struct folio **foliop !! 2336 struct page **pagep, void **fsdata) 3024 { 2337 { 3025 struct inode *inode = mapping->host; 2338 struct inode *inode = mapping->host; 3026 struct shmem_inode_info *info = SHMEM 2339 struct shmem_inode_info *info = SHMEM_I(inode); 3027 pgoff_t index = pos >> PAGE_SHIFT; 2340 pgoff_t index = pos >> PAGE_SHIFT; 3028 struct folio *folio; << 3029 int ret = 0; << 3030 2341 3031 /* i_rwsem is held by caller */ !! 2342 /* i_mutex is held by caller */ 3032 if (unlikely(info->seals & (F_SEAL_GR !! 2343 if (unlikely(info->seals & (F_SEAL_WRITE | F_SEAL_GROW))) { 3033 F_SEAL_WRI !! 2344 if (info->seals & F_SEAL_WRITE) 3034 if (info->seals & (F_SEAL_WRI << 3035 return -EPERM; 2345 return -EPERM; 3036 if ((info->seals & F_SEAL_GRO 2346 if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size) 3037 return -EPERM; 2347 return -EPERM; 3038 } 2348 } 3039 2349 3040 ret = shmem_get_folio(inode, index, p !! 2350 return shmem_getpage(inode, index, pagep, SGP_WRITE); 3041 if (ret) << 3042 return ret; << 3043 << 3044 if (folio_test_hwpoison(folio) || << 3045 (folio_test_large(folio) && folio << 3046 folio_unlock(folio); << 3047 folio_put(folio); << 3048 return -EIO; << 3049 } << 3050 << 3051 *foliop = folio; << 3052 return 0; << 3053 } 2351 } 3054 2352 3055 static int 2353 static int 3056 shmem_write_end(struct file *file, struct add 2354 shmem_write_end(struct file *file, struct address_space *mapping, 3057 loff_t pos, unsigned 2355 loff_t pos, unsigned len, unsigned copied, 3058 struct folio *folio, !! 2356 struct page *page, void *fsdata) 3059 { 2357 { 3060 struct inode *inode = mapping->host; 2358 struct inode *inode = mapping->host; 3061 2359 3062 if (pos + copied > inode->i_size) 2360 if (pos + copied > inode->i_size) 3063 i_size_write(inode, pos + cop 2361 i_size_write(inode, pos + copied); 3064 2362 3065 if (!folio_test_uptodate(folio)) { !! 2363 if (!PageUptodate(page)) { 3066 if (copied < folio_size(folio !! 2364 struct page *head = compound_head(page); 3067 size_t from = offset_ !! 2365 if (PageTransCompound(page)) { 3068 folio_zero_segments(f !! 2366 int i; 3069 from !! 2367 3070 } !! 2368 for (i = 0; i < HPAGE_PMD_NR; i++) { 3071 folio_mark_uptodate(folio); !! 2369 if (head + i == page) 3072 } !! 2370 continue; 3073 folio_mark_dirty(folio); !! 2371 clear_highpage(head + i); 3074 folio_unlock(folio); !! 2372 flush_dcache_page(head + i); 3075 folio_put(folio); !! 
2373 } >> 2374 } >> 2375 if (copied < PAGE_SIZE) { >> 2376 unsigned from = pos & (PAGE_SIZE - 1); >> 2377 zero_user_segments(page, 0, from, >> 2378 from + copied, PAGE_SIZE); >> 2379 } >> 2380 SetPageUptodate(head); >> 2381 } >> 2382 set_page_dirty(page); >> 2383 unlock_page(page); >> 2384 put_page(page); 3076 2385 3077 return copied; 2386 return copied; 3078 } 2387 } 3079 2388 3080 static ssize_t shmem_file_read_iter(struct ki 2389 static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to) 3081 { 2390 { 3082 struct file *file = iocb->ki_filp; 2391 struct file *file = iocb->ki_filp; 3083 struct inode *inode = file_inode(file 2392 struct inode *inode = file_inode(file); 3084 struct address_space *mapping = inode 2393 struct address_space *mapping = inode->i_mapping; 3085 pgoff_t index; 2394 pgoff_t index; 3086 unsigned long offset; 2395 unsigned long offset; >> 2396 enum sgp_type sgp = SGP_READ; 3087 int error = 0; 2397 int error = 0; 3088 ssize_t retval = 0; 2398 ssize_t retval = 0; 3089 loff_t *ppos = &iocb->ki_pos; 2399 loff_t *ppos = &iocb->ki_pos; 3090 2400 >> 2401 /* >> 2402 * Might this read be for a stacking filesystem? Then when reading >> 2403 * holes of a sparse file, we actually need to allocate those pages, >> 2404 * and even mark them dirty, so it cannot exceed the max_blocks limit. >> 2405 */ >> 2406 if (!iter_is_iovec(to)) >> 2407 sgp = SGP_CACHE; >> 2408 3091 index = *ppos >> PAGE_SHIFT; 2409 index = *ppos >> PAGE_SHIFT; 3092 offset = *ppos & ~PAGE_MASK; 2410 offset = *ppos & ~PAGE_MASK; 3093 2411 3094 for (;;) { 2412 for (;;) { 3095 struct folio *folio = NULL; << 3096 struct page *page = NULL; 2413 struct page *page = NULL; 3097 pgoff_t end_index; 2414 pgoff_t end_index; 3098 unsigned long nr, ret; 2415 unsigned long nr, ret; 3099 loff_t i_size = i_size_read(i 2416 loff_t i_size = i_size_read(inode); 3100 2417 3101 end_index = i_size >> PAGE_SH 2418 end_index = i_size >> PAGE_SHIFT; 3102 if (index > end_index) 2419 if (index > end_index) 3103 break; 2420 break; 3104 if (index == end_index) { 2421 if (index == end_index) { 3105 nr = i_size & ~PAGE_M 2422 nr = i_size & ~PAGE_MASK; 3106 if (nr <= offset) 2423 if (nr <= offset) 3107 break; 2424 break; 3108 } 2425 } 3109 2426 3110 error = shmem_get_folio(inode !! 2427 error = shmem_getpage(inode, index, &page, sgp); 3111 if (error) { 2428 if (error) { 3112 if (error == -EINVAL) 2429 if (error == -EINVAL) 3113 error = 0; 2430 error = 0; 3114 break; 2431 break; 3115 } 2432 } 3116 if (folio) { !! 2433 if (page) { 3117 folio_unlock(folio); !! 2434 if (sgp == SGP_CACHE) 3118 !! 2435 set_page_dirty(page); 3119 page = folio_file_pag !! 2436 unlock_page(page); 3120 if (PageHWPoison(page << 3121 folio_put(fol << 3122 error = -EIO; << 3123 break; << 3124 } << 3125 } 2437 } 3126 2438 3127 /* 2439 /* 3128 * We must evaluate after, si 2440 * We must evaluate after, since reads (unlike writes) 3129 * are called without i_rwsem !! 2441 * are called without i_mutex protection against truncate 3130 */ 2442 */ 3131 nr = PAGE_SIZE; 2443 nr = PAGE_SIZE; 3132 i_size = i_size_read(inode); 2444 i_size = i_size_read(inode); 3133 end_index = i_size >> PAGE_SH 2445 end_index = i_size >> PAGE_SHIFT; 3134 if (index == end_index) { 2446 if (index == end_index) { 3135 nr = i_size & ~PAGE_M 2447 nr = i_size & ~PAGE_MASK; 3136 if (nr <= offset) { 2448 if (nr <= offset) { 3137 if (folio) !! 2449 if (page) 3138 folio !! 
2450 put_page(page); 3139 break; 2451 break; 3140 } 2452 } 3141 } 2453 } 3142 nr -= offset; 2454 nr -= offset; 3143 2455 3144 if (folio) { !! 2456 if (page) { 3145 /* 2457 /* 3146 * If users can be wr 2458 * If users can be writing to this page using arbitrary 3147 * virtual addresses, 2459 * virtual addresses, take care about potential aliasing 3148 * before reading the 2460 * before reading the page on the kernel side. 3149 */ 2461 */ 3150 if (mapping_writably_ 2462 if (mapping_writably_mapped(mapping)) 3151 flush_dcache_ 2463 flush_dcache_page(page); 3152 /* 2464 /* 3153 * Mark the page acce 2465 * Mark the page accessed if we read the beginning. 3154 */ 2466 */ 3155 if (!offset) 2467 if (!offset) 3156 folio_mark_ac !! 2468 mark_page_accessed(page); 3157 /* << 3158 * Ok, we have the pa << 3159 * now we can copy it << 3160 */ << 3161 ret = copy_page_to_it << 3162 folio_put(folio); << 3163 << 3164 } else if (user_backed_iter(t << 3165 /* << 3166 * Copy to user tends << 3167 * clear_user() not s << 3168 * faster to copy the << 3169 */ << 3170 ret = copy_page_to_it << 3171 } else { 2469 } else { 3172 /* !! 2470 page = ZERO_PAGE(0); 3173 * But submitting the !! 2471 get_page(page); 3174 * splice() - or othe << 3175 * so don't attempt t << 3176 */ << 3177 ret = iov_iter_zero(n << 3178 } 2472 } 3179 2473 >> 2474 /* >> 2475 * Ok, we have the page, and it's up-to-date, so >> 2476 * now we can copy it to user space... >> 2477 */ >> 2478 ret = copy_page_to_iter(page, offset, nr, to); 3180 retval += ret; 2479 retval += ret; 3181 offset += ret; 2480 offset += ret; 3182 index += offset >> PAGE_SHIFT 2481 index += offset >> PAGE_SHIFT; 3183 offset &= ~PAGE_MASK; 2482 offset &= ~PAGE_MASK; 3184 2483 >> 2484 put_page(page); 3185 if (!iov_iter_count(to)) 2485 if (!iov_iter_count(to)) 3186 break; 2486 break; 3187 if (ret < nr) { 2487 if (ret < nr) { 3188 error = -EFAULT; 2488 error = -EFAULT; 3189 break; 2489 break; 3190 } 2490 } 3191 cond_resched(); 2491 cond_resched(); 3192 } 2492 } 3193 2493 3194 *ppos = ((loff_t) index << PAGE_SHIFT 2494 *ppos = ((loff_t) index << PAGE_SHIFT) + offset; 3195 file_accessed(file); 2495 file_accessed(file); 3196 return retval ? retval : error; 2496 return retval ? retval : error; 3197 } 2497 } 3198 2498 3199 static ssize_t shmem_file_write_iter(struct k !! 2499 /* >> 2500 * llseek SEEK_DATA or SEEK_HOLE through the radix_tree. >> 2501 */ >> 2502 static pgoff_t shmem_seek_hole_data(struct address_space *mapping, >> 2503 pgoff_t index, pgoff_t end, int whence) 3200 { 2504 { 3201 struct file *file = iocb->ki_filp; !! 2505 struct page *page; 3202 struct inode *inode = file->f_mapping !! 2506 struct pagevec pvec; 3203 ssize_t ret; !! 2507 pgoff_t indices[PAGEVEC_SIZE]; >> 2508 bool done = false; >> 2509 int i; 3204 2510 3205 inode_lock(inode); !! 2511 pagevec_init(&pvec, 0); 3206 ret = generic_write_checks(iocb, from !! 2512 pvec.nr = 1; /* start small: we may be there already */ 3207 if (ret <= 0) !! 2513 while (!done) { 3208 goto unlock; !! 2514 pvec.nr = find_get_entries(mapping, index, 3209 ret = file_remove_privs(file); !! 2515 pvec.nr, pvec.pages, indices); 3210 if (ret) !! 2516 if (!pvec.nr) { 3211 goto unlock; !! 2517 if (whence == SEEK_DATA) 3212 ret = file_update_time(file); !! 2518 index = end; 3213 if (ret) !! 2519 break; 3214 goto unlock; !! 2520 } 3215 ret = generic_perform_write(iocb, fro !! 2521 for (i = 0; i < pvec.nr; i++, index++) { 3216 unlock: !! 2522 if (index < indices[i]) { 3217 inode_unlock(inode); !! 2523 if (whence == SEEK_HOLE) { 3218 return ret; !! 
2524 done = true; >> 2525 break; >> 2526 } >> 2527 index = indices[i]; >> 2528 } >> 2529 page = pvec.pages[i]; >> 2530 if (page && !radix_tree_exceptional_entry(page)) { >> 2531 if (!PageUptodate(page)) >> 2532 page = NULL; >> 2533 } >> 2534 if (index >= end || >> 2535 (page && whence == SEEK_DATA) || >> 2536 (!page && whence == SEEK_HOLE)) { >> 2537 done = true; >> 2538 break; >> 2539 } >> 2540 } >> 2541 pagevec_remove_exceptionals(&pvec); >> 2542 pagevec_release(&pvec); >> 2543 pvec.nr = PAGEVEC_SIZE; >> 2544 cond_resched(); >> 2545 } >> 2546 return index; 3219 } 2547 } 3220 2548 3221 static bool zero_pipe_buf_get(struct pipe_ino !! 2549 static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence) 3222 struct pipe_buf << 3223 { 2550 { 3224 return true; !! 2551 struct address_space *mapping = file->f_mapping; 3225 } !! 2552 struct inode *inode = mapping->host; >> 2553 pgoff_t start, end; >> 2554 loff_t new_offset; 3226 2555 3227 static void zero_pipe_buf_release(struct pipe !! 2556 if (whence != SEEK_DATA && whence != SEEK_HOLE) 3228 struct pipe !! 2557 return generic_file_llseek_size(file, offset, whence, 3229 { !! 2558 MAX_LFS_FILESIZE, i_size_read(inode)); 3230 } !! 2559 inode_lock(inode); >> 2560 /* We're holding i_mutex so we can access i_size directly */ 3231 2561 3232 static bool zero_pipe_buf_try_steal(struct pi !! 2562 if (offset < 0) 3233 struct pi !! 2563 offset = -EINVAL; 3234 { !! 2564 else if (offset >= inode->i_size) 3235 return false; !! 2565 offset = -ENXIO; >> 2566 else { >> 2567 start = offset >> PAGE_SHIFT; >> 2568 end = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT; >> 2569 new_offset = shmem_seek_hole_data(mapping, start, end, whence); >> 2570 new_offset <<= PAGE_SHIFT; >> 2571 if (new_offset > offset) { >> 2572 if (new_offset < inode->i_size) >> 2573 offset = new_offset; >> 2574 else if (whence == SEEK_DATA) >> 2575 offset = -ENXIO; >> 2576 else >> 2577 offset = inode->i_size; >> 2578 } >> 2579 } >> 2580 >> 2581 if (offset >= 0) >> 2582 offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE); >> 2583 inode_unlock(inode); >> 2584 return offset; 3236 } 2585 } 3237 2586 3238 static const struct pipe_buf_operations zero_ !! 2587 /* 3239 .release = zero_pipe_buf_relea !! 2588 * We need a tag: a new tag would expand every radix_tree_node by 8 bytes, 3240 .try_steal = zero_pipe_buf_try_s !! 2589 * so reuse a tag which we firmly believe is never set or cleared on shmem. 3241 .get = zero_pipe_buf_get, !! 2590 */ 3242 }; !! 2591 #define SHMEM_TAG_PINNED PAGECACHE_TAG_TOWRITE >> 2592 #define LAST_SCAN 4 /* about 150ms max */ 3243 2593 3244 static size_t splice_zeropage_into_pipe(struc !! 2594 static void shmem_tag_pins(struct address_space *mapping) 3245 loff_ << 3246 { 2595 { 3247 size_t offset = fpos & ~PAGE_MASK; !! 2596 struct radix_tree_iter iter; >> 2597 void **slot; >> 2598 pgoff_t start; >> 2599 struct page *page; 3248 2600 3249 size = min_t(size_t, size, PAGE_SIZE !! 2601 lru_add_drain(); >> 2602 start = 0; >> 2603 rcu_read_lock(); 3250 2604 3251 if (!pipe_full(pipe->head, pipe->tail !! 2605 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) { 3252 struct pipe_buffer *buf = pip !! 
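
/*
 * Illustrative userspace sketch, not kernel code: walking the extents
 * of a sparse tmpfs file via the SEEK_DATA/SEEK_HOLE support of
 * shmem_file_llseek() above. Each extent runs from a SEEK_DATA offset
 * to the next SEEK_HOLE offset; lseek() failing with ENXIO means no
 * data remains before EOF, which terminates the loop.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>

static void example_dump_extents(int fd)
{
	off_t data = 0, hole;

	while ((data = lseek(fd, data, SEEK_DATA)) >= 0) {
		hole = lseek(fd, data, SEEK_HOLE);
		printf("data: [%lld, %lld)\n",
		       (long long)data, (long long)hole);
		data = hole;
	}
}
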
2606 page = radix_tree_deref_slot(slot); >> 2607 if (!page || radix_tree_exception(page)) { >> 2608 if (radix_tree_deref_retry(page)) { >> 2609 slot = radix_tree_iter_retry(&iter); >> 2610 continue; >> 2611 } >> 2612 } else if (page_count(page) - page_mapcount(page) > 1) { >> 2613 spin_lock_irq(&mapping->tree_lock); >> 2614 radix_tree_tag_set(&mapping->page_tree, iter.index, >> 2615 SHMEM_TAG_PINNED); >> 2616 spin_unlock_irq(&mapping->tree_lock); >> 2617 } 3253 2618 3254 *buf = (struct pipe_buffer) { !! 2619 if (need_resched()) { 3255 .ops = &zero_pipe_ !! 2620 slot = radix_tree_iter_resume(slot, &iter); 3256 .page = ZERO_PAGE(0 !! 2621 cond_resched_rcu(); 3257 .offset = offset, !! 2622 } 3258 .len = size, << 3259 }; << 3260 pipe->head++; << 3261 } 2623 } 3262 !! 2624 rcu_read_unlock(); 3263 return size; << 3264 } 2625 } 3265 2626 3266 static ssize_t shmem_file_splice_read(struct !! 2627 /* 3267 struct !! 2628 * Setting SEAL_WRITE requires us to verify there's no pending writer. However, 3268 size_t !! 2629 * via get_user_pages(), drivers might have some pending I/O without any active 3269 { !! 2630 * user-space mappings (eg., direct-IO, AIO). Therefore, we look at all pages 3270 struct inode *inode = file_inode(in); !! 2631 * and see whether it has an elevated ref-count. If so, we tag them and wait for 3271 struct address_space *mapping = inode !! 2632 * them to be dropped. 3272 struct folio *folio = NULL; !! 2633 * The caller must guarantee that no new user will acquire writable references 3273 size_t total_spliced = 0, used, npage !! 2634 * to those pages to avoid races. 3274 loff_t isize; !! 2635 */ 3275 int error = 0; !! 2636 static int shmem_wait_for_pins(struct address_space *mapping) >> 2637 { >> 2638 struct radix_tree_iter iter; >> 2639 void **slot; >> 2640 pgoff_t start; >> 2641 struct page *page; >> 2642 int error, scan; 3276 2643 3277 /* Work out how much data we can actu !! 2644 shmem_tag_pins(mapping); 3278 used = pipe_occupancy(pipe->head, pip << 3279 npages = max_t(ssize_t, pipe->max_usa << 3280 len = min_t(size_t, len, npages * PAG << 3281 2645 3282 do { !! 2646 error = 0; 3283 if (*ppos >= i_size_read(inod !! 2647 for (scan = 0; scan <= LAST_SCAN; scan++) { >> 2648 if (!radix_tree_tagged(&mapping->page_tree, SHMEM_TAG_PINNED)) 3284 break; 2649 break; 3285 2650 3286 error = shmem_get_folio(inode !! 2651 if (!scan) 3287 SGP_R !! 2652 lru_add_drain_all(); 3288 if (error) { !! 2653 else if (schedule_timeout_killable((HZ << scan) / 200)) 3289 if (error == -EINVAL) !! 2654 scan = LAST_SCAN; 3290 error = 0; !! 2655 3291 break; !! 2656 start = 0; 3292 } !! 2657 rcu_read_lock(); 3293 if (folio) { !! 2658 radix_tree_for_each_tagged(slot, &mapping->page_tree, &iter, 3294 folio_unlock(folio); !! 2659 start, SHMEM_TAG_PINNED) { >> 2660 >> 2661 page = radix_tree_deref_slot(slot); >> 2662 if (radix_tree_exception(page)) { >> 2663 if (radix_tree_deref_retry(page)) { >> 2664 slot = radix_tree_iter_retry(&iter); >> 2665 continue; >> 2666 } 3295 2667 3296 if (folio_test_hwpois !! 2668 page = NULL; 3297 (folio_test_large !! 2669 } 3298 folio_test_has_h !! 2670 3299 error = -EIO; !! 2671 if (page && 3300 break; !! 2672 page_count(page) - page_mapcount(page) != 1) { >> 2673 if (scan < LAST_SCAN) >> 2674 goto continue_resched; >> 2675 >> 2676 /* >> 2677 * On the last scan, we clean up all those tags >> 2678 * we inserted; but make a note that we still >> 2679 * found pages pinned. 
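
/*
 * Illustrative userspace sketch, not kernel code: the sealing interface
 * whose write-pin handshake is implemented above and whose entry points
 * (shmem_add_seals()/shmem_fcntl()) follow below. A fully sealed memfd
 * can be handed to an untrusted peer, which may map it read-only and
 * trust that the contents can no longer shrink, grow, or change.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

static int example_sealed_blob(const void *buf, size_t len)
{
	int fd = memfd_create("blob", MFD_ALLOW_SEALING);

	if (fd < 0)
		return -1;
	if (write(fd, buf, len) != (ssize_t)len ||
	    fcntl(fd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW |
				   F_SEAL_WRITE | F_SEAL_SEAL) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}
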
>> 2680 */ >> 2681 error = -EBUSY; >> 2682 } >> 2683 >> 2684 spin_lock_irq(&mapping->tree_lock); >> 2685 radix_tree_tag_clear(&mapping->page_tree, >> 2686 iter.index, SHMEM_TAG_PINNED); >> 2687 spin_unlock_irq(&mapping->tree_lock); >> 2688 continue_resched: >> 2689 if (need_resched()) { >> 2690 slot = radix_tree_iter_resume(slot, &iter); >> 2691 cond_resched_rcu(); 3301 } 2692 } 3302 } 2693 } >> 2694 rcu_read_unlock(); >> 2695 } 3303 2696 3304 /* !! 2697 return error; 3305 * i_size must be checked aft !! 2698 } 3306 * << 3307 * Checking i_size after the << 3308 * the correct value for "nr" << 3309 * part of the page is not co << 3310 * another truncate extends t << 3311 */ << 3312 isize = i_size_read(inode); << 3313 if (unlikely(*ppos >= isize)) << 3314 break; << 3315 part = min_t(loff_t, isize - << 3316 2699 3317 if (folio) { !! 2700 #define F_ALL_SEALS (F_SEAL_SEAL | \ 3318 /* !! 2701 F_SEAL_SHRINK | \ 3319 * If users can be wr !! 2702 F_SEAL_GROW | \ 3320 * virtual addresses, !! 2703 F_SEAL_WRITE) 3321 * before reading the !! 2704 3322 */ !! 2705 int shmem_add_seals(struct file *file, unsigned int seals) 3323 if (mapping_writably_ !! 2706 { 3324 flush_dcache_ !! 2707 struct inode *inode = file_inode(file); 3325 folio_mark_accessed(f !! 2708 struct shmem_inode_info *info = SHMEM_I(inode); 3326 /* !! 2709 int error; 3327 * Ok, we have the pa !! 2710 3328 * now splice it into !! 2711 /* 3329 */ !! 2712 * SEALING 3330 n = splice_folio_into !! 2713 * Sealing allows multiple parties to share a shmem-file but restrict 3331 folio_put(folio); !! 2714 * access to a specific subset of file operations. Seals can only be 3332 folio = NULL; !! 2715 * added, but never removed. This way, mutually untrusted parties can 3333 } else { !! 2716 * share common memory regions with a well-defined policy. A malicious 3334 n = splice_zeropage_i !! 2717 * peer can thus never perform unwanted operations on a shared object. >> 2718 * >> 2719 * Seals are only supported on special shmem-files and always affect >> 2720 * the whole underlying inode. Once a seal is set, it may prevent some >> 2721 * kinds of access to the file. Currently, the following seals are >> 2722 * defined: >> 2723 * SEAL_SEAL: Prevent further seals from being set on this file >> 2724 * SEAL_SHRINK: Prevent the file from shrinking >> 2725 * SEAL_GROW: Prevent the file from growing >> 2726 * SEAL_WRITE: Prevent write access to the file >> 2727 * >> 2728 * As we don't require any trust relationship between two parties, we >> 2729 * must prevent seals from being removed. Therefore, sealing a file >> 2730 * only adds a given set of seals to the file, it never touches >> 2731 * existing seals. Furthermore, the "setting seals"-operation can be >> 2732 * sealed itself, which basically prevents any further seal from being >> 2733 * added. >> 2734 * >> 2735 * Semantics of sealing are only defined on volatile files. Only >> 2736 * anonymous shmem files support sealing. More importantly, seals are >> 2737 * never written to disk. Therefore, there's no plan to support it on >> 2738 * other file types. 
>> 2739 */ >> 2740 >> 2741 if (file->f_op != &shmem_file_operations) >> 2742 return -EINVAL; >> 2743 if (!(file->f_mode & FMODE_WRITE)) >> 2744 return -EPERM; >> 2745 if (seals & ~(unsigned int)F_ALL_SEALS) >> 2746 return -EINVAL; >> 2747 >> 2748 inode_lock(inode); >> 2749 >> 2750 if (info->seals & F_SEAL_SEAL) { >> 2751 error = -EPERM; >> 2752 goto unlock; >> 2753 } >> 2754 >> 2755 if ((seals & F_SEAL_WRITE) && !(info->seals & F_SEAL_WRITE)) { >> 2756 error = mapping_deny_writable(file->f_mapping); >> 2757 if (error) >> 2758 goto unlock; >> 2759 >> 2760 error = shmem_wait_for_pins(file->f_mapping); >> 2761 if (error) { >> 2762 mapping_allow_writable(file->f_mapping); >> 2763 goto unlock; 3335 } 2764 } >> 2765 } 3336 2766 3337 if (!n) !! 2767 info->seals |= seals; 3338 break; !! 2768 error = 0; 3339 len -= n; << 3340 total_spliced += n; << 3341 *ppos += n; << 3342 in->f_ra.prev_pos = *ppos; << 3343 if (pipe_full(pipe->head, pip << 3344 break; << 3345 2769 3346 cond_resched(); !! 2770 unlock: 3347 } while (len); !! 2771 inode_unlock(inode); >> 2772 return error; >> 2773 } >> 2774 EXPORT_SYMBOL_GPL(shmem_add_seals); 3348 2775 3349 if (folio) !! 2776 int shmem_get_seals(struct file *file) 3350 folio_put(folio); !! 2777 { >> 2778 if (file->f_op != &shmem_file_operations) >> 2779 return -EINVAL; 3351 2780 3352 file_accessed(in); !! 2781 return SHMEM_I(file_inode(file))->seals; 3353 return total_spliced ? total_spliced << 3354 } 2782 } >> 2783 EXPORT_SYMBOL_GPL(shmem_get_seals); 3355 2784 3356 static loff_t shmem_file_llseek(struct file * !! 2785 long shmem_fcntl(struct file *file, unsigned int cmd, unsigned long arg) 3357 { 2786 { 3358 struct address_space *mapping = file- !! 2787 long error; 3359 struct inode *inode = mapping->host; << 3360 2788 3361 if (whence != SEEK_DATA && whence != !! 2789 switch (cmd) { 3362 return generic_file_llseek_si !! 2790 case F_ADD_SEALS: 3363 MAX_L !! 2791 /* disallow upper 32bit */ 3364 if (offset < 0) !! 2792 if (arg > UINT_MAX) 3365 return -ENXIO; !! 2793 return -EINVAL; 3366 2794 3367 inode_lock(inode); !! 2795 error = shmem_add_seals(file, arg); 3368 /* We're holding i_rwsem so we can ac !! 2796 break; 3369 offset = mapping_seek_hole_data(mappi !! 2797 case F_GET_SEALS: 3370 if (offset >= 0) !! 2798 error = shmem_get_seals(file); 3371 offset = vfs_setpos(file, off !! 2799 break; 3372 inode_unlock(inode); !! 2800 default: 3373 return offset; !! 2801 error = -EINVAL; >> 2802 break; >> 2803 } >> 2804 >> 2805 return error; 3374 } 2806 } 3375 2807 3376 static long shmem_fallocate(struct file *file 2808 static long shmem_fallocate(struct file *file, int mode, loff_t offset, 3377 2809 loff_t len) 3378 { 2810 { 3379 struct inode *inode = file_inode(file 2811 struct inode *inode = file_inode(file); 3380 struct shmem_sb_info *sbinfo = SHMEM_ 2812 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 3381 struct shmem_inode_info *info = SHMEM 2813 struct shmem_inode_info *info = SHMEM_I(inode); 3382 struct shmem_falloc shmem_falloc; 2814 struct shmem_falloc shmem_falloc; 3383 pgoff_t start, index, end, undo_fallo !! 
2815 pgoff_t start, index, end; 3384 int error; 2816 int error; 3385 2817 3386 if (mode & ~(FALLOC_FL_KEEP_SIZE | FA 2818 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) 3387 return -EOPNOTSUPP; 2819 return -EOPNOTSUPP; 3388 2820 3389 inode_lock(inode); 2821 inode_lock(inode); 3390 2822 3391 if (mode & FALLOC_FL_PUNCH_HOLE) { 2823 if (mode & FALLOC_FL_PUNCH_HOLE) { 3392 struct address_space *mapping 2824 struct address_space *mapping = file->f_mapping; 3393 loff_t unmap_start = round_up 2825 loff_t unmap_start = round_up(offset, PAGE_SIZE); 3394 loff_t unmap_end = round_down 2826 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1; 3395 DECLARE_WAIT_QUEUE_HEAD_ONSTA 2827 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq); 3396 2828 3397 /* protected by i_rwsem */ !! 2829 /* protected by i_mutex */ 3398 if (info->seals & (F_SEAL_WRI !! 2830 if (info->seals & F_SEAL_WRITE) { 3399 error = -EPERM; 2831 error = -EPERM; 3400 goto out; 2832 goto out; 3401 } 2833 } 3402 2834 3403 shmem_falloc.waitq = &shmem_f 2835 shmem_falloc.waitq = &shmem_falloc_waitq; 3404 shmem_falloc.start = (u64)unm !! 2836 shmem_falloc.start = unmap_start >> PAGE_SHIFT; 3405 shmem_falloc.next = (unmap_en 2837 shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT; 3406 spin_lock(&inode->i_lock); 2838 spin_lock(&inode->i_lock); 3407 inode->i_private = &shmem_fal 2839 inode->i_private = &shmem_falloc; 3408 spin_unlock(&inode->i_lock); 2840 spin_unlock(&inode->i_lock); 3409 2841 3410 if ((u64)unmap_end > (u64)unm 2842 if ((u64)unmap_end > (u64)unmap_start) 3411 unmap_mapping_range(m 2843 unmap_mapping_range(mapping, unmap_start, 3412 1 2844 1 + unmap_end - unmap_start, 0); 3413 shmem_truncate_range(inode, o 2845 shmem_truncate_range(inode, offset, offset + len - 1); 3414 /* No need to unmap again: ho 2846 /* No need to unmap again: hole-punching leaves COWed pages */ 3415 2847 3416 spin_lock(&inode->i_lock); 2848 spin_lock(&inode->i_lock); 3417 inode->i_private = NULL; 2849 inode->i_private = NULL; 3418 wake_up_all(&shmem_falloc_wai 2850 wake_up_all(&shmem_falloc_waitq); 3419 WARN_ON_ONCE(!list_empty(&shm !! 
2851 WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.task_list)); 3420 spin_unlock(&inode->i_lock); 2852 spin_unlock(&inode->i_lock); 3421 error = 0; 2853 error = 0; 3422 goto out; 2854 goto out; 3423 } 2855 } 3424 2856 3425 /* We need to check rlimit even when 2857 /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */ 3426 error = inode_newsize_ok(inode, offse 2858 error = inode_newsize_ok(inode, offset + len); 3427 if (error) 2859 if (error) 3428 goto out; 2860 goto out; 3429 2861 3430 if ((info->seals & F_SEAL_GROW) && of 2862 if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) { 3431 error = -EPERM; 2863 error = -EPERM; 3432 goto out; 2864 goto out; 3433 } 2865 } 3434 2866 3435 start = offset >> PAGE_SHIFT; 2867 start = offset >> PAGE_SHIFT; 3436 end = (offset + len + PAGE_SIZE - 1) 2868 end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT; 3437 /* Try to avoid a swapstorm if len is 2869 /* Try to avoid a swapstorm if len is impossible to satisfy */ 3438 if (sbinfo->max_blocks && end - start 2870 if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) { 3439 error = -ENOSPC; 2871 error = -ENOSPC; 3440 goto out; 2872 goto out; 3441 } 2873 } 3442 2874 3443 shmem_falloc.waitq = NULL; 2875 shmem_falloc.waitq = NULL; 3444 shmem_falloc.start = start; 2876 shmem_falloc.start = start; 3445 shmem_falloc.next = start; 2877 shmem_falloc.next = start; 3446 shmem_falloc.nr_falloced = 0; 2878 shmem_falloc.nr_falloced = 0; 3447 shmem_falloc.nr_unswapped = 0; 2879 shmem_falloc.nr_unswapped = 0; 3448 spin_lock(&inode->i_lock); 2880 spin_lock(&inode->i_lock); 3449 inode->i_private = &shmem_falloc; 2881 inode->i_private = &shmem_falloc; 3450 spin_unlock(&inode->i_lock); 2882 spin_unlock(&inode->i_lock); 3451 2883 3452 /* !! 2884 for (index = start; index < end; index++) { 3453 * info->fallocend is only relevant w !! 2885 struct page *page; 3454 * involved: to prevent split_huge_pa << 3455 * pages when FALLOC_FL_KEEP_SIZE com << 3456 */ << 3457 undo_fallocend = info->fallocend; << 3458 if (info->fallocend < end) << 3459 info->fallocend = end; << 3460 << 3461 for (index = start; index < end; ) { << 3462 struct folio *folio; << 3463 2886 3464 /* 2887 /* 3465 * Check for fatal signal so !! 2888 * Good, the fallocate(2) manpage permits EINTR: we may have 3466 * situations. We don't want !! 2889 * been interrupted because we are using up too much memory. 3467 * signals as large fallocate << 3468 * e.g. periodic timers may r << 3469 * restarting. << 3470 */ 2890 */ 3471 if (fatal_signal_pending(curr !! 2891 if (signal_pending(current)) 3472 error = -EINTR; 2892 error = -EINTR; 3473 else if (shmem_falloc.nr_unsw 2893 else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced) 3474 error = -ENOMEM; 2894 error = -ENOMEM; 3475 else 2895 else 3476 error = shmem_get_fol !! 2896 error = shmem_getpage(inode, index, &page, SGP_FALLOC); 3477 << 3478 if (error) { 2897 if (error) { 3479 info->fallocend = und !! 
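/*
 * [Illustrative sketch, not from shmem.c] The FALLOC_FL_PUNCH_HOLE
 * branch above is what services this from userspace: deallocate a
 * page-aligned range of a tmpfs file while keeping i_size, giving the
 * memory back to the system.  "/dev/shm/demo" is just an assumed tmpfs
 * path; the kernel requires KEEP_SIZE together with PUNCH_HOLE.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

int punch_demo(void)
{
	int fd = open("/dev/shm/demo", O_RDWR | O_CREAT, 0600);
	if (fd < 0)
		return -1;
	if (ftruncate(fd, 1 << 20) < 0)
		goto fail;
	/* Drop the first 64KiB of backing pages; i_size stays 1MiB. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      0, 64 * 1024) < 0)
		goto fail;
	close(fd);
	return 0;
fail:
	close(fd);
	return -1;
}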
2898 /* Remove the !PageUptodate pages we added */ 3480 /* Remove the !uptoda << 3481 if (index > start) { 2899 if (index > start) { 3482 shmem_undo_ra 2900 shmem_undo_range(inode, 3483 (loff_t)s 2901 (loff_t)start << PAGE_SHIFT, 3484 ((loff_t) 2902 ((loff_t)index << PAGE_SHIFT) - 1, true); 3485 } 2903 } 3486 goto undone; 2904 goto undone; 3487 } 2905 } 3488 2906 3489 /* 2907 /* 3490 * Here is a more important o << 3491 * a second SGP_FALLOC on the << 3492 * making it uptodate and un- << 3493 */ << 3494 index = folio_next_index(foli << 3495 /* Beware 32-bit wraparound * << 3496 if (!index) << 3497 index--; << 3498 << 3499 /* << 3500 * Inform shmem_writepage() h 2908 * Inform shmem_writepage() how far we have reached. 3501 * No need for lock or barrie 2909 * No need for lock or barrier: we have the page lock. 3502 */ 2910 */ 3503 if (!folio_test_uptodate(foli !! 2911 shmem_falloc.next++; 3504 shmem_falloc.nr_fallo !! 2912 if (!PageUptodate(page)) 3505 shmem_falloc.next = index; !! 2913 shmem_falloc.nr_falloced++; 3506 2914 3507 /* 2915 /* 3508 * If !uptodate, leave it tha !! 2916 * If !PageUptodate, leave it that way so that freeable pages 3509 * can be recognized if we ne 2917 * can be recognized if we need to rollback on error later. 3510 * But mark it dirty so that !! 2918 * But set_page_dirty so that memory pressure will swap rather 3511 * than free the folios we ar !! 2919 * than free the pages we are allocating (and SGP_CACHE pages 3512 * might still be clean: we n 2920 * might still be clean: we now need to mark those dirty too). 3513 */ 2921 */ 3514 folio_mark_dirty(folio); !! 2922 set_page_dirty(page); 3515 folio_unlock(folio); !! 2923 unlock_page(page); 3516 folio_put(folio); !! 2924 put_page(page); 3517 cond_resched(); 2925 cond_resched(); 3518 } 2926 } 3519 2927 3520 if (!(mode & FALLOC_FL_KEEP_SIZE) && 2928 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) 3521 i_size_write(inode, offset + 2929 i_size_write(inode, offset + len); >> 2930 inode->i_ctime = current_time(inode); 3522 undone: 2931 undone: 3523 spin_lock(&inode->i_lock); 2932 spin_lock(&inode->i_lock); 3524 inode->i_private = NULL; 2933 inode->i_private = NULL; 3525 spin_unlock(&inode->i_lock); 2934 spin_unlock(&inode->i_lock); 3526 out: 2935 out: 3527 if (!error) << 3528 file_modified(file); << 3529 inode_unlock(inode); 2936 inode_unlock(inode); 3530 return error; 2937 return error; 3531 } 2938 } 3532 2939 3533 static int shmem_statfs(struct dentry *dentry 2940 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf) 3534 { 2941 { 3535 struct shmem_sb_info *sbinfo = SHMEM_ 2942 struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb); 3536 2943 3537 buf->f_type = TMPFS_MAGIC; 2944 buf->f_type = TMPFS_MAGIC; 3538 buf->f_bsize = PAGE_SIZE; 2945 buf->f_bsize = PAGE_SIZE; 3539 buf->f_namelen = NAME_MAX; 2946 buf->f_namelen = NAME_MAX; 3540 if (sbinfo->max_blocks) { 2947 if (sbinfo->max_blocks) { 3541 buf->f_blocks = sbinfo->max_b 2948 buf->f_blocks = sbinfo->max_blocks; 3542 buf->f_bavail = 2949 buf->f_bavail = 3543 buf->f_bfree = sbinfo->max_b 2950 buf->f_bfree = sbinfo->max_blocks - 3544 percpu_counte 2951 percpu_counter_sum(&sbinfo->used_blocks); 3545 } 2952 } 3546 if (sbinfo->max_inodes) { 2953 if (sbinfo->max_inodes) { 3547 buf->f_files = sbinfo->max_in 2954 buf->f_files = sbinfo->max_inodes; 3548 buf->f_ffree = sbinfo->free_i !! 
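/*
 * [Illustrative sketch, not from shmem.c] As the loop above notes, the
 * fallocate(2) manpage permits -EINTR, and on failure the partially
 * allocated range is rolled back.  A careful caller therefore just
 * reissues the whole call; indices already allocated by a previous
 * attempt are found again cheaply.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>

int fallocate_retry(int fd, off_t offset, off_t len)
{
	int ret;

	do {
		ret = fallocate(fd, 0, offset, len);	/* mode 0: allocate */
	} while (ret < 0 && errno == EINTR);
	return ret;
}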
2955 buf->f_ffree = sbinfo->free_inodes; 3549 } 2956 } 3550 /* else leave those fields 0 like sim 2957 /* else leave those fields 0 like simple_statfs */ 3551 << 3552 buf->f_fsid = uuid_to_fsid(dentry->d_ << 3553 << 3554 return 0; 2958 return 0; 3555 } 2959 } 3556 2960 3557 /* 2961 /* 3558 * File creation. Allocate an inode, and we'r 2962 * File creation. Allocate an inode, and we're done.. 3559 */ 2963 */ 3560 static int 2964 static int 3561 shmem_mknod(struct mnt_idmap *idmap, struct i !! 2965 shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev) 3562 struct dentry *dentry, umode_t mo << 3563 { 2966 { 3564 struct inode *inode; 2967 struct inode *inode; 3565 int error; !! 2968 int error = -ENOSPC; 3566 2969 3567 inode = shmem_get_inode(idmap, dir->i !! 2970 inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE); 3568 if (IS_ERR(inode)) !! 2971 if (inode) { 3569 return PTR_ERR(inode); !! 2972 error = simple_acl_create(dir, inode); 3570 !! 2973 if (error) 3571 error = simple_acl_create(dir, inode) !! 2974 goto out_iput; 3572 if (error) !! 2975 error = security_inode_init_security(inode, dir, 3573 goto out_iput; !! 2976 &dentry->d_name, 3574 error = security_inode_init_security( !! 2977 shmem_initxattrs, NULL); 3575 !! 2978 if (error && error != -EOPNOTSUPP) 3576 if (error && error != -EOPNOTSUPP) !! 2979 goto out_iput; 3577 goto out_iput; << 3578 << 3579 error = simple_offset_add(shmem_get_o << 3580 if (error) << 3581 goto out_iput; << 3582 2980 3583 dir->i_size += BOGO_DIRENT_SIZE; !! 2981 error = 0; 3584 inode_set_mtime_to_ts(dir, inode_set_ !! 2982 dir->i_size += BOGO_DIRENT_SIZE; 3585 inode_inc_iversion(dir); !! 2983 dir->i_ctime = dir->i_mtime = current_time(dir); 3586 d_instantiate(dentry, inode); !! 2984 d_instantiate(dentry, inode); 3587 dget(dentry); /* Extra count - pin th !! 2985 dget(dentry); /* Extra count - pin the dentry in core */ >> 2986 } 3588 return error; 2987 return error; 3589 << 3590 out_iput: 2988 out_iput: 3591 iput(inode); 2989 iput(inode); 3592 return error; 2990 return error; 3593 } 2991 } 3594 2992 3595 static int 2993 static int 3596 shmem_tmpfile(struct mnt_idmap *idmap, struct !! 2994 shmem_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) 3597 struct file *file, umode_t mode << 3598 { 2995 { 3599 struct inode *inode; 2996 struct inode *inode; 3600 int error; !! 2997 int error = -ENOSPC; 3601 2998 3602 inode = shmem_get_inode(idmap, dir->i !! 2999 inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE); 3603 if (IS_ERR(inode)) { !! 3000 if (inode) { 3604 error = PTR_ERR(inode); !! 3001 error = security_inode_init_security(inode, dir, 3605 goto err_out; !! 3002 NULL, >> 3003 shmem_initxattrs, NULL); >> 3004 if (error && error != -EOPNOTSUPP) >> 3005 goto out_iput; >> 3006 error = simple_acl_create(dir, inode); >> 3007 if (error) >> 3008 goto out_iput; >> 3009 d_tmpfile(dentry, inode); 3606 } 3010 } 3607 error = security_inode_init_security( !! 3011 return error; 3608 << 3609 if (error && error != -EOPNOTSUPP) << 3610 goto out_iput; << 3611 error = simple_acl_create(dir, inode) << 3612 if (error) << 3613 goto out_iput; << 3614 d_tmpfile(file, inode); << 3615 << 3616 err_out: << 3617 return finish_open_simple(file, error << 3618 out_iput: 3012 out_iput: 3619 iput(inode); 3013 iput(inode); 3620 return error; 3014 return error; 3621 } 3015 } 3622 3016 3623 static int shmem_mkdir(struct mnt_idmap *idma !! 
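/*
 * [Illustrative sketch, not from shmem.c] How shmem_statfs() above looks
 * from userspace: f_blocks reflects the size= limit and f_bavail what is
 * left of it; both stay zero on an unlimited mount, as with
 * simple_statfs.  Assumes the given path is a size-limited tmpfs mount.
 */
#include <stdio.h>
#include <sys/statvfs.h>

int tmpfs_usage(const char *mnt)
{
	struct statvfs sv;

	if (statvfs(mnt, &sv) < 0)
		return -1;
	printf("%s: %llu of %llu blocks free (block size %lu)\n", mnt,
	       (unsigned long long)sv.f_bavail,
	       (unsigned long long)sv.f_blocks,
	       (unsigned long)sv.f_bsize);
	return 0;
}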
3017 static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) 3624 struct dentry *dentry, << 3625 { 3018 { 3626 int error; 3019 int error; 3627 3020 3628 error = shmem_mknod(idmap, dir, dentr !! 3021 if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0))) 3629 if (error) << 3630 return error; 3022 return error; 3631 inc_nlink(dir); 3023 inc_nlink(dir); 3632 return 0; 3024 return 0; 3633 } 3025 } 3634 3026 3635 static int shmem_create(struct mnt_idmap *idm !! 3027 static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode, 3636 struct dentry *dentry !! 3028 bool excl) 3637 { 3029 { 3638 return shmem_mknod(idmap, dir, dentry !! 3030 return shmem_mknod(dir, dentry, mode | S_IFREG, 0); 3639 } 3031 } 3640 3032 3641 /* 3033 /* 3642 * Link a file.. 3034 * Link a file.. 3643 */ 3035 */ 3644 static int shmem_link(struct dentry *old_dent !! 3036 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) 3645 struct dentry *dentry) << 3646 { 3037 { 3647 struct inode *inode = d_inode(old_den 3038 struct inode *inode = d_inode(old_dentry); 3648 int ret = 0; !! 3039 int ret; 3649 3040 3650 /* 3041 /* 3651 * No ordinary (disk based) filesyste 3042 * No ordinary (disk based) filesystem counts links as inodes; 3652 * but each new link needs a new dent 3043 * but each new link needs a new dentry, pinning lowmem, and 3653 * tmpfs dentries cannot be pruned un 3044 * tmpfs dentries cannot be pruned until they are unlinked. 3654 * But if an O_TMPFILE file is linked << 3655 * first link must skip that, to get << 3656 */ 3045 */ 3657 if (inode->i_nlink) { !! 3046 ret = shmem_reserve_inode(inode->i_sb); 3658 ret = shmem_reserve_inode(ino !! 3047 if (ret) 3659 if (ret) << 3660 goto out; << 3661 } << 3662 << 3663 ret = simple_offset_add(shmem_get_off << 3664 if (ret) { << 3665 if (inode->i_nlink) << 3666 shmem_free_inode(inod << 3667 goto out; 3048 goto out; 3668 } << 3669 3049 3670 dir->i_size += BOGO_DIRENT_SIZE; 3050 dir->i_size += BOGO_DIRENT_SIZE; 3671 inode_set_mtime_to_ts(dir, !! 3051 inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode); 3672 inode_set_ctime << 3673 inode_inc_iversion(dir); << 3674 inc_nlink(inode); 3052 inc_nlink(inode); 3675 ihold(inode); /* New dentry referen 3053 ihold(inode); /* New dentry reference */ 3676 dget(dentry); /* Extra pinning coun !! 3054 dget(dentry); /* Extra pinning count for the created dentry */ 3677 d_instantiate(dentry, inode); 3055 d_instantiate(dentry, inode); 3678 out: 3056 out: 3679 return ret; 3057 return ret; 3680 } 3058 } 3681 3059 3682 static int shmem_unlink(struct inode *dir, st 3060 static int shmem_unlink(struct inode *dir, struct dentry *dentry) 3683 { 3061 { 3684 struct inode *inode = d_inode(dentry) 3062 struct inode *inode = d_inode(dentry); 3685 3063 3686 if (inode->i_nlink > 1 && !S_ISDIR(in 3064 if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) 3687 shmem_free_inode(inode->i_sb, !! 3065 shmem_free_inode(inode->i_sb); 3688 << 3689 simple_offset_remove(shmem_get_offset << 3690 3066 3691 dir->i_size -= BOGO_DIRENT_SIZE; 3067 dir->i_size -= BOGO_DIRENT_SIZE; 3692 inode_set_mtime_to_ts(dir, !! 3068 inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode); 3693 inode_set_ctime << 3694 inode_inc_iversion(dir); << 3695 drop_nlink(inode); 3069 drop_nlink(inode); 3696 dput(dentry); /* Undo the count fro !! 
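/*
 * [Illustrative sketch, not from shmem.c] The link/unlink pair above is
 * plain POSIX behaviour; tmpfs merely adds the quirk that every extra
 * name pins a dentry in lowmem until unlinked.  Minimal check that
 * i_nlink moves as expected (both paths are assumed to exist on the same
 * tmpfs mount, and errors are ignored for brevity).
 */
#include <assert.h>
#include <sys/stat.h>
#include <unistd.h>

void nlink_demo(void)
{
	struct stat st;

	link("/dev/shm/a", "/dev/shm/b");	/* i_nlink: 1 -> 2 */
	stat("/dev/shm/a", &st);
	assert(st.st_nlink == 2);
	unlink("/dev/shm/b");			/* i_nlink: 2 -> 1 */
}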
3070 dput(dentry); /* Undo the count from "create" - this does all the work */ 3697 return 0; 3071 return 0; 3698 } 3072 } 3699 3073 3700 static int shmem_rmdir(struct inode *dir, str 3074 static int shmem_rmdir(struct inode *dir, struct dentry *dentry) 3701 { 3075 { 3702 if (!simple_offset_empty(dentry)) !! 3076 if (!simple_empty(dentry)) 3703 return -ENOTEMPTY; 3077 return -ENOTEMPTY; 3704 3078 3705 drop_nlink(d_inode(dentry)); 3079 drop_nlink(d_inode(dentry)); 3706 drop_nlink(dir); 3080 drop_nlink(dir); 3707 return shmem_unlink(dir, dentry); 3081 return shmem_unlink(dir, dentry); 3708 } 3082 } 3709 3083 3710 static int shmem_whiteout(struct mnt_idmap *i !! 3084 static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) 3711 struct inode *old_d !! 3085 { >> 3086 bool old_is_dir = d_is_dir(old_dentry); >> 3087 bool new_is_dir = d_is_dir(new_dentry); >> 3088 >> 3089 if (old_dir != new_dir && old_is_dir != new_is_dir) { >> 3090 if (old_is_dir) { >> 3091 drop_nlink(old_dir); >> 3092 inc_nlink(new_dir); >> 3093 } else { >> 3094 drop_nlink(new_dir); >> 3095 inc_nlink(old_dir); >> 3096 } >> 3097 } >> 3098 old_dir->i_ctime = old_dir->i_mtime = >> 3099 new_dir->i_ctime = new_dir->i_mtime = >> 3100 d_inode(old_dentry)->i_ctime = >> 3101 d_inode(new_dentry)->i_ctime = current_time(old_dir); >> 3102 >> 3103 return 0; >> 3104 } >> 3105 >> 3106 static int shmem_whiteout(struct inode *old_dir, struct dentry *old_dentry) 3712 { 3107 { 3713 struct dentry *whiteout; 3108 struct dentry *whiteout; 3714 int error; 3109 int error; 3715 3110 3716 whiteout = d_alloc(old_dentry->d_pare 3111 whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name); 3717 if (!whiteout) 3112 if (!whiteout) 3718 return -ENOMEM; 3113 return -ENOMEM; 3719 3114 3720 error = shmem_mknod(idmap, old_dir, w !! 3115 error = shmem_mknod(old_dir, whiteout, 3721 S_IFCHR | WHITEOU 3116 S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV); 3722 dput(whiteout); 3117 dput(whiteout); 3723 if (error) 3118 if (error) 3724 return error; 3119 return error; 3725 3120 3726 /* 3121 /* 3727 * Cheat and hash the whiteout while 3122 * Cheat and hash the whiteout while the old dentry is still in 3728 * place, instead of playing games wi 3123 * place, instead of playing games with FS_RENAME_DOES_D_MOVE. 3729 * 3124 * 3730 * d_lookup() will consistently find 3125 * d_lookup() will consistently find one of them at this point, 3731 * not sure which one, but that isn't 3126 * not sure which one, but that isn't even important. 3732 */ 3127 */ 3733 d_rehash(whiteout); 3128 d_rehash(whiteout); 3734 return 0; 3129 return 0; 3735 } 3130 } 3736 3131 3737 /* 3132 /* 3738 * The VFS layer already does all the dentry 3133 * The VFS layer already does all the dentry stuff for rename, 3739 * we just have to decrement the usage count 3134 * we just have to decrement the usage count for the target if 3740 * it exists so that the VFS layer correctly 3135 * it exists so that the VFS layer correctly free's it when it 3741 * gets overwritten. 3136 * gets overwritten. 3742 */ 3137 */ 3743 static int shmem_rename2(struct mnt_idmap *id !! 
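/*
 * [Illustrative sketch, not from shmem.c] tmpfs handles all three
 * rename2 flags dealt with above.  This swaps two paths atomically with
 * RENAME_EXCHANGE; it assumes glibc >= 2.28, where renameat2() and the
 * RENAME_* constants are exposed via <stdio.h> with _GNU_SOURCE (older
 * systems must go through syscall(SYS_renameat2, ...)).
 */
#define _GNU_SOURCE
#include <fcntl.h>	/* AT_FDCWD */
#include <stdio.h>

int swap_paths(const char *a, const char *b)
{
	if (renameat2(AT_FDCWD, a, AT_FDCWD, b, RENAME_EXCHANGE) < 0) {
		perror("renameat2");
		return -1;
	}
	return 0;
}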
3138 static int shmem_rename2(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) 3744 struct inode *old_di << 3745 struct inode *new_di << 3746 unsigned int flags) << 3747 { 3139 { 3748 struct inode *inode = d_inode(old_den 3140 struct inode *inode = d_inode(old_dentry); 3749 int they_are_dirs = S_ISDIR(inode->i_ 3141 int they_are_dirs = S_ISDIR(inode->i_mode); 3750 int error; << 3751 3142 3752 if (flags & ~(RENAME_NOREPLACE | RENA 3143 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) 3753 return -EINVAL; 3144 return -EINVAL; 3754 3145 3755 if (flags & RENAME_EXCHANGE) 3146 if (flags & RENAME_EXCHANGE) 3756 return simple_offset_rename_e !! 3147 return shmem_exchange(old_dir, old_dentry, new_dir, new_dentry); 3757 << 3758 3148 3759 if (!simple_offset_empty(new_dentry)) !! 3149 if (!simple_empty(new_dentry)) 3760 return -ENOTEMPTY; 3150 return -ENOTEMPTY; 3761 3151 3762 if (flags & RENAME_WHITEOUT) { 3152 if (flags & RENAME_WHITEOUT) { 3763 error = shmem_whiteout(idmap, !! 3153 int error; >> 3154 >> 3155 error = shmem_whiteout(old_dir, old_dentry); 3764 if (error) 3156 if (error) 3765 return error; 3157 return error; 3766 } 3158 } 3767 3159 3768 error = simple_offset_rename(old_dir, << 3769 if (error) << 3770 return error; << 3771 << 3772 if (d_really_is_positive(new_dentry)) 3160 if (d_really_is_positive(new_dentry)) { 3773 (void) shmem_unlink(new_dir, 3161 (void) shmem_unlink(new_dir, new_dentry); 3774 if (they_are_dirs) { 3162 if (they_are_dirs) { 3775 drop_nlink(d_inode(ne 3163 drop_nlink(d_inode(new_dentry)); 3776 drop_nlink(old_dir); 3164 drop_nlink(old_dir); 3777 } 3165 } 3778 } else if (they_are_dirs) { 3166 } else if (they_are_dirs) { 3779 drop_nlink(old_dir); 3167 drop_nlink(old_dir); 3780 inc_nlink(new_dir); 3168 inc_nlink(new_dir); 3781 } 3169 } 3782 3170 3783 old_dir->i_size -= BOGO_DIRENT_SIZE; 3171 old_dir->i_size -= BOGO_DIRENT_SIZE; 3784 new_dir->i_size += BOGO_DIRENT_SIZE; 3172 new_dir->i_size += BOGO_DIRENT_SIZE; 3785 simple_rename_timestamp(old_dir, old_ !! 3173 old_dir->i_ctime = old_dir->i_mtime = 3786 inode_inc_iversion(old_dir); !! 3174 new_dir->i_ctime = new_dir->i_mtime = 3787 inode_inc_iversion(new_dir); !! 3175 inode->i_ctime = current_time(old_dir); 3788 return 0; 3176 return 0; 3789 } 3177 } 3790 3178 3791 static int shmem_symlink(struct mnt_idmap *id !! 3179 static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname) 3792 struct dentry *dentr << 3793 { 3180 { 3794 int error; 3181 int error; 3795 int len; 3182 int len; 3796 struct inode *inode; 3183 struct inode *inode; 3797 struct folio *folio; !! 3184 struct page *page; >> 3185 struct shmem_inode_info *info; 3798 3186 3799 len = strlen(symname) + 1; 3187 len = strlen(symname) + 1; 3800 if (len > PAGE_SIZE) 3188 if (len > PAGE_SIZE) 3801 return -ENAMETOOLONG; 3189 return -ENAMETOOLONG; 3802 3190 3803 inode = shmem_get_inode(idmap, dir->i !! 3191 inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE); 3804 VM_NORESERVE) !! 3192 if (!inode) 3805 if (IS_ERR(inode)) !! 3193 return -ENOSPC; 3806 return PTR_ERR(inode); << 3807 3194 3808 error = security_inode_init_security( 3195 error = security_inode_init_security(inode, dir, &dentry->d_name, 3809 3196 shmem_initxattrs, NULL); 3810 if (error && error != -EOPNOTSUPP) !! 3197 if (error) { 3811 goto out_iput; !! 3198 if (error != -EOPNOTSUPP) { 3812 !! 3199 iput(inode); 3813 error = simple_offset_add(shmem_get_o !! 
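/*
 * [Illustrative sketch, not from shmem.c] Consequences of the length
 * checks in shmem_symlink() below: a target of up to SHORT_SYMLINK_LEN
 * (128) bytes is kmalloc'ed, a longer one lives in a swappable page, and
 * anything over PAGE_SIZE draws ENAMETOOLONG (in practice the VFS's
 * PATH_MAX check usually fires first).  The storage split is invisible
 * to userspace apart from memory behaviour.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

void symlink_limits(void)
{
	char target[8192];

	memset(target, 'x', sizeof(target) - 1);
	target[sizeof(target) - 1] = '\0';

	/* 8191-byte target: over PAGE_SIZE on 4K-page systems. */
	if (symlink(target, "/dev/shm/too-long") < 0 &&
	    errno == ENAMETOOLONG)
		puts("rejected as expected");

	target[100] = '\0';	/* 100 bytes: the short-symlink path */
	symlink(target, "/dev/shm/short");
}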
3200 return error; 3814 if (error) !! 3201 } 3815 goto out_iput; !! 3202 error = 0; >> 3203 } 3816 3204 >> 3205 info = SHMEM_I(inode); 3817 inode->i_size = len-1; 3206 inode->i_size = len-1; 3818 if (len <= SHORT_SYMLINK_LEN) { 3207 if (len <= SHORT_SYMLINK_LEN) { 3819 inode->i_link = kmemdup(symna 3208 inode->i_link = kmemdup(symname, len, GFP_KERNEL); 3820 if (!inode->i_link) { 3209 if (!inode->i_link) { 3821 error = -ENOMEM; !! 3210 iput(inode); 3822 goto out_remove_offse !! 3211 return -ENOMEM; 3823 } 3212 } 3824 inode->i_op = &shmem_short_sy 3213 inode->i_op = &shmem_short_symlink_operations; 3825 } else { 3214 } else { 3826 inode_nohighmem(inode); 3215 inode_nohighmem(inode); >> 3216 error = shmem_getpage(inode, 0, &page, SGP_WRITE); >> 3217 if (error) { >> 3218 iput(inode); >> 3219 return error; >> 3220 } 3827 inode->i_mapping->a_ops = &sh 3221 inode->i_mapping->a_ops = &shmem_aops; 3828 error = shmem_get_folio(inode << 3829 if (error) << 3830 goto out_remove_offse << 3831 inode->i_op = &shmem_symlink_ 3222 inode->i_op = &shmem_symlink_inode_operations; 3832 memcpy(folio_address(folio), !! 3223 memcpy(page_address(page), symname, len); 3833 folio_mark_uptodate(folio); !! 3224 SetPageUptodate(page); 3834 folio_mark_dirty(folio); !! 3225 set_page_dirty(page); 3835 folio_unlock(folio); !! 3226 unlock_page(page); 3836 folio_put(folio); !! 3227 put_page(page); 3837 } 3228 } 3838 dir->i_size += BOGO_DIRENT_SIZE; 3229 dir->i_size += BOGO_DIRENT_SIZE; 3839 inode_set_mtime_to_ts(dir, inode_set_ !! 3230 dir->i_ctime = dir->i_mtime = current_time(dir); 3840 inode_inc_iversion(dir); << 3841 d_instantiate(dentry, inode); 3231 d_instantiate(dentry, inode); 3842 dget(dentry); 3232 dget(dentry); 3843 return 0; 3233 return 0; 3844 << 3845 out_remove_offset: << 3846 simple_offset_remove(shmem_get_offset << 3847 out_iput: << 3848 iput(inode); << 3849 return error; << 3850 } 3234 } 3851 3235 3852 static void shmem_put_link(void *arg) 3236 static void shmem_put_link(void *arg) 3853 { 3237 { 3854 folio_mark_accessed(arg); !! 3238 mark_page_accessed(arg); 3855 folio_put(arg); !! 3239 put_page(arg); 3856 } 3240 } 3857 3241 3858 static const char *shmem_get_link(struct dent !! 3242 static const char *shmem_get_link(struct dentry *dentry, >> 3243 struct inode *inode, 3859 struct dela 3244 struct delayed_call *done) 3860 { 3245 { 3861 struct folio *folio = NULL; !! 3246 struct page *page = NULL; 3862 int error; 3247 int error; 3863 << 3864 if (!dentry) { 3248 if (!dentry) { 3865 folio = filemap_get_folio(ino !! 3249 page = find_get_page(inode->i_mapping, 0); 3866 if (IS_ERR(folio)) !! 3250 if (!page) 3867 return ERR_PTR(-ECHIL 3251 return ERR_PTR(-ECHILD); 3868 if (PageHWPoison(folio_page(f !! 3252 if (!PageUptodate(page)) { 3869 !folio_test_uptodate(foli !! 3253 put_page(page); 3870 folio_put(folio); << 3871 return ERR_PTR(-ECHIL 3254 return ERR_PTR(-ECHILD); 3872 } 3255 } 3873 } else { 3256 } else { 3874 error = shmem_get_folio(inode !! 3257 error = shmem_getpage(inode, 0, &page, SGP_READ); 3875 if (error) 3258 if (error) 3876 return ERR_PTR(error) 3259 return ERR_PTR(error); 3877 if (!folio) !! 3260 unlock_page(page); 3878 return ERR_PTR(-ECHIL << 3879 if (PageHWPoison(folio_page(f << 3880 folio_unlock(folio); << 3881 folio_put(folio); << 3882 return ERR_PTR(-ECHIL << 3883 } << 3884 folio_unlock(folio); << 3885 } 3261 } 3886 set_delayed_call(done, shmem_put_link !! 3262 set_delayed_call(done, shmem_put_link, page); 3887 return folio_address(folio); !! 
3263 return page_address(page); 3888 } 3264 } 3889 3265 3890 #ifdef CONFIG_TMPFS_XATTR 3266 #ifdef CONFIG_TMPFS_XATTR 3891 << 3892 static int shmem_fileattr_get(struct dentry * << 3893 { << 3894 struct shmem_inode_info *info = SHMEM << 3895 << 3896 fileattr_fill_flags(fa, info->fsflags << 3897 << 3898 return 0; << 3899 } << 3900 << 3901 static int shmem_fileattr_set(struct mnt_idma << 3902 struct dentry * << 3903 { << 3904 struct inode *inode = d_inode(dentry) << 3905 struct shmem_inode_info *info = SHMEM << 3906 << 3907 if (fileattr_has_fsx(fa)) << 3908 return -EOPNOTSUPP; << 3909 if (fa->flags & ~SHMEM_FL_USER_MODIFI << 3910 return -EOPNOTSUPP; << 3911 << 3912 info->fsflags = (info->fsflags & ~SHM << 3913 (fa->flags & SHMEM_FL_USER_MO << 3914 << 3915 shmem_set_inode_flags(inode, info->fs << 3916 inode_set_ctime_current(inode); << 3917 inode_inc_iversion(inode); << 3918 return 0; << 3919 } << 3920 << 3921 /* 3267 /* 3922 * Superblocks without xattr inode operations 3268 * Superblocks without xattr inode operations may get some security.* xattr 3923 * support from the LSM "for free". As soon a 3269 * support from the LSM "for free". As soon as we have any other xattrs 3924 * like ACLs, we also need to implement the s 3270 * like ACLs, we also need to implement the security.* handlers at 3925 * filesystem level, though. 3271 * filesystem level, though. 3926 */ 3272 */ 3927 3273 3928 /* 3274 /* 3929 * Callback for security_inode_init_security( 3275 * Callback for security_inode_init_security() for acquiring xattrs. 3930 */ 3276 */ 3931 static int shmem_initxattrs(struct inode *ino 3277 static int shmem_initxattrs(struct inode *inode, 3932 const struct xatt !! 3278 const struct xattr *xattr_array, >> 3279 void *fs_info) 3933 { 3280 { 3934 struct shmem_inode_info *info = SHMEM 3281 struct shmem_inode_info *info = SHMEM_I(inode); 3935 struct shmem_sb_info *sbinfo = SHMEM_ << 3936 const struct xattr *xattr; 3282 const struct xattr *xattr; 3937 struct simple_xattr *new_xattr; 3283 struct simple_xattr *new_xattr; 3938 size_t ispace = 0; << 3939 size_t len; 3284 size_t len; 3940 3285 3941 if (sbinfo->max_inodes) { << 3942 for (xattr = xattr_array; xat << 3943 ispace += simple_xatt << 3944 xattr->value_ << 3945 } << 3946 if (ispace) { << 3947 raw_spin_lock(&sbinfo << 3948 if (sbinfo->free_ispa << 3949 ispace = 0; << 3950 else << 3951 sbinfo->free_ << 3952 raw_spin_unlock(&sbin << 3953 if (!ispace) << 3954 return -ENOSP << 3955 } << 3956 } << 3957 << 3958 for (xattr = xattr_array; xattr->name 3286 for (xattr = xattr_array; xattr->name != NULL; xattr++) { 3959 new_xattr = simple_xattr_allo 3287 new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len); 3960 if (!new_xattr) 3288 if (!new_xattr) 3961 break; !! 3289 return -ENOMEM; 3962 3290 3963 len = strlen(xattr->name) + 1 3291 len = strlen(xattr->name) + 1; 3964 new_xattr->name = kmalloc(XAT 3292 new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len, 3965 GFP !! 3293 GFP_KERNEL); 3966 if (!new_xattr->name) { 3294 if (!new_xattr->name) { 3967 kvfree(new_xattr); !! 3295 kfree(new_xattr); 3968 break; !! 3296 return -ENOMEM; 3969 } 3297 } 3970 3298 3971 memcpy(new_xattr->name, XATTR 3299 memcpy(new_xattr->name, XATTR_SECURITY_PREFIX, 3972 XATTR_SECURITY_PREFIX_ 3300 XATTR_SECURITY_PREFIX_LEN); 3973 memcpy(new_xattr->name + XATT 3301 memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN, 3974 xattr->name, len); 3302 xattr->name, len); 3975 3303 3976 simple_xattr_add(&info->xattr !! 
3304 simple_xattr_list_add(&info->xattrs, new_xattr); 3977 } << 3978 << 3979 if (xattr->name != NULL) { << 3980 if (ispace) { << 3981 raw_spin_lock(&sbinfo << 3982 sbinfo->free_ispace + << 3983 raw_spin_unlock(&sbin << 3984 } << 3985 simple_xattrs_free(&info->xat << 3986 return -ENOMEM; << 3987 } 3305 } 3988 3306 3989 return 0; 3307 return 0; 3990 } 3308 } 3991 3309 3992 static int shmem_xattr_handler_get(const stru 3310 static int shmem_xattr_handler_get(const struct xattr_handler *handler, 3993 struct den 3311 struct dentry *unused, struct inode *inode, 3994 const char 3312 const char *name, void *buffer, size_t size) 3995 { 3313 { 3996 struct shmem_inode_info *info = SHMEM 3314 struct shmem_inode_info *info = SHMEM_I(inode); 3997 3315 3998 name = xattr_full_name(handler, name) 3316 name = xattr_full_name(handler, name); 3999 return simple_xattr_get(&info->xattrs 3317 return simple_xattr_get(&info->xattrs, name, buffer, size); 4000 } 3318 } 4001 3319 4002 static int shmem_xattr_handler_set(const stru 3320 static int shmem_xattr_handler_set(const struct xattr_handler *handler, 4003 struct mnt << 4004 struct den 3321 struct dentry *unused, struct inode *inode, 4005 const char 3322 const char *name, const void *value, 4006 size_t siz 3323 size_t size, int flags) 4007 { 3324 { 4008 struct shmem_inode_info *info = SHMEM 3325 struct shmem_inode_info *info = SHMEM_I(inode); 4009 struct shmem_sb_info *sbinfo = SHMEM_ << 4010 struct simple_xattr *old_xattr; << 4011 size_t ispace = 0; << 4012 3326 4013 name = xattr_full_name(handler, name) 3327 name = xattr_full_name(handler, name); 4014 if (value && sbinfo->max_inodes) { !! 3328 return simple_xattr_set(&info->xattrs, name, value, size, flags); 4015 ispace = simple_xattr_space(n << 4016 raw_spin_lock(&sbinfo->stat_l << 4017 if (sbinfo->free_ispace < isp << 4018 ispace = 0; << 4019 else << 4020 sbinfo->free_ispace - << 4021 raw_spin_unlock(&sbinfo->stat << 4022 if (!ispace) << 4023 return -ENOSPC; << 4024 } << 4025 << 4026 old_xattr = simple_xattr_set(&info->x << 4027 if (!IS_ERR(old_xattr)) { << 4028 ispace = 0; << 4029 if (old_xattr && sbinfo->max_ << 4030 ispace = simple_xattr << 4031 << 4032 simple_xattr_free(old_xattr); << 4033 old_xattr = NULL; << 4034 inode_set_ctime_current(inode << 4035 inode_inc_iversion(inode); << 4036 } << 4037 if (ispace) { << 4038 raw_spin_lock(&sbinfo->stat_l << 4039 sbinfo->free_ispace += ispace << 4040 raw_spin_unlock(&sbinfo->stat << 4041 } << 4042 return PTR_ERR(old_xattr); << 4043 } 3329 } 4044 3330 4045 static const struct xattr_handler shmem_secur 3331 static const struct xattr_handler shmem_security_xattr_handler = { 4046 .prefix = XATTR_SECURITY_PREFIX, 3332 .prefix = XATTR_SECURITY_PREFIX, 4047 .get = shmem_xattr_handler_get, 3333 .get = shmem_xattr_handler_get, 4048 .set = shmem_xattr_handler_set, 3334 .set = shmem_xattr_handler_set, 4049 }; 3335 }; 4050 3336 4051 static const struct xattr_handler shmem_trust 3337 static const struct xattr_handler shmem_trusted_xattr_handler = { 4052 .prefix = XATTR_TRUSTED_PREFIX, 3338 .prefix = XATTR_TRUSTED_PREFIX, 4053 .get = shmem_xattr_handler_get, 3339 .get = shmem_xattr_handler_get, 4054 .set = shmem_xattr_handler_set, 3340 .set = shmem_xattr_handler_set, 4055 }; 3341 }; 4056 3342 4057 static const struct xattr_handler shmem_user_ !! 3343 static const struct xattr_handler *shmem_xattr_handlers[] = { 4058 .prefix = XATTR_USER_PREFIX, !! 3344 #ifdef CONFIG_TMPFS_POSIX_ACL 4059 .get = shmem_xattr_handler_get, !! 
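/*
 * [Illustrative sketch, not from shmem.c] Exercising the handlers above
 * from userspace.  Note the two columns differ here: the newer code also
 * registers a "user." handler (charged against the inode space quota),
 * while the older one accepts only "security." and "trusted." prefixes
 * ("trusted." additionally requires CAP_SYS_ADMIN), so this fails with
 * EOPNOTSUPP on the older tmpfs.
 */
#include <stdio.h>
#include <sys/xattr.h>

int tag_file(const char *path)
{
	const char value[] = "demo";
	char buf[64];
	ssize_t n;

	if (setxattr(path, "user.note", value, sizeof(value), 0) < 0) {
		perror("setxattr");
		return -1;
	}
	n = getxattr(path, "user.note", buf, sizeof(buf));
	return n < 0 ? -1 : 0;
}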
3345 &posix_acl_access_xattr_handler, 4060 .set = shmem_xattr_handler_set, !! 3346 &posix_acl_default_xattr_handler, 4061 }; !! 3347 #endif 4062 << 4063 static const struct xattr_handler * const shm << 4064 &shmem_security_xattr_handler, 3348 &shmem_security_xattr_handler, 4065 &shmem_trusted_xattr_handler, 3349 &shmem_trusted_xattr_handler, 4066 &shmem_user_xattr_handler, << 4067 NULL 3350 NULL 4068 }; 3351 }; 4069 3352 4070 static ssize_t shmem_listxattr(struct dentry 3353 static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size) 4071 { 3354 { 4072 struct shmem_inode_info *info = SHMEM 3355 struct shmem_inode_info *info = SHMEM_I(d_inode(dentry)); 4073 return simple_xattr_list(d_inode(dent 3356 return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size); 4074 } 3357 } 4075 #endif /* CONFIG_TMPFS_XATTR */ 3358 #endif /* CONFIG_TMPFS_XATTR */ 4076 3359 4077 static const struct inode_operations shmem_sh 3360 static const struct inode_operations shmem_short_symlink_operations = { 4078 .getattr = shmem_getattr, << 4079 .setattr = shmem_setattr, << 4080 .get_link = simple_get_link, 3361 .get_link = simple_get_link, 4081 #ifdef CONFIG_TMPFS_XATTR 3362 #ifdef CONFIG_TMPFS_XATTR 4082 .listxattr = shmem_listxattr, 3363 .listxattr = shmem_listxattr, 4083 #endif 3364 #endif 4084 }; 3365 }; 4085 3366 4086 static const struct inode_operations shmem_sy 3367 static const struct inode_operations shmem_symlink_inode_operations = { 4087 .getattr = shmem_getattr, << 4088 .setattr = shmem_setattr, << 4089 .get_link = shmem_get_link, 3368 .get_link = shmem_get_link, 4090 #ifdef CONFIG_TMPFS_XATTR 3369 #ifdef CONFIG_TMPFS_XATTR 4091 .listxattr = shmem_listxattr, 3370 .listxattr = shmem_listxattr, 4092 #endif 3371 #endif 4093 }; 3372 }; 4094 3373 4095 static struct dentry *shmem_get_parent(struct 3374 static struct dentry *shmem_get_parent(struct dentry *child) 4096 { 3375 { 4097 return ERR_PTR(-ESTALE); 3376 return ERR_PTR(-ESTALE); 4098 } 3377 } 4099 3378 4100 static int shmem_match(struct inode *ino, voi 3379 static int shmem_match(struct inode *ino, void *vfh) 4101 { 3380 { 4102 __u32 *fh = vfh; 3381 __u32 *fh = vfh; 4103 __u64 inum = fh[2]; 3382 __u64 inum = fh[2]; 4104 inum = (inum << 32) | fh[1]; 3383 inum = (inum << 32) | fh[1]; 4105 return ino->i_ino == inum && fh[0] == 3384 return ino->i_ino == inum && fh[0] == ino->i_generation; 4106 } 3385 } 4107 3386 4108 /* Find any alias of inode, but prefer a hash << 4109 static struct dentry *shmem_find_alias(struct << 4110 { << 4111 struct dentry *alias = d_find_alias(i << 4112 << 4113 return alias ?: d_find_any_alias(inod << 4114 } << 4115 << 4116 static struct dentry *shmem_fh_to_dentry(stru 3387 static struct dentry *shmem_fh_to_dentry(struct super_block *sb, 4117 struct fid *fid, int fh_len, 3388 struct fid *fid, int fh_len, int fh_type) 4118 { 3389 { 4119 struct inode *inode; 3390 struct inode *inode; 4120 struct dentry *dentry = NULL; 3391 struct dentry *dentry = NULL; 4121 u64 inum; 3392 u64 inum; 4122 3393 4123 if (fh_len < 3) 3394 if (fh_len < 3) 4124 return NULL; 3395 return NULL; 4125 3396 4126 inum = fid->raw[2]; 3397 inum = fid->raw[2]; 4127 inum = (inum << 32) | fid->raw[1]; 3398 inum = (inum << 32) | fid->raw[1]; 4128 3399 4129 inode = ilookup5(sb, (unsigned long)( 3400 inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]), 4130 shmem_match, fid->raw 3401 shmem_match, fid->raw); 4131 if (inode) { 3402 if (inode) { 4132 dentry = shmem_find_alias(ino !! 
3403 dentry = d_find_alias(inode); 4133 iput(inode); 3404 iput(inode); 4134 } 3405 } 4135 3406 4136 return dentry; 3407 return dentry; 4137 } 3408 } 4138 3409 4139 static int shmem_encode_fh(struct inode *inod 3410 static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len, 4140 struct inode 3411 struct inode *parent) 4141 { 3412 { 4142 if (*len < 3) { 3413 if (*len < 3) { 4143 *len = 3; 3414 *len = 3; 4144 return FILEID_INVALID; 3415 return FILEID_INVALID; 4145 } 3416 } 4146 3417 4147 if (inode_unhashed(inode)) { 3418 if (inode_unhashed(inode)) { 4148 /* Unfortunately insert_inode 3419 /* Unfortunately insert_inode_hash is not idempotent, 4149 * so as we hash inodes here 3420 * so as we hash inodes here rather than at creation 4150 * time, we need a lock to en 3421 * time, we need a lock to ensure we only try 4151 * to do it once 3422 * to do it once 4152 */ 3423 */ 4153 static DEFINE_SPINLOCK(lock); 3424 static DEFINE_SPINLOCK(lock); 4154 spin_lock(&lock); 3425 spin_lock(&lock); 4155 if (inode_unhashed(inode)) 3426 if (inode_unhashed(inode)) 4156 __insert_inode_hash(i 3427 __insert_inode_hash(inode, 4157 i 3428 inode->i_ino + inode->i_generation); 4158 spin_unlock(&lock); 3429 spin_unlock(&lock); 4159 } 3430 } 4160 3431 4161 fh[0] = inode->i_generation; 3432 fh[0] = inode->i_generation; 4162 fh[1] = inode->i_ino; 3433 fh[1] = inode->i_ino; 4163 fh[2] = ((__u64)inode->i_ino) >> 32; 3434 fh[2] = ((__u64)inode->i_ino) >> 32; 4164 3435 4165 *len = 3; 3436 *len = 3; 4166 return 1; 3437 return 1; 4167 } 3438 } 4168 3439 4169 static const struct export_operations shmem_e 3440 static const struct export_operations shmem_export_ops = { 4170 .get_parent = shmem_get_parent, 3441 .get_parent = shmem_get_parent, 4171 .encode_fh = shmem_encode_fh, 3442 .encode_fh = shmem_encode_fh, 4172 .fh_to_dentry = shmem_fh_to_dentry, 3443 .fh_to_dentry = shmem_fh_to_dentry, 4173 }; 3444 }; 4174 3445 4175 enum shmem_param { !! 3446 static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo, 4176 Opt_gid, !! 
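/*
 * [Illustrative sketch, not from shmem.c] The export ops above are what
 * let userspace (and knfsd) pin a tmpfs inode by handle.  The 3-word
 * handle matches shmem_encode_fh(): generation plus a 64-bit ino.
 * open_by_handle_at(2) needs CAP_DAC_READ_SEARCH, and passing AT_FDCWD
 * as the mount fd assumes the caller's cwd is inside the same mount
 * (normally you would resolve the returned mount_id instead).
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>

int reopen_by_handle(const char *path)
{
	struct file_handle *fh;
	int mount_id, fd;

	fh = malloc(sizeof(*fh) + MAX_HANDLE_SZ);
	if (!fh)
		return -1;
	fh->handle_bytes = MAX_HANDLE_SZ;

	if (name_to_handle_at(AT_FDCWD, path, fh, &mount_id, 0) < 0) {
		free(fh);
		return -1;
	}
	fd = open_by_handle_at(AT_FDCWD, fh, O_RDONLY);
	free(fh);
	return fd;	/* stays valid even if path is unlinked meanwhile */
}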
3447 bool remount) 4177 Opt_huge, << 4178 Opt_mode, << 4179 Opt_mpol, << 4180 Opt_nr_blocks, << 4181 Opt_nr_inodes, << 4182 Opt_size, << 4183 Opt_uid, << 4184 Opt_inode32, << 4185 Opt_inode64, << 4186 Opt_noswap, << 4187 Opt_quota, << 4188 Opt_usrquota, << 4189 Opt_grpquota, << 4190 Opt_usrquota_block_hardlimit, << 4191 Opt_usrquota_inode_hardlimit, << 4192 Opt_grpquota_block_hardlimit, << 4193 Opt_grpquota_inode_hardlimit, << 4194 }; << 4195 << 4196 static const struct constant_table shmem_para << 4197 {"never", SHMEM_HUGE_NEVER }, << 4198 {"always", SHMEM_HUGE_ALWAYS }, << 4199 {"within_size", SHMEM_HUGE_WITHIN_SIZ << 4200 {"advise", SHMEM_HUGE_ADVISE }, << 4201 {} << 4202 }; << 4203 << 4204 const struct fs_parameter_spec shmem_fs_param << 4205 fsparam_gid ("gid", Opt_g << 4206 fsparam_enum ("huge", Opt_h << 4207 fsparam_u32oct("mode", Opt_m << 4208 fsparam_string("mpol", Opt_m << 4209 fsparam_string("nr_blocks", Opt_n << 4210 fsparam_string("nr_inodes", Opt_n << 4211 fsparam_string("size", Opt_s << 4212 fsparam_uid ("uid", Opt_u << 4213 fsparam_flag ("inode32", Opt_i << 4214 fsparam_flag ("inode64", Opt_i << 4215 fsparam_flag ("noswap", Opt_n << 4216 #ifdef CONFIG_TMPFS_QUOTA << 4217 fsparam_flag ("quota", Opt_q << 4218 fsparam_flag ("usrquota", Opt_u << 4219 fsparam_flag ("grpquota", Opt_g << 4220 fsparam_string("usrquota_block_hardli << 4221 fsparam_string("usrquota_inode_hardli << 4222 fsparam_string("grpquota_block_hardli << 4223 fsparam_string("grpquota_inode_hardli << 4224 #endif << 4225 {} << 4226 }; << 4227 << 4228 static int shmem_parse_one(struct fs_context << 4229 { << 4230 struct shmem_options *ctx = fc->fs_pr << 4231 struct fs_parse_result result; << 4232 unsigned long long size; << 4233 char *rest; << 4234 int opt; << 4235 kuid_t kuid; << 4236 kgid_t kgid; << 4237 << 4238 opt = fs_parse(fc, shmem_fs_parameter << 4239 if (opt < 0) << 4240 return opt; << 4241 << 4242 switch (opt) { << 4243 case Opt_size: << 4244 size = memparse(param->string << 4245 if (*rest == '%') { << 4246 size <<= PAGE_SHIFT; << 4247 size *= totalram_page << 4248 do_div(size, 100); << 4249 rest++; << 4250 } << 4251 if (*rest) << 4252 goto bad_value; << 4253 ctx->blocks = DIV_ROUND_UP(si << 4254 ctx->seen |= SHMEM_SEEN_BLOCK << 4255 break; << 4256 case Opt_nr_blocks: << 4257 ctx->blocks = memparse(param- << 4258 if (*rest || ctx->blocks > LO << 4259 goto bad_value; << 4260 ctx->seen |= SHMEM_SEEN_BLOCK << 4261 break; << 4262 case Opt_nr_inodes: << 4263 ctx->inodes = memparse(param- << 4264 if (*rest || ctx->inodes > UL << 4265 goto bad_value; << 4266 ctx->seen |= SHMEM_SEEN_INODE << 4267 break; << 4268 case Opt_mode: << 4269 ctx->mode = result.uint_32 & << 4270 break; << 4271 case Opt_uid: << 4272 kuid = result.uid; << 4273 << 4274 /* << 4275 * The requested uid must be << 4276 * filesystem's idmapping. << 4277 */ << 4278 if (!kuid_has_mapping(fc->use << 4279 goto bad_value; << 4280 << 4281 ctx->uid = kuid; << 4282 break; << 4283 case Opt_gid: << 4284 kgid = result.gid; << 4285 << 4286 /* << 4287 * The requested gid must be << 4288 * filesystem's idmapping. 
<< 4289 */ << 4290 if (!kgid_has_mapping(fc->use << 4291 goto bad_value; << 4292 << 4293 ctx->gid = kgid; << 4294 break; << 4295 case Opt_huge: << 4296 ctx->huge = result.uint_32; << 4297 if (ctx->huge != SHMEM_HUGE_N << 4298 !(IS_ENABLED(CONFIG_TRANS << 4299 has_transparent_hugepag << 4300 goto unsupported_para << 4301 ctx->seen |= SHMEM_SEEN_HUGE; << 4302 break; << 4303 case Opt_mpol: << 4304 if (IS_ENABLED(CONFIG_NUMA)) << 4305 mpol_put(ctx->mpol); << 4306 ctx->mpol = NULL; << 4307 if (mpol_parse_str(pa << 4308 goto bad_valu << 4309 break; << 4310 } << 4311 goto unsupported_parameter; << 4312 case Opt_inode32: << 4313 ctx->full_inums = false; << 4314 ctx->seen |= SHMEM_SEEN_INUMS << 4315 break; << 4316 case Opt_inode64: << 4317 if (sizeof(ino_t) < 8) { << 4318 return invalfc(fc, << 4319 "Canno << 4320 } << 4321 ctx->full_inums = true; << 4322 ctx->seen |= SHMEM_SEEN_INUMS << 4323 break; << 4324 case Opt_noswap: << 4325 if ((fc->user_ns != &init_use << 4326 return invalfc(fc, << 4327 "Turni << 4328 } << 4329 ctx->noswap = true; << 4330 ctx->seen |= SHMEM_SEEN_NOSWA << 4331 break; << 4332 case Opt_quota: << 4333 if (fc->user_ns != &init_user << 4334 return invalfc(fc, "Q << 4335 ctx->seen |= SHMEM_SEEN_QUOTA << 4336 ctx->quota_types |= (QTYPE_MA << 4337 break; << 4338 case Opt_usrquota: << 4339 if (fc->user_ns != &init_user << 4340 return invalfc(fc, "Q << 4341 ctx->seen |= SHMEM_SEEN_QUOTA << 4342 ctx->quota_types |= QTYPE_MAS << 4343 break; << 4344 case Opt_grpquota: << 4345 if (fc->user_ns != &init_user << 4346 return invalfc(fc, "Q << 4347 ctx->seen |= SHMEM_SEEN_QUOTA << 4348 ctx->quota_types |= QTYPE_MAS << 4349 break; << 4350 case Opt_usrquota_block_hardlimit: << 4351 size = memparse(param->string << 4352 if (*rest || !size) << 4353 goto bad_value; << 4354 if (size > SHMEM_QUOTA_MAX_SP << 4355 return invalfc(fc, << 4356 "User << 4357 ctx->qlimits.usrquota_bhardli << 4358 break; << 4359 case Opt_grpquota_block_hardlimit: << 4360 size = memparse(param->string << 4361 if (*rest || !size) << 4362 goto bad_value; << 4363 if (size > SHMEM_QUOTA_MAX_SP << 4364 return invalfc(fc, << 4365 "Group << 4366 ctx->qlimits.grpquota_bhardli << 4367 break; << 4368 case Opt_usrquota_inode_hardlimit: << 4369 size = memparse(param->string << 4370 if (*rest || !size) << 4371 goto bad_value; << 4372 if (size > SHMEM_QUOTA_MAX_IN << 4373 return invalfc(fc, << 4374 "User << 4375 ctx->qlimits.usrquota_ihardli << 4376 break; << 4377 case Opt_grpquota_inode_hardlimit: << 4378 size = memparse(param->string << 4379 if (*rest || !size) << 4380 goto bad_value; << 4381 if (size > SHMEM_QUOTA_MAX_IN << 4382 return invalfc(fc, << 4383 "Group << 4384 ctx->qlimits.grpquota_ihardli << 4385 break; << 4386 } << 4387 return 0; << 4388 << 4389 unsupported_parameter: << 4390 return invalfc(fc, "Unsupported param << 4391 bad_value: << 4392 return invalfc(fc, "Bad value for '%s << 4393 } << 4394 << 4395 static int shmem_parse_options(struct fs_cont << 4396 { 3448 { 4397 char *options = data; !! 3449 char *this_char, *value, *rest; 4398 !! 3450 struct mempolicy *mpol = NULL; 4399 if (options) { !! 3451 uid_t uid; 4400 int err = security_sb_eat_lsm !! 3452 gid_t gid; 4401 if (err) << 4402 return err; << 4403 } << 4404 3453 4405 while (options != NULL) { 3454 while (options != NULL) { 4406 char *this_char = options; !! 
3455 this_char = options; 4407 for (;;) { 3456 for (;;) { 4408 /* 3457 /* 4409 * NUL-terminate this 3458 * NUL-terminate this option: unfortunately, 4410 * mount options form 3459 * mount options form a comma-separated list, 4411 * but mpol's nodelis 3460 * but mpol's nodelist may also contain commas. 4412 */ 3461 */ 4413 options = strchr(opti 3462 options = strchr(options, ','); 4414 if (options == NULL) 3463 if (options == NULL) 4415 break; 3464 break; 4416 options++; 3465 options++; 4417 if (!isdigit(*options 3466 if (!isdigit(*options)) { 4418 options[-1] = 3467 options[-1] = '\0'; 4419 break; 3468 break; 4420 } 3469 } 4421 } 3470 } 4422 if (*this_char) { !! 3471 if (!*this_char) 4423 char *value = strchr( !! 3472 continue; 4424 size_t len = 0; !! 3473 if ((value = strchr(this_char,'=')) != NULL) { 4425 int err; !! 3474 *value++ = 0; 4426 !! 3475 } else { 4427 if (value) { !! 3476 pr_err("tmpfs: No value for mount option '%s'\n", 4428 *value++ = '\ !! 3477 this_char); 4429 len = strlen( !! 3478 goto error; >> 3479 } >> 3480 >> 3481 if (!strcmp(this_char,"size")) { >> 3482 unsigned long long size; >> 3483 size = memparse(value,&rest); >> 3484 if (*rest == '%') { >> 3485 size <<= PAGE_SHIFT; >> 3486 size *= totalram_pages; >> 3487 do_div(size, 100); >> 3488 rest++; 4430 } 3489 } 4431 err = vfs_parse_fs_st !! 3490 if (*rest) 4432 if (err < 0) !! 3491 goto bad_val; 4433 return err; !! 3492 sbinfo->max_blocks = >> 3493 DIV_ROUND_UP(size, PAGE_SIZE); >> 3494 } else if (!strcmp(this_char,"nr_blocks")) { >> 3495 sbinfo->max_blocks = memparse(value, &rest); >> 3496 if (*rest) >> 3497 goto bad_val; >> 3498 } else if (!strcmp(this_char,"nr_inodes")) { >> 3499 sbinfo->max_inodes = memparse(value, &rest); >> 3500 if (*rest) >> 3501 goto bad_val; >> 3502 } else if (!strcmp(this_char,"mode")) { >> 3503 if (remount) >> 3504 continue; >> 3505 sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777; >> 3506 if (*rest) >> 3507 goto bad_val; >> 3508 } else if (!strcmp(this_char,"uid")) { >> 3509 if (remount) >> 3510 continue; >> 3511 uid = simple_strtoul(value, &rest, 0); >> 3512 if (*rest) >> 3513 goto bad_val; >> 3514 sbinfo->uid = make_kuid(current_user_ns(), uid); >> 3515 if (!uid_valid(sbinfo->uid)) >> 3516 goto bad_val; >> 3517 } else if (!strcmp(this_char,"gid")) { >> 3518 if (remount) >> 3519 continue; >> 3520 gid = simple_strtoul(value, &rest, 0); >> 3521 if (*rest) >> 3522 goto bad_val; >> 3523 sbinfo->gid = make_kgid(current_user_ns(), gid); >> 3524 if (!gid_valid(sbinfo->gid)) >> 3525 goto bad_val; >> 3526 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE >> 3527 } else if (!strcmp(this_char, "huge")) { >> 3528 int huge; >> 3529 huge = shmem_parse_huge(value); >> 3530 if (huge < 0) >> 3531 goto bad_val; >> 3532 if (!has_transparent_hugepage() && >> 3533 huge != SHMEM_HUGE_NEVER) >> 3534 goto bad_val; >> 3535 sbinfo->huge = huge; >> 3536 #endif >> 3537 #ifdef CONFIG_NUMA >> 3538 } else if (!strcmp(this_char,"mpol")) { >> 3539 mpol_put(mpol); >> 3540 mpol = NULL; >> 3541 if (mpol_parse_str(value, &mpol)) >> 3542 goto bad_val; >> 3543 #endif >> 3544 } else { >> 3545 pr_err("tmpfs: Bad mount option %s\n", this_char); >> 3546 goto error; 4434 } 3547 } 4435 } 3548 } >> 3549 sbinfo->mpol = mpol; 4436 return 0; 3550 return 0; >> 3551 >> 3552 bad_val: >> 3553 pr_err("tmpfs: Bad value '%s' for mount option '%s'\n", >> 3554 value, this_char); >> 3555 error: >> 3556 mpol_put(mpol); >> 3557 return 1; >> 3558 4437 } 3559 } 4438 3560 4439 /* !! 
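/*
 * [Illustrative sketch, not from shmem.c] The option string parsed above
 * comes straight from mount(2)'s data argument; this is equivalent to
 *	mount -t tmpfs -o size=50%,nr_inodes=8k,mode=1777 tmpfs /mnt
 * and exercises the '%' branch of size= as well as memparse()'s k/m/g
 * suffixes.  Needs CAP_SYS_ADMIN.
 */
#include <sys/mount.h>

int mount_tmpfs(const char *target)
{
	return mount("tmpfs", target, "tmpfs", 0,
		     "size=50%,nr_inodes=8k,mode=1777");
}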
3561 static int shmem_remount_fs(struct super_block *sb, int *flags, char *data) 4440 * Reconfigure a shmem filesystem. << 4441 */ << 4442 static int shmem_reconfigure(struct fs_contex << 4443 { 3562 { 4444 struct shmem_options *ctx = fc->fs_pr !! 3563 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 4445 struct shmem_sb_info *sbinfo = SHMEM_ !! 3564 struct shmem_sb_info config = *sbinfo; 4446 unsigned long used_isp; !! 3565 unsigned long inodes; 4447 struct mempolicy *mpol = NULL; !! 3566 int error = -EINVAL; 4448 const char *err; << 4449 << 4450 raw_spin_lock(&sbinfo->stat_lock); << 4451 used_isp = sbinfo->max_inodes * BOGO_ << 4452 3567 4453 if ((ctx->seen & SHMEM_SEEN_BLOCKS) & !! 3568 config.mpol = NULL; 4454 if (!sbinfo->max_blocks) { !! 3569 if (shmem_parse_options(data, &config, true)) 4455 err = "Cannot retroac !! 3570 return error; 4456 goto out; << 4457 } << 4458 if (percpu_counter_compare(&s << 4459 ct << 4460 err = "Too small a si << 4461 goto out; << 4462 } << 4463 } << 4464 if ((ctx->seen & SHMEM_SEEN_INODES) & << 4465 if (!sbinfo->max_inodes) { << 4466 err = "Cannot retroac << 4467 goto out; << 4468 } << 4469 if (ctx->inodes * BOGO_INODE_ << 4470 err = "Too few inodes << 4471 goto out; << 4472 } << 4473 } << 4474 3571 4475 if ((ctx->seen & SHMEM_SEEN_INUMS) && !! 3572 spin_lock(&sbinfo->stat_lock); 4476 sbinfo->next_ino > UINT_MAX) { !! 3573 inodes = sbinfo->max_inodes - sbinfo->free_inodes; 4477 err = "Current inum too high !! 3574 if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0) 4478 goto out; << 4479 } << 4480 if ((ctx->seen & SHMEM_SEEN_NOSWAP) & << 4481 err = "Cannot disable swap on << 4482 goto out; 3575 goto out; 4483 } !! 3576 if (config.max_inodes < inodes) 4484 if (!(ctx->seen & SHMEM_SEEN_NOSWAP) << 4485 err = "Cannot enable swap on << 4486 goto out; 3577 goto out; 4487 } !! 3578 /* 4488 !! 3579 * Those tests disallow limited->unlimited while any are in use; 4489 if (ctx->seen & SHMEM_SEEN_QUOTA && !! 3580 * but we must separately disallow unlimited->limited, because 4490 !sb_any_quota_loaded(fc->root->d_ !! 3581 * in that case we have no record of how much is already in use. 4491 err = "Cannot enable quota on !! 3582 */ >> 3583 if (config.max_blocks && !sbinfo->max_blocks) 4492 goto out; 3584 goto out; 4493 } !! 3585 if (config.max_inodes && !sbinfo->max_inodes) 4494 << 4495 #ifdef CONFIG_TMPFS_QUOTA << 4496 #define CHANGED_LIMIT(name) << 4497 (ctx->qlimits.name## hardlimit && << 4498 (ctx->qlimits.name## hardlimit != sbi << 4499 << 4500 if (CHANGED_LIMIT(usrquota_b) || CHAN << 4501 CHANGED_LIMIT(grpquota_b) || CHAN << 4502 err = "Cannot change global q << 4503 goto out; 3586 goto out; 4504 } << 4505 #endif /* CONFIG_TMPFS_QUOTA */ << 4506 3587 4507 if (ctx->seen & SHMEM_SEEN_HUGE) !! 3588 error = 0; 4508 sbinfo->huge = ctx->huge; !! 3589 sbinfo->huge = config.huge; 4509 if (ctx->seen & SHMEM_SEEN_INUMS) !! 3590 sbinfo->max_blocks = config.max_blocks; 4510 sbinfo->full_inums = ctx->ful !! 3591 sbinfo->max_inodes = config.max_inodes; 4511 if (ctx->seen & SHMEM_SEEN_BLOCKS) !! 3592 sbinfo->free_inodes = config.max_inodes - inodes; 4512 sbinfo->max_blocks = ctx->bl << 4513 if (ctx->seen & SHMEM_SEEN_INODES) { << 4514 sbinfo->max_inodes = ctx->in << 4515 sbinfo->free_ispace = ctx->in << 4516 } << 4517 3593 4518 /* 3594 /* 4519 * Preserve previous mempolicy unless 3595 * Preserve previous mempolicy unless mpol remount option was specified. 4520 */ 3596 */ 4521 if (ctx->mpol) { !! 3597 if (config.mpol) { 4522 mpol = sbinfo->mpol; !! 
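/*
 * [Illustrative sketch, not from shmem.c] The remount path above refuses
 * to shrink a limit below current usage and refuses to cross between
 * limited and unlimited while the mount is in use (there is no usage
 * record to carry across); all of these come back as EINVAL.  Growing an
 * existing limit is always safe:
 */
#include <sys/mount.h>

int grow_tmpfs(const char *target)
{
	/* EINVAL if "2g" is below what the mount already has in use. */
	return mount("none", target, "tmpfs", MS_REMOUNT, "size=2g");
}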
3598 mpol_put(sbinfo->mpol); 4523 sbinfo->mpol = ctx->mpol; !! 3599 sbinfo->mpol = config.mpol; /* transfers initial ref */ 4524 ctx->mpol = NULL; << 4525 } 3600 } 4526 << 4527 if (ctx->noswap) << 4528 sbinfo->noswap = true; << 4529 << 4530 raw_spin_unlock(&sbinfo->stat_lock); << 4531 mpol_put(mpol); << 4532 return 0; << 4533 out: 3601 out: 4534 raw_spin_unlock(&sbinfo->stat_lock); !! 3602 spin_unlock(&sbinfo->stat_lock); 4535 return invalfc(fc, "%s", err); !! 3603 return error; 4536 } 3604 } 4537 3605 4538 static int shmem_show_options(struct seq_file 3606 static int shmem_show_options(struct seq_file *seq, struct dentry *root) 4539 { 3607 { 4540 struct shmem_sb_info *sbinfo = SHMEM_ 3608 struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb); 4541 struct mempolicy *mpol; << 4542 3609 4543 if (sbinfo->max_blocks != shmem_defau 3610 if (sbinfo->max_blocks != shmem_default_max_blocks()) 4544 seq_printf(seq, ",size=%luk", !! 3611 seq_printf(seq, ",size=%luk", >> 3612 sbinfo->max_blocks << (PAGE_SHIFT - 10)); 4545 if (sbinfo->max_inodes != shmem_defau 3613 if (sbinfo->max_inodes != shmem_default_max_inodes()) 4546 seq_printf(seq, ",nr_inodes=% 3614 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes); 4547 if (sbinfo->mode != (0777 | S_ISVTX)) !! 3615 if (sbinfo->mode != (S_IRWXUGO | S_ISVTX)) 4548 seq_printf(seq, ",mode=%03ho" 3616 seq_printf(seq, ",mode=%03ho", sbinfo->mode); 4549 if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_ 3617 if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID)) 4550 seq_printf(seq, ",uid=%u", 3618 seq_printf(seq, ",uid=%u", 4551 from_kuid_mun 3619 from_kuid_munged(&init_user_ns, sbinfo->uid)); 4552 if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_ 3620 if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID)) 4553 seq_printf(seq, ",gid=%u", 3621 seq_printf(seq, ",gid=%u", 4554 from_kgid_mun 3622 from_kgid_munged(&init_user_ns, sbinfo->gid)); 4555 !! 3623 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE 4556 /* << 4557 * Showing inode{64,32} might be usef << 4558 * since then people don't have to re << 4559 * /proc/config.gz to confirm 64-bit << 4560 * (which may not even exist if IKCON << 4561 * << 4562 * We hide it when inode64 isn't the << 4563 * inodes, since that probably just m << 4564 * consideration. << 4565 * << 4566 * As such: << 4567 * << 4568 * +------------- << 4569 * | TMPFS_INODE6 << 4570 * +------------------+------------- << 4571 * | full_inums=true | show << 4572 * | full_inums=false | show << 4573 * +------------------+------------- << 4574 * << 4575 */ << 4576 if (IS_ENABLED(CONFIG_TMPFS_INODE64) << 4577 seq_printf(seq, ",inode%d", ( << 4578 #ifdef CONFIG_TRANSPARENT_HUGEPAGE << 4579 /* Rightly or wrongly, show huge moun 3624 /* Rightly or wrongly, show huge mount option unmasked by shmem_huge */ 4580 if (sbinfo->huge) 3625 if (sbinfo->huge) 4581 seq_printf(seq, ",huge=%s", s 3626 seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge)); 4582 #endif 3627 #endif 4583 mpol = shmem_get_sbmpol(sbinfo); !! 
3628 shmem_show_mpol(seq, sbinfo->mpol); 4584 shmem_show_mpol(seq, mpol); << 4585 mpol_put(mpol); << 4586 if (sbinfo->noswap) << 4587 seq_printf(seq, ",noswap"); << 4588 #ifdef CONFIG_TMPFS_QUOTA << 4589 if (sb_has_quota_active(root->d_sb, U << 4590 seq_printf(seq, ",usrquota"); << 4591 if (sb_has_quota_active(root->d_sb, G << 4592 seq_printf(seq, ",grpquota"); << 4593 if (sbinfo->qlimits.usrquota_bhardlim << 4594 seq_printf(seq, ",usrquota_bl << 4595 sbinfo->qlimits.us << 4596 if (sbinfo->qlimits.grpquota_bhardlim << 4597 seq_printf(seq, ",grpquota_bl << 4598 sbinfo->qlimits.gr << 4599 if (sbinfo->qlimits.usrquota_ihardlim << 4600 seq_printf(seq, ",usrquota_in << 4601 sbinfo->qlimits.us << 4602 if (sbinfo->qlimits.grpquota_ihardlim << 4603 seq_printf(seq, ",grpquota_in << 4604 sbinfo->qlimits.gr << 4605 #endif << 4606 return 0; 3629 return 0; 4607 } 3630 } 4608 3631 >> 3632 #define MFD_NAME_PREFIX "memfd:" >> 3633 #define MFD_NAME_PREFIX_LEN (sizeof(MFD_NAME_PREFIX) - 1) >> 3634 #define MFD_NAME_MAX_LEN (NAME_MAX - MFD_NAME_PREFIX_LEN) >> 3635 >> 3636 #define MFD_ALL_FLAGS (MFD_CLOEXEC | MFD_ALLOW_SEALING) >> 3637 >> 3638 SYSCALL_DEFINE2(memfd_create, >> 3639 const char __user *, uname, >> 3640 unsigned int, flags) >> 3641 { >> 3642 struct shmem_inode_info *info; >> 3643 struct file *file; >> 3644 int fd, error; >> 3645 char *name; >> 3646 long len; >> 3647 >> 3648 if (flags & ~(unsigned int)MFD_ALL_FLAGS) >> 3649 return -EINVAL; >> 3650 >> 3651 /* length includes terminating zero */ >> 3652 len = strnlen_user(uname, MFD_NAME_MAX_LEN + 1); >> 3653 if (len <= 0) >> 3654 return -EFAULT; >> 3655 if (len > MFD_NAME_MAX_LEN + 1) >> 3656 return -EINVAL; >> 3657 >> 3658 name = kmalloc(len + MFD_NAME_PREFIX_LEN, GFP_TEMPORARY); >> 3659 if (!name) >> 3660 return -ENOMEM; >> 3661 >> 3662 strcpy(name, MFD_NAME_PREFIX); >> 3663 if (copy_from_user(&name[MFD_NAME_PREFIX_LEN], uname, len)) { >> 3664 error = -EFAULT; >> 3665 goto err_name; >> 3666 } >> 3667 >> 3668 /* terminating-zero may have changed after strnlen_user() returned */ >> 3669 if (name[len + MFD_NAME_PREFIX_LEN - 1]) { >> 3670 error = -EFAULT; >> 3671 goto err_name; >> 3672 } >> 3673 >> 3674 fd = get_unused_fd_flags((flags & MFD_CLOEXEC) ? 
O_CLOEXEC : 0); >> 3675 if (fd < 0) { >> 3676 error = fd; >> 3677 goto err_name; >> 3678 } >> 3679 >> 3680 file = shmem_file_setup(name, 0, VM_NORESERVE); >> 3681 if (IS_ERR(file)) { >> 3682 error = PTR_ERR(file); >> 3683 goto err_fd; >> 3684 } >> 3685 info = SHMEM_I(file_inode(file)); >> 3686 file->f_mode |= FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE; >> 3687 file->f_flags |= O_RDWR | O_LARGEFILE; >> 3688 if (flags & MFD_ALLOW_SEALING) >> 3689 info->seals &= ~F_SEAL_SEAL; >> 3690 >> 3691 fd_install(fd, file); >> 3692 kfree(name); >> 3693 return fd; >> 3694 >> 3695 err_fd: >> 3696 put_unused_fd(fd); >> 3697 err_name: >> 3698 kfree(name); >> 3699 return error; >> 3700 } >> 3701 4609 #endif /* CONFIG_TMPFS */ 3702 #endif /* CONFIG_TMPFS */ 4610 3703 4611 static void shmem_put_super(struct super_bloc 3704 static void shmem_put_super(struct super_block *sb) 4612 { 3705 { 4613 struct shmem_sb_info *sbinfo = SHMEM_ 3706 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 4614 3707 4615 #ifdef CONFIG_TMPFS_QUOTA << 4616 shmem_disable_quotas(sb); << 4617 #endif << 4618 free_percpu(sbinfo->ino_batch); << 4619 percpu_counter_destroy(&sbinfo->used_ 3708 percpu_counter_destroy(&sbinfo->used_blocks); 4620 mpol_put(sbinfo->mpol); 3709 mpol_put(sbinfo->mpol); 4621 kfree(sbinfo); 3710 kfree(sbinfo); 4622 sb->s_fs_info = NULL; 3711 sb->s_fs_info = NULL; 4623 } 3712 } 4624 3713 4625 static int shmem_fill_super(struct super_bloc !! 3714 int shmem_fill_super(struct super_block *sb, void *data, int silent) 4626 { 3715 { 4627 struct shmem_options *ctx = fc->fs_pr << 4628 struct inode *inode; 3716 struct inode *inode; 4629 struct shmem_sb_info *sbinfo; 3717 struct shmem_sb_info *sbinfo; 4630 int error = -ENOMEM; !! 3718 int err = -ENOMEM; 4631 3719 4632 /* Round up to L1_CACHE_BYTES to resi 3720 /* Round up to L1_CACHE_BYTES to resist false sharing */ 4633 sbinfo = kzalloc(max((int)sizeof(stru 3721 sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info), 4634 L1_CACHE_BYTE 3722 L1_CACHE_BYTES), GFP_KERNEL); 4635 if (!sbinfo) 3723 if (!sbinfo) 4636 return error; !! 3724 return -ENOMEM; 4637 3725 >> 3726 sbinfo->mode = S_IRWXUGO | S_ISVTX; >> 3727 sbinfo->uid = current_fsuid(); >> 3728 sbinfo->gid = current_fsgid(); 4638 sb->s_fs_info = sbinfo; 3729 sb->s_fs_info = sbinfo; 4639 3730 4640 #ifdef CONFIG_TMPFS 3731 #ifdef CONFIG_TMPFS 4641 /* 3732 /* 4642 * Per default we only allow half of 3733 * Per default we only allow half of the physical ram per 4643 * tmpfs instance, limiting inodes to 3734 * tmpfs instance, limiting inodes to one per page of lowmem; 4644 * but the internal instance is left 3735 * but the internal instance is left unlimited. 4645 */ 3736 */ 4646 if (!(sb->s_flags & SB_KERNMOUNT)) { !! 3737 if (!(sb->s_flags & MS_KERNMOUNT)) { 4647 if (!(ctx->seen & SHMEM_SEEN_ !! 3738 sbinfo->max_blocks = shmem_default_max_blocks(); 4648 ctx->blocks = shmem_d !! 3739 sbinfo->max_inodes = shmem_default_max_inodes(); 4649 if (!(ctx->seen & SHMEM_SEEN_ !! 3740 if (shmem_parse_options(data, sbinfo, false)) { 4650 ctx->inodes = shmem_d !! 3741 err = -EINVAL; 4651 if (!(ctx->seen & SHMEM_SEEN_ !! 3742 goto failed; 4652 ctx->full_inums = IS_ !! 3743 } 4653 sbinfo->noswap = ctx->noswap; << 4654 } else { 3744 } else { 4655 sb->s_flags |= SB_NOUSER; !! 3745 sb->s_flags |= MS_NOUSER; 4656 } 3746 } 4657 sb->s_export_op = &shmem_export_ops; 3747 sb->s_export_op = &shmem_export_ops; 4658 sb->s_flags |= SB_NOSEC | SB_I_VERSIO !! 3748 sb->s_flags |= MS_NOSEC; 4659 #else 3749 #else 4660 sb->s_flags |= SB_NOUSER; !! 
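/*
 * [Illustrative sketch, not from shmem.c] The MFD_NAME_PREFIX glued on
 * by memfd_create() above is purely cosmetic but handy when debugging:
 * the name shows up in /proc/<pid>/fd and /proc/<pid>/maps.  Assumes
 * glibc >= 2.27 for the memfd_create() wrapper.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

void show_memfd_name(void)
{
	char path[64], buf[256];
	ssize_t n;
	int fd = memfd_create("scratch", MFD_CLOEXEC);

	if (fd < 0)
		return;
	snprintf(path, sizeof(path), "/proc/self/fd/%d", fd);
	n = readlink(path, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("%s\n", buf);	/* "/memfd:scratch (deleted)" */
	}
	close(fd);
}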
3750 sb->s_flags |= MS_NOUSER; 4661 #endif 3751 #endif 4662 sbinfo->max_blocks = ctx->blocks; << 4663 sbinfo->max_inodes = ctx->inodes; << 4664 sbinfo->free_ispace = sbinfo->max_ino << 4665 if (sb->s_flags & SB_KERNMOUNT) { << 4666 sbinfo->ino_batch = alloc_per << 4667 if (!sbinfo->ino_batch) << 4668 goto failed; << 4669 } << 4670 sbinfo->uid = ctx->uid; << 4671 sbinfo->gid = ctx->gid; << 4672 sbinfo->full_inums = ctx->full_inums; << 4673 sbinfo->mode = ctx->mode; << 4674 sbinfo->huge = ctx->huge; << 4675 sbinfo->mpol = ctx->mpol; << 4676 ctx->mpol = NULL; << 4677 3752 4678 raw_spin_lock_init(&sbinfo->stat_lock !! 3753 spin_lock_init(&sbinfo->stat_lock); 4679 if (percpu_counter_init(&sbinfo->used 3754 if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL)) 4680 goto failed; 3755 goto failed; >> 3756 sbinfo->free_inodes = sbinfo->max_inodes; 4681 spin_lock_init(&sbinfo->shrinklist_lo 3757 spin_lock_init(&sbinfo->shrinklist_lock); 4682 INIT_LIST_HEAD(&sbinfo->shrinklist); 3758 INIT_LIST_HEAD(&sbinfo->shrinklist); 4683 3759 4684 sb->s_maxbytes = MAX_LFS_FILESIZE; 3760 sb->s_maxbytes = MAX_LFS_FILESIZE; 4685 sb->s_blocksize = PAGE_SIZE; 3761 sb->s_blocksize = PAGE_SIZE; 4686 sb->s_blocksize_bits = PAGE_SHIFT; 3762 sb->s_blocksize_bits = PAGE_SHIFT; 4687 sb->s_magic = TMPFS_MAGIC; 3763 sb->s_magic = TMPFS_MAGIC; 4688 sb->s_op = &shmem_ops; 3764 sb->s_op = &shmem_ops; 4689 sb->s_time_gran = 1; 3765 sb->s_time_gran = 1; 4690 #ifdef CONFIG_TMPFS_XATTR 3766 #ifdef CONFIG_TMPFS_XATTR 4691 sb->s_xattr = shmem_xattr_handlers; 3767 sb->s_xattr = shmem_xattr_handlers; 4692 #endif 3768 #endif 4693 #ifdef CONFIG_TMPFS_POSIX_ACL 3769 #ifdef CONFIG_TMPFS_POSIX_ACL 4694 sb->s_flags |= SB_POSIXACL; !! 3770 sb->s_flags |= MS_POSIXACL; 4695 #endif 3771 #endif 4696 uuid_t uuid; << 4697 uuid_gen(&uuid); << 4698 super_set_uuid(sb, uuid.b, sizeof(uui << 4699 << 4700 #ifdef CONFIG_TMPFS_QUOTA << 4701 if (ctx->seen & SHMEM_SEEN_QUOTA) { << 4702 sb->dq_op = &shmem_quota_oper << 4703 sb->s_qcop = &dquot_quotactl_ << 4704 sb->s_quota_types = QTYPE_MAS << 4705 << 4706 /* Copy the default limits fr << 4707 memcpy(&sbinfo->qlimits, &ctx << 4708 sizeof(struct shmem_qu << 4709 3772 4710 if (shmem_enable_quotas(sb, c !! 3773 inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE); 4711 goto failed; !! 3774 if (!inode) 4712 } << 4713 #endif /* CONFIG_TMPFS_QUOTA */ << 4714 << 4715 inode = shmem_get_inode(&nop_mnt_idma << 4716 S_IFDIR | sbi << 4717 if (IS_ERR(inode)) { << 4718 error = PTR_ERR(inode); << 4719 goto failed; 3775 goto failed; 4720 } << 4721 inode->i_uid = sbinfo->uid; 3776 inode->i_uid = sbinfo->uid; 4722 inode->i_gid = sbinfo->gid; 3777 inode->i_gid = sbinfo->gid; 4723 sb->s_root = d_make_root(inode); 3778 sb->s_root = d_make_root(inode); 4724 if (!sb->s_root) 3779 if (!sb->s_root) 4725 goto failed; 3780 goto failed; 4726 return 0; 3781 return 0; 4727 3782 4728 failed: 3783 failed: 4729 shmem_put_super(sb); 3784 shmem_put_super(sb); 4730 return error; !! 3785 return err; 4731 } << 4732 << 4733 static int shmem_get_tree(struct fs_context * << 4734 { << 4735 return get_tree_nodev(fc, shmem_fill_ << 4736 } << 4737 << 4738 static void shmem_free_fc(struct fs_context * << 4739 { << 4740 struct shmem_options *ctx = fc->fs_pr << 4741 << 4742 if (ctx) { << 4743 mpol_put(ctx->mpol); << 4744 kfree(ctx); << 4745 } << 4746 } 3786 } 4747 3787 4748 static const struct fs_context_operations shm !! 
3788 static struct kmem_cache *shmem_inode_cachep; 4749 .free = shmem_free_ << 4750 .get_tree = shmem_get_t << 4751 #ifdef CONFIG_TMPFS << 4752 .parse_monolithic = shmem_parse << 4753 .parse_param = shmem_parse << 4754 .reconfigure = shmem_recon << 4755 #endif << 4756 }; << 4757 << 4758 static struct kmem_cache *shmem_inode_cachep << 4759 3789 4760 static struct inode *shmem_alloc_inode(struct 3790 static struct inode *shmem_alloc_inode(struct super_block *sb) 4761 { 3791 { 4762 struct shmem_inode_info *info; 3792 struct shmem_inode_info *info; 4763 info = alloc_inode_sb(sb, shmem_inode !! 3793 info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL); 4764 if (!info) 3794 if (!info) 4765 return NULL; 3795 return NULL; 4766 return &info->vfs_inode; 3796 return &info->vfs_inode; 4767 } 3797 } 4768 3798 4769 static void shmem_free_in_core_inode(struct i !! 3799 static void shmem_destroy_callback(struct rcu_head *head) 4770 { 3800 { >> 3801 struct inode *inode = container_of(head, struct inode, i_rcu); 4771 if (S_ISLNK(inode->i_mode)) 3802 if (S_ISLNK(inode->i_mode)) 4772 kfree(inode->i_link); 3803 kfree(inode->i_link); 4773 kmem_cache_free(shmem_inode_cachep, S 3804 kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode)); 4774 } 3805 } 4775 3806 4776 static void shmem_destroy_inode(struct inode 3807 static void shmem_destroy_inode(struct inode *inode) 4777 { 3808 { 4778 if (S_ISREG(inode->i_mode)) 3809 if (S_ISREG(inode->i_mode)) 4779 mpol_free_shared_policy(&SHME 3810 mpol_free_shared_policy(&SHMEM_I(inode)->policy); 4780 if (S_ISDIR(inode->i_mode)) !! 3811 call_rcu(&inode->i_rcu, shmem_destroy_callback); 4781 simple_offset_destroy(shmem_g << 4782 } 3812 } 4783 3813 4784 static void shmem_init_inode(void *foo) 3814 static void shmem_init_inode(void *foo) 4785 { 3815 { 4786 struct shmem_inode_info *info = foo; 3816 struct shmem_inode_info *info = foo; 4787 inode_init_once(&info->vfs_inode); 3817 inode_init_once(&info->vfs_inode); 4788 } 3818 } 4789 3819 4790 static void __init shmem_init_inodecache(void !! 3820 static int shmem_init_inodecache(void) 4791 { 3821 { 4792 shmem_inode_cachep = kmem_cache_creat 3822 shmem_inode_cachep = kmem_cache_create("shmem_inode_cache", 4793 sizeof(struct 3823 sizeof(struct shmem_inode_info), 4794 0, SLAB_PANIC 3824 0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode); >> 3825 return 0; 4795 } 3826 } 4796 3827 4797 static void __init shmem_destroy_inodecache(v !! 3828 static void shmem_destroy_inodecache(void) 4798 { 3829 { 4799 kmem_cache_destroy(shmem_inode_cachep 3830 kmem_cache_destroy(shmem_inode_cachep); 4800 } 3831 } 4801 3832 4802 /* Keep the page in page cache instead of tru << 4803 static int shmem_error_remove_folio(struct ad << 4804 struct fol << 4805 { << 4806 return 0; << 4807 } << 4808 << 4809 static const struct address_space_operations 3833 static const struct address_space_operations shmem_aops = { 4810 .writepage = shmem_writepage, 3834 .writepage = shmem_writepage, 4811 .dirty_folio = noop_dirty_folio, !! 3835 .set_page_dirty = __set_page_dirty_no_writeback, 4812 #ifdef CONFIG_TMPFS 3836 #ifdef CONFIG_TMPFS 4813 .write_begin = shmem_write_begin, 3837 .write_begin = shmem_write_begin, 4814 .write_end = shmem_write_end, 3838 .write_end = shmem_write_end, 4815 #endif 3839 #endif 4816 #ifdef CONFIG_MIGRATION 3840 #ifdef CONFIG_MIGRATION 4817 .migrate_folio = migrate_folio, !! 3841 .migratepage = migrate_page, 4818 #endif 3842 #endif 4819 .error_remove_folio = shmem_error_rem !! 
3843 .error_remove_page = generic_error_remove_page, 4820 }; 3844 }; 4821 3845 4822 static const struct file_operations shmem_fil 3846 static const struct file_operations shmem_file_operations = { 4823 .mmap = shmem_mmap, 3847 .mmap = shmem_mmap, 4824 .open = shmem_file_open, << 4825 .get_unmapped_area = shmem_get_unmapp 3848 .get_unmapped_area = shmem_get_unmapped_area, 4826 #ifdef CONFIG_TMPFS 3849 #ifdef CONFIG_TMPFS 4827 .llseek = shmem_file_llseek, 3850 .llseek = shmem_file_llseek, 4828 .read_iter = shmem_file_read_ite 3851 .read_iter = shmem_file_read_iter, 4829 .write_iter = shmem_file_write_it !! 3852 .write_iter = generic_file_write_iter, 4830 .fsync = noop_fsync, 3853 .fsync = noop_fsync, 4831 .splice_read = shmem_file_splice_r !! 3854 .splice_read = generic_file_splice_read, 4832 .splice_write = iter_file_splice_wr 3855 .splice_write = iter_file_splice_write, 4833 .fallocate = shmem_fallocate, 3856 .fallocate = shmem_fallocate, 4834 #endif 3857 #endif 4835 }; 3858 }; 4836 3859 4837 static const struct inode_operations shmem_in 3860 static const struct inode_operations shmem_inode_operations = { 4838 .getattr = shmem_getattr, 3861 .getattr = shmem_getattr, 4839 .setattr = shmem_setattr, 3862 .setattr = shmem_setattr, 4840 #ifdef CONFIG_TMPFS_XATTR 3863 #ifdef CONFIG_TMPFS_XATTR 4841 .listxattr = shmem_listxattr, 3864 .listxattr = shmem_listxattr, 4842 .set_acl = simple_set_acl, 3865 .set_acl = simple_set_acl, 4843 .fileattr_get = shmem_fileattr_get, << 4844 .fileattr_set = shmem_fileattr_set, << 4845 #endif 3866 #endif 4846 }; 3867 }; 4847 3868 4848 static const struct inode_operations shmem_di 3869 static const struct inode_operations shmem_dir_inode_operations = { 4849 #ifdef CONFIG_TMPFS 3870 #ifdef CONFIG_TMPFS 4850 .getattr = shmem_getattr, << 4851 .create = shmem_create, 3871 .create = shmem_create, 4852 .lookup = simple_lookup, 3872 .lookup = simple_lookup, 4853 .link = shmem_link, 3873 .link = shmem_link, 4854 .unlink = shmem_unlink, 3874 .unlink = shmem_unlink, 4855 .symlink = shmem_symlink, 3875 .symlink = shmem_symlink, 4856 .mkdir = shmem_mkdir, 3876 .mkdir = shmem_mkdir, 4857 .rmdir = shmem_rmdir, 3877 .rmdir = shmem_rmdir, 4858 .mknod = shmem_mknod, 3878 .mknod = shmem_mknod, 4859 .rename = shmem_rename2, 3879 .rename = shmem_rename2, 4860 .tmpfile = shmem_tmpfile, 3880 .tmpfile = shmem_tmpfile, 4861 .get_offset_ctx = shmem_get_offset_ct << 4862 #endif 3881 #endif 4863 #ifdef CONFIG_TMPFS_XATTR 3882 #ifdef CONFIG_TMPFS_XATTR 4864 .listxattr = shmem_listxattr, 3883 .listxattr = shmem_listxattr, 4865 .fileattr_get = shmem_fileattr_get, << 4866 .fileattr_set = shmem_fileattr_set, << 4867 #endif 3884 #endif 4868 #ifdef CONFIG_TMPFS_POSIX_ACL 3885 #ifdef CONFIG_TMPFS_POSIX_ACL 4869 .setattr = shmem_setattr, 3886 .setattr = shmem_setattr, 4870 .set_acl = simple_set_acl, 3887 .set_acl = simple_set_acl, 4871 #endif 3888 #endif 4872 }; 3889 }; 4873 3890 4874 static const struct inode_operations shmem_sp 3891 static const struct inode_operations shmem_special_inode_operations = { 4875 .getattr = shmem_getattr, << 4876 #ifdef CONFIG_TMPFS_XATTR 3892 #ifdef CONFIG_TMPFS_XATTR 4877 .listxattr = shmem_listxattr, 3893 .listxattr = shmem_listxattr, 4878 #endif 3894 #endif 4879 #ifdef CONFIG_TMPFS_POSIX_ACL 3895 #ifdef CONFIG_TMPFS_POSIX_ACL 4880 .setattr = shmem_setattr, 3896 .setattr = shmem_setattr, 4881 .set_acl = simple_set_acl, 3897 .set_acl = simple_set_acl, 4882 #endif 3898 #endif 4883 }; 3899 }; 4884 3900 4885 static const struct super_operations shmem_op 3901 static const 
struct super_operations shmem_ops = { 4886 .alloc_inode = shmem_alloc_inode, 3902 .alloc_inode = shmem_alloc_inode, 4887 .free_inode = shmem_free_in_core_ << 4888 .destroy_inode = shmem_destroy_inode 3903 .destroy_inode = shmem_destroy_inode, 4889 #ifdef CONFIG_TMPFS 3904 #ifdef CONFIG_TMPFS 4890 .statfs = shmem_statfs, 3905 .statfs = shmem_statfs, >> 3906 .remount_fs = shmem_remount_fs, 4891 .show_options = shmem_show_options, 3907 .show_options = shmem_show_options, 4892 #endif 3908 #endif 4893 #ifdef CONFIG_TMPFS_QUOTA << 4894 .get_dquots = shmem_get_dquots, << 4895 #endif << 4896 .evict_inode = shmem_evict_inode, 3909 .evict_inode = shmem_evict_inode, 4897 .drop_inode = generic_delete_inod 3910 .drop_inode = generic_delete_inode, 4898 .put_super = shmem_put_super, 3911 .put_super = shmem_put_super, 4899 #ifdef CONFIG_TRANSPARENT_HUGEPAGE !! 3912 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE 4900 .nr_cached_objects = shmem_unuse 3913 .nr_cached_objects = shmem_unused_huge_count, 4901 .free_cached_objects = shmem_unuse 3914 .free_cached_objects = shmem_unused_huge_scan, 4902 #endif 3915 #endif 4903 }; 3916 }; 4904 3917 4905 static const struct vm_operations_struct shme 3918 static const struct vm_operations_struct shmem_vm_ops = { 4906 .fault = shmem_fault, 3919 .fault = shmem_fault, 4907 .map_pages = filemap_map_pages, 3920 .map_pages = filemap_map_pages, 4908 #ifdef CONFIG_NUMA 3921 #ifdef CONFIG_NUMA 4909 .set_policy = shmem_set_policy, 3922 .set_policy = shmem_set_policy, 4910 .get_policy = shmem_get_policy, 3923 .get_policy = shmem_get_policy, 4911 #endif 3924 #endif 4912 }; 3925 }; 4913 3926 4914 static const struct vm_operations_struct shme !! 3927 static struct dentry *shmem_mount(struct file_system_type *fs_type, 4915 .fault = shmem_fault, !! 3928 int flags, const char *dev_name, void *data) 4916 .map_pages = filemap_map_pages, << 4917 #ifdef CONFIG_NUMA << 4918 .set_policy = shmem_set_policy, << 4919 .get_policy = shmem_get_policy, << 4920 #endif << 4921 }; << 4922 << 4923 int shmem_init_fs_context(struct fs_context * << 4924 { 3929 { 4925 struct shmem_options *ctx; !! 3930 return mount_nodev(fs_type, flags, data, shmem_fill_super); 4926 << 4927 ctx = kzalloc(sizeof(struct shmem_opt << 4928 if (!ctx) << 4929 return -ENOMEM; << 4930 << 4931 ctx->mode = 0777 | S_ISVTX; << 4932 ctx->uid = current_fsuid(); << 4933 ctx->gid = current_fsgid(); << 4934 << 4935 fc->fs_private = ctx; << 4936 fc->ops = &shmem_fs_context_ops; << 4937 return 0; << 4938 } 3931 } 4939 3932 4940 static struct file_system_type shmem_fs_type 3933 static struct file_system_type shmem_fs_type = { 4941 .owner = THIS_MODULE, 3934 .owner = THIS_MODULE, 4942 .name = "tmpfs", 3935 .name = "tmpfs", 4943 .init_fs_context = shmem_init_fs_cont !! 3936 .mount = shmem_mount, 4944 #ifdef CONFIG_TMPFS << 4945 .parameters = shmem_fs_parameters << 4946 #endif << 4947 .kill_sb = kill_litter_super, 3937 .kill_sb = kill_litter_super, 4948 .fs_flags = FS_USERNS_MOUNT | F !! 3938 .fs_flags = FS_USERNS_MOUNT, 4949 }; 3939 }; 4950 3940 4951 void __init shmem_init(void) !! 3941 int __init shmem_init(void) 4952 { 3942 { 4953 int error; 3943 int error; 4954 3944 4955 shmem_init_inodecache(); !! 3945 /* If rootfs called this, don't re-init */ >> 3946 if (shmem_inode_cachep) >> 3947 return 0; 4956 3948 4957 #ifdef CONFIG_TMPFS_QUOTA !! 3949 error = shmem_init_inodecache(); 4958 register_quota_format(&shmem_quota_fo !! 3950 if (error) 4959 #endif !! 
3951 goto out3; 4960 3952 4961 error = register_filesystem(&shmem_fs 3953 error = register_filesystem(&shmem_fs_type); 4962 if (error) { 3954 if (error) { 4963 pr_err("Could not register tm 3955 pr_err("Could not register tmpfs\n"); 4964 goto out2; 3956 goto out2; 4965 } 3957 } 4966 3958 4967 shm_mnt = kern_mount(&shmem_fs_type); 3959 shm_mnt = kern_mount(&shmem_fs_type); 4968 if (IS_ERR(shm_mnt)) { 3960 if (IS_ERR(shm_mnt)) { 4969 error = PTR_ERR(shm_mnt); 3961 error = PTR_ERR(shm_mnt); 4970 pr_err("Could not kern_mount 3962 pr_err("Could not kern_mount tmpfs\n"); 4971 goto out1; 3963 goto out1; 4972 } 3964 } 4973 3965 4974 #ifdef CONFIG_TRANSPARENT_HUGEPAGE !! 3966 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE 4975 if (has_transparent_hugepage() && shm 3967 if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY) 4976 SHMEM_SB(shm_mnt->mnt_sb)->hu 3968 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; 4977 else 3969 else 4978 shmem_huge = SHMEM_HUGE_NEVER !! 3970 shmem_huge = 0; /* just in case it was patched */ 4979 << 4980 /* << 4981 * Default to setting PMD-sized THP t << 4982 * disable all other multi-size THPs. << 4983 */ << 4984 huge_shmem_orders_inherit = BIT(HPAGE << 4985 #endif 3971 #endif 4986 return; !! 3972 return 0; 4987 3973 4988 out1: 3974 out1: 4989 unregister_filesystem(&shmem_fs_type) 3975 unregister_filesystem(&shmem_fs_type); 4990 out2: 3976 out2: 4991 #ifdef CONFIG_TMPFS_QUOTA << 4992 unregister_quota_format(&shmem_quota_ << 4993 #endif << 4994 shmem_destroy_inodecache(); 3977 shmem_destroy_inodecache(); >> 3978 out3: 4995 shm_mnt = ERR_PTR(error); 3979 shm_mnt = ERR_PTR(error); >> 3980 return error; 4996 } 3981 } 4997 3982 4998 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && d !! 3983 #if defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && defined(CONFIG_SYSFS) 4999 static ssize_t shmem_enabled_show(struct kobj 3984 static ssize_t shmem_enabled_show(struct kobject *kobj, 5000 struct kobj !! 3985 struct kobj_attribute *attr, char *buf) 5001 { 3986 { 5002 static const int values[] = { !! 3987 int values[] = { 5003 SHMEM_HUGE_ALWAYS, 3988 SHMEM_HUGE_ALWAYS, 5004 SHMEM_HUGE_WITHIN_SIZE, 3989 SHMEM_HUGE_WITHIN_SIZE, 5005 SHMEM_HUGE_ADVISE, 3990 SHMEM_HUGE_ADVISE, 5006 SHMEM_HUGE_NEVER, 3991 SHMEM_HUGE_NEVER, 5007 SHMEM_HUGE_DENY, 3992 SHMEM_HUGE_DENY, 5008 SHMEM_HUGE_FORCE, 3993 SHMEM_HUGE_FORCE, 5009 }; 3994 }; 5010 int len = 0; !! 3995 int i, count; 5011 int i; << 5012 3996 5013 for (i = 0; i < ARRAY_SIZE(values); i !! 3997 for (i = 0, count = 0; i < ARRAY_SIZE(values); i++) { 5014 len += sysfs_emit_at(buf, len !! 3998 const char *fmt = shmem_huge == values[i] ? "[%s] " : "%s "; 5015 shmem_huge == << 5016 i ? " " : "", << 5017 } << 5018 len += sysfs_emit_at(buf, len, "\n"); << 5019 3999 5020 return len; !! 
4000 count += sprintf(buf + count, fmt, >> 4001 shmem_format_huge(values[i])); >> 4002 } >> 4003 buf[count - 1] = '\n'; >> 4004 return count; 5021 } 4005 } 5022 4006 5023 static ssize_t shmem_enabled_store(struct kob 4007 static ssize_t shmem_enabled_store(struct kobject *kobj, 5024 struct kobj_attribute *attr, 4008 struct kobj_attribute *attr, const char *buf, size_t count) 5025 { 4009 { 5026 char tmp[16]; 4010 char tmp[16]; 5027 int huge; 4011 int huge; 5028 4012 5029 if (count + 1 > sizeof(tmp)) 4013 if (count + 1 > sizeof(tmp)) 5030 return -EINVAL; 4014 return -EINVAL; 5031 memcpy(tmp, buf, count); 4015 memcpy(tmp, buf, count); 5032 tmp[count] = '\0'; 4016 tmp[count] = '\0'; 5033 if (count && tmp[count - 1] == '\n') 4017 if (count && tmp[count - 1] == '\n') 5034 tmp[count - 1] = '\0'; 4018 tmp[count - 1] = '\0'; 5035 4019 5036 huge = shmem_parse_huge(tmp); 4020 huge = shmem_parse_huge(tmp); 5037 if (huge == -EINVAL) 4021 if (huge == -EINVAL) 5038 return -EINVAL; 4022 return -EINVAL; 5039 if (!has_transparent_hugepage() && 4023 if (!has_transparent_hugepage() && 5040 huge != SHMEM_HUGE_NE 4024 huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY) 5041 return -EINVAL; 4025 return -EINVAL; 5042 4026 5043 /* Do not override huge allocation po << 5044 if (huge == SHMEM_HUGE_FORCE && << 5045 huge_shmem_orders_inherit != BIT( << 5046 return -EINVAL; << 5047 << 5048 shmem_huge = huge; 4027 shmem_huge = huge; 5049 if (shmem_huge > SHMEM_HUGE_DENY) 4028 if (shmem_huge > SHMEM_HUGE_DENY) 5050 SHMEM_SB(shm_mnt->mnt_sb)->hu 4029 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; 5051 return count; 4030 return count; 5052 } 4031 } 5053 4032 5054 struct kobj_attribute shmem_enabled_attr = __ !! 4033 struct kobj_attribute shmem_enabled_attr = 5055 static DEFINE_SPINLOCK(huge_shmem_orders_lock !! 4034 __ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store); >> 4035 #endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */ 5056 4036 5057 static ssize_t thpsize_shmem_enabled_show(str !! 4037 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE 5058 str !! 4038 bool shmem_huge_enabled(struct vm_area_struct *vma) 5059 { 4039 { 5060 int order = to_thpsize(kobj)->order; !! 4040 struct inode *inode = file_inode(vma->vm_file); 5061 const char *output; !! 4041 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 5062 !! 4042 loff_t i_size; 5063 if (test_bit(order, &huge_shmem_order !! 4043 pgoff_t off; 5064 output = "[always] inherit wi << 5065 else if (test_bit(order, &huge_shmem_ << 5066 output = "always [inherit] wi << 5067 else if (test_bit(order, &huge_shmem_ << 5068 output = "always inherit [wit << 5069 else if (test_bit(order, &huge_shmem_ << 5070 output = "always inherit with << 5071 else << 5072 output = "always inherit with << 5073 << 5074 return sysfs_emit(buf, "%s\n", output << 5075 } << 5076 << 5077 static ssize_t thpsize_shmem_enabled_store(st << 5078 st << 5079 co << 5080 { << 5081 int order = to_thpsize(kobj)->order; << 5082 ssize_t ret = count; << 5083 << 5084 if (sysfs_streq(buf, "always")) { << 5085 spin_lock(&huge_shmem_orders_ << 5086 clear_bit(order, &huge_shmem_ << 5087 clear_bit(order, &huge_shmem_ << 5088 clear_bit(order, &huge_shmem_ << 5089 set_bit(order, &huge_shmem_or << 5090 spin_unlock(&huge_shmem_order << 5091 } else if (sysfs_streq(buf, "inherit" << 5092 /* Do not override huge alloc << 5093 if (shmem_huge == SHMEM_HUGE_ << 5094 order != HPAGE_PMD_ORDER) << 5095 return -EINVAL; << 5096 4044 5097 spin_lock(&huge_shmem_orders_ !! 
4045 if (shmem_huge == SHMEM_HUGE_FORCE) 5098 clear_bit(order, &huge_shmem_ !! 4046 return true; 5099 clear_bit(order, &huge_shmem_ !! 4047 if (shmem_huge == SHMEM_HUGE_DENY) 5100 clear_bit(order, &huge_shmem_ !! 4048 return false; 5101 set_bit(order, &huge_shmem_or !! 4049 switch (sbinfo->huge) { 5102 spin_unlock(&huge_shmem_order !! 4050 case SHMEM_HUGE_NEVER: 5103 } else if (sysfs_streq(buf, "within_s !! 4051 return false; 5104 spin_lock(&huge_shmem_orders_ !! 4052 case SHMEM_HUGE_ALWAYS: 5105 clear_bit(order, &huge_shmem_ !! 4053 return true; 5106 clear_bit(order, &huge_shmem_ !! 4054 case SHMEM_HUGE_WITHIN_SIZE: 5107 clear_bit(order, &huge_shmem_ !! 4055 off = round_up(vma->vm_pgoff, HPAGE_PMD_NR); 5108 set_bit(order, &huge_shmem_or !! 4056 i_size = round_up(i_size_read(inode), PAGE_SIZE); 5109 spin_unlock(&huge_shmem_order !! 4057 if (i_size >= HPAGE_PMD_SIZE && 5110 } else if (sysfs_streq(buf, "advise") !! 4058 i_size >> PAGE_SHIFT >= off) 5111 spin_lock(&huge_shmem_orders_ !! 4059 return true; 5112 clear_bit(order, &huge_shmem_ !! 4060 case SHMEM_HUGE_ADVISE: 5113 clear_bit(order, &huge_shmem_ !! 4061 /* TODO: implement fadvise() hints */ 5114 clear_bit(order, &huge_shmem_ !! 4062 return (vma->vm_flags & VM_HUGEPAGE); 5115 set_bit(order, &huge_shmem_or !! 4063 default: 5116 spin_unlock(&huge_shmem_order !! 4064 VM_BUG_ON(1); 5117 } else if (sysfs_streq(buf, "never")) !! 4065 return false; 5118 spin_lock(&huge_shmem_orders_ << 5119 clear_bit(order, &huge_shmem_ << 5120 clear_bit(order, &huge_shmem_ << 5121 clear_bit(order, &huge_shmem_ << 5122 clear_bit(order, &huge_shmem_ << 5123 spin_unlock(&huge_shmem_order << 5124 } else { << 5125 ret = -EINVAL; << 5126 } 4066 } 5127 << 5128 return ret; << 5129 } 4067 } 5130 !! 4068 #endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */ 5131 struct kobj_attribute thpsize_shmem_enabled_a << 5132 __ATTR(shmem_enabled, 0644, thpsize_s << 5133 #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONF << 5134 4069 5135 #else /* !CONFIG_SHMEM */ 4070 #else /* !CONFIG_SHMEM */ 5136 4071 5137 /* 4072 /* 5138 * tiny-shmem: simple shmemfs and tmpfs using 4073 * tiny-shmem: simple shmemfs and tmpfs using ramfs code 5139 * 4074 * 5140 * This is intended for small system where th 4075 * This is intended for small system where the benefits of the full 5141 * shmem code (swap-backed and resource-limit 4076 * shmem code (swap-backed and resource-limited) are outweighed by 5142 * their complexity. On systems without swap 4077 * their complexity. On systems without swap this code should be 5143 * effectively equivalent, but much lighter w 4078 * effectively equivalent, but much lighter weight. 5144 */ 4079 */ 5145 4080 5146 static struct file_system_type shmem_fs_type 4081 static struct file_system_type shmem_fs_type = { 5147 .name = "tmpfs", 4082 .name = "tmpfs", 5148 .init_fs_context = ramfs_init_fs_cont !! 4083 .mount = ramfs_mount, 5149 .parameters = ramfs_fs_parameters !! 4084 .kill_sb = kill_litter_super, 5150 .kill_sb = ramfs_kill_sb, << 5151 .fs_flags = FS_USERNS_MOUNT, 4085 .fs_flags = FS_USERNS_MOUNT, 5152 }; 4086 }; 5153 4087 5154 void __init shmem_init(void) !! 4088 int __init shmem_init(void) 5155 { 4089 { 5156 BUG_ON(register_filesystem(&shmem_fs_ 4090 BUG_ON(register_filesystem(&shmem_fs_type) != 0); 5157 4091 5158 shm_mnt = kern_mount(&shmem_fs_type); 4092 shm_mnt = kern_mount(&shmem_fs_type); 5159 BUG_ON(IS_ERR(shm_mnt)); 4093 BUG_ON(IS_ERR(shm_mnt)); >> 4094 >> 4095 return 0; 5160 } 4096 } 5161 4097 5162 int shmem_unuse(unsigned int type) !! 
4098 int shmem_unuse(swp_entry_t swap, struct page *page) 5163 { 4099 { 5164 return 0; 4100 return 0; 5165 } 4101 } 5166 4102 5167 int shmem_lock(struct file *file, int lock, s !! 4103 int shmem_lock(struct file *file, int lock, struct user_struct *user) 5168 { 4104 { 5169 return 0; 4105 return 0; 5170 } 4106 } 5171 4107 5172 void shmem_unlock_mapping(struct address_spac 4108 void shmem_unlock_mapping(struct address_space *mapping) 5173 { 4109 { 5174 } 4110 } 5175 4111 5176 #ifdef CONFIG_MMU 4112 #ifdef CONFIG_MMU 5177 unsigned long shmem_get_unmapped_area(struct 4113 unsigned long shmem_get_unmapped_area(struct file *file, 5178 unsigne 4114 unsigned long addr, unsigned long len, 5179 unsigne 4115 unsigned long pgoff, unsigned long flags) 5180 { 4116 { 5181 return mm_get_unmapped_area(current-> !! 4117 return current->mm->get_unmapped_area(file, addr, len, pgoff, flags); 5182 } 4118 } 5183 #endif 4119 #endif 5184 4120 5185 void shmem_truncate_range(struct inode *inode 4121 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) 5186 { 4122 { 5187 truncate_inode_pages_range(inode->i_m 4123 truncate_inode_pages_range(inode->i_mapping, lstart, lend); 5188 } 4124 } 5189 EXPORT_SYMBOL_GPL(shmem_truncate_range); 4125 EXPORT_SYMBOL_GPL(shmem_truncate_range); 5190 4126 5191 #define shmem_vm_ops 4127 #define shmem_vm_ops generic_file_vm_ops 5192 #define shmem_anon_vm_ops << 5193 #define shmem_file_operations 4128 #define shmem_file_operations ramfs_file_operations >> 4129 #define shmem_get_inode(sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev) 5194 #define shmem_acct_size(flags, size) 4130 #define shmem_acct_size(flags, size) 0 5195 #define shmem_unacct_size(flags, size) 4131 #define shmem_unacct_size(flags, size) do {} while (0) 5196 4132 5197 static inline struct inode *shmem_get_inode(s << 5198 struct super_ << 5199 umode_t mode, << 5200 { << 5201 struct inode *inode = ramfs_get_inode << 5202 return inode ? inode : ERR_PTR(-ENOSP << 5203 } << 5204 << 5205 #endif /* CONFIG_SHMEM */ 4133 #endif /* CONFIG_SHMEM */ 5206 4134 5207 /* common code */ 4135 /* common code */ 5208 4136 5209 static struct file *__shmem_file_setup(struct !! 4137 static const struct dentry_operations anon_ops = { 5210 loff_t size, unsigned !! 4138 .d_dname = simple_dname >> 4139 }; >> 4140 >> 4141 static struct file *__shmem_file_setup(const char *name, loff_t size, >> 4142 unsigned long flags, unsigned int i_flags) 5211 { 4143 { 5212 struct inode *inode; << 5213 struct file *res; 4144 struct file *res; >> 4145 struct inode *inode; >> 4146 struct path path; >> 4147 struct super_block *sb; >> 4148 struct qstr this; 5214 4149 5215 if (IS_ERR(mnt)) !! 4150 if (IS_ERR(shm_mnt)) 5216 return ERR_CAST(mnt); !! 4151 return ERR_CAST(shm_mnt); 5217 4152 5218 if (size < 0 || size > MAX_LFS_FILESI 4153 if (size < 0 || size > MAX_LFS_FILESIZE) 5219 return ERR_PTR(-EINVAL); 4154 return ERR_PTR(-EINVAL); 5220 4155 5221 if (shmem_acct_size(flags, size)) 4156 if (shmem_acct_size(flags, size)) 5222 return ERR_PTR(-ENOMEM); 4157 return ERR_PTR(-ENOMEM); 5223 4158 5224 if (is_idmapped_mnt(mnt)) !! 4159 res = ERR_PTR(-ENOMEM); 5225 return ERR_PTR(-EINVAL); !! 
4160 this.name = name; >> 4161 this.len = strlen(name); >> 4162 this.hash = 0; /* will go */ >> 4163 sb = shm_mnt->mnt_sb; >> 4164 path.mnt = mntget(shm_mnt); >> 4165 path.dentry = d_alloc_pseudo(sb, &this); >> 4166 if (!path.dentry) >> 4167 goto put_memory; >> 4168 d_set_d_op(path.dentry, &anon_ops); >> 4169 >> 4170 res = ERR_PTR(-ENOSPC); >> 4171 inode = shmem_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0, flags); >> 4172 if (!inode) >> 4173 goto put_memory; 5226 4174 5227 inode = shmem_get_inode(&nop_mnt_idma << 5228 S_IFREG | S_I << 5229 if (IS_ERR(inode)) { << 5230 shmem_unacct_size(flags, size << 5231 return ERR_CAST(inode); << 5232 } << 5233 inode->i_flags |= i_flags; 4175 inode->i_flags |= i_flags; >> 4176 d_instantiate(path.dentry, inode); 5234 inode->i_size = size; 4177 inode->i_size = size; 5235 clear_nlink(inode); /* It is unli 4178 clear_nlink(inode); /* It is unlinked */ 5236 res = ERR_PTR(ramfs_nommu_expand_for_ 4179 res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size)); 5237 if (!IS_ERR(res)) << 5238 res = alloc_file_pseudo(inode << 5239 &shmem_file_o << 5240 if (IS_ERR(res)) 4180 if (IS_ERR(res)) 5241 iput(inode); !! 4181 goto put_path; >> 4182 >> 4183 res = alloc_file(&path, FMODE_WRITE | FMODE_READ, >> 4184 &shmem_file_operations); >> 4185 if (IS_ERR(res)) >> 4186 goto put_path; >> 4187 >> 4188 return res; >> 4189 >> 4190 put_memory: >> 4191 shmem_unacct_size(flags, size); >> 4192 put_path: >> 4193 path_put(&path); 5242 return res; 4194 return res; 5243 } 4195 } 5244 4196 5245 /** 4197 /** 5246 * shmem_kernel_file_setup - get an unlinked 4198 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be 5247 * kernel internal. There will be NO LS 4199 * kernel internal. There will be NO LSM permission checks against the 5248 * underlying inode. So users of this i 4200 * underlying inode. So users of this interface must do LSM checks at a 5249 * higher layer. The users are the big_ 4201 * higher layer. The users are the big_key and shm implementations. LSM 5250 * checks are provided at the key or shm 4202 * checks are provided at the key or shm level rather than the inode. 5251 * @name: name for dentry (to be seen in /pro 4203 * @name: name for dentry (to be seen in /proc/<pid>/maps 5252 * @size: size to be set for the file 4204 * @size: size to be set for the file 5253 * @flags: VM_NORESERVE suppresses pre-accoun 4205 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size 5254 */ 4206 */ 5255 struct file *shmem_kernel_file_setup(const ch 4207 struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags) 5256 { 4208 { 5257 return __shmem_file_setup(shm_mnt, na !! 4209 return __shmem_file_setup(name, size, flags, S_PRIVATE); 5258 } 4210 } 5259 EXPORT_SYMBOL_GPL(shmem_kernel_file_setup); << 5260 4211 5261 /** 4212 /** 5262 * shmem_file_setup - get an unlinked file li 4213 * shmem_file_setup - get an unlinked file living in tmpfs 5263 * @name: name for dentry (to be seen in /pro 4214 * @name: name for dentry (to be seen in /proc/<pid>/maps 5264 * @size: size to be set for the file 4215 * @size: size to be set for the file 5265 * @flags: VM_NORESERVE suppresses pre-accoun 4216 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size 5266 */ 4217 */ 5267 struct file *shmem_file_setup(const char *nam 4218 struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags) 5268 { 4219 { 5269 return __shmem_file_setup(shm_mnt, na !! 
4220 return __shmem_file_setup(name, size, flags, 0); 5270 } 4221 } 5271 EXPORT_SYMBOL_GPL(shmem_file_setup); 4222 EXPORT_SYMBOL_GPL(shmem_file_setup); 5272 4223 5273 /** 4224 /** 5274 * shmem_file_setup_with_mnt - get an unlinke << 5275 * @mnt: the tmpfs mount where the file will << 5276 * @name: name for dentry (to be seen in /pro << 5277 * @size: size to be set for the file << 5278 * @flags: VM_NORESERVE suppresses pre-accoun << 5279 */ << 5280 struct file *shmem_file_setup_with_mnt(struct << 5281 loff_t << 5282 { << 5283 return __shmem_file_setup(mnt, name, << 5284 } << 5285 EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt); << 5286 << 5287 /** << 5288 * shmem_zero_setup - setup a shared anonymou 4225 * shmem_zero_setup - setup a shared anonymous mapping 5289 * @vma: the vma to be mmapped is prepared by !! 4226 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff 5290 */ 4227 */ 5291 int shmem_zero_setup(struct vm_area_struct *v 4228 int shmem_zero_setup(struct vm_area_struct *vma) 5292 { 4229 { 5293 struct file *file; 4230 struct file *file; 5294 loff_t size = vma->vm_end - vma->vm_s 4231 loff_t size = vma->vm_end - vma->vm_start; 5295 4232 5296 /* 4233 /* 5297 * Cloning a new file under mmap_lock !! 4234 * Cloning a new file under mmap_sem leads to a lock ordering conflict 5298 * between XFS directory reading and 4235 * between XFS directory reading and selinux: since this file is only 5299 * accessible to the user through its 4236 * accessible to the user through its mapping, use S_PRIVATE flag to 5300 * bypass file security, in the same 4237 * bypass file security, in the same way as shmem_kernel_file_setup(). 5301 */ 4238 */ 5302 file = shmem_kernel_file_setup("dev/z !! 4239 file = __shmem_file_setup("dev/zero", size, vma->vm_flags, S_PRIVATE); 5303 if (IS_ERR(file)) 4240 if (IS_ERR(file)) 5304 return PTR_ERR(file); 4241 return PTR_ERR(file); 5305 4242 5306 if (vma->vm_file) 4243 if (vma->vm_file) 5307 fput(vma->vm_file); 4244 fput(vma->vm_file); 5308 vma->vm_file = file; 4245 vma->vm_file = file; 5309 vma->vm_ops = &shmem_anon_vm_ops; !! 4246 vma->vm_ops = &shmem_vm_ops; >> 4247 >> 4248 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && >> 4249 ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) < >> 4250 (vma->vm_end & HPAGE_PMD_MASK)) { >> 4251 khugepaged_enter(vma, vma->vm_flags); >> 4252 } 5310 4253 5311 return 0; 4254 return 0; 5312 } 4255 } 5313 4256 5314 /** 4257 /** 5315 * shmem_read_folio_gfp - read into page cach !! 4258 * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags. 5316 * @mapping: the folio's address_space !! 4259 * @mapping: the page's address_space 5317 * @index: the folio index !! 4260 * @index: the page index 5318 * @gfp: the page allocator flags to u 4261 * @gfp: the page allocator flags to use if allocating 5319 * 4262 * 5320 * This behaves as a tmpfs "read_cache_page_g 4263 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)", 5321 * with any new page allocations done using t 4264 * with any new page allocations done using the specified allocation flags. 5322 * But read_cache_page_gfp() uses the ->read_ !! 4265 * But read_cache_page_gfp() uses the ->readpage() method: which does not 5323 * suit tmpfs, since it may have pages in swa 4266 * suit tmpfs, since it may have pages in swapcache, and needs to find those 5324 * for itself; although drivers/gpu/drm i915 4267 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support. 
5325 * 4268 * 5326 * i915_gem_object_get_pages_gtt() mixes __GF 4269 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in 5327 * with the mapping_gfp_mask(), to avoid OOMi 4270 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily. 5328 */ 4271 */ 5329 struct folio *shmem_read_folio_gfp(struct add !! 4272 struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, 5330 pgoff_t index, gfp_t gfp) !! 4273 pgoff_t index, gfp_t gfp) 5331 { 4274 { 5332 #ifdef CONFIG_SHMEM 4275 #ifdef CONFIG_SHMEM 5333 struct inode *inode = mapping->host; 4276 struct inode *inode = mapping->host; 5334 struct folio *folio; !! 4277 struct page *page; 5335 int error; 4278 int error; 5336 4279 5337 error = shmem_get_folio_gfp(inode, in !! 4280 BUG_ON(mapping->a_ops != &shmem_aops); 5338 gfp, NULL !! 4281 error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, >> 4282 gfp, NULL, NULL, NULL); 5339 if (error) 4283 if (error) 5340 return ERR_PTR(error); !! 4284 page = ERR_PTR(error); 5341 !! 4285 else 5342 folio_unlock(folio); !! 4286 unlock_page(page); 5343 return folio; !! 4287 return page; 5344 #else 4288 #else 5345 /* 4289 /* 5346 * The tiny !SHMEM case uses ramfs wi 4290 * The tiny !SHMEM case uses ramfs without swap 5347 */ 4291 */ 5348 return mapping_read_folio_gfp(mapping !! 4292 return read_cache_page_gfp(mapping, index, gfp); 5349 #endif 4293 #endif 5350 } << 5351 EXPORT_SYMBOL_GPL(shmem_read_folio_gfp); << 5352 << 5353 struct page *shmem_read_mapping_page_gfp(stru << 5354 pgof << 5355 { << 5356 struct folio *folio = shmem_read_foli << 5357 struct page *page; << 5358 << 5359 if (IS_ERR(folio)) << 5360 return &folio->page; << 5361 << 5362 page = folio_file_page(folio, index); << 5363 if (PageHWPoison(page)) { << 5364 folio_put(folio); << 5365 return ERR_PTR(-EIO); << 5366 } << 5367 << 5368 return page; << 5369 } 4294 } 5370 EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp 4295 EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp); 5371 4296
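
/*
 * A minimal userspace sketch (not part of this file) of how the mount
 * options consumed by shmem_fill_super() and reported by
 * shmem_show_options() are supplied through mount(2). The mount point
 * and option values are illustrative assumptions; "usrquota" and
 * "grpquota" are accepted only when CONFIG_TMPFS_QUOTA is enabled, and
 * mounting requires CAP_SYS_ADMIN.
 */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/*
	 * Without "size=" and "nr_inodes=", an ordinary (non-kernel)
	 * tmpfs mount defaults to half of physical RAM and one inode
	 * per page of lowmem, as noted in shmem_fill_super().
	 */
	if (mount("tmpfs", "/mnt/tmp", "tmpfs", 0,
		  "size=64m,nr_inodes=4096,mode=1777") == -1) {
		perror("mount tmpfs");
		return 1;
	}
	return 0;
}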
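
/*
 * A hedged userspace sketch of the memfd_create() syscall defined
 * above: create a sealable memfd, size it, then freeze it. It assumes
 * a glibc with the memfd_create() wrapper (2.27+); older systems must
 * go through syscall(SYS_memfd_create, ...). The name and size are
 * illustrative. The file shows up as "/memfd:example" in
 * /proc/<pid>/maps, per MFD_NAME_PREFIX above.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = memfd_create("example", MFD_CLOEXEC | MFD_ALLOW_SEALING);

	if (fd == -1) {
		perror("memfd_create");
		return 1;
	}
	if (ftruncate(fd, 4096) == -1) {
		perror("ftruncate");
		return 1;
	}
	/*
	 * MFD_ALLOW_SEALING cleared F_SEAL_SEAL in the inode's seals,
	 * so seals may now be added: freeze the size, then forbid any
	 * further sealing.
	 */
	if (fcntl(fd, F_ADD_SEALS,
		  F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL) == -1) {
		perror("F_ADD_SEALS");
		return 1;
	}
	close(fd);
	return 0;
}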
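
/*
 * A rough sketch of exercising the fs_context plumbing wired up by
 * shmem_init_fs_context() and shmem_fs_context_ops above via the new
 * mount API: each fsconfig() key lands in shmem_parse_param(), and
 * FSCONFIG_CMD_CREATE reaches shmem_get_tree(). glibc provides no
 * wrappers for these syscalls, hence raw syscall(2); the target path
 * and option values are assumptions, and most error handling is
 * trimmed for brevity.
 */
#define _GNU_SOURCE
#include <fcntl.h>		/* AT_FDCWD */
#include <linux/mount.h>	/* FSCONFIG_*, MOVE_MOUNT_F_EMPTY_PATH */
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	int fsfd, mntfd;

	fsfd = syscall(SYS_fsopen, "tmpfs", 0);
	if (fsfd < 0)
		return 1;
	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "size", "64m", 0);
	syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
	mntfd = syscall(SYS_fsmount, fsfd, 0, 0);
	if (mntfd < 0)
		return 1;
	return syscall(SYS_move_mount, mntfd, "", AT_FDCWD, "/mnt/tmp",
		       MOVE_MOUNT_F_EMPTY_PATH) ? 1 : 0;
}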
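
/*
 * A short userspace sketch of driving the sysfs knobs implemented by
 * shmem_enabled_show()/shmem_enabled_store() above. The per-size
 * "hugepages-2048kB" directory is an illustrative instance of the
 * per-order knobs handled by thpsize_shmem_enabled_store(); only
 * newer kernels expose it, and while the global knob is "force" only
 * the PMD-size entry may be set to "inherit". Writing requires root.
 */
#include <stdio.h>

static int write_knob(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fputs(val, f);		/* store handler strips a trailing '\n' */
	return fclose(f);
}

int main(void)
{
	/* Global tmpfs THP policy: one of always, within_size,
	 * advise, never, deny, force. */
	write_knob("/sys/kernel/mm/transparent_hugepage/shmem_enabled",
		   "within_size");
	/* Per-size policy, deferring to the global knob. */
	write_knob("/sys/kernel/mm/transparent_hugepage/hugepages-2048kB/shmem_enabled",
		   "inherit");
	return 0;
}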
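
/*
 * A small sketch of the path that reaches shmem_zero_setup(): a
 * MAP_SHARED | MAP_ANONYMOUS mapping is backed by an unlinked
 * "dev/zero" shmem file, so stores are visible across fork() rather
 * than copied-on-write. Purely illustrative; the size is an
 * assumption.
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	char *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			 MAP_SHARED | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	if (fork() == 0) {
		strcpy(buf, "written by the child");
		_exit(0);
	}
	wait(NULL);
	printf("parent reads: %s\n", buf);	/* shared, not COW */
	return 0;
}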
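
/*
 * A hedged in-kernel sketch (not from this file) of how a driver such
 * as i915 or ttm consumes the interfaces exported above:
 * shmem_file_setup() to obtain an unlinked tmpfs file, then
 * shmem_read_mapping_page_gfp() to populate it, mixing in
 * __GFP_NORETRY | __GFP_NOWARN as the comment above recommends.
 * example_alloc_backing() and the object name are invented for
 * illustration.
 */
static int example_alloc_backing(loff_t size)
{
	struct file *filp;
	struct page *page;
	gfp_t gfp;

	filp = shmem_file_setup("example-obj", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	/* Avoid OOMing the machine on a speculative allocation. */
	gfp = mapping_gfp_mask(filp->f_mapping) |
	      __GFP_NORETRY | __GFP_NOWARN;
	page = shmem_read_mapping_page_gfp(filp->f_mapping, 0, gfp);
	if (IS_ERR(page)) {
		fput(filp);
		return PTR_ERR(page);
	}

	/* ... map or DMA the page here ... */

	put_page(page);
	fput(filp);
	return 0;
}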