/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/fileattr.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/export.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/hugetlb.h>
#include <linux/fs_parser.h>
#include <linux/swapfile.h>
#include <linux/iversion.h>
#include "swap.h"

static struct vfsmount *shm_mnt __ro_after_init;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <uapi/linux/memfd.h>
#include <linux/rmap.h>
#include <linux/uuid.h>
#include <linux/quotaops.h>
#include <linux/rcupdate_wait.h>

#include <linux/uaccess.h>

#include "internal.h"

#define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
#define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Pretend that one inode + its dentry occupy BOGO_INODE_SIZE bytes */
#define BOGO_INODE_SIZE 1024

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

/*
 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
 * inode->i_private (with i_rwsem making sure that it has only one user at
 * a time): we would prefer not to enlarge the shmem inode just for that.
 */
struct shmem_falloc {
	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
	pgoff_t start;		/* start of range currently being fallocated */
	pgoff_t next;		/* the next page offset to be fallocated */
	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
};

struct shmem_options {
	unsigned long long blocks;
	unsigned long long inodes;
	struct mempolicy *mpol;
	kuid_t uid;
	kgid_t gid;
	umode_t mode;
	bool full_inums;
	int huge;
	int seen;
	bool noswap;
	unsigned short quota_types;
	struct shmem_quota_limits qlimits;
#define SHMEM_SEEN_BLOCKS 1
#define SHMEM_SEEN_INODES 2
#define SHMEM_SEEN_HUGE 4
#define SHMEM_SEEN_INUMS 8
#define SHMEM_SEEN_NOSWAP 16
#define SHMEM_SEEN_QUOTA 32
};

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static unsigned long huge_shmem_orders_always __read_mostly;
static unsigned long huge_shmem_orders_madvise __read_mostly;
static unsigned long huge_shmem_orders_inherit __read_mostly;
static unsigned long huge_shmem_orders_within_size __read_mostly;
#endif

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages() / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	unsigned long nr_pages = totalram_pages();

	return min3(nr_pages - totalhigh_pages(), nr_pages / 2,
			ULONG_MAX / BOGO_INODE_SIZE);
}
#endif

static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
			struct folio **foliop, enum sgp_type sgp, gfp_t gfp,
			struct vm_area_struct *vma, vm_fault_t *fault_type);
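
/*
 * Illustrative sketch (not part of the original file): roughly how the
 * shmem_falloc handshake described above is expected to work.  The
 * hole-punching path publishes its shmem_falloc through inode->i_private,
 * and a racing fault into the hole finds it there and waits on ->waitq
 * until the punch completes.  The helper names below are hypothetical;
 * the real logic lives in shmem_fallocate() and shmem_fault() later in
 * this file.
 */
static void shmem_publish_falloc_sketch(struct inode *inode,
					struct shmem_falloc *shmem_falloc)
{
	spin_lock(&inode->i_lock);
	inode->i_private = shmem_falloc;	/* now visible to faulting threads */
	spin_unlock(&inode->i_lock);
}

static void shmem_retire_falloc_sketch(struct inode *inode,
				       struct shmem_falloc *shmem_falloc)
{
	spin_lock(&inode->i_lock);
	inode->i_private = NULL;		/* punch done: no new waiters */
	spin_unlock(&inode->i_lock);
	if (shmem_falloc->waitq)
		wake_up_all(shmem_falloc->waitq);	/* release blocked faults */
}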

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

static inline int shmem_reacct_size(unsigned long flags,
		loff_t oldsize, loff_t newsize)
{
	if (!(flags & VM_NORESERVE)) {
		if (VM_ACCT(newsize) > VM_ACCT(oldsize))
			return security_vm_enough_memory_mm(current->mm,
					VM_ACCT(newsize) - VM_ACCT(oldsize));
		else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
			vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
	}
	return 0;
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow large sparse files.
 * shmem_get_folio reports shmem_acct_blocks failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_blocks(unsigned long flags, long pages)
{
	if (!(flags & VM_NORESERVE))
		return 0;

	return security_vm_enough_memory_mm(current->mm,
			pages * VM_ACCT(PAGE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
}

static int shmem_inode_acct_blocks(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	int err = -ENOSPC;

	if (shmem_acct_blocks(info->flags, pages))
		return err;

	might_sleep();	/* when quotas */
	if (sbinfo->max_blocks) {
		if (!percpu_counter_limited_add(&sbinfo->used_blocks,
						sbinfo->max_blocks, pages))
			goto unacct;

		err = dquot_alloc_block_nodirty(inode, pages);
		if (err) {
			percpu_counter_sub(&sbinfo->used_blocks, pages);
			goto unacct;
		}
	} else {
		err = dquot_alloc_block_nodirty(inode, pages);
		if (err)
			goto unacct;
	}

	return 0;

unacct:
	shmem_unacct_blocks(info->flags, pages);
	return err;
}

static void shmem_inode_unacct_blocks(struct inode *inode, long pages)
237 static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages) 251 { 238 { 252 struct shmem_inode_info *info = SHMEM_ 239 struct shmem_inode_info *info = SHMEM_I(inode); 253 struct shmem_sb_info *sbinfo = SHMEM_S 240 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 254 241 255 might_sleep(); /* when quotas */ << 256 dquot_free_block_nodirty(inode, pages) << 257 << 258 if (sbinfo->max_blocks) 242 if (sbinfo->max_blocks) 259 percpu_counter_sub(&sbinfo->us 243 percpu_counter_sub(&sbinfo->used_blocks, pages); 260 shmem_unacct_blocks(info->flags, pages 244 shmem_unacct_blocks(info->flags, pages); 261 } 245 } 262 246 263 static const struct super_operations shmem_ops 247 static const struct super_operations shmem_ops; 264 static const struct address_space_operations s 248 static const struct address_space_operations shmem_aops; 265 static const struct file_operations shmem_file 249 static const struct file_operations shmem_file_operations; 266 static const struct inode_operations shmem_ino 250 static const struct inode_operations shmem_inode_operations; 267 static const struct inode_operations shmem_dir 251 static const struct inode_operations shmem_dir_inode_operations; 268 static const struct inode_operations shmem_spe 252 static const struct inode_operations shmem_special_inode_operations; 269 static const struct vm_operations_struct shmem 253 static const struct vm_operations_struct shmem_vm_ops; 270 static const struct vm_operations_struct shmem << 271 static struct file_system_type shmem_fs_type; 254 static struct file_system_type shmem_fs_type; 272 255 273 bool shmem_mapping(struct address_space *mappi << 274 { << 275 return mapping->a_ops == &shmem_aops; << 276 } << 277 EXPORT_SYMBOL_GPL(shmem_mapping); << 278 << 279 bool vma_is_anon_shmem(struct vm_area_struct * << 280 { << 281 return vma->vm_ops == &shmem_anon_vm_o << 282 } << 283 << 284 bool vma_is_shmem(struct vm_area_struct *vma) 256 bool vma_is_shmem(struct vm_area_struct *vma) 285 { 257 { 286 return vma_is_anon_shmem(vma) || vma-> !! 258 return vma->vm_ops == &shmem_vm_ops; 287 } 259 } 288 260 289 static LIST_HEAD(shmem_swaplist); 261 static LIST_HEAD(shmem_swaplist); 290 static DEFINE_MUTEX(shmem_swaplist_mutex); 262 static DEFINE_MUTEX(shmem_swaplist_mutex); 291 263 292 #ifdef CONFIG_TMPFS_QUOTA !! 264 static int shmem_reserve_inode(struct super_block *sb) 293 << 294 static int shmem_enable_quotas(struct super_bl << 295 unsigned short << 296 { << 297 int type, err = 0; << 298 << 299 sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS << 300 for (type = 0; type < SHMEM_MAXQUOTAS; << 301 if (!(quota_types & (1 << type << 302 continue; << 303 err = dquot_load_quota_sb(sb, << 304 DQUO << 305 DQUO << 306 if (err) << 307 goto out_err; << 308 } << 309 return 0; << 310 << 311 out_err: << 312 pr_warn("tmpfs: failed to enable quota << 313 type, err); << 314 for (type--; type >= 0; type--) << 315 dquot_quota_off(sb, type); << 316 return err; << 317 } << 318 << 319 static void shmem_disable_quotas(struct super_ << 320 { << 321 int type; << 322 << 323 for (type = 0; type < SHMEM_MAXQUOTAS; << 324 dquot_quota_off(sb, type); << 325 } << 326 << 327 static struct dquot __rcu **shmem_get_dquots(s << 328 { << 329 return SHMEM_I(inode)->i_dquot; << 330 } << 331 #endif /* CONFIG_TMPFS_QUOTA */ << 332 << 333 /* << 334 * shmem_reserve_inode() performs bookkeeping << 335 * produces a novel ino for the newly allocate << 336 * << 337 * It may also be called when making a hard li << 338 * each dentry. 
However, in that case, no new << 339 * internally draws from another pool of inode << 340 * get_next_ino()). This case is indicated by << 341 */ << 342 #define SHMEM_INO_BATCH 1024 << 343 static int shmem_reserve_inode(struct super_bl << 344 { 265 { 345 struct shmem_sb_info *sbinfo = SHMEM_S 266 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 346 ino_t ino; !! 267 if (sbinfo->max_inodes) { 347 !! 268 spin_lock(&sbinfo->stat_lock); 348 if (!(sb->s_flags & SB_KERNMOUNT)) { !! 269 if (!sbinfo->free_inodes) { 349 raw_spin_lock(&sbinfo->stat_lo !! 270 spin_unlock(&sbinfo->stat_lock); 350 if (sbinfo->max_inodes) { !! 271 return -ENOSPC; 351 if (sbinfo->free_ispac << 352 raw_spin_unloc << 353 return -ENOSPC << 354 } << 355 sbinfo->free_ispace -= << 356 } << 357 if (inop) { << 358 ino = sbinfo->next_ino << 359 if (unlikely(is_zero_i << 360 ino = sbinfo-> << 361 if (unlikely(!sbinfo-> << 362 ino > UIN << 363 /* << 364 * Emulate get << 365 * compatibili << 366 */ << 367 if (IS_ENABLED << 368 pr_war << 369 << 370 sbinfo->next_i << 371 ino = sbinfo-> << 372 } << 373 *inop = ino; << 374 } 272 } 375 raw_spin_unlock(&sbinfo->stat_ !! 273 sbinfo->free_inodes--; 376 } else if (inop) { !! 274 spin_unlock(&sbinfo->stat_lock); 377 /* << 378 * __shmem_file_setup, one of << 379 * doesn't hold stat_lock in s << 380 * max_inodes is always 0, and << 381 * unknown contexts. As such, << 382 * which doesn't require the p << 383 * the batch boundary. << 384 * << 385 * We don't need to worry abou << 386 * shmem mounts are not expose << 387 * to worry about things like << 388 */ << 389 ino_t *next_ino; << 390 << 391 next_ino = per_cpu_ptr(sbinfo- << 392 ino = *next_ino; << 393 if (unlikely(ino % SHMEM_INO_B << 394 raw_spin_lock(&sbinfo- << 395 ino = sbinfo->next_ino << 396 sbinfo->next_ino += SH << 397 raw_spin_unlock(&sbinf << 398 if (unlikely(is_zero_i << 399 ino++; << 400 } << 401 *inop = ino; << 402 *next_ino = ++ino; << 403 put_cpu(); << 404 } 275 } 405 << 406 return 0; 276 return 0; 407 } 277 } 408 278 409 static void shmem_free_inode(struct super_bloc !! 279 static void shmem_free_inode(struct super_block *sb) 410 { 280 { 411 struct shmem_sb_info *sbinfo = SHMEM_S 281 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 412 if (sbinfo->max_inodes) { 282 if (sbinfo->max_inodes) { 413 raw_spin_lock(&sbinfo->stat_lo !! 283 spin_lock(&sbinfo->stat_lock); 414 sbinfo->free_ispace += BOGO_IN !! 284 sbinfo->free_inodes++; 415 raw_spin_unlock(&sbinfo->stat_ !! 285 spin_unlock(&sbinfo->stat_lock); 416 } 286 } 417 } 287 } 418 288 419 /** 289 /** 420 * shmem_recalc_inode - recalculate the block 290 * shmem_recalc_inode - recalculate the block usage of an inode 421 * @inode: inode to recalc 291 * @inode: inode to recalc 422 * @alloced: the change in number of pages all << 423 * @swapped: the change in number of pages swa << 424 * 292 * 425 * We have to calculate the free blocks since 293 * We have to calculate the free blocks since the mm can drop 426 * undirtied hole pages behind our back. 294 * undirtied hole pages behind our back. 427 * 295 * 428 * But normally info->alloced == inode->i_ma 296 * But normally info->alloced == inode->i_mapping->nrpages + info->swapped 429 * So mm freed is info->alloced - (inode->i_ma 297 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped) >> 298 * >> 299 * It has to be called with the spinlock held. 430 */ 300 */ 431 static void shmem_recalc_inode(struct inode *i !! 
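
/*
 * Illustrative sketch (not part of the original file): the bookkeeping rule
 * the comment above describes.  If an inode has 100 pages accounted in
 * info->alloced, 30 of them swapped out, but only 60 still in the page
 * cache, then 100 - 30 - 60 = 10 pages were reclaimed behind tmpfs's back
 * and their block accounting has to be returned.  The helper name is
 * hypothetical; the real work is done by shmem_recalc_inode() below.
 */
static long shmem_freed_behind_us_sketch(struct shmem_inode_info *info,
					 struct address_space *mapping)
{
	long freed = info->alloced - info->swapped - mapping->nrpages;

	return freed > 0 ? freed : 0;	/* only ever gives pages back */
}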
301 static void shmem_recalc_inode(struct inode *inode) 432 { 302 { 433 struct shmem_inode_info *info = SHMEM_ 303 struct shmem_inode_info *info = SHMEM_I(inode); 434 long freed; 304 long freed; 435 305 436 spin_lock(&info->lock); !! 306 freed = info->alloced - info->swapped - inode->i_mapping->nrpages; 437 info->alloced += alloced; !! 307 if (freed > 0) { 438 info->swapped += swapped; << 439 freed = info->alloced - info->swapped << 440 READ_ONCE(inode->i_mapping->nr << 441 /* << 442 * Special case: whereas normally shme << 443 * after i_mapping->nrpages has alread << 444 * shmem_writepage() has to raise swap << 445 * to stop a racing shmem_recalc_inode << 446 * been freed. Compensate here, to av << 447 */ << 448 if (swapped > 0) << 449 freed += swapped; << 450 if (freed > 0) << 451 info->alloced -= freed; 308 info->alloced -= freed; 452 spin_unlock(&info->lock); !! 309 inode->i_blocks -= freed * BLOCKS_PER_PAGE; 453 << 454 /* The quota case may block */ << 455 if (freed > 0) << 456 shmem_inode_unacct_blocks(inod 310 shmem_inode_unacct_blocks(inode, freed); >> 311 } 457 } 312 } 458 313 459 bool shmem_charge(struct inode *inode, long pa 314 bool shmem_charge(struct inode *inode, long pages) 460 { 315 { 461 struct address_space *mapping = inode- !! 316 struct shmem_inode_info *info = SHMEM_I(inode); >> 317 unsigned long flags; 462 318 463 if (shmem_inode_acct_blocks(inode, pag !! 319 if (!shmem_inode_acct_block(inode, pages)) 464 return false; 320 return false; 465 321 466 /* nrpages adjustment first, then shme 322 /* nrpages adjustment first, then shmem_recalc_inode() when balanced */ 467 xa_lock_irq(&mapping->i_pages); !! 323 inode->i_mapping->nrpages += pages; 468 mapping->nrpages += pages; !! 324 469 xa_unlock_irq(&mapping->i_pages); !! 325 spin_lock_irqsave(&info->lock, flags); >> 326 info->alloced += pages; >> 327 inode->i_blocks += pages * BLOCKS_PER_PAGE; >> 328 shmem_recalc_inode(inode); >> 329 spin_unlock_irqrestore(&info->lock, flags); 470 330 471 shmem_recalc_inode(inode, pages, 0); << 472 return true; 331 return true; 473 } 332 } 474 333 475 void shmem_uncharge(struct inode *inode, long 334 void shmem_uncharge(struct inode *inode, long pages) 476 { 335 { 477 /* pages argument is currently unused: !! 336 struct shmem_inode_info *info = SHMEM_I(inode); 478 /* nrpages adjustment done by __filema !! 337 unsigned long flags; 479 338 480 shmem_recalc_inode(inode, 0, 0); !! 339 /* nrpages adjustment done by __delete_from_page_cache() or caller */ >> 340 >> 341 spin_lock_irqsave(&info->lock, flags); >> 342 info->alloced -= pages; >> 343 inode->i_blocks -= pages * BLOCKS_PER_PAGE; >> 344 shmem_recalc_inode(inode); >> 345 spin_unlock_irqrestore(&info->lock, flags); >> 346 >> 347 shmem_inode_unacct_blocks(inode, pages); 481 } 348 } 482 349 483 /* 350 /* 484 * Replace item expected in xarray by a new it 351 * Replace item expected in xarray by a new item, while holding xa_lock. 
485 */ 352 */ 486 static int shmem_replace_entry(struct address_ 353 static int shmem_replace_entry(struct address_space *mapping, 487 pgoff_t index, void *e 354 pgoff_t index, void *expected, void *replacement) 488 { 355 { 489 XA_STATE(xas, &mapping->i_pages, index 356 XA_STATE(xas, &mapping->i_pages, index); 490 void *item; 357 void *item; 491 358 492 VM_BUG_ON(!expected); 359 VM_BUG_ON(!expected); 493 VM_BUG_ON(!replacement); 360 VM_BUG_ON(!replacement); 494 item = xas_load(&xas); 361 item = xas_load(&xas); 495 if (item != expected) 362 if (item != expected) 496 return -ENOENT; 363 return -ENOENT; 497 xas_store(&xas, replacement); 364 xas_store(&xas, replacement); 498 return 0; 365 return 0; 499 } 366 } 500 367 501 /* 368 /* 502 * Sometimes, before we decide whether to proc 369 * Sometimes, before we decide whether to proceed or to fail, we must check 503 * that an entry was not already brought back 370 * that an entry was not already brought back from swap by a racing thread. 504 * 371 * 505 * Checking folio is not enough: by the time a !! 372 * Checking page is not enough: by the time a SwapCache page is locked, it 506 * might be reused, and again be swapcache, us !! 373 * might be reused, and again be SwapCache, using the same swap as before. 507 */ 374 */ 508 static bool shmem_confirm_swap(struct address_ 375 static bool shmem_confirm_swap(struct address_space *mapping, 509 pgoff_t index, 376 pgoff_t index, swp_entry_t swap) 510 { 377 { 511 return xa_load(&mapping->i_pages, inde 378 return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap); 512 } 379 } 513 380 514 /* 381 /* 515 * Definitions for "huge tmpfs": tmpfs mounted 382 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option 516 * 383 * 517 * SHMEM_HUGE_NEVER: 384 * SHMEM_HUGE_NEVER: 518 * disables huge pages for the mount; 385 * disables huge pages for the mount; 519 * SHMEM_HUGE_ALWAYS: 386 * SHMEM_HUGE_ALWAYS: 520 * enables huge pages for the mount; 387 * enables huge pages for the mount; 521 * SHMEM_HUGE_WITHIN_SIZE: 388 * SHMEM_HUGE_WITHIN_SIZE: 522 * only allocate huge pages if the page w 389 * only allocate huge pages if the page will be fully within i_size, 523 * also respect fadvise()/madvise() hints 390 * also respect fadvise()/madvise() hints; 524 * SHMEM_HUGE_ADVISE: 391 * SHMEM_HUGE_ADVISE: 525 * only allocate huge pages if requested 392 * only allocate huge pages if requested with fadvise()/madvise(); 526 */ 393 */ 527 394 528 #define SHMEM_HUGE_NEVER 0 395 #define SHMEM_HUGE_NEVER 0 529 #define SHMEM_HUGE_ALWAYS 1 396 #define SHMEM_HUGE_ALWAYS 1 530 #define SHMEM_HUGE_WITHIN_SIZE 2 397 #define SHMEM_HUGE_WITHIN_SIZE 2 531 #define SHMEM_HUGE_ADVISE 3 398 #define SHMEM_HUGE_ADVISE 3 532 399 533 /* 400 /* 534 * Special values. 401 * Special values. 535 * Only can be set via /sys/kernel/mm/transpar 402 * Only can be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled: 536 * 403 * 537 * SHMEM_HUGE_DENY: 404 * SHMEM_HUGE_DENY: 538 * disables huge on shm_mnt and all mount 405 * disables huge on shm_mnt and all mounts, for emergency use; 539 * SHMEM_HUGE_FORCE: 406 * SHMEM_HUGE_FORCE: 540 * enables huge on shm_mnt and all mounts 407 * enables huge on shm_mnt and all mounts, w/o needing option, for testing; 541 * 408 * 542 */ 409 */ 543 #define SHMEM_HUGE_DENY (-1) 410 #define SHMEM_HUGE_DENY (-1) 544 #define SHMEM_HUGE_FORCE (-2) 411 #define SHMEM_HUGE_FORCE (-2) 545 412 546 #ifdef CONFIG_TRANSPARENT_HUGEPAGE !! 
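
/*
 * Illustrative sketch (not part of the original file): the mapping that the
 * huge= mount option and /sys/kernel/mm/transparent_hugepage/shmem_enabled
 * strings are expected to have onto the values defined above; see
 * shmem_parse_huge() and shmem_format_huge() just below.  "deny" and
 * "force" are only accepted via sysfs, not as mount options.
 */
static const struct {
	const char *name;
	int value;
} shmem_huge_values_sketch[] __maybe_unused = {
	{ "never",		SHMEM_HUGE_NEVER },
	{ "always",		SHMEM_HUGE_ALWAYS },
	{ "within_size",	SHMEM_HUGE_WITHIN_SIZE },
	{ "advise",		SHMEM_HUGE_ADVISE },
	{ "deny",		SHMEM_HUGE_DENY },	/* emergency off switch */
	{ "force",		SHMEM_HUGE_FORCE },	/* force on, for testing */
};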
413 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE 547 /* ifdef here to avoid bloating shmem.o when n 414 /* ifdef here to avoid bloating shmem.o when not necessary */ 548 415 549 static int shmem_huge __read_mostly = SHMEM_HU !! 416 static int shmem_huge __read_mostly; 550 << 551 static bool __shmem_huge_global_enabled(struct << 552 loff_t << 553 struct << 554 unsign << 555 { << 556 struct mm_struct *mm = vma ? vma->vm_m << 557 loff_t i_size; << 558 << 559 if (!S_ISREG(inode->i_mode)) << 560 return false; << 561 if (mm && ((vm_flags & VM_NOHUGEPAGE) << 562 return false; << 563 if (shmem_huge == SHMEM_HUGE_DENY) << 564 return false; << 565 if (shmem_huge_force || shmem_huge == << 566 return true; << 567 << 568 switch (SHMEM_SB(inode->i_sb)->huge) { << 569 case SHMEM_HUGE_ALWAYS: << 570 return true; << 571 case SHMEM_HUGE_WITHIN_SIZE: << 572 index = round_up(index + 1, HP << 573 i_size = max(write_end, i_size << 574 i_size = round_up(i_size, PAGE << 575 if (i_size >> PAGE_SHIFT >= in << 576 return true; << 577 fallthrough; << 578 case SHMEM_HUGE_ADVISE: << 579 if (mm && (vm_flags & VM_HUGEP << 580 return true; << 581 fallthrough; << 582 default: << 583 return false; << 584 } << 585 } << 586 << 587 static bool shmem_huge_global_enabled(struct i << 588 loff_t write_end, bool shme << 589 struct vm_area_struct *vma, << 590 { << 591 if (HPAGE_PMD_ORDER > MAX_PAGECACHE_OR << 592 return false; << 593 << 594 return __shmem_huge_global_enabled(ino << 595 shm << 596 } << 597 417 598 #if defined(CONFIG_SYSFS) 418 #if defined(CONFIG_SYSFS) 599 static int shmem_parse_huge(const char *str) 419 static int shmem_parse_huge(const char *str) 600 { 420 { 601 if (!strcmp(str, "never")) 421 if (!strcmp(str, "never")) 602 return SHMEM_HUGE_NEVER; 422 return SHMEM_HUGE_NEVER; 603 if (!strcmp(str, "always")) 423 if (!strcmp(str, "always")) 604 return SHMEM_HUGE_ALWAYS; 424 return SHMEM_HUGE_ALWAYS; 605 if (!strcmp(str, "within_size")) 425 if (!strcmp(str, "within_size")) 606 return SHMEM_HUGE_WITHIN_SIZE; 426 return SHMEM_HUGE_WITHIN_SIZE; 607 if (!strcmp(str, "advise")) 427 if (!strcmp(str, "advise")) 608 return SHMEM_HUGE_ADVISE; 428 return SHMEM_HUGE_ADVISE; 609 if (!strcmp(str, "deny")) 429 if (!strcmp(str, "deny")) 610 return SHMEM_HUGE_DENY; 430 return SHMEM_HUGE_DENY; 611 if (!strcmp(str, "force")) 431 if (!strcmp(str, "force")) 612 return SHMEM_HUGE_FORCE; 432 return SHMEM_HUGE_FORCE; 613 return -EINVAL; 433 return -EINVAL; 614 } 434 } 615 #endif 435 #endif 616 436 617 #if defined(CONFIG_SYSFS) || defined(CONFIG_TM 437 #if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS) 618 static const char *shmem_format_huge(int huge) 438 static const char *shmem_format_huge(int huge) 619 { 439 { 620 switch (huge) { 440 switch (huge) { 621 case SHMEM_HUGE_NEVER: 441 case SHMEM_HUGE_NEVER: 622 return "never"; 442 return "never"; 623 case SHMEM_HUGE_ALWAYS: 443 case SHMEM_HUGE_ALWAYS: 624 return "always"; 444 return "always"; 625 case SHMEM_HUGE_WITHIN_SIZE: 445 case SHMEM_HUGE_WITHIN_SIZE: 626 return "within_size"; 446 return "within_size"; 627 case SHMEM_HUGE_ADVISE: 447 case SHMEM_HUGE_ADVISE: 628 return "advise"; 448 return "advise"; 629 case SHMEM_HUGE_DENY: 449 case SHMEM_HUGE_DENY: 630 return "deny"; 450 return "deny"; 631 case SHMEM_HUGE_FORCE: 451 case SHMEM_HUGE_FORCE: 632 return "force"; 452 return "force"; 633 default: 453 default: 634 VM_BUG_ON(1); 454 VM_BUG_ON(1); 635 return "bad_val"; 455 return "bad_val"; 636 } 456 } 637 } 457 } 638 #endif 458 #endif 639 459 640 static unsigned long shmem_unused_huge_shrink( 460 
static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, 641 struct shrink_control *sc, uns !! 461 struct shrink_control *sc, unsigned long nr_to_split) 642 { 462 { 643 LIST_HEAD(list), *pos, *next; 463 LIST_HEAD(list), *pos, *next; >> 464 LIST_HEAD(to_remove); 644 struct inode *inode; 465 struct inode *inode; 645 struct shmem_inode_info *info; 466 struct shmem_inode_info *info; 646 struct folio *folio; !! 467 struct page *page; 647 unsigned long batch = sc ? sc->nr_to_s 468 unsigned long batch = sc ? sc->nr_to_scan : 128; 648 unsigned long split = 0, freed = 0; !! 469 int split = 0; 649 470 650 if (list_empty(&sbinfo->shrinklist)) 471 if (list_empty(&sbinfo->shrinklist)) 651 return SHRINK_STOP; 472 return SHRINK_STOP; 652 473 653 spin_lock(&sbinfo->shrinklist_lock); 474 spin_lock(&sbinfo->shrinklist_lock); 654 list_for_each_safe(pos, next, &sbinfo- 475 list_for_each_safe(pos, next, &sbinfo->shrinklist) { 655 info = list_entry(pos, struct 476 info = list_entry(pos, struct shmem_inode_info, shrinklist); 656 477 657 /* pin the inode */ 478 /* pin the inode */ 658 inode = igrab(&info->vfs_inode 479 inode = igrab(&info->vfs_inode); 659 480 660 /* inode is about to be evicte 481 /* inode is about to be evicted */ 661 if (!inode) { 482 if (!inode) { 662 list_del_init(&info->s 483 list_del_init(&info->shrinklist); 663 goto next; 484 goto next; 664 } 485 } 665 486 >> 487 /* Check if there's anything to gain */ >> 488 if (round_up(inode->i_size, PAGE_SIZE) == >> 489 round_up(inode->i_size, HPAGE_PMD_SIZE)) { >> 490 list_move(&info->shrinklist, &to_remove); >> 491 goto next; >> 492 } >> 493 666 list_move(&info->shrinklist, & 494 list_move(&info->shrinklist, &list); 667 next: 495 next: 668 sbinfo->shrinklist_len--; 496 sbinfo->shrinklist_len--; 669 if (!--batch) 497 if (!--batch) 670 break; 498 break; 671 } 499 } 672 spin_unlock(&sbinfo->shrinklist_lock); 500 spin_unlock(&sbinfo->shrinklist_lock); 673 501 >> 502 list_for_each_safe(pos, next, &to_remove) { >> 503 info = list_entry(pos, struct shmem_inode_info, shrinklist); >> 504 inode = &info->vfs_inode; >> 505 list_del_init(&info->shrinklist); >> 506 iput(inode); >> 507 } >> 508 674 list_for_each_safe(pos, next, &list) { 509 list_for_each_safe(pos, next, &list) { 675 pgoff_t next, end; << 676 loff_t i_size; << 677 int ret; 510 int ret; 678 511 679 info = list_entry(pos, struct 512 info = list_entry(pos, struct shmem_inode_info, shrinklist); 680 inode = &info->vfs_inode; 513 inode = &info->vfs_inode; 681 514 682 if (nr_to_free && freed >= nr_ !! 515 if (nr_to_split && split >= nr_to_split) 683 goto move_back; 516 goto move_back; 684 517 685 i_size = i_size_read(inode); !! 518 page = find_get_page(inode->i_mapping, 686 folio = filemap_get_entry(inod !! 519 (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT); 687 if (!folio || xa_is_value(foli !! 520 if (!page) 688 goto drop; << 689 << 690 /* No large folio at the end o << 691 if (!folio_test_large(folio)) << 692 folio_put(folio); << 693 goto drop; 521 goto drop; 694 } << 695 522 696 /* Check if there is anything !! 523 /* No huge page at the end of the file: nothing to split */ 697 next = folio_next_index(folio) !! 524 if (!PageTransHuge(page)) { 698 end = shmem_fallocend(inode, D !! 
525 put_page(page); 699 if (end <= folio->index || end << 700 folio_put(folio); << 701 goto drop; 526 goto drop; 702 } 527 } 703 528 704 /* 529 /* 705 * Move the inode on the list 530 * Move the inode on the list back to shrinklist if we failed 706 * to lock the page at this ti 531 * to lock the page at this time. 707 * 532 * 708 * Waiting for the lock may le 533 * Waiting for the lock may lead to deadlock in the 709 * reclaim path. 534 * reclaim path. 710 */ 535 */ 711 if (!folio_trylock(folio)) { !! 536 if (!trylock_page(page)) { 712 folio_put(folio); !! 537 put_page(page); 713 goto move_back; 538 goto move_back; 714 } 539 } 715 540 716 ret = split_folio(folio); !! 541 ret = split_huge_page(page); 717 folio_unlock(folio); !! 542 unlock_page(page); 718 folio_put(folio); !! 543 put_page(page); 719 544 720 /* If split failed move the in 545 /* If split failed move the inode on the list back to shrinklist */ 721 if (ret) 546 if (ret) 722 goto move_back; 547 goto move_back; 723 548 724 freed += next - end; << 725 split++; 549 split++; 726 drop: 550 drop: 727 list_del_init(&info->shrinklis 551 list_del_init(&info->shrinklist); 728 goto put; 552 goto put; 729 move_back: 553 move_back: 730 /* 554 /* 731 * Make sure the inode is eith 555 * Make sure the inode is either on the global list or deleted 732 * from any local list before 556 * from any local list before iput() since it could be deleted 733 * in another thread once we p 557 * in another thread once we put the inode (then the local list 734 * is corrupted). 558 * is corrupted). 735 */ 559 */ 736 spin_lock(&sbinfo->shrinklist_ 560 spin_lock(&sbinfo->shrinklist_lock); 737 list_move(&info->shrinklist, & 561 list_move(&info->shrinklist, &sbinfo->shrinklist); 738 sbinfo->shrinklist_len++; 562 sbinfo->shrinklist_len++; 739 spin_unlock(&sbinfo->shrinklis 563 spin_unlock(&sbinfo->shrinklist_lock); 740 put: 564 put: 741 iput(inode); 565 iput(inode); 742 } 566 } 743 567 744 return split; 568 return split; 745 } 569 } 746 570 747 static long shmem_unused_huge_scan(struct supe 571 static long shmem_unused_huge_scan(struct super_block *sb, 748 struct shrink_control *sc) 572 struct shrink_control *sc) 749 { 573 { 750 struct shmem_sb_info *sbinfo = SHMEM_S 574 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 751 575 752 if (!READ_ONCE(sbinfo->shrinklist_len) 576 if (!READ_ONCE(sbinfo->shrinklist_len)) 753 return SHRINK_STOP; 577 return SHRINK_STOP; 754 578 755 return shmem_unused_huge_shrink(sbinfo 579 return shmem_unused_huge_shrink(sbinfo, sc, 0); 756 } 580 } 757 581 758 static long shmem_unused_huge_count(struct sup 582 static long shmem_unused_huge_count(struct super_block *sb, 759 struct shrink_control *sc) 583 struct shrink_control *sc) 760 { 584 { 761 struct shmem_sb_info *sbinfo = SHMEM_S 585 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 762 return READ_ONCE(sbinfo->shrinklist_le 586 return READ_ONCE(sbinfo->shrinklist_len); 763 } 587 } 764 #else /* !CONFIG_TRANSPARENT_HUGEPAGE */ !! 588 #else /* !CONFIG_TRANSPARENT_HUGE_PAGECACHE */ 765 589 766 #define shmem_huge SHMEM_HUGE_DENY 590 #define shmem_huge SHMEM_HUGE_DENY 767 591 768 static unsigned long shmem_unused_huge_shrink( 592 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, 769 struct shrink_control *sc, uns !! 593 struct shrink_control *sc, unsigned long nr_to_split) 770 { 594 { 771 return 0; 595 return 0; 772 } 596 } >> 597 #endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */ 773 598 774 static bool shmem_huge_global_enabled(struct i !! 
599 static inline bool is_huge_enabled(struct shmem_sb_info *sbinfo) 775 loff_t write_end, bool shmem_h << 776 struct vm_area_struct *vma, un << 777 { 600 { >> 601 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && >> 602 (shmem_huge == SHMEM_HUGE_FORCE || sbinfo->huge) && >> 603 shmem_huge != SHMEM_HUGE_DENY) >> 604 return true; 778 return false; 605 return false; 779 } 606 } 780 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ << 781 607 782 /* 608 /* 783 * Somewhat like filemap_add_folio, but error !! 609 * Like add_to_page_cache_locked, but error if expected item has gone. 784 */ 610 */ 785 static int shmem_add_to_page_cache(struct foli !! 611 static int shmem_add_to_page_cache(struct page *page, 786 struct addr 612 struct address_space *mapping, 787 pgoff_t ind 613 pgoff_t index, void *expected, gfp_t gfp) 788 { 614 { 789 XA_STATE_ORDER(xas, &mapping->i_pages, !! 615 XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page)); 790 long nr = folio_nr_pages(folio); !! 616 unsigned long i = 0; 791 !! 617 unsigned long nr = compound_nr(page); 792 VM_BUG_ON_FOLIO(index != round_down(in !! 618 793 VM_BUG_ON_FOLIO(!folio_test_locked(fol !! 619 VM_BUG_ON_PAGE(PageTail(page), page); 794 VM_BUG_ON_FOLIO(!folio_test_swapbacked !! 620 VM_BUG_ON_PAGE(index != round_down(index, nr), page); 795 !! 621 VM_BUG_ON_PAGE(!PageLocked(page), page); 796 folio_ref_add(folio, nr); !! 622 VM_BUG_ON_PAGE(!PageSwapBacked(page), page); 797 folio->mapping = mapping; !! 623 VM_BUG_ON(expected && PageTransHuge(page)); 798 folio->index = index; !! 624 799 !! 625 page_ref_add(page, nr); 800 gfp &= GFP_RECLAIM_MASK; !! 626 page->mapping = mapping; 801 folio_throttle_swaprate(folio, gfp); !! 627 page->index = index; 802 628 803 do { 629 do { >> 630 void *entry; 804 xas_lock_irq(&xas); 631 xas_lock_irq(&xas); 805 if (expected != xas_find_confl !! 632 entry = xas_find_conflict(&xas); >> 633 if (entry != expected) 806 xas_set_err(&xas, -EEX 634 xas_set_err(&xas, -EEXIST); >> 635 xas_create_range(&xas); >> 636 if (xas_error(&xas)) 807 goto unlock; 637 goto unlock; >> 638 next: >> 639 xas_store(&xas, page); >> 640 if (++i < nr) { >> 641 xas_next(&xas); >> 642 goto next; 808 } 643 } 809 if (expected && xas_find_confl !! 644 if (PageTransHuge(page)) { 810 xas_set_err(&xas, -EEX !! 645 count_vm_event(THP_FILE_ALLOC); 811 goto unlock; !! 646 __inc_node_page_state(page, NR_SHMEM_THPS); 812 } 647 } 813 xas_store(&xas, folio); << 814 if (xas_error(&xas)) << 815 goto unlock; << 816 if (folio_test_pmd_mappable(fo << 817 __lruvec_stat_mod_foli << 818 __lruvec_stat_mod_folio(folio, << 819 __lruvec_stat_mod_folio(folio, << 820 mapping->nrpages += nr; 648 mapping->nrpages += nr; >> 649 __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr); >> 650 __mod_node_page_state(page_pgdat(page), NR_SHMEM, nr); 821 unlock: 651 unlock: 822 xas_unlock_irq(&xas); 652 xas_unlock_irq(&xas); 823 } while (xas_nomem(&xas, gfp)); 653 } while (xas_nomem(&xas, gfp)); 824 654 825 if (xas_error(&xas)) { 655 if (xas_error(&xas)) { 826 folio->mapping = NULL; !! 656 page->mapping = NULL; 827 folio_ref_sub(folio, nr); !! 657 page_ref_sub(page, nr); 828 return xas_error(&xas); 658 return xas_error(&xas); 829 } 659 } 830 660 831 return 0; 661 return 0; 832 } 662 } 833 663 834 /* 664 /* 835 * Somewhat like filemap_remove_folio, but sub !! 665 * Like delete_from_page_cache, but substitutes swap for page. 836 */ 666 */ 837 static void shmem_delete_from_page_cache(struc !! 
667 static void shmem_delete_from_page_cache(struct page *page, void *radswap) 838 { 668 { 839 struct address_space *mapping = folio- !! 669 struct address_space *mapping = page->mapping; 840 long nr = folio_nr_pages(folio); << 841 int error; 670 int error; 842 671 >> 672 VM_BUG_ON_PAGE(PageCompound(page), page); >> 673 843 xa_lock_irq(&mapping->i_pages); 674 xa_lock_irq(&mapping->i_pages); 844 error = shmem_replace_entry(mapping, f !! 675 error = shmem_replace_entry(mapping, page->index, page, radswap); 845 folio->mapping = NULL; !! 676 page->mapping = NULL; 846 mapping->nrpages -= nr; !! 677 mapping->nrpages--; 847 __lruvec_stat_mod_folio(folio, NR_FILE !! 678 __dec_node_page_state(page, NR_FILE_PAGES); 848 __lruvec_stat_mod_folio(folio, NR_SHME !! 679 __dec_node_page_state(page, NR_SHMEM); 849 xa_unlock_irq(&mapping->i_pages); 680 xa_unlock_irq(&mapping->i_pages); 850 folio_put_refs(folio, nr); !! 681 put_page(page); 851 BUG_ON(error); 682 BUG_ON(error); 852 } 683 } 853 684 854 /* 685 /* 855 * Remove swap entry from page cache, free the !! 686 * Remove swap entry from page cache, free the swap and its page cache. 856 * the number of pages being freed. 0 means en << 857 * being freed). << 858 */ 687 */ 859 static long shmem_free_swap(struct address_spa !! 688 static int shmem_free_swap(struct address_space *mapping, 860 pgoff_t index, voi !! 689 pgoff_t index, void *radswap) 861 { 690 { 862 int order = xa_get_order(&mapping->i_p << 863 void *old; 691 void *old; 864 692 865 old = xa_cmpxchg_irq(&mapping->i_pages 693 old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0); 866 if (old != radswap) 694 if (old != radswap) 867 return 0; !! 695 return -ENOENT; 868 free_swap_and_cache_nr(radix_to_swp_en !! 696 free_swap_and_cache(radix_to_swp_entry(radswap)); 869 !! 697 return 0; 870 return 1 << order; << 871 } 698 } 872 699 873 /* 700 /* 874 * Determine (in bytes) how many of the shmem 701 * Determine (in bytes) how many of the shmem object's pages mapped by the 875 * given offsets are swapped out. 702 * given offsets are swapped out. 876 * 703 * 877 * This is safe to call without i_rwsem or the !! 704 * This is safe to call without i_mutex or the i_pages lock thanks to RCU, 878 * as long as the inode doesn't go away and ra 705 * as long as the inode doesn't go away and racy results are not a problem. 879 */ 706 */ 880 unsigned long shmem_partial_swap_usage(struct 707 unsigned long shmem_partial_swap_usage(struct address_space *mapping, 881 708 pgoff_t start, pgoff_t end) 882 { 709 { 883 XA_STATE(xas, &mapping->i_pages, start 710 XA_STATE(xas, &mapping->i_pages, start); 884 struct page *page; 711 struct page *page; 885 unsigned long swapped = 0; 712 unsigned long swapped = 0; 886 unsigned long max = end - 1; << 887 713 888 rcu_read_lock(); 714 rcu_read_lock(); 889 xas_for_each(&xas, page, max) { !! 715 xas_for_each(&xas, page, end - 1) { 890 if (xas_retry(&xas, page)) 716 if (xas_retry(&xas, page)) 891 continue; 717 continue; 892 if (xa_is_value(page)) 718 if (xa_is_value(page)) 893 swapped += 1 << xas_ge !! 719 swapped++; 894 if (xas.xa_index == max) !! 
720 895 break; << 896 if (need_resched()) { 721 if (need_resched()) { 897 xas_pause(&xas); 722 xas_pause(&xas); 898 cond_resched_rcu(); 723 cond_resched_rcu(); 899 } 724 } 900 } 725 } >> 726 901 rcu_read_unlock(); 727 rcu_read_unlock(); 902 728 903 return swapped << PAGE_SHIFT; 729 return swapped << PAGE_SHIFT; 904 } 730 } 905 731 906 /* 732 /* 907 * Determine (in bytes) how many of the shmem 733 * Determine (in bytes) how many of the shmem object's pages mapped by the 908 * given vma is swapped out. 734 * given vma is swapped out. 909 * 735 * 910 * This is safe to call without i_rwsem or the !! 736 * This is safe to call without i_mutex or the i_pages lock thanks to RCU, 911 * as long as the inode doesn't go away and ra 737 * as long as the inode doesn't go away and racy results are not a problem. 912 */ 738 */ 913 unsigned long shmem_swap_usage(struct vm_area_ 739 unsigned long shmem_swap_usage(struct vm_area_struct *vma) 914 { 740 { 915 struct inode *inode = file_inode(vma-> 741 struct inode *inode = file_inode(vma->vm_file); 916 struct shmem_inode_info *info = SHMEM_ 742 struct shmem_inode_info *info = SHMEM_I(inode); 917 struct address_space *mapping = inode- 743 struct address_space *mapping = inode->i_mapping; 918 unsigned long swapped; 744 unsigned long swapped; 919 745 920 /* Be careful as we don't hold info->l 746 /* Be careful as we don't hold info->lock */ 921 swapped = READ_ONCE(info->swapped); 747 swapped = READ_ONCE(info->swapped); 922 748 923 /* 749 /* 924 * The easier cases are when the shmem 750 * The easier cases are when the shmem object has nothing in swap, or 925 * the vma maps it whole. Then we can 751 * the vma maps it whole. Then we can simply use the stats that we 926 * already track. 752 * already track. 927 */ 753 */ 928 if (!swapped) 754 if (!swapped) 929 return 0; 755 return 0; 930 756 931 if (!vma->vm_pgoff && vma->vm_end - vm 757 if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size) 932 return swapped << PAGE_SHIFT; 758 return swapped << PAGE_SHIFT; 933 759 934 /* Here comes the more involved part * 760 /* Here comes the more involved part */ 935 return shmem_partial_swap_usage(mappin !! 761 return shmem_partial_swap_usage(mapping, 936 vma->v !! 762 linear_page_index(vma, vma->vm_start), >> 763 linear_page_index(vma, vma->vm_end)); 937 } 764 } 938 765 939 /* 766 /* 940 * SysV IPC SHM_UNLOCK restore Unevictable pag 767 * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists. 941 */ 768 */ 942 void shmem_unlock_mapping(struct address_space 769 void shmem_unlock_mapping(struct address_space *mapping) 943 { 770 { 944 struct folio_batch fbatch; !! 771 struct pagevec pvec; >> 772 pgoff_t indices[PAGEVEC_SIZE]; 945 pgoff_t index = 0; 773 pgoff_t index = 0; 946 774 947 folio_batch_init(&fbatch); !! 775 pagevec_init(&pvec); 948 /* 776 /* 949 * Minor point, but we might as well s 777 * Minor point, but we might as well stop if someone else SHM_LOCKs it. 950 */ 778 */ 951 while (!mapping_unevictable(mapping) & !! 779 while (!mapping_unevictable(mapping)) { 952 filemap_get_folios(mapping, &in !! 780 /* 953 check_move_unevictable_folios( !! 781 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it 954 folio_batch_release(&fbatch); !! 782 * has finished, if it hits a row of PAGEVEC_SIZE swap entries. 
>> 783 */ >> 784 pvec.nr = find_get_entries(mapping, index, >> 785 PAGEVEC_SIZE, pvec.pages, indices); >> 786 if (!pvec.nr) >> 787 break; >> 788 index = indices[pvec.nr - 1] + 1; >> 789 pagevec_remove_exceptionals(&pvec); >> 790 check_move_unevictable_pages(&pvec); >> 791 pagevec_release(&pvec); 955 cond_resched(); 792 cond_resched(); 956 } 793 } 957 } 794 } 958 795 959 static struct folio *shmem_get_partial_folio(s << 960 { << 961 struct folio *folio; << 962 << 963 /* << 964 * At first avoid shmem_get_folio(,,,S << 965 * beyond i_size, and reports fallocat << 966 */ << 967 folio = filemap_get_entry(inode->i_map << 968 if (!folio) << 969 return folio; << 970 if (!xa_is_value(folio)) { << 971 folio_lock(folio); << 972 if (folio->mapping == inode->i << 973 return folio; << 974 /* The folio has been swapped << 975 folio_unlock(folio); << 976 folio_put(folio); << 977 } << 978 /* << 979 * But read a folio back from swap if << 980 * (although in some cases this is jus << 981 */ << 982 folio = NULL; << 983 shmem_get_folio(inode, index, 0, &foli << 984 return folio; << 985 } << 986 << 987 /* 796 /* 988 * Remove range of pages and swap entries from 797 * Remove range of pages and swap entries from page cache, and free them. 989 * If !unfalloc, truncate or punch hole; if un 798 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate. 990 */ 799 */ 991 static void shmem_undo_range(struct inode *ino 800 static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, 992 801 bool unfalloc) 993 { 802 { 994 struct address_space *mapping = inode- 803 struct address_space *mapping = inode->i_mapping; 995 struct shmem_inode_info *info = SHMEM_ 804 struct shmem_inode_info *info = SHMEM_I(inode); 996 pgoff_t start = (lstart + PAGE_SIZE - 805 pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT; 997 pgoff_t end = (lend + 1) >> PAGE_SHIFT 806 pgoff_t end = (lend + 1) >> PAGE_SHIFT; 998 struct folio_batch fbatch; !! 807 unsigned int partial_start = lstart & (PAGE_SIZE - 1); >> 808 unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1); >> 809 struct pagevec pvec; 999 pgoff_t indices[PAGEVEC_SIZE]; 810 pgoff_t indices[PAGEVEC_SIZE]; 1000 struct folio *folio; << 1001 bool same_folio; << 1002 long nr_swaps_freed = 0; 811 long nr_swaps_freed = 0; 1003 pgoff_t index; 812 pgoff_t index; 1004 int i; 813 int i; 1005 814 1006 if (lend == -1) 815 if (lend == -1) 1007 end = -1; /* unsigned, 816 end = -1; /* unsigned, so actually very big */ 1008 817 1009 if (info->fallocend > start && info-> !! 818 pagevec_init(&pvec); 1010 info->fallocend = start; << 1011 << 1012 folio_batch_init(&fbatch); << 1013 index = start; 819 index = start; 1014 while (index < end && find_lock_entri !! 820 while (index < end) { 1015 &fbatch, indices)) { !! 821 pvec.nr = find_get_entries(mapping, index, 1016 for (i = 0; i < folio_batch_c !! 822 min(end - index, (pgoff_t)PAGEVEC_SIZE), 1017 folio = fbatch.folios !! 823 pvec.pages, indices); >> 824 if (!pvec.nr) >> 825 break; >> 826 for (i = 0; i < pagevec_count(&pvec); i++) { >> 827 struct page *page = pvec.pages[i]; >> 828 >> 829 index = indices[i]; >> 830 if (index >= end) >> 831 break; 1018 832 1019 if (xa_is_value(folio !! 833 if (xa_is_value(page)) { 1020 if (unfalloc) 834 if (unfalloc) 1021 conti 835 continue; 1022 nr_swaps_free !! 836 nr_swaps_freed += !shmem_free_swap(mapping, 1023 !! 837 index, page); 1024 continue; 838 continue; 1025 } 839 } 1026 840 1027 if (!unfalloc || !fol !! 
841 VM_BUG_ON_PAGE(page_to_pgoff(page) != index, page); 1028 truncate_inod !! 842 1029 folio_unlock(folio); !! 843 if (!trylock_page(page)) >> 844 continue; >> 845 >> 846 if (PageTransTail(page)) { >> 847 /* Middle of THP: zero out the page */ >> 848 clear_highpage(page); >> 849 unlock_page(page); >> 850 continue; >> 851 } else if (PageTransHuge(page)) { >> 852 if (index == round_down(end, HPAGE_PMD_NR)) { >> 853 /* >> 854 * Range ends in the middle of THP: >> 855 * zero out the page >> 856 */ >> 857 clear_highpage(page); >> 858 unlock_page(page); >> 859 continue; >> 860 } >> 861 index += HPAGE_PMD_NR - 1; >> 862 i += HPAGE_PMD_NR - 1; >> 863 } >> 864 >> 865 if (!unfalloc || !PageUptodate(page)) { >> 866 VM_BUG_ON_PAGE(PageTail(page), page); >> 867 if (page_mapping(page) == mapping) { >> 868 VM_BUG_ON_PAGE(PageWriteback(page), page); >> 869 truncate_inode_page(mapping, page); >> 870 } >> 871 } >> 872 unlock_page(page); 1030 } 873 } 1031 folio_batch_remove_exceptiona !! 874 pagevec_remove_exceptionals(&pvec); 1032 folio_batch_release(&fbatch); !! 875 pagevec_release(&pvec); 1033 cond_resched(); 876 cond_resched(); >> 877 index++; 1034 } 878 } 1035 879 1036 /* !! 880 if (partial_start) { 1037 * When undoing a failed fallocate, w !! 881 struct page *page = NULL; 1038 * zeroing and splitting below, but s !! 882 shmem_getpage(inode, start - 1, &page, SGP_READ); 1039 * folio when !uptodate indicates tha !! 883 if (page) { 1040 * even when [lstart, lend] covers on !! 884 unsigned int top = PAGE_SIZE; 1041 */ !! 885 if (start > end) { 1042 if (unfalloc) !! 886 top = partial_end; 1043 goto whole_folios; !! 887 partial_end = 0; 1044 !! 888 } 1045 same_folio = (lstart >> PAGE_SHIFT) = !! 889 zero_user_segment(page, partial_start, top); 1046 folio = shmem_get_partial_folio(inode !! 890 set_page_dirty(page); 1047 if (folio) { !! 891 unlock_page(page); 1048 same_folio = lend < folio_pos !! 892 put_page(page); 1049 folio_mark_dirty(folio); !! 893 } 1050 if (!truncate_inode_partial_f << 1051 start = folio_next_in << 1052 if (same_folio) << 1053 end = folio-> << 1054 } << 1055 folio_unlock(folio); << 1056 folio_put(folio); << 1057 folio = NULL; << 1058 } << 1059 << 1060 if (!same_folio) << 1061 folio = shmem_get_partial_fol << 1062 if (folio) { << 1063 folio_mark_dirty(folio); << 1064 if (!truncate_inode_partial_f << 1065 end = folio->index; << 1066 folio_unlock(folio); << 1067 folio_put(folio); << 1068 } 894 } 1069 !! 895 if (partial_end) { 1070 whole_folios: !! 896 struct page *page = NULL; >> 897 shmem_getpage(inode, end, &page, SGP_READ); >> 898 if (page) { >> 899 zero_user_segment(page, 0, partial_end); >> 900 set_page_dirty(page); >> 901 unlock_page(page); >> 902 put_page(page); >> 903 } >> 904 } >> 905 if (start >= end) >> 906 return; 1071 907 1072 index = start; 908 index = start; 1073 while (index < end) { 909 while (index < end) { 1074 cond_resched(); 910 cond_resched(); 1075 911 1076 if (!find_get_entries(mapping !! 912 pvec.nr = find_get_entries(mapping, index, 1077 indices)) { !! 913 min(end - index, (pgoff_t)PAGEVEC_SIZE), >> 914 pvec.pages, indices); >> 915 if (!pvec.nr) { 1078 /* If all gone or hol 916 /* If all gone or hole-punch or unfalloc, we're done */ 1079 if (index == start || 917 if (index == start || end != -1) 1080 break; 918 break; 1081 /* But if truncating, 919 /* But if truncating, restart to make sure all gone */ 1082 index = start; 920 index = start; 1083 continue; 921 continue; 1084 } 922 } 1085 for (i = 0; i < folio_batch_c !! 
923 for (i = 0; i < pagevec_count(&pvec); i++) { 1086 folio = fbatch.folios !! 924 struct page *page = pvec.pages[i]; 1087 925 1088 if (xa_is_value(folio !! 926 index = indices[i]; 1089 long swaps_fr !! 927 if (index >= end) >> 928 break; 1090 929 >> 930 if (xa_is_value(page)) { 1091 if (unfalloc) 931 if (unfalloc) 1092 conti 932 continue; 1093 swaps_freed = !! 933 if (shmem_free_swap(mapping, index, page)) { 1094 if (!swaps_fr << 1095 /* Sw 934 /* Swap was replaced by page: retry */ 1096 index !! 935 index--; 1097 break 936 break; 1098 } 937 } 1099 nr_swaps_free !! 938 nr_swaps_freed++; 1100 continue; 939 continue; 1101 } 940 } 1102 941 1103 folio_lock(folio); !! 942 lock_page(page); 1104 << 1105 if (!unfalloc || !fol << 1106 if (folio_map << 1107 /* Pa << 1108 folio << 1109 index << 1110 break << 1111 } << 1112 VM_BUG_ON_FOL << 1113 << 1114 943 1115 if (!folio_te !! 944 if (PageTransTail(page)) { 1116 trunc !! 945 /* Middle of THP: zero out the page */ 1117 } else if (tr !! 946 clear_highpage(page); >> 947 unlock_page(page); >> 948 /* >> 949 * Partial thp truncate due 'start' in middle >> 950 * of THP: don't need to look on these pages >> 951 * again on !pvec.nr restart. >> 952 */ >> 953 if (index != round_down(end, HPAGE_PMD_NR)) >> 954 start++; >> 955 continue; >> 956 } else if (PageTransHuge(page)) { >> 957 if (index == round_down(end, HPAGE_PMD_NR)) { 1118 /* 958 /* 1119 * If !! 959 * Range ends in the middle of THP: 1120 * th !! 960 * zero out the page 1121 * Ot << 1122 * dr << 1123 * ze << 1124 * is << 1125 */ 961 */ 1126 if (! !! 962 clear_highpage(page); 1127 !! 963 unlock_page(page); 1128 !! 964 continue; 1129 !! 965 } 1130 } !! 966 index += HPAGE_PMD_NR - 1; >> 967 i += HPAGE_PMD_NR - 1; >> 968 } >> 969 >> 970 if (!unfalloc || !PageUptodate(page)) { >> 971 VM_BUG_ON_PAGE(PageTail(page), page); >> 972 if (page_mapping(page) == mapping) { >> 973 VM_BUG_ON_PAGE(PageWriteback(page), page); >> 974 truncate_inode_page(mapping, page); >> 975 } else { >> 976 /* Page was replaced by swap: retry */ >> 977 unlock_page(page); >> 978 index--; >> 979 break; 1131 } 980 } 1132 } 981 } 1133 folio_unlock(folio); !! 982 unlock_page(page); 1134 } 983 } 1135 folio_batch_remove_exceptiona !! 984 pagevec_remove_exceptionals(&pvec); 1136 folio_batch_release(&fbatch); !! 985 pagevec_release(&pvec); >> 986 index++; 1137 } 987 } 1138 988 1139 shmem_recalc_inode(inode, 0, -nr_swap !! 989 spin_lock_irq(&info->lock); >> 990 info->swapped -= nr_swaps_freed; >> 991 shmem_recalc_inode(inode); >> 992 spin_unlock_irq(&info->lock); 1140 } 993 } 1141 994 1142 void shmem_truncate_range(struct inode *inode 995 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) 1143 { 996 { 1144 shmem_undo_range(inode, lstart, lend, 997 shmem_undo_range(inode, lstart, lend, false); 1145 inode_set_mtime_to_ts(inode, inode_se !! 998 inode->i_ctime = inode->i_mtime = current_time(inode); 1146 inode_inc_iversion(inode); << 1147 } 999 } 1148 EXPORT_SYMBOL_GPL(shmem_truncate_range); 1000 EXPORT_SYMBOL_GPL(shmem_truncate_range); 1149 1001 1150 static int shmem_getattr(struct mnt_idmap *id !! 
1002 static int shmem_getattr(const struct path *path, struct kstat *stat, 1151 const struct path *p << 1152 u32 request_mask, un 1003 u32 request_mask, unsigned int query_flags) 1153 { 1004 { 1154 struct inode *inode = path->dentry->d 1005 struct inode *inode = path->dentry->d_inode; 1155 struct shmem_inode_info *info = SHMEM 1006 struct shmem_inode_info *info = SHMEM_I(inode); >> 1007 struct shmem_sb_info *sb_info = SHMEM_SB(inode->i_sb); 1156 1008 1157 if (info->alloced - info->swapped != !! 1009 if (info->alloced - info->swapped != inode->i_mapping->nrpages) { 1158 shmem_recalc_inode(inode, 0, !! 1010 spin_lock_irq(&info->lock); 1159 !! 1011 shmem_recalc_inode(inode); 1160 if (info->fsflags & FS_APPEND_FL) !! 1012 spin_unlock_irq(&info->lock); 1161 stat->attributes |= STATX_ATT !! 1013 } 1162 if (info->fsflags & FS_IMMUTABLE_FL) << 1163 stat->attributes |= STATX_ATT << 1164 if (info->fsflags & FS_NODUMP_FL) << 1165 stat->attributes |= STATX_ATT << 1166 stat->attributes_mask |= (STATX_ATTR_ << 1167 STATX_ATTR_IMMUTABLE << 1168 STATX_ATTR_NODUMP); << 1169 inode_lock_shared(inode); 1014 inode_lock_shared(inode); 1170 generic_fillattr(idmap, request_mask, !! 1015 generic_fillattr(inode, stat); 1171 inode_unlock_shared(inode); 1016 inode_unlock_shared(inode); 1172 1017 1173 if (shmem_huge_global_enabled(inode, !! 1018 if (is_huge_enabled(sb_info)) 1174 stat->blksize = HPAGE_PMD_SIZ 1019 stat->blksize = HPAGE_PMD_SIZE; 1175 1020 1176 if (request_mask & STATX_BTIME) { << 1177 stat->result_mask |= STATX_BT << 1178 stat->btime.tv_sec = info->i_ << 1179 stat->btime.tv_nsec = info->i << 1180 } << 1181 << 1182 return 0; 1021 return 0; 1183 } 1022 } 1184 1023 1185 static int shmem_setattr(struct mnt_idmap *id !! 1024 static int shmem_setattr(struct dentry *dentry, struct iattr *attr) 1186 struct dentry *dentr << 1187 { 1025 { 1188 struct inode *inode = d_inode(dentry) 1026 struct inode *inode = d_inode(dentry); 1189 struct shmem_inode_info *info = SHMEM 1027 struct shmem_inode_info *info = SHMEM_I(inode); >> 1028 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 1190 int error; 1029 int error; 1191 bool update_mtime = false; << 1192 bool update_ctime = true; << 1193 1030 1194 error = setattr_prepare(idmap, dentry !! 1031 error = setattr_prepare(dentry, attr); 1195 if (error) 1032 if (error) 1196 return error; 1033 return error; 1197 1034 1198 if ((info->seals & F_SEAL_EXEC) && (a << 1199 if ((inode->i_mode ^ attr->ia << 1200 return -EPERM; << 1201 } << 1202 } << 1203 << 1204 if (S_ISREG(inode->i_mode) && (attr-> 1035 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { 1205 loff_t oldsize = inode->i_siz 1036 loff_t oldsize = inode->i_size; 1206 loff_t newsize = attr->ia_siz 1037 loff_t newsize = attr->ia_size; 1207 1038 1208 /* protected by i_rwsem */ !! 1039 /* protected by i_mutex */ 1209 if ((newsize < oldsize && (in 1040 if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) || 1210 (newsize > oldsize && (in 1041 (newsize > oldsize && (info->seals & F_SEAL_GROW))) 1211 return -EPERM; 1042 return -EPERM; 1212 1043 1213 if (newsize != oldsize) { 1044 if (newsize != oldsize) { 1214 error = shmem_reacct_ 1045 error = shmem_reacct_size(SHMEM_I(inode)->flags, 1215 oldsi 1046 oldsize, newsize); 1216 if (error) 1047 if (error) 1217 return error; 1048 return error; 1218 i_size_write(inode, n 1049 i_size_write(inode, newsize); 1219 update_mtime = true; !! 
1050 inode->i_ctime = inode->i_mtime = current_time(inode); 1220 } else { << 1221 update_ctime = false; << 1222 } 1051 } 1223 if (newsize <= oldsize) { 1052 if (newsize <= oldsize) { 1224 loff_t holebegin = ro 1053 loff_t holebegin = round_up(newsize, PAGE_SIZE); 1225 if (oldsize > holebeg 1054 if (oldsize > holebegin) 1226 unmap_mapping 1055 unmap_mapping_range(inode->i_mapping, 1227 1056 holebegin, 0, 1); 1228 if (info->alloced) 1057 if (info->alloced) 1229 shmem_truncat 1058 shmem_truncate_range(inode, 1230 1059 newsize, (loff_t)-1); 1231 /* unmap again to rem 1060 /* unmap again to remove racily COWed private pages */ 1232 if (oldsize > holebeg 1061 if (oldsize > holebegin) 1233 unmap_mapping 1062 unmap_mapping_range(inode->i_mapping, 1234 1063 holebegin, 0, 1); 1235 } << 1236 } << 1237 << 1238 if (is_quota_modification(idmap, inod << 1239 error = dquot_initialize(inod << 1240 if (error) << 1241 return error; << 1242 } << 1243 1064 1244 /* Transfer quota accounting */ !! 1065 /* 1245 if (i_uid_needs_update(idmap, attr, i !! 1066 * Part of the huge page can be beyond i_size: subject 1246 i_gid_needs_update(idmap, attr, i !! 1067 * to shrink under memory pressure. 1247 error = dquot_transfer(idmap, !! 1068 */ 1248 if (error) !! 1069 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) { 1249 return error; !! 1070 spin_lock(&sbinfo->shrinklist_lock); >> 1071 /* >> 1072 * _careful to defend against unlocked access to >> 1073 * ->shrink_list in shmem_unused_huge_shrink() >> 1074 */ >> 1075 if (list_empty_careful(&info->shrinklist)) { >> 1076 list_add_tail(&info->shrinklist, >> 1077 &sbinfo->shrinklist); >> 1078 sbinfo->shrinklist_len++; >> 1079 } >> 1080 spin_unlock(&sbinfo->shrinklist_lock); >> 1081 } >> 1082 } 1250 } 1083 } 1251 1084 1252 setattr_copy(idmap, inode, attr); !! 1085 setattr_copy(inode, attr); 1253 if (attr->ia_valid & ATTR_MODE) 1086 if (attr->ia_valid & ATTR_MODE) 1254 error = posix_acl_chmod(idmap !! 1087 error = posix_acl_chmod(inode, inode->i_mode); 1255 if (!error && update_ctime) { << 1256 inode_set_ctime_current(inode << 1257 if (update_mtime) << 1258 inode_set_mtime_to_ts << 1259 inode_inc_iversion(inode); << 1260 } << 1261 return error; 1088 return error; 1262 } 1089 } 1263 1090 1264 static void shmem_evict_inode(struct inode *i 1091 static void shmem_evict_inode(struct inode *inode) 1265 { 1092 { 1266 struct shmem_inode_info *info = SHMEM 1093 struct shmem_inode_info *info = SHMEM_I(inode); 1267 struct shmem_sb_info *sbinfo = SHMEM_ 1094 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 1268 size_t freed = 0; << 1269 1095 1270 if (shmem_mapping(inode->i_mapping)) !! 1096 if (inode->i_mapping->a_ops == &shmem_aops) { 1271 shmem_unacct_size(info->flags 1097 shmem_unacct_size(info->flags, inode->i_size); 1272 inode->i_size = 0; 1098 inode->i_size = 0; 1273 mapping_set_exiting(inode->i_ << 1274 shmem_truncate_range(inode, 0 1099 shmem_truncate_range(inode, 0, (loff_t)-1); 1275 if (!list_empty(&info->shrink 1100 if (!list_empty(&info->shrinklist)) { 1276 spin_lock(&sbinfo->sh 1101 spin_lock(&sbinfo->shrinklist_lock); 1277 if (!list_empty(&info 1102 if (!list_empty(&info->shrinklist)) { 1278 list_del_init 1103 list_del_init(&info->shrinklist); 1279 sbinfo->shrin 1104 sbinfo->shrinklist_len--; 1280 } 1105 } 1281 spin_unlock(&sbinfo-> 1106 spin_unlock(&sbinfo->shrinklist_lock); 1282 } 1107 } 1283 while (!list_empty(&info->swa 1108 while (!list_empty(&info->swaplist)) { 1284 /* Wait while shmem_u 1109 /* Wait while shmem_unuse() is scanning this inode... 
*/ 1285 wait_var_event(&info- 1110 wait_var_event(&info->stop_eviction, 1286 !atomi 1111 !atomic_read(&info->stop_eviction)); 1287 mutex_lock(&shmem_swa 1112 mutex_lock(&shmem_swaplist_mutex); 1288 /* ...but beware of t 1113 /* ...but beware of the race if we peeked too early */ 1289 if (!atomic_read(&inf 1114 if (!atomic_read(&info->stop_eviction)) 1290 list_del_init 1115 list_del_init(&info->swaplist); 1291 mutex_unlock(&shmem_s 1116 mutex_unlock(&shmem_swaplist_mutex); 1292 } 1117 } 1293 } 1118 } 1294 1119 1295 simple_xattrs_free(&info->xattrs, sbi !! 1120 simple_xattrs_free(&info->xattrs); 1296 shmem_free_inode(inode->i_sb, freed); << 1297 WARN_ON(inode->i_blocks); 1121 WARN_ON(inode->i_blocks); >> 1122 shmem_free_inode(inode->i_sb); 1298 clear_inode(inode); 1123 clear_inode(inode); 1299 #ifdef CONFIG_TMPFS_QUOTA << 1300 dquot_free_inode(inode); << 1301 dquot_drop(inode); << 1302 #endif << 1303 } 1124 } 1304 1125 >> 1126 extern struct swap_info_struct *swap_info[]; >> 1127 1305 static int shmem_find_swap_entries(struct add 1128 static int shmem_find_swap_entries(struct address_space *mapping, 1306 pgoff_t st !! 1129 pgoff_t start, unsigned int nr_entries, 1307 pgoff_t *i !! 1130 struct page **entries, pgoff_t *indices, >> 1131 unsigned int type, bool frontswap) 1308 { 1132 { 1309 XA_STATE(xas, &mapping->i_pages, star 1133 XA_STATE(xas, &mapping->i_pages, start); 1310 struct folio *folio; !! 1134 struct page *page; 1311 swp_entry_t entry; 1135 swp_entry_t entry; >> 1136 unsigned int ret = 0; >> 1137 >> 1138 if (!nr_entries) >> 1139 return 0; 1312 1140 1313 rcu_read_lock(); 1141 rcu_read_lock(); 1314 xas_for_each(&xas, folio, ULONG_MAX) !! 1142 xas_for_each(&xas, page, ULONG_MAX) { 1315 if (xas_retry(&xas, folio)) !! 1143 if (xas_retry(&xas, page)) 1316 continue; 1144 continue; 1317 1145 1318 if (!xa_is_value(folio)) !! 1146 if (!xa_is_value(page)) 1319 continue; 1147 continue; 1320 1148 1321 entry = radix_to_swp_entry(fo !! 1149 entry = radix_to_swp_entry(page); 1322 /* << 1323 * swapin error entries can b << 1324 * deliberately ignored here << 1325 */ << 1326 if (swp_type(entry) != type) 1150 if (swp_type(entry) != type) 1327 continue; 1151 continue; >> 1152 if (frontswap && >> 1153 !frontswap_test(swap_info[type], swp_offset(entry))) >> 1154 continue; 1328 1155 1329 indices[folio_batch_count(fba !! 1156 indices[ret] = xas.xa_index; 1330 if (!folio_batch_add(fbatch, !! 1157 entries[ret] = page; 1331 break; << 1332 1158 1333 if (need_resched()) { 1159 if (need_resched()) { 1334 xas_pause(&xas); 1160 xas_pause(&xas); 1335 cond_resched_rcu(); 1161 cond_resched_rcu(); 1336 } 1162 } >> 1163 if (++ret == nr_entries) >> 1164 break; 1337 } 1165 } 1338 rcu_read_unlock(); 1166 rcu_read_unlock(); 1339 1167 1340 return xas.xa_index; !! 1168 return ret; 1341 } 1169 } 1342 1170 1343 /* 1171 /* 1344 * Move the swapped pages for an inode to pag 1172 * Move the swapped pages for an inode to page cache. Returns the count 1345 * of pages swapped in, or the error in case 1173 * of pages swapped in, or the error in case of failure. 1346 */ 1174 */ 1347 static int shmem_unuse_swap_entries(struct in !! 1175 static int shmem_unuse_swap_entries(struct inode *inode, struct pagevec pvec, 1348 struct folio_batch *fbatch, p !! 
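/*
 * Sketch of the eviction/swapoff handshake used here (an illustrative
 * summary of the behaviour shown in shmem_evict_inode() and shmem_unuse()):
 *
 *	swapoff path				eviction path
 *	------------				-------------
 *	atomic_inc(&info->stop_eviction);
 *	mutex_unlock(&shmem_swaplist_mutex);
 *						wait_var_event(&info->stop_eviction,
 *							!atomic_read(&info->stop_eviction));
 *	shmem_unuse_inode(...);
 *	mutex_lock(&shmem_swaplist_mutex);
 *	if (atomic_dec_and_test(&info->stop_eviction))
 *		wake_up_var(&info->stop_eviction);
 *						now safe to drop the swaplist entry
 *
 * so an inode currently being scanned by swapoff cannot have its swaplist
 * entry, or the inode itself, torn down underneath the scan.
 */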
1176 pgoff_t *indices) 1349 { 1177 { 1350 int i = 0; 1178 int i = 0; 1351 int ret = 0; 1179 int ret = 0; 1352 int error = 0; 1180 int error = 0; 1353 struct address_space *mapping = inode 1181 struct address_space *mapping = inode->i_mapping; 1354 1182 1355 for (i = 0; i < folio_batch_count(fba !! 1183 for (i = 0; i < pvec.nr; i++) { 1356 struct folio *folio = fbatch- !! 1184 struct page *page = pvec.pages[i]; 1357 1185 1358 if (!xa_is_value(folio)) !! 1186 if (!xa_is_value(page)) 1359 continue; 1187 continue; 1360 error = shmem_swapin_folio(in !! 1188 error = shmem_swapin_page(inode, indices[i], 1361 mappi !! 1189 &page, SGP_CACHE, >> 1190 mapping_gfp_mask(mapping), >> 1191 NULL, NULL); 1362 if (error == 0) { 1192 if (error == 0) { 1363 folio_unlock(folio); !! 1193 unlock_page(page); 1364 folio_put(folio); !! 1194 put_page(page); 1365 ret++; 1195 ret++; 1366 } 1196 } 1367 if (error == -ENOMEM) 1197 if (error == -ENOMEM) 1368 break; 1198 break; 1369 error = 0; 1199 error = 0; 1370 } 1200 } 1371 return error ? error : ret; 1201 return error ? error : ret; 1372 } 1202 } 1373 1203 1374 /* 1204 /* 1375 * If swap found in inode, free it and move p 1205 * If swap found in inode, free it and move page from swapcache to filecache. 1376 */ 1206 */ 1377 static int shmem_unuse_inode(struct inode *in !! 1207 static int shmem_unuse_inode(struct inode *inode, unsigned int type, >> 1208 bool frontswap, unsigned long *fs_pages_to_unuse) 1378 { 1209 { 1379 struct address_space *mapping = inode 1210 struct address_space *mapping = inode->i_mapping; 1380 pgoff_t start = 0; 1211 pgoff_t start = 0; 1381 struct folio_batch fbatch; !! 1212 struct pagevec pvec; 1382 pgoff_t indices[PAGEVEC_SIZE]; 1213 pgoff_t indices[PAGEVEC_SIZE]; >> 1214 bool frontswap_partial = (frontswap && *fs_pages_to_unuse > 0); 1383 int ret = 0; 1215 int ret = 0; 1384 1216 >> 1217 pagevec_init(&pvec); 1385 do { 1218 do { 1386 folio_batch_init(&fbatch); !! 1219 unsigned int nr_entries = PAGEVEC_SIZE; 1387 shmem_find_swap_entries(mappi !! 1220 1388 if (folio_batch_count(&fbatch !! 1221 if (frontswap_partial && *fs_pages_to_unuse < PAGEVEC_SIZE) >> 1222 nr_entries = *fs_pages_to_unuse; >> 1223 >> 1224 pvec.nr = shmem_find_swap_entries(mapping, start, nr_entries, >> 1225 pvec.pages, indices, >> 1226 type, frontswap); >> 1227 if (pvec.nr == 0) { 1389 ret = 0; 1228 ret = 0; 1390 break; 1229 break; 1391 } 1230 } 1392 1231 1393 ret = shmem_unuse_swap_entrie !! 1232 ret = shmem_unuse_swap_entries(inode, pvec, indices); 1394 if (ret < 0) 1233 if (ret < 0) 1395 break; 1234 break; 1396 1235 1397 start = indices[folio_batch_c !! 1236 if (frontswap_partial) { >> 1237 *fs_pages_to_unuse -= ret; >> 1238 if (*fs_pages_to_unuse == 0) { >> 1239 ret = FRONTSWAP_PAGES_UNUSED; >> 1240 break; >> 1241 } >> 1242 } >> 1243 >> 1244 start = indices[pvec.nr - 1]; 1398 } while (true); 1245 } while (true); 1399 1246 1400 return ret; 1247 return ret; 1401 } 1248 } 1402 1249 1403 /* 1250 /* 1404 * Read all the shared memory data that resid 1251 * Read all the shared memory data that resides in the swap 1405 * device 'type' back into memory, so the swa 1252 * device 'type' back into memory, so the swap device can be 1406 * unused. 1253 * unused. 1407 */ 1254 */ 1408 int shmem_unuse(unsigned int type) !! 
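/*
 * Context sketch (hedged; based on the caller in mm/swapfile.c rather than
 * anything in this file): shmem_unuse() is driven from swapoff's
 * try_to_unuse(), so that every shmem page still resident on the swap device
 * being disabled is read back into the page cache first.  Per inode the work
 * is batched, roughly:
 *
 *	do {
 *		n = shmem_find_swap_entries(mapping, start, ..., indices, type);
 *		if (!n)
 *			break;
 *		swapped_in = shmem_unuse_swap_entries(inode, ..., indices);
 *		start = indices[n - 1];		// resume from the last hit
 *	} while (1);
 *
 * with the frontswap variant in the older column additionally stopping once
 * *fs_pages_to_unuse entries have been brought back.
 */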
1255 int shmem_unuse(unsigned int type, bool frontswap, >> 1256 unsigned long *fs_pages_to_unuse) 1409 { 1257 { 1410 struct shmem_inode_info *info, *next; 1258 struct shmem_inode_info *info, *next; 1411 int error = 0; 1259 int error = 0; 1412 1260 1413 if (list_empty(&shmem_swaplist)) 1261 if (list_empty(&shmem_swaplist)) 1414 return 0; 1262 return 0; 1415 1263 1416 mutex_lock(&shmem_swaplist_mutex); 1264 mutex_lock(&shmem_swaplist_mutex); 1417 list_for_each_entry_safe(info, next, 1265 list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) { 1418 if (!info->swapped) { 1266 if (!info->swapped) { 1419 list_del_init(&info-> 1267 list_del_init(&info->swaplist); 1420 continue; 1268 continue; 1421 } 1269 } 1422 /* 1270 /* 1423 * Drop the swaplist mutex wh 1271 * Drop the swaplist mutex while searching the inode for swap; 1424 * but before doing so, make 1272 * but before doing so, make sure shmem_evict_inode() will not 1425 * remove placeholder inode f 1273 * remove placeholder inode from swaplist, nor let it be freed 1426 * (igrab() would protect fro 1274 * (igrab() would protect from unlink, but not from unmount). 1427 */ 1275 */ 1428 atomic_inc(&info->stop_evicti 1276 atomic_inc(&info->stop_eviction); 1429 mutex_unlock(&shmem_swaplist_ 1277 mutex_unlock(&shmem_swaplist_mutex); 1430 1278 1431 error = shmem_unuse_inode(&in !! 1279 error = shmem_unuse_inode(&info->vfs_inode, type, frontswap, >> 1280 fs_pages_to_unuse); 1432 cond_resched(); 1281 cond_resched(); 1433 1282 1434 mutex_lock(&shmem_swaplist_mu 1283 mutex_lock(&shmem_swaplist_mutex); 1435 next = list_next_entry(info, 1284 next = list_next_entry(info, swaplist); 1436 if (!info->swapped) 1285 if (!info->swapped) 1437 list_del_init(&info-> 1286 list_del_init(&info->swaplist); 1438 if (atomic_dec_and_test(&info 1287 if (atomic_dec_and_test(&info->stop_eviction)) 1439 wake_up_var(&info->st 1288 wake_up_var(&info->stop_eviction); 1440 if (error) 1289 if (error) 1441 break; 1290 break; 1442 } 1291 } 1443 mutex_unlock(&shmem_swaplist_mutex); 1292 mutex_unlock(&shmem_swaplist_mutex); 1444 1293 1445 return error; 1294 return error; 1446 } 1295 } 1447 1296 1448 /* 1297 /* 1449 * Move the page from the page cache to the s 1298 * Move the page from the page cache to the swap cache. 1450 */ 1299 */ 1451 static int shmem_writepage(struct page *page, 1300 static int shmem_writepage(struct page *page, struct writeback_control *wbc) 1452 { 1301 { 1453 struct folio *folio = page_folio(page !! 1302 struct shmem_inode_info *info; 1454 struct address_space *mapping = folio !! 1303 struct address_space *mapping; 1455 struct inode *inode = mapping->host; !! 1304 struct inode *inode; 1456 struct shmem_inode_info *info = SHMEM << 1457 struct shmem_sb_info *sbinfo = SHMEM_ << 1458 swp_entry_t swap; 1305 swp_entry_t swap; 1459 pgoff_t index; 1306 pgoff_t index; 1460 int nr_pages; !! 1307 1461 bool split = false; !! 
1308 VM_BUG_ON_PAGE(PageCompound(page), page); >> 1309 BUG_ON(!PageLocked(page)); >> 1310 mapping = page->mapping; >> 1311 index = page->index; >> 1312 inode = mapping->host; >> 1313 info = SHMEM_I(inode); >> 1314 if (info->flags & VM_LOCKED) >> 1315 goto redirty; >> 1316 if (!total_swap_pages) >> 1317 goto redirty; 1462 1318 1463 /* 1319 /* 1464 * Our capabilities prevent regular w 1320 * Our capabilities prevent regular writeback or sync from ever calling 1465 * shmem_writepage; but a stacking fi 1321 * shmem_writepage; but a stacking filesystem might use ->writepage of 1466 * its underlying filesystem, in whic 1322 * its underlying filesystem, in which case tmpfs should write out to 1467 * swap only in response to memory pr 1323 * swap only in response to memory pressure, and not for the writeback 1468 * threads or sync. 1324 * threads or sync. 1469 */ 1325 */ 1470 if (WARN_ON_ONCE(!wbc->for_reclaim)) !! 1326 if (!wbc->for_reclaim) { 1471 goto redirty; !! 1327 WARN_ON_ONCE(1); /* Still happens? Tell us about it! */ 1472 << 1473 if (WARN_ON_ONCE((info->flags & VM_LO << 1474 goto redirty; << 1475 << 1476 if (!total_swap_pages) << 1477 goto redirty; 1328 goto redirty; 1478 << 1479 /* << 1480 * If CONFIG_THP_SWAP is not enabled, << 1481 * split when swapping. << 1482 * << 1483 * And shrinkage of pages beyond i_si << 1484 * swapout of a large folio crossing << 1485 * (unless fallocate has been used to << 1486 */ << 1487 if (folio_test_large(folio)) { << 1488 index = shmem_fallocend(inode << 1489 DIV_ROUND_UP(i_size_r << 1490 if ((index > folio->index && << 1491 !IS_ENABLED(CONFIG_THP_SW << 1492 split = true; << 1493 } << 1494 << 1495 if (split) { << 1496 try_split: << 1497 /* Ensure the subpages are st << 1498 folio_test_set_dirty(folio); << 1499 if (split_huge_page_to_list_t << 1500 goto redirty; << 1501 folio = page_folio(page); << 1502 folio_clear_dirty(folio); << 1503 } 1329 } 1504 1330 1505 index = folio->index; << 1506 nr_pages = folio_nr_pages(folio); << 1507 << 1508 /* 1331 /* 1509 * This is somewhat ridiculous, but w 1332 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC 1510 * value into swapfile.c, the only wa 1333 * value into swapfile.c, the only way we can correctly account for a 1511 * fallocated folio arriving here is !! 1334 * fallocated page arriving here is now to initialize it and write it. 1512 * 1335 * 1513 * That's okay for a folio already fa !! 1336 * That's okay for a page already fallocated earlier, but if we have 1514 * not yet completed the fallocation, 1337 * not yet completed the fallocation, then (a) we want to keep track 1515 * of this folio in case we have to u !! 1338 * of this page in case we have to undo it, and (b) it may not be a 1516 * good idea to continue anyway, once 1339 * good idea to continue anyway, once we're pushing into swap. So 1517 * reactivate the folio, and let shme !! 1340 * reactivate the page, and let shmem_fallocate() quit when too many. 1518 */ 1341 */ 1519 if (!folio_test_uptodate(folio)) { !! 
1342 if (!PageUptodate(page)) { 1520 if (inode->i_private) { 1343 if (inode->i_private) { 1521 struct shmem_falloc * 1344 struct shmem_falloc *shmem_falloc; 1522 spin_lock(&inode->i_l 1345 spin_lock(&inode->i_lock); 1523 shmem_falloc = inode- 1346 shmem_falloc = inode->i_private; 1524 if (shmem_falloc && 1347 if (shmem_falloc && 1525 !shmem_falloc->wa 1348 !shmem_falloc->waitq && 1526 index >= shmem_fa 1349 index >= shmem_falloc->start && 1527 index < shmem_fal 1350 index < shmem_falloc->next) 1528 shmem_falloc- 1351 shmem_falloc->nr_unswapped++; 1529 else 1352 else 1530 shmem_falloc 1353 shmem_falloc = NULL; 1531 spin_unlock(&inode->i 1354 spin_unlock(&inode->i_lock); 1532 if (shmem_falloc) 1355 if (shmem_falloc) 1533 goto redirty; 1356 goto redirty; 1534 } 1357 } 1535 folio_zero_range(folio, 0, fo !! 1358 clear_highpage(page); 1536 flush_dcache_folio(folio); !! 1359 flush_dcache_page(page); 1537 folio_mark_uptodate(folio); !! 1360 SetPageUptodate(page); 1538 } 1361 } 1539 1362 1540 swap = folio_alloc_swap(folio); !! 1363 swap = get_swap_page(page); 1541 if (!swap.val) { !! 1364 if (!swap.val) 1542 if (nr_pages > 1) << 1543 goto try_split; << 1544 << 1545 goto redirty; 1365 goto redirty; 1546 } << 1547 1366 1548 /* 1367 /* 1549 * Add inode to shmem_unuse()'s list 1368 * Add inode to shmem_unuse()'s list of swapped-out inodes, 1550 * if it's not already there. Do it !! 1369 * if it's not already there. Do it now before the page is 1551 * moved to swap cache, when its page 1370 * moved to swap cache, when its pagelock no longer protects 1552 * the inode from eviction. But don' 1371 * the inode from eviction. But don't unlock the mutex until 1553 * we've incremented swapped, because 1372 * we've incremented swapped, because shmem_unuse_inode() will 1554 * prune a !swapped inode from the sw 1373 * prune a !swapped inode from the swaplist under this mutex. 1555 */ 1374 */ 1556 mutex_lock(&shmem_swaplist_mutex); 1375 mutex_lock(&shmem_swaplist_mutex); 1557 if (list_empty(&info->swaplist)) 1376 if (list_empty(&info->swaplist)) 1558 list_add(&info->swaplist, &sh 1377 list_add(&info->swaplist, &shmem_swaplist); 1559 1378 1560 if (add_to_swap_cache(folio, swap, !! 1379 if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) { 1561 __GFP_HIGH | __GFP_NO !! 1380 spin_lock_irq(&info->lock); 1562 NULL) == 0) { !! 1381 shmem_recalc_inode(inode); 1563 shmem_recalc_inode(inode, 0, !! 1382 info->swapped++; 1564 swap_shmem_alloc(swap, nr_pag !! 1383 spin_unlock_irq(&info->lock); 1565 shmem_delete_from_page_cache( !! 1384 >> 1385 swap_shmem_alloc(swap); >> 1386 shmem_delete_from_page_cache(page, swp_to_radix_entry(swap)); 1566 1387 1567 mutex_unlock(&shmem_swaplist_ 1388 mutex_unlock(&shmem_swaplist_mutex); 1568 BUG_ON(folio_mapped(folio)); !! 1389 BUG_ON(page_mapped(page)); 1569 return swap_writepage(&folio- !! 1390 swap_writepage(page, wbc); >> 1391 return 0; 1570 } 1392 } 1571 1393 1572 mutex_unlock(&shmem_swaplist_mutex); 1394 mutex_unlock(&shmem_swaplist_mutex); 1573 put_swap_folio(folio, swap); !! 1395 put_swap_page(page, swap); 1574 redirty: 1396 redirty: 1575 folio_mark_dirty(folio); !! 1397 set_page_dirty(page); 1576 if (wbc->for_reclaim) 1398 if (wbc->for_reclaim) 1577 return AOP_WRITEPAGE_ACTIVATE !! 1399 return AOP_WRITEPAGE_ACTIVATE; /* Return with page locked */ 1578 folio_unlock(folio); !! 
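/*
 * Return-value note for the redirty tail here (illustrative, describing the
 * convention vmscan's pageout() expects): returning AOP_WRITEPAGE_ACTIVATE
 * with the page/folio still locked asks reclaim to put it back on the active
 * list rather than treat it as written back, so a locked or unswappable
 * shmem page is simply kept in memory; a plain 0 return means the page has
 * been handed to swap_writepage(), which takes over unlocking and writeback
 * completion.
 */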
1400 unlock_page(page); 1579 return 0; 1401 return 0; 1580 } 1402 } 1581 1403 1582 #if defined(CONFIG_NUMA) && defined(CONFIG_TM 1404 #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS) 1583 static void shmem_show_mpol(struct seq_file * 1405 static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) 1584 { 1406 { 1585 char buffer[64]; 1407 char buffer[64]; 1586 1408 1587 if (!mpol || mpol->mode == MPOL_DEFAU 1409 if (!mpol || mpol->mode == MPOL_DEFAULT) 1588 return; /* show nothi 1410 return; /* show nothing */ 1589 1411 1590 mpol_to_str(buffer, sizeof(buffer), m 1412 mpol_to_str(buffer, sizeof(buffer), mpol); 1591 1413 1592 seq_printf(seq, ",mpol=%s", buffer); 1414 seq_printf(seq, ",mpol=%s", buffer); 1593 } 1415 } 1594 1416 1595 static struct mempolicy *shmem_get_sbmpol(str 1417 static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) 1596 { 1418 { 1597 struct mempolicy *mpol = NULL; 1419 struct mempolicy *mpol = NULL; 1598 if (sbinfo->mpol) { 1420 if (sbinfo->mpol) { 1599 raw_spin_lock(&sbinfo->stat_l !! 1421 spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */ 1600 mpol = sbinfo->mpol; 1422 mpol = sbinfo->mpol; 1601 mpol_get(mpol); 1423 mpol_get(mpol); 1602 raw_spin_unlock(&sbinfo->stat !! 1424 spin_unlock(&sbinfo->stat_lock); 1603 } 1425 } 1604 return mpol; 1426 return mpol; 1605 } 1427 } 1606 #else /* !CONFIG_NUMA || !CONFIG_TMPFS */ 1428 #else /* !CONFIG_NUMA || !CONFIG_TMPFS */ 1607 static inline void shmem_show_mpol(struct seq 1429 static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) 1608 { 1430 { 1609 } 1431 } 1610 static inline struct mempolicy *shmem_get_sbm 1432 static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) 1611 { 1433 { 1612 return NULL; 1434 return NULL; 1613 } 1435 } 1614 #endif /* CONFIG_NUMA && CONFIG_TMPFS */ 1436 #endif /* CONFIG_NUMA && CONFIG_TMPFS */ >> 1437 #ifndef CONFIG_NUMA >> 1438 #define vm_policy vm_private_data >> 1439 #endif 1615 1440 1616 static struct mempolicy *shmem_get_pgoff_poli !! 1441 static void shmem_pseudo_vma_init(struct vm_area_struct *vma, 1617 pgoff_t index, unsign !! 1442 struct shmem_inode_info *info, pgoff_t index) 1618 << 1619 static struct folio *shmem_swapin_cluster(swp << 1620 struct shmem_inode_in << 1621 { 1443 { 1622 struct mempolicy *mpol; !! 1444 /* Create a pseudo vma that just contains the policy */ 1623 pgoff_t ilx; !! 1445 vma_init(vma, NULL); 1624 struct folio *folio; !! 1446 /* Bias interleave by inode number to distribute better across nodes */ 1625 !! 1447 vma->vm_pgoff = index + info->vfs_inode.i_ino; 1626 mpol = shmem_get_pgoff_policy(info, i !! 1448 vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index); 1627 folio = swap_cluster_readahead(swap, << 1628 mpol_cond_put(mpol); << 1629 << 1630 return folio; << 1631 } 1449 } 1632 1450 1633 /* !! 1451 static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma) 1634 * Make sure huge_gfp is always more limited << 1635 * Some of the flags set permissions, while o << 1636 */ << 1637 static gfp_t limit_gfp_mask(gfp_t huge_gfp, g << 1638 { 1452 { 1639 gfp_t allowflags = __GFP_IO | __GFP_F !! 1453 /* Drop reference taken by mpol_shared_policy_lookup() */ 1640 gfp_t denyflags = __GFP_NOWARN | __GF !! 
1454 mpol_cond_put(vma->vm_policy); 1641 gfp_t zoneflags = limit_gfp & GFP_ZON << 1642 gfp_t result = huge_gfp & ~(allowflag << 1643 << 1644 /* Allow allocations only from the or << 1645 result |= zoneflags; << 1646 << 1647 /* << 1648 * Minimize the result gfp by taking << 1649 * and the intersection of the allow << 1650 */ << 1651 result |= (limit_gfp & denyflags); << 1652 result |= (huge_gfp & limit_gfp) & al << 1653 << 1654 return result; << 1655 } 1455 } 1656 1456 1657 #ifdef CONFIG_TRANSPARENT_HUGEPAGE !! 1457 static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp, 1658 unsigned long shmem_allowable_huge_orders(str !! 1458 struct shmem_inode_info *info, pgoff_t index) 1659 struct vm_are << 1660 loff_t write_ << 1661 { 1459 { 1662 unsigned long mask = READ_ONCE(huge_s !! 1460 struct vm_area_struct pvma; 1663 unsigned long within_size_orders = RE !! 1461 struct page *page; 1664 unsigned long vm_flags = vma ? vma->v !! 1462 struct vm_fault vmf; 1665 bool global_huge; << 1666 loff_t i_size; << 1667 int order; << 1668 << 1669 if (thp_disabled_by_hw() || (vma && v << 1670 return 0; << 1671 << 1672 global_huge = shmem_huge_global_enabl << 1673 shmem << 1674 if (!vma || !vma_is_anon_shmem(vma)) << 1675 /* << 1676 * For tmpfs, we now only sup << 1677 * is enabled, otherwise fall << 1678 */ << 1679 return global_huge ? BIT(HPAG << 1680 } << 1681 << 1682 /* << 1683 * Following the 'deny' semantics of << 1684 * option off from all mounts. << 1685 */ << 1686 if (shmem_huge == SHMEM_HUGE_DENY) << 1687 return 0; << 1688 << 1689 /* << 1690 * Only allow inherit orders if the t << 1691 * means non-PMD sized THP can not ov << 1692 */ << 1693 if (shmem_huge == SHMEM_HUGE_FORCE) << 1694 return READ_ONCE(huge_shmem_o << 1695 << 1696 /* Allow mTHP that will be fully with << 1697 order = highest_order(within_size_ord << 1698 while (within_size_orders) { << 1699 index = round_up(index + 1, o << 1700 i_size = round_up(i_size_read << 1701 if (i_size >> PAGE_SHIFT >= i << 1702 mask |= within_size_o << 1703 break; << 1704 } << 1705 << 1706 order = next_order(&within_si << 1707 } << 1708 << 1709 if (vm_flags & VM_HUGEPAGE) << 1710 mask |= READ_ONCE(huge_shmem_ << 1711 1463 1712 if (global_huge) !! 1464 shmem_pseudo_vma_init(&pvma, info, index); 1713 mask |= READ_ONCE(huge_shmem_ !! 1465 vmf.vma = &pvma; >> 1466 vmf.address = 0; >> 1467 page = swap_cluster_readahead(swap, gfp, &vmf); >> 1468 shmem_pseudo_vma_destroy(&pvma); 1714 1469 1715 return THP_ORDERS_ALL_FILE_DEFAULT & !! 1470 return page; 1716 } 1471 } 1717 1472 1718 static unsigned long shmem_suitable_orders(st !! 1473 static struct page *shmem_alloc_hugepage(gfp_t gfp, 1719 st !! 1474 struct shmem_inode_info *info, pgoff_t index) 1720 un << 1721 { 1475 { 1722 struct vm_area_struct *vma = vmf ? vm !! 1476 struct vm_area_struct pvma; 1723 pgoff_t aligned_index; !! 1477 struct address_space *mapping = info->vfs_inode.i_mapping; 1724 unsigned long pages; !! 1478 pgoff_t hindex; 1725 int order; !! 1479 struct page *page; 1726 1480 1727 if (vma) { !! 1481 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) 1728 orders = thp_vma_suitable_ord !! 1482 return NULL; 1729 if (!orders) << 1730 return 0; << 1731 } << 1732 1483 1733 /* Find the highest order that can ad !! 1484 hindex = round_down(index, HPAGE_PMD_NR); 1734 order = highest_order(orders); !! 1485 if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1, 1735 while (orders) { !! 1486 XA_PRESENT)) 1736 pages = 1UL << order; !! 
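/*
 * Illustrative example for the xa_find() probes used around here (both
 * columns make the same kind of check before allocating a huge folio): with
 * 4KiB pages and HPAGE_PMD_NR == 512, a request at index 1000 is first
 * rounded down to the naturally aligned base index 512, and the range
 * 512..1023 is probed for any existing page-cache or swap entry.  If even
 * one small entry is already present there, the PMD-sized allocation is
 * skipped (older column) or the next lower order is tried (newer column),
 * since the huge folio could not be inserted over it.
 */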
1487 return NULL; 1737 aligned_index = round_down(in << 1738 /* << 1739 * Check for conflict before << 1740 * Conflict might be that a h << 1741 * and added to page cache by << 1742 * is already at least one sm << 1743 * Be careful to retry when a << 1744 * Elsewhere -EEXIST would be << 1745 */ << 1746 if (!xa_find(&mapping->i_page << 1747 aligned_index + << 1748 break; << 1749 order = next_order(&orders, o << 1750 } << 1751 1488 1752 return orders; !! 1489 shmem_pseudo_vma_init(&pvma, info, hindex); 1753 } !! 1490 page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN, 1754 #else !! 1491 HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true); 1755 static unsigned long shmem_suitable_orders(st !! 1492 shmem_pseudo_vma_destroy(&pvma); 1756 st !! 1493 if (page) 1757 un !! 1494 prep_transhuge_page(page); 1758 { !! 1495 return page; 1759 return 0; << 1760 } 1496 } 1761 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ << 1762 1497 1763 static struct folio *shmem_alloc_folio(gfp_t !! 1498 static struct page *shmem_alloc_page(gfp_t gfp, 1764 struct shmem_inode_info *info !! 1499 struct shmem_inode_info *info, pgoff_t index) 1765 { 1500 { 1766 struct mempolicy *mpol; !! 1501 struct vm_area_struct pvma; 1767 pgoff_t ilx; !! 1502 struct page *page; 1768 struct folio *folio; << 1769 1503 1770 mpol = shmem_get_pgoff_policy(info, i !! 1504 shmem_pseudo_vma_init(&pvma, info, index); 1771 folio = folio_alloc_mpol(gfp, order, !! 1505 page = alloc_page_vma(gfp, &pvma, 0); 1772 mpol_cond_put(mpol); !! 1506 shmem_pseudo_vma_destroy(&pvma); 1773 1507 1774 return folio; !! 1508 return page; 1775 } 1509 } 1776 1510 1777 static struct folio *shmem_alloc_and_add_foli !! 1511 static struct page *shmem_alloc_and_acct_page(gfp_t gfp, 1778 gfp_t gfp, struct inode *inod !! 1512 struct inode *inode, 1779 struct mm_struct *fault_mm, u !! 1513 pgoff_t index, bool huge) 1780 { 1514 { 1781 struct address_space *mapping = inode << 1782 struct shmem_inode_info *info = SHMEM 1515 struct shmem_inode_info *info = SHMEM_I(inode); 1783 unsigned long suitable_orders = 0; !! 1516 struct page *page; 1784 struct folio *folio = NULL; !! 1517 int nr; 1785 long pages; !! 1518 int err = -ENOSPC; 1786 int error, order; << 1787 << 1788 if (!IS_ENABLED(CONFIG_TRANSPARENT_HU << 1789 orders = 0; << 1790 << 1791 if (orders > 0) { << 1792 suitable_orders = shmem_suita << 1793 << 1794 << 1795 order = highest_order(suitabl << 1796 while (suitable_orders) { << 1797 pages = 1UL << order; << 1798 index = round_down(in << 1799 folio = shmem_alloc_f << 1800 if (folio) << 1801 goto allocate << 1802 << 1803 if (pages == HPAGE_PM << 1804 count_vm_even << 1805 count_mthp_stat(order << 1806 order = next_order(&s << 1807 } << 1808 } else { << 1809 pages = 1; << 1810 folio = shmem_alloc_folio(gfp << 1811 } << 1812 if (!folio) << 1813 return ERR_PTR(-ENOMEM); << 1814 << 1815 allocated: << 1816 __folio_set_locked(folio); << 1817 __folio_set_swapbacked(folio); << 1818 1519 1819 gfp &= GFP_RECLAIM_MASK; !! 1520 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) 1820 error = mem_cgroup_charge(folio, faul !! 1521 huge = false; 1821 if (error) { !! 1522 nr = huge ? HPAGE_PMD_NR : 1; 1822 if (xa_find(&mapping->i_pages << 1823 index + pages << 1824 error = -EEXIST; << 1825 } else if (pages > 1) { << 1826 if (pages == HPAGE_PM << 1827 count_vm_even << 1828 count_vm_even << 1829 } << 1830 count_mthp_stat(folio << 1831 count_mthp_stat(folio << 1832 } << 1833 goto unlock; << 1834 } << 1835 1523 1836 error = shmem_add_to_page_cache(folio !! 
1524 if (!shmem_inode_acct_block(inode, nr)) 1837 if (error) !! 1525 goto failed; 1838 goto unlock; << 1839 1526 1840 error = shmem_inode_acct_blocks(inode !! 1527 if (huge) 1841 if (error) { !! 1528 page = shmem_alloc_hugepage(gfp, info, index); 1842 struct shmem_sb_info *sbinfo !! 1529 else 1843 long freed; !! 1530 page = shmem_alloc_page(gfp, info, index); 1844 /* !! 1531 if (page) { 1845 * Try to reclaim some space !! 1532 __SetPageLocked(page); 1846 * large folios beyond i_size !! 1533 __SetPageSwapBacked(page); 1847 */ !! 1534 return page; 1848 shmem_unused_huge_shrink(sbin << 1849 /* << 1850 * And do a shmem_recalc_inod << 1851 * except our folio is there << 1852 */ << 1853 spin_lock(&info->lock); << 1854 freed = pages + info->alloced << 1855 READ_ONCE(mapping->nr << 1856 if (freed > 0) << 1857 info->alloced -= free << 1858 spin_unlock(&info->lock); << 1859 if (freed > 0) << 1860 shmem_inode_unacct_bl << 1861 error = shmem_inode_acct_bloc << 1862 if (error) { << 1863 filemap_remove_folio( << 1864 goto unlock; << 1865 } << 1866 } 1535 } 1867 1536 1868 shmem_recalc_inode(inode, pages, 0); !! 1537 err = -ENOMEM; 1869 folio_add_lru(folio); !! 1538 shmem_inode_unacct_blocks(inode, nr); 1870 return folio; !! 1539 failed: 1871 !! 1540 return ERR_PTR(err); 1872 unlock: << 1873 folio_unlock(folio); << 1874 folio_put(folio); << 1875 return ERR_PTR(error); << 1876 } 1541 } 1877 1542 1878 /* 1543 /* 1879 * When a page is moved from swapcache to shm 1544 * When a page is moved from swapcache to shmem filecache (either by the 1880 * usual swapin of shmem_get_folio_gfp(), or !! 1545 * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of 1881 * shmem_unuse_inode()), it may have been rea 1546 * shmem_unuse_inode()), it may have been read in earlier from swap, in 1882 * ignorance of the mapping it belongs to. I 1547 * ignorance of the mapping it belongs to. If that mapping has special 1883 * constraints (like the gma500 GEM driver, w 1548 * constraints (like the gma500 GEM driver, which requires RAM below 4GB), 1884 * we may need to copy to a suitable page bef 1549 * we may need to copy to a suitable page before moving to filecache. 1885 * 1550 * 1886 * In a future release, this may well be exte 1551 * In a future release, this may well be extended to respect cpuset and 1887 * NUMA mempolicy, and applied also to anonym 1552 * NUMA mempolicy, and applied also to anonymous pages in do_swap_page(); 1888 * but for now it is a simple matter of zone. 1553 * but for now it is a simple matter of zone. 1889 */ 1554 */ 1890 static bool shmem_should_replace_folio(struct !! 1555 static bool shmem_should_replace_page(struct page *page, gfp_t gfp) 1891 { 1556 { 1892 return folio_zonenum(folio) > gfp_zon !! 1557 return page_zonenum(page) > gfp_zone(gfp); 1893 } 1558 } 1894 1559 1895 static int shmem_replace_folio(struct folio * !! 1560 static int shmem_replace_page(struct page **pagep, gfp_t gfp, 1896 struct shmem_ !! 1561 struct shmem_inode_info *info, pgoff_t index) 1897 struct vm_are !! 1562 { 1898 { !! 1563 struct page *oldpage, *newpage; 1899 struct folio *new, *old = *foliop; !! 1564 struct address_space *swap_mapping; 1900 swp_entry_t entry = old->swap; !! 1565 swp_entry_t entry; 1901 struct address_space *swap_mapping = !! 1566 pgoff_t swap_index; 1902 pgoff_t swap_index = swap_cache_index !! 1567 int error; 1903 XA_STATE(xas, &swap_mapping->i_pages, !! 1568 1904 int nr_pages = folio_nr_pages(old); !! 1569 oldpage = *pagep; 1905 int error = 0, i; !! 
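/*
 * Worked example for shmem_should_replace_folio()/shmem_should_replace_page()
 * above (illustrative, gma500-style): a driver that can only address RAM
 * below 4GB gives its mapping a gfp mask of GFP_KERNEL | __GFP_DMA32, so
 * gfp_zone(gfp) is ZONE_DMA32.  If the folio read back from swap happens to
 * sit in ZONE_NORMAL, folio_zonenum() is greater than ZONE_DMA32, the test
 * is true, and shmem_replace_folio() copies the data into a folio allocated
 * with the constrained mask before it is moved into the file's page cache.
 */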
1570 entry.val = page_private(oldpage); >> 1571 swap_index = swp_offset(entry); >> 1572 swap_mapping = page_mapping(oldpage); 1906 1573 1907 /* 1574 /* 1908 * We have arrived here because our z 1575 * We have arrived here because our zones are constrained, so don't 1909 * limit chance of success by further 1576 * limit chance of success by further cpuset and node constraints. 1910 */ 1577 */ 1911 gfp &= ~GFP_CONSTRAINT_MASK; 1578 gfp &= ~GFP_CONSTRAINT_MASK; 1912 #ifdef CONFIG_TRANSPARENT_HUGEPAGE !! 1579 newpage = shmem_alloc_page(gfp, info, index); 1913 if (nr_pages > 1) { !! 1580 if (!newpage) 1914 gfp_t huge_gfp = vma_thp_gfp_ << 1915 << 1916 gfp = limit_gfp_mask(huge_gfp << 1917 } << 1918 #endif << 1919 << 1920 new = shmem_alloc_folio(gfp, folio_or << 1921 if (!new) << 1922 return -ENOMEM; 1581 return -ENOMEM; 1923 1582 1924 folio_ref_add(new, nr_pages); !! 1583 get_page(newpage); 1925 folio_copy(new, old); !! 1584 copy_highpage(newpage, oldpage); 1926 flush_dcache_folio(new); !! 1585 flush_dcache_page(newpage); 1927 !! 1586 1928 __folio_set_locked(new); !! 1587 __SetPageLocked(newpage); 1929 __folio_set_swapbacked(new); !! 1588 __SetPageSwapBacked(newpage); 1930 folio_mark_uptodate(new); !! 1589 SetPageUptodate(newpage); 1931 new->swap = entry; !! 1590 set_page_private(newpage, entry.val); 1932 folio_set_swapcache(new); !! 1591 SetPageSwapCache(newpage); 1933 1592 1934 /* Swap cache still stores N entries !! 1593 /* >> 1594 * Our caller will very soon move newpage out of swapcache, but it's >> 1595 * a nice clean interface for us to replace oldpage by newpage there. >> 1596 */ 1935 xa_lock_irq(&swap_mapping->i_pages); 1597 xa_lock_irq(&swap_mapping->i_pages); 1936 for (i = 0; i < nr_pages; i++) { !! 1598 error = shmem_replace_entry(swap_mapping, swap_index, oldpage, newpage); 1937 void *item = xas_load(&xas); << 1938 << 1939 if (item != old) { << 1940 error = -ENOENT; << 1941 break; << 1942 } << 1943 << 1944 xas_store(&xas, new); << 1945 xas_next(&xas); << 1946 } << 1947 if (!error) { 1599 if (!error) { 1948 mem_cgroup_replace_folio(old, !! 1600 __inc_node_page_state(newpage, NR_FILE_PAGES); 1949 __lruvec_stat_mod_folio(new, !! 1601 __dec_node_page_state(oldpage, NR_FILE_PAGES); 1950 __lruvec_stat_mod_folio(new, << 1951 __lruvec_stat_mod_folio(old, << 1952 __lruvec_stat_mod_folio(old, << 1953 } 1602 } 1954 xa_unlock_irq(&swap_mapping->i_pages) 1603 xa_unlock_irq(&swap_mapping->i_pages); 1955 1604 1956 if (unlikely(error)) { 1605 if (unlikely(error)) { 1957 /* 1606 /* 1958 * Is this possible? I think !! 1607 * Is this possible? I think not, now that our callers check 1959 * check both the swapcache f !! 1608 * both PageSwapCache and page_private after getting page lock; 1960 * after getting the folio lo !! 1609 * but be defensive. Reverse old to newpage for clear and free. 1961 * Reverse old to newpage for << 1962 */ 1610 */ 1963 old = new; !! 1611 oldpage = newpage; 1964 } else { 1612 } else { 1965 folio_add_lru(new); !! 1613 mem_cgroup_migrate(oldpage, newpage); 1966 *foliop = new; !! 1614 lru_cache_add_anon(newpage); >> 1615 *pagep = newpage; 1967 } 1616 } 1968 1617 1969 folio_clear_swapcache(old); !! 1618 ClearPageSwapCache(oldpage); 1970 old->private = NULL; !! 1619 set_page_private(oldpage, 0); 1971 1620 1972 folio_unlock(old); !! 1621 unlock_page(oldpage); 1973 /* !! 1622 put_page(oldpage); 1974 * The old folio are removed from swa !! 1623 put_page(oldpage); 1975 * reference, as well as one temporar << 1976 * cache. 
<< 1977 */ << 1978 folio_put_refs(old, nr_pages + 1); << 1979 return error; 1624 return error; 1980 } 1625 } 1981 1626 1982 static void shmem_set_folio_swapin_error(stru << 1983 stru << 1984 { << 1985 struct address_space *mapping = inode << 1986 swp_entry_t swapin_error; << 1987 void *old; << 1988 int nr_pages; << 1989 << 1990 swapin_error = make_poisoned_swp_entr << 1991 old = xa_cmpxchg_irq(&mapping->i_page << 1992 swp_to_radix_ent << 1993 swp_to_radix_ent << 1994 if (old != swp_to_radix_entry(swap)) << 1995 return; << 1996 << 1997 nr_pages = folio_nr_pages(folio); << 1998 folio_wait_writeback(folio); << 1999 delete_from_swap_cache(folio); << 2000 /* << 2001 * Don't treat swapin error folio as << 2002 * won't be 0 when inode is released << 2003 * in shmem_evict_inode(). << 2004 */ << 2005 shmem_recalc_inode(inode, -nr_pages, << 2006 swap_free_nr(swap, nr_pages); << 2007 } << 2008 << 2009 static int shmem_split_large_entry(struct ino << 2010 swp_entry_ << 2011 { << 2012 struct address_space *mapping = inode << 2013 XA_STATE_ORDER(xas, &mapping->i_pages << 2014 void *alloced_shadow = NULL; << 2015 int alloced_order = 0, i; << 2016 << 2017 /* Convert user data gfp flags to xar << 2018 gfp &= GFP_RECLAIM_MASK; << 2019 << 2020 for (;;) { << 2021 int order = -1, split_order = << 2022 void *old = NULL; << 2023 << 2024 xas_lock_irq(&xas); << 2025 old = xas_load(&xas); << 2026 if (!xa_is_value(old) || swp_ << 2027 xas_set_err(&xas, -EE << 2028 goto unlock; << 2029 } << 2030 << 2031 order = xas_get_order(&xas); << 2032 << 2033 /* Swap entry may have change << 2034 if (alloced_order && << 2035 (old != alloced_shadow || << 2036 xas_destroy(&xas); << 2037 alloced_order = 0; << 2038 } << 2039 << 2040 /* Try to split large swap en << 2041 if (order > 0) { << 2042 if (!alloced_order) { << 2043 split_order = << 2044 goto unlock; << 2045 } << 2046 xas_split(&xas, old, << 2047 << 2048 /* << 2049 * Re-set the swap en << 2050 * offset of the orig << 2051 */ << 2052 for (i = 0; i < 1 << << 2053 pgoff_t align << 2054 swp_entry_t t << 2055 << 2056 tmp = swp_ent << 2057 __xa_store(&m << 2058 sw << 2059 } << 2060 } << 2061 << 2062 unlock: << 2063 xas_unlock_irq(&xas); << 2064 << 2065 /* split needed, alloc here a << 2066 if (split_order) { << 2067 xas_split_alloc(&xas, << 2068 if (xas_error(&xas)) << 2069 goto error; << 2070 alloced_shadow = old; << 2071 alloced_order = split << 2072 xas_reset(&xas); << 2073 continue; << 2074 } << 2075 << 2076 if (!xas_nomem(&xas, gfp)) << 2077 break; << 2078 } << 2079 << 2080 error: << 2081 if (xas_error(&xas)) << 2082 return xas_error(&xas); << 2083 << 2084 return alloced_order; << 2085 } << 2086 << 2087 /* 1627 /* 2088 * Swap in the folio pointed to by *foliop. !! 1628 * Swap in the page pointed to by *pagep. 2089 * Caller has to make sure that *foliop conta !! 1629 * Caller has to make sure that *pagep contains a valid swapped page. 2090 * Returns 0 and the folio in foliop if succe !! 1630 * Returns 0 and the page in pagep if success. On failure, returns the 2091 * error code and NULL in *foliop. !! 1631 * the error code and NULL in *pagep. 2092 */ 1632 */ 2093 static int shmem_swapin_folio(struct inode *i !! 1633 static int shmem_swapin_page(struct inode *inode, pgoff_t index, 2094 struct folio **f !! 
1634 struct page **pagep, enum sgp_type sgp, 2095 gfp_t gfp, struc 1635 gfp_t gfp, struct vm_area_struct *vma, 2096 vm_fault_t *faul 1636 vm_fault_t *fault_type) 2097 { 1637 { 2098 struct address_space *mapping = inode 1638 struct address_space *mapping = inode->i_mapping; 2099 struct mm_struct *fault_mm = vma ? vm << 2100 struct shmem_inode_info *info = SHMEM 1639 struct shmem_inode_info *info = SHMEM_I(inode); 2101 struct swap_info_struct *si; !! 1640 struct mm_struct *charge_mm = vma ? vma->vm_mm : current->mm; 2102 struct folio *folio = NULL; !! 1641 struct mem_cgroup *memcg; >> 1642 struct page *page; 2103 swp_entry_t swap; 1643 swp_entry_t swap; 2104 int error, nr_pages; !! 1644 int error; 2105 1645 2106 VM_BUG_ON(!*foliop || !xa_is_value(*f !! 1646 VM_BUG_ON(!*pagep || !xa_is_value(*pagep)); 2107 swap = radix_to_swp_entry(*foliop); !! 1647 swap = radix_to_swp_entry(*pagep); 2108 *foliop = NULL; !! 1648 *pagep = NULL; 2109 << 2110 if (is_poisoned_swp_entry(swap)) << 2111 return -EIO; << 2112 << 2113 si = get_swap_device(swap); << 2114 if (!si) { << 2115 if (!shmem_confirm_swap(mappi << 2116 return -EEXIST; << 2117 else << 2118 return -EINVAL; << 2119 } << 2120 1649 2121 /* Look it up and read it in.. */ 1650 /* Look it up and read it in.. */ 2122 folio = swap_cache_get_folio(swap, NU !! 1651 page = lookup_swap_cache(swap, NULL, 0); 2123 if (!folio) { !! 1652 if (!page) { 2124 int split_order; << 2125 << 2126 /* Or update major stats only 1653 /* Or update major stats only when swapin succeeds?? */ 2127 if (fault_type) { 1654 if (fault_type) { 2128 *fault_type |= VM_FAU 1655 *fault_type |= VM_FAULT_MAJOR; 2129 count_vm_event(PGMAJF 1656 count_vm_event(PGMAJFAULT); 2130 count_memcg_event_mm( !! 1657 count_memcg_event_mm(charge_mm, PGMAJFAULT); 2131 } << 2132 << 2133 /* << 2134 * Now swap device can only s << 2135 * should split the large swa << 2136 * if necessary. << 2137 */ << 2138 split_order = shmem_split_lar << 2139 if (split_order < 0) { << 2140 error = split_order; << 2141 goto failed; << 2142 } 1658 } 2143 << 2144 /* << 2145 * If the large swap entry ha << 2146 * necessary to recalculate t << 2147 * the old order alignment. << 2148 */ << 2149 if (split_order > 0) { << 2150 pgoff_t offset = inde << 2151 << 2152 swap = swp_entry(swp_ << 2153 } << 2154 << 2155 /* Here we actually start the 1659 /* Here we actually start the io */ 2156 folio = shmem_swapin_cluster( !! 1660 page = shmem_swapin(swap, gfp, info, index); 2157 if (!folio) { !! 1661 if (!page) { 2158 error = -ENOMEM; 1662 error = -ENOMEM; 2159 goto failed; 1663 goto failed; 2160 } 1664 } 2161 } 1665 } 2162 1666 2163 /* We have to do this with folio lock !! 1667 /* We have to do this with page locked to prevent races */ 2164 folio_lock(folio); !! 1668 lock_page(page); 2165 if (!folio_test_swapcache(folio) || !! 1669 if (!PageSwapCache(page) || page_private(page) != swap.val || 2166 folio->swap.val != swap.val || << 2167 !shmem_confirm_swap(mapping, inde 1670 !shmem_confirm_swap(mapping, index, swap)) { 2168 error = -EEXIST; 1671 error = -EEXIST; 2169 goto unlock; 1672 goto unlock; 2170 } 1673 } 2171 if (!folio_test_uptodate(folio)) { !! 1674 if (!PageUptodate(page)) { 2172 error = -EIO; 1675 error = -EIO; 2173 goto failed; 1676 goto failed; 2174 } 1677 } 2175 folio_wait_writeback(folio); !! 1678 wait_on_page_writeback(page); 2176 nr_pages = folio_nr_pages(folio); << 2177 1679 2178 /* !! 1680 if (shmem_should_replace_page(page, gfp)) { 2179 * Some architectures may have to res !! 
1681 error = shmem_replace_page(&page, gfp, info, index); 2180 * folio after reading from swap. << 2181 */ << 2182 arch_swap_restore(folio_swap(swap, fo << 2183 << 2184 if (shmem_should_replace_folio(folio, << 2185 error = shmem_replace_folio(& << 2186 if (error) 1682 if (error) 2187 goto failed; 1683 goto failed; 2188 } 1684 } 2189 1685 2190 error = shmem_add_to_page_cache(folio !! 1686 error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg, 2191 round !! 1687 false); 2192 swp_t !! 1688 if (!error) { >> 1689 error = shmem_add_to_page_cache(page, mapping, index, >> 1690 swp_to_radix_entry(swap), gfp); >> 1691 /* >> 1692 * We already confirmed swap under page lock, and make >> 1693 * no memory allocation here, so usually no possibility >> 1694 * of error; but free_swap_and_cache() only trylocks a >> 1695 * page, so it is just possible that the entry has been >> 1696 * truncated or holepunched since swap was confirmed. >> 1697 * shmem_undo_range() will have done some of the >> 1698 * unaccounting, now delete_from_swap_cache() will do >> 1699 * the rest. >> 1700 */ >> 1701 if (error) { >> 1702 mem_cgroup_cancel_charge(page, memcg, false); >> 1703 delete_from_swap_cache(page); >> 1704 } >> 1705 } 2193 if (error) 1706 if (error) 2194 goto failed; 1707 goto failed; 2195 1708 2196 shmem_recalc_inode(inode, 0, -nr_page !! 1709 mem_cgroup_commit_charge(page, memcg, true, false); >> 1710 >> 1711 spin_lock_irq(&info->lock); >> 1712 info->swapped--; >> 1713 shmem_recalc_inode(inode); >> 1714 spin_unlock_irq(&info->lock); 2197 1715 2198 if (sgp == SGP_WRITE) 1716 if (sgp == SGP_WRITE) 2199 folio_mark_accessed(folio); !! 1717 mark_page_accessed(page); 2200 1718 2201 delete_from_swap_cache(folio); !! 1719 delete_from_swap_cache(page); 2202 folio_mark_dirty(folio); !! 1720 set_page_dirty(page); 2203 swap_free_nr(swap, nr_pages); !! 1721 swap_free(swap); 2204 put_swap_device(si); << 2205 1722 2206 *foliop = folio; !! 1723 *pagep = page; 2207 return 0; 1724 return 0; 2208 failed: 1725 failed: 2209 if (!shmem_confirm_swap(mapping, inde 1726 if (!shmem_confirm_swap(mapping, index, swap)) 2210 error = -EEXIST; 1727 error = -EEXIST; 2211 if (error == -EIO) << 2212 shmem_set_folio_swapin_error( << 2213 unlock: 1728 unlock: 2214 if (folio) { !! 1729 if (page) { 2215 folio_unlock(folio); !! 1730 unlock_page(page); 2216 folio_put(folio); !! 1731 put_page(page); 2217 } 1732 } 2218 put_swap_device(si); << 2219 1733 2220 return error; 1734 return error; 2221 } 1735 } 2222 1736 2223 /* 1737 /* 2224 * shmem_get_folio_gfp - find page in cache, !! 1738 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate 2225 * 1739 * 2226 * If we allocate a new one we do not mark it 1740 * If we allocate a new one we do not mark it dirty. That's up to the 2227 * vm. If we swap it in we mark it dirty sinc 1741 * vm. If we swap it in we mark it dirty since we also free the swap 2228 * entry since a page cannot live in both the 1742 * entry since a page cannot live in both the swap and page cache. 2229 * 1743 * 2230 * vmf and fault_type are only supplied by sh !! 1744 * vmf and fault_type are only supplied by shmem_fault: >> 1745 * otherwise they are NULL. 2231 */ 1746 */ 2232 static int shmem_get_folio_gfp(struct inode * !! 1747 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index, 2233 loff_t write_end, struct foli !! 1748 struct page **pagep, enum sgp_type sgp, gfp_t gfp, 2234 gfp_t gfp, struct vm_fault *v !! 1749 struct vm_area_struct *vma, struct vm_fault *vmf, 2235 { !! 
1750 vm_fault_t *fault_type) 2236 struct vm_area_struct *vma = vmf ? vm !! 1751 { 2237 struct mm_struct *fault_mm; !! 1752 struct address_space *mapping = inode->i_mapping; 2238 struct folio *folio; !! 1753 struct shmem_inode_info *info = SHMEM_I(inode); >> 1754 struct shmem_sb_info *sbinfo; >> 1755 struct mm_struct *charge_mm; >> 1756 struct mem_cgroup *memcg; >> 1757 struct page *page; >> 1758 enum sgp_type sgp_huge = sgp; >> 1759 pgoff_t hindex = index; 2239 int error; 1760 int error; 2240 bool alloced; !! 1761 int once = 0; 2241 unsigned long orders = 0; !! 1762 int alloced = 0; 2242 << 2243 if (WARN_ON_ONCE(!shmem_mapping(inode << 2244 return -EINVAL; << 2245 1763 2246 if (index > (MAX_LFS_FILESIZE >> PAGE 1764 if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT)) 2247 return -EFBIG; 1765 return -EFBIG; >> 1766 if (sgp == SGP_NOHUGE || sgp == SGP_HUGE) >> 1767 sgp = SGP_CACHE; 2248 repeat: 1768 repeat: 2249 if (sgp <= SGP_CACHE && 1769 if (sgp <= SGP_CACHE && 2250 ((loff_t)index << PAGE_SHIFT) >= !! 1770 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) { 2251 return -EINVAL; 1771 return -EINVAL; 2252 << 2253 alloced = false; << 2254 fault_mm = vma ? vma->vm_mm : NULL; << 2255 << 2256 folio = filemap_get_entry(inode->i_ma << 2257 if (folio && vma && userfaultfd_minor << 2258 if (!xa_is_value(folio)) << 2259 folio_put(folio); << 2260 *fault_type = handle_userfaul << 2261 return 0; << 2262 } 1772 } 2263 1773 2264 if (xa_is_value(folio)) { !! 1774 sbinfo = SHMEM_SB(inode->i_sb); 2265 error = shmem_swapin_folio(in !! 1775 charge_mm = vma ? vma->vm_mm : current->mm; 2266 sg !! 1776 >> 1777 page = find_lock_entry(mapping, index); >> 1778 if (xa_is_value(page)) { >> 1779 error = shmem_swapin_page(inode, index, &page, >> 1780 sgp, gfp, vma, fault_type); 2267 if (error == -EEXIST) 1781 if (error == -EEXIST) 2268 goto repeat; 1782 goto repeat; 2269 1783 2270 *foliop = folio; !! 1784 *pagep = page; 2271 return error; 1785 return error; 2272 } 1786 } 2273 1787 2274 if (folio) { !! 1788 if (page && sgp == SGP_WRITE) 2275 folio_lock(folio); !! 1789 mark_page_accessed(page); 2276 1790 2277 /* Has the folio been truncat !! 1791 /* fallocated page? */ 2278 if (unlikely(folio->mapping ! !! 1792 if (page && !PageUptodate(page)) { 2279 folio_unlock(folio); << 2280 folio_put(folio); << 2281 goto repeat; << 2282 } << 2283 if (sgp == SGP_WRITE) << 2284 folio_mark_accessed(f << 2285 if (folio_test_uptodate(folio << 2286 goto out; << 2287 /* fallocated folio */ << 2288 if (sgp != SGP_READ) 1793 if (sgp != SGP_READ) 2289 goto clear; 1794 goto clear; 2290 folio_unlock(folio); !! 1795 unlock_page(page); 2291 folio_put(folio); !! 1796 put_page(page); >> 1797 page = NULL; 2292 } 1798 } 2293 !! 1799 if (page || sgp == SGP_READ) { 2294 /* !! 1800 *pagep = page; 2295 * SGP_READ: succeed on hole, with NU << 2296 * SGP_NOALLOC: fail on hole, with NU << 2297 */ << 2298 *foliop = NULL; << 2299 if (sgp == SGP_READ) << 2300 return 0; 1801 return 0; 2301 if (sgp == SGP_NOALLOC) !! 1802 } 2302 return -ENOENT; << 2303 1803 2304 /* 1804 /* 2305 * Fast cache lookup and swap lookup !! 1805 * Fast cache lookup did not find it: >> 1806 * bring it back from swap or allocate. 2306 */ 1807 */ 2307 1808 2308 if (vma && userfaultfd_missing(vma)) 1809 if (vma && userfaultfd_missing(vma)) { 2309 *fault_type = handle_userfaul 1810 *fault_type = handle_userfault(vmf, VM_UFFD_MISSING); 2310 return 0; 1811 return 0; 2311 } 1812 } 2312 1813 2313 /* Find hugepage orders that are allo !! 
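/*
 * Condensed sketch of the lookup logic above (a reading aid for what both
 * shmem_get_folio_gfp() and shmem_getpage_gfp() do):
 *
 *	entry = filemap_get_entry() / find_lock_entry() at index
 *	swap value entry	-> shmem_swapin_folio()/shmem_swapin_page()
 *	present and uptodate	-> done, return it locked
 *	present, !uptodate	-> it was fallocated: SGP_READ treats it as a
 *				   hole, anything else clears and returns it
 *	absent			-> SGP_READ (and SGP_NOALLOC in the newer
 *				   column) return without allocating; other
 *				   modes allocate, huge if policy allows, and
 *				   add the new folio to the page cache
 *
 * userfaultfd-registered VMAs short-circuit before allocation, so a missing
 * page is reported to userspace instead of being filled here.
 */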
1814 /* shmem_symlink() */ 2314 orders = shmem_allowable_huge_orders( !! 1815 if (mapping->a_ops != &shmem_aops) 2315 if (orders > 0) { !! 1816 goto alloc_nohuge; 2316 gfp_t huge_gfp; !! 1817 if (shmem_huge == SHMEM_HUGE_DENY || sgp_huge == SGP_NOHUGE) 2317 !! 1818 goto alloc_nohuge; 2318 huge_gfp = vma_thp_gfp_mask(v !! 1819 if (shmem_huge == SHMEM_HUGE_FORCE) 2319 huge_gfp = limit_gfp_mask(hug !! 1820 goto alloc_huge; 2320 folio = shmem_alloc_and_add_f !! 1821 switch (sbinfo->huge) { 2321 inode, index, !! 1822 loff_t i_size; 2322 if (!IS_ERR(folio)) { !! 1823 pgoff_t off; 2323 if (folio_test_pmd_ma !! 1824 case SHMEM_HUGE_NEVER: 2324 count_vm_even !! 1825 goto alloc_nohuge; 2325 count_mthp_stat(folio !! 1826 case SHMEM_HUGE_WITHIN_SIZE: 2326 goto alloced; !! 1827 off = round_up(index, HPAGE_PMD_NR); 2327 } !! 1828 i_size = round_up(i_size_read(inode), PAGE_SIZE); 2328 if (PTR_ERR(folio) == -EEXIST !! 1829 if (i_size >= HPAGE_PMD_SIZE && 2329 goto repeat; !! 1830 i_size >> PAGE_SHIFT >= off) 2330 } !! 1831 goto alloc_huge; >> 1832 /* fallthrough */ >> 1833 case SHMEM_HUGE_ADVISE: >> 1834 if (sgp_huge == SGP_HUGE) >> 1835 goto alloc_huge; >> 1836 /* TODO: implement fadvise() hints */ >> 1837 goto alloc_nohuge; >> 1838 } >> 1839 >> 1840 alloc_huge: >> 1841 page = shmem_alloc_and_acct_page(gfp, inode, index, true); >> 1842 if (IS_ERR(page)) { >> 1843 alloc_nohuge: >> 1844 page = shmem_alloc_and_acct_page(gfp, inode, >> 1845 index, false); >> 1846 } >> 1847 if (IS_ERR(page)) { >> 1848 int retry = 5; >> 1849 >> 1850 error = PTR_ERR(page); >> 1851 page = NULL; >> 1852 if (error != -ENOSPC) >> 1853 goto unlock; >> 1854 /* >> 1855 * Try to reclaim some space by splitting a huge page >> 1856 * beyond i_size on the filesystem. >> 1857 */ >> 1858 while (retry--) { >> 1859 int ret; 2331 1860 2332 folio = shmem_alloc_and_add_folio(vmf !! 1861 ret = shmem_unused_huge_shrink(sbinfo, NULL, 1); 2333 if (IS_ERR(folio)) { !! 1862 if (ret == SHRINK_STOP) 2334 error = PTR_ERR(folio); !! 1863 break; 2335 if (error == -EEXIST) !! 1864 if (ret) 2336 goto repeat; !! 1865 goto alloc_nohuge; 2337 folio = NULL; !! 1866 } 2338 goto unlock; 1867 goto unlock; 2339 } 1868 } 2340 1869 2341 alloced: !! 1870 if (PageTransHuge(page)) >> 1871 hindex = round_down(index, HPAGE_PMD_NR); >> 1872 else >> 1873 hindex = index; >> 1874 >> 1875 if (sgp == SGP_WRITE) >> 1876 __SetPageReferenced(page); >> 1877 >> 1878 error = mem_cgroup_try_charge_delay(page, charge_mm, gfp, &memcg, >> 1879 PageTransHuge(page)); >> 1880 if (error) >> 1881 goto unacct; >> 1882 error = shmem_add_to_page_cache(page, mapping, hindex, >> 1883 NULL, gfp & GFP_RECLAIM_MASK); >> 1884 if (error) { >> 1885 mem_cgroup_cancel_charge(page, memcg, >> 1886 PageTransHuge(page)); >> 1887 goto unacct; >> 1888 } >> 1889 mem_cgroup_commit_charge(page, memcg, false, >> 1890 PageTransHuge(page)); >> 1891 lru_cache_add_anon(page); >> 1892 >> 1893 spin_lock_irq(&info->lock); >> 1894 info->alloced += compound_nr(page); >> 1895 inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page); >> 1896 shmem_recalc_inode(inode); >> 1897 spin_unlock_irq(&info->lock); 2342 alloced = true; 1898 alloced = true; 2343 if (folio_test_large(folio) && !! 1899 >> 1900 if (PageTransHuge(page) && 2344 DIV_ROUND_UP(i_size_read(inode), 1901 DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) < 2345 folio !! 1902 hindex + HPAGE_PMD_NR - 1) { 2346 struct shmem_sb_info *sbinfo << 2347 struct shmem_inode_info *info << 2348 /* 1903 /* 2349 * Part of the large folio is !! 
1904 * Part of the huge page is beyond i_size: subject 2350 * to shrink under memory pre 1905 * to shrink under memory pressure. 2351 */ 1906 */ 2352 spin_lock(&sbinfo->shrinklist 1907 spin_lock(&sbinfo->shrinklist_lock); 2353 /* 1908 /* 2354 * _careful to defend against 1909 * _careful to defend against unlocked access to 2355 * ->shrink_list in shmem_unu 1910 * ->shrink_list in shmem_unused_huge_shrink() 2356 */ 1911 */ 2357 if (list_empty_careful(&info- 1912 if (list_empty_careful(&info->shrinklist)) { 2358 list_add_tail(&info-> 1913 list_add_tail(&info->shrinklist, 2359 &sbinfo 1914 &sbinfo->shrinklist); 2360 sbinfo->shrinklist_le 1915 sbinfo->shrinklist_len++; 2361 } 1916 } 2362 spin_unlock(&sbinfo->shrinkli 1917 spin_unlock(&sbinfo->shrinklist_lock); 2363 } 1918 } 2364 1919 2365 if (sgp == SGP_WRITE) << 2366 folio_set_referenced(folio); << 2367 /* 1920 /* 2368 * Let SGP_FALLOC use the SGP_WRITE o !! 1921 * Let SGP_FALLOC use the SGP_WRITE optimization on a new page. 2369 */ 1922 */ 2370 if (sgp == SGP_FALLOC) 1923 if (sgp == SGP_FALLOC) 2371 sgp = SGP_WRITE; 1924 sgp = SGP_WRITE; 2372 clear: 1925 clear: 2373 /* 1926 /* 2374 * Let SGP_WRITE caller clear ends if !! 1927 * Let SGP_WRITE caller clear ends if write does not fill page; 2375 * but SGP_FALLOC on a folio fallocat !! 1928 * but SGP_FALLOC on a page fallocated earlier must initialize 2376 * it now, lest undo on failure cance 1929 * it now, lest undo on failure cancel our earlier guarantee. 2377 */ 1930 */ 2378 if (sgp != SGP_WRITE && !folio_test_u !! 1931 if (sgp != SGP_WRITE && !PageUptodate(page)) { 2379 long i, n = folio_nr_pages(fo !! 1932 struct page *head = compound_head(page); 2380 !! 1933 int i; 2381 for (i = 0; i < n; i++) !! 1934 2382 clear_highpage(folio_ !! 1935 for (i = 0; i < compound_nr(head); i++) { 2383 flush_dcache_folio(folio); !! 1936 clear_highpage(head + i); 2384 folio_mark_uptodate(folio); !! 1937 flush_dcache_page(head + i); >> 1938 } >> 1939 SetPageUptodate(head); 2385 } 1940 } 2386 1941 2387 /* Perhaps the file has been truncate 1942 /* Perhaps the file has been truncated since we checked */ 2388 if (sgp <= SGP_CACHE && 1943 if (sgp <= SGP_CACHE && 2389 ((loff_t)index << PAGE_SHIFT) >= 1944 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) { >> 1945 if (alloced) { >> 1946 ClearPageDirty(page); >> 1947 delete_from_page_cache(page); >> 1948 spin_lock_irq(&info->lock); >> 1949 shmem_recalc_inode(inode); >> 1950 spin_unlock_irq(&info->lock); >> 1951 } 2390 error = -EINVAL; 1952 error = -EINVAL; 2391 goto unlock; 1953 goto unlock; 2392 } 1954 } 2393 out: !! 1955 *pagep = page + index - hindex; 2394 *foliop = folio; << 2395 return 0; 1956 return 0; 2396 1957 2397 /* 1958 /* 2398 * Error recovery. 1959 * Error recovery. 2399 */ 1960 */ >> 1961 unacct: >> 1962 shmem_inode_unacct_blocks(inode, compound_nr(page)); >> 1963 >> 1964 if (PageTransHuge(page)) { >> 1965 unlock_page(page); >> 1966 put_page(page); >> 1967 goto alloc_nohuge; >> 1968 } 2400 unlock: 1969 unlock: 2401 if (alloced) !! 1970 if (page) { 2402 filemap_remove_folio(folio); !! 1971 unlock_page(page); 2403 shmem_recalc_inode(inode, 0, 0); !! 1972 put_page(page); 2404 if (folio) { !! 1973 } 2405 folio_unlock(folio); !! 1974 if (error == -ENOSPC && !once++) { 2406 folio_put(folio); !! 
1975 spin_lock_irq(&info->lock); >> 1976 shmem_recalc_inode(inode); >> 1977 spin_unlock_irq(&info->lock); >> 1978 goto repeat; 2407 } 1979 } >> 1980 if (error == -EEXIST) >> 1981 goto repeat; 2408 return error; 1982 return error; 2409 } 1983 } 2410 1984 2411 /** << 2412 * shmem_get_folio - find, and lock a shmem f << 2413 * @inode: inode to search << 2414 * @index: the page index. << 2415 * @write_end: end of a write, could extend << 2416 * @foliop: pointer to the folio if found << 2417 * @sgp: SGP_* flags to control behavi << 2418 * << 2419 * Looks up the page cache entry at @inode & << 2420 * present, it is returned locked with an inc << 2421 * << 2422 * If the caller modifies data in the folio, << 2423 * before unlocking the folio to ensure that << 2424 * There is no need to reserve space before c << 2425 * << 2426 * When no folio is found, the behavior depen << 2427 * - for SGP_READ, *@foliop is %NULL and 0 i << 2428 * - for SGP_NOALLOC, *@foliop is %NULL and << 2429 * - for all other flags a new folio is allo << 2430 * page cache and returned locked in @foli << 2431 * << 2432 * Context: May sleep. << 2433 * Return: 0 if successful, else a negative e << 2434 */ << 2435 int shmem_get_folio(struct inode *inode, pgof << 2436 struct folio **foliop, en << 2437 { << 2438 return shmem_get_folio_gfp(inode, ind << 2439 mapping_gfp_mask(inod << 2440 } << 2441 EXPORT_SYMBOL_GPL(shmem_get_folio); << 2442 << 2443 /* 1985 /* 2444 * This is like autoremove_wake_function, but 1986 * This is like autoremove_wake_function, but it removes the wait queue 2445 * entry unconditionally - even if something 1987 * entry unconditionally - even if something else had already woken the 2446 * target. 1988 * target. 2447 */ 1989 */ 2448 static int synchronous_wake_function(wait_que !! 
1990 static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) 2449 unsigned int mode, in << 2450 { 1991 { 2451 int ret = default_wake_function(wait, 1992 int ret = default_wake_function(wait, mode, sync, key); 2452 list_del_init(&wait->entry); 1993 list_del_init(&wait->entry); 2453 return ret; 1994 return ret; 2454 } 1995 } 2455 1996 2456 /* << 2457 * Trinity finds that probing a hole which tm << 2458 * prevent the hole-punch from ever completin << 2459 * locks writers out with its hold on i_rwsem << 2460 * faulting pages into the hole while it's be << 2461 * shmem_undo_range() does remove the additio << 2462 * keep up, as each new page needs its own un << 2463 * and the i_mmap tree grows ever slower to s << 2464 * << 2465 * It does not matter if we sometimes reach t << 2466 * hole-punch begins, so that one fault then << 2467 * we just need to make racing faults a rare << 2468 * << 2469 * The implementation below would be much sim << 2470 * standard mutex or completion: but we canno << 2471 * and bloating every shmem inode for this un << 2472 */ << 2473 static vm_fault_t shmem_falloc_wait(struct vm << 2474 { << 2475 struct shmem_falloc *shmem_falloc; << 2476 struct file *fpin = NULL; << 2477 vm_fault_t ret = 0; << 2478 << 2479 spin_lock(&inode->i_lock); << 2480 shmem_falloc = inode->i_private; << 2481 if (shmem_falloc && << 2482 shmem_falloc->waitq && << 2483 vmf->pgoff >= shmem_falloc->start << 2484 vmf->pgoff < shmem_falloc->next) << 2485 wait_queue_head_t *shmem_fall << 2486 DEFINE_WAIT_FUNC(shmem_fault_ << 2487 << 2488 ret = VM_FAULT_NOPAGE; << 2489 fpin = maybe_unlock_mmap_for_ << 2490 shmem_falloc_waitq = shmem_fa << 2491 prepare_to_wait(shmem_falloc_ << 2492 TASK_UNINTERR << 2493 spin_unlock(&inode->i_lock); << 2494 schedule(); << 2495 << 2496 /* << 2497 * shmem_falloc_waitq points << 2498 * stack of the hole-punching << 2499 * is usually invalid by the << 2500 * finish_wait() does not der << 2501 * though i_lock needed lest << 2502 */ << 2503 spin_lock(&inode->i_lock); << 2504 finish_wait(shmem_falloc_wait << 2505 } << 2506 spin_unlock(&inode->i_lock); << 2507 if (fpin) { << 2508 fput(fpin); << 2509 ret = VM_FAULT_RETRY; << 2510 } << 2511 return ret; << 2512 } << 2513 << 2514 static vm_fault_t shmem_fault(struct vm_fault 1997 static vm_fault_t shmem_fault(struct vm_fault *vmf) 2515 { 1998 { 2516 struct inode *inode = file_inode(vmf- !! 1999 struct vm_area_struct *vma = vmf->vma; >> 2000 struct inode *inode = file_inode(vma->vm_file); 2517 gfp_t gfp = mapping_gfp_mask(inode->i 2001 gfp_t gfp = mapping_gfp_mask(inode->i_mapping); 2518 struct folio *folio = NULL; !! 2002 enum sgp_type sgp; 2519 vm_fault_t ret = 0; << 2520 int err; 2003 int err; >> 2004 vm_fault_t ret = VM_FAULT_LOCKED; 2521 2005 2522 /* 2006 /* 2523 * Trinity finds that probing a hole 2007 * Trinity finds that probing a hole which tmpfs is punching can 2524 * prevent the hole-punch from ever c !! 2008 * prevent the hole-punch from ever completing: which in turn >> 2009 * locks writers out with its hold on i_mutex. So refrain from >> 2010 * faulting pages into the hole while it's being punched. Although >> 2011 * shmem_undo_range() does remove the additions, it may be unable to >> 2012 * keep up, as each new page needs its own unmap_mapping_range() call, >> 2013 * and the i_mmap tree grows ever slower to scan if new vmas are added. 
>> 2014 * >> 2015 * It does not matter if we sometimes reach this check just before the >> 2016 * hole-punch begins, so that one fault then races with the punch: >> 2017 * we just need to make racing faults a rare case. >> 2018 * >> 2019 * The implementation below would be much simpler if we just used a >> 2020 * standard mutex or completion: but we cannot take i_mutex in fault, >> 2021 * and bloating every shmem inode for this unlikely case would be sad. 2525 */ 2022 */ 2526 if (unlikely(inode->i_private)) { 2023 if (unlikely(inode->i_private)) { 2527 ret = shmem_falloc_wait(vmf, !! 2024 struct shmem_falloc *shmem_falloc; 2528 if (ret) !! 2025 >> 2026 spin_lock(&inode->i_lock); >> 2027 shmem_falloc = inode->i_private; >> 2028 if (shmem_falloc && >> 2029 shmem_falloc->waitq && >> 2030 vmf->pgoff >= shmem_falloc->start && >> 2031 vmf->pgoff < shmem_falloc->next) { >> 2032 struct file *fpin; >> 2033 wait_queue_head_t *shmem_falloc_waitq; >> 2034 DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function); >> 2035 >> 2036 ret = VM_FAULT_NOPAGE; >> 2037 fpin = maybe_unlock_mmap_for_io(vmf, NULL); >> 2038 if (fpin) >> 2039 ret = VM_FAULT_RETRY; >> 2040 >> 2041 shmem_falloc_waitq = shmem_falloc->waitq; >> 2042 prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait, >> 2043 TASK_UNINTERRUPTIBLE); >> 2044 spin_unlock(&inode->i_lock); >> 2045 schedule(); >> 2046 >> 2047 /* >> 2048 * shmem_falloc_waitq points into the shmem_fallocate() >> 2049 * stack of the hole-punching task: shmem_falloc_waitq >> 2050 * is usually invalid by the time we reach here, but >> 2051 * finish_wait() does not dereference it in that case; >> 2052 * though i_lock needed lest racing with wake_up_all(). >> 2053 */ >> 2054 spin_lock(&inode->i_lock); >> 2055 finish_wait(shmem_falloc_waitq, &shmem_fault_wait); >> 2056 spin_unlock(&inode->i_lock); >> 2057 >> 2058 if (fpin) >> 2059 fput(fpin); 2529 return ret; 2060 return ret; >> 2061 } >> 2062 spin_unlock(&inode->i_lock); 2530 } 2063 } 2531 2064 2532 WARN_ON_ONCE(vmf->page != NULL); !! 2065 sgp = SGP_CACHE; 2533 err = shmem_get_folio_gfp(inode, vmf- !! 2066 2534 gfp, vmf, & !! 2067 if ((vma->vm_flags & VM_NOHUGEPAGE) || >> 2068 test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)) >> 2069 sgp = SGP_NOHUGE; >> 2070 else if (vma->vm_flags & VM_HUGEPAGE) >> 2071 sgp = SGP_HUGE; >> 2072 >> 2073 err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp, >> 2074 gfp, vma, vmf, &ret); 2535 if (err) 2075 if (err) 2536 return vmf_error(err); 2076 return vmf_error(err); 2537 if (folio) { << 2538 vmf->page = folio_file_page(f << 2539 ret |= VM_FAULT_LOCKED; << 2540 } << 2541 return ret; 2077 return ret; 2542 } 2078 } 2543 2079 2544 unsigned long shmem_get_unmapped_area(struct 2080 unsigned long shmem_get_unmapped_area(struct file *file, 2545 unsigne 2081 unsigned long uaddr, unsigned long len, 2546 unsigne 2082 unsigned long pgoff, unsigned long flags) 2547 { 2083 { >> 2084 unsigned long (*get_area)(struct file *, >> 2085 unsigned long, unsigned long, unsigned long, unsigned long); 2548 unsigned long addr; 2086 unsigned long addr; 2549 unsigned long offset; 2087 unsigned long offset; 2550 unsigned long inflated_len; 2088 unsigned long inflated_len; 2551 unsigned long inflated_addr; 2089 unsigned long inflated_addr; 2552 unsigned long inflated_offset; 2090 unsigned long inflated_offset; 2553 unsigned long hpage_size; << 2554 2091 2555 if (len > TASK_SIZE) 2092 if (len > TASK_SIZE) 2556 return -ENOMEM; 2093 return -ENOMEM; 2557 2094 2558 addr = mm_get_unmapped_area(current-> !! 
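The fault path above is what backs every first touch of a MAP_SHARED tmpfs page. A small userspace sketch, assuming a tmpfs mount at /dev/shm:

/* fault_demo.c - each first touch of a MAP_SHARED tmpfs page goes through
 * shmem_fault(), which pulls the page in via the getpage/get_folio path. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/shm/fault-demo", O_RDWR | O_CREAT | O_TRUNC, 0600); /* assumed path */
	char back[6] = "";

	if (fd < 0 || ftruncate(fd, 4096) < 0)
		return 1;
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;
	/* First store faults: shmem_fault() allocates the page and maps it. */
	strcpy(p, "hello");
	/* The same page backs read(2), so the data is immediately visible. */
	pread(fd, back, 6, 0);
	printf("%s\n", back);
	munmap(p, 4096);
	unlink("/dev/shm/fault-demo");
	return 0;
}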
2095 get_area = current->mm->get_unmapped_area; 2559 flags); !! 2096 addr = get_area(file, uaddr, len, pgoff, flags); 2560 2097 2561 if (!IS_ENABLED(CONFIG_TRANSPARENT_HU !! 2098 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) 2562 return addr; 2099 return addr; 2563 if (IS_ERR_VALUE(addr)) 2100 if (IS_ERR_VALUE(addr)) 2564 return addr; 2101 return addr; 2565 if (addr & ~PAGE_MASK) 2102 if (addr & ~PAGE_MASK) 2566 return addr; 2103 return addr; 2567 if (addr > TASK_SIZE - len) 2104 if (addr > TASK_SIZE - len) 2568 return addr; 2105 return addr; 2569 2106 2570 if (shmem_huge == SHMEM_HUGE_DENY) 2107 if (shmem_huge == SHMEM_HUGE_DENY) 2571 return addr; 2108 return addr; >> 2109 if (len < HPAGE_PMD_SIZE) >> 2110 return addr; 2572 if (flags & MAP_FIXED) 2111 if (flags & MAP_FIXED) 2573 return addr; 2112 return addr; 2574 /* 2113 /* 2575 * Our priority is to support MAP_SHA 2114 * Our priority is to support MAP_SHARED mapped hugely; 2576 * and support MAP_PRIVATE mapped hug 2115 * and support MAP_PRIVATE mapped hugely too, until it is COWed. 2577 * But if caller specified an address 2116 * But if caller specified an address hint and we allocated area there 2578 * successfully, respect that as befo 2117 * successfully, respect that as before. 2579 */ 2118 */ 2580 if (uaddr == addr) 2119 if (uaddr == addr) 2581 return addr; 2120 return addr; 2582 2121 2583 hpage_size = HPAGE_PMD_SIZE; << 2584 if (shmem_huge != SHMEM_HUGE_FORCE) { 2122 if (shmem_huge != SHMEM_HUGE_FORCE) { 2585 struct super_block *sb; 2123 struct super_block *sb; 2586 unsigned long __maybe_unused << 2587 int order = 0; << 2588 2124 2589 if (file) { 2125 if (file) { 2590 VM_BUG_ON(file->f_op 2126 VM_BUG_ON(file->f_op != &shmem_file_operations); 2591 sb = file_inode(file) 2127 sb = file_inode(file)->i_sb; 2592 } else { 2128 } else { 2593 /* 2129 /* 2594 * Called directly fr 2130 * Called directly from mm/mmap.c, or drivers/char/mem.c 2595 * for "/dev/zero", t 2131 * for "/dev/zero", to create a shared anonymous object. 2596 */ 2132 */ 2597 if (IS_ERR(shm_mnt)) 2133 if (IS_ERR(shm_mnt)) 2598 return addr; 2134 return addr; 2599 sb = shm_mnt->mnt_sb; 2135 sb = shm_mnt->mnt_sb; 2600 << 2601 /* << 2602 * Find the highest m << 2603 * provide a suitable << 2604 */ << 2605 #ifdef CONFIG_TRANSPARENT_HUGEPAGE << 2606 hpage_orders = READ_O << 2607 hpage_orders |= READ_ << 2608 hpage_orders |= READ_ << 2609 if (SHMEM_SB(sb)->hug << 2610 hpage_orders << 2611 << 2612 if (hpage_orders > 0) << 2613 order = highe << 2614 hpage_size = << 2615 } << 2616 #endif << 2617 } 2136 } 2618 if (SHMEM_SB(sb)->huge == SHM !! 2137 if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER) 2619 return addr; 2138 return addr; 2620 } 2139 } 2621 2140 2622 if (len < hpage_size) !! 2141 offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1); 2623 return addr; !! 2142 if (offset && offset + len < 2 * HPAGE_PMD_SIZE) 2624 << 2625 offset = (pgoff << PAGE_SHIFT) & (hpa << 2626 if (offset && offset + len < 2 * hpag << 2627 return addr; 2143 return addr; 2628 if ((addr & (hpage_size - 1)) == offs !! 2144 if ((addr & (HPAGE_PMD_SIZE-1)) == offset) 2629 return addr; 2145 return addr; 2630 2146 2631 inflated_len = len + hpage_size - PAG !! 2147 inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE; 2632 if (inflated_len > TASK_SIZE) 2148 if (inflated_len > TASK_SIZE) 2633 return addr; 2149 return addr; 2634 if (inflated_len < len) 2150 if (inflated_len < len) 2635 return addr; 2151 return addr; 2636 2152 2637 inflated_addr = mm_get_unmapped_area( !! 
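The address inflation above only pays off when tmpfs huge pages are enabled (huge=always or an equivalent setting); otherwise the first unmapped-area result is returned unchanged. The sketch below, which assumes an x86-64 2 MiB PMD size and a /dev/shm tmpfs mount, merely checks whether the kernel picked a PMD-aligned slot for a hint-less MAP_SHARED mapping.

/* align_check.c - with tmpfs huge pages enabled, shmem_get_unmapped_area()
 * arranges for file offset 0 to be able to sit on a PMD boundary. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#define HPAGE_SIZE (2UL << 20)	/* assumption: 2 MiB PMD (x86-64) */

int main(void)
{
	int fd = open("/dev/shm/align-demo", O_RDWR | O_CREAT | O_TRUNC, 0600);
	size_t len = 4 * HPAGE_SIZE;

	if (fd < 0 || ftruncate(fd, len) < 0)
		return 1;
	/* No address hint, so the kernel is free to pick an aligned slot. */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;
	printf("addr %p, PMD-aligned: %s\n", p,
	       ((uintptr_t)p % HPAGE_SIZE) ? "no" : "yes");
	munmap(p, len);
	unlink("/dev/shm/align-demo");
	return 0;
}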
2153 inflated_addr = get_area(NULL, uaddr, inflated_len, 0, flags); 2638 << 2639 if (IS_ERR_VALUE(inflated_addr)) 2154 if (IS_ERR_VALUE(inflated_addr)) 2640 return addr; 2155 return addr; 2641 if (inflated_addr & ~PAGE_MASK) 2156 if (inflated_addr & ~PAGE_MASK) 2642 return addr; 2157 return addr; 2643 2158 2644 inflated_offset = inflated_addr & (hp !! 2159 inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1); 2645 inflated_addr += offset - inflated_of 2160 inflated_addr += offset - inflated_offset; 2646 if (inflated_offset > offset) 2161 if (inflated_offset > offset) 2647 inflated_addr += hpage_size; !! 2162 inflated_addr += HPAGE_PMD_SIZE; 2648 2163 2649 if (inflated_addr > TASK_SIZE - len) 2164 if (inflated_addr > TASK_SIZE - len) 2650 return addr; 2165 return addr; 2651 return inflated_addr; 2166 return inflated_addr; 2652 } 2167 } 2653 2168 2654 #ifdef CONFIG_NUMA 2169 #ifdef CONFIG_NUMA 2655 static int shmem_set_policy(struct vm_area_st 2170 static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol) 2656 { 2171 { 2657 struct inode *inode = file_inode(vma- 2172 struct inode *inode = file_inode(vma->vm_file); 2658 return mpol_set_shared_policy(&SHMEM_ 2173 return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol); 2659 } 2174 } 2660 2175 2661 static struct mempolicy *shmem_get_policy(str 2176 static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma, 2662 uns !! 2177 unsigned long addr) 2663 { 2178 { 2664 struct inode *inode = file_inode(vma- 2179 struct inode *inode = file_inode(vma->vm_file); 2665 pgoff_t index; 2180 pgoff_t index; 2666 2181 2667 /* << 2668 * Bias interleave by inode number to << 2669 * but this interface is independent << 2670 * supplies only that bias, letting c << 2671 * by page order, as in shmem_get_pgo << 2672 */ << 2673 *ilx = inode->i_ino; << 2674 index = ((addr - vma->vm_start) >> PA 2182 index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; 2675 return mpol_shared_policy_lookup(&SHM 2183 return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index); 2676 } 2184 } >> 2185 #endif 2677 2186 2678 static struct mempolicy *shmem_get_pgoff_poli !! 2187 int shmem_lock(struct file *file, int lock, struct user_struct *user) 2679 pgoff_t index, unsign << 2680 { << 2681 struct mempolicy *mpol; << 2682 << 2683 /* Bias interleave by inode number to << 2684 *ilx = info->vfs_inode.i_ino + (index << 2685 << 2686 mpol = mpol_shared_policy_lookup(&inf << 2687 return mpol ? mpol : get_task_policy( << 2688 } << 2689 #else << 2690 static struct mempolicy *shmem_get_pgoff_poli << 2691 pgoff_t index, unsign << 2692 { << 2693 *ilx = 0; << 2694 return NULL; << 2695 } << 2696 #endif /* CONFIG_NUMA */ << 2697 << 2698 int shmem_lock(struct file *file, int lock, s << 2699 { 2188 { 2700 struct inode *inode = file_inode(file 2189 struct inode *inode = file_inode(file); 2701 struct shmem_inode_info *info = SHMEM 2190 struct shmem_inode_info *info = SHMEM_I(inode); 2702 int retval = -ENOMEM; 2191 int retval = -ENOMEM; 2703 2192 2704 /* 2193 /* 2705 * What serializes the accesses to in 2194 * What serializes the accesses to info->flags? 2706 * ipc_lock_object() when called from 2195 * ipc_lock_object() when called from shmctl_do_lock(), 2707 * no serialization needed when calle 2196 * no serialization needed when called from shm_destroy(). 2708 */ 2197 */ 2709 if (lock && !(info->flags & VM_LOCKED 2198 if (lock && !(info->flags & VM_LOCKED)) { 2710 if (!user_shm_lock(inode->i_s !! 
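The shared-policy hooks above are what make mbind() on a shared tmpfs mapping stick to the inode's policy tree rather than to one process's VMA. A hedged userspace sketch (requires libnuma's <numaif.h>, link with -lnuma; the path and the single-node mask are assumptions, and node 0 is assumed to exist):

/* mbind_demo.c - mbind() on a MAP_SHARED tmpfs mapping ends up in
 * shmem_set_policy(), so later faults allocate according to that policy. */
#include <fcntl.h>
#include <numaif.h>		/* mbind(); link with -lnuma */
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/shm/mbind-demo", O_RDWR | O_CREAT | O_TRUNC, 0600); /* assumed path */
	size_t len = 1 << 20;
	unsigned long nodes = 1;	/* nodemask with only node 0 set */

	if (fd < 0 || ftruncate(fd, len) < 0)
		return 1;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;
	/* Install the policy before touching the pages, so the faults that
	 * follow allocate from node 0. */
	if (mbind(p, len, MPOL_BIND, &nodes, 8 * sizeof(nodes), 0) < 0)
		return 1;
	p[0] = 1;			/* first fault allocates per the shared policy */
	munmap(p, len);
	unlink("/dev/shm/mbind-demo");
	return 0;
}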
2199 if (!user_shm_lock(inode->i_size, user)) 2711 goto out_nomem; 2200 goto out_nomem; 2712 info->flags |= VM_LOCKED; 2201 info->flags |= VM_LOCKED; 2713 mapping_set_unevictable(file- 2202 mapping_set_unevictable(file->f_mapping); 2714 } 2203 } 2715 if (!lock && (info->flags & VM_LOCKED !! 2204 if (!lock && (info->flags & VM_LOCKED) && user) { 2716 user_shm_unlock(inode->i_size !! 2205 user_shm_unlock(inode->i_size, user); 2717 info->flags &= ~VM_LOCKED; 2206 info->flags &= ~VM_LOCKED; 2718 mapping_clear_unevictable(fil 2207 mapping_clear_unevictable(file->f_mapping); 2719 } 2208 } 2720 retval = 0; 2209 retval = 0; 2721 2210 2722 out_nomem: 2211 out_nomem: 2723 return retval; 2212 return retval; 2724 } 2213 } 2725 2214 2726 static int shmem_mmap(struct file *file, stru 2215 static int shmem_mmap(struct file *file, struct vm_area_struct *vma) 2727 { 2216 { 2728 struct inode *inode = file_inode(file !! 2217 struct shmem_inode_info *info = SHMEM_I(file_inode(file)); 2729 struct shmem_inode_info *info = SHMEM << 2730 int ret; 2218 int ret; 2731 2219 2732 ret = seal_check_write(info->seals, v !! 2220 ret = seal_check_future_write(info->seals, vma); 2733 if (ret) 2221 if (ret) 2734 return ret; 2222 return ret; 2735 2223 2736 file_accessed(file); 2224 file_accessed(file); 2737 /* This is anonymous shared memory if !! 2225 vma->vm_ops = &shmem_vm_ops; 2738 if (inode->i_nlink) !! 2226 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && 2739 vma->vm_ops = &shmem_vm_ops; !! 2227 ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) < 2740 else !! 2228 (vma->vm_end & HPAGE_PMD_MASK)) { 2741 vma->vm_ops = &shmem_anon_vm_ !! 2229 khugepaged_enter(vma, vma->vm_flags); >> 2230 } 2742 return 0; 2231 return 0; 2743 } 2232 } 2744 2233 2745 static int shmem_file_open(struct inode *inod !! 2234 static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir, 2746 { !! 2235 umode_t mode, dev_t dev, unsigned long flags) 2747 file->f_mode |= FMODE_CAN_ODIRECT; << 2748 return generic_file_open(inode, file) << 2749 } << 2750 << 2751 #ifdef CONFIG_TMPFS_XATTR << 2752 static int shmem_initxattrs(struct inode *, c << 2753 << 2754 /* << 2755 * chattr's fsflags are unrelated to extended << 2756 * but tmpfs has chosen to enable them under << 2757 */ << 2758 static void shmem_set_inode_flags(struct inod << 2759 { << 2760 unsigned int i_flags = 0; << 2761 << 2762 if (fsflags & FS_NOATIME_FL) << 2763 i_flags |= S_NOATIME; << 2764 if (fsflags & FS_APPEND_FL) << 2765 i_flags |= S_APPEND; << 2766 if (fsflags & FS_IMMUTABLE_FL) << 2767 i_flags |= S_IMMUTABLE; << 2768 /* << 2769 * But FS_NODUMP_FL does not require << 2770 */ << 2771 inode_set_flags(inode, i_flags, S_NOA << 2772 } << 2773 #else << 2774 static void shmem_set_inode_flags(struct inod << 2775 { << 2776 } << 2777 #define shmem_initxattrs NULL << 2778 #endif << 2779 << 2780 static struct offset_ctx *shmem_get_offset_ct << 2781 { << 2782 return &SHMEM_I(inode)->dir_offsets; << 2783 } << 2784 << 2785 static struct inode *__shmem_get_inode(struct << 2786 << 2787 << 2788 << 2789 { 2236 { 2790 struct inode *inode; 2237 struct inode *inode; 2791 struct shmem_inode_info *info; 2238 struct shmem_inode_info *info; 2792 struct shmem_sb_info *sbinfo = SHMEM_ 2239 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 2793 ino_t ino; << 2794 int err; << 2795 2240 2796 err = shmem_reserve_inode(sb, &ino); !! 2241 if (shmem_reserve_inode(sb)) 2797 if (err) !! 
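shmem_lock() is normally reached through shmctl(SHM_LOCK) on a SysV segment; a short sketch of that path from userspace:

/* shm_lock_demo.c - shmctl(SHM_LOCK) reaches shmem_lock(), which charges the
 * segment against RLIMIT_MEMLOCK and marks its page cache unevictable. */
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	int id = shmget(IPC_PRIVATE, 1 << 20, IPC_CREAT | 0600);

	if (id < 0)
		return 1;
	/* Lock: pages of this segment will no longer be written to swap. */
	if (shmctl(id, SHM_LOCK, NULL) < 0)
		perror("SHM_LOCK (needs CAP_IPC_LOCK or enough RLIMIT_MEMLOCK)");
	/* Unlock again and drop the segment. */
	shmctl(id, SHM_UNLOCK, NULL);
	shmctl(id, IPC_RMID, NULL);
	return 0;
}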
2242 return NULL; 2798 return ERR_PTR(err); << 2799 2243 2800 inode = new_inode(sb); 2244 inode = new_inode(sb); 2801 if (!inode) { !! 2245 if (inode) { 2802 shmem_free_inode(sb, 0); !! 2246 inode->i_ino = get_next_ino(); 2803 return ERR_PTR(-ENOSPC); !! 2247 inode_init_owner(inode, dir, mode); 2804 } !! 2248 inode->i_blocks = 0; 2805 !! 2249 inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); 2806 inode->i_ino = ino; !! 2250 inode->i_generation = prandom_u32(); 2807 inode_init_owner(idmap, inode, dir, m !! 2251 info = SHMEM_I(inode); 2808 inode->i_blocks = 0; !! 2252 memset(info, 0, (char *)inode - (char *)info); 2809 simple_inode_init_ts(inode); !! 2253 spin_lock_init(&info->lock); 2810 inode->i_generation = get_random_u32( !! 2254 atomic_set(&info->stop_eviction, 0); 2811 info = SHMEM_I(inode); !! 2255 info->seals = F_SEAL_SEAL; 2812 memset(info, 0, (char *)inode - (char !! 2256 info->flags = flags & VM_NORESERVE; 2813 spin_lock_init(&info->lock); !! 2257 INIT_LIST_HEAD(&info->shrinklist); 2814 atomic_set(&info->stop_eviction, 0); !! 2258 INIT_LIST_HEAD(&info->swaplist); 2815 info->seals = F_SEAL_SEAL; !! 2259 simple_xattrs_init(&info->xattrs); 2816 info->flags = flags & VM_NORESERVE; !! 2260 cache_no_acl(inode); 2817 info->i_crtime = inode_get_mtime(inod !! 2261 2818 info->fsflags = (dir == NULL) ? 0 : !! 2262 switch (mode & S_IFMT) { 2819 SHMEM_I(dir)->fsflags & SHMEM !! 2263 default: 2820 if (info->fsflags) !! 2264 inode->i_op = &shmem_special_inode_operations; 2821 shmem_set_inode_flags(inode, !! 2265 init_special_inode(inode, mode, dev); 2822 INIT_LIST_HEAD(&info->shrinklist); !! 2266 break; 2823 INIT_LIST_HEAD(&info->swaplist); !! 2267 case S_IFREG: 2824 simple_xattrs_init(&info->xattrs); !! 2268 inode->i_mapping->a_ops = &shmem_aops; 2825 cache_no_acl(inode); !! 2269 inode->i_op = &shmem_inode_operations; 2826 if (sbinfo->noswap) !! 2270 inode->i_fop = &shmem_file_operations; 2827 mapping_set_unevictable(inode !! 2271 mpol_shared_policy_init(&info->policy, 2828 mapping_set_large_folios(inode->i_map !! 2272 shmem_get_sbmpol(sbinfo)); 2829 !! 2273 break; 2830 switch (mode & S_IFMT) { !! 2274 case S_IFDIR: 2831 default: !! 2275 inc_nlink(inode); 2832 inode->i_op = &shmem_special_ !! 2276 /* Some things misbehave if size == 0 on a directory */ 2833 init_special_inode(inode, mod !! 2277 inode->i_size = 2 * BOGO_DIRENT_SIZE; 2834 break; !! 2278 inode->i_op = &shmem_dir_inode_operations; 2835 case S_IFREG: !! 2279 inode->i_fop = &simple_dir_operations; 2836 inode->i_mapping->a_ops = &sh !! 2280 break; 2837 inode->i_op = &shmem_inode_op !! 2281 case S_IFLNK: 2838 inode->i_fop = &shmem_file_op !! 2282 /* 2839 mpol_shared_policy_init(&info !! 2283 * Must not load anything in the rbtree, 2840 shme !! 2284 * mpol_free_shared_policy will not be called. 2841 break; !! 2285 */ 2842 case S_IFDIR: !! 2286 mpol_shared_policy_init(&info->policy, NULL); 2843 inc_nlink(inode); !! 2287 break; 2844 /* Some things misbehave if s !! 2288 } 2845 inode->i_size = 2 * BOGO_DIRE << 2846 inode->i_op = &shmem_dir_inod << 2847 inode->i_fop = &simple_offset << 2848 simple_offset_init(shmem_get_ << 2849 break; << 2850 case S_IFLNK: << 2851 /* << 2852 * Must not load anything in << 2853 * mpol_free_shared_policy wi << 2854 */ << 2855 mpol_shared_policy_init(&info << 2856 break; << 2857 } << 2858 2289 2859 lockdep_annotate_inode_mutex_key(inod !! 
2290 lockdep_annotate_inode_mutex_key(inode); >> 2291 } else >> 2292 shmem_free_inode(sb); 2860 return inode; 2293 return inode; 2861 } 2294 } 2862 2295 2863 #ifdef CONFIG_TMPFS_QUOTA !! 2296 bool shmem_mapping(struct address_space *mapping) 2864 static struct inode *shmem_get_inode(struct m << 2865 struct s << 2866 umode_t << 2867 { << 2868 int err; << 2869 struct inode *inode; << 2870 << 2871 inode = __shmem_get_inode(idmap, sb, << 2872 if (IS_ERR(inode)) << 2873 return inode; << 2874 << 2875 err = dquot_initialize(inode); << 2876 if (err) << 2877 goto errout; << 2878 << 2879 err = dquot_alloc_inode(inode); << 2880 if (err) { << 2881 dquot_drop(inode); << 2882 goto errout; << 2883 } << 2884 return inode; << 2885 << 2886 errout: << 2887 inode->i_flags |= S_NOQUOTA; << 2888 iput(inode); << 2889 return ERR_PTR(err); << 2890 } << 2891 #else << 2892 static inline struct inode *shmem_get_inode(s << 2893 struct s << 2894 umode_t << 2895 { 2297 { 2896 return __shmem_get_inode(idmap, sb, d !! 2298 return mapping->a_ops == &shmem_aops; 2897 } 2299 } 2898 #endif /* CONFIG_TMPFS_QUOTA */ << 2899 2300 2900 #ifdef CONFIG_USERFAULTFD !! 2301 static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm, 2901 int shmem_mfill_atomic_pte(pmd_t *dst_pmd, !! 2302 pmd_t *dst_pmd, 2902 struct vm_area_str !! 2303 struct vm_area_struct *dst_vma, 2903 unsigned long dst_ !! 2304 unsigned long dst_addr, 2904 unsigned long src_ !! 2305 unsigned long src_addr, 2905 uffd_flags_t flags !! 2306 bool zeropage, 2906 struct folio **fol !! 2307 struct page **pagep) 2907 { 2308 { 2908 struct inode *inode = file_inode(dst_ 2309 struct inode *inode = file_inode(dst_vma->vm_file); 2909 struct shmem_inode_info *info = SHMEM 2310 struct shmem_inode_info *info = SHMEM_I(inode); 2910 struct address_space *mapping = inode 2311 struct address_space *mapping = inode->i_mapping; 2911 gfp_t gfp = mapping_gfp_mask(mapping) 2312 gfp_t gfp = mapping_gfp_mask(mapping); 2912 pgoff_t pgoff = linear_page_index(dst 2313 pgoff_t pgoff = linear_page_index(dst_vma, dst_addr); >> 2314 struct mem_cgroup *memcg; >> 2315 spinlock_t *ptl; 2913 void *page_kaddr; 2316 void *page_kaddr; 2914 struct folio *folio; !! 2317 struct page *page; >> 2318 pte_t _dst_pte, *dst_pte; 2915 int ret; 2319 int ret; 2916 pgoff_t max_off; !! 2320 pgoff_t offset, max_off; 2917 2321 2918 if (shmem_inode_acct_blocks(inode, 1) !! 2322 ret = -ENOMEM; >> 2323 if (!shmem_inode_acct_block(inode, 1)) { 2919 /* 2324 /* 2920 * We may have got a page, re 2325 * We may have got a page, returned -ENOENT triggering a retry, 2921 * and now we find ourselves 2326 * and now we find ourselves with -ENOMEM. Release the page, to 2922 * avoid a BUG_ON in our call 2327 * avoid a BUG_ON in our caller. 2923 */ 2328 */ 2924 if (unlikely(*foliop)) { !! 2329 if (unlikely(*pagep)) { 2925 folio_put(*foliop); !! 2330 put_page(*pagep); 2926 *foliop = NULL; !! 2331 *pagep = NULL; 2927 } 2332 } 2928 return -ENOMEM; !! 2333 goto out; 2929 } 2334 } 2930 2335 2931 if (!*foliop) { !! 2336 if (!*pagep) { 2932 ret = -ENOMEM; !! 2337 page = shmem_alloc_page(gfp, info, pgoff); 2933 folio = shmem_alloc_folio(gfp !! 2338 if (!page) 2934 if (!folio) << 2935 goto out_unacct_block 2339 goto out_unacct_blocks; 2936 2340 2937 if (uffd_flags_mode_is(flags, !! 2341 if (!zeropage) { /* mcopy_atomic */ 2938 page_kaddr = kmap_loc !! 
2342 page_kaddr = kmap_atomic(page); 2939 /* << 2940 * The read mmap_lock << 2941 * mmap_lock being re << 2942 * possible if a writ << 2943 * << 2944 * process A thread 1 << 2945 * process A thread 2 << 2946 * process B thread 1 << 2947 * process B thread 2 << 2948 * process A thread 1 << 2949 * process B thread 1 << 2950 * << 2951 * Disable page fault << 2952 * and retry the copy << 2953 */ << 2954 pagefault_disable(); << 2955 ret = copy_from_user( 2343 ret = copy_from_user(page_kaddr, 2956 2344 (const void __user *)src_addr, 2957 2345 PAGE_SIZE); 2958 pagefault_enable(); !! 2346 kunmap_atomic(page_kaddr); 2959 kunmap_local(page_kad << 2960 2347 2961 /* fallback to copy_f !! 2348 /* fallback to copy_from_user outside mmap_sem */ 2962 if (unlikely(ret)) { 2349 if (unlikely(ret)) { 2963 *foliop = fol !! 2350 *pagep = page; 2964 ret = -ENOENT !! 2351 shmem_inode_unacct_blocks(inode, 1); 2965 /* don't free 2352 /* don't free the page */ 2966 goto out_unac !! 2353 return -ENOENT; 2967 } 2354 } 2968 !! 2355 } else { /* mfill_zeropage_atomic */ 2969 flush_dcache_folio(fo !! 2356 clear_highpage(page); 2970 } else { /* ZE << 2971 clear_user_highpage(& << 2972 } 2357 } 2973 } else { 2358 } else { 2974 folio = *foliop; !! 2359 page = *pagep; 2975 VM_BUG_ON_FOLIO(folio_test_la !! 2360 *pagep = NULL; 2976 *foliop = NULL; << 2977 } 2361 } 2978 2362 2979 VM_BUG_ON(folio_test_locked(folio)); !! 2363 VM_BUG_ON(PageLocked(page) || PageSwapBacked(page)); 2980 VM_BUG_ON(folio_test_swapbacked(folio !! 2364 __SetPageLocked(page); 2981 __folio_set_locked(folio); !! 2365 __SetPageSwapBacked(page); 2982 __folio_set_swapbacked(folio); !! 2366 __SetPageUptodate(page); 2983 __folio_mark_uptodate(folio); << 2984 2367 2985 ret = -EFAULT; 2368 ret = -EFAULT; >> 2369 offset = linear_page_index(dst_vma, dst_addr); 2986 max_off = DIV_ROUND_UP(i_size_read(in 2370 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); 2987 if (unlikely(pgoff >= max_off)) !! 2371 if (unlikely(offset >= max_off)) 2988 goto out_release; 2372 goto out_release; 2989 2373 2990 ret = mem_cgroup_charge(folio, dst_vm !! 2374 ret = mem_cgroup_try_charge_delay(page, dst_mm, gfp, &memcg, false); 2991 if (ret) << 2992 goto out_release; << 2993 ret = shmem_add_to_page_cache(folio, << 2994 if (ret) 2375 if (ret) 2995 goto out_release; 2376 goto out_release; 2996 2377 2997 ret = mfill_atomic_install_pte(dst_pm !! 2378 ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL, 2998 &folio !! 2379 gfp & GFP_RECLAIM_MASK); 2999 if (ret) 2380 if (ret) 3000 goto out_delete_from_cache; !! 2381 goto out_release_uncharge; 3001 2382 3002 shmem_recalc_inode(inode, 1, 0); !! 2383 mem_cgroup_commit_charge(page, memcg, false, false); 3003 folio_unlock(folio); !! 2384 3004 return 0; !! 2385 _dst_pte = mk_pte(page, dst_vma->vm_page_prot); 3005 out_delete_from_cache: !! 2386 if (dst_vma->vm_flags & VM_WRITE) 3006 filemap_remove_folio(folio); !! 2387 _dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte)); >> 2388 else { >> 2389 /* >> 2390 * We don't set the pte dirty if the vma has no >> 2391 * VM_WRITE permission, so mark the page dirty or it >> 2392 * could be freed from under us. We could do it >> 2393 * unconditionally before unlock_page(), but doing it >> 2394 * only if VM_WRITE is not set is faster. 
>> 2395 */ >> 2396 set_page_dirty(page); >> 2397 } >> 2398 >> 2399 dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl); >> 2400 >> 2401 ret = -EFAULT; >> 2402 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); >> 2403 if (unlikely(offset >= max_off)) >> 2404 goto out_release_uncharge_unlock; >> 2405 >> 2406 ret = -EEXIST; >> 2407 if (!pte_none(*dst_pte)) >> 2408 goto out_release_uncharge_unlock; >> 2409 >> 2410 lru_cache_add_anon(page); >> 2411 >> 2412 spin_lock_irq(&info->lock); >> 2413 info->alloced++; >> 2414 inode->i_blocks += BLOCKS_PER_PAGE; >> 2415 shmem_recalc_inode(inode); >> 2416 spin_unlock_irq(&info->lock); >> 2417 >> 2418 inc_mm_counter(dst_mm, mm_counter_file(page)); >> 2419 page_add_file_rmap(page, false); >> 2420 set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte); >> 2421 >> 2422 /* No need to invalidate - it was non-present before */ >> 2423 update_mmu_cache(dst_vma, dst_addr, dst_pte); >> 2424 pte_unmap_unlock(dst_pte, ptl); >> 2425 unlock_page(page); >> 2426 ret = 0; >> 2427 out: >> 2428 return ret; >> 2429 out_release_uncharge_unlock: >> 2430 pte_unmap_unlock(dst_pte, ptl); >> 2431 ClearPageDirty(page); >> 2432 delete_from_page_cache(page); >> 2433 out_release_uncharge: >> 2434 mem_cgroup_cancel_charge(page, memcg, false); 3007 out_release: 2435 out_release: 3008 folio_unlock(folio); !! 2436 unlock_page(page); 3009 folio_put(folio); !! 2437 put_page(page); 3010 out_unacct_blocks: 2438 out_unacct_blocks: 3011 shmem_inode_unacct_blocks(inode, 1); 2439 shmem_inode_unacct_blocks(inode, 1); 3012 return ret; !! 2440 goto out; >> 2441 } >> 2442 >> 2443 int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, >> 2444 pmd_t *dst_pmd, >> 2445 struct vm_area_struct *dst_vma, >> 2446 unsigned long dst_addr, >> 2447 unsigned long src_addr, >> 2448 struct page **pagep) >> 2449 { >> 2450 return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, >> 2451 dst_addr, src_addr, false, pagep); >> 2452 } >> 2453 >> 2454 int shmem_mfill_zeropage_pte(struct mm_struct *dst_mm, >> 2455 pmd_t *dst_pmd, >> 2456 struct vm_area_struct *dst_vma, >> 2457 unsigned long dst_addr) >> 2458 { >> 2459 struct page *page = NULL; >> 2460 >> 2461 return shmem_mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, >> 2462 dst_addr, 0, true, &page); 3013 } 2463 } 3014 #endif /* CONFIG_USERFAULTFD */ << 3015 2464 3016 #ifdef CONFIG_TMPFS 2465 #ifdef CONFIG_TMPFS 3017 static const struct inode_operations shmem_sy 2466 static const struct inode_operations shmem_symlink_inode_operations; 3018 static const struct inode_operations shmem_sh 2467 static const struct inode_operations shmem_short_symlink_operations; 3019 2468 >> 2469 #ifdef CONFIG_TMPFS_XATTR >> 2470 static int shmem_initxattrs(struct inode *, const struct xattr *, void *); >> 2471 #else >> 2472 #define shmem_initxattrs NULL >> 2473 #endif >> 2474 3020 static int 2475 static int 3021 shmem_write_begin(struct file *file, struct a 2476 shmem_write_begin(struct file *file, struct address_space *mapping, 3022 loff_t pos, unsigned !! 2477 loff_t pos, unsigned len, unsigned flags, 3023 struct folio **foliop !! 2478 struct page **pagep, void **fsdata) 3024 { 2479 { 3025 struct inode *inode = mapping->host; 2480 struct inode *inode = mapping->host; 3026 struct shmem_inode_info *info = SHMEM 2481 struct shmem_inode_info *info = SHMEM_I(inode); 3027 pgoff_t index = pos >> PAGE_SHIFT; 2482 pgoff_t index = pos >> PAGE_SHIFT; 3028 struct folio *folio; << 3029 int ret = 0; << 3030 2483 3031 /* i_rwsem is held by caller */ !! 
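The userfaultfd paths above can also be exercised without a fault-handler thread: UFFDIO_COPY on a registered shmem range simply populates the page in advance. The sketch below is an assumption-laden illustration: it needs a kernel with shmem userfaultfd support, permission to call userfaultfd(2) (vm.unprivileged_userfaultfd may apply), and the memfd name is arbitrary.

/* uffd_copy_demo.c - UFFDIO_COPY on a shmem-backed VMA is serviced by the
 * shmem mfill/mcopy path: the payload is copied into a freshly accounted
 * page, inserted into the file's page cache and mapped at dst. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	long uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	size_t len = 4096;
	int fd = memfd_create("uffd-demo", 0);	/* memfd is shmem-backed */
	struct uffdio_api api = { .api = UFFD_API };

	if (uffd < 0 || fd < 0 || ftruncate(fd, len) < 0)
		return 1;
	char *dst = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	char *src = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (dst == MAP_FAILED || src == MAP_FAILED || ioctl(uffd, UFFDIO_API, &api))
		return 1;

	struct uffdio_register reg = {
		.range = { .start = (unsigned long)dst, .len = len },
		.mode  = UFFDIO_REGISTER_MODE_MISSING,
	};
	if (ioctl(uffd, UFFDIO_REGISTER, &reg))
		return 1;

	strcpy(src, "filled by UFFDIO_COPY");
	struct uffdio_copy copy = {
		.dst = (unsigned long)dst,
		.src = (unsigned long)src,
		.len = len,
	};
	/* Resolve the (future) missing fault up front: the kernel copies src
	 * into a new shmem page and maps it at dst. */
	if (ioctl(uffd, UFFDIO_COPY, &copy))
		return 1;
	write(STDOUT_FILENO, dst, strlen(dst));		/* no fault taken here */
	write(STDOUT_FILENO, "\n", 1);
	return 0;
}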
2484 /* i_mutex is held by caller */ 3032 if (unlikely(info->seals & (F_SEAL_GR 2485 if (unlikely(info->seals & (F_SEAL_GROW | 3033 F_SEAL_WRI 2486 F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) { 3034 if (info->seals & (F_SEAL_WRI 2487 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) 3035 return -EPERM; 2488 return -EPERM; 3036 if ((info->seals & F_SEAL_GRO 2489 if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size) 3037 return -EPERM; 2490 return -EPERM; 3038 } 2491 } 3039 2492 3040 ret = shmem_get_folio(inode, index, p !! 2493 return shmem_getpage(inode, index, pagep, SGP_WRITE); 3041 if (ret) << 3042 return ret; << 3043 << 3044 if (folio_test_hwpoison(folio) || << 3045 (folio_test_large(folio) && folio << 3046 folio_unlock(folio); << 3047 folio_put(folio); << 3048 return -EIO; << 3049 } << 3050 << 3051 *foliop = folio; << 3052 return 0; << 3053 } 2494 } 3054 2495 3055 static int 2496 static int 3056 shmem_write_end(struct file *file, struct add 2497 shmem_write_end(struct file *file, struct address_space *mapping, 3057 loff_t pos, unsigned 2498 loff_t pos, unsigned len, unsigned copied, 3058 struct folio *folio, !! 2499 struct page *page, void *fsdata) 3059 { 2500 { 3060 struct inode *inode = mapping->host; 2501 struct inode *inode = mapping->host; 3061 2502 3062 if (pos + copied > inode->i_size) 2503 if (pos + copied > inode->i_size) 3063 i_size_write(inode, pos + cop 2504 i_size_write(inode, pos + copied); 3064 2505 3065 if (!folio_test_uptodate(folio)) { !! 2506 if (!PageUptodate(page)) { 3066 if (copied < folio_size(folio !! 2507 struct page *head = compound_head(page); 3067 size_t from = offset_ !! 2508 if (PageTransCompound(page)) { 3068 folio_zero_segments(f !! 2509 int i; 3069 from !! 2510 3070 } !! 2511 for (i = 0; i < HPAGE_PMD_NR; i++) { 3071 folio_mark_uptodate(folio); !! 2512 if (head + i == page) 3072 } !! 2513 continue; 3073 folio_mark_dirty(folio); !! 2514 clear_highpage(head + i); 3074 folio_unlock(folio); !! 2515 flush_dcache_page(head + i); 3075 folio_put(folio); !! 2516 } >> 2517 } >> 2518 if (copied < PAGE_SIZE) { >> 2519 unsigned from = pos & (PAGE_SIZE - 1); >> 2520 zero_user_segments(page, 0, from, >> 2521 from + copied, PAGE_SIZE); >> 2522 } >> 2523 SetPageUptodate(head); >> 2524 } >> 2525 set_page_dirty(page); >> 2526 unlock_page(page); >> 2527 put_page(page); 3076 2528 3077 return copied; 2529 return copied; 3078 } 2530 } 3079 2531 3080 static ssize_t shmem_file_read_iter(struct ki 2532 static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to) 3081 { 2533 { 3082 struct file *file = iocb->ki_filp; 2534 struct file *file = iocb->ki_filp; 3083 struct inode *inode = file_inode(file 2535 struct inode *inode = file_inode(file); 3084 struct address_space *mapping = inode 2536 struct address_space *mapping = inode->i_mapping; 3085 pgoff_t index; 2537 pgoff_t index; 3086 unsigned long offset; 2538 unsigned long offset; >> 2539 enum sgp_type sgp = SGP_READ; 3087 int error = 0; 2540 int error = 0; 3088 ssize_t retval = 0; 2541 ssize_t retval = 0; 3089 loff_t *ppos = &iocb->ki_pos; 2542 loff_t *ppos = &iocb->ki_pos; 3090 2543 >> 2544 /* >> 2545 * Might this read be for a stacking filesystem? Then when reading >> 2546 * holes of a sparse file, we actually need to allocate those pages, >> 2547 * and even mark them dirty, so it cannot exceed the max_blocks limit. 
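The seal checks in shmem_write_begin() (and the matching ones in shmem_fallocate()) are what make memfd sealing enforceable. A small sketch, assuming memfd_create() is available:

/* seal_demo.c - once F_SEAL_WRITE is set on a memfd, shmem_write_begin()
 * (and shmem_fallocate()) refuse further modification with -EPERM. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = memfd_create("seal-demo", MFD_ALLOW_SEALING);

	if (fd < 0 || write(fd, "before seal", 11) != 11)
		return 1;
	/* Freeze the contents: no more writes, no more growing. */
	if (fcntl(fd, F_ADD_SEALS, F_SEAL_WRITE | F_SEAL_GROW | F_SEAL_SEAL) < 0)
		return 1;
	if (write(fd, "x", 1) < 0)
		perror("write after F_SEAL_WRITE");	/* expected: EPERM */
	if (ftruncate(fd, 1 << 20) < 0)
		perror("grow after F_SEAL_GROW");	/* expected: EPERM */
	return 0;
}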
>> 2548 */ >> 2549 if (!iter_is_iovec(to)) >> 2550 sgp = SGP_CACHE; >> 2551 3091 index = *ppos >> PAGE_SHIFT; 2552 index = *ppos >> PAGE_SHIFT; 3092 offset = *ppos & ~PAGE_MASK; 2553 offset = *ppos & ~PAGE_MASK; 3093 2554 3094 for (;;) { 2555 for (;;) { 3095 struct folio *folio = NULL; << 3096 struct page *page = NULL; 2556 struct page *page = NULL; 3097 pgoff_t end_index; 2557 pgoff_t end_index; 3098 unsigned long nr, ret; 2558 unsigned long nr, ret; 3099 loff_t i_size = i_size_read(i 2559 loff_t i_size = i_size_read(inode); 3100 2560 3101 end_index = i_size >> PAGE_SH 2561 end_index = i_size >> PAGE_SHIFT; 3102 if (index > end_index) 2562 if (index > end_index) 3103 break; 2563 break; 3104 if (index == end_index) { 2564 if (index == end_index) { 3105 nr = i_size & ~PAGE_M 2565 nr = i_size & ~PAGE_MASK; 3106 if (nr <= offset) 2566 if (nr <= offset) 3107 break; 2567 break; 3108 } 2568 } 3109 2569 3110 error = shmem_get_folio(inode !! 2570 error = shmem_getpage(inode, index, &page, sgp); 3111 if (error) { 2571 if (error) { 3112 if (error == -EINVAL) 2572 if (error == -EINVAL) 3113 error = 0; 2573 error = 0; 3114 break; 2574 break; 3115 } 2575 } 3116 if (folio) { !! 2576 if (page) { 3117 folio_unlock(folio); !! 2577 if (sgp == SGP_CACHE) 3118 !! 2578 set_page_dirty(page); 3119 page = folio_file_pag !! 2579 unlock_page(page); 3120 if (PageHWPoison(page << 3121 folio_put(fol << 3122 error = -EIO; << 3123 break; << 3124 } << 3125 } 2580 } 3126 2581 3127 /* 2582 /* 3128 * We must evaluate after, si 2583 * We must evaluate after, since reads (unlike writes) 3129 * are called without i_rwsem !! 2584 * are called without i_mutex protection against truncate 3130 */ 2585 */ 3131 nr = PAGE_SIZE; 2586 nr = PAGE_SIZE; 3132 i_size = i_size_read(inode); 2587 i_size = i_size_read(inode); 3133 end_index = i_size >> PAGE_SH 2588 end_index = i_size >> PAGE_SHIFT; 3134 if (index == end_index) { 2589 if (index == end_index) { 3135 nr = i_size & ~PAGE_M 2590 nr = i_size & ~PAGE_MASK; 3136 if (nr <= offset) { 2591 if (nr <= offset) { 3137 if (folio) !! 2592 if (page) 3138 folio !! 2593 put_page(page); 3139 break; 2594 break; 3140 } 2595 } 3141 } 2596 } 3142 nr -= offset; 2597 nr -= offset; 3143 2598 3144 if (folio) { !! 2599 if (page) { 3145 /* 2600 /* 3146 * If users can be wr 2601 * If users can be writing to this page using arbitrary 3147 * virtual addresses, 2602 * virtual addresses, take care about potential aliasing 3148 * before reading the 2603 * before reading the page on the kernel side. 3149 */ 2604 */ 3150 if (mapping_writably_ 2605 if (mapping_writably_mapped(mapping)) 3151 flush_dcache_ 2606 flush_dcache_page(page); 3152 /* 2607 /* 3153 * Mark the page acce 2608 * Mark the page accessed if we read the beginning. 3154 */ 2609 */ 3155 if (!offset) 2610 if (!offset) 3156 folio_mark_ac !! 2611 mark_page_accessed(page); 3157 /* << 3158 * Ok, we have the pa << 3159 * now we can copy it << 3160 */ << 3161 ret = copy_page_to_it << 3162 folio_put(folio); << 3163 << 3164 } else if (user_backed_iter(t << 3165 /* << 3166 * Copy to user tends << 3167 * clear_user() not s << 3168 * faster to copy the << 3169 */ << 3170 ret = copy_page_to_it << 3171 } else { 2612 } else { 3172 /* !! 2613 page = ZERO_PAGE(0); 3173 * But submitting the !! 2614 get_page(page); 3174 * splice() - or othe << 3175 * so don't attempt t << 3176 */ << 3177 ret = iov_iter_zero(n << 3178 } 2615 } 3179 2616 >> 2617 /* >> 2618 * Ok, we have the page, and it's up-to-date, so >> 2619 * now we can copy it to user space... 
>> 2620 */ >> 2621 ret = copy_page_to_iter(page, offset, nr, to); 3180 retval += ret; 2622 retval += ret; 3181 offset += ret; 2623 offset += ret; 3182 index += offset >> PAGE_SHIFT 2624 index += offset >> PAGE_SHIFT; 3183 offset &= ~PAGE_MASK; 2625 offset &= ~PAGE_MASK; 3184 2626 >> 2627 put_page(page); 3185 if (!iov_iter_count(to)) 2628 if (!iov_iter_count(to)) 3186 break; 2629 break; 3187 if (ret < nr) { 2630 if (ret < nr) { 3188 error = -EFAULT; 2631 error = -EFAULT; 3189 break; 2632 break; 3190 } 2633 } 3191 cond_resched(); 2634 cond_resched(); 3192 } 2635 } 3193 2636 3194 *ppos = ((loff_t) index << PAGE_SHIFT 2637 *ppos = ((loff_t) index << PAGE_SHIFT) + offset; 3195 file_accessed(file); 2638 file_accessed(file); 3196 return retval ? retval : error; 2639 return retval ? retval : error; 3197 } 2640 } 3198 2641 3199 static ssize_t shmem_file_write_iter(struct k !! 2642 /* 3200 { !! 2643 * llseek SEEK_DATA or SEEK_HOLE through the page cache. 3201 struct file *file = iocb->ki_filp; !! 2644 */ 3202 struct inode *inode = file->f_mapping !! 2645 static pgoff_t shmem_seek_hole_data(struct address_space *mapping, 3203 ssize_t ret; !! 2646 pgoff_t index, pgoff_t end, int whence) 3204 << 3205 inode_lock(inode); << 3206 ret = generic_write_checks(iocb, from << 3207 if (ret <= 0) << 3208 goto unlock; << 3209 ret = file_remove_privs(file); << 3210 if (ret) << 3211 goto unlock; << 3212 ret = file_update_time(file); << 3213 if (ret) << 3214 goto unlock; << 3215 ret = generic_perform_write(iocb, fro << 3216 unlock: << 3217 inode_unlock(inode); << 3218 return ret; << 3219 } << 3220 << 3221 static bool zero_pipe_buf_get(struct pipe_ino << 3222 struct pipe_buf << 3223 { << 3224 return true; << 3225 } << 3226 << 3227 static void zero_pipe_buf_release(struct pipe << 3228 struct pipe << 3229 { << 3230 } << 3231 << 3232 static bool zero_pipe_buf_try_steal(struct pi << 3233 struct pi << 3234 { << 3235 return false; << 3236 } << 3237 << 3238 static const struct pipe_buf_operations zero_ << 3239 .release = zero_pipe_buf_relea << 3240 .try_steal = zero_pipe_buf_try_s << 3241 .get = zero_pipe_buf_get, << 3242 }; << 3243 << 3244 static size_t splice_zeropage_into_pipe(struc << 3245 loff_ << 3246 { << 3247 size_t offset = fpos & ~PAGE_MASK; << 3248 << 3249 size = min_t(size_t, size, PAGE_SIZE << 3250 << 3251 if (!pipe_full(pipe->head, pipe->tail << 3252 struct pipe_buffer *buf = pip << 3253 << 3254 *buf = (struct pipe_buffer) { << 3255 .ops = &zero_pipe_ << 3256 .page = ZERO_PAGE(0 << 3257 .offset = offset, << 3258 .len = size, << 3259 }; << 3260 pipe->head++; << 3261 } << 3262 << 3263 return size; << 3264 } << 3265 << 3266 static ssize_t shmem_file_splice_read(struct << 3267 struct << 3268 size_t << 3269 { 2647 { 3270 struct inode *inode = file_inode(in); !! 2648 struct page *page; 3271 struct address_space *mapping = inode !! 2649 struct pagevec pvec; 3272 struct folio *folio = NULL; !! 2650 pgoff_t indices[PAGEVEC_SIZE]; 3273 size_t total_spliced = 0, used, npage !! 2651 bool done = false; 3274 loff_t isize; !! 2652 int i; 3275 int error = 0; << 3276 << 3277 /* Work out how much data we can actu << 3278 used = pipe_occupancy(pipe->head, pip << 3279 npages = max_t(ssize_t, pipe->max_usa << 3280 len = min_t(size_t, len, npages * PAG << 3281 << 3282 do { << 3283 if (*ppos >= i_size_read(inod << 3284 break; << 3285 2653 3286 error = shmem_get_folio(inode !! 2654 pagevec_init(&pvec); 3287 SGP_R !! 2655 pvec.nr = 1; /* start small: we may be there already */ 3288 if (error) { !! 
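On the read side, holes never allocate: the older column substitutes ZERO_PAGE(0) for a missing page, and the newer column splices a zero-page pipe buffer instead. A sketch using splice(2) on a sparse tmpfs file (the path is an assumption):

/* splice_demo.c - splice from a sparse tmpfs file: data pages move by
 * reference and the hole arrives as zeroes. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int pfd[2];
	int fd = open("/dev/shm/splice-demo", O_RDWR | O_CREAT | O_TRUNC, 0600); /* assumed path */

	if (fd < 0 || pipe(pfd) < 0)
		return 1;
	pwrite(fd, "data", 4, 0);
	ftruncate(fd, 8192);			/* second page stays a hole */

	loff_t off = 0;
	ssize_t n = splice(fd, &off, pfd[1], NULL, 8192, 0);
	printf("spliced %zd bytes (holes arrive as zeroes)\n", n);
	return 0;
}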
2656 while (!done) { 3289 if (error == -EINVAL) !! 2657 pvec.nr = find_get_entries(mapping, index, 3290 error = 0; !! 2658 pvec.nr, pvec.pages, indices); >> 2659 if (!pvec.nr) { >> 2660 if (whence == SEEK_DATA) >> 2661 index = end; 3291 break; 2662 break; 3292 } 2663 } 3293 if (folio) { !! 2664 for (i = 0; i < pvec.nr; i++, index++) { 3294 folio_unlock(folio); !! 2665 if (index < indices[i]) { 3295 !! 2666 if (whence == SEEK_HOLE) { 3296 if (folio_test_hwpois !! 2667 done = true; 3297 (folio_test_large !! 2668 break; 3298 folio_test_has_h !! 2669 } 3299 error = -EIO; !! 2670 index = indices[i]; >> 2671 } >> 2672 page = pvec.pages[i]; >> 2673 if (page && !xa_is_value(page)) { >> 2674 if (!PageUptodate(page)) >> 2675 page = NULL; >> 2676 } >> 2677 if (index >= end || >> 2678 (page && whence == SEEK_DATA) || >> 2679 (!page && whence == SEEK_HOLE)) { >> 2680 done = true; 3300 break; 2681 break; 3301 } 2682 } 3302 } 2683 } 3303 !! 2684 pagevec_remove_exceptionals(&pvec); 3304 /* !! 2685 pagevec_release(&pvec); 3305 * i_size must be checked aft !! 2686 pvec.nr = PAGEVEC_SIZE; 3306 * << 3307 * Checking i_size after the << 3308 * the correct value for "nr" << 3309 * part of the page is not co << 3310 * another truncate extends t << 3311 */ << 3312 isize = i_size_read(inode); << 3313 if (unlikely(*ppos >= isize)) << 3314 break; << 3315 part = min_t(loff_t, isize - << 3316 << 3317 if (folio) { << 3318 /* << 3319 * If users can be wr << 3320 * virtual addresses, << 3321 * before reading the << 3322 */ << 3323 if (mapping_writably_ << 3324 flush_dcache_ << 3325 folio_mark_accessed(f << 3326 /* << 3327 * Ok, we have the pa << 3328 * now splice it into << 3329 */ << 3330 n = splice_folio_into << 3331 folio_put(folio); << 3332 folio = NULL; << 3333 } else { << 3334 n = splice_zeropage_i << 3335 } << 3336 << 3337 if (!n) << 3338 break; << 3339 len -= n; << 3340 total_spliced += n; << 3341 *ppos += n; << 3342 in->f_ra.prev_pos = *ppos; << 3343 if (pipe_full(pipe->head, pip << 3344 break; << 3345 << 3346 cond_resched(); 2687 cond_resched(); 3347 } while (len); !! 2688 } 3348 !! 2689 return index; 3349 if (folio) << 3350 folio_put(folio); << 3351 << 3352 file_accessed(in); << 3353 return total_spliced ? total_spliced << 3354 } 2690 } 3355 2691 3356 static loff_t shmem_file_llseek(struct file * 2692 static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence) 3357 { 2693 { 3358 struct address_space *mapping = file- 2694 struct address_space *mapping = file->f_mapping; 3359 struct inode *inode = mapping->host; 2695 struct inode *inode = mapping->host; >> 2696 pgoff_t start, end; >> 2697 loff_t new_offset; 3360 2698 3361 if (whence != SEEK_DATA && whence != 2699 if (whence != SEEK_DATA && whence != SEEK_HOLE) 3362 return generic_file_llseek_si 2700 return generic_file_llseek_size(file, offset, whence, 3363 MAX_L 2701 MAX_LFS_FILESIZE, i_size_read(inode)); 3364 if (offset < 0) << 3365 return -ENXIO; << 3366 << 3367 inode_lock(inode); 2702 inode_lock(inode); 3368 /* We're holding i_rwsem so we can ac !! 2703 /* We're holding i_mutex so we can access i_size directly */ 3369 offset = mapping_seek_hole_data(mappi !! 
2704 >> 2705 if (offset < 0 || offset >= inode->i_size) >> 2706 offset = -ENXIO; >> 2707 else { >> 2708 start = offset >> PAGE_SHIFT; >> 2709 end = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT; >> 2710 new_offset = shmem_seek_hole_data(mapping, start, end, whence); >> 2711 new_offset <<= PAGE_SHIFT; >> 2712 if (new_offset > offset) { >> 2713 if (new_offset < inode->i_size) >> 2714 offset = new_offset; >> 2715 else if (whence == SEEK_DATA) >> 2716 offset = -ENXIO; >> 2717 else >> 2718 offset = inode->i_size; >> 2719 } >> 2720 } >> 2721 3370 if (offset >= 0) 2722 if (offset >= 0) 3371 offset = vfs_setpos(file, off 2723 offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE); 3372 inode_unlock(inode); 2724 inode_unlock(inode); 3373 return offset; 2725 return offset; 3374 } 2726 } 3375 2727 3376 static long shmem_fallocate(struct file *file 2728 static long shmem_fallocate(struct file *file, int mode, loff_t offset, 3377 2729 loff_t len) 3378 { 2730 { 3379 struct inode *inode = file_inode(file 2731 struct inode *inode = file_inode(file); 3380 struct shmem_sb_info *sbinfo = SHMEM_ 2732 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 3381 struct shmem_inode_info *info = SHMEM 2733 struct shmem_inode_info *info = SHMEM_I(inode); 3382 struct shmem_falloc shmem_falloc; 2734 struct shmem_falloc shmem_falloc; 3383 pgoff_t start, index, end, undo_fallo !! 2735 pgoff_t start, index, end; 3384 int error; 2736 int error; 3385 2737 3386 if (mode & ~(FALLOC_FL_KEEP_SIZE | FA 2738 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) 3387 return -EOPNOTSUPP; 2739 return -EOPNOTSUPP; 3388 2740 3389 inode_lock(inode); 2741 inode_lock(inode); 3390 2742 3391 if (mode & FALLOC_FL_PUNCH_HOLE) { 2743 if (mode & FALLOC_FL_PUNCH_HOLE) { 3392 struct address_space *mapping 2744 struct address_space *mapping = file->f_mapping; 3393 loff_t unmap_start = round_up 2745 loff_t unmap_start = round_up(offset, PAGE_SIZE); 3394 loff_t unmap_end = round_down 2746 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1; 3395 DECLARE_WAIT_QUEUE_HEAD_ONSTA 2747 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq); 3396 2748 3397 /* protected by i_rwsem */ !! 
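SEEK_DATA/SEEK_HOLE on tmpfs are answered purely from the page cache, as in shmem_file_llseek() above; the offsets it reports are page-granular. A userspace sketch (path assumed):

/* seek_demo.c - SEEK_HOLE/SEEK_DATA on tmpfs walk the page cache rather
 * than any on-disk extent map. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/shm/seek-demo", O_RDWR | O_CREAT | O_TRUNC, 0600); /* assumed path */

	if (fd < 0 || ftruncate(fd, 1 << 20) < 0)
		return 1;
	pwrite(fd, "x", 1, 512 * 1024);		/* one data page in the middle */

	off_t data = lseek(fd, 0, SEEK_DATA);	/* first page that holds data */
	off_t hole = lseek(fd, data, SEEK_HOLE);/* end of that data extent */
	printf("data at %lld, hole resumes at %lld\n",
	       (long long)data, (long long)hole);
	unlink("/dev/shm/seek-demo");
	return 0;
}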
2749 /* protected by i_mutex */ 3398 if (info->seals & (F_SEAL_WRI 2750 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) { 3399 error = -EPERM; 2751 error = -EPERM; 3400 goto out; 2752 goto out; 3401 } 2753 } 3402 2754 3403 shmem_falloc.waitq = &shmem_f 2755 shmem_falloc.waitq = &shmem_falloc_waitq; 3404 shmem_falloc.start = (u64)unm 2756 shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT; 3405 shmem_falloc.next = (unmap_en 2757 shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT; 3406 spin_lock(&inode->i_lock); 2758 spin_lock(&inode->i_lock); 3407 inode->i_private = &shmem_fal 2759 inode->i_private = &shmem_falloc; 3408 spin_unlock(&inode->i_lock); 2760 spin_unlock(&inode->i_lock); 3409 2761 3410 if ((u64)unmap_end > (u64)unm 2762 if ((u64)unmap_end > (u64)unmap_start) 3411 unmap_mapping_range(m 2763 unmap_mapping_range(mapping, unmap_start, 3412 1 2764 1 + unmap_end - unmap_start, 0); 3413 shmem_truncate_range(inode, o 2765 shmem_truncate_range(inode, offset, offset + len - 1); 3414 /* No need to unmap again: ho 2766 /* No need to unmap again: hole-punching leaves COWed pages */ 3415 2767 3416 spin_lock(&inode->i_lock); 2768 spin_lock(&inode->i_lock); 3417 inode->i_private = NULL; 2769 inode->i_private = NULL; 3418 wake_up_all(&shmem_falloc_wai 2770 wake_up_all(&shmem_falloc_waitq); 3419 WARN_ON_ONCE(!list_empty(&shm 2771 WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head)); 3420 spin_unlock(&inode->i_lock); 2772 spin_unlock(&inode->i_lock); 3421 error = 0; 2773 error = 0; 3422 goto out; 2774 goto out; 3423 } 2775 } 3424 2776 3425 /* We need to check rlimit even when 2777 /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */ 3426 error = inode_newsize_ok(inode, offse 2778 error = inode_newsize_ok(inode, offset + len); 3427 if (error) 2779 if (error) 3428 goto out; 2780 goto out; 3429 2781 3430 if ((info->seals & F_SEAL_GROW) && of 2782 if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) { 3431 error = -EPERM; 2783 error = -EPERM; 3432 goto out; 2784 goto out; 3433 } 2785 } 3434 2786 3435 start = offset >> PAGE_SHIFT; 2787 start = offset >> PAGE_SHIFT; 3436 end = (offset + len + PAGE_SIZE - 1) 2788 end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT; 3437 /* Try to avoid a swapstorm if len is 2789 /* Try to avoid a swapstorm if len is impossible to satisfy */ 3438 if (sbinfo->max_blocks && end - start 2790 if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) { 3439 error = -ENOSPC; 2791 error = -ENOSPC; 3440 goto out; 2792 goto out; 3441 } 2793 } 3442 2794 3443 shmem_falloc.waitq = NULL; 2795 shmem_falloc.waitq = NULL; 3444 shmem_falloc.start = start; 2796 shmem_falloc.start = start; 3445 shmem_falloc.next = start; 2797 shmem_falloc.next = start; 3446 shmem_falloc.nr_falloced = 0; 2798 shmem_falloc.nr_falloced = 0; 3447 shmem_falloc.nr_unswapped = 0; 2799 shmem_falloc.nr_unswapped = 0; 3448 spin_lock(&inode->i_lock); 2800 spin_lock(&inode->i_lock); 3449 inode->i_private = &shmem_falloc; 2801 inode->i_private = &shmem_falloc; 3450 spin_unlock(&inode->i_lock); 2802 spin_unlock(&inode->i_lock); 3451 2803 3452 /* !! 2804 for (index = start; index < end; index++) { 3453 * info->fallocend is only relevant w !! 
2805 struct page *page; 3454 * involved: to prevent split_huge_pa << 3455 * pages when FALLOC_FL_KEEP_SIZE com << 3456 */ << 3457 undo_fallocend = info->fallocend; << 3458 if (info->fallocend < end) << 3459 info->fallocend = end; << 3460 << 3461 for (index = start; index < end; ) { << 3462 struct folio *folio; << 3463 2806 3464 /* 2807 /* 3465 * Check for fatal signal so !! 2808 * Good, the fallocate(2) manpage permits EINTR: we may have 3466 * situations. We don't want !! 2809 * been interrupted because we are using up too much memory. 3467 * signals as large fallocate << 3468 * e.g. periodic timers may r << 3469 * restarting. << 3470 */ 2810 */ 3471 if (fatal_signal_pending(curr !! 2811 if (signal_pending(current)) 3472 error = -EINTR; 2812 error = -EINTR; 3473 else if (shmem_falloc.nr_unsw 2813 else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced) 3474 error = -ENOMEM; 2814 error = -ENOMEM; 3475 else 2815 else 3476 error = shmem_get_fol !! 2816 error = shmem_getpage(inode, index, &page, SGP_FALLOC); 3477 << 3478 if (error) { 2817 if (error) { 3479 info->fallocend = und !! 2818 /* Remove the !PageUptodate pages we added */ 3480 /* Remove the !uptoda << 3481 if (index > start) { 2819 if (index > start) { 3482 shmem_undo_ra 2820 shmem_undo_range(inode, 3483 (loff_t)s 2821 (loff_t)start << PAGE_SHIFT, 3484 ((loff_t) 2822 ((loff_t)index << PAGE_SHIFT) - 1, true); 3485 } 2823 } 3486 goto undone; 2824 goto undone; 3487 } 2825 } 3488 2826 3489 /* 2827 /* 3490 * Here is a more important o << 3491 * a second SGP_FALLOC on the << 3492 * making it uptodate and un- << 3493 */ << 3494 index = folio_next_index(foli << 3495 /* Beware 32-bit wraparound * << 3496 if (!index) << 3497 index--; << 3498 << 3499 /* << 3500 * Inform shmem_writepage() h 2828 * Inform shmem_writepage() how far we have reached. 3501 * No need for lock or barrie 2829 * No need for lock or barrier: we have the page lock. 3502 */ 2830 */ 3503 if (!folio_test_uptodate(foli !! 2831 shmem_falloc.next++; 3504 shmem_falloc.nr_fallo !! 2832 if (!PageUptodate(page)) 3505 shmem_falloc.next = index; !! 2833 shmem_falloc.nr_falloced++; 3506 2834 3507 /* 2835 /* 3508 * If !uptodate, leave it tha !! 2836 * If !PageUptodate, leave it that way so that freeable pages 3509 * can be recognized if we ne 2837 * can be recognized if we need to rollback on error later. 3510 * But mark it dirty so that !! 2838 * But set_page_dirty so that memory pressure will swap rather 3511 * than free the folios we ar !! 2839 * than free the pages we are allocating (and SGP_CACHE pages 3512 * might still be clean: we n 2840 * might still be clean: we now need to mark those dirty too). 3513 */ 2841 */ 3514 folio_mark_dirty(folio); !! 2842 set_page_dirty(page); 3515 folio_unlock(folio); !! 2843 unlock_page(page); 3516 folio_put(folio); !! 
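From userspace the loop above is driven by fallocate(2): plain preallocation charges blocks up front, and PUNCH_HOLE releases them again. A sketch, with the tmpfs path assumed:

/* fallocate_demo.c - preallocation reserves tmpfs pages up front (the
 * SGP_FALLOC loop above), and PUNCH_HOLE gives them back. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

static void show_blocks(int fd, const char *when)
{
	struct stat st;

	fstat(fd, &st);
	printf("%s: %lld 512-byte blocks\n", when, (long long)st.st_blocks);
}

int main(void)
{
	int fd = open("/dev/shm/falloc-demo", O_RDWR | O_CREAT | O_TRUNC, 0600); /* assumed path */

	if (fd < 0)
		return 1;
	/* Reserve 1 MiB now; fails with ENOSPC if the mount's size= is exceeded. */
	if (fallocate(fd, 0, 0, 1 << 20) < 0)
		return 1;
	show_blocks(fd, "after fallocate");
	/* Punch the first half back out; i_size is kept. */
	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, 1 << 19);
	show_blocks(fd, "after punch");
	unlink("/dev/shm/falloc-demo");
	return 0;
}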
2844 put_page(page); 3517 cond_resched(); 2845 cond_resched(); 3518 } 2846 } 3519 2847 3520 if (!(mode & FALLOC_FL_KEEP_SIZE) && 2848 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) 3521 i_size_write(inode, offset + 2849 i_size_write(inode, offset + len); >> 2850 inode->i_ctime = current_time(inode); 3522 undone: 2851 undone: 3523 spin_lock(&inode->i_lock); 2852 spin_lock(&inode->i_lock); 3524 inode->i_private = NULL; 2853 inode->i_private = NULL; 3525 spin_unlock(&inode->i_lock); 2854 spin_unlock(&inode->i_lock); 3526 out: 2855 out: 3527 if (!error) << 3528 file_modified(file); << 3529 inode_unlock(inode); 2856 inode_unlock(inode); 3530 return error; 2857 return error; 3531 } 2858 } 3532 2859 3533 static int shmem_statfs(struct dentry *dentry 2860 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf) 3534 { 2861 { 3535 struct shmem_sb_info *sbinfo = SHMEM_ 2862 struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb); 3536 2863 3537 buf->f_type = TMPFS_MAGIC; 2864 buf->f_type = TMPFS_MAGIC; 3538 buf->f_bsize = PAGE_SIZE; 2865 buf->f_bsize = PAGE_SIZE; 3539 buf->f_namelen = NAME_MAX; 2866 buf->f_namelen = NAME_MAX; 3540 if (sbinfo->max_blocks) { 2867 if (sbinfo->max_blocks) { 3541 buf->f_blocks = sbinfo->max_b 2868 buf->f_blocks = sbinfo->max_blocks; 3542 buf->f_bavail = 2869 buf->f_bavail = 3543 buf->f_bfree = sbinfo->max_b 2870 buf->f_bfree = sbinfo->max_blocks - 3544 percpu_counte 2871 percpu_counter_sum(&sbinfo->used_blocks); 3545 } 2872 } 3546 if (sbinfo->max_inodes) { 2873 if (sbinfo->max_inodes) { 3547 buf->f_files = sbinfo->max_in 2874 buf->f_files = sbinfo->max_inodes; 3548 buf->f_ffree = sbinfo->free_i !! 2875 buf->f_ffree = sbinfo->free_inodes; 3549 } 2876 } 3550 /* else leave those fields 0 like sim 2877 /* else leave those fields 0 like simple_statfs */ 3551 << 3552 buf->f_fsid = uuid_to_fsid(dentry->d_ << 3553 << 3554 return 0; 2878 return 0; 3555 } 2879 } 3556 2880 3557 /* 2881 /* 3558 * File creation. Allocate an inode, and we'r 2882 * File creation. Allocate an inode, and we're done.. 3559 */ 2883 */ 3560 static int 2884 static int 3561 shmem_mknod(struct mnt_idmap *idmap, struct i !! 2885 shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev) 3562 struct dentry *dentry, umode_t mo << 3563 { 2886 { 3564 struct inode *inode; 2887 struct inode *inode; 3565 int error; !! 2888 int error = -ENOSPC; 3566 2889 3567 inode = shmem_get_inode(idmap, dir->i !! 2890 inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE); 3568 if (IS_ERR(inode)) !! 2891 if (inode) { 3569 return PTR_ERR(inode); !! 2892 error = simple_acl_create(dir, inode); 3570 !! 2893 if (error) 3571 error = simple_acl_create(dir, inode) !! 2894 goto out_iput; 3572 if (error) !! 2895 error = security_inode_init_security(inode, dir, 3573 goto out_iput; !! 2896 &dentry->d_name, 3574 error = security_inode_init_security( !! 2897 shmem_initxattrs, NULL); 3575 !! 2898 if (error && error != -EOPNOTSUPP) 3576 if (error && error != -EOPNOTSUPP) !! 2899 goto out_iput; 3577 goto out_iput; << 3578 << 3579 error = simple_offset_add(shmem_get_o << 3580 if (error) << 3581 goto out_iput; << 3582 2900 3583 dir->i_size += BOGO_DIRENT_SIZE; !! 2901 error = 0; 3584 inode_set_mtime_to_ts(dir, inode_set_ !! 2902 dir->i_size += BOGO_DIRENT_SIZE; 3585 inode_inc_iversion(dir); !! 2903 dir->i_ctime = dir->i_mtime = current_time(dir); 3586 d_instantiate(dentry, inode); !! 2904 d_instantiate(dentry, inode); 3587 dget(dentry); /* Extra count - pin th !! 
2905 dget(dentry); /* Extra count - pin the dentry in core */ >> 2906 } 3588 return error; 2907 return error; 3589 << 3590 out_iput: 2908 out_iput: 3591 iput(inode); 2909 iput(inode); 3592 return error; 2910 return error; 3593 } 2911 } 3594 2912 3595 static int 2913 static int 3596 shmem_tmpfile(struct mnt_idmap *idmap, struct !! 2914 shmem_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) 3597 struct file *file, umode_t mode << 3598 { 2915 { 3599 struct inode *inode; 2916 struct inode *inode; 3600 int error; !! 2917 int error = -ENOSPC; 3601 2918 3602 inode = shmem_get_inode(idmap, dir->i !! 2919 inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE); 3603 if (IS_ERR(inode)) { !! 2920 if (inode) { 3604 error = PTR_ERR(inode); !! 2921 error = security_inode_init_security(inode, dir, 3605 goto err_out; !! 2922 NULL, >> 2923 shmem_initxattrs, NULL); >> 2924 if (error && error != -EOPNOTSUPP) >> 2925 goto out_iput; >> 2926 error = simple_acl_create(dir, inode); >> 2927 if (error) >> 2928 goto out_iput; >> 2929 d_tmpfile(dentry, inode); 3606 } 2930 } 3607 error = security_inode_init_security( !! 2931 return error; 3608 << 3609 if (error && error != -EOPNOTSUPP) << 3610 goto out_iput; << 3611 error = simple_acl_create(dir, inode) << 3612 if (error) << 3613 goto out_iput; << 3614 d_tmpfile(file, inode); << 3615 << 3616 err_out: << 3617 return finish_open_simple(file, error << 3618 out_iput: 2932 out_iput: 3619 iput(inode); 2933 iput(inode); 3620 return error; 2934 return error; 3621 } 2935 } 3622 2936 3623 static int shmem_mkdir(struct mnt_idmap *idma !! 2937 static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) 3624 struct dentry *dentry, << 3625 { 2938 { 3626 int error; 2939 int error; 3627 2940 3628 error = shmem_mknod(idmap, dir, dentr !! 2941 if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0))) 3629 if (error) << 3630 return error; 2942 return error; 3631 inc_nlink(dir); 2943 inc_nlink(dir); 3632 return 0; 2944 return 0; 3633 } 2945 } 3634 2946 3635 static int shmem_create(struct mnt_idmap *idm !! 2947 static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode, 3636 struct dentry *dentry !! 2948 bool excl) 3637 { 2949 { 3638 return shmem_mknod(idmap, dir, dentry !! 2950 return shmem_mknod(dir, dentry, mode | S_IFREG, 0); 3639 } 2951 } 3640 2952 3641 /* 2953 /* 3642 * Link a file.. 2954 * Link a file.. 3643 */ 2955 */ 3644 static int shmem_link(struct dentry *old_dent !! 2956 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) 3645 struct dentry *dentry) << 3646 { 2957 { 3647 struct inode *inode = d_inode(old_den 2958 struct inode *inode = d_inode(old_dentry); 3648 int ret = 0; 2959 int ret = 0; 3649 2960 3650 /* 2961 /* 3651 * No ordinary (disk based) filesyste 2962 * No ordinary (disk based) filesystem counts links as inodes; 3652 * but each new link needs a new dent 2963 * but each new link needs a new dentry, pinning lowmem, and 3653 * tmpfs dentries cannot be pruned un 2964 * tmpfs dentries cannot be pruned until they are unlinked. 3654 * But if an O_TMPFILE file is linked 2965 * But if an O_TMPFILE file is linked into the tmpfs, the 3655 * first link must skip that, to get 2966 * first link must skip that, to get the accounting right. 3656 */ 2967 */ 3657 if (inode->i_nlink) { 2968 if (inode->i_nlink) { 3658 ret = shmem_reserve_inode(ino !! 
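/*
 * Illustrative note (not part of the original source): as the comment
 * above explains, shmem_link() only takes a fresh inode reservation
 * when the inode already has a link; the first link of an O_TMPFILE
 * inode reuses the reservation made at creation time.  A hypothetical
 * userspace sketch of that pattern, assuming /dev/shm is a tmpfs mount
 * and the file was not opened with O_EXCL:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	static int tmpfile_link_demo(void)
 *	{
 *		char proc[64];
 *		int fd = open("/dev/shm", O_TMPFILE | O_RDWR, 0600);
 *
 *		if (fd < 0)
 *			return -1;
 *		// The inode exists with i_nlink == 0 until linked below.
 *		snprintf(proc, sizeof(proc), "/proc/self/fd/%d", fd);
 *		return linkat(AT_FDCWD, proc, AT_FDCWD,
 *			      "/dev/shm/now-visible", AT_SYMLINK_FOLLOW);
 *	}
 */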
2969 ret = shmem_reserve_inode(inode->i_sb); 3659 if (ret) 2970 if (ret) 3660 goto out; 2971 goto out; 3661 } 2972 } 3662 2973 3663 ret = simple_offset_add(shmem_get_off << 3664 if (ret) { << 3665 if (inode->i_nlink) << 3666 shmem_free_inode(inod << 3667 goto out; << 3668 } << 3669 << 3670 dir->i_size += BOGO_DIRENT_SIZE; 2974 dir->i_size += BOGO_DIRENT_SIZE; 3671 inode_set_mtime_to_ts(dir, !! 2975 inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode); 3672 inode_set_ctime << 3673 inode_inc_iversion(dir); << 3674 inc_nlink(inode); 2976 inc_nlink(inode); 3675 ihold(inode); /* New dentry referen 2977 ihold(inode); /* New dentry reference */ 3676 dget(dentry); /* Extra pinning coun !! 2978 dget(dentry); /* Extra pinning count for the created dentry */ 3677 d_instantiate(dentry, inode); 2979 d_instantiate(dentry, inode); 3678 out: 2980 out: 3679 return ret; 2981 return ret; 3680 } 2982 } 3681 2983 3682 static int shmem_unlink(struct inode *dir, st 2984 static int shmem_unlink(struct inode *dir, struct dentry *dentry) 3683 { 2985 { 3684 struct inode *inode = d_inode(dentry) 2986 struct inode *inode = d_inode(dentry); 3685 2987 3686 if (inode->i_nlink > 1 && !S_ISDIR(in 2988 if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) 3687 shmem_free_inode(inode->i_sb, !! 2989 shmem_free_inode(inode->i_sb); 3688 << 3689 simple_offset_remove(shmem_get_offset << 3690 2990 3691 dir->i_size -= BOGO_DIRENT_SIZE; 2991 dir->i_size -= BOGO_DIRENT_SIZE; 3692 inode_set_mtime_to_ts(dir, !! 2992 inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode); 3693 inode_set_ctime << 3694 inode_inc_iversion(dir); << 3695 drop_nlink(inode); 2993 drop_nlink(inode); 3696 dput(dentry); /* Undo the count fro !! 2994 dput(dentry); /* Undo the count from "create" - this does all the work */ 3697 return 0; 2995 return 0; 3698 } 2996 } 3699 2997 3700 static int shmem_rmdir(struct inode *dir, str 2998 static int shmem_rmdir(struct inode *dir, struct dentry *dentry) 3701 { 2999 { 3702 if (!simple_offset_empty(dentry)) !! 3000 if (!simple_empty(dentry)) 3703 return -ENOTEMPTY; 3001 return -ENOTEMPTY; 3704 3002 3705 drop_nlink(d_inode(dentry)); 3003 drop_nlink(d_inode(dentry)); 3706 drop_nlink(dir); 3004 drop_nlink(dir); 3707 return shmem_unlink(dir, dentry); 3005 return shmem_unlink(dir, dentry); 3708 } 3006 } 3709 3007 3710 static int shmem_whiteout(struct mnt_idmap *i !! 3008 static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) 3711 struct inode *old_d !! 
3009 { >> 3010 bool old_is_dir = d_is_dir(old_dentry); >> 3011 bool new_is_dir = d_is_dir(new_dentry); >> 3012 >> 3013 if (old_dir != new_dir && old_is_dir != new_is_dir) { >> 3014 if (old_is_dir) { >> 3015 drop_nlink(old_dir); >> 3016 inc_nlink(new_dir); >> 3017 } else { >> 3018 drop_nlink(new_dir); >> 3019 inc_nlink(old_dir); >> 3020 } >> 3021 } >> 3022 old_dir->i_ctime = old_dir->i_mtime = >> 3023 new_dir->i_ctime = new_dir->i_mtime = >> 3024 d_inode(old_dentry)->i_ctime = >> 3025 d_inode(new_dentry)->i_ctime = current_time(old_dir); >> 3026 >> 3027 return 0; >> 3028 } >> 3029 >> 3030 static int shmem_whiteout(struct inode *old_dir, struct dentry *old_dentry) 3712 { 3031 { 3713 struct dentry *whiteout; 3032 struct dentry *whiteout; 3714 int error; 3033 int error; 3715 3034 3716 whiteout = d_alloc(old_dentry->d_pare 3035 whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name); 3717 if (!whiteout) 3036 if (!whiteout) 3718 return -ENOMEM; 3037 return -ENOMEM; 3719 3038 3720 error = shmem_mknod(idmap, old_dir, w !! 3039 error = shmem_mknod(old_dir, whiteout, 3721 S_IFCHR | WHITEOU 3040 S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV); 3722 dput(whiteout); 3041 dput(whiteout); 3723 if (error) 3042 if (error) 3724 return error; 3043 return error; 3725 3044 3726 /* 3045 /* 3727 * Cheat and hash the whiteout while 3046 * Cheat and hash the whiteout while the old dentry is still in 3728 * place, instead of playing games wi 3047 * place, instead of playing games with FS_RENAME_DOES_D_MOVE. 3729 * 3048 * 3730 * d_lookup() will consistently find 3049 * d_lookup() will consistently find one of them at this point, 3731 * not sure which one, but that isn't 3050 * not sure which one, but that isn't even important. 3732 */ 3051 */ 3733 d_rehash(whiteout); 3052 d_rehash(whiteout); 3734 return 0; 3053 return 0; 3735 } 3054 } 3736 3055 3737 /* 3056 /* 3738 * The VFS layer already does all the dentry 3057 * The VFS layer already does all the dentry stuff for rename, 3739 * we just have to decrement the usage count 3058 * we just have to decrement the usage count for the target if 3740 * it exists so that the VFS layer correctly 3059 * it exists so that the VFS layer correctly free's it when it 3741 * gets overwritten. 3060 * gets overwritten. 3742 */ 3061 */ 3743 static int shmem_rename2(struct mnt_idmap *id !! 3062 static int shmem_rename2(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) 3744 struct inode *old_di << 3745 struct inode *new_di << 3746 unsigned int flags) << 3747 { 3063 { 3748 struct inode *inode = d_inode(old_den 3064 struct inode *inode = d_inode(old_dentry); 3749 int they_are_dirs = S_ISDIR(inode->i_ 3065 int they_are_dirs = S_ISDIR(inode->i_mode); 3750 int error; << 3751 3066 3752 if (flags & ~(RENAME_NOREPLACE | RENA 3067 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) 3753 return -EINVAL; 3068 return -EINVAL; 3754 3069 3755 if (flags & RENAME_EXCHANGE) 3070 if (flags & RENAME_EXCHANGE) 3756 return simple_offset_rename_e !! 3071 return shmem_exchange(old_dir, old_dentry, new_dir, new_dentry); 3757 << 3758 3072 3759 if (!simple_offset_empty(new_dentry)) !! 3073 if (!simple_empty(new_dentry)) 3760 return -ENOTEMPTY; 3074 return -ENOTEMPTY; 3761 3075 3762 if (flags & RENAME_WHITEOUT) { 3076 if (flags & RENAME_WHITEOUT) { 3763 error = shmem_whiteout(idmap, !! 
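/*
 * Illustrative note (not part of the original source): shmem_rename2()
 * handles RENAME_NOREPLACE, RENAME_EXCHANGE and RENAME_WHITEOUT; the
 * whiteout path above replaces the old name with a 0:0 character
 * device (WHITEOUT_DEV), which overlayfs relies on for tmpfs upper
 * layers.  A hedged userspace sketch, assuming a glibc with the
 * renameat2() wrapper (>= 2.28) and CAP_MKNOD for the whiteout case:
 *
 *	#define _GNU_SOURCE
 *	#include <stdio.h>		// renameat2(), RENAME_*
 *	#include <fcntl.h>		// AT_FDCWD
 *
 *	static int rename_demo(void)
 *	{
 *		// Atomically swap two names within the same tmpfs.
 *		if (renameat2(AT_FDCWD, "/dev/shm/a",
 *			      AT_FDCWD, "/dev/shm/b", RENAME_EXCHANGE) < 0)
 *			return -1;
 *		// Move a name away and leave a whiteout in its place.
 *		return renameat2(AT_FDCWD, "/dev/shm/b",
 *				 AT_FDCWD, "/dev/shm/c", RENAME_WHITEOUT);
 *	}
 */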
3077 int error; >> 3078 >> 3079 error = shmem_whiteout(old_dir, old_dentry); 3764 if (error) 3080 if (error) 3765 return error; 3081 return error; 3766 } 3082 } 3767 3083 3768 error = simple_offset_rename(old_dir, << 3769 if (error) << 3770 return error; << 3771 << 3772 if (d_really_is_positive(new_dentry)) 3084 if (d_really_is_positive(new_dentry)) { 3773 (void) shmem_unlink(new_dir, 3085 (void) shmem_unlink(new_dir, new_dentry); 3774 if (they_are_dirs) { 3086 if (they_are_dirs) { 3775 drop_nlink(d_inode(ne 3087 drop_nlink(d_inode(new_dentry)); 3776 drop_nlink(old_dir); 3088 drop_nlink(old_dir); 3777 } 3089 } 3778 } else if (they_are_dirs) { 3090 } else if (they_are_dirs) { 3779 drop_nlink(old_dir); 3091 drop_nlink(old_dir); 3780 inc_nlink(new_dir); 3092 inc_nlink(new_dir); 3781 } 3093 } 3782 3094 3783 old_dir->i_size -= BOGO_DIRENT_SIZE; 3095 old_dir->i_size -= BOGO_DIRENT_SIZE; 3784 new_dir->i_size += BOGO_DIRENT_SIZE; 3096 new_dir->i_size += BOGO_DIRENT_SIZE; 3785 simple_rename_timestamp(old_dir, old_ !! 3097 old_dir->i_ctime = old_dir->i_mtime = 3786 inode_inc_iversion(old_dir); !! 3098 new_dir->i_ctime = new_dir->i_mtime = 3787 inode_inc_iversion(new_dir); !! 3099 inode->i_ctime = current_time(old_dir); 3788 return 0; 3100 return 0; 3789 } 3101 } 3790 3102 3791 static int shmem_symlink(struct mnt_idmap *id !! 3103 static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname) 3792 struct dentry *dentr << 3793 { 3104 { 3794 int error; 3105 int error; 3795 int len; 3106 int len; 3796 struct inode *inode; 3107 struct inode *inode; 3797 struct folio *folio; !! 3108 struct page *page; 3798 3109 3799 len = strlen(symname) + 1; 3110 len = strlen(symname) + 1; 3800 if (len > PAGE_SIZE) 3111 if (len > PAGE_SIZE) 3801 return -ENAMETOOLONG; 3112 return -ENAMETOOLONG; 3802 3113 3803 inode = shmem_get_inode(idmap, dir->i !! 3114 inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK | 0777, 0, 3804 VM_NORESERVE) 3115 VM_NORESERVE); 3805 if (IS_ERR(inode)) !! 3116 if (!inode) 3806 return PTR_ERR(inode); !! 3117 return -ENOSPC; 3807 3118 3808 error = security_inode_init_security( 3119 error = security_inode_init_security(inode, dir, &dentry->d_name, 3809 3120 shmem_initxattrs, NULL); 3810 if (error && error != -EOPNOTSUPP) !! 3121 if (error) { 3811 goto out_iput; !! 3122 if (error != -EOPNOTSUPP) { 3812 !! 3123 iput(inode); 3813 error = simple_offset_add(shmem_get_o !! 3124 return error; 3814 if (error) !! 3125 } 3815 goto out_iput; !! 3126 error = 0; >> 3127 } 3816 3128 3817 inode->i_size = len-1; 3129 inode->i_size = len-1; 3818 if (len <= SHORT_SYMLINK_LEN) { 3130 if (len <= SHORT_SYMLINK_LEN) { 3819 inode->i_link = kmemdup(symna 3131 inode->i_link = kmemdup(symname, len, GFP_KERNEL); 3820 if (!inode->i_link) { 3132 if (!inode->i_link) { 3821 error = -ENOMEM; !! 3133 iput(inode); 3822 goto out_remove_offse !! 3134 return -ENOMEM; 3823 } 3135 } 3824 inode->i_op = &shmem_short_sy 3136 inode->i_op = &shmem_short_symlink_operations; 3825 } else { 3137 } else { 3826 inode_nohighmem(inode); 3138 inode_nohighmem(inode); >> 3139 error = shmem_getpage(inode, 0, &page, SGP_WRITE); >> 3140 if (error) { >> 3141 iput(inode); >> 3142 return error; >> 3143 } 3827 inode->i_mapping->a_ops = &sh 3144 inode->i_mapping->a_ops = &shmem_aops; 3828 error = shmem_get_folio(inode << 3829 if (error) << 3830 goto out_remove_offse << 3831 inode->i_op = &shmem_symlink_ 3145 inode->i_op = &shmem_symlink_inode_operations; 3832 memcpy(folio_address(folio), !! 
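/*
 * Illustrative note (not part of the original source): shmem_symlink()
 * stores targets shorter than SHORT_SYMLINK_LEN (128 bytes including
 * the terminating NUL) inline in kmalloc'ed inode->i_link, while
 * longer targets go through the page cache as in the branch above.
 * Either way the result is an ordinary symlink to userspace:
 *
 *	#include <unistd.h>
 *
 *	static int symlink_demo(void)
 *	{
 *		char buf[256];
 *
 *		// Short target: ends up in inode->i_link, no data page.
 *		if (symlink("target", "/dev/shm/lnk") < 0)
 *			return -1;
 *		return (int)readlink("/dev/shm/lnk", buf, sizeof(buf));
 *	}
 */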
3146 memcpy(page_address(page), symname, len); 3833 folio_mark_uptodate(folio); !! 3147 SetPageUptodate(page); 3834 folio_mark_dirty(folio); !! 3148 set_page_dirty(page); 3835 folio_unlock(folio); !! 3149 unlock_page(page); 3836 folio_put(folio); !! 3150 put_page(page); 3837 } 3151 } 3838 dir->i_size += BOGO_DIRENT_SIZE; 3152 dir->i_size += BOGO_DIRENT_SIZE; 3839 inode_set_mtime_to_ts(dir, inode_set_ !! 3153 dir->i_ctime = dir->i_mtime = current_time(dir); 3840 inode_inc_iversion(dir); << 3841 d_instantiate(dentry, inode); 3154 d_instantiate(dentry, inode); 3842 dget(dentry); 3155 dget(dentry); 3843 return 0; 3156 return 0; 3844 << 3845 out_remove_offset: << 3846 simple_offset_remove(shmem_get_offset << 3847 out_iput: << 3848 iput(inode); << 3849 return error; << 3850 } 3157 } 3851 3158 3852 static void shmem_put_link(void *arg) 3159 static void shmem_put_link(void *arg) 3853 { 3160 { 3854 folio_mark_accessed(arg); !! 3161 mark_page_accessed(arg); 3855 folio_put(arg); !! 3162 put_page(arg); 3856 } 3163 } 3857 3164 3858 static const char *shmem_get_link(struct dent !! 3165 static const char *shmem_get_link(struct dentry *dentry, >> 3166 struct inode *inode, 3859 struct dela 3167 struct delayed_call *done) 3860 { 3168 { 3861 struct folio *folio = NULL; !! 3169 struct page *page = NULL; 3862 int error; 3170 int error; 3863 << 3864 if (!dentry) { 3171 if (!dentry) { 3865 folio = filemap_get_folio(ino !! 3172 page = find_get_page(inode->i_mapping, 0); 3866 if (IS_ERR(folio)) !! 3173 if (!page) 3867 return ERR_PTR(-ECHIL 3174 return ERR_PTR(-ECHILD); 3868 if (PageHWPoison(folio_page(f !! 3175 if (!PageUptodate(page)) { 3869 !folio_test_uptodate(foli !! 3176 put_page(page); 3870 folio_put(folio); << 3871 return ERR_PTR(-ECHIL 3177 return ERR_PTR(-ECHILD); 3872 } 3178 } 3873 } else { 3179 } else { 3874 error = shmem_get_folio(inode !! 3180 error = shmem_getpage(inode, 0, &page, SGP_READ); 3875 if (error) 3181 if (error) 3876 return ERR_PTR(error) 3182 return ERR_PTR(error); 3877 if (!folio) !! 3183 unlock_page(page); 3878 return ERR_PTR(-ECHIL << 3879 if (PageHWPoison(folio_page(f << 3880 folio_unlock(folio); << 3881 folio_put(folio); << 3882 return ERR_PTR(-ECHIL << 3883 } << 3884 folio_unlock(folio); << 3885 } 3184 } 3886 set_delayed_call(done, shmem_put_link !! 3185 set_delayed_call(done, shmem_put_link, page); 3887 return folio_address(folio); !! 3186 return page_address(page); 3888 } 3187 } 3889 3188 3890 #ifdef CONFIG_TMPFS_XATTR 3189 #ifdef CONFIG_TMPFS_XATTR 3891 << 3892 static int shmem_fileattr_get(struct dentry * << 3893 { << 3894 struct shmem_inode_info *info = SHMEM << 3895 << 3896 fileattr_fill_flags(fa, info->fsflags << 3897 << 3898 return 0; << 3899 } << 3900 << 3901 static int shmem_fileattr_set(struct mnt_idma << 3902 struct dentry * << 3903 { << 3904 struct inode *inode = d_inode(dentry) << 3905 struct shmem_inode_info *info = SHMEM << 3906 << 3907 if (fileattr_has_fsx(fa)) << 3908 return -EOPNOTSUPP; << 3909 if (fa->flags & ~SHMEM_FL_USER_MODIFI << 3910 return -EOPNOTSUPP; << 3911 << 3912 info->fsflags = (info->fsflags & ~SHM << 3913 (fa->flags & SHMEM_FL_USER_MO << 3914 << 3915 shmem_set_inode_flags(inode, info->fs << 3916 inode_set_ctime_current(inode); << 3917 inode_inc_iversion(inode); << 3918 return 0; << 3919 } << 3920 << 3921 /* 3190 /* 3922 * Superblocks without xattr inode operations 3191 * Superblocks without xattr inode operations may get some security.* xattr 3923 * support from the LSM "for free". As soon a 3192 * support from the LSM "for free". 
As soon as we have any other xattrs 3924 * like ACLs, we also need to implement the s 3193 * like ACLs, we also need to implement the security.* handlers at 3925 * filesystem level, though. 3194 * filesystem level, though. 3926 */ 3195 */ 3927 3196 3928 /* 3197 /* 3929 * Callback for security_inode_init_security( 3198 * Callback for security_inode_init_security() for acquiring xattrs. 3930 */ 3199 */ 3931 static int shmem_initxattrs(struct inode *ino 3200 static int shmem_initxattrs(struct inode *inode, 3932 const struct xatt !! 3201 const struct xattr *xattr_array, >> 3202 void *fs_info) 3933 { 3203 { 3934 struct shmem_inode_info *info = SHMEM 3204 struct shmem_inode_info *info = SHMEM_I(inode); 3935 struct shmem_sb_info *sbinfo = SHMEM_ << 3936 const struct xattr *xattr; 3205 const struct xattr *xattr; 3937 struct simple_xattr *new_xattr; 3206 struct simple_xattr *new_xattr; 3938 size_t ispace = 0; << 3939 size_t len; 3207 size_t len; 3940 3208 3941 if (sbinfo->max_inodes) { << 3942 for (xattr = xattr_array; xat << 3943 ispace += simple_xatt << 3944 xattr->value_ << 3945 } << 3946 if (ispace) { << 3947 raw_spin_lock(&sbinfo << 3948 if (sbinfo->free_ispa << 3949 ispace = 0; << 3950 else << 3951 sbinfo->free_ << 3952 raw_spin_unlock(&sbin << 3953 if (!ispace) << 3954 return -ENOSP << 3955 } << 3956 } << 3957 << 3958 for (xattr = xattr_array; xattr->name 3209 for (xattr = xattr_array; xattr->name != NULL; xattr++) { 3959 new_xattr = simple_xattr_allo 3210 new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len); 3960 if (!new_xattr) 3211 if (!new_xattr) 3961 break; !! 3212 return -ENOMEM; 3962 3213 3963 len = strlen(xattr->name) + 1 3214 len = strlen(xattr->name) + 1; 3964 new_xattr->name = kmalloc(XAT 3215 new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len, 3965 GFP !! 3216 GFP_KERNEL); 3966 if (!new_xattr->name) { 3217 if (!new_xattr->name) { 3967 kvfree(new_xattr); !! 3218 kfree(new_xattr); 3968 break; !! 3219 return -ENOMEM; 3969 } 3220 } 3970 3221 3971 memcpy(new_xattr->name, XATTR 3222 memcpy(new_xattr->name, XATTR_SECURITY_PREFIX, 3972 XATTR_SECURITY_PREFIX_ 3223 XATTR_SECURITY_PREFIX_LEN); 3973 memcpy(new_xattr->name + XATT 3224 memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN, 3974 xattr->name, len); 3225 xattr->name, len); 3975 3226 3976 simple_xattr_add(&info->xattr !! 
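/*
 * Illustrative note (not part of the original source): the callback
 * above copies the LSM-supplied security.* attributes into the
 * in-memory simple_xattr list when the inode is created; the handlers
 * that follow route security.*, trusted.* and user.* names through the
 * same list at runtime.  A hypothetical userspace sketch, assuming a
 * file on a tmpfs mount at /dev/shm:
 *
 *	#include <sys/xattr.h>
 *
 *	static int xattr_demo(void)
 *	{
 *		char buf[64];
 *
 *		// Stored in SHMEM_I(inode)->xattrs, charged against the
 *		// superblock's free_ispace when inodes are limited.
 *		if (setxattr("/dev/shm/file", "user.note", "hi", 2, 0) < 0)
 *			return -1;
 *		return (int)getxattr("/dev/shm/file", "user.note",
 *				     buf, sizeof(buf));
 *	}
 */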
3227 simple_xattr_list_add(&info->xattrs, new_xattr); 3977 } << 3978 << 3979 if (xattr->name != NULL) { << 3980 if (ispace) { << 3981 raw_spin_lock(&sbinfo << 3982 sbinfo->free_ispace + << 3983 raw_spin_unlock(&sbin << 3984 } << 3985 simple_xattrs_free(&info->xat << 3986 return -ENOMEM; << 3987 } 3228 } 3988 3229 3989 return 0; 3230 return 0; 3990 } 3231 } 3991 3232 3992 static int shmem_xattr_handler_get(const stru 3233 static int shmem_xattr_handler_get(const struct xattr_handler *handler, 3993 struct den 3234 struct dentry *unused, struct inode *inode, 3994 const char 3235 const char *name, void *buffer, size_t size) 3995 { 3236 { 3996 struct shmem_inode_info *info = SHMEM 3237 struct shmem_inode_info *info = SHMEM_I(inode); 3997 3238 3998 name = xattr_full_name(handler, name) 3239 name = xattr_full_name(handler, name); 3999 return simple_xattr_get(&info->xattrs 3240 return simple_xattr_get(&info->xattrs, name, buffer, size); 4000 } 3241 } 4001 3242 4002 static int shmem_xattr_handler_set(const stru 3243 static int shmem_xattr_handler_set(const struct xattr_handler *handler, 4003 struct mnt << 4004 struct den 3244 struct dentry *unused, struct inode *inode, 4005 const char 3245 const char *name, const void *value, 4006 size_t siz 3246 size_t size, int flags) 4007 { 3247 { 4008 struct shmem_inode_info *info = SHMEM 3248 struct shmem_inode_info *info = SHMEM_I(inode); 4009 struct shmem_sb_info *sbinfo = SHMEM_ << 4010 struct simple_xattr *old_xattr; << 4011 size_t ispace = 0; << 4012 3249 4013 name = xattr_full_name(handler, name) 3250 name = xattr_full_name(handler, name); 4014 if (value && sbinfo->max_inodes) { !! 3251 return simple_xattr_set(&info->xattrs, name, value, size, flags); 4015 ispace = simple_xattr_space(n << 4016 raw_spin_lock(&sbinfo->stat_l << 4017 if (sbinfo->free_ispace < isp << 4018 ispace = 0; << 4019 else << 4020 sbinfo->free_ispace - << 4021 raw_spin_unlock(&sbinfo->stat << 4022 if (!ispace) << 4023 return -ENOSPC; << 4024 } << 4025 << 4026 old_xattr = simple_xattr_set(&info->x << 4027 if (!IS_ERR(old_xattr)) { << 4028 ispace = 0; << 4029 if (old_xattr && sbinfo->max_ << 4030 ispace = simple_xattr << 4031 << 4032 simple_xattr_free(old_xattr); << 4033 old_xattr = NULL; << 4034 inode_set_ctime_current(inode << 4035 inode_inc_iversion(inode); << 4036 } << 4037 if (ispace) { << 4038 raw_spin_lock(&sbinfo->stat_l << 4039 sbinfo->free_ispace += ispace << 4040 raw_spin_unlock(&sbinfo->stat << 4041 } << 4042 return PTR_ERR(old_xattr); << 4043 } 3252 } 4044 3253 4045 static const struct xattr_handler shmem_secur 3254 static const struct xattr_handler shmem_security_xattr_handler = { 4046 .prefix = XATTR_SECURITY_PREFIX, 3255 .prefix = XATTR_SECURITY_PREFIX, 4047 .get = shmem_xattr_handler_get, 3256 .get = shmem_xattr_handler_get, 4048 .set = shmem_xattr_handler_set, 3257 .set = shmem_xattr_handler_set, 4049 }; 3258 }; 4050 3259 4051 static const struct xattr_handler shmem_trust 3260 static const struct xattr_handler shmem_trusted_xattr_handler = { 4052 .prefix = XATTR_TRUSTED_PREFIX, 3261 .prefix = XATTR_TRUSTED_PREFIX, 4053 .get = shmem_xattr_handler_get, 3262 .get = shmem_xattr_handler_get, 4054 .set = shmem_xattr_handler_set, 3263 .set = shmem_xattr_handler_set, 4055 }; 3264 }; 4056 3265 4057 static const struct xattr_handler shmem_user_ !! 3266 static const struct xattr_handler *shmem_xattr_handlers[] = { 4058 .prefix = XATTR_USER_PREFIX, !! 3267 #ifdef CONFIG_TMPFS_POSIX_ACL 4059 .get = shmem_xattr_handler_get, !! 
3268 &posix_acl_access_xattr_handler, 4060 .set = shmem_xattr_handler_set, !! 3269 &posix_acl_default_xattr_handler, 4061 }; !! 3270 #endif 4062 << 4063 static const struct xattr_handler * const shm << 4064 &shmem_security_xattr_handler, 3271 &shmem_security_xattr_handler, 4065 &shmem_trusted_xattr_handler, 3272 &shmem_trusted_xattr_handler, 4066 &shmem_user_xattr_handler, << 4067 NULL 3273 NULL 4068 }; 3274 }; 4069 3275 4070 static ssize_t shmem_listxattr(struct dentry 3276 static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size) 4071 { 3277 { 4072 struct shmem_inode_info *info = SHMEM 3278 struct shmem_inode_info *info = SHMEM_I(d_inode(dentry)); 4073 return simple_xattr_list(d_inode(dent 3279 return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size); 4074 } 3280 } 4075 #endif /* CONFIG_TMPFS_XATTR */ 3281 #endif /* CONFIG_TMPFS_XATTR */ 4076 3282 4077 static const struct inode_operations shmem_sh 3283 static const struct inode_operations shmem_short_symlink_operations = { 4078 .getattr = shmem_getattr, << 4079 .setattr = shmem_setattr, << 4080 .get_link = simple_get_link, 3284 .get_link = simple_get_link, 4081 #ifdef CONFIG_TMPFS_XATTR 3285 #ifdef CONFIG_TMPFS_XATTR 4082 .listxattr = shmem_listxattr, 3286 .listxattr = shmem_listxattr, 4083 #endif 3287 #endif 4084 }; 3288 }; 4085 3289 4086 static const struct inode_operations shmem_sy 3290 static const struct inode_operations shmem_symlink_inode_operations = { 4087 .getattr = shmem_getattr, << 4088 .setattr = shmem_setattr, << 4089 .get_link = shmem_get_link, 3291 .get_link = shmem_get_link, 4090 #ifdef CONFIG_TMPFS_XATTR 3292 #ifdef CONFIG_TMPFS_XATTR 4091 .listxattr = shmem_listxattr, 3293 .listxattr = shmem_listxattr, 4092 #endif 3294 #endif 4093 }; 3295 }; 4094 3296 4095 static struct dentry *shmem_get_parent(struct 3297 static struct dentry *shmem_get_parent(struct dentry *child) 4096 { 3298 { 4097 return ERR_PTR(-ESTALE); 3299 return ERR_PTR(-ESTALE); 4098 } 3300 } 4099 3301 4100 static int shmem_match(struct inode *ino, voi 3302 static int shmem_match(struct inode *ino, void *vfh) 4101 { 3303 { 4102 __u32 *fh = vfh; 3304 __u32 *fh = vfh; 4103 __u64 inum = fh[2]; 3305 __u64 inum = fh[2]; 4104 inum = (inum << 32) | fh[1]; 3306 inum = (inum << 32) | fh[1]; 4105 return ino->i_ino == inum && fh[0] == 3307 return ino->i_ino == inum && fh[0] == ino->i_generation; 4106 } 3308 } 4107 3309 4108 /* Find any alias of inode, but prefer a hash 3310 /* Find any alias of inode, but prefer a hashed alias */ 4109 static struct dentry *shmem_find_alias(struct 3311 static struct dentry *shmem_find_alias(struct inode *inode) 4110 { 3312 { 4111 struct dentry *alias = d_find_alias(i 3313 struct dentry *alias = d_find_alias(inode); 4112 3314 4113 return alias ?: d_find_any_alias(inod 3315 return alias ?: d_find_any_alias(inode); 4114 } 3316 } 4115 3317 >> 3318 4116 static struct dentry *shmem_fh_to_dentry(stru 3319 static struct dentry *shmem_fh_to_dentry(struct super_block *sb, 4117 struct fid *fid, int fh_len, 3320 struct fid *fid, int fh_len, int fh_type) 4118 { 3321 { 4119 struct inode *inode; 3322 struct inode *inode; 4120 struct dentry *dentry = NULL; 3323 struct dentry *dentry = NULL; 4121 u64 inum; 3324 u64 inum; 4122 3325 4123 if (fh_len < 3) 3326 if (fh_len < 3) 4124 return NULL; 3327 return NULL; 4125 3328 4126 inum = fid->raw[2]; 3329 inum = fid->raw[2]; 4127 inum = (inum << 32) | fid->raw[1]; 3330 inum = (inum << 32) | fid->raw[1]; 4128 3331 4129 inode = ilookup5(sb, (unsigned long)( 3332 inode = 
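/*
 * Illustrative note (not part of the original source): the export ops
 * here pack the handle as three 32-bit words - fh[0] = i_generation,
 * fh[1] = the low half of i_ino, fh[2] = the high half - which is what
 * shmem_match() above compares against.  A hedged sketch of fetching
 * such a handle from userspace with name_to_handle_at(2):
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <stdlib.h>
 *
 *	static int handle_demo(void)
 *	{
 *		int mount_id;
 *		struct file_handle *fh;
 *
 *		fh = malloc(sizeof(*fh) + 3 * sizeof(unsigned int));
 *		if (!fh)
 *			return -1;
 *		fh->handle_bytes = 3 * sizeof(unsigned int);
 *		// f_handle[] receives { generation, ino low, ino high }.
 *		return name_to_handle_at(AT_FDCWD, "/dev/shm/file",
 *					 fh, &mount_id, 0);
 *	}
 */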
ilookup5(sb, (unsigned long)(inum + fid->raw[0]), 4130 shmem_match, fid->raw 3333 shmem_match, fid->raw); 4131 if (inode) { 3334 if (inode) { 4132 dentry = shmem_find_alias(ino 3335 dentry = shmem_find_alias(inode); 4133 iput(inode); 3336 iput(inode); 4134 } 3337 } 4135 3338 4136 return dentry; 3339 return dentry; 4137 } 3340 } 4138 3341 4139 static int shmem_encode_fh(struct inode *inod 3342 static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len, 4140 struct inode 3343 struct inode *parent) 4141 { 3344 { 4142 if (*len < 3) { 3345 if (*len < 3) { 4143 *len = 3; 3346 *len = 3; 4144 return FILEID_INVALID; 3347 return FILEID_INVALID; 4145 } 3348 } 4146 3349 4147 if (inode_unhashed(inode)) { 3350 if (inode_unhashed(inode)) { 4148 /* Unfortunately insert_inode 3351 /* Unfortunately insert_inode_hash is not idempotent, 4149 * so as we hash inodes here 3352 * so as we hash inodes here rather than at creation 4150 * time, we need a lock to en 3353 * time, we need a lock to ensure we only try 4151 * to do it once 3354 * to do it once 4152 */ 3355 */ 4153 static DEFINE_SPINLOCK(lock); 3356 static DEFINE_SPINLOCK(lock); 4154 spin_lock(&lock); 3357 spin_lock(&lock); 4155 if (inode_unhashed(inode)) 3358 if (inode_unhashed(inode)) 4156 __insert_inode_hash(i 3359 __insert_inode_hash(inode, 4157 i 3360 inode->i_ino + inode->i_generation); 4158 spin_unlock(&lock); 3361 spin_unlock(&lock); 4159 } 3362 } 4160 3363 4161 fh[0] = inode->i_generation; 3364 fh[0] = inode->i_generation; 4162 fh[1] = inode->i_ino; 3365 fh[1] = inode->i_ino; 4163 fh[2] = ((__u64)inode->i_ino) >> 32; 3366 fh[2] = ((__u64)inode->i_ino) >> 32; 4164 3367 4165 *len = 3; 3368 *len = 3; 4166 return 1; 3369 return 1; 4167 } 3370 } 4168 3371 4169 static const struct export_operations shmem_e 3372 static const struct export_operations shmem_export_ops = { 4170 .get_parent = shmem_get_parent, 3373 .get_parent = shmem_get_parent, 4171 .encode_fh = shmem_encode_fh, 3374 .encode_fh = shmem_encode_fh, 4172 .fh_to_dentry = shmem_fh_to_dentry, 3375 .fh_to_dentry = shmem_fh_to_dentry, 4173 }; 3376 }; 4174 3377 4175 enum shmem_param { 3378 enum shmem_param { 4176 Opt_gid, 3379 Opt_gid, 4177 Opt_huge, 3380 Opt_huge, 4178 Opt_mode, 3381 Opt_mode, 4179 Opt_mpol, 3382 Opt_mpol, 4180 Opt_nr_blocks, 3383 Opt_nr_blocks, 4181 Opt_nr_inodes, 3384 Opt_nr_inodes, 4182 Opt_size, 3385 Opt_size, 4183 Opt_uid, 3386 Opt_uid, 4184 Opt_inode32, << 4185 Opt_inode64, << 4186 Opt_noswap, << 4187 Opt_quota, << 4188 Opt_usrquota, << 4189 Opt_grpquota, << 4190 Opt_usrquota_block_hardlimit, << 4191 Opt_usrquota_inode_hardlimit, << 4192 Opt_grpquota_block_hardlimit, << 4193 Opt_grpquota_inode_hardlimit, << 4194 }; << 4195 << 4196 static const struct constant_table shmem_para << 4197 {"never", SHMEM_HUGE_NEVER }, << 4198 {"always", SHMEM_HUGE_ALWAYS }, << 4199 {"within_size", SHMEM_HUGE_WITHIN_SIZ << 4200 {"advise", SHMEM_HUGE_ADVISE }, << 4201 {} << 4202 }; 3387 }; 4203 3388 4204 const struct fs_parameter_spec shmem_fs_param !! 3389 static const struct fs_parameter_spec shmem_param_specs[] = { 4205 fsparam_gid ("gid", Opt_g !! 3390 fsparam_u32 ("gid", Opt_gid), 4206 fsparam_enum ("huge", Opt_h !! 
3391 fsparam_enum ("huge", Opt_huge), 4207 fsparam_u32oct("mode", Opt_m 3392 fsparam_u32oct("mode", Opt_mode), 4208 fsparam_string("mpol", Opt_m 3393 fsparam_string("mpol", Opt_mpol), 4209 fsparam_string("nr_blocks", Opt_n 3394 fsparam_string("nr_blocks", Opt_nr_blocks), 4210 fsparam_string("nr_inodes", Opt_n 3395 fsparam_string("nr_inodes", Opt_nr_inodes), 4211 fsparam_string("size", Opt_s 3396 fsparam_string("size", Opt_size), 4212 fsparam_uid ("uid", Opt_u !! 3397 fsparam_u32 ("uid", Opt_uid), 4213 fsparam_flag ("inode32", Opt_i !! 3398 {} 4214 fsparam_flag ("inode64", Opt_i !! 3399 }; 4215 fsparam_flag ("noswap", Opt_n !! 3400 4216 #ifdef CONFIG_TMPFS_QUOTA !! 3401 static const struct fs_parameter_enum shmem_param_enums[] = { 4217 fsparam_flag ("quota", Opt_q !! 3402 { Opt_huge, "never", SHMEM_HUGE_NEVER }, 4218 fsparam_flag ("usrquota", Opt_u !! 3403 { Opt_huge, "always", SHMEM_HUGE_ALWAYS }, 4219 fsparam_flag ("grpquota", Opt_g !! 3404 { Opt_huge, "within_size", SHMEM_HUGE_WITHIN_SIZE }, 4220 fsparam_string("usrquota_block_hardli !! 3405 { Opt_huge, "advise", SHMEM_HUGE_ADVISE }, 4221 fsparam_string("usrquota_inode_hardli << 4222 fsparam_string("grpquota_block_hardli << 4223 fsparam_string("grpquota_inode_hardli << 4224 #endif << 4225 {} 3406 {} 4226 }; 3407 }; 4227 3408 >> 3409 const struct fs_parameter_description shmem_fs_parameters = { >> 3410 .name = "tmpfs", >> 3411 .specs = shmem_param_specs, >> 3412 .enums = shmem_param_enums, >> 3413 }; >> 3414 4228 static int shmem_parse_one(struct fs_context 3415 static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param) 4229 { 3416 { 4230 struct shmem_options *ctx = fc->fs_pr 3417 struct shmem_options *ctx = fc->fs_private; 4231 struct fs_parse_result result; 3418 struct fs_parse_result result; 4232 unsigned long long size; 3419 unsigned long long size; 4233 char *rest; 3420 char *rest; 4234 int opt; 3421 int opt; 4235 kuid_t kuid; 3422 kuid_t kuid; 4236 kgid_t kgid; 3423 kgid_t kgid; 4237 3424 4238 opt = fs_parse(fc, shmem_fs_parameter !! 3425 opt = fs_parse(fc, &shmem_fs_parameters, param, &result); 4239 if (opt < 0) 3426 if (opt < 0) 4240 return opt; 3427 return opt; 4241 3428 4242 switch (opt) { 3429 switch (opt) { 4243 case Opt_size: 3430 case Opt_size: 4244 size = memparse(param->string 3431 size = memparse(param->string, &rest); 4245 if (*rest == '%') { 3432 if (*rest == '%') { 4246 size <<= PAGE_SHIFT; 3433 size <<= PAGE_SHIFT; 4247 size *= totalram_page 3434 size *= totalram_pages(); 4248 do_div(size, 100); 3435 do_div(size, 100); 4249 rest++; 3436 rest++; 4250 } 3437 } 4251 if (*rest) 3438 if (*rest) 4252 goto bad_value; 3439 goto bad_value; 4253 ctx->blocks = DIV_ROUND_UP(si 3440 ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE); 4254 ctx->seen |= SHMEM_SEEN_BLOCK 3441 ctx->seen |= SHMEM_SEEN_BLOCKS; 4255 break; 3442 break; 4256 case Opt_nr_blocks: 3443 case Opt_nr_blocks: 4257 ctx->blocks = memparse(param- 3444 ctx->blocks = memparse(param->string, &rest); 4258 if (*rest || ctx->blocks > LO !! 3445 if (*rest) 4259 goto bad_value; 3446 goto bad_value; 4260 ctx->seen |= SHMEM_SEEN_BLOCK 3447 ctx->seen |= SHMEM_SEEN_BLOCKS; 4261 break; 3448 break; 4262 case Opt_nr_inodes: 3449 case Opt_nr_inodes: 4263 ctx->inodes = memparse(param- 3450 ctx->inodes = memparse(param->string, &rest); 4264 if (*rest || ctx->inodes > UL !! 
3451 if (*rest) 4265 goto bad_value; 3452 goto bad_value; 4266 ctx->seen |= SHMEM_SEEN_INODE 3453 ctx->seen |= SHMEM_SEEN_INODES; 4267 break; 3454 break; 4268 case Opt_mode: 3455 case Opt_mode: 4269 ctx->mode = result.uint_32 & 3456 ctx->mode = result.uint_32 & 07777; 4270 break; 3457 break; 4271 case Opt_uid: 3458 case Opt_uid: 4272 kuid = result.uid; !! 3459 kuid = make_kuid(current_user_ns(), result.uint_32); >> 3460 if (!uid_valid(kuid)) >> 3461 goto bad_value; 4273 3462 4274 /* 3463 /* 4275 * The requested uid must be 3464 * The requested uid must be representable in the 4276 * filesystem's idmapping. 3465 * filesystem's idmapping. 4277 */ 3466 */ 4278 if (!kuid_has_mapping(fc->use 3467 if (!kuid_has_mapping(fc->user_ns, kuid)) 4279 goto bad_value; 3468 goto bad_value; 4280 3469 4281 ctx->uid = kuid; 3470 ctx->uid = kuid; 4282 break; 3471 break; 4283 case Opt_gid: 3472 case Opt_gid: 4284 kgid = result.gid; !! 3473 kgid = make_kgid(current_user_ns(), result.uint_32); >> 3474 if (!gid_valid(kgid)) >> 3475 goto bad_value; 4285 3476 4286 /* 3477 /* 4287 * The requested gid must be 3478 * The requested gid must be representable in the 4288 * filesystem's idmapping. 3479 * filesystem's idmapping. 4289 */ 3480 */ 4290 if (!kgid_has_mapping(fc->use 3481 if (!kgid_has_mapping(fc->user_ns, kgid)) 4291 goto bad_value; 3482 goto bad_value; 4292 3483 4293 ctx->gid = kgid; 3484 ctx->gid = kgid; 4294 break; 3485 break; 4295 case Opt_huge: 3486 case Opt_huge: 4296 ctx->huge = result.uint_32; 3487 ctx->huge = result.uint_32; 4297 if (ctx->huge != SHMEM_HUGE_N 3488 if (ctx->huge != SHMEM_HUGE_NEVER && 4298 !(IS_ENABLED(CONFIG_TRANS !! 3489 !(IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && 4299 has_transparent_hugepag 3490 has_transparent_hugepage())) 4300 goto unsupported_para 3491 goto unsupported_parameter; 4301 ctx->seen |= SHMEM_SEEN_HUGE; 3492 ctx->seen |= SHMEM_SEEN_HUGE; 4302 break; 3493 break; 4303 case Opt_mpol: 3494 case Opt_mpol: 4304 if (IS_ENABLED(CONFIG_NUMA)) 3495 if (IS_ENABLED(CONFIG_NUMA)) { 4305 mpol_put(ctx->mpol); 3496 mpol_put(ctx->mpol); 4306 ctx->mpol = NULL; 3497 ctx->mpol = NULL; 4307 if (mpol_parse_str(pa 3498 if (mpol_parse_str(param->string, &ctx->mpol)) 4308 goto bad_valu 3499 goto bad_value; 4309 break; 3500 break; 4310 } 3501 } 4311 goto unsupported_parameter; 3502 goto unsupported_parameter; 4312 case Opt_inode32: << 4313 ctx->full_inums = false; << 4314 ctx->seen |= SHMEM_SEEN_INUMS << 4315 break; << 4316 case Opt_inode64: << 4317 if (sizeof(ino_t) < 8) { << 4318 return invalfc(fc, << 4319 "Canno << 4320 } << 4321 ctx->full_inums = true; << 4322 ctx->seen |= SHMEM_SEEN_INUMS << 4323 break; << 4324 case Opt_noswap: << 4325 if ((fc->user_ns != &init_use << 4326 return invalfc(fc, << 4327 "Turni << 4328 } << 4329 ctx->noswap = true; << 4330 ctx->seen |= SHMEM_SEEN_NOSWA << 4331 break; << 4332 case Opt_quota: << 4333 if (fc->user_ns != &init_user << 4334 return invalfc(fc, "Q << 4335 ctx->seen |= SHMEM_SEEN_QUOTA << 4336 ctx->quota_types |= (QTYPE_MA << 4337 break; << 4338 case Opt_usrquota: << 4339 if (fc->user_ns != &init_user << 4340 return invalfc(fc, "Q << 4341 ctx->seen |= SHMEM_SEEN_QUOTA << 4342 ctx->quota_types |= QTYPE_MAS << 4343 break; << 4344 case Opt_grpquota: << 4345 if (fc->user_ns != &init_user << 4346 return invalfc(fc, "Q << 4347 ctx->seen |= SHMEM_SEEN_QUOTA << 4348 ctx->quota_types |= QTYPE_MAS << 4349 break; << 4350 case Opt_usrquota_block_hardlimit: << 4351 size = memparse(param->string << 4352 if (*rest || !size) << 4353 goto bad_value; << 4354 if 
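/*
 * Illustrative note (not part of the original source): the parser
 * around here accepts size= (absolute, with k/m/g suffixes, or as a
 * percentage of RAM), nr_blocks=, nr_inodes=, mode=, uid=, gid=,
 * huge=, mpol=, inode32/inode64, noswap and the quota hard limits.
 * A hedged sketch of passing a few of them through mount(2):
 *
 *	#include <sys/mount.h>
 *
 *	static int mount_demo(void)
 *	{
 *		// "size=50%" is resolved against totalram_pages(), and
 *		// "mode" is octal, exactly as shmem_parse_one() expects.
 *		return mount("tmpfs", "/mnt/t", "tmpfs", 0,
 *			     "size=50%,nr_inodes=10240,mode=1777,huge=within_size");
 *	}
 */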
(size > SHMEM_QUOTA_MAX_SP << 4355 return invalfc(fc, << 4356 "User << 4357 ctx->qlimits.usrquota_bhardli << 4358 break; << 4359 case Opt_grpquota_block_hardlimit: << 4360 size = memparse(param->string << 4361 if (*rest || !size) << 4362 goto bad_value; << 4363 if (size > SHMEM_QUOTA_MAX_SP << 4364 return invalfc(fc, << 4365 "Group << 4366 ctx->qlimits.grpquota_bhardli << 4367 break; << 4368 case Opt_usrquota_inode_hardlimit: << 4369 size = memparse(param->string << 4370 if (*rest || !size) << 4371 goto bad_value; << 4372 if (size > SHMEM_QUOTA_MAX_IN << 4373 return invalfc(fc, << 4374 "User << 4375 ctx->qlimits.usrquota_ihardli << 4376 break; << 4377 case Opt_grpquota_inode_hardlimit: << 4378 size = memparse(param->string << 4379 if (*rest || !size) << 4380 goto bad_value; << 4381 if (size > SHMEM_QUOTA_MAX_IN << 4382 return invalfc(fc, << 4383 "Group << 4384 ctx->qlimits.grpquota_ihardli << 4385 break; << 4386 } 3503 } 4387 return 0; 3504 return 0; 4388 3505 4389 unsupported_parameter: 3506 unsupported_parameter: 4390 return invalfc(fc, "Unsupported param !! 3507 return invalf(fc, "tmpfs: Unsupported parameter '%s'", param->key); 4391 bad_value: 3508 bad_value: 4392 return invalfc(fc, "Bad value for '%s !! 3509 return invalf(fc, "tmpfs: Bad value for '%s'", param->key); 4393 } 3510 } 4394 3511 4395 static int shmem_parse_options(struct fs_cont 3512 static int shmem_parse_options(struct fs_context *fc, void *data) 4396 { 3513 { 4397 char *options = data; 3514 char *options = data; 4398 3515 4399 if (options) { 3516 if (options) { 4400 int err = security_sb_eat_lsm 3517 int err = security_sb_eat_lsm_opts(options, &fc->security); 4401 if (err) 3518 if (err) 4402 return err; 3519 return err; 4403 } 3520 } 4404 3521 4405 while (options != NULL) { 3522 while (options != NULL) { 4406 char *this_char = options; 3523 char *this_char = options; 4407 for (;;) { 3524 for (;;) { 4408 /* 3525 /* 4409 * NUL-terminate this 3526 * NUL-terminate this option: unfortunately, 4410 * mount options form 3527 * mount options form a comma-separated list, 4411 * but mpol's nodelis 3528 * but mpol's nodelist may also contain commas. 4412 */ 3529 */ 4413 options = strchr(opti 3530 options = strchr(options, ','); 4414 if (options == NULL) 3531 if (options == NULL) 4415 break; 3532 break; 4416 options++; 3533 options++; 4417 if (!isdigit(*options 3534 if (!isdigit(*options)) { 4418 options[-1] = 3535 options[-1] = '\0'; 4419 break; 3536 break; 4420 } 3537 } 4421 } 3538 } 4422 if (*this_char) { 3539 if (*this_char) { 4423 char *value = strchr( !! 3540 char *value = strchr(this_char,'='); 4424 size_t len = 0; 3541 size_t len = 0; 4425 int err; 3542 int err; 4426 3543 4427 if (value) { 3544 if (value) { 4428 *value++ = '\ 3545 *value++ = '\0'; 4429 len = strlen( 3546 len = strlen(value); 4430 } 3547 } 4431 err = vfs_parse_fs_st 3548 err = vfs_parse_fs_string(fc, this_char, value, len); 4432 if (err < 0) 3549 if (err < 0) 4433 return err; 3550 return err; 4434 } 3551 } 4435 } 3552 } 4436 return 0; 3553 return 0; 4437 } 3554 } 4438 3555 4439 /* 3556 /* 4440 * Reconfigure a shmem filesystem. 3557 * Reconfigure a shmem filesystem. >> 3558 * >> 3559 * Note that we disallow change from limited->unlimited blocks/inodes while any >> 3560 * are in use; but we must separately disallow unlimited->limited, because in >> 3561 * that case we have no record of how much is already in use. 
4441 */ 3562 */ 4442 static int shmem_reconfigure(struct fs_contex 3563 static int shmem_reconfigure(struct fs_context *fc) 4443 { 3564 { 4444 struct shmem_options *ctx = fc->fs_pr 3565 struct shmem_options *ctx = fc->fs_private; 4445 struct shmem_sb_info *sbinfo = SHMEM_ 3566 struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb); 4446 unsigned long used_isp; !! 3567 unsigned long inodes; 4447 struct mempolicy *mpol = NULL; << 4448 const char *err; 3568 const char *err; 4449 3569 4450 raw_spin_lock(&sbinfo->stat_lock); !! 3570 spin_lock(&sbinfo->stat_lock); 4451 used_isp = sbinfo->max_inodes * BOGO_ !! 3571 inodes = sbinfo->max_inodes - sbinfo->free_inodes; 4452 << 4453 if ((ctx->seen & SHMEM_SEEN_BLOCKS) & 3572 if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) { 4454 if (!sbinfo->max_blocks) { 3573 if (!sbinfo->max_blocks) { 4455 err = "Cannot retroac 3574 err = "Cannot retroactively limit size"; 4456 goto out; 3575 goto out; 4457 } 3576 } 4458 if (percpu_counter_compare(&s 3577 if (percpu_counter_compare(&sbinfo->used_blocks, 4459 ct 3578 ctx->blocks) > 0) { 4460 err = "Too small a si 3579 err = "Too small a size for current use"; 4461 goto out; 3580 goto out; 4462 } 3581 } 4463 } 3582 } 4464 if ((ctx->seen & SHMEM_SEEN_INODES) & 3583 if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) { 4465 if (!sbinfo->max_inodes) { 3584 if (!sbinfo->max_inodes) { 4466 err = "Cannot retroac 3585 err = "Cannot retroactively limit inodes"; 4467 goto out; 3586 goto out; 4468 } 3587 } 4469 if (ctx->inodes * BOGO_INODE_ !! 3588 if (ctx->inodes < inodes) { 4470 err = "Too few inodes 3589 err = "Too few inodes for current use"; 4471 goto out; 3590 goto out; 4472 } 3591 } 4473 } 3592 } 4474 3593 4475 if ((ctx->seen & SHMEM_SEEN_INUMS) && << 4476 sbinfo->next_ino > UINT_MAX) { << 4477 err = "Current inum too high << 4478 goto out; << 4479 } << 4480 if ((ctx->seen & SHMEM_SEEN_NOSWAP) & << 4481 err = "Cannot disable swap on << 4482 goto out; << 4483 } << 4484 if (!(ctx->seen & SHMEM_SEEN_NOSWAP) << 4485 err = "Cannot enable swap on << 4486 goto out; << 4487 } << 4488 << 4489 if (ctx->seen & SHMEM_SEEN_QUOTA && << 4490 !sb_any_quota_loaded(fc->root->d_ << 4491 err = "Cannot enable quota on << 4492 goto out; << 4493 } << 4494 << 4495 #ifdef CONFIG_TMPFS_QUOTA << 4496 #define CHANGED_LIMIT(name) << 4497 (ctx->qlimits.name## hardlimit && << 4498 (ctx->qlimits.name## hardlimit != sbi << 4499 << 4500 if (CHANGED_LIMIT(usrquota_b) || CHAN << 4501 CHANGED_LIMIT(grpquota_b) || CHAN << 4502 err = "Cannot change global q << 4503 goto out; << 4504 } << 4505 #endif /* CONFIG_TMPFS_QUOTA */ << 4506 << 4507 if (ctx->seen & SHMEM_SEEN_HUGE) 3594 if (ctx->seen & SHMEM_SEEN_HUGE) 4508 sbinfo->huge = ctx->huge; 3595 sbinfo->huge = ctx->huge; 4509 if (ctx->seen & SHMEM_SEEN_INUMS) << 4510 sbinfo->full_inums = ctx->ful << 4511 if (ctx->seen & SHMEM_SEEN_BLOCKS) 3596 if (ctx->seen & SHMEM_SEEN_BLOCKS) 4512 sbinfo->max_blocks = ctx->bl 3597 sbinfo->max_blocks = ctx->blocks; 4513 if (ctx->seen & SHMEM_SEEN_INODES) { 3598 if (ctx->seen & SHMEM_SEEN_INODES) { 4514 sbinfo->max_inodes = ctx->in 3599 sbinfo->max_inodes = ctx->inodes; 4515 sbinfo->free_ispace = ctx->in !! 3600 sbinfo->free_inodes = ctx->inodes - inodes; 4516 } 3601 } 4517 3602 4518 /* 3603 /* 4519 * Preserve previous mempolicy unless 3604 * Preserve previous mempolicy unless mpol remount option was specified. 4520 */ 3605 */ 4521 if (ctx->mpol) { 3606 if (ctx->mpol) { 4522 mpol = sbinfo->mpol; !! 
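/*
 * Illustrative note (not part of the original source): as the checks
 * above show, a remount may raise limits or adjust huge=/mpol=, but it
 * cannot impose a limit on an instance mounted unlimited, shrink below
 * what is already in use, toggle noswap, or enable quotas after the
 * fact.  A hypothetical sketch of a remount that grows an existing
 * tmpfs:
 *
 *	#include <sys/mount.h>
 *
 *	static int remount_demo(void)
 *	{
 *		// Fails with EINVAL ("Too small a size for current use")
 *		// if more than 2G of blocks are already allocated.
 *		return mount(NULL, "/dev/shm", NULL, MS_REMOUNT, "size=2G");
 *	}
 */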
3607 mpol_put(sbinfo->mpol); 4523 sbinfo->mpol = ctx->mpol; 3608 sbinfo->mpol = ctx->mpol; /* transfers initial ref */ 4524 ctx->mpol = NULL; 3609 ctx->mpol = NULL; 4525 } 3610 } 4526 !! 3611 spin_unlock(&sbinfo->stat_lock); 4527 if (ctx->noswap) << 4528 sbinfo->noswap = true; << 4529 << 4530 raw_spin_unlock(&sbinfo->stat_lock); << 4531 mpol_put(mpol); << 4532 return 0; 3612 return 0; 4533 out: 3613 out: 4534 raw_spin_unlock(&sbinfo->stat_lock); !! 3614 spin_unlock(&sbinfo->stat_lock); 4535 return invalfc(fc, "%s", err); !! 3615 return invalf(fc, "tmpfs: %s", err); 4536 } 3616 } 4537 3617 4538 static int shmem_show_options(struct seq_file 3618 static int shmem_show_options(struct seq_file *seq, struct dentry *root) 4539 { 3619 { 4540 struct shmem_sb_info *sbinfo = SHMEM_ 3620 struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb); 4541 struct mempolicy *mpol; << 4542 3621 4543 if (sbinfo->max_blocks != shmem_defau 3622 if (sbinfo->max_blocks != shmem_default_max_blocks()) 4544 seq_printf(seq, ",size=%luk", !! 3623 seq_printf(seq, ",size=%luk", >> 3624 sbinfo->max_blocks << (PAGE_SHIFT - 10)); 4545 if (sbinfo->max_inodes != shmem_defau 3625 if (sbinfo->max_inodes != shmem_default_max_inodes()) 4546 seq_printf(seq, ",nr_inodes=% 3626 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes); 4547 if (sbinfo->mode != (0777 | S_ISVTX)) 3627 if (sbinfo->mode != (0777 | S_ISVTX)) 4548 seq_printf(seq, ",mode=%03ho" 3628 seq_printf(seq, ",mode=%03ho", sbinfo->mode); 4549 if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_ 3629 if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID)) 4550 seq_printf(seq, ",uid=%u", 3630 seq_printf(seq, ",uid=%u", 4551 from_kuid_mun 3631 from_kuid_munged(&init_user_ns, sbinfo->uid)); 4552 if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_ 3632 if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID)) 4553 seq_printf(seq, ",gid=%u", 3633 seq_printf(seq, ",gid=%u", 4554 from_kgid_mun 3634 from_kgid_munged(&init_user_ns, sbinfo->gid)); 4555 !! 3635 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE 4556 /* << 4557 * Showing inode{64,32} might be usef << 4558 * since then people don't have to re << 4559 * /proc/config.gz to confirm 64-bit << 4560 * (which may not even exist if IKCON << 4561 * << 4562 * We hide it when inode64 isn't the << 4563 * inodes, since that probably just m << 4564 * consideration. << 4565 * << 4566 * As such: << 4567 * << 4568 * +------------- << 4569 * | TMPFS_INODE6 << 4570 * +------------------+------------- << 4571 * | full_inums=true | show << 4572 * | full_inums=false | show << 4573 * +------------------+------------- << 4574 * << 4575 */ << 4576 if (IS_ENABLED(CONFIG_TMPFS_INODE64) << 4577 seq_printf(seq, ",inode%d", ( << 4578 #ifdef CONFIG_TRANSPARENT_HUGEPAGE << 4579 /* Rightly or wrongly, show huge moun 3636 /* Rightly or wrongly, show huge mount option unmasked by shmem_huge */ 4580 if (sbinfo->huge) 3637 if (sbinfo->huge) 4581 seq_printf(seq, ",huge=%s", s 3638 seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge)); 4582 #endif 3639 #endif 4583 mpol = shmem_get_sbmpol(sbinfo); !! 
3640 shmem_show_mpol(seq, sbinfo->mpol); 4584 shmem_show_mpol(seq, mpol); << 4585 mpol_put(mpol); << 4586 if (sbinfo->noswap) << 4587 seq_printf(seq, ",noswap"); << 4588 #ifdef CONFIG_TMPFS_QUOTA << 4589 if (sb_has_quota_active(root->d_sb, U << 4590 seq_printf(seq, ",usrquota"); << 4591 if (sb_has_quota_active(root->d_sb, G << 4592 seq_printf(seq, ",grpquota"); << 4593 if (sbinfo->qlimits.usrquota_bhardlim << 4594 seq_printf(seq, ",usrquota_bl << 4595 sbinfo->qlimits.us << 4596 if (sbinfo->qlimits.grpquota_bhardlim << 4597 seq_printf(seq, ",grpquota_bl << 4598 sbinfo->qlimits.gr << 4599 if (sbinfo->qlimits.usrquota_ihardlim << 4600 seq_printf(seq, ",usrquota_in << 4601 sbinfo->qlimits.us << 4602 if (sbinfo->qlimits.grpquota_ihardlim << 4603 seq_printf(seq, ",grpquota_in << 4604 sbinfo->qlimits.gr << 4605 #endif << 4606 return 0; 3641 return 0; 4607 } 3642 } 4608 3643 4609 #endif /* CONFIG_TMPFS */ 3644 #endif /* CONFIG_TMPFS */ 4610 3645 4611 static void shmem_put_super(struct super_bloc 3646 static void shmem_put_super(struct super_block *sb) 4612 { 3647 { 4613 struct shmem_sb_info *sbinfo = SHMEM_ 3648 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 4614 3649 4615 #ifdef CONFIG_TMPFS_QUOTA << 4616 shmem_disable_quotas(sb); << 4617 #endif << 4618 free_percpu(sbinfo->ino_batch); << 4619 percpu_counter_destroy(&sbinfo->used_ 3650 percpu_counter_destroy(&sbinfo->used_blocks); 4620 mpol_put(sbinfo->mpol); 3651 mpol_put(sbinfo->mpol); 4621 kfree(sbinfo); 3652 kfree(sbinfo); 4622 sb->s_fs_info = NULL; 3653 sb->s_fs_info = NULL; 4623 } 3654 } 4624 3655 4625 static int shmem_fill_super(struct super_bloc 3656 static int shmem_fill_super(struct super_block *sb, struct fs_context *fc) 4626 { 3657 { 4627 struct shmem_options *ctx = fc->fs_pr 3658 struct shmem_options *ctx = fc->fs_private; 4628 struct inode *inode; 3659 struct inode *inode; 4629 struct shmem_sb_info *sbinfo; 3660 struct shmem_sb_info *sbinfo; 4630 int error = -ENOMEM; !! 3661 int err = -ENOMEM; 4631 3662 4632 /* Round up to L1_CACHE_BYTES to resi 3663 /* Round up to L1_CACHE_BYTES to resist false sharing */ 4633 sbinfo = kzalloc(max((int)sizeof(stru 3664 sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info), 4634 L1_CACHE_BYTE 3665 L1_CACHE_BYTES), GFP_KERNEL); 4635 if (!sbinfo) 3666 if (!sbinfo) 4636 return error; !! 3667 return -ENOMEM; 4637 3668 4638 sb->s_fs_info = sbinfo; 3669 sb->s_fs_info = sbinfo; 4639 3670 4640 #ifdef CONFIG_TMPFS 3671 #ifdef CONFIG_TMPFS 4641 /* 3672 /* 4642 * Per default we only allow half of 3673 * Per default we only allow half of the physical ram per 4643 * tmpfs instance, limiting inodes to 3674 * tmpfs instance, limiting inodes to one per page of lowmem; 4644 * but the internal instance is left 3675 * but the internal instance is left unlimited. 4645 */ 3676 */ 4646 if (!(sb->s_flags & SB_KERNMOUNT)) { 3677 if (!(sb->s_flags & SB_KERNMOUNT)) { 4647 if (!(ctx->seen & SHMEM_SEEN_ 3678 if (!(ctx->seen & SHMEM_SEEN_BLOCKS)) 4648 ctx->blocks = shmem_d 3679 ctx->blocks = shmem_default_max_blocks(); 4649 if (!(ctx->seen & SHMEM_SEEN_ 3680 if (!(ctx->seen & SHMEM_SEEN_INODES)) 4650 ctx->inodes = shmem_d 3681 ctx->inodes = shmem_default_max_inodes(); 4651 if (!(ctx->seen & SHMEM_SEEN_ << 4652 ctx->full_inums = IS_ << 4653 sbinfo->noswap = ctx->noswap; << 4654 } else { 3682 } else { 4655 sb->s_flags |= SB_NOUSER; 3683 sb->s_flags |= SB_NOUSER; 4656 } 3684 } 4657 sb->s_export_op = &shmem_export_ops; 3685 sb->s_export_op = &shmem_export_ops; 4658 sb->s_flags |= SB_NOSEC | SB_I_VERSIO !! 
3686 sb->s_flags |= SB_NOSEC; 4659 #else 3687 #else 4660 sb->s_flags |= SB_NOUSER; 3688 sb->s_flags |= SB_NOUSER; 4661 #endif 3689 #endif 4662 sbinfo->max_blocks = ctx->blocks; 3690 sbinfo->max_blocks = ctx->blocks; 4663 sbinfo->max_inodes = ctx->inodes; !! 3691 sbinfo->free_inodes = sbinfo->max_inodes = ctx->inodes; 4664 sbinfo->free_ispace = sbinfo->max_ino << 4665 if (sb->s_flags & SB_KERNMOUNT) { << 4666 sbinfo->ino_batch = alloc_per << 4667 if (!sbinfo->ino_batch) << 4668 goto failed; << 4669 } << 4670 sbinfo->uid = ctx->uid; 3692 sbinfo->uid = ctx->uid; 4671 sbinfo->gid = ctx->gid; 3693 sbinfo->gid = ctx->gid; 4672 sbinfo->full_inums = ctx->full_inums; << 4673 sbinfo->mode = ctx->mode; 3694 sbinfo->mode = ctx->mode; 4674 sbinfo->huge = ctx->huge; 3695 sbinfo->huge = ctx->huge; 4675 sbinfo->mpol = ctx->mpol; 3696 sbinfo->mpol = ctx->mpol; 4676 ctx->mpol = NULL; 3697 ctx->mpol = NULL; 4677 3698 4678 raw_spin_lock_init(&sbinfo->stat_lock !! 3699 spin_lock_init(&sbinfo->stat_lock); 4679 if (percpu_counter_init(&sbinfo->used 3700 if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL)) 4680 goto failed; 3701 goto failed; 4681 spin_lock_init(&sbinfo->shrinklist_lo 3702 spin_lock_init(&sbinfo->shrinklist_lock); 4682 INIT_LIST_HEAD(&sbinfo->shrinklist); 3703 INIT_LIST_HEAD(&sbinfo->shrinklist); 4683 3704 4684 sb->s_maxbytes = MAX_LFS_FILESIZE; 3705 sb->s_maxbytes = MAX_LFS_FILESIZE; 4685 sb->s_blocksize = PAGE_SIZE; 3706 sb->s_blocksize = PAGE_SIZE; 4686 sb->s_blocksize_bits = PAGE_SHIFT; 3707 sb->s_blocksize_bits = PAGE_SHIFT; 4687 sb->s_magic = TMPFS_MAGIC; 3708 sb->s_magic = TMPFS_MAGIC; 4688 sb->s_op = &shmem_ops; 3709 sb->s_op = &shmem_ops; 4689 sb->s_time_gran = 1; 3710 sb->s_time_gran = 1; 4690 #ifdef CONFIG_TMPFS_XATTR 3711 #ifdef CONFIG_TMPFS_XATTR 4691 sb->s_xattr = shmem_xattr_handlers; 3712 sb->s_xattr = shmem_xattr_handlers; 4692 #endif 3713 #endif 4693 #ifdef CONFIG_TMPFS_POSIX_ACL 3714 #ifdef CONFIG_TMPFS_POSIX_ACL 4694 sb->s_flags |= SB_POSIXACL; 3715 sb->s_flags |= SB_POSIXACL; 4695 #endif 3716 #endif 4696 uuid_t uuid; !! 3717 uuid_gen(&sb->s_uuid); 4697 uuid_gen(&uuid); << 4698 super_set_uuid(sb, uuid.b, sizeof(uui << 4699 << 4700 #ifdef CONFIG_TMPFS_QUOTA << 4701 if (ctx->seen & SHMEM_SEEN_QUOTA) { << 4702 sb->dq_op = &shmem_quota_oper << 4703 sb->s_qcop = &dquot_quotactl_ << 4704 sb->s_quota_types = QTYPE_MAS << 4705 << 4706 /* Copy the default limits fr << 4707 memcpy(&sbinfo->qlimits, &ctx << 4708 sizeof(struct shmem_qu << 4709 3718 4710 if (shmem_enable_quotas(sb, c !! 3719 inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE); 4711 goto failed; !! 3720 if (!inode) 4712 } << 4713 #endif /* CONFIG_TMPFS_QUOTA */ << 4714 << 4715 inode = shmem_get_inode(&nop_mnt_idma << 4716 S_IFDIR | sbi << 4717 if (IS_ERR(inode)) { << 4718 error = PTR_ERR(inode); << 4719 goto failed; 3721 goto failed; 4720 } << 4721 inode->i_uid = sbinfo->uid; 3722 inode->i_uid = sbinfo->uid; 4722 inode->i_gid = sbinfo->gid; 3723 inode->i_gid = sbinfo->gid; 4723 sb->s_root = d_make_root(inode); 3724 sb->s_root = d_make_root(inode); 4724 if (!sb->s_root) 3725 if (!sb->s_root) 4725 goto failed; 3726 goto failed; 4726 return 0; 3727 return 0; 4727 3728 4728 failed: 3729 failed: 4729 shmem_put_super(sb); 3730 shmem_put_super(sb); 4730 return error; !! 
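/*
 * Illustrative note (not part of the original source): for a user
 * mount, shmem_fill_super() defaults to half of physical RAM and one
 * inode per page of lowmem, while SB_KERNMOUNT instances are left
 * unlimited.  Those limits are what shmem_statfs() reports earlier in
 * this file.  A hedged userspace sketch, assuming a tmpfs at /dev/shm:
 *
 *	#include <stdio.h>
 *	#include <sys/statvfs.h>
 *
 *	static int statfs_demo(void)
 *	{
 *		struct statvfs st;
 *
 *		if (statvfs("/dev/shm", &st) < 0)
 *			return -1;
 *		// f_blocks/f_files mirror max_blocks/max_inodes; an
 *		// unlimited instance reports 0 like simple_statfs().
 *		printf("blocks %lu free %lu inodes %lu\n",
 *		       (unsigned long)st.f_blocks,
 *		       (unsigned long)st.f_bfree,
 *		       (unsigned long)st.f_files);
 *		return 0;
 *	}
 */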
3731 return err; 4731 } 3732 } 4732 3733 4733 static int shmem_get_tree(struct fs_context * 3734 static int shmem_get_tree(struct fs_context *fc) 4734 { 3735 { 4735 return get_tree_nodev(fc, shmem_fill_ 3736 return get_tree_nodev(fc, shmem_fill_super); 4736 } 3737 } 4737 3738 4738 static void shmem_free_fc(struct fs_context * 3739 static void shmem_free_fc(struct fs_context *fc) 4739 { 3740 { 4740 struct shmem_options *ctx = fc->fs_pr 3741 struct shmem_options *ctx = fc->fs_private; 4741 3742 4742 if (ctx) { 3743 if (ctx) { 4743 mpol_put(ctx->mpol); 3744 mpol_put(ctx->mpol); 4744 kfree(ctx); 3745 kfree(ctx); 4745 } 3746 } 4746 } 3747 } 4747 3748 4748 static const struct fs_context_operations shm 3749 static const struct fs_context_operations shmem_fs_context_ops = { 4749 .free = shmem_free_ 3750 .free = shmem_free_fc, 4750 .get_tree = shmem_get_t 3751 .get_tree = shmem_get_tree, 4751 #ifdef CONFIG_TMPFS 3752 #ifdef CONFIG_TMPFS 4752 .parse_monolithic = shmem_parse 3753 .parse_monolithic = shmem_parse_options, 4753 .parse_param = shmem_parse 3754 .parse_param = shmem_parse_one, 4754 .reconfigure = shmem_recon 3755 .reconfigure = shmem_reconfigure, 4755 #endif 3756 #endif 4756 }; 3757 }; 4757 3758 4758 static struct kmem_cache *shmem_inode_cachep !! 3759 static struct kmem_cache *shmem_inode_cachep; 4759 3760 4760 static struct inode *shmem_alloc_inode(struct 3761 static struct inode *shmem_alloc_inode(struct super_block *sb) 4761 { 3762 { 4762 struct shmem_inode_info *info; 3763 struct shmem_inode_info *info; 4763 info = alloc_inode_sb(sb, shmem_inode !! 3764 info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL); 4764 if (!info) 3765 if (!info) 4765 return NULL; 3766 return NULL; 4766 return &info->vfs_inode; 3767 return &info->vfs_inode; 4767 } 3768 } 4768 3769 4769 static void shmem_free_in_core_inode(struct i 3770 static void shmem_free_in_core_inode(struct inode *inode) 4770 { 3771 { 4771 if (S_ISLNK(inode->i_mode)) 3772 if (S_ISLNK(inode->i_mode)) 4772 kfree(inode->i_link); 3773 kfree(inode->i_link); 4773 kmem_cache_free(shmem_inode_cachep, S 3774 kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode)); 4774 } 3775 } 4775 3776 4776 static void shmem_destroy_inode(struct inode 3777 static void shmem_destroy_inode(struct inode *inode) 4777 { 3778 { 4778 if (S_ISREG(inode->i_mode)) 3779 if (S_ISREG(inode->i_mode)) 4779 mpol_free_shared_policy(&SHME 3780 mpol_free_shared_policy(&SHMEM_I(inode)->policy); 4780 if (S_ISDIR(inode->i_mode)) << 4781 simple_offset_destroy(shmem_g << 4782 } 3781 } 4783 3782 4784 static void shmem_init_inode(void *foo) 3783 static void shmem_init_inode(void *foo) 4785 { 3784 { 4786 struct shmem_inode_info *info = foo; 3785 struct shmem_inode_info *info = foo; 4787 inode_init_once(&info->vfs_inode); 3786 inode_init_once(&info->vfs_inode); 4788 } 3787 } 4789 3788 4790 static void __init shmem_init_inodecache(void !! 3789 static void shmem_init_inodecache(void) 4791 { 3790 { 4792 shmem_inode_cachep = kmem_cache_creat 3791 shmem_inode_cachep = kmem_cache_create("shmem_inode_cache", 4793 sizeof(struct 3792 sizeof(struct shmem_inode_info), 4794 0, SLAB_PANIC 3793 0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode); 4795 } 3794 } 4796 3795 4797 static void __init shmem_destroy_inodecache(v !! 
{
	kmem_cache_destroy(shmem_inode_cachep);
}

/* Keep the page in page cache instead of truncating it */
static int shmem_error_remove_folio(struct address_space *mapping,
				    struct folio *folio)
{
	return 0;
}

static const struct address_space_operations shmem_aops = {
	.writepage	= shmem_writepage,
	.dirty_folio	= noop_dirty_folio,
#ifdef CONFIG_TMPFS
	.write_begin	= shmem_write_begin,
	.write_end	= shmem_write_end,
#endif
#ifdef CONFIG_MIGRATION
	.migrate_folio	= migrate_folio,
#endif
	.error_remove_folio = shmem_error_remove_folio,
};

static const struct file_operations shmem_file_operations = {
	.mmap		= shmem_mmap,
	.open		= shmem_file_open,
	.get_unmapped_area = shmem_get_unmapped_area,
#ifdef CONFIG_TMPFS
	.llseek		= shmem_file_llseek,
	.read_iter	= shmem_file_read_iter,
	.write_iter	= shmem_file_write_iter,
	.fsync		= noop_fsync,
	.splice_read	= shmem_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= shmem_fallocate,
#endif
};

static const struct inode_operations shmem_inode_operations = {
	.getattr	= shmem_getattr,
	.setattr	= shmem_setattr,
#ifdef CONFIG_TMPFS_XATTR
	.listxattr	= shmem_listxattr,
	.set_acl	= simple_set_acl,
	.fileattr_get	= shmem_fileattr_get,
	.fileattr_set	= shmem_fileattr_set,
#endif
};

static const struct inode_operations shmem_dir_inode_operations = {
#ifdef CONFIG_TMPFS
	.getattr	= shmem_getattr,
	.create		= shmem_create,
	.lookup		= simple_lookup,
	.link		= shmem_link,
	.unlink		= shmem_unlink,
	.symlink	= shmem_symlink,
	.mkdir		= shmem_mkdir,
	.rmdir		= shmem_rmdir,
	.mknod		= shmem_mknod,
	.rename		= shmem_rename2,
	.tmpfile	= shmem_tmpfile,
	.get_offset_ctx	= shmem_get_offset_ctx,
#endif
#ifdef CONFIG_TMPFS_XATTR
	.listxattr	= shmem_listxattr,
	.fileattr_get	= shmem_fileattr_get,
	.fileattr_set	= shmem_fileattr_set,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_setattr,
	.set_acl	= simple_set_acl,
#endif
};

static const struct inode_operations shmem_special_inode_operations = {
	.getattr	= shmem_getattr,
#ifdef CONFIG_TMPFS_XATTR
	.listxattr	= shmem_listxattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_setattr,
	.set_acl	= simple_set_acl,
#endif
};

static const struct super_operations shmem_ops = {
	.alloc_inode	= shmem_alloc_inode,
	.free_inode	= shmem_free_in_core_inode,
	.destroy_inode	= shmem_destroy_inode,
#ifdef CONFIG_TMPFS
	.statfs		= shmem_statfs,
	.show_options	= shmem_show_options,
#endif
#ifdef CONFIG_TMPFS_QUOTA
	.get_dquots	= shmem_get_dquots,
#endif
	.evict_inode	= shmem_evict_inode,
	.drop_inode	= generic_delete_inode,
	.put_super	= shmem_put_super,
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	.nr_cached_objects	= shmem_unused_huge_count,
	.free_cached_objects	= shmem_unused_huge_scan,
#endif
};

static const struct vm_operations_struct shmem_vm_ops = {
	.fault		= shmem_fault,
	.map_pages	= filemap_map_pages,
#ifdef CONFIG_NUMA
	.set_policy	= shmem_set_policy,
	.get_policy	= shmem_get_policy,
#endif
};

static const struct vm_operations_struct shmem_anon_vm_ops = {
	.fault		= shmem_fault,
	.map_pages	= filemap_map_pages,
#ifdef CONFIG_NUMA
	.set_policy	= shmem_set_policy,
	.get_policy	= shmem_get_policy,
#endif
};

int shmem_init_fs_context(struct fs_context *fc)
{
	struct shmem_options *ctx;

	ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->mode = 0777 | S_ISVTX;
	ctx->uid = current_fsuid();
	ctx->gid = current_fsgid();

	fc->fs_private = ctx;
	fc->ops = &shmem_fs_context_ops;
	return 0;
}

static struct file_system_type shmem_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "tmpfs",
	.init_fs_context = shmem_init_fs_context,
#ifdef CONFIG_TMPFS
	.parameters	= shmem_fs_parameters,
#endif
	.kill_sb	= kill_litter_super,
	.fs_flags	= FS_USERNS_MOUNT | FS_ALLOW_IDMAP,
};

void __init shmem_init(void)
{
	int error;

	shmem_init_inodecache();

#ifdef CONFIG_TMPFS_QUOTA
	register_quota_format(&shmem_quota_format);
#endif

	error = register_filesystem(&shmem_fs_type);
	if (error) {
		pr_err("Could not register tmpfs\n");
		goto out2;
	}

	shm_mnt = kern_mount(&shmem_fs_type);
	if (IS_ERR(shm_mnt)) {
		error = PTR_ERR(shm_mnt);
		pr_err("Could not kern_mount tmpfs\n");
		goto out1;
	}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
	else
		shmem_huge = SHMEM_HUGE_NEVER; /* just in case it was patched */

	/*
	 * Default to setting PMD-sized THP to inherit the global setting and
	 * disable all other multi-size THPs.
	 */
	huge_shmem_orders_inherit = BIT(HPAGE_PMD_ORDER);
#endif
	return;

out1:
	unregister_filesystem(&shmem_fs_type);
out2:
#ifdef CONFIG_TMPFS_QUOTA
	unregister_quota_format(&shmem_quota_format);
#endif
	shmem_destroy_inodecache();
	shm_mnt = ERR_PTR(error);
}

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
static ssize_t shmem_enabled_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	static const int values[] = {
		SHMEM_HUGE_ALWAYS,
		SHMEM_HUGE_WITHIN_SIZE,
		SHMEM_HUGE_ADVISE,
		SHMEM_HUGE_NEVER,
		SHMEM_HUGE_DENY,
		SHMEM_HUGE_FORCE,
	};
	int len = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(values); i++) {
		len += sysfs_emit_at(buf, len,
				     shmem_huge == values[i] ? "%s[%s]" : "%s%s",
				     i ? " " : "", shmem_format_huge(values[i]));
	}
	len += sysfs_emit_at(buf, len, "\n");

	return len;
}

static ssize_t shmem_enabled_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	char tmp[16];
	int huge;

	if (count + 1 > sizeof(tmp))
		return -EINVAL;
	memcpy(tmp, buf, count);
	tmp[count] = '\0';
	if (count && tmp[count - 1] == '\n')
		tmp[count - 1] = '\0';

	huge = shmem_parse_huge(tmp);
	if (huge == -EINVAL)
		return -EINVAL;
	if (!has_transparent_hugepage() &&
			huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
		return -EINVAL;

	/* Do not override huge allocation policy with non-PMD sized mTHP */
	if (huge == SHMEM_HUGE_FORCE &&
	    huge_shmem_orders_inherit != BIT(HPAGE_PMD_ORDER))
		return -EINVAL;

	shmem_huge = huge;
	if (shmem_huge > SHMEM_HUGE_DENY)
		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
	return count;
}

struct kobj_attribute shmem_enabled_attr = __ATTR_RW(shmem_enabled);
static DEFINE_SPINLOCK(huge_shmem_orders_lock);

static ssize_t thpsize_shmem_enabled_show(struct kobject *kobj,
					  struct kobj_attribute *attr, char *buf)
{
	int order = to_thpsize(kobj)->order;
	const char *output;

	if (test_bit(order, &huge_shmem_orders_always))
		output = "[always] inherit within_size advise never";
	else if (test_bit(order, &huge_shmem_orders_inherit))
		output = "always [inherit] within_size advise never";
	else if (test_bit(order, &huge_shmem_orders_within_size))
		output = "always inherit [within_size] advise never";
	else if (test_bit(order, &huge_shmem_orders_madvise))
		output = "always inherit within_size [advise] never";
	else
		output = "always inherit within_size advise [never]";

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t thpsize_shmem_enabled_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	int order = to_thpsize(kobj)->order;
	ssize_t ret = count;

	if (sysfs_streq(buf, "always")) {
		spin_lock(&huge_shmem_orders_lock);
		clear_bit(order, &huge_shmem_orders_inherit);
		clear_bit(order, &huge_shmem_orders_madvise);
		clear_bit(order, &huge_shmem_orders_within_size);
		set_bit(order, &huge_shmem_orders_always);
		spin_unlock(&huge_shmem_orders_lock);
	} else if (sysfs_streq(buf, "inherit")) {
		/* Do not override huge allocation policy with non-PMD sized mTHP */
		if (shmem_huge == SHMEM_HUGE_FORCE &&
		    order != HPAGE_PMD_ORDER)
			return -EINVAL;

		spin_lock(&huge_shmem_orders_lock);
		clear_bit(order, &huge_shmem_orders_always);
		clear_bit(order, &huge_shmem_orders_madvise);
		clear_bit(order, &huge_shmem_orders_within_size);
		set_bit(order, &huge_shmem_orders_inherit);
		spin_unlock(&huge_shmem_orders_lock);
	} else if (sysfs_streq(buf, "within_size")) {
		spin_lock(&huge_shmem_orders_lock);
		clear_bit(order, &huge_shmem_orders_always);
		clear_bit(order, &huge_shmem_orders_inherit);
		clear_bit(order, &huge_shmem_orders_madvise);
		set_bit(order, &huge_shmem_orders_within_size);
		spin_unlock(&huge_shmem_orders_lock);
	} else if (sysfs_streq(buf, "advise")) {
		spin_lock(&huge_shmem_orders_lock);
		clear_bit(order, &huge_shmem_orders_always);
		clear_bit(order, &huge_shmem_orders_inherit);
		clear_bit(order, &huge_shmem_orders_within_size);
		set_bit(order, &huge_shmem_orders_madvise);
		spin_unlock(&huge_shmem_orders_lock);
	} else if (sysfs_streq(buf, "never")) {
		spin_lock(&huge_shmem_orders_lock);
		clear_bit(order, &huge_shmem_orders_always);
		clear_bit(order, &huge_shmem_orders_inherit);
		clear_bit(order, &huge_shmem_orders_within_size);
		clear_bit(order, &huge_shmem_orders_madvise);
		spin_unlock(&huge_shmem_orders_lock);
	} else {
		ret = -EINVAL;
	}

	return ret;
}

struct kobj_attribute thpsize_shmem_enabled_attr =
	__ATTR(shmem_enabled, 0644, thpsize_shmem_enabled_show, thpsize_shmem_enabled_store);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */
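
/*
 * Illustrative note (not part of the original source): the attributes above
 * back the tmpfs knobs under /sys/kernel/mm/transparent_hugepage/ described
 * in Documentation/admin-guide/mm/transhuge.rst.  From a shell they are
 * typically driven as:
 *
 *	echo within_size > /sys/kernel/mm/transparent_hugepage/shmem_enabled
 *	echo inherit > /sys/kernel/mm/transparent_hugepage/hugepages-64kB/shmem_enabled
 *
 * where the per-size "hugepages-<size>kB" directories exist only for the
 * folio orders the running kernel supports.
 */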

#else /* !CONFIG_SHMEM */

/*
 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
 *
 * This is intended for small system where the benefits of the full
 * shmem code (swap-backed and resource-limited) are outweighed by
 * their complexity. On systems without swap this code should be
 * effectively equivalent, but much lighter weight.
 */

static struct file_system_type shmem_fs_type = {
	.name		= "tmpfs",
	.init_fs_context = ramfs_init_fs_context,
	.parameters	= ramfs_fs_parameters,
	.kill_sb	= ramfs_kill_sb,
	.fs_flags	= FS_USERNS_MOUNT,
};

void __init shmem_init(void)
{
	BUG_ON(register_filesystem(&shmem_fs_type) != 0);

	shm_mnt = kern_mount(&shmem_fs_type);
	BUG_ON(IS_ERR(shm_mnt));
}

int shmem_unuse(unsigned int type)
{
	return 0;
}

int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
{
	return 0;
}

void shmem_unlock_mapping(struct address_space *mapping)
{
}

#ifdef CONFIG_MMU
unsigned long shmem_get_unmapped_area(struct file *file,
				      unsigned long addr, unsigned long len,
				      unsigned long pgoff, unsigned long flags)
{
	return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags);
}
#endif

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

#define shmem_vm_ops				generic_file_vm_ops
#define shmem_anon_vm_ops			generic_file_vm_ops
#define shmem_file_operations			ramfs_file_operations
#define shmem_acct_size(flags, size)		0
#define shmem_unacct_size(flags, size)		do {} while (0)

static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap,
				struct super_block *sb, struct inode *dir,
				umode_t mode, dev_t dev, unsigned long flags)
{
	struct inode *inode = ramfs_get_inode(sb, dir, mode, dev);
	return inode ? inode : ERR_PTR(-ENOSPC);
}

#endif /* CONFIG_SHMEM */

/* common code */

static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name,
			loff_t size, unsigned long flags, unsigned int i_flags)
{
	struct inode *inode;
	struct file *res;

	if (IS_ERR(mnt))
		return ERR_CAST(mnt);

	if (size < 0 || size > MAX_LFS_FILESIZE)
		return ERR_PTR(-EINVAL);

	if (shmem_acct_size(flags, size))
		return ERR_PTR(-ENOMEM);

	if (is_idmapped_mnt(mnt))
		return ERR_PTR(-EINVAL);

	inode = shmem_get_inode(&nop_mnt_idmap, mnt->mnt_sb, NULL,
				S_IFREG | S_IRWXUGO, 0, flags);
	if (IS_ERR(inode)) {
		shmem_unacct_size(flags, size);
		return ERR_CAST(inode);
	}
	inode->i_flags |= i_flags;
	inode->i_size = size;
	clear_nlink(inode);	/* It is unlinked */
	res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
	if (!IS_ERR(res))
		res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
				&shmem_file_operations);
	if (IS_ERR(res))
		iput(inode);
	return res;
}

/**
 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
 *	kernel internal.  There will be NO LSM permission checks against the
 *	underlying inode.  So users of this interface must do LSM checks at a
 *	higher layer.  The users are the big_key and shm implementations.  LSM
 *	checks are provided at the key or shm level rather than the inode.
 * @name: name for dentry (to be seen in /proc/<pid>/maps
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
{
	return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
}
EXPORT_SYMBOL_GPL(shmem_kernel_file_setup);

/**
 * shmem_file_setup - get an unlinked file living in tmpfs
 * @name: name for dentry (to be seen in /proc/<pid>/maps
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
	return __shmem_file_setup(shm_mnt, name, size, flags, 0);
}
EXPORT_SYMBOL_GPL(shmem_file_setup);
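
/*
 * Illustrative sketch (not part of the original source): how an in-kernel
 * user might back an object with an unlinked tmpfs file via the helpers
 * above.  "example_create_backing" is a hypothetical caller, not a real
 * kernel function.
 */
#if 0
static struct file *example_create_backing(loff_t size)
{
	/* VM_NORESERVE: charge pages as they are instantiated, not up front */
	struct file *filp = shmem_file_setup("example-backing", size, VM_NORESERVE);

	if (IS_ERR(filp))
		return filp;
	/* The file is unlinked: dropping the last reference frees its pages */
	return filp;
}
#endif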

/**
 * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
 * @mnt: the tmpfs mount where the file will be created
 * @name: name for dentry (to be seen in /proc/<pid>/maps
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
				       loff_t size, unsigned long flags)
{
	return __shmem_file_setup(mnt, name, size, flags, 0);
}
EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);

/**
 * shmem_zero_setup - setup a shared anonymous mapping
 * @vma: the vma to be mmapped is prepared by shmem_zero_setup
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
	struct file *file;
	loff_t size = vma->vm_end - vma->vm_start;

	/*
	 * Cloning a new file under mmap_lock leads to a lock ordering conflict
	 * between XFS directory reading and selinux: since this file is only
	 * accessible to the user through its mapping, use S_PRIVATE flag to
	 * bypass file security, in the same way as shmem_kernel_file_setup().
	 */
	file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
	if (IS_ERR(file))
		return PTR_ERR(file);

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = file;
	vma->vm_ops = &shmem_anon_vm_ops;

	return 0;
}
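
/*
 * Illustrative note (not part of the original source): shmem_zero_setup() is
 * what gives a shared anonymous mapping its backing object, e.g. after
 * userspace does
 *
 *	mmap(NULL, len, PROT_READ | PROT_WRITE,
 *	     MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 *
 * and the resulting vma then appears as "/dev/zero (deleted)" in
 * /proc/<pid>/maps.
 */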

/**
 * shmem_read_folio_gfp - read into page cache, using specified page allocation flags.
 * @mapping:	the folio's address_space
 * @index:	the folio index
 * @gfp:	the page allocator flags to use if allocating
 *
 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
 * with any new page allocations done using the specified allocation flags.
 * But read_cache_page_gfp() uses the ->read_folio() method: which does not
 * suit tmpfs, since it may have pages in swapcache, and needs to find those
 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
 *
 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
 */
struct folio *shmem_read_folio_gfp(struct address_space *mapping,
		pgoff_t index, gfp_t gfp)
{
#ifdef CONFIG_SHMEM
	struct inode *inode = mapping->host;
	struct folio *folio;
	int error;

	error = shmem_get_folio_gfp(inode, index, 0, &folio, SGP_CACHE,
			gfp, NULL, NULL);
	if (error)
		return ERR_PTR(error);

	folio_unlock(folio);
	return folio;
#else
	/*
	 * The tiny !SHMEM case uses ramfs without swap
	 */
	return mapping_read_folio_gfp(mapping, index, gfp);
#endif
}
EXPORT_SYMBOL_GPL(shmem_read_folio_gfp);

struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
					 pgoff_t index, gfp_t gfp)
{
	struct folio *folio = shmem_read_folio_gfp(mapping, index, gfp);
	struct page *page;

	if (IS_ERR(folio))
		return &folio->page;

	page = folio_file_page(folio, index);
	if (PageHWPoison(page)) {
		folio_put(folio);
		return ERR_PTR(-EIO);
	}

	return page;
}
EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
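
/*
 * Illustrative sketch (not part of the original source): the usage pattern
 * the comment above alludes to -- a driver reading one backing page while
 * mixing __GFP_NORETRY | __GFP_NOWARN into the mapping's gfp mask.
 * "example_get_backing_page" is a hypothetical caller.
 */
#if 0
static struct page *example_get_backing_page(struct file *filp, pgoff_t index)
{
	struct address_space *mapping = filp->f_mapping;
	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;

	/* Finds the page in page or swap cache, or allocates and fills it */
	return shmem_read_mapping_page_gfp(mapping, index, gfp);
}
#endif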