/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/khugepaged.h>
#include <linux/hugetlb.h>
#include <linux/frontswap.h>
#include <linux/fs_parser.h>

#include <asm/tlbflush.h> /* for arch/microblaze update_mmu_cache() */

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <uapi/linux/memfd.h>
#include <linux/userfaultfd_k.h>
#include <linux/rmap.h>
#include <linux/uuid.h>

#include <linux/uaccess.h>

#include "internal.h"

#define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
#define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

/*
 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
 * inode->i_private (with i_mutex making sure that it has only one user at
 * a time): we would prefer not to enlarge the shmem inode just for that.
 */
struct shmem_falloc {
	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
	pgoff_t start;		/* start of range currently being fallocated */
	pgoff_t next;		/* the next page offset to be fallocated */
	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
};

struct shmem_options {
	unsigned long long blocks;
	unsigned long long inodes;
	struct mempolicy *mpol;
	kuid_t uid;
	kgid_t gid;
	umode_t mode;
	bool full_inums;
	int huge;
	int seen;
#define SHMEM_SEEN_BLOCKS 1
#define SHMEM_SEEN_INODES 2
#define SHMEM_SEEN_HUGE 4
#define SHMEM_SEEN_INUMS 8
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages() / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	unsigned long nr_pages = totalram_pages();

	return min(nr_pages - totalhigh_pages(), nr_pages / 2);
}
#endif

static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
static int shmem_replace_page(struct page **pagep, gfp_t gfp,
				struct shmem_inode_info *info, pgoff_t index);
static int shmem_swapin_page(struct inode *inode, pgoff_t index,
			     struct page **pagep, enum sgp_type sgp,
			     gfp_t gfp, struct vm_area_struct *vma,
			     vm_fault_t *fault_type);
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
		struct page **pagep, enum sgp_type sgp,
		gfp_t gfp, struct vm_area_struct *vma,
		struct vm_fault *vmf, vm_fault_t *fault_type);

int shmem_getpage(struct inode *inode, pgoff_t index,
		struct page **pagep, enum sgp_type sgp)
{
	return shmem_getpage_gfp(inode, index, pagep, sgp,
		mapping_gfp_mask(inode->i_mapping), NULL, NULL, NULL);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

static inline int shmem_reacct_size(unsigned long flags,
		loff_t oldsize, loff_t newsize)
{
	if (!(flags & VM_NORESERVE)) {
		if (VM_ACCT(newsize) > VM_ACCT(oldsize))
			return security_vm_enough_memory_mm(current->mm,
					VM_ACCT(newsize) - VM_ACCT(oldsize));
		else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
			vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
	}
	return 0;
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow large sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
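/*
 * For example, with 4K pages VM_ACCT(5000) = PAGE_ALIGN(5000) >> PAGE_SHIFT
 * = 2: shmem_acct_size() above charges those two pages up front for SHM and
 * shared anonymous objects, whereas the tmpfs path charges them one by one
 * through shmem_acct_block() below as pages are actually allocated.
 */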
static inline int shmem_acct_block(unsigned long flags, long pages)
{
	if (!(flags & VM_NORESERVE))
		return 0;

	return security_vm_enough_memory_mm(current->mm,
			pages * VM_ACCT(PAGE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
}

static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (shmem_acct_block(info->flags, pages))
		return false;

	if (sbinfo->max_blocks) {
		if (percpu_counter_compare(&sbinfo->used_blocks,
					   sbinfo->max_blocks - pages) > 0)
			goto unacct;
		percpu_counter_add(&sbinfo->used_blocks, pages);
	}

	return true;

unacct:
	shmem_unacct_blocks(info->flags, pages);
	return false;
}

static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	if (sbinfo->max_blocks)
		percpu_counter_sub(&sbinfo->used_blocks, pages);
	shmem_unacct_blocks(info->flags, pages);
}

static const struct super_operations shmem_ops;
const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;
static struct file_system_type shmem_fs_type;

bool vma_is_shmem(struct vm_area_struct *vma)
{
	return vma->vm_ops == &shmem_vm_ops;
}

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

/*
 * shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and
 * produces a novel ino for the newly allocated inode.
 *
 * It may also be called when making a hard link to permit the space needed by
 * each dentry. However, in that case, no new inode number is needed since that
 * internally draws from another pool of inode numbers (currently global
 * get_next_ino()). This case is indicated by passing NULL as inop.
 */
#define SHMEM_INO_BATCH 1024
static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	ino_t ino;

	if (!(sb->s_flags & SB_KERNMOUNT)) {
		spin_lock(&sbinfo->stat_lock);
		if (sbinfo->max_inodes) {
			if (!sbinfo->free_inodes) {
				spin_unlock(&sbinfo->stat_lock);
				return -ENOSPC;
			}
			sbinfo->free_inodes--;
		}
		if (inop) {
			ino = sbinfo->next_ino++;
			if (unlikely(is_zero_ino(ino)))
				ino = sbinfo->next_ino++;
			if (unlikely(!sbinfo->full_inums &&
				     ino > UINT_MAX)) {
				/*
				 * Emulate get_next_ino uint wraparound for
				 * compatibility
				 */
				if (IS_ENABLED(CONFIG_64BIT))
					pr_warn("%s: inode number overflow on device %d, consider using inode64 mount option\n",
						__func__, MINOR(sb->s_dev));
				sbinfo->next_ino = 1;
				ino = sbinfo->next_ino++;
			}
			*inop = ino;
		}
		spin_unlock(&sbinfo->stat_lock);
	} else if (inop) {
		/*
		 * __shmem_file_setup, one of our callers, is lock-free: it
		 * doesn't hold stat_lock in shmem_reserve_inode since
		 * max_inodes is always 0, and is called from potentially
		 * unknown contexts. As such, use a per-cpu batched allocator
		 * which doesn't require the per-sb stat_lock unless we are at
		 * the batch boundary.
		 *
		 * We don't need to worry about inode{32,64} since SB_KERNMOUNT
		 * shmem mounts are not exposed to userspace, so we don't need
		 * to worry about things like glibc compatibility.
		 */
		ino_t *next_ino;
		next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu());
		ino = *next_ino;
		if (unlikely(ino % SHMEM_INO_BATCH == 0)) {
			spin_lock(&sbinfo->stat_lock);
			ino = sbinfo->next_ino;
			sbinfo->next_ino += SHMEM_INO_BATCH;
			spin_unlock(&sbinfo->stat_lock);
			if (unlikely(is_zero_ino(ino)))
				ino++;
		}
		*inop = ino;
		*next_ino = ++ino;
		put_cpu();
	}

	return 0;
}
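/*
 * Note on the SB_KERNMOUNT path above: with SHMEM_INO_BATCH of 1024, a CPU
 * only takes stat_lock when its private batch runs out (ino % 1024 == 0),
 * i.e. roughly one lock round-trip per 1024 inode allocations per CPU; all
 * other allocations are served from the per-cpu ino_batch cursor.
 */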
static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		inode->i_blocks -= freed * BLOCKS_PER_PAGE;
		shmem_inode_unacct_blocks(inode, freed);
	}
}

bool shmem_charge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long flags;

	if (!shmem_inode_acct_block(inode, pages))
		return false;

	/* nrpages adjustment first, then shmem_recalc_inode() when balanced */
	inode->i_mapping->nrpages += pages;

	spin_lock_irqsave(&info->lock, flags);
	info->alloced += pages;
	inode->i_blocks += pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);

	return true;
}

void shmem_uncharge(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long flags;

	/* nrpages adjustment done by __delete_from_page_cache() or caller */

	spin_lock_irqsave(&info->lock, flags);
	info->alloced -= pages;
	inode->i_blocks -= pages * BLOCKS_PER_PAGE;
	shmem_recalc_inode(inode);
	spin_unlock_irqrestore(&info->lock, flags);

	shmem_inode_unacct_blocks(inode, pages);
}

/*
 * Replace item expected in xarray by a new item, while holding xa_lock.
 */
static int shmem_replace_entry(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	XA_STATE(xas, &mapping->i_pages, index);
	void *item;

	VM_BUG_ON(!expected);
	VM_BUG_ON(!replacement);
	item = xas_load(&xas);
	if (item != expected)
		return -ENOENT;
	xas_store(&xas, replacement);
	return 0;
}

/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking page is not enough: by the time a SwapCache page is locked, it
 * might be reused, and again be SwapCache, using the same swap as before.
 */
static bool shmem_confirm_swap(struct address_space *mapping,
			       pgoff_t index, swp_entry_t swap)
{
	return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
}

/*
 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
 *
 * SHMEM_HUGE_NEVER:
 *	disables huge pages for the mount;
 * SHMEM_HUGE_ALWAYS:
 *	enables huge pages for the mount;
 * SHMEM_HUGE_WITHIN_SIZE:
 *	only allocate huge pages if the page will be fully within i_size,
 *	also respect fadvise()/madvise() hints;
 * SHMEM_HUGE_ADVISE:
 *	only allocate huge pages if requested with fadvise()/madvise();
 */

#define SHMEM_HUGE_NEVER	0
#define SHMEM_HUGE_ALWAYS	1
#define SHMEM_HUGE_WITHIN_SIZE	2
#define SHMEM_HUGE_ADVISE	3

/*
 * Special values.
 * Only can be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
 *
 * SHMEM_HUGE_DENY:
 *	disables huge on shm_mnt and all mounts, for emergency use;
 * SHMEM_HUGE_FORCE:
 *	enables huge on shm_mnt and all mounts, w/o needing option, for testing;
 *
 */
#define SHMEM_HUGE_DENY		(-1)
#define SHMEM_HUGE_FORCE	(-2)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* ifdef here to avoid bloating shmem.o when not necessary */

static int shmem_huge __read_mostly;
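/*
 * Example of selecting the per-mount policy documented above (the values are
 * parsed by shmem_parse_huge() below):
 *
 *	mount -t tmpfs -o huge=within_size,size=1G tmpfs /mnt/tmp
 *
 * whereas SHMEM_HUGE_DENY and SHMEM_HUGE_FORCE can only be set through
 * /sys/kernel/mm/transparent_hugepage/shmem_enabled.
 */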

#if defined(CONFIG_SYSFS)
static int shmem_parse_huge(const char *str)
{
	if (!strcmp(str, "never"))
		return SHMEM_HUGE_NEVER;
	if (!strcmp(str, "always"))
		return SHMEM_HUGE_ALWAYS;
	if (!strcmp(str, "within_size"))
		return SHMEM_HUGE_WITHIN_SIZE;
	if (!strcmp(str, "advise"))
		return SHMEM_HUGE_ADVISE;
	if (!strcmp(str, "deny"))
		return SHMEM_HUGE_DENY;
	if (!strcmp(str, "force"))
		return SHMEM_HUGE_FORCE;
	return -EINVAL;
}
#endif

#if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
static const char *shmem_format_huge(int huge)
{
	switch (huge) {
	case SHMEM_HUGE_NEVER:
		return "never";
	case SHMEM_HUGE_ALWAYS:
		return "always";
	case SHMEM_HUGE_WITHIN_SIZE:
		return "within_size";
	case SHMEM_HUGE_ADVISE:
		return "advise";
	case SHMEM_HUGE_DENY:
		return "deny";
	case SHMEM_HUGE_FORCE:
		return "force";
	default:
		VM_BUG_ON(1);
		return "bad_val";
	}
}
#endif

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	LIST_HEAD(list), *pos, *next;
	LIST_HEAD(to_remove);
	struct inode *inode;
	struct shmem_inode_info *info;
	struct page *page;
	unsigned long batch = sc ? sc->nr_to_scan : 128;
	int removed = 0, split = 0;

	if (list_empty(&sbinfo->shrinklist))
		return SHRINK_STOP;

	spin_lock(&sbinfo->shrinklist_lock);
	list_for_each_safe(pos, next, &sbinfo->shrinklist) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);

		/* pin the inode */
		inode = igrab(&info->vfs_inode);

		/* inode is about to be evicted */
		if (!inode) {
			list_del_init(&info->shrinklist);
			removed++;
			goto next;
		}

		/* Check if there's anything to gain */
		if (round_up(inode->i_size, PAGE_SIZE) ==
				round_up(inode->i_size, HPAGE_PMD_SIZE)) {
			list_move(&info->shrinklist, &to_remove);
			removed++;
			goto next;
		}

		list_move(&info->shrinklist, &list);
next:
		if (!--batch)
			break;
	}
	spin_unlock(&sbinfo->shrinklist_lock);

	list_for_each_safe(pos, next, &to_remove) {
		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;
		list_del_init(&info->shrinklist);
		iput(inode);
	}

	list_for_each_safe(pos, next, &list) {
		int ret;

		info = list_entry(pos, struct shmem_inode_info, shrinklist);
		inode = &info->vfs_inode;

		if (nr_to_split && split >= nr_to_split)
			goto leave;

		page = find_get_page(inode->i_mapping,
				(inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT);
		if (!page)
			goto drop;

		/* No huge page at the end of the file: nothing to split */
		if (!PageTransHuge(page)) {
			put_page(page);
			goto drop;
		}

		/*
		 * Leave the inode on the list if we failed to lock
		 * the page at this time.
		 *
		 * Waiting for the lock may lead to deadlock in the
		 * reclaim path.
		 */
		if (!trylock_page(page)) {
			put_page(page);
			goto leave;
		}

		ret = split_huge_page(page);
		unlock_page(page);
		put_page(page);

		/* If split failed leave the inode on the list */
		if (ret)
			goto leave;

		split++;
drop:
		list_del_init(&info->shrinklist);
		removed++;
leave:
		iput(inode);
	}

	spin_lock(&sbinfo->shrinklist_lock);
	list_splice_tail(&list, &sbinfo->shrinklist);
	sbinfo->shrinklist_len -= removed;
	spin_unlock(&sbinfo->shrinklist_lock);

	return split;
}

static long shmem_unused_huge_scan(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (!READ_ONCE(sbinfo->shrinklist_len))
		return SHRINK_STOP;

	return shmem_unused_huge_shrink(sbinfo, sc, 0);
}

static long shmem_unused_huge_count(struct super_block *sb,
		struct shrink_control *sc)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	return READ_ONCE(sbinfo->shrinklist_len);
}
#else /* !CONFIG_TRANSPARENT_HUGEPAGE */

#define shmem_huge SHMEM_HUGE_DENY

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
		struct shrink_control *sc, unsigned long nr_to_split)
{
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline bool is_huge_enabled(struct shmem_sb_info *sbinfo)
{
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    (shmem_huge == SHMEM_HUGE_FORCE || sbinfo->huge) &&
	    shmem_huge != SHMEM_HUGE_DENY)
		return true;
	return false;
}

/*
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct page *page,
				   struct address_space *mapping,
				   pgoff_t index, void *expected, gfp_t gfp,
				   struct mm_struct *charge_mm)
{
	XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page));
	unsigned long i = 0;
	unsigned long nr = compound_nr(page);
	int error;

	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(index != round_down(index, nr), page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
	VM_BUG_ON(expected && PageTransHuge(page));

	page_ref_add(page, nr);
	page->mapping = mapping;
	page->index = index;

	if (!PageSwapCache(page)) {
		error = mem_cgroup_charge(page, charge_mm, gfp);
		if (error) {
			if (PageTransHuge(page)) {
				count_vm_event(THP_FILE_FALLBACK);
				count_vm_event(THP_FILE_FALLBACK_CHARGE);
			}
			goto error;
		}
	}
	cgroup_throttle_swaprate(page, gfp);

	do {
		void *entry;
		xas_lock_irq(&xas);
		entry = xas_find_conflict(&xas);
		if (entry != expected)
			xas_set_err(&xas, -EEXIST);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
next:
		xas_store(&xas, page);
		if (++i < nr) {
			xas_next(&xas);
			goto next;
		}
		if (PageTransHuge(page)) {
			count_vm_event(THP_FILE_ALLOC);
			__mod_lruvec_page_state(page, NR_SHMEM_THPS, nr);
		}
		mapping->nrpages += nr;
		__mod_lruvec_page_state(page, NR_FILE_PAGES, nr);
		__mod_lruvec_page_state(page, NR_SHMEM, nr);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (xas_error(&xas)) {
		error = xas_error(&xas);
		goto error;
	}

	return 0;
error:
	page->mapping = NULL;
	page_ref_sub(page, nr);
	return error;
}

/*
 * Like delete_from_page_cache, but substitutes swap for page.
 */
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
{
	struct address_space *mapping = page->mapping;
	int error;

	VM_BUG_ON_PAGE(PageCompound(page), page);

	xa_lock_irq(&mapping->i_pages);
	error = shmem_replace_entry(mapping, page->index, page, radswap);
	page->mapping = NULL;
	mapping->nrpages--;
	__dec_lruvec_page_state(page, NR_FILE_PAGES);
	__dec_lruvec_page_state(page, NR_SHMEM);
	xa_unlock_irq(&mapping->i_pages);
	put_page(page);
	BUG_ON(error);
}

/*
 * Remove swap entry from page cache, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
{
	void *old;

	old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
	if (old != radswap)
		return -ENOENT;
	free_swap_and_cache(radix_to_swp_entry(radswap));
	return 0;
}

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given offsets are swapped out.
 *
 * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_partial_swap_usage(struct address_space *mapping,
						pgoff_t start, pgoff_t end)
{
	XA_STATE(xas, &mapping->i_pages, start);
	struct page *page;
	unsigned long swapped = 0;

	rcu_read_lock();
	xas_for_each(&xas, page, end - 1) {
		if (xas_retry(&xas, page))
			continue;
		if (xa_is_value(page))
			swapped++;

		if (need_resched()) {
			xas_pause(&xas);
			cond_resched_rcu();
		}
	}

	rcu_read_unlock();

	return swapped << PAGE_SHIFT;
}

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given vma is swapped out.
 *
 * This is safe to call without i_mutex or the i_pages lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_swap_usage(struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct address_space *mapping = inode->i_mapping;
	unsigned long swapped;

	/* Be careful as we don't hold info->lock */
	swapped = READ_ONCE(info->swapped);

	/*
	 * The easier cases are when the shmem object has nothing in swap, or
	 * the vma maps it whole. Then we can simply use the stats that we
	 * already track.
	 */
	if (!swapped)
		return 0;

	if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
		return swapped << PAGE_SHIFT;

	/* Here comes the more involved part */
	return shmem_partial_swap_usage(mapping,
			linear_page_index(vma, vma->vm_start),
			linear_page_index(vma, vma->vm_end));
}

/*
 * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t index = 0;

	pagevec_init(&pvec);
	/*
	 * Minor point, but we might as well stop if someone else SHM_LOCKs it.
	 */
	while (!mapping_unevictable(mapping)) {
		if (!pagevec_lookup(&pvec, mapping, &index))
			break;
		check_move_unevictable_pages(&pvec);
		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Check whether a hole-punch or truncation needs to split a huge page,
 * returning true if no split was required, or the split has been successful.
 *
 * Eviction (or truncation to 0 size) should never need to split a huge page;
 * but in rare cases might do so, if shmem_undo_range() failed to trylock on
 * head, and then succeeded to trylock on tail.
 *
 * A split can only succeed when there are no additional references on the
 * huge page: so the split below relies upon find_get_entries() having stopped
 * when it found a subpage of the huge page, without getting further references.
 */
static bool shmem_punch_compound(struct page *page, pgoff_t start, pgoff_t end)
{
	if (!PageTransCompound(page))
		return true;

	/* Just proceed to delete a huge page wholly within the range punched */
	if (PageHead(page) &&
	    page->index >= start && page->index + HPAGE_PMD_NR <= end)
		return true;

	/* Try to split huge page, so we can truly punch the hole or truncate */
	return split_huge_page(page) >= 0;
}

/*
 * Remove range of pages and swap entries from page cache, and free them.
 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
 */
static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
								 bool unfalloc)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	pgoff_t end = (lend + 1) >> PAGE_SHIFT;
	unsigned int partial_start = lstart & (PAGE_SIZE - 1);
	unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t indices[PAGEVEC_SIZE];
	long nr_swaps_freed = 0;
	pgoff_t index;
	int i;

	if (lend == -1)
		end = -1;	/* unsigned, so actually very big */

	pagevec_init(&pvec);
	index = start;
	while (index < end && find_lock_entries(mapping, index, end - 1,
			&pvec, indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];

			if (xa_is_value(page)) {
				if (unfalloc)
					continue;
				nr_swaps_freed += !shmem_free_swap(mapping,
								index, page);
				continue;
			}
			index += thp_nr_pages(page) - 1;

			if (!unfalloc || !PageUptodate(page))
				truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}

	if (partial_start) {
		struct page *page = NULL;
		shmem_getpage(inode, start - 1, &page, SGP_READ);
		if (page) {
			unsigned int top = PAGE_SIZE;
			if (start > end) {
				top = partial_end;
				partial_end = 0;
			}
			zero_user_segment(page, partial_start, top);
			set_page_dirty(page);
			unlock_page(page);
			put_page(page);
		}
	}
	if (partial_end) {
		struct page *page = NULL;
		shmem_getpage(inode, end, &page, SGP_READ);
		if (page) {
			zero_user_segment(page, 0, partial_end);
			set_page_dirty(page);
			unlock_page(page);
			put_page(page);
		}
	}
	if (start >= end)
		return;

	index = start;
	while (index < end) {
		cond_resched();

		if (!find_get_entries(mapping, index, end - 1, &pvec,
				indices)) {
			/* If all gone or hole-punch or unfalloc, we're done */
			if (index == start || end != -1)
				break;
			/* But if truncating, restart to make sure all gone */
			index = start;
			continue;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (xa_is_value(page)) {
				if (unfalloc)
					continue;
				if (shmem_free_swap(mapping, index, page)) {
					/* Swap was replaced by page: retry */
					index--;
					break;
				}
				nr_swaps_freed++;
				continue;
			}

			lock_page(page);

			if (!unfalloc || !PageUptodate(page)) {
				if (page_mapping(page) != mapping) {
					/* Page was replaced by swap: retry */
					unlock_page(page);
999 index--; 1110 break 1000 break; 1111 } 1001 } 1112 VM_BUG_ON_FOL !! 1002 VM_BUG_ON_PAGE(PageWriteback(page), page); 1113 !! 1003 if (shmem_punch_compound(page, start, end)) 1114 !! 1004 truncate_inode_page(mapping, page); 1115 if (!folio_te !! 1005 else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { 1116 trunc !! 1006 /* Wipe the page and don't get stuck */ 1117 } else if (tr !! 1007 clear_highpage(page); 1118 /* !! 1008 flush_dcache_page(page); 1119 * If !! 1009 set_page_dirty(page); 1120 * th !! 1010 if (index < 1121 * Ot !! 1011 round_up(start, HPAGE_PMD_NR)) 1122 * dr !! 1012 start = index + 1; 1123 * ze << 1124 * is << 1125 */ << 1126 if (! << 1127 << 1128 << 1129 << 1130 } << 1131 } 1013 } 1132 } 1014 } 1133 folio_unlock(folio); !! 1015 unlock_page(page); 1134 } 1016 } 1135 folio_batch_remove_exceptiona !! 1017 pagevec_remove_exceptionals(&pvec); 1136 folio_batch_release(&fbatch); !! 1018 pagevec_release(&pvec); >> 1019 index++; 1137 } 1020 } 1138 1021 1139 shmem_recalc_inode(inode, 0, -nr_swap !! 1022 spin_lock_irq(&info->lock); >> 1023 info->swapped -= nr_swaps_freed; >> 1024 shmem_recalc_inode(inode); >> 1025 spin_unlock_irq(&info->lock); 1140 } 1026 } 1141 1027 1142 void shmem_truncate_range(struct inode *inode 1028 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) 1143 { 1029 { 1144 shmem_undo_range(inode, lstart, lend, 1030 shmem_undo_range(inode, lstart, lend, false); 1145 inode_set_mtime_to_ts(inode, inode_se !! 1031 inode->i_ctime = inode->i_mtime = current_time(inode); 1146 inode_inc_iversion(inode); << 1147 } 1032 } 1148 EXPORT_SYMBOL_GPL(shmem_truncate_range); 1033 EXPORT_SYMBOL_GPL(shmem_truncate_range); 1149 1034 1150 static int shmem_getattr(struct mnt_idmap *id !! 1035 static int shmem_getattr(struct user_namespace *mnt_userns, 1151 const struct path *p 1036 const struct path *path, struct kstat *stat, 1152 u32 request_mask, un 1037 u32 request_mask, unsigned int query_flags) 1153 { 1038 { 1154 struct inode *inode = path->dentry->d 1039 struct inode *inode = path->dentry->d_inode; 1155 struct shmem_inode_info *info = SHMEM 1040 struct shmem_inode_info *info = SHMEM_I(inode); >> 1041 struct shmem_sb_info *sb_info = SHMEM_SB(inode->i_sb); 1156 1042 1157 if (info->alloced - info->swapped != !! 1043 if (info->alloced - info->swapped != inode->i_mapping->nrpages) { 1158 shmem_recalc_inode(inode, 0, !! 1044 spin_lock_irq(&info->lock); 1159 !! 1045 shmem_recalc_inode(inode); 1160 if (info->fsflags & FS_APPEND_FL) !! 1046 spin_unlock_irq(&info->lock); 1161 stat->attributes |= STATX_ATT !! 1047 } 1162 if (info->fsflags & FS_IMMUTABLE_FL) !! 1048 generic_fillattr(&init_user_ns, inode, stat); 1163 stat->attributes |= STATX_ATT << 1164 if (info->fsflags & FS_NODUMP_FL) << 1165 stat->attributes |= STATX_ATT << 1166 stat->attributes_mask |= (STATX_ATTR_ << 1167 STATX_ATTR_IMMUTABLE << 1168 STATX_ATTR_NODUMP); << 1169 inode_lock_shared(inode); << 1170 generic_fillattr(idmap, request_mask, << 1171 inode_unlock_shared(inode); << 1172 1049 1173 if (shmem_huge_global_enabled(inode, !! 1050 if (is_huge_enabled(sb_info)) 1174 stat->blksize = HPAGE_PMD_SIZ 1051 stat->blksize = HPAGE_PMD_SIZE; 1175 1052 1176 if (request_mask & STATX_BTIME) { << 1177 stat->result_mask |= STATX_BT << 1178 stat->btime.tv_sec = info->i_ << 1179 stat->btime.tv_nsec = info->i << 1180 } << 1181 << 1182 return 0; 1053 return 0; 1183 } 1054 } 1184 1055 1185 static int shmem_setattr(struct mnt_idmap *id !! 
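
/*
 * A minimal userspace sketch (not from this file) of the operation the
 * truncation/hole-punch code above ultimately services: punching a hole
 * in a shmem-backed file.  fallocate(FALLOC_FL_PUNCH_HOLE) on tmpfs/memfd
 * ends up in shmem_truncate_range()/shmem_undo_range(), and the released
 * blocks are visible in st_blocks.  Sizes are illustrative and error
 * handling is omitted; assumes a kernel with memfd_create() and tmpfs
 * hole-punch support.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>	/* FALLOC_FL_PUNCH_HOLE, FALLOC_FL_KEEP_SIZE */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>		/* memfd_create() */
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	int fd = memfd_create("punch-demo", 0);	/* shmem-backed file */
	char buf[4096];
	struct stat st;
	int i;

	memset(buf, 'x', sizeof(buf));
	for (i = 0; i < 16; i++)		/* populate 16 pages */
		pwrite(fd, buf, sizeof(buf), (off_t)i * 4096);

	fstat(fd, &st);
	printf("before punch: %lld blocks\n", (long long)st.st_blocks);

	/* drop pages 4..11 but keep i_size: a hole punch, not a truncate */
	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		  4 * 4096, 8 * 4096);

	fstat(fd, &st);
	printf("after punch:  %lld blocks\n", (long long)st.st_blocks);
	close(fd);
	return 0;
}
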
1056 static int shmem_setattr(struct user_namespace *mnt_userns, 1186 struct dentry *dentr 1057 struct dentry *dentry, struct iattr *attr) 1187 { 1058 { 1188 struct inode *inode = d_inode(dentry) 1059 struct inode *inode = d_inode(dentry); 1189 struct shmem_inode_info *info = SHMEM 1060 struct shmem_inode_info *info = SHMEM_I(inode); >> 1061 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 1190 int error; 1062 int error; 1191 bool update_mtime = false; << 1192 bool update_ctime = true; << 1193 1063 1194 error = setattr_prepare(idmap, dentry !! 1064 error = setattr_prepare(&init_user_ns, dentry, attr); 1195 if (error) 1065 if (error) 1196 return error; 1066 return error; 1197 1067 1198 if ((info->seals & F_SEAL_EXEC) && (a << 1199 if ((inode->i_mode ^ attr->ia << 1200 return -EPERM; << 1201 } << 1202 } << 1203 << 1204 if (S_ISREG(inode->i_mode) && (attr-> 1068 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { 1205 loff_t oldsize = inode->i_siz 1069 loff_t oldsize = inode->i_size; 1206 loff_t newsize = attr->ia_siz 1070 loff_t newsize = attr->ia_size; 1207 1071 1208 /* protected by i_rwsem */ !! 1072 /* protected by i_mutex */ 1209 if ((newsize < oldsize && (in 1073 if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) || 1210 (newsize > oldsize && (in 1074 (newsize > oldsize && (info->seals & F_SEAL_GROW))) 1211 return -EPERM; 1075 return -EPERM; 1212 1076 1213 if (newsize != oldsize) { 1077 if (newsize != oldsize) { 1214 error = shmem_reacct_ 1078 error = shmem_reacct_size(SHMEM_I(inode)->flags, 1215 oldsi 1079 oldsize, newsize); 1216 if (error) 1080 if (error) 1217 return error; 1081 return error; 1218 i_size_write(inode, n 1082 i_size_write(inode, newsize); 1219 update_mtime = true; !! 1083 inode->i_ctime = inode->i_mtime = current_time(inode); 1220 } else { << 1221 update_ctime = false; << 1222 } 1084 } 1223 if (newsize <= oldsize) { 1085 if (newsize <= oldsize) { 1224 loff_t holebegin = ro 1086 loff_t holebegin = round_up(newsize, PAGE_SIZE); 1225 if (oldsize > holebeg 1087 if (oldsize > holebegin) 1226 unmap_mapping 1088 unmap_mapping_range(inode->i_mapping, 1227 1089 holebegin, 0, 1); 1228 if (info->alloced) 1090 if (info->alloced) 1229 shmem_truncat 1091 shmem_truncate_range(inode, 1230 1092 newsize, (loff_t)-1); 1231 /* unmap again to rem 1093 /* unmap again to remove racily COWed private pages */ 1232 if (oldsize > holebeg 1094 if (oldsize > holebegin) 1233 unmap_mapping 1095 unmap_mapping_range(inode->i_mapping, 1234 1096 holebegin, 0, 1); 1235 } << 1236 } << 1237 << 1238 if (is_quota_modification(idmap, inod << 1239 error = dquot_initialize(inod << 1240 if (error) << 1241 return error; << 1242 } << 1243 1097 1244 /* Transfer quota accounting */ !! 1098 /* 1245 if (i_uid_needs_update(idmap, attr, i !! 1099 * Part of the huge page can be beyond i_size: subject 1246 i_gid_needs_update(idmap, attr, i !! 1100 * to shrink under memory pressure. 1247 error = dquot_transfer(idmap, !! 1101 */ 1248 if (error) !! 1102 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { 1249 return error; !! 1103 spin_lock(&sbinfo->shrinklist_lock); >> 1104 /* >> 1105 * _careful to defend against unlocked access to >> 1106 * ->shrink_list in shmem_unused_huge_shrink() >> 1107 */ >> 1108 if (list_empty_careful(&info->shrinklist)) { >> 1109 list_add_tail(&info->shrinklist, >> 1110 &sbinfo->shrinklist); >> 1111 sbinfo->shrinklist_len++; >> 1112 } >> 1113 spin_unlock(&sbinfo->shrinklist_lock); >> 1114 } >> 1115 } 1250 } 1116 } 1251 1117 1252 setattr_copy(idmap, inode, attr); !! 
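
/*
 * A small userspace sketch (not from this file) of the seal checks above:
 * once F_SEAL_SHRINK and F_SEAL_GROW are applied to a memfd, the size
 * checks in shmem_setattr() reject ftruncate() with EPERM in both
 * directions.  Sizes are illustrative and error handling is minimal.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>		/* fcntl(), F_ADD_SEALS, F_SEAL_* */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>		/* memfd_create(), MFD_ALLOW_SEALING */
#include <unistd.h>

int main(void)
{
	int fd = memfd_create("seal-demo", MFD_ALLOW_SEALING);

	ftruncate(fd, 8 * 4096);			/* initial size: fine */
	fcntl(fd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW);

	if (ftruncate(fd, 4 * 4096) < 0)		/* shrink is sealed */
		printf("shrink blocked: %s\n", strerror(errno));
	if (ftruncate(fd, 16 * 4096) < 0)		/* grow is sealed */
		printf("grow blocked: %s\n", strerror(errno));

	close(fd);
	return 0;
}
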
1118 setattr_copy(&init_user_ns, inode, attr); 1253 if (attr->ia_valid & ATTR_MODE) 1119 if (attr->ia_valid & ATTR_MODE) 1254 error = posix_acl_chmod(idmap !! 1120 error = posix_acl_chmod(&init_user_ns, inode, inode->i_mode); 1255 if (!error && update_ctime) { << 1256 inode_set_ctime_current(inode << 1257 if (update_mtime) << 1258 inode_set_mtime_to_ts << 1259 inode_inc_iversion(inode); << 1260 } << 1261 return error; 1121 return error; 1262 } 1122 } 1263 1123 1264 static void shmem_evict_inode(struct inode *i 1124 static void shmem_evict_inode(struct inode *inode) 1265 { 1125 { 1266 struct shmem_inode_info *info = SHMEM 1126 struct shmem_inode_info *info = SHMEM_I(inode); 1267 struct shmem_sb_info *sbinfo = SHMEM_ 1127 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 1268 size_t freed = 0; << 1269 1128 1270 if (shmem_mapping(inode->i_mapping)) 1129 if (shmem_mapping(inode->i_mapping)) { 1271 shmem_unacct_size(info->flags 1130 shmem_unacct_size(info->flags, inode->i_size); 1272 inode->i_size = 0; 1131 inode->i_size = 0; 1273 mapping_set_exiting(inode->i_ << 1274 shmem_truncate_range(inode, 0 1132 shmem_truncate_range(inode, 0, (loff_t)-1); 1275 if (!list_empty(&info->shrink 1133 if (!list_empty(&info->shrinklist)) { 1276 spin_lock(&sbinfo->sh 1134 spin_lock(&sbinfo->shrinklist_lock); 1277 if (!list_empty(&info 1135 if (!list_empty(&info->shrinklist)) { 1278 list_del_init 1136 list_del_init(&info->shrinklist); 1279 sbinfo->shrin 1137 sbinfo->shrinklist_len--; 1280 } 1138 } 1281 spin_unlock(&sbinfo-> 1139 spin_unlock(&sbinfo->shrinklist_lock); 1282 } 1140 } 1283 while (!list_empty(&info->swa 1141 while (!list_empty(&info->swaplist)) { 1284 /* Wait while shmem_u 1142 /* Wait while shmem_unuse() is scanning this inode... */ 1285 wait_var_event(&info- 1143 wait_var_event(&info->stop_eviction, 1286 !atomi 1144 !atomic_read(&info->stop_eviction)); 1287 mutex_lock(&shmem_swa 1145 mutex_lock(&shmem_swaplist_mutex); 1288 /* ...but beware of t 1146 /* ...but beware of the race if we peeked too early */ 1289 if (!atomic_read(&inf 1147 if (!atomic_read(&info->stop_eviction)) 1290 list_del_init 1148 list_del_init(&info->swaplist); 1291 mutex_unlock(&shmem_s 1149 mutex_unlock(&shmem_swaplist_mutex); 1292 } 1150 } 1293 } 1151 } 1294 1152 1295 simple_xattrs_free(&info->xattrs, sbi !! 1153 simple_xattrs_free(&info->xattrs); 1296 shmem_free_inode(inode->i_sb, freed); << 1297 WARN_ON(inode->i_blocks); 1154 WARN_ON(inode->i_blocks); >> 1155 shmem_free_inode(inode->i_sb); 1298 clear_inode(inode); 1156 clear_inode(inode); 1299 #ifdef CONFIG_TMPFS_QUOTA << 1300 dquot_free_inode(inode); << 1301 dquot_drop(inode); << 1302 #endif << 1303 } 1157 } 1304 1158 >> 1159 extern struct swap_info_struct *swap_info[]; >> 1160 1305 static int shmem_find_swap_entries(struct add 1161 static int shmem_find_swap_entries(struct address_space *mapping, 1306 pgoff_t st !! 1162 pgoff_t start, unsigned int nr_entries, 1307 pgoff_t *i !! 1163 struct page **entries, pgoff_t *indices, >> 1164 unsigned int type, bool frontswap) 1308 { 1165 { 1309 XA_STATE(xas, &mapping->i_pages, star 1166 XA_STATE(xas, &mapping->i_pages, start); 1310 struct folio *folio; !! 1167 struct page *page; 1311 swp_entry_t entry; 1168 swp_entry_t entry; >> 1169 unsigned int ret = 0; >> 1170 >> 1171 if (!nr_entries) >> 1172 return 0; 1312 1173 1313 rcu_read_lock(); 1174 rcu_read_lock(); 1314 xas_for_each(&xas, folio, ULONG_MAX) !! 1175 xas_for_each(&xas, page, ULONG_MAX) { 1315 if (xas_retry(&xas, folio)) !! 
1176 if (xas_retry(&xas, page)) 1316 continue; 1177 continue; 1317 1178 1318 if (!xa_is_value(folio)) !! 1179 if (!xa_is_value(page)) 1319 continue; 1180 continue; 1320 1181 1321 entry = radix_to_swp_entry(fo !! 1182 entry = radix_to_swp_entry(page); 1322 /* << 1323 * swapin error entries can b << 1324 * deliberately ignored here << 1325 */ << 1326 if (swp_type(entry) != type) 1183 if (swp_type(entry) != type) 1327 continue; 1184 continue; >> 1185 if (frontswap && >> 1186 !frontswap_test(swap_info[type], swp_offset(entry))) >> 1187 continue; 1328 1188 1329 indices[folio_batch_count(fba !! 1189 indices[ret] = xas.xa_index; 1330 if (!folio_batch_add(fbatch, !! 1190 entries[ret] = page; 1331 break; << 1332 1191 1333 if (need_resched()) { 1192 if (need_resched()) { 1334 xas_pause(&xas); 1193 xas_pause(&xas); 1335 cond_resched_rcu(); 1194 cond_resched_rcu(); 1336 } 1195 } >> 1196 if (++ret == nr_entries) >> 1197 break; 1337 } 1198 } 1338 rcu_read_unlock(); 1199 rcu_read_unlock(); 1339 1200 1340 return xas.xa_index; !! 1201 return ret; 1341 } 1202 } 1342 1203 1343 /* 1204 /* 1344 * Move the swapped pages for an inode to pag 1205 * Move the swapped pages for an inode to page cache. Returns the count 1345 * of pages swapped in, or the error in case 1206 * of pages swapped in, or the error in case of failure. 1346 */ 1207 */ 1347 static int shmem_unuse_swap_entries(struct in !! 1208 static int shmem_unuse_swap_entries(struct inode *inode, struct pagevec pvec, 1348 struct folio_batch *fbatch, p !! 1209 pgoff_t *indices) 1349 { 1210 { 1350 int i = 0; 1211 int i = 0; 1351 int ret = 0; 1212 int ret = 0; 1352 int error = 0; 1213 int error = 0; 1353 struct address_space *mapping = inode 1214 struct address_space *mapping = inode->i_mapping; 1354 1215 1355 for (i = 0; i < folio_batch_count(fba !! 1216 for (i = 0; i < pvec.nr; i++) { 1356 struct folio *folio = fbatch- !! 1217 struct page *page = pvec.pages[i]; 1357 1218 1358 if (!xa_is_value(folio)) !! 1219 if (!xa_is_value(page)) 1359 continue; 1220 continue; 1360 error = shmem_swapin_folio(in !! 1221 error = shmem_swapin_page(inode, indices[i], 1361 mappi !! 1222 &page, SGP_CACHE, >> 1223 mapping_gfp_mask(mapping), >> 1224 NULL, NULL); 1362 if (error == 0) { 1225 if (error == 0) { 1363 folio_unlock(folio); !! 1226 unlock_page(page); 1364 folio_put(folio); !! 1227 put_page(page); 1365 ret++; 1228 ret++; 1366 } 1229 } 1367 if (error == -ENOMEM) 1230 if (error == -ENOMEM) 1368 break; 1231 break; 1369 error = 0; 1232 error = 0; 1370 } 1233 } 1371 return error ? error : ret; 1234 return error ? error : ret; 1372 } 1235 } 1373 1236 1374 /* 1237 /* 1375 * If swap found in inode, free it and move p 1238 * If swap found in inode, free it and move page from swapcache to filecache. 1376 */ 1239 */ 1377 static int shmem_unuse_inode(struct inode *in !! 1240 static int shmem_unuse_inode(struct inode *inode, unsigned int type, >> 1241 bool frontswap, unsigned long *fs_pages_to_unuse) 1378 { 1242 { 1379 struct address_space *mapping = inode 1243 struct address_space *mapping = inode->i_mapping; 1380 pgoff_t start = 0; 1244 pgoff_t start = 0; 1381 struct folio_batch fbatch; !! 1245 struct pagevec pvec; 1382 pgoff_t indices[PAGEVEC_SIZE]; 1246 pgoff_t indices[PAGEVEC_SIZE]; >> 1247 bool frontswap_partial = (frontswap && *fs_pages_to_unuse > 0); 1383 int ret = 0; 1248 int ret = 0; 1384 1249 >> 1250 pagevec_init(&pvec); 1385 do { 1251 do { 1386 folio_batch_init(&fbatch); !! 1252 unsigned int nr_entries = PAGEVEC_SIZE; 1387 shmem_find_swap_entries(mappi !! 
1253 1388 if (folio_batch_count(&fbatch !! 1254 if (frontswap_partial && *fs_pages_to_unuse < PAGEVEC_SIZE) >> 1255 nr_entries = *fs_pages_to_unuse; >> 1256 >> 1257 pvec.nr = shmem_find_swap_entries(mapping, start, nr_entries, >> 1258 pvec.pages, indices, >> 1259 type, frontswap); >> 1260 if (pvec.nr == 0) { 1389 ret = 0; 1261 ret = 0; 1390 break; 1262 break; 1391 } 1263 } 1392 1264 1393 ret = shmem_unuse_swap_entrie !! 1265 ret = shmem_unuse_swap_entries(inode, pvec, indices); 1394 if (ret < 0) 1266 if (ret < 0) 1395 break; 1267 break; 1396 1268 1397 start = indices[folio_batch_c !! 1269 if (frontswap_partial) { >> 1270 *fs_pages_to_unuse -= ret; >> 1271 if (*fs_pages_to_unuse == 0) { >> 1272 ret = FRONTSWAP_PAGES_UNUSED; >> 1273 break; >> 1274 } >> 1275 } >> 1276 >> 1277 start = indices[pvec.nr - 1]; 1398 } while (true); 1278 } while (true); 1399 1279 1400 return ret; 1280 return ret; 1401 } 1281 } 1402 1282 1403 /* 1283 /* 1404 * Read all the shared memory data that resid 1284 * Read all the shared memory data that resides in the swap 1405 * device 'type' back into memory, so the swa 1285 * device 'type' back into memory, so the swap device can be 1406 * unused. 1286 * unused. 1407 */ 1287 */ 1408 int shmem_unuse(unsigned int type) !! 1288 int shmem_unuse(unsigned int type, bool frontswap, >> 1289 unsigned long *fs_pages_to_unuse) 1409 { 1290 { 1410 struct shmem_inode_info *info, *next; 1291 struct shmem_inode_info *info, *next; 1411 int error = 0; 1292 int error = 0; 1412 1293 1413 if (list_empty(&shmem_swaplist)) 1294 if (list_empty(&shmem_swaplist)) 1414 return 0; 1295 return 0; 1415 1296 1416 mutex_lock(&shmem_swaplist_mutex); 1297 mutex_lock(&shmem_swaplist_mutex); 1417 list_for_each_entry_safe(info, next, 1298 list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) { 1418 if (!info->swapped) { 1299 if (!info->swapped) { 1419 list_del_init(&info-> 1300 list_del_init(&info->swaplist); 1420 continue; 1301 continue; 1421 } 1302 } 1422 /* 1303 /* 1423 * Drop the swaplist mutex wh 1304 * Drop the swaplist mutex while searching the inode for swap; 1424 * but before doing so, make 1305 * but before doing so, make sure shmem_evict_inode() will not 1425 * remove placeholder inode f 1306 * remove placeholder inode from swaplist, nor let it be freed 1426 * (igrab() would protect fro 1307 * (igrab() would protect from unlink, but not from unmount). 1427 */ 1308 */ 1428 atomic_inc(&info->stop_evicti 1309 atomic_inc(&info->stop_eviction); 1429 mutex_unlock(&shmem_swaplist_ 1310 mutex_unlock(&shmem_swaplist_mutex); 1430 1311 1431 error = shmem_unuse_inode(&in !! 1312 error = shmem_unuse_inode(&info->vfs_inode, type, frontswap, >> 1313 fs_pages_to_unuse); 1432 cond_resched(); 1314 cond_resched(); 1433 1315 1434 mutex_lock(&shmem_swaplist_mu 1316 mutex_lock(&shmem_swaplist_mutex); 1435 next = list_next_entry(info, 1317 next = list_next_entry(info, swaplist); 1436 if (!info->swapped) 1318 if (!info->swapped) 1437 list_del_init(&info-> 1319 list_del_init(&info->swaplist); 1438 if (atomic_dec_and_test(&info 1320 if (atomic_dec_and_test(&info->stop_eviction)) 1439 wake_up_var(&info->st 1321 wake_up_var(&info->stop_eviction); 1440 if (error) 1322 if (error) 1441 break; 1323 break; 1442 } 1324 } 1443 mutex_unlock(&shmem_swaplist_mutex); 1325 mutex_unlock(&shmem_swaplist_mutex); 1444 1326 1445 return error; 1327 return error; 1446 } 1328 } 1447 1329 1448 /* 1330 /* 1449 * Move the page from the page cache to the s 1331 * Move the page from the page cache to the swap cache. 
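
/*
 * For context, a tiny userspace sketch (not from this file): shmem_unuse()
 * above runs as part of swapoff(2), which must read every swapped-out
 * shmem page on the departing device back into the page cache before the
 * device can be released.  The device path is purely illustrative and the
 * call requires CAP_SYS_ADMIN.
 */
#include <stdio.h>
#include <sys/swap.h>		/* swapoff() */

int main(void)
{
	if (swapoff("/dev/vdb1") != 0)	/* illustrative swap device */
		perror("swapoff");
	return 0;
}
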
1450 */ 1332 */ 1451 static int shmem_writepage(struct page *page, 1333 static int shmem_writepage(struct page *page, struct writeback_control *wbc) 1452 { 1334 { 1453 struct folio *folio = page_folio(page !! 1335 struct shmem_inode_info *info; 1454 struct address_space *mapping = folio !! 1336 struct address_space *mapping; 1455 struct inode *inode = mapping->host; !! 1337 struct inode *inode; 1456 struct shmem_inode_info *info = SHMEM << 1457 struct shmem_sb_info *sbinfo = SHMEM_ << 1458 swp_entry_t swap; 1338 swp_entry_t swap; 1459 pgoff_t index; 1339 pgoff_t index; 1460 int nr_pages; !! 1340 1461 bool split = false; !! 1341 VM_BUG_ON_PAGE(PageCompound(page), page); >> 1342 BUG_ON(!PageLocked(page)); >> 1343 mapping = page->mapping; >> 1344 index = page->index; >> 1345 inode = mapping->host; >> 1346 info = SHMEM_I(inode); >> 1347 if (info->flags & VM_LOCKED) >> 1348 goto redirty; >> 1349 if (!total_swap_pages) >> 1350 goto redirty; 1462 1351 1463 /* 1352 /* 1464 * Our capabilities prevent regular w 1353 * Our capabilities prevent regular writeback or sync from ever calling 1465 * shmem_writepage; but a stacking fi 1354 * shmem_writepage; but a stacking filesystem might use ->writepage of 1466 * its underlying filesystem, in whic 1355 * its underlying filesystem, in which case tmpfs should write out to 1467 * swap only in response to memory pr 1356 * swap only in response to memory pressure, and not for the writeback 1468 * threads or sync. 1357 * threads or sync. 1469 */ 1358 */ 1470 if (WARN_ON_ONCE(!wbc->for_reclaim)) !! 1359 if (!wbc->for_reclaim) { 1471 goto redirty; !! 1360 WARN_ON_ONCE(1); /* Still happens? Tell us about it! */ 1472 << 1473 if (WARN_ON_ONCE((info->flags & VM_LO << 1474 goto redirty; << 1475 << 1476 if (!total_swap_pages) << 1477 goto redirty; 1361 goto redirty; 1478 << 1479 /* << 1480 * If CONFIG_THP_SWAP is not enabled, << 1481 * split when swapping. << 1482 * << 1483 * And shrinkage of pages beyond i_si << 1484 * swapout of a large folio crossing << 1485 * (unless fallocate has been used to << 1486 */ << 1487 if (folio_test_large(folio)) { << 1488 index = shmem_fallocend(inode << 1489 DIV_ROUND_UP(i_size_r << 1490 if ((index > folio->index && << 1491 !IS_ENABLED(CONFIG_THP_SW << 1492 split = true; << 1493 } << 1494 << 1495 if (split) { << 1496 try_split: << 1497 /* Ensure the subpages are st << 1498 folio_test_set_dirty(folio); << 1499 if (split_huge_page_to_list_t << 1500 goto redirty; << 1501 folio = page_folio(page); << 1502 folio_clear_dirty(folio); << 1503 } 1362 } 1504 1363 1505 index = folio->index; << 1506 nr_pages = folio_nr_pages(folio); << 1507 << 1508 /* 1364 /* 1509 * This is somewhat ridiculous, but w 1365 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC 1510 * value into swapfile.c, the only wa 1366 * value into swapfile.c, the only way we can correctly account for a 1511 * fallocated folio arriving here is !! 1367 * fallocated page arriving here is now to initialize it and write it. 1512 * 1368 * 1513 * That's okay for a folio already fa !! 1369 * That's okay for a page already fallocated earlier, but if we have 1514 * not yet completed the fallocation, 1370 * not yet completed the fallocation, then (a) we want to keep track 1515 * of this folio in case we have to u !! 1371 * of this page in case we have to undo it, and (b) it may not be a 1516 * good idea to continue anyway, once 1372 * good idea to continue anyway, once we're pushing into swap. So 1517 * reactivate the folio, and let shme !! 
1373 * reactivate the page, and let shmem_fallocate() quit when too many. 1518 */ 1374 */ 1519 if (!folio_test_uptodate(folio)) { !! 1375 if (!PageUptodate(page)) { 1520 if (inode->i_private) { 1376 if (inode->i_private) { 1521 struct shmem_falloc * 1377 struct shmem_falloc *shmem_falloc; 1522 spin_lock(&inode->i_l 1378 spin_lock(&inode->i_lock); 1523 shmem_falloc = inode- 1379 shmem_falloc = inode->i_private; 1524 if (shmem_falloc && 1380 if (shmem_falloc && 1525 !shmem_falloc->wa 1381 !shmem_falloc->waitq && 1526 index >= shmem_fa 1382 index >= shmem_falloc->start && 1527 index < shmem_fal 1383 index < shmem_falloc->next) 1528 shmem_falloc- 1384 shmem_falloc->nr_unswapped++; 1529 else 1385 else 1530 shmem_falloc 1386 shmem_falloc = NULL; 1531 spin_unlock(&inode->i 1387 spin_unlock(&inode->i_lock); 1532 if (shmem_falloc) 1388 if (shmem_falloc) 1533 goto redirty; 1389 goto redirty; 1534 } 1390 } 1535 folio_zero_range(folio, 0, fo !! 1391 clear_highpage(page); 1536 flush_dcache_folio(folio); !! 1392 flush_dcache_page(page); 1537 folio_mark_uptodate(folio); !! 1393 SetPageUptodate(page); 1538 } 1394 } 1539 1395 1540 swap = folio_alloc_swap(folio); !! 1396 swap = get_swap_page(page); 1541 if (!swap.val) { !! 1397 if (!swap.val) 1542 if (nr_pages > 1) << 1543 goto try_split; << 1544 << 1545 goto redirty; 1398 goto redirty; 1546 } << 1547 1399 1548 /* 1400 /* 1549 * Add inode to shmem_unuse()'s list 1401 * Add inode to shmem_unuse()'s list of swapped-out inodes, 1550 * if it's not already there. Do it !! 1402 * if it's not already there. Do it now before the page is 1551 * moved to swap cache, when its page 1403 * moved to swap cache, when its pagelock no longer protects 1552 * the inode from eviction. But don' 1404 * the inode from eviction. But don't unlock the mutex until 1553 * we've incremented swapped, because 1405 * we've incremented swapped, because shmem_unuse_inode() will 1554 * prune a !swapped inode from the sw 1406 * prune a !swapped inode from the swaplist under this mutex. 1555 */ 1407 */ 1556 mutex_lock(&shmem_swaplist_mutex); 1408 mutex_lock(&shmem_swaplist_mutex); 1557 if (list_empty(&info->swaplist)) 1409 if (list_empty(&info->swaplist)) 1558 list_add(&info->swaplist, &sh 1410 list_add(&info->swaplist, &shmem_swaplist); 1559 1411 1560 if (add_to_swap_cache(folio, swap, !! 1412 if (add_to_swap_cache(page, swap, 1561 __GFP_HIGH | __GFP_NO 1413 __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN, 1562 NULL) == 0) { 1414 NULL) == 0) { 1563 shmem_recalc_inode(inode, 0, !! 1415 spin_lock_irq(&info->lock); 1564 swap_shmem_alloc(swap, nr_pag !! 1416 shmem_recalc_inode(inode); 1565 shmem_delete_from_page_cache( !! 1417 info->swapped++; >> 1418 spin_unlock_irq(&info->lock); >> 1419 >> 1420 swap_shmem_alloc(swap); >> 1421 shmem_delete_from_page_cache(page, swp_to_radix_entry(swap)); 1566 1422 1567 mutex_unlock(&shmem_swaplist_ 1423 mutex_unlock(&shmem_swaplist_mutex); 1568 BUG_ON(folio_mapped(folio)); !! 1424 BUG_ON(page_mapped(page)); 1569 return swap_writepage(&folio- !! 1425 swap_writepage(page, wbc); >> 1426 return 0; 1570 } 1427 } 1571 1428 1572 mutex_unlock(&shmem_swaplist_mutex); 1429 mutex_unlock(&shmem_swaplist_mutex); 1573 put_swap_folio(folio, swap); !! 1430 put_swap_page(page, swap); 1574 redirty: 1431 redirty: 1575 folio_mark_dirty(folio); !! 1432 set_page_dirty(page); 1576 if (wbc->for_reclaim) 1433 if (wbc->for_reclaim) 1577 return AOP_WRITEPAGE_ACTIVATE !! 1434 return AOP_WRITEPAGE_ACTIVATE; /* Return with page locked */ 1578 folio_unlock(folio); !! 
1435 unlock_page(page); 1579 return 0; 1436 return 0; 1580 } 1437 } 1581 1438 1582 #if defined(CONFIG_NUMA) && defined(CONFIG_TM 1439 #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS) 1583 static void shmem_show_mpol(struct seq_file * 1440 static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) 1584 { 1441 { 1585 char buffer[64]; 1442 char buffer[64]; 1586 1443 1587 if (!mpol || mpol->mode == MPOL_DEFAU 1444 if (!mpol || mpol->mode == MPOL_DEFAULT) 1588 return; /* show nothi 1445 return; /* show nothing */ 1589 1446 1590 mpol_to_str(buffer, sizeof(buffer), m 1447 mpol_to_str(buffer, sizeof(buffer), mpol); 1591 1448 1592 seq_printf(seq, ",mpol=%s", buffer); 1449 seq_printf(seq, ",mpol=%s", buffer); 1593 } 1450 } 1594 1451 1595 static struct mempolicy *shmem_get_sbmpol(str 1452 static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) 1596 { 1453 { 1597 struct mempolicy *mpol = NULL; 1454 struct mempolicy *mpol = NULL; 1598 if (sbinfo->mpol) { 1455 if (sbinfo->mpol) { 1599 raw_spin_lock(&sbinfo->stat_l !! 1456 spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */ 1600 mpol = sbinfo->mpol; 1457 mpol = sbinfo->mpol; 1601 mpol_get(mpol); 1458 mpol_get(mpol); 1602 raw_spin_unlock(&sbinfo->stat !! 1459 spin_unlock(&sbinfo->stat_lock); 1603 } 1460 } 1604 return mpol; 1461 return mpol; 1605 } 1462 } 1606 #else /* !CONFIG_NUMA || !CONFIG_TMPFS */ 1463 #else /* !CONFIG_NUMA || !CONFIG_TMPFS */ 1607 static inline void shmem_show_mpol(struct seq 1464 static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) 1608 { 1465 { 1609 } 1466 } 1610 static inline struct mempolicy *shmem_get_sbm 1467 static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) 1611 { 1468 { 1612 return NULL; 1469 return NULL; 1613 } 1470 } 1614 #endif /* CONFIG_NUMA && CONFIG_TMPFS */ 1471 #endif /* CONFIG_NUMA && CONFIG_TMPFS */ >> 1472 #ifndef CONFIG_NUMA >> 1473 #define vm_policy vm_private_data >> 1474 #endif >> 1475 >> 1476 static void shmem_pseudo_vma_init(struct vm_area_struct *vma, >> 1477 struct shmem_inode_info *info, pgoff_t index) >> 1478 { >> 1479 /* Create a pseudo vma that just contains the policy */ >> 1480 vma_init(vma, NULL); >> 1481 /* Bias interleave by inode number to distribute better across nodes */ >> 1482 vma->vm_pgoff = index + info->vfs_inode.i_ino; >> 1483 vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index); >> 1484 } 1615 1485 1616 static struct mempolicy *shmem_get_pgoff_poli !! 1486 static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma) 1617 pgoff_t index, unsign !! 1487 { >> 1488 /* Drop reference taken by mpol_shared_policy_lookup() */ >> 1489 mpol_cond_put(vma->vm_policy); >> 1490 } 1618 1491 1619 static struct folio *shmem_swapin_cluster(swp !! 1492 static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp, 1620 struct shmem_inode_in 1493 struct shmem_inode_info *info, pgoff_t index) 1621 { 1494 { 1622 struct mempolicy *mpol; !! 1495 struct vm_area_struct pvma; 1623 pgoff_t ilx; !! 1496 struct page *page; 1624 struct folio *folio; !! 1497 struct vm_fault vmf = { >> 1498 .vma = &pvma, >> 1499 }; 1625 1500 1626 mpol = shmem_get_pgoff_policy(info, i !! 1501 shmem_pseudo_vma_init(&pvma, info, index); 1627 folio = swap_cluster_readahead(swap, !! 1502 page = swap_cluster_readahead(swap, gfp, &vmf); 1628 mpol_cond_put(mpol); !! 1503 shmem_pseudo_vma_destroy(&pvma); 1629 1504 1630 return folio; !! 
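
/*
 * A short userspace sketch (not from this file): the superblock mempolicy
 * reported by shmem_show_mpol() above is normally established at mount
 * time with the tmpfs "mpol=" option (CONFIG_NUMA kernels).  Mount point
 * and options are illustrative; needs CAP_SYS_ADMIN.
 */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* same as: mount -t tmpfs -o size=64m,mpol=interleave tmpfs /mnt/numa-tmp */
	if (mount("tmpfs", "/mnt/numa-tmp", "tmpfs", 0,
		  "size=64m,mpol=interleave") != 0)
		perror("mount");
	return 0;
}
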
1505 return page; 1631 } 1506 } 1632 1507 1633 /* 1508 /* 1634 * Make sure huge_gfp is always more limited 1509 * Make sure huge_gfp is always more limited than limit_gfp. 1635 * Some of the flags set permissions, while o 1510 * Some of the flags set permissions, while others set limitations. 1636 */ 1511 */ 1637 static gfp_t limit_gfp_mask(gfp_t huge_gfp, g 1512 static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp) 1638 { 1513 { 1639 gfp_t allowflags = __GFP_IO | __GFP_F 1514 gfp_t allowflags = __GFP_IO | __GFP_FS | __GFP_RECLAIM; 1640 gfp_t denyflags = __GFP_NOWARN | __GF 1515 gfp_t denyflags = __GFP_NOWARN | __GFP_NORETRY; 1641 gfp_t zoneflags = limit_gfp & GFP_ZON 1516 gfp_t zoneflags = limit_gfp & GFP_ZONEMASK; 1642 gfp_t result = huge_gfp & ~(allowflag 1517 gfp_t result = huge_gfp & ~(allowflags | GFP_ZONEMASK); 1643 1518 1644 /* Allow allocations only from the or 1519 /* Allow allocations only from the originally specified zones. */ 1645 result |= zoneflags; 1520 result |= zoneflags; 1646 1521 1647 /* 1522 /* 1648 * Minimize the result gfp by taking 1523 * Minimize the result gfp by taking the union with the deny flags, 1649 * and the intersection of the allow 1524 * and the intersection of the allow flags. 1650 */ 1525 */ 1651 result |= (limit_gfp & denyflags); 1526 result |= (limit_gfp & denyflags); 1652 result |= (huge_gfp & limit_gfp) & al 1527 result |= (huge_gfp & limit_gfp) & allowflags; 1653 1528 1654 return result; 1529 return result; 1655 } 1530 } 1656 1531 1657 #ifdef CONFIG_TRANSPARENT_HUGEPAGE !! 1532 static struct page *shmem_alloc_hugepage(gfp_t gfp, 1658 unsigned long shmem_allowable_huge_orders(str !! 1533 struct shmem_inode_info *info, pgoff_t index) 1659 struct vm_are << 1660 loff_t write_ << 1661 { << 1662 unsigned long mask = READ_ONCE(huge_s << 1663 unsigned long within_size_orders = RE << 1664 unsigned long vm_flags = vma ? vma->v << 1665 bool global_huge; << 1666 loff_t i_size; << 1667 int order; << 1668 << 1669 if (thp_disabled_by_hw() || (vma && v << 1670 return 0; << 1671 << 1672 global_huge = shmem_huge_global_enabl << 1673 shmem << 1674 if (!vma || !vma_is_anon_shmem(vma)) << 1675 /* << 1676 * For tmpfs, we now only sup << 1677 * is enabled, otherwise fall << 1678 */ << 1679 return global_huge ? BIT(HPAG << 1680 } << 1681 << 1682 /* << 1683 * Following the 'deny' semantics of << 1684 * option off from all mounts. << 1685 */ << 1686 if (shmem_huge == SHMEM_HUGE_DENY) << 1687 return 0; << 1688 << 1689 /* << 1690 * Only allow inherit orders if the t << 1691 * means non-PMD sized THP can not ov << 1692 */ << 1693 if (shmem_huge == SHMEM_HUGE_FORCE) << 1694 return READ_ONCE(huge_shmem_o << 1695 << 1696 /* Allow mTHP that will be fully with << 1697 order = highest_order(within_size_ord << 1698 while (within_size_orders) { << 1699 index = round_up(index + 1, o << 1700 i_size = round_up(i_size_read << 1701 if (i_size >> PAGE_SHIFT >= i << 1702 mask |= within_size_o << 1703 break; << 1704 } << 1705 << 1706 order = next_order(&within_si << 1707 } << 1708 << 1709 if (vm_flags & VM_HUGEPAGE) << 1710 mask |= READ_ONCE(huge_shmem_ << 1711 << 1712 if (global_huge) << 1713 mask |= READ_ONCE(huge_shmem_ << 1714 << 1715 return THP_ORDERS_ALL_FILE_DEFAULT & << 1716 } << 1717 << 1718 static unsigned long shmem_suitable_orders(st << 1719 st << 1720 un << 1721 { 1534 { 1722 struct vm_area_struct *vma = vmf ? vm !! 1535 struct vm_area_struct pvma; 1723 pgoff_t aligned_index; !! 1536 struct address_space *mapping = info->vfs_inode.i_mapping; 1724 unsigned long pages; !! 
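
/*
 * A userspace sketch (not from this file) of the two knobs consulted by
 * the huge-order selection above: the per-mount tmpfs "huge=" option and
 * madvise(MADV_HUGEPAGE), which sets VM_HUGEPAGE on the mapping.  Whether
 * a huge folio is actually used still depends on configuration, alignment
 * and available memory.  Paths and sizes are illustrative; error handling
 * is omitted and the mount needs CAP_SYS_ADMIN.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/mount.h>
#include <unistd.h>

int main(void)
{
	size_t len = 4 << 20;				/* 4 MiB */
	int fd;
	char *p;

	mount("tmpfs", "/mnt/thp-tmp", "tmpfs", 0, "huge=within_size");

	fd = open("/mnt/thp-tmp/file", O_RDWR | O_CREAT, 0600);
	ftruncate(fd, len);

	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	madvise(p, len, MADV_HUGEPAGE);			/* request huge pages */
	p[0] = 1;					/* fault; may map a huge folio */

	munmap(p, len);
	close(fd);
	return 0;
}
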
1537 pgoff_t hindex; 1725 int order; !! 1538 struct page *page; 1726 << 1727 if (vma) { << 1728 orders = thp_vma_suitable_ord << 1729 if (!orders) << 1730 return 0; << 1731 } << 1732 1539 1733 /* Find the highest order that can ad !! 1540 hindex = round_down(index, HPAGE_PMD_NR); 1734 order = highest_order(orders); !! 1541 if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1, 1735 while (orders) { !! 1542 XA_PRESENT)) 1736 pages = 1UL << order; !! 1543 return NULL; 1737 aligned_index = round_down(in << 1738 /* << 1739 * Check for conflict before << 1740 * Conflict might be that a h << 1741 * and added to page cache by << 1742 * is already at least one sm << 1743 * Be careful to retry when a << 1744 * Elsewhere -EEXIST would be << 1745 */ << 1746 if (!xa_find(&mapping->i_page << 1747 aligned_index + << 1748 break; << 1749 order = next_order(&orders, o << 1750 } << 1751 1544 1752 return orders; !! 1545 shmem_pseudo_vma_init(&pvma, info, hindex); 1753 } !! 1546 page = alloc_pages_vma(gfp, HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), 1754 #else !! 1547 true); 1755 static unsigned long shmem_suitable_orders(st !! 1548 shmem_pseudo_vma_destroy(&pvma); 1756 st !! 1549 if (page) 1757 un !! 1550 prep_transhuge_page(page); 1758 { !! 1551 else 1759 return 0; !! 1552 count_vm_event(THP_FILE_FALLBACK); >> 1553 return page; 1760 } 1554 } 1761 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ << 1762 1555 1763 static struct folio *shmem_alloc_folio(gfp_t !! 1556 static struct page *shmem_alloc_page(gfp_t gfp, 1764 struct shmem_inode_info *info !! 1557 struct shmem_inode_info *info, pgoff_t index) 1765 { 1558 { 1766 struct mempolicy *mpol; !! 1559 struct vm_area_struct pvma; 1767 pgoff_t ilx; !! 1560 struct page *page; 1768 struct folio *folio; << 1769 1561 1770 mpol = shmem_get_pgoff_policy(info, i !! 1562 shmem_pseudo_vma_init(&pvma, info, index); 1771 folio = folio_alloc_mpol(gfp, order, !! 1563 page = alloc_page_vma(gfp, &pvma, 0); 1772 mpol_cond_put(mpol); !! 1564 shmem_pseudo_vma_destroy(&pvma); 1773 1565 1774 return folio; !! 1566 return page; 1775 } 1567 } 1776 1568 1777 static struct folio *shmem_alloc_and_add_foli !! 1569 static struct page *shmem_alloc_and_acct_page(gfp_t gfp, 1778 gfp_t gfp, struct inode *inod !! 1570 struct inode *inode, 1779 struct mm_struct *fault_mm, u !! 1571 pgoff_t index, bool huge) 1780 { 1572 { 1781 struct address_space *mapping = inode << 1782 struct shmem_inode_info *info = SHMEM 1573 struct shmem_inode_info *info = SHMEM_I(inode); 1783 unsigned long suitable_orders = 0; !! 1574 struct page *page; 1784 struct folio *folio = NULL; !! 1575 int nr; 1785 long pages; !! 1576 int err = -ENOSPC; 1786 int error, order; << 1787 1577 1788 if (!IS_ENABLED(CONFIG_TRANSPARENT_HU 1578 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) 1789 orders = 0; !! 1579 huge = false; 1790 !! 1580 nr = huge ? HPAGE_PMD_NR : 1; 1791 if (orders > 0) { << 1792 suitable_orders = shmem_suita << 1793 << 1794 << 1795 order = highest_order(suitabl << 1796 while (suitable_orders) { << 1797 pages = 1UL << order; << 1798 index = round_down(in << 1799 folio = shmem_alloc_f << 1800 if (folio) << 1801 goto allocate << 1802 1581 1803 if (pages == HPAGE_PM !! 1582 if (!shmem_inode_acct_block(inode, nr)) 1804 count_vm_even !! 
1583 goto failed; 1805 count_mthp_stat(order << 1806 order = next_order(&s << 1807 } << 1808 } else { << 1809 pages = 1; << 1810 folio = shmem_alloc_folio(gfp << 1811 } << 1812 if (!folio) << 1813 return ERR_PTR(-ENOMEM); << 1814 << 1815 allocated: << 1816 __folio_set_locked(folio); << 1817 __folio_set_swapbacked(folio); << 1818 << 1819 gfp &= GFP_RECLAIM_MASK; << 1820 error = mem_cgroup_charge(folio, faul << 1821 if (error) { << 1822 if (xa_find(&mapping->i_pages << 1823 index + pages << 1824 error = -EEXIST; << 1825 } else if (pages > 1) { << 1826 if (pages == HPAGE_PM << 1827 count_vm_even << 1828 count_vm_even << 1829 } << 1830 count_mthp_stat(folio << 1831 count_mthp_stat(folio << 1832 } << 1833 goto unlock; << 1834 } << 1835 << 1836 error = shmem_add_to_page_cache(folio << 1837 if (error) << 1838 goto unlock; << 1839 1584 1840 error = shmem_inode_acct_blocks(inode !! 1585 if (huge) 1841 if (error) { !! 1586 page = shmem_alloc_hugepage(gfp, info, index); 1842 struct shmem_sb_info *sbinfo !! 1587 else 1843 long freed; !! 1588 page = shmem_alloc_page(gfp, info, index); 1844 /* !! 1589 if (page) { 1845 * Try to reclaim some space !! 1590 __SetPageLocked(page); 1846 * large folios beyond i_size !! 1591 __SetPageSwapBacked(page); 1847 */ !! 1592 return page; 1848 shmem_unused_huge_shrink(sbin << 1849 /* << 1850 * And do a shmem_recalc_inod << 1851 * except our folio is there << 1852 */ << 1853 spin_lock(&info->lock); << 1854 freed = pages + info->alloced << 1855 READ_ONCE(mapping->nr << 1856 if (freed > 0) << 1857 info->alloced -= free << 1858 spin_unlock(&info->lock); << 1859 if (freed > 0) << 1860 shmem_inode_unacct_bl << 1861 error = shmem_inode_acct_bloc << 1862 if (error) { << 1863 filemap_remove_folio( << 1864 goto unlock; << 1865 } << 1866 } 1593 } 1867 1594 1868 shmem_recalc_inode(inode, pages, 0); !! 1595 err = -ENOMEM; 1869 folio_add_lru(folio); !! 1596 shmem_inode_unacct_blocks(inode, nr); 1870 return folio; !! 1597 failed: 1871 !! 1598 return ERR_PTR(err); 1872 unlock: << 1873 folio_unlock(folio); << 1874 folio_put(folio); << 1875 return ERR_PTR(error); << 1876 } 1599 } 1877 1600 1878 /* 1601 /* 1879 * When a page is moved from swapcache to shm 1602 * When a page is moved from swapcache to shmem filecache (either by the 1880 * usual swapin of shmem_get_folio_gfp(), or !! 1603 * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of 1881 * shmem_unuse_inode()), it may have been rea 1604 * shmem_unuse_inode()), it may have been read in earlier from swap, in 1882 * ignorance of the mapping it belongs to. I 1605 * ignorance of the mapping it belongs to. If that mapping has special 1883 * constraints (like the gma500 GEM driver, w 1606 * constraints (like the gma500 GEM driver, which requires RAM below 4GB), 1884 * we may need to copy to a suitable page bef 1607 * we may need to copy to a suitable page before moving to filecache. 1885 * 1608 * 1886 * In a future release, this may well be exte 1609 * In a future release, this may well be extended to respect cpuset and 1887 * NUMA mempolicy, and applied also to anonym 1610 * NUMA mempolicy, and applied also to anonymous pages in do_swap_page(); 1888 * but for now it is a simple matter of zone. 1611 * but for now it is a simple matter of zone. 1889 */ 1612 */ 1890 static bool shmem_should_replace_folio(struct !! 1613 static bool shmem_should_replace_page(struct page *page, gfp_t gfp) 1891 { 1614 { 1892 return folio_zonenum(folio) > gfp_zon !! 
1615 return page_zonenum(page) > gfp_zone(gfp); 1893 } 1616 } 1894 1617 1895 static int shmem_replace_folio(struct folio * !! 1618 static int shmem_replace_page(struct page **pagep, gfp_t gfp, 1896 struct shmem_ !! 1619 struct shmem_inode_info *info, pgoff_t index) 1897 struct vm_are !! 1620 { 1898 { !! 1621 struct page *oldpage, *newpage; 1899 struct folio *new, *old = *foliop; !! 1622 struct address_space *swap_mapping; 1900 swp_entry_t entry = old->swap; !! 1623 swp_entry_t entry; 1901 struct address_space *swap_mapping = !! 1624 pgoff_t swap_index; 1902 pgoff_t swap_index = swap_cache_index !! 1625 int error; 1903 XA_STATE(xas, &swap_mapping->i_pages, !! 1626 1904 int nr_pages = folio_nr_pages(old); !! 1627 oldpage = *pagep; 1905 int error = 0, i; !! 1628 entry.val = page_private(oldpage); >> 1629 swap_index = swp_offset(entry); >> 1630 swap_mapping = page_mapping(oldpage); 1906 1631 1907 /* 1632 /* 1908 * We have arrived here because our z 1633 * We have arrived here because our zones are constrained, so don't 1909 * limit chance of success by further 1634 * limit chance of success by further cpuset and node constraints. 1910 */ 1635 */ 1911 gfp &= ~GFP_CONSTRAINT_MASK; 1636 gfp &= ~GFP_CONSTRAINT_MASK; 1912 #ifdef CONFIG_TRANSPARENT_HUGEPAGE !! 1637 newpage = shmem_alloc_page(gfp, info, index); 1913 if (nr_pages > 1) { !! 1638 if (!newpage) 1914 gfp_t huge_gfp = vma_thp_gfp_ << 1915 << 1916 gfp = limit_gfp_mask(huge_gfp << 1917 } << 1918 #endif << 1919 << 1920 new = shmem_alloc_folio(gfp, folio_or << 1921 if (!new) << 1922 return -ENOMEM; 1639 return -ENOMEM; 1923 1640 1924 folio_ref_add(new, nr_pages); !! 1641 get_page(newpage); 1925 folio_copy(new, old); !! 1642 copy_highpage(newpage, oldpage); 1926 flush_dcache_folio(new); !! 1643 flush_dcache_page(newpage); 1927 !! 1644 1928 __folio_set_locked(new); !! 1645 __SetPageLocked(newpage); 1929 __folio_set_swapbacked(new); !! 1646 __SetPageSwapBacked(newpage); 1930 folio_mark_uptodate(new); !! 1647 SetPageUptodate(newpage); 1931 new->swap = entry; !! 1648 set_page_private(newpage, entry.val); 1932 folio_set_swapcache(new); !! 1649 SetPageSwapCache(newpage); 1933 1650 1934 /* Swap cache still stores N entries !! 1651 /* >> 1652 * Our caller will very soon move newpage out of swapcache, but it's >> 1653 * a nice clean interface for us to replace oldpage by newpage there. >> 1654 */ 1935 xa_lock_irq(&swap_mapping->i_pages); 1655 xa_lock_irq(&swap_mapping->i_pages); 1936 for (i = 0; i < nr_pages; i++) { !! 1656 error = shmem_replace_entry(swap_mapping, swap_index, oldpage, newpage); 1937 void *item = xas_load(&xas); << 1938 << 1939 if (item != old) { << 1940 error = -ENOENT; << 1941 break; << 1942 } << 1943 << 1944 xas_store(&xas, new); << 1945 xas_next(&xas); << 1946 } << 1947 if (!error) { 1657 if (!error) { 1948 mem_cgroup_replace_folio(old, !! 1658 mem_cgroup_migrate(oldpage, newpage); 1949 __lruvec_stat_mod_folio(new, !! 1659 __inc_lruvec_page_state(newpage, NR_FILE_PAGES); 1950 __lruvec_stat_mod_folio(new, !! 1660 __dec_lruvec_page_state(oldpage, NR_FILE_PAGES); 1951 __lruvec_stat_mod_folio(old, << 1952 __lruvec_stat_mod_folio(old, << 1953 } 1661 } 1954 xa_unlock_irq(&swap_mapping->i_pages) 1662 xa_unlock_irq(&swap_mapping->i_pages); 1955 1663 1956 if (unlikely(error)) { 1664 if (unlikely(error)) { 1957 /* 1665 /* 1958 * Is this possible? I think !! 1666 * Is this possible? I think not, now that our callers check 1959 * check both the swapcache f !! 
1667 * both PageSwapCache and page_private after getting page lock; 1960 * after getting the folio lo !! 1668 * but be defensive. Reverse old to newpage for clear and free. 1961 * Reverse old to newpage for << 1962 */ 1669 */ 1963 old = new; !! 1670 oldpage = newpage; 1964 } else { 1671 } else { 1965 folio_add_lru(new); !! 1672 lru_cache_add(newpage); 1966 *foliop = new; !! 1673 *pagep = newpage; 1967 } 1674 } 1968 1675 1969 folio_clear_swapcache(old); !! 1676 ClearPageSwapCache(oldpage); 1970 old->private = NULL; !! 1677 set_page_private(oldpage, 0); 1971 1678 1972 folio_unlock(old); !! 1679 unlock_page(oldpage); 1973 /* !! 1680 put_page(oldpage); 1974 * The old folio are removed from swa !! 1681 put_page(oldpage); 1975 * reference, as well as one temporar << 1976 * cache. << 1977 */ << 1978 folio_put_refs(old, nr_pages + 1); << 1979 return error; 1682 return error; 1980 } 1683 } 1981 1684 1982 static void shmem_set_folio_swapin_error(stru << 1983 stru << 1984 { << 1985 struct address_space *mapping = inode << 1986 swp_entry_t swapin_error; << 1987 void *old; << 1988 int nr_pages; << 1989 << 1990 swapin_error = make_poisoned_swp_entr << 1991 old = xa_cmpxchg_irq(&mapping->i_page << 1992 swp_to_radix_ent << 1993 swp_to_radix_ent << 1994 if (old != swp_to_radix_entry(swap)) << 1995 return; << 1996 << 1997 nr_pages = folio_nr_pages(folio); << 1998 folio_wait_writeback(folio); << 1999 delete_from_swap_cache(folio); << 2000 /* << 2001 * Don't treat swapin error folio as << 2002 * won't be 0 when inode is released << 2003 * in shmem_evict_inode(). << 2004 */ << 2005 shmem_recalc_inode(inode, -nr_pages, << 2006 swap_free_nr(swap, nr_pages); << 2007 } << 2008 << 2009 static int shmem_split_large_entry(struct ino << 2010 swp_entry_ << 2011 { << 2012 struct address_space *mapping = inode << 2013 XA_STATE_ORDER(xas, &mapping->i_pages << 2014 void *alloced_shadow = NULL; << 2015 int alloced_order = 0, i; << 2016 << 2017 /* Convert user data gfp flags to xar << 2018 gfp &= GFP_RECLAIM_MASK; << 2019 << 2020 for (;;) { << 2021 int order = -1, split_order = << 2022 void *old = NULL; << 2023 << 2024 xas_lock_irq(&xas); << 2025 old = xas_load(&xas); << 2026 if (!xa_is_value(old) || swp_ << 2027 xas_set_err(&xas, -EE << 2028 goto unlock; << 2029 } << 2030 << 2031 order = xas_get_order(&xas); << 2032 << 2033 /* Swap entry may have change << 2034 if (alloced_order && << 2035 (old != alloced_shadow || << 2036 xas_destroy(&xas); << 2037 alloced_order = 0; << 2038 } << 2039 << 2040 /* Try to split large swap en << 2041 if (order > 0) { << 2042 if (!alloced_order) { << 2043 split_order = << 2044 goto unlock; << 2045 } << 2046 xas_split(&xas, old, << 2047 << 2048 /* << 2049 * Re-set the swap en << 2050 * offset of the orig << 2051 */ << 2052 for (i = 0; i < 1 << << 2053 pgoff_t align << 2054 swp_entry_t t << 2055 << 2056 tmp = swp_ent << 2057 __xa_store(&m << 2058 sw << 2059 } << 2060 } << 2061 << 2062 unlock: << 2063 xas_unlock_irq(&xas); << 2064 << 2065 /* split needed, alloc here a << 2066 if (split_order) { << 2067 xas_split_alloc(&xas, << 2068 if (xas_error(&xas)) << 2069 goto error; << 2070 alloced_shadow = old; << 2071 alloced_order = split << 2072 xas_reset(&xas); << 2073 continue; << 2074 } << 2075 << 2076 if (!xas_nomem(&xas, gfp)) << 2077 break; << 2078 } << 2079 << 2080 error: << 2081 if (xas_error(&xas)) << 2082 return xas_error(&xas); << 2083 << 2084 return alloced_order; << 2085 } << 2086 << 2087 /* 1685 /* 2088 * Swap in the folio pointed to by *foliop. !! 
1686 * Swap in the page pointed to by *pagep. 2089 * Caller has to make sure that *foliop conta !! 1687 * Caller has to make sure that *pagep contains a valid swapped page. 2090 * Returns 0 and the folio in foliop if succe !! 1688 * Returns 0 and the page in pagep if success. On failure, returns the 2091 * error code and NULL in *foliop. !! 1689 * error code and NULL in *pagep. 2092 */ 1690 */ 2093 static int shmem_swapin_folio(struct inode *i !! 1691 static int shmem_swapin_page(struct inode *inode, pgoff_t index, 2094 struct folio **f !! 1692 struct page **pagep, enum sgp_type sgp, 2095 gfp_t gfp, struc 1693 gfp_t gfp, struct vm_area_struct *vma, 2096 vm_fault_t *faul 1694 vm_fault_t *fault_type) 2097 { 1695 { 2098 struct address_space *mapping = inode 1696 struct address_space *mapping = inode->i_mapping; 2099 struct mm_struct *fault_mm = vma ? vm << 2100 struct shmem_inode_info *info = SHMEM 1697 struct shmem_inode_info *info = SHMEM_I(inode); 2101 struct swap_info_struct *si; !! 1698 struct mm_struct *charge_mm = vma ? vma->vm_mm : NULL; 2102 struct folio *folio = NULL; !! 1699 struct page *page; 2103 swp_entry_t swap; 1700 swp_entry_t swap; 2104 int error, nr_pages; !! 1701 int error; 2105 1702 2106 VM_BUG_ON(!*foliop || !xa_is_value(*f !! 1703 VM_BUG_ON(!*pagep || !xa_is_value(*pagep)); 2107 swap = radix_to_swp_entry(*foliop); !! 1704 swap = radix_to_swp_entry(*pagep); 2108 *foliop = NULL; !! 1705 *pagep = NULL; 2109 << 2110 if (is_poisoned_swp_entry(swap)) << 2111 return -EIO; << 2112 << 2113 si = get_swap_device(swap); << 2114 if (!si) { << 2115 if (!shmem_confirm_swap(mappi << 2116 return -EEXIST; << 2117 else << 2118 return -EINVAL; << 2119 } << 2120 1706 2121 /* Look it up and read it in.. */ 1707 /* Look it up and read it in.. */ 2122 folio = swap_cache_get_folio(swap, NU !! 1708 page = lookup_swap_cache(swap, NULL, 0); 2123 if (!folio) { !! 1709 if (!page) { 2124 int split_order; << 2125 << 2126 /* Or update major stats only 1710 /* Or update major stats only when swapin succeeds?? */ 2127 if (fault_type) { 1711 if (fault_type) { 2128 *fault_type |= VM_FAU 1712 *fault_type |= VM_FAULT_MAJOR; 2129 count_vm_event(PGMAJF 1713 count_vm_event(PGMAJFAULT); 2130 count_memcg_event_mm( !! 1714 count_memcg_event_mm(charge_mm, PGMAJFAULT); 2131 } << 2132 << 2133 /* << 2134 * Now swap device can only s << 2135 * should split the large swa << 2136 * if necessary. << 2137 */ << 2138 split_order = shmem_split_lar << 2139 if (split_order < 0) { << 2140 error = split_order; << 2141 goto failed; << 2142 } << 2143 << 2144 /* << 2145 * If the large swap entry ha << 2146 * necessary to recalculate t << 2147 * the old order alignment. << 2148 */ << 2149 if (split_order > 0) { << 2150 pgoff_t offset = inde << 2151 << 2152 swap = swp_entry(swp_ << 2153 } 1715 } 2154 << 2155 /* Here we actually start the 1716 /* Here we actually start the io */ 2156 folio = shmem_swapin_cluster( !! 1717 page = shmem_swapin(swap, gfp, info, index); 2157 if (!folio) { !! 1718 if (!page) { 2158 error = -ENOMEM; 1719 error = -ENOMEM; 2159 goto failed; 1720 goto failed; 2160 } 1721 } 2161 } 1722 } 2162 1723 2163 /* We have to do this with folio lock !! 1724 /* We have to do this with page locked to prevent races */ 2164 folio_lock(folio); !! 1725 lock_page(page); 2165 if (!folio_test_swapcache(folio) || !! 
1726 if (!PageSwapCache(page) || page_private(page) != swap.val || 2166 folio->swap.val != swap.val || << 2167 !shmem_confirm_swap(mapping, inde 1727 !shmem_confirm_swap(mapping, index, swap)) { 2168 error = -EEXIST; 1728 error = -EEXIST; 2169 goto unlock; 1729 goto unlock; 2170 } 1730 } 2171 if (!folio_test_uptodate(folio)) { !! 1731 if (!PageUptodate(page)) { 2172 error = -EIO; 1732 error = -EIO; 2173 goto failed; 1733 goto failed; 2174 } 1734 } 2175 folio_wait_writeback(folio); !! 1735 wait_on_page_writeback(page); 2176 nr_pages = folio_nr_pages(folio); << 2177 1736 2178 /* 1737 /* 2179 * Some architectures may have to res 1738 * Some architectures may have to restore extra metadata to the 2180 * folio after reading from swap. !! 1739 * physical page after reading from swap. 2181 */ 1740 */ 2182 arch_swap_restore(folio_swap(swap, fo !! 1741 arch_swap_restore(swap, page); 2183 1742 2184 if (shmem_should_replace_folio(folio, !! 1743 if (shmem_should_replace_page(page, gfp)) { 2185 error = shmem_replace_folio(& !! 1744 error = shmem_replace_page(&page, gfp, info, index); 2186 if (error) 1745 if (error) 2187 goto failed; 1746 goto failed; 2188 } 1747 } 2189 1748 2190 error = shmem_add_to_page_cache(folio !! 1749 error = shmem_add_to_page_cache(page, mapping, index, 2191 round !! 1750 swp_to_radix_entry(swap), gfp, 2192 swp_t !! 1751 charge_mm); 2193 if (error) 1752 if (error) 2194 goto failed; 1753 goto failed; 2195 1754 2196 shmem_recalc_inode(inode, 0, -nr_page !! 1755 spin_lock_irq(&info->lock); >> 1756 info->swapped--; >> 1757 shmem_recalc_inode(inode); >> 1758 spin_unlock_irq(&info->lock); 2197 1759 2198 if (sgp == SGP_WRITE) 1760 if (sgp == SGP_WRITE) 2199 folio_mark_accessed(folio); !! 1761 mark_page_accessed(page); 2200 1762 2201 delete_from_swap_cache(folio); !! 1763 delete_from_swap_cache(page); 2202 folio_mark_dirty(folio); !! 1764 set_page_dirty(page); 2203 swap_free_nr(swap, nr_pages); !! 1765 swap_free(swap); 2204 put_swap_device(si); << 2205 1766 2206 *foliop = folio; !! 1767 *pagep = page; 2207 return 0; 1768 return 0; 2208 failed: 1769 failed: 2209 if (!shmem_confirm_swap(mapping, inde 1770 if (!shmem_confirm_swap(mapping, index, swap)) 2210 error = -EEXIST; 1771 error = -EEXIST; 2211 if (error == -EIO) << 2212 shmem_set_folio_swapin_error( << 2213 unlock: 1772 unlock: 2214 if (folio) { !! 1773 if (page) { 2215 folio_unlock(folio); !! 1774 unlock_page(page); 2216 folio_put(folio); !! 1775 put_page(page); 2217 } 1776 } 2218 put_swap_device(si); << 2219 1777 2220 return error; 1778 return error; 2221 } 1779 } 2222 1780 2223 /* 1781 /* 2224 * shmem_get_folio_gfp - find page in cache, !! 1782 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate 2225 * 1783 * 2226 * If we allocate a new one we do not mark it 1784 * If we allocate a new one we do not mark it dirty. That's up to the 2227 * vm. If we swap it in we mark it dirty sinc 1785 * vm. If we swap it in we mark it dirty since we also free the swap 2228 * entry since a page cannot live in both the 1786 * entry since a page cannot live in both the swap and page cache. 2229 * 1787 * 2230 * vmf and fault_type are only supplied by sh !! 1788 * vma, vmf, and fault_type are only supplied by shmem_fault: >> 1789 * otherwise they are NULL. 2231 */ 1790 */ 2232 static int shmem_get_folio_gfp(struct inode * !! 1791 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index, 2233 loff_t write_end, struct foli !! 1792 struct page **pagep, enum sgp_type sgp, gfp_t gfp, 2234 gfp_t gfp, struct vm_fault *v !! 
1793 struct vm_area_struct *vma, struct vm_fault *vmf, 2235 { !! 1794 vm_fault_t *fault_type) 2236 struct vm_area_struct *vma = vmf ? vm !! 1795 { 2237 struct mm_struct *fault_mm; !! 1796 struct address_space *mapping = inode->i_mapping; 2238 struct folio *folio; !! 1797 struct shmem_inode_info *info = SHMEM_I(inode); >> 1798 struct shmem_sb_info *sbinfo; >> 1799 struct mm_struct *charge_mm; >> 1800 struct page *page; >> 1801 enum sgp_type sgp_huge = sgp; >> 1802 pgoff_t hindex = index; >> 1803 gfp_t huge_gfp; 2239 int error; 1804 int error; 2240 bool alloced; !! 1805 int once = 0; 2241 unsigned long orders = 0; !! 1806 int alloced = 0; 2242 << 2243 if (WARN_ON_ONCE(!shmem_mapping(inode << 2244 return -EINVAL; << 2245 1807 2246 if (index > (MAX_LFS_FILESIZE >> PAGE 1808 if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT)) 2247 return -EFBIG; 1809 return -EFBIG; >> 1810 if (sgp == SGP_NOHUGE || sgp == SGP_HUGE) >> 1811 sgp = SGP_CACHE; 2248 repeat: 1812 repeat: 2249 if (sgp <= SGP_CACHE && 1813 if (sgp <= SGP_CACHE && 2250 ((loff_t)index << PAGE_SHIFT) >= !! 1814 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) { 2251 return -EINVAL; 1815 return -EINVAL; >> 1816 } 2252 1817 2253 alloced = false; !! 1818 sbinfo = SHMEM_SB(inode->i_sb); 2254 fault_mm = vma ? vma->vm_mm : NULL; !! 1819 charge_mm = vma ? vma->vm_mm : NULL; 2255 1820 2256 folio = filemap_get_entry(inode->i_ma !! 1821 page = pagecache_get_page(mapping, index, 2257 if (folio && vma && userfaultfd_minor !! 1822 FGP_ENTRY | FGP_HEAD | FGP_LOCK, 0); 2258 if (!xa_is_value(folio)) !! 1823 2259 folio_put(folio); !! 1824 if (page && vma && userfaultfd_minor(vma)) { >> 1825 if (!xa_is_value(page)) { >> 1826 unlock_page(page); >> 1827 put_page(page); >> 1828 } 2260 *fault_type = handle_userfaul 1829 *fault_type = handle_userfault(vmf, VM_UFFD_MINOR); 2261 return 0; 1830 return 0; 2262 } 1831 } 2263 1832 2264 if (xa_is_value(folio)) { !! 1833 if (xa_is_value(page)) { 2265 error = shmem_swapin_folio(in !! 1834 error = shmem_swapin_page(inode, index, &page, 2266 sg !! 1835 sgp, gfp, vma, fault_type); 2267 if (error == -EEXIST) 1836 if (error == -EEXIST) 2268 goto repeat; 1837 goto repeat; 2269 1838 2270 *foliop = folio; !! 1839 *pagep = page; 2271 return error; 1840 return error; 2272 } 1841 } 2273 1842 2274 if (folio) { !! 1843 if (page) 2275 folio_lock(folio); !! 1844 hindex = page->index; >> 1845 if (page && sgp == SGP_WRITE) >> 1846 mark_page_accessed(page); 2276 1847 2277 /* Has the folio been truncat !! 1848 /* fallocated page? */ 2278 if (unlikely(folio->mapping ! !! 1849 if (page && !PageUptodate(page)) { 2279 folio_unlock(folio); << 2280 folio_put(folio); << 2281 goto repeat; << 2282 } << 2283 if (sgp == SGP_WRITE) << 2284 folio_mark_accessed(f << 2285 if (folio_test_uptodate(folio << 2286 goto out; << 2287 /* fallocated folio */ << 2288 if (sgp != SGP_READ) 1850 if (sgp != SGP_READ) 2289 goto clear; 1851 goto clear; 2290 folio_unlock(folio); !! 1852 unlock_page(page); 2291 folio_put(folio); !! 1853 put_page(page); >> 1854 page = NULL; >> 1855 hindex = index; 2292 } 1856 } >> 1857 if (page || sgp == SGP_READ) >> 1858 goto out; 2293 1859 2294 /* 1860 /* 2295 * SGP_READ: succeed on hole, with NU !! 1861 * Fast cache lookup did not find it: 2296 * SGP_NOALLOC: fail on hole, with NU !! 1862 * bring it back from swap or allocate. 
2297 */ << 2298 *foliop = NULL; << 2299 if (sgp == SGP_READ) << 2300 return 0; << 2301 if (sgp == SGP_NOALLOC) << 2302 return -ENOENT; << 2303 << 2304 /* << 2305 * Fast cache lookup and swap lookup << 2306 */ 1863 */ 2307 1864 2308 if (vma && userfaultfd_missing(vma)) 1865 if (vma && userfaultfd_missing(vma)) { 2309 *fault_type = handle_userfaul 1866 *fault_type = handle_userfault(vmf, VM_UFFD_MISSING); 2310 return 0; 1867 return 0; 2311 } 1868 } 2312 1869 2313 /* Find hugepage orders that are allo !! 1870 /* shmem_symlink() */ 2314 orders = shmem_allowable_huge_orders( !! 1871 if (!shmem_mapping(mapping)) 2315 if (orders > 0) { !! 1872 goto alloc_nohuge; 2316 gfp_t huge_gfp; !! 1873 if (shmem_huge == SHMEM_HUGE_DENY || sgp_huge == SGP_NOHUGE) 2317 !! 1874 goto alloc_nohuge; 2318 huge_gfp = vma_thp_gfp_mask(v !! 1875 if (shmem_huge == SHMEM_HUGE_FORCE) 2319 huge_gfp = limit_gfp_mask(hug !! 1876 goto alloc_huge; 2320 folio = shmem_alloc_and_add_f !! 1877 switch (sbinfo->huge) { 2321 inode, index, !! 1878 case SHMEM_HUGE_NEVER: 2322 if (!IS_ERR(folio)) { !! 1879 goto alloc_nohuge; 2323 if (folio_test_pmd_ma !! 1880 case SHMEM_HUGE_WITHIN_SIZE: { 2324 count_vm_even !! 1881 loff_t i_size; 2325 count_mthp_stat(folio !! 1882 pgoff_t off; 2326 goto alloced; !! 1883 2327 } !! 1884 off = round_up(index, HPAGE_PMD_NR); 2328 if (PTR_ERR(folio) == -EEXIST !! 1885 i_size = round_up(i_size_read(inode), PAGE_SIZE); 2329 goto repeat; !! 1886 if (i_size >= HPAGE_PMD_SIZE && >> 1887 i_size >> PAGE_SHIFT >= off) >> 1888 goto alloc_huge; >> 1889 >> 1890 fallthrough; 2330 } 1891 } >> 1892 case SHMEM_HUGE_ADVISE: >> 1893 if (sgp_huge == SGP_HUGE) >> 1894 goto alloc_huge; >> 1895 /* TODO: implement fadvise() hints */ >> 1896 goto alloc_nohuge; >> 1897 } >> 1898 >> 1899 alloc_huge: >> 1900 huge_gfp = vma_thp_gfp_mask(vma); >> 1901 huge_gfp = limit_gfp_mask(huge_gfp, gfp); >> 1902 page = shmem_alloc_and_acct_page(huge_gfp, inode, index, true); >> 1903 if (IS_ERR(page)) { >> 1904 alloc_nohuge: >> 1905 page = shmem_alloc_and_acct_page(gfp, inode, >> 1906 index, false); >> 1907 } >> 1908 if (IS_ERR(page)) { >> 1909 int retry = 5; >> 1910 >> 1911 error = PTR_ERR(page); >> 1912 page = NULL; >> 1913 if (error != -ENOSPC) >> 1914 goto unlock; >> 1915 /* >> 1916 * Try to reclaim some space by splitting a huge page >> 1917 * beyond i_size on the filesystem. >> 1918 */ >> 1919 while (retry--) { >> 1920 int ret; 2331 1921 2332 folio = shmem_alloc_and_add_folio(vmf !! 1922 ret = shmem_unused_huge_shrink(sbinfo, NULL, 1); 2333 if (IS_ERR(folio)) { !! 1923 if (ret == SHRINK_STOP) 2334 error = PTR_ERR(folio); !! 1924 break; 2335 if (error == -EEXIST) !! 1925 if (ret) 2336 goto repeat; !! 1926 goto alloc_nohuge; 2337 folio = NULL; !! 1927 } 2338 goto unlock; 1928 goto unlock; 2339 } 1929 } 2340 1930 2341 alloced: !! 1931 if (PageTransHuge(page)) >> 1932 hindex = round_down(index, HPAGE_PMD_NR); >> 1933 else >> 1934 hindex = index; >> 1935 >> 1936 if (sgp == SGP_WRITE) >> 1937 __SetPageReferenced(page); >> 1938 >> 1939 error = shmem_add_to_page_cache(page, mapping, hindex, >> 1940 NULL, gfp & GFP_RECLAIM_MASK, >> 1941 charge_mm); >> 1942 if (error) >> 1943 goto unacct; >> 1944 lru_cache_add(page); >> 1945 >> 1946 spin_lock_irq(&info->lock); >> 1947 info->alloced += compound_nr(page); >> 1948 inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page); >> 1949 shmem_recalc_inode(inode); >> 1950 spin_unlock_irq(&info->lock); 2342 alloced = true; 1951 alloced = true; 2343 if (folio_test_large(folio) && !! 
1952 >> 1953 if (PageTransHuge(page) && 2344 DIV_ROUND_UP(i_size_read(inode), 1954 DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) < 2345 folio !! 1955 hindex + HPAGE_PMD_NR - 1) { 2346 struct shmem_sb_info *sbinfo << 2347 struct shmem_inode_info *info << 2348 /* 1956 /* 2349 * Part of the large folio is !! 1957 * Part of the huge page is beyond i_size: subject 2350 * to shrink under memory pre 1958 * to shrink under memory pressure. 2351 */ 1959 */ 2352 spin_lock(&sbinfo->shrinklist 1960 spin_lock(&sbinfo->shrinklist_lock); 2353 /* 1961 /* 2354 * _careful to defend against 1962 * _careful to defend against unlocked access to 2355 * ->shrink_list in shmem_unu 1963 * ->shrink_list in shmem_unused_huge_shrink() 2356 */ 1964 */ 2357 if (list_empty_careful(&info- 1965 if (list_empty_careful(&info->shrinklist)) { 2358 list_add_tail(&info-> 1966 list_add_tail(&info->shrinklist, 2359 &sbinfo 1967 &sbinfo->shrinklist); 2360 sbinfo->shrinklist_le 1968 sbinfo->shrinklist_len++; 2361 } 1969 } 2362 spin_unlock(&sbinfo->shrinkli 1970 spin_unlock(&sbinfo->shrinklist_lock); 2363 } 1971 } 2364 1972 2365 if (sgp == SGP_WRITE) << 2366 folio_set_referenced(folio); << 2367 /* 1973 /* 2368 * Let SGP_FALLOC use the SGP_WRITE o !! 1974 * Let SGP_FALLOC use the SGP_WRITE optimization on a new page. 2369 */ 1975 */ 2370 if (sgp == SGP_FALLOC) 1976 if (sgp == SGP_FALLOC) 2371 sgp = SGP_WRITE; 1977 sgp = SGP_WRITE; 2372 clear: 1978 clear: 2373 /* 1979 /* 2374 * Let SGP_WRITE caller clear ends if !! 1980 * Let SGP_WRITE caller clear ends if write does not fill page; 2375 * but SGP_FALLOC on a folio fallocat !! 1981 * but SGP_FALLOC on a page fallocated earlier must initialize 2376 * it now, lest undo on failure cance 1982 * it now, lest undo on failure cancel our earlier guarantee. 2377 */ 1983 */ 2378 if (sgp != SGP_WRITE && !folio_test_u !! 1984 if (sgp != SGP_WRITE && !PageUptodate(page)) { 2379 long i, n = folio_nr_pages(fo !! 1985 int i; 2380 1986 2381 for (i = 0; i < n; i++) !! 1987 for (i = 0; i < compound_nr(page); i++) { 2382 clear_highpage(folio_ !! 1988 clear_highpage(page + i); 2383 flush_dcache_folio(folio); !! 1989 flush_dcache_page(page + i); 2384 folio_mark_uptodate(folio); !! 1990 } >> 1991 SetPageUptodate(page); 2385 } 1992 } 2386 1993 2387 /* Perhaps the file has been truncate 1994 /* Perhaps the file has been truncated since we checked */ 2388 if (sgp <= SGP_CACHE && 1995 if (sgp <= SGP_CACHE && 2389 ((loff_t)index << PAGE_SHIFT) >= 1996 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) { >> 1997 if (alloced) { >> 1998 ClearPageDirty(page); >> 1999 delete_from_page_cache(page); >> 2000 spin_lock_irq(&info->lock); >> 2001 shmem_recalc_inode(inode); >> 2002 spin_unlock_irq(&info->lock); >> 2003 } 2390 error = -EINVAL; 2004 error = -EINVAL; 2391 goto unlock; 2005 goto unlock; 2392 } 2006 } 2393 out: 2007 out: 2394 *foliop = folio; !! 2008 *pagep = page + index - hindex; 2395 return 0; 2009 return 0; 2396 2010 2397 /* 2011 /* 2398 * Error recovery. 2012 * Error recovery. 2399 */ 2013 */ >> 2014 unacct: >> 2015 shmem_inode_unacct_blocks(inode, compound_nr(page)); >> 2016 >> 2017 if (PageTransHuge(page)) { >> 2018 unlock_page(page); >> 2019 put_page(page); >> 2020 goto alloc_nohuge; >> 2021 } 2400 unlock: 2022 unlock: 2401 if (alloced) !! 2023 if (page) { 2402 filemap_remove_folio(folio); !! 2024 unlock_page(page); 2403 shmem_recalc_inode(inode, 0, 0); !! 2025 put_page(page); 2404 if (folio) { !! 2026 } 2405 folio_unlock(folio); !! 2027 if (error == -ENOSPC && !once++) { 2406 folio_put(folio); !! 
2028 spin_lock_irq(&info->lock); >> 2029 shmem_recalc_inode(inode); >> 2030 spin_unlock_irq(&info->lock); >> 2031 goto repeat; 2407 } 2032 } >> 2033 if (error == -EEXIST) >> 2034 goto repeat; 2408 return error; 2035 return error; 2409 } 2036 } 2410 2037 2411 /** << 2412 * shmem_get_folio - find, and lock a shmem f << 2413 * @inode: inode to search << 2414 * @index: the page index. << 2415 * @write_end: end of a write, could extend << 2416 * @foliop: pointer to the folio if found << 2417 * @sgp: SGP_* flags to control behavi << 2418 * << 2419 * Looks up the page cache entry at @inode & << 2420 * present, it is returned locked with an inc << 2421 * << 2422 * If the caller modifies data in the folio, << 2423 * before unlocking the folio to ensure that << 2424 * There is no need to reserve space before c << 2425 * << 2426 * When no folio is found, the behavior depen << 2427 * - for SGP_READ, *@foliop is %NULL and 0 i << 2428 * - for SGP_NOALLOC, *@foliop is %NULL and << 2429 * - for all other flags a new folio is allo << 2430 * page cache and returned locked in @foli << 2431 * << 2432 * Context: May sleep. << 2433 * Return: 0 if successful, else a negative e << 2434 */ << 2435 int shmem_get_folio(struct inode *inode, pgof << 2436 struct folio **foliop, en << 2437 { << 2438 return shmem_get_folio_gfp(inode, ind << 2439 mapping_gfp_mask(inod << 2440 } << 2441 EXPORT_SYMBOL_GPL(shmem_get_folio); << 2442 << 2443 /* 2038 /* 2444 * This is like autoremove_wake_function, but 2039 * This is like autoremove_wake_function, but it removes the wait queue 2445 * entry unconditionally - even if something 2040 * entry unconditionally - even if something else had already woken the 2446 * target. 2041 * target. 2447 */ 2042 */ 2448 static int synchronous_wake_function(wait_que !! 
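The shmem_get_folio() kernel-doc above spells out the SGP_READ contract: a hole succeeds with *foliop == NULL and nothing is allocated, while the writing modes instantiate pages. That asymmetry is visible from user space through st_blocks on a sparse tmpfs file. A minimal sketch, assuming memfd_create() is exposed by libc (glibc 2.27+) and the memfd is tmpfs-backed as usual; the "sparse-demo" name is arbitrary:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
    char buf[4096];
    struct stat st;
    int fd = memfd_create("sparse-demo", 0);

    ftruncate(fd, 1 << 20);                 /* 1MiB hole: nothing allocated yet */
    pread(fd, buf, sizeof(buf), 0);         /* reading the hole returns zeroes */
    fstat(fd, &st);
    printf("after read:  st_blocks=%lld\n", (long long)st.st_blocks);

    memset(buf, 0xaa, sizeof(buf));
    pwrite(fd, buf, sizeof(buf), 0);        /* a write does allocate a page */
    fstat(fd, &st);
    printf("after write: st_blocks=%lld\n", (long long)st.st_blocks);

    close(fd);
    return 0;
}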
2043 static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) 2449 unsigned int mode, in << 2450 { 2044 { 2451 int ret = default_wake_function(wait, 2045 int ret = default_wake_function(wait, mode, sync, key); 2452 list_del_init(&wait->entry); 2046 list_del_init(&wait->entry); 2453 return ret; 2047 return ret; 2454 } 2048 } 2455 2049 2456 /* << 2457 * Trinity finds that probing a hole which tm << 2458 * prevent the hole-punch from ever completin << 2459 * locks writers out with its hold on i_rwsem << 2460 * faulting pages into the hole while it's be << 2461 * shmem_undo_range() does remove the additio << 2462 * keep up, as each new page needs its own un << 2463 * and the i_mmap tree grows ever slower to s << 2464 * << 2465 * It does not matter if we sometimes reach t << 2466 * hole-punch begins, so that one fault then << 2467 * we just need to make racing faults a rare << 2468 * << 2469 * The implementation below would be much sim << 2470 * standard mutex or completion: but we canno << 2471 * and bloating every shmem inode for this un << 2472 */ << 2473 static vm_fault_t shmem_falloc_wait(struct vm << 2474 { << 2475 struct shmem_falloc *shmem_falloc; << 2476 struct file *fpin = NULL; << 2477 vm_fault_t ret = 0; << 2478 << 2479 spin_lock(&inode->i_lock); << 2480 shmem_falloc = inode->i_private; << 2481 if (shmem_falloc && << 2482 shmem_falloc->waitq && << 2483 vmf->pgoff >= shmem_falloc->start << 2484 vmf->pgoff < shmem_falloc->next) << 2485 wait_queue_head_t *shmem_fall << 2486 DEFINE_WAIT_FUNC(shmem_fault_ << 2487 << 2488 ret = VM_FAULT_NOPAGE; << 2489 fpin = maybe_unlock_mmap_for_ << 2490 shmem_falloc_waitq = shmem_fa << 2491 prepare_to_wait(shmem_falloc_ << 2492 TASK_UNINTERR << 2493 spin_unlock(&inode->i_lock); << 2494 schedule(); << 2495 << 2496 /* << 2497 * shmem_falloc_waitq points << 2498 * stack of the hole-punching << 2499 * is usually invalid by the << 2500 * finish_wait() does not der << 2501 * though i_lock needed lest << 2502 */ << 2503 spin_lock(&inode->i_lock); << 2504 finish_wait(shmem_falloc_wait << 2505 } << 2506 spin_unlock(&inode->i_lock); << 2507 if (fpin) { << 2508 fput(fpin); << 2509 ret = VM_FAULT_RETRY; << 2510 } << 2511 return ret; << 2512 } << 2513 << 2514 static vm_fault_t shmem_fault(struct vm_fault 2050 static vm_fault_t shmem_fault(struct vm_fault *vmf) 2515 { 2051 { 2516 struct inode *inode = file_inode(vmf- !! 2052 struct vm_area_struct *vma = vmf->vma; >> 2053 struct inode *inode = file_inode(vma->vm_file); 2517 gfp_t gfp = mapping_gfp_mask(inode->i 2054 gfp_t gfp = mapping_gfp_mask(inode->i_mapping); 2518 struct folio *folio = NULL; !! 2055 enum sgp_type sgp; 2519 vm_fault_t ret = 0; << 2520 int err; 2056 int err; >> 2057 vm_fault_t ret = VM_FAULT_LOCKED; 2521 2058 2522 /* 2059 /* 2523 * Trinity finds that probing a hole 2060 * Trinity finds that probing a hole which tmpfs is punching can 2524 * prevent the hole-punch from ever c !! 2061 * prevent the hole-punch from ever completing: which in turn >> 2062 * locks writers out with its hold on i_mutex. So refrain from >> 2063 * faulting pages into the hole while it's being punched. Although >> 2064 * shmem_undo_range() does remove the additions, it may be unable to >> 2065 * keep up, as each new page needs its own unmap_mapping_range() call, >> 2066 * and the i_mmap tree grows ever slower to scan if new vmas are added. 
>> 2067 * >> 2068 * It does not matter if we sometimes reach this check just before the >> 2069 * hole-punch begins, so that one fault then races with the punch: >> 2070 * we just need to make racing faults a rare case. >> 2071 * >> 2072 * The implementation below would be much simpler if we just used a >> 2073 * standard mutex or completion: but we cannot take i_mutex in fault, >> 2074 * and bloating every shmem inode for this unlikely case would be sad. 2525 */ 2075 */ 2526 if (unlikely(inode->i_private)) { 2076 if (unlikely(inode->i_private)) { 2527 ret = shmem_falloc_wait(vmf, !! 2077 struct shmem_falloc *shmem_falloc; 2528 if (ret) !! 2078 >> 2079 spin_lock(&inode->i_lock); >> 2080 shmem_falloc = inode->i_private; >> 2081 if (shmem_falloc && >> 2082 shmem_falloc->waitq && >> 2083 vmf->pgoff >= shmem_falloc->start && >> 2084 vmf->pgoff < shmem_falloc->next) { >> 2085 struct file *fpin; >> 2086 wait_queue_head_t *shmem_falloc_waitq; >> 2087 DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function); >> 2088 >> 2089 ret = VM_FAULT_NOPAGE; >> 2090 fpin = maybe_unlock_mmap_for_io(vmf, NULL); >> 2091 if (fpin) >> 2092 ret = VM_FAULT_RETRY; >> 2093 >> 2094 shmem_falloc_waitq = shmem_falloc->waitq; >> 2095 prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait, >> 2096 TASK_UNINTERRUPTIBLE); >> 2097 spin_unlock(&inode->i_lock); >> 2098 schedule(); >> 2099 >> 2100 /* >> 2101 * shmem_falloc_waitq points into the shmem_fallocate() >> 2102 * stack of the hole-punching task: shmem_falloc_waitq >> 2103 * is usually invalid by the time we reach here, but >> 2104 * finish_wait() does not dereference it in that case; >> 2105 * though i_lock needed lest racing with wake_up_all(). >> 2106 */ >> 2107 spin_lock(&inode->i_lock); >> 2108 finish_wait(shmem_falloc_waitq, &shmem_fault_wait); >> 2109 spin_unlock(&inode->i_lock); >> 2110 >> 2111 if (fpin) >> 2112 fput(fpin); 2529 return ret; 2113 return ret; >> 2114 } >> 2115 spin_unlock(&inode->i_lock); 2530 } 2116 } 2531 2117 2532 WARN_ON_ONCE(vmf->page != NULL); !! 2118 sgp = SGP_CACHE; 2533 err = shmem_get_folio_gfp(inode, vmf- !! 2119 2534 gfp, vmf, & !! 2120 if ((vma->vm_flags & VM_NOHUGEPAGE) || >> 2121 test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)) >> 2122 sgp = SGP_NOHUGE; >> 2123 else if (vma->vm_flags & VM_HUGEPAGE) >> 2124 sgp = SGP_HUGE; >> 2125 >> 2126 err = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp, >> 2127 gfp, vma, vmf, &ret); 2535 if (err) 2128 if (err) 2536 return vmf_error(err); 2129 return vmf_error(err); 2537 if (folio) { << 2538 vmf->page = folio_file_page(f << 2539 ret |= VM_FAULT_LOCKED; << 2540 } << 2541 return ret; 2130 return ret; 2542 } 2131 } 2543 2132 2544 unsigned long shmem_get_unmapped_area(struct 2133 unsigned long shmem_get_unmapped_area(struct file *file, 2545 unsigne 2134 unsigned long uaddr, unsigned long len, 2546 unsigne 2135 unsigned long pgoff, unsigned long flags) 2547 { 2136 { >> 2137 unsigned long (*get_area)(struct file *, >> 2138 unsigned long, unsigned long, unsigned long, unsigned long); 2548 unsigned long addr; 2139 unsigned long addr; 2549 unsigned long offset; 2140 unsigned long offset; 2550 unsigned long inflated_len; 2141 unsigned long inflated_len; 2551 unsigned long inflated_addr; 2142 unsigned long inflated_addr; 2552 unsigned long inflated_offset; 2143 unsigned long inflated_offset; 2553 unsigned long hpage_size; << 2554 2144 2555 if (len > TASK_SIZE) 2145 if (len > TASK_SIZE) 2556 return -ENOMEM; 2146 return -ENOMEM; 2557 2147 2558 addr = mm_get_unmapped_area(current-> !! 
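The block comment and shmem_falloc wait above cover exactly the situation a user can create by mapping a tmpfs file and hole-punching it from another thread: the faulting thread parks on the waitq published through inode->i_private until the punch finishes. A rough way to exercise that interplay from user space, assuming memfd_create() and fallocate() from libc and a build with -pthread; the 2MiB size and iteration count are arbitrary:

#define _GNU_SOURCE
#include <fcntl.h>
#include <pthread.h>
#include <sys/mman.h>
#include <unistd.h>

#define LEN (2 << 20)

static char *map;
static volatile int stop;

static void *toucher(void *arg)
{
    while (!stop)                           /* keep faulting pages back in */
        for (size_t off = 0; off < LEN; off += 4096)
            map[off] = 1;
    return NULL;
}

int main(void)
{
    pthread_t t;
    int fd = memfd_create("punch-race", 0);

    ftruncate(fd, LEN);
    map = mmap(NULL, LEN, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

    pthread_create(&t, NULL, toucher, NULL);
    for (int i = 0; i < 1000; i++)          /* racing hole punches */
        fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, LEN);
    stop = 1;
    pthread_join(t, NULL);

    munmap(map, LEN);
    close(fd);
    return 0;
}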
2148 get_area = current->mm->get_unmapped_area; 2559 flags); !! 2149 addr = get_area(file, uaddr, len, pgoff, flags); 2560 2150 2561 if (!IS_ENABLED(CONFIG_TRANSPARENT_HU 2151 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) 2562 return addr; 2152 return addr; 2563 if (IS_ERR_VALUE(addr)) 2153 if (IS_ERR_VALUE(addr)) 2564 return addr; 2154 return addr; 2565 if (addr & ~PAGE_MASK) 2155 if (addr & ~PAGE_MASK) 2566 return addr; 2156 return addr; 2567 if (addr > TASK_SIZE - len) 2157 if (addr > TASK_SIZE - len) 2568 return addr; 2158 return addr; 2569 2159 2570 if (shmem_huge == SHMEM_HUGE_DENY) 2160 if (shmem_huge == SHMEM_HUGE_DENY) 2571 return addr; 2161 return addr; >> 2162 if (len < HPAGE_PMD_SIZE) >> 2163 return addr; 2572 if (flags & MAP_FIXED) 2164 if (flags & MAP_FIXED) 2573 return addr; 2165 return addr; 2574 /* 2166 /* 2575 * Our priority is to support MAP_SHA 2167 * Our priority is to support MAP_SHARED mapped hugely; 2576 * and support MAP_PRIVATE mapped hug 2168 * and support MAP_PRIVATE mapped hugely too, until it is COWed. 2577 * But if caller specified an address 2169 * But if caller specified an address hint and we allocated area there 2578 * successfully, respect that as befo 2170 * successfully, respect that as before. 2579 */ 2171 */ 2580 if (uaddr == addr) 2172 if (uaddr == addr) 2581 return addr; 2173 return addr; 2582 2174 2583 hpage_size = HPAGE_PMD_SIZE; << 2584 if (shmem_huge != SHMEM_HUGE_FORCE) { 2175 if (shmem_huge != SHMEM_HUGE_FORCE) { 2585 struct super_block *sb; 2176 struct super_block *sb; 2586 unsigned long __maybe_unused << 2587 int order = 0; << 2588 2177 2589 if (file) { 2178 if (file) { 2590 VM_BUG_ON(file->f_op 2179 VM_BUG_ON(file->f_op != &shmem_file_operations); 2591 sb = file_inode(file) 2180 sb = file_inode(file)->i_sb; 2592 } else { 2181 } else { 2593 /* 2182 /* 2594 * Called directly fr 2183 * Called directly from mm/mmap.c, or drivers/char/mem.c 2595 * for "/dev/zero", t 2184 * for "/dev/zero", to create a shared anonymous object. 2596 */ 2185 */ 2597 if (IS_ERR(shm_mnt)) 2186 if (IS_ERR(shm_mnt)) 2598 return addr; 2187 return addr; 2599 sb = shm_mnt->mnt_sb; 2188 sb = shm_mnt->mnt_sb; 2600 << 2601 /* << 2602 * Find the highest m << 2603 * provide a suitable << 2604 */ << 2605 #ifdef CONFIG_TRANSPARENT_HUGEPAGE << 2606 hpage_orders = READ_O << 2607 hpage_orders |= READ_ << 2608 hpage_orders |= READ_ << 2609 if (SHMEM_SB(sb)->hug << 2610 hpage_orders << 2611 << 2612 if (hpage_orders > 0) << 2613 order = highe << 2614 hpage_size = << 2615 } << 2616 #endif << 2617 } 2189 } 2618 if (SHMEM_SB(sb)->huge == SHM !! 2190 if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER) 2619 return addr; 2191 return addr; 2620 } 2192 } 2621 2193 2622 if (len < hpage_size) !! 2194 offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1); >> 2195 if (offset && offset + len < 2 * HPAGE_PMD_SIZE) 2623 return addr; 2196 return addr; 2624 !! 2197 if ((addr & (HPAGE_PMD_SIZE-1)) == offset) 2625 offset = (pgoff << PAGE_SHIFT) & (hpa << 2626 if (offset && offset + len < 2 * hpag << 2627 return addr; << 2628 if ((addr & (hpage_size - 1)) == offs << 2629 return addr; 2198 return addr; 2630 2199 2631 inflated_len = len + hpage_size - PAG !! 2200 inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE; 2632 if (inflated_len > TASK_SIZE) 2201 if (inflated_len > TASK_SIZE) 2633 return addr; 2202 return addr; 2634 if (inflated_len < len) 2203 if (inflated_len < len) 2635 return addr; 2204 return addr; 2636 2205 2637 inflated_addr = mm_get_unmapped_area( !! 
2206 inflated_addr = get_area(NULL, uaddr, inflated_len, 0, flags); 2638 << 2639 if (IS_ERR_VALUE(inflated_addr)) 2207 if (IS_ERR_VALUE(inflated_addr)) 2640 return addr; 2208 return addr; 2641 if (inflated_addr & ~PAGE_MASK) 2209 if (inflated_addr & ~PAGE_MASK) 2642 return addr; 2210 return addr; 2643 2211 2644 inflated_offset = inflated_addr & (hp !! 2212 inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1); 2645 inflated_addr += offset - inflated_of 2213 inflated_addr += offset - inflated_offset; 2646 if (inflated_offset > offset) 2214 if (inflated_offset > offset) 2647 inflated_addr += hpage_size; !! 2215 inflated_addr += HPAGE_PMD_SIZE; 2648 2216 2649 if (inflated_addr > TASK_SIZE - len) 2217 if (inflated_addr > TASK_SIZE - len) 2650 return addr; 2218 return addr; 2651 return inflated_addr; 2219 return inflated_addr; 2652 } 2220 } 2653 2221 2654 #ifdef CONFIG_NUMA 2222 #ifdef CONFIG_NUMA 2655 static int shmem_set_policy(struct vm_area_st 2223 static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol) 2656 { 2224 { 2657 struct inode *inode = file_inode(vma- 2225 struct inode *inode = file_inode(vma->vm_file); 2658 return mpol_set_shared_policy(&SHMEM_ 2226 return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol); 2659 } 2227 } 2660 2228 2661 static struct mempolicy *shmem_get_policy(str 2229 static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma, 2662 uns !! 2230 unsigned long addr) 2663 { 2231 { 2664 struct inode *inode = file_inode(vma- 2232 struct inode *inode = file_inode(vma->vm_file); 2665 pgoff_t index; 2233 pgoff_t index; 2666 2234 2667 /* << 2668 * Bias interleave by inode number to << 2669 * but this interface is independent << 2670 * supplies only that bias, letting c << 2671 * by page order, as in shmem_get_pgo << 2672 */ << 2673 *ilx = inode->i_ino; << 2674 index = ((addr - vma->vm_start) >> PA 2235 index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; 2675 return mpol_shared_policy_lookup(&SHM 2236 return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index); 2676 } 2237 } 2677 !! 2238 #endif 2678 static struct mempolicy *shmem_get_pgoff_poli << 2679 pgoff_t index, unsign << 2680 { << 2681 struct mempolicy *mpol; << 2682 << 2683 /* Bias interleave by inode number to << 2684 *ilx = info->vfs_inode.i_ino + (index << 2685 << 2686 mpol = mpol_shared_policy_lookup(&inf << 2687 return mpol ? mpol : get_task_policy( << 2688 } << 2689 #else << 2690 static struct mempolicy *shmem_get_pgoff_poli << 2691 pgoff_t index, unsign << 2692 { << 2693 *ilx = 0; << 2694 return NULL; << 2695 } << 2696 #endif /* CONFIG_NUMA */ << 2697 2239 2698 int shmem_lock(struct file *file, int lock, s 2240 int shmem_lock(struct file *file, int lock, struct ucounts *ucounts) 2699 { 2241 { 2700 struct inode *inode = file_inode(file 2242 struct inode *inode = file_inode(file); 2701 struct shmem_inode_info *info = SHMEM 2243 struct shmem_inode_info *info = SHMEM_I(inode); 2702 int retval = -ENOMEM; 2244 int retval = -ENOMEM; 2703 2245 2704 /* 2246 /* 2705 * What serializes the accesses to in 2247 * What serializes the accesses to info->flags? 2706 * ipc_lock_object() when called from 2248 * ipc_lock_object() when called from shmctl_do_lock(), 2707 * no serialization needed when calle 2249 * no serialization needed when called from shm_destroy(). 
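shmem_lock() just above is reached from the SysV shmctl(SHM_LOCK)/SHM_UNLOCK path and only toggles VM_LOCKED and the mapping's unevictable state against the mlock ucounts. A small sketch of the user-space side, assuming the caller has CAP_IPC_LOCK or enough RLIMIT_MEMLOCK headroom; the 1MiB segment size is arbitrary:

#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
    int id = shmget(IPC_PRIVATE, 1 << 20, IPC_CREAT | 0600);
    char *p;

    if (id < 0) {
        perror("shmget");
        return 1;
    }
    p = shmat(id, NULL, 0);
    memset(p, 0, 1 << 20);                  /* populate the segment */

    if (shmctl(id, SHM_LOCK, NULL))         /* ends up in shmem_lock(file, 1, ucounts) */
        perror("SHM_LOCK");
    if (shmctl(id, SHM_UNLOCK, NULL))       /* shmem_lock(file, 0, ucounts) */
        perror("SHM_UNLOCK");

    shmdt(p);
    shmctl(id, IPC_RMID, NULL);
    return 0;
}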
2708 */ 2250 */ 2709 if (lock && !(info->flags & VM_LOCKED 2251 if (lock && !(info->flags & VM_LOCKED)) { 2710 if (!user_shm_lock(inode->i_s 2252 if (!user_shm_lock(inode->i_size, ucounts)) 2711 goto out_nomem; 2253 goto out_nomem; 2712 info->flags |= VM_LOCKED; 2254 info->flags |= VM_LOCKED; 2713 mapping_set_unevictable(file- 2255 mapping_set_unevictable(file->f_mapping); 2714 } 2256 } 2715 if (!lock && (info->flags & VM_LOCKED 2257 if (!lock && (info->flags & VM_LOCKED) && ucounts) { 2716 user_shm_unlock(inode->i_size 2258 user_shm_unlock(inode->i_size, ucounts); 2717 info->flags &= ~VM_LOCKED; 2259 info->flags &= ~VM_LOCKED; 2718 mapping_clear_unevictable(fil 2260 mapping_clear_unevictable(file->f_mapping); 2719 } 2261 } 2720 retval = 0; 2262 retval = 0; 2721 2263 2722 out_nomem: 2264 out_nomem: 2723 return retval; 2265 return retval; 2724 } 2266 } 2725 2267 2726 static int shmem_mmap(struct file *file, stru 2268 static int shmem_mmap(struct file *file, struct vm_area_struct *vma) 2727 { 2269 { 2728 struct inode *inode = file_inode(file !! 2270 struct shmem_inode_info *info = SHMEM_I(file_inode(file)); 2729 struct shmem_inode_info *info = SHMEM << 2730 int ret; 2271 int ret; 2731 2272 2732 ret = seal_check_write(info->seals, v !! 2273 ret = seal_check_future_write(info->seals, vma); 2733 if (ret) 2274 if (ret) 2734 return ret; 2275 return ret; 2735 2276 >> 2277 /* arm64 - allow memory tagging on RAM-based files */ >> 2278 vma->vm_flags |= VM_MTE_ALLOWED; >> 2279 2736 file_accessed(file); 2280 file_accessed(file); 2737 /* This is anonymous shared memory if !! 2281 vma->vm_ops = &shmem_vm_ops; 2738 if (inode->i_nlink) !! 2282 if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && 2739 vma->vm_ops = &shmem_vm_ops; !! 2283 ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) < 2740 else !! 2284 (vma->vm_end & HPAGE_PMD_MASK)) { 2741 vma->vm_ops = &shmem_anon_vm_ !! 2285 khugepaged_enter(vma, vma->vm_flags); >> 2286 } 2742 return 0; 2287 return 0; 2743 } 2288 } 2744 2289 2745 static int shmem_file_open(struct inode *inod !! 2290 static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir, 2746 { !! 
2291 umode_t mode, dev_t dev, unsigned long flags) 2747 file->f_mode |= FMODE_CAN_ODIRECT; << 2748 return generic_file_open(inode, file) << 2749 } << 2750 << 2751 #ifdef CONFIG_TMPFS_XATTR << 2752 static int shmem_initxattrs(struct inode *, c << 2753 << 2754 /* << 2755 * chattr's fsflags are unrelated to extended << 2756 * but tmpfs has chosen to enable them under << 2757 */ << 2758 static void shmem_set_inode_flags(struct inod << 2759 { << 2760 unsigned int i_flags = 0; << 2761 << 2762 if (fsflags & FS_NOATIME_FL) << 2763 i_flags |= S_NOATIME; << 2764 if (fsflags & FS_APPEND_FL) << 2765 i_flags |= S_APPEND; << 2766 if (fsflags & FS_IMMUTABLE_FL) << 2767 i_flags |= S_IMMUTABLE; << 2768 /* << 2769 * But FS_NODUMP_FL does not require << 2770 */ << 2771 inode_set_flags(inode, i_flags, S_NOA << 2772 } << 2773 #else << 2774 static void shmem_set_inode_flags(struct inod << 2775 { << 2776 } << 2777 #define shmem_initxattrs NULL << 2778 #endif << 2779 << 2780 static struct offset_ctx *shmem_get_offset_ct << 2781 { << 2782 return &SHMEM_I(inode)->dir_offsets; << 2783 } << 2784 << 2785 static struct inode *__shmem_get_inode(struct << 2786 << 2787 << 2788 << 2789 { 2292 { 2790 struct inode *inode; 2293 struct inode *inode; 2791 struct shmem_inode_info *info; 2294 struct shmem_inode_info *info; 2792 struct shmem_sb_info *sbinfo = SHMEM_ 2295 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 2793 ino_t ino; 2296 ino_t ino; 2794 int err; << 2795 2297 2796 err = shmem_reserve_inode(sb, &ino); !! 2298 if (shmem_reserve_inode(sb, &ino)) 2797 if (err) !! 2299 return NULL; 2798 return ERR_PTR(err); << 2799 2300 2800 inode = new_inode(sb); 2301 inode = new_inode(sb); 2801 if (!inode) { !! 2302 if (inode) { 2802 shmem_free_inode(sb, 0); !! 2303 inode->i_ino = ino; 2803 return ERR_PTR(-ENOSPC); !! 2304 inode_init_owner(&init_user_ns, inode, dir, mode); 2804 } !! 2305 inode->i_blocks = 0; 2805 !! 2306 inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode); 2806 inode->i_ino = ino; !! 2307 inode->i_generation = prandom_u32(); 2807 inode_init_owner(idmap, inode, dir, m !! 2308 info = SHMEM_I(inode); 2808 inode->i_blocks = 0; !! 2309 memset(info, 0, (char *)inode - (char *)info); 2809 simple_inode_init_ts(inode); !! 2310 spin_lock_init(&info->lock); 2810 inode->i_generation = get_random_u32( !! 2311 atomic_set(&info->stop_eviction, 0); 2811 info = SHMEM_I(inode); !! 2312 info->seals = F_SEAL_SEAL; 2812 memset(info, 0, (char *)inode - (char !! 2313 info->flags = flags & VM_NORESERVE; 2813 spin_lock_init(&info->lock); !! 2314 INIT_LIST_HEAD(&info->shrinklist); 2814 atomic_set(&info->stop_eviction, 0); !! 2315 INIT_LIST_HEAD(&info->swaplist); 2815 info->seals = F_SEAL_SEAL; !! 2316 simple_xattrs_init(&info->xattrs); 2816 info->flags = flags & VM_NORESERVE; !! 2317 cache_no_acl(inode); 2817 info->i_crtime = inode_get_mtime(inod !! 2318 2818 info->fsflags = (dir == NULL) ? 0 : !! 2319 switch (mode & S_IFMT) { 2819 SHMEM_I(dir)->fsflags & SHMEM !! 2320 default: 2820 if (info->fsflags) !! 2321 inode->i_op = &shmem_special_inode_operations; 2821 shmem_set_inode_flags(inode, !! 2322 init_special_inode(inode, mode, dev); 2822 INIT_LIST_HEAD(&info->shrinklist); !! 2323 break; 2823 INIT_LIST_HEAD(&info->swaplist); !! 2324 case S_IFREG: 2824 simple_xattrs_init(&info->xattrs); !! 2325 inode->i_mapping->a_ops = &shmem_aops; 2825 cache_no_acl(inode); !! 2326 inode->i_op = &shmem_inode_operations; 2826 if (sbinfo->noswap) !! 2327 inode->i_fop = &shmem_file_operations; 2827 mapping_set_unevictable(inode !! 
2328 mpol_shared_policy_init(&info->policy, 2828 mapping_set_large_folios(inode->i_map !! 2329 shmem_get_sbmpol(sbinfo)); 2829 !! 2330 break; 2830 switch (mode & S_IFMT) { !! 2331 case S_IFDIR: 2831 default: !! 2332 inc_nlink(inode); 2832 inode->i_op = &shmem_special_ !! 2333 /* Some things misbehave if size == 0 on a directory */ 2833 init_special_inode(inode, mod !! 2334 inode->i_size = 2 * BOGO_DIRENT_SIZE; 2834 break; !! 2335 inode->i_op = &shmem_dir_inode_operations; 2835 case S_IFREG: !! 2336 inode->i_fop = &simple_dir_operations; 2836 inode->i_mapping->a_ops = &sh !! 2337 break; 2837 inode->i_op = &shmem_inode_op !! 2338 case S_IFLNK: 2838 inode->i_fop = &shmem_file_op !! 2339 /* 2839 mpol_shared_policy_init(&info !! 2340 * Must not load anything in the rbtree, 2840 shme !! 2341 * mpol_free_shared_policy will not be called. 2841 break; !! 2342 */ 2842 case S_IFDIR: !! 2343 mpol_shared_policy_init(&info->policy, NULL); 2843 inc_nlink(inode); !! 2344 break; 2844 /* Some things misbehave if s !! 2345 } 2845 inode->i_size = 2 * BOGO_DIRE << 2846 inode->i_op = &shmem_dir_inod << 2847 inode->i_fop = &simple_offset << 2848 simple_offset_init(shmem_get_ << 2849 break; << 2850 case S_IFLNK: << 2851 /* << 2852 * Must not load anything in << 2853 * mpol_free_shared_policy wi << 2854 */ << 2855 mpol_shared_policy_init(&info << 2856 break; << 2857 } << 2858 << 2859 lockdep_annotate_inode_mutex_key(inod << 2860 return inode; << 2861 } << 2862 << 2863 #ifdef CONFIG_TMPFS_QUOTA << 2864 static struct inode *shmem_get_inode(struct m << 2865 struct s << 2866 umode_t << 2867 { << 2868 int err; << 2869 struct inode *inode; << 2870 << 2871 inode = __shmem_get_inode(idmap, sb, << 2872 if (IS_ERR(inode)) << 2873 return inode; << 2874 << 2875 err = dquot_initialize(inode); << 2876 if (err) << 2877 goto errout; << 2878 2346 2879 err = dquot_alloc_inode(inode); !! 2347 lockdep_annotate_inode_mutex_key(inode); 2880 if (err) { !! 2348 } else 2881 dquot_drop(inode); !! 2349 shmem_free_inode(sb); 2882 goto errout; << 2883 } << 2884 return inode; 2350 return inode; 2885 << 2886 errout: << 2887 inode->i_flags |= S_NOQUOTA; << 2888 iput(inode); << 2889 return ERR_PTR(err); << 2890 } 2351 } 2891 #else << 2892 static inline struct inode *shmem_get_inode(s << 2893 struct s << 2894 umode_t << 2895 { << 2896 return __shmem_get_inode(idmap, sb, d << 2897 } << 2898 #endif /* CONFIG_TMPFS_QUOTA */ << 2899 2352 2900 #ifdef CONFIG_USERFAULTFD 2353 #ifdef CONFIG_USERFAULTFD 2901 int shmem_mfill_atomic_pte(pmd_t *dst_pmd, !! 2354 int shmem_mfill_atomic_pte(struct mm_struct *dst_mm, >> 2355 pmd_t *dst_pmd, 2902 struct vm_area_str 2356 struct vm_area_struct *dst_vma, 2903 unsigned long dst_ 2357 unsigned long dst_addr, 2904 unsigned long src_ 2358 unsigned long src_addr, 2905 uffd_flags_t flags !! 2359 bool zeropage, 2906 struct folio **fol !! 2360 struct page **pagep) 2907 { 2361 { 2908 struct inode *inode = file_inode(dst_ 2362 struct inode *inode = file_inode(dst_vma->vm_file); 2909 struct shmem_inode_info *info = SHMEM 2363 struct shmem_inode_info *info = SHMEM_I(inode); 2910 struct address_space *mapping = inode 2364 struct address_space *mapping = inode->i_mapping; 2911 gfp_t gfp = mapping_gfp_mask(mapping) 2365 gfp_t gfp = mapping_gfp_mask(mapping); 2912 pgoff_t pgoff = linear_page_index(dst 2366 pgoff_t pgoff = linear_page_index(dst_vma, dst_addr); 2913 void *page_kaddr; 2367 void *page_kaddr; 2914 struct folio *folio; !! 
2368 struct page *page; 2915 int ret; 2369 int ret; 2916 pgoff_t max_off; 2370 pgoff_t max_off; 2917 2371 2918 if (shmem_inode_acct_blocks(inode, 1) !! 2372 if (!shmem_inode_acct_block(inode, 1)) { 2919 /* 2373 /* 2920 * We may have got a page, re 2374 * We may have got a page, returned -ENOENT triggering a retry, 2921 * and now we find ourselves 2375 * and now we find ourselves with -ENOMEM. Release the page, to 2922 * avoid a BUG_ON in our call 2376 * avoid a BUG_ON in our caller. 2923 */ 2377 */ 2924 if (unlikely(*foliop)) { !! 2378 if (unlikely(*pagep)) { 2925 folio_put(*foliop); !! 2379 put_page(*pagep); 2926 *foliop = NULL; !! 2380 *pagep = NULL; 2927 } 2381 } 2928 return -ENOMEM; 2382 return -ENOMEM; 2929 } 2383 } 2930 2384 2931 if (!*foliop) { !! 2385 if (!*pagep) { 2932 ret = -ENOMEM; 2386 ret = -ENOMEM; 2933 folio = shmem_alloc_folio(gfp !! 2387 page = shmem_alloc_page(gfp, info, pgoff); 2934 if (!folio) !! 2388 if (!page) 2935 goto out_unacct_block 2389 goto out_unacct_blocks; 2936 2390 2937 if (uffd_flags_mode_is(flags, !! 2391 if (!zeropage) { /* COPY */ 2938 page_kaddr = kmap_loc !! 2392 page_kaddr = kmap_atomic(page); 2939 /* << 2940 * The read mmap_lock << 2941 * mmap_lock being re << 2942 * possible if a writ << 2943 * << 2944 * process A thread 1 << 2945 * process A thread 2 << 2946 * process B thread 1 << 2947 * process B thread 2 << 2948 * process A thread 1 << 2949 * process B thread 1 << 2950 * << 2951 * Disable page fault << 2952 * and retry the copy << 2953 */ << 2954 pagefault_disable(); << 2955 ret = copy_from_user( 2393 ret = copy_from_user(page_kaddr, 2956 2394 (const void __user *)src_addr, 2957 2395 PAGE_SIZE); 2958 pagefault_enable(); !! 2396 kunmap_atomic(page_kaddr); 2959 kunmap_local(page_kad << 2960 2397 2961 /* fallback to copy_f 2398 /* fallback to copy_from_user outside mmap_lock */ 2962 if (unlikely(ret)) { 2399 if (unlikely(ret)) { 2963 *foliop = fol !! 2400 *pagep = page; 2964 ret = -ENOENT 2401 ret = -ENOENT; 2965 /* don't free 2402 /* don't free the page */ 2966 goto out_unac 2403 goto out_unacct_blocks; 2967 } 2404 } 2968 << 2969 flush_dcache_folio(fo << 2970 } else { /* ZE 2405 } else { /* ZEROPAGE */ 2971 clear_user_highpage(& !! 2406 clear_highpage(page); 2972 } 2407 } 2973 } else { 2408 } else { 2974 folio = *foliop; !! 2409 page = *pagep; 2975 VM_BUG_ON_FOLIO(folio_test_la !! 2410 *pagep = NULL; 2976 *foliop = NULL; << 2977 } 2411 } 2978 2412 2979 VM_BUG_ON(folio_test_locked(folio)); !! 2413 VM_BUG_ON(PageLocked(page)); 2980 VM_BUG_ON(folio_test_swapbacked(folio !! 2414 VM_BUG_ON(PageSwapBacked(page)); 2981 __folio_set_locked(folio); !! 2415 __SetPageLocked(page); 2982 __folio_set_swapbacked(folio); !! 2416 __SetPageSwapBacked(page); 2983 __folio_mark_uptodate(folio); !! 2417 __SetPageUptodate(page); 2984 2418 2985 ret = -EFAULT; 2419 ret = -EFAULT; 2986 max_off = DIV_ROUND_UP(i_size_read(in 2420 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); 2987 if (unlikely(pgoff >= max_off)) 2421 if (unlikely(pgoff >= max_off)) 2988 goto out_release; 2422 goto out_release; 2989 2423 2990 ret = mem_cgroup_charge(folio, dst_vm !! 2424 ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL, 2991 if (ret) !! 2425 gfp & GFP_RECLAIM_MASK, dst_mm); 2992 goto out_release; << 2993 ret = shmem_add_to_page_cache(folio, << 2994 if (ret) 2426 if (ret) 2995 goto out_release; 2427 goto out_release; 2996 2428 2997 ret = mfill_atomic_install_pte(dst_pm !! 2429 ret = mfill_atomic_install_pte(dst_mm, dst_pmd, dst_vma, dst_addr, 2998 &folio !! 
2430 page, true, false); 2999 if (ret) 2431 if (ret) 3000 goto out_delete_from_cache; 2432 goto out_delete_from_cache; 3001 2433 3002 shmem_recalc_inode(inode, 1, 0); !! 2434 spin_lock_irq(&info->lock); 3003 folio_unlock(folio); !! 2435 info->alloced++; >> 2436 inode->i_blocks += BLOCKS_PER_PAGE; >> 2437 shmem_recalc_inode(inode); >> 2438 spin_unlock_irq(&info->lock); >> 2439 >> 2440 SetPageDirty(page); >> 2441 unlock_page(page); 3004 return 0; 2442 return 0; 3005 out_delete_from_cache: 2443 out_delete_from_cache: 3006 filemap_remove_folio(folio); !! 2444 delete_from_page_cache(page); 3007 out_release: 2445 out_release: 3008 folio_unlock(folio); !! 2446 unlock_page(page); 3009 folio_put(folio); !! 2447 put_page(page); 3010 out_unacct_blocks: 2448 out_unacct_blocks: 3011 shmem_inode_unacct_blocks(inode, 1); 2449 shmem_inode_unacct_blocks(inode, 1); 3012 return ret; 2450 return ret; 3013 } 2451 } 3014 #endif /* CONFIG_USERFAULTFD */ 2452 #endif /* CONFIG_USERFAULTFD */ 3015 2453 3016 #ifdef CONFIG_TMPFS 2454 #ifdef CONFIG_TMPFS 3017 static const struct inode_operations shmem_sy 2455 static const struct inode_operations shmem_symlink_inode_operations; 3018 static const struct inode_operations shmem_sh 2456 static const struct inode_operations shmem_short_symlink_operations; 3019 2457 >> 2458 #ifdef CONFIG_TMPFS_XATTR >> 2459 static int shmem_initxattrs(struct inode *, const struct xattr *, void *); >> 2460 #else >> 2461 #define shmem_initxattrs NULL >> 2462 #endif >> 2463 3020 static int 2464 static int 3021 shmem_write_begin(struct file *file, struct a 2465 shmem_write_begin(struct file *file, struct address_space *mapping, 3022 loff_t pos, unsigned !! 2466 loff_t pos, unsigned len, unsigned flags, 3023 struct folio **foliop !! 2467 struct page **pagep, void **fsdata) 3024 { 2468 { 3025 struct inode *inode = mapping->host; 2469 struct inode *inode = mapping->host; 3026 struct shmem_inode_info *info = SHMEM 2470 struct shmem_inode_info *info = SHMEM_I(inode); 3027 pgoff_t index = pos >> PAGE_SHIFT; 2471 pgoff_t index = pos >> PAGE_SHIFT; 3028 struct folio *folio; << 3029 int ret = 0; << 3030 2472 3031 /* i_rwsem is held by caller */ !! 2473 /* i_mutex is held by caller */ 3032 if (unlikely(info->seals & (F_SEAL_GR 2474 if (unlikely(info->seals & (F_SEAL_GROW | 3033 F_SEAL_WRI 2475 F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) { 3034 if (info->seals & (F_SEAL_WRI 2476 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) 3035 return -EPERM; 2477 return -EPERM; 3036 if ((info->seals & F_SEAL_GRO 2478 if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size) 3037 return -EPERM; 2479 return -EPERM; 3038 } 2480 } 3039 2481 3040 ret = shmem_get_folio(inode, index, p !! 2482 return shmem_getpage(inode, index, pagep, SGP_WRITE); 3041 if (ret) << 3042 return ret; << 3043 << 3044 if (folio_test_hwpoison(folio) || << 3045 (folio_test_large(folio) && folio << 3046 folio_unlock(folio); << 3047 folio_put(folio); << 3048 return -EIO; << 3049 } << 3050 << 3051 *foliop = folio; << 3052 return 0; << 3053 } 2483 } 3054 2484 3055 static int 2485 static int 3056 shmem_write_end(struct file *file, struct add 2486 shmem_write_end(struct file *file, struct address_space *mapping, 3057 loff_t pos, unsigned 2487 loff_t pos, unsigned len, unsigned copied, 3058 struct folio *folio, !! 
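The seal checks at the top of shmem_write_begin() are where F_SEAL_WRITE, F_SEAL_FUTURE_WRITE and F_SEAL_GROW actually reject a buffered write with -EPERM before any page is instantiated. A user-space sketch, assuming memfd_create() and the F_ADD_SEALS/F_SEAL_* definitions come from libc (glibc 2.27+):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    int fd = memfd_create("sealed", MFD_ALLOW_SEALING);
    const char msg[] = "hello";

    pwrite(fd, msg, sizeof(msg), 0);                /* fine: no seals yet */

    fcntl(fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_WRITE | F_SEAL_SEAL);

    if (pwrite(fd, msg, sizeof(msg), 0) < 0)        /* shmem_write_begin() -> -EPERM */
        perror("pwrite after F_SEAL_WRITE");
    if (ftruncate(fd, 1 << 20) < 0)                 /* growth blocked by F_SEAL_GROW */
        perror("ftruncate after F_SEAL_GROW");

    close(fd);
    return 0;
}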
2488 struct page *page, void *fsdata) 3059 { 2489 { 3060 struct inode *inode = mapping->host; 2490 struct inode *inode = mapping->host; 3061 2491 3062 if (pos + copied > inode->i_size) 2492 if (pos + copied > inode->i_size) 3063 i_size_write(inode, pos + cop 2493 i_size_write(inode, pos + copied); 3064 2494 3065 if (!folio_test_uptodate(folio)) { !! 2495 if (!PageUptodate(page)) { 3066 if (copied < folio_size(folio !! 2496 struct page *head = compound_head(page); 3067 size_t from = offset_ !! 2497 if (PageTransCompound(page)) { 3068 folio_zero_segments(f !! 2498 int i; 3069 from !! 2499 3070 } !! 2500 for (i = 0; i < HPAGE_PMD_NR; i++) { 3071 folio_mark_uptodate(folio); !! 2501 if (head + i == page) 3072 } !! 2502 continue; 3073 folio_mark_dirty(folio); !! 2503 clear_highpage(head + i); 3074 folio_unlock(folio); !! 2504 flush_dcache_page(head + i); 3075 folio_put(folio); !! 2505 } >> 2506 } >> 2507 if (copied < PAGE_SIZE) { >> 2508 unsigned from = pos & (PAGE_SIZE - 1); >> 2509 zero_user_segments(page, 0, from, >> 2510 from + copied, PAGE_SIZE); >> 2511 } >> 2512 SetPageUptodate(head); >> 2513 } >> 2514 set_page_dirty(page); >> 2515 unlock_page(page); >> 2516 put_page(page); 3076 2517 3077 return copied; 2518 return copied; 3078 } 2519 } 3079 2520 3080 static ssize_t shmem_file_read_iter(struct ki 2521 static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to) 3081 { 2522 { 3082 struct file *file = iocb->ki_filp; 2523 struct file *file = iocb->ki_filp; 3083 struct inode *inode = file_inode(file 2524 struct inode *inode = file_inode(file); 3084 struct address_space *mapping = inode 2525 struct address_space *mapping = inode->i_mapping; 3085 pgoff_t index; 2526 pgoff_t index; 3086 unsigned long offset; 2527 unsigned long offset; >> 2528 enum sgp_type sgp = SGP_READ; 3087 int error = 0; 2529 int error = 0; 3088 ssize_t retval = 0; 2530 ssize_t retval = 0; 3089 loff_t *ppos = &iocb->ki_pos; 2531 loff_t *ppos = &iocb->ki_pos; 3090 2532 >> 2533 /* >> 2534 * Might this read be for a stacking filesystem? Then when reading >> 2535 * holes of a sparse file, we actually need to allocate those pages, >> 2536 * and even mark them dirty, so it cannot exceed the max_blocks limit. >> 2537 */ >> 2538 if (!iter_is_iovec(to)) >> 2539 sgp = SGP_CACHE; >> 2540 3091 index = *ppos >> PAGE_SHIFT; 2541 index = *ppos >> PAGE_SHIFT; 3092 offset = *ppos & ~PAGE_MASK; 2542 offset = *ppos & ~PAGE_MASK; 3093 2543 3094 for (;;) { 2544 for (;;) { 3095 struct folio *folio = NULL; << 3096 struct page *page = NULL; 2545 struct page *page = NULL; 3097 pgoff_t end_index; 2546 pgoff_t end_index; 3098 unsigned long nr, ret; 2547 unsigned long nr, ret; 3099 loff_t i_size = i_size_read(i 2548 loff_t i_size = i_size_read(inode); 3100 2549 3101 end_index = i_size >> PAGE_SH 2550 end_index = i_size >> PAGE_SHIFT; 3102 if (index > end_index) 2551 if (index > end_index) 3103 break; 2552 break; 3104 if (index == end_index) { 2553 if (index == end_index) { 3105 nr = i_size & ~PAGE_M 2554 nr = i_size & ~PAGE_MASK; 3106 if (nr <= offset) 2555 if (nr <= offset) 3107 break; 2556 break; 3108 } 2557 } 3109 2558 3110 error = shmem_get_folio(inode !! 2559 error = shmem_getpage(inode, index, &page, sgp); 3111 if (error) { 2560 if (error) { 3112 if (error == -EINVAL) 2561 if (error == -EINVAL) 3113 error = 0; 2562 error = 0; 3114 break; 2563 break; 3115 } 2564 } 3116 if (folio) { !! 2565 if (page) { 3117 folio_unlock(folio); !! 2566 if (sgp == SGP_CACHE) 3118 !! 2567 set_page_dirty(page); 3119 page = folio_file_pag !! 
2568 unlock_page(page); 3120 if (PageHWPoison(page << 3121 folio_put(fol << 3122 error = -EIO; << 3123 break; << 3124 } << 3125 } 2569 } 3126 2570 3127 /* 2571 /* 3128 * We must evaluate after, si 2572 * We must evaluate after, since reads (unlike writes) 3129 * are called without i_rwsem !! 2573 * are called without i_mutex protection against truncate 3130 */ 2574 */ 3131 nr = PAGE_SIZE; 2575 nr = PAGE_SIZE; 3132 i_size = i_size_read(inode); 2576 i_size = i_size_read(inode); 3133 end_index = i_size >> PAGE_SH 2577 end_index = i_size >> PAGE_SHIFT; 3134 if (index == end_index) { 2578 if (index == end_index) { 3135 nr = i_size & ~PAGE_M 2579 nr = i_size & ~PAGE_MASK; 3136 if (nr <= offset) { 2580 if (nr <= offset) { 3137 if (folio) !! 2581 if (page) 3138 folio !! 2582 put_page(page); 3139 break; 2583 break; 3140 } 2584 } 3141 } 2585 } 3142 nr -= offset; 2586 nr -= offset; 3143 2587 3144 if (folio) { !! 2588 if (page) { 3145 /* 2589 /* 3146 * If users can be wr 2590 * If users can be writing to this page using arbitrary 3147 * virtual addresses, 2591 * virtual addresses, take care about potential aliasing 3148 * before reading the 2592 * before reading the page on the kernel side. 3149 */ 2593 */ 3150 if (mapping_writably_ 2594 if (mapping_writably_mapped(mapping)) 3151 flush_dcache_ 2595 flush_dcache_page(page); 3152 /* 2596 /* 3153 * Mark the page acce 2597 * Mark the page accessed if we read the beginning. 3154 */ 2598 */ 3155 if (!offset) 2599 if (!offset) 3156 folio_mark_ac !! 2600 mark_page_accessed(page); 3157 /* << 3158 * Ok, we have the pa << 3159 * now we can copy it << 3160 */ << 3161 ret = copy_page_to_it << 3162 folio_put(folio); << 3163 << 3164 } else if (user_backed_iter(t << 3165 /* << 3166 * Copy to user tends << 3167 * clear_user() not s << 3168 * faster to copy the << 3169 */ << 3170 ret = copy_page_to_it << 3171 } else { 2601 } else { 3172 /* !! 2602 page = ZERO_PAGE(0); 3173 * But submitting the !! 2603 get_page(page); 3174 * splice() - or othe << 3175 * so don't attempt t << 3176 */ << 3177 ret = iov_iter_zero(n << 3178 } 2604 } 3179 2605 >> 2606 /* >> 2607 * Ok, we have the page, and it's up-to-date, so >> 2608 * now we can copy it to user space... >> 2609 */ >> 2610 ret = copy_page_to_iter(page, offset, nr, to); 3180 retval += ret; 2611 retval += ret; 3181 offset += ret; 2612 offset += ret; 3182 index += offset >> PAGE_SHIFT 2613 index += offset >> PAGE_SHIFT; 3183 offset &= ~PAGE_MASK; 2614 offset &= ~PAGE_MASK; 3184 2615 >> 2616 put_page(page); 3185 if (!iov_iter_count(to)) 2617 if (!iov_iter_count(to)) 3186 break; 2618 break; 3187 if (ret < nr) { 2619 if (ret < nr) { 3188 error = -EFAULT; 2620 error = -EFAULT; 3189 break; 2621 break; 3190 } 2622 } 3191 cond_resched(); 2623 cond_resched(); 3192 } 2624 } 3193 2625 3194 *ppos = ((loff_t) index << PAGE_SHIFT 2626 *ppos = ((loff_t) index << PAGE_SHIFT) + offset; 3195 file_accessed(file); 2627 file_accessed(file); 3196 return retval ? retval : error; 2628 return retval ? 
retval : error; 3197 } 2629 } 3198 2630 3199 static ssize_t shmem_file_write_iter(struct k << 3200 { << 3201 struct file *file = iocb->ki_filp; << 3202 struct inode *inode = file->f_mapping << 3203 ssize_t ret; << 3204 << 3205 inode_lock(inode); << 3206 ret = generic_write_checks(iocb, from << 3207 if (ret <= 0) << 3208 goto unlock; << 3209 ret = file_remove_privs(file); << 3210 if (ret) << 3211 goto unlock; << 3212 ret = file_update_time(file); << 3213 if (ret) << 3214 goto unlock; << 3215 ret = generic_perform_write(iocb, fro << 3216 unlock: << 3217 inode_unlock(inode); << 3218 return ret; << 3219 } << 3220 << 3221 static bool zero_pipe_buf_get(struct pipe_ino << 3222 struct pipe_buf << 3223 { << 3224 return true; << 3225 } << 3226 << 3227 static void zero_pipe_buf_release(struct pipe << 3228 struct pipe << 3229 { << 3230 } << 3231 << 3232 static bool zero_pipe_buf_try_steal(struct pi << 3233 struct pi << 3234 { << 3235 return false; << 3236 } << 3237 << 3238 static const struct pipe_buf_operations zero_ << 3239 .release = zero_pipe_buf_relea << 3240 .try_steal = zero_pipe_buf_try_s << 3241 .get = zero_pipe_buf_get, << 3242 }; << 3243 << 3244 static size_t splice_zeropage_into_pipe(struc << 3245 loff_ << 3246 { << 3247 size_t offset = fpos & ~PAGE_MASK; << 3248 << 3249 size = min_t(size_t, size, PAGE_SIZE << 3250 << 3251 if (!pipe_full(pipe->head, pipe->tail << 3252 struct pipe_buffer *buf = pip << 3253 << 3254 *buf = (struct pipe_buffer) { << 3255 .ops = &zero_pipe_ << 3256 .page = ZERO_PAGE(0 << 3257 .offset = offset, << 3258 .len = size, << 3259 }; << 3260 pipe->head++; << 3261 } << 3262 << 3263 return size; << 3264 } << 3265 << 3266 static ssize_t shmem_file_splice_read(struct << 3267 struct << 3268 size_t << 3269 { << 3270 struct inode *inode = file_inode(in); << 3271 struct address_space *mapping = inode << 3272 struct folio *folio = NULL; << 3273 size_t total_spliced = 0, used, npage << 3274 loff_t isize; << 3275 int error = 0; << 3276 << 3277 /* Work out how much data we can actu << 3278 used = pipe_occupancy(pipe->head, pip << 3279 npages = max_t(ssize_t, pipe->max_usa << 3280 len = min_t(size_t, len, npages * PAG << 3281 << 3282 do { << 3283 if (*ppos >= i_size_read(inod << 3284 break; << 3285 << 3286 error = shmem_get_folio(inode << 3287 SGP_R << 3288 if (error) { << 3289 if (error == -EINVAL) << 3290 error = 0; << 3291 break; << 3292 } << 3293 if (folio) { << 3294 folio_unlock(folio); << 3295 << 3296 if (folio_test_hwpois << 3297 (folio_test_large << 3298 folio_test_has_h << 3299 error = -EIO; << 3300 break; << 3301 } << 3302 } << 3303 << 3304 /* << 3305 * i_size must be checked aft << 3306 * << 3307 * Checking i_size after the << 3308 * the correct value for "nr" << 3309 * part of the page is not co << 3310 * another truncate extends t << 3311 */ << 3312 isize = i_size_read(inode); << 3313 if (unlikely(*ppos >= isize)) << 3314 break; << 3315 part = min_t(loff_t, isize - << 3316 << 3317 if (folio) { << 3318 /* << 3319 * If users can be wr << 3320 * virtual addresses, << 3321 * before reading the << 3322 */ << 3323 if (mapping_writably_ << 3324 flush_dcache_ << 3325 folio_mark_accessed(f << 3326 /* << 3327 * Ok, we have the pa << 3328 * now splice it into << 3329 */ << 3330 n = splice_folio_into << 3331 folio_put(folio); << 3332 folio = NULL; << 3333 } else { << 3334 n = splice_zeropage_i << 3335 } << 3336 << 3337 if (!n) << 3338 break; << 3339 len -= n; << 3340 total_spliced += n; << 3341 *ppos += n; << 3342 in->f_ra.prev_pos = *ppos; << 3343 if (pipe_full(pipe->head, pip << 
3344 break; << 3345 << 3346 cond_resched(); << 3347 } while (len); << 3348 << 3349 if (folio) << 3350 folio_put(folio); << 3351 << 3352 file_accessed(in); << 3353 return total_spliced ? total_spliced << 3354 } << 3355 << 3356 static loff_t shmem_file_llseek(struct file * 2631 static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence) 3357 { 2632 { 3358 struct address_space *mapping = file- 2633 struct address_space *mapping = file->f_mapping; 3359 struct inode *inode = mapping->host; 2634 struct inode *inode = mapping->host; 3360 2635 3361 if (whence != SEEK_DATA && whence != 2636 if (whence != SEEK_DATA && whence != SEEK_HOLE) 3362 return generic_file_llseek_si 2637 return generic_file_llseek_size(file, offset, whence, 3363 MAX_L 2638 MAX_LFS_FILESIZE, i_size_read(inode)); 3364 if (offset < 0) 2639 if (offset < 0) 3365 return -ENXIO; 2640 return -ENXIO; 3366 2641 3367 inode_lock(inode); 2642 inode_lock(inode); 3368 /* We're holding i_rwsem so we can ac !! 2643 /* We're holding i_mutex so we can access i_size directly */ 3369 offset = mapping_seek_hole_data(mappi 2644 offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence); 3370 if (offset >= 0) 2645 if (offset >= 0) 3371 offset = vfs_setpos(file, off 2646 offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE); 3372 inode_unlock(inode); 2647 inode_unlock(inode); 3373 return offset; 2648 return offset; 3374 } 2649 } 3375 2650 3376 static long shmem_fallocate(struct file *file 2651 static long shmem_fallocate(struct file *file, int mode, loff_t offset, 3377 2652 loff_t len) 3378 { 2653 { 3379 struct inode *inode = file_inode(file 2654 struct inode *inode = file_inode(file); 3380 struct shmem_sb_info *sbinfo = SHMEM_ 2655 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 3381 struct shmem_inode_info *info = SHMEM 2656 struct shmem_inode_info *info = SHMEM_I(inode); 3382 struct shmem_falloc shmem_falloc; 2657 struct shmem_falloc shmem_falloc; 3383 pgoff_t start, index, end, undo_fallo !! 2658 pgoff_t start, index, end; 3384 int error; 2659 int error; 3385 2660 3386 if (mode & ~(FALLOC_FL_KEEP_SIZE | FA 2661 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) 3387 return -EOPNOTSUPP; 2662 return -EOPNOTSUPP; 3388 2663 3389 inode_lock(inode); 2664 inode_lock(inode); 3390 2665 3391 if (mode & FALLOC_FL_PUNCH_HOLE) { 2666 if (mode & FALLOC_FL_PUNCH_HOLE) { 3392 struct address_space *mapping 2667 struct address_space *mapping = file->f_mapping; 3393 loff_t unmap_start = round_up 2668 loff_t unmap_start = round_up(offset, PAGE_SIZE); 3394 loff_t unmap_end = round_down 2669 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1; 3395 DECLARE_WAIT_QUEUE_HEAD_ONSTA 2670 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq); 3396 2671 3397 /* protected by i_rwsem */ !! 
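shmem_file_llseek() above implements SEEK_DATA/SEEK_HOLE directly on the page cache via mapping_seek_hole_data(), under i_rwsem (i_mutex in the older column), so tmpfs holes are discoverable without reading. A sketch, assuming memfd_create() and the SEEK_DATA/SEEK_HOLE constants from libc; offsets come back at page-cache granularity:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    char buf[4096];
    int fd = memfd_create("seek-demo", 0);
    off_t data, hole;

    ftruncate(fd, 8 << 20);                     /* 8MiB, all hole */
    memset(buf, 0xff, sizeof(buf));
    pwrite(fd, buf, sizeof(buf), 4 << 20);      /* one page of data at 4MiB */

    data = lseek(fd, 0, SEEK_DATA);             /* expect roughly 4MiB */
    hole = lseek(fd, data, SEEK_HOLE);          /* expect data + one page */
    printf("data at %lld, next hole at %lld\n",
           (long long)data, (long long)hole);

    close(fd);
    return 0;
}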
2672 /* protected by i_mutex */ 3398 if (info->seals & (F_SEAL_WRI 2673 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) { 3399 error = -EPERM; 2674 error = -EPERM; 3400 goto out; 2675 goto out; 3401 } 2676 } 3402 2677 3403 shmem_falloc.waitq = &shmem_f 2678 shmem_falloc.waitq = &shmem_falloc_waitq; 3404 shmem_falloc.start = (u64)unm 2679 shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT; 3405 shmem_falloc.next = (unmap_en 2680 shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT; 3406 spin_lock(&inode->i_lock); 2681 spin_lock(&inode->i_lock); 3407 inode->i_private = &shmem_fal 2682 inode->i_private = &shmem_falloc; 3408 spin_unlock(&inode->i_lock); 2683 spin_unlock(&inode->i_lock); 3409 2684 3410 if ((u64)unmap_end > (u64)unm 2685 if ((u64)unmap_end > (u64)unmap_start) 3411 unmap_mapping_range(m 2686 unmap_mapping_range(mapping, unmap_start, 3412 1 2687 1 + unmap_end - unmap_start, 0); 3413 shmem_truncate_range(inode, o 2688 shmem_truncate_range(inode, offset, offset + len - 1); 3414 /* No need to unmap again: ho 2689 /* No need to unmap again: hole-punching leaves COWed pages */ 3415 2690 3416 spin_lock(&inode->i_lock); 2691 spin_lock(&inode->i_lock); 3417 inode->i_private = NULL; 2692 inode->i_private = NULL; 3418 wake_up_all(&shmem_falloc_wai 2693 wake_up_all(&shmem_falloc_waitq); 3419 WARN_ON_ONCE(!list_empty(&shm 2694 WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head)); 3420 spin_unlock(&inode->i_lock); 2695 spin_unlock(&inode->i_lock); 3421 error = 0; 2696 error = 0; 3422 goto out; 2697 goto out; 3423 } 2698 } 3424 2699 3425 /* We need to check rlimit even when 2700 /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */ 3426 error = inode_newsize_ok(inode, offse 2701 error = inode_newsize_ok(inode, offset + len); 3427 if (error) 2702 if (error) 3428 goto out; 2703 goto out; 3429 2704 3430 if ((info->seals & F_SEAL_GROW) && of 2705 if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) { 3431 error = -EPERM; 2706 error = -EPERM; 3432 goto out; 2707 goto out; 3433 } 2708 } 3434 2709 3435 start = offset >> PAGE_SHIFT; 2710 start = offset >> PAGE_SHIFT; 3436 end = (offset + len + PAGE_SIZE - 1) 2711 end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT; 3437 /* Try to avoid a swapstorm if len is 2712 /* Try to avoid a swapstorm if len is impossible to satisfy */ 3438 if (sbinfo->max_blocks && end - start 2713 if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) { 3439 error = -ENOSPC; 2714 error = -ENOSPC; 3440 goto out; 2715 goto out; 3441 } 2716 } 3442 2717 3443 shmem_falloc.waitq = NULL; 2718 shmem_falloc.waitq = NULL; 3444 shmem_falloc.start = start; 2719 shmem_falloc.start = start; 3445 shmem_falloc.next = start; 2720 shmem_falloc.next = start; 3446 shmem_falloc.nr_falloced = 0; 2721 shmem_falloc.nr_falloced = 0; 3447 shmem_falloc.nr_unswapped = 0; 2722 shmem_falloc.nr_unswapped = 0; 3448 spin_lock(&inode->i_lock); 2723 spin_lock(&inode->i_lock); 3449 inode->i_private = &shmem_falloc; 2724 inode->i_private = &shmem_falloc; 3450 spin_unlock(&inode->i_lock); 2725 spin_unlock(&inode->i_lock); 3451 2726 3452 /* !! 2727 for (index = start; index < end; index++) { 3453 * info->fallocend is only relevant w !! 
2728 struct page *page; 3454 * involved: to prevent split_huge_pa << 3455 * pages when FALLOC_FL_KEEP_SIZE com << 3456 */ << 3457 undo_fallocend = info->fallocend; << 3458 if (info->fallocend < end) << 3459 info->fallocend = end; << 3460 << 3461 for (index = start; index < end; ) { << 3462 struct folio *folio; << 3463 2729 3464 /* 2730 /* 3465 * Check for fatal signal so !! 2731 * Good, the fallocate(2) manpage permits EINTR: we may have 3466 * situations. We don't want !! 2732 * been interrupted because we are using up too much memory. 3467 * signals as large fallocate << 3468 * e.g. periodic timers may r << 3469 * restarting. << 3470 */ 2733 */ 3471 if (fatal_signal_pending(curr !! 2734 if (signal_pending(current)) 3472 error = -EINTR; 2735 error = -EINTR; 3473 else if (shmem_falloc.nr_unsw 2736 else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced) 3474 error = -ENOMEM; 2737 error = -ENOMEM; 3475 else 2738 else 3476 error = shmem_get_fol !! 2739 error = shmem_getpage(inode, index, &page, SGP_FALLOC); 3477 << 3478 if (error) { 2740 if (error) { 3479 info->fallocend = und !! 2741 /* Remove the !PageUptodate pages we added */ 3480 /* Remove the !uptoda << 3481 if (index > start) { 2742 if (index > start) { 3482 shmem_undo_ra 2743 shmem_undo_range(inode, 3483 (loff_t)s 2744 (loff_t)start << PAGE_SHIFT, 3484 ((loff_t) 2745 ((loff_t)index << PAGE_SHIFT) - 1, true); 3485 } 2746 } 3486 goto undone; 2747 goto undone; 3487 } 2748 } 3488 2749 3489 /* 2750 /* 3490 * Here is a more important o << 3491 * a second SGP_FALLOC on the << 3492 * making it uptodate and un- << 3493 */ << 3494 index = folio_next_index(foli << 3495 /* Beware 32-bit wraparound * << 3496 if (!index) << 3497 index--; << 3498 << 3499 /* << 3500 * Inform shmem_writepage() h 2751 * Inform shmem_writepage() how far we have reached. 3501 * No need for lock or barrie 2752 * No need for lock or barrier: we have the page lock. 3502 */ 2753 */ 3503 if (!folio_test_uptodate(foli !! 2754 shmem_falloc.next++; 3504 shmem_falloc.nr_fallo !! 2755 if (!PageUptodate(page)) 3505 shmem_falloc.next = index; !! 2756 shmem_falloc.nr_falloced++; 3506 2757 3507 /* 2758 /* 3508 * If !uptodate, leave it tha !! 2759 * If !PageUptodate, leave it that way so that freeable pages 3509 * can be recognized if we ne 2760 * can be recognized if we need to rollback on error later. 3510 * But mark it dirty so that !! 2761 * But set_page_dirty so that memory pressure will swap rather 3511 * than free the folios we ar !! 2762 * than free the pages we are allocating (and SGP_CACHE pages 3512 * might still be clean: we n 2763 * might still be clean: we now need to mark those dirty too). 3513 */ 2764 */ 3514 folio_mark_dirty(folio); !! 2765 set_page_dirty(page); 3515 folio_unlock(folio); !! 2766 unlock_page(page); 3516 folio_put(folio); !! 
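The loop above is the preallocation half of shmem_fallocate(): each index goes through SGP_FALLOC, the folio is left !uptodate so a later failure can be rolled back, but is marked dirty so reclaim swaps rather than frees it. From user space the net effect shows up in st_blocks; a sketch, assuming memfd_create() and fallocate() from libc:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

static void show(int fd, const char *when)
{
    struct stat st;

    fstat(fd, &st);
    printf("%-12s size=%lld blocks=%lld\n", when,
           (long long)st.st_size, (long long)st.st_blocks);
}

int main(void)
{
    int fd = memfd_create("falloc-demo", 0);

    fallocate(fd, 0, 0, 4 << 20);               /* preallocate 4MiB via SGP_FALLOC */
    show(fd, "fallocate:");

    fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, 1 << 20);
    show(fd, "punch hole:");                    /* blocks drop, size unchanged */

    close(fd);
    return 0;
}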
2767 put_page(page); 3517 cond_resched(); 2768 cond_resched(); 3518 } 2769 } 3519 2770 3520 if (!(mode & FALLOC_FL_KEEP_SIZE) && 2771 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) 3521 i_size_write(inode, offset + 2772 i_size_write(inode, offset + len); >> 2773 inode->i_ctime = current_time(inode); 3522 undone: 2774 undone: 3523 spin_lock(&inode->i_lock); 2775 spin_lock(&inode->i_lock); 3524 inode->i_private = NULL; 2776 inode->i_private = NULL; 3525 spin_unlock(&inode->i_lock); 2777 spin_unlock(&inode->i_lock); 3526 out: 2778 out: 3527 if (!error) << 3528 file_modified(file); << 3529 inode_unlock(inode); 2779 inode_unlock(inode); 3530 return error; 2780 return error; 3531 } 2781 } 3532 2782 3533 static int shmem_statfs(struct dentry *dentry 2783 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf) 3534 { 2784 { 3535 struct shmem_sb_info *sbinfo = SHMEM_ 2785 struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb); 3536 2786 3537 buf->f_type = TMPFS_MAGIC; 2787 buf->f_type = TMPFS_MAGIC; 3538 buf->f_bsize = PAGE_SIZE; 2788 buf->f_bsize = PAGE_SIZE; 3539 buf->f_namelen = NAME_MAX; 2789 buf->f_namelen = NAME_MAX; 3540 if (sbinfo->max_blocks) { 2790 if (sbinfo->max_blocks) { 3541 buf->f_blocks = sbinfo->max_b 2791 buf->f_blocks = sbinfo->max_blocks; 3542 buf->f_bavail = 2792 buf->f_bavail = 3543 buf->f_bfree = sbinfo->max_b 2793 buf->f_bfree = sbinfo->max_blocks - 3544 percpu_counte 2794 percpu_counter_sum(&sbinfo->used_blocks); 3545 } 2795 } 3546 if (sbinfo->max_inodes) { 2796 if (sbinfo->max_inodes) { 3547 buf->f_files = sbinfo->max_in 2797 buf->f_files = sbinfo->max_inodes; 3548 buf->f_ffree = sbinfo->free_i !! 2798 buf->f_ffree = sbinfo->free_inodes; 3549 } 2799 } 3550 /* else leave those fields 0 like sim 2800 /* else leave those fields 0 like simple_statfs */ 3551 2801 3552 buf->f_fsid = uuid_to_fsid(dentry->d_ 2802 buf->f_fsid = uuid_to_fsid(dentry->d_sb->s_uuid.b); 3553 2803 3554 return 0; 2804 return 0; 3555 } 2805 } 3556 2806 3557 /* 2807 /* 3558 * File creation. Allocate an inode, and we'r 2808 * File creation. Allocate an inode, and we're done.. 3559 */ 2809 */ 3560 static int 2810 static int 3561 shmem_mknod(struct mnt_idmap *idmap, struct i !! 2811 shmem_mknod(struct user_namespace *mnt_userns, struct inode *dir, 3562 struct dentry *dentry, umode_t mo 2812 struct dentry *dentry, umode_t mode, dev_t dev) 3563 { 2813 { 3564 struct inode *inode; 2814 struct inode *inode; 3565 int error; !! 2815 int error = -ENOSPC; 3566 2816 3567 inode = shmem_get_inode(idmap, dir->i !! 2817 inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE); 3568 if (IS_ERR(inode)) !! 2818 if (inode) { 3569 return PTR_ERR(inode); !! 2819 error = simple_acl_create(dir, inode); 3570 !! 2820 if (error) 3571 error = simple_acl_create(dir, inode) !! 2821 goto out_iput; 3572 if (error) !! 2822 error = security_inode_init_security(inode, dir, 3573 goto out_iput; !! 2823 &dentry->d_name, 3574 error = security_inode_init_security( !! 2824 shmem_initxattrs, NULL); 3575 !! 2825 if (error && error != -EOPNOTSUPP) 3576 if (error && error != -EOPNOTSUPP) !! 2826 goto out_iput; 3577 goto out_iput; << 3578 << 3579 error = simple_offset_add(shmem_get_o << 3580 if (error) << 3581 goto out_iput; << 3582 2827 3583 dir->i_size += BOGO_DIRENT_SIZE; !! 2828 error = 0; 3584 inode_set_mtime_to_ts(dir, inode_set_ !! 2829 dir->i_size += BOGO_DIRENT_SIZE; 3585 inode_inc_iversion(dir); !! 2830 dir->i_ctime = dir->i_mtime = current_time(dir); 3586 d_instantiate(dentry, inode); !! 
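shmem_statfs() above reports max_blocks and the free-inode count when the mount set size=/nr_inodes= limits, and leaves those fields zero like simple_statfs() otherwise. The user-space view through statvfs(), assuming /dev/shm is a tmpfs mount on the running system:

#include <stdio.h>
#include <sys/statvfs.h>

int main(void)
{
    struct statvfs s;

    if (statvfs("/dev/shm", &s)) {              /* assumes /dev/shm is tmpfs */
        perror("statvfs");
        return 1;
    }
    printf("bsize %lu, blocks %llu (free %llu), inodes %llu (free %llu)\n",
           s.f_bsize,
           (unsigned long long)s.f_blocks, (unsigned long long)s.f_bfree,
           (unsigned long long)s.f_files, (unsigned long long)s.f_ffree);
    return 0;
}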
2831 d_instantiate(dentry, inode); 3587 dget(dentry); /* Extra count - pin th !! 2832 dget(dentry); /* Extra count - pin the dentry in core */ >> 2833 } 3588 return error; 2834 return error; 3589 << 3590 out_iput: 2835 out_iput: 3591 iput(inode); 2836 iput(inode); 3592 return error; 2837 return error; 3593 } 2838 } 3594 2839 3595 static int 2840 static int 3596 shmem_tmpfile(struct mnt_idmap *idmap, struct !! 2841 shmem_tmpfile(struct user_namespace *mnt_userns, struct inode *dir, 3597 struct file *file, umode_t mode !! 2842 struct dentry *dentry, umode_t mode) 3598 { 2843 { 3599 struct inode *inode; 2844 struct inode *inode; 3600 int error; !! 2845 int error = -ENOSPC; 3601 2846 3602 inode = shmem_get_inode(idmap, dir->i !! 2847 inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE); 3603 if (IS_ERR(inode)) { !! 2848 if (inode) { 3604 error = PTR_ERR(inode); !! 2849 error = security_inode_init_security(inode, dir, 3605 goto err_out; !! 2850 NULL, >> 2851 shmem_initxattrs, NULL); >> 2852 if (error && error != -EOPNOTSUPP) >> 2853 goto out_iput; >> 2854 error = simple_acl_create(dir, inode); >> 2855 if (error) >> 2856 goto out_iput; >> 2857 d_tmpfile(dentry, inode); 3606 } 2858 } 3607 error = security_inode_init_security( !! 2859 return error; 3608 << 3609 if (error && error != -EOPNOTSUPP) << 3610 goto out_iput; << 3611 error = simple_acl_create(dir, inode) << 3612 if (error) << 3613 goto out_iput; << 3614 d_tmpfile(file, inode); << 3615 << 3616 err_out: << 3617 return finish_open_simple(file, error << 3618 out_iput: 2860 out_iput: 3619 iput(inode); 2861 iput(inode); 3620 return error; 2862 return error; 3621 } 2863 } 3622 2864 3623 static int shmem_mkdir(struct mnt_idmap *idma !! 2865 static int shmem_mkdir(struct user_namespace *mnt_userns, struct inode *dir, 3624 struct dentry *dentry, 2866 struct dentry *dentry, umode_t mode) 3625 { 2867 { 3626 int error; 2868 int error; 3627 2869 3628 error = shmem_mknod(idmap, dir, dentr !! 2870 if ((error = shmem_mknod(&init_user_ns, dir, dentry, 3629 if (error) !! 2871 mode | S_IFDIR, 0))) 3630 return error; 2872 return error; 3631 inc_nlink(dir); 2873 inc_nlink(dir); 3632 return 0; 2874 return 0; 3633 } 2875 } 3634 2876 3635 static int shmem_create(struct mnt_idmap *idm !! 2877 static int shmem_create(struct user_namespace *mnt_userns, struct inode *dir, 3636 struct dentry *dentry 2878 struct dentry *dentry, umode_t mode, bool excl) 3637 { 2879 { 3638 return shmem_mknod(idmap, dir, dentry !! 2880 return shmem_mknod(&init_user_ns, dir, dentry, mode | S_IFREG, 0); 3639 } 2881 } 3640 2882 3641 /* 2883 /* 3642 * Link a file.. 2884 * Link a file.. 3643 */ 2885 */ 3644 static int shmem_link(struct dentry *old_dent !! 2886 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) 3645 struct dentry *dentry) << 3646 { 2887 { 3647 struct inode *inode = d_inode(old_den 2888 struct inode *inode = d_inode(old_dentry); 3648 int ret = 0; 2889 int ret = 0; 3649 2890 3650 /* 2891 /* 3651 * No ordinary (disk based) filesyste 2892 * No ordinary (disk based) filesystem counts links as inodes; 3652 * but each new link needs a new dent 2893 * but each new link needs a new dentry, pinning lowmem, and 3653 * tmpfs dentries cannot be pruned un 2894 * tmpfs dentries cannot be pruned until they are unlinked. 3654 * But if an O_TMPFILE file is linked 2895 * But if an O_TMPFILE file is linked into the tmpfs, the 3655 * first link must skip that, to get 2896 * first link must skip that, to get the accounting right. 
3656 */ 2897 */ 3657 if (inode->i_nlink) { 2898 if (inode->i_nlink) { 3658 ret = shmem_reserve_inode(ino 2899 ret = shmem_reserve_inode(inode->i_sb, NULL); 3659 if (ret) 2900 if (ret) 3660 goto out; 2901 goto out; 3661 } 2902 } 3662 2903 3663 ret = simple_offset_add(shmem_get_off << 3664 if (ret) { << 3665 if (inode->i_nlink) << 3666 shmem_free_inode(inod << 3667 goto out; << 3668 } << 3669 << 3670 dir->i_size += BOGO_DIRENT_SIZE; 2904 dir->i_size += BOGO_DIRENT_SIZE; 3671 inode_set_mtime_to_ts(dir, !! 2905 inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode); 3672 inode_set_ctime << 3673 inode_inc_iversion(dir); << 3674 inc_nlink(inode); 2906 inc_nlink(inode); 3675 ihold(inode); /* New dentry referen 2907 ihold(inode); /* New dentry reference */ 3676 dget(dentry); /* Extra pinning coun !! 2908 dget(dentry); /* Extra pinning count for the created dentry */ 3677 d_instantiate(dentry, inode); 2909 d_instantiate(dentry, inode); 3678 out: 2910 out: 3679 return ret; 2911 return ret; 3680 } 2912 } 3681 2913 3682 static int shmem_unlink(struct inode *dir, st 2914 static int shmem_unlink(struct inode *dir, struct dentry *dentry) 3683 { 2915 { 3684 struct inode *inode = d_inode(dentry) 2916 struct inode *inode = d_inode(dentry); 3685 2917 3686 if (inode->i_nlink > 1 && !S_ISDIR(in 2918 if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) 3687 shmem_free_inode(inode->i_sb, !! 2919 shmem_free_inode(inode->i_sb); 3688 << 3689 simple_offset_remove(shmem_get_offset << 3690 2920 3691 dir->i_size -= BOGO_DIRENT_SIZE; 2921 dir->i_size -= BOGO_DIRENT_SIZE; 3692 inode_set_mtime_to_ts(dir, !! 2922 inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode); 3693 inode_set_ctime << 3694 inode_inc_iversion(dir); << 3695 drop_nlink(inode); 2923 drop_nlink(inode); 3696 dput(dentry); /* Undo the count fro !! 2924 dput(dentry); /* Undo the count from "create" - this does all the work */ 3697 return 0; 2925 return 0; 3698 } 2926 } 3699 2927 3700 static int shmem_rmdir(struct inode *dir, str 2928 static int shmem_rmdir(struct inode *dir, struct dentry *dentry) 3701 { 2929 { 3702 if (!simple_offset_empty(dentry)) !! 2930 if (!simple_empty(dentry)) 3703 return -ENOTEMPTY; 2931 return -ENOTEMPTY; 3704 2932 3705 drop_nlink(d_inode(dentry)); 2933 drop_nlink(d_inode(dentry)); 3706 drop_nlink(dir); 2934 drop_nlink(dir); 3707 return shmem_unlink(dir, dentry); 2935 return shmem_unlink(dir, dentry); 3708 } 2936 } 3709 2937 3710 static int shmem_whiteout(struct mnt_idmap *i !! 
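/*
 * [Editorial aside -- not part of mm/shmem.c]  The O_TMPFILE case that
 * shmem_link() special-cases above can be exercised from userspace: create an
 * anonymous file on a tmpfs mount, then give it its first name with linkat().
 * A hedged sketch only; "/dev/shm" is assumed to be a tmpfs mount and the
 * destination name is purely illustrative.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char proc_path[64];
	int fd = open("/dev/shm", O_TMPFILE | O_RDWR, 0600);	/* i_nlink == 0 so far */

	if (fd < 0) {
		perror("open(O_TMPFILE)");
		return 1;
	}
	if (write(fd, "hello\n", 6) != 6)
		perror("write");
	/* First link of an O_TMPFILE inode: the case shmem_link() accounts for. */
	snprintf(proc_path, sizeof(proc_path), "/proc/self/fd/%d", fd);
	if (linkat(AT_FDCWD, proc_path, AT_FDCWD, "/dev/shm/tmpfile-demo",
		   AT_SYMLINK_FOLLOW) < 0)
		perror("linkat");
	close(fd);
	return 0;
}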
2938 static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) >> 2939 { >> 2940 bool old_is_dir = d_is_dir(old_dentry); >> 2941 bool new_is_dir = d_is_dir(new_dentry); >> 2942 >> 2943 if (old_dir != new_dir && old_is_dir != new_is_dir) { >> 2944 if (old_is_dir) { >> 2945 drop_nlink(old_dir); >> 2946 inc_nlink(new_dir); >> 2947 } else { >> 2948 drop_nlink(new_dir); >> 2949 inc_nlink(old_dir); >> 2950 } >> 2951 } >> 2952 old_dir->i_ctime = old_dir->i_mtime = >> 2953 new_dir->i_ctime = new_dir->i_mtime = >> 2954 d_inode(old_dentry)->i_ctime = >> 2955 d_inode(new_dentry)->i_ctime = current_time(old_dir); >> 2956 >> 2957 return 0; >> 2958 } >> 2959 >> 2960 static int shmem_whiteout(struct user_namespace *mnt_userns, 3711 struct inode *old_d 2961 struct inode *old_dir, struct dentry *old_dentry) 3712 { 2962 { 3713 struct dentry *whiteout; 2963 struct dentry *whiteout; 3714 int error; 2964 int error; 3715 2965 3716 whiteout = d_alloc(old_dentry->d_pare 2966 whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name); 3717 if (!whiteout) 2967 if (!whiteout) 3718 return -ENOMEM; 2968 return -ENOMEM; 3719 2969 3720 error = shmem_mknod(idmap, old_dir, w !! 2970 error = shmem_mknod(&init_user_ns, old_dir, whiteout, 3721 S_IFCHR | WHITEOU 2971 S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV); 3722 dput(whiteout); 2972 dput(whiteout); 3723 if (error) 2973 if (error) 3724 return error; 2974 return error; 3725 2975 3726 /* 2976 /* 3727 * Cheat and hash the whiteout while 2977 * Cheat and hash the whiteout while the old dentry is still in 3728 * place, instead of playing games wi 2978 * place, instead of playing games with FS_RENAME_DOES_D_MOVE. 3729 * 2979 * 3730 * d_lookup() will consistently find 2980 * d_lookup() will consistently find one of them at this point, 3731 * not sure which one, but that isn't 2981 * not sure which one, but that isn't even important. 3732 */ 2982 */ 3733 d_rehash(whiteout); 2983 d_rehash(whiteout); 3734 return 0; 2984 return 0; 3735 } 2985 } 3736 2986 3737 /* 2987 /* 3738 * The VFS layer already does all the dentry 2988 * The VFS layer already does all the dentry stuff for rename, 3739 * we just have to decrement the usage count 2989 * we just have to decrement the usage count for the target if 3740 * it exists so that the VFS layer correctly 2990 * it exists so that the VFS layer correctly free's it when it 3741 * gets overwritten. 2991 * gets overwritten. 3742 */ 2992 */ 3743 static int shmem_rename2(struct mnt_idmap *id !! 2993 static int shmem_rename2(struct user_namespace *mnt_userns, 3744 struct inode *old_di 2994 struct inode *old_dir, struct dentry *old_dentry, 3745 struct inode *new_di 2995 struct inode *new_dir, struct dentry *new_dentry, 3746 unsigned int flags) 2996 unsigned int flags) 3747 { 2997 { 3748 struct inode *inode = d_inode(old_den 2998 struct inode *inode = d_inode(old_dentry); 3749 int they_are_dirs = S_ISDIR(inode->i_ 2999 int they_are_dirs = S_ISDIR(inode->i_mode); 3750 int error; << 3751 3000 3752 if (flags & ~(RENAME_NOREPLACE | RENA 3001 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) 3753 return -EINVAL; 3002 return -EINVAL; 3754 3003 3755 if (flags & RENAME_EXCHANGE) 3004 if (flags & RENAME_EXCHANGE) 3756 return simple_offset_rename_e !! 3005 return shmem_exchange(old_dir, old_dentry, new_dir, new_dentry); 3757 << 3758 3006 3759 if (!simple_offset_empty(new_dentry)) !! 
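/*
 * [Editorial aside -- not part of mm/shmem.c]  shmem_rename2() accepts
 * RENAME_NOREPLACE, RENAME_EXCHANGE and RENAME_WHITEOUT (the last is mainly
 * used by overlayfs).  A small userspace sketch of the first two, assuming
 * glibc >= 2.28 provides renameat2() and that a.txt/b.txt already exist on a
 * tmpfs mount; the paths are illustrative.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	/* Atomically swap the two names (the RENAME_EXCHANGE branch above). */
	if (renameat2(AT_FDCWD, "/dev/shm/a.txt",
		      AT_FDCWD, "/dev/shm/b.txt", RENAME_EXCHANGE) < 0)
		perror("RENAME_EXCHANGE");

	/* Refuse to clobber an existing target: fails with EEXIST. */
	if (renameat2(AT_FDCWD, "/dev/shm/a.txt",
		      AT_FDCWD, "/dev/shm/b.txt", RENAME_NOREPLACE) < 0)
		perror("RENAME_NOREPLACE");
	return 0;
}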
3007 if (!simple_empty(new_dentry)) 3760 return -ENOTEMPTY; 3008 return -ENOTEMPTY; 3761 3009 3762 if (flags & RENAME_WHITEOUT) { 3010 if (flags & RENAME_WHITEOUT) { 3763 error = shmem_whiteout(idmap, !! 3011 int error; >> 3012 >> 3013 error = shmem_whiteout(&init_user_ns, old_dir, old_dentry); 3764 if (error) 3014 if (error) 3765 return error; 3015 return error; 3766 } 3016 } 3767 3017 3768 error = simple_offset_rename(old_dir, << 3769 if (error) << 3770 return error; << 3771 << 3772 if (d_really_is_positive(new_dentry)) 3018 if (d_really_is_positive(new_dentry)) { 3773 (void) shmem_unlink(new_dir, 3019 (void) shmem_unlink(new_dir, new_dentry); 3774 if (they_are_dirs) { 3020 if (they_are_dirs) { 3775 drop_nlink(d_inode(ne 3021 drop_nlink(d_inode(new_dentry)); 3776 drop_nlink(old_dir); 3022 drop_nlink(old_dir); 3777 } 3023 } 3778 } else if (they_are_dirs) { 3024 } else if (they_are_dirs) { 3779 drop_nlink(old_dir); 3025 drop_nlink(old_dir); 3780 inc_nlink(new_dir); 3026 inc_nlink(new_dir); 3781 } 3027 } 3782 3028 3783 old_dir->i_size -= BOGO_DIRENT_SIZE; 3029 old_dir->i_size -= BOGO_DIRENT_SIZE; 3784 new_dir->i_size += BOGO_DIRENT_SIZE; 3030 new_dir->i_size += BOGO_DIRENT_SIZE; 3785 simple_rename_timestamp(old_dir, old_ !! 3031 old_dir->i_ctime = old_dir->i_mtime = 3786 inode_inc_iversion(old_dir); !! 3032 new_dir->i_ctime = new_dir->i_mtime = 3787 inode_inc_iversion(new_dir); !! 3033 inode->i_ctime = current_time(old_dir); 3788 return 0; 3034 return 0; 3789 } 3035 } 3790 3036 3791 static int shmem_symlink(struct mnt_idmap *id !! 3037 static int shmem_symlink(struct user_namespace *mnt_userns, struct inode *dir, 3792 struct dentry *dentr 3038 struct dentry *dentry, const char *symname) 3793 { 3039 { 3794 int error; 3040 int error; 3795 int len; 3041 int len; 3796 struct inode *inode; 3042 struct inode *inode; 3797 struct folio *folio; !! 3043 struct page *page; 3798 3044 3799 len = strlen(symname) + 1; 3045 len = strlen(symname) + 1; 3800 if (len > PAGE_SIZE) 3046 if (len > PAGE_SIZE) 3801 return -ENAMETOOLONG; 3047 return -ENAMETOOLONG; 3802 3048 3803 inode = shmem_get_inode(idmap, dir->i !! 3049 inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK | 0777, 0, 3804 VM_NORESERVE) 3050 VM_NORESERVE); 3805 if (IS_ERR(inode)) !! 3051 if (!inode) 3806 return PTR_ERR(inode); !! 3052 return -ENOSPC; 3807 3053 3808 error = security_inode_init_security( 3054 error = security_inode_init_security(inode, dir, &dentry->d_name, 3809 3055 shmem_initxattrs, NULL); 3810 if (error && error != -EOPNOTSUPP) !! 3056 if (error && error != -EOPNOTSUPP) { 3811 goto out_iput; !! 3057 iput(inode); 3812 !! 3058 return error; 3813 error = simple_offset_add(shmem_get_o !! 3059 } 3814 if (error) << 3815 goto out_iput; << 3816 3060 3817 inode->i_size = len-1; 3061 inode->i_size = len-1; 3818 if (len <= SHORT_SYMLINK_LEN) { 3062 if (len <= SHORT_SYMLINK_LEN) { 3819 inode->i_link = kmemdup(symna 3063 inode->i_link = kmemdup(symname, len, GFP_KERNEL); 3820 if (!inode->i_link) { 3064 if (!inode->i_link) { 3821 error = -ENOMEM; !! 3065 iput(inode); 3822 goto out_remove_offse !! 
3066 return -ENOMEM; 3823 } 3067 } 3824 inode->i_op = &shmem_short_sy 3068 inode->i_op = &shmem_short_symlink_operations; 3825 } else { 3069 } else { 3826 inode_nohighmem(inode); 3070 inode_nohighmem(inode); >> 3071 error = shmem_getpage(inode, 0, &page, SGP_WRITE); >> 3072 if (error) { >> 3073 iput(inode); >> 3074 return error; >> 3075 } 3827 inode->i_mapping->a_ops = &sh 3076 inode->i_mapping->a_ops = &shmem_aops; 3828 error = shmem_get_folio(inode << 3829 if (error) << 3830 goto out_remove_offse << 3831 inode->i_op = &shmem_symlink_ 3077 inode->i_op = &shmem_symlink_inode_operations; 3832 memcpy(folio_address(folio), !! 3078 memcpy(page_address(page), symname, len); 3833 folio_mark_uptodate(folio); !! 3079 SetPageUptodate(page); 3834 folio_mark_dirty(folio); !! 3080 set_page_dirty(page); 3835 folio_unlock(folio); !! 3081 unlock_page(page); 3836 folio_put(folio); !! 3082 put_page(page); 3837 } 3083 } 3838 dir->i_size += BOGO_DIRENT_SIZE; 3084 dir->i_size += BOGO_DIRENT_SIZE; 3839 inode_set_mtime_to_ts(dir, inode_set_ !! 3085 dir->i_ctime = dir->i_mtime = current_time(dir); 3840 inode_inc_iversion(dir); << 3841 d_instantiate(dentry, inode); 3086 d_instantiate(dentry, inode); 3842 dget(dentry); 3087 dget(dentry); 3843 return 0; 3088 return 0; 3844 << 3845 out_remove_offset: << 3846 simple_offset_remove(shmem_get_offset << 3847 out_iput: << 3848 iput(inode); << 3849 return error; << 3850 } 3089 } 3851 3090 3852 static void shmem_put_link(void *arg) 3091 static void shmem_put_link(void *arg) 3853 { 3092 { 3854 folio_mark_accessed(arg); !! 3093 mark_page_accessed(arg); 3855 folio_put(arg); !! 3094 put_page(arg); 3856 } 3095 } 3857 3096 3858 static const char *shmem_get_link(struct dent !! 3097 static const char *shmem_get_link(struct dentry *dentry, >> 3098 struct inode *inode, 3859 struct dela 3099 struct delayed_call *done) 3860 { 3100 { 3861 struct folio *folio = NULL; !! 3101 struct page *page = NULL; 3862 int error; 3102 int error; 3863 << 3864 if (!dentry) { 3103 if (!dentry) { 3865 folio = filemap_get_folio(ino !! 3104 page = find_get_page(inode->i_mapping, 0); 3866 if (IS_ERR(folio)) !! 3105 if (!page) 3867 return ERR_PTR(-ECHIL 3106 return ERR_PTR(-ECHILD); 3868 if (PageHWPoison(folio_page(f !! 3107 if (!PageUptodate(page)) { 3869 !folio_test_uptodate(foli !! 3108 put_page(page); 3870 folio_put(folio); << 3871 return ERR_PTR(-ECHIL 3109 return ERR_PTR(-ECHILD); 3872 } 3110 } 3873 } else { 3111 } else { 3874 error = shmem_get_folio(inode !! 3112 error = shmem_getpage(inode, 0, &page, SGP_READ); 3875 if (error) 3113 if (error) 3876 return ERR_PTR(error) 3114 return ERR_PTR(error); 3877 if (!folio) !! 3115 unlock_page(page); 3878 return ERR_PTR(-ECHIL << 3879 if (PageHWPoison(folio_page(f << 3880 folio_unlock(folio); << 3881 folio_put(folio); << 3882 return ERR_PTR(-ECHIL << 3883 } << 3884 folio_unlock(folio); << 3885 } 3116 } 3886 set_delayed_call(done, shmem_put_link !! 3117 set_delayed_call(done, shmem_put_link, page); 3887 return folio_address(folio); !! 
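/*
 * [Editorial aside -- not part of mm/shmem.c]  shmem_symlink() stores targets
 * shorter than SHORT_SYMLINK_LEN directly in a kmalloc'ed inode->i_link and
 * longer ones in a swappable page that shmem_get_link() reads back.  A sketch
 * only, assuming "/dev/shm" is a tmpfs mount; the link names are illustrative.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char long_target[300];			/* comfortably above SHORT_SYMLINK_LEN */
	char buf[4096];
	ssize_t n;

	if (symlink("short-target", "/dev/shm/lnk-short") < 0)
		perror("symlink short");	/* short: kept in inode->i_link */

	memset(long_target, 'x', sizeof(long_target) - 1);
	long_target[sizeof(long_target) - 1] = '\0';
	if (symlink(long_target, "/dev/shm/lnk-long") < 0)
		perror("symlink long");		/* long: stored in a page */

	n = readlink("/dev/shm/lnk-long", buf, sizeof(buf) - 1);
	if (n >= 0) {
		buf[n] = '\0';
		printf("readlink returned %zd bytes\n", n);
	}
	return 0;
}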
3118 return page_address(page); 3888 } 3119 } 3889 3120 3890 #ifdef CONFIG_TMPFS_XATTR 3121 #ifdef CONFIG_TMPFS_XATTR 3891 << 3892 static int shmem_fileattr_get(struct dentry * << 3893 { << 3894 struct shmem_inode_info *info = SHMEM << 3895 << 3896 fileattr_fill_flags(fa, info->fsflags << 3897 << 3898 return 0; << 3899 } << 3900 << 3901 static int shmem_fileattr_set(struct mnt_idma << 3902 struct dentry * << 3903 { << 3904 struct inode *inode = d_inode(dentry) << 3905 struct shmem_inode_info *info = SHMEM << 3906 << 3907 if (fileattr_has_fsx(fa)) << 3908 return -EOPNOTSUPP; << 3909 if (fa->flags & ~SHMEM_FL_USER_MODIFI << 3910 return -EOPNOTSUPP; << 3911 << 3912 info->fsflags = (info->fsflags & ~SHM << 3913 (fa->flags & SHMEM_FL_USER_MO << 3914 << 3915 shmem_set_inode_flags(inode, info->fs << 3916 inode_set_ctime_current(inode); << 3917 inode_inc_iversion(inode); << 3918 return 0; << 3919 } << 3920 << 3921 /* 3122 /* 3922 * Superblocks without xattr inode operations 3123 * Superblocks without xattr inode operations may get some security.* xattr 3923 * support from the LSM "for free". As soon a 3124 * support from the LSM "for free". As soon as we have any other xattrs 3924 * like ACLs, we also need to implement the s 3125 * like ACLs, we also need to implement the security.* handlers at 3925 * filesystem level, though. 3126 * filesystem level, though. 3926 */ 3127 */ 3927 3128 3928 /* 3129 /* 3929 * Callback for security_inode_init_security( 3130 * Callback for security_inode_init_security() for acquiring xattrs. 3930 */ 3131 */ 3931 static int shmem_initxattrs(struct inode *ino 3132 static int shmem_initxattrs(struct inode *inode, 3932 const struct xatt !! 3133 const struct xattr *xattr_array, >> 3134 void *fs_info) 3933 { 3135 { 3934 struct shmem_inode_info *info = SHMEM 3136 struct shmem_inode_info *info = SHMEM_I(inode); 3935 struct shmem_sb_info *sbinfo = SHMEM_ << 3936 const struct xattr *xattr; 3137 const struct xattr *xattr; 3937 struct simple_xattr *new_xattr; 3138 struct simple_xattr *new_xattr; 3938 size_t ispace = 0; << 3939 size_t len; 3139 size_t len; 3940 3140 3941 if (sbinfo->max_inodes) { << 3942 for (xattr = xattr_array; xat << 3943 ispace += simple_xatt << 3944 xattr->value_ << 3945 } << 3946 if (ispace) { << 3947 raw_spin_lock(&sbinfo << 3948 if (sbinfo->free_ispa << 3949 ispace = 0; << 3950 else << 3951 sbinfo->free_ << 3952 raw_spin_unlock(&sbin << 3953 if (!ispace) << 3954 return -ENOSP << 3955 } << 3956 } << 3957 << 3958 for (xattr = xattr_array; xattr->name 3141 for (xattr = xattr_array; xattr->name != NULL; xattr++) { 3959 new_xattr = simple_xattr_allo 3142 new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len); 3960 if (!new_xattr) 3143 if (!new_xattr) 3961 break; !! 3144 return -ENOMEM; 3962 3145 3963 len = strlen(xattr->name) + 1 3146 len = strlen(xattr->name) + 1; 3964 new_xattr->name = kmalloc(XAT 3147 new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len, 3965 GFP !! 3148 GFP_KERNEL); 3966 if (!new_xattr->name) { 3149 if (!new_xattr->name) { 3967 kvfree(new_xattr); 3150 kvfree(new_xattr); 3968 break; !! 3151 return -ENOMEM; 3969 } 3152 } 3970 3153 3971 memcpy(new_xattr->name, XATTR 3154 memcpy(new_xattr->name, XATTR_SECURITY_PREFIX, 3972 XATTR_SECURITY_PREFIX_ 3155 XATTR_SECURITY_PREFIX_LEN); 3973 memcpy(new_xattr->name + XATT 3156 memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN, 3974 xattr->name, len); 3157 xattr->name, len); 3975 3158 3976 simple_xattr_add(&info->xattr !! 
3159 simple_xattr_list_add(&info->xattrs, new_xattr); 3977 } << 3978 << 3979 if (xattr->name != NULL) { << 3980 if (ispace) { << 3981 raw_spin_lock(&sbinfo << 3982 sbinfo->free_ispace + << 3983 raw_spin_unlock(&sbin << 3984 } << 3985 simple_xattrs_free(&info->xat << 3986 return -ENOMEM; << 3987 } 3160 } 3988 3161 3989 return 0; 3162 return 0; 3990 } 3163 } 3991 3164 3992 static int shmem_xattr_handler_get(const stru 3165 static int shmem_xattr_handler_get(const struct xattr_handler *handler, 3993 struct den 3166 struct dentry *unused, struct inode *inode, 3994 const char 3167 const char *name, void *buffer, size_t size) 3995 { 3168 { 3996 struct shmem_inode_info *info = SHMEM 3169 struct shmem_inode_info *info = SHMEM_I(inode); 3997 3170 3998 name = xattr_full_name(handler, name) 3171 name = xattr_full_name(handler, name); 3999 return simple_xattr_get(&info->xattrs 3172 return simple_xattr_get(&info->xattrs, name, buffer, size); 4000 } 3173 } 4001 3174 4002 static int shmem_xattr_handler_set(const stru 3175 static int shmem_xattr_handler_set(const struct xattr_handler *handler, 4003 struct mnt !! 3176 struct user_namespace *mnt_userns, 4004 struct den 3177 struct dentry *unused, struct inode *inode, 4005 const char 3178 const char *name, const void *value, 4006 size_t siz 3179 size_t size, int flags) 4007 { 3180 { 4008 struct shmem_inode_info *info = SHMEM 3181 struct shmem_inode_info *info = SHMEM_I(inode); 4009 struct shmem_sb_info *sbinfo = SHMEM_ << 4010 struct simple_xattr *old_xattr; << 4011 size_t ispace = 0; << 4012 3182 4013 name = xattr_full_name(handler, name) 3183 name = xattr_full_name(handler, name); 4014 if (value && sbinfo->max_inodes) { !! 3184 return simple_xattr_set(&info->xattrs, name, value, size, flags, NULL); 4015 ispace = simple_xattr_space(n << 4016 raw_spin_lock(&sbinfo->stat_l << 4017 if (sbinfo->free_ispace < isp << 4018 ispace = 0; << 4019 else << 4020 sbinfo->free_ispace - << 4021 raw_spin_unlock(&sbinfo->stat << 4022 if (!ispace) << 4023 return -ENOSPC; << 4024 } << 4025 << 4026 old_xattr = simple_xattr_set(&info->x << 4027 if (!IS_ERR(old_xattr)) { << 4028 ispace = 0; << 4029 if (old_xattr && sbinfo->max_ << 4030 ispace = simple_xattr << 4031 << 4032 simple_xattr_free(old_xattr); << 4033 old_xattr = NULL; << 4034 inode_set_ctime_current(inode << 4035 inode_inc_iversion(inode); << 4036 } << 4037 if (ispace) { << 4038 raw_spin_lock(&sbinfo->stat_l << 4039 sbinfo->free_ispace += ispace << 4040 raw_spin_unlock(&sbinfo->stat << 4041 } << 4042 return PTR_ERR(old_xattr); << 4043 } 3185 } 4044 3186 4045 static const struct xattr_handler shmem_secur 3187 static const struct xattr_handler shmem_security_xattr_handler = { 4046 .prefix = XATTR_SECURITY_PREFIX, 3188 .prefix = XATTR_SECURITY_PREFIX, 4047 .get = shmem_xattr_handler_get, 3189 .get = shmem_xattr_handler_get, 4048 .set = shmem_xattr_handler_set, 3190 .set = shmem_xattr_handler_set, 4049 }; 3191 }; 4050 3192 4051 static const struct xattr_handler shmem_trust 3193 static const struct xattr_handler shmem_trusted_xattr_handler = { 4052 .prefix = XATTR_TRUSTED_PREFIX, 3194 .prefix = XATTR_TRUSTED_PREFIX, 4053 .get = shmem_xattr_handler_get, 3195 .get = shmem_xattr_handler_get, 4054 .set = shmem_xattr_handler_set, 3196 .set = shmem_xattr_handler_set, 4055 }; 3197 }; 4056 3198 4057 static const struct xattr_handler shmem_user_ !! 3199 static const struct xattr_handler *shmem_xattr_handlers[] = { 4058 .prefix = XATTR_USER_PREFIX, !! 3200 #ifdef CONFIG_TMPFS_POSIX_ACL 4059 .get = shmem_xattr_handler_get, !! 
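/*
 * [Editorial aside -- not part of mm/shmem.c]  The handlers assembled here
 * back the security.*, trusted.* and (in the newer column only) user.* xattr
 * namespaces.  A userspace sketch, assuming CONFIG_TMPFS_XATTR, a kernel new
 * enough to carry shmem_user_xattr_handler, and an existing file on tmpfs;
 * trusted.* would additionally require CAP_SYS_ADMIN.
 */
#include <stdio.h>
#include <string.h>
#include <sys/xattr.h>

int main(void)
{
	const char *path = "/dev/shm/xattr-demo";	/* assumed to exist on tmpfs */
	char value[64];
	char list[256];
	ssize_t n, off;

	/* Routed through shmem_xattr_handler_set() -> simple_xattr_set(). */
	if (setxattr(path, "user.comment", "hello", 5, 0) < 0)
		perror("setxattr");		/* EOPNOTSUPP on kernels without user.* */

	n = getxattr(path, "user.comment", value, sizeof(value));
	if (n >= 0)
		printf("user.comment = %.*s\n", (int)n, value);

	/* listxattr() ends up in shmem_listxattr() -> simple_xattr_list(). */
	n = listxattr(path, list, sizeof(list));
	for (off = 0; off < n; off += strlen(list + off) + 1)
		printf("xattr: %s\n", list + off);
	return 0;
}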
3201 &posix_acl_access_xattr_handler, 4060 .set = shmem_xattr_handler_set, !! 3202 &posix_acl_default_xattr_handler, 4061 }; !! 3203 #endif 4062 << 4063 static const struct xattr_handler * const shm << 4064 &shmem_security_xattr_handler, 3204 &shmem_security_xattr_handler, 4065 &shmem_trusted_xattr_handler, 3205 &shmem_trusted_xattr_handler, 4066 &shmem_user_xattr_handler, << 4067 NULL 3206 NULL 4068 }; 3207 }; 4069 3208 4070 static ssize_t shmem_listxattr(struct dentry 3209 static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size) 4071 { 3210 { 4072 struct shmem_inode_info *info = SHMEM 3211 struct shmem_inode_info *info = SHMEM_I(d_inode(dentry)); 4073 return simple_xattr_list(d_inode(dent 3212 return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size); 4074 } 3213 } 4075 #endif /* CONFIG_TMPFS_XATTR */ 3214 #endif /* CONFIG_TMPFS_XATTR */ 4076 3215 4077 static const struct inode_operations shmem_sh 3216 static const struct inode_operations shmem_short_symlink_operations = { 4078 .getattr = shmem_getattr, << 4079 .setattr = shmem_setattr, << 4080 .get_link = simple_get_link, 3217 .get_link = simple_get_link, 4081 #ifdef CONFIG_TMPFS_XATTR 3218 #ifdef CONFIG_TMPFS_XATTR 4082 .listxattr = shmem_listxattr, 3219 .listxattr = shmem_listxattr, 4083 #endif 3220 #endif 4084 }; 3221 }; 4085 3222 4086 static const struct inode_operations shmem_sy 3223 static const struct inode_operations shmem_symlink_inode_operations = { 4087 .getattr = shmem_getattr, << 4088 .setattr = shmem_setattr, << 4089 .get_link = shmem_get_link, 3224 .get_link = shmem_get_link, 4090 #ifdef CONFIG_TMPFS_XATTR 3225 #ifdef CONFIG_TMPFS_XATTR 4091 .listxattr = shmem_listxattr, 3226 .listxattr = shmem_listxattr, 4092 #endif 3227 #endif 4093 }; 3228 }; 4094 3229 4095 static struct dentry *shmem_get_parent(struct 3230 static struct dentry *shmem_get_parent(struct dentry *child) 4096 { 3231 { 4097 return ERR_PTR(-ESTALE); 3232 return ERR_PTR(-ESTALE); 4098 } 3233 } 4099 3234 4100 static int shmem_match(struct inode *ino, voi 3235 static int shmem_match(struct inode *ino, void *vfh) 4101 { 3236 { 4102 __u32 *fh = vfh; 3237 __u32 *fh = vfh; 4103 __u64 inum = fh[2]; 3238 __u64 inum = fh[2]; 4104 inum = (inum << 32) | fh[1]; 3239 inum = (inum << 32) | fh[1]; 4105 return ino->i_ino == inum && fh[0] == 3240 return ino->i_ino == inum && fh[0] == ino->i_generation; 4106 } 3241 } 4107 3242 4108 /* Find any alias of inode, but prefer a hash 3243 /* Find any alias of inode, but prefer a hashed alias */ 4109 static struct dentry *shmem_find_alias(struct 3244 static struct dentry *shmem_find_alias(struct inode *inode) 4110 { 3245 { 4111 struct dentry *alias = d_find_alias(i 3246 struct dentry *alias = d_find_alias(inode); 4112 3247 4113 return alias ?: d_find_any_alias(inod 3248 return alias ?: d_find_any_alias(inode); 4114 } 3249 } 4115 3250 >> 3251 4116 static struct dentry *shmem_fh_to_dentry(stru 3252 static struct dentry *shmem_fh_to_dentry(struct super_block *sb, 4117 struct fid *fid, int fh_len, 3253 struct fid *fid, int fh_len, int fh_type) 4118 { 3254 { 4119 struct inode *inode; 3255 struct inode *inode; 4120 struct dentry *dentry = NULL; 3256 struct dentry *dentry = NULL; 4121 u64 inum; 3257 u64 inum; 4122 3258 4123 if (fh_len < 3) 3259 if (fh_len < 3) 4124 return NULL; 3260 return NULL; 4125 3261 4126 inum = fid->raw[2]; 3262 inum = fid->raw[2]; 4127 inum = (inum << 32) | fid->raw[1]; 3263 inum = (inum << 32) | fid->raw[1]; 4128 3264 4129 inode = ilookup5(sb, (unsigned long)( 3265 inode = 
ilookup5(sb, (unsigned long)(inum + fid->raw[0]), 4130 shmem_match, fid->raw 3266 shmem_match, fid->raw); 4131 if (inode) { 3267 if (inode) { 4132 dentry = shmem_find_alias(ino 3268 dentry = shmem_find_alias(inode); 4133 iput(inode); 3269 iput(inode); 4134 } 3270 } 4135 3271 4136 return dentry; 3272 return dentry; 4137 } 3273 } 4138 3274 4139 static int shmem_encode_fh(struct inode *inod 3275 static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len, 4140 struct inode 3276 struct inode *parent) 4141 { 3277 { 4142 if (*len < 3) { 3278 if (*len < 3) { 4143 *len = 3; 3279 *len = 3; 4144 return FILEID_INVALID; 3280 return FILEID_INVALID; 4145 } 3281 } 4146 3282 4147 if (inode_unhashed(inode)) { 3283 if (inode_unhashed(inode)) { 4148 /* Unfortunately insert_inode 3284 /* Unfortunately insert_inode_hash is not idempotent, 4149 * so as we hash inodes here 3285 * so as we hash inodes here rather than at creation 4150 * time, we need a lock to en 3286 * time, we need a lock to ensure we only try 4151 * to do it once 3287 * to do it once 4152 */ 3288 */ 4153 static DEFINE_SPINLOCK(lock); 3289 static DEFINE_SPINLOCK(lock); 4154 spin_lock(&lock); 3290 spin_lock(&lock); 4155 if (inode_unhashed(inode)) 3291 if (inode_unhashed(inode)) 4156 __insert_inode_hash(i 3292 __insert_inode_hash(inode, 4157 i 3293 inode->i_ino + inode->i_generation); 4158 spin_unlock(&lock); 3294 spin_unlock(&lock); 4159 } 3295 } 4160 3296 4161 fh[0] = inode->i_generation; 3297 fh[0] = inode->i_generation; 4162 fh[1] = inode->i_ino; 3298 fh[1] = inode->i_ino; 4163 fh[2] = ((__u64)inode->i_ino) >> 32; 3299 fh[2] = ((__u64)inode->i_ino) >> 32; 4164 3300 4165 *len = 3; 3301 *len = 3; 4166 return 1; 3302 return 1; 4167 } 3303 } 4168 3304 4169 static const struct export_operations shmem_e 3305 static const struct export_operations shmem_export_ops = { 4170 .get_parent = shmem_get_parent, 3306 .get_parent = shmem_get_parent, 4171 .encode_fh = shmem_encode_fh, 3307 .encode_fh = shmem_encode_fh, 4172 .fh_to_dentry = shmem_fh_to_dentry, 3308 .fh_to_dentry = shmem_fh_to_dentry, 4173 }; 3309 }; 4174 3310 4175 enum shmem_param { 3311 enum shmem_param { 4176 Opt_gid, 3312 Opt_gid, 4177 Opt_huge, 3313 Opt_huge, 4178 Opt_mode, 3314 Opt_mode, 4179 Opt_mpol, 3315 Opt_mpol, 4180 Opt_nr_blocks, 3316 Opt_nr_blocks, 4181 Opt_nr_inodes, 3317 Opt_nr_inodes, 4182 Opt_size, 3318 Opt_size, 4183 Opt_uid, 3319 Opt_uid, 4184 Opt_inode32, 3320 Opt_inode32, 4185 Opt_inode64, 3321 Opt_inode64, 4186 Opt_noswap, << 4187 Opt_quota, << 4188 Opt_usrquota, << 4189 Opt_grpquota, << 4190 Opt_usrquota_block_hardlimit, << 4191 Opt_usrquota_inode_hardlimit, << 4192 Opt_grpquota_block_hardlimit, << 4193 Opt_grpquota_inode_hardlimit, << 4194 }; 3322 }; 4195 3323 4196 static const struct constant_table shmem_para 3324 static const struct constant_table shmem_param_enums_huge[] = { 4197 {"never", SHMEM_HUGE_NEVER }, 3325 {"never", SHMEM_HUGE_NEVER }, 4198 {"always", SHMEM_HUGE_ALWAYS }, 3326 {"always", SHMEM_HUGE_ALWAYS }, 4199 {"within_size", SHMEM_HUGE_WITHIN_SIZ 3327 {"within_size", SHMEM_HUGE_WITHIN_SIZE }, 4200 {"advise", SHMEM_HUGE_ADVISE }, 3328 {"advise", SHMEM_HUGE_ADVISE }, 4201 {} 3329 {} 4202 }; 3330 }; 4203 3331 4204 const struct fs_parameter_spec shmem_fs_param 3332 const struct fs_parameter_spec shmem_fs_parameters[] = { 4205 fsparam_gid ("gid", Opt_g !! 
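/*
 * [Editorial aside -- not part of mm/shmem.c]  The export operations listed
 * just above let userspace obtain and reuse NFS-style file handles on tmpfs.
 * A hedged sketch: the demo path is assumed to exist on a tmpfs mount, and
 * open_by_handle_at() requires CAP_DAC_READ_SEARCH.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	struct file_handle *fh = malloc(sizeof(*fh) + MAX_HANDLE_SZ);
	int mount_id, mount_fd, fd;

	if (!fh)
		return 1;
	fh->handle_bytes = MAX_HANDLE_SZ;	/* ample for the 3-word handle shmem_encode_fh() emits */

	if (name_to_handle_at(AT_FDCWD, "/dev/shm/handle-demo", fh, &mount_id, 0) < 0) {
		perror("name_to_handle_at");
		return 1;
	}
	printf("handle: %u bytes, type %d\n", fh->handle_bytes, fh->handle_type);

	/* Any fd on the same mount serves as the anchor for decoding. */
	mount_fd = open("/dev/shm", O_RDONLY | O_DIRECTORY);
	fd = open_by_handle_at(mount_fd, fh, O_RDONLY);
	if (fd < 0)
		perror("open_by_handle_at");	/* EPERM without CAP_DAC_READ_SEARCH */
	else
		close(fd);
	free(fh);
	return 0;
}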
3333 fsparam_u32 ("gid", Opt_gid), 4206 fsparam_enum ("huge", Opt_h 3334 fsparam_enum ("huge", Opt_huge, shmem_param_enums_huge), 4207 fsparam_u32oct("mode", Opt_m 3335 fsparam_u32oct("mode", Opt_mode), 4208 fsparam_string("mpol", Opt_m 3336 fsparam_string("mpol", Opt_mpol), 4209 fsparam_string("nr_blocks", Opt_n 3337 fsparam_string("nr_blocks", Opt_nr_blocks), 4210 fsparam_string("nr_inodes", Opt_n 3338 fsparam_string("nr_inodes", Opt_nr_inodes), 4211 fsparam_string("size", Opt_s 3339 fsparam_string("size", Opt_size), 4212 fsparam_uid ("uid", Opt_u !! 3340 fsparam_u32 ("uid", Opt_uid), 4213 fsparam_flag ("inode32", Opt_i 3341 fsparam_flag ("inode32", Opt_inode32), 4214 fsparam_flag ("inode64", Opt_i 3342 fsparam_flag ("inode64", Opt_inode64), 4215 fsparam_flag ("noswap", Opt_n << 4216 #ifdef CONFIG_TMPFS_QUOTA << 4217 fsparam_flag ("quota", Opt_q << 4218 fsparam_flag ("usrquota", Opt_u << 4219 fsparam_flag ("grpquota", Opt_g << 4220 fsparam_string("usrquota_block_hardli << 4221 fsparam_string("usrquota_inode_hardli << 4222 fsparam_string("grpquota_block_hardli << 4223 fsparam_string("grpquota_inode_hardli << 4224 #endif << 4225 {} 3343 {} 4226 }; 3344 }; 4227 3345 4228 static int shmem_parse_one(struct fs_context 3346 static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param) 4229 { 3347 { 4230 struct shmem_options *ctx = fc->fs_pr 3348 struct shmem_options *ctx = fc->fs_private; 4231 struct fs_parse_result result; 3349 struct fs_parse_result result; 4232 unsigned long long size; 3350 unsigned long long size; 4233 char *rest; 3351 char *rest; 4234 int opt; 3352 int opt; 4235 kuid_t kuid; << 4236 kgid_t kgid; << 4237 3353 4238 opt = fs_parse(fc, shmem_fs_parameter 3354 opt = fs_parse(fc, shmem_fs_parameters, param, &result); 4239 if (opt < 0) 3355 if (opt < 0) 4240 return opt; 3356 return opt; 4241 3357 4242 switch (opt) { 3358 switch (opt) { 4243 case Opt_size: 3359 case Opt_size: 4244 size = memparse(param->string 3360 size = memparse(param->string, &rest); 4245 if (*rest == '%') { 3361 if (*rest == '%') { 4246 size <<= PAGE_SHIFT; 3362 size <<= PAGE_SHIFT; 4247 size *= totalram_page 3363 size *= totalram_pages(); 4248 do_div(size, 100); 3364 do_div(size, 100); 4249 rest++; 3365 rest++; 4250 } 3366 } 4251 if (*rest) 3367 if (*rest) 4252 goto bad_value; 3368 goto bad_value; 4253 ctx->blocks = DIV_ROUND_UP(si 3369 ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE); 4254 ctx->seen |= SHMEM_SEEN_BLOCK 3370 ctx->seen |= SHMEM_SEEN_BLOCKS; 4255 break; 3371 break; 4256 case Opt_nr_blocks: 3372 case Opt_nr_blocks: 4257 ctx->blocks = memparse(param- 3373 ctx->blocks = memparse(param->string, &rest); 4258 if (*rest || ctx->blocks > LO !! 3374 if (*rest) 4259 goto bad_value; 3375 goto bad_value; 4260 ctx->seen |= SHMEM_SEEN_BLOCK 3376 ctx->seen |= SHMEM_SEEN_BLOCKS; 4261 break; 3377 break; 4262 case Opt_nr_inodes: 3378 case Opt_nr_inodes: 4263 ctx->inodes = memparse(param- 3379 ctx->inodes = memparse(param->string, &rest); 4264 if (*rest || ctx->inodes > UL !! 3380 if (*rest) 4265 goto bad_value; 3381 goto bad_value; 4266 ctx->seen |= SHMEM_SEEN_INODE 3382 ctx->seen |= SHMEM_SEEN_INODES; 4267 break; 3383 break; 4268 case Opt_mode: 3384 case Opt_mode: 4269 ctx->mode = result.uint_32 & 3385 ctx->mode = result.uint_32 & 07777; 4270 break; 3386 break; 4271 case Opt_uid: 3387 case Opt_uid: 4272 kuid = result.uid; !! 3388 ctx->uid = make_kuid(current_user_ns(), result.uint_32); 4273 !! 3389 if (!uid_valid(ctx->uid)) 4274 /* << 4275 * The requested uid must be << 4276 * filesystem's idmapping. 
<< 4277 */ << 4278 if (!kuid_has_mapping(fc->use << 4279 goto bad_value; 3390 goto bad_value; 4280 << 4281 ctx->uid = kuid; << 4282 break; 3391 break; 4283 case Opt_gid: 3392 case Opt_gid: 4284 kgid = result.gid; !! 3393 ctx->gid = make_kgid(current_user_ns(), result.uint_32); 4285 !! 3394 if (!gid_valid(ctx->gid)) 4286 /* << 4287 * The requested gid must be << 4288 * filesystem's idmapping. << 4289 */ << 4290 if (!kgid_has_mapping(fc->use << 4291 goto bad_value; 3395 goto bad_value; 4292 << 4293 ctx->gid = kgid; << 4294 break; 3396 break; 4295 case Opt_huge: 3397 case Opt_huge: 4296 ctx->huge = result.uint_32; 3398 ctx->huge = result.uint_32; 4297 if (ctx->huge != SHMEM_HUGE_N 3399 if (ctx->huge != SHMEM_HUGE_NEVER && 4298 !(IS_ENABLED(CONFIG_TRANS 3400 !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && 4299 has_transparent_hugepag 3401 has_transparent_hugepage())) 4300 goto unsupported_para 3402 goto unsupported_parameter; 4301 ctx->seen |= SHMEM_SEEN_HUGE; 3403 ctx->seen |= SHMEM_SEEN_HUGE; 4302 break; 3404 break; 4303 case Opt_mpol: 3405 case Opt_mpol: 4304 if (IS_ENABLED(CONFIG_NUMA)) 3406 if (IS_ENABLED(CONFIG_NUMA)) { 4305 mpol_put(ctx->mpol); 3407 mpol_put(ctx->mpol); 4306 ctx->mpol = NULL; 3408 ctx->mpol = NULL; 4307 if (mpol_parse_str(pa 3409 if (mpol_parse_str(param->string, &ctx->mpol)) 4308 goto bad_valu 3410 goto bad_value; 4309 break; 3411 break; 4310 } 3412 } 4311 goto unsupported_parameter; 3413 goto unsupported_parameter; 4312 case Opt_inode32: 3414 case Opt_inode32: 4313 ctx->full_inums = false; 3415 ctx->full_inums = false; 4314 ctx->seen |= SHMEM_SEEN_INUMS 3416 ctx->seen |= SHMEM_SEEN_INUMS; 4315 break; 3417 break; 4316 case Opt_inode64: 3418 case Opt_inode64: 4317 if (sizeof(ino_t) < 8) { 3419 if (sizeof(ino_t) < 8) { 4318 return invalfc(fc, 3420 return invalfc(fc, 4319 "Canno 3421 "Cannot use inode64 with <64bit inums in kernel\n"); 4320 } 3422 } 4321 ctx->full_inums = true; 3423 ctx->full_inums = true; 4322 ctx->seen |= SHMEM_SEEN_INUMS 3424 ctx->seen |= SHMEM_SEEN_INUMS; 4323 break; 3425 break; 4324 case Opt_noswap: << 4325 if ((fc->user_ns != &init_use << 4326 return invalfc(fc, << 4327 "Turni << 4328 } << 4329 ctx->noswap = true; << 4330 ctx->seen |= SHMEM_SEEN_NOSWA << 4331 break; << 4332 case Opt_quota: << 4333 if (fc->user_ns != &init_user << 4334 return invalfc(fc, "Q << 4335 ctx->seen |= SHMEM_SEEN_QUOTA << 4336 ctx->quota_types |= (QTYPE_MA << 4337 break; << 4338 case Opt_usrquota: << 4339 if (fc->user_ns != &init_user << 4340 return invalfc(fc, "Q << 4341 ctx->seen |= SHMEM_SEEN_QUOTA << 4342 ctx->quota_types |= QTYPE_MAS << 4343 break; << 4344 case Opt_grpquota: << 4345 if (fc->user_ns != &init_user << 4346 return invalfc(fc, "Q << 4347 ctx->seen |= SHMEM_SEEN_QUOTA << 4348 ctx->quota_types |= QTYPE_MAS << 4349 break; << 4350 case Opt_usrquota_block_hardlimit: << 4351 size = memparse(param->string << 4352 if (*rest || !size) << 4353 goto bad_value; << 4354 if (size > SHMEM_QUOTA_MAX_SP << 4355 return invalfc(fc, << 4356 "User << 4357 ctx->qlimits.usrquota_bhardli << 4358 break; << 4359 case Opt_grpquota_block_hardlimit: << 4360 size = memparse(param->string << 4361 if (*rest || !size) << 4362 goto bad_value; << 4363 if (size > SHMEM_QUOTA_MAX_SP << 4364 return invalfc(fc, << 4365 "Group << 4366 ctx->qlimits.grpquota_bhardli << 4367 break; << 4368 case Opt_usrquota_inode_hardlimit: << 4369 size = memparse(param->string << 4370 if (*rest || !size) << 4371 goto bad_value; << 4372 if (size > SHMEM_QUOTA_MAX_IN << 4373 return invalfc(fc, << 4374 "User << 4375 
ctx->qlimits.usrquota_ihardli << 4376 break; << 4377 case Opt_grpquota_inode_hardlimit: << 4378 size = memparse(param->string << 4379 if (*rest || !size) << 4380 goto bad_value; << 4381 if (size > SHMEM_QUOTA_MAX_IN << 4382 return invalfc(fc, << 4383 "Group << 4384 ctx->qlimits.grpquota_ihardli << 4385 break; << 4386 } 3426 } 4387 return 0; 3427 return 0; 4388 3428 4389 unsupported_parameter: 3429 unsupported_parameter: 4390 return invalfc(fc, "Unsupported param 3430 return invalfc(fc, "Unsupported parameter '%s'", param->key); 4391 bad_value: 3431 bad_value: 4392 return invalfc(fc, "Bad value for '%s 3432 return invalfc(fc, "Bad value for '%s'", param->key); 4393 } 3433 } 4394 3434 4395 static int shmem_parse_options(struct fs_cont 3435 static int shmem_parse_options(struct fs_context *fc, void *data) 4396 { 3436 { 4397 char *options = data; 3437 char *options = data; 4398 3438 4399 if (options) { 3439 if (options) { 4400 int err = security_sb_eat_lsm 3440 int err = security_sb_eat_lsm_opts(options, &fc->security); 4401 if (err) 3441 if (err) 4402 return err; 3442 return err; 4403 } 3443 } 4404 3444 4405 while (options != NULL) { 3445 while (options != NULL) { 4406 char *this_char = options; 3446 char *this_char = options; 4407 for (;;) { 3447 for (;;) { 4408 /* 3448 /* 4409 * NUL-terminate this 3449 * NUL-terminate this option: unfortunately, 4410 * mount options form 3450 * mount options form a comma-separated list, 4411 * but mpol's nodelis 3451 * but mpol's nodelist may also contain commas. 4412 */ 3452 */ 4413 options = strchr(opti 3453 options = strchr(options, ','); 4414 if (options == NULL) 3454 if (options == NULL) 4415 break; 3455 break; 4416 options++; 3456 options++; 4417 if (!isdigit(*options 3457 if (!isdigit(*options)) { 4418 options[-1] = 3458 options[-1] = '\0'; 4419 break; 3459 break; 4420 } 3460 } 4421 } 3461 } 4422 if (*this_char) { 3462 if (*this_char) { 4423 char *value = strchr( 3463 char *value = strchr(this_char, '='); 4424 size_t len = 0; 3464 size_t len = 0; 4425 int err; 3465 int err; 4426 3466 4427 if (value) { 3467 if (value) { 4428 *value++ = '\ 3468 *value++ = '\0'; 4429 len = strlen( 3469 len = strlen(value); 4430 } 3470 } 4431 err = vfs_parse_fs_st 3471 err = vfs_parse_fs_string(fc, this_char, value, len); 4432 if (err < 0) 3472 if (err < 0) 4433 return err; 3473 return err; 4434 } 3474 } 4435 } 3475 } 4436 return 0; 3476 return 0; 4437 } 3477 } 4438 3478 4439 /* 3479 /* 4440 * Reconfigure a shmem filesystem. 3480 * Reconfigure a shmem filesystem. >> 3481 * >> 3482 * Note that we disallow change from limited->unlimited blocks/inodes while any >> 3483 * are in use; but we must separately disallow unlimited->limited, because in >> 3484 * that case we have no record of how much is already in use. 4441 */ 3485 */ 4442 static int shmem_reconfigure(struct fs_contex 3486 static int shmem_reconfigure(struct fs_context *fc) 4443 { 3487 { 4444 struct shmem_options *ctx = fc->fs_pr 3488 struct shmem_options *ctx = fc->fs_private; 4445 struct shmem_sb_info *sbinfo = SHMEM_ 3489 struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb); 4446 unsigned long used_isp; !! 3490 unsigned long inodes; 4447 struct mempolicy *mpol = NULL; << 4448 const char *err; 3491 const char *err; 4449 3492 4450 raw_spin_lock(&sbinfo->stat_lock); !! 3493 spin_lock(&sbinfo->stat_lock); 4451 used_isp = sbinfo->max_inodes * BOGO_ !! 
3494 inodes = sbinfo->max_inodes - sbinfo->free_inodes; 4452 << 4453 if ((ctx->seen & SHMEM_SEEN_BLOCKS) & 3495 if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) { 4454 if (!sbinfo->max_blocks) { 3496 if (!sbinfo->max_blocks) { 4455 err = "Cannot retroac 3497 err = "Cannot retroactively limit size"; 4456 goto out; 3498 goto out; 4457 } 3499 } 4458 if (percpu_counter_compare(&s 3500 if (percpu_counter_compare(&sbinfo->used_blocks, 4459 ct 3501 ctx->blocks) > 0) { 4460 err = "Too small a si 3502 err = "Too small a size for current use"; 4461 goto out; 3503 goto out; 4462 } 3504 } 4463 } 3505 } 4464 if ((ctx->seen & SHMEM_SEEN_INODES) & 3506 if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) { 4465 if (!sbinfo->max_inodes) { 3507 if (!sbinfo->max_inodes) { 4466 err = "Cannot retroac 3508 err = "Cannot retroactively limit inodes"; 4467 goto out; 3509 goto out; 4468 } 3510 } 4469 if (ctx->inodes * BOGO_INODE_ !! 3511 if (ctx->inodes < inodes) { 4470 err = "Too few inodes 3512 err = "Too few inodes for current use"; 4471 goto out; 3513 goto out; 4472 } 3514 } 4473 } 3515 } 4474 3516 4475 if ((ctx->seen & SHMEM_SEEN_INUMS) && 3517 if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums && 4476 sbinfo->next_ino > UINT_MAX) { 3518 sbinfo->next_ino > UINT_MAX) { 4477 err = "Current inum too high 3519 err = "Current inum too high to switch to 32-bit inums"; 4478 goto out; 3520 goto out; 4479 } 3521 } 4480 if ((ctx->seen & SHMEM_SEEN_NOSWAP) & << 4481 err = "Cannot disable swap on << 4482 goto out; << 4483 } << 4484 if (!(ctx->seen & SHMEM_SEEN_NOSWAP) << 4485 err = "Cannot enable swap on << 4486 goto out; << 4487 } << 4488 << 4489 if (ctx->seen & SHMEM_SEEN_QUOTA && << 4490 !sb_any_quota_loaded(fc->root->d_ << 4491 err = "Cannot enable quota on << 4492 goto out; << 4493 } << 4494 << 4495 #ifdef CONFIG_TMPFS_QUOTA << 4496 #define CHANGED_LIMIT(name) << 4497 (ctx->qlimits.name## hardlimit && << 4498 (ctx->qlimits.name## hardlimit != sbi << 4499 << 4500 if (CHANGED_LIMIT(usrquota_b) || CHAN << 4501 CHANGED_LIMIT(grpquota_b) || CHAN << 4502 err = "Cannot change global q << 4503 goto out; << 4504 } << 4505 #endif /* CONFIG_TMPFS_QUOTA */ << 4506 3522 4507 if (ctx->seen & SHMEM_SEEN_HUGE) 3523 if (ctx->seen & SHMEM_SEEN_HUGE) 4508 sbinfo->huge = ctx->huge; 3524 sbinfo->huge = ctx->huge; 4509 if (ctx->seen & SHMEM_SEEN_INUMS) 3525 if (ctx->seen & SHMEM_SEEN_INUMS) 4510 sbinfo->full_inums = ctx->ful 3526 sbinfo->full_inums = ctx->full_inums; 4511 if (ctx->seen & SHMEM_SEEN_BLOCKS) 3527 if (ctx->seen & SHMEM_SEEN_BLOCKS) 4512 sbinfo->max_blocks = ctx->bl 3528 sbinfo->max_blocks = ctx->blocks; 4513 if (ctx->seen & SHMEM_SEEN_INODES) { 3529 if (ctx->seen & SHMEM_SEEN_INODES) { 4514 sbinfo->max_inodes = ctx->in 3530 sbinfo->max_inodes = ctx->inodes; 4515 sbinfo->free_ispace = ctx->in !! 3531 sbinfo->free_inodes = ctx->inodes - inodes; 4516 } 3532 } 4517 3533 4518 /* 3534 /* 4519 * Preserve previous mempolicy unless 3535 * Preserve previous mempolicy unless mpol remount option was specified. 4520 */ 3536 */ 4521 if (ctx->mpol) { 3537 if (ctx->mpol) { 4522 mpol = sbinfo->mpol; !! 3538 mpol_put(sbinfo->mpol); 4523 sbinfo->mpol = ctx->mpol; 3539 sbinfo->mpol = ctx->mpol; /* transfers initial ref */ 4524 ctx->mpol = NULL; 3540 ctx->mpol = NULL; 4525 } 3541 } 4526 !! 
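/*
 * [Editorial aside -- not part of mm/shmem.c]  The remount constraints being
 * checked here can be seen from userspace with a plain mount(2)/MS_REMOUNT
 * pair.  A sketch only: it assumes root privileges and an existing, empty
 * /mnt/tmp-demo directory (the path is illustrative).
 */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* Initial mount: the option string is parsed by shmem_parse_one(). */
	if (mount("tmpfs", "/mnt/tmp-demo", "tmpfs", 0,
		  "size=64m,nr_inodes=4096,mode=1777") < 0)
		perror("mount");

	/*
	 * Growing the limits is fine; shrinking size/nr_inodes below current
	 * usage is rejected by shmem_reconfigure() ("Too small a size ...").
	 */
	if (mount("none", "/mnt/tmp-demo", "tmpfs", MS_REMOUNT,
		  "size=128m,nr_inodes=8192") < 0)
		perror("remount");
	return 0;
}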
3542 spin_unlock(&sbinfo->stat_lock); 4527 if (ctx->noswap) << 4528 sbinfo->noswap = true; << 4529 << 4530 raw_spin_unlock(&sbinfo->stat_lock); << 4531 mpol_put(mpol); << 4532 return 0; 3543 return 0; 4533 out: 3544 out: 4534 raw_spin_unlock(&sbinfo->stat_lock); !! 3545 spin_unlock(&sbinfo->stat_lock); 4535 return invalfc(fc, "%s", err); 3546 return invalfc(fc, "%s", err); 4536 } 3547 } 4537 3548 4538 static int shmem_show_options(struct seq_file 3549 static int shmem_show_options(struct seq_file *seq, struct dentry *root) 4539 { 3550 { 4540 struct shmem_sb_info *sbinfo = SHMEM_ 3551 struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb); 4541 struct mempolicy *mpol; << 4542 3552 4543 if (sbinfo->max_blocks != shmem_defau 3553 if (sbinfo->max_blocks != shmem_default_max_blocks()) 4544 seq_printf(seq, ",size=%luk", !! 3554 seq_printf(seq, ",size=%luk", >> 3555 sbinfo->max_blocks << (PAGE_SHIFT - 10)); 4545 if (sbinfo->max_inodes != shmem_defau 3556 if (sbinfo->max_inodes != shmem_default_max_inodes()) 4546 seq_printf(seq, ",nr_inodes=% 3557 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes); 4547 if (sbinfo->mode != (0777 | S_ISVTX)) 3558 if (sbinfo->mode != (0777 | S_ISVTX)) 4548 seq_printf(seq, ",mode=%03ho" 3559 seq_printf(seq, ",mode=%03ho", sbinfo->mode); 4549 if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_ 3560 if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID)) 4550 seq_printf(seq, ",uid=%u", 3561 seq_printf(seq, ",uid=%u", 4551 from_kuid_mun 3562 from_kuid_munged(&init_user_ns, sbinfo->uid)); 4552 if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_ 3563 if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID)) 4553 seq_printf(seq, ",gid=%u", 3564 seq_printf(seq, ",gid=%u", 4554 from_kgid_mun 3565 from_kgid_munged(&init_user_ns, sbinfo->gid)); 4555 3566 4556 /* 3567 /* 4557 * Showing inode{64,32} might be usef 3568 * Showing inode{64,32} might be useful even if it's the system default, 4558 * since then people don't have to re 3569 * since then people don't have to resort to checking both here and 4559 * /proc/config.gz to confirm 64-bit 3570 * /proc/config.gz to confirm 64-bit inums were successfully applied 4560 * (which may not even exist if IKCON 3571 * (which may not even exist if IKCONFIG_PROC isn't enabled). 4561 * 3572 * 4562 * We hide it when inode64 isn't the 3573 * We hide it when inode64 isn't the default and we are using 32-bit 4563 * inodes, since that probably just m 3574 * inodes, since that probably just means the feature isn't even under 4564 * consideration. 3575 * consideration. 4565 * 3576 * 4566 * As such: 3577 * As such: 4567 * 3578 * 4568 * +------------- 3579 * +-----------------+-----------------+ 4569 * | TMPFS_INODE6 3580 * | TMPFS_INODE64=y | TMPFS_INODE64=n | 4570 * +------------------+------------- 3581 * +------------------+-----------------+-----------------+ 4571 * | full_inums=true | show 3582 * | full_inums=true | show | show | 4572 * | full_inums=false | show 3583 * | full_inums=false | show | hide | 4573 * +------------------+------------- 3584 * +------------------+-----------------+-----------------+ 4574 * 3585 * 4575 */ 3586 */ 4576 if (IS_ENABLED(CONFIG_TMPFS_INODE64) 3587 if (IS_ENABLED(CONFIG_TMPFS_INODE64) || sbinfo->full_inums) 4577 seq_printf(seq, ",inode%d", ( 3588 seq_printf(seq, ",inode%d", (sbinfo->full_inums ? 
64 : 32)); 4578 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 3589 #ifdef CONFIG_TRANSPARENT_HUGEPAGE 4579 /* Rightly or wrongly, show huge moun 3590 /* Rightly or wrongly, show huge mount option unmasked by shmem_huge */ 4580 if (sbinfo->huge) 3591 if (sbinfo->huge) 4581 seq_printf(seq, ",huge=%s", s 3592 seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge)); 4582 #endif 3593 #endif 4583 mpol = shmem_get_sbmpol(sbinfo); !! 3594 shmem_show_mpol(seq, sbinfo->mpol); 4584 shmem_show_mpol(seq, mpol); << 4585 mpol_put(mpol); << 4586 if (sbinfo->noswap) << 4587 seq_printf(seq, ",noswap"); << 4588 #ifdef CONFIG_TMPFS_QUOTA << 4589 if (sb_has_quota_active(root->d_sb, U << 4590 seq_printf(seq, ",usrquota"); << 4591 if (sb_has_quota_active(root->d_sb, G << 4592 seq_printf(seq, ",grpquota"); << 4593 if (sbinfo->qlimits.usrquota_bhardlim << 4594 seq_printf(seq, ",usrquota_bl << 4595 sbinfo->qlimits.us << 4596 if (sbinfo->qlimits.grpquota_bhardlim << 4597 seq_printf(seq, ",grpquota_bl << 4598 sbinfo->qlimits.gr << 4599 if (sbinfo->qlimits.usrquota_ihardlim << 4600 seq_printf(seq, ",usrquota_in << 4601 sbinfo->qlimits.us << 4602 if (sbinfo->qlimits.grpquota_ihardlim << 4603 seq_printf(seq, ",grpquota_in << 4604 sbinfo->qlimits.gr << 4605 #endif << 4606 return 0; 3595 return 0; 4607 } 3596 } 4608 3597 4609 #endif /* CONFIG_TMPFS */ 3598 #endif /* CONFIG_TMPFS */ 4610 3599 4611 static void shmem_put_super(struct super_bloc 3600 static void shmem_put_super(struct super_block *sb) 4612 { 3601 { 4613 struct shmem_sb_info *sbinfo = SHMEM_ 3602 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 4614 3603 4615 #ifdef CONFIG_TMPFS_QUOTA << 4616 shmem_disable_quotas(sb); << 4617 #endif << 4618 free_percpu(sbinfo->ino_batch); 3604 free_percpu(sbinfo->ino_batch); 4619 percpu_counter_destroy(&sbinfo->used_ 3605 percpu_counter_destroy(&sbinfo->used_blocks); 4620 mpol_put(sbinfo->mpol); 3606 mpol_put(sbinfo->mpol); 4621 kfree(sbinfo); 3607 kfree(sbinfo); 4622 sb->s_fs_info = NULL; 3608 sb->s_fs_info = NULL; 4623 } 3609 } 4624 3610 4625 static int shmem_fill_super(struct super_bloc 3611 static int shmem_fill_super(struct super_block *sb, struct fs_context *fc) 4626 { 3612 { 4627 struct shmem_options *ctx = fc->fs_pr 3613 struct shmem_options *ctx = fc->fs_private; 4628 struct inode *inode; 3614 struct inode *inode; 4629 struct shmem_sb_info *sbinfo; 3615 struct shmem_sb_info *sbinfo; 4630 int error = -ENOMEM; !! 3616 int err = -ENOMEM; 4631 3617 4632 /* Round up to L1_CACHE_BYTES to resi 3618 /* Round up to L1_CACHE_BYTES to resist false sharing */ 4633 sbinfo = kzalloc(max((int)sizeof(stru 3619 sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info), 4634 L1_CACHE_BYTE 3620 L1_CACHE_BYTES), GFP_KERNEL); 4635 if (!sbinfo) 3621 if (!sbinfo) 4636 return error; !! 3622 return -ENOMEM; 4637 3623 4638 sb->s_fs_info = sbinfo; 3624 sb->s_fs_info = sbinfo; 4639 3625 4640 #ifdef CONFIG_TMPFS 3626 #ifdef CONFIG_TMPFS 4641 /* 3627 /* 4642 * Per default we only allow half of 3628 * Per default we only allow half of the physical ram per 4643 * tmpfs instance, limiting inodes to 3629 * tmpfs instance, limiting inodes to one per page of lowmem; 4644 * but the internal instance is left 3630 * but the internal instance is left unlimited. 
4645 */ 3631 */ 4646 if (!(sb->s_flags & SB_KERNMOUNT)) { 3632 if (!(sb->s_flags & SB_KERNMOUNT)) { 4647 if (!(ctx->seen & SHMEM_SEEN_ 3633 if (!(ctx->seen & SHMEM_SEEN_BLOCKS)) 4648 ctx->blocks = shmem_d 3634 ctx->blocks = shmem_default_max_blocks(); 4649 if (!(ctx->seen & SHMEM_SEEN_ 3635 if (!(ctx->seen & SHMEM_SEEN_INODES)) 4650 ctx->inodes = shmem_d 3636 ctx->inodes = shmem_default_max_inodes(); 4651 if (!(ctx->seen & SHMEM_SEEN_ 3637 if (!(ctx->seen & SHMEM_SEEN_INUMS)) 4652 ctx->full_inums = IS_ 3638 ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64); 4653 sbinfo->noswap = ctx->noswap; << 4654 } else { 3639 } else { 4655 sb->s_flags |= SB_NOUSER; 3640 sb->s_flags |= SB_NOUSER; 4656 } 3641 } 4657 sb->s_export_op = &shmem_export_ops; 3642 sb->s_export_op = &shmem_export_ops; 4658 sb->s_flags |= SB_NOSEC | SB_I_VERSIO !! 3643 sb->s_flags |= SB_NOSEC; 4659 #else 3644 #else 4660 sb->s_flags |= SB_NOUSER; 3645 sb->s_flags |= SB_NOUSER; 4661 #endif 3646 #endif 4662 sbinfo->max_blocks = ctx->blocks; 3647 sbinfo->max_blocks = ctx->blocks; 4663 sbinfo->max_inodes = ctx->inodes; !! 3648 sbinfo->free_inodes = sbinfo->max_inodes = ctx->inodes; 4664 sbinfo->free_ispace = sbinfo->max_ino << 4665 if (sb->s_flags & SB_KERNMOUNT) { 3649 if (sb->s_flags & SB_KERNMOUNT) { 4666 sbinfo->ino_batch = alloc_per 3650 sbinfo->ino_batch = alloc_percpu(ino_t); 4667 if (!sbinfo->ino_batch) 3651 if (!sbinfo->ino_batch) 4668 goto failed; 3652 goto failed; 4669 } 3653 } 4670 sbinfo->uid = ctx->uid; 3654 sbinfo->uid = ctx->uid; 4671 sbinfo->gid = ctx->gid; 3655 sbinfo->gid = ctx->gid; 4672 sbinfo->full_inums = ctx->full_inums; 3656 sbinfo->full_inums = ctx->full_inums; 4673 sbinfo->mode = ctx->mode; 3657 sbinfo->mode = ctx->mode; 4674 sbinfo->huge = ctx->huge; 3658 sbinfo->huge = ctx->huge; 4675 sbinfo->mpol = ctx->mpol; 3659 sbinfo->mpol = ctx->mpol; 4676 ctx->mpol = NULL; 3660 ctx->mpol = NULL; 4677 3661 4678 raw_spin_lock_init(&sbinfo->stat_lock !! 3662 spin_lock_init(&sbinfo->stat_lock); 4679 if (percpu_counter_init(&sbinfo->used 3663 if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL)) 4680 goto failed; 3664 goto failed; 4681 spin_lock_init(&sbinfo->shrinklist_lo 3665 spin_lock_init(&sbinfo->shrinklist_lock); 4682 INIT_LIST_HEAD(&sbinfo->shrinklist); 3666 INIT_LIST_HEAD(&sbinfo->shrinklist); 4683 3667 4684 sb->s_maxbytes = MAX_LFS_FILESIZE; 3668 sb->s_maxbytes = MAX_LFS_FILESIZE; 4685 sb->s_blocksize = PAGE_SIZE; 3669 sb->s_blocksize = PAGE_SIZE; 4686 sb->s_blocksize_bits = PAGE_SHIFT; 3670 sb->s_blocksize_bits = PAGE_SHIFT; 4687 sb->s_magic = TMPFS_MAGIC; 3671 sb->s_magic = TMPFS_MAGIC; 4688 sb->s_op = &shmem_ops; 3672 sb->s_op = &shmem_ops; 4689 sb->s_time_gran = 1; 3673 sb->s_time_gran = 1; 4690 #ifdef CONFIG_TMPFS_XATTR 3674 #ifdef CONFIG_TMPFS_XATTR 4691 sb->s_xattr = shmem_xattr_handlers; 3675 sb->s_xattr = shmem_xattr_handlers; 4692 #endif 3676 #endif 4693 #ifdef CONFIG_TMPFS_POSIX_ACL 3677 #ifdef CONFIG_TMPFS_POSIX_ACL 4694 sb->s_flags |= SB_POSIXACL; 3678 sb->s_flags |= SB_POSIXACL; 4695 #endif 3679 #endif 4696 uuid_t uuid; !! 
3680 uuid_gen(&sb->s_uuid); 4697 uuid_gen(&uuid); << 4698 super_set_uuid(sb, uuid.b, sizeof(uui << 4699 << 4700 #ifdef CONFIG_TMPFS_QUOTA << 4701 if (ctx->seen & SHMEM_SEEN_QUOTA) { << 4702 sb->dq_op = &shmem_quota_oper << 4703 sb->s_qcop = &dquot_quotactl_ << 4704 sb->s_quota_types = QTYPE_MAS << 4705 << 4706 /* Copy the default limits fr << 4707 memcpy(&sbinfo->qlimits, &ctx << 4708 sizeof(struct shmem_qu << 4709 << 4710 if (shmem_enable_quotas(sb, c << 4711 goto failed; << 4712 } << 4713 #endif /* CONFIG_TMPFS_QUOTA */ << 4714 3681 4715 inode = shmem_get_inode(&nop_mnt_idma !! 3682 inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE); 4716 S_IFDIR | sbi !! 3683 if (!inode) 4717 if (IS_ERR(inode)) { << 4718 error = PTR_ERR(inode); << 4719 goto failed; 3684 goto failed; 4720 } << 4721 inode->i_uid = sbinfo->uid; 3685 inode->i_uid = sbinfo->uid; 4722 inode->i_gid = sbinfo->gid; 3686 inode->i_gid = sbinfo->gid; 4723 sb->s_root = d_make_root(inode); 3687 sb->s_root = d_make_root(inode); 4724 if (!sb->s_root) 3688 if (!sb->s_root) 4725 goto failed; 3689 goto failed; 4726 return 0; 3690 return 0; 4727 3691 4728 failed: 3692 failed: 4729 shmem_put_super(sb); 3693 shmem_put_super(sb); 4730 return error; !! 3694 return err; 4731 } 3695 } 4732 3696 4733 static int shmem_get_tree(struct fs_context * 3697 static int shmem_get_tree(struct fs_context *fc) 4734 { 3698 { 4735 return get_tree_nodev(fc, shmem_fill_ 3699 return get_tree_nodev(fc, shmem_fill_super); 4736 } 3700 } 4737 3701 4738 static void shmem_free_fc(struct fs_context * 3702 static void shmem_free_fc(struct fs_context *fc) 4739 { 3703 { 4740 struct shmem_options *ctx = fc->fs_pr 3704 struct shmem_options *ctx = fc->fs_private; 4741 3705 4742 if (ctx) { 3706 if (ctx) { 4743 mpol_put(ctx->mpol); 3707 mpol_put(ctx->mpol); 4744 kfree(ctx); 3708 kfree(ctx); 4745 } 3709 } 4746 } 3710 } 4747 3711 4748 static const struct fs_context_operations shm 3712 static const struct fs_context_operations shmem_fs_context_ops = { 4749 .free = shmem_free_ 3713 .free = shmem_free_fc, 4750 .get_tree = shmem_get_t 3714 .get_tree = shmem_get_tree, 4751 #ifdef CONFIG_TMPFS 3715 #ifdef CONFIG_TMPFS 4752 .parse_monolithic = shmem_parse 3716 .parse_monolithic = shmem_parse_options, 4753 .parse_param = shmem_parse 3717 .parse_param = shmem_parse_one, 4754 .reconfigure = shmem_recon 3718 .reconfigure = shmem_reconfigure, 4755 #endif 3719 #endif 4756 }; 3720 }; 4757 3721 4758 static struct kmem_cache *shmem_inode_cachep !! 3722 static struct kmem_cache *shmem_inode_cachep; 4759 3723 4760 static struct inode *shmem_alloc_inode(struct 3724 static struct inode *shmem_alloc_inode(struct super_block *sb) 4761 { 3725 { 4762 struct shmem_inode_info *info; 3726 struct shmem_inode_info *info; 4763 info = alloc_inode_sb(sb, shmem_inode !! 
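/*
 * [Editorial aside -- not part of mm/shmem.c]  shmem_fs_context_ops above is
 * what the new mount API drives.  A hedged sketch, assuming glibc >= 2.36
 * exposes fsopen()/fsconfig()/fsmount()/move_mount() in <sys/mount.h> (older
 * systems need raw syscall(2)), CAP_SYS_ADMIN, and an existing /mnt/tmp-demo.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mount.h>
#include <unistd.h>

int main(void)
{
	/* fsopen("tmpfs") creates the fs_context serviced by the ops above. */
	int fsfd = fsopen("tmpfs", FSOPEN_CLOEXEC);
	int mfd;

	if (fsfd < 0) {
		perror("fsopen");
		return 1;
	}
	/* Each key/value pair is fed to shmem_parse_one(). */
	fsconfig(fsfd, FSCONFIG_SET_STRING, "size", "64m", 0);
	fsconfig(fsfd, FSCONFIG_SET_STRING, "huge", "within_size", 0);
	/* CMD_CREATE runs shmem_get_tree() -> shmem_fill_super(). */
	if (fsconfig(fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0) < 0)
		perror("fsconfig create");

	mfd = fsmount(fsfd, FSMOUNT_CLOEXEC, 0);
	if (mfd < 0)
		perror("fsmount");
	else if (move_mount(mfd, "", AT_FDCWD, "/mnt/tmp-demo",
			    MOVE_MOUNT_F_EMPTY_PATH) < 0)
		perror("move_mount");
	close(fsfd);
	return 0;
}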
3727 info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL); 4764 if (!info) 3728 if (!info) 4765 return NULL; 3729 return NULL; 4766 return &info->vfs_inode; 3730 return &info->vfs_inode; 4767 } 3731 } 4768 3732 4769 static void shmem_free_in_core_inode(struct i 3733 static void shmem_free_in_core_inode(struct inode *inode) 4770 { 3734 { 4771 if (S_ISLNK(inode->i_mode)) 3735 if (S_ISLNK(inode->i_mode)) 4772 kfree(inode->i_link); 3736 kfree(inode->i_link); 4773 kmem_cache_free(shmem_inode_cachep, S 3737 kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode)); 4774 } 3738 } 4775 3739 4776 static void shmem_destroy_inode(struct inode 3740 static void shmem_destroy_inode(struct inode *inode) 4777 { 3741 { 4778 if (S_ISREG(inode->i_mode)) 3742 if (S_ISREG(inode->i_mode)) 4779 mpol_free_shared_policy(&SHME 3743 mpol_free_shared_policy(&SHMEM_I(inode)->policy); 4780 if (S_ISDIR(inode->i_mode)) << 4781 simple_offset_destroy(shmem_g << 4782 } 3744 } 4783 3745 4784 static void shmem_init_inode(void *foo) 3746 static void shmem_init_inode(void *foo) 4785 { 3747 { 4786 struct shmem_inode_info *info = foo; 3748 struct shmem_inode_info *info = foo; 4787 inode_init_once(&info->vfs_inode); 3749 inode_init_once(&info->vfs_inode); 4788 } 3750 } 4789 3751 4790 static void __init shmem_init_inodecache(void !! 3752 static void shmem_init_inodecache(void) 4791 { 3753 { 4792 shmem_inode_cachep = kmem_cache_creat 3754 shmem_inode_cachep = kmem_cache_create("shmem_inode_cache", 4793 sizeof(struct 3755 sizeof(struct shmem_inode_info), 4794 0, SLAB_PANIC 3756 0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode); 4795 } 3757 } 4796 3758 4797 static void __init shmem_destroy_inodecache(v !! 3759 static void shmem_destroy_inodecache(void) 4798 { 3760 { 4799 kmem_cache_destroy(shmem_inode_cachep 3761 kmem_cache_destroy(shmem_inode_cachep); 4800 } 3762 } 4801 3763 4802 /* Keep the page in page cache instead of tru !! 3764 const struct address_space_operations shmem_aops = { 4803 static int shmem_error_remove_folio(struct ad << 4804 struct fol << 4805 { << 4806 return 0; << 4807 } << 4808 << 4809 static const struct address_space_operations << 4810 .writepage = shmem_writepage, 3765 .writepage = shmem_writepage, 4811 .dirty_folio = noop_dirty_folio, !! 3766 .set_page_dirty = __set_page_dirty_no_writeback, 4812 #ifdef CONFIG_TMPFS 3767 #ifdef CONFIG_TMPFS 4813 .write_begin = shmem_write_begin, 3768 .write_begin = shmem_write_begin, 4814 .write_end = shmem_write_end, 3769 .write_end = shmem_write_end, 4815 #endif 3770 #endif 4816 #ifdef CONFIG_MIGRATION 3771 #ifdef CONFIG_MIGRATION 4817 .migrate_folio = migrate_folio, !! 3772 .migratepage = migrate_page, 4818 #endif 3773 #endif 4819 .error_remove_folio = shmem_error_rem !! 3774 .error_remove_page = generic_error_remove_page, 4820 }; 3775 }; >> 3776 EXPORT_SYMBOL(shmem_aops); 4821 3777 4822 static const struct file_operations shmem_fil 3778 static const struct file_operations shmem_file_operations = { 4823 .mmap = shmem_mmap, 3779 .mmap = shmem_mmap, 4824 .open = shmem_file_open, << 4825 .get_unmapped_area = shmem_get_unmapp 3780 .get_unmapped_area = shmem_get_unmapped_area, 4826 #ifdef CONFIG_TMPFS 3781 #ifdef CONFIG_TMPFS 4827 .llseek = shmem_file_llseek, 3782 .llseek = shmem_file_llseek, 4828 .read_iter = shmem_file_read_ite 3783 .read_iter = shmem_file_read_iter, 4829 .write_iter = shmem_file_write_it !! 3784 .write_iter = generic_file_write_iter, 4830 .fsync = noop_fsync, 3785 .fsync = noop_fsync, 4831 .splice_read = shmem_file_splice_r !! 
static const struct file_operations shmem_file_operations = {
	.mmap		= shmem_mmap,
	.open		= shmem_file_open,
	.get_unmapped_area = shmem_get_unmapped_area,
#ifdef CONFIG_TMPFS
	.llseek		= shmem_file_llseek,
	.read_iter	= shmem_file_read_iter,
	.write_iter	= shmem_file_write_iter,
	.fsync		= noop_fsync,
	.splice_read	= shmem_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= shmem_fallocate,
#endif
};

static const struct inode_operations shmem_inode_operations = {
	.getattr	= shmem_getattr,
	.setattr	= shmem_setattr,
#ifdef CONFIG_TMPFS_XATTR
	.listxattr	= shmem_listxattr,
	.set_acl	= simple_set_acl,
	.fileattr_get	= shmem_fileattr_get,
	.fileattr_set	= shmem_fileattr_set,
#endif
};

static const struct inode_operations shmem_dir_inode_operations = {
#ifdef CONFIG_TMPFS
	.getattr	= shmem_getattr,
	.create		= shmem_create,
	.lookup		= simple_lookup,
	.link		= shmem_link,
	.unlink		= shmem_unlink,
	.symlink	= shmem_symlink,
	.mkdir		= shmem_mkdir,
	.rmdir		= shmem_rmdir,
	.mknod		= shmem_mknod,
	.rename		= shmem_rename2,
	.tmpfile	= shmem_tmpfile,
	.get_offset_ctx	= shmem_get_offset_ctx,
#endif
#ifdef CONFIG_TMPFS_XATTR
	.listxattr	= shmem_listxattr,
	.fileattr_get	= shmem_fileattr_get,
	.fileattr_set	= shmem_fileattr_set,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_setattr,
	.set_acl	= simple_set_acl,
#endif
};

static const struct inode_operations shmem_special_inode_operations = {
	.getattr	= shmem_getattr,
#ifdef CONFIG_TMPFS_XATTR
	.listxattr	= shmem_listxattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_setattr,
	.set_acl	= simple_set_acl,
#endif
};

static const struct super_operations shmem_ops = {
	.alloc_inode	= shmem_alloc_inode,
	.free_inode	= shmem_free_in_core_inode,
	.destroy_inode	= shmem_destroy_inode,
#ifdef CONFIG_TMPFS
	.statfs		= shmem_statfs,
	.show_options	= shmem_show_options,
#endif
#ifdef CONFIG_TMPFS_QUOTA
	.get_dquots	= shmem_get_dquots,
#endif
	.evict_inode	= shmem_evict_inode,
	.drop_inode	= generic_delete_inode,
	.put_super	= shmem_put_super,
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	.nr_cached_objects	= shmem_unused_huge_count,
	.free_cached_objects	= shmem_unused_huge_scan,
#endif
};

static const struct vm_operations_struct shmem_vm_ops = {
	.fault		= shmem_fault,
	.map_pages	= filemap_map_pages,
#ifdef CONFIG_NUMA
	.set_policy	= shmem_set_policy,
	.get_policy	= shmem_get_policy,
#endif
};

static const struct vm_operations_struct shmem_anon_vm_ops = {
	.fault		= shmem_fault,
	.map_pages	= filemap_map_pages,
#ifdef CONFIG_NUMA
	.set_policy	= shmem_set_policy,
	.get_policy	= shmem_get_policy,
#endif
};

int shmem_init_fs_context(struct fs_context *fc)
{
	struct shmem_options *ctx;

	ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->mode = 0777 | S_ISVTX;
	ctx->uid = current_fsuid();
	ctx->gid = current_fsgid();

	fc->fs_private = ctx;
	fc->ops = &shmem_fs_context_ops;
	return 0;
}

static struct file_system_type shmem_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "tmpfs",
	.init_fs_context = shmem_init_fs_context,
#ifdef CONFIG_TMPFS
	.parameters	= shmem_fs_parameters,
#endif
	.kill_sb	= kill_litter_super,
	.fs_flags	= FS_USERNS_MOUNT | FS_ALLOW_IDMAP,
};

void __init shmem_init(void)
{
	int error;

	shmem_init_inodecache();

#ifdef CONFIG_TMPFS_QUOTA
	register_quota_format(&shmem_quota_format);
#endif

	error = register_filesystem(&shmem_fs_type);
	if (error) {
		pr_err("Could not register tmpfs\n");
		goto out2;
	}

	shm_mnt = kern_mount(&shmem_fs_type);
	if (IS_ERR(shm_mnt)) {
		error = PTR_ERR(shm_mnt);
		pr_err("Could not kern_mount tmpfs\n");
		goto out1;
	}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
	else
		shmem_huge = SHMEM_HUGE_NEVER; /* just in case it was patched */

	/*
	 * Default to setting PMD-sized THP to inherit the global setting and
	 * disable all other multi-size THPs.
	 */
	huge_shmem_orders_inherit = BIT(HPAGE_PMD_ORDER);
#endif
	return;

out1:
	unregister_filesystem(&shmem_fs_type);
out2:
#ifdef CONFIG_TMPFS_QUOTA
	unregister_quota_format(&shmem_quota_format);
#endif
	shmem_destroy_inodecache();
	shm_mnt = ERR_PTR(error);
}

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
static ssize_t shmem_enabled_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	static const int values[] = {
		SHMEM_HUGE_ALWAYS,
		SHMEM_HUGE_WITHIN_SIZE,
		SHMEM_HUGE_ADVISE,
		SHMEM_HUGE_NEVER,
		SHMEM_HUGE_DENY,
		SHMEM_HUGE_FORCE,
	};
	int len = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(values); i++) {
		len += sysfs_emit_at(buf, len,
				     shmem_huge == values[i] ? "%s[%s]" : "%s%s",
				     i ? " " : "", shmem_format_huge(values[i]));
	}
	len += sysfs_emit_at(buf, len, "\n");

	return len;
}

static ssize_t shmem_enabled_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	char tmp[16];
	int huge;

	if (count + 1 > sizeof(tmp))
		return -EINVAL;
	memcpy(tmp, buf, count);
	tmp[count] = '\0';
	if (count && tmp[count - 1] == '\n')
		tmp[count - 1] = '\0';

	huge = shmem_parse_huge(tmp);
	if (huge == -EINVAL)
		return -EINVAL;
	if (!has_transparent_hugepage() &&
			huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
		return -EINVAL;

	/* Do not override huge allocation policy with non-PMD sized mTHP */
	if (huge == SHMEM_HUGE_FORCE &&
	    huge_shmem_orders_inherit != BIT(HPAGE_PMD_ORDER))
		return -EINVAL;

	shmem_huge = huge;
	if (shmem_huge > SHMEM_HUGE_DENY)
		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
	return count;
}

struct kobj_attribute shmem_enabled_attr = __ATTR_RW(shmem_enabled);
static DEFINE_SPINLOCK(huge_shmem_orders_lock);

static ssize_t thpsize_shmem_enabled_show(struct kobject *kobj,
					  struct kobj_attribute *attr, char *buf)
{
	int order = to_thpsize(kobj)->order;
	const char *output;

	if (test_bit(order, &huge_shmem_orders_always))
		output = "[always] inherit within_size advise never";
	else if (test_bit(order, &huge_shmem_orders_inherit))
		output = "always [inherit] within_size advise never";
	else if (test_bit(order, &huge_shmem_orders_within_size))
		output = "always inherit [within_size] advise never";
	else if (test_bit(order, &huge_shmem_orders_madvise))
		output = "always inherit within_size [advise] never";
	else
		output = "always inherit within_size advise [never]";

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t thpsize_shmem_enabled_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	int order = to_thpsize(kobj)->order;
	ssize_t ret = count;

	if (sysfs_streq(buf, "always")) {
		spin_lock(&huge_shmem_orders_lock);
		clear_bit(order, &huge_shmem_orders_inherit);
		clear_bit(order, &huge_shmem_orders_madvise);
		clear_bit(order, &huge_shmem_orders_within_size);
		set_bit(order, &huge_shmem_orders_always);
		spin_unlock(&huge_shmem_orders_lock);
	} else if (sysfs_streq(buf, "inherit")) {
		/* Do not override huge allocation policy with non-PMD sized mTHP */
		if (shmem_huge == SHMEM_HUGE_FORCE &&
		    order != HPAGE_PMD_ORDER)
			return -EINVAL;

		spin_lock(&huge_shmem_orders_lock);
		clear_bit(order, &huge_shmem_orders_always);
		clear_bit(order, &huge_shmem_orders_madvise);
		clear_bit(order, &huge_shmem_orders_within_size);
		set_bit(order, &huge_shmem_orders_inherit);
		spin_unlock(&huge_shmem_orders_lock);
	} else if (sysfs_streq(buf, "within_size")) {
		spin_lock(&huge_shmem_orders_lock);
		clear_bit(order, &huge_shmem_orders_always);
		clear_bit(order, &huge_shmem_orders_inherit);
		clear_bit(order, &huge_shmem_orders_madvise);
		set_bit(order, &huge_shmem_orders_within_size);
		spin_unlock(&huge_shmem_orders_lock);
	} else if (sysfs_streq(buf, "advise")) {
		spin_lock(&huge_shmem_orders_lock);
		clear_bit(order, &huge_shmem_orders_always);
		clear_bit(order, &huge_shmem_orders_inherit);
		clear_bit(order, &huge_shmem_orders_within_size);
		set_bit(order, &huge_shmem_orders_madvise);
		spin_unlock(&huge_shmem_orders_lock);
	} else if (sysfs_streq(buf, "never")) {
		spin_lock(&huge_shmem_orders_lock);
		clear_bit(order, &huge_shmem_orders_always);
		clear_bit(order, &huge_shmem_orders_inherit);
		clear_bit(order, &huge_shmem_orders_within_size);
		clear_bit(order, &huge_shmem_orders_madvise);
		spin_unlock(&huge_shmem_orders_lock);
	} else {
		ret = -EINVAL;
	}

	return ret;
}

struct kobj_attribute thpsize_shmem_enabled_attr =
	__ATTR(shmem_enabled, 0644, thpsize_shmem_enabled_show, thpsize_shmem_enabled_store);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */
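
/*
 * Illustrative note, not part of this file: the attributes registered above
 * show up as /sys/kernel/mm/transparent_hugepage/shmem_enabled and, per
 * supported size, .../hugepages-<size>kB/shmem_enabled.  A userspace sketch
 * of selecting the "within_size" policy, assuming those usual sysfs paths,
 * might look like:
 *
 *	int fd = open("/sys/kernel/mm/transparent_hugepage/shmem_enabled",
 *		      O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "within_size", strlen("within_size"));
 *		close(fd);
 *	}
 */
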
#else /* !CONFIG_SHMEM */

/*
 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
 *
 * This is intended for small system where the benefits of the full
 * shmem code (swap-backed and resource-limited) are outweighed by
 * their complexity. On systems without swap this code should be
 * effectively equivalent, but much lighter weight.
 */

static struct file_system_type shmem_fs_type = {
	.name		= "tmpfs",
	.init_fs_context = ramfs_init_fs_context,
	.parameters	= ramfs_fs_parameters,
	.kill_sb	= ramfs_kill_sb,
	.fs_flags	= FS_USERNS_MOUNT,
};

void __init shmem_init(void)
{
	BUG_ON(register_filesystem(&shmem_fs_type) != 0);

	shm_mnt = kern_mount(&shmem_fs_type);
	BUG_ON(IS_ERR(shm_mnt));
}

int shmem_unuse(unsigned int type)
{
	return 0;
}

int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
{
	return 0;
}

void shmem_unlock_mapping(struct address_space *mapping)
{
}

#ifdef CONFIG_MMU
unsigned long shmem_get_unmapped_area(struct file *file,
				      unsigned long addr, unsigned long len,
				      unsigned long pgoff, unsigned long flags)
{
	return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags);
}
#endif

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

#define shmem_vm_ops			generic_file_vm_ops
#define shmem_anon_vm_ops		generic_file_vm_ops
#define shmem_file_operations		ramfs_file_operations
#define shmem_acct_size(flags, size)	0
#define shmem_unacct_size(flags, size)	do {} while (0)

static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap,
				struct super_block *sb, struct inode *dir,
				umode_t mode, dev_t dev, unsigned long flags)
{
	struct inode *inode = ramfs_get_inode(sb, dir, mode, dev);
	return inode ? inode : ERR_PTR(-ENOSPC);
}

#endif /* CONFIG_SHMEM */

/* common code */

static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name,
			loff_t size, unsigned long flags, unsigned int i_flags)
{
	struct inode *inode;
	struct file *res;

	if (IS_ERR(mnt))
		return ERR_CAST(mnt);

	if (size < 0 || size > MAX_LFS_FILESIZE)
		return ERR_PTR(-EINVAL);

	if (shmem_acct_size(flags, size))
		return ERR_PTR(-ENOMEM);

	if (is_idmapped_mnt(mnt))
		return ERR_PTR(-EINVAL);

	inode = shmem_get_inode(&nop_mnt_idmap, mnt->mnt_sb, NULL,
				S_IFREG | S_IRWXUGO, 0, flags);
	if (IS_ERR(inode)) {
		shmem_unacct_size(flags, size);
		return ERR_CAST(inode);
	}
	inode->i_flags |= i_flags;
	inode->i_size = size;
	clear_nlink(inode);	/* It is unlinked */
	res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
	if (!IS_ERR(res))
		res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
				&shmem_file_operations);
	if (IS_ERR(res))
		iput(inode);
	return res;
}

/**
 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
 *	kernel internal.  There will be NO LSM permission checks against the
 *	underlying inode.  So users of this interface must do LSM checks at a
 *	higher layer.  The users are the big_key and shm implementations.  LSM
 *	checks are provided at the key or shm level rather than the inode.
 * @name: name for dentry (to be seen in /proc/<pid>/maps
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
{
	return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
}
EXPORT_SYMBOL_GPL(shmem_kernel_file_setup);

/**
 * shmem_file_setup - get an unlinked file living in tmpfs
 * @name: name for dentry (to be seen in /proc/<pid>/maps
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
	return __shmem_file_setup(shm_mnt, name, size, flags, 0);
}
EXPORT_SYMBOL_GPL(shmem_file_setup);
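
/*
 * Illustrative sketch, not part of this file: a typical in-kernel user of
 * shmem_file_setup() creating an unlinked tmpfs file to back an object.
 * "example_obj" and its fields are made up for the example; the backing
 * file is released with fput() when the object is torn down.
 */
struct example_obj {
	struct file *backing;
};

static int example_obj_create_backing(struct example_obj *obj, loff_t size)
{
	struct file *filp;

	/* VM_NORESERVE: do not charge the whole size up front */
	filp = shmem_file_setup("example-obj", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->backing = filp;
	return 0;
}
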
/**
 * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
 * @mnt: the tmpfs mount where the file will be created
 * @name: name for dentry (to be seen in /proc/<pid>/maps
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
				       loff_t size, unsigned long flags)
{
	return __shmem_file_setup(mnt, name, size, flags, 0);
}
EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
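
/*
 * Illustrative sketch, not part of this file: pairing a private tmpfs mount
 * (for example one created as sketched after shmem_fs_context_ops above)
 * with shmem_file_setup_with_mnt(), so that the object's pages are
 * accounted to that mount rather than to the internal shm_mnt.
 * "example_mnt" is assumed to be a vfsmount obtained by the caller.
 */
static struct file *example_file_on_private_mnt(struct vfsmount *example_mnt,
						loff_t size)
{
	return shmem_file_setup_with_mnt(example_mnt, "example-obj", size,
					 VM_NORESERVE);
}
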
/**
 * shmem_zero_setup - setup a shared anonymous mapping
 * @vma: the vma to be mmapped is prepared by do_mmap
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
	struct file *file;
	loff_t size = vma->vm_end - vma->vm_start;

	/*
	 * Cloning a new file under mmap_lock leads to a lock ordering conflict
	 * between XFS directory reading and selinux: since this file is only
	 * accessible to the user through its mapping, use S_PRIVATE flag to
	 * bypass file security, in the same way as shmem_kernel_file_setup().
	 */
	file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
	if (IS_ERR(file))
		return PTR_ERR(file);

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = file;
	vma->vm_ops = &shmem_anon_vm_ops;

	return 0;
}

/**
 * shmem_read_folio_gfp - read into page cache, using specified page allocation flags.
 * @mapping:	the folio's address_space
 * @index:	the folio index
 * @gfp:	the page allocator flags to use if allocating
 *
 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
 * with any new page allocations done using the specified allocation flags.
 * But read_cache_page_gfp() uses the ->read_folio() method: which does not
 * suit tmpfs, since it may have pages in swapcache, and needs to find those
 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
 *
 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
 */
struct folio *shmem_read_folio_gfp(struct address_space *mapping,
		pgoff_t index, gfp_t gfp)
{
#ifdef CONFIG_SHMEM
	struct inode *inode = mapping->host;
	struct folio *folio;
	int error;

	error = shmem_get_folio_gfp(inode, index, 0, &folio, SGP_CACHE,
				    gfp, NULL, NULL);
	if (error)
		return ERR_PTR(error);

	folio_unlock(folio);
	return folio;
#else
	/*
	 * The tiny !SHMEM case uses ramfs without swap
	 */
	return mapping_read_folio_gfp(mapping, index, gfp);
#endif
}
EXPORT_SYMBOL_GPL(shmem_read_folio_gfp);

struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
					 pgoff_t index, gfp_t gfp)
{
	struct folio *folio = shmem_read_folio_gfp(mapping, index, gfp);
	struct page *page;

	if (IS_ERR(folio))
		return &folio->page;

	page = folio_file_page(folio, index);
	if (PageHWPoison(page)) {
		folio_put(folio);
		return ERR_PTR(-EIO);
	}

	return page;
}
EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
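
/*
 * Illustrative sketch, not part of this file: how a GEM-style driver might
 * pull individual backing pages out of a tmpfs file it created earlier
 * (e.g. with shmem_file_setup()), softening the allocation the way the
 * comment above describes.  "example_get_backing_page" is a made-up helper.
 */
static struct page *example_get_backing_page(struct file *backing, pgoff_t index)
{
	struct address_space *mapping = backing->f_mapping;
	gfp_t gfp = mapping_gfp_mask(mapping);

	/* Avoid OOM-killing the machine for a single optional page */
	gfp |= __GFP_NORETRY | __GFP_NOWARN;

	return shmem_read_mapping_page_gfp(mapping, index, gfp);
}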