/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/fileattr.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/export.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/hugetlb.h>
#include <linux/fs_parser.h>
#include <linux/swapfile.h>
#include <linux/iversion.h>
#include "swap.h"

static struct vfsmount *shm_mnt __ro_after_init;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <uapi/linux/memfd.h>
#include <linux/rmap.h>
#include <linux/uuid.h>
#include <linux/quotaops.h>
#include <linux/rcupdate_wait.h>

#include <linux/uaccess.h>

#include "internal.h"

#define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
#define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Pretend that one inode + its dentry occupy this much memory */
#define BOGO_INODE_SIZE 1024

/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128
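
/*
 * Worked example (illustrative, assuming a 4K PAGE_SIZE): VM_ACCT(5000)
 * is PAGE_ALIGN(5000) >> PAGE_SHIFT == 8192 >> 12 == 2, so overcommit
 * accounting always charges whole pages, rounded up from the byte size.
 */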

/*
 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
 * inode->i_private (with i_rwsem making sure that it has only one user at
 * a time): we would prefer not to enlarge the shmem inode just for that.
 */
struct shmem_falloc {
	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
	pgoff_t start;		/* start of range currently being fallocated */
	pgoff_t next;		/* the next page offset to be fallocated */
	pgoff_t nr_falloced;	/* how many new pages have been fallocated */
	pgoff_t nr_unswapped;	/* how often writepage refused to swap out */
};

struct shmem_options {
	unsigned long long blocks;
	unsigned long long inodes;
	struct mempolicy *mpol;
	kuid_t uid;
	kgid_t gid;
	umode_t mode;
	bool full_inums;
	int huge;
	int seen;
	bool noswap;
	unsigned short quota_types;
	struct shmem_quota_limits qlimits;
#define SHMEM_SEEN_BLOCKS 1
#define SHMEM_SEEN_INODES 2
#define SHMEM_SEEN_HUGE 4
#define SHMEM_SEEN_INUMS 8
#define SHMEM_SEEN_NOSWAP 16
#define SHMEM_SEEN_QUOTA 32
};

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static unsigned long huge_shmem_orders_always __read_mostly;
static unsigned long huge_shmem_orders_madvise __read_mostly;
static unsigned long huge_shmem_orders_inherit __read_mostly;
static unsigned long huge_shmem_orders_within_size __read_mostly;
#endif

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages() / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	unsigned long nr_pages = totalram_pages();

	return min3(nr_pages - totalhigh_pages(), nr_pages / 2,
			ULONG_MAX / BOGO_INODE_SIZE);
}
#endif

static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
			struct folio **foliop, enum sgp_type sgp, gfp_t gfp,
			struct vm_area_struct *vma, vm_fault_t *fault_type);

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

static inline int shmem_reacct_size(unsigned long flags,
		loff_t oldsize, loff_t newsize)
{
	if (!(flags & VM_NORESERVE)) {
		if (VM_ACCT(newsize) > VM_ACCT(oldsize))
			return security_vm_enough_memory_mm(current->mm,
					VM_ACCT(newsize) - VM_ACCT(oldsize));
		else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
			vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
	}
	return 0;
}
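
/*
 * Worked example (illustrative, 4K pages): growing a pre-accounted object
 * from 10000 to 20000 bytes calls shmem_reacct_size() with VM_ACCT(oldsize)
 * == 3 and VM_ACCT(newsize) == 5, so only the 2 extra pages are charged;
 * shrinking back later returns exactly those 2 pages to the commit pool.
 */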

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow large sparse files.
 * shmem_get_folio reports shmem_acct_blocks failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_blocks(unsigned long flags, long pages)
{
	if (!(flags & VM_NORESERVE))
		return 0;

	return security_vm_enough_memory_mm(current->mm,
			pages * VM_ACCT(PAGE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
}

static int shmem_inode_acct_blocks(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	int err = -ENOSPC;

	if (shmem_acct_blocks(info->flags, pages))
		return err;

	might_sleep();	/* when quotas */
	if (sbinfo->max_blocks) {
		if (!percpu_counter_limited_add(&sbinfo->used_blocks,
						sbinfo->max_blocks, pages))
			goto unacct;

		err = dquot_alloc_block_nodirty(inode, pages);
		if (err) {
			percpu_counter_sub(&sbinfo->used_blocks, pages);
			goto unacct;
		}
	} else {
		err = dquot_alloc_block_nodirty(inode, pages);
		if (err)
			goto unacct;
	}

	return 0;

unacct:
	shmem_unacct_blocks(info->flags, pages);
	return err;
}

static void shmem_inode_unacct_blocks(struct inode *inode, long pages)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

	might_sleep();	/* when quotas */
	dquot_free_block_nodirty(inode, pages);

	if (sbinfo->max_blocks)
		percpu_counter_sub(&sbinfo->used_blocks, pages);
	shmem_unacct_blocks(info->flags, pages);
}

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;
static const struct vm_operations_struct shmem_anon_vm_ops;
static struct file_system_type shmem_fs_type;

bool shmem_mapping(struct address_space *mapping)
{
	return mapping->a_ops == &shmem_aops;
}
EXPORT_SYMBOL_GPL(shmem_mapping);

bool vma_is_anon_shmem(struct vm_area_struct *vma)
{
	return vma->vm_ops == &shmem_anon_vm_ops;
}

bool vma_is_shmem(struct vm_area_struct *vma)
{
	return vma_is_anon_shmem(vma) || vma->vm_ops == &shmem_vm_ops;
}

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

#ifdef CONFIG_TMPFS_QUOTA

static int shmem_enable_quotas(struct super_block *sb,
			       unsigned short quota_types)
{
	int type, err = 0;

	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE | DQUOT_NOLIST_DIRTY;
	for (type = 0; type < SHMEM_MAXQUOTAS; type++) {
		if (!(quota_types & (1 << type)))
			continue;
		err = dquot_load_quota_sb(sb, type, QFMT_SHMEM,
					  DQUOT_USAGE_ENABLED |
					  DQUOT_LIMITS_ENABLED);
		if (err)
			goto out_err;
	}
	return 0;

out_err:
	pr_warn("tmpfs: failed to enable quota tracking (type=%d, err=%d)\n",
		type, err);
	for (type--; type >= 0; type--)
		dquot_quota_off(sb, type);
	return err;
}

static void shmem_disable_quotas(struct super_block *sb)
{
	int type;

	for (type = 0; type < SHMEM_MAXQUOTAS; type++)
		dquot_quota_off(sb, type);
}

static struct dquot __rcu **shmem_get_dquots(struct inode *inode)
{
	return SHMEM_I(inode)->i_dquot;
}
#endif /* CONFIG_TMPFS_QUOTA */

/*
 * shmem_reserve_inode() performs bookkeeping to reserve a shmem inode, and
 * produces a novel ino for the newly allocated inode.
 *
 * It may also be called when making a hard link to permit the space needed by
 * each dentry. However, in that case, no new inode number is needed since that
 * internally draws from another pool of inode numbers (currently global
 * get_next_ino()). This case is indicated by passing NULL as inop.
 */
#define SHMEM_INO_BATCH 1024
static int shmem_reserve_inode(struct super_block *sb, ino_t *inop)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	ino_t ino;

	if (!(sb->s_flags & SB_KERNMOUNT)) {
		raw_spin_lock(&sbinfo->stat_lock);
		if (sbinfo->max_inodes) {
			if (sbinfo->free_ispace < BOGO_INODE_SIZE) {
				raw_spin_unlock(&sbinfo->stat_lock);
				return -ENOSPC;
			}
			sbinfo->free_ispace -= BOGO_INODE_SIZE;
		}
		if (inop) {
			ino = sbinfo->next_ino++;
			if (unlikely(is_zero_ino(ino)))
				ino = sbinfo->next_ino++;
			if (unlikely(!sbinfo->full_inums &&
				     ino > UINT_MAX)) {
				/*
				 * Emulate get_next_ino uint wraparound for
				 * compatibility
				 */
				if (IS_ENABLED(CONFIG_64BIT))
					pr_warn("%s: inode number overflow on device %d, consider using inode64 mount option\n",
						__func__, MINOR(sb->s_dev));
				sbinfo->next_ino = 1;
				ino = sbinfo->next_ino++;
			}
			*inop = ino;
		}
		raw_spin_unlock(&sbinfo->stat_lock);
	} else if (inop) {
		/*
		 * __shmem_file_setup, one of our callers, is lock-free: it
		 * doesn't hold stat_lock in shmem_reserve_inode since
		 * max_inodes is always 0, and is called from potentially
		 * unknown contexts. As such, use a per-cpu batched allocator
		 * which doesn't require the per-sb stat_lock unless we are at
		 * the batch boundary.
		 *
		 * We don't need to worry about inode{32,64} since SB_KERNMOUNT
		 * shmem mounts are not exposed to userspace, so we don't need
		 * to worry about things like glibc compatibility.
		 */
		ino_t *next_ino;

		next_ino = per_cpu_ptr(sbinfo->ino_batch, get_cpu());
		ino = *next_ino;
		if (unlikely(ino % SHMEM_INO_BATCH == 0)) {
			raw_spin_lock(&sbinfo->stat_lock);
			ino = sbinfo->next_ino;
			sbinfo->next_ino += SHMEM_INO_BATCH;
			raw_spin_unlock(&sbinfo->stat_lock);
			if (unlikely(is_zero_ino(ino)))
				ino++;
		}
		*inop = ino;
		*next_ino = ++ino;
		put_cpu();
	}

	return 0;
}
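
/*
 * Illustrative note on the SB_KERNMOUNT path above: with SHMEM_INO_BATCH of
 * 1024, a CPU whose per-cpu cursor reaches a multiple of 1024 grabs the next
 * 1024-number window under stat_lock and then hands out inode numbers
 * locally, so the lock is taken roughly once per 1024 allocations per CPU.
 */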

static void shmem_free_inode(struct super_block *sb, size_t freed_ispace)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		raw_spin_lock(&sbinfo->stat_lock);
		sbinfo->free_ispace += BOGO_INODE_SIZE + freed_ispace;
		raw_spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 * @alloced: the change in number of pages allocated to inode
 * @swapped: the change in number of pages swapped from inode
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 */
static void shmem_recalc_inode(struct inode *inode, long alloced, long swapped)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	spin_lock(&info->lock);
	info->alloced += alloced;
	info->swapped += swapped;
	freed = info->alloced - info->swapped -
		READ_ONCE(inode->i_mapping->nrpages);
	/*
	 * Special case: whereas normally shmem_recalc_inode() is called
	 * after i_mapping->nrpages has already been adjusted (up or down),
	 * shmem_writepage() has to raise swapped before nrpages is lowered -
	 * to stop a racing shmem_recalc_inode() from thinking that a page has
	 * been freed.  Compensate here, to avoid the need for a followup call.
	 */
	if (swapped > 0)
		freed += swapped;
	if (freed > 0)
		info->alloced -= freed;
	spin_unlock(&info->lock);

	/* The quota case may block */
	if (freed > 0)
		shmem_inode_unacct_blocks(inode, freed);
}
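
/*
 * Worked example (illustrative): if info->alloced is 10, info->swapped is 2
 * and i_mapping->nrpages is 7, then freed == 10 - 2 - 7 == 1, meaning the mm
 * dropped one clean hole page behind our back; alloced is trimmed to 9 and
 * one block is returned via shmem_inode_unacct_blocks().
 */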

bool shmem_charge(struct inode *inode, long pages)
{
	struct address_space *mapping = inode->i_mapping;

	if (shmem_inode_acct_blocks(inode, pages))
		return false;

	/* nrpages adjustment first, then shmem_recalc_inode() when balanced */
	xa_lock_irq(&mapping->i_pages);
	mapping->nrpages += pages;
	xa_unlock_irq(&mapping->i_pages);

	shmem_recalc_inode(inode, pages, 0);
	return true;
}

void shmem_uncharge(struct inode *inode, long pages)
{
	/* pages argument is currently unused: keep it to help debugging */
	/* nrpages adjustment done by __filemap_remove_folio() or caller */

	shmem_recalc_inode(inode, 0, 0);
}

/*
 * Replace item expected in xarray by a new item, while holding xa_lock.
 */
static int shmem_replace_entry(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	XA_STATE(xas, &mapping->i_pages, index);
	void *item;

	VM_BUG_ON(!expected);
	VM_BUG_ON(!replacement);
	item = xas_load(&xas);
	if (item != expected)
		return -ENOENT;
	xas_store(&xas, replacement);
	return 0;
}

/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking folio is not enough: by the time a swapcache folio is locked, it
 * might be reused, and again be swapcache, using the same swap as before.
 */
static bool shmem_confirm_swap(struct address_space *mapping,
			       pgoff_t index, swp_entry_t swap)
{
	return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
}

/*
 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
 *
 * SHMEM_HUGE_NEVER:
 *	disables huge pages for the mount;
 * SHMEM_HUGE_ALWAYS:
 *	enables huge pages for the mount;
 * SHMEM_HUGE_WITHIN_SIZE:
 *	only allocate huge pages if the page will be fully within i_size,
 *	also respect fadvise()/madvise() hints;
 * SHMEM_HUGE_ADVISE:
 *	only allocate huge pages if requested with fadvise()/madvise();
 */

#define SHMEM_HUGE_NEVER	0
#define SHMEM_HUGE_ALWAYS	1
#define SHMEM_HUGE_WITHIN_SIZE	2
#define SHMEM_HUGE_ADVISE	3

/*
 * Special values.
 * Only can be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
 *
 * SHMEM_HUGE_DENY:
 *	disables huge on shm_mnt and all mounts, for emergency use;
 * SHMEM_HUGE_FORCE:
 *	enables huge on shm_mnt and all mounts, w/o needing option, for testing;
 *
 */
#define SHMEM_HUGE_DENY		(-1)
#define SHMEM_HUGE_FORCE	(-2)
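
/*
 * Example usage (illustrative): the per-mount policy above is selected with
 * e.g. "mount -t tmpfs -o huge=within_size tmpfs /mnt", while the special
 * deny/force values are only reachable through
 * /sys/kernel/mm/transparent_hugepage/shmem_enabled.
 */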
vma->vm_m << 557 loff_t i_size; 548 loff_t i_size; 558 549 559 if (!S_ISREG(inode->i_mode)) 550 if (!S_ISREG(inode->i_mode)) 560 return false; 551 return false; 561 if (mm && ((vm_flags & VM_NOHUGEPAGE) 552 if (mm && ((vm_flags & VM_NOHUGEPAGE) || test_bit(MMF_DISABLE_THP, &mm->flags))) 562 return false; 553 return false; 563 if (shmem_huge == SHMEM_HUGE_DENY) 554 if (shmem_huge == SHMEM_HUGE_DENY) 564 return false; 555 return false; 565 if (shmem_huge_force || shmem_huge == 556 if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE) 566 return true; 557 return true; 567 558 568 switch (SHMEM_SB(inode->i_sb)->huge) { 559 switch (SHMEM_SB(inode->i_sb)->huge) { 569 case SHMEM_HUGE_ALWAYS: 560 case SHMEM_HUGE_ALWAYS: 570 return true; 561 return true; 571 case SHMEM_HUGE_WITHIN_SIZE: 562 case SHMEM_HUGE_WITHIN_SIZE: 572 index = round_up(index + 1, HP 563 index = round_up(index + 1, HPAGE_PMD_NR); 573 i_size = max(write_end, i_size !! 564 i_size = round_up(i_size_read(inode), PAGE_SIZE); 574 i_size = round_up(i_size, PAGE << 575 if (i_size >> PAGE_SHIFT >= in 565 if (i_size >> PAGE_SHIFT >= index) 576 return true; 566 return true; 577 fallthrough; 567 fallthrough; 578 case SHMEM_HUGE_ADVISE: 568 case SHMEM_HUGE_ADVISE: 579 if (mm && (vm_flags & VM_HUGEP 569 if (mm && (vm_flags & VM_HUGEPAGE)) 580 return true; 570 return true; 581 fallthrough; 571 fallthrough; 582 default: 572 default: 583 return false; 573 return false; 584 } 574 } 585 } 575 } 586 576 587 static bool shmem_huge_global_enabled(struct i !! 577 bool shmem_is_huge(struct inode *inode, pgoff_t index, 588 loff_t write_end, bool shme !! 578 bool shmem_huge_force, struct mm_struct *mm, 589 struct vm_area_struct *vma, !! 579 unsigned long vm_flags) 590 { 580 { 591 if (HPAGE_PMD_ORDER > MAX_PAGECACHE_OR 581 if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER) 592 return false; 582 return false; 593 583 594 return __shmem_huge_global_enabled(ino !! 
584 return __shmem_is_huge(inode, index, shmem_huge_force, mm, vm_flags); 595 shm << 596 } 585 } 597 586 598 #if defined(CONFIG_SYSFS) 587 #if defined(CONFIG_SYSFS) 599 static int shmem_parse_huge(const char *str) 588 static int shmem_parse_huge(const char *str) 600 { 589 { 601 if (!strcmp(str, "never")) 590 if (!strcmp(str, "never")) 602 return SHMEM_HUGE_NEVER; 591 return SHMEM_HUGE_NEVER; 603 if (!strcmp(str, "always")) 592 if (!strcmp(str, "always")) 604 return SHMEM_HUGE_ALWAYS; 593 return SHMEM_HUGE_ALWAYS; 605 if (!strcmp(str, "within_size")) 594 if (!strcmp(str, "within_size")) 606 return SHMEM_HUGE_WITHIN_SIZE; 595 return SHMEM_HUGE_WITHIN_SIZE; 607 if (!strcmp(str, "advise")) 596 if (!strcmp(str, "advise")) 608 return SHMEM_HUGE_ADVISE; 597 return SHMEM_HUGE_ADVISE; 609 if (!strcmp(str, "deny")) 598 if (!strcmp(str, "deny")) 610 return SHMEM_HUGE_DENY; 599 return SHMEM_HUGE_DENY; 611 if (!strcmp(str, "force")) 600 if (!strcmp(str, "force")) 612 return SHMEM_HUGE_FORCE; 601 return SHMEM_HUGE_FORCE; 613 return -EINVAL; 602 return -EINVAL; 614 } 603 } 615 #endif 604 #endif 616 605 617 #if defined(CONFIG_SYSFS) || defined(CONFIG_TM 606 #if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS) 618 static const char *shmem_format_huge(int huge) 607 static const char *shmem_format_huge(int huge) 619 { 608 { 620 switch (huge) { 609 switch (huge) { 621 case SHMEM_HUGE_NEVER: 610 case SHMEM_HUGE_NEVER: 622 return "never"; 611 return "never"; 623 case SHMEM_HUGE_ALWAYS: 612 case SHMEM_HUGE_ALWAYS: 624 return "always"; 613 return "always"; 625 case SHMEM_HUGE_WITHIN_SIZE: 614 case SHMEM_HUGE_WITHIN_SIZE: 626 return "within_size"; 615 return "within_size"; 627 case SHMEM_HUGE_ADVISE: 616 case SHMEM_HUGE_ADVISE: 628 return "advise"; 617 return "advise"; 629 case SHMEM_HUGE_DENY: 618 case SHMEM_HUGE_DENY: 630 return "deny"; 619 return "deny"; 631 case SHMEM_HUGE_FORCE: 620 case SHMEM_HUGE_FORCE: 632 return "force"; 621 return "force"; 633 default: 622 default: 634 VM_BUG_ON(1); 623 VM_BUG_ON(1); 635 return "bad_val"; 624 return "bad_val"; 636 } 625 } 637 } 626 } 638 #endif 627 #endif 639 628 640 static unsigned long shmem_unused_huge_shrink( 629 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, 641 struct shrink_control *sc, uns !! 630 struct shrink_control *sc, unsigned long nr_to_split) 642 { 631 { 643 LIST_HEAD(list), *pos, *next; 632 LIST_HEAD(list), *pos, *next; >> 633 LIST_HEAD(to_remove); 644 struct inode *inode; 634 struct inode *inode; 645 struct shmem_inode_info *info; 635 struct shmem_inode_info *info; 646 struct folio *folio; 636 struct folio *folio; 647 unsigned long batch = sc ? sc->nr_to_s 637 unsigned long batch = sc ? sc->nr_to_scan : 128; 648 unsigned long split = 0, freed = 0; !! 
638 int split = 0; 649 639 650 if (list_empty(&sbinfo->shrinklist)) 640 if (list_empty(&sbinfo->shrinklist)) 651 return SHRINK_STOP; 641 return SHRINK_STOP; 652 642 653 spin_lock(&sbinfo->shrinklist_lock); 643 spin_lock(&sbinfo->shrinklist_lock); 654 list_for_each_safe(pos, next, &sbinfo- 644 list_for_each_safe(pos, next, &sbinfo->shrinklist) { 655 info = list_entry(pos, struct 645 info = list_entry(pos, struct shmem_inode_info, shrinklist); 656 646 657 /* pin the inode */ 647 /* pin the inode */ 658 inode = igrab(&info->vfs_inode 648 inode = igrab(&info->vfs_inode); 659 649 660 /* inode is about to be evicte 650 /* inode is about to be evicted */ 661 if (!inode) { 651 if (!inode) { 662 list_del_init(&info->s 652 list_del_init(&info->shrinklist); 663 goto next; 653 goto next; 664 } 654 } 665 655 >> 656 /* Check if there's anything to gain */ >> 657 if (round_up(inode->i_size, PAGE_SIZE) == >> 658 round_up(inode->i_size, HPAGE_PMD_SIZE)) { >> 659 list_move(&info->shrinklist, &to_remove); >> 660 goto next; >> 661 } >> 662 666 list_move(&info->shrinklist, & 663 list_move(&info->shrinklist, &list); 667 next: 664 next: 668 sbinfo->shrinklist_len--; 665 sbinfo->shrinklist_len--; 669 if (!--batch) 666 if (!--batch) 670 break; 667 break; 671 } 668 } 672 spin_unlock(&sbinfo->shrinklist_lock); 669 spin_unlock(&sbinfo->shrinklist_lock); 673 670 >> 671 list_for_each_safe(pos, next, &to_remove) { >> 672 info = list_entry(pos, struct shmem_inode_info, shrinklist); >> 673 inode = &info->vfs_inode; >> 674 list_del_init(&info->shrinklist); >> 675 iput(inode); >> 676 } >> 677 674 list_for_each_safe(pos, next, &list) { 678 list_for_each_safe(pos, next, &list) { 675 pgoff_t next, end; << 676 loff_t i_size; << 677 int ret; 679 int ret; >> 680 pgoff_t index; 678 681 679 info = list_entry(pos, struct 682 info = list_entry(pos, struct shmem_inode_info, shrinklist); 680 inode = &info->vfs_inode; 683 inode = &info->vfs_inode; 681 684 682 if (nr_to_free && freed >= nr_ !! 685 if (nr_to_split && split >= nr_to_split) 683 goto move_back; 686 goto move_back; 684 687 685 i_size = i_size_read(inode); !! 688 index = (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT; 686 folio = filemap_get_entry(inod !! 689 folio = filemap_get_folio(inode->i_mapping, index); 687 if (!folio || xa_is_value(foli !! 690 if (IS_ERR(folio)) 688 goto drop; 691 goto drop; 689 692 690 /* No large folio at the end o !! 693 /* No huge page at the end of the file: nothing to split */ 691 if (!folio_test_large(folio)) 694 if (!folio_test_large(folio)) { 692 folio_put(folio); 695 folio_put(folio); 693 goto drop; 696 goto drop; 694 } 697 } 695 698 696 /* Check if there is anything << 697 next = folio_next_index(folio) << 698 end = shmem_fallocend(inode, D << 699 if (end <= folio->index || end << 700 folio_put(folio); << 701 goto drop; << 702 } << 703 << 704 /* 699 /* 705 * Move the inode on the list 700 * Move the inode on the list back to shrinklist if we failed 706 * to lock the page at this ti 701 * to lock the page at this time. 707 * 702 * 708 * Waiting for the lock may le 703 * Waiting for the lock may lead to deadlock in the 709 * reclaim path. 704 * reclaim path. 
710 */ 705 */ 711 if (!folio_trylock(folio)) { 706 if (!folio_trylock(folio)) { 712 folio_put(folio); 707 folio_put(folio); 713 goto move_back; 708 goto move_back; 714 } 709 } 715 710 716 ret = split_folio(folio); 711 ret = split_folio(folio); 717 folio_unlock(folio); 712 folio_unlock(folio); 718 folio_put(folio); 713 folio_put(folio); 719 714 720 /* If split failed move the in 715 /* If split failed move the inode on the list back to shrinklist */ 721 if (ret) 716 if (ret) 722 goto move_back; 717 goto move_back; 723 718 724 freed += next - end; << 725 split++; 719 split++; 726 drop: 720 drop: 727 list_del_init(&info->shrinklis 721 list_del_init(&info->shrinklist); 728 goto put; 722 goto put; 729 move_back: 723 move_back: 730 /* 724 /* 731 * Make sure the inode is eith 725 * Make sure the inode is either on the global list or deleted 732 * from any local list before 726 * from any local list before iput() since it could be deleted 733 * in another thread once we p 727 * in another thread once we put the inode (then the local list 734 * is corrupted). 728 * is corrupted). 735 */ 729 */ 736 spin_lock(&sbinfo->shrinklist_ 730 spin_lock(&sbinfo->shrinklist_lock); 737 list_move(&info->shrinklist, & 731 list_move(&info->shrinklist, &sbinfo->shrinklist); 738 sbinfo->shrinklist_len++; 732 sbinfo->shrinklist_len++; 739 spin_unlock(&sbinfo->shrinklis 733 spin_unlock(&sbinfo->shrinklist_lock); 740 put: 734 put: 741 iput(inode); 735 iput(inode); 742 } 736 } 743 737 744 return split; 738 return split; 745 } 739 } 746 740 747 static long shmem_unused_huge_scan(struct supe 741 static long shmem_unused_huge_scan(struct super_block *sb, 748 struct shrink_control *sc) 742 struct shrink_control *sc) 749 { 743 { 750 struct shmem_sb_info *sbinfo = SHMEM_S 744 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 751 745 752 if (!READ_ONCE(sbinfo->shrinklist_len) 746 if (!READ_ONCE(sbinfo->shrinklist_len)) 753 return SHRINK_STOP; 747 return SHRINK_STOP; 754 748 755 return shmem_unused_huge_shrink(sbinfo 749 return shmem_unused_huge_shrink(sbinfo, sc, 0); 756 } 750 } 757 751 758 static long shmem_unused_huge_count(struct sup 752 static long shmem_unused_huge_count(struct super_block *sb, 759 struct shrink_control *sc) 753 struct shrink_control *sc) 760 { 754 { 761 struct shmem_sb_info *sbinfo = SHMEM_S 755 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 762 return READ_ONCE(sbinfo->shrinklist_le 756 return READ_ONCE(sbinfo->shrinklist_len); 763 } 757 } 764 #else /* !CONFIG_TRANSPARENT_HUGEPAGE */ 758 #else /* !CONFIG_TRANSPARENT_HUGEPAGE */ 765 759 766 #define shmem_huge SHMEM_HUGE_DENY 760 #define shmem_huge SHMEM_HUGE_DENY 767 761 768 static unsigned long shmem_unused_huge_shrink( 762 static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, 769 struct shrink_control *sc, uns !! 763 struct shrink_control *sc, unsigned long nr_to_split) 770 { 764 { 771 return 0; 765 return 0; 772 } 766 } 773 << 774 static bool shmem_huge_global_enabled(struct i << 775 loff_t write_end, bool shmem_h << 776 struct vm_area_struct *vma, un << 777 { << 778 return false; << 779 } << 780 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 767 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 781 768 782 /* 769 /* 783 * Somewhat like filemap_add_folio, but error 770 * Somewhat like filemap_add_folio, but error if expected item has gone. 
784 */ 771 */ 785 static int shmem_add_to_page_cache(struct foli 772 static int shmem_add_to_page_cache(struct folio *folio, 786 struct addr 773 struct address_space *mapping, 787 pgoff_t ind 774 pgoff_t index, void *expected, gfp_t gfp) 788 { 775 { 789 XA_STATE_ORDER(xas, &mapping->i_pages, 776 XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio)); 790 long nr = folio_nr_pages(folio); 777 long nr = folio_nr_pages(folio); 791 778 792 VM_BUG_ON_FOLIO(index != round_down(in 779 VM_BUG_ON_FOLIO(index != round_down(index, nr), folio); 793 VM_BUG_ON_FOLIO(!folio_test_locked(fol 780 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); 794 VM_BUG_ON_FOLIO(!folio_test_swapbacked 781 VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio); >> 782 VM_BUG_ON(expected && folio_test_large(folio)); 795 783 796 folio_ref_add(folio, nr); 784 folio_ref_add(folio, nr); 797 folio->mapping = mapping; 785 folio->mapping = mapping; 798 folio->index = index; 786 folio->index = index; 799 787 800 gfp &= GFP_RECLAIM_MASK; 788 gfp &= GFP_RECLAIM_MASK; 801 folio_throttle_swaprate(folio, gfp); 789 folio_throttle_swaprate(folio, gfp); 802 790 803 do { 791 do { 804 xas_lock_irq(&xas); 792 xas_lock_irq(&xas); 805 if (expected != xas_find_confl 793 if (expected != xas_find_conflict(&xas)) { 806 xas_set_err(&xas, -EEX 794 xas_set_err(&xas, -EEXIST); 807 goto unlock; 795 goto unlock; 808 } 796 } 809 if (expected && xas_find_confl 797 if (expected && xas_find_conflict(&xas)) { 810 xas_set_err(&xas, -EEX 798 xas_set_err(&xas, -EEXIST); 811 goto unlock; 799 goto unlock; 812 } 800 } 813 xas_store(&xas, folio); 801 xas_store(&xas, folio); 814 if (xas_error(&xas)) 802 if (xas_error(&xas)) 815 goto unlock; 803 goto unlock; 816 if (folio_test_pmd_mappable(fo 804 if (folio_test_pmd_mappable(folio)) 817 __lruvec_stat_mod_foli 805 __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr); 818 __lruvec_stat_mod_folio(folio, 806 __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr); 819 __lruvec_stat_mod_folio(folio, 807 __lruvec_stat_mod_folio(folio, NR_SHMEM, nr); 820 mapping->nrpages += nr; 808 mapping->nrpages += nr; 821 unlock: 809 unlock: 822 xas_unlock_irq(&xas); 810 xas_unlock_irq(&xas); 823 } while (xas_nomem(&xas, gfp)); 811 } while (xas_nomem(&xas, gfp)); 824 812 825 if (xas_error(&xas)) { 813 if (xas_error(&xas)) { 826 folio->mapping = NULL; 814 folio->mapping = NULL; 827 folio_ref_sub(folio, nr); 815 folio_ref_sub(folio, nr); 828 return xas_error(&xas); 816 return xas_error(&xas); 829 } 817 } 830 818 831 return 0; 819 return 0; 832 } 820 } 833 821 834 /* 822 /* 835 * Somewhat like filemap_remove_folio, but sub 823 * Somewhat like filemap_remove_folio, but substitutes swap for @folio. 
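
/*
 * Note (descriptive): the do/while above follows the usual XArray pattern -
 * if the store fails for lack of memory, xas_nomem() allocates outside the
 * lock with the caller's gfp mask and the store is retried; any other error
 * (notably -EEXIST from a conflicting entry) ends the loop and is unwound.
 */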

/*
 * Somewhat like filemap_remove_folio, but substitutes swap for @folio.
 */
static void shmem_delete_from_page_cache(struct folio *folio, void *radswap)
{
	struct address_space *mapping = folio->mapping;
	long nr = folio_nr_pages(folio);
	int error;

	xa_lock_irq(&mapping->i_pages);
	error = shmem_replace_entry(mapping, folio->index, folio, radswap);
	folio->mapping = NULL;
	mapping->nrpages -= nr;
	__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
	__lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
	xa_unlock_irq(&mapping->i_pages);
	folio_put_refs(folio, nr);
	BUG_ON(error);
}

/*
 * Remove swap entry from page cache, free the swap and its page cache. Return
 * the number of pages being freed. 0 means entry not found in XArray (0 pages
 * being freed).
 */
static long shmem_free_swap(struct address_space *mapping,
			    pgoff_t index, void *radswap)
{
	int order = xa_get_order(&mapping->i_pages, index);
	void *old;

	old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0);
	if (old != radswap)
		return 0;
	free_swap_and_cache_nr(radix_to_swp_entry(radswap), 1 << order);

	return 1 << order;
}
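
/*
 * Worked example (illustrative): for an order-2 large swap entry the xarray
 * reports order 2, so a successful exchange frees 1 << 2 == 4 swap pages and
 * shmem_free_swap() returns 4; a raced (already replaced) entry returns 0.
 */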
877 swapped++; 894 if (xas.xa_index == max) 878 if (xas.xa_index == max) 895 break; 879 break; 896 if (need_resched()) { 880 if (need_resched()) { 897 xas_pause(&xas); 881 xas_pause(&xas); 898 cond_resched_rcu(); 882 cond_resched_rcu(); 899 } 883 } 900 } 884 } 901 rcu_read_unlock(); 885 rcu_read_unlock(); 902 886 903 return swapped << PAGE_SHIFT; 887 return swapped << PAGE_SHIFT; 904 } 888 } 905 889 906 /* 890 /* 907 * Determine (in bytes) how many of the shmem 891 * Determine (in bytes) how many of the shmem object's pages mapped by the 908 * given vma is swapped out. 892 * given vma is swapped out. 909 * 893 * 910 * This is safe to call without i_rwsem or the 894 * This is safe to call without i_rwsem or the i_pages lock thanks to RCU, 911 * as long as the inode doesn't go away and ra 895 * as long as the inode doesn't go away and racy results are not a problem. 912 */ 896 */ 913 unsigned long shmem_swap_usage(struct vm_area_ 897 unsigned long shmem_swap_usage(struct vm_area_struct *vma) 914 { 898 { 915 struct inode *inode = file_inode(vma-> 899 struct inode *inode = file_inode(vma->vm_file); 916 struct shmem_inode_info *info = SHMEM_ 900 struct shmem_inode_info *info = SHMEM_I(inode); 917 struct address_space *mapping = inode- 901 struct address_space *mapping = inode->i_mapping; 918 unsigned long swapped; 902 unsigned long swapped; 919 903 920 /* Be careful as we don't hold info->l 904 /* Be careful as we don't hold info->lock */ 921 swapped = READ_ONCE(info->swapped); 905 swapped = READ_ONCE(info->swapped); 922 906 923 /* 907 /* 924 * The easier cases are when the shmem 908 * The easier cases are when the shmem object has nothing in swap, or 925 * the vma maps it whole. Then we can 909 * the vma maps it whole. Then we can simply use the stats that we 926 * already track. 910 * already track. 927 */ 911 */ 928 if (!swapped) 912 if (!swapped) 929 return 0; 913 return 0; 930 914 931 if (!vma->vm_pgoff && vma->vm_end - vm 915 if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size) 932 return swapped << PAGE_SHIFT; 916 return swapped << PAGE_SHIFT; 933 917 934 /* Here comes the more involved part * 918 /* Here comes the more involved part */ 935 return shmem_partial_swap_usage(mappin 919 return shmem_partial_swap_usage(mapping, vma->vm_pgoff, 936 vma->v 920 vma->vm_pgoff + vma_pages(vma)); 937 } 921 } 938 922 939 /* 923 /* 940 * SysV IPC SHM_UNLOCK restore Unevictable pag 924 * SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists. 941 */ 925 */ 942 void shmem_unlock_mapping(struct address_space 926 void shmem_unlock_mapping(struct address_space *mapping) 943 { 927 { 944 struct folio_batch fbatch; 928 struct folio_batch fbatch; 945 pgoff_t index = 0; 929 pgoff_t index = 0; 946 930 947 folio_batch_init(&fbatch); 931 folio_batch_init(&fbatch); 948 /* 932 /* 949 * Minor point, but we might as well s 933 * Minor point, but we might as well stop if someone else SHM_LOCKs it. 
950 */ 934 */ 951 while (!mapping_unevictable(mapping) & 935 while (!mapping_unevictable(mapping) && 952 filemap_get_folios(mapping, &in 936 filemap_get_folios(mapping, &index, ~0UL, &fbatch)) { 953 check_move_unevictable_folios( 937 check_move_unevictable_folios(&fbatch); 954 folio_batch_release(&fbatch); 938 folio_batch_release(&fbatch); 955 cond_resched(); 939 cond_resched(); 956 } 940 } 957 } 941 } 958 942 959 static struct folio *shmem_get_partial_folio(s 943 static struct folio *shmem_get_partial_folio(struct inode *inode, pgoff_t index) 960 { 944 { 961 struct folio *folio; 945 struct folio *folio; 962 946 963 /* 947 /* 964 * At first avoid shmem_get_folio(,,,S 948 * At first avoid shmem_get_folio(,,,SGP_READ): that fails 965 * beyond i_size, and reports fallocat 949 * beyond i_size, and reports fallocated folios as holes. 966 */ 950 */ 967 folio = filemap_get_entry(inode->i_map 951 folio = filemap_get_entry(inode->i_mapping, index); 968 if (!folio) 952 if (!folio) 969 return folio; 953 return folio; 970 if (!xa_is_value(folio)) { 954 if (!xa_is_value(folio)) { 971 folio_lock(folio); 955 folio_lock(folio); 972 if (folio->mapping == inode->i 956 if (folio->mapping == inode->i_mapping) 973 return folio; 957 return folio; 974 /* The folio has been swapped 958 /* The folio has been swapped out */ 975 folio_unlock(folio); 959 folio_unlock(folio); 976 folio_put(folio); 960 folio_put(folio); 977 } 961 } 978 /* 962 /* 979 * But read a folio back from swap if 963 * But read a folio back from swap if any of it is within i_size 980 * (although in some cases this is jus 964 * (although in some cases this is just a waste of time). 981 */ 965 */ 982 folio = NULL; 966 folio = NULL; 983 shmem_get_folio(inode, index, 0, &foli !! 967 shmem_get_folio(inode, index, &folio, SGP_READ); 984 return folio; 968 return folio; 985 } 969 } 986 970 987 /* 971 /* 988 * Remove range of pages and swap entries from 972 * Remove range of pages and swap entries from page cache, and free them. 989 * If !unfalloc, truncate or punch hole; if un 973 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate. 
990 */ 974 */ 991 static void shmem_undo_range(struct inode *ino 975 static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, 992 976 bool unfalloc) 993 { 977 { 994 struct address_space *mapping = inode- 978 struct address_space *mapping = inode->i_mapping; 995 struct shmem_inode_info *info = SHMEM_ 979 struct shmem_inode_info *info = SHMEM_I(inode); 996 pgoff_t start = (lstart + PAGE_SIZE - 980 pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT; 997 pgoff_t end = (lend + 1) >> PAGE_SHIFT 981 pgoff_t end = (lend + 1) >> PAGE_SHIFT; 998 struct folio_batch fbatch; 982 struct folio_batch fbatch; 999 pgoff_t indices[PAGEVEC_SIZE]; 983 pgoff_t indices[PAGEVEC_SIZE]; 1000 struct folio *folio; 984 struct folio *folio; 1001 bool same_folio; 985 bool same_folio; 1002 long nr_swaps_freed = 0; 986 long nr_swaps_freed = 0; 1003 pgoff_t index; 987 pgoff_t index; 1004 int i; 988 int i; 1005 989 1006 if (lend == -1) 990 if (lend == -1) 1007 end = -1; /* unsigned, 991 end = -1; /* unsigned, so actually very big */ 1008 992 1009 if (info->fallocend > start && info-> 993 if (info->fallocend > start && info->fallocend <= end && !unfalloc) 1010 info->fallocend = start; 994 info->fallocend = start; 1011 995 1012 folio_batch_init(&fbatch); 996 folio_batch_init(&fbatch); 1013 index = start; 997 index = start; 1014 while (index < end && find_lock_entri 998 while (index < end && find_lock_entries(mapping, &index, end - 1, 1015 &fbatch, indices)) { 999 &fbatch, indices)) { 1016 for (i = 0; i < folio_batch_c 1000 for (i = 0; i < folio_batch_count(&fbatch); i++) { 1017 folio = fbatch.folios 1001 folio = fbatch.folios[i]; 1018 1002 1019 if (xa_is_value(folio 1003 if (xa_is_value(folio)) { 1020 if (unfalloc) 1004 if (unfalloc) 1021 conti 1005 continue; 1022 nr_swaps_free !! 1006 nr_swaps_freed += !shmem_free_swap(mapping, 1023 1007 indices[i], folio); 1024 continue; 1008 continue; 1025 } 1009 } 1026 1010 1027 if (!unfalloc || !fol 1011 if (!unfalloc || !folio_test_uptodate(folio)) 1028 truncate_inod 1012 truncate_inode_folio(mapping, folio); 1029 folio_unlock(folio); 1013 folio_unlock(folio); 1030 } 1014 } 1031 folio_batch_remove_exceptiona 1015 folio_batch_remove_exceptionals(&fbatch); 1032 folio_batch_release(&fbatch); 1016 folio_batch_release(&fbatch); 1033 cond_resched(); 1017 cond_resched(); 1034 } 1018 } 1035 1019 1036 /* 1020 /* 1037 * When undoing a failed fallocate, w 1021 * When undoing a failed fallocate, we want none of the partial folio 1038 * zeroing and splitting below, but s 1022 * zeroing and splitting below, but shall want to truncate the whole 1039 * folio when !uptodate indicates tha 1023 * folio when !uptodate indicates that it was added by this fallocate, 1040 * even when [lstart, lend] covers on 1024 * even when [lstart, lend] covers only a part of the folio. 
1041 */ 1025 */ 1042 if (unfalloc) 1026 if (unfalloc) 1043 goto whole_folios; 1027 goto whole_folios; 1044 1028 1045 same_folio = (lstart >> PAGE_SHIFT) = 1029 same_folio = (lstart >> PAGE_SHIFT) == (lend >> PAGE_SHIFT); 1046 folio = shmem_get_partial_folio(inode 1030 folio = shmem_get_partial_folio(inode, lstart >> PAGE_SHIFT); 1047 if (folio) { 1031 if (folio) { 1048 same_folio = lend < folio_pos 1032 same_folio = lend < folio_pos(folio) + folio_size(folio); 1049 folio_mark_dirty(folio); 1033 folio_mark_dirty(folio); 1050 if (!truncate_inode_partial_f 1034 if (!truncate_inode_partial_folio(folio, lstart, lend)) { 1051 start = folio_next_in 1035 start = folio_next_index(folio); 1052 if (same_folio) 1036 if (same_folio) 1053 end = folio-> 1037 end = folio->index; 1054 } 1038 } 1055 folio_unlock(folio); 1039 folio_unlock(folio); 1056 folio_put(folio); 1040 folio_put(folio); 1057 folio = NULL; 1041 folio = NULL; 1058 } 1042 } 1059 1043 1060 if (!same_folio) 1044 if (!same_folio) 1061 folio = shmem_get_partial_fol 1045 folio = shmem_get_partial_folio(inode, lend >> PAGE_SHIFT); 1062 if (folio) { 1046 if (folio) { 1063 folio_mark_dirty(folio); 1047 folio_mark_dirty(folio); 1064 if (!truncate_inode_partial_f 1048 if (!truncate_inode_partial_folio(folio, lstart, lend)) 1065 end = folio->index; 1049 end = folio->index; 1066 folio_unlock(folio); 1050 folio_unlock(folio); 1067 folio_put(folio); 1051 folio_put(folio); 1068 } 1052 } 1069 1053 1070 whole_folios: 1054 whole_folios: 1071 1055 1072 index = start; 1056 index = start; 1073 while (index < end) { 1057 while (index < end) { 1074 cond_resched(); 1058 cond_resched(); 1075 1059 1076 if (!find_get_entries(mapping 1060 if (!find_get_entries(mapping, &index, end - 1, &fbatch, 1077 indices)) { 1061 indices)) { 1078 /* If all gone or hol 1062 /* If all gone or hole-punch or unfalloc, we're done */ 1079 if (index == start || 1063 if (index == start || end != -1) 1080 break; 1064 break; 1081 /* But if truncating, 1065 /* But if truncating, restart to make sure all gone */ 1082 index = start; 1066 index = start; 1083 continue; 1067 continue; 1084 } 1068 } 1085 for (i = 0; i < folio_batch_c 1069 for (i = 0; i < folio_batch_count(&fbatch); i++) { 1086 folio = fbatch.folios 1070 folio = fbatch.folios[i]; 1087 1071 1088 if (xa_is_value(folio 1072 if (xa_is_value(folio)) { 1089 long swaps_fr << 1090 << 1091 if (unfalloc) 1073 if (unfalloc) 1092 conti 1074 continue; 1093 swaps_freed = !! 1075 if (shmem_free_swap(mapping, indices[i], folio)) { 1094 if (!swaps_fr << 1095 /* Sw 1076 /* Swap was replaced by page: retry */ 1096 index 1077 index = indices[i]; 1097 break 1078 break; 1098 } 1079 } 1099 nr_swaps_free !! 1080 nr_swaps_freed++; 1100 continue; 1081 continue; 1101 } 1082 } 1102 1083 1103 folio_lock(folio); 1084 folio_lock(folio); 1104 1085 1105 if (!unfalloc || !fol 1086 if (!unfalloc || !folio_test_uptodate(folio)) { 1106 if (folio_map 1087 if (folio_mapping(folio) != mapping) { 1107 /* Pa 1088 /* Page was replaced by swap: retry */ 1108 folio 1089 folio_unlock(folio); 1109 index 1090 index = indices[i]; 1110 break 1091 break; 1111 } 1092 } 1112 VM_BUG_ON_FOL 1093 VM_BUG_ON_FOLIO(folio_test_writeback(folio), 1113 1094 folio); 1114 1095 1115 if (!folio_te 1096 if (!folio_test_large(folio)) { 1116 trunc 1097 truncate_inode_folio(mapping, folio); 1117 } else if (tr 1098 } else if (truncate_inode_partial_folio(folio, lstart, lend)) { 1118 /* 1099 /* 1119 * If 1100 * If we split a page, reset the loop so 1120 * th 1101 * that we pick up the new sub pages. 
1121 * Ot 1102 * Otherwise the THP was entirely 1122 * dr 1103 * dropped or the target range was 1123 * ze 1104 * zeroed, so just continue the loop as 1124 * is 1105 * is. 1125 */ 1106 */ 1126 if (! 1107 if (!folio_test_large(folio)) { 1127 1108 folio_unlock(folio); 1128 1109 index = start; 1129 1110 break; 1130 } 1111 } 1131 } 1112 } 1132 } 1113 } 1133 folio_unlock(folio); 1114 folio_unlock(folio); 1134 } 1115 } 1135 folio_batch_remove_exceptiona 1116 folio_batch_remove_exceptionals(&fbatch); 1136 folio_batch_release(&fbatch); 1117 folio_batch_release(&fbatch); 1137 } 1118 } 1138 1119 1139 shmem_recalc_inode(inode, 0, -nr_swap 1120 shmem_recalc_inode(inode, 0, -nr_swaps_freed); 1140 } 1121 } 1141 1122 1142 void shmem_truncate_range(struct inode *inode 1123 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) 1143 { 1124 { 1144 shmem_undo_range(inode, lstart, lend, 1125 shmem_undo_range(inode, lstart, lend, false); 1145 inode_set_mtime_to_ts(inode, inode_se 1126 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); 1146 inode_inc_iversion(inode); 1127 inode_inc_iversion(inode); 1147 } 1128 } 1148 EXPORT_SYMBOL_GPL(shmem_truncate_range); 1129 EXPORT_SYMBOL_GPL(shmem_truncate_range); 1149 1130 1150 static int shmem_getattr(struct mnt_idmap *id 1131 static int shmem_getattr(struct mnt_idmap *idmap, 1151 const struct path *p 1132 const struct path *path, struct kstat *stat, 1152 u32 request_mask, un 1133 u32 request_mask, unsigned int query_flags) 1153 { 1134 { 1154 struct inode *inode = path->dentry->d 1135 struct inode *inode = path->dentry->d_inode; 1155 struct shmem_inode_info *info = SHMEM 1136 struct shmem_inode_info *info = SHMEM_I(inode); 1156 1137 1157 if (info->alloced - info->swapped != 1138 if (info->alloced - info->swapped != inode->i_mapping->nrpages) 1158 shmem_recalc_inode(inode, 0, 1139 shmem_recalc_inode(inode, 0, 0); 1159 1140 1160 if (info->fsflags & FS_APPEND_FL) 1141 if (info->fsflags & FS_APPEND_FL) 1161 stat->attributes |= STATX_ATT 1142 stat->attributes |= STATX_ATTR_APPEND; 1162 if (info->fsflags & FS_IMMUTABLE_FL) 1143 if (info->fsflags & FS_IMMUTABLE_FL) 1163 stat->attributes |= STATX_ATT 1144 stat->attributes |= STATX_ATTR_IMMUTABLE; 1164 if (info->fsflags & FS_NODUMP_FL) 1145 if (info->fsflags & FS_NODUMP_FL) 1165 stat->attributes |= STATX_ATT 1146 stat->attributes |= STATX_ATTR_NODUMP; 1166 stat->attributes_mask |= (STATX_ATTR_ 1147 stat->attributes_mask |= (STATX_ATTR_APPEND | 1167 STATX_ATTR_IMMUTABLE 1148 STATX_ATTR_IMMUTABLE | 1168 STATX_ATTR_NODUMP); 1149 STATX_ATTR_NODUMP); 1169 inode_lock_shared(inode); << 1170 generic_fillattr(idmap, request_mask, 1150 generic_fillattr(idmap, request_mask, inode, stat); 1171 inode_unlock_shared(inode); << 1172 1151 1173 if (shmem_huge_global_enabled(inode, !! 
1152 if (shmem_is_huge(inode, 0, false, NULL, 0)) 1174 stat->blksize = HPAGE_PMD_SIZ 1153 stat->blksize = HPAGE_PMD_SIZE; 1175 1154 1176 if (request_mask & STATX_BTIME) { 1155 if (request_mask & STATX_BTIME) { 1177 stat->result_mask |= STATX_BT 1156 stat->result_mask |= STATX_BTIME; 1178 stat->btime.tv_sec = info->i_ 1157 stat->btime.tv_sec = info->i_crtime.tv_sec; 1179 stat->btime.tv_nsec = info->i 1158 stat->btime.tv_nsec = info->i_crtime.tv_nsec; 1180 } 1159 } 1181 1160 1182 return 0; 1161 return 0; 1183 } 1162 } 1184 1163 1185 static int shmem_setattr(struct mnt_idmap *id 1164 static int shmem_setattr(struct mnt_idmap *idmap, 1186 struct dentry *dentr 1165 struct dentry *dentry, struct iattr *attr) 1187 { 1166 { 1188 struct inode *inode = d_inode(dentry) 1167 struct inode *inode = d_inode(dentry); 1189 struct shmem_inode_info *info = SHMEM 1168 struct shmem_inode_info *info = SHMEM_I(inode); 1190 int error; 1169 int error; 1191 bool update_mtime = false; 1170 bool update_mtime = false; 1192 bool update_ctime = true; 1171 bool update_ctime = true; 1193 1172 1194 error = setattr_prepare(idmap, dentry 1173 error = setattr_prepare(idmap, dentry, attr); 1195 if (error) 1174 if (error) 1196 return error; 1175 return error; 1197 1176 1198 if ((info->seals & F_SEAL_EXEC) && (a 1177 if ((info->seals & F_SEAL_EXEC) && (attr->ia_valid & ATTR_MODE)) { 1199 if ((inode->i_mode ^ attr->ia 1178 if ((inode->i_mode ^ attr->ia_mode) & 0111) { 1200 return -EPERM; 1179 return -EPERM; 1201 } 1180 } 1202 } 1181 } 1203 1182 1204 if (S_ISREG(inode->i_mode) && (attr-> 1183 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { 1205 loff_t oldsize = inode->i_siz 1184 loff_t oldsize = inode->i_size; 1206 loff_t newsize = attr->ia_siz 1185 loff_t newsize = attr->ia_size; 1207 1186 1208 /* protected by i_rwsem */ 1187 /* protected by i_rwsem */ 1209 if ((newsize < oldsize && (in 1188 if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) || 1210 (newsize > oldsize && (in 1189 (newsize > oldsize && (info->seals & F_SEAL_GROW))) 1211 return -EPERM; 1190 return -EPERM; 1212 1191 1213 if (newsize != oldsize) { 1192 if (newsize != oldsize) { 1214 error = shmem_reacct_ 1193 error = shmem_reacct_size(SHMEM_I(inode)->flags, 1215 oldsi 1194 oldsize, newsize); 1216 if (error) 1195 if (error) 1217 return error; 1196 return error; 1218 i_size_write(inode, n 1197 i_size_write(inode, newsize); 1219 update_mtime = true; 1198 update_mtime = true; 1220 } else { 1199 } else { 1221 update_ctime = false; 1200 update_ctime = false; 1222 } 1201 } 1223 if (newsize <= oldsize) { 1202 if (newsize <= oldsize) { 1224 loff_t holebegin = ro 1203 loff_t holebegin = round_up(newsize, PAGE_SIZE); 1225 if (oldsize > holebeg 1204 if (oldsize > holebegin) 1226 unmap_mapping 1205 unmap_mapping_range(inode->i_mapping, 1227 1206 holebegin, 0, 1); 1228 if (info->alloced) 1207 if (info->alloced) 1229 shmem_truncat 1208 shmem_truncate_range(inode, 1230 1209 newsize, (loff_t)-1); 1231 /* unmap again to rem 1210 /* unmap again to remove racily COWed private pages */ 1232 if (oldsize > holebeg 1211 if (oldsize > holebegin) 1233 unmap_mapping 1212 unmap_mapping_range(inode->i_mapping, 1234 1213 holebegin, 0, 1); 1235 } 1214 } 1236 } 1215 } 1237 1216 1238 if (is_quota_modification(idmap, inod 1217 if (is_quota_modification(idmap, inode, attr)) { 1239 error = dquot_initialize(inod 1218 error = dquot_initialize(inode); 1240 if (error) 1219 if (error) 1241 return error; 1220 return error; 1242 } 1221 } 1243 1222 1244 /* Transfer quota accounting */ 1223 
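The F_SEAL_* checks above are normally exercised through memfd seals: F_SEAL_SHRINK and F_SEAL_GROW make size-changing truncates fail with EPERM, and the F_SEAL_EXEC branch likewise rejects a chmod that flips any execute bit. A small userspace sketch of the size seals (file name and sizes are arbitrary):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = memfd_create("seal-demo", MFD_ALLOW_SEALING);

	if (fd < 0)
		exit(1);
	ftruncate(fd, 4096);
	fcntl(fd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW);

	/* Both resizes are refused with EPERM by the seal checks above. */
	if (ftruncate(fd, 2048))
		perror("shrink");
	if (ftruncate(fd, 8192))
		perror("grow");

	/* A same-size truncate still passes: only actual size changes are sealed. */
	if (ftruncate(fd, 4096) == 0)
		puts("no-op truncate allowed");
	return 0;
}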
/* Transfer quota accounting */ 1245 if (i_uid_needs_update(idmap, attr, i 1224 if (i_uid_needs_update(idmap, attr, inode) || 1246 i_gid_needs_update(idmap, attr, i 1225 i_gid_needs_update(idmap, attr, inode)) { 1247 error = dquot_transfer(idmap, 1226 error = dquot_transfer(idmap, inode, attr); 1248 if (error) 1227 if (error) 1249 return error; 1228 return error; 1250 } 1229 } 1251 1230 1252 setattr_copy(idmap, inode, attr); 1231 setattr_copy(idmap, inode, attr); 1253 if (attr->ia_valid & ATTR_MODE) 1232 if (attr->ia_valid & ATTR_MODE) 1254 error = posix_acl_chmod(idmap 1233 error = posix_acl_chmod(idmap, dentry, inode->i_mode); 1255 if (!error && update_ctime) { 1234 if (!error && update_ctime) { 1256 inode_set_ctime_current(inode 1235 inode_set_ctime_current(inode); 1257 if (update_mtime) 1236 if (update_mtime) 1258 inode_set_mtime_to_ts 1237 inode_set_mtime_to_ts(inode, inode_get_ctime(inode)); 1259 inode_inc_iversion(inode); 1238 inode_inc_iversion(inode); 1260 } 1239 } 1261 return error; 1240 return error; 1262 } 1241 } 1263 1242 1264 static void shmem_evict_inode(struct inode *i 1243 static void shmem_evict_inode(struct inode *inode) 1265 { 1244 { 1266 struct shmem_inode_info *info = SHMEM 1245 struct shmem_inode_info *info = SHMEM_I(inode); 1267 struct shmem_sb_info *sbinfo = SHMEM_ 1246 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 1268 size_t freed = 0; 1247 size_t freed = 0; 1269 1248 1270 if (shmem_mapping(inode->i_mapping)) 1249 if (shmem_mapping(inode->i_mapping)) { 1271 shmem_unacct_size(info->flags 1250 shmem_unacct_size(info->flags, inode->i_size); 1272 inode->i_size = 0; 1251 inode->i_size = 0; 1273 mapping_set_exiting(inode->i_ 1252 mapping_set_exiting(inode->i_mapping); 1274 shmem_truncate_range(inode, 0 1253 shmem_truncate_range(inode, 0, (loff_t)-1); 1275 if (!list_empty(&info->shrink 1254 if (!list_empty(&info->shrinklist)) { 1276 spin_lock(&sbinfo->sh 1255 spin_lock(&sbinfo->shrinklist_lock); 1277 if (!list_empty(&info 1256 if (!list_empty(&info->shrinklist)) { 1278 list_del_init 1257 list_del_init(&info->shrinklist); 1279 sbinfo->shrin 1258 sbinfo->shrinklist_len--; 1280 } 1259 } 1281 spin_unlock(&sbinfo-> 1260 spin_unlock(&sbinfo->shrinklist_lock); 1282 } 1261 } 1283 while (!list_empty(&info->swa 1262 while (!list_empty(&info->swaplist)) { 1284 /* Wait while shmem_u 1263 /* Wait while shmem_unuse() is scanning this inode... */ 1285 wait_var_event(&info- 1264 wait_var_event(&info->stop_eviction, 1286 !atomi 1265 !atomic_read(&info->stop_eviction)); 1287 mutex_lock(&shmem_swa 1266 mutex_lock(&shmem_swaplist_mutex); 1288 /* ...but beware of t 1267 /* ...but beware of the race if we peeked too early */ 1289 if (!atomic_read(&inf 1268 if (!atomic_read(&info->stop_eviction)) 1290 list_del_init 1269 list_del_init(&info->swaplist); 1291 mutex_unlock(&shmem_s 1270 mutex_unlock(&shmem_swaplist_mutex); 1292 } 1271 } 1293 } 1272 } 1294 1273 1295 simple_xattrs_free(&info->xattrs, sbi 1274 simple_xattrs_free(&info->xattrs, sbinfo->max_inodes ? 
&freed : NULL); 1296 shmem_free_inode(inode->i_sb, freed); 1275 shmem_free_inode(inode->i_sb, freed); 1297 WARN_ON(inode->i_blocks); 1276 WARN_ON(inode->i_blocks); 1298 clear_inode(inode); 1277 clear_inode(inode); 1299 #ifdef CONFIG_TMPFS_QUOTA 1278 #ifdef CONFIG_TMPFS_QUOTA 1300 dquot_free_inode(inode); 1279 dquot_free_inode(inode); 1301 dquot_drop(inode); 1280 dquot_drop(inode); 1302 #endif 1281 #endif 1303 } 1282 } 1304 1283 1305 static int shmem_find_swap_entries(struct add 1284 static int shmem_find_swap_entries(struct address_space *mapping, 1306 pgoff_t st 1285 pgoff_t start, struct folio_batch *fbatch, 1307 pgoff_t *i 1286 pgoff_t *indices, unsigned int type) 1308 { 1287 { 1309 XA_STATE(xas, &mapping->i_pages, star 1288 XA_STATE(xas, &mapping->i_pages, start); 1310 struct folio *folio; 1289 struct folio *folio; 1311 swp_entry_t entry; 1290 swp_entry_t entry; 1312 1291 1313 rcu_read_lock(); 1292 rcu_read_lock(); 1314 xas_for_each(&xas, folio, ULONG_MAX) 1293 xas_for_each(&xas, folio, ULONG_MAX) { 1315 if (xas_retry(&xas, folio)) 1294 if (xas_retry(&xas, folio)) 1316 continue; 1295 continue; 1317 1296 1318 if (!xa_is_value(folio)) 1297 if (!xa_is_value(folio)) 1319 continue; 1298 continue; 1320 1299 1321 entry = radix_to_swp_entry(fo 1300 entry = radix_to_swp_entry(folio); 1322 /* 1301 /* 1323 * swapin error entries can b 1302 * swapin error entries can be found in the mapping. But they're 1324 * deliberately ignored here 1303 * deliberately ignored here as we've done everything we can do. 1325 */ 1304 */ 1326 if (swp_type(entry) != type) 1305 if (swp_type(entry) != type) 1327 continue; 1306 continue; 1328 1307 1329 indices[folio_batch_count(fba 1308 indices[folio_batch_count(fbatch)] = xas.xa_index; 1330 if (!folio_batch_add(fbatch, 1309 if (!folio_batch_add(fbatch, folio)) 1331 break; 1310 break; 1332 1311 1333 if (need_resched()) { 1312 if (need_resched()) { 1334 xas_pause(&xas); 1313 xas_pause(&xas); 1335 cond_resched_rcu(); 1314 cond_resched_rcu(); 1336 } 1315 } 1337 } 1316 } 1338 rcu_read_unlock(); 1317 rcu_read_unlock(); 1339 1318 1340 return xas.xa_index; 1319 return xas.xa_index; 1341 } 1320 } 1342 1321 1343 /* 1322 /* 1344 * Move the swapped pages for an inode to pag 1323 * Move the swapped pages for an inode to page cache. Returns the count 1345 * of pages swapped in, or the error in case 1324 * of pages swapped in, or the error in case of failure. 
1346 */ 1325 */ 1347 static int shmem_unuse_swap_entries(struct in 1326 static int shmem_unuse_swap_entries(struct inode *inode, 1348 struct folio_batch *fbatch, p 1327 struct folio_batch *fbatch, pgoff_t *indices) 1349 { 1328 { 1350 int i = 0; 1329 int i = 0; 1351 int ret = 0; 1330 int ret = 0; 1352 int error = 0; 1331 int error = 0; 1353 struct address_space *mapping = inode 1332 struct address_space *mapping = inode->i_mapping; 1354 1333 1355 for (i = 0; i < folio_batch_count(fba 1334 for (i = 0; i < folio_batch_count(fbatch); i++) { 1356 struct folio *folio = fbatch- 1335 struct folio *folio = fbatch->folios[i]; 1357 1336 1358 if (!xa_is_value(folio)) 1337 if (!xa_is_value(folio)) 1359 continue; 1338 continue; 1360 error = shmem_swapin_folio(in 1339 error = shmem_swapin_folio(inode, indices[i], &folio, SGP_CACHE, 1361 mappi 1340 mapping_gfp_mask(mapping), NULL, NULL); 1362 if (error == 0) { 1341 if (error == 0) { 1363 folio_unlock(folio); 1342 folio_unlock(folio); 1364 folio_put(folio); 1343 folio_put(folio); 1365 ret++; 1344 ret++; 1366 } 1345 } 1367 if (error == -ENOMEM) 1346 if (error == -ENOMEM) 1368 break; 1347 break; 1369 error = 0; 1348 error = 0; 1370 } 1349 } 1371 return error ? error : ret; 1350 return error ? error : ret; 1372 } 1351 } 1373 1352 1374 /* 1353 /* 1375 * If swap found in inode, free it and move p 1354 * If swap found in inode, free it and move page from swapcache to filecache. 1376 */ 1355 */ 1377 static int shmem_unuse_inode(struct inode *in 1356 static int shmem_unuse_inode(struct inode *inode, unsigned int type) 1378 { 1357 { 1379 struct address_space *mapping = inode 1358 struct address_space *mapping = inode->i_mapping; 1380 pgoff_t start = 0; 1359 pgoff_t start = 0; 1381 struct folio_batch fbatch; 1360 struct folio_batch fbatch; 1382 pgoff_t indices[PAGEVEC_SIZE]; 1361 pgoff_t indices[PAGEVEC_SIZE]; 1383 int ret = 0; 1362 int ret = 0; 1384 1363 1385 do { 1364 do { 1386 folio_batch_init(&fbatch); 1365 folio_batch_init(&fbatch); 1387 shmem_find_swap_entries(mappi 1366 shmem_find_swap_entries(mapping, start, &fbatch, indices, type); 1388 if (folio_batch_count(&fbatch 1367 if (folio_batch_count(&fbatch) == 0) { 1389 ret = 0; 1368 ret = 0; 1390 break; 1369 break; 1391 } 1370 } 1392 1371 1393 ret = shmem_unuse_swap_entrie 1372 ret = shmem_unuse_swap_entries(inode, &fbatch, indices); 1394 if (ret < 0) 1373 if (ret < 0) 1395 break; 1374 break; 1396 1375 1397 start = indices[folio_batch_c 1376 start = indices[folio_batch_count(&fbatch) - 1]; 1398 } while (true); 1377 } while (true); 1399 1378 1400 return ret; 1379 return ret; 1401 } 1380 } 1402 1381 1403 /* 1382 /* 1404 * Read all the shared memory data that resid 1383 * Read all the shared memory data that resides in the swap 1405 * device 'type' back into memory, so the swa 1384 * device 'type' back into memory, so the swap device can be 1406 * unused. 1385 * unused. 
1407 */ 1386 */ 1408 int shmem_unuse(unsigned int type) 1387 int shmem_unuse(unsigned int type) 1409 { 1388 { 1410 struct shmem_inode_info *info, *next; 1389 struct shmem_inode_info *info, *next; 1411 int error = 0; 1390 int error = 0; 1412 1391 1413 if (list_empty(&shmem_swaplist)) 1392 if (list_empty(&shmem_swaplist)) 1414 return 0; 1393 return 0; 1415 1394 1416 mutex_lock(&shmem_swaplist_mutex); 1395 mutex_lock(&shmem_swaplist_mutex); 1417 list_for_each_entry_safe(info, next, 1396 list_for_each_entry_safe(info, next, &shmem_swaplist, swaplist) { 1418 if (!info->swapped) { 1397 if (!info->swapped) { 1419 list_del_init(&info-> 1398 list_del_init(&info->swaplist); 1420 continue; 1399 continue; 1421 } 1400 } 1422 /* 1401 /* 1423 * Drop the swaplist mutex wh 1402 * Drop the swaplist mutex while searching the inode for swap; 1424 * but before doing so, make 1403 * but before doing so, make sure shmem_evict_inode() will not 1425 * remove placeholder inode f 1404 * remove placeholder inode from swaplist, nor let it be freed 1426 * (igrab() would protect fro 1405 * (igrab() would protect from unlink, but not from unmount). 1427 */ 1406 */ 1428 atomic_inc(&info->stop_evicti 1407 atomic_inc(&info->stop_eviction); 1429 mutex_unlock(&shmem_swaplist_ 1408 mutex_unlock(&shmem_swaplist_mutex); 1430 1409 1431 error = shmem_unuse_inode(&in 1410 error = shmem_unuse_inode(&info->vfs_inode, type); 1432 cond_resched(); 1411 cond_resched(); 1433 1412 1434 mutex_lock(&shmem_swaplist_mu 1413 mutex_lock(&shmem_swaplist_mutex); 1435 next = list_next_entry(info, 1414 next = list_next_entry(info, swaplist); 1436 if (!info->swapped) 1415 if (!info->swapped) 1437 list_del_init(&info-> 1416 list_del_init(&info->swaplist); 1438 if (atomic_dec_and_test(&info 1417 if (atomic_dec_and_test(&info->stop_eviction)) 1439 wake_up_var(&info->st 1418 wake_up_var(&info->stop_eviction); 1440 if (error) 1419 if (error) 1441 break; 1420 break; 1442 } 1421 } 1443 mutex_unlock(&shmem_swaplist_mutex); 1422 mutex_unlock(&shmem_swaplist_mutex); 1444 1423 1445 return error; 1424 return error; 1446 } 1425 } 1447 1426 1448 /* 1427 /* 1449 * Move the page from the page cache to the s 1428 * Move the page from the page cache to the swap cache. 1450 */ 1429 */ 1451 static int shmem_writepage(struct page *page, 1430 static int shmem_writepage(struct page *page, struct writeback_control *wbc) 1452 { 1431 { 1453 struct folio *folio = page_folio(page 1432 struct folio *folio = page_folio(page); 1454 struct address_space *mapping = folio 1433 struct address_space *mapping = folio->mapping; 1455 struct inode *inode = mapping->host; 1434 struct inode *inode = mapping->host; 1456 struct shmem_inode_info *info = SHMEM 1435 struct shmem_inode_info *info = SHMEM_I(inode); 1457 struct shmem_sb_info *sbinfo = SHMEM_ 1436 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 1458 swp_entry_t swap; 1437 swp_entry_t swap; 1459 pgoff_t index; 1438 pgoff_t index; 1460 int nr_pages; << 1461 bool split = false; << 1462 1439 1463 /* 1440 /* 1464 * Our capabilities prevent regular w 1441 * Our capabilities prevent regular writeback or sync from ever calling 1465 * shmem_writepage; but a stacking fi 1442 * shmem_writepage; but a stacking filesystem might use ->writepage of 1466 * its underlying filesystem, in whic 1443 * its underlying filesystem, in which case tmpfs should write out to 1467 * swap only in response to memory pr 1444 * swap only in response to memory pressure, and not for the writeback 1468 * threads or sync. 1445 * threads or sync. 
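As context, shmem_unuse() is driven from the swapoff(2) path (try_to_unuse() calls it for the swap device being disabled); a hedged userspace sketch, with the device path purely illustrative:

#include <stdio.h>
#include <sys/swap.h>

int main(void)
{
	const char *dev = "/dev/vdb1";	/* hypothetical swap device; needs CAP_SYS_ADMIN */

	/*
	 * swapoff(2) cannot finish until every swapped-out tmpfs page on the
	 * device has been read back into the page cache, which is what the
	 * walk over shmem_swaplist above does.
	 */
	if (swapoff(dev))
		perror("swapoff");
	return 0;
}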
1469 */ 1446 */ 1470 if (WARN_ON_ONCE(!wbc->for_reclaim)) 1447 if (WARN_ON_ONCE(!wbc->for_reclaim)) 1471 goto redirty; 1448 goto redirty; 1472 1449 1473 if (WARN_ON_ONCE((info->flags & VM_LO 1450 if (WARN_ON_ONCE((info->flags & VM_LOCKED) || sbinfo->noswap)) 1474 goto redirty; 1451 goto redirty; 1475 1452 1476 if (!total_swap_pages) 1453 if (!total_swap_pages) 1477 goto redirty; 1454 goto redirty; 1478 1455 1479 /* 1456 /* 1480 * If CONFIG_THP_SWAP is not enabled, !! 1457 * If /sys/kernel/mm/transparent_hugepage/shmem_enabled is "always" or 1481 * split when swapping. !! 1458 * "force", drivers/gpu/drm/i915/gem/i915_gem_shmem.c gets huge pages, 1482 * !! 1459 * and its shmem_writeback() needs them to be split when swapping. 1483 * And shrinkage of pages beyond i_si << 1484 * swapout of a large folio crossing << 1485 * (unless fallocate has been used to << 1486 */ 1460 */ 1487 if (folio_test_large(folio)) { 1461 if (folio_test_large(folio)) { 1488 index = shmem_fallocend(inode << 1489 DIV_ROUND_UP(i_size_r << 1490 if ((index > folio->index && << 1491 !IS_ENABLED(CONFIG_THP_SW << 1492 split = true; << 1493 } << 1494 << 1495 if (split) { << 1496 try_split: << 1497 /* Ensure the subpages are st 1462 /* Ensure the subpages are still dirty */ 1498 folio_test_set_dirty(folio); 1463 folio_test_set_dirty(folio); 1499 if (split_huge_page_to_list_t !! 1464 if (split_huge_page(page) < 0) 1500 goto redirty; 1465 goto redirty; 1501 folio = page_folio(page); 1466 folio = page_folio(page); 1502 folio_clear_dirty(folio); 1467 folio_clear_dirty(folio); 1503 } 1468 } 1504 1469 1505 index = folio->index; 1470 index = folio->index; 1506 nr_pages = folio_nr_pages(folio); << 1507 1471 1508 /* 1472 /* 1509 * This is somewhat ridiculous, but w 1473 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC 1510 * value into swapfile.c, the only wa 1474 * value into swapfile.c, the only way we can correctly account for a 1511 * fallocated folio arriving here is 1475 * fallocated folio arriving here is now to initialize it and write it. 1512 * 1476 * 1513 * That's okay for a folio already fa 1477 * That's okay for a folio already fallocated earlier, but if we have 1514 * not yet completed the fallocation, 1478 * not yet completed the fallocation, then (a) we want to keep track 1515 * of this folio in case we have to u 1479 * of this folio in case we have to undo it, and (b) it may not be a 1516 * good idea to continue anyway, once 1480 * good idea to continue anyway, once we're pushing into swap. So 1517 * reactivate the folio, and let shme 1481 * reactivate the folio, and let shmem_fallocate() quit when too many. 
1518 */ 1482 */ 1519 if (!folio_test_uptodate(folio)) { 1483 if (!folio_test_uptodate(folio)) { 1520 if (inode->i_private) { 1484 if (inode->i_private) { 1521 struct shmem_falloc * 1485 struct shmem_falloc *shmem_falloc; 1522 spin_lock(&inode->i_l 1486 spin_lock(&inode->i_lock); 1523 shmem_falloc = inode- 1487 shmem_falloc = inode->i_private; 1524 if (shmem_falloc && 1488 if (shmem_falloc && 1525 !shmem_falloc->wa 1489 !shmem_falloc->waitq && 1526 index >= shmem_fa 1490 index >= shmem_falloc->start && 1527 index < shmem_fal 1491 index < shmem_falloc->next) 1528 shmem_falloc- 1492 shmem_falloc->nr_unswapped++; 1529 else 1493 else 1530 shmem_falloc 1494 shmem_falloc = NULL; 1531 spin_unlock(&inode->i 1495 spin_unlock(&inode->i_lock); 1532 if (shmem_falloc) 1496 if (shmem_falloc) 1533 goto redirty; 1497 goto redirty; 1534 } 1498 } 1535 folio_zero_range(folio, 0, fo 1499 folio_zero_range(folio, 0, folio_size(folio)); 1536 flush_dcache_folio(folio); 1500 flush_dcache_folio(folio); 1537 folio_mark_uptodate(folio); 1501 folio_mark_uptodate(folio); 1538 } 1502 } 1539 1503 1540 swap = folio_alloc_swap(folio); 1504 swap = folio_alloc_swap(folio); 1541 if (!swap.val) { !! 1505 if (!swap.val) 1542 if (nr_pages > 1) << 1543 goto try_split; << 1544 << 1545 goto redirty; 1506 goto redirty; 1546 } << 1547 1507 1548 /* 1508 /* 1549 * Add inode to shmem_unuse()'s list 1509 * Add inode to shmem_unuse()'s list of swapped-out inodes, 1550 * if it's not already there. Do it 1510 * if it's not already there. Do it now before the folio is 1551 * moved to swap cache, when its page 1511 * moved to swap cache, when its pagelock no longer protects 1552 * the inode from eviction. But don' 1512 * the inode from eviction. But don't unlock the mutex until 1553 * we've incremented swapped, because 1513 * we've incremented swapped, because shmem_unuse_inode() will 1554 * prune a !swapped inode from the sw 1514 * prune a !swapped inode from the swaplist under this mutex. 1555 */ 1515 */ 1556 mutex_lock(&shmem_swaplist_mutex); 1516 mutex_lock(&shmem_swaplist_mutex); 1557 if (list_empty(&info->swaplist)) 1517 if (list_empty(&info->swaplist)) 1558 list_add(&info->swaplist, &sh 1518 list_add(&info->swaplist, &shmem_swaplist); 1559 1519 1560 if (add_to_swap_cache(folio, swap, 1520 if (add_to_swap_cache(folio, swap, 1561 __GFP_HIGH | __GFP_NO 1521 __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN, 1562 NULL) == 0) { 1522 NULL) == 0) { 1563 shmem_recalc_inode(inode, 0, !! 1523 shmem_recalc_inode(inode, 0, 1); 1564 swap_shmem_alloc(swap, nr_pag !! 
1524 swap_shmem_alloc(swap); 1565 shmem_delete_from_page_cache( 1525 shmem_delete_from_page_cache(folio, swp_to_radix_entry(swap)); 1566 1526 1567 mutex_unlock(&shmem_swaplist_ 1527 mutex_unlock(&shmem_swaplist_mutex); 1568 BUG_ON(folio_mapped(folio)); 1528 BUG_ON(folio_mapped(folio)); 1569 return swap_writepage(&folio- 1529 return swap_writepage(&folio->page, wbc); 1570 } 1530 } 1571 1531 1572 mutex_unlock(&shmem_swaplist_mutex); 1532 mutex_unlock(&shmem_swaplist_mutex); 1573 put_swap_folio(folio, swap); 1533 put_swap_folio(folio, swap); 1574 redirty: 1534 redirty: 1575 folio_mark_dirty(folio); 1535 folio_mark_dirty(folio); 1576 if (wbc->for_reclaim) 1536 if (wbc->for_reclaim) 1577 return AOP_WRITEPAGE_ACTIVATE 1537 return AOP_WRITEPAGE_ACTIVATE; /* Return with folio locked */ 1578 folio_unlock(folio); 1538 folio_unlock(folio); 1579 return 0; 1539 return 0; 1580 } 1540 } 1581 1541 1582 #if defined(CONFIG_NUMA) && defined(CONFIG_TM 1542 #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS) 1583 static void shmem_show_mpol(struct seq_file * 1543 static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) 1584 { 1544 { 1585 char buffer[64]; 1545 char buffer[64]; 1586 1546 1587 if (!mpol || mpol->mode == MPOL_DEFAU 1547 if (!mpol || mpol->mode == MPOL_DEFAULT) 1588 return; /* show nothi 1548 return; /* show nothing */ 1589 1549 1590 mpol_to_str(buffer, sizeof(buffer), m 1550 mpol_to_str(buffer, sizeof(buffer), mpol); 1591 1551 1592 seq_printf(seq, ",mpol=%s", buffer); 1552 seq_printf(seq, ",mpol=%s", buffer); 1593 } 1553 } 1594 1554 1595 static struct mempolicy *shmem_get_sbmpol(str 1555 static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) 1596 { 1556 { 1597 struct mempolicy *mpol = NULL; 1557 struct mempolicy *mpol = NULL; 1598 if (sbinfo->mpol) { 1558 if (sbinfo->mpol) { 1599 raw_spin_lock(&sbinfo->stat_l 1559 raw_spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */ 1600 mpol = sbinfo->mpol; 1560 mpol = sbinfo->mpol; 1601 mpol_get(mpol); 1561 mpol_get(mpol); 1602 raw_spin_unlock(&sbinfo->stat 1562 raw_spin_unlock(&sbinfo->stat_lock); 1603 } 1563 } 1604 return mpol; 1564 return mpol; 1605 } 1565 } 1606 #else /* !CONFIG_NUMA || !CONFIG_TMPFS */ 1566 #else /* !CONFIG_NUMA || !CONFIG_TMPFS */ 1607 static inline void shmem_show_mpol(struct seq 1567 static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) 1608 { 1568 { 1609 } 1569 } 1610 static inline struct mempolicy *shmem_get_sbm 1570 static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) 1611 { 1571 { 1612 return NULL; 1572 return NULL; 1613 } 1573 } 1614 #endif /* CONFIG_NUMA && CONFIG_TMPFS */ 1574 #endif /* CONFIG_NUMA && CONFIG_TMPFS */ 1615 1575 1616 static struct mempolicy *shmem_get_pgoff_poli 1576 static struct mempolicy *shmem_get_pgoff_policy(struct shmem_inode_info *info, 1617 pgoff_t index, unsign 1577 pgoff_t index, unsigned int order, pgoff_t *ilx); 1618 1578 1619 static struct folio *shmem_swapin_cluster(swp 1579 static struct folio *shmem_swapin_cluster(swp_entry_t swap, gfp_t gfp, 1620 struct shmem_inode_in 1580 struct shmem_inode_info *info, pgoff_t index) 1621 { 1581 { 1622 struct mempolicy *mpol; 1582 struct mempolicy *mpol; 1623 pgoff_t ilx; 1583 pgoff_t ilx; 1624 struct folio *folio; 1584 struct folio *folio; 1625 1585 1626 mpol = shmem_get_pgoff_policy(info, i 1586 mpol = shmem_get_pgoff_policy(info, index, 0, &ilx); 1627 folio = swap_cluster_readahead(swap, 1587 folio = swap_cluster_readahead(swap, gfp, mpol, ilx); 1628 
mpol_cond_put(mpol); 1588 mpol_cond_put(mpol); 1629 1589 1630 return folio; 1590 return folio; 1631 } 1591 } 1632 1592 1633 /* 1593 /* 1634 * Make sure huge_gfp is always more limited 1594 * Make sure huge_gfp is always more limited than limit_gfp. 1635 * Some of the flags set permissions, while o 1595 * Some of the flags set permissions, while others set limitations. 1636 */ 1596 */ 1637 static gfp_t limit_gfp_mask(gfp_t huge_gfp, g 1597 static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp) 1638 { 1598 { 1639 gfp_t allowflags = __GFP_IO | __GFP_F 1599 gfp_t allowflags = __GFP_IO | __GFP_FS | __GFP_RECLAIM; 1640 gfp_t denyflags = __GFP_NOWARN | __GF 1600 gfp_t denyflags = __GFP_NOWARN | __GFP_NORETRY; 1641 gfp_t zoneflags = limit_gfp & GFP_ZON 1601 gfp_t zoneflags = limit_gfp & GFP_ZONEMASK; 1642 gfp_t result = huge_gfp & ~(allowflag 1602 gfp_t result = huge_gfp & ~(allowflags | GFP_ZONEMASK); 1643 1603 1644 /* Allow allocations only from the or 1604 /* Allow allocations only from the originally specified zones. */ 1645 result |= zoneflags; 1605 result |= zoneflags; 1646 1606 1647 /* 1607 /* 1648 * Minimize the result gfp by taking 1608 * Minimize the result gfp by taking the union with the deny flags, 1649 * and the intersection of the allow 1609 * and the intersection of the allow flags. 1650 */ 1610 */ 1651 result |= (limit_gfp & denyflags); 1611 result |= (limit_gfp & denyflags); 1652 result |= (huge_gfp & limit_gfp) & al 1612 result |= (huge_gfp & limit_gfp) & allowflags; 1653 1613 1654 return result; 1614 return result; 1655 } 1615 } 1656 1616 1657 #ifdef CONFIG_TRANSPARENT_HUGEPAGE !! 1617 static struct folio *shmem_alloc_hugefolio(gfp_t gfp, 1658 unsigned long shmem_allowable_huge_orders(str !! 1618 struct shmem_inode_info *info, pgoff_t index) 1659 struct vm_are << 1660 loff_t write_ << 1661 { << 1662 unsigned long mask = READ_ONCE(huge_s << 1663 unsigned long within_size_orders = RE << 1664 unsigned long vm_flags = vma ? vma->v << 1665 bool global_huge; << 1666 loff_t i_size; << 1667 int order; << 1668 << 1669 if (thp_disabled_by_hw() || (vma && v << 1670 return 0; << 1671 << 1672 global_huge = shmem_huge_global_enabl << 1673 shmem << 1674 if (!vma || !vma_is_anon_shmem(vma)) << 1675 /* << 1676 * For tmpfs, we now only sup << 1677 * is enabled, otherwise fall << 1678 */ << 1679 return global_huge ? BIT(HPAG << 1680 } << 1681 << 1682 /* << 1683 * Following the 'deny' semantics of << 1684 * option off from all mounts. << 1685 */ << 1686 if (shmem_huge == SHMEM_HUGE_DENY) << 1687 return 0; << 1688 << 1689 /* << 1690 * Only allow inherit orders if the t << 1691 * means non-PMD sized THP can not ov << 1692 */ << 1693 if (shmem_huge == SHMEM_HUGE_FORCE) << 1694 return READ_ONCE(huge_shmem_o << 1695 << 1696 /* Allow mTHP that will be fully with << 1697 order = highest_order(within_size_ord << 1698 while (within_size_orders) { << 1699 index = round_up(index + 1, o << 1700 i_size = round_up(i_size_read << 1701 if (i_size >> PAGE_SHIFT >= i << 1702 mask |= within_size_o << 1703 break; << 1704 } << 1705 << 1706 order = next_order(&within_si << 1707 } << 1708 << 1709 if (vm_flags & VM_HUGEPAGE) << 1710 mask |= READ_ONCE(huge_shmem_ << 1711 << 1712 if (global_huge) << 1713 mask |= READ_ONCE(huge_shmem_ << 1714 << 1715 return THP_ORDERS_ALL_FILE_DEFAULT & << 1716 } << 1717 << 1718 static unsigned long shmem_suitable_orders(st << 1719 st << 1720 un << 1721 { 1619 { 1722 struct vm_area_struct *vma = vmf ? vm !! 1620 struct mempolicy *mpol; 1723 pgoff_t aligned_index; !! 
1621 pgoff_t ilx; 1724 unsigned long pages; !! 1622 struct page *page; 1725 int order; << 1726 << 1727 if (vma) { << 1728 orders = thp_vma_suitable_ord << 1729 if (!orders) << 1730 return 0; << 1731 } << 1732 1623 1733 /* Find the highest order that can ad !! 1624 mpol = shmem_get_pgoff_policy(info, index, HPAGE_PMD_ORDER, &ilx); 1734 order = highest_order(orders); !! 1625 page = alloc_pages_mpol(gfp, HPAGE_PMD_ORDER, mpol, ilx, numa_node_id()); 1735 while (orders) { !! 1626 mpol_cond_put(mpol); 1736 pages = 1UL << order; << 1737 aligned_index = round_down(in << 1738 /* << 1739 * Check for conflict before << 1740 * Conflict might be that a h << 1741 * and added to page cache by << 1742 * is already at least one sm << 1743 * Be careful to retry when a << 1744 * Elsewhere -EEXIST would be << 1745 */ << 1746 if (!xa_find(&mapping->i_page << 1747 aligned_index + << 1748 break; << 1749 order = next_order(&orders, o << 1750 } << 1751 1627 1752 return orders; !! 1628 return page_rmappable_folio(page); 1753 } 1629 } 1754 #else << 1755 static unsigned long shmem_suitable_orders(st << 1756 st << 1757 un << 1758 { << 1759 return 0; << 1760 } << 1761 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ << 1762 1630 1763 static struct folio *shmem_alloc_folio(gfp_t !! 1631 static struct folio *shmem_alloc_folio(gfp_t gfp, 1764 struct shmem_inode_info *info 1632 struct shmem_inode_info *info, pgoff_t index) 1765 { 1633 { 1766 struct mempolicy *mpol; 1634 struct mempolicy *mpol; 1767 pgoff_t ilx; 1635 pgoff_t ilx; 1768 struct folio *folio; !! 1636 struct page *page; 1769 1637 1770 mpol = shmem_get_pgoff_policy(info, i !! 1638 mpol = shmem_get_pgoff_policy(info, index, 0, &ilx); 1771 folio = folio_alloc_mpol(gfp, order, !! 1639 page = alloc_pages_mpol(gfp, 0, mpol, ilx, numa_node_id()); 1772 mpol_cond_put(mpol); 1640 mpol_cond_put(mpol); 1773 1641 1774 return folio; !! 1642 return (struct folio *)page; 1775 } 1643 } 1776 1644 1777 static struct folio *shmem_alloc_and_add_foli !! 1645 static struct folio *shmem_alloc_and_add_folio(gfp_t gfp, 1778 gfp_t gfp, struct inode *inod !! 1646 struct inode *inode, pgoff_t index, 1779 struct mm_struct *fault_mm, u !! 1647 struct mm_struct *fault_mm, bool huge) 1780 { 1648 { 1781 struct address_space *mapping = inode 1649 struct address_space *mapping = inode->i_mapping; 1782 struct shmem_inode_info *info = SHMEM 1650 struct shmem_inode_info *info = SHMEM_I(inode); 1783 unsigned long suitable_orders = 0; !! 1651 struct folio *folio; 1784 struct folio *folio = NULL; << 1785 long pages; 1652 long pages; 1786 int error, order; !! 1653 int error; 1787 1654 1788 if (!IS_ENABLED(CONFIG_TRANSPARENT_HU 1655 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) 1789 orders = 0; !! 1656 huge = false; 1790 1657 1791 if (orders > 0) { !! 1658 if (huge) { 1792 suitable_orders = shmem_suita !! 1659 pages = HPAGE_PMD_NR; 1793 !! 1660 index = round_down(index, HPAGE_PMD_NR); 1794 !! 1661 1795 order = highest_order(suitabl !! 1662 /* 1796 while (suitable_orders) { !! 1663 * Check for conflict before waiting on a huge allocation. 1797 pages = 1UL << order; !! 1664 * Conflict might be that a huge page has just been allocated 1798 index = round_down(in !! 1665 * and added to page cache by a racing thread, or that there 1799 folio = shmem_alloc_f !! 1666 * is already at least one small page in the huge extent. 1800 if (folio) !! 1667 * Be careful to retry when appropriate, but not forever! 1801 goto allocate !! 1668 * Elsewhere -EEXIST would be the right code, but not here. 1802 !! 
1669 */ 1803 if (pages == HPAGE_PM !! 1670 if (xa_find(&mapping->i_pages, &index, 1804 count_vm_even !! 1671 index + HPAGE_PMD_NR - 1, XA_PRESENT)) 1805 count_mthp_stat(order !! 1672 return ERR_PTR(-E2BIG); 1806 order = next_order(&s !! 1673 1807 } !! 1674 folio = shmem_alloc_hugefolio(gfp, info, index); >> 1675 if (!folio) >> 1676 count_vm_event(THP_FILE_FALLBACK); 1808 } else { 1677 } else { 1809 pages = 1; 1678 pages = 1; 1810 folio = shmem_alloc_folio(gfp !! 1679 folio = shmem_alloc_folio(gfp, info, index); 1811 } 1680 } 1812 if (!folio) 1681 if (!folio) 1813 return ERR_PTR(-ENOMEM); 1682 return ERR_PTR(-ENOMEM); 1814 1683 1815 allocated: << 1816 __folio_set_locked(folio); 1684 __folio_set_locked(folio); 1817 __folio_set_swapbacked(folio); 1685 __folio_set_swapbacked(folio); 1818 1686 1819 gfp &= GFP_RECLAIM_MASK; 1687 gfp &= GFP_RECLAIM_MASK; 1820 error = mem_cgroup_charge(folio, faul 1688 error = mem_cgroup_charge(folio, fault_mm, gfp); 1821 if (error) { 1689 if (error) { 1822 if (xa_find(&mapping->i_pages 1690 if (xa_find(&mapping->i_pages, &index, 1823 index + pages 1691 index + pages - 1, XA_PRESENT)) { 1824 error = -EEXIST; 1692 error = -EEXIST; 1825 } else if (pages > 1) { !! 1693 } else if (huge) { 1826 if (pages == HPAGE_PM !! 1694 count_vm_event(THP_FILE_FALLBACK); 1827 count_vm_even !! 1695 count_vm_event(THP_FILE_FALLBACK_CHARGE); 1828 count_vm_even << 1829 } << 1830 count_mthp_stat(folio << 1831 count_mthp_stat(folio << 1832 } 1696 } 1833 goto unlock; 1697 goto unlock; 1834 } 1698 } 1835 1699 1836 error = shmem_add_to_page_cache(folio 1700 error = shmem_add_to_page_cache(folio, mapping, index, NULL, gfp); 1837 if (error) 1701 if (error) 1838 goto unlock; 1702 goto unlock; 1839 1703 1840 error = shmem_inode_acct_blocks(inode 1704 error = shmem_inode_acct_blocks(inode, pages); 1841 if (error) { 1705 if (error) { 1842 struct shmem_sb_info *sbinfo 1706 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 1843 long freed; 1707 long freed; 1844 /* 1708 /* 1845 * Try to reclaim some space 1709 * Try to reclaim some space by splitting a few 1846 * large folios beyond i_size 1710 * large folios beyond i_size on the filesystem. 1847 */ 1711 */ 1848 shmem_unused_huge_shrink(sbin !! 1712 shmem_unused_huge_shrink(sbinfo, NULL, 2); 1849 /* 1713 /* 1850 * And do a shmem_recalc_inod 1714 * And do a shmem_recalc_inode() to account for freed pages: 1851 * except our folio is there 1715 * except our folio is there in cache, so not quite balanced. 
1852 */ 1716 */ 1853 spin_lock(&info->lock); 1717 spin_lock(&info->lock); 1854 freed = pages + info->alloced 1718 freed = pages + info->alloced - info->swapped - 1855 READ_ONCE(mapping->nr 1719 READ_ONCE(mapping->nrpages); 1856 if (freed > 0) 1720 if (freed > 0) 1857 info->alloced -= free 1721 info->alloced -= freed; 1858 spin_unlock(&info->lock); 1722 spin_unlock(&info->lock); 1859 if (freed > 0) 1723 if (freed > 0) 1860 shmem_inode_unacct_bl 1724 shmem_inode_unacct_blocks(inode, freed); 1861 error = shmem_inode_acct_bloc 1725 error = shmem_inode_acct_blocks(inode, pages); 1862 if (error) { 1726 if (error) { 1863 filemap_remove_folio( 1727 filemap_remove_folio(folio); 1864 goto unlock; 1728 goto unlock; 1865 } 1729 } 1866 } 1730 } 1867 1731 1868 shmem_recalc_inode(inode, pages, 0); 1732 shmem_recalc_inode(inode, pages, 0); 1869 folio_add_lru(folio); 1733 folio_add_lru(folio); 1870 return folio; 1734 return folio; 1871 1735 1872 unlock: 1736 unlock: 1873 folio_unlock(folio); 1737 folio_unlock(folio); 1874 folio_put(folio); 1738 folio_put(folio); 1875 return ERR_PTR(error); 1739 return ERR_PTR(error); 1876 } 1740 } 1877 1741 1878 /* 1742 /* 1879 * When a page is moved from swapcache to shm 1743 * When a page is moved from swapcache to shmem filecache (either by the 1880 * usual swapin of shmem_get_folio_gfp(), or 1744 * usual swapin of shmem_get_folio_gfp(), or by the less common swapoff of 1881 * shmem_unuse_inode()), it may have been rea 1745 * shmem_unuse_inode()), it may have been read in earlier from swap, in 1882 * ignorance of the mapping it belongs to. I 1746 * ignorance of the mapping it belongs to. If that mapping has special 1883 * constraints (like the gma500 GEM driver, w 1747 * constraints (like the gma500 GEM driver, which requires RAM below 4GB), 1884 * we may need to copy to a suitable page bef 1748 * we may need to copy to a suitable page before moving to filecache. 1885 * 1749 * 1886 * In a future release, this may well be exte 1750 * In a future release, this may well be extended to respect cpuset and 1887 * NUMA mempolicy, and applied also to anonym 1751 * NUMA mempolicy, and applied also to anonymous pages in do_swap_page(); 1888 * but for now it is a simple matter of zone. 1752 * but for now it is a simple matter of zone. 1889 */ 1753 */ 1890 static bool shmem_should_replace_folio(struct 1754 static bool shmem_should_replace_folio(struct folio *folio, gfp_t gfp) 1891 { 1755 { 1892 return folio_zonenum(folio) > gfp_zon 1756 return folio_zonenum(folio) > gfp_zone(gfp); 1893 } 1757 } 1894 1758 1895 static int shmem_replace_folio(struct folio * 1759 static int shmem_replace_folio(struct folio **foliop, gfp_t gfp, 1896 struct shmem_ !! 1760 struct shmem_inode_info *info, pgoff_t index) 1897 struct vm_are << 1898 { 1761 { 1899 struct folio *new, *old = *foliop; !! 1762 struct folio *old, *new; 1900 swp_entry_t entry = old->swap; !! 1763 struct address_space *swap_mapping; 1901 struct address_space *swap_mapping = !! 1764 swp_entry_t entry; 1902 pgoff_t swap_index = swap_cache_index !! 1765 pgoff_t swap_index; 1903 XA_STATE(xas, &swap_mapping->i_pages, !! 1766 int error; 1904 int nr_pages = folio_nr_pages(old); !! 1767 1905 int error = 0, i; !! 
1768 old = *foliop; >> 1769 entry = old->swap; >> 1770 swap_index = swp_offset(entry); >> 1771 swap_mapping = swap_address_space(entry); 1906 1772 1907 /* 1773 /* 1908 * We have arrived here because our z 1774 * We have arrived here because our zones are constrained, so don't 1909 * limit chance of success by further 1775 * limit chance of success by further cpuset and node constraints. 1910 */ 1776 */ 1911 gfp &= ~GFP_CONSTRAINT_MASK; 1777 gfp &= ~GFP_CONSTRAINT_MASK; 1912 #ifdef CONFIG_TRANSPARENT_HUGEPAGE !! 1778 VM_BUG_ON_FOLIO(folio_test_large(old), old); 1913 if (nr_pages > 1) { !! 1779 new = shmem_alloc_folio(gfp, info, index); 1914 gfp_t huge_gfp = vma_thp_gfp_ << 1915 << 1916 gfp = limit_gfp_mask(huge_gfp << 1917 } << 1918 #endif << 1919 << 1920 new = shmem_alloc_folio(gfp, folio_or << 1921 if (!new) 1780 if (!new) 1922 return -ENOMEM; 1781 return -ENOMEM; 1923 1782 1924 folio_ref_add(new, nr_pages); !! 1783 folio_get(new); 1925 folio_copy(new, old); 1784 folio_copy(new, old); 1926 flush_dcache_folio(new); 1785 flush_dcache_folio(new); 1927 1786 1928 __folio_set_locked(new); 1787 __folio_set_locked(new); 1929 __folio_set_swapbacked(new); 1788 __folio_set_swapbacked(new); 1930 folio_mark_uptodate(new); 1789 folio_mark_uptodate(new); 1931 new->swap = entry; 1790 new->swap = entry; 1932 folio_set_swapcache(new); 1791 folio_set_swapcache(new); 1933 1792 1934 /* Swap cache still stores N entries !! 1793 /* >> 1794 * Our caller will very soon move newpage out of swapcache, but it's >> 1795 * a nice clean interface for us to replace oldpage by newpage there. >> 1796 */ 1935 xa_lock_irq(&swap_mapping->i_pages); 1797 xa_lock_irq(&swap_mapping->i_pages); 1936 for (i = 0; i < nr_pages; i++) { !! 1798 error = shmem_replace_entry(swap_mapping, swap_index, old, new); 1937 void *item = xas_load(&xas); << 1938 << 1939 if (item != old) { << 1940 error = -ENOENT; << 1941 break; << 1942 } << 1943 << 1944 xas_store(&xas, new); << 1945 xas_next(&xas); << 1946 } << 1947 if (!error) { 1799 if (!error) { 1948 mem_cgroup_replace_folio(old, 1800 mem_cgroup_replace_folio(old, new); 1949 __lruvec_stat_mod_folio(new, !! 1801 __lruvec_stat_mod_folio(new, NR_FILE_PAGES, 1); 1950 __lruvec_stat_mod_folio(new, !! 1802 __lruvec_stat_mod_folio(new, NR_SHMEM, 1); 1951 __lruvec_stat_mod_folio(old, !! 1803 __lruvec_stat_mod_folio(old, NR_FILE_PAGES, -1); 1952 __lruvec_stat_mod_folio(old, !! 1804 __lruvec_stat_mod_folio(old, NR_SHMEM, -1); 1953 } 1805 } 1954 xa_unlock_irq(&swap_mapping->i_pages) 1806 xa_unlock_irq(&swap_mapping->i_pages); 1955 1807 1956 if (unlikely(error)) { 1808 if (unlikely(error)) { 1957 /* 1809 /* 1958 * Is this possible? I think !! 1810 * Is this possible? I think not, now that our callers check 1959 * check both the swapcache f !! 1811 * both PageSwapCache and page_private after getting page lock; 1960 * after getting the folio lo !! 1812 * but be defensive. Reverse old to newpage for clear and free. 1961 * Reverse old to newpage for << 1962 */ 1813 */ 1963 old = new; 1814 old = new; 1964 } else { 1815 } else { 1965 folio_add_lru(new); 1816 folio_add_lru(new); 1966 *foliop = new; 1817 *foliop = new; 1967 } 1818 } 1968 1819 1969 folio_clear_swapcache(old); 1820 folio_clear_swapcache(old); 1970 old->private = NULL; 1821 old->private = NULL; 1971 1822 1972 folio_unlock(old); 1823 folio_unlock(old); 1973 /* !! 1824 folio_put_refs(old, 2); 1974 * The old folio are removed from swa << 1975 * reference, as well as one temporar << 1976 * cache. 
<< 1977 */ << 1978 folio_put_refs(old, nr_pages + 1); << 1979 return error; 1825 return error; 1980 } 1826 } 1981 1827 1982 static void shmem_set_folio_swapin_error(stru 1828 static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index, 1983 stru 1829 struct folio *folio, swp_entry_t swap) 1984 { 1830 { 1985 struct address_space *mapping = inode 1831 struct address_space *mapping = inode->i_mapping; 1986 swp_entry_t swapin_error; 1832 swp_entry_t swapin_error; 1987 void *old; 1833 void *old; 1988 int nr_pages; << 1989 1834 1990 swapin_error = make_poisoned_swp_entr 1835 swapin_error = make_poisoned_swp_entry(); 1991 old = xa_cmpxchg_irq(&mapping->i_page 1836 old = xa_cmpxchg_irq(&mapping->i_pages, index, 1992 swp_to_radix_ent 1837 swp_to_radix_entry(swap), 1993 swp_to_radix_ent 1838 swp_to_radix_entry(swapin_error), 0); 1994 if (old != swp_to_radix_entry(swap)) 1839 if (old != swp_to_radix_entry(swap)) 1995 return; 1840 return; 1996 1841 1997 nr_pages = folio_nr_pages(folio); << 1998 folio_wait_writeback(folio); 1842 folio_wait_writeback(folio); 1999 delete_from_swap_cache(folio); 1843 delete_from_swap_cache(folio); 2000 /* 1844 /* 2001 * Don't treat swapin error folio as 1845 * Don't treat swapin error folio as alloced. Otherwise inode->i_blocks 2002 * won't be 0 when inode is released 1846 * won't be 0 when inode is released and thus trigger WARN_ON(i_blocks) 2003 * in shmem_evict_inode(). 1847 * in shmem_evict_inode(). 2004 */ 1848 */ 2005 shmem_recalc_inode(inode, -nr_pages, !! 1849 shmem_recalc_inode(inode, -1, -1); 2006 swap_free_nr(swap, nr_pages); !! 1850 swap_free(swap); 2007 } << 2008 << 2009 static int shmem_split_large_entry(struct ino << 2010 swp_entry_ << 2011 { << 2012 struct address_space *mapping = inode << 2013 XA_STATE_ORDER(xas, &mapping->i_pages << 2014 void *alloced_shadow = NULL; << 2015 int alloced_order = 0, i; << 2016 << 2017 /* Convert user data gfp flags to xar << 2018 gfp &= GFP_RECLAIM_MASK; << 2019 << 2020 for (;;) { << 2021 int order = -1, split_order = << 2022 void *old = NULL; << 2023 << 2024 xas_lock_irq(&xas); << 2025 old = xas_load(&xas); << 2026 if (!xa_is_value(old) || swp_ << 2027 xas_set_err(&xas, -EE << 2028 goto unlock; << 2029 } << 2030 << 2031 order = xas_get_order(&xas); << 2032 << 2033 /* Swap entry may have change << 2034 if (alloced_order && << 2035 (old != alloced_shadow || << 2036 xas_destroy(&xas); << 2037 alloced_order = 0; << 2038 } << 2039 << 2040 /* Try to split large swap en << 2041 if (order > 0) { << 2042 if (!alloced_order) { << 2043 split_order = << 2044 goto unlock; << 2045 } << 2046 xas_split(&xas, old, << 2047 << 2048 /* << 2049 * Re-set the swap en << 2050 * offset of the orig << 2051 */ << 2052 for (i = 0; i < 1 << << 2053 pgoff_t align << 2054 swp_entry_t t << 2055 << 2056 tmp = swp_ent << 2057 __xa_store(&m << 2058 sw << 2059 } << 2060 } << 2061 << 2062 unlock: << 2063 xas_unlock_irq(&xas); << 2064 << 2065 /* split needed, alloc here a << 2066 if (split_order) { << 2067 xas_split_alloc(&xas, << 2068 if (xas_error(&xas)) << 2069 goto error; << 2070 alloced_shadow = old; << 2071 alloced_order = split << 2072 xas_reset(&xas); << 2073 continue; << 2074 } << 2075 << 2076 if (!xas_nomem(&xas, gfp)) << 2077 break; << 2078 } << 2079 << 2080 error: << 2081 if (xas_error(&xas)) << 2082 return xas_error(&xas); << 2083 << 2084 return alloced_order; << 2085 } 1851 } 2086 1852 2087 /* 1853 /* 2088 * Swap in the folio pointed to by *foliop. 1854 * Swap in the folio pointed to by *foliop. 
2089 * Caller has to make sure that *foliop conta 1855 * Caller has to make sure that *foliop contains a valid swapped folio. 2090 * Returns 0 and the folio in foliop if succe 1856 * Returns 0 and the folio in foliop if success. On failure, returns the 2091 * error code and NULL in *foliop. 1857 * error code and NULL in *foliop. 2092 */ 1858 */ 2093 static int shmem_swapin_folio(struct inode *i 1859 static int shmem_swapin_folio(struct inode *inode, pgoff_t index, 2094 struct folio **f 1860 struct folio **foliop, enum sgp_type sgp, 2095 gfp_t gfp, struc !! 1861 gfp_t gfp, struct mm_struct *fault_mm, 2096 vm_fault_t *faul 1862 vm_fault_t *fault_type) 2097 { 1863 { 2098 struct address_space *mapping = inode 1864 struct address_space *mapping = inode->i_mapping; 2099 struct mm_struct *fault_mm = vma ? vm << 2100 struct shmem_inode_info *info = SHMEM 1865 struct shmem_inode_info *info = SHMEM_I(inode); 2101 struct swap_info_struct *si; 1866 struct swap_info_struct *si; 2102 struct folio *folio = NULL; 1867 struct folio *folio = NULL; 2103 swp_entry_t swap; 1868 swp_entry_t swap; 2104 int error, nr_pages; !! 1869 int error; 2105 1870 2106 VM_BUG_ON(!*foliop || !xa_is_value(*f 1871 VM_BUG_ON(!*foliop || !xa_is_value(*foliop)); 2107 swap = radix_to_swp_entry(*foliop); 1872 swap = radix_to_swp_entry(*foliop); 2108 *foliop = NULL; 1873 *foliop = NULL; 2109 1874 2110 if (is_poisoned_swp_entry(swap)) 1875 if (is_poisoned_swp_entry(swap)) 2111 return -EIO; 1876 return -EIO; 2112 1877 2113 si = get_swap_device(swap); 1878 si = get_swap_device(swap); 2114 if (!si) { 1879 if (!si) { 2115 if (!shmem_confirm_swap(mappi 1880 if (!shmem_confirm_swap(mapping, index, swap)) 2116 return -EEXIST; 1881 return -EEXIST; 2117 else 1882 else 2118 return -EINVAL; 1883 return -EINVAL; 2119 } 1884 } 2120 1885 2121 /* Look it up and read it in.. */ 1886 /* Look it up and read it in.. */ 2122 folio = swap_cache_get_folio(swap, NU 1887 folio = swap_cache_get_folio(swap, NULL, 0); 2123 if (!folio) { 1888 if (!folio) { 2124 int split_order; << 2125 << 2126 /* Or update major stats only 1889 /* Or update major stats only when swapin succeeds?? */ 2127 if (fault_type) { 1890 if (fault_type) { 2128 *fault_type |= VM_FAU 1891 *fault_type |= VM_FAULT_MAJOR; 2129 count_vm_event(PGMAJF 1892 count_vm_event(PGMAJFAULT); 2130 count_memcg_event_mm( 1893 count_memcg_event_mm(fault_mm, PGMAJFAULT); 2131 } 1894 } 2132 << 2133 /* << 2134 * Now swap device can only s << 2135 * should split the large swa << 2136 * if necessary. << 2137 */ << 2138 split_order = shmem_split_lar << 2139 if (split_order < 0) { << 2140 error = split_order; << 2141 goto failed; << 2142 } << 2143 << 2144 /* << 2145 * If the large swap entry ha << 2146 * necessary to recalculate t << 2147 * the old order alignment. 
<< 2148 */ << 2149 if (split_order > 0) { << 2150 pgoff_t offset = inde << 2151 << 2152 swap = swp_entry(swp_ << 2153 } << 2154 << 2155 /* Here we actually start the 1895 /* Here we actually start the io */ 2156 folio = shmem_swapin_cluster( 1896 folio = shmem_swapin_cluster(swap, gfp, info, index); 2157 if (!folio) { 1897 if (!folio) { 2158 error = -ENOMEM; 1898 error = -ENOMEM; 2159 goto failed; 1899 goto failed; 2160 } 1900 } 2161 } 1901 } 2162 1902 2163 /* We have to do this with folio lock 1903 /* We have to do this with folio locked to prevent races */ 2164 folio_lock(folio); 1904 folio_lock(folio); 2165 if (!folio_test_swapcache(folio) || 1905 if (!folio_test_swapcache(folio) || 2166 folio->swap.val != swap.val || 1906 folio->swap.val != swap.val || 2167 !shmem_confirm_swap(mapping, inde 1907 !shmem_confirm_swap(mapping, index, swap)) { 2168 error = -EEXIST; 1908 error = -EEXIST; 2169 goto unlock; 1909 goto unlock; 2170 } 1910 } 2171 if (!folio_test_uptodate(folio)) { 1911 if (!folio_test_uptodate(folio)) { 2172 error = -EIO; 1912 error = -EIO; 2173 goto failed; 1913 goto failed; 2174 } 1914 } 2175 folio_wait_writeback(folio); 1915 folio_wait_writeback(folio); 2176 nr_pages = folio_nr_pages(folio); << 2177 1916 2178 /* 1917 /* 2179 * Some architectures may have to res 1918 * Some architectures may have to restore extra metadata to the 2180 * folio after reading from swap. 1919 * folio after reading from swap. 2181 */ 1920 */ 2182 arch_swap_restore(folio_swap(swap, fo !! 1921 arch_swap_restore(swap, folio); 2183 1922 2184 if (shmem_should_replace_folio(folio, 1923 if (shmem_should_replace_folio(folio, gfp)) { 2185 error = shmem_replace_folio(& !! 1924 error = shmem_replace_folio(&folio, gfp, info, index); 2186 if (error) 1925 if (error) 2187 goto failed; 1926 goto failed; 2188 } 1927 } 2189 1928 2190 error = shmem_add_to_page_cache(folio !! 1929 error = shmem_add_to_page_cache(folio, mapping, index, 2191 round << 2192 swp_t 1930 swp_to_radix_entry(swap), gfp); 2193 if (error) 1931 if (error) 2194 goto failed; 1932 goto failed; 2195 1933 2196 shmem_recalc_inode(inode, 0, -nr_page !! 1934 shmem_recalc_inode(inode, 0, -1); 2197 1935 2198 if (sgp == SGP_WRITE) 1936 if (sgp == SGP_WRITE) 2199 folio_mark_accessed(folio); 1937 folio_mark_accessed(folio); 2200 1938 2201 delete_from_swap_cache(folio); 1939 delete_from_swap_cache(folio); 2202 folio_mark_dirty(folio); 1940 folio_mark_dirty(folio); 2203 swap_free_nr(swap, nr_pages); !! 1941 swap_free(swap); 2204 put_swap_device(si); 1942 put_swap_device(si); 2205 1943 2206 *foliop = folio; 1944 *foliop = folio; 2207 return 0; 1945 return 0; 2208 failed: 1946 failed: 2209 if (!shmem_confirm_swap(mapping, inde 1947 if (!shmem_confirm_swap(mapping, index, swap)) 2210 error = -EEXIST; 1948 error = -EEXIST; 2211 if (error == -EIO) 1949 if (error == -EIO) 2212 shmem_set_folio_swapin_error( 1950 shmem_set_folio_swapin_error(inode, index, folio, swap); 2213 unlock: 1951 unlock: 2214 if (folio) { 1952 if (folio) { 2215 folio_unlock(folio); 1953 folio_unlock(folio); 2216 folio_put(folio); 1954 folio_put(folio); 2217 } 1955 } 2218 put_swap_device(si); 1956 put_swap_device(si); 2219 1957 2220 return error; 1958 return error; 2221 } 1959 } 2222 1960 2223 /* 1961 /* 2224 * shmem_get_folio_gfp - find page in cache, 1962 * shmem_get_folio_gfp - find page in cache, or get from swap, or allocate 2225 * 1963 * 2226 * If we allocate a new one we do not mark it 1964 * If we allocate a new one we do not mark it dirty. That's up to the 2227 * vm. 
If we swap it in we mark it dirty sinc 1965 * vm. If we swap it in we mark it dirty since we also free the swap 2228 * entry since a page cannot live in both the 1966 * entry since a page cannot live in both the swap and page cache. 2229 * 1967 * 2230 * vmf and fault_type are only supplied by sh 1968 * vmf and fault_type are only supplied by shmem_fault: otherwise they are NULL. 2231 */ 1969 */ 2232 static int shmem_get_folio_gfp(struct inode * 1970 static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index, 2233 loff_t write_end, struct foli !! 1971 struct folio **foliop, enum sgp_type sgp, gfp_t gfp, 2234 gfp_t gfp, struct vm_fault *v !! 1972 struct vm_fault *vmf, vm_fault_t *fault_type) 2235 { 1973 { 2236 struct vm_area_struct *vma = vmf ? vm 1974 struct vm_area_struct *vma = vmf ? vmf->vma : NULL; 2237 struct mm_struct *fault_mm; 1975 struct mm_struct *fault_mm; 2238 struct folio *folio; 1976 struct folio *folio; 2239 int error; 1977 int error; 2240 bool alloced; 1978 bool alloced; 2241 unsigned long orders = 0; << 2242 1979 2243 if (WARN_ON_ONCE(!shmem_mapping(inode 1980 if (WARN_ON_ONCE(!shmem_mapping(inode->i_mapping))) 2244 return -EINVAL; 1981 return -EINVAL; 2245 1982 2246 if (index > (MAX_LFS_FILESIZE >> PAGE 1983 if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT)) 2247 return -EFBIG; 1984 return -EFBIG; 2248 repeat: 1985 repeat: 2249 if (sgp <= SGP_CACHE && 1986 if (sgp <= SGP_CACHE && 2250 ((loff_t)index << PAGE_SHIFT) >= 1987 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) 2251 return -EINVAL; 1988 return -EINVAL; 2252 1989 2253 alloced = false; 1990 alloced = false; 2254 fault_mm = vma ? vma->vm_mm : NULL; 1991 fault_mm = vma ? vma->vm_mm : NULL; 2255 1992 2256 folio = filemap_get_entry(inode->i_ma 1993 folio = filemap_get_entry(inode->i_mapping, index); 2257 if (folio && vma && userfaultfd_minor 1994 if (folio && vma && userfaultfd_minor(vma)) { 2258 if (!xa_is_value(folio)) 1995 if (!xa_is_value(folio)) 2259 folio_put(folio); 1996 folio_put(folio); 2260 *fault_type = handle_userfaul 1997 *fault_type = handle_userfault(vmf, VM_UFFD_MINOR); 2261 return 0; 1998 return 0; 2262 } 1999 } 2263 2000 2264 if (xa_is_value(folio)) { 2001 if (xa_is_value(folio)) { 2265 error = shmem_swapin_folio(in 2002 error = shmem_swapin_folio(inode, index, &folio, 2266 sg !! 2003 sgp, gfp, fault_mm, fault_type); 2267 if (error == -EEXIST) 2004 if (error == -EEXIST) 2268 goto repeat; 2005 goto repeat; 2269 2006 2270 *foliop = folio; 2007 *foliop = folio; 2271 return error; 2008 return error; 2272 } 2009 } 2273 2010 2274 if (folio) { 2011 if (folio) { 2275 folio_lock(folio); 2012 folio_lock(folio); 2276 2013 2277 /* Has the folio been truncat 2014 /* Has the folio been truncated or swapped out? */ 2278 if (unlikely(folio->mapping ! 
2015 if (unlikely(folio->mapping != inode->i_mapping)) { 2279 folio_unlock(folio); 2016 folio_unlock(folio); 2280 folio_put(folio); 2017 folio_put(folio); 2281 goto repeat; 2018 goto repeat; 2282 } 2019 } 2283 if (sgp == SGP_WRITE) 2020 if (sgp == SGP_WRITE) 2284 folio_mark_accessed(f 2021 folio_mark_accessed(folio); 2285 if (folio_test_uptodate(folio 2022 if (folio_test_uptodate(folio)) 2286 goto out; 2023 goto out; 2287 /* fallocated folio */ 2024 /* fallocated folio */ 2288 if (sgp != SGP_READ) 2025 if (sgp != SGP_READ) 2289 goto clear; 2026 goto clear; 2290 folio_unlock(folio); 2027 folio_unlock(folio); 2291 folio_put(folio); 2028 folio_put(folio); 2292 } 2029 } 2293 2030 2294 /* 2031 /* 2295 * SGP_READ: succeed on hole, with NU 2032 * SGP_READ: succeed on hole, with NULL folio, letting caller zero. 2296 * SGP_NOALLOC: fail on hole, with NU 2033 * SGP_NOALLOC: fail on hole, with NULL folio, letting caller fail. 2297 */ 2034 */ 2298 *foliop = NULL; 2035 *foliop = NULL; 2299 if (sgp == SGP_READ) 2036 if (sgp == SGP_READ) 2300 return 0; 2037 return 0; 2301 if (sgp == SGP_NOALLOC) 2038 if (sgp == SGP_NOALLOC) 2302 return -ENOENT; 2039 return -ENOENT; 2303 2040 2304 /* 2041 /* 2305 * Fast cache lookup and swap lookup 2042 * Fast cache lookup and swap lookup did not find it: allocate. 2306 */ 2043 */ 2307 2044 2308 if (vma && userfaultfd_missing(vma)) 2045 if (vma && userfaultfd_missing(vma)) { 2309 *fault_type = handle_userfaul 2046 *fault_type = handle_userfault(vmf, VM_UFFD_MISSING); 2310 return 0; 2047 return 0; 2311 } 2048 } 2312 2049 2313 /* Find hugepage orders that are allo !! 2050 if (shmem_is_huge(inode, index, false, fault_mm, 2314 orders = shmem_allowable_huge_orders( !! 2051 vma ? vma->vm_flags : 0)) { 2315 if (orders > 0) { << 2316 gfp_t huge_gfp; 2052 gfp_t huge_gfp; 2317 2053 2318 huge_gfp = vma_thp_gfp_mask(v 2054 huge_gfp = vma_thp_gfp_mask(vma); 2319 huge_gfp = limit_gfp_mask(hug 2055 huge_gfp = limit_gfp_mask(huge_gfp, gfp); 2320 folio = shmem_alloc_and_add_f !! 2056 folio = shmem_alloc_and_add_folio(huge_gfp, 2321 inode, index, !! 2057 inode, index, fault_mm, true); 2322 if (!IS_ERR(folio)) { 2058 if (!IS_ERR(folio)) { 2323 if (folio_test_pmd_ma !! 2059 count_vm_event(THP_FILE_ALLOC); 2324 count_vm_even << 2325 count_mthp_stat(folio << 2326 goto alloced; 2060 goto alloced; 2327 } 2061 } 2328 if (PTR_ERR(folio) == -EEXIST 2062 if (PTR_ERR(folio) == -EEXIST) 2329 goto repeat; 2063 goto repeat; 2330 } 2064 } 2331 2065 2332 folio = shmem_alloc_and_add_folio(vmf !! 2066 folio = shmem_alloc_and_add_folio(gfp, inode, index, fault_mm, false); 2333 if (IS_ERR(folio)) { 2067 if (IS_ERR(folio)) { 2334 error = PTR_ERR(folio); 2068 error = PTR_ERR(folio); 2335 if (error == -EEXIST) 2069 if (error == -EEXIST) 2336 goto repeat; 2070 goto repeat; 2337 folio = NULL; 2071 folio = NULL; 2338 goto unlock; 2072 goto unlock; 2339 } 2073 } 2340 2074 2341 alloced: 2075 alloced: 2342 alloced = true; 2076 alloced = true; 2343 if (folio_test_large(folio) && !! 2077 if (folio_test_pmd_mappable(folio) && 2344 DIV_ROUND_UP(i_size_read(inode), 2078 DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) < 2345 folio !! 2079 folio_next_index(folio) - 1) { 2346 struct shmem_sb_info *sbinfo 2080 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 2347 struct shmem_inode_info *info 2081 struct shmem_inode_info *info = SHMEM_I(inode); 2348 /* 2082 /* 2349 * Part of the large folio is 2083 * Part of the large folio is beyond i_size: subject 2350 * to shrink under memory pre 2084 * to shrink under memory pressure. 
2351 */ 2085 */ 2352 spin_lock(&sbinfo->shrinklist 2086 spin_lock(&sbinfo->shrinklist_lock); 2353 /* 2087 /* 2354 * _careful to defend against 2088 * _careful to defend against unlocked access to 2355 * ->shrink_list in shmem_unu 2089 * ->shrink_list in shmem_unused_huge_shrink() 2356 */ 2090 */ 2357 if (list_empty_careful(&info- 2091 if (list_empty_careful(&info->shrinklist)) { 2358 list_add_tail(&info-> 2092 list_add_tail(&info->shrinklist, 2359 &sbinfo 2093 &sbinfo->shrinklist); 2360 sbinfo->shrinklist_le 2094 sbinfo->shrinklist_len++; 2361 } 2095 } 2362 spin_unlock(&sbinfo->shrinkli 2096 spin_unlock(&sbinfo->shrinklist_lock); 2363 } 2097 } 2364 2098 2365 if (sgp == SGP_WRITE) 2099 if (sgp == SGP_WRITE) 2366 folio_set_referenced(folio); 2100 folio_set_referenced(folio); 2367 /* 2101 /* 2368 * Let SGP_FALLOC use the SGP_WRITE o 2102 * Let SGP_FALLOC use the SGP_WRITE optimization on a new folio. 2369 */ 2103 */ 2370 if (sgp == SGP_FALLOC) 2104 if (sgp == SGP_FALLOC) 2371 sgp = SGP_WRITE; 2105 sgp = SGP_WRITE; 2372 clear: 2106 clear: 2373 /* 2107 /* 2374 * Let SGP_WRITE caller clear ends if 2108 * Let SGP_WRITE caller clear ends if write does not fill folio; 2375 * but SGP_FALLOC on a folio fallocat 2109 * but SGP_FALLOC on a folio fallocated earlier must initialize 2376 * it now, lest undo on failure cance 2110 * it now, lest undo on failure cancel our earlier guarantee. 2377 */ 2111 */ 2378 if (sgp != SGP_WRITE && !folio_test_u 2112 if (sgp != SGP_WRITE && !folio_test_uptodate(folio)) { 2379 long i, n = folio_nr_pages(fo 2113 long i, n = folio_nr_pages(folio); 2380 2114 2381 for (i = 0; i < n; i++) 2115 for (i = 0; i < n; i++) 2382 clear_highpage(folio_ 2116 clear_highpage(folio_page(folio, i)); 2383 flush_dcache_folio(folio); 2117 flush_dcache_folio(folio); 2384 folio_mark_uptodate(folio); 2118 folio_mark_uptodate(folio); 2385 } 2119 } 2386 2120 2387 /* Perhaps the file has been truncate 2121 /* Perhaps the file has been truncated since we checked */ 2388 if (sgp <= SGP_CACHE && 2122 if (sgp <= SGP_CACHE && 2389 ((loff_t)index << PAGE_SHIFT) >= 2123 ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) { 2390 error = -EINVAL; 2124 error = -EINVAL; 2391 goto unlock; 2125 goto unlock; 2392 } 2126 } 2393 out: 2127 out: 2394 *foliop = folio; 2128 *foliop = folio; 2395 return 0; 2129 return 0; 2396 2130 2397 /* 2131 /* 2398 * Error recovery. 2132 * Error recovery. 2399 */ 2133 */ 2400 unlock: 2134 unlock: 2401 if (alloced) 2135 if (alloced) 2402 filemap_remove_folio(folio); 2136 filemap_remove_folio(folio); 2403 shmem_recalc_inode(inode, 0, 0); 2137 shmem_recalc_inode(inode, 0, 0); 2404 if (folio) { 2138 if (folio) { 2405 folio_unlock(folio); 2139 folio_unlock(folio); 2406 folio_put(folio); 2140 folio_put(folio); 2407 } 2141 } 2408 return error; 2142 return error; 2409 } 2143 } 2410 2144 2411 /** 2145 /** 2412 * shmem_get_folio - find, and lock a shmem f 2146 * shmem_get_folio - find, and lock a shmem folio. 2413 * @inode: inode to search 2147 * @inode: inode to search 2414 * @index: the page index. 2148 * @index: the page index. 2415 * @write_end: end of a write, could extend << 2416 * @foliop: pointer to the folio if found 2149 * @foliop: pointer to the folio if found 2417 * @sgp: SGP_* flags to control behavi 2150 * @sgp: SGP_* flags to control behavior 2418 * 2151 * 2419 * Looks up the page cache entry at @inode & 2152 * Looks up the page cache entry at @inode & @index. 
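 *
 * A typical in-kernel usage sketch (error handling trimmed, and the exact
 * argument list has varied between kernel releases, so treat this as
 * illustrative rather than definitive):
 *
 *	struct folio *folio;
 *	int err = shmem_get_folio(inode, index, &folio, SGP_CACHE);
 *
 *	if (!err && folio) {
 *		... modify the folio contents ...
 *		folio_mark_dirty(folio);
 *		folio_unlock(folio);
 *		folio_put(folio);
 *	}
 *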
If a folio is 2420 * present, it is returned locked with an inc 2153 * present, it is returned locked with an increased refcount. 2421 * 2154 * 2422 * If the caller modifies data in the folio, 2155 * If the caller modifies data in the folio, it must call folio_mark_dirty() 2423 * before unlocking the folio to ensure that 2156 * before unlocking the folio to ensure that the folio is not reclaimed. 2424 * There is no need to reserve space before c 2157 * There is no need to reserve space before calling folio_mark_dirty(). 2425 * 2158 * 2426 * When no folio is found, the behavior depen 2159 * When no folio is found, the behavior depends on @sgp: 2427 * - for SGP_READ, *@foliop is %NULL and 0 i 2160 * - for SGP_READ, *@foliop is %NULL and 0 is returned 2428 * - for SGP_NOALLOC, *@foliop is %NULL and 2161 * - for SGP_NOALLOC, *@foliop is %NULL and -ENOENT is returned 2429 * - for all other flags a new folio is allo 2162 * - for all other flags a new folio is allocated, inserted into the 2430 * page cache and returned locked in @foli 2163 * page cache and returned locked in @foliop. 2431 * 2164 * 2432 * Context: May sleep. 2165 * Context: May sleep. 2433 * Return: 0 if successful, else a negative e 2166 * Return: 0 if successful, else a negative error code. 2434 */ 2167 */ 2435 int shmem_get_folio(struct inode *inode, pgof !! 2168 int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop, 2436 struct folio **foliop, en !! 2169 enum sgp_type sgp) 2437 { 2170 { 2438 return shmem_get_folio_gfp(inode, ind !! 2171 return shmem_get_folio_gfp(inode, index, foliop, sgp, 2439 mapping_gfp_mask(inod 2172 mapping_gfp_mask(inode->i_mapping), NULL, NULL); 2440 } 2173 } 2441 EXPORT_SYMBOL_GPL(shmem_get_folio); 2174 EXPORT_SYMBOL_GPL(shmem_get_folio); 2442 2175 2443 /* 2176 /* 2444 * This is like autoremove_wake_function, but 2177 * This is like autoremove_wake_function, but it removes the wait queue 2445 * entry unconditionally - even if something 2178 * entry unconditionally - even if something else had already woken the 2446 * target. 2179 * target. 2447 */ 2180 */ 2448 static int synchronous_wake_function(wait_que 2181 static int synchronous_wake_function(wait_queue_entry_t *wait, 2449 unsigned int mode, in 2182 unsigned int mode, int sync, void *key) 2450 { 2183 { 2451 int ret = default_wake_function(wait, 2184 int ret = default_wake_function(wait, mode, sync, key); 2452 list_del_init(&wait->entry); 2185 list_del_init(&wait->entry); 2453 return ret; 2186 return ret; 2454 } 2187 } 2455 2188 2456 /* 2189 /* 2457 * Trinity finds that probing a hole which tm 2190 * Trinity finds that probing a hole which tmpfs is punching can 2458 * prevent the hole-punch from ever completin 2191 * prevent the hole-punch from ever completing: which in turn 2459 * locks writers out with its hold on i_rwsem 2192 * locks writers out with its hold on i_rwsem. So refrain from 2460 * faulting pages into the hole while it's be 2193 * faulting pages into the hole while it's being punched. Although 2461 * shmem_undo_range() does remove the additio 2194 * shmem_undo_range() does remove the additions, it may be unable to 2462 * keep up, as each new page needs its own un 2195 * keep up, as each new page needs its own unmap_mapping_range() call, 2463 * and the i_mmap tree grows ever slower to s 2196 * and the i_mmap tree grows ever slower to scan if new vmas are added. 
 *
 * It does not matter if we sometimes reach this check just before the
 * hole-punch begins, so that one fault then races with the punch:
 * we just need to make racing faults a rare case.
 *
 * The implementation below would be much simpler if we just used a
 * standard mutex or completion: but we cannot take i_rwsem in fault,
 * and bloating every shmem inode for this unlikely case would be sad.
 */
static vm_fault_t shmem_falloc_wait(struct vm_fault *vmf, struct inode *inode)
{
	struct shmem_falloc *shmem_falloc;
	struct file *fpin = NULL;
	vm_fault_t ret = 0;

	spin_lock(&inode->i_lock);
	shmem_falloc = inode->i_private;
	if (shmem_falloc &&
	    shmem_falloc->waitq &&
	    vmf->pgoff >= shmem_falloc->start &&
	    vmf->pgoff < shmem_falloc->next) {
		wait_queue_head_t *shmem_falloc_waitq;
		DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);

		ret = VM_FAULT_NOPAGE;
		fpin = maybe_unlock_mmap_for_io(vmf, NULL);
		shmem_falloc_waitq = shmem_falloc->waitq;
		prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
				TASK_UNINTERRUPTIBLE);
		spin_unlock(&inode->i_lock);
		schedule();

		/*
		 * shmem_falloc_waitq points into the shmem_fallocate()
		 * stack of the hole-punching task: shmem_falloc_waitq
		 * is usually invalid by the time we reach here, but
		 * finish_wait() does not dereference it in that case;
		 * though i_lock needed lest racing with wake_up_all().
2502 */ 2235 */ 2503 spin_lock(&inode->i_lock); 2236 spin_lock(&inode->i_lock); 2504 finish_wait(shmem_falloc_wait 2237 finish_wait(shmem_falloc_waitq, &shmem_fault_wait); 2505 } 2238 } 2506 spin_unlock(&inode->i_lock); 2239 spin_unlock(&inode->i_lock); 2507 if (fpin) { 2240 if (fpin) { 2508 fput(fpin); 2241 fput(fpin); 2509 ret = VM_FAULT_RETRY; 2242 ret = VM_FAULT_RETRY; 2510 } 2243 } 2511 return ret; 2244 return ret; 2512 } 2245 } 2513 2246 2514 static vm_fault_t shmem_fault(struct vm_fault 2247 static vm_fault_t shmem_fault(struct vm_fault *vmf) 2515 { 2248 { 2516 struct inode *inode = file_inode(vmf- 2249 struct inode *inode = file_inode(vmf->vma->vm_file); 2517 gfp_t gfp = mapping_gfp_mask(inode->i 2250 gfp_t gfp = mapping_gfp_mask(inode->i_mapping); 2518 struct folio *folio = NULL; 2251 struct folio *folio = NULL; 2519 vm_fault_t ret = 0; 2252 vm_fault_t ret = 0; 2520 int err; 2253 int err; 2521 2254 2522 /* 2255 /* 2523 * Trinity finds that probing a hole 2256 * Trinity finds that probing a hole which tmpfs is punching can 2524 * prevent the hole-punch from ever c 2257 * prevent the hole-punch from ever completing: noted in i_private. 2525 */ 2258 */ 2526 if (unlikely(inode->i_private)) { 2259 if (unlikely(inode->i_private)) { 2527 ret = shmem_falloc_wait(vmf, 2260 ret = shmem_falloc_wait(vmf, inode); 2528 if (ret) 2261 if (ret) 2529 return ret; 2262 return ret; 2530 } 2263 } 2531 2264 2532 WARN_ON_ONCE(vmf->page != NULL); 2265 WARN_ON_ONCE(vmf->page != NULL); 2533 err = shmem_get_folio_gfp(inode, vmf- !! 2266 err = shmem_get_folio_gfp(inode, vmf->pgoff, &folio, SGP_CACHE, 2534 gfp, vmf, & 2267 gfp, vmf, &ret); 2535 if (err) 2268 if (err) 2536 return vmf_error(err); 2269 return vmf_error(err); 2537 if (folio) { 2270 if (folio) { 2538 vmf->page = folio_file_page(f 2271 vmf->page = folio_file_page(folio, vmf->pgoff); 2539 ret |= VM_FAULT_LOCKED; 2272 ret |= VM_FAULT_LOCKED; 2540 } 2273 } 2541 return ret; 2274 return ret; 2542 } 2275 } 2543 2276 2544 unsigned long shmem_get_unmapped_area(struct 2277 unsigned long shmem_get_unmapped_area(struct file *file, 2545 unsigne 2278 unsigned long uaddr, unsigned long len, 2546 unsigne 2279 unsigned long pgoff, unsigned long flags) 2547 { 2280 { >> 2281 unsigned long (*get_area)(struct file *, >> 2282 unsigned long, unsigned long, unsigned long, unsigned long); 2548 unsigned long addr; 2283 unsigned long addr; 2549 unsigned long offset; 2284 unsigned long offset; 2550 unsigned long inflated_len; 2285 unsigned long inflated_len; 2551 unsigned long inflated_addr; 2286 unsigned long inflated_addr; 2552 unsigned long inflated_offset; 2287 unsigned long inflated_offset; 2553 unsigned long hpage_size; << 2554 2288 2555 if (len > TASK_SIZE) 2289 if (len > TASK_SIZE) 2556 return -ENOMEM; 2290 return -ENOMEM; 2557 2291 2558 addr = mm_get_unmapped_area(current-> !! 2292 get_area = current->mm->get_unmapped_area; 2559 flags); !! 
2293 addr = get_area(file, uaddr, len, pgoff, flags); 2560 2294 2561 if (!IS_ENABLED(CONFIG_TRANSPARENT_HU 2295 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) 2562 return addr; 2296 return addr; 2563 if (IS_ERR_VALUE(addr)) 2297 if (IS_ERR_VALUE(addr)) 2564 return addr; 2298 return addr; 2565 if (addr & ~PAGE_MASK) 2299 if (addr & ~PAGE_MASK) 2566 return addr; 2300 return addr; 2567 if (addr > TASK_SIZE - len) 2301 if (addr > TASK_SIZE - len) 2568 return addr; 2302 return addr; 2569 2303 2570 if (shmem_huge == SHMEM_HUGE_DENY) 2304 if (shmem_huge == SHMEM_HUGE_DENY) 2571 return addr; 2305 return addr; >> 2306 if (len < HPAGE_PMD_SIZE) >> 2307 return addr; 2572 if (flags & MAP_FIXED) 2308 if (flags & MAP_FIXED) 2573 return addr; 2309 return addr; 2574 /* 2310 /* 2575 * Our priority is to support MAP_SHA 2311 * Our priority is to support MAP_SHARED mapped hugely; 2576 * and support MAP_PRIVATE mapped hug 2312 * and support MAP_PRIVATE mapped hugely too, until it is COWed. 2577 * But if caller specified an address 2313 * But if caller specified an address hint and we allocated area there 2578 * successfully, respect that as befo 2314 * successfully, respect that as before. 2579 */ 2315 */ 2580 if (uaddr == addr) 2316 if (uaddr == addr) 2581 return addr; 2317 return addr; 2582 2318 2583 hpage_size = HPAGE_PMD_SIZE; << 2584 if (shmem_huge != SHMEM_HUGE_FORCE) { 2319 if (shmem_huge != SHMEM_HUGE_FORCE) { 2585 struct super_block *sb; 2320 struct super_block *sb; 2586 unsigned long __maybe_unused << 2587 int order = 0; << 2588 2321 2589 if (file) { 2322 if (file) { 2590 VM_BUG_ON(file->f_op 2323 VM_BUG_ON(file->f_op != &shmem_file_operations); 2591 sb = file_inode(file) 2324 sb = file_inode(file)->i_sb; 2592 } else { 2325 } else { 2593 /* 2326 /* 2594 * Called directly fr 2327 * Called directly from mm/mmap.c, or drivers/char/mem.c 2595 * for "/dev/zero", t 2328 * for "/dev/zero", to create a shared anonymous object. 2596 */ 2329 */ 2597 if (IS_ERR(shm_mnt)) 2330 if (IS_ERR(shm_mnt)) 2598 return addr; 2331 return addr; 2599 sb = shm_mnt->mnt_sb; 2332 sb = shm_mnt->mnt_sb; 2600 << 2601 /* << 2602 * Find the highest m << 2603 * provide a suitable << 2604 */ << 2605 #ifdef CONFIG_TRANSPARENT_HUGEPAGE << 2606 hpage_orders = READ_O << 2607 hpage_orders |= READ_ << 2608 hpage_orders |= READ_ << 2609 if (SHMEM_SB(sb)->hug << 2610 hpage_orders << 2611 << 2612 if (hpage_orders > 0) << 2613 order = highe << 2614 hpage_size = << 2615 } << 2616 #endif << 2617 } 2333 } 2618 if (SHMEM_SB(sb)->huge == SHM !! 2334 if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER) 2619 return addr; 2335 return addr; 2620 } 2336 } 2621 2337 2622 if (len < hpage_size) !! 2338 offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1); >> 2339 if (offset && offset + len < 2 * HPAGE_PMD_SIZE) 2623 return addr; 2340 return addr; 2624 !! 2341 if ((addr & (HPAGE_PMD_SIZE-1)) == offset) 2625 offset = (pgoff << PAGE_SHIFT) & (hpa << 2626 if (offset && offset + len < 2 * hpag << 2627 return addr; << 2628 if ((addr & (hpage_size - 1)) == offs << 2629 return addr; 2342 return addr; 2630 2343 2631 inflated_len = len + hpage_size - PAG !! 2344 inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE; 2632 if (inflated_len > TASK_SIZE) 2345 if (inflated_len > TASK_SIZE) 2633 return addr; 2346 return addr; 2634 if (inflated_len < len) 2347 if (inflated_len < len) 2635 return addr; 2348 return addr; 2636 2349 2637 inflated_addr = mm_get_unmapped_area( !! 
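/*
 * A worked example of the alignment arithmetic above and below (numbers
 * purely illustrative, assuming 4KiB pages and a 2MiB PMD size): with
 * pgoff = 0x123, offset = (0x123 << 12) & 0x1fffff = 0x123000.  If the
 * address found above does not already satisfy
 * (addr & 0x1fffff) == 0x123000, the search is redone with
 * inflated_len = len + 2MiB - 4KiB, and the result is then rounded up to
 * the next address whose low 21 bits equal 0x123000; by construction that
 * still leaves room for len bytes, so file offsets and virtual addresses
 * end up sharing PMD alignment and faults can install PMD mappings.
 */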
2350 inflated_addr = get_area(NULL, uaddr, inflated_len, 0, flags); 2638 << 2639 if (IS_ERR_VALUE(inflated_addr)) 2351 if (IS_ERR_VALUE(inflated_addr)) 2640 return addr; 2352 return addr; 2641 if (inflated_addr & ~PAGE_MASK) 2353 if (inflated_addr & ~PAGE_MASK) 2642 return addr; 2354 return addr; 2643 2355 2644 inflated_offset = inflated_addr & (hp !! 2356 inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1); 2645 inflated_addr += offset - inflated_of 2357 inflated_addr += offset - inflated_offset; 2646 if (inflated_offset > offset) 2358 if (inflated_offset > offset) 2647 inflated_addr += hpage_size; !! 2359 inflated_addr += HPAGE_PMD_SIZE; 2648 2360 2649 if (inflated_addr > TASK_SIZE - len) 2361 if (inflated_addr > TASK_SIZE - len) 2650 return addr; 2362 return addr; 2651 return inflated_addr; 2363 return inflated_addr; 2652 } 2364 } 2653 2365 2654 #ifdef CONFIG_NUMA 2366 #ifdef CONFIG_NUMA 2655 static int shmem_set_policy(struct vm_area_st 2367 static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol) 2656 { 2368 { 2657 struct inode *inode = file_inode(vma- 2369 struct inode *inode = file_inode(vma->vm_file); 2658 return mpol_set_shared_policy(&SHMEM_ 2370 return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol); 2659 } 2371 } 2660 2372 2661 static struct mempolicy *shmem_get_policy(str 2373 static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma, 2662 uns 2374 unsigned long addr, pgoff_t *ilx) 2663 { 2375 { 2664 struct inode *inode = file_inode(vma- 2376 struct inode *inode = file_inode(vma->vm_file); 2665 pgoff_t index; 2377 pgoff_t index; 2666 2378 2667 /* 2379 /* 2668 * Bias interleave by inode number to 2380 * Bias interleave by inode number to distribute better across nodes; 2669 * but this interface is independent 2381 * but this interface is independent of which page order is used, so 2670 * supplies only that bias, letting c 2382 * supplies only that bias, letting caller apply the offset (adjusted 2671 * by page order, as in shmem_get_pgo 2383 * by page order, as in shmem_get_pgoff_policy() and get_vma_policy()). 2672 */ 2384 */ 2673 *ilx = inode->i_ino; 2385 *ilx = inode->i_ino; 2674 index = ((addr - vma->vm_start) >> PA 2386 index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; 2675 return mpol_shared_policy_lookup(&SHM 2387 return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index); 2676 } 2388 } 2677 2389 2678 static struct mempolicy *shmem_get_pgoff_poli 2390 static struct mempolicy *shmem_get_pgoff_policy(struct shmem_inode_info *info, 2679 pgoff_t index, unsign 2391 pgoff_t index, unsigned int order, pgoff_t *ilx) 2680 { 2392 { 2681 struct mempolicy *mpol; 2393 struct mempolicy *mpol; 2682 2394 2683 /* Bias interleave by inode number to 2395 /* Bias interleave by inode number to distribute better across nodes */ 2684 *ilx = info->vfs_inode.i_ino + (index 2396 *ilx = info->vfs_inode.i_ino + (index >> order); 2685 2397 2686 mpol = mpol_shared_policy_lookup(&inf 2398 mpol = mpol_shared_policy_lookup(&info->policy, index); 2687 return mpol ? mpol : get_task_policy( 2399 return mpol ? 
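/*
 * Example of the interleave bias above (illustrative values only): for an
 * inode with i_ino == 100, a lookup at index 35 with order 4 (a 16-page
 * folio) yields *ilx = 100 + (35 >> 4) = 102, so two files with the same
 * layout start interleaving on different nodes, while all pages of one
 * 16-page folio share a single bias value.
 */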
mpol : get_task_policy(current); 2688 } 2400 } 2689 #else 2401 #else 2690 static struct mempolicy *shmem_get_pgoff_poli 2402 static struct mempolicy *shmem_get_pgoff_policy(struct shmem_inode_info *info, 2691 pgoff_t index, unsign 2403 pgoff_t index, unsigned int order, pgoff_t *ilx) 2692 { 2404 { 2693 *ilx = 0; 2405 *ilx = 0; 2694 return NULL; 2406 return NULL; 2695 } 2407 } 2696 #endif /* CONFIG_NUMA */ 2408 #endif /* CONFIG_NUMA */ 2697 2409 2698 int shmem_lock(struct file *file, int lock, s 2410 int shmem_lock(struct file *file, int lock, struct ucounts *ucounts) 2699 { 2411 { 2700 struct inode *inode = file_inode(file 2412 struct inode *inode = file_inode(file); 2701 struct shmem_inode_info *info = SHMEM 2413 struct shmem_inode_info *info = SHMEM_I(inode); 2702 int retval = -ENOMEM; 2414 int retval = -ENOMEM; 2703 2415 2704 /* 2416 /* 2705 * What serializes the accesses to in 2417 * What serializes the accesses to info->flags? 2706 * ipc_lock_object() when called from 2418 * ipc_lock_object() when called from shmctl_do_lock(), 2707 * no serialization needed when calle 2419 * no serialization needed when called from shm_destroy(). 2708 */ 2420 */ 2709 if (lock && !(info->flags & VM_LOCKED 2421 if (lock && !(info->flags & VM_LOCKED)) { 2710 if (!user_shm_lock(inode->i_s 2422 if (!user_shm_lock(inode->i_size, ucounts)) 2711 goto out_nomem; 2423 goto out_nomem; 2712 info->flags |= VM_LOCKED; 2424 info->flags |= VM_LOCKED; 2713 mapping_set_unevictable(file- 2425 mapping_set_unevictable(file->f_mapping); 2714 } 2426 } 2715 if (!lock && (info->flags & VM_LOCKED 2427 if (!lock && (info->flags & VM_LOCKED) && ucounts) { 2716 user_shm_unlock(inode->i_size 2428 user_shm_unlock(inode->i_size, ucounts); 2717 info->flags &= ~VM_LOCKED; 2429 info->flags &= ~VM_LOCKED; 2718 mapping_clear_unevictable(fil 2430 mapping_clear_unevictable(file->f_mapping); 2719 } 2431 } 2720 retval = 0; 2432 retval = 0; 2721 2433 2722 out_nomem: 2434 out_nomem: 2723 return retval; 2435 return retval; 2724 } 2436 } 2725 2437 2726 static int shmem_mmap(struct file *file, stru 2438 static int shmem_mmap(struct file *file, struct vm_area_struct *vma) 2727 { 2439 { 2728 struct inode *inode = file_inode(file 2440 struct inode *inode = file_inode(file); 2729 struct shmem_inode_info *info = SHMEM 2441 struct shmem_inode_info *info = SHMEM_I(inode); 2730 int ret; 2442 int ret; 2731 2443 2732 ret = seal_check_write(info->seals, v 2444 ret = seal_check_write(info->seals, vma); 2733 if (ret) 2445 if (ret) 2734 return ret; 2446 return ret; 2735 2447 >> 2448 /* arm64 - allow memory tagging on RAM-based files */ >> 2449 vm_flags_set(vma, VM_MTE_ALLOWED); >> 2450 2736 file_accessed(file); 2451 file_accessed(file); 2737 /* This is anonymous shared memory if 2452 /* This is anonymous shared memory if it is unlinked at the time of mmap */ 2738 if (inode->i_nlink) 2453 if (inode->i_nlink) 2739 vma->vm_ops = &shmem_vm_ops; 2454 vma->vm_ops = &shmem_vm_ops; 2740 else 2455 else 2741 vma->vm_ops = &shmem_anon_vm_ 2456 vma->vm_ops = &shmem_anon_vm_ops; 2742 return 0; 2457 return 0; 2743 } 2458 } 2744 2459 2745 static int shmem_file_open(struct inode *inod 2460 static int shmem_file_open(struct inode *inode, struct file *file) 2746 { 2461 { 2747 file->f_mode |= FMODE_CAN_ODIRECT; 2462 file->f_mode |= FMODE_CAN_ODIRECT; 2748 return generic_file_open(inode, file) 2463 return generic_file_open(inode, file); 2749 } 2464 } 2750 2465 2751 #ifdef CONFIG_TMPFS_XATTR 2466 #ifdef CONFIG_TMPFS_XATTR 2752 static int shmem_initxattrs(struct inode *, c 2467 
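/*
 * shmem_lock() above is what backs SysV SHM_LOCK/SHM_UNLOCK.  A minimal
 * userspace sketch (error handling omitted; needs CAP_IPC_LOCK or enough
 * RLIMIT_MEMLOCK headroom):
 *
 *	int id = shmget(IPC_PRIVATE, 1 << 20, IPC_CREAT | 0600);
 *	shmctl(id, SHM_LOCK, NULL);
 *	... use the segment; it is kept out of swap ...
 *	shmctl(id, SHM_UNLOCK, NULL);
 *	shmctl(id, IPC_RMID, NULL);
 */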
static int shmem_initxattrs(struct inode *, const struct xattr *, void *); 2753 2468 2754 /* 2469 /* 2755 * chattr's fsflags are unrelated to extended 2470 * chattr's fsflags are unrelated to extended attributes, 2756 * but tmpfs has chosen to enable them under 2471 * but tmpfs has chosen to enable them under the same config option. 2757 */ 2472 */ 2758 static void shmem_set_inode_flags(struct inod 2473 static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags) 2759 { 2474 { 2760 unsigned int i_flags = 0; 2475 unsigned int i_flags = 0; 2761 2476 2762 if (fsflags & FS_NOATIME_FL) 2477 if (fsflags & FS_NOATIME_FL) 2763 i_flags |= S_NOATIME; 2478 i_flags |= S_NOATIME; 2764 if (fsflags & FS_APPEND_FL) 2479 if (fsflags & FS_APPEND_FL) 2765 i_flags |= S_APPEND; 2480 i_flags |= S_APPEND; 2766 if (fsflags & FS_IMMUTABLE_FL) 2481 if (fsflags & FS_IMMUTABLE_FL) 2767 i_flags |= S_IMMUTABLE; 2482 i_flags |= S_IMMUTABLE; 2768 /* 2483 /* 2769 * But FS_NODUMP_FL does not require 2484 * But FS_NODUMP_FL does not require any action in i_flags. 2770 */ 2485 */ 2771 inode_set_flags(inode, i_flags, S_NOA 2486 inode_set_flags(inode, i_flags, S_NOATIME | S_APPEND | S_IMMUTABLE); 2772 } 2487 } 2773 #else 2488 #else 2774 static void shmem_set_inode_flags(struct inod 2489 static void shmem_set_inode_flags(struct inode *inode, unsigned int fsflags) 2775 { 2490 { 2776 } 2491 } 2777 #define shmem_initxattrs NULL 2492 #define shmem_initxattrs NULL 2778 #endif 2493 #endif 2779 2494 2780 static struct offset_ctx *shmem_get_offset_ct 2495 static struct offset_ctx *shmem_get_offset_ctx(struct inode *inode) 2781 { 2496 { 2782 return &SHMEM_I(inode)->dir_offsets; 2497 return &SHMEM_I(inode)->dir_offsets; 2783 } 2498 } 2784 2499 2785 static struct inode *__shmem_get_inode(struct 2500 static struct inode *__shmem_get_inode(struct mnt_idmap *idmap, 2786 2501 struct super_block *sb, 2787 2502 struct inode *dir, umode_t mode, 2788 2503 dev_t dev, unsigned long flags) 2789 { 2504 { 2790 struct inode *inode; 2505 struct inode *inode; 2791 struct shmem_inode_info *info; 2506 struct shmem_inode_info *info; 2792 struct shmem_sb_info *sbinfo = SHMEM_ 2507 struct shmem_sb_info *sbinfo = SHMEM_SB(sb); 2793 ino_t ino; 2508 ino_t ino; 2794 int err; 2509 int err; 2795 2510 2796 err = shmem_reserve_inode(sb, &ino); 2511 err = shmem_reserve_inode(sb, &ino); 2797 if (err) 2512 if (err) 2798 return ERR_PTR(err); 2513 return ERR_PTR(err); 2799 2514 2800 inode = new_inode(sb); 2515 inode = new_inode(sb); 2801 if (!inode) { 2516 if (!inode) { 2802 shmem_free_inode(sb, 0); 2517 shmem_free_inode(sb, 0); 2803 return ERR_PTR(-ENOSPC); 2518 return ERR_PTR(-ENOSPC); 2804 } 2519 } 2805 2520 2806 inode->i_ino = ino; 2521 inode->i_ino = ino; 2807 inode_init_owner(idmap, inode, dir, m 2522 inode_init_owner(idmap, inode, dir, mode); 2808 inode->i_blocks = 0; 2523 inode->i_blocks = 0; 2809 simple_inode_init_ts(inode); 2524 simple_inode_init_ts(inode); 2810 inode->i_generation = get_random_u32( 2525 inode->i_generation = get_random_u32(); 2811 info = SHMEM_I(inode); 2526 info = SHMEM_I(inode); 2812 memset(info, 0, (char *)inode - (char 2527 memset(info, 0, (char *)inode - (char *)info); 2813 spin_lock_init(&info->lock); 2528 spin_lock_init(&info->lock); 2814 atomic_set(&info->stop_eviction, 0); 2529 atomic_set(&info->stop_eviction, 0); 2815 info->seals = F_SEAL_SEAL; 2530 info->seals = F_SEAL_SEAL; 2816 info->flags = flags & VM_NORESERVE; 2531 info->flags = flags & VM_NORESERVE; 2817 info->i_crtime = inode_get_mtime(inod 2532 info->i_crtime 
= inode_get_mtime(inode); 2818 info->fsflags = (dir == NULL) ? 0 : 2533 info->fsflags = (dir == NULL) ? 0 : 2819 SHMEM_I(dir)->fsflags & SHMEM 2534 SHMEM_I(dir)->fsflags & SHMEM_FL_INHERITED; 2820 if (info->fsflags) 2535 if (info->fsflags) 2821 shmem_set_inode_flags(inode, 2536 shmem_set_inode_flags(inode, info->fsflags); 2822 INIT_LIST_HEAD(&info->shrinklist); 2537 INIT_LIST_HEAD(&info->shrinklist); 2823 INIT_LIST_HEAD(&info->swaplist); 2538 INIT_LIST_HEAD(&info->swaplist); 2824 simple_xattrs_init(&info->xattrs); 2539 simple_xattrs_init(&info->xattrs); 2825 cache_no_acl(inode); 2540 cache_no_acl(inode); 2826 if (sbinfo->noswap) 2541 if (sbinfo->noswap) 2827 mapping_set_unevictable(inode 2542 mapping_set_unevictable(inode->i_mapping); 2828 mapping_set_large_folios(inode->i_map 2543 mapping_set_large_folios(inode->i_mapping); 2829 2544 2830 switch (mode & S_IFMT) { 2545 switch (mode & S_IFMT) { 2831 default: 2546 default: 2832 inode->i_op = &shmem_special_ 2547 inode->i_op = &shmem_special_inode_operations; 2833 init_special_inode(inode, mod 2548 init_special_inode(inode, mode, dev); 2834 break; 2549 break; 2835 case S_IFREG: 2550 case S_IFREG: 2836 inode->i_mapping->a_ops = &sh 2551 inode->i_mapping->a_ops = &shmem_aops; 2837 inode->i_op = &shmem_inode_op 2552 inode->i_op = &shmem_inode_operations; 2838 inode->i_fop = &shmem_file_op 2553 inode->i_fop = &shmem_file_operations; 2839 mpol_shared_policy_init(&info 2554 mpol_shared_policy_init(&info->policy, 2840 shme 2555 shmem_get_sbmpol(sbinfo)); 2841 break; 2556 break; 2842 case S_IFDIR: 2557 case S_IFDIR: 2843 inc_nlink(inode); 2558 inc_nlink(inode); 2844 /* Some things misbehave if s 2559 /* Some things misbehave if size == 0 on a directory */ 2845 inode->i_size = 2 * BOGO_DIRE 2560 inode->i_size = 2 * BOGO_DIRENT_SIZE; 2846 inode->i_op = &shmem_dir_inod 2561 inode->i_op = &shmem_dir_inode_operations; 2847 inode->i_fop = &simple_offset 2562 inode->i_fop = &simple_offset_dir_operations; 2848 simple_offset_init(shmem_get_ 2563 simple_offset_init(shmem_get_offset_ctx(inode)); 2849 break; 2564 break; 2850 case S_IFLNK: 2565 case S_IFLNK: 2851 /* 2566 /* 2852 * Must not load anything in 2567 * Must not load anything in the rbtree, 2853 * mpol_free_shared_policy wi 2568 * mpol_free_shared_policy will not be called. 
2854 */ 2569 */ 2855 mpol_shared_policy_init(&info 2570 mpol_shared_policy_init(&info->policy, NULL); 2856 break; 2571 break; 2857 } 2572 } 2858 2573 2859 lockdep_annotate_inode_mutex_key(inod 2574 lockdep_annotate_inode_mutex_key(inode); 2860 return inode; 2575 return inode; 2861 } 2576 } 2862 2577 2863 #ifdef CONFIG_TMPFS_QUOTA 2578 #ifdef CONFIG_TMPFS_QUOTA 2864 static struct inode *shmem_get_inode(struct m 2579 static struct inode *shmem_get_inode(struct mnt_idmap *idmap, 2865 struct s 2580 struct super_block *sb, struct inode *dir, 2866 umode_t 2581 umode_t mode, dev_t dev, unsigned long flags) 2867 { 2582 { 2868 int err; 2583 int err; 2869 struct inode *inode; 2584 struct inode *inode; 2870 2585 2871 inode = __shmem_get_inode(idmap, sb, 2586 inode = __shmem_get_inode(idmap, sb, dir, mode, dev, flags); 2872 if (IS_ERR(inode)) 2587 if (IS_ERR(inode)) 2873 return inode; 2588 return inode; 2874 2589 2875 err = dquot_initialize(inode); 2590 err = dquot_initialize(inode); 2876 if (err) 2591 if (err) 2877 goto errout; 2592 goto errout; 2878 2593 2879 err = dquot_alloc_inode(inode); 2594 err = dquot_alloc_inode(inode); 2880 if (err) { 2595 if (err) { 2881 dquot_drop(inode); 2596 dquot_drop(inode); 2882 goto errout; 2597 goto errout; 2883 } 2598 } 2884 return inode; 2599 return inode; 2885 2600 2886 errout: 2601 errout: 2887 inode->i_flags |= S_NOQUOTA; 2602 inode->i_flags |= S_NOQUOTA; 2888 iput(inode); 2603 iput(inode); 2889 return ERR_PTR(err); 2604 return ERR_PTR(err); 2890 } 2605 } 2891 #else 2606 #else 2892 static inline struct inode *shmem_get_inode(s 2607 static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap, 2893 struct s 2608 struct super_block *sb, struct inode *dir, 2894 umode_t 2609 umode_t mode, dev_t dev, unsigned long flags) 2895 { 2610 { 2896 return __shmem_get_inode(idmap, sb, d 2611 return __shmem_get_inode(idmap, sb, dir, mode, dev, flags); 2897 } 2612 } 2898 #endif /* CONFIG_TMPFS_QUOTA */ 2613 #endif /* CONFIG_TMPFS_QUOTA */ 2899 2614 2900 #ifdef CONFIG_USERFAULTFD 2615 #ifdef CONFIG_USERFAULTFD 2901 int shmem_mfill_atomic_pte(pmd_t *dst_pmd, 2616 int shmem_mfill_atomic_pte(pmd_t *dst_pmd, 2902 struct vm_area_str 2617 struct vm_area_struct *dst_vma, 2903 unsigned long dst_ 2618 unsigned long dst_addr, 2904 unsigned long src_ 2619 unsigned long src_addr, 2905 uffd_flags_t flags 2620 uffd_flags_t flags, 2906 struct folio **fol 2621 struct folio **foliop) 2907 { 2622 { 2908 struct inode *inode = file_inode(dst_ 2623 struct inode *inode = file_inode(dst_vma->vm_file); 2909 struct shmem_inode_info *info = SHMEM 2624 struct shmem_inode_info *info = SHMEM_I(inode); 2910 struct address_space *mapping = inode 2625 struct address_space *mapping = inode->i_mapping; 2911 gfp_t gfp = mapping_gfp_mask(mapping) 2626 gfp_t gfp = mapping_gfp_mask(mapping); 2912 pgoff_t pgoff = linear_page_index(dst 2627 pgoff_t pgoff = linear_page_index(dst_vma, dst_addr); 2913 void *page_kaddr; 2628 void *page_kaddr; 2914 struct folio *folio; 2629 struct folio *folio; 2915 int ret; 2630 int ret; 2916 pgoff_t max_off; 2631 pgoff_t max_off; 2917 2632 2918 if (shmem_inode_acct_blocks(inode, 1) 2633 if (shmem_inode_acct_blocks(inode, 1)) { 2919 /* 2634 /* 2920 * We may have got a page, re 2635 * We may have got a page, returned -ENOENT triggering a retry, 2921 * and now we find ourselves 2636 * and now we find ourselves with -ENOMEM. Release the page, to 2922 * avoid a BUG_ON in our call 2637 * avoid a BUG_ON in our caller. 
2923 */ 2638 */ 2924 if (unlikely(*foliop)) { 2639 if (unlikely(*foliop)) { 2925 folio_put(*foliop); 2640 folio_put(*foliop); 2926 *foliop = NULL; 2641 *foliop = NULL; 2927 } 2642 } 2928 return -ENOMEM; 2643 return -ENOMEM; 2929 } 2644 } 2930 2645 2931 if (!*foliop) { 2646 if (!*foliop) { 2932 ret = -ENOMEM; 2647 ret = -ENOMEM; 2933 folio = shmem_alloc_folio(gfp !! 2648 folio = shmem_alloc_folio(gfp, info, pgoff); 2934 if (!folio) 2649 if (!folio) 2935 goto out_unacct_block 2650 goto out_unacct_blocks; 2936 2651 2937 if (uffd_flags_mode_is(flags, 2652 if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY)) { 2938 page_kaddr = kmap_loc 2653 page_kaddr = kmap_local_folio(folio, 0); 2939 /* 2654 /* 2940 * The read mmap_lock 2655 * The read mmap_lock is held here. Despite the 2941 * mmap_lock being re 2656 * mmap_lock being read recursive a deadlock is still 2942 * possible if a writ 2657 * possible if a writer has taken a lock. For example: 2943 * 2658 * 2944 * process A thread 1 2659 * process A thread 1 takes read lock on own mmap_lock 2945 * process A thread 2 2660 * process A thread 2 calls mmap, blocks taking write lock 2946 * process B thread 1 2661 * process B thread 1 takes page fault, read lock on own mmap lock 2947 * process B thread 2 2662 * process B thread 2 calls mmap, blocks taking write lock 2948 * process A thread 1 2663 * process A thread 1 blocks taking read lock on process B 2949 * process B thread 1 2664 * process B thread 1 blocks taking read lock on process A 2950 * 2665 * 2951 * Disable page fault 2666 * Disable page faults to prevent potential deadlock 2952 * and retry the copy 2667 * and retry the copy outside the mmap_lock. 2953 */ 2668 */ 2954 pagefault_disable(); 2669 pagefault_disable(); 2955 ret = copy_from_user( 2670 ret = copy_from_user(page_kaddr, 2956 2671 (const void __user *)src_addr, 2957 2672 PAGE_SIZE); 2958 pagefault_enable(); 2673 pagefault_enable(); 2959 kunmap_local(page_kad 2674 kunmap_local(page_kaddr); 2960 2675 2961 /* fallback to copy_f 2676 /* fallback to copy_from_user outside mmap_lock */ 2962 if (unlikely(ret)) { 2677 if (unlikely(ret)) { 2963 *foliop = fol 2678 *foliop = folio; 2964 ret = -ENOENT 2679 ret = -ENOENT; 2965 /* don't free 2680 /* don't free the page */ 2966 goto out_unac 2681 goto out_unacct_blocks; 2967 } 2682 } 2968 2683 2969 flush_dcache_folio(fo 2684 flush_dcache_folio(folio); 2970 } else { /* ZE 2685 } else { /* ZEROPAGE */ 2971 clear_user_highpage(& 2686 clear_user_highpage(&folio->page, dst_addr); 2972 } 2687 } 2973 } else { 2688 } else { 2974 folio = *foliop; 2689 folio = *foliop; 2975 VM_BUG_ON_FOLIO(folio_test_la 2690 VM_BUG_ON_FOLIO(folio_test_large(folio), folio); 2976 *foliop = NULL; 2691 *foliop = NULL; 2977 } 2692 } 2978 2693 2979 VM_BUG_ON(folio_test_locked(folio)); 2694 VM_BUG_ON(folio_test_locked(folio)); 2980 VM_BUG_ON(folio_test_swapbacked(folio 2695 VM_BUG_ON(folio_test_swapbacked(folio)); 2981 __folio_set_locked(folio); 2696 __folio_set_locked(folio); 2982 __folio_set_swapbacked(folio); 2697 __folio_set_swapbacked(folio); 2983 __folio_mark_uptodate(folio); 2698 __folio_mark_uptodate(folio); 2984 2699 2985 ret = -EFAULT; 2700 ret = -EFAULT; 2986 max_off = DIV_ROUND_UP(i_size_read(in 2701 max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); 2987 if (unlikely(pgoff >= max_off)) 2702 if (unlikely(pgoff >= max_off)) 2988 goto out_release; 2703 goto out_release; 2989 2704 2990 ret = mem_cgroup_charge(folio, dst_vm 2705 ret = mem_cgroup_charge(folio, dst_vma->vm_mm, gfp); 2991 if (ret) 2706 if (ret) 2992 goto 
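/*
 * The pagefault_disable()d copy above is the kernel half of resolving a
 * userfaultfd MISSING fault on shmem.  A rough sketch of the userspace
 * half (fault_addr, local_page and page_size are placeholder names;
 * assumes the uffd was set up with UFFDIO_API and the mapping registered
 * with UFFDIO_REGISTER_MODE_MISSING; error handling omitted):
 *
 *	struct uffdio_copy copy = {
 *		.dst = fault_addr & ~(page_size - 1),
 *		.src = (unsigned long)local_page,
 *		.len = page_size,
 *		.mode = 0,
 *	};
 *	ioctl(uffd, UFFDIO_COPY, &copy);
 */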
out_release; 2707 goto out_release; 2993 ret = shmem_add_to_page_cache(folio, 2708 ret = shmem_add_to_page_cache(folio, mapping, pgoff, NULL, gfp); 2994 if (ret) 2709 if (ret) 2995 goto out_release; 2710 goto out_release; 2996 2711 2997 ret = mfill_atomic_install_pte(dst_pm 2712 ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr, 2998 &folio 2713 &folio->page, true, flags); 2999 if (ret) 2714 if (ret) 3000 goto out_delete_from_cache; 2715 goto out_delete_from_cache; 3001 2716 3002 shmem_recalc_inode(inode, 1, 0); 2717 shmem_recalc_inode(inode, 1, 0); 3003 folio_unlock(folio); 2718 folio_unlock(folio); 3004 return 0; 2719 return 0; 3005 out_delete_from_cache: 2720 out_delete_from_cache: 3006 filemap_remove_folio(folio); 2721 filemap_remove_folio(folio); 3007 out_release: 2722 out_release: 3008 folio_unlock(folio); 2723 folio_unlock(folio); 3009 folio_put(folio); 2724 folio_put(folio); 3010 out_unacct_blocks: 2725 out_unacct_blocks: 3011 shmem_inode_unacct_blocks(inode, 1); 2726 shmem_inode_unacct_blocks(inode, 1); 3012 return ret; 2727 return ret; 3013 } 2728 } 3014 #endif /* CONFIG_USERFAULTFD */ 2729 #endif /* CONFIG_USERFAULTFD */ 3015 2730 3016 #ifdef CONFIG_TMPFS 2731 #ifdef CONFIG_TMPFS 3017 static const struct inode_operations shmem_sy 2732 static const struct inode_operations shmem_symlink_inode_operations; 3018 static const struct inode_operations shmem_sh 2733 static const struct inode_operations shmem_short_symlink_operations; 3019 2734 3020 static int 2735 static int 3021 shmem_write_begin(struct file *file, struct a 2736 shmem_write_begin(struct file *file, struct address_space *mapping, 3022 loff_t pos, unsigned 2737 loff_t pos, unsigned len, 3023 struct folio **foliop !! 2738 struct page **pagep, void **fsdata) 3024 { 2739 { 3025 struct inode *inode = mapping->host; 2740 struct inode *inode = mapping->host; 3026 struct shmem_inode_info *info = SHMEM 2741 struct shmem_inode_info *info = SHMEM_I(inode); 3027 pgoff_t index = pos >> PAGE_SHIFT; 2742 pgoff_t index = pos >> PAGE_SHIFT; 3028 struct folio *folio; 2743 struct folio *folio; 3029 int ret = 0; 2744 int ret = 0; 3030 2745 3031 /* i_rwsem is held by caller */ 2746 /* i_rwsem is held by caller */ 3032 if (unlikely(info->seals & (F_SEAL_GR 2747 if (unlikely(info->seals & (F_SEAL_GROW | 3033 F_SEAL_WRI 2748 F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) { 3034 if (info->seals & (F_SEAL_WRI 2749 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) 3035 return -EPERM; 2750 return -EPERM; 3036 if ((info->seals & F_SEAL_GRO 2751 if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size) 3037 return -EPERM; 2752 return -EPERM; 3038 } 2753 } 3039 2754 3040 ret = shmem_get_folio(inode, index, p !! 2755 ret = shmem_get_folio(inode, index, &folio, SGP_WRITE); 3041 if (ret) 2756 if (ret) 3042 return ret; 2757 return ret; 3043 2758 3044 if (folio_test_hwpoison(folio) || !! 2759 *pagep = folio_file_page(folio, index); 3045 (folio_test_large(folio) && folio !! 2760 if (PageHWPoison(*pagep)) { 3046 folio_unlock(folio); 2761 folio_unlock(folio); 3047 folio_put(folio); 2762 folio_put(folio); >> 2763 *pagep = NULL; 3048 return -EIO; 2764 return -EIO; 3049 } 2765 } 3050 2766 3051 *foliop = folio; << 3052 return 0; 2767 return 0; 3053 } 2768 } 3054 2769 3055 static int 2770 static int 3056 shmem_write_end(struct file *file, struct add 2771 shmem_write_end(struct file *file, struct address_space *mapping, 3057 loff_t pos, unsigned 2772 loff_t pos, unsigned len, unsigned copied, 3058 struct folio *folio, !! 
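/*
 * The seal checks in shmem_write_begin() above are what make memfd
 * sealing effective against buffered writes.  A small userspace sketch
 * (error handling omitted):
 *
 *	int fd = memfd_create("example", MFD_ALLOW_SEALING);
 *	ftruncate(fd, 4096);
 *	write(fd, "data", 4);
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_WRITE);
 *
 * After the seals are added, further write()s fail with EPERM, and an
 * ftruncate() beyond the current size is refused because of F_SEAL_GROW.
 */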
2773 struct page *page, void *fsdata) 3059 { 2774 { >> 2775 struct folio *folio = page_folio(page); 3060 struct inode *inode = mapping->host; 2776 struct inode *inode = mapping->host; 3061 2777 3062 if (pos + copied > inode->i_size) 2778 if (pos + copied > inode->i_size) 3063 i_size_write(inode, pos + cop 2779 i_size_write(inode, pos + copied); 3064 2780 3065 if (!folio_test_uptodate(folio)) { 2781 if (!folio_test_uptodate(folio)) { 3066 if (copied < folio_size(folio 2782 if (copied < folio_size(folio)) { 3067 size_t from = offset_ 2783 size_t from = offset_in_folio(folio, pos); 3068 folio_zero_segments(f 2784 folio_zero_segments(folio, 0, from, 3069 from 2785 from + copied, folio_size(folio)); 3070 } 2786 } 3071 folio_mark_uptodate(folio); 2787 folio_mark_uptodate(folio); 3072 } 2788 } 3073 folio_mark_dirty(folio); 2789 folio_mark_dirty(folio); 3074 folio_unlock(folio); 2790 folio_unlock(folio); 3075 folio_put(folio); 2791 folio_put(folio); 3076 2792 3077 return copied; 2793 return copied; 3078 } 2794 } 3079 2795 3080 static ssize_t shmem_file_read_iter(struct ki 2796 static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to) 3081 { 2797 { 3082 struct file *file = iocb->ki_filp; 2798 struct file *file = iocb->ki_filp; 3083 struct inode *inode = file_inode(file 2799 struct inode *inode = file_inode(file); 3084 struct address_space *mapping = inode 2800 struct address_space *mapping = inode->i_mapping; 3085 pgoff_t index; 2801 pgoff_t index; 3086 unsigned long offset; 2802 unsigned long offset; 3087 int error = 0; 2803 int error = 0; 3088 ssize_t retval = 0; 2804 ssize_t retval = 0; 3089 loff_t *ppos = &iocb->ki_pos; 2805 loff_t *ppos = &iocb->ki_pos; 3090 2806 3091 index = *ppos >> PAGE_SHIFT; 2807 index = *ppos >> PAGE_SHIFT; 3092 offset = *ppos & ~PAGE_MASK; 2808 offset = *ppos & ~PAGE_MASK; 3093 2809 3094 for (;;) { 2810 for (;;) { 3095 struct folio *folio = NULL; 2811 struct folio *folio = NULL; 3096 struct page *page = NULL; 2812 struct page *page = NULL; 3097 pgoff_t end_index; 2813 pgoff_t end_index; 3098 unsigned long nr, ret; 2814 unsigned long nr, ret; 3099 loff_t i_size = i_size_read(i 2815 loff_t i_size = i_size_read(inode); 3100 2816 3101 end_index = i_size >> PAGE_SH 2817 end_index = i_size >> PAGE_SHIFT; 3102 if (index > end_index) 2818 if (index > end_index) 3103 break; 2819 break; 3104 if (index == end_index) { 2820 if (index == end_index) { 3105 nr = i_size & ~PAGE_M 2821 nr = i_size & ~PAGE_MASK; 3106 if (nr <= offset) 2822 if (nr <= offset) 3107 break; 2823 break; 3108 } 2824 } 3109 2825 3110 error = shmem_get_folio(inode !! 
2826 error = shmem_get_folio(inode, index, &folio, SGP_READ); 3111 if (error) { 2827 if (error) { 3112 if (error == -EINVAL) 2828 if (error == -EINVAL) 3113 error = 0; 2829 error = 0; 3114 break; 2830 break; 3115 } 2831 } 3116 if (folio) { 2832 if (folio) { 3117 folio_unlock(folio); 2833 folio_unlock(folio); 3118 2834 3119 page = folio_file_pag 2835 page = folio_file_page(folio, index); 3120 if (PageHWPoison(page 2836 if (PageHWPoison(page)) { 3121 folio_put(fol 2837 folio_put(folio); 3122 error = -EIO; 2838 error = -EIO; 3123 break; 2839 break; 3124 } 2840 } 3125 } 2841 } 3126 2842 3127 /* 2843 /* 3128 * We must evaluate after, si 2844 * We must evaluate after, since reads (unlike writes) 3129 * are called without i_rwsem 2845 * are called without i_rwsem protection against truncate 3130 */ 2846 */ 3131 nr = PAGE_SIZE; 2847 nr = PAGE_SIZE; 3132 i_size = i_size_read(inode); 2848 i_size = i_size_read(inode); 3133 end_index = i_size >> PAGE_SH 2849 end_index = i_size >> PAGE_SHIFT; 3134 if (index == end_index) { 2850 if (index == end_index) { 3135 nr = i_size & ~PAGE_M 2851 nr = i_size & ~PAGE_MASK; 3136 if (nr <= offset) { 2852 if (nr <= offset) { 3137 if (folio) 2853 if (folio) 3138 folio 2854 folio_put(folio); 3139 break; 2855 break; 3140 } 2856 } 3141 } 2857 } 3142 nr -= offset; 2858 nr -= offset; 3143 2859 3144 if (folio) { 2860 if (folio) { 3145 /* 2861 /* 3146 * If users can be wr 2862 * If users can be writing to this page using arbitrary 3147 * virtual addresses, 2863 * virtual addresses, take care about potential aliasing 3148 * before reading the 2864 * before reading the page on the kernel side. 3149 */ 2865 */ 3150 if (mapping_writably_ 2866 if (mapping_writably_mapped(mapping)) 3151 flush_dcache_ 2867 flush_dcache_page(page); 3152 /* 2868 /* 3153 * Mark the page acce 2869 * Mark the page accessed if we read the beginning. 3154 */ 2870 */ 3155 if (!offset) 2871 if (!offset) 3156 folio_mark_ac 2872 folio_mark_accessed(folio); 3157 /* 2873 /* 3158 * Ok, we have the pa 2874 * Ok, we have the page, and it's up-to-date, so 3159 * now we can copy it 2875 * now we can copy it to user space... 3160 */ 2876 */ 3161 ret = copy_page_to_it 2877 ret = copy_page_to_iter(page, offset, nr, to); 3162 folio_put(folio); 2878 folio_put(folio); 3163 2879 3164 } else if (user_backed_iter(t 2880 } else if (user_backed_iter(to)) { 3165 /* 2881 /* 3166 * Copy to user tends 2882 * Copy to user tends to be so well optimized, but 3167 * clear_user() not s 2883 * clear_user() not so much, that it is noticeably 3168 * faster to copy the 2884 * faster to copy the zero page instead of clearing. 3169 */ 2885 */ 3170 ret = copy_page_to_it 2886 ret = copy_page_to_iter(ZERO_PAGE(0), offset, nr, to); 3171 } else { 2887 } else { 3172 /* 2888 /* 3173 * But submitting the 2889 * But submitting the same page twice in a row to 3174 * splice() - or othe 2890 * splice() - or others? - can result in confusion: 3175 * so don't attempt t 2891 * so don't attempt that optimization on pipes etc. 
3176 */ 2892 */ 3177 ret = iov_iter_zero(n 2893 ret = iov_iter_zero(nr, to); 3178 } 2894 } 3179 2895 3180 retval += ret; 2896 retval += ret; 3181 offset += ret; 2897 offset += ret; 3182 index += offset >> PAGE_SHIFT 2898 index += offset >> PAGE_SHIFT; 3183 offset &= ~PAGE_MASK; 2899 offset &= ~PAGE_MASK; 3184 2900 3185 if (!iov_iter_count(to)) 2901 if (!iov_iter_count(to)) 3186 break; 2902 break; 3187 if (ret < nr) { 2903 if (ret < nr) { 3188 error = -EFAULT; 2904 error = -EFAULT; 3189 break; 2905 break; 3190 } 2906 } 3191 cond_resched(); 2907 cond_resched(); 3192 } 2908 } 3193 2909 3194 *ppos = ((loff_t) index << PAGE_SHIFT 2910 *ppos = ((loff_t) index << PAGE_SHIFT) + offset; 3195 file_accessed(file); 2911 file_accessed(file); 3196 return retval ? retval : error; 2912 return retval ? retval : error; 3197 } 2913 } 3198 2914 3199 static ssize_t shmem_file_write_iter(struct k 2915 static ssize_t shmem_file_write_iter(struct kiocb *iocb, struct iov_iter *from) 3200 { 2916 { 3201 struct file *file = iocb->ki_filp; 2917 struct file *file = iocb->ki_filp; 3202 struct inode *inode = file->f_mapping 2918 struct inode *inode = file->f_mapping->host; 3203 ssize_t ret; 2919 ssize_t ret; 3204 2920 3205 inode_lock(inode); 2921 inode_lock(inode); 3206 ret = generic_write_checks(iocb, from 2922 ret = generic_write_checks(iocb, from); 3207 if (ret <= 0) 2923 if (ret <= 0) 3208 goto unlock; 2924 goto unlock; 3209 ret = file_remove_privs(file); 2925 ret = file_remove_privs(file); 3210 if (ret) 2926 if (ret) 3211 goto unlock; 2927 goto unlock; 3212 ret = file_update_time(file); 2928 ret = file_update_time(file); 3213 if (ret) 2929 if (ret) 3214 goto unlock; 2930 goto unlock; 3215 ret = generic_perform_write(iocb, fro 2931 ret = generic_perform_write(iocb, from); 3216 unlock: 2932 unlock: 3217 inode_unlock(inode); 2933 inode_unlock(inode); 3218 return ret; 2934 return ret; 3219 } 2935 } 3220 2936 3221 static bool zero_pipe_buf_get(struct pipe_ino 2937 static bool zero_pipe_buf_get(struct pipe_inode_info *pipe, 3222 struct pipe_buf 2938 struct pipe_buffer *buf) 3223 { 2939 { 3224 return true; 2940 return true; 3225 } 2941 } 3226 2942 3227 static void zero_pipe_buf_release(struct pipe 2943 static void zero_pipe_buf_release(struct pipe_inode_info *pipe, 3228 struct pipe 2944 struct pipe_buffer *buf) 3229 { 2945 { 3230 } 2946 } 3231 2947 3232 static bool zero_pipe_buf_try_steal(struct pi 2948 static bool zero_pipe_buf_try_steal(struct pipe_inode_info *pipe, 3233 struct pi 2949 struct pipe_buffer *buf) 3234 { 2950 { 3235 return false; 2951 return false; 3236 } 2952 } 3237 2953 3238 static const struct pipe_buf_operations zero_ 2954 static const struct pipe_buf_operations zero_pipe_buf_ops = { 3239 .release = zero_pipe_buf_relea 2955 .release = zero_pipe_buf_release, 3240 .try_steal = zero_pipe_buf_try_s 2956 .try_steal = zero_pipe_buf_try_steal, 3241 .get = zero_pipe_buf_get, 2957 .get = zero_pipe_buf_get, 3242 }; 2958 }; 3243 2959 3244 static size_t splice_zeropage_into_pipe(struc 2960 static size_t splice_zeropage_into_pipe(struct pipe_inode_info *pipe, 3245 loff_ 2961 loff_t fpos, size_t size) 3246 { 2962 { 3247 size_t offset = fpos & ~PAGE_MASK; 2963 size_t offset = fpos & ~PAGE_MASK; 3248 2964 3249 size = min_t(size_t, size, PAGE_SIZE 2965 size = min_t(size_t, size, PAGE_SIZE - offset); 3250 2966 3251 if (!pipe_full(pipe->head, pipe->tail 2967 if (!pipe_full(pipe->head, pipe->tail, pipe->max_usage)) { 3252 struct pipe_buffer *buf = pip 2968 struct pipe_buffer *buf = pipe_head_buf(pipe); 3253 2969 3254 *buf = 
(struct pipe_buffer) { 2970 *buf = (struct pipe_buffer) { 3255 .ops = &zero_pipe_ 2971 .ops = &zero_pipe_buf_ops, 3256 .page = ZERO_PAGE(0 2972 .page = ZERO_PAGE(0), 3257 .offset = offset, 2973 .offset = offset, 3258 .len = size, 2974 .len = size, 3259 }; 2975 }; 3260 pipe->head++; 2976 pipe->head++; 3261 } 2977 } 3262 2978 3263 return size; 2979 return size; 3264 } 2980 } 3265 2981 3266 static ssize_t shmem_file_splice_read(struct 2982 static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos, 3267 struct 2983 struct pipe_inode_info *pipe, 3268 size_t 2984 size_t len, unsigned int flags) 3269 { 2985 { 3270 struct inode *inode = file_inode(in); 2986 struct inode *inode = file_inode(in); 3271 struct address_space *mapping = inode 2987 struct address_space *mapping = inode->i_mapping; 3272 struct folio *folio = NULL; 2988 struct folio *folio = NULL; 3273 size_t total_spliced = 0, used, npage 2989 size_t total_spliced = 0, used, npages, n, part; 3274 loff_t isize; 2990 loff_t isize; 3275 int error = 0; 2991 int error = 0; 3276 2992 3277 /* Work out how much data we can actu 2993 /* Work out how much data we can actually add into the pipe */ 3278 used = pipe_occupancy(pipe->head, pip 2994 used = pipe_occupancy(pipe->head, pipe->tail); 3279 npages = max_t(ssize_t, pipe->max_usa 2995 npages = max_t(ssize_t, pipe->max_usage - used, 0); 3280 len = min_t(size_t, len, npages * PAG 2996 len = min_t(size_t, len, npages * PAGE_SIZE); 3281 2997 3282 do { 2998 do { 3283 if (*ppos >= i_size_read(inod 2999 if (*ppos >= i_size_read(inode)) 3284 break; 3000 break; 3285 3001 3286 error = shmem_get_folio(inode !! 3002 error = shmem_get_folio(inode, *ppos / PAGE_SIZE, &folio, 3287 SGP_R 3003 SGP_READ); 3288 if (error) { 3004 if (error) { 3289 if (error == -EINVAL) 3005 if (error == -EINVAL) 3290 error = 0; 3006 error = 0; 3291 break; 3007 break; 3292 } 3008 } 3293 if (folio) { 3009 if (folio) { 3294 folio_unlock(folio); 3010 folio_unlock(folio); 3295 3011 3296 if (folio_test_hwpois 3012 if (folio_test_hwpoison(folio) || 3297 (folio_test_large 3013 (folio_test_large(folio) && 3298 folio_test_has_h 3014 folio_test_has_hwpoisoned(folio))) { 3299 error = -EIO; 3015 error = -EIO; 3300 break; 3016 break; 3301 } 3017 } 3302 } 3018 } 3303 3019 3304 /* 3020 /* 3305 * i_size must be checked aft 3021 * i_size must be checked after we know the pages are Uptodate. 3306 * 3022 * 3307 * Checking i_size after the 3023 * Checking i_size after the check allows us to calculate 3308 * the correct value for "nr" 3024 * the correct value for "nr", which means the zero-filled 3309 * part of the page is not co 3025 * part of the page is not copied back to userspace (unless 3310 * another truncate extends t 3026 * another truncate extends the file - this is desired though). 3311 */ 3027 */ 3312 isize = i_size_read(inode); 3028 isize = i_size_read(inode); 3313 if (unlikely(*ppos >= isize)) 3029 if (unlikely(*ppos >= isize)) 3314 break; 3030 break; 3315 part = min_t(loff_t, isize - 3031 part = min_t(loff_t, isize - *ppos, len); 3316 3032 3317 if (folio) { 3033 if (folio) { 3318 /* 3034 /* 3319 * If users can be wr 3035 * If users can be writing to this page using arbitrary 3320 * virtual addresses, 3036 * virtual addresses, take care about potential aliasing 3321 * before reading the 3037 * before reading the page on the kernel side. 
3322 */ 3038 */ 3323 if (mapping_writably_ 3039 if (mapping_writably_mapped(mapping)) 3324 flush_dcache_ 3040 flush_dcache_folio(folio); 3325 folio_mark_accessed(f 3041 folio_mark_accessed(folio); 3326 /* 3042 /* 3327 * Ok, we have the pa 3043 * Ok, we have the page, and it's up-to-date, so we can 3328 * now splice it into 3044 * now splice it into the pipe. 3329 */ 3045 */ 3330 n = splice_folio_into 3046 n = splice_folio_into_pipe(pipe, folio, *ppos, part); 3331 folio_put(folio); 3047 folio_put(folio); 3332 folio = NULL; 3048 folio = NULL; 3333 } else { 3049 } else { 3334 n = splice_zeropage_i 3050 n = splice_zeropage_into_pipe(pipe, *ppos, part); 3335 } 3051 } 3336 3052 3337 if (!n) 3053 if (!n) 3338 break; 3054 break; 3339 len -= n; 3055 len -= n; 3340 total_spliced += n; 3056 total_spliced += n; 3341 *ppos += n; 3057 *ppos += n; 3342 in->f_ra.prev_pos = *ppos; 3058 in->f_ra.prev_pos = *ppos; 3343 if (pipe_full(pipe->head, pip 3059 if (pipe_full(pipe->head, pipe->tail, pipe->max_usage)) 3344 break; 3060 break; 3345 3061 3346 cond_resched(); 3062 cond_resched(); 3347 } while (len); 3063 } while (len); 3348 3064 3349 if (folio) 3065 if (folio) 3350 folio_put(folio); 3066 folio_put(folio); 3351 3067 3352 file_accessed(in); 3068 file_accessed(in); 3353 return total_spliced ? total_spliced 3069 return total_spliced ? total_spliced : error; 3354 } 3070 } 3355 3071 3356 static loff_t shmem_file_llseek(struct file * 3072 static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence) 3357 { 3073 { 3358 struct address_space *mapping = file- 3074 struct address_space *mapping = file->f_mapping; 3359 struct inode *inode = mapping->host; 3075 struct inode *inode = mapping->host; 3360 3076 3361 if (whence != SEEK_DATA && whence != 3077 if (whence != SEEK_DATA && whence != SEEK_HOLE) 3362 return generic_file_llseek_si 3078 return generic_file_llseek_size(file, offset, whence, 3363 MAX_L 3079 MAX_LFS_FILESIZE, i_size_read(inode)); 3364 if (offset < 0) 3080 if (offset < 0) 3365 return -ENXIO; 3081 return -ENXIO; 3366 3082 3367 inode_lock(inode); 3083 inode_lock(inode); 3368 /* We're holding i_rwsem so we can ac 3084 /* We're holding i_rwsem so we can access i_size directly */ 3369 offset = mapping_seek_hole_data(mappi 3085 offset = mapping_seek_hole_data(mapping, offset, inode->i_size, whence); 3370 if (offset >= 0) 3086 if (offset >= 0) 3371 offset = vfs_setpos(file, off 3087 offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE); 3372 inode_unlock(inode); 3088 inode_unlock(inode); 3373 return offset; 3089 return offset; 3374 } 3090 } 3375 3091 3376 static long shmem_fallocate(struct file *file 3092 static long shmem_fallocate(struct file *file, int mode, loff_t offset, 3377 3093 loff_t len) 3378 { 3094 { 3379 struct inode *inode = file_inode(file 3095 struct inode *inode = file_inode(file); 3380 struct shmem_sb_info *sbinfo = SHMEM_ 3096 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb); 3381 struct shmem_inode_info *info = SHMEM 3097 struct shmem_inode_info *info = SHMEM_I(inode); 3382 struct shmem_falloc shmem_falloc; 3098 struct shmem_falloc shmem_falloc; 3383 pgoff_t start, index, end, undo_fallo 3099 pgoff_t start, index, end, undo_fallocend; 3384 int error; 3100 int error; 3385 3101 3386 if (mode & ~(FALLOC_FL_KEEP_SIZE | FA 3102 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) 3387 return -EOPNOTSUPP; 3103 return -EOPNOTSUPP; 3388 3104 3389 inode_lock(inode); 3105 inode_lock(inode); 3390 3106 3391 if (mode & FALLOC_FL_PUNCH_HOLE) { 3107 if (mode & FALLOC_FL_PUNCH_HOLE) { 
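/*
 * This branch is what a userspace hole punch on tmpfs reaches, e.g.
 * (error handling omitted):
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  offset, len);
 *
 * FALLOC_FL_PUNCH_HOLE must be combined with FALLOC_FL_KEEP_SIZE; whole
 * pages in the range are freed, partial pages at either end are zeroed,
 * and a plain fallocate(fd, 0, offset, len) takes the preallocation path
 * further down instead.
 */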
3392 struct address_space *mapping 3108 struct address_space *mapping = file->f_mapping; 3393 loff_t unmap_start = round_up 3109 loff_t unmap_start = round_up(offset, PAGE_SIZE); 3394 loff_t unmap_end = round_down 3110 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1; 3395 DECLARE_WAIT_QUEUE_HEAD_ONSTA 3111 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq); 3396 3112 3397 /* protected by i_rwsem */ 3113 /* protected by i_rwsem */ 3398 if (info->seals & (F_SEAL_WRI 3114 if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) { 3399 error = -EPERM; 3115 error = -EPERM; 3400 goto out; 3116 goto out; 3401 } 3117 } 3402 3118 3403 shmem_falloc.waitq = &shmem_f 3119 shmem_falloc.waitq = &shmem_falloc_waitq; 3404 shmem_falloc.start = (u64)unm 3120 shmem_falloc.start = (u64)unmap_start >> PAGE_SHIFT; 3405 shmem_falloc.next = (unmap_en 3121 shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT; 3406 spin_lock(&inode->i_lock); 3122 spin_lock(&inode->i_lock); 3407 inode->i_private = &shmem_fal 3123 inode->i_private = &shmem_falloc; 3408 spin_unlock(&inode->i_lock); 3124 spin_unlock(&inode->i_lock); 3409 3125 3410 if ((u64)unmap_end > (u64)unm 3126 if ((u64)unmap_end > (u64)unmap_start) 3411 unmap_mapping_range(m 3127 unmap_mapping_range(mapping, unmap_start, 3412 1 3128 1 + unmap_end - unmap_start, 0); 3413 shmem_truncate_range(inode, o 3129 shmem_truncate_range(inode, offset, offset + len - 1); 3414 /* No need to unmap again: ho 3130 /* No need to unmap again: hole-punching leaves COWed pages */ 3415 3131 3416 spin_lock(&inode->i_lock); 3132 spin_lock(&inode->i_lock); 3417 inode->i_private = NULL; 3133 inode->i_private = NULL; 3418 wake_up_all(&shmem_falloc_wai 3134 wake_up_all(&shmem_falloc_waitq); 3419 WARN_ON_ONCE(!list_empty(&shm 3135 WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head)); 3420 spin_unlock(&inode->i_lock); 3136 spin_unlock(&inode->i_lock); 3421 error = 0; 3137 error = 0; 3422 goto out; 3138 goto out; 3423 } 3139 } 3424 3140 3425 /* We need to check rlimit even when 3141 /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */ 3426 error = inode_newsize_ok(inode, offse 3142 error = inode_newsize_ok(inode, offset + len); 3427 if (error) 3143 if (error) 3428 goto out; 3144 goto out; 3429 3145 3430 if ((info->seals & F_SEAL_GROW) && of 3146 if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) { 3431 error = -EPERM; 3147 error = -EPERM; 3432 goto out; 3148 goto out; 3433 } 3149 } 3434 3150 3435 start = offset >> PAGE_SHIFT; 3151 start = offset >> PAGE_SHIFT; 3436 end = (offset + len + PAGE_SIZE - 1) 3152 end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT; 3437 /* Try to avoid a swapstorm if len is 3153 /* Try to avoid a swapstorm if len is impossible to satisfy */ 3438 if (sbinfo->max_blocks && end - start 3154 if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) { 3439 error = -ENOSPC; 3155 error = -ENOSPC; 3440 goto out; 3156 goto out; 3441 } 3157 } 3442 3158 3443 shmem_falloc.waitq = NULL; 3159 shmem_falloc.waitq = NULL; 3444 shmem_falloc.start = start; 3160 shmem_falloc.start = start; 3445 shmem_falloc.next = start; 3161 shmem_falloc.next = start; 3446 shmem_falloc.nr_falloced = 0; 3162 shmem_falloc.nr_falloced = 0; 3447 shmem_falloc.nr_unswapped = 0; 3163 shmem_falloc.nr_unswapped = 0; 3448 spin_lock(&inode->i_lock); 3164 spin_lock(&inode->i_lock); 3449 inode->i_private = &shmem_falloc; 3165 inode->i_private = &shmem_falloc; 3450 spin_unlock(&inode->i_lock); 3166 spin_unlock(&inode->i_lock); 3451 3167 3452 /* 3168 /* 3453 * info->fallocend is only 
relevant w 3169 * info->fallocend is only relevant when huge pages might be 3454 * involved: to prevent split_huge_pa 3170 * involved: to prevent split_huge_page() freeing fallocated 3455 * pages when FALLOC_FL_KEEP_SIZE com 3171 * pages when FALLOC_FL_KEEP_SIZE committed beyond i_size. 3456 */ 3172 */ 3457 undo_fallocend = info->fallocend; 3173 undo_fallocend = info->fallocend; 3458 if (info->fallocend < end) 3174 if (info->fallocend < end) 3459 info->fallocend = end; 3175 info->fallocend = end; 3460 3176 3461 for (index = start; index < end; ) { 3177 for (index = start; index < end; ) { 3462 struct folio *folio; 3178 struct folio *folio; 3463 3179 3464 /* 3180 /* 3465 * Check for fatal signal so !! 3181 * Good, the fallocate(2) manpage permits EINTR: we may have 3466 * situations. We don't want !! 3182 * been interrupted because we are using up too much memory. 3467 * signals as large fallocate << 3468 * e.g. periodic timers may r << 3469 * restarting. << 3470 */ 3183 */ 3471 if (fatal_signal_pending(curr !! 3184 if (signal_pending(current)) 3472 error = -EINTR; 3185 error = -EINTR; 3473 else if (shmem_falloc.nr_unsw 3186 else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced) 3474 error = -ENOMEM; 3187 error = -ENOMEM; 3475 else 3188 else 3476 error = shmem_get_fol !! 3189 error = shmem_get_folio(inode, index, &folio, 3477 !! 3190 SGP_FALLOC); 3478 if (error) { 3191 if (error) { 3479 info->fallocend = und 3192 info->fallocend = undo_fallocend; 3480 /* Remove the !uptoda 3193 /* Remove the !uptodate folios we added */ 3481 if (index > start) { 3194 if (index > start) { 3482 shmem_undo_ra 3195 shmem_undo_range(inode, 3483 (loff_t)s 3196 (loff_t)start << PAGE_SHIFT, 3484 ((loff_t) 3197 ((loff_t)index << PAGE_SHIFT) - 1, true); 3485 } 3198 } 3486 goto undone; 3199 goto undone; 3487 } 3200 } 3488 3201 3489 /* 3202 /* 3490 * Here is a more important o 3203 * Here is a more important optimization than it appears: 3491 * a second SGP_FALLOC on the 3204 * a second SGP_FALLOC on the same large folio will clear it, 3492 * making it uptodate and un- 3205 * making it uptodate and un-undoable if we fail later. 3493 */ 3206 */ 3494 index = folio_next_index(foli 3207 index = folio_next_index(folio); 3495 /* Beware 32-bit wraparound * 3208 /* Beware 32-bit wraparound */ 3496 if (!index) 3209 if (!index) 3497 index--; 3210 index--; 3498 3211 3499 /* 3212 /* 3500 * Inform shmem_writepage() h 3213 * Inform shmem_writepage() how far we have reached. 3501 * No need for lock or barrie 3214 * No need for lock or barrier: we have the page lock. 3502 */ 3215 */ 3503 if (!folio_test_uptodate(foli 3216 if (!folio_test_uptodate(folio)) 3504 shmem_falloc.nr_fallo 3217 shmem_falloc.nr_falloced += index - shmem_falloc.next; 3505 shmem_falloc.next = index; 3218 shmem_falloc.next = index; 3506 3219 3507 /* 3220 /* 3508 * If !uptodate, leave it tha 3221 * If !uptodate, leave it that way so that freeable folios 3509 * can be recognized if we ne 3222 * can be recognized if we need to rollback on error later. 3510 * But mark it dirty so that 3223 * But mark it dirty so that memory pressure will swap rather 3511 * than free the folios we ar 3224 * than free the folios we are allocating (and SGP_CACHE folios 3512 * might still be clean: we n 3225 * might still be clean: we now need to mark those dirty too). 
3513 */ 3226 */ 3514 folio_mark_dirty(folio); 3227 folio_mark_dirty(folio); 3515 folio_unlock(folio); 3228 folio_unlock(folio); 3516 folio_put(folio); 3229 folio_put(folio); 3517 cond_resched(); 3230 cond_resched(); 3518 } 3231 } 3519 3232 3520 if (!(mode & FALLOC_FL_KEEP_SIZE) && 3233 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) 3521 i_size_write(inode, offset + 3234 i_size_write(inode, offset + len); 3522 undone: 3235 undone: 3523 spin_lock(&inode->i_lock); 3236 spin_lock(&inode->i_lock); 3524 inode->i_private = NULL; 3237 inode->i_private = NULL; 3525 spin_unlock(&inode->i_lock); 3238 spin_unlock(&inode->i_lock); 3526 out: 3239 out: 3527 if (!error) 3240 if (!error) 3528 file_modified(file); 3241 file_modified(file); 3529 inode_unlock(inode); 3242 inode_unlock(inode); 3530 return error; 3243 return error; 3531 } 3244 } 3532 3245 3533 static int shmem_statfs(struct dentry *dentry 3246 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf) 3534 { 3247 { 3535 struct shmem_sb_info *sbinfo = SHMEM_ 3248 struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb); 3536 3249 3537 buf->f_type = TMPFS_MAGIC; 3250 buf->f_type = TMPFS_MAGIC; 3538 buf->f_bsize = PAGE_SIZE; 3251 buf->f_bsize = PAGE_SIZE; 3539 buf->f_namelen = NAME_MAX; 3252 buf->f_namelen = NAME_MAX; 3540 if (sbinfo->max_blocks) { 3253 if (sbinfo->max_blocks) { 3541 buf->f_blocks = sbinfo->max_b 3254 buf->f_blocks = sbinfo->max_blocks; 3542 buf->f_bavail = 3255 buf->f_bavail = 3543 buf->f_bfree = sbinfo->max_b 3256 buf->f_bfree = sbinfo->max_blocks - 3544 percpu_counte 3257 percpu_counter_sum(&sbinfo->used_blocks); 3545 } 3258 } 3546 if (sbinfo->max_inodes) { 3259 if (sbinfo->max_inodes) { 3547 buf->f_files = sbinfo->max_in 3260 buf->f_files = sbinfo->max_inodes; 3548 buf->f_ffree = sbinfo->free_i 3261 buf->f_ffree = sbinfo->free_ispace / BOGO_INODE_SIZE; 3549 } 3262 } 3550 /* else leave those fields 0 like sim 3263 /* else leave those fields 0 like simple_statfs */ 3551 3264 3552 buf->f_fsid = uuid_to_fsid(dentry->d_ 3265 buf->f_fsid = uuid_to_fsid(dentry->d_sb->s_uuid.b); 3553 3266 3554 return 0; 3267 return 0; 3555 } 3268 } 3556 3269 3557 /* 3270 /* 3558 * File creation. Allocate an inode, and we'r 3271 * File creation. Allocate an inode, and we're done.. 
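/*
 * Illustrative userspace sketch (not part of shmem.c): how the two
 * shmem_fallocate() paths above are reached from fallocate(2) on a
 * shmem-backed file.  Preallocation may fail with ENOSPC against the
 * mount's block limit or with EINTR if a signal is pending; hole
 * punching must pass FALLOC_FL_KEEP_SIZE.  Name and sizes are arbitrary.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *	#include <err.h>
 *
 *	int main(void)
 *	{
 *		int fd = memfd_create("demo", 0);	// memfd lives on shmem
 *
 *		if (fd < 0)
 *			err(1, "memfd_create");
 *		if (fallocate(fd, 0, 0, 8 << 20) < 0)	// preallocate 8MiB
 *			err(1, "fallocate");
 *		if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *			      1 << 20, 2 << 20) < 0)	// punch a 2MiB hole
 *			err(1, "punch");
 *		return 0;
 *	}
 */
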
/*
 * File creation. Allocate an inode, and we're done..
 */
static int
shmem_mknod(struct mnt_idmap *idmap, struct inode *dir,
	    struct dentry *dentry, umode_t mode, dev_t dev)
{
	struct inode *inode;
	int error;

	inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, dev, VM_NORESERVE);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	error = simple_acl_create(dir, inode);
	if (error)
		goto out_iput;
	error = security_inode_init_security(inode, dir, &dentry->d_name,
					     shmem_initxattrs, NULL);
	if (error && error != -EOPNOTSUPP)
		goto out_iput;

	error = simple_offset_add(shmem_get_offset_ctx(dir), dentry);
	if (error)
		goto out_iput;

	dir->i_size += BOGO_DIRENT_SIZE;
	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
	inode_inc_iversion(dir);
	d_instantiate(dentry, inode);
	dget(dentry); /* Extra count - pin the dentry in core */
	return error;

out_iput:
	iput(inode);
	return error;
}

static int
shmem_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
	      struct file *file, umode_t mode)
{
	struct inode *inode;
	int error;

	inode = shmem_get_inode(idmap, dir->i_sb, dir, mode, 0, VM_NORESERVE);
	if (IS_ERR(inode)) {
		error = PTR_ERR(inode);
		goto err_out;
	}
	error = security_inode_init_security(inode, dir, NULL,
					     shmem_initxattrs, NULL);
	if (error && error != -EOPNOTSUPP)
		goto out_iput;
	error = simple_acl_create(dir, inode);
	if (error)
		goto out_iput;
	d_tmpfile(file, inode);

err_out:
	return finish_open_simple(file, error);
out_iput:
	iput(inode);
	return error;
}

static int shmem_mkdir(struct mnt_idmap *idmap, struct inode *dir,
		       struct dentry *dentry, umode_t mode)
{
	int error;

	error = shmem_mknod(idmap, dir, dentry, mode | S_IFDIR, 0);
	if (error)
		return error;
	inc_nlink(dir);
	return 0;
}

static int shmem_create(struct mnt_idmap *idmap, struct inode *dir,
			struct dentry *dentry, umode_t mode, bool excl)
{
	return shmem_mknod(idmap, dir, dentry, mode | S_IFREG, 0);
}

/*
 * Link a file..
 */
static int shmem_link(struct dentry *old_dentry, struct inode *dir,
		      struct dentry *dentry)
{
	struct inode *inode = d_inode(old_dentry);
	int ret = 0;

	/*
	 * No ordinary (disk based) filesystem counts links as inodes;
	 * but each new link needs a new dentry, pinning lowmem, and
	 * tmpfs dentries cannot be pruned until they are unlinked.
	 * But if an O_TMPFILE file is linked into the tmpfs, the
	 * first link must skip that, to get the accounting right.
	 */
	if (inode->i_nlink) {
		ret = shmem_reserve_inode(inode->i_sb, NULL);
		if (ret)
			goto out;
	}

	ret = simple_offset_add(shmem_get_offset_ctx(dir), dentry);
	if (ret) {
		if (inode->i_nlink)
			shmem_free_inode(inode->i_sb, 0);
		goto out;
	}

	dir->i_size += BOGO_DIRENT_SIZE;
	inode_set_mtime_to_ts(dir,
			inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode)));
	inode_inc_iversion(dir);
	inc_nlink(inode);
	ihold(inode);	/* New dentry reference */
	dget(dentry);	/* Extra pinning count for the created dentry */
	d_instantiate(dentry, inode);
out:
	return ret;
}

static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);

	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
		shmem_free_inode(inode->i_sb, 0);

	simple_offset_remove(shmem_get_offset_ctx(dir), dentry);

	dir->i_size -= BOGO_DIRENT_SIZE;
	inode_set_mtime_to_ts(dir,
			inode_set_ctime_to_ts(dir, inode_set_ctime_current(inode)));
	inode_inc_iversion(dir);
	drop_nlink(inode);
	dput(dentry);	/* Undo the count from "create" - does all the work */
	return 0;
}

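/*
 * Illustrative userspace sketch (not part of shmem.c): the O_TMPFILE
 * case that the comment in shmem_link() above is about.  An unnamed
 * tmpfs file starts with i_nlink == 0, so its first linkat() must not
 * be charged as a second inode.  Assumes /dev/shm is a tmpfs mount;
 * paths are arbitrary.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <stdio.h>
 *
 *	int fd = open("/dev/shm", O_TMPFILE | O_RDWR, 0600);
 *	char path[64];
 *
 *	snprintf(path, sizeof(path), "/proc/self/fd/%d", fd);
 *	linkat(AT_FDCWD, path, AT_FDCWD, "/dev/shm/named", AT_SYMLINK_FOLLOW);
 */
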
static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
{
	if (!simple_offset_empty(dentry))
		return -ENOTEMPTY;

	drop_nlink(d_inode(dentry));
	drop_nlink(dir);
	return shmem_unlink(dir, dentry);
}

static int shmem_whiteout(struct mnt_idmap *idmap,
			  struct inode *old_dir, struct dentry *old_dentry)
{
	struct dentry *whiteout;
	int error;

	whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
	if (!whiteout)
		return -ENOMEM;

	error = shmem_mknod(idmap, old_dir, whiteout,
			    S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
	dput(whiteout);
	if (error)
		return error;

	/*
	 * Cheat and hash the whiteout while the old dentry is still in
	 * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
	 *
	 * d_lookup() will consistently find one of them at this point,
	 * not sure which one, but that isn't even important.
	 */
	d_rehash(whiteout);
	return 0;
}

/*
 * The VFS layer already does all the dentry stuff for rename,
 * we just have to decrement the usage count for the target if
 * it exists so that the VFS layer correctly free's it when it
 * gets overwritten.
 */
static int shmem_rename2(struct mnt_idmap *idmap,
			 struct inode *old_dir, struct dentry *old_dentry,
			 struct inode *new_dir, struct dentry *new_dentry,
			 unsigned int flags)
{
	struct inode *inode = d_inode(old_dentry);
	int they_are_dirs = S_ISDIR(inode->i_mode);
	int error;

	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
		return -EINVAL;

	if (flags & RENAME_EXCHANGE)
		return simple_offset_rename_exchange(old_dir, old_dentry,
						     new_dir, new_dentry);

	if (!simple_offset_empty(new_dentry))
		return -ENOTEMPTY;

	if (flags & RENAME_WHITEOUT) {
		error = shmem_whiteout(idmap, old_dir, old_dentry);
		if (error)
			return error;
	}

	error = simple_offset_rename(old_dir, old_dentry, new_dir, new_dentry);
	if (error)
		return error;

	if (d_really_is_positive(new_dentry)) {
		(void) shmem_unlink(new_dir, new_dentry);
		if (they_are_dirs) {
			drop_nlink(d_inode(new_dentry));
			drop_nlink(old_dir);
		}
	} else if (they_are_dirs) {
		drop_nlink(old_dir);
		inc_nlink(new_dir);
	}

	old_dir->i_size -= BOGO_DIRENT_SIZE;
	new_dir->i_size += BOGO_DIRENT_SIZE;
	simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);
	inode_inc_iversion(old_dir);
	inode_inc_iversion(new_dir);
	return 0;
}

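/*
 * Illustrative userspace sketch (not part of shmem.c): the rename flags
 * accepted by shmem_rename2() above, via renameat2(2) (glibc >= 2.28).
 * Paths are arbitrary and assume a tmpfs mount at /dev/shm.
 *
 *	#define _GNU_SOURCE
 *	#include <stdio.h>
 *
 *	// Fails with EEXIST instead of replacing an existing target:
 *	renameat2(AT_FDCWD, "/dev/shm/a", AT_FDCWD, "/dev/shm/b",
 *		  RENAME_NOREPLACE);
 *	// Atomically swaps the two names; both must already exist:
 *	renameat2(AT_FDCWD, "/dev/shm/a", AT_FDCWD, "/dev/shm/b",
 *		  RENAME_EXCHANGE);
 */
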
static int shmem_symlink(struct mnt_idmap *idmap, struct inode *dir,
			 struct dentry *dentry, const char *symname)
{
	int error;
	int len;
	struct inode *inode;
	struct folio *folio;

	len = strlen(symname) + 1;
	if (len > PAGE_SIZE)
		return -ENAMETOOLONG;

	inode = shmem_get_inode(idmap, dir->i_sb, dir, S_IFLNK | 0777, 0,
				VM_NORESERVE);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	error = security_inode_init_security(inode, dir, &dentry->d_name,
					     shmem_initxattrs, NULL);
	if (error && error != -EOPNOTSUPP)
		goto out_iput;

	error = simple_offset_add(shmem_get_offset_ctx(dir), dentry);
	if (error)
		goto out_iput;

	inode->i_size = len-1;
	if (len <= SHORT_SYMLINK_LEN) {
		inode->i_link = kmemdup(symname, len, GFP_KERNEL);
		if (!inode->i_link) {
			error = -ENOMEM;
			goto out_remove_offset;
		}
		inode->i_op = &shmem_short_symlink_operations;
	} else {
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &shmem_aops;
		error = shmem_get_folio(inode, 0, &folio, SGP_WRITE);
		if (error)
			goto out_remove_offset;
		inode->i_op = &shmem_symlink_inode_operations;
		memcpy(folio_address(folio), symname, len);
		folio_mark_uptodate(folio);
		folio_mark_dirty(folio);
		folio_unlock(folio);
		folio_put(folio);
	}
	dir->i_size += BOGO_DIRENT_SIZE;
	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
	inode_inc_iversion(dir);
	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;

out_remove_offset:
	simple_offset_remove(shmem_get_offset_ctx(dir), dentry);
out_iput:
	iput(inode);
	return error;
}

static void shmem_put_link(void *arg)
{
	folio_mark_accessed(arg);
	folio_put(arg);
}

static const char *shmem_get_link(struct dentry *dentry, struct inode *inode,
				  struct delayed_call *done)
{
	struct folio *folio = NULL;
	int error;

	if (!dentry) {
		folio = filemap_get_folio(inode->i_mapping, 0);
		if (IS_ERR(folio))
			return ERR_PTR(-ECHILD);
		if (PageHWPoison(folio_page(folio, 0)) ||
		    !folio_test_uptodate(folio)) {
			folio_put(folio);
			return ERR_PTR(-ECHILD);
		}
	} else {
		error = shmem_get_folio(inode, 0, &folio, SGP_READ);
		if (error)
			return ERR_PTR(error);
		if (!folio)
			return ERR_PTR(-ECHILD);
		if (PageHWPoison(folio_page(folio, 0))) {
			folio_unlock(folio);
			folio_put(folio);
			return ERR_PTR(-ECHILD);
		}
		folio_unlock(folio);
	}
	set_delayed_call(done, shmem_put_link, folio);
	return folio_address(folio);
}

#ifdef CONFIG_TMPFS_XATTR

static int shmem_fileattr_get(struct dentry *dentry, struct fileattr *fa)
{
	struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));

	fileattr_fill_flags(fa, info->fsflags & SHMEM_FL_USER_VISIBLE);

	return 0;
}

static int shmem_fileattr_set(struct mnt_idmap *idmap,
			      struct dentry *dentry, struct fileattr *fa)
{
	struct inode *inode = d_inode(dentry);
	struct shmem_inode_info *info = SHMEM_I(inode);

	if (fileattr_has_fsx(fa))
		return -EOPNOTSUPP;
	if (fa->flags & ~SHMEM_FL_USER_MODIFIABLE)
		return -EOPNOTSUPP;

	info->fsflags = (info->fsflags & ~SHMEM_FL_USER_MODIFIABLE) |
		(fa->flags & SHMEM_FL_USER_MODIFIABLE);

	shmem_set_inode_flags(inode, info->fsflags);
	inode_set_ctime_current(inode);
	inode_inc_iversion(inode);
	return 0;
}

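/*
 * Illustrative userspace sketch (not part of shmem.c): the fileattr
 * interface above is what chattr/lsattr use.  Which flags a tmpfs file
 * accepts is limited by SHMEM_FL_USER_MODIFIABLE; FS_NODUMP_FL is used
 * here only as an example and the path is arbitrary.
 *
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *	#include <linux/fs.h>
 *
 *	int attr;
 *	int fd = open("/dev/shm/file", O_RDONLY);
 *
 *	ioctl(fd, FS_IOC_GETFLAGS, &attr);
 *	attr |= FS_NODUMP_FL;
 *	ioctl(fd, FS_IOC_SETFLAGS, &attr);
 */
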
/*
 * Superblocks without xattr inode operations may get some security.* xattr
 * support from the LSM "for free". As soon as we have any other xattrs
 * like ACLs, we also need to implement the security.* handlers at
 * filesystem level, though.
 */

/*
 * Callback for security_inode_init_security() for acquiring xattrs.
 */
static int shmem_initxattrs(struct inode *inode,
			    const struct xattr *xattr_array, void *fs_info)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	const struct xattr *xattr;
	struct simple_xattr *new_xattr;
	size_t ispace = 0;
	size_t len;

	if (sbinfo->max_inodes) {
		for (xattr = xattr_array; xattr->name != NULL; xattr++) {
			ispace += simple_xattr_space(xattr->name,
				xattr->value_len + XATTR_SECURITY_PREFIX_LEN);
		}
		if (ispace) {
			raw_spin_lock(&sbinfo->stat_lock);
			if (sbinfo->free_ispace < ispace)
				ispace = 0;
			else
				sbinfo->free_ispace -= ispace;
			raw_spin_unlock(&sbinfo->stat_lock);
			if (!ispace)
				return -ENOSPC;
		}
	}

	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
		new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
		if (!new_xattr)
			break;

		len = strlen(xattr->name) + 1;
		new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
					  GFP_KERNEL_ACCOUNT);
		if (!new_xattr->name) {
			kvfree(new_xattr);
			break;
		}

		memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
		       XATTR_SECURITY_PREFIX_LEN);
		memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
		       xattr->name, len);

		simple_xattr_add(&info->xattrs, new_xattr);
	}

	if (xattr->name != NULL) {
		if (ispace) {
			raw_spin_lock(&sbinfo->stat_lock);
			sbinfo->free_ispace += ispace;
			raw_spin_unlock(&sbinfo->stat_lock);
		}
		simple_xattrs_free(&info->xattrs, NULL);
		return -ENOMEM;
	}

	return 0;
}

static int shmem_xattr_handler_get(const struct xattr_handler *handler,
				   struct dentry *unused, struct inode *inode,
				   const char *name, void *buffer, size_t size)
{
	struct shmem_inode_info *info = SHMEM_I(inode);

	name = xattr_full_name(handler, name);
	return simple_xattr_get(&info->xattrs, name, buffer, size);
}

static int shmem_xattr_handler_set(const struct xattr_handler *handler,
				   struct mnt_idmap *idmap,
				   struct dentry *unused, struct inode *inode,
				   const char *name, const void *value,
				   size_t size, int flags)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct simple_xattr *old_xattr;
	size_t ispace = 0;

	name = xattr_full_name(handler, name);
	if (value && sbinfo->max_inodes) {
		ispace = simple_xattr_space(name, size);
		raw_spin_lock(&sbinfo->stat_lock);
		if (sbinfo->free_ispace < ispace)
			ispace = 0;
		else
			sbinfo->free_ispace -= ispace;
		raw_spin_unlock(&sbinfo->stat_lock);
		if (!ispace)
			return -ENOSPC;
	}

	old_xattr = simple_xattr_set(&info->xattrs, name, value, size, flags);
	if (!IS_ERR(old_xattr)) {
		ispace = 0;
		if (old_xattr && sbinfo->max_inodes)
			ispace = simple_xattr_space(old_xattr->name,
						    old_xattr->size);
		simple_xattr_free(old_xattr);
		old_xattr = NULL;
		inode_set_ctime_current(inode);
		inode_inc_iversion(inode);
	}
	if (ispace) {
		raw_spin_lock(&sbinfo->stat_lock);
		sbinfo->free_ispace += ispace;
		raw_spin_unlock(&sbinfo->stat_lock);
	}
	return PTR_ERR(old_xattr);
}

static const struct xattr_handler shmem_security_xattr_handler = {
	.prefix = XATTR_SECURITY_PREFIX,
	.get = shmem_xattr_handler_get,
	.set = shmem_xattr_handler_set,
};

static const struct xattr_handler shmem_trusted_xattr_handler = {
	.prefix = XATTR_TRUSTED_PREFIX,
	.get = shmem_xattr_handler_get,
	.set = shmem_xattr_handler_set,
};

static const struct xattr_handler shmem_user_xattr_handler = {
	.prefix = XATTR_USER_PREFIX,
	.get = shmem_xattr_handler_get,
	.set = shmem_xattr_handler_set,
};

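/*
 * Illustrative userspace sketch (not part of shmem.c): the user.* xattrs
 * served by shmem_user_xattr_handler above.  When the mount has an inode
 * limit, the value is charged against free_ispace exactly as
 * shmem_xattr_handler_set() shows.  Path and attribute name are arbitrary.
 *
 *	#include <sys/xattr.h>
 *
 *	setxattr("/dev/shm/file", "user.note", "hi", 2, XATTR_CREATE);
 *
 *	char buf[16];
 *	ssize_t n = getxattr("/dev/shm/file", "user.note", buf, sizeof(buf));
 */
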
static const struct xattr_handler * const shmem_xattr_handlers[] = {
	&shmem_security_xattr_handler,
	&shmem_trusted_xattr_handler,
	&shmem_user_xattr_handler,
	NULL
};

static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
	struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
	return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
}
#endif /* CONFIG_TMPFS_XATTR */

static const struct inode_operations shmem_short_symlink_operations = {
	.getattr	= shmem_getattr,
	.setattr	= shmem_setattr,
	.get_link	= simple_get_link,
#ifdef CONFIG_TMPFS_XATTR
	.listxattr	= shmem_listxattr,
#endif
};

static const struct inode_operations shmem_symlink_inode_operations = {
	.getattr	= shmem_getattr,
	.setattr	= shmem_setattr,
	.get_link	= shmem_get_link,
#ifdef CONFIG_TMPFS_XATTR
	.listxattr	= shmem_listxattr,
#endif
};

static struct dentry *shmem_get_parent(struct dentry *child)
{
	return ERR_PTR(-ESTALE);
}

static int shmem_match(struct inode *ino, void *vfh)
{
	__u32 *fh = vfh;
	__u64 inum = fh[2];
	inum = (inum << 32) | fh[1];
	return ino->i_ino == inum && fh[0] == ino->i_generation;
}

/* Find any alias of inode, but prefer a hashed alias */
static struct dentry *shmem_find_alias(struct inode *inode)
{
	struct dentry *alias = d_find_alias(inode);

	return alias ?: d_find_any_alias(inode);
}

static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
		struct fid *fid, int fh_len, int fh_type)
{
	struct inode *inode;
	struct dentry *dentry = NULL;
	u64 inum;

	if (fh_len < 3)
		return NULL;

	inum = fid->raw[2];
	inum = (inum << 32) | fid->raw[1];

	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
			 shmem_match, fid->raw);
	if (inode) {
		dentry = shmem_find_alias(inode);
		iput(inode);
	}

	return dentry;
}

static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
				struct inode *parent)
{
	if (*len < 3) {
		*len = 3;
		return FILEID_INVALID;
	}

	if (inode_unhashed(inode)) {
		/* Unfortunately insert_inode_hash is not idempotent,
		 * so as we hash inodes here rather than at creation
		 * time, we need a lock to ensure we only try
		 * to do it once
		 */
		static DEFINE_SPINLOCK(lock);
		spin_lock(&lock);
		if (inode_unhashed(inode))
			__insert_inode_hash(inode,
					    inode->i_ino + inode->i_generation);
		spin_unlock(&lock);
	}

	fh[0] = inode->i_generation;
	fh[1] = inode->i_ino;
	fh[2] = ((__u64)inode->i_ino) >> 32;

	*len = 3;
	return 1;
}

static const struct export_operations shmem_export_ops = {
	.get_parent	= shmem_get_parent,
	.encode_fh	= shmem_encode_fh,
	.fh_to_dentry	= shmem_fh_to_dentry,
};

enum shmem_param {
	Opt_gid,
	Opt_huge,
	Opt_mode,
	Opt_mpol,
	Opt_nr_blocks,
	Opt_nr_inodes,
	Opt_size,
	Opt_uid,
	Opt_inode32,
	Opt_inode64,
	Opt_noswap,
	Opt_quota,
	Opt_usrquota,
	Opt_grpquota,
	Opt_usrquota_block_hardlimit,
	Opt_usrquota_inode_hardlimit,
	Opt_grpquota_block_hardlimit,
	Opt_grpquota_inode_hardlimit,
};

static const struct constant_table shmem_param_enums_huge[] = {
	{"never",	SHMEM_HUGE_NEVER },
	{"always",	SHMEM_HUGE_ALWAYS },
	{"within_size",	SHMEM_HUGE_WITHIN_SIZE },
	{"advise",	SHMEM_HUGE_ADVISE },
	{}
};

const struct fs_parameter_spec shmem_fs_parameters[] = {
	fsparam_u32   ("gid",		Opt_gid),
	fsparam_enum  ("huge",		Opt_huge,  shmem_param_enums_huge),
	fsparam_u32oct("mode",		Opt_mode),
	fsparam_string("mpol",		Opt_mpol),
	fsparam_string("nr_blocks",	Opt_nr_blocks),
	fsparam_string("nr_inodes",	Opt_nr_inodes),
	fsparam_string("size",		Opt_size),
	fsparam_u32   ("uid",		Opt_uid),
	fsparam_flag  ("inode32",	Opt_inode32),
	fsparam_flag  ("inode64",	Opt_inode64),
	fsparam_flag  ("noswap",	Opt_noswap),
#ifdef CONFIG_TMPFS_QUOTA
	fsparam_flag  ("quota",		Opt_quota),
	fsparam_flag  ("usrquota",	Opt_usrquota),
	fsparam_flag  ("grpquota",	Opt_grpquota),
	fsparam_string("usrquota_block_hardlimit", Opt_usrquota_block_hardlimit),
	fsparam_string("usrquota_inode_hardlimit", Opt_usrquota_inode_hardlimit),
	fsparam_string("grpquota_block_hardlimit", Opt_grpquota_block_hardlimit),
	fsparam_string("grpquota_inode_hardlimit", Opt_grpquota_inode_hardlimit),
#endif
	{}
};

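/*
 * Illustrative userspace sketch (not part of shmem.c): these parameters
 * are what mount(2)/fsconfig(2) feed into shmem_parse_options() and
 * shmem_parse_one() below.  The mount point is arbitrary; huge=, noswap
 * and the quota options depend on kernel configuration.
 *
 *	#include <sys/mount.h>
 *
 *	mount("tmpfs", "/mnt/tmp", "tmpfs", 0,
 *	      "size=64m,nr_inodes=10k,mode=1777,huge=within_size");
 */
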
static int shmem_parse_one(struct fs_context *fc, struct fs_parameter *param)
{
	struct shmem_options *ctx = fc->fs_private;
	struct fs_parse_result result;
	unsigned long long size;
	char *rest;
	int opt;
	kuid_t kuid;
	kgid_t kgid;

	opt = fs_parse(fc, shmem_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_size:
		size = memparse(param->string, &rest);
		if (*rest == '%') {
			size <<= PAGE_SHIFT;
			size *= totalram_pages();
			do_div(size, 100);
			rest++;
		}
		if (*rest)
			goto bad_value;
		ctx->blocks = DIV_ROUND_UP(size, PAGE_SIZE);
		ctx->seen |= SHMEM_SEEN_BLOCKS;
		break;
	case Opt_nr_blocks:
		ctx->blocks = memparse(param->string, &rest);
		if (*rest || ctx->blocks > LONG_MAX)
			goto bad_value;
		ctx->seen |= SHMEM_SEEN_BLOCKS;
		break;
	case Opt_nr_inodes:
		ctx->inodes = memparse(param->string, &rest);
		if (*rest || ctx->inodes > ULONG_MAX / BOGO_INODE_SIZE)
			goto bad_value;
		ctx->seen |= SHMEM_SEEN_INODES;
		break;
	case Opt_mode:
		ctx->mode = result.uint_32 & 07777;
		break;
	case Opt_uid:
		kuid = make_kuid(current_user_ns(), result.uint_32);
		if (!uid_valid(kuid))
			goto bad_value;

		/*
		 * The requested uid must be representable in the
		 * filesystem's idmapping.
		 */
		if (!kuid_has_mapping(fc->user_ns, kuid))
			goto bad_value;

		ctx->uid = kuid;
		break;
	case Opt_gid:
		kgid = make_kgid(current_user_ns(), result.uint_32);
		if (!gid_valid(kgid))
			goto bad_value;

		/*
		 * The requested gid must be representable in the
		 * filesystem's idmapping.
		 */
		if (!kgid_has_mapping(fc->user_ns, kgid))
			goto bad_value;

		ctx->gid = kgid;
		break;
	case Opt_huge:
		ctx->huge = result.uint_32;
		if (ctx->huge != SHMEM_HUGE_NEVER &&
		    !(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
		      has_transparent_hugepage()))
			goto unsupported_parameter;
		ctx->seen |= SHMEM_SEEN_HUGE;
		break;
	case Opt_mpol:
		if (IS_ENABLED(CONFIG_NUMA)) {
			mpol_put(ctx->mpol);
			ctx->mpol = NULL;
			if (mpol_parse_str(param->string, &ctx->mpol))
				goto bad_value;
			break;
		}
		goto unsupported_parameter;
	case Opt_inode32:
		ctx->full_inums = false;
		ctx->seen |= SHMEM_SEEN_INUMS;
		break;
	case Opt_inode64:
		if (sizeof(ino_t) < 8) {
			return invalfc(fc,
				       "Cannot use inode64 with <64bit inums in kernel\n");
		}
		ctx->full_inums = true;
		ctx->seen |= SHMEM_SEEN_INUMS;
		break;
	case Opt_noswap:
		if ((fc->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN)) {
			return invalfc(fc,
				       "Turning off swap in unprivileged tmpfs mounts unsupported");
		}
		ctx->noswap = true;
		ctx->seen |= SHMEM_SEEN_NOSWAP;
		break;
	case Opt_quota:
		if (fc->user_ns != &init_user_ns)
			return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported");
		ctx->seen |= SHMEM_SEEN_QUOTA;
		ctx->quota_types |= (QTYPE_MASK_USR | QTYPE_MASK_GRP);
		break;
	case Opt_usrquota:
		if (fc->user_ns != &init_user_ns)
			return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported");
		ctx->seen |= SHMEM_SEEN_QUOTA;
		ctx->quota_types |= QTYPE_MASK_USR;
		break;
	case Opt_grpquota:
		if (fc->user_ns != &init_user_ns)
			return invalfc(fc, "Quotas in unprivileged tmpfs mounts are unsupported");
		ctx->seen |= SHMEM_SEEN_QUOTA;
		ctx->quota_types |= QTYPE_MASK_GRP;
		break;
	case Opt_usrquota_block_hardlimit:
		size = memparse(param->string, &rest);
		if (*rest || !size)
			goto bad_value;
		if (size > SHMEM_QUOTA_MAX_SPC_LIMIT)
			return invalfc(fc,
				       "User quota block hardlimit too large.");
		ctx->qlimits.usrquota_bhardlimit = size;
		break;
	case Opt_grpquota_block_hardlimit:
		size = memparse(param->string, &rest);
		if (*rest || !size)
			goto bad_value;
		if (size > SHMEM_QUOTA_MAX_SPC_LIMIT)
			return invalfc(fc,
				       "Group quota block hardlimit too large.");
		ctx->qlimits.grpquota_bhardlimit = size;
		break;
	case Opt_usrquota_inode_hardlimit:
		size = memparse(param->string, &rest);
		if (*rest || !size)
			goto bad_value;
		if (size > SHMEM_QUOTA_MAX_INO_LIMIT)
			return invalfc(fc,
				       "User quota inode hardlimit too large.");
		ctx->qlimits.usrquota_ihardlimit = size;
		break;
	case Opt_grpquota_inode_hardlimit:
		size = memparse(param->string, &rest);
		if (*rest || !size)
			goto bad_value;
		if (size > SHMEM_QUOTA_MAX_INO_LIMIT)
			return invalfc(fc,
				       "Group quota inode hardlimit too large.");
		ctx->qlimits.grpquota_ihardlimit = size;
		break;
	}
	return 0;

unsupported_parameter:
	return invalfc(fc, "Unsupported parameter '%s'", param->key);
bad_value:
	return invalfc(fc, "Bad value for '%s'", param->key);
}

static int shmem_parse_options(struct fs_context *fc, void *data)
{
	char *options = data;

	if (options) {
		int err = security_sb_eat_lsm_opts(options, &fc->security);
		if (err)
			return err;
	}

	while (options != NULL) {
		char *this_char = options;
		for (;;) {
			/*
			 * NUL-terminate this option: unfortunately,
			 * mount options form a comma-separated list,
			 * but mpol's nodelist may also contain commas.
			 */
			options = strchr(options, ',');
			if (options == NULL)
				break;
			options++;
			if (!isdigit(*options)) {
				options[-1] = '\0';
				break;
			}
		}
		if (*this_char) {
			char *value = strchr(this_char, '=');
			size_t len = 0;
			int err;

			if (value) {
				*value++ = '\0';
				len = strlen(value);
			}
			err = vfs_parse_fs_string(fc, this_char, value, len);
			if (err < 0)
				return err;
		}
	}
	return 0;
}

4441 */ 4158 */ 4442 static int shmem_reconfigure(struct fs_contex 4159 static int shmem_reconfigure(struct fs_context *fc) 4443 { 4160 { 4444 struct shmem_options *ctx = fc->fs_pr 4161 struct shmem_options *ctx = fc->fs_private; 4445 struct shmem_sb_info *sbinfo = SHMEM_ 4162 struct shmem_sb_info *sbinfo = SHMEM_SB(fc->root->d_sb); 4446 unsigned long used_isp; 4163 unsigned long used_isp; 4447 struct mempolicy *mpol = NULL; 4164 struct mempolicy *mpol = NULL; 4448 const char *err; 4165 const char *err; 4449 4166 4450 raw_spin_lock(&sbinfo->stat_lock); 4167 raw_spin_lock(&sbinfo->stat_lock); 4451 used_isp = sbinfo->max_inodes * BOGO_ 4168 used_isp = sbinfo->max_inodes * BOGO_INODE_SIZE - sbinfo->free_ispace; 4452 4169 4453 if ((ctx->seen & SHMEM_SEEN_BLOCKS) & 4170 if ((ctx->seen & SHMEM_SEEN_BLOCKS) && ctx->blocks) { 4454 if (!sbinfo->max_blocks) { 4171 if (!sbinfo->max_blocks) { 4455 err = "Cannot retroac 4172 err = "Cannot retroactively limit size"; 4456 goto out; 4173 goto out; 4457 } 4174 } 4458 if (percpu_counter_compare(&s 4175 if (percpu_counter_compare(&sbinfo->used_blocks, 4459 ct 4176 ctx->blocks) > 0) { 4460 err = "Too small a si 4177 err = "Too small a size for current use"; 4461 goto out; 4178 goto out; 4462 } 4179 } 4463 } 4180 } 4464 if ((ctx->seen & SHMEM_SEEN_INODES) & 4181 if ((ctx->seen & SHMEM_SEEN_INODES) && ctx->inodes) { 4465 if (!sbinfo->max_inodes) { 4182 if (!sbinfo->max_inodes) { 4466 err = "Cannot retroac 4183 err = "Cannot retroactively limit inodes"; 4467 goto out; 4184 goto out; 4468 } 4185 } 4469 if (ctx->inodes * BOGO_INODE_ 4186 if (ctx->inodes * BOGO_INODE_SIZE < used_isp) { 4470 err = "Too few inodes 4187 err = "Too few inodes for current use"; 4471 goto out; 4188 goto out; 4472 } 4189 } 4473 } 4190 } 4474 4191 4475 if ((ctx->seen & SHMEM_SEEN_INUMS) && 4192 if ((ctx->seen & SHMEM_SEEN_INUMS) && !ctx->full_inums && 4476 sbinfo->next_ino > UINT_MAX) { 4193 sbinfo->next_ino > UINT_MAX) { 4477 err = "Current inum too high 4194 err = "Current inum too high to switch to 32-bit inums"; 4478 goto out; 4195 goto out; 4479 } 4196 } 4480 if ((ctx->seen & SHMEM_SEEN_NOSWAP) & 4197 if ((ctx->seen & SHMEM_SEEN_NOSWAP) && ctx->noswap && !sbinfo->noswap) { 4481 err = "Cannot disable swap on 4198 err = "Cannot disable swap on remount"; 4482 goto out; 4199 goto out; 4483 } 4200 } 4484 if (!(ctx->seen & SHMEM_SEEN_NOSWAP) 4201 if (!(ctx->seen & SHMEM_SEEN_NOSWAP) && !ctx->noswap && sbinfo->noswap) { 4485 err = "Cannot enable swap on 4202 err = "Cannot enable swap on remount if it was disabled on first mount"; 4486 goto out; 4203 goto out; 4487 } 4204 } 4488 4205 4489 if (ctx->seen & SHMEM_SEEN_QUOTA && 4206 if (ctx->seen & SHMEM_SEEN_QUOTA && 4490 !sb_any_quota_loaded(fc->root->d_ 4207 !sb_any_quota_loaded(fc->root->d_sb)) { 4491 err = "Cannot enable quota on 4208 err = "Cannot enable quota on remount"; 4492 goto out; 4209 goto out; 4493 } 4210 } 4494 4211 4495 #ifdef CONFIG_TMPFS_QUOTA 4212 #ifdef CONFIG_TMPFS_QUOTA 4496 #define CHANGED_LIMIT(name) 4213 #define CHANGED_LIMIT(name) \ 4497 (ctx->qlimits.name## hardlimit && 4214 (ctx->qlimits.name## hardlimit && \ 4498 (ctx->qlimits.name## hardlimit != sbi 4215 (ctx->qlimits.name## hardlimit != sbinfo->qlimits.name## hardlimit)) 4499 4216 4500 if (CHANGED_LIMIT(usrquota_b) || CHAN 4217 if (CHANGED_LIMIT(usrquota_b) || CHANGED_LIMIT(usrquota_i) || 4501 CHANGED_LIMIT(grpquota_b) || CHAN 4218 CHANGED_LIMIT(grpquota_b) || CHANGED_LIMIT(grpquota_i)) { 4502 err = "Cannot change global q 4219 err = "Cannot change global quota 
limit on remount"; 4503 goto out; 4220 goto out; 4504 } 4221 } 4505 #endif /* CONFIG_TMPFS_QUOTA */ 4222 #endif /* CONFIG_TMPFS_QUOTA */ 4506 4223 4507 if (ctx->seen & SHMEM_SEEN_HUGE) 4224 if (ctx->seen & SHMEM_SEEN_HUGE) 4508 sbinfo->huge = ctx->huge; 4225 sbinfo->huge = ctx->huge; 4509 if (ctx->seen & SHMEM_SEEN_INUMS) 4226 if (ctx->seen & SHMEM_SEEN_INUMS) 4510 sbinfo->full_inums = ctx->ful 4227 sbinfo->full_inums = ctx->full_inums; 4511 if (ctx->seen & SHMEM_SEEN_BLOCKS) 4228 if (ctx->seen & SHMEM_SEEN_BLOCKS) 4512 sbinfo->max_blocks = ctx->bl 4229 sbinfo->max_blocks = ctx->blocks; 4513 if (ctx->seen & SHMEM_SEEN_INODES) { 4230 if (ctx->seen & SHMEM_SEEN_INODES) { 4514 sbinfo->max_inodes = ctx->in 4231 sbinfo->max_inodes = ctx->inodes; 4515 sbinfo->free_ispace = ctx->in 4232 sbinfo->free_ispace = ctx->inodes * BOGO_INODE_SIZE - used_isp; 4516 } 4233 } 4517 4234 4518 /* 4235 /* 4519 * Preserve previous mempolicy unless 4236 * Preserve previous mempolicy unless mpol remount option was specified. 4520 */ 4237 */ 4521 if (ctx->mpol) { 4238 if (ctx->mpol) { 4522 mpol = sbinfo->mpol; 4239 mpol = sbinfo->mpol; 4523 sbinfo->mpol = ctx->mpol; 4240 sbinfo->mpol = ctx->mpol; /* transfers initial ref */ 4524 ctx->mpol = NULL; 4241 ctx->mpol = NULL; 4525 } 4242 } 4526 4243 4527 if (ctx->noswap) 4244 if (ctx->noswap) 4528 sbinfo->noswap = true; 4245 sbinfo->noswap = true; 4529 4246 4530 raw_spin_unlock(&sbinfo->stat_lock); 4247 raw_spin_unlock(&sbinfo->stat_lock); 4531 mpol_put(mpol); 4248 mpol_put(mpol); 4532 return 0; 4249 return 0; 4533 out: 4250 out: 4534 raw_spin_unlock(&sbinfo->stat_lock); 4251 raw_spin_unlock(&sbinfo->stat_lock); 4535 return invalfc(fc, "%s", err); 4252 return invalfc(fc, "%s", err); 4536 } 4253 } 4537 4254 4538 static int shmem_show_options(struct seq_file 4255 static int shmem_show_options(struct seq_file *seq, struct dentry *root) 4539 { 4256 { 4540 struct shmem_sb_info *sbinfo = SHMEM_ 4257 struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb); 4541 struct mempolicy *mpol; 4258 struct mempolicy *mpol; 4542 4259 4543 if (sbinfo->max_blocks != shmem_defau 4260 if (sbinfo->max_blocks != shmem_default_max_blocks()) 4544 seq_printf(seq, ",size=%luk", 4261 seq_printf(seq, ",size=%luk", K(sbinfo->max_blocks)); 4545 if (sbinfo->max_inodes != shmem_defau 4262 if (sbinfo->max_inodes != shmem_default_max_inodes()) 4546 seq_printf(seq, ",nr_inodes=% 4263 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes); 4547 if (sbinfo->mode != (0777 | S_ISVTX)) 4264 if (sbinfo->mode != (0777 | S_ISVTX)) 4548 seq_printf(seq, ",mode=%03ho" 4265 seq_printf(seq, ",mode=%03ho", sbinfo->mode); 4549 if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_ 4266 if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID)) 4550 seq_printf(seq, ",uid=%u", 4267 seq_printf(seq, ",uid=%u", 4551 from_kuid_mun 4268 from_kuid_munged(&init_user_ns, sbinfo->uid)); 4552 if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_ 4269 if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID)) 4553 seq_printf(seq, ",gid=%u", 4270 seq_printf(seq, ",gid=%u", 4554 from_kgid_mun 4271 from_kgid_munged(&init_user_ns, sbinfo->gid)); 4555 4272 4556 /* 4273 /* 4557 * Showing inode{64,32} might be usef 4274 * Showing inode{64,32} might be useful even if it's the system default, 4558 * since then people don't have to re 4275 * since then people don't have to resort to checking both here and 4559 * /proc/config.gz to confirm 64-bit 4276 * /proc/config.gz to confirm 64-bit inums were successfully applied 4560 * (which may not even exist if IKCON 4277 * (which may not even exist if IKCONFIG_PROC isn't 

static void shmem_put_super(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

#ifdef CONFIG_TMPFS_QUOTA
	shmem_disable_quotas(sb);
#endif
	free_percpu(sbinfo->ino_batch);
	percpu_counter_destroy(&sbinfo->used_blocks);
	mpol_put(sbinfo->mpol);
	kfree(sbinfo);
	sb->s_fs_info = NULL;
}

static int shmem_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct shmem_options *ctx = fc->fs_private;
	struct inode *inode;
	struct shmem_sb_info *sbinfo;
	int error = -ENOMEM;

	/* Round up to L1_CACHE_BYTES to resist false sharing */
	sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
				L1_CACHE_BYTES), GFP_KERNEL);
	if (!sbinfo)
		return error;

	sb->s_fs_info = sbinfo;

#ifdef CONFIG_TMPFS
	/*
	 * Per default we only allow half of the physical ram per
	 * tmpfs instance, limiting inodes to one per page of lowmem;
	 * but the internal instance is left unlimited.
	 */
	if (!(sb->s_flags & SB_KERNMOUNT)) {
		if (!(ctx->seen & SHMEM_SEEN_BLOCKS))
			ctx->blocks = shmem_default_max_blocks();
		if (!(ctx->seen & SHMEM_SEEN_INODES))
			ctx->inodes = shmem_default_max_inodes();
		if (!(ctx->seen & SHMEM_SEEN_INUMS))
			ctx->full_inums = IS_ENABLED(CONFIG_TMPFS_INODE64);
		sbinfo->noswap = ctx->noswap;
	} else {
		sb->s_flags |= SB_NOUSER;
	}
	sb->s_export_op = &shmem_export_ops;
	sb->s_flags |= SB_NOSEC | SB_I_VERSION;
#else
	sb->s_flags |= SB_NOUSER;
#endif
	sbinfo->max_blocks = ctx->blocks;
	sbinfo->max_inodes = ctx->inodes;
	sbinfo->free_ispace = sbinfo->max_inodes * BOGO_INODE_SIZE;
	if (sb->s_flags & SB_KERNMOUNT) {
		sbinfo->ino_batch = alloc_percpu(ino_t);
		if (!sbinfo->ino_batch)
			goto failed;
	}
	sbinfo->uid = ctx->uid;
	sbinfo->gid = ctx->gid;
	sbinfo->full_inums = ctx->full_inums;
	sbinfo->mode = ctx->mode;
	sbinfo->huge = ctx->huge;
	sbinfo->mpol = ctx->mpol;
	ctx->mpol = NULL;

	raw_spin_lock_init(&sbinfo->stat_lock);
	if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
		goto failed;
	spin_lock_init(&sbinfo->shrinklist_lock);
	INIT_LIST_HEAD(&sbinfo->shrinklist);

	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = PAGE_SIZE;
	sb->s_blocksize_bits = PAGE_SHIFT;
	sb->s_magic = TMPFS_MAGIC;
	sb->s_op = &shmem_ops;
	sb->s_time_gran = 1;
#ifdef CONFIG_TMPFS_XATTR
	sb->s_xattr = shmem_xattr_handlers;
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	sb->s_flags |= SB_POSIXACL;
#endif
	uuid_t uuid;
	uuid_gen(&uuid);
	super_set_uuid(sb, uuid.b, sizeof(uuid));

#ifdef CONFIG_TMPFS_QUOTA
	if (ctx->seen & SHMEM_SEEN_QUOTA) {
		sb->dq_op = &shmem_quota_operations;
		sb->s_qcop = &dquot_quotactl_sysfile_ops;
		sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;

		/* Copy the default limits from ctx into sbinfo */
		memcpy(&sbinfo->qlimits, &ctx->qlimits,
		       sizeof(struct shmem_quota_limits));

		if (shmem_enable_quotas(sb, ctx->quota_types))
			goto failed;
	}
#endif /* CONFIG_TMPFS_QUOTA */

	inode = shmem_get_inode(&nop_mnt_idmap, sb, NULL,
				S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
	if (IS_ERR(inode)) {
		error = PTR_ERR(inode);
		goto failed;
	}
	inode->i_uid = sbinfo->uid;
	inode->i_gid = sbinfo->gid;
	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		goto failed;
	return 0;

failed:
	shmem_put_super(sb);
	return error;
}

static int shmem_get_tree(struct fs_context *fc)
{
	return get_tree_nodev(fc, shmem_fill_super);
}

static void shmem_free_fc(struct fs_context *fc)
{
	struct shmem_options *ctx = fc->fs_private;

	if (ctx) {
		mpol_put(ctx->mpol);
		kfree(ctx);
	}
}

static const struct fs_context_operations shmem_fs_context_ops = {
	.free			= shmem_free_fc,
	.get_tree		= shmem_get_tree,
#ifdef CONFIG_TMPFS
	.parse_monolithic	= shmem_parse_options,
	.parse_param		= shmem_parse_one,
	.reconfigure		= shmem_reconfigure,
#endif
};
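
/*
 * Editorial illustration, not part of the original source: the fs_context
 * operations above are driven by the new mount API.  A hedged userspace
 * sketch using raw syscalls (libc wrappers may not exist on older systems);
 * the target path and option values are hypothetical.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/mount.h>
 *
 *	int main(void)
 *	{
 *		int fsfd = syscall(SYS_fsopen, "tmpfs", 0);
 *
 *		if (fsfd < 0)
 *			return 1;
 *		// Each key/value string is handed to shmem_parse_one()
 *		syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "size", "64M", 0);
 *		syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "mode", "1777", 0);
 *		// FSCONFIG_CMD_CREATE ends up in shmem_get_tree()/shmem_fill_super()
 *		syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
 *		int mfd = syscall(SYS_fsmount, fsfd, 0, 0);
 *		syscall(SYS_move_mount, mfd, "", AT_FDCWD, "/mnt/tmp",
 *			MOVE_MOUNT_F_EMPTY_PATH);
 *		return 0;
 *	}
 */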

static struct kmem_cache *shmem_inode_cachep __ro_after_init;

static struct inode *shmem_alloc_inode(struct super_block *sb)
{
	struct shmem_inode_info *info;
	info = alloc_inode_sb(sb, shmem_inode_cachep, GFP_KERNEL);
	if (!info)
		return NULL;
	return &info->vfs_inode;
}

static void shmem_free_in_core_inode(struct inode *inode)
{
	if (S_ISLNK(inode->i_mode))
		kfree(inode->i_link);
	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}

static void shmem_destroy_inode(struct inode *inode)
{
	if (S_ISREG(inode->i_mode))
		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
	if (S_ISDIR(inode->i_mode))
		simple_offset_destroy(shmem_get_offset_ctx(inode));
}

static void shmem_init_inode(void *foo)
{
	struct shmem_inode_info *info = foo;
	inode_init_once(&info->vfs_inode);
}

static void __init shmem_init_inodecache(void)
{
	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
				sizeof(struct shmem_inode_info),
				0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
}

static void __init shmem_destroy_inodecache(void)
{
	kmem_cache_destroy(shmem_inode_cachep);
}

/* Keep the page in page cache instead of truncating it */
static int shmem_error_remove_folio(struct address_space *mapping,
				   struct folio *folio)
{
	return 0;
}

static const struct address_space_operations shmem_aops = {
	.writepage	= shmem_writepage,
	.dirty_folio	= noop_dirty_folio,
#ifdef CONFIG_TMPFS
	.write_begin	= shmem_write_begin,
	.write_end	= shmem_write_end,
#endif
#ifdef CONFIG_MIGRATION
	.migrate_folio	= migrate_folio,
#endif
	.error_remove_folio = shmem_error_remove_folio,
};

static const struct file_operations shmem_file_operations = {
	.mmap		= shmem_mmap,
	.open		= shmem_file_open,
	.get_unmapped_area = shmem_get_unmapped_area,
#ifdef CONFIG_TMPFS
	.llseek		= shmem_file_llseek,
	.read_iter	= shmem_file_read_iter,
	.write_iter	= shmem_file_write_iter,
	.fsync		= noop_fsync,
	.splice_read	= shmem_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= shmem_fallocate,
#endif
};

static const struct inode_operations shmem_inode_operations = {
	.getattr	= shmem_getattr,
	.setattr	= shmem_setattr,
#ifdef CONFIG_TMPFS_XATTR
	.listxattr	= shmem_listxattr,
	.set_acl	= simple_set_acl,
	.fileattr_get	= shmem_fileattr_get,
	.fileattr_set	= shmem_fileattr_set,
#endif
};

static const struct inode_operations shmem_dir_inode_operations = {
#ifdef CONFIG_TMPFS
	.getattr	= shmem_getattr,
	.create		= shmem_create,
	.lookup		= simple_lookup,
	.link		= shmem_link,
	.unlink		= shmem_unlink,
	.symlink	= shmem_symlink,
	.mkdir		= shmem_mkdir,
	.rmdir		= shmem_rmdir,
	.mknod		= shmem_mknod,
	.rename		= shmem_rename2,
	.tmpfile	= shmem_tmpfile,
	.get_offset_ctx	= shmem_get_offset_ctx,
#endif
#ifdef CONFIG_TMPFS_XATTR
	.listxattr	= shmem_listxattr,
	.fileattr_get	= shmem_fileattr_get,
	.fileattr_set	= shmem_fileattr_set,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_setattr,
	.set_acl	= simple_set_acl,
#endif
};

static const struct inode_operations shmem_special_inode_operations = {
	.getattr	= shmem_getattr,
#ifdef CONFIG_TMPFS_XATTR
	.listxattr	= shmem_listxattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_setattr,
	.set_acl	= simple_set_acl,
#endif
};

static const struct super_operations shmem_ops = {
	.alloc_inode	= shmem_alloc_inode,
	.free_inode	= shmem_free_in_core_inode,
	.destroy_inode	= shmem_destroy_inode,
#ifdef CONFIG_TMPFS
	.statfs		= shmem_statfs,
	.show_options	= shmem_show_options,
#endif
#ifdef CONFIG_TMPFS_QUOTA
	.get_dquots	= shmem_get_dquots,
#endif
	.evict_inode	= shmem_evict_inode,
	.drop_inode	= generic_delete_inode,
	.put_super	= shmem_put_super,
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	.nr_cached_objects	= shmem_unused_huge_count,
	.free_cached_objects	= shmem_unused_huge_scan,
#endif
};

static const struct vm_operations_struct shmem_vm_ops = {
	.fault		= shmem_fault,
	.map_pages	= filemap_map_pages,
#ifdef CONFIG_NUMA
	.set_policy	= shmem_set_policy,
	.get_policy	= shmem_get_policy,
#endif
};

static const struct vm_operations_struct shmem_anon_vm_ops = {
	.fault		= shmem_fault,
	.map_pages	= filemap_map_pages,
#ifdef CONFIG_NUMA
	.set_policy	= shmem_set_policy,
	.get_policy	= shmem_get_policy,
#endif
};

int shmem_init_fs_context(struct fs_context *fc)
{
	struct shmem_options *ctx;

	ctx = kzalloc(sizeof(struct shmem_options), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->mode = 0777 | S_ISVTX;
	ctx->uid = current_fsuid();
	ctx->gid = current_fsgid();

	fc->fs_private = ctx;
	fc->ops = &shmem_fs_context_ops;
	return 0;
}

static struct file_system_type shmem_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "tmpfs",
	.init_fs_context = shmem_init_fs_context,
#ifdef CONFIG_TMPFS
	.parameters	= shmem_fs_parameters,
#endif
	.kill_sb	= kill_litter_super,
	.fs_flags	= FS_USERNS_MOUNT | FS_ALLOW_IDMAP,
};

void __init shmem_init(void)
{
	int error;

	shmem_init_inodecache();

#ifdef CONFIG_TMPFS_QUOTA
	error = register_quota_format(&shmem_quota_format);
	if (error < 0) {
		pr_err("Could not register quota format\n");
		goto out3;
	}
#endif

	error = register_filesystem(&shmem_fs_type);
	if (error) {
		pr_err("Could not register tmpfs\n");
		goto out2;
	}

	shm_mnt = kern_mount(&shmem_fs_type);
	if (IS_ERR(shm_mnt)) {
		error = PTR_ERR(shm_mnt);
		pr_err("Could not kern_mount tmpfs\n");
		goto out1;
	}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
		SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
	else
		shmem_huge = SHMEM_HUGE_NEVER; /* just in case it was patched */

	/*
	 * Default to setting PMD-sized THP to inherit the global setting and
	 * disable all other multi-size THPs.
	 */
	huge_shmem_orders_inherit = BIT(HPAGE_PMD_ORDER);
#endif
	return;

out1:
	unregister_filesystem(&shmem_fs_type);
out2:
#ifdef CONFIG_TMPFS_QUOTA
	unregister_quota_format(&shmem_quota_format);
out3:
#endif
	shmem_destroy_inodecache();
	shm_mnt = ERR_PTR(error);
}
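
/*
 * Editorial illustration, not part of the original source: the sbinfo->huge
 * default touched above is normally chosen per mount with the "huge=" option.
 * A minimal sketch, mount point and size hypothetical:
 *
 *	#include <sys/mount.h>
 *
 *	int main(void)
 *	{
 *		// huge= accepts never/always/within_size/advise, parsed by
 *		// shmem_parse_huge() elsewhere in this file.
 *		return mount("tmpfs", "/mnt/thp", "tmpfs", 0,
 *			     "size=1G,huge=within_size");
 *	}
 */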
<< 4983 */ << 4984 huge_shmem_orders_inherit = BIT(HPAGE << 4985 #endif 4700 #endif 4986 return; 4701 return; 4987 4702 4988 out1: 4703 out1: 4989 unregister_filesystem(&shmem_fs_type) 4704 unregister_filesystem(&shmem_fs_type); 4990 out2: 4705 out2: 4991 #ifdef CONFIG_TMPFS_QUOTA 4706 #ifdef CONFIG_TMPFS_QUOTA 4992 unregister_quota_format(&shmem_quota_ 4707 unregister_quota_format(&shmem_quota_format); >> 4708 out3: 4993 #endif 4709 #endif 4994 shmem_destroy_inodecache(); 4710 shmem_destroy_inodecache(); 4995 shm_mnt = ERR_PTR(error); 4711 shm_mnt = ERR_PTR(error); 4996 } 4712 } 4997 4713 4998 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && d 4714 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS) 4999 static ssize_t shmem_enabled_show(struct kobj 4715 static ssize_t shmem_enabled_show(struct kobject *kobj, 5000 struct kobj 4716 struct kobj_attribute *attr, char *buf) 5001 { 4717 { 5002 static const int values[] = { 4718 static const int values[] = { 5003 SHMEM_HUGE_ALWAYS, 4719 SHMEM_HUGE_ALWAYS, 5004 SHMEM_HUGE_WITHIN_SIZE, 4720 SHMEM_HUGE_WITHIN_SIZE, 5005 SHMEM_HUGE_ADVISE, 4721 SHMEM_HUGE_ADVISE, 5006 SHMEM_HUGE_NEVER, 4722 SHMEM_HUGE_NEVER, 5007 SHMEM_HUGE_DENY, 4723 SHMEM_HUGE_DENY, 5008 SHMEM_HUGE_FORCE, 4724 SHMEM_HUGE_FORCE, 5009 }; 4725 }; 5010 int len = 0; 4726 int len = 0; 5011 int i; 4727 int i; 5012 4728 5013 for (i = 0; i < ARRAY_SIZE(values); i 4729 for (i = 0; i < ARRAY_SIZE(values); i++) { 5014 len += sysfs_emit_at(buf, len 4730 len += sysfs_emit_at(buf, len, 5015 shmem_huge == 4731 shmem_huge == values[i] ? "%s[%s]" : "%s%s", 5016 i ? " " : "", 4732 i ? " " : "", shmem_format_huge(values[i])); 5017 } 4733 } 5018 len += sysfs_emit_at(buf, len, "\n"); 4734 len += sysfs_emit_at(buf, len, "\n"); 5019 4735 5020 return len; 4736 return len; 5021 } 4737 } 5022 4738 5023 static ssize_t shmem_enabled_store(struct kob 4739 static ssize_t shmem_enabled_store(struct kobject *kobj, 5024 struct kobj_attribute *attr, 4740 struct kobj_attribute *attr, const char *buf, size_t count) 5025 { 4741 { 5026 char tmp[16]; 4742 char tmp[16]; 5027 int huge; 4743 int huge; 5028 4744 5029 if (count + 1 > sizeof(tmp)) 4745 if (count + 1 > sizeof(tmp)) 5030 return -EINVAL; 4746 return -EINVAL; 5031 memcpy(tmp, buf, count); 4747 memcpy(tmp, buf, count); 5032 tmp[count] = '\0'; 4748 tmp[count] = '\0'; 5033 if (count && tmp[count - 1] == '\n') 4749 if (count && tmp[count - 1] == '\n') 5034 tmp[count - 1] = '\0'; 4750 tmp[count - 1] = '\0'; 5035 4751 5036 huge = shmem_parse_huge(tmp); 4752 huge = shmem_parse_huge(tmp); 5037 if (huge == -EINVAL) 4753 if (huge == -EINVAL) 5038 return -EINVAL; 4754 return -EINVAL; 5039 if (!has_transparent_hugepage() && 4755 if (!has_transparent_hugepage() && 5040 huge != SHMEM_HUGE_NE 4756 huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY) 5041 return -EINVAL; 4757 return -EINVAL; 5042 4758 5043 /* Do not override huge allocation po << 5044 if (huge == SHMEM_HUGE_FORCE && << 5045 huge_shmem_orders_inherit != BIT( << 5046 return -EINVAL; << 5047 << 5048 shmem_huge = huge; 4759 shmem_huge = huge; 5049 if (shmem_huge > SHMEM_HUGE_DENY) 4760 if (shmem_huge > SHMEM_HUGE_DENY) 5050 SHMEM_SB(shm_mnt->mnt_sb)->hu 4761 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge; 5051 return count; 4762 return count; 5052 } 4763 } 5053 4764 5054 struct kobj_attribute shmem_enabled_attr = __ 4765 struct kobj_attribute shmem_enabled_attr = __ATTR_RW(shmem_enabled); 5055 static DEFINE_SPINLOCK(huge_shmem_orders_lock << 5056 << 5057 static ssize_t thpsize_shmem_enabled_show(str << 5058 

static DEFINE_SPINLOCK(huge_shmem_orders_lock);

static ssize_t thpsize_shmem_enabled_show(struct kobject *kobj,
					  struct kobj_attribute *attr, char *buf)
{
	int order = to_thpsize(kobj)->order;
	const char *output;

	if (test_bit(order, &huge_shmem_orders_always))
		output = "[always] inherit within_size advise never";
	else if (test_bit(order, &huge_shmem_orders_inherit))
		output = "always [inherit] within_size advise never";
	else if (test_bit(order, &huge_shmem_orders_within_size))
		output = "always inherit [within_size] advise never";
	else if (test_bit(order, &huge_shmem_orders_madvise))
		output = "always inherit within_size [advise] never";
	else
		output = "always inherit within_size advise [never]";

	return sysfs_emit(buf, "%s\n", output);
}

static ssize_t thpsize_shmem_enabled_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	int order = to_thpsize(kobj)->order;
	ssize_t ret = count;

	if (sysfs_streq(buf, "always")) {
		spin_lock(&huge_shmem_orders_lock);
		clear_bit(order, &huge_shmem_orders_inherit);
		clear_bit(order, &huge_shmem_orders_madvise);
		clear_bit(order, &huge_shmem_orders_within_size);
		set_bit(order, &huge_shmem_orders_always);
		spin_unlock(&huge_shmem_orders_lock);
	} else if (sysfs_streq(buf, "inherit")) {
		/* Do not override huge allocation policy with non-PMD sized mTHP */
		if (shmem_huge == SHMEM_HUGE_FORCE &&
		    order != HPAGE_PMD_ORDER)
			return -EINVAL;

		spin_lock(&huge_shmem_orders_lock);
		clear_bit(order, &huge_shmem_orders_always);
		clear_bit(order, &huge_shmem_orders_madvise);
		clear_bit(order, &huge_shmem_orders_within_size);
		set_bit(order, &huge_shmem_orders_inherit);
		spin_unlock(&huge_shmem_orders_lock);
	} else if (sysfs_streq(buf, "within_size")) {
		spin_lock(&huge_shmem_orders_lock);
		clear_bit(order, &huge_shmem_orders_always);
		clear_bit(order, &huge_shmem_orders_inherit);
		clear_bit(order, &huge_shmem_orders_madvise);
		set_bit(order, &huge_shmem_orders_within_size);
		spin_unlock(&huge_shmem_orders_lock);
	} else if (sysfs_streq(buf, "advise")) {
		spin_lock(&huge_shmem_orders_lock);
		clear_bit(order, &huge_shmem_orders_always);
		clear_bit(order, &huge_shmem_orders_inherit);
		clear_bit(order, &huge_shmem_orders_within_size);
		set_bit(order, &huge_shmem_orders_madvise);
		spin_unlock(&huge_shmem_orders_lock);
	} else if (sysfs_streq(buf, "never")) {
		spin_lock(&huge_shmem_orders_lock);
		clear_bit(order, &huge_shmem_orders_always);
		clear_bit(order, &huge_shmem_orders_inherit);
		clear_bit(order, &huge_shmem_orders_within_size);
		clear_bit(order, &huge_shmem_orders_madvise);
		spin_unlock(&huge_shmem_orders_lock);
	} else {
		ret = -EINVAL;
	}

	return ret;
}

struct kobj_attribute thpsize_shmem_enabled_attr =
	__ATTR(shmem_enabled, 0644, thpsize_shmem_enabled_show, thpsize_shmem_enabled_store);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_SYSFS */
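
/*
 * Editorial illustration, not part of the original source: on kernels with
 * the per-size interface above, each supported order gets its own knob under
 * /sys/kernel/mm/transparent_hugepage/hugepages-<size>kB/shmem_enabled, and
 * "inherit" defers to the global value, which is why shmem_init() defaults
 * the PMD-sized entry to inherit.  A tiny sketch, size directory hypothetical:
 *
 *	#include <stdio.h>
 *
 *	// hugepages-64kB is one example; names depend on page size/arch
 *	FILE *fp = fopen("/sys/kernel/mm/transparent_hugepage/hugepages-64kB/shmem_enabled", "w");
 *	if (fp) {
 *		fputs("within_size", fp);
 *		fclose(fp);
 *	}
 */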

#else /* !CONFIG_SHMEM */

/*
 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
 *
 * This is intended for small systems where the benefits of the full
 * shmem code (swap-backed and resource-limited) are outweighed by
 * their complexity. On systems without swap this code should be
 * effectively equivalent, but much lighter weight.
 */

static struct file_system_type shmem_fs_type = {
	.name		= "tmpfs",
	.init_fs_context = ramfs_init_fs_context,
	.parameters	= ramfs_fs_parameters,
	.kill_sb	= ramfs_kill_sb,
	.fs_flags	= FS_USERNS_MOUNT,
};

void __init shmem_init(void)
{
	BUG_ON(register_filesystem(&shmem_fs_type) != 0);

	shm_mnt = kern_mount(&shmem_fs_type);
	BUG_ON(IS_ERR(shm_mnt));
}

int shmem_unuse(unsigned int type)
{
	return 0;
}

int shmem_lock(struct file *file, int lock, struct ucounts *ucounts)
{
	return 0;
}

void shmem_unlock_mapping(struct address_space *mapping)
{
}

#ifdef CONFIG_MMU
unsigned long shmem_get_unmapped_area(struct file *file,
				      unsigned long addr, unsigned long len,
				      unsigned long pgoff, unsigned long flags)
{
	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
}
#endif

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	truncate_inode_pages_range(inode->i_mapping, lstart, lend);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

#define shmem_vm_ops				generic_file_vm_ops
#define shmem_anon_vm_ops			generic_file_vm_ops
#define shmem_file_operations			ramfs_file_operations
#define shmem_acct_size(flags, size)		0
#define shmem_unacct_size(flags, size)		do {} while (0)

static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap,
				struct super_block *sb, struct inode *dir,
				umode_t mode, dev_t dev, unsigned long flags)
{
	struct inode *inode = ramfs_get_inode(sb, dir, mode, dev);
	return inode ? inode : ERR_PTR(-ENOSPC);
}

#endif /* CONFIG_SHMEM */

/* common code */

static struct file *__shmem_file_setup(struct vfsmount *mnt, const char *name,
			loff_t size, unsigned long flags, unsigned int i_flags)
{
	struct inode *inode;
	struct file *res;

	if (IS_ERR(mnt))
		return ERR_CAST(mnt);

	if (size < 0 || size > MAX_LFS_FILESIZE)
		return ERR_PTR(-EINVAL);

	if (shmem_acct_size(flags, size))
		return ERR_PTR(-ENOMEM);

	if (is_idmapped_mnt(mnt))
		return ERR_PTR(-EINVAL);

	inode = shmem_get_inode(&nop_mnt_idmap, mnt->mnt_sb, NULL,
				S_IFREG | S_IRWXUGO, 0, flags);
	if (IS_ERR(inode)) {
		shmem_unacct_size(flags, size);
		return ERR_CAST(inode);
	}
	inode->i_flags |= i_flags;
	inode->i_size = size;
	clear_nlink(inode);	/* It is unlinked */
	res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
	if (!IS_ERR(res))
		res = alloc_file_pseudo(inode, mnt, name, O_RDWR,
				&shmem_file_operations);
	if (IS_ERR(res))
		iput(inode);
	return res;
}

/**
 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
 *	kernel internal.  There will be NO LSM permission checks against the
 *	underlying inode.  So users of this interface must do LSM checks at a
 *	higher layer.  The users are the big_key and shm implementations.  LSM
 *	checks are provided at the key or shm level rather than the inode.
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
{
	return __shmem_file_setup(shm_mnt, name, size, flags, S_PRIVATE);
}
EXPORT_SYMBOL_GPL(shmem_kernel_file_setup);

/**
 * shmem_file_setup - get an unlinked file living in tmpfs
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
	return __shmem_file_setup(shm_mnt, name, size, flags, 0);
}
EXPORT_SYMBOL_GPL(shmem_file_setup);

/**
 * shmem_file_setup_with_mnt - get an unlinked file living in tmpfs
 * @mnt: the tmpfs mount where the file will be created
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_file_setup_with_mnt(struct vfsmount *mnt, const char *name,
				       loff_t size, unsigned long flags)
{
	return __shmem_file_setup(mnt, name, size, flags, 0);
}
EXPORT_SYMBOL_GPL(shmem_file_setup_with_mnt);
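
/*
 * Editorial illustration, not part of the original source: a sketch of how an
 * in-kernel user might create an unlinked tmpfs file with the helpers above.
 * The helper name and size are hypothetical; error handling is trimmed.
 *
 *	static int example_make_shmem_blob(void)
 *	{
 *		struct file *filp;
 *
 *		filp = shmem_file_setup("example-blob", SZ_1M, VM_NORESERVE);
 *		if (IS_ERR(filp))
 *			return PTR_ERR(filp);
 *		// ... work against filp->f_mapping, then drop the reference ...
 *		fput(filp);
 *		return 0;
 *	}
 */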

/**
 * shmem_zero_setup - setup a shared anonymous mapping
 * @vma: the vma to be mmapped is prepared by do_mmap
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
	struct file *file;
	loff_t size = vma->vm_end - vma->vm_start;

	/*
	 * Cloning a new file under mmap_lock leads to a lock ordering conflict
	 * between XFS directory reading and selinux: since this file is only
	 * accessible to the user through its mapping, use S_PRIVATE flag to
	 * bypass file security, in the same way as shmem_kernel_file_setup().
	 */
	file = shmem_kernel_file_setup("dev/zero", size, vma->vm_flags);
	if (IS_ERR(file))
		return PTR_ERR(file);

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = file;
	vma->vm_ops = &shmem_anon_vm_ops;

	return 0;
}
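
/*
 * Editorial illustration, not part of the original source: shmem_zero_setup()
 * is what ends up backing a userspace shared anonymous mapping, e.g.:
 *
 *	#include <sys/mman.h>
 *
 *	// memory shared with children across fork(); backed by an unlinked
 *	// "dev/zero" shmem file created by shmem_zero_setup()
 *	void *p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 */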

/**
 * shmem_read_folio_gfp - read into page cache, using specified page allocation flags.
 * @mapping:	the folio's address_space
 * @index:	the folio index
 * @gfp:	the page allocator flags to use if allocating
 *
 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
 * with any new page allocations done using the specified allocation flags.
 * But read_cache_page_gfp() uses the ->read_folio() method: which does not
 * suit tmpfs, since it may have pages in swapcache, and needs to find those
 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
 *
 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
 */
struct folio *shmem_read_folio_gfp(struct address_space *mapping,
		pgoff_t index, gfp_t gfp)
{
#ifdef CONFIG_SHMEM
	struct inode *inode = mapping->host;
	struct folio *folio;
	int error;

	error = shmem_get_folio_gfp(inode, index, &folio, SGP_CACHE,
				    gfp, NULL, NULL);
	if (error)
		return ERR_PTR(error);

	folio_unlock(folio);
	return folio;
#else
	/*
	 * The tiny !SHMEM case uses ramfs without swap
	 */
	return mapping_read_folio_gfp(mapping, index, gfp);
#endif
}
EXPORT_SYMBOL_GPL(shmem_read_folio_gfp);

struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
					 pgoff_t index, gfp_t gfp)
{
	struct folio *folio = shmem_read_folio_gfp(mapping, index, gfp);
	struct page *page;

	if (IS_ERR(folio))
		return &folio->page;

	page = folio_file_page(folio, index);
	if (PageHWPoison(page)) {
		folio_put(folio);
		return ERR_PTR(-EIO);
	}

	return page;
}
EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
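
/*
 * Editorial illustration, not part of the original source: a sketch of the
 * drm-style use mentioned above, reading one page of a shmem-backed object
 * with relaxed allocation flags.  The helper and mapping are hypothetical.
 *
 *	static struct page *example_get_obj_page(struct address_space *mapping,
 *						 pgoff_t index)
 *	{
 *		gfp_t gfp = mapping_gfp_mask(mapping) |
 *			    __GFP_NORETRY | __GFP_NOWARN;
 *
 *		return shmem_read_mapping_page_gfp(mapping, index, gfp);
 *	}
 */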