Linux/fs/overlayfs/super.c

  1 // SPDX-License-Identifier: GPL-2.0-only
  2 /*
  3  *
  4  * Copyright (C) 2011 Novell Inc.
  5  */
  6 
  7 #include <uapi/linux/magic.h>
  8 #include <linux/fs.h>
  9 #include <linux/namei.h>
 10 #include <linux/xattr.h>
 11 #include <linux/mount.h>
 12 #include <linux/parser.h>
 13 #include <linux/module.h>
 14 #include <linux/statfs.h>
 15 #include <linux/seq_file.h>
 16 #include <linux/posix_acl_xattr.h>
 17 #include <linux/exportfs.h>
 18 #include <linux/file.h>
 19 #include <linux/fs_context.h>
 20 #include <linux/fs_parser.h>
 21 #include "overlayfs.h"
 22 #include "params.h"
 23 
 24 MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>");
 25 MODULE_DESCRIPTION("Overlay filesystem");
 26 MODULE_LICENSE("GPL");
 27 
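     /*
      * Typical usage from user space (the paths below are illustrative
      * placeholders), with upperdir/workdir optional for a read-only,
      * lower-only mount:
      *
      *   mount -t overlay overlay \
      *         -o lowerdir=/lower,upperdir=/upper,workdir=/work /merged
      */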
 28 
 29 struct ovl_dir_cache;
 30 
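     /*
      * d_real() maps an overlay dentry to the real dentry that backs it in
      * one of the layers: for D_REAL_METADATA the layer holding the inode
      * metadata, for D_REAL_DATA the layer holding the file data (which may
      * require a lazy lowerdata lookup).  Only regular files have distinct
      * real dentries; for anything else the overlay dentry itself is
      * returned.
      */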
 31 static struct dentry *ovl_d_real(struct dentry *dentry, enum d_real_type type)
 32 {
 33         struct dentry *upper, *lower;
 34         int err;
 35 
 36         switch (type) {
 37         case D_REAL_DATA:
 38         case D_REAL_METADATA:
 39                 break;
 40         default:
 41                 goto bug;
 42         }
 43 
 44         if (!d_is_reg(dentry)) {
 45                 /* d_real_inode() is only relevant for regular files */
 46                 return dentry;
 47         }
 48 
 49         upper = ovl_dentry_upper(dentry);
 50         if (upper && (type == D_REAL_METADATA ||
 51                       ovl_has_upperdata(d_inode(dentry))))
 52                 return upper;
 53 
 54         if (type == D_REAL_METADATA) {
 55                 lower = ovl_dentry_lower(dentry);
 56                 goto real_lower;
 57         }
 58 
  59         /*
  60          * Best effort lazy lookup of lowerdata for the D_REAL_DATA case to
  61          * return the real lowerdata dentry.  The only current caller of
  62          * d_real() with D_REAL_DATA is d_real_inode() from trace_uprobe, and
  63          * that caller is likely to be followed by a read from the file before
  64          * placing uprobes on an offset within the file, so lowerdata should
  65          * be available when setting the uprobe.
  66          */
 67         err = ovl_verify_lowerdata(dentry);
 68         if (err)
 69                 goto bug;
 70         lower = ovl_dentry_lowerdata(dentry);
 71         if (!lower)
 72                 goto bug;
 73 
 74 real_lower:
 75         /* Handle recursion into stacked lower fs */
 76         return d_real(lower, type);
 77 
 78 bug:
 79         WARN(1, "%s(%pd4, %d): real dentry not found\n", __func__, dentry, type);
 80         return dentry;
 81 }
 82 
 83 static int ovl_revalidate_real(struct dentry *d, unsigned int flags, bool weak)
 84 {
 85         int ret = 1;
 86 
 87         if (!d)
 88                 return 1;
 89 
 90         if (weak) {
 91                 if (d->d_flags & DCACHE_OP_WEAK_REVALIDATE)
  92                         ret = d->d_op->d_weak_revalidate(d, flags);
 93         } else if (d->d_flags & DCACHE_OP_REVALIDATE) {
 94                 ret = d->d_op->d_revalidate(d, flags);
 95                 if (!ret) {
 96                         if (!(flags & LOOKUP_RCU))
 97                                 d_invalidate(d);
 98                         ret = -ESTALE;
 99                 }
100         }
101         return ret;
102 }
103 
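     /*
      * Propagate ->d_revalidate() / ->d_weak_revalidate() to the real upper
      * dentry and to every lower dentry in the stack, stopping at the first
      * layer that reports the dentry invalid or returns an error.
      */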
104 static int ovl_dentry_revalidate_common(struct dentry *dentry,
105                                         unsigned int flags, bool weak)
106 {
107         struct ovl_entry *oe;
108         struct ovl_path *lowerstack;
109         struct inode *inode = d_inode_rcu(dentry);
110         struct dentry *upper;
111         unsigned int i;
112         int ret = 1;
113 
114         /* Careful in RCU mode */
115         if (!inode)
116                 return -ECHILD;
117 
118         oe = OVL_I_E(inode);
119         lowerstack = ovl_lowerstack(oe);
120         upper = ovl_i_dentry_upper(inode);
121         if (upper)
122                 ret = ovl_revalidate_real(upper, flags, weak);
123 
124         for (i = 0; ret > 0 && i < ovl_numlower(oe); i++)
125                 ret = ovl_revalidate_real(lowerstack[i].dentry, flags, weak);
126 
127         return ret;
128 }
129 
130 static int ovl_dentry_revalidate(struct dentry *dentry, unsigned int flags)
131 {
132         return ovl_dentry_revalidate_common(dentry, flags, false);
133 }
134 
135 static int ovl_dentry_weak_revalidate(struct dentry *dentry, unsigned int flags)
136 {
137         return ovl_dentry_revalidate_common(dentry, flags, true);
138 }
139 
140 static const struct dentry_operations ovl_dentry_operations = {
141         .d_real = ovl_d_real,
142         .d_revalidate = ovl_dentry_revalidate,
143         .d_weak_revalidate = ovl_dentry_weak_revalidate,
144 };
145 
146 static struct kmem_cache *ovl_inode_cachep;
147 
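     /*
      * Inode lifetime: ->alloc_inode() sets up a fresh struct ovl_inode,
      * ->destroy_inode() is called synchronously at eviction time to drop
      * the dentry/path references, and ->free_inode() typically runs after
      * an RCU grace period to free the remaining allocations and the slab
      * object itself.
      */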
148 static struct inode *ovl_alloc_inode(struct super_block *sb)
149 {
150         struct ovl_inode *oi = alloc_inode_sb(sb, ovl_inode_cachep, GFP_KERNEL);
151 
152         if (!oi)
153                 return NULL;
154 
155         oi->cache = NULL;
156         oi->redirect = NULL;
157         oi->version = 0;
158         oi->flags = 0;
159         oi->__upperdentry = NULL;
160         oi->lowerdata_redirect = NULL;
161         oi->oe = NULL;
162         mutex_init(&oi->lock);
163 
164         return &oi->vfs_inode;
165 }
166 
167 static void ovl_free_inode(struct inode *inode)
168 {
169         struct ovl_inode *oi = OVL_I(inode);
170 
171         kfree(oi->redirect);
172         kfree(oi->oe);
173         mutex_destroy(&oi->lock);
174         kmem_cache_free(ovl_inode_cachep, oi);
175 }
176 
177 static void ovl_destroy_inode(struct inode *inode)
178 {
179         struct ovl_inode *oi = OVL_I(inode);
180 
181         dput(oi->__upperdentry);
182         ovl_stack_put(ovl_lowerstack(oi->oe), ovl_numlower(oi->oe));
183         if (S_ISDIR(inode->i_mode))
184                 ovl_dir_cache_free(inode);
185         else
186                 kfree(oi->lowerdata_redirect);
187 }
188 
189 static void ovl_put_super(struct super_block *sb)
190 {
191         struct ovl_fs *ofs = OVL_FS(sb);
192 
193         if (ofs)
194                 ovl_free_fs(ofs);
195 }
196 
197 /* Sync real dirty inodes in upper filesystem (if it exists) */
198 static int ovl_sync_fs(struct super_block *sb, int wait)
199 {
200         struct ovl_fs *ofs = OVL_FS(sb);
201         struct super_block *upper_sb;
202         int ret;
203 
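             /*
              * ovl_sync_status() is used as a tri-state here: a negative value
              * means a writeback error has already been seen on the upper fs,
              * 0 means there is nothing to sync, and a positive value means
              * the upper fs should be synced below.
              */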
204         ret = ovl_sync_status(ofs);
 205         /*
 206          * We always have to set the error, because the return value isn't
 207          * checked in syncfs; instead the error is returned indirectly via
 208          * the sb's writeback errseq, which the VFS inspects after this call.
 209          */
210         if (ret < 0) {
211                 errseq_set(&sb->s_wb_err, -EIO);
212                 return -EIO;
213         }
214 
215         if (!ret)
216                 return ret;
217 
 218         /*
 219          * Not called for a sync(2) call or an emergency sync (SB_I_SKIP_SYNC);
 220          * in that case all super blocks are iterated, including upper_sb.
 221          *
 222          * If this is a syncfs(2) call, then we do need to call
 223          * sync_filesystem() on upper_sb, but it is enough to do so when we
 224          * are called with wait == 1.
 225          */
226         if (!wait)
227                 return 0;
228 
229         upper_sb = ovl_upper_mnt(ofs)->mnt_sb;
230 
231         down_read(&upper_sb->s_umount);
232         ret = sync_filesystem(upper_sb);
233         up_read(&upper_sb->s_umount);
234 
235         return ret;
236 }
237 
238 /**
239  * ovl_statfs
240  * @dentry: The dentry to query
241  * @buf: The struct kstatfs to fill in with stats
242  *
243  * Get the filesystem statistics.  As writes always target the upper layer
244  * filesystem pass the statfs to the upper filesystem (if it exists)
245  */
246 static int ovl_statfs(struct dentry *dentry, struct kstatfs *buf)
247 {
248         struct super_block *sb = dentry->d_sb;
249         struct ovl_fs *ofs = OVL_FS(sb);
250         struct dentry *root_dentry = sb->s_root;
251         struct path path;
252         int err;
253 
254         ovl_path_real(root_dentry, &path);
255 
256         err = vfs_statfs(&path, buf);
257         if (!err) {
258                 buf->f_namelen = ofs->namelen;
259                 buf->f_type = OVERLAYFS_SUPER_MAGIC;
260                 if (ovl_has_fsid(ofs))
261                         buf->f_fsid = uuid_to_fsid(sb->s_uuid.b);
262         }
263 
264         return err;
265 }
266 
267 static const struct super_operations ovl_super_operations = {
268         .alloc_inode    = ovl_alloc_inode,
269         .free_inode     = ovl_free_inode,
270         .destroy_inode  = ovl_destroy_inode,
271         .drop_inode     = generic_delete_inode,
272         .put_super      = ovl_put_super,
273         .sync_fs        = ovl_sync_fs,
274         .statfs         = ovl_statfs,
275         .show_options   = ovl_show_options,
276 };
277 
278 #define OVL_WORKDIR_NAME "work"
279 #define OVL_INDEXDIR_NAME "index"
280 
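     /*
      * Create the "work" (or "index") directory under workbasedir.  A stale
      * directory left over from a previous mount is cleaned up and creation
      * is retried once; inherited ACLs and mode bits are stripped.  Most
      * failures return NULL and the mount falls back to read-only (see the
      * pr_warn below), so callers check IS_ERR_OR_NULL().
      */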
281 static struct dentry *ovl_workdir_create(struct ovl_fs *ofs,
282                                          const char *name, bool persist)
283 {
 284         struct inode *dir = ofs->workbasedir->d_inode;
285         struct vfsmount *mnt = ovl_upper_mnt(ofs);
286         struct dentry *work;
287         int err;
288         bool retried = false;
289 
290         inode_lock_nested(dir, I_MUTEX_PARENT);
291 retry:
292         work = ovl_lookup_upper(ofs, name, ofs->workbasedir, strlen(name));
293 
294         if (!IS_ERR(work)) {
295                 struct iattr attr = {
296                         .ia_valid = ATTR_MODE,
297                         .ia_mode = S_IFDIR | 0,
298                 };
299 
300                 if (work->d_inode) {
301                         err = -EEXIST;
302                         if (retried)
303                                 goto out_dput;
304 
305                         if (persist)
306                                 goto out_unlock;
307 
308                         retried = true;
309                         err = ovl_workdir_cleanup(ofs, dir, mnt, work, 0);
310                         dput(work);
311                         if (err == -EINVAL) {
312                                 work = ERR_PTR(err);
313                                 goto out_unlock;
314                         }
315                         goto retry;
316                 }
317 
318                 err = ovl_mkdir_real(ofs, dir, &work, attr.ia_mode);
319                 if (err)
320                         goto out_dput;
321 
322                 /* Weird filesystem returning with hashed negative (kernfs)? */
323                 err = -EINVAL;
324                 if (d_really_is_negative(work))
325                         goto out_dput;
326 
327                 /*
328                  * Try to remove POSIX ACL xattrs from workdir.  We are good if:
329                  *
330                  * a) success (there was a POSIX ACL xattr and was removed)
331                  * b) -ENODATA (there was no POSIX ACL xattr)
332                  * c) -EOPNOTSUPP (POSIX ACL xattrs are not supported)
333                  *
334                  * There are various other error values that could effectively
335                  * mean that the xattr doesn't exist (e.g. -ERANGE is returned
336                  * if the xattr name is too long), but the set of filesystems
337                  * allowed as upper are limited to "normal" ones, where checking
338                  * for the above two errors is sufficient.
339                  */
340                 err = ovl_do_remove_acl(ofs, work, XATTR_NAME_POSIX_ACL_DEFAULT);
341                 if (err && err != -ENODATA && err != -EOPNOTSUPP)
342                         goto out_dput;
343 
344                 err = ovl_do_remove_acl(ofs, work, XATTR_NAME_POSIX_ACL_ACCESS);
345                 if (err && err != -ENODATA && err != -EOPNOTSUPP)
346                         goto out_dput;
347 
348                 /* Clear any inherited mode bits */
349                 inode_lock(work->d_inode);
350                 err = ovl_do_notify_change(ofs, work, &attr);
351                 inode_unlock(work->d_inode);
352                 if (err)
353                         goto out_dput;
354         } else {
355                 err = PTR_ERR(work);
356                 goto out_err;
357         }
358 out_unlock:
359         inode_unlock(dir);
360         return work;
361 
362 out_dput:
363         dput(work);
364 out_err:
365         pr_warn("failed to create directory %s/%s (errno: %i); mounting read-only\n",
366                 ofs->config.workdir, name, -err);
367         work = NULL;
368         goto out_unlock;
369 }
370 
371 static int ovl_check_namelen(const struct path *path, struct ovl_fs *ofs,
372                              const char *name)
373 {
374         struct kstatfs statfs;
375         int err = vfs_statfs(path, &statfs);
376 
377         if (err)
378                 pr_err("statfs failed on '%s'\n", name);
379         else
380                 ofs->namelen = max(ofs->namelen, statfs.f_namelen);
381 
382         return err;
383 }
384 
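     /*
      * Per-lowerdir checks: record the maximum name length and stacking
      * depth, and degrade index/nfs_export/xino if this layer cannot
      * decode file handles.
      */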
385 static int ovl_lower_dir(const char *name, struct path *path,
386                          struct ovl_fs *ofs, int *stack_depth)
387 {
388         int fh_type;
389         int err;
390 
391         err = ovl_check_namelen(path, ofs, name);
392         if (err)
393                 return err;
394 
395         *stack_depth = max(*stack_depth, path->mnt->mnt_sb->s_stack_depth);
396 
397         /*
398          * The inodes index feature and NFS export need to encode and decode
399          * file handles, so they require that all layers support them.
400          */
401         fh_type = ovl_can_decode_fh(path->dentry->d_sb);
402         if ((ofs->config.nfs_export ||
403              (ofs->config.index && ofs->config.upperdir)) && !fh_type) {
404                 ofs->config.index = false;
405                 ofs->config.nfs_export = false;
406                 pr_warn("fs on '%s' does not support file handles, falling back to index=off,nfs_export=off.\n",
407                         name);
408         }
409         ofs->nofh |= !fh_type;
410         /*
411          * Decoding origin file handle is required for persistent st_ino.
412          * Without persistent st_ino, xino=auto falls back to xino=off.
413          */
414         if (ofs->config.xino == OVL_XINO_AUTO &&
415             ofs->config.upperdir && !fh_type) {
416                 ofs->config.xino = OVL_XINO_OFF;
417                 pr_warn("fs on '%s' does not support file handles, falling back to xino=off.\n",
418                         name);
419         }
420 
421         /* Check if lower fs has 32bit inode numbers */
422         if (fh_type != FILEID_INO32_GEN)
423                 ofs->xino_mode = -1;
424 
425         return 0;
426 }
427 
 428 /* Workdir should not be a subdir of upperdir and vice versa */
429 static bool ovl_workdir_ok(struct dentry *workdir, struct dentry *upperdir)
430 {
431         bool ok = false;
432 
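             /*
              * lock_rename() returns the ancestor dentry when one directory is
              * a descendant of the other and NULL when the two subtrees are
              * disjoint, so a NULL "trap" here means workdir and upperdir do
              * not nest.
              */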
433         if (workdir != upperdir) {
434                 struct dentry *trap = lock_rename(workdir, upperdir);
435                 if (!IS_ERR(trap))
436                         unlock_rename(workdir, upperdir);
437                 ok = (trap == NULL);
438         }
439         return ok;
440 }
441 
442 static int ovl_setup_trap(struct super_block *sb, struct dentry *dir,
443                           struct inode **ptrap, const char *name)
444 {
445         struct inode *trap;
446         int err;
447 
448         trap = ovl_get_trap_inode(sb, dir);
449         err = PTR_ERR_OR_ZERO(trap);
450         if (err) {
451                 if (err == -ELOOP)
452                         pr_err("conflicting %s path\n", name);
453                 return err;
454         }
455 
456         *ptrap = trap;
457         return 0;
458 }
459 
460 /*
461  * Determine how we treat concurrent use of upperdir/workdir based on the
 462  * index feature. This is papering over mount leaks of container runtimes:
463  * for example, an old overlay mount is leaked and now its upperdir is
464  * attempted to be used as a lower layer in a new overlay mount.
465  */
466 static int ovl_report_in_use(struct ovl_fs *ofs, const char *name)
467 {
468         if (ofs->config.index) {
469                 pr_err("%s is in-use as upperdir/workdir of another mount, mount with '-o index=off' to override exclusive upperdir protection.\n",
470                        name);
471                 return -EBUSY;
472         } else {
473                 pr_warn("%s is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.\n",
474                         name);
475                 return 0;
476         }
477 }
478 
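     /*
      * Set up layer 0 from the upperdir path: clone a private mount, strip
      * atime flags from the clone, install a trap inode for overlap
      * detection, and take the in-use lock (or just warn when index=off).
      */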
479 static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs,
480                          struct ovl_layer *upper_layer,
481                          const struct path *upperpath)
482 {
483         struct vfsmount *upper_mnt;
484         int err;
485 
486         /* Upperdir path should not be r/o */
487         if (__mnt_is_readonly(upperpath->mnt)) {
488                 pr_err("upper fs is r/o, try multi-lower layers mount\n");
489                 err = -EINVAL;
490                 goto out;
491         }
492 
493         err = ovl_check_namelen(upperpath, ofs, ofs->config.upperdir);
494         if (err)
495                 goto out;
496 
497         err = ovl_setup_trap(sb, upperpath->dentry, &upper_layer->trap,
498                              "upperdir");
499         if (err)
500                 goto out;
501 
502         upper_mnt = clone_private_mount(upperpath);
503         err = PTR_ERR(upper_mnt);
504         if (IS_ERR(upper_mnt)) {
505                 pr_err("failed to clone upperpath\n");
506                 goto out;
507         }
508 
509         /* Don't inherit atime flags */
510         upper_mnt->mnt_flags &= ~(MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME);
511         upper_layer->mnt = upper_mnt;
512         upper_layer->idx = 0;
513         upper_layer->fsid = 0;
514 
515         /*
516          * Inherit SB_NOSEC flag from upperdir.
517          *
518          * This optimization changes behavior when a security related attribute
519          * (suid/sgid/security.*) is changed on an underlying layer.  This is
520          * okay because we don't yet have guarantees in that case, but it will
521          * need careful treatment once we want to honour changes to underlying
522          * filesystems.
523          */
524         if (upper_mnt->mnt_sb->s_flags & SB_NOSEC)
525                 sb->s_flags |= SB_NOSEC;
526 
527         if (ovl_inuse_trylock(ovl_upper_mnt(ofs)->mnt_root)) {
528                 ofs->upperdir_locked = true;
529         } else {
530                 err = ovl_report_in_use(ofs, "upperdir");
531                 if (err)
532                         goto out;
533         }
534 
535         err = 0;
536 out:
537         return err;
538 }
539 
540 /*
 541  * Returns 1 if RENAME_WHITEOUT is supported, 0 if it is not supported and
 542  * a negative value if an error is encountered.
543  */
544 static int ovl_check_rename_whiteout(struct ovl_fs *ofs)
545 {
546         struct dentry *workdir = ofs->workdir;
547         struct inode *dir = d_inode(workdir);
548         struct dentry *temp;
549         struct dentry *dest;
550         struct dentry *whiteout;
551         struct name_snapshot name;
552         int err;
553 
554         inode_lock_nested(dir, I_MUTEX_PARENT);
555 
556         temp = ovl_create_temp(ofs, workdir, OVL_CATTR(S_IFREG | 0));
557         err = PTR_ERR(temp);
558         if (IS_ERR(temp))
559                 goto out_unlock;
560 
561         dest = ovl_lookup_temp(ofs, workdir);
562         err = PTR_ERR(dest);
563         if (IS_ERR(dest)) {
564                 dput(temp);
565                 goto out_unlock;
566         }
567 
568         /* Name is inline and stable - using snapshot as a copy helper */
569         take_dentry_name_snapshot(&name, temp);
570         err = ovl_do_rename(ofs, dir, temp, dir, dest, RENAME_WHITEOUT);
571         if (err) {
572                 if (err == -EINVAL)
573                         err = 0;
574                 goto cleanup_temp;
575         }
576 
577         whiteout = ovl_lookup_upper(ofs, name.name.name, workdir, name.name.len);
578         err = PTR_ERR(whiteout);
579         if (IS_ERR(whiteout))
580                 goto cleanup_temp;
581 
582         err = ovl_upper_is_whiteout(ofs, whiteout);
583 
584         /* Best effort cleanup of whiteout and temp file */
585         if (err)
586                 ovl_cleanup(ofs, dir, whiteout);
587         dput(whiteout);
588 
589 cleanup_temp:
590         ovl_cleanup(ofs, dir, temp);
591         release_dentry_name_snapshot(&name);
592         dput(temp);
593         dput(dest);
594 
595 out_unlock:
596         inode_unlock(dir);
597 
598         return err;
599 }
600 
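     /*
      * Look up @name under @parent and create it with @mode if it does not
      * exist yet.  Consumes the caller's reference on @parent, which lets
      * ovl_create_volatile_dirty() below walk down the path one component
      * at a time.
      */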
601 static struct dentry *ovl_lookup_or_create(struct ovl_fs *ofs,
602                                            struct dentry *parent,
603                                            const char *name, umode_t mode)
604 {
605         size_t len = strlen(name);
606         struct dentry *child;
607 
608         inode_lock_nested(parent->d_inode, I_MUTEX_PARENT);
609         child = ovl_lookup_upper(ofs, name, parent, len);
610         if (!IS_ERR(child) && !child->d_inode)
611                 child = ovl_create_real(ofs, parent->d_inode, child,
612                                         OVL_CATTR(mode));
613         inode_unlock(parent->d_inode);
614         dput(parent);
615 
616         return child;
617 }
618 
619 /*
620  * Creates $workdir/work/incompat/volatile/dirty file if it is not already
621  * present.
622  */
623 static int ovl_create_volatile_dirty(struct ovl_fs *ofs)
624 {
625         unsigned int ctr;
626         struct dentry *d = dget(ofs->workbasedir);
627         static const char *const volatile_path[] = {
628                 OVL_WORKDIR_NAME, "incompat", "volatile", "dirty"
629         };
630         const char *const *name = volatile_path;
631 
632         for (ctr = ARRAY_SIZE(volatile_path); ctr; ctr--, name++) {
633                 d = ovl_lookup_or_create(ofs, d, *name, ctr > 1 ? S_IFDIR : S_IFREG);
634                 if (IS_ERR(d))
635                         return PTR_ERR(d);
636         }
637         dput(d);
638         return 0;
639 }
640 
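     /*
      * Create the workdir and probe the upper/work fs for the features
      * overlayfs relies on (d_type, O_TMPFILE, RENAME_WHITEOUT, overlay
      * xattrs, file handle decoding), degrading mount options with a
      * warning where a feature is missing.
      */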
641 static int ovl_make_workdir(struct super_block *sb, struct ovl_fs *ofs,
642                             const struct path *workpath)
643 {
644         struct vfsmount *mnt = ovl_upper_mnt(ofs);
645         struct dentry *workdir;
646         struct file *tmpfile;
647         bool rename_whiteout;
648         bool d_type;
649         int fh_type;
650         int err;
651 
652         err = mnt_want_write(mnt);
653         if (err)
654                 return err;
655 
656         workdir = ovl_workdir_create(ofs, OVL_WORKDIR_NAME, false);
657         err = PTR_ERR(workdir);
658         if (IS_ERR_OR_NULL(workdir))
659                 goto out;
660 
661         ofs->workdir = workdir;
662 
663         err = ovl_setup_trap(sb, ofs->workdir, &ofs->workdir_trap, "workdir");
664         if (err)
665                 goto out;
666 
667         /*
668          * Upper should support d_type, else whiteouts are visible.  Given
 669          * workdir and upper are on the same fs, we can do iterate_dir() on
 670          * workdir. This check requires successful creation of workdir in
 671          * the previous step.
672          */
673         err = ovl_check_d_type_supported(workpath);
674         if (err < 0)
675                 goto out;
676 
677         d_type = err;
678         if (!d_type)
679                 pr_warn("upper fs needs to support d_type.\n");
680 
681         /* Check if upper/work fs supports O_TMPFILE */
682         tmpfile = ovl_do_tmpfile(ofs, ofs->workdir, S_IFREG | 0);
683         ofs->tmpfile = !IS_ERR(tmpfile);
684         if (ofs->tmpfile)
685                 fput(tmpfile);
686         else
687                 pr_warn("upper fs does not support tmpfile.\n");
688 
689 
690         /* Check if upper/work fs supports RENAME_WHITEOUT */
691         err = ovl_check_rename_whiteout(ofs);
692         if (err < 0)
693                 goto out;
694 
695         rename_whiteout = err;
696         if (!rename_whiteout)
697                 pr_warn("upper fs does not support RENAME_WHITEOUT.\n");
698 
699         /*
700          * Check if upper/work fs supports (trusted|user).overlay.* xattr
701          */
702         err = ovl_setxattr(ofs, ofs->workdir, OVL_XATTR_OPAQUE, "", 1);
703         if (err) {
704                 pr_warn("failed to set xattr on upper\n");
705                 ofs->noxattr = true;
706                 if (ovl_redirect_follow(ofs)) {
707                         ofs->config.redirect_mode = OVL_REDIRECT_NOFOLLOW;
708                         pr_warn("...falling back to redirect_dir=nofollow.\n");
709                 }
710                 if (ofs->config.metacopy) {
711                         ofs->config.metacopy = false;
712                         pr_warn("...falling back to metacopy=off.\n");
713                 }
714                 if (ofs->config.index) {
715                         ofs->config.index = false;
716                         pr_warn("...falling back to index=off.\n");
717                 }
718                 if (ovl_has_fsid(ofs)) {
719                         ofs->config.uuid = OVL_UUID_NULL;
720                         pr_warn("...falling back to uuid=null.\n");
721                 }
722                 /*
723                  * xattr support is required for persistent st_ino.
724                  * Without persistent st_ino, xino=auto falls back to xino=off.
725                  */
726                 if (ofs->config.xino == OVL_XINO_AUTO) {
727                         ofs->config.xino = OVL_XINO_OFF;
728                         pr_warn("...falling back to xino=off.\n");
729                 }
730                 if (err == -EPERM && !ofs->config.userxattr)
731                         pr_info("try mounting with 'userxattr' option\n");
732                 err = 0;
733         } else {
734                 ovl_removexattr(ofs, ofs->workdir, OVL_XATTR_OPAQUE);
735         }
736 
737         /*
738          * We allowed sub-optimal upper fs configuration and don't want to break
739          * users over kernel upgrade, but we never allowed remote upper fs, so
740          * we can enforce strict requirements for remote upper fs.
741          */
742         if (ovl_dentry_remote(ofs->workdir) &&
743             (!d_type || !rename_whiteout || ofs->noxattr)) {
744                 pr_err("upper fs missing required features.\n");
745                 err = -EINVAL;
746                 goto out;
747         }
748 
749         /*
 750          * For a volatile mount, create an incompat/volatile/dirty file to keep
751          * track of it.
752          */
753         if (ofs->config.ovl_volatile) {
754                 err = ovl_create_volatile_dirty(ofs);
755                 if (err < 0) {
756                         pr_err("Failed to create volatile/dirty file.\n");
757                         goto out;
758                 }
759         }
760 
761         /* Check if upper/work fs supports file handles */
762         fh_type = ovl_can_decode_fh(ofs->workdir->d_sb);
763         if (ofs->config.index && !fh_type) {
764                 ofs->config.index = false;
765                 pr_warn("upper fs does not support file handles, falling back to index=off.\n");
766         }
767         ofs->nofh |= !fh_type;
768 
769         /* Check if upper fs has 32bit inode numbers */
770         if (fh_type != FILEID_INO32_GEN)
771                 ofs->xino_mode = -1;
772 
773         /* NFS export of r/w mount depends on index */
774         if (ofs->config.nfs_export && !ofs->config.index) {
775                 pr_warn("NFS export requires \"index=on\", falling back to nfs_export=off.\n");
776                 ofs->config.nfs_export = false;
777         }
778 out:
779         mnt_drop_write(mnt);
780         return err;
781 }
782 
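     /*
      * Validate that workdir and upperdir are on the same mount but in
      * separate subtrees, take the in-use lock and trap on workbasedir,
      * then create the actual work directory.
      */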
783 static int ovl_get_workdir(struct super_block *sb, struct ovl_fs *ofs,
784                            const struct path *upperpath,
785                            const struct path *workpath)
786 {
787         int err;
788 
789         err = -EINVAL;
790         if (upperpath->mnt != workpath->mnt) {
791                 pr_err("workdir and upperdir must reside under the same mount\n");
792                 return err;
793         }
794         if (!ovl_workdir_ok(workpath->dentry, upperpath->dentry)) {
795                 pr_err("workdir and upperdir must be separate subtrees\n");
796                 return err;
797         }
798 
799         ofs->workbasedir = dget(workpath->dentry);
800 
801         if (ovl_inuse_trylock(ofs->workbasedir)) {
802                 ofs->workdir_locked = true;
803         } else {
804                 err = ovl_report_in_use(ofs, "workdir");
805                 if (err)
806                         return err;
807         }
808 
809         err = ovl_setup_trap(sb, ofs->workbasedir, &ofs->workbasedir_trap,
810                              "workdir");
811         if (err)
812                 return err;
813 
814         return ovl_make_workdir(sb, ofs, workpath);
815 }
816 
817 static int ovl_get_indexdir(struct super_block *sb, struct ovl_fs *ofs,
818                             struct ovl_entry *oe, const struct path *upperpath)
819 {
820         struct vfsmount *mnt = ovl_upper_mnt(ofs);
821         struct dentry *indexdir;
822         struct dentry *origin = ovl_lowerstack(oe)->dentry;
823         const struct ovl_fh *fh;
824         int err;
825 
826         fh = ovl_get_origin_fh(ofs, origin);
827         if (IS_ERR(fh))
828                 return PTR_ERR(fh);
829 
830         err = mnt_want_write(mnt);
831         if (err)
832                 goto out_free_fh;
833 
834         /* Verify lower root is upper root origin */
835         err = ovl_verify_origin_fh(ofs, upperpath->dentry, fh, true);
836         if (err) {
837                 pr_err("failed to verify upper root origin\n");
838                 goto out;
839         }
840 
 841         /* index dir will also act as workdir */
842         iput(ofs->workdir_trap);
843         ofs->workdir_trap = NULL;
844         dput(ofs->workdir);
845         ofs->workdir = NULL;
846         indexdir = ovl_workdir_create(ofs, OVL_INDEXDIR_NAME, true);
847         if (IS_ERR(indexdir)) {
848                 err = PTR_ERR(indexdir);
849         } else if (indexdir) {
850                 ofs->workdir = indexdir;
851                 err = ovl_setup_trap(sb, indexdir, &ofs->workdir_trap,
852                                      "indexdir");
853                 if (err)
854                         goto out;
855 
856                 /*
857                  * Verify upper root is exclusively associated with index dir.
858                  * Older kernels stored upper fh in ".overlay.origin"
859                  * xattr. If that xattr exists, verify that it is a match to
860                  * upper dir file handle. In any case, verify or set xattr
861                  * ".overlay.upper" to indicate that index may have
862                  * directory entries.
863                  */
864                 if (ovl_check_origin_xattr(ofs, indexdir)) {
865                         err = ovl_verify_origin_xattr(ofs, indexdir,
866                                                       OVL_XATTR_ORIGIN,
867                                                       upperpath->dentry, true,
868                                                       false);
869                         if (err)
870                                 pr_err("failed to verify index dir 'origin' xattr\n");
871                 }
872                 err = ovl_verify_upper(ofs, indexdir, upperpath->dentry, true);
873                 if (err)
874                         pr_err("failed to verify index dir 'upper' xattr\n");
875 
876                 /* Cleanup bad/stale/orphan index entries */
877                 if (!err)
878                         err = ovl_indexdir_cleanup(ofs);
879         }
880         if (err || !indexdir)
881                 pr_warn("try deleting index dir or mounting with '-o index=off' to disable inodes index.\n");
882 
883 out:
884         mnt_drop_write(mnt);
885 out_free_fh:
886         kfree(fh);
887         return err;
888 }
889 
890 static bool ovl_lower_uuid_ok(struct ovl_fs *ofs, const uuid_t *uuid)
891 {
892         unsigned int i;
893 
894         if (!ofs->config.nfs_export && !ovl_upper_mnt(ofs))
895                 return true;
896 
897         /*
 898          * We allow using a single lower layer with a null uuid for index and
 899          * nfs_export, for example to support those features with a single
 900          * lower squashfs.  To avoid regressions in overlay setups with a
 901          * re-formatted lower squashfs, do not allow decoding origin with a
 902          * lower null uuid unless the user opted in to one of the new features
 903          * that require following the lower inode of a non-dir upper.
904          */
905         if (ovl_allow_offline_changes(ofs) && uuid_is_null(uuid))
906                 return false;
907 
908         for (i = 0; i < ofs->numfs; i++) {
909                 /*
910                  * We use uuid to associate an overlay lower file handle with a
911                  * lower layer, so we can accept lower fs with null uuid as long
912                  * as all lower layers with null uuid are on the same fs.
 913                  * If we detect multiple lower fs with the same uuid, we
914                  * disable lower file handle decoding on all of them.
915                  */
916                 if (ofs->fs[i].is_lower &&
917                     uuid_equal(&ofs->fs[i].sb->s_uuid, uuid)) {
918                         ofs->fs[i].bad_uuid = true;
919                         return false;
920                 }
921         }
922         return true;
923 }
924 
925 /* Get a unique fsid for the layer */
926 static int ovl_get_fsid(struct ovl_fs *ofs, const struct path *path)
927 {
928         struct super_block *sb = path->mnt->mnt_sb;
929         unsigned int i;
930         dev_t dev;
931         int err;
932         bool bad_uuid = false;
933         bool warn = false;
934 
935         for (i = 0; i < ofs->numfs; i++) {
936                 if (ofs->fs[i].sb == sb)
937                         return i;
938         }
939 
940         if (!ovl_lower_uuid_ok(ofs, &sb->s_uuid)) {
941                 bad_uuid = true;
942                 if (ofs->config.xino == OVL_XINO_AUTO) {
943                         ofs->config.xino = OVL_XINO_OFF;
944                         warn = true;
945                 }
946                 if (ofs->config.index || ofs->config.nfs_export) {
947                         ofs->config.index = false;
948                         ofs->config.nfs_export = false;
949                         warn = true;
950                 }
951                 if (warn) {
952                         pr_warn("%s uuid detected in lower fs '%pd2', falling back to xino=%s,index=off,nfs_export=off.\n",
953                                 uuid_is_null(&sb->s_uuid) ? "null" :
954                                                             "conflicting",
955                                 path->dentry, ovl_xino_mode(&ofs->config));
956                 }
957         }
958 
959         err = get_anon_bdev(&dev);
960         if (err) {
961                 pr_err("failed to get anonymous bdev for lowerpath\n");
962                 return err;
963         }
964 
965         ofs->fs[ofs->numfs].sb = sb;
966         ofs->fs[ofs->numfs].pseudo_dev = dev;
967         ofs->fs[ofs->numfs].bad_uuid = bad_uuid;
968 
969         return ofs->numfs++;
970 }
971 
972 /*
973  * The fsid after the last lower fsid is used for the data layers.
974  * It is a "null fs" with a null sb, null uuid, and no pseudo dev.
975  */
976 static int ovl_get_data_fsid(struct ovl_fs *ofs)
977 {
978         return ofs->numfs;
979 }
980 
981 
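     /*
      * Populate ofs->fs[] and layers[]: layer 0 is reserved for the upper
      * layer, each lower layer gets a private read-only clone of its mount,
      * a trap inode and an fsid, and data-only layers share the trailing
      * "null fs" fsid.  Finally decide how many high inode bits xino needs.
      */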
982 static int ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs,
983                           struct ovl_fs_context *ctx, struct ovl_layer *layers)
984 {
985         int err;
986         unsigned int i;
987         size_t nr_merged_lower;
988 
989         ofs->fs = kcalloc(ctx->nr + 2, sizeof(struct ovl_sb), GFP_KERNEL);
990         if (ofs->fs == NULL)
991                 return -ENOMEM;
992 
993         /*
 994          * idx/fsid 0 is reserved for the upper fs even with a lower only overlay
 995          * and the last fsid is reserved for the "null fs" of the data layers.
996          */
997         ofs->numfs++;
998 
999         /*
1000          * All lower layers that share the same fs as the upper layer use the
1001          * same pseudo_dev as the upper layer.  Allocate fs[0].pseudo_dev even
1002          * for a lower only overlay to simplify ovl_fs_free().
1003          * is_lower will be set if the upper fs is shared with a lower layer.
1004          */
1005         err = get_anon_bdev(&ofs->fs[0].pseudo_dev);
1006         if (err) {
1007                 pr_err("failed to get anonymous bdev for upper fs\n");
1008                 return err;
1009         }
1010 
1011         if (ovl_upper_mnt(ofs)) {
1012                 ofs->fs[0].sb = ovl_upper_mnt(ofs)->mnt_sb;
1013                 ofs->fs[0].is_lower = false;
1014         }
1015 
1016         nr_merged_lower = ctx->nr - ctx->nr_data;
1017         for (i = 0; i < ctx->nr; i++) {
1018                 struct ovl_fs_context_layer *l = &ctx->lower[i];
1019                 struct vfsmount *mnt;
1020                 struct inode *trap;
1021                 int fsid;
1022 
1023                 if (i < nr_merged_lower)
1024                         fsid = ovl_get_fsid(ofs, &l->path);
1025                 else
1026                         fsid = ovl_get_data_fsid(ofs);
1027                 if (fsid < 0)
1028                         return fsid;
1029 
1030                 /*
1031                  * Check if lower root conflicts with this overlay's layers before
1032                  * checking if it is in-use as upperdir/workdir of "another"
1033                  * mount, because we do not bother to check in ovl_is_inuse() if
1034                  * the upperdir/workdir is in fact in-use by our
1035                  * upperdir/workdir.
1036                  */
1037                 err = ovl_setup_trap(sb, l->path.dentry, &trap, "lowerdir");
1038                 if (err)
1039                         return err;
1040 
1041                 if (ovl_is_inuse(l->path.dentry)) {
1042                         err = ovl_report_in_use(ofs, "lowerdir");
1043                         if (err) {
1044                                 iput(trap);
1045                                 return err;
1046                         }
1047                 }
1048 
1049                 mnt = clone_private_mount(&l->path);
1050                 err = PTR_ERR(mnt);
1051                 if (IS_ERR(mnt)) {
1052                         pr_err("failed to clone lowerpath\n");
1053                         iput(trap);
1054                         return err;
1055                 }
1056 
1057                 /*
1058                  * Make lower layers R/O.  That way fchmod/fchown on lower file
1059                  * will fail instead of modifying lower fs.
1060                  */
1061                 mnt->mnt_flags |= MNT_READONLY | MNT_NOATIME;
1062 
1063                 layers[ofs->numlayer].trap = trap;
1064                 layers[ofs->numlayer].mnt = mnt;
1065                 layers[ofs->numlayer].idx = ofs->numlayer;
1066                 layers[ofs->numlayer].fsid = fsid;
1067                 layers[ofs->numlayer].fs = &ofs->fs[fsid];
1068                 /* Store for printing lowerdir=... in ovl_show_options() */
1069                 ofs->config.lowerdirs[ofs->numlayer] = l->name;
1070                 l->name = NULL;
1071                 ofs->numlayer++;
1072                 ofs->fs[fsid].is_lower = true;
1073         }
1074 
1075         /*
1076          * When all layers are on the same fs, overlay can use real inode
1077          * numbers.  With mount option "xino=<on|auto>", the mounter declares
1078          * that there are enough free high bits in the underlying fs to hold
1079          * the unique fsid.  If overlayfs does encounter underlying inodes
1080          * using the high xino bits reserved for fsid, it emits a warning and
1081          * uses the original inode number or a non-persistent inode number
1082          * allocated from a dedicated range.
1083          */
1084         if (ofs->numfs - !ovl_upper_mnt(ofs) == 1) {
1085                 if (ofs->config.xino == OVL_XINO_ON)
1086                         pr_info("\"xino=on\" is useless with all layers on same fs, ignore.\n");
1087                 ofs->xino_mode = 0;
1088         } else if (ofs->config.xino == OVL_XINO_OFF) {
1089                 ofs->xino_mode = -1;
1090         } else if (ofs->xino_mode < 0) {
1091                 /*
1092                  * This is a roundup of the number of bits needed to encode
1093                  * fsid, where fsid 0 is reserved for the upper fs (even with a
1094                  * lower only overlay), plus 1 extra bit reserved for the
1095                  * non-persistent inode number range that is used for resolving
1096                  * xino lower bits overflow.
1097                  */
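                     /*
                      * For example, an upper fs plus two distinct lower fs
                      * gives numfs == 3, so ilog2(2) + 2 == 3 high bits are
                      * reserved: 2 bits for the fsid and 1 bit for the
                      * overflow range.
                      */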
1098                 BUILD_BUG_ON(ilog2(OVL_MAX_STACK) > 30);
1099                 ofs->xino_mode = ilog2(ofs->numfs - 1) + 2;
1100         }
1101 
1102         if (ofs->xino_mode > 0) {
1103                 pr_info("\"xino\" feature enabled using %d upper inode bits.\n",
1104                         ofs->xino_mode);
1105         }
1106 
1107         return 0;
1108 }
1109 
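     /*
      * Validate every lowerdir, wire up all layers via ovl_get_layers() and
      * build the root ovl_entry from the merged (non data-only) lower
      * layers.
      */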
1110 static struct ovl_entry *ovl_get_lowerstack(struct super_block *sb,
1111                                             struct ovl_fs_context *ctx,
1112                                             struct ovl_fs *ofs,
1113                                             struct ovl_layer *layers)
1114 {
1115         int err;
1116         unsigned int i;
1117         size_t nr_merged_lower;
1118         struct ovl_entry *oe;
1119         struct ovl_path *lowerstack;
1120 
1121         struct ovl_fs_context_layer *l;
1122 
1123         if (!ofs->config.upperdir && ctx->nr == 1) {
1124                 pr_err("at least 2 lowerdir are needed while upperdir nonexistent\n");
1125                 return ERR_PTR(-EINVAL);
1126         }
1127 
1128         err = -EINVAL;
1129         for (i = 0; i < ctx->nr; i++) {
1130                 l = &ctx->lower[i];
1131 
1132                 err = ovl_lower_dir(l->name, &l->path, ofs, &sb->s_stack_depth);
1133                 if (err)
1134                         return ERR_PTR(err);
1135         }
1136 
1137         err = -EINVAL;
1138         sb->s_stack_depth++;
1139         if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
1140                 pr_err("maximum fs stacking depth exceeded\n");
1141                 return ERR_PTR(err);
1142         }
1143 
1144         err = ovl_get_layers(sb, ofs, ctx, layers);
1145         if (err)
1146                 return ERR_PTR(err);
1147 
1148         err = -ENOMEM;
1149         /* Data-only layers are not merged in root directory */
1150         nr_merged_lower = ctx->nr - ctx->nr_data;
1151         oe = ovl_alloc_entry(nr_merged_lower);
1152         if (!oe)
1153                 return ERR_PTR(err);
1154 
1155         lowerstack = ovl_lowerstack(oe);
1156         for (i = 0; i < nr_merged_lower; i++) {
1157                 l = &ctx->lower[i];
1158                 lowerstack[i].dentry = dget(l->path.dentry);
1159                 lowerstack[i].layer = &ofs->layers[i + 1];
1160         }
1161         ofs->numdatalayer = ctx->nr_data;
1162 
1163         return oe;
1164 }
1165 
1166 /*
1167  * Check if this layer root is a descendant of:
1168  * - another layer of this overlayfs instance
1169  * - upper/work dir of any overlayfs instance
1170  */
1171 static int ovl_check_layer(struct super_block *sb, struct ovl_fs *ofs,
1172                            struct dentry *dentry, const char *name,
1173                            bool is_lower)
1174 {
1175         struct dentry *next = dentry, *parent;
1176         int err = 0;
1177 
1178         if (!dentry)
1179                 return 0;
1180 
1181         parent = dget_parent(next);
1182 
1183         /* Walk back ancestors to root (inclusive) looking for traps */
1184         while (!err && parent != next) {
1185                 if (is_lower && ovl_lookup_trap_inode(sb, parent)) {
1186                         err = -ELOOP;
1187                         pr_err("overlapping %s path\n", name);
1188                 } else if (ovl_is_inuse(parent)) {
1189                         err = ovl_report_in_use(ofs, name);
1190                 }
1191                 next = parent;
1192                 parent = dget_parent(next);
1193                 dput(next);
1194         }
1195 
1196         dput(parent);
1197 
1198         return err;
1199 }
1200 
1201 /*
1202  * Check if any of the layers or work dirs overlap.
1203  */
1204 static int ovl_check_overlapping_layers(struct super_block *sb,
1205                                         struct ovl_fs *ofs)
1206 {
1207         int i, err;
1208 
1209         if (ovl_upper_mnt(ofs)) {
1210                 err = ovl_check_layer(sb, ofs, ovl_upper_mnt(ofs)->mnt_root,
1211                                       "upperdir", false);
1212                 if (err)
1213                         return err;
1214 
1215                 /*
1216                  * Checking workbasedir avoids hitting ovl_is_inuse(parent) of
1217                  * this instance and covers overlapping work and index dirs,
1218                  * unless work or index dir have been moved since created inside
1219                  * workbasedir.  In that case, we already have their traps in
1220                  * inode cache and we will catch that case on lookup.
1221                  */
1222                 err = ovl_check_layer(sb, ofs, ofs->workbasedir, "workdir",
1223                                       false);
1224                 if (err)
1225                         return err;
1226         }
1227 
1228         for (i = 1; i < ofs->numlayer; i++) {
1229                 err = ovl_check_layer(sb, ofs,
1230                                       ofs->layers[i].mnt->mnt_root,
1231                                       "lowerdir", true);
1232                 if (err)
1233                         return err;
1234         }
1235 
1236         return 0;
1237 }
1238 
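     /*
      * Allocate the overlay root dentry and inode.  The root uses the upper
      * st_ino when an upper layer exists, and the lower layers (except the
      * lowermost) are scanned for the "overlay.opaque=x" marker that
      * enables xwhiteouts.
      */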
1239 static struct dentry *ovl_get_root(struct super_block *sb,
1240                                    struct dentry *upperdentry,
1241                                    struct ovl_entry *oe)
1242 {
1243         struct dentry *root;
1244         struct ovl_fs *ofs = OVL_FS(sb);
1245         struct ovl_path *lowerpath = ovl_lowerstack(oe);
1246         unsigned long ino = d_inode(lowerpath->dentry)->i_ino;
1247         int fsid = lowerpath->layer->fsid;
1248         struct ovl_inode_params oip = {
1249                 .upperdentry = upperdentry,
1250                 .oe = oe,
1251         };
1252 
1253         root = d_make_root(ovl_new_inode(sb, S_IFDIR, 0));
1254         if (!root)
1255                 return NULL;
1256 
1257         if (upperdentry) {
1258                 /* Root inode uses upper st_ino/i_ino */
1259                 ino = d_inode(upperdentry)->i_ino;
1260                 fsid = 0;
1261                 ovl_dentry_set_upper_alias(root);
1262                 if (ovl_is_impuredir(sb, upperdentry))
1263                         ovl_set_flag(OVL_IMPURE, d_inode(root));
1264         }
1265 
1266         /* Look for xwhiteouts marker except in the lowermost layer */
1267         for (int i = 0; i < ovl_numlower(oe) - 1; i++, lowerpath++) {
1268                 struct path path = {
1269                         .mnt = lowerpath->layer->mnt,
1270                         .dentry = lowerpath->dentry,
1271                 };
1272 
1273                 /* overlay.opaque=x means xwhiteouts directory */
1274                 if (ovl_get_opaquedir_val(ofs, &path) == 'x') {
1275                         ovl_layer_set_xwhiteouts(ofs, lowerpath->layer);
1276                         ovl_dentry_set_xwhiteouts(root);
1277                 }
1278         }
1279 
1280         /* Root is always merge -> can have whiteouts */
1281         ovl_set_flag(OVL_WHITEOUTS, d_inode(root));
1282         ovl_dentry_set_flag(OVL_E_CONNECTED, root);
1283         ovl_set_upperdata(d_inode(root));
1284         ovl_inode_init(d_inode(root), &oip, ino, fsid);
1285         ovl_dentry_init_flags(root, upperdentry, oe, DCACHE_OP_WEAK_REVALIDATE);
1286         /* root keeps a reference of upperdentry */
1287         dget(upperdentry);
1288 
1289         return root;
1290 }
1291 
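     /*
      * Mount-time setup, roughly in order: verify parameters, set up the
      * upper layer and workdir, build the lower layer stack, optionally set
      * up the index dir, check for overlapping layers, and finally create
      * the root dentry.
      */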
1292 int ovl_fill_super(struct super_block *sb, struct fs_context *fc)
1293 {
1294         struct ovl_fs *ofs = sb->s_fs_info;
1295         struct ovl_fs_context *ctx = fc->fs_private;
1296         struct dentry *root_dentry;
1297         struct ovl_entry *oe;
1298         struct ovl_layer *layers;
1299         struct cred *cred;
1300         int err;
1301 
1302         err = -EIO;
1303         if (WARN_ON(fc->user_ns != current_user_ns()))
1304                 goto out_err;
1305 
1306         sb->s_d_op = &ovl_dentry_operations;
1307 
1308         err = -ENOMEM;
1309         ofs->creator_cred = cred = prepare_creds();
1310         if (!cred)
1311                 goto out_err;
1312 
1313         err = ovl_fs_params_verify(ctx, &ofs->config);
1314         if (err)
1315                 goto out_err;
1316 
1317         err = -EINVAL;
1318         if (ctx->nr == 0) {
1319                 if (!(fc->sb_flags & SB_SILENT))
1320                         pr_err("missing 'lowerdir'\n");
1321                 goto out_err;
1322         }
1323 
1324         err = -ENOMEM;
1325         layers = kcalloc(ctx->nr + 1, sizeof(struct ovl_layer), GFP_KERNEL);
1326         if (!layers)
1327                 goto out_err;
1328 
1329         ofs->config.lowerdirs = kcalloc(ctx->nr + 1, sizeof(char *), GFP_KERNEL);
1330         if (!ofs->config.lowerdirs) {
1331                 kfree(layers);
1332                 goto out_err;
1333         }
1334         ofs->layers = layers;
1335         /*
1336          * Layer 0 is reserved for upper even if there's no upper.
1337          * config.lowerdirs[0] is used for storing the user provided colon
1338          * separated lowerdir string.
1339          */
1340         ofs->config.lowerdirs[0] = ctx->lowerdir_all;
1341         ctx->lowerdir_all = NULL;
1342         ofs->numlayer = 1;
1343 
1344         sb->s_stack_depth = 0;
1345         sb->s_maxbytes = MAX_LFS_FILESIZE;
1346         atomic_long_set(&ofs->last_ino, 1);
1347         /* Assume underlying fs uses 32bit inodes unless proven otherwise */
1348         if (ofs->config.xino != OVL_XINO_OFF) {
1349                 ofs->xino_mode = BITS_PER_LONG - 32;
1350                 if (!ofs->xino_mode) {
1351                         pr_warn("xino not supported on 32bit kernel, falling back to xino=off.\n");
1352                         ofs->config.xino = OVL_XINO_OFF;
1353                 }
1354         }
1355 
1356         /* alloc/destroy_inode needed for setting up traps in inode cache */
1357         sb->s_op = &ovl_super_operations;
1358 
1359         if (ofs->config.upperdir) {
1360                 struct super_block *upper_sb;
1361 
1362                 err = -EINVAL;
1363                 if (!ofs->config.workdir) {
1364                         pr_err("missing 'workdir'\n");
1365                         goto out_err;
1366                 }
1367 
1368                 err = ovl_get_upper(sb, ofs, &layers[0], &ctx->upper);
1369                 if (err)
1370                         goto out_err;
1371 
1372                 upper_sb = ovl_upper_mnt(ofs)->mnt_sb;
1373                 if (!ovl_should_sync(ofs)) {
1374                         ofs->errseq = errseq_sample(&upper_sb->s_wb_err);
1375                         if (errseq_check(&upper_sb->s_wb_err, ofs->errseq)) {
1376                                 err = -EIO;
1377                                 pr_err("Cannot mount volatile when upperdir has an unseen error. Sync upperdir fs to clear state.\n");
1378                                 goto out_err;
1379                         }
1380                 }
1381 
1382                 err = ovl_get_workdir(sb, ofs, &ctx->upper, &ctx->work);
1383                 if (err)
1384                         goto out_err;
1385 
1386                 if (!ofs->workdir)
1387                         sb->s_flags |= SB_RDONLY;
1388 
1389                 sb->s_stack_depth = upper_sb->s_stack_depth;
1390                 sb->s_time_gran = upper_sb->s_time_gran;
1391         }
1392         oe = ovl_get_lowerstack(sb, ctx, ofs, layers);
1393         err = PTR_ERR(oe);
1394         if (IS_ERR(oe))
1395                 goto out_err;
1396 
1397         /* If the upper fs is nonexistent, we mark overlayfs r/o too */
1398         if (!ovl_upper_mnt(ofs))
1399                 sb->s_flags |= SB_RDONLY;
1400 
1401         if (!ovl_origin_uuid(ofs) && ofs->numfs > 1) {
1402                 pr_warn("The uuid=off requires a single fs for lower and upper, falling back to uuid=null.\n");
1403                 ofs->config.uuid = OVL_UUID_NULL;
1404         } else if (ovl_has_fsid(ofs) && ovl_upper_mnt(ofs)) {
1405                 /* Use per instance persistent uuid/fsid */
1406                 ovl_init_uuid_xattr(sb, ofs, &ctx->upper);
1407         }
1408 
1409         if (!ovl_force_readonly(ofs) && ofs->config.index) {
1410                 err = ovl_get_indexdir(sb, ofs, oe, &ctx->upper);
1411                 if (err)
1412                         goto out_free_oe;
1413 
1414                 /* Force r/o mount with no index dir */
1415                 if (!ofs->workdir)
1416                         sb->s_flags |= SB_RDONLY;
1417         }
1418 
1419         err = ovl_check_overlapping_layers(sb, ofs);
1420         if (err)
1421                 goto out_free_oe;
1422 
1423         /* Show index=off in /proc/mounts for forced r/o mount */
1424         if (!ofs->workdir) {
1425                 ofs->config.index = false;
1426                 if (ovl_upper_mnt(ofs) && ofs->config.nfs_export) {
1427                         pr_warn("NFS export requires an index dir, falling back to nfs_export=off.\n");
1428                         ofs->config.nfs_export = false;
1429                 }
1430         }
1431 
1432         if (ofs->config.metacopy && ofs->config.nfs_export) {
1433                 pr_warn("NFS export is not supported with metadata only copy up, falling back to nfs_export=off.\n");
1434                 ofs->config.nfs_export = false;
1435         }
1436 
1437         /*
1438          * Support encoding decodable file handles with nfs_export=on
1439          * and encoding non-decodable file handles with nfs_export=off
1440          * if all layers support file handles.
1441          */
1442         if (ofs->config.nfs_export)
1443                 sb->s_export_op = &ovl_export_operations;
1444         else if (!ofs->nofh)
1445                 sb->s_export_op = &ovl_export_fid_operations;
1446 
1447         /* Never override disk quota limits or use reserved space */
1448         cap_lower(cred->cap_effective, CAP_SYS_RESOURCE);
1449 
1450         sb->s_magic = OVERLAYFS_SUPER_MAGIC;
1451         sb->s_xattr = ovl_xattr_handlers(ofs);
1452         sb->s_fs_info = ofs;
1453 #ifdef CONFIG_FS_POSIX_ACL
1454         sb->s_flags |= SB_POSIXACL;
1455 #endif
1456         sb->s_iflags |= SB_I_SKIP_SYNC;
1457         /*
1458          * Ensure that umask handling is done by the filesystems used
1459          * for the upper layer instead of overlayfs, as that would
1460          * lead to unexpected results.
1461          */
1462         sb->s_iflags |= SB_I_NOUMASK;
1463         sb->s_iflags |= SB_I_EVM_HMAC_UNSUPPORTED;
1464 
1465         err = -ENOMEM;
1466         root_dentry = ovl_get_root(sb, ctx->upper.dentry, oe);
1467         if (!root_dentry)
1468                 goto out_free_oe;
1469 
1470         sb->s_root = root_dentry;
1471 
1472         return 0;
1473 
1474 out_free_oe:
1475         ovl_free_entry(oe);
1476 out_err:
1477         ovl_free_fs(ofs);
1478         sb->s_fs_info = NULL;
1479         return err;
1480 }
1481 
1482 struct file_system_type ovl_fs_type = {
1483         .owner                  = THIS_MODULE,
1484         .name                   = "overlay",
1485         .init_fs_context        = ovl_init_fs_context,
1486         .parameters             = ovl_parameter_spec,
1487         .fs_flags               = FS_USERNS_MOUNT,
1488         .kill_sb                = kill_anon_super,
1489 };
1490 MODULE_ALIAS_FS("overlay");
1491 
1492 static void ovl_inode_init_once(void *foo)
1493 {
1494         struct ovl_inode *oi = foo;
1495 
1496         inode_init_once(&oi->vfs_inode);
1497 }
1498 
1499 static int __init ovl_init(void)
1500 {
1501         int err;
1502 
1503         ovl_inode_cachep = kmem_cache_create("ovl_inode",
1504                                              sizeof(struct ovl_inode), 0,
1505                                              (SLAB_RECLAIM_ACCOUNT|
1506                                               SLAB_ACCOUNT),
1507                                              ovl_inode_init_once);
1508         if (ovl_inode_cachep == NULL)
1509                 return -ENOMEM;
1510 
1511         err = register_filesystem(&ovl_fs_type);
1512         if (!err)
1513                 return 0;
1514 
1515         kmem_cache_destroy(ovl_inode_cachep);
1516 
1517         return err;
1518 }
1519 
1520 static void __exit ovl_exit(void)
1521 {
1522         unregister_filesystem(&ovl_fs_type);
1523 
1524         /*
1525          * Make sure all delayed rcu free inodes are flushed before we
1526          * destroy cache.
1527          */
1528         rcu_barrier();
1529         kmem_cache_destroy(ovl_inode_cachep);
1530 }
1531 
1532 module_init(ovl_init);
1533 module_exit(ovl_exit);
1534 
