
TOMOYO Linux Cross Reference
Linux/fs/udf/super.c


  1 // SPDX-License-Identifier: GPL-2.0-only
  2 /*
  3  * super.c
  4  *
  5  * PURPOSE
  6  *  Super block routines for the OSTA-UDF(tm) filesystem.
  7  *
  8  * DESCRIPTION
  9  *  OSTA-UDF(tm) = Optical Storage Technology Association
 10  *  Universal Disk Format.
 11  *
 12  *  This code is based on version 2.00 of the UDF specification,
 13  *  and revision 3 of the ECMA 167 standard [equivalent to ISO 13346].
 14  *    http://www.osta.org/
 15  *    https://www.ecma.ch/
 16  *    https://www.iso.org/
 17  *
 18  * COPYRIGHT
 19  *  (C) 1998 Dave Boynton
 20  *  (C) 1998-2004 Ben Fennema
 21  *  (C) 2000 Stelias Computing Inc
 22  *
 23  * HISTORY
 24  *
 25  *  09/24/98 dgb  changed to allow compiling outside of kernel, and
 26  *                added some debugging.
 27  *  10/01/98 dgb  updated to allow (some) possibility of compiling w/2.0.34
 28  *  10/16/98      attempting some multi-session support
 29  *  10/17/98      added freespace count for "df"
 30  *  11/11/98 gr   added novrs option
 31  *  11/26/98 dgb  added fileset,anchor mount options
 32  *  12/06/98 blf  really hosed things royally. vat/sparing support. sequenced
 33  *                vol descs. rewrote option handling based on isofs
 34  *  12/20/98      find the free space bitmap (if it exists)
 35  */
 36 
 37 #include "udfdecl.h"
 38 
 39 #include <linux/blkdev.h>
 40 #include <linux/slab.h>
 41 #include <linux/kernel.h>
 42 #include <linux/module.h>
 43 #include <linux/stat.h>
 44 #include <linux/cdrom.h>
 45 #include <linux/nls.h>
 46 #include <linux/vfs.h>
 47 #include <linux/vmalloc.h>
 48 #include <linux/errno.h>
 49 #include <linux/seq_file.h>
 50 #include <linux/bitmap.h>
 51 #include <linux/crc-itu-t.h>
 52 #include <linux/log2.h>
 53 #include <asm/byteorder.h>
 54 #include <linux/iversion.h>
 55 #include <linux/fs_context.h>
 56 #include <linux/fs_parser.h>
 57 
 58 #include "udf_sb.h"
 59 #include "udf_i.h"
 60 
 61 #include <linux/init.h>
 62 #include <linux/uaccess.h>
 63 
 64 enum {
 65         VDS_POS_PRIMARY_VOL_DESC,
 66         VDS_POS_UNALLOC_SPACE_DESC,
 67         VDS_POS_LOGICAL_VOL_DESC,
 68         VDS_POS_IMP_USE_VOL_DESC,
 69         VDS_POS_LENGTH
 70 };
 71 
 72 #define VSD_FIRST_SECTOR_OFFSET         32768
 73 #define VSD_MAX_SECTOR_OFFSET           0x800000
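
/*
 * Per ECMA 167 2/8, the Volume Recognition Sequence starts 32768 bytes into
 * the session (sector 16 for 2048-byte sectors). The 0x800000 (8MB) bound is
 * this driver's own sanity limit on how far the sequence is scanned; the
 * standard itself does not set an upper bound.
 */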
 74 
 75 /*
 76  * Maximum number of Terminating Descriptor / Logical Volume Integrity
 77  * Descriptor redirections. The chosen numbers are arbitrary - large enough
 78  * that we hopefully don't limit any real use of rewritten inodes on write-once
 79  * media, yet small enough to avoid looping for too long on corrupted media.
 80  */
 81 #define UDF_MAX_TD_NESTING 64
 82 #define UDF_MAX_LVID_NESTING 1000
 83 
 84 enum { UDF_MAX_LINKS = 0xffff };
 85 /*
 86  * We limit filesize to 4TB. This is arbitrary as the on-disk format supports
 87  * more, but because the file space is described by a linked list of extents,
 88  * each of which can have at most 1GB, the creation and handling of extents
 89  * gets unusably slow beyond a certain point...
 90  */
 91 #define UDF_MAX_FILESIZE (1ULL << 42)
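
/*
 * A worked example of the trade-off above, assuming the ~1GB per-extent cap
 * mentioned in the comment (the constant below is illustrative only and is
 * not used anywhere else in this file):
 */
#define UDF_EXAMPLE_MAX_EXTENT_LEN	(1ULL << 30)
/* UDF_MAX_FILESIZE / UDF_EXAMPLE_MAX_EXTENT_LEN == 4096 extents at the cap */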
 92 
 93 /* These are the "meat" - everything else is stuffing */
 94 static int udf_fill_super(struct super_block *sb, struct fs_context *fc);
 95 static void udf_put_super(struct super_block *);
 96 static int udf_sync_fs(struct super_block *, int);
 97 static void udf_load_logicalvolint(struct super_block *, struct kernel_extent_ad);
 98 static void udf_open_lvid(struct super_block *);
 99 static void udf_close_lvid(struct super_block *);
100 static unsigned int udf_count_free(struct super_block *);
101 static int udf_statfs(struct dentry *, struct kstatfs *);
102 static int udf_show_options(struct seq_file *, struct dentry *);
103 static int udf_init_fs_context(struct fs_context *fc);
104 static int udf_parse_param(struct fs_context *fc, struct fs_parameter *param);
105 static int udf_reconfigure(struct fs_context *fc);
106 static void udf_free_fc(struct fs_context *fc);
107 static const struct fs_parameter_spec udf_param_spec[];
108 
109 struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct super_block *sb)
110 {
111         struct logicalVolIntegrityDesc *lvid;
112         unsigned int partnum;
113         unsigned int offset;
114 
115         if (!UDF_SB(sb)->s_lvid_bh)
116                 return NULL;
117         lvid = (struct logicalVolIntegrityDesc *)UDF_SB(sb)->s_lvid_bh->b_data;
118         partnum = le32_to_cpu(lvid->numOfPartitions);
119         /* The offset is to skip freeSpaceTable and sizeTable arrays */
120         offset = partnum * 2 * sizeof(uint32_t);
121         return (struct logicalVolIntegrityDescImpUse *)
122                                         (((uint8_t *)(lvid + 1)) + offset);
123 }
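
/*
 * The skip above mirrors the on-disk layout (ECMA 167 3/10.10): two
 * per-partition __le32 arrays follow the fixed part of the LVID and only then
 * comes the implementation use area. Roughly:
 *
 *	struct logicalVolIntegrityDesc		fixed part, ends at (lvid + 1)
 *	__le32 freeSpaceTable[numOfPartitions]	skipped
 *	__le32 sizeTable[numOfPartitions]	skipped
 *	implementation use area			pointer returned above
 */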
124 
125 /* UDF filesystem type */
126 static int udf_get_tree(struct fs_context *fc)
127 {
128         return get_tree_bdev(fc, udf_fill_super);
129 }
130 
131 static const struct fs_context_operations udf_context_ops = {
132         .parse_param    = udf_parse_param,
133         .get_tree       = udf_get_tree,
134         .reconfigure    = udf_reconfigure,
135         .free           = udf_free_fc,
136 };
137 
138 static struct file_system_type udf_fstype = {
139         .owner          = THIS_MODULE,
140         .name           = "udf",
141         .kill_sb        = kill_block_super,
142         .fs_flags       = FS_REQUIRES_DEV,
143         .init_fs_context = udf_init_fs_context,
144         .parameters     = udf_param_spec,
145 };
146 MODULE_ALIAS_FS("udf");
147 
148 static struct kmem_cache *udf_inode_cachep;
149 
150 static struct inode *udf_alloc_inode(struct super_block *sb)
151 {
152         struct udf_inode_info *ei;
153         ei = alloc_inode_sb(sb, udf_inode_cachep, GFP_KERNEL);
154         if (!ei)
155                 return NULL;
156 
157         ei->i_unique = 0;
158         ei->i_lenExtents = 0;
159         ei->i_lenStreams = 0;
160         ei->i_next_alloc_block = 0;
161         ei->i_next_alloc_goal = 0;
162         ei->i_strat4096 = 0;
163         ei->i_streamdir = 0;
164         ei->i_hidden = 0;
165         init_rwsem(&ei->i_data_sem);
166         ei->cached_extent.lstart = -1;
167         spin_lock_init(&ei->i_extent_cache_lock);
168         inode_set_iversion(&ei->vfs_inode, 1);
169 
170         return &ei->vfs_inode;
171 }
172 
173 static void udf_free_in_core_inode(struct inode *inode)
174 {
175         kmem_cache_free(udf_inode_cachep, UDF_I(inode));
176 }
177 
178 static void init_once(void *foo)
179 {
180         struct udf_inode_info *ei = foo;
181 
182         ei->i_data = NULL;
183         inode_init_once(&ei->vfs_inode);
184 }
185 
186 static int __init init_inodecache(void)
187 {
188         udf_inode_cachep = kmem_cache_create("udf_inode_cache",
189                                              sizeof(struct udf_inode_info),
190                                              0, (SLAB_RECLAIM_ACCOUNT |
191                                                  SLAB_ACCOUNT),
192                                              init_once);
193         if (!udf_inode_cachep)
194                 return -ENOMEM;
195         return 0;
196 }
197 
198 static void destroy_inodecache(void)
199 {
200         /*
201          * Make sure all delayed rcu free inodes are flushed before we
202          * destroy cache.
203          */
204         rcu_barrier();
205         kmem_cache_destroy(udf_inode_cachep);
206 }
207 
208 /* Superblock operations */
209 static const struct super_operations udf_sb_ops = {
210         .alloc_inode    = udf_alloc_inode,
211         .free_inode     = udf_free_in_core_inode,
212         .write_inode    = udf_write_inode,
213         .evict_inode    = udf_evict_inode,
214         .put_super      = udf_put_super,
215         .sync_fs        = udf_sync_fs,
216         .statfs         = udf_statfs,
217         .show_options   = udf_show_options,
218 };
219 
220 struct udf_options {
221         unsigned int blocksize;
222         unsigned int session;
223         unsigned int lastblock;
224         unsigned int anchor;
225         unsigned int flags;
226         umode_t umask;
227         kgid_t gid;
228         kuid_t uid;
229         umode_t fmode;
230         umode_t dmode;
231         struct nls_table *nls_map;
232 };
233 
234 /*
235  * UDF has historically preserved prior mount options across
236  * a remount, so copy those here if remounting, otherwise set
237  * initial mount defaults.
238  */
239 static void udf_init_options(struct fs_context *fc, struct udf_options *uopt)
240 {
241         if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
242                 struct super_block *sb = fc->root->d_sb;
243                 struct udf_sb_info *sbi = UDF_SB(sb);
244 
245                 uopt->flags = sbi->s_flags;
246                 uopt->uid   = sbi->s_uid;
247                 uopt->gid   = sbi->s_gid;
248                 uopt->umask = sbi->s_umask;
249                 uopt->fmode = sbi->s_fmode;
250                 uopt->dmode = sbi->s_dmode;
251                 uopt->nls_map = NULL;
252         } else {
253                 uopt->flags = (1 << UDF_FLAG_USE_AD_IN_ICB) |
254                               (1 << UDF_FLAG_STRICT);
255                 /*
256                  * By default we'll use overflow[ug]id when UDF
257                  * inode [ug]id == -1
258                  */
259                 uopt->uid = make_kuid(current_user_ns(), overflowuid);
260                 uopt->gid = make_kgid(current_user_ns(), overflowgid);
261                 uopt->umask = 0;
262                 uopt->fmode = UDF_INVALID_MODE;
263                 uopt->dmode = UDF_INVALID_MODE;
264                 uopt->nls_map = NULL;
265                 uopt->session = 0xFFFFFFFF;
266         }
267 }
268 
269 static int udf_init_fs_context(struct fs_context *fc)
270 {
271         struct udf_options *uopt;
272 
273         uopt = kzalloc(sizeof(*uopt), GFP_KERNEL);
274         if (!uopt)
275                 return -ENOMEM;
276 
277         udf_init_options(fc, uopt);
278 
279         fc->fs_private = uopt;
280         fc->ops = &udf_context_ops;
281 
282         return 0;
283 }
284 
285 static void udf_free_fc(struct fs_context *fc)
286 {
287         struct udf_options *uopt = fc->fs_private;
288 
289         unload_nls(uopt->nls_map);
290         kfree(fc->fs_private);
291 }
292 
293 static int __init init_udf_fs(void)
294 {
295         int err;
296 
297         err = init_inodecache();
298         if (err)
299                 goto out1;
300         err = register_filesystem(&udf_fstype);
301         if (err)
302                 goto out;
303 
304         return 0;
305 
306 out:
307         destroy_inodecache();
308 
309 out1:
310         return err;
311 }
312 
313 static void __exit exit_udf_fs(void)
314 {
315         unregister_filesystem(&udf_fstype);
316         destroy_inodecache();
317 }
318 
319 static int udf_sb_alloc_partition_maps(struct super_block *sb, u32 count)
320 {
321         struct udf_sb_info *sbi = UDF_SB(sb);
322 
323         sbi->s_partmaps = kcalloc(count, sizeof(*sbi->s_partmaps), GFP_KERNEL);
324         if (!sbi->s_partmaps) {
325                 sbi->s_partitions = 0;
326                 return -ENOMEM;
327         }
328 
329         sbi->s_partitions = count;
330         return 0;
331 }
332 
333 static void udf_sb_free_bitmap(struct udf_bitmap *bitmap)
334 {
335         int i;
336         int nr_groups = bitmap->s_nr_groups;
337 
338         for (i = 0; i < nr_groups; i++)
339                 if (!IS_ERR_OR_NULL(bitmap->s_block_bitmap[i]))
340                         brelse(bitmap->s_block_bitmap[i]);
341 
342         kvfree(bitmap);
343 }
344 
345 static void udf_free_partition(struct udf_part_map *map)
346 {
347         int i;
348         struct udf_meta_data *mdata;
349 
350         if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
351                 iput(map->s_uspace.s_table);
352         if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
353                 udf_sb_free_bitmap(map->s_uspace.s_bitmap);
354         if (map->s_partition_type == UDF_SPARABLE_MAP15)
355                 for (i = 0; i < 4; i++)
356                         brelse(map->s_type_specific.s_sparing.s_spar_map[i]);
357         else if (map->s_partition_type == UDF_METADATA_MAP25) {
358                 mdata = &map->s_type_specific.s_metadata;
359                 iput(mdata->s_metadata_fe);
360                 mdata->s_metadata_fe = NULL;
361 
362                 iput(mdata->s_mirror_fe);
363                 mdata->s_mirror_fe = NULL;
364 
365                 iput(mdata->s_bitmap_fe);
366                 mdata->s_bitmap_fe = NULL;
367         }
368 }
369 
370 static void udf_sb_free_partitions(struct super_block *sb)
371 {
372         struct udf_sb_info *sbi = UDF_SB(sb);
373         int i;
374 
375         if (!sbi->s_partmaps)
376                 return;
377         for (i = 0; i < sbi->s_partitions; i++)
378                 udf_free_partition(&sbi->s_partmaps[i]);
379         kfree(sbi->s_partmaps);
380         sbi->s_partmaps = NULL;
381 }
382 
383 static int udf_show_options(struct seq_file *seq, struct dentry *root)
384 {
385         struct super_block *sb = root->d_sb;
386         struct udf_sb_info *sbi = UDF_SB(sb);
387 
388         if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT))
389                 seq_puts(seq, ",nostrict");
390         if (UDF_QUERY_FLAG(sb, UDF_FLAG_BLOCKSIZE_SET))
391                 seq_printf(seq, ",bs=%lu", sb->s_blocksize);
392         if (UDF_QUERY_FLAG(sb, UDF_FLAG_UNHIDE))
393                 seq_puts(seq, ",unhide");
394         if (UDF_QUERY_FLAG(sb, UDF_FLAG_UNDELETE))
395                 seq_puts(seq, ",undelete");
396         if (!UDF_QUERY_FLAG(sb, UDF_FLAG_USE_AD_IN_ICB))
397                 seq_puts(seq, ",noadinicb");
398         if (UDF_QUERY_FLAG(sb, UDF_FLAG_USE_SHORT_AD))
399                 seq_puts(seq, ",shortad");
400         if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_FORGET))
401                 seq_puts(seq, ",uid=forget");
402         if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_FORGET))
403                 seq_puts(seq, ",gid=forget");
404         if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_SET))
405                 seq_printf(seq, ",uid=%u", from_kuid(&init_user_ns, sbi->s_uid));
406         if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_SET))
407                 seq_printf(seq, ",gid=%u", from_kgid(&init_user_ns, sbi->s_gid));
408         if (sbi->s_umask != 0)
409                 seq_printf(seq, ",umask=%ho", sbi->s_umask);
410         if (sbi->s_fmode != UDF_INVALID_MODE)
411                 seq_printf(seq, ",mode=%ho", sbi->s_fmode);
412         if (sbi->s_dmode != UDF_INVALID_MODE)
413                 seq_printf(seq, ",dmode=%ho", sbi->s_dmode);
414         if (UDF_QUERY_FLAG(sb, UDF_FLAG_SESSION_SET))
415                 seq_printf(seq, ",session=%d", sbi->s_session);
416         if (UDF_QUERY_FLAG(sb, UDF_FLAG_LASTBLOCK_SET))
417                 seq_printf(seq, ",lastblock=%u", sbi->s_last_block);
418         if (sbi->s_anchor != 0)
419                 seq_printf(seq, ",anchor=%u", sbi->s_anchor);
420         if (sbi->s_nls_map)
421                 seq_printf(seq, ",iocharset=%s", sbi->s_nls_map->charset);
422         else
423                 seq_puts(seq, ",iocharset=utf8");
424 
425         return 0;
426 }
427 
428 /*
429  * udf_parse_param
430  *
431  * PURPOSE
432  *      Parse mount options.
433  *
434  * DESCRIPTION
435  *      The following mount options are supported:
436  *
437  *      gid=            Set the default group.
438  *      umask=          Set the default umask.
439  *      mode=           Set the default file permissions.
440  *      dmode=          Set the default directory permissions.
441  *      uid=            Set the default user.
442  *      bs=             Set the block size.
443  *      unhide          Show otherwise hidden files.
444  *      undelete        Show deleted files in lists.
445  *      adinicb         Embed data in the inode (default)
446  *      noadinicb       Don't embed data in the inode
 447  *      shortad         Use short allocation descriptors
 448  *      longad          Use long allocation descriptors (default)
449  *      nostrict        Unset strict conformance
450  *      iocharset=      Set the NLS character set
451  *
452  *      The remaining are for debugging and disaster recovery:
453  *
454  *      novrs           Skip volume sequence recognition
455  *
 456  *      The following expect an offset from 0.
457  *
458  *      session=        Set the CDROM session (default= last session)
459  *      anchor=         Override standard anchor location. (default= 256)
460  *      volume=         Override the VolumeDesc location. (unused)
461  *      partition=      Override the PartitionDesc location. (unused)
 462  *      lastblock=      Set the last block of the filesystem.
 463  *
 464  *      The following expect an offset from the partition root.
465  *
466  *      fileset=        Override the fileset block location. (unused)
467  *      rootdir=        Override the root directory location. (unused)
468  *              WARNING: overriding the rootdir to a non-directory may
469  *              yield highly unpredictable results.
470  *
471  * PRE-CONDITIONS
472  *      fc              fs_context with pointer to mount options variable.
473  *      param           Pointer to fs_parameter being parsed.
474  *
475  * POST-CONDITIONS
476  *      <return>        0       Mount options parsed okay.
477  *      <return>        errno   Error parsing mount options.
478  *
479  * HISTORY
480  *      July 1, 1997 - Andrew E. Mileski
481  *      Written, tested, and released.
482  */
483 
484 enum {
485         Opt_novrs, Opt_nostrict, Opt_bs, Opt_unhide, Opt_undelete,
486         Opt_noadinicb, Opt_adinicb, Opt_shortad, Opt_longad,
487         Opt_gid, Opt_uid, Opt_umask, Opt_session, Opt_lastblock,
488         Opt_anchor, Opt_volume, Opt_partition, Opt_fileset,
489         Opt_rootdir, Opt_utf8, Opt_iocharset, Opt_err, Opt_fmode, Opt_dmode
490 };
491 
492 static const struct fs_parameter_spec udf_param_spec[] = {
493         fsparam_flag    ("novrs",               Opt_novrs),
494         fsparam_flag    ("nostrict",            Opt_nostrict),
495         fsparam_u32     ("bs",                  Opt_bs),
496         fsparam_flag    ("unhide",              Opt_unhide),
497         fsparam_flag    ("undelete",            Opt_undelete),
498         fsparam_flag_no ("adinicb",             Opt_adinicb),
499         fsparam_flag    ("shortad",             Opt_shortad),
500         fsparam_flag    ("longad",              Opt_longad),
501         fsparam_string  ("gid",                 Opt_gid),
502         fsparam_string  ("uid",                 Opt_uid),
503         fsparam_u32     ("umask",               Opt_umask),
504         fsparam_u32     ("session",             Opt_session),
505         fsparam_u32     ("lastblock",           Opt_lastblock),
506         fsparam_u32     ("anchor",              Opt_anchor),
507         fsparam_u32     ("volume",              Opt_volume),
508         fsparam_u32     ("partition",           Opt_partition),
509         fsparam_u32     ("fileset",             Opt_fileset),
510         fsparam_u32     ("rootdir",             Opt_rootdir),
511         fsparam_flag    ("utf8",                Opt_utf8),
512         fsparam_string  ("iocharset",           Opt_iocharset),
513         fsparam_u32     ("mode",                Opt_fmode),
514         fsparam_u32     ("dmode",               Opt_dmode),
515         {}
516  };
517 
518 static int udf_parse_param(struct fs_context *fc, struct fs_parameter *param)
519 {
520         unsigned int uv;
521         unsigned int n;
522         struct udf_options *uopt = fc->fs_private;
523         struct fs_parse_result result;
524         int token;
525         bool remount = (fc->purpose & FS_CONTEXT_FOR_RECONFIGURE);
526 
527         token = fs_parse(fc, udf_param_spec, param, &result);
528         if (token < 0)
529                 return token;
530 
531         switch (token) {
532         case Opt_novrs:
533                 uopt->flags |= (1 << UDF_FLAG_NOVRS);
534                 break;
535         case Opt_bs:
536                 n = result.uint_32;
537                 if (n != 512 && n != 1024 && n != 2048 && n != 4096)
538                         return -EINVAL;
539                 uopt->blocksize = n;
540                 uopt->flags |= (1 << UDF_FLAG_BLOCKSIZE_SET);
541                 break;
542         case Opt_unhide:
543                 uopt->flags |= (1 << UDF_FLAG_UNHIDE);
544                 break;
545         case Opt_undelete:
546                 uopt->flags |= (1 << UDF_FLAG_UNDELETE);
547                 break;
548         case Opt_adinicb:
549                 if (result.negated)
550                         uopt->flags &= ~(1 << UDF_FLAG_USE_AD_IN_ICB);
551                 else
552                         uopt->flags |= (1 << UDF_FLAG_USE_AD_IN_ICB);
553                 break;
554         case Opt_shortad:
555                 uopt->flags |= (1 << UDF_FLAG_USE_SHORT_AD);
556                 break;
557         case Opt_longad:
558                 uopt->flags &= ~(1 << UDF_FLAG_USE_SHORT_AD);
559                 break;
560         case Opt_gid:
561                 if (kstrtoint(param->string, 10, &uv) == 0) {
562                         kgid_t gid = make_kgid(current_user_ns(), uv);
563                         if (!gid_valid(gid))
564                                 return -EINVAL;
565                         uopt->gid = gid;
566                         uopt->flags |= (1 << UDF_FLAG_GID_SET);
567                 } else if (!strcmp(param->string, "forget")) {
568                         uopt->flags |= (1 << UDF_FLAG_GID_FORGET);
569                 } else if (!strcmp(param->string, "ignore")) {
570                         /* this option is superseded by gid=<number> */
571                         ;
572                 } else {
573                         return -EINVAL;
574                 }
575                 break;
576         case Opt_uid:
577                 if (kstrtoint(param->string, 10, &uv) == 0) {
578                         kuid_t uid = make_kuid(current_user_ns(), uv);
579                         if (!uid_valid(uid))
580                                 return -EINVAL;
581                         uopt->uid = uid;
582                         uopt->flags |= (1 << UDF_FLAG_UID_SET);
583                 } else if (!strcmp(param->string, "forget")) {
584                         uopt->flags |= (1 << UDF_FLAG_UID_FORGET);
585                 } else if (!strcmp(param->string, "ignore")) {
586                         /* this option is superseded by uid=<number> */
587                         ;
588                 } else {
589                         return -EINVAL;
590                 }
591                 break;
592         case Opt_umask:
593                 uopt->umask = result.uint_32;
594                 break;
595         case Opt_nostrict:
596                 uopt->flags &= ~(1 << UDF_FLAG_STRICT);
597                 break;
598         case Opt_session:
599                 uopt->session = result.uint_32;
600                 if (!remount)
601                         uopt->flags |= (1 << UDF_FLAG_SESSION_SET);
602                 break;
603         case Opt_lastblock:
604                 uopt->lastblock = result.uint_32;
605                 if (!remount)
606                         uopt->flags |= (1 << UDF_FLAG_LASTBLOCK_SET);
607                 break;
608         case Opt_anchor:
609                 uopt->anchor = result.uint_32;
610                 break;
611         case Opt_volume:
612         case Opt_partition:
613         case Opt_fileset:
614         case Opt_rootdir:
615                 /* Ignored (never implemented properly) */
616                 break;
617         case Opt_utf8:
618                 if (!remount) {
619                         unload_nls(uopt->nls_map);
620                         uopt->nls_map = NULL;
621                 }
622                 break;
623         case Opt_iocharset:
624                 if (!remount) {
625                         unload_nls(uopt->nls_map);
626                         uopt->nls_map = NULL;
627                 }
628                 /* When nls_map is not loaded then UTF-8 is used */
629                 if (!remount && strcmp(param->string, "utf8") != 0) {
630                         uopt->nls_map = load_nls(param->string);
631                         if (!uopt->nls_map) {
632                                 errorf(fc, "iocharset %s not found",
633                                         param->string);
634                                 return -EINVAL;
635                         }
636                 }
637                 break;
638         case Opt_fmode:
639                 uopt->fmode = result.uint_32 & 0777;
640                 break;
641         case Opt_dmode:
642                 uopt->dmode = result.uint_32 & 0777;
643                 break;
644         default:
645                 return -EINVAL;
646         }
647         return 0;
648 }
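
/*
 * For example (values purely illustrative), mounting with
 * "-o uid=1000,gid=forget,iocharset=utf8,undelete,nostrict" reaches this
 * function once per option: uid=1000 is converted with make_kuid() and sets
 * UDF_FLAG_UID_SET, gid=forget sets UDF_FLAG_GID_FORGET, iocharset=utf8
 * leaves uopt->nls_map NULL (UTF-8 is used when no NLS table is loaded),
 * undelete sets UDF_FLAG_UNDELETE and nostrict clears UDF_FLAG_STRICT.
 */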
649 
650 static int udf_reconfigure(struct fs_context *fc)
651 {
652         struct udf_options *uopt = fc->fs_private;
653         struct super_block *sb = fc->root->d_sb;
654         struct udf_sb_info *sbi = UDF_SB(sb);
655         int readonly = fc->sb_flags & SB_RDONLY;
656         int error = 0;
657 
658         if (!readonly && UDF_QUERY_FLAG(sb, UDF_FLAG_RW_INCOMPAT))
659                 return -EACCES;
660 
661         sync_filesystem(sb);
662 
663         write_lock(&sbi->s_cred_lock);
664         sbi->s_flags = uopt->flags;
665         sbi->s_uid   = uopt->uid;
666         sbi->s_gid   = uopt->gid;
667         sbi->s_umask = uopt->umask;
668         sbi->s_fmode = uopt->fmode;
669         sbi->s_dmode = uopt->dmode;
670         write_unlock(&sbi->s_cred_lock);
671 
672         if (readonly == sb_rdonly(sb))
673                 goto out_unlock;
674 
675         if (readonly)
676                 udf_close_lvid(sb);
677         else
678                 udf_open_lvid(sb);
679 
680 out_unlock:
681         return error;
682 }
683 
684 /*
685  * Check VSD descriptor. Returns -1 in case we are at the end of volume
686  * recognition area, 0 if the descriptor is valid but non-interesting, 1 if
 687  * we found one of the NSR descriptors we are looking for.
688  */
689 static int identify_vsd(const struct volStructDesc *vsd)
690 {
691         int ret = 0;
692 
693         if (!memcmp(vsd->stdIdent, VSD_STD_ID_CD001, VSD_STD_ID_LEN)) {
694                 switch (vsd->structType) {
695                 case 0:
696                         udf_debug("ISO9660 Boot Record found\n");
697                         break;
698                 case 1:
699                         udf_debug("ISO9660 Primary Volume Descriptor found\n");
700                         break;
701                 case 2:
702                         udf_debug("ISO9660 Supplementary Volume Descriptor found\n");
703                         break;
704                 case 3:
705                         udf_debug("ISO9660 Volume Partition Descriptor found\n");
706                         break;
707                 case 255:
708                         udf_debug("ISO9660 Volume Descriptor Set Terminator found\n");
709                         break;
710                 default:
711                         udf_debug("ISO9660 VRS (%u) found\n", vsd->structType);
712                         break;
713                 }
714         } else if (!memcmp(vsd->stdIdent, VSD_STD_ID_BEA01, VSD_STD_ID_LEN))
715                 ; /* ret = 0 */
716         else if (!memcmp(vsd->stdIdent, VSD_STD_ID_NSR02, VSD_STD_ID_LEN))
717                 ret = 1;
718         else if (!memcmp(vsd->stdIdent, VSD_STD_ID_NSR03, VSD_STD_ID_LEN))
719                 ret = 1;
720         else if (!memcmp(vsd->stdIdent, VSD_STD_ID_BOOT2, VSD_STD_ID_LEN))
721                 ; /* ret = 0 */
722         else if (!memcmp(vsd->stdIdent, VSD_STD_ID_CDW02, VSD_STD_ID_LEN))
723                 ; /* ret = 0 */
724         else {
725                 /* TEA01 or invalid id : end of volume recognition area */
726                 ret = -1;
727         }
728 
729         return ret;
730 }
731 
732 /*
733  * Check Volume Structure Descriptors (ECMA 167 2/9.1)
734  * We also check any "CD-ROM Volume Descriptor Set" (ECMA 167 2/8.3.1)
735  * @return   1 if NSR02 or NSR03 found,
736  *          -1 if first sector read error, 0 otherwise
737  */
738 static int udf_check_vsd(struct super_block *sb)
739 {
740         struct volStructDesc *vsd = NULL;
741         loff_t sector = VSD_FIRST_SECTOR_OFFSET;
742         int sectorsize;
743         struct buffer_head *bh = NULL;
744         int nsr = 0;
745         struct udf_sb_info *sbi;
746         loff_t session_offset;
747 
748         sbi = UDF_SB(sb);
749         if (sb->s_blocksize < sizeof(struct volStructDesc))
750                 sectorsize = sizeof(struct volStructDesc);
751         else
752                 sectorsize = sb->s_blocksize;
753 
754         session_offset = (loff_t)sbi->s_session << sb->s_blocksize_bits;
755         sector += session_offset;
756 
757         udf_debug("Starting at sector %u (%lu byte sectors)\n",
758                   (unsigned int)(sector >> sb->s_blocksize_bits),
759                   sb->s_blocksize);
760         /* Process the sequence (if applicable). The hard limit on the sector
761          * offset is arbitrary, hopefully large enough so that all valid UDF
762          * filesystems will be recognised. There is no mention of an upper
763          * bound to the size of the volume recognition area in the standard.
 764  *  The limit prevents the code from reading all the sectors of a
 765  * specially crafted image (like a Blu-ray disc full of CD001 sectors),
 766  * potentially causing minutes or even hours of uninterruptible I/O
 767  * activity. This actually happened with uninitialised SSD partitions
 768  * (all 0xFF) before the limit check and the full set of valid IDs were
 769  * added. */
770         for (; !nsr && sector < VSD_MAX_SECTOR_OFFSET; sector += sectorsize) {
771                 /* Read a block */
772                 bh = sb_bread(sb, sector >> sb->s_blocksize_bits);
773                 if (!bh)
774                         break;
775 
776                 vsd = (struct volStructDesc *)(bh->b_data +
777                                               (sector & (sb->s_blocksize - 1)));
778                 nsr = identify_vsd(vsd);
779                 /* Found NSR or end? */
780                 if (nsr) {
781                         brelse(bh);
782                         break;
783                 }
784                 /*
785                  * Special handling for improperly formatted VRS (e.g., Win10)
786                  * where components are separated by 2048 bytes even though
787                  * sectors are 4K
788                  */
789                 if (sb->s_blocksize == 4096) {
790                         nsr = identify_vsd(vsd + 1);
791                         /* Ignore unknown IDs... */
792                         if (nsr < 0)
793                                 nsr = 0;
794                 }
795                 brelse(bh);
796         }
797 
798         if (nsr > 0)
799                 return 1;
800         else if (!bh && sector - session_offset == VSD_FIRST_SECTOR_OFFSET)
801                 return -1;
802         else
803                 return 0;
804 }
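
/*
 * For illustration, a typical Volume Recognition Sequence the loop above
 * walks, starting 32768 bytes into the session (per ECMA 167 2/8 and 2/9):
 *
 *	CD001 ...	ISO9660 descriptors (optional; identify_vsd() skips them)
 *	BEA01		Beginning Extended Area descriptor (skipped)
 *	NSR02 / NSR03	ECMA 167 / UDF marker, identify_vsd() returns 1
 *	TEA01		Terminating Extended Area descriptor, returns -1
 */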
805 
806 static int udf_verify_domain_identifier(struct super_block *sb,
807                                         struct regid *ident, char *dname)
808 {
809         struct domainIdentSuffix *suffix;
810 
811         if (memcmp(ident->ident, UDF_ID_COMPLIANT, strlen(UDF_ID_COMPLIANT))) {
812                 udf_warn(sb, "Not OSTA UDF compliant %s descriptor.\n", dname);
813                 goto force_ro;
814         }
815         if (ident->flags & ENTITYID_FLAGS_DIRTY) {
816                 udf_warn(sb, "Possibly not OSTA UDF compliant %s descriptor.\n",
817                          dname);
818                 goto force_ro;
819         }
820         suffix = (struct domainIdentSuffix *)ident->identSuffix;
821         if ((suffix->domainFlags & DOMAIN_FLAGS_HARD_WRITE_PROTECT) ||
822             (suffix->domainFlags & DOMAIN_FLAGS_SOFT_WRITE_PROTECT)) {
823                 if (!sb_rdonly(sb)) {
824                         udf_warn(sb, "Descriptor for %s marked write protected."
825                                  " Forcing read only mount.\n", dname);
826                 }
827                 goto force_ro;
828         }
829         return 0;
830 
831 force_ro:
832         if (!sb_rdonly(sb))
833                 return -EACCES;
834         UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
835         return 0;
836 }
837 
838 static int udf_load_fileset(struct super_block *sb, struct fileSetDesc *fset,
839                             struct kernel_lb_addr *root)
840 {
841         int ret;
842 
843         ret = udf_verify_domain_identifier(sb, &fset->domainIdent, "file set");
844         if (ret < 0)
845                 return ret;
846 
847         *root = lelb_to_cpu(fset->rootDirectoryICB.extLocation);
848         UDF_SB(sb)->s_serial_number = le16_to_cpu(fset->descTag.tagSerialNum);
849 
850         udf_debug("Rootdir at block=%u, partition=%u\n",
851                   root->logicalBlockNum, root->partitionReferenceNum);
852         return 0;
853 }
854 
855 static int udf_find_fileset(struct super_block *sb,
856                             struct kernel_lb_addr *fileset,
857                             struct kernel_lb_addr *root)
858 {
859         struct buffer_head *bh;
860         uint16_t ident;
861         int ret;
862 
863         if (fileset->logicalBlockNum == 0xFFFFFFFF &&
864             fileset->partitionReferenceNum == 0xFFFF)
865                 return -EINVAL;
866 
867         bh = udf_read_ptagged(sb, fileset, 0, &ident);
868         if (!bh)
869                 return -EIO;
870         if (ident != TAG_IDENT_FSD) {
871                 brelse(bh);
872                 return -EINVAL;
873         }
874 
875         udf_debug("Fileset at block=%u, partition=%u\n",
876                   fileset->logicalBlockNum, fileset->partitionReferenceNum);
877 
878         UDF_SB(sb)->s_partition = fileset->partitionReferenceNum;
879         ret = udf_load_fileset(sb, (struct fileSetDesc *)bh->b_data, root);
880         brelse(bh);
881         return ret;
882 }
883 
884 /*
885  * Load primary Volume Descriptor Sequence
886  *
887  * Return <0 on error, 0 on success. -EAGAIN is special meaning next sequence
888  * should be tried.
889  */
890 static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
891 {
892         struct primaryVolDesc *pvoldesc;
893         uint8_t *outstr;
894         struct buffer_head *bh;
895         uint16_t ident;
896         int ret;
897         struct timestamp *ts;
898 
899         outstr = kzalloc(128, GFP_KERNEL);
900         if (!outstr)
901                 return -ENOMEM;
902 
903         bh = udf_read_tagged(sb, block, block, &ident);
904         if (!bh) {
905                 ret = -EAGAIN;
906                 goto out2;
907         }
908 
909         if (ident != TAG_IDENT_PVD) {
910                 ret = -EIO;
911                 goto out_bh;
912         }
913 
914         pvoldesc = (struct primaryVolDesc *)bh->b_data;
915 
916         udf_disk_stamp_to_time(&UDF_SB(sb)->s_record_time,
917                               pvoldesc->recordingDateAndTime);
918         ts = &pvoldesc->recordingDateAndTime;
919         udf_debug("recording time %04u/%02u/%02u %02u:%02u (%x)\n",
920                   le16_to_cpu(ts->year), ts->month, ts->day, ts->hour,
921                   ts->minute, le16_to_cpu(ts->typeAndTimezone));
922 
923         ret = udf_dstrCS0toChar(sb, outstr, 31, pvoldesc->volIdent, 32);
924         if (ret < 0) {
925                 strscpy_pad(UDF_SB(sb)->s_volume_ident, "InvalidName");
926                 pr_warn("incorrect volume identification, setting to "
927                         "'InvalidName'\n");
928         } else {
929                 strscpy_pad(UDF_SB(sb)->s_volume_ident, outstr);
930         }
931         udf_debug("volIdent[] = '%s'\n", UDF_SB(sb)->s_volume_ident);
932 
933         ret = udf_dstrCS0toChar(sb, outstr, 127, pvoldesc->volSetIdent, 128);
934         if (ret < 0) {
935                 ret = 0;
936                 goto out_bh;
937         }
938         outstr[ret] = 0;
939         udf_debug("volSetIdent[] = '%s'\n", outstr);
940 
941         ret = 0;
942 out_bh:
943         brelse(bh);
944 out2:
945         kfree(outstr);
946         return ret;
947 }
948 
949 struct inode *udf_find_metadata_inode_efe(struct super_block *sb,
950                                         u32 meta_file_loc, u32 partition_ref)
951 {
952         struct kernel_lb_addr addr;
953         struct inode *metadata_fe;
954 
955         addr.logicalBlockNum = meta_file_loc;
956         addr.partitionReferenceNum = partition_ref;
957 
958         metadata_fe = udf_iget_special(sb, &addr);
959 
960         if (IS_ERR(metadata_fe)) {
961                 udf_warn(sb, "metadata inode efe not found\n");
962                 return metadata_fe;
963         }
964         if (UDF_I(metadata_fe)->i_alloc_type != ICBTAG_FLAG_AD_SHORT) {
965                 udf_warn(sb, "metadata inode efe does not have short allocation descriptors!\n");
966                 iput(metadata_fe);
967                 return ERR_PTR(-EIO);
968         }
969 
970         return metadata_fe;
971 }
972 
973 static int udf_load_metadata_files(struct super_block *sb, int partition,
974                                    int type1_index)
975 {
976         struct udf_sb_info *sbi = UDF_SB(sb);
977         struct udf_part_map *map;
978         struct udf_meta_data *mdata;
979         struct kernel_lb_addr addr;
980         struct inode *fe;
981 
982         map = &sbi->s_partmaps[partition];
983         mdata = &map->s_type_specific.s_metadata;
984         mdata->s_phys_partition_ref = type1_index;
985 
986         /* metadata address */
987         udf_debug("Metadata file location: block = %u part = %u\n",
988                   mdata->s_meta_file_loc, mdata->s_phys_partition_ref);
989 
990         fe = udf_find_metadata_inode_efe(sb, mdata->s_meta_file_loc,
991                                          mdata->s_phys_partition_ref);
992         if (IS_ERR(fe)) {
993                 /* mirror file entry */
994                 udf_debug("Mirror metadata file location: block = %u part = %u\n",
995                           mdata->s_mirror_file_loc, mdata->s_phys_partition_ref);
996 
997                 fe = udf_find_metadata_inode_efe(sb, mdata->s_mirror_file_loc,
998                                                  mdata->s_phys_partition_ref);
999 
1000                 if (IS_ERR(fe)) {
1001                         udf_err(sb, "Both metadata and mirror metadata inode efe cannot be found\n");
1002                         return PTR_ERR(fe);
1003                 }
1004                 mdata->s_mirror_fe = fe;
1005         } else
1006                 mdata->s_metadata_fe = fe;
1007 
1008 
1009         /*
1010          * bitmap file entry
1011          * Note:
1012          * Load only if bitmap file location differs from 0xFFFFFFFF (DCN-5102)
1013          */
1014         if (mdata->s_bitmap_file_loc != 0xFFFFFFFF) {
1015                 addr.logicalBlockNum = mdata->s_bitmap_file_loc;
1016                 addr.partitionReferenceNum = mdata->s_phys_partition_ref;
1017 
1018                 udf_debug("Bitmap file location: block = %u part = %u\n",
1019                           addr.logicalBlockNum, addr.partitionReferenceNum);
1020 
1021                 fe = udf_iget_special(sb, &addr);
1022                 if (IS_ERR(fe)) {
1023                         if (sb_rdonly(sb))
1024                                 udf_warn(sb, "bitmap inode efe not found but it's ok since the disc is mounted read-only\n");
1025                         else {
1026                                 udf_err(sb, "bitmap inode efe not found and attempted read-write mount\n");
1027                                 return PTR_ERR(fe);
1028                         }
1029                 } else
1030                         mdata->s_bitmap_fe = fe;
1031         }
1032 
1033         udf_debug("udf_load_metadata_files Ok\n");
1034         return 0;
1035 }
1036 
1037 int udf_compute_nr_groups(struct super_block *sb, u32 partition)
1038 {
1039         struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
1040         return DIV_ROUND_UP(map->s_partition_len +
1041                             (sizeof(struct spaceBitmapDesc) << 3),
1042                             sb->s_blocksize * 8);
1043 }
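
/*
 * Worked example (numbers illustrative): with 2048-byte blocks each bitmap
 * group covers 2048 * 8 = 16384 partition blocks, and the
 * "sizeof(struct spaceBitmapDesc) << 3" term accounts for the bits taken up
 * by the Space Bitmap Descriptor header at the start of the bitmap. A
 * partition of 1000000 blocks thus rounds up to 62 groups.
 */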
1044 
1045 static struct udf_bitmap *udf_sb_alloc_bitmap(struct super_block *sb, u32 index)
1046 {
1047         struct udf_bitmap *bitmap;
1048         int nr_groups = udf_compute_nr_groups(sb, index);
1049 
1050         bitmap = kvzalloc(struct_size(bitmap, s_block_bitmap, nr_groups),
1051                           GFP_KERNEL);
1052         if (!bitmap)
1053                 return NULL;
1054 
1055         bitmap->s_nr_groups = nr_groups;
1056         return bitmap;
1057 }
1058 
1059 static int check_partition_desc(struct super_block *sb,
1060                                 struct partitionDesc *p,
1061                                 struct udf_part_map *map)
1062 {
1063         bool umap, utable, fmap, ftable;
1064         struct partitionHeaderDesc *phd;
1065 
1066         switch (le32_to_cpu(p->accessType)) {
1067         case PD_ACCESS_TYPE_READ_ONLY:
1068         case PD_ACCESS_TYPE_WRITE_ONCE:
1069         case PD_ACCESS_TYPE_NONE:
1070                 goto force_ro;
1071         }
1072 
1073         /* No Partition Header Descriptor? */
1074         if (strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR02) &&
1075             strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR03))
1076                 goto force_ro;
1077 
1078         phd = (struct partitionHeaderDesc *)p->partitionContentsUse;
1079         utable = phd->unallocSpaceTable.extLength;
1080         umap = phd->unallocSpaceBitmap.extLength;
1081         ftable = phd->freedSpaceTable.extLength;
1082         fmap = phd->freedSpaceBitmap.extLength;
1083 
1084         /* No allocation info? */
1085         if (!utable && !umap && !ftable && !fmap)
1086                 goto force_ro;
1087 
1088         /* We don't support blocks that require erasing before overwrite */
1089         if (ftable || fmap)
1090                 goto force_ro;
1091         /* UDF 2.60: 2.3.3 - no mixing of tables & bitmaps, no VAT. */
1092         if (utable && umap)
1093                 goto force_ro;
1094 
1095         if (map->s_partition_type == UDF_VIRTUAL_MAP15 ||
1096             map->s_partition_type == UDF_VIRTUAL_MAP20 ||
1097             map->s_partition_type == UDF_METADATA_MAP25)
1098                 goto force_ro;
1099 
1100         return 0;
1101 force_ro:
1102         if (!sb_rdonly(sb))
1103                 return -EACCES;
1104         UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
1105         return 0;
1106 }
1107 
1108 static int udf_fill_partdesc_info(struct super_block *sb,
1109                 struct partitionDesc *p, int p_index)
1110 {
1111         struct udf_part_map *map;
1112         struct udf_sb_info *sbi = UDF_SB(sb);
1113         struct partitionHeaderDesc *phd;
1114         u32 sum;
1115         int err;
1116 
1117         map = &sbi->s_partmaps[p_index];
1118 
1119         map->s_partition_len = le32_to_cpu(p->partitionLength); /* blocks */
1120         map->s_partition_root = le32_to_cpu(p->partitionStartingLocation);
1121         if (check_add_overflow(map->s_partition_root, map->s_partition_len,
1122                                &sum)) {
1123                 udf_err(sb, "Partition %d has invalid location %u + %u\n",
1124                         p_index, map->s_partition_root, map->s_partition_len);
1125                 return -EFSCORRUPTED;
1126         }
1127 
1128         if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_READ_ONLY))
1129                 map->s_partition_flags |= UDF_PART_FLAG_READ_ONLY;
1130         if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_WRITE_ONCE))
1131                 map->s_partition_flags |= UDF_PART_FLAG_WRITE_ONCE;
1132         if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_REWRITABLE))
1133                 map->s_partition_flags |= UDF_PART_FLAG_REWRITABLE;
1134         if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_OVERWRITABLE))
1135                 map->s_partition_flags |= UDF_PART_FLAG_OVERWRITABLE;
1136 
1137         udf_debug("Partition (%d type %x) starts at physical %u, block length %u\n",
1138                   p_index, map->s_partition_type,
1139                   map->s_partition_root, map->s_partition_len);
1140 
1141         err = check_partition_desc(sb, p, map);
1142         if (err)
1143                 return err;
1144 
1145         /*
1146          * Skip loading allocation info if we cannot ever write to the fs.
1147          * This is a correctness thing as we may have decided to force ro mount
1148          * to avoid allocation info we don't support.
1149          */
1150         if (UDF_QUERY_FLAG(sb, UDF_FLAG_RW_INCOMPAT))
1151                 return 0;
1152 
1153         phd = (struct partitionHeaderDesc *)p->partitionContentsUse;
1154         if (phd->unallocSpaceTable.extLength) {
1155                 struct kernel_lb_addr loc = {
1156                         .logicalBlockNum = le32_to_cpu(
1157                                 phd->unallocSpaceTable.extPosition),
1158                         .partitionReferenceNum = p_index,
1159                 };
1160                 struct inode *inode;
1161 
1162                 inode = udf_iget_special(sb, &loc);
1163                 if (IS_ERR(inode)) {
1164                         udf_debug("cannot load unallocSpaceTable (part %d)\n",
1165                                   p_index);
1166                         return PTR_ERR(inode);
1167                 }
1168                 map->s_uspace.s_table = inode;
1169                 map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_TABLE;
1170                 udf_debug("unallocSpaceTable (part %d) @ %lu\n",
1171                           p_index, map->s_uspace.s_table->i_ino);
1172         }
1173 
1174         if (phd->unallocSpaceBitmap.extLength) {
1175                 struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index);
1176                 if (!bitmap)
1177                         return -ENOMEM;
1178                 map->s_uspace.s_bitmap = bitmap;
1179                 bitmap->s_extPosition = le32_to_cpu(
1180                                 phd->unallocSpaceBitmap.extPosition);
1181                 map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_BITMAP;
1182                 /* Check whether math over bitmap won't overflow. */
1183                 if (check_add_overflow(map->s_partition_len,
1184                                        sizeof(struct spaceBitmapDesc) << 3,
1185                                        &sum)) {
1186                         udf_err(sb, "Partition %d is too long (%u)\n", p_index,
1187                                 map->s_partition_len);
1188                         return -EFSCORRUPTED;
1189                 }
1190                 udf_debug("unallocSpaceBitmap (part %d) @ %u\n",
1191                           p_index, bitmap->s_extPosition);
1192         }
1193 
1194         return 0;
1195 }
1196 
1197 static void udf_find_vat_block(struct super_block *sb, int p_index,
1198                                int type1_index, sector_t start_block)
1199 {
1200         struct udf_sb_info *sbi = UDF_SB(sb);
1201         struct udf_part_map *map = &sbi->s_partmaps[p_index];
1202         sector_t vat_block;
1203         struct kernel_lb_addr ino;
1204         struct inode *inode;
1205 
1206         /*
1207          * VAT file entry is in the last recorded block. Some broken disks have
1208          * it a few blocks before so try a bit harder...
1209          */
1210         ino.partitionReferenceNum = type1_index;
1211         for (vat_block = start_block;
1212              vat_block >= map->s_partition_root &&
1213              vat_block >= start_block - 3; vat_block--) {
1214                 ino.logicalBlockNum = vat_block - map->s_partition_root;
1215                 inode = udf_iget_special(sb, &ino);
1216                 if (!IS_ERR(inode)) {
1217                         sbi->s_vat_inode = inode;
1218                         break;
1219                 }
1220         }
1221 }
1222 
1223 static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
1224 {
1225         struct udf_sb_info *sbi = UDF_SB(sb);
1226         struct udf_part_map *map = &sbi->s_partmaps[p_index];
1227         struct buffer_head *bh = NULL;
1228         struct udf_inode_info *vati;
1229         struct virtualAllocationTable20 *vat20;
1230         sector_t blocks = sb_bdev_nr_blocks(sb);
1231 
1232         udf_find_vat_block(sb, p_index, type1_index, sbi->s_last_block);
1233         if (!sbi->s_vat_inode &&
1234             sbi->s_last_block != blocks - 1) {
1235                 pr_notice("Failed to read VAT inode from the last recorded block (%lu), retrying with the last block of the device (%lu).\n",
1236                           (unsigned long)sbi->s_last_block,
1237                           (unsigned long)blocks - 1);
1238                 udf_find_vat_block(sb, p_index, type1_index, blocks - 1);
1239         }
1240         if (!sbi->s_vat_inode)
1241                 return -EIO;
1242 
1243         if (map->s_partition_type == UDF_VIRTUAL_MAP15) {
1244                 map->s_type_specific.s_virtual.s_start_offset = 0;
1245                 map->s_type_specific.s_virtual.s_num_entries =
1246                         (sbi->s_vat_inode->i_size - 36) >> 2;
1247         } else if (map->s_partition_type == UDF_VIRTUAL_MAP20) {
1248                 vati = UDF_I(sbi->s_vat_inode);
1249                 if (vati->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
1250                         int err = 0;
1251 
1252                         bh = udf_bread(sbi->s_vat_inode, 0, 0, &err);
1253                         if (!bh) {
1254                                 if (!err)
1255                                         err = -EFSCORRUPTED;
1256                                 return err;
1257                         }
1258                         vat20 = (struct virtualAllocationTable20 *)bh->b_data;
1259                 } else {
1260                         vat20 = (struct virtualAllocationTable20 *)
1261                                                         vati->i_data;
1262                 }
1263 
1264                 map->s_type_specific.s_virtual.s_start_offset =
1265                         le16_to_cpu(vat20->lengthHeader);
1266                 map->s_type_specific.s_virtual.s_num_entries =
1267                         (sbi->s_vat_inode->i_size -
1268                                 map->s_type_specific.s_virtual.
1269                                         s_start_offset) >> 2;
1270                 brelse(bh);
1271         }
1272         return 0;
1273 }
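
/*
 * The MAP15 arithmetic above assumes the UDF 1.50 VAT layout, where the
 * 32-bit mapping entries come first and a 36-byte trailer (a 32-byte regid
 * plus the previous VAT ICB location) ends the file - hence
 * s_num_entries = (i_size - 36) >> 2 with a zero start offset. UDF 2.0x
 * instead puts a variable-length header in front of the entries, which is
 * what lengthHeader supplies in the MAP20 branch.
 */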
1274 
1275 /*
1276  * Load partition descriptor block
1277  *
1278  * Returns <0 on error, 0 on success, -EAGAIN is special - try next descriptor
1279  * sequence.
1280  */
1281 static int udf_load_partdesc(struct super_block *sb, sector_t block)
1282 {
1283         struct buffer_head *bh;
1284         struct partitionDesc *p;
1285         struct udf_part_map *map;
1286         struct udf_sb_info *sbi = UDF_SB(sb);
1287         int i, type1_idx;
1288         uint16_t partitionNumber;
1289         uint16_t ident;
1290         int ret;
1291 
1292         bh = udf_read_tagged(sb, block, block, &ident);
1293         if (!bh)
1294                 return -EAGAIN;
1295         if (ident != TAG_IDENT_PD) {
1296                 ret = 0;
1297                 goto out_bh;
1298         }
1299 
1300         p = (struct partitionDesc *)bh->b_data;
1301         partitionNumber = le16_to_cpu(p->partitionNumber);
1302 
1303         /* First scan for TYPE1 and SPARABLE partitions */
1304         for (i = 0; i < sbi->s_partitions; i++) {
1305                 map = &sbi->s_partmaps[i];
1306                 udf_debug("Searching map: (%u == %u)\n",
1307                           map->s_partition_num, partitionNumber);
1308                 if (map->s_partition_num == partitionNumber &&
1309                     (map->s_partition_type == UDF_TYPE1_MAP15 ||
1310                      map->s_partition_type == UDF_SPARABLE_MAP15))
1311                         break;
1312         }
1313 
1314         if (i >= sbi->s_partitions) {
1315                 udf_debug("Partition (%u) not found in partition map\n",
1316                           partitionNumber);
1317                 ret = 0;
1318                 goto out_bh;
1319         }
1320 
1321         ret = udf_fill_partdesc_info(sb, p, i);
1322         if (ret < 0)
1323                 goto out_bh;
1324 
1325         /*
1326          * Now rescan for VIRTUAL or METADATA partitions when SPARABLE and
1327          * PHYSICAL partitions are already set up
1328          */
1329         type1_idx = i;
1330         map = NULL; /* suppress 'maybe used uninitialized' warning */
1331         for (i = 0; i < sbi->s_partitions; i++) {
1332                 map = &sbi->s_partmaps[i];
1333 
1334                 if (map->s_partition_num == partitionNumber &&
1335                     (map->s_partition_type == UDF_VIRTUAL_MAP15 ||
1336                      map->s_partition_type == UDF_VIRTUAL_MAP20 ||
1337                      map->s_partition_type == UDF_METADATA_MAP25))
1338                         break;
1339         }
1340 
1341         if (i >= sbi->s_partitions) {
1342                 ret = 0;
1343                 goto out_bh;
1344         }
1345 
1346         ret = udf_fill_partdesc_info(sb, p, i);
1347         if (ret < 0)
1348                 goto out_bh;
1349 
1350         if (map->s_partition_type == UDF_METADATA_MAP25) {
1351                 ret = udf_load_metadata_files(sb, i, type1_idx);
1352                 if (ret < 0) {
1353                         udf_err(sb, "error loading MetaData partition map %d\n",
1354                                 i);
1355                         goto out_bh;
1356                 }
1357         } else {
1358                 /*
1359                  * If we have a partition with virtual map, we don't handle
1360                  * writing to it (we overwrite blocks instead of relocating
1361                  * them).
1362                  */
1363                 if (!sb_rdonly(sb)) {
1364                         ret = -EACCES;
1365                         goto out_bh;
1366                 }
1367                 UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
1368                 ret = udf_load_vat(sb, i, type1_idx);
1369                 if (ret < 0)
1370                         goto out_bh;
1371         }
1372         ret = 0;
1373 out_bh:
1374         /* In case loading failed, we handle cleanup in udf_fill_super */
1375         brelse(bh);
1376         return ret;
1377 }
1378 
1379 static int udf_load_sparable_map(struct super_block *sb,
1380                                  struct udf_part_map *map,
1381                                  struct sparablePartitionMap *spm)
1382 {
1383         uint32_t loc;
1384         uint16_t ident;
1385         struct sparingTable *st;
1386         struct udf_sparing_data *sdata = &map->s_type_specific.s_sparing;
1387         int i;
1388         struct buffer_head *bh;
1389 
1390         map->s_partition_type = UDF_SPARABLE_MAP15;
1391         sdata->s_packet_len = le16_to_cpu(spm->packetLength);
1392         if (!is_power_of_2(sdata->s_packet_len)) {
1393                 udf_err(sb, "error loading logical volume descriptor: "
1394                         "Invalid packet length %u\n",
1395                         (unsigned)sdata->s_packet_len);
1396                 return -EIO;
1397         }
1398         if (spm->numSparingTables > 4) {
1399                 udf_err(sb, "error loading logical volume descriptor: "
1400                         "Too many sparing tables (%d)\n",
1401                         (int)spm->numSparingTables);
1402                 return -EIO;
1403         }
1404         if (le32_to_cpu(spm->sizeSparingTable) > sb->s_blocksize) {
1405                 udf_err(sb, "error loading logical volume descriptor: "
1406                         "Too big sparing table size (%u)\n",
1407                         le32_to_cpu(spm->sizeSparingTable));
1408                 return -EIO;
1409         }
1410 
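             /*
              * Read up to the announced number of sparing tables (at most
              * four, enforced above) and keep their buffer heads so that
              * udf_get_pblock_spar15() can later remap defective packets to
              * spare ones. Tables failing the tag, ident or size checks are
              * simply skipped.
              */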
1411         for (i = 0; i < spm->numSparingTables; i++) {
1412                 loc = le32_to_cpu(spm->locSparingTable[i]);
1413                 bh = udf_read_tagged(sb, loc, loc, &ident);
1414                 if (!bh)
1415                         continue;
1416 
1417                 st = (struct sparingTable *)bh->b_data;
1418                 if (ident != 0 ||
1419                     strncmp(st->sparingIdent.ident, UDF_ID_SPARING,
1420                             strlen(UDF_ID_SPARING)) ||
1421                     sizeof(*st) + le16_to_cpu(st->reallocationTableLen) >
1422                                                         sb->s_blocksize) {
1423                         brelse(bh);
1424                         continue;
1425                 }
1426 
1427                 sdata->s_spar_map[i] = bh;
1428         }
1429         map->s_partition_func = udf_get_pblock_spar15;
1430         return 0;
1431 }
1432 
1433 static int udf_load_logicalvol(struct super_block *sb, sector_t block,
1434                                struct kernel_lb_addr *fileset)
1435 {
1436         struct logicalVolDesc *lvd;
1437         int i, offset;
1438         uint8_t type;
1439         struct udf_sb_info *sbi = UDF_SB(sb);
1440         struct genericPartitionMap *gpm;
1441         uint16_t ident;
1442         struct buffer_head *bh;
1443         unsigned int table_len;
1444         int ret;
1445 
1446         bh = udf_read_tagged(sb, block, block, &ident);
1447         if (!bh)
1448                 return -EAGAIN;
1449         BUG_ON(ident != TAG_IDENT_LVD);
1450         lvd = (struct logicalVolDesc *)bh->b_data;
1451         table_len = le32_to_cpu(lvd->mapTableLength);
1452         if (table_len > sb->s_blocksize - sizeof(*lvd)) {
1453                 udf_err(sb, "error loading logical volume descriptor: "
1454                         "Partition table too long (%u > %lu)\n", table_len,
1455                         sb->s_blocksize - sizeof(*lvd));
1456                 ret = -EIO;
1457                 goto out_bh;
1458         }
1459 
1460         ret = udf_verify_domain_identifier(sb, &lvd->domainIdent,
1461                                            "logical volume");
1462         if (ret)
1463                 goto out_bh;
1464         ret = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps));
1465         if (ret)
1466                 goto out_bh;
1467 
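             /*
              * Walk the variable-length partition map table: type 1 maps
              * describe plain physical partitions, while type 2 maps are told
              * apart by their ident string (virtual, sparable or metadata).
              */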
1468         for (i = 0, offset = 0;
1469              i < sbi->s_partitions && offset < table_len;
1470              i++, offset += gpm->partitionMapLength) {
1471                 struct udf_part_map *map = &sbi->s_partmaps[i];
1472                 gpm = (struct genericPartitionMap *)
1473                                 &(lvd->partitionMaps[offset]);
1474                 type = gpm->partitionMapType;
1475                 if (type == 1) {
1476                         struct genericPartitionMap1 *gpm1 =
1477                                 (struct genericPartitionMap1 *)gpm;
1478                         map->s_partition_type = UDF_TYPE1_MAP15;
1479                         map->s_volumeseqnum = le16_to_cpu(gpm1->volSeqNum);
1480                         map->s_partition_num = le16_to_cpu(gpm1->partitionNum);
1481                         map->s_partition_func = NULL;
1482                 } else if (type == 2) {
1483                         struct udfPartitionMap2 *upm2 =
1484                                                 (struct udfPartitionMap2 *)gpm;
1485                         if (!strncmp(upm2->partIdent.ident, UDF_ID_VIRTUAL,
1486                                                 strlen(UDF_ID_VIRTUAL))) {
1487                                 u16 suf =
1488                                         le16_to_cpu(((__le16 *)upm2->partIdent.
1489                                                         identSuffix)[0]);
1490                                 if (suf < 0x0200) {
1491                                         map->s_partition_type =
1492                                                         UDF_VIRTUAL_MAP15;
1493                                         map->s_partition_func =
1494                                                         udf_get_pblock_virt15;
1495                                 } else {
1496                                         map->s_partition_type =
1497                                                         UDF_VIRTUAL_MAP20;
1498                                         map->s_partition_func =
1499                                                         udf_get_pblock_virt20;
1500                                 }
1501                         } else if (!strncmp(upm2->partIdent.ident,
1502                                                 UDF_ID_SPARABLE,
1503                                                 strlen(UDF_ID_SPARABLE))) {
1504                                 ret = udf_load_sparable_map(sb, map,
1505                                         (struct sparablePartitionMap *)gpm);
1506                                 if (ret < 0)
1507                                         goto out_bh;
1508                         } else if (!strncmp(upm2->partIdent.ident,
1509                                                 UDF_ID_METADATA,
1510                                                 strlen(UDF_ID_METADATA))) {
1511                                 struct udf_meta_data *mdata =
1512                                         &map->s_type_specific.s_metadata;
1513                                 struct metadataPartitionMap *mdm =
1514                                                 (struct metadataPartitionMap *)
1515                                                 &(lvd->partitionMaps[offset]);
1516                                 udf_debug("Parsing Logical vol part %d type %u  id=%s\n",
1517                                           i, type, UDF_ID_METADATA);
1518 
1519                                 map->s_partition_type = UDF_METADATA_MAP25;
1520                                 map->s_partition_func = udf_get_pblock_meta25;
1521 
1522                                 mdata->s_meta_file_loc   =
1523                                         le32_to_cpu(mdm->metadataFileLoc);
1524                                 mdata->s_mirror_file_loc =
1525                                         le32_to_cpu(mdm->metadataMirrorFileLoc);
1526                                 mdata->s_bitmap_file_loc =
1527                                         le32_to_cpu(mdm->metadataBitmapFileLoc);
1528                                 mdata->s_alloc_unit_size =
1529                                         le32_to_cpu(mdm->allocUnitSize);
1530                                 mdata->s_align_unit_size =
1531                                         le16_to_cpu(mdm->alignUnitSize);
1532                                 if (mdm->flags & 0x01)
1533                                         mdata->s_flags |= MF_DUPLICATE_MD;
1534 
1535                                 udf_debug("Metadata Ident suffix=0x%x\n",
1536                                           le16_to_cpu(*(__le16 *)
1537                                                       mdm->partIdent.identSuffix));
1538                                 udf_debug("Metadata part num=%u\n",
1539                                           le16_to_cpu(mdm->partitionNum));
1540                                 udf_debug("Metadata part alloc unit size=%u\n",
1541                                           le32_to_cpu(mdm->allocUnitSize));
1542                                 udf_debug("Metadata file loc=%u\n",
1543                                           le32_to_cpu(mdm->metadataFileLoc));
1544                                 udf_debug("Mirror file loc=%u\n",
1545                                           le32_to_cpu(mdm->metadataMirrorFileLoc));
1546                                 udf_debug("Bitmap file loc=%u\n",
1547                                           le32_to_cpu(mdm->metadataBitmapFileLoc));
1548                                 udf_debug("Flags: %d %u\n",
1549                                           mdata->s_flags, mdm->flags);
1550                         } else {
1551                                 udf_debug("Unknown ident: %s\n",
1552                                           upm2->partIdent.ident);
1553                                 continue;
1554                         }
1555                         map->s_volumeseqnum = le16_to_cpu(upm2->volSeqNum);
1556                         map->s_partition_num = le16_to_cpu(upm2->partitionNum);
1557                 }
1558                 udf_debug("Partition (%d:%u) type %u on volume %u\n",
1559                           i, map->s_partition_num, type, map->s_volumeseqnum);
1560         }
1561 
1562         if (fileset) {
1563                 struct long_ad *la = (struct long_ad *)&(lvd->logicalVolContentsUse[0]);
1564 
1565                 *fileset = lelb_to_cpu(la->extLocation);
1566                 udf_debug("FileSet found in LogicalVolDesc at block=%u, partition=%u\n",
1567                           fileset->logicalBlockNum,
1568                           fileset->partitionReferenceNum);
1569         }
1570         if (lvd->integritySeqExt.extLength)
1571                 udf_load_logicalvolint(sb, leea_to_cpu(lvd->integritySeqExt));
1572         ret = 0;
1573 
1574         if (!sbi->s_lvid_bh) {
1575                 /* We can't generate unique IDs without a valid LVID */
1576                 if (sb_rdonly(sb)) {
1577                         UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
1578                 } else {
1579                         udf_warn(sb, "Damaged or missing LVID, forcing "
1580                                      "readonly mount\n");
1581                         ret = -EACCES;
1582                 }
1583         }
1584 out_bh:
1585         brelse(bh);
1586         return ret;
1587 }
1588 
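     /*
      * Sanity check an LVID: the descriptor itself, its implementation use
      * area and the free space and size tables (one __le32 per partition
      * each) must all fit within a single block.
      */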
1589 static bool udf_lvid_valid(struct super_block *sb,
1590                            struct logicalVolIntegrityDesc *lvid)
1591 {
1592         u32 parts, impuselen;
1593 
1594         parts = le32_to_cpu(lvid->numOfPartitions);
1595         impuselen = le32_to_cpu(lvid->lengthOfImpUse);
1596         if (parts >= sb->s_blocksize || impuselen >= sb->s_blocksize ||
1597             sizeof(struct logicalVolIntegrityDesc) + impuselen +
1598             2 * parts * sizeof(u32) > sb->s_blocksize)
1599                 return false;
1600         return true;
1601 }
1602 
1603 /*
1604  * Find the prevailing Logical Volume Integrity Descriptor.
1605  */
1606 static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_ad loc)
1607 {
1608         struct buffer_head *bh, *final_bh;
1609         uint16_t ident;
1610         struct udf_sb_info *sbi = UDF_SB(sb);
1611         struct logicalVolIntegrityDesc *lvid;
1612         int indirections = 0;
1613 
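             /*
              * Walk the integrity sequence: within each extent the last LVID
              * read wins, and nextIntegrityExt chains to further extents.
              * The walk is bounded by UDF_MAX_LVID_NESTING so corrupted media
              * cannot loop us forever.
              */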
1614         while (++indirections <= UDF_MAX_LVID_NESTING) {
1615                 final_bh = NULL;
1616                 while (loc.extLength > 0 &&
1617                         (bh = udf_read_tagged(sb, loc.extLocation,
1618                                         loc.extLocation, &ident))) {
1619                         if (ident != TAG_IDENT_LVID) {
1620                                 brelse(bh);
1621                                 break;
1622                         }
1623 
1624                         brelse(final_bh);
1625                         final_bh = bh;
1626 
1627                         loc.extLength -= sb->s_blocksize;
1628                         loc.extLocation++;
1629                 }
1630 
1631                 if (!final_bh)
1632                         return;
1633 
1634                 lvid = (struct logicalVolIntegrityDesc *)final_bh->b_data;
1635                 if (udf_lvid_valid(sb, lvid)) {
1636                         brelse(sbi->s_lvid_bh);
1637                         sbi->s_lvid_bh = final_bh;
1638                 } else {
1639                         udf_warn(sb, "Corrupted LVID (parts=%u, impuselen=%u), "
1640                                  "ignoring.\n",
1641                                  le32_to_cpu(lvid->numOfPartitions),
1642                                  le32_to_cpu(lvid->lengthOfImpUse));
1643                 }
1644 
1645                 if (lvid->nextIntegrityExt.extLength == 0)
1646                         return;
1647 
1648                 loc = leea_to_cpu(lvid->nextIntegrityExt);
1649         }
1650 
1651         udf_warn(sb, "Too many LVID indirections (max %u), ignoring.\n",
1652                 UDF_MAX_LVID_NESTING);
1653         brelse(sbi->s_lvid_bh);
1654         sbi->s_lvid_bh = NULL;
1655 }
1656 
1657 /*
1658  * Step for reallocation of table of partition descriptor sequence numbers.
1659  * Must be power of 2.
1660  */
1661 #define PART_DESC_ALLOC_STEP 32
1662 
1663 struct part_desc_seq_scan_data {
1664         struct udf_vds_record rec;
1665         u32 partnum;
1666 };
1667 
1668 struct desc_seq_scan_data {
1669         struct udf_vds_record vds[VDS_POS_LENGTH];
1670         unsigned int size_part_descs;
1671         unsigned int num_part_descs;
1672         struct part_desc_seq_scan_data *part_descs_loc;
1673 };
1674 
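     /*
      * Return the record tracking the prevailing Partition Descriptor for
      * this partition number, growing the tracking array in multiples of
      * PART_DESC_ALLOC_STEP when a new partition number is seen and the
      * array is already full.
      */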
1675 static struct udf_vds_record *handle_partition_descriptor(
1676                                 struct buffer_head *bh,
1677                                 struct desc_seq_scan_data *data)
1678 {
1679         struct partitionDesc *desc = (struct partitionDesc *)bh->b_data;
1680         int partnum;
1681         int i;
1682 
1683         partnum = le16_to_cpu(desc->partitionNumber);
1684         for (i = 0; i < data->num_part_descs; i++)
1685                 if (partnum == data->part_descs_loc[i].partnum)
1686                         return &(data->part_descs_loc[i].rec);
1687         if (data->num_part_descs >= data->size_part_descs) {
1688                 struct part_desc_seq_scan_data *new_loc;
1689                 unsigned int new_size = ALIGN(partnum, PART_DESC_ALLOC_STEP);
1690 
1691                 new_loc = kcalloc(new_size, sizeof(*new_loc), GFP_KERNEL);
1692                 if (!new_loc)
1693                         return ERR_PTR(-ENOMEM);
1694                 memcpy(new_loc, data->part_descs_loc,
1695                        data->size_part_descs * sizeof(*new_loc));
1696                 kfree(data->part_descs_loc);
1697                 data->part_descs_loc = new_loc;
1698                 data->size_part_descs = new_size;
1699         }
1700         return &(data->part_descs_loc[data->num_part_descs++].rec);
1701 }
1702 
1703 
1704 static struct udf_vds_record *get_volume_descriptor_record(uint16_t ident,
1705                 struct buffer_head *bh, struct desc_seq_scan_data *data)
1706 {
1707         switch (ident) {
1708         case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */
1709                 return &(data->vds[VDS_POS_PRIMARY_VOL_DESC]);
1710         case TAG_IDENT_IUVD: /* ISO 13346 3/10.4 */
1711                 return &(data->vds[VDS_POS_IMP_USE_VOL_DESC]);
1712         case TAG_IDENT_LVD: /* ISO 13346 3/10.6 */
1713                 return &(data->vds[VDS_POS_LOGICAL_VOL_DESC]);
1714         case TAG_IDENT_USD: /* ISO 13346 3/10.8 */
1715                 return &(data->vds[VDS_POS_UNALLOC_SPACE_DESC]);
1716         case TAG_IDENT_PD: /* ISO 13346 3/10.5 */
1717                 return handle_partition_descriptor(bh, data);
1718         }
1719         return NULL;
1720 }
1721 
1722 /*
1723  * Process a main/reserve volume descriptor sequence.
1724  *   @block             First block of the first extent of the sequence.
1725  *   @lastblock         Last block of the first extent of the sequence.
1726  *   @fileset           Where we store the extent containing the root fileset.
1727  *
1728  * Returns <0 on error, 0 on success. -EAGAIN is special - try next descriptor
1729  * sequence
1730  */
1731 static noinline int udf_process_sequence(
1732                 struct super_block *sb,
1733                 sector_t block, sector_t lastblock,
1734                 struct kernel_lb_addr *fileset)
1735 {
1736         struct buffer_head *bh = NULL;
1737         struct udf_vds_record *curr;
1738         struct generic_desc *gd;
1739         struct volDescPtr *vdp;
1740         bool done = false;
1741         uint32_t vdsn;
1742         uint16_t ident;
1743         int ret;
1744         unsigned int indirections = 0;
1745         struct desc_seq_scan_data data;
1746         unsigned int i;
1747 
1748         memset(data.vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH);
1749         data.size_part_descs = PART_DESC_ALLOC_STEP;
1750         data.num_part_descs = 0;
1751         data.part_descs_loc = kcalloc(data.size_part_descs,
1752                                       sizeof(*data.part_descs_loc),
1753                                       GFP_KERNEL);
1754         if (!data.part_descs_loc)
1755                 return -ENOMEM;
1756 
1757         /*
1758          * Read the main descriptor sequence and find which descriptors
1759          * are in it.
1760          */
1761         for (; (!done && block <= lastblock); block++) {
1762                 bh = udf_read_tagged(sb, block, block, &ident);
1763                 if (!bh)
1764                         break;
1765 
1766                 /* Process each descriptor (ISO 13346 3/8.3-8.4) */
1767                 gd = (struct generic_desc *)bh->b_data;
1768                 vdsn = le32_to_cpu(gd->volDescSeqNum);
1769                 switch (ident) {
1770                 case TAG_IDENT_VDP: /* ISO 13346 3/10.3 */
1771                         if (++indirections > UDF_MAX_TD_NESTING) {
1772                                 udf_err(sb, "too many Volume Descriptor "
1773                                         "Pointers (max %u supported)\n",
1774                                         UDF_MAX_TD_NESTING);
1775                                 brelse(bh);
1776                                 ret = -EIO;
1777                                 goto out;
1778                         }
1779 
1780                         vdp = (struct volDescPtr *)bh->b_data;
1781                         block = le32_to_cpu(vdp->nextVolDescSeqExt.extLocation);
1782                         lastblock = le32_to_cpu(
1783                                 vdp->nextVolDescSeqExt.extLength) >>
1784                                 sb->s_blocksize_bits;
1785                         lastblock += block - 1;
1786                         /* For loop is going to increment 'block' again */
1787                         block--;
1788                         break;
1789                 case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */
1790                 case TAG_IDENT_IUVD: /* ISO 13346 3/10.4 */
1791                 case TAG_IDENT_LVD: /* ISO 13346 3/10.6 */
1792                 case TAG_IDENT_USD: /* ISO 13346 3/10.8 */
1793                 case TAG_IDENT_PD: /* ISO 13346 3/10.5 */
1794                         curr = get_volume_descriptor_record(ident, bh, &data);
1795                         if (IS_ERR(curr)) {
1796                                 brelse(bh);
1797                                 ret = PTR_ERR(curr);
1798                                 goto out;
1799                         }
1800                         /* Descriptor we don't care about? */
1801                         if (!curr)
1802                                 break;
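                             /*
                              * Per ECMA-167, the copy of a descriptor with
                              * the highest volume descriptor sequence number
                              * prevails.
                              */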
1803                         if (vdsn >= curr->volDescSeqNum) {
1804                                 curr->volDescSeqNum = vdsn;
1805                                 curr->block = block;
1806                         }
1807                         break;
1808                 case TAG_IDENT_TD: /* ISO 13346 3/10.9 */
1809                         done = true;
1810                         break;
1811                 }
1812                 brelse(bh);
1813         }
1814         /*
1815          * Now read interesting descriptors again and process them
1816          * in a suitable order
1817          */
1818         if (!data.vds[VDS_POS_PRIMARY_VOL_DESC].block) {
1819                 udf_err(sb, "Primary Volume Descriptor not found!\n");
1820                 ret = -EAGAIN;
1821                 goto out;
1822         }
1823         ret = udf_load_pvoldesc(sb, data.vds[VDS_POS_PRIMARY_VOL_DESC].block);
1824         if (ret < 0)
1825                 goto out;
1826 
1827         if (data.vds[VDS_POS_LOGICAL_VOL_DESC].block) {
1828                 ret = udf_load_logicalvol(sb,
1829                                 data.vds[VDS_POS_LOGICAL_VOL_DESC].block,
1830                                 fileset);
1831                 if (ret < 0)
1832                         goto out;
1833         }
1834 
1835         /* Now handle prevailing Partition Descriptors */
1836         for (i = 0; i < data.num_part_descs; i++) {
1837                 ret = udf_load_partdesc(sb, data.part_descs_loc[i].rec.block);
1838                 if (ret < 0)
1839                         goto out;
1840         }
1841         ret = 0;
1842 out:
1843         kfree(data.part_descs_loc);
1844         return ret;
1845 }
1846 
1847 /*
1848  * Load Volume Descriptor Sequence described by anchor in bh
1849  *
1850  * Returns <0 on error, 0 on success
1851  */
1852 static int udf_load_sequence(struct super_block *sb, struct buffer_head *bh,
1853                              struct kernel_lb_addr *fileset)
1854 {
1855         struct anchorVolDescPtr *anchor;
1856         sector_t main_s, main_e, reserve_s, reserve_e;
1857         int ret;
1858 
1859         anchor = (struct anchorVolDescPtr *)bh->b_data;
1860 
1861         /* Locate the main sequence */
1862         main_s = le32_to_cpu(anchor->mainVolDescSeqExt.extLocation);
1863         main_e = le32_to_cpu(anchor->mainVolDescSeqExt.extLength);
1864         main_e = main_e >> sb->s_blocksize_bits;
1865         main_e += main_s - 1;
1866 
1867         /* Locate the reserve sequence */
1868         reserve_s = le32_to_cpu(anchor->reserveVolDescSeqExt.extLocation);
1869         reserve_e = le32_to_cpu(anchor->reserveVolDescSeqExt.extLength);
1870         reserve_e = reserve_e >> sb->s_blocksize_bits;
1871         reserve_e += reserve_s - 1;
1872 
1873         /* Process the main & reserve sequences */
1874         /* responsible for finding the PartitionDesc(s) */
1875         ret = udf_process_sequence(sb, main_s, main_e, fileset);
1876         if (ret != -EAGAIN)
1877                 return ret;
1878         udf_sb_free_partitions(sb);
1879         ret = udf_process_sequence(sb, reserve_s, reserve_e, fileset);
1880         if (ret < 0) {
1881                 udf_sb_free_partitions(sb);
1882                 /* No sequence was OK, return -EIO */
1883                 if (ret == -EAGAIN)
1884                         ret = -EIO;
1885         }
1886         return ret;
1887 }
1888 
1889 /*
1890  * Check whether there is an anchor block in the given block and
1891  * load Volume Descriptor Sequence if so.
1892  *
1893  * Returns <0 on error, 0 on success, -EAGAIN is special - try next anchor
1894  * block
1895  */
1896 static int udf_check_anchor_block(struct super_block *sb, sector_t block,
1897                                   struct kernel_lb_addr *fileset)
1898 {
1899         struct buffer_head *bh;
1900         uint16_t ident;
1901         int ret;
1902 
1903         bh = udf_read_tagged(sb, block, block, &ident);
1904         if (!bh)
1905                 return -EAGAIN;
1906         if (ident != TAG_IDENT_AVDP) {
1907                 brelse(bh);
1908                 return -EAGAIN;
1909         }
1910         ret = udf_load_sequence(sb, bh, fileset);
1911         brelse(bh);
1912         return ret;
1913 }
1914 
1915 /*
1916  * Search for an anchor volume descriptor pointer.
1917  *
1918  * Returns < 0 on error, 0 on success. -EAGAIN is special - try next set
1919  * of anchors.
1920  */
1921 static int udf_scan_anchors(struct super_block *sb, udf_pblk_t *lastblock,
1922                             struct kernel_lb_addr *fileset)
1923 {
1924         udf_pblk_t last[6];
1925         int i;
1926         struct udf_sb_info *sbi = UDF_SB(sb);
1927         int last_count = 0;
1928         int ret;
1929 
1930         /* First try user provided anchor */
1931         if (sbi->s_anchor) {
1932                 ret = udf_check_anchor_block(sb, sbi->s_anchor, fileset);
1933                 if (ret != -EAGAIN)
1934                         return ret;
1935         }
1936         /*
1937          * According to the spec, the anchor is in one of:
1938          *     block 256
1939          *     lastblock-256
1940          *     lastblock
1941          * However, if the disc isn't closed, it could be at block 512.
1942          */
1943         ret = udf_check_anchor_block(sb, sbi->s_session + 256, fileset);
1944         if (ret != -EAGAIN)
1945                 return ret;
1946         /*
1947          * The trouble is which block is the last one. Drives often misreport
1948          * this so we try various possibilities.
1949          */
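             /*
              * Candidate last blocks: the reported value itself, +/-1 and -2
              * for off-by-one and run-out block reporting, and -150/-152,
              * which likely compensate for drives that include the 150-sector
              * post-gap (plus two run-out blocks) of CD media in the reported
              * size.
              */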
1950         last[last_count++] = *lastblock;
1951         if (*lastblock >= 1)
1952                 last[last_count++] = *lastblock - 1;
1953         last[last_count++] = *lastblock + 1;
1954         if (*lastblock >= 2)
1955                 last[last_count++] = *lastblock - 2;
1956         if (*lastblock >= 150)
1957                 last[last_count++] = *lastblock - 150;
1958         if (*lastblock >= 152)
1959                 last[last_count++] = *lastblock - 152;
1960 
1961         for (i = 0; i < last_count; i++) {
1962                 if (last[i] >= sb_bdev_nr_blocks(sb))
1963                         continue;
1964                 ret = udf_check_anchor_block(sb, last[i], fileset);
1965                 if (ret != -EAGAIN) {
1966                         if (!ret)
1967                                 *lastblock = last[i];
1968                         return ret;
1969                 }
1970                 if (last[i] < 256)
1971                         continue;
1972                 ret = udf_check_anchor_block(sb, last[i] - 256, fileset);
1973                 if (ret != -EAGAIN) {
1974                         if (!ret)
1975                                 *lastblock = last[i];
1976                         return ret;
1977                 }
1978         }
1979 
1980         /* Finally try block 512 in case media is open */
1981         return udf_check_anchor_block(sb, sbi->s_session + 512, fileset);
1982 }
1983 
1984 /*
1985  * Check Volume Structure Descriptor, find Anchor block and load Volume
1986  * Descriptor Sequence.
1987  *
1988  * Returns < 0 on error, 0 on success. -EAGAIN is special meaning anchor
1989  * block was not found.
1990  */
1991 static int udf_load_vrs(struct super_block *sb, struct udf_options *uopt,
1992                         int silent, struct kernel_lb_addr *fileset)
1993 {
1994         struct udf_sb_info *sbi = UDF_SB(sb);
1995         int nsr = 0;
1996         int ret;
1997 
1998         if (!sb_set_blocksize(sb, uopt->blocksize)) {
1999                 if (!silent)
2000                         udf_warn(sb, "Bad block size\n");
2001                 return -EINVAL;
2002         }
2003         sbi->s_last_block = uopt->lastblock;
2004         if (!UDF_QUERY_FLAG(sb, UDF_FLAG_NOVRS)) {
2005                 /* Check that it is NSR02 compliant */
2006                 nsr = udf_check_vsd(sb);
2007                 if (!nsr) {
2008                         if (!silent)
2009                                 udf_warn(sb, "No VRS found\n");
2010                         return -EINVAL;
2011                 }
2012                 if (nsr == -1)
2013                         udf_debug("Failed to read sector at offset %d. "
2014                                   "Assuming open disc. Skipping validity "
2015                                   "check\n", VSD_FIRST_SECTOR_OFFSET);
2016                 if (!sbi->s_last_block)
2017                         sbi->s_last_block = udf_get_last_block(sb);
2018         } else {
2019                 udf_debug("Validity check skipped because of novrs option\n");
2020         }
2021 
2022         /* Look for anchor block and load Volume Descriptor Sequence */
2023         sbi->s_anchor = uopt->anchor;
2024         ret = udf_scan_anchors(sb, &sbi->s_last_block, fileset);
2025         if (ret < 0) {
2026                 if (!silent && ret == -EAGAIN)
2027                         udf_warn(sb, "No anchor found\n");
2028                 return ret;
2029         }
2030         return 0;
2031 }
2032 
2033 static void udf_finalize_lvid(struct logicalVolIntegrityDesc *lvid)
2034 {
2035         struct timespec64 ts;
2036 
2037         ktime_get_real_ts64(&ts);
2038         udf_time_to_disk_stamp(&lvid->recordingDateAndTime, ts);
2039         lvid->descTag.descCRC = cpu_to_le16(
2040                 crc_itu_t(0, (char *)lvid + sizeof(struct tag),
2041                         le16_to_cpu(lvid->descTag.descCRCLength)));
2042         lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
2043 }
2044 
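     /*
      * Mark the logical volume integrity descriptor as "open" for a
      * read-write mount and stamp it with this implementation's identifier,
      * so an unclean shutdown can be detected later; udf_close_lvid()
      * restores the "close" state on clean unmount.
      */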
2045 static void udf_open_lvid(struct super_block *sb)
2046 {
2047         struct udf_sb_info *sbi = UDF_SB(sb);
2048         struct buffer_head *bh = sbi->s_lvid_bh;
2049         struct logicalVolIntegrityDesc *lvid;
2050         struct logicalVolIntegrityDescImpUse *lvidiu;
2051 
2052         if (!bh)
2053                 return;
2054         lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
2055         lvidiu = udf_sb_lvidiu(sb);
2056         if (!lvidiu)
2057                 return;
2058 
2059         mutex_lock(&sbi->s_alloc_mutex);
2060         lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
2061         lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
2062         if (le32_to_cpu(lvid->integrityType) == LVID_INTEGRITY_TYPE_CLOSE)
2063                 lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_OPEN);
2064         else
2065                 UDF_SET_FLAG(sb, UDF_FLAG_INCONSISTENT);
2066 
2067         udf_finalize_lvid(lvid);
2068         mark_buffer_dirty(bh);
2069         sbi->s_lvid_dirty = 0;
2070         mutex_unlock(&sbi->s_alloc_mutex);
2071         /* Make opening of filesystem visible on the media immediately */
2072         sync_dirty_buffer(bh);
2073 }
2074 
2075 static void udf_close_lvid(struct super_block *sb)
2076 {
2077         struct udf_sb_info *sbi = UDF_SB(sb);
2078         struct buffer_head *bh = sbi->s_lvid_bh;
2079         struct logicalVolIntegrityDesc *lvid;
2080         struct logicalVolIntegrityDescImpUse *lvidiu;
2081 
2082         if (!bh)
2083                 return;
2084         lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
2085         lvidiu = udf_sb_lvidiu(sb);
2086         if (!lvidiu)
2087                 return;
2088 
2089         mutex_lock(&sbi->s_alloc_mutex);
2090         lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
2091         lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
2092         if (UDF_MAX_WRITE_VERSION > le16_to_cpu(lvidiu->maxUDFWriteRev))
2093                 lvidiu->maxUDFWriteRev = cpu_to_le16(UDF_MAX_WRITE_VERSION);
2094         if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFReadRev))
2095                 lvidiu->minUDFReadRev = cpu_to_le16(sbi->s_udfrev);
2096         if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFWriteRev))
2097                 lvidiu->minUDFWriteRev = cpu_to_le16(sbi->s_udfrev);
2098         if (!UDF_QUERY_FLAG(sb, UDF_FLAG_INCONSISTENT))
2099                 lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_CLOSE);
2100 
2101         /*
2102          * We set buffer uptodate unconditionally here to avoid spurious
2103          * warnings from mark_buffer_dirty() when previous EIO has marked
2104          * the buffer as !uptodate
2105          */
2106         set_buffer_uptodate(bh);
2107         udf_finalize_lvid(lvid);
2108         mark_buffer_dirty(bh);
2109         sbi->s_lvid_dirty = 0;
2110         mutex_unlock(&sbi->s_alloc_mutex);
2111         /* Make closing of filesystem visible on the media immediately */
2112         sync_dirty_buffer(bh);
2113 }
2114 
2115 u64 lvid_get_unique_id(struct super_block *sb)
2116 {
2117         struct buffer_head *bh;
2118         struct udf_sb_info *sbi = UDF_SB(sb);
2119         struct logicalVolIntegrityDesc *lvid;
2120         struct logicalVolHeaderDesc *lvhd;
2121         u64 uniqueID;
2122         u64 ret;
2123 
2124         bh = sbi->s_lvid_bh;
2125         if (!bh)
2126                 return 0;
2127 
2128         lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
2129         lvhd = (struct logicalVolHeaderDesc *)lvid->logicalVolContentsUse;
2130 
2131         mutex_lock(&sbi->s_alloc_mutex);
2132         ret = uniqueID = le64_to_cpu(lvhd->uniqueID);
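             /*
              * If incrementing wraps the lower 32 bits around to zero, skip
              * ahead to 16; the low values 0-15 appear to be reserved by
              * UDF's unique ID rules.
              */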
2133         if (!(++uniqueID & 0xFFFFFFFF))
2134                 uniqueID += 16;
2135         lvhd->uniqueID = cpu_to_le64(uniqueID);
2136         udf_updated_lvid(sb);
2137         mutex_unlock(&sbi->s_alloc_mutex);
2138 
2139         return ret;
2140 }
2141 
2142 static int udf_fill_super(struct super_block *sb, struct fs_context *fc)
2143 {
2144         int ret = -EINVAL;
2145         struct inode *inode = NULL;
2146         struct udf_options *uopt = fc->fs_private;
2147         struct kernel_lb_addr rootdir, fileset;
2148         struct udf_sb_info *sbi;
2149         bool lvid_open = false;
2150         int silent = fc->sb_flags & SB_SILENT;
2151 
2152         sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
2153         if (!sbi)
2154                 return -ENOMEM;
2155 
2156         sb->s_fs_info = sbi;
2157 
2158         mutex_init(&sbi->s_alloc_mutex);
2159 
2160         fileset.logicalBlockNum = 0xFFFFFFFF;
2161         fileset.partitionReferenceNum = 0xFFFF;
2162 
2163         sbi->s_flags = uopt->flags;
2164         sbi->s_uid = uopt->uid;
2165         sbi->s_gid = uopt->gid;
2166         sbi->s_umask = uopt->umask;
2167         sbi->s_fmode = uopt->fmode;
2168         sbi->s_dmode = uopt->dmode;
2169         sbi->s_nls_map = uopt->nls_map;
2170         uopt->nls_map = NULL;
2171         rwlock_init(&sbi->s_cred_lock);
2172 
2173         if (uopt->session == 0xFFFFFFFF)
2174                 sbi->s_session = udf_get_last_session(sb);
2175         else
2176                 sbi->s_session = uopt->session;
2177 
2178         udf_debug("Multi-session=%d\n", sbi->s_session);
2179 
2180         /* Fill in the rest of the superblock */
2181         sb->s_op = &udf_sb_ops;
2182         sb->s_export_op = &udf_export_ops;
2183 
2184         sb->s_magic = UDF_SUPER_MAGIC;
2185         sb->s_time_gran = 1000;
2186 
2187         if (uopt->flags & (1 << UDF_FLAG_BLOCKSIZE_SET)) {
2188                 ret = udf_load_vrs(sb, uopt, silent, &fileset);
2189         } else {
2190                 uopt->blocksize = bdev_logical_block_size(sb->s_bdev);
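                     /*
                      * No block size was specified: probe from the device
                      * logical block size upwards, doubling after each failed
                      * scan, until the VRS and anchor are found or 4096 is
                      * exceeded.
                      */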
2191                 while (uopt->blocksize <= 4096) {
2192                         ret = udf_load_vrs(sb, uopt, silent, &fileset);
2193                         if (ret < 0) {
2194                                 if (!silent && ret != -EACCES) {
2195                                         pr_notice("Scanning with blocksize %u failed\n",
2196                                                   uopt->blocksize);
2197                                 }
2198                                 brelse(sbi->s_lvid_bh);
2199                                 sbi->s_lvid_bh = NULL;
2200                                 /*
2201                                  * EACCES is special - we want to propagate to
2202                                  * upper layers that we cannot handle RW mount.
2203                                  */
2204                                 if (ret == -EACCES)
2205                                         break;
2206                         } else
2207                                 break;
2208 
2209                         uopt->blocksize <<= 1;
2210                 }
2211         }
2212         if (ret < 0) {
2213                 if (ret == -EAGAIN) {
2214                         udf_warn(sb, "No partition found (1)\n");
2215                         ret = -EINVAL;
2216                 }
2217                 goto error_out;
2218         }
2219 
2220         udf_debug("Lastblock=%u\n", sbi->s_last_block);
2221 
2222         if (sbi->s_lvid_bh) {
2223                 struct logicalVolIntegrityDescImpUse *lvidiu =
2224                                                         udf_sb_lvidiu(sb);
2225                 uint16_t minUDFReadRev;
2226                 uint16_t minUDFWriteRev;
2227 
2228                 if (!lvidiu) {
2229                         ret = -EINVAL;
2230                         goto error_out;
2231                 }
2232                 minUDFReadRev = le16_to_cpu(lvidiu->minUDFReadRev);
2233                 minUDFWriteRev = le16_to_cpu(lvidiu->minUDFWriteRev);
2234                 if (minUDFReadRev > UDF_MAX_READ_VERSION) {
2235                         udf_err(sb, "minUDFReadRev=%x (max is %x)\n",
2236                                 minUDFReadRev,
2237                                 UDF_MAX_READ_VERSION);
2238                         ret = -EINVAL;
2239                         goto error_out;
2240                 } else if (minUDFWriteRev > UDF_MAX_WRITE_VERSION) {
2241                         if (!sb_rdonly(sb)) {
2242                                 ret = -EACCES;
2243                                 goto error_out;
2244                         }
2245                         UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
2246                 }
2247 
2248                 sbi->s_udfrev = minUDFWriteRev;
2249 
2250                 if (minUDFReadRev >= UDF_VERS_USE_EXTENDED_FE)
2251                         UDF_SET_FLAG(sb, UDF_FLAG_USE_EXTENDED_FE);
2252                 if (minUDFReadRev >= UDF_VERS_USE_STREAMS)
2253                         UDF_SET_FLAG(sb, UDF_FLAG_USE_STREAMS);
2254         }
2255 
2256         if (!sbi->s_partitions) {
2257                 udf_warn(sb, "No partition found (2)\n");
2258                 ret = -EINVAL;
2259                 goto error_out;
2260         }
2261 
2262         if (sbi->s_partmaps[sbi->s_partition].s_partition_flags &
2263                         UDF_PART_FLAG_READ_ONLY) {
2264                 if (!sb_rdonly(sb)) {
2265                         ret = -EACCES;
2266                         goto error_out;
2267                 }
2268                 UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
2269         }
2270 
2271         ret = udf_find_fileset(sb, &fileset, &rootdir);
2272         if (ret < 0) {
2273                 udf_warn(sb, "No fileset found\n");
2274                 goto error_out;
2275         }
2276 
2277         if (!silent) {
2278                 struct timestamp ts;
2279                 udf_time_to_disk_stamp(&ts, sbi->s_record_time);
2280                 udf_info("Mounting volume '%s', timestamp %04u/%02u/%02u %02u:%02u (%x)\n",
2281                          sbi->s_volume_ident,
2282                          le16_to_cpu(ts.year), ts.month, ts.day,
2283                          ts.hour, ts.minute, le16_to_cpu(ts.typeAndTimezone));
2284         }
2285         if (!sb_rdonly(sb)) {
2286                 udf_open_lvid(sb);
2287                 lvid_open = true;
2288         }
2289 
2290         /* Assign the root inode */
2291         /* assign inodes by physical block number */
2292         /* perhaps it's not extensible enough, but for now ... */
2293         inode = udf_iget(sb, &rootdir);
2294         if (IS_ERR(inode)) {
2295                 udf_err(sb, "Error in udf_iget, block=%u, partition=%u\n",
2296                        rootdir.logicalBlockNum, rootdir.partitionReferenceNum);
2297                 ret = PTR_ERR(inode);
2298                 goto error_out;
2299         }
2300 
2301         /* Allocate a dentry for the root inode */
2302         sb->s_root = d_make_root(inode);
2303         if (!sb->s_root) {
2304                 udf_err(sb, "Couldn't allocate root dentry\n");
2305                 ret = -ENOMEM;
2306                 goto error_out;
2307         }
2308         sb->s_maxbytes = UDF_MAX_FILESIZE;
2309         sb->s_max_links = UDF_MAX_LINKS;
2310         return 0;
2311 
2312 error_out:
2313         iput(sbi->s_vat_inode);
2314         unload_nls(uopt->nls_map);
2315         if (lvid_open)
2316                 udf_close_lvid(sb);
2317         brelse(sbi->s_lvid_bh);
2318         udf_sb_free_partitions(sb);
2319         kfree(sbi);
2320         sb->s_fs_info = NULL;
2321 
2322         return ret;
2323 }
2324 
2325 void _udf_err(struct super_block *sb, const char *function,
2326               const char *fmt, ...)
2327 {
2328         struct va_format vaf;
2329         va_list args;
2330 
2331         va_start(args, fmt);
2332 
2333         vaf.fmt = fmt;
2334         vaf.va = &args;
2335 
2336         pr_err("error (device %s): %s: %pV", sb->s_id, function, &vaf);
2337 
2338         va_end(args);
2339 }
2340 
2341 void _udf_warn(struct super_block *sb, const char *function,
2342                const char *fmt, ...)
2343 {
2344         struct va_format vaf;
2345         va_list args;
2346 
2347         va_start(args, fmt);
2348 
2349         vaf.fmt = fmt;
2350         vaf.va = &args;
2351 
2352         pr_warn("warning (device %s): %s: %pV", sb->s_id, function, &vaf);
2353 
2354         va_end(args);
2355 }
2356 
2357 static void udf_put_super(struct super_block *sb)
2358 {
2359         struct udf_sb_info *sbi;
2360 
2361         sbi = UDF_SB(sb);
2362 
2363         iput(sbi->s_vat_inode);
2364         unload_nls(sbi->s_nls_map);
2365         if (!sb_rdonly(sb))
2366                 udf_close_lvid(sb);
2367         brelse(sbi->s_lvid_bh);
2368         udf_sb_free_partitions(sb);
2369         mutex_destroy(&sbi->s_alloc_mutex);
2370         kfree(sb->s_fs_info);
2371         sb->s_fs_info = NULL;
2372 }
2373 
2374 static int udf_sync_fs(struct super_block *sb, int wait)
2375 {
2376         struct udf_sb_info *sbi = UDF_SB(sb);
2377 
2378         mutex_lock(&sbi->s_alloc_mutex);
2379         if (sbi->s_lvid_dirty) {
2380                 struct buffer_head *bh = sbi->s_lvid_bh;
2381                 struct logicalVolIntegrityDesc *lvid;
2382 
2383                 lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
2384                 udf_finalize_lvid(lvid);
2385 
2386                 /*
2387                  * Blockdevice will be synced later so we don't have to submit
2388                  * the buffer for IO
2389                  */
2390                 mark_buffer_dirty(bh);
2391                 sbi->s_lvid_dirty = 0;
2392         }
2393         mutex_unlock(&sbi->s_alloc_mutex);
2394 
2395         return 0;
2396 }
2397 
2398 static int udf_statfs(struct dentry *dentry, struct kstatfs *buf)
2399 {
2400         struct super_block *sb = dentry->d_sb;
2401         struct udf_sb_info *sbi = UDF_SB(sb);
2402         struct logicalVolIntegrityDescImpUse *lvidiu;
2403         u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
2404 
2405         lvidiu = udf_sb_lvidiu(sb);
2406         buf->f_type = UDF_SUPER_MAGIC;
2407         buf->f_bsize = sb->s_blocksize;
2408         buf->f_blocks = sbi->s_partmaps[sbi->s_partition].s_partition_len;
2409         buf->f_bfree = udf_count_free(sb);
2410         buf->f_bavail = buf->f_bfree;
2411         /*
2412          * Let's pretend each free block is also a free 'inode' since UDF does
2413          * not have a separate preallocated table of inodes.
2414          */
2415         buf->f_files = (lvidiu != NULL ? (le32_to_cpu(lvidiu->numFiles) +
2416                                           le32_to_cpu(lvidiu->numDirs)) : 0)
2417                         + buf->f_bfree;
2418         buf->f_ffree = buf->f_bfree;
2419         buf->f_namelen = UDF_NAME_LEN;
2420         buf->f_fsid = u64_to_fsid(id);
2421 
2422         return 0;
2423 }
2424 
2425 static unsigned int udf_count_free_bitmap(struct super_block *sb,
2426                                           struct udf_bitmap *bitmap)
2427 {
2428         struct buffer_head *bh = NULL;
2429         unsigned int accum = 0;
2430         int index;
2431         udf_pblk_t block = 0, newblock;
2432         struct kernel_lb_addr loc;
2433         uint32_t bytes;
2434         uint8_t *ptr;
2435         uint16_t ident;
2436         struct spaceBitmapDesc *bm;
2437 
2438         loc.logicalBlockNum = bitmap->s_extPosition;
2439         loc.partitionReferenceNum = UDF_SB(sb)->s_partition;
2440         bh = udf_read_ptagged(sb, &loc, 0, &ident);
2441 
2442         if (!bh) {
2443                 udf_err(sb, "udf_count_free failed\n");
2444                 goto out;
2445         } else if (ident != TAG_IDENT_SBD) {
2446                 brelse(bh);
2447                 udf_err(sb, "udf_count_free failed\n");
2448                 goto out;
2449         }
2450 
2451         bm = (struct spaceBitmapDesc *)bh->b_data;
2452         bytes = le32_to_cpu(bm->numOfBytes);
2453         index = sizeof(struct spaceBitmapDesc); /* offset in first block only */
2454         ptr = (uint8_t *)bh->b_data;
2455 
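             /*
              * In the space bitmap a set bit marks a free block, so the free
              * count is the population count over numOfBytes of bitmap data,
              * which may continue into further blocks after the descriptor
              * header.
              */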
2456         while (bytes > 0) {
2457                 u32 cur_bytes = min_t(u32, bytes, sb->s_blocksize - index);
2458                 accum += bitmap_weight((const unsigned long *)(ptr + index),
2459                                         cur_bytes * 8);
2460                 bytes -= cur_bytes;
2461                 if (bytes) {
2462                         brelse(bh);
2463                         newblock = udf_get_lb_pblock(sb, &loc, ++block);
2464                         bh = sb_bread(sb, newblock);
2465                         if (!bh) {
2466                                 udf_debug("read failed\n");
2467                                 goto out;
2468                         }
2469                         index = 0;
2470                         ptr = (uint8_t *)bh->b_data;
2471                 }
2472         }
2473         brelse(bh);
2474 out:
2475         return accum;
2476 }
2477 
2478 static unsigned int udf_count_free_table(struct super_block *sb,
2479                                          struct inode *table)
2480 {
2481         unsigned int accum = 0;
2482         uint32_t elen;
2483         struct kernel_lb_addr eloc;
2484         struct extent_position epos;
2485 
2486         mutex_lock(&UDF_SB(sb)->s_alloc_mutex);
2487         epos.block = UDF_I(table)->i_location;
2488         epos.offset = sizeof(struct unallocSpaceEntry);
2489         epos.bh = NULL;
2490 
2491         while (udf_next_aext(table, &epos, &eloc, &elen, 1) != -1)
2492                 accum += (elen >> table->i_sb->s_blocksize_bits);
2493 
2494         brelse(epos.bh);
2495         mutex_unlock(&UDF_SB(sb)->s_alloc_mutex);
2496 
2497         return accum;
2498 }
2499 
2500 static unsigned int udf_count_free(struct super_block *sb)
2501 {
2502         unsigned int accum = 0;
2503         struct udf_sb_info *sbi = UDF_SB(sb);
2504         struct udf_part_map *map;
2505         unsigned int part = sbi->s_partition;
2506         int ptype = sbi->s_partmaps[part].s_partition_type;
2507 
2508         if (ptype == UDF_METADATA_MAP25) {
2509                 part = sbi->s_partmaps[part].s_type_specific.s_metadata.
2510                                                         s_phys_partition_ref;
2511         } else if (ptype == UDF_VIRTUAL_MAP15 || ptype == UDF_VIRTUAL_MAP20) {
2512                 /*
2513                  * Filesystems with VAT are append-only and we cannot write to
2514                  * them. Let's just report 0 here.
2515                  */
2516                 return 0;
2517         }
2518 
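             /*
              * Prefer the free space figure cached in the LVID; a value of
              * 0xFFFFFFFF apparently means the entry is not maintained, so
              * fall back to counting the unallocated space bitmap or table.
              */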
2519         if (sbi->s_lvid_bh) {
2520                 struct logicalVolIntegrityDesc *lvid =
2521                         (struct logicalVolIntegrityDesc *)
2522                         sbi->s_lvid_bh->b_data;
2523                 if (le32_to_cpu(lvid->numOfPartitions) > part) {
2524                         accum = le32_to_cpu(
2525                                         lvid->freeSpaceTable[part]);
2526                         if (accum == 0xFFFFFFFF)
2527                                 accum = 0;
2528                 }
2529         }
2530 
2531         if (accum)
2532                 return accum;
2533 
2534         map = &sbi->s_partmaps[part];
2535         if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
2536                 accum += udf_count_free_bitmap(sb,
2537                                                map->s_uspace.s_bitmap);
2538         }
2539         if (accum)
2540                 return accum;
2541 
2542         if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
2543                 accum += udf_count_free_table(sb,
2544                                               map->s_uspace.s_table);
2545         }
2546         return accum;
2547 }
2548 
2549 MODULE_AUTHOR("Ben Fennema");
2550 MODULE_DESCRIPTION("Universal Disk Format Filesystem");
2551 MODULE_LICENSE("GPL");
2552 module_init(init_udf_fs)
2553 module_exit(exit_udf_fs)
2554 
