Diffstat (limited to 'fs')
-rw-r--r--  fs/ext2/super.c                         4
-rw-r--r--  fs/notify/fanotify/fanotify.c          28
-rw-r--r--  fs/notify/fanotify/fanotify.h           3
-rw-r--r--  fs/notify/fanotify/fanotify_user.c      2
-rw-r--r--  fs/notify/inotify/inotify_fsnotify.c    8
-rw-r--r--  fs/notify/inotify/inotify_user.c       14
-rw-r--r--  fs/notify/notification.c                3
-rw-r--r--  fs/reiserfs/reiserfs.h                  2
-rw-r--r--  fs/udf/file.c                          10
-rw-r--r--  fs/udf/ialloc.c                         4
-rw-r--r--  fs/udf/inode.c                         23
-rw-r--r--  fs/udf/super.c                        260
-rw-r--r--  fs/udf/udf_sb.h                        15
-rw-r--r--  fs/udf/udfdecl.h                        2
14 files changed, 219 insertions, 159 deletions
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 7666c065b96f..de1694512f1f 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -827,7 +827,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
unsigned long logic_sb_block;
unsigned long offset = 0;
unsigned long def_mount_opts;
- long ret = -EINVAL;
+ long ret = -ENOMEM;
int blocksize = BLOCK_SIZE;
int db_count;
int i, j;
@@ -835,7 +835,6 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
int err;
struct ext2_mount_options opts;
- err = -ENOMEM;
sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
if (!sbi)
goto failed;
@@ -851,6 +850,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
sbi->s_daxdev = dax_dev;
spin_lock_init(&sbi->s_lock);
+ ret = -EINVAL;
/*
* See what the current blocksize for the device is, and
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
index 6702a6a0bbb5..d51e1bb781cf 100644
--- a/fs/notify/fanotify/fanotify.c
+++ b/fs/notify/fanotify/fanotify.c
@@ -139,23 +139,32 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,
return false;
}
-struct fanotify_event_info *fanotify_alloc_event(struct inode *inode, u32 mask,
+struct fanotify_event_info *fanotify_alloc_event(struct fsnotify_group *group,
+ struct inode *inode, u32 mask,
const struct path *path)
{
struct fanotify_event_info *event;
+ gfp_t gfp = GFP_KERNEL;
+
+ /*
+ * For queues with unlimited length lost events are not expected and
+ * can possibly have security implications. Avoid losing events when
+ * memory is short.
+ */
+ if (group->max_events == UINT_MAX)
+ gfp |= __GFP_NOFAIL;
if (fanotify_is_perm_event(mask)) {
struct fanotify_perm_event_info *pevent;
- pevent = kmem_cache_alloc(fanotify_perm_event_cachep,
- GFP_KERNEL);
+ pevent = kmem_cache_alloc(fanotify_perm_event_cachep, gfp);
if (!pevent)
return NULL;
event = &pevent->fae;
pevent->response = 0;
goto init;
}
- event = kmem_cache_alloc(fanotify_event_cachep, GFP_KERNEL);
+ event = kmem_cache_alloc(fanotify_event_cachep, gfp);
if (!event)
return NULL;
init: __maybe_unused
@@ -210,10 +219,17 @@ static int fanotify_handle_event(struct fsnotify_group *group,
return 0;
}
- event = fanotify_alloc_event(inode, mask, data);
+ event = fanotify_alloc_event(group, inode, mask, data);
ret = -ENOMEM;
- if (unlikely(!event))
+ if (unlikely(!event)) {
+ /*
+ * We don't queue overflow events for permission events as
+ * there the access is denied and so no event is in fact lost.
+ */
+ if (!fanotify_is_perm_event(mask))
+ fsnotify_queue_overflow(group);
goto finish;
+ }
fsn_event = &event->fse;
ret = fsnotify_add_event(group, fsn_event, fanotify_merge);
diff --git a/fs/notify/fanotify/fanotify.h b/fs/notify/fanotify/fanotify.h
index 256d9d1ddea9..8609ba06f474 100644
--- a/fs/notify/fanotify/fanotify.h
+++ b/fs/notify/fanotify/fanotify.h
@@ -52,5 +52,6 @@ static inline struct fanotify_event_info *FANOTIFY_E(struct fsnotify_event *fse)
return container_of(fse, struct fanotify_event_info, fse);
}
-struct fanotify_event_info *fanotify_alloc_event(struct inode *inode, u32 mask,
+struct fanotify_event_info *fanotify_alloc_event(struct fsnotify_group *group,
+ struct inode *inode, u32 mask,
const struct path *path);
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index fa803a58a605..ec4d8c59d0e3 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -757,7 +757,7 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
group->fanotify_data.user = user;
atomic_inc(&user->fanotify_listeners);
- oevent = fanotify_alloc_event(NULL, FS_Q_OVERFLOW, NULL);
+ oevent = fanotify_alloc_event(group, NULL, FS_Q_OVERFLOW, NULL);
if (unlikely(!oevent)) {
fd = -ENOMEM;
goto out_destroy_group;
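
For context, and not part of the patch itself: the new __GFP_NOFAIL allocation only applies to groups whose event queue is unlimited, i.e. listeners created with the FAN_UNLIMITED_QUEUE init flag (which requires CAP_SYS_ADMIN); such groups set max_events to UINT_MAX. A minimal userspace sketch of such a listener, with error handling trimmed and the read() loop omitted, could look like this:

/*
 * Illustrative sketch only (not from the patch): an unlimited-queue
 * fanotify listener is the case the new __GFP_NOFAIL path is meant to
 * protect, since such groups are not expected to drop events.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/fanotify.h>

int main(void)
{
	/* FAN_UNLIMITED_QUEUE needs CAP_SYS_ADMIN and makes the kernel set
	 * group->max_events = UINT_MAX, so event allocations in the hunk
	 * above use GFP_KERNEL | __GFP_NOFAIL. */
	int fd = fanotify_init(FAN_CLASS_NOTIF | FAN_UNLIMITED_QUEUE, O_RDONLY);

	if (fd < 0) {
		perror("fanotify_init");
		return 1;
	}

	/* Watch modifications anywhere below the mount containing /tmp. */
	if (fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_MOUNT, FAN_MODIFY,
			  AT_FDCWD, "/tmp") < 0) {
		perror("fanotify_mark");
		return 1;
	}

	/* ... read(fd, buf, sizeof(buf)) loop consuming
	 *     struct fanotify_event_metadata records ... */
	return 0;
}
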
diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
index 8b73332735ba..40dedb37a1f3 100644
--- a/fs/notify/inotify/inotify_fsnotify.c
+++ b/fs/notify/inotify/inotify_fsnotify.c
@@ -99,8 +99,14 @@ int inotify_handle_event(struct fsnotify_group *group,
fsn_mark);
event = kmalloc(alloc_len, GFP_KERNEL);
- if (unlikely(!event))
+ if (unlikely(!event)) {
+ /*
+ * Treat lost event due to ENOMEM the same way as queue
+ * overflow to let userspace know event was lost.
+ */
+ fsnotify_queue_overflow(group);
return -ENOMEM;
+ }
fsn_event = &event->fse;
fsnotify_init_event(fsn_event, inode, mask);
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 43c23653ce2e..ef32f3657958 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -307,6 +307,20 @@ static long inotify_ioctl(struct file *file, unsigned int cmd,
spin_unlock(&group->notification_lock);
ret = put_user(send_len, (int __user *) p);
break;
+#ifdef CONFIG_CHECKPOINT_RESTORE
+ case INOTIFY_IOC_SETNEXTWD:
+ ret = -EINVAL;
+ if (arg >= 1 && arg <= INT_MAX) {
+ struct inotify_group_private_data *data;
+
+ data = &group->inotify_data;
+ spin_lock(&data->idr_lock);
+ idr_set_cursor(&data->idr, (unsigned int)arg);
+ spin_unlock(&data->idr_lock);
+ ret = 0;
+ }
+ break;
+#endif /* CONFIG_CHECKPOINT_RESTORE */
}
return ret;
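
For context, and again not part of the patch: the new INOTIFY_IOC_SETNEXTWD ioctl is aimed at checkpoint/restore tools that need to recreate watches with the same descriptor numbers they had before the checkpoint. The constant itself comes from a companion uapi header change outside fs/, so the sketch below carries its own (assumed) fallback definition; it simply seeds the idr cursor and then adds a watch:

/*
 * Illustrative sketch only: restore-style use of INOTIFY_IOC_SETNEXTWD.
 * The fallback ioctl number mirrors what the companion uapi patch is
 * expected to add to <linux/inotify.h>; treat it as an assumption here.
 */
#include <stdint.h>
#include <stdio.h>
#include <sys/inotify.h>
#include <sys/ioctl.h>

#ifndef INOTIFY_IOC_SETNEXTWD
#define INOTIFY_IOC_SETNEXTWD	_IOW('I', 0, int32_t)
#endif

int main(void)
{
	int fd = inotify_init1(0);

	if (fd < 0) {
		perror("inotify_init1");
		return 1;
	}

	/* Ask the kernel to hand out wd 42 (or the next free slot above it).
	 * Per the hunk above, the accepted range is 1..INT_MAX (EINVAL
	 * otherwise) and the ioctl only exists on kernels built with
	 * CONFIG_CHECKPOINT_RESTORE. */
	if (ioctl(fd, INOTIFY_IOC_SETNEXTWD, 42) < 0)
		perror("ioctl(INOTIFY_IOC_SETNEXTWD)");

	int wd = inotify_add_watch(fd, "/tmp", IN_CREATE);

	printf("new watch descriptor: %d\n", wd);
	return 0;
}
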
diff --git a/fs/notify/notification.c b/fs/notify/notification.c
index 66f85c651c52..3c3e36745f59 100644
--- a/fs/notify/notification.c
+++ b/fs/notify/notification.c
@@ -111,7 +111,8 @@ int fsnotify_add_event(struct fsnotify_group *group,
return 2;
}
- if (group->q_len >= group->max_events) {
+ if (event == group->overflow_event ||
+ group->q_len >= group->max_events) {
ret = 2;
/* Queue overflow event only if it isn't already queued */
if (!list_empty(&group->overflow_event->list)) {
diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
index 48835a659948..ae4811fecc1f 100644
--- a/fs/reiserfs/reiserfs.h
+++ b/fs/reiserfs/reiserfs.h
@@ -1916,7 +1916,7 @@ struct reiserfs_de_head {
/* empty directory contains two entries "." and ".." and their headers */
#define EMPTY_DIR_SIZE \
-(DEH_SIZE * 2 + ROUND_UP (strlen (".")) + ROUND_UP (strlen ("..")))
+(DEH_SIZE * 2 + ROUND_UP (sizeof(".") - 1) + ROUND_UP (sizeof("..") - 1))
/* old format directories have this size when empty */
#define EMPTY_DIR_SIZE_V1 (DEH_SIZE * 2 + 3)
diff --git a/fs/udf/file.c b/fs/udf/file.c
index 356c2bf148a5..cd31e4f6d6da 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -257,12 +257,22 @@ const struct file_operations udf_file_operations = {
static int udf_setattr(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
+ struct super_block *sb = inode->i_sb;
int error;
error = setattr_prepare(dentry, attr);
if (error)
return error;
+ if ((attr->ia_valid & ATTR_UID) &&
+ UDF_QUERY_FLAG(sb, UDF_FLAG_UID_SET) &&
+ !uid_eq(attr->ia_uid, UDF_SB(sb)->s_uid))
+ return -EPERM;
+ if ((attr->ia_valid & ATTR_GID) &&
+ UDF_QUERY_FLAG(sb, UDF_FLAG_GID_SET) &&
+ !gid_eq(attr->ia_gid, UDF_SB(sb)->s_gid))
+ return -EPERM;
+
if ((attr->ia_valid & ATTR_SIZE) &&
attr->ia_size != i_size_read(inode)) {
error = udf_setsize(inode, attr->ia_size);
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index b6e420c1bfeb..b7a0d4b4bda1 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -104,6 +104,10 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode)
}
inode_init_owner(inode, dir, mode);
+ if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_SET))
+ inode->i_uid = sbi->s_uid;
+ if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_SET))
+ inode->i_gid = sbi->s_gid;
iinfo->i_location.logicalBlockNum = block;
iinfo->i_location.partitionReferenceNum =
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index c23744d5ae5c..c80765d62f7e 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1275,6 +1275,7 @@ static int udf_read_inode(struct inode *inode, bool hidden_inode)
unsigned int indirections = 0;
int bs = inode->i_sb->s_blocksize;
int ret = -EIO;
+ uint32_t uid, gid;
reread:
if (iloc->partitionReferenceNum >= sbi->s_partitions) {
@@ -1400,17 +1401,19 @@ reread:
ret = -EIO;
read_lock(&sbi->s_cred_lock);
- i_uid_write(inode, le32_to_cpu(fe->uid));
- if (!uid_valid(inode->i_uid) ||
- UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_IGNORE) ||
+ uid = le32_to_cpu(fe->uid);
+ if (uid == UDF_INVALID_ID ||
UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_SET))
- inode->i_uid = UDF_SB(inode->i_sb)->s_uid;
+ inode->i_uid = sbi->s_uid;
+ else
+ i_uid_write(inode, uid);
- i_gid_write(inode, le32_to_cpu(fe->gid));
- if (!gid_valid(inode->i_gid) ||
- UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_IGNORE) ||
+ gid = le32_to_cpu(fe->gid);
+ if (gid == UDF_INVALID_ID ||
UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_SET))
- inode->i_gid = UDF_SB(inode->i_sb)->s_gid;
+ inode->i_gid = sbi->s_gid;
+ else
+ i_gid_write(inode, gid);
if (fe->icbTag.fileType != ICBTAG_FILE_TYPE_DIRECTORY &&
sbi->s_fmode != UDF_INVALID_MODE)
@@ -1655,12 +1658,12 @@ static int udf_update_inode(struct inode *inode, int do_sync)
}
if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
- fe->uid = cpu_to_le32(-1);
+ fe->uid = cpu_to_le32(UDF_INVALID_ID);
else
fe->uid = cpu_to_le32(i_uid_read(inode));
if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_FORGET))
- fe->gid = cpu_to_le32(-1);
+ fe->gid = cpu_to_le32(UDF_INVALID_ID);
else
fe->gid = cpu_to_le32(i_gid_read(inode));
diff --git a/fs/udf/super.c b/fs/udf/super.c
index f73239a9a97d..7949c338efa5 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -64,14 +64,13 @@
#include <linux/init.h>
#include <linux/uaccess.h>
-#define VDS_POS_PRIMARY_VOL_DESC 0
-#define VDS_POS_UNALLOC_SPACE_DESC 1
-#define VDS_POS_LOGICAL_VOL_DESC 2
-#define VDS_POS_PARTITION_DESC 3
-#define VDS_POS_IMP_USE_VOL_DESC 4
-#define VDS_POS_VOL_DESC_PTR 5
-#define VDS_POS_TERMINATING_DESC 6
-#define VDS_POS_LENGTH 7
+enum {
+ VDS_POS_PRIMARY_VOL_DESC,
+ VDS_POS_UNALLOC_SPACE_DESC,
+ VDS_POS_LOGICAL_VOL_DESC,
+ VDS_POS_IMP_USE_VOL_DESC,
+ VDS_POS_LENGTH
+};
#define VSD_FIRST_SECTOR_OFFSET 32768
#define VSD_MAX_SECTOR_OFFSET 0x800000
@@ -223,10 +222,6 @@ struct udf_options {
unsigned int session;
unsigned int lastblock;
unsigned int anchor;
- unsigned int volume;
- unsigned short partition;
- unsigned int fileset;
- unsigned int rootdir;
unsigned int flags;
umode_t umask;
kgid_t gid;
@@ -349,12 +344,8 @@ static int udf_show_options(struct seq_file *seq, struct dentry *root)
seq_puts(seq, ",shortad");
if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_FORGET))
seq_puts(seq, ",uid=forget");
- if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_IGNORE))
- seq_puts(seq, ",uid=ignore");
if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_FORGET))
seq_puts(seq, ",gid=forget");
- if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_IGNORE))
- seq_puts(seq, ",gid=ignore");
if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_SET))
seq_printf(seq, ",uid=%u", from_kuid(&init_user_ns, sbi->s_uid));
if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_SET))
@@ -371,10 +362,6 @@ static int udf_show_options(struct seq_file *seq, struct dentry *root)
seq_printf(seq, ",lastblock=%u", sbi->s_last_block);
if (sbi->s_anchor != 0)
seq_printf(seq, ",anchor=%u", sbi->s_anchor);
- /*
- * volume, partition, fileset and rootdir seem to be ignored
- * currently
- */
if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8))
seq_puts(seq, ",utf8");
if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP) && sbi->s_nls_map)
@@ -487,14 +474,9 @@ static int udf_parse_options(char *options, struct udf_options *uopt,
int option;
uopt->novrs = 0;
- uopt->partition = 0xFFFF;
uopt->session = 0xFFFFFFFF;
uopt->lastblock = 0;
uopt->anchor = 0;
- uopt->volume = 0xFFFFFFFF;
- uopt->rootdir = 0xFFFFFFFF;
- uopt->fileset = 0xFFFFFFFF;
- uopt->nls_map = NULL;
if (!options)
return 1;
@@ -582,42 +564,30 @@ static int udf_parse_options(char *options, struct udf_options *uopt,
uopt->anchor = option;
break;
case Opt_volume:
- if (match_int(args, &option))
- return 0;
- uopt->volume = option;
- break;
case Opt_partition:
- if (match_int(args, &option))
- return 0;
- uopt->partition = option;
- break;
case Opt_fileset:
- if (match_int(args, &option))
- return 0;
- uopt->fileset = option;
- break;
case Opt_rootdir:
- if (match_int(args, &option))
- return 0;
- uopt->rootdir = option;
+ /* Ignored (never implemented properly) */
break;
case Opt_utf8:
uopt->flags |= (1 << UDF_FLAG_UTF8);
break;
#ifdef CONFIG_UDF_NLS
case Opt_iocharset:
- uopt->nls_map = load_nls(args[0].from);
- uopt->flags |= (1 << UDF_FLAG_NLS_MAP);
+ if (!remount) {
+ if (uopt->nls_map)
+ unload_nls(uopt->nls_map);
+ uopt->nls_map = load_nls(args[0].from);
+ uopt->flags |= (1 << UDF_FLAG_NLS_MAP);
+ }
break;
#endif
- case Opt_uignore:
- uopt->flags |= (1 << UDF_FLAG_UID_IGNORE);
- break;
case Opt_uforget:
uopt->flags |= (1 << UDF_FLAG_UID_FORGET);
break;
+ case Opt_uignore:
case Opt_gignore:
- uopt->flags |= (1 << UDF_FLAG_GID_IGNORE);
+ /* These options are superseded by uid=<number> */
break;
case Opt_gforget:
uopt->flags |= (1 << UDF_FLAG_GID_FORGET);
@@ -660,6 +630,7 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
uopt.umask = sbi->s_umask;
uopt.fmode = sbi->s_fmode;
uopt.dmode = sbi->s_dmode;
+ uopt.nls_map = NULL;
if (!udf_parse_options(options, &uopt, true))
return -EINVAL;
@@ -1592,6 +1563,60 @@ static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_
sbi->s_lvid_bh = NULL;
}
+/*
+ * Step for reallocation of table of partition descriptor sequence numbers.
+ * Must be power of 2.
+ */
+#define PART_DESC_ALLOC_STEP 32
+
+struct desc_seq_scan_data {
+ struct udf_vds_record vds[VDS_POS_LENGTH];
+ unsigned int size_part_descs;
+ struct udf_vds_record *part_descs_loc;
+};
+
+static struct udf_vds_record *handle_partition_descriptor(
+ struct buffer_head *bh,
+ struct desc_seq_scan_data *data)
+{
+ struct partitionDesc *desc = (struct partitionDesc *)bh->b_data;
+ int partnum;
+
+ partnum = le16_to_cpu(desc->partitionNumber);
+ if (partnum >= data->size_part_descs) {
+ struct udf_vds_record *new_loc;
+ unsigned int new_size = ALIGN(partnum, PART_DESC_ALLOC_STEP);
+
+ new_loc = kzalloc(sizeof(*new_loc) * new_size, GFP_KERNEL);
+ if (!new_loc)
+ return ERR_PTR(-ENOMEM);
+ memcpy(new_loc, data->part_descs_loc,
+ data->size_part_descs * sizeof(*new_loc));
+ kfree(data->part_descs_loc);
+ data->part_descs_loc = new_loc;
+ data->size_part_descs = new_size;
+ }
+ return &(data->part_descs_loc[partnum]);
+}
+
+
+static struct udf_vds_record *get_volume_descriptor_record(uint16_t ident,
+ struct buffer_head *bh, struct desc_seq_scan_data *data)
+{
+ switch (ident) {
+ case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */
+ return &(data->vds[VDS_POS_PRIMARY_VOL_DESC]);
+ case TAG_IDENT_IUVD: /* ISO 13346 3/10.4 */
+ return &(data->vds[VDS_POS_IMP_USE_VOL_DESC]);
+ case TAG_IDENT_LVD: /* ISO 13346 3/10.6 */
+ return &(data->vds[VDS_POS_LOGICAL_VOL_DESC]);
+ case TAG_IDENT_USD: /* ISO 13346 3/10.8 */
+ return &(data->vds[VDS_POS_UNALLOC_SPACE_DESC]);
+ case TAG_IDENT_PD: /* ISO 13346 3/10.5 */
+ return handle_partition_descriptor(bh, data);
+ }
+ return NULL;
+}
/*
* Process a main/reserve volume descriptor sequence.
@@ -1608,18 +1633,23 @@ static noinline int udf_process_sequence(
struct kernel_lb_addr *fileset)
{
struct buffer_head *bh = NULL;
- struct udf_vds_record vds[VDS_POS_LENGTH];
struct udf_vds_record *curr;
struct generic_desc *gd;
struct volDescPtr *vdp;
bool done = false;
uint32_t vdsn;
uint16_t ident;
- long next_s = 0, next_e = 0;
int ret;
unsigned int indirections = 0;
-
- memset(vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH);
+ struct desc_seq_scan_data data;
+ unsigned int i;
+
+ memset(data.vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH);
+ data.size_part_descs = PART_DESC_ALLOC_STEP;
+ data.part_descs_loc = kzalloc(sizeof(*data.part_descs_loc) *
+ data.size_part_descs, GFP_KERNEL);
+ if (!data.part_descs_loc)
+ return -ENOMEM;
/*
* Read the main descriptor sequence and find which descriptors
@@ -1628,79 +1658,51 @@ static noinline int udf_process_sequence(
for (; (!done && block <= lastblock); block++) {
bh = udf_read_tagged(sb, block, block, &ident);
- if (!bh) {
- udf_err(sb,
- "Block %llu of volume descriptor sequence is corrupted or we could not read it\n",
- (unsigned long long)block);
- return -EAGAIN;
- }
+ if (!bh)
+ break;
/* Process each descriptor (ISO 13346 3/8.3-8.4) */
gd = (struct generic_desc *)bh->b_data;
vdsn = le32_to_cpu(gd->volDescSeqNum);
switch (ident) {
- case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */
- curr = &vds[VDS_POS_PRIMARY_VOL_DESC];
- if (vdsn >= curr->volDescSeqNum) {
- curr->volDescSeqNum = vdsn;
- curr->block = block;
- }
- break;
case TAG_IDENT_VDP: /* ISO 13346 3/10.3 */
- curr = &vds[VDS_POS_VOL_DESC_PTR];
- if (vdsn >= curr->volDescSeqNum) {
- curr->volDescSeqNum = vdsn;
- curr->block = block;
-
- vdp = (struct volDescPtr *)bh->b_data;
- next_s = le32_to_cpu(
- vdp->nextVolDescSeqExt.extLocation);
- next_e = le32_to_cpu(
- vdp->nextVolDescSeqExt.extLength);
- next_e = next_e >> sb->s_blocksize_bits;
- next_e += next_s;
+ if (++indirections > UDF_MAX_TD_NESTING) {
+ udf_err(sb, "too many Volume Descriptor "
+ "Pointers (max %u supported)\n",
+ UDF_MAX_TD_NESTING);
+ brelse(bh);
+ return -EIO;
}
+
+ vdp = (struct volDescPtr *)bh->b_data;
+ block = le32_to_cpu(vdp->nextVolDescSeqExt.extLocation);
+ lastblock = le32_to_cpu(
+ vdp->nextVolDescSeqExt.extLength) >>
+ sb->s_blocksize_bits;
+ lastblock += block - 1;
+ /* For loop is going to increment 'block' again */
+ block--;
break;
+ case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */
case TAG_IDENT_IUVD: /* ISO 13346 3/10.4 */
- curr = &vds[VDS_POS_IMP_USE_VOL_DESC];
- if (vdsn >= curr->volDescSeqNum) {
- curr->volDescSeqNum = vdsn;
- curr->block = block;
- }
- break;
- case TAG_IDENT_PD: /* ISO 13346 3/10.5 */
- curr = &vds[VDS_POS_PARTITION_DESC];
- if (!curr->block)
- curr->block = block;
- break;
case TAG_IDENT_LVD: /* ISO 13346 3/10.6 */
- curr = &vds[VDS_POS_LOGICAL_VOL_DESC];
- if (vdsn >= curr->volDescSeqNum) {
- curr->volDescSeqNum = vdsn;
- curr->block = block;
- }
- break;
case TAG_IDENT_USD: /* ISO 13346 3/10.8 */
- curr = &vds[VDS_POS_UNALLOC_SPACE_DESC];
+ case TAG_IDENT_PD: /* ISO 13346 3/10.5 */
+ curr = get_volume_descriptor_record(ident, bh, &data);
+ if (IS_ERR(curr)) {
+ brelse(bh);
+ return PTR_ERR(curr);
+ }
+ /* Descriptor we don't care about? */
+ if (!curr)
+ break;
if (vdsn >= curr->volDescSeqNum) {
curr->volDescSeqNum = vdsn;
curr->block = block;
}
break;
case TAG_IDENT_TD: /* ISO 13346 3/10.9 */
- if (++indirections > UDF_MAX_TD_NESTING) {
- udf_err(sb, "too many TDs (max %u supported)\n", UDF_MAX_TD_NESTING);
- brelse(bh);
- return -EIO;
- }
-
- vds[VDS_POS_TERMINATING_DESC].block = block;
- if (next_e) {
- block = next_s;
- lastblock = next_e;
- next_s = next_e = 0;
- } else
- done = true;
+ done = true;
break;
}
brelse(bh);
@@ -1709,31 +1711,27 @@ static noinline int udf_process_sequence(
* Now read interesting descriptors again and process them
* in a suitable order
*/
- if (!vds[VDS_POS_PRIMARY_VOL_DESC].block) {
+ if (!data.vds[VDS_POS_PRIMARY_VOL_DESC].block) {
udf_err(sb, "Primary Volume Descriptor not found!\n");
return -EAGAIN;
}
- ret = udf_load_pvoldesc(sb, vds[VDS_POS_PRIMARY_VOL_DESC].block);
+ ret = udf_load_pvoldesc(sb, data.vds[VDS_POS_PRIMARY_VOL_DESC].block);
if (ret < 0)
return ret;
- if (vds[VDS_POS_LOGICAL_VOL_DESC].block) {
+ if (data.vds[VDS_POS_LOGICAL_VOL_DESC].block) {
ret = udf_load_logicalvol(sb,
- vds[VDS_POS_LOGICAL_VOL_DESC].block,
- fileset);
+ data.vds[VDS_POS_LOGICAL_VOL_DESC].block,
+ fileset);
if (ret < 0)
return ret;
}
- if (vds[VDS_POS_PARTITION_DESC].block) {
- /*
- * We rescan the whole descriptor sequence to find
- * partition descriptor blocks and process them.
- */
- for (block = vds[VDS_POS_PARTITION_DESC].block;
- block < vds[VDS_POS_TERMINATING_DESC].block;
- block++) {
- ret = udf_load_partdesc(sb, block);
+ /* Now handle prevailing Partition Descriptors */
+ for (i = 0; i < data.size_part_descs; i++) {
+ if (data.part_descs_loc[i].block) {
+ ret = udf_load_partdesc(sb,
+ data.part_descs_loc[i].block);
if (ret < 0)
return ret;
}
@@ -1760,13 +1758,13 @@ static int udf_load_sequence(struct super_block *sb, struct buffer_head *bh,
main_s = le32_to_cpu(anchor->mainVolDescSeqExt.extLocation);
main_e = le32_to_cpu(anchor->mainVolDescSeqExt.extLength);
main_e = main_e >> sb->s_blocksize_bits;
- main_e += main_s;
+ main_e += main_s - 1;
/* Locate the reserve sequence */
reserve_s = le32_to_cpu(anchor->reserveVolDescSeqExt.extLocation);
reserve_e = le32_to_cpu(anchor->reserveVolDescSeqExt.extLength);
reserve_e = reserve_e >> sb->s_blocksize_bits;
- reserve_e += reserve_s;
+ reserve_e += reserve_s - 1;
/* Process the main & reserve sequences */
/* responsible for finding the PartitionDesc(s) */
@@ -1994,7 +1992,10 @@ static void udf_open_lvid(struct super_block *sb)
lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
ktime_get_real_ts(&ts);
udf_time_to_disk_stamp(&lvid->recordingDateAndTime, ts);
- lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_OPEN);
+ if (le32_to_cpu(lvid->integrityType) == LVID_INTEGRITY_TYPE_CLOSE)
+ lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_OPEN);
+ else
+ UDF_SET_FLAG(sb, UDF_FLAG_INCONSISTENT);
lvid->descTag.descCRC = cpu_to_le16(
crc_itu_t(0, (char *)lvid + sizeof(struct tag),
@@ -2034,7 +2035,8 @@ static void udf_close_lvid(struct super_block *sb)
lvidiu->minUDFReadRev = cpu_to_le16(sbi->s_udfrev);
if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFWriteRev))
lvidiu->minUDFWriteRev = cpu_to_le16(sbi->s_udfrev);
- lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_CLOSE);
+ if (!UDF_QUERY_FLAG(sb, UDF_FLAG_INCONSISTENT))
+ lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_CLOSE);
lvid->descTag.descCRC = cpu_to_le16(
crc_itu_t(0, (char *)lvid + sizeof(struct tag),
@@ -2091,11 +2093,13 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
bool lvid_open = false;
uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT);
- uopt.uid = INVALID_UID;
- uopt.gid = INVALID_GID;
+ /* By default we'll use overflow[ug]id when UDF inode [ug]id == -1 */
+ uopt.uid = make_kuid(current_user_ns(), overflowuid);
+ uopt.gid = make_kgid(current_user_ns(), overflowgid);
uopt.umask = 0;
uopt.fmode = UDF_INVALID_MODE;
uopt.dmode = UDF_INVALID_MODE;
+ uopt.nls_map = NULL;
sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
if (!sbi)
@@ -2276,8 +2280,8 @@ error_out:
iput(sbi->s_vat_inode);
parse_options_failure:
#ifdef CONFIG_UDF_NLS
- if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
- unload_nls(sbi->s_nls_map);
+ if (uopt.nls_map)
+ unload_nls(uopt.nls_map);
#endif
if (lvid_open)
udf_close_lvid(sb);
diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h
index 68c9f1d618f5..9dd3e1b9619e 100644
--- a/fs/udf/udf_sb.h
+++ b/fs/udf/udf_sb.h
@@ -23,14 +23,13 @@
#define UDF_FLAG_NLS_MAP 9
#define UDF_FLAG_UTF8 10
#define UDF_FLAG_UID_FORGET 11 /* save -1 for uid to disk */
-#define UDF_FLAG_UID_IGNORE 12 /* use sb uid instead of on disk uid */
-#define UDF_FLAG_GID_FORGET 13
-#define UDF_FLAG_GID_IGNORE 14
-#define UDF_FLAG_UID_SET 15
-#define UDF_FLAG_GID_SET 16
-#define UDF_FLAG_SESSION_SET 17
-#define UDF_FLAG_LASTBLOCK_SET 18
-#define UDF_FLAG_BLOCKSIZE_SET 19
+#define UDF_FLAG_GID_FORGET 12
+#define UDF_FLAG_UID_SET 13
+#define UDF_FLAG_GID_SET 14
+#define UDF_FLAG_SESSION_SET 15
+#define UDF_FLAG_LASTBLOCK_SET 16
+#define UDF_FLAG_BLOCKSIZE_SET 17
+#define UDF_FLAG_INCONSISTENT 18
#define UDF_PART_FLAG_UNALLOC_BITMAP 0x0001
#define UDF_PART_FLAG_UNALLOC_TABLE 0x0002
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index f5e0fe78979e..68e8a64d22e0 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -48,6 +48,8 @@ extern __printf(3, 4) void _udf_warn(struct super_block *sb,
#define UDF_EXTENT_LENGTH_MASK 0x3FFFFFFF
#define UDF_EXTENT_FLAG_MASK 0xC0000000
+#define UDF_INVALID_ID ((uint32_t)-1)
+
#define UDF_NAME_PAD 4
#define UDF_NAME_LEN 254
#define UDF_NAME_LEN_CS0 255