Diffstat (limited to 'fs')
-rw-r--r-- fs/Kconfig.binfmt | 2
-rw-r--r-- fs/afs/Makefile | 2
-rw-r--r-- fs/afs/afs.h | 3
-rw-r--r-- fs/afs/afs_vl.h | 1
-rw-r--r-- fs/afs/callback.c | 345
-rw-r--r-- fs/afs/cell.c | 10
-rw-r--r-- fs/afs/cmservice.c | 67
-rw-r--r-- fs/afs/dir.c | 1253
-rw-r--r-- fs/afs/dir_silly.c | 190
-rw-r--r-- fs/afs/dynroot.c | 93
-rw-r--r-- fs/afs/file.c | 62
-rw-r--r-- fs/afs/flock.c | 114
-rw-r--r-- fs/afs/fs_operation.c | 239
-rw-r--r-- fs/afs/fs_probe.c | 339
-rw-r--r-- fs/afs/fsclient.c | 1305
-rw-r--r-- fs/afs/inode.c | 493
-rw-r--r-- fs/afs/internal.h | 523
-rw-r--r-- fs/afs/main.c | 6
-rw-r--r-- fs/afs/proc.c | 43
-rw-r--r-- fs/afs/protocol_yfs.h | 2
-rw-r--r-- fs/afs/rotate.c | 447
-rw-r--r-- fs/afs/rxrpc.c | 51
-rw-r--r-- fs/afs/security.c | 8
-rw-r--r-- fs/afs/server.c | 299
-rw-r--r-- fs/afs/server_list.c | 40
-rw-r--r-- fs/afs/super.c | 107
-rw-r--r-- fs/afs/vl_alias.c | 383
-rw-r--r-- fs/afs/vl_rotate.c | 4
-rw-r--r-- fs/afs/vlclient.c | 146
-rw-r--r-- fs/afs/volume.c | 154
-rw-r--r-- fs/afs/write.c | 149
-rw-r--r-- fs/afs/xattr.c | 300
-rw-r--r-- fs/afs/yfsclient.c | 916
-rw-r--r-- fs/aio.c | 5
-rw-r--r-- fs/bad_inode.c | 1
-rw-r--r-- fs/binfmt_aout.c | 3
-rw-r--r-- fs/binfmt_elf.c | 27
-rw-r--r-- fs/binfmt_elf_fdpic.c | 40
-rw-r--r-- fs/binfmt_em86.c | 19
-rw-r--r-- fs/binfmt_flat.c | 28
-rw-r--r-- fs/binfmt_misc.c | 73
-rw-r--r-- fs/binfmt_script.c | 88
-rw-r--r-- fs/btrfs/Kconfig | 1
-rw-r--r-- fs/btrfs/backref.c | 837
-rw-r--r-- fs/btrfs/backref.h | 297
-rw-r--r-- fs/btrfs/block-group.c | 233
-rw-r--r-- fs/btrfs/block-group.h | 14
-rw-r--r-- fs/btrfs/block-rsv.c | 5
-rw-r--r-- fs/btrfs/btrfs_inode.h | 44
-rw-r--r-- fs/btrfs/compression.c | 36
-rw-r--r-- fs/btrfs/compression.h | 2
-rw-r--r-- fs/btrfs/ctree.c | 180
-rw-r--r-- fs/btrfs/ctree.h | 121
-rw-r--r-- fs/btrfs/disk-io.c | 93
-rw-r--r-- fs/btrfs/disk-io.h | 4
-rw-r--r-- fs/btrfs/export.c | 17
-rw-r--r-- fs/btrfs/extent-io-tree.h | 1
-rw-r--r-- fs/btrfs/extent-tree.c | 23
-rw-r--r-- fs/btrfs/extent_io.c | 288
-rw-r--r-- fs/btrfs/extent_io.h | 68
-rw-r--r-- fs/btrfs/file-item.c | 62
-rw-r--r-- fs/btrfs/file.c | 111
-rw-r--r-- fs/btrfs/free-space-cache.c | 81
-rw-r--r-- fs/btrfs/inode.c | 1323
-rw-r--r-- fs/btrfs/ioctl.c | 102
-rw-r--r-- fs/btrfs/locking.c | 1
-rw-r--r-- fs/btrfs/misc.h | 54
-rw-r--r-- fs/btrfs/props.c | 9
-rw-r--r-- fs/btrfs/qgroup.c | 14
-rw-r--r-- fs/btrfs/relocation.c | 1319
-rw-r--r-- fs/btrfs/root-tree.c | 12
-rw-r--r-- fs/btrfs/scrub.c | 59
-rw-r--r-- fs/btrfs/send.c | 89
-rw-r--r-- fs/btrfs/space-info.c | 81
-rw-r--r-- fs/btrfs/space-info.h | 1
-rw-r--r-- fs/btrfs/struct-funcs.c | 223
-rw-r--r-- fs/btrfs/super.c | 38
-rw-r--r-- fs/btrfs/transaction.c | 78
-rw-r--r-- fs/btrfs/transaction.h | 3
-rw-r--r-- fs/btrfs/tree-checker.c | 4
-rw-r--r-- fs/btrfs/tree-defrag.c | 2
-rw-r--r-- fs/btrfs/tree-log.c | 192
-rw-r--r-- fs/btrfs/uuid-tree.c | 6
-rw-r--r-- fs/btrfs/volumes.c | 80
-rw-r--r-- fs/ceph/Makefile | 2
-rw-r--r-- fs/ceph/acl.c | 2
-rw-r--r-- fs/ceph/addr.c | 20
-rw-r--r-- fs/ceph/caps.c | 425
-rw-r--r-- fs/ceph/debugfs.c | 100
-rw-r--r-- fs/ceph/dir.c | 26
-rw-r--r-- fs/ceph/export.c | 9
-rw-r--r-- fs/ceph/file.c | 30
-rw-r--r-- fs/ceph/inode.c | 4
-rw-r--r-- fs/ceph/mds_client.c | 48
-rw-r--r-- fs/ceph/mds_client.h | 15
-rw-r--r-- fs/ceph/metric.c | 148
-rw-r--r-- fs/ceph/metric.h | 62
-rw-r--r-- fs/ceph/quota.c | 62
-rw-r--r-- fs/ceph/super.h | 34
-rw-r--r-- fs/ceph/xattr.c | 4
-rw-r--r-- fs/cifs/cifs_debug.c | 6
-rw-r--r-- fs/cifs/cifs_debug.h | 145
-rw-r--r-- fs/cifs/cifsencrypt.c | 8
-rw-r--r-- fs/cifs/cifsfs.c | 2
-rw-r--r-- fs/cifs/cifsfs.h | 2
-rw-r--r-- fs/cifs/cifsglob.h | 20
-rw-r--r-- fs/cifs/cifsproto.h | 36
-rw-r--r-- fs/cifs/cifsroot.c | 6
-rw-r--r-- fs/cifs/cifssmb.c | 81
-rw-r--r-- fs/cifs/connect.c | 140
-rw-r--r-- fs/cifs/dfs_cache.c | 14
-rw-r--r-- fs/cifs/file.c | 70
-rw-r--r-- fs/cifs/inode.c | 18
-rw-r--r-- fs/cifs/link.c | 8
-rw-r--r-- fs/cifs/misc.c | 60
-rw-r--r-- fs/cifs/netmisc.c | 6
-rw-r--r-- fs/cifs/readdir.c | 10
-rw-r--r-- fs/cifs/sess.c | 55
-rw-r--r-- fs/cifs/smb1ops.c | 2
-rw-r--r-- fs/cifs/smb2inode.c | 137
-rw-r--r-- fs/cifs/smb2misc.c | 20
-rw-r--r-- fs/cifs/smb2ops.c | 174
-rw-r--r-- fs/cifs/smb2pdu.c | 499
-rw-r--r-- fs/cifs/smb2pdu.h | 13
-rw-r--r-- fs/cifs/smb2proto.h | 25
-rw-r--r-- fs/cifs/smbdirect.c | 165
-rw-r--r-- fs/cifs/transport.c | 83
-rw-r--r-- fs/compat_binfmt_elf.c | 5
-rw-r--r-- fs/coredump.c | 8
-rw-r--r-- fs/dcache.c | 21
-rw-r--r-- fs/debugfs/internal.h | 2
-rw-r--r-- fs/direct-io.c | 19
-rw-r--r-- fs/dlm/dlm_internal.h | 7
-rw-r--r-- fs/dlm/lockspace.c | 18
-rw-r--r-- fs/dlm/lowcomms.c | 177
-rw-r--r-- fs/dlm/rcom.c | 2
-rw-r--r-- fs/dlm/user.c | 2
-rw-r--r-- fs/drop_caches.c | 2
-rw-r--r-- fs/erofs/data.c | 4
-rw-r--r-- fs/erofs/inode.c | 6
-rw-r--r-- fs/erofs/internal.h | 27
-rw-r--r-- fs/erofs/namei.c | 2
-rw-r--r-- fs/erofs/super.c | 255
-rw-r--r-- fs/erofs/xattr.c | 4
-rw-r--r-- fs/erofs/xattr.h | 7
-rw-r--r-- fs/erofs/zdata.c | 4
-rw-r--r-- fs/eventfd.c | 64
-rw-r--r-- fs/exec.c | 397
-rw-r--r-- fs/exfat/Kconfig | 7
-rw-r--r-- fs/exfat/balloc.c | 8
-rw-r--r-- fs/exfat/dir.c | 222
-rw-r--r-- fs/exfat/exfat_fs.h | 48
-rw-r--r-- fs/exfat/exfat_raw.h | 85
-rw-r--r-- fs/exfat/fatent.c | 17
-rw-r--r-- fs/exfat/file.c | 25
-rw-r--r-- fs/exfat/inode.c | 57
-rw-r--r-- fs/exfat/misc.c | 46
-rw-r--r-- fs/exfat/namei.c | 63
-rw-r--r-- fs/exfat/nls.c | 52
-rw-r--r-- fs/exfat/super.c | 262
-rw-r--r-- fs/ext2/file.c | 4
-rw-r--r-- fs/ext2/inode.c | 1
-rw-r--r-- fs/ext2/namei.c | 6
-rw-r--r-- fs/ext2/symlink.c | 4
-rw-r--r-- fs/ext2/xattr.h | 1
-rw-r--r-- fs/ext4/Kconfig | 6
-rw-r--r-- fs/ext4/acl.c | 5
-rw-r--r-- fs/ext4/balloc.c | 5
-rw-r--r-- fs/ext4/ext4.h | 40
-rw-r--r-- fs/ext4/ext4_extents.h | 9
-rw-r--r-- fs/ext4/ext4_jbd2.h | 11
-rw-r--r-- fs/ext4/extents.c | 444
-rw-r--r-- fs/ext4/extents_status.c | 2
-rw-r--r-- fs/ext4/file.c | 17
-rw-r--r-- fs/ext4/fsync.c | 28
-rw-r--r-- fs/ext4/ialloc.c | 1
-rw-r--r-- fs/ext4/indirect.c | 4
-rw-r--r-- fs/ext4/inline.c | 6
-rw-r--r-- fs/ext4/inode.c | 152
-rw-r--r-- fs/ext4/ioctl.c | 8
-rw-r--r-- fs/ext4/mballoc.c | 512
-rw-r--r-- fs/ext4/mballoc.h | 16
-rw-r--r-- fs/ext4/migrate.c | 12
-rw-r--r-- fs/ext4/namei.c | 76
-rw-r--r-- fs/ext4/super.c | 33
-rw-r--r-- fs/ext4/xattr.c | 13
-rw-r--r-- fs/f2fs/Kconfig | 10
-rw-r--r-- fs/f2fs/acl.h | 2
-rw-r--r-- fs/f2fs/checkpoint.c | 37
-rw-r--r-- fs/f2fs/compress.c | 182
-rw-r--r-- fs/f2fs/data.c | 166
-rw-r--r-- fs/f2fs/dir.c | 374
-rw-r--r-- fs/f2fs/f2fs.h | 171
-rw-r--r-- fs/f2fs/file.c | 401
-rw-r--r-- fs/f2fs/gc.c | 125
-rw-r--r-- fs/f2fs/gc.h | 2
-rw-r--r-- fs/f2fs/hash.c | 76
-rw-r--r-- fs/f2fs/inline.c | 50
-rw-r--r-- fs/f2fs/namei.c | 19
-rw-r--r-- fs/f2fs/node.c | 101
-rw-r--r-- fs/f2fs/node.h | 5
-rw-r--r-- fs/f2fs/recovery.c | 51
-rw-r--r-- fs/f2fs/segment.c | 40
-rw-r--r-- fs/f2fs/segment.h | 2
-rw-r--r-- fs/f2fs/super.c | 88
-rw-r--r-- fs/f2fs/sysfs.c | 97
-rw-r--r-- fs/f2fs/trace.h | 2
-rw-r--r-- fs/f2fs/xattr.h | 8
-rw-r--r-- fs/fat/fatent.c | 103
-rw-r--r-- fs/fat/inode.c | 6
-rw-r--r-- fs/file_table.c | 4
-rw-r--r-- fs/fs-writeback.c | 1
-rw-r--r-- fs/fscache/main.c | 3
-rw-r--r-- fs/fuse/dev.c | 18
-rw-r--r-- fs/fuse/dir.c | 12
-rw-r--r-- fs/fuse/file.c | 120
-rw-r--r-- fs/fuse/fuse_i.h | 3
-rw-r--r-- fs/fuse/inode.c | 26
-rw-r--r-- fs/fuse/virtio_fs.c | 115
-rw-r--r-- fs/gfs2/export.c | 4
-rw-r--r-- fs/gfs2/glock.c | 208
-rw-r--r-- fs/gfs2/glock.h | 16
-rw-r--r-- fs/gfs2/glops.c | 21
-rw-r--r-- fs/gfs2/incore.h | 9
-rw-r--r-- fs/gfs2/inode.c | 48
-rw-r--r-- fs/gfs2/inode.h | 2
-rw-r--r-- fs/gfs2/log.c | 56
-rw-r--r-- fs/gfs2/main.c | 9
-rw-r--r-- fs/gfs2/ops_fstype.c | 2
-rw-r--r-- fs/gfs2/rgrp.c | 2
-rw-r--r-- fs/gfs2/super.c | 72
-rw-r--r-- fs/gfs2/trans.c | 21
-rw-r--r-- fs/gfs2/trans.h | 1
-rw-r--r-- fs/gfs2/util.c | 1
-rw-r--r-- fs/gfs2/util.h | 1
-rw-r--r-- fs/hpfs/buffer.c | 2
-rw-r--r-- fs/hpfs/file.c | 1
-rw-r--r-- fs/hugetlbfs/inode.c | 69
-rw-r--r-- fs/inode.c | 114
-rw-r--r-- fs/internal.h | 2
-rw-r--r-- fs/io-wq.c | 15
-rw-r--r-- fs/io_uring.c | 13
-rw-r--r-- fs/ioctl.c | 82
-rw-r--r-- fs/iomap/direct-io.c | 17
-rw-r--r-- fs/iomap/fiemap.c | 11
-rw-r--r-- fs/jbd2/transaction.c | 14
-rw-r--r-- fs/jfs/jfs_dtree.c | 2
-rw-r--r-- fs/jfs/jfs_xattr.h | 4
-rw-r--r-- fs/kernfs/file.c | 6
-rw-r--r-- fs/locks.c | 7
-rw-r--r-- fs/namei.c | 46
-rw-r--r-- fs/namespace.c | 36
-rw-r--r-- fs/nfs/direct.c | 4
-rw-r--r-- fs/nfs/dns_resolve.c | 1
-rw-r--r-- fs/nfs/inode.c | 14
-rw-r--r-- fs/nfs/nfs3proc.c | 2
-rw-r--r-- fs/nfs/nfs4proc.c | 2
-rw-r--r-- fs/nfs/nfstrace.h | 106
-rw-r--r-- fs/nfs/pagelist.c | 2
-rw-r--r-- fs/nfs/read.c | 2
-rw-r--r-- fs/nfs/sysfs.h | 2
-rw-r--r-- fs/nfsd/cache.h | 2
-rw-r--r-- fs/nfsd/netns.h | 1
-rw-r--r-- fs/nfsd/nfs4callback.c | 39
-rw-r--r-- fs/nfsd/nfs4proc.c | 9
-rw-r--r-- fs/nfsd/nfs4state.c | 166
-rw-r--r-- fs/nfsd/nfscache.c | 89
-rw-r--r-- fs/nfsd/nfsctl.c | 32
-rw-r--r-- fs/nfsd/nfsd.h | 2
-rw-r--r-- fs/nfsd/nfssvc.c | 6
-rw-r--r-- fs/nfsd/state.h | 7
-rw-r--r-- fs/nfsd/trace.h | 345
-rw-r--r-- fs/nilfs2/inode.c | 3
-rw-r--r-- fs/nilfs2/segment.c | 2
-rw-r--r-- fs/notify/fanotify/fanotify.c | 9
-rw-r--r-- fs/notify/fanotify/fanotify.h | 2
-rw-r--r-- fs/notify/fanotify/fanotify_user.c | 8
-rw-r--r-- fs/notify/fdinfo.c | 1
-rw-r--r-- fs/notify/group.c | 1
-rw-r--r-- fs/notify/inotify/inotify_user.c | 4
-rw-r--r-- fs/notify/mark.c | 6
-rw-r--r-- fs/nsfs.c | 5
-rw-r--r-- fs/ocfs2/cluster/tcp.c | 42
-rw-r--r-- fs/ocfs2/extent_map.c | 4
-rw-r--r-- fs/ocfs2/mmap.c | 2
-rw-r--r-- fs/orangefs/orangefs-bufmap.c | 9
-rw-r--r-- fs/orangefs/orangefs-mod.c | 2
-rw-r--r-- fs/overlayfs/copy_up.c | 9
-rw-r--r-- fs/overlayfs/dir.c | 51
-rw-r--r-- fs/overlayfs/export.c | 24
-rw-r--r-- fs/overlayfs/file.c | 28
-rw-r--r-- fs/overlayfs/inode.c | 22
-rw-r--r-- fs/overlayfs/namei.c | 138
-rw-r--r-- fs/overlayfs/overlayfs.h | 11
-rw-r--r-- fs/overlayfs/ovl_entry.h | 10
-rw-r--r-- fs/overlayfs/readdir.c | 57
-rw-r--r-- fs/overlayfs/super.c | 243
-rw-r--r-- fs/overlayfs/util.c | 36
-rw-r--r-- fs/pipe.c | 96
-rw-r--r-- fs/posix_acl.c | 2
-rw-r--r-- fs/proc/array.c | 11
-rw-r--r-- fs/proc/base.c | 111
-rw-r--r-- fs/proc/generic.c | 9
-rw-r--r-- fs/proc/inode.c | 30
-rw-r--r-- fs/proc/meminfo.c | 1
-rw-r--r-- fs/proc/nommu.c | 1
-rw-r--r-- fs/proc/proc_net.c | 19
-rw-r--r-- fs/proc/proc_sysctl.c | 196
-rw-r--r-- fs/proc/root.c | 133
-rw-r--r-- fs/proc/self.c | 8
-rw-r--r-- fs/proc/task_mmu.c | 34
-rw-r--r-- fs/proc/task_nommu.c | 18
-rw-r--r-- fs/proc/thread_self.c | 8
-rw-r--r-- fs/proc/vmcore.c | 1
-rw-r--r-- fs/proc_namespace.c | 14
-rw-r--r-- fs/quota/dquot.c | 2
-rw-r--r-- fs/reiserfs/inode.c | 2
-rw-r--r-- fs/select.c | 112
-rw-r--r-- fs/seq_file.c | 7
-rw-r--r-- fs/splice.c | 81
-rw-r--r-- fs/stat.c | 3
-rw-r--r-- fs/super.c | 2
-rw-r--r-- fs/sync.c | 3
-rw-r--r-- fs/sysfs/file.c | 1
-rw-r--r-- fs/userfaultfd.c | 46
-rw-r--r-- fs/xfs/Makefile | 5
-rw-r--r-- fs/xfs/kmem.h | 8
-rw-r--r-- fs/xfs/libxfs/xfs_ag_resv.h | 2
-rw-r--r-- fs/xfs/libxfs/xfs_alloc.h | 2
-rw-r--r-- fs/xfs/libxfs/xfs_alloc_btree.h | 2
-rw-r--r-- fs/xfs/libxfs/xfs_attr.c | 16
-rw-r--r-- fs/xfs/libxfs/xfs_attr.h | 2
-rw-r--r-- fs/xfs/libxfs/xfs_attr_leaf.c | 59
-rw-r--r-- fs/xfs/libxfs/xfs_attr_leaf.h | 2
-rw-r--r-- fs/xfs/libxfs/xfs_attr_remote.h | 2
-rw-r--r-- fs/xfs/libxfs/xfs_attr_sf.h | 2
-rw-r--r-- fs/xfs/libxfs/xfs_bit.h | 2
-rw-r--r-- fs/xfs/libxfs/xfs_bmap.c | 310
-rw-r--r-- fs/xfs/libxfs/xfs_bmap.h | 2
-rw-r--r-- fs/xfs/libxfs/xfs_bmap_btree.c | 5
-rw-r--r-- fs/xfs/libxfs/xfs_bmap_btree.h | 2
-rw-r--r-- fs/xfs/libxfs/xfs_btree.h | 2
-rw-r--r-- fs/xfs/libxfs/xfs_da_btree.h | 2
-rw-r--r-- fs/xfs/libxfs/xfs_da_format.h | 2
-rw-r--r-- fs/xfs/libxfs/xfs_defer.c | 162
-rw-r--r-- fs/xfs/libxfs/xfs_defer.h | 26
-rw-r--r-- fs/xfs/libxfs/xfs_dir2.c | 8
-rw-r--r-- fs/xfs/libxfs/xfs_dir2.h | 2
-rw-r--r-- fs/xfs/libxfs/xfs_dir2_block.c | 2
-rw-r--r-- fs/xfs/libxfs/xfs_dir2_priv.h | 2
-rw-r--r-- fs/xfs/libxfs/xfs_dir2_sf.c | 13
-rw-r--r-- fs/xfs/libxfs/xfs_errortag.h | 6
-rw-r--r-- fs/xfs/libxfs/xfs_format.h | 9
-rw-r--r-- fs/xfs/libxfs/xfs_fs.h | 2
-rw-r--r-- fs/xfs/libxfs/xfs_health.h | 2
-rw-r--r-- fs/xfs/libxfs/xfs_inode_buf.c | 186
-rw-r--r-- fs/xfs/libxfs/xfs_inode_buf.h | 10
-rw-r--r-- fs/xfs/libxfs/xfs_inode_fork.c | 320
-rw-r--r-- fs/xfs/libxfs/xfs_inode_fork.h | 68
-rw-r--r-- fs/xfs/libxfs/xfs_log_recover.h | 83
-rw-r--r-- fs/xfs/libxfs/xfs_quota_defs.h | 1
-rw-r--r-- fs/xfs/libxfs/xfs_rtbitmap.c | 2
-rw-r--r-- fs/xfs/libxfs/xfs_sb.c | 2
-rw-r--r-- fs/xfs/libxfs/xfs_symlink_remote.c | 16
-rw-r--r-- fs/xfs/libxfs/xfs_trans_inode.c | 2
-rw-r--r-- fs/xfs/scrub/bmap.c | 40
-rw-r--r-- fs/xfs/scrub/dabtree.c | 2
-rw-r--r-- fs/xfs/scrub/dir.c | 7
-rw-r--r-- fs/xfs/scrub/ialloc.c | 3
-rw-r--r-- fs/xfs/scrub/parent.c | 2
-rw-r--r-- fs/xfs/xfs_aops.c | 2
-rw-r--r-- fs/xfs/xfs_attr_inactive.c | 9
-rw-r--r-- fs/xfs/xfs_attr_list.c | 4
-rw-r--r-- fs/xfs/xfs_bmap_item.c | 237
-rw-r--r-- fs/xfs/xfs_bmap_item.h | 11
-rw-r--r-- fs/xfs/xfs_bmap_util.c | 79
-rw-r--r-- fs/xfs/xfs_buf.c | 70
-rw-r--r-- fs/xfs/xfs_buf.h | 2
-rw-r--r-- fs/xfs/xfs_buf_item.c | 106
-rw-r--r-- fs/xfs/xfs_buf_item.h | 2
-rw-r--r-- fs/xfs/xfs_buf_item_recover.c | 984
-rw-r--r-- fs/xfs/xfs_dir2_readdir.c | 2
-rw-r--r-- fs/xfs/xfs_dquot.c | 118
-rw-r--r-- fs/xfs/xfs_dquot.h | 2
-rw-r--r-- fs/xfs/xfs_dquot_item.c | 17
-rw-r--r-- fs/xfs/xfs_dquot_item_recover.c | 201
-rw-r--r-- fs/xfs/xfs_error.c | 3
-rw-r--r-- fs/xfs/xfs_extfree_item.c | 216
-rw-r--r-- fs/xfs/xfs_extfree_item.h | 25
-rw-r--r-- fs/xfs/xfs_file.c | 4
-rw-r--r-- fs/xfs/xfs_fsops.c | 5
-rw-r--r-- fs/xfs/xfs_icache.c | 345
-rw-r--r-- fs/xfs/xfs_icache.h | 51
-rw-r--r-- fs/xfs/xfs_icreate_item.c | 152
-rw-r--r-- fs/xfs/xfs_inode.c | 277
-rw-r--r-- fs/xfs/xfs_inode.h | 9
-rw-r--r-- fs/xfs/xfs_inode_item.c | 54
-rw-r--r-- fs/xfs/xfs_inode_item.h | 6
-rw-r--r-- fs/xfs/xfs_inode_item_recover.c | 394
-rw-r--r-- fs/xfs/xfs_ioctl.c | 208
-rw-r--r-- fs/xfs/xfs_iomap.c | 113
-rw-r--r-- fs/xfs/xfs_iops.c | 84
-rw-r--r-- fs/xfs/xfs_itable.c | 6
-rw-r--r-- fs/xfs/xfs_log_recover.c | 2561
-rw-r--r-- fs/xfs/xfs_message.c | 22
-rw-r--r-- fs/xfs/xfs_message.h | 24
-rw-r--r-- fs/xfs/xfs_mount.c | 40
-rw-r--r-- fs/xfs/xfs_mount.h | 157
-rw-r--r-- fs/xfs/xfs_pnfs.c | 5
-rw-r--r-- fs/xfs/xfs_qm.c | 66
-rw-r--r-- fs/xfs/xfs_qm.h | 78
-rw-r--r-- fs/xfs/xfs_qm_syscalls.c | 83
-rw-r--r-- fs/xfs/xfs_quotaops.c | 30
-rw-r--r-- fs/xfs/xfs_refcount_item.c | 252
-rw-r--r-- fs/xfs/xfs_refcount_item.h | 11
-rw-r--r-- fs/xfs/xfs_rmap_item.c | 229
-rw-r--r-- fs/xfs/xfs_rmap_item.h | 13
-rw-r--r-- fs/xfs/xfs_super.c | 70
-rw-r--r-- fs/xfs/xfs_symlink.c | 10
-rw-r--r-- fs/xfs/xfs_sysctl.c | 4
-rw-r--r-- fs/xfs/xfs_trace.h | 4
-rw-r--r-- fs/xfs/xfs_trans.c | 203
-rw-r--r-- fs/xfs/xfs_trans.h | 6
-rw-r--r-- fs/xfs/xfs_trans_ail.c | 79
-rw-r--r-- fs/xfs/xfs_trans_dquot.c | 23
-rw-r--r-- fs/xfs/xfs_trans_priv.h | 21
-rw-r--r-- fs/xfs/xfs_xattr.c | 1
-rw-r--r-- fs/zonefs/super.c | 2
428 files changed, 19567 insertions, 16204 deletions
diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
index 04f86b8c100e..8cd471da3255 100644
--- a/fs/Kconfig.binfmt
+++ b/fs/Kconfig.binfmt
@@ -45,7 +45,7 @@ config ARCH_USE_GNU_PROPERTY
config BINFMT_ELF_FDPIC
bool "Kernel support for FDPIC ELF binaries"
default y if !BINFMT_ELF
- depends on (ARM || (SUPERH32 && !MMU) || C6X)
+ depends on (ARM || (SUPERH && !MMU) || C6X)
select ELFCORE
help
ELF FDPIC binaries are based on ELF, but allow the individual load
diff --git a/fs/afs/Makefile b/fs/afs/Makefile
index 10359bea7070..75c4e4043d1d 100644
--- a/fs/afs/Makefile
+++ b/fs/afs/Makefile
@@ -18,6 +18,7 @@ kafs-y := \
file.o \
flock.o \
fsclient.o \
+ fs_operation.o \
fs_probe.o \
inode.o \
main.o \
@@ -30,6 +31,7 @@ kafs-y := \
server_list.o \
super.o \
vlclient.o \
+ vl_alias.o \
vl_list.o \
vl_probe.o \
vl_rotate.o \
diff --git a/fs/afs/afs.h b/fs/afs/afs.h
index b6d49d646ade..432cb4b23961 100644
--- a/fs/afs/afs.h
+++ b/fs/afs/afs.h
@@ -10,7 +10,7 @@
#include <linux/in.h>
-#define AFS_MAXCELLNAME 64 /* Maximum length of a cell name */
+#define AFS_MAXCELLNAME 256 /* Maximum length of a cell name */
#define AFS_MAXVOLNAME 64 /* Maximum length of a volume name */
#define AFS_MAXNSERVERS 8 /* Maximum servers in a basic volume record */
#define AFS_NMAXNSERVERS 13 /* Maximum servers in a N/U-class volume record */
@@ -146,7 +146,6 @@ struct afs_file_status {
struct afs_status_cb {
struct afs_file_status status;
struct afs_callback callback;
- unsigned int cb_break; /* Pre-op callback break counter */
bool have_status; /* True if status record was retrieved */
bool have_cb; /* True if cb record was retrieved */
bool have_error; /* True if status.abort_code indicates an error */
diff --git a/fs/afs/afs_vl.h b/fs/afs/afs_vl.h
index e9b8029920ec..9c65ffb8a523 100644
--- a/fs/afs/afs_vl.h
+++ b/fs/afs/afs_vl.h
@@ -22,6 +22,7 @@ enum AFSVL_Operations {
VLGETENTRYBYNAMEU = 527, /* AFS Get VLDB entry by name (UUID-variant) */
VLGETADDRSU = 533, /* AFS Get addrs for fileserver */
YVLGETENDPOINTS = 64002, /* YFS Get endpoints for file/volume server */
+ YVLGETCELLNAME = 64014, /* YFS Get actual cell name */
VLGETCAPABILITIES = 65537, /* AFS Get server capabilities */
};
diff --git a/fs/afs/callback.c b/fs/afs/callback.c
index 2dca8df1a18d..7d9b23d981bf 100644
--- a/fs/afs/callback.c
+++ b/fs/afs/callback.c
@@ -21,192 +21,17 @@
#include "internal.h"
/*
- * Create volume and callback interests on a server.
- */
-static struct afs_cb_interest *afs_create_interest(struct afs_server *server,
- struct afs_vnode *vnode)
-{
- struct afs_vol_interest *new_vi, *vi;
- struct afs_cb_interest *new;
- struct hlist_node **pp;
-
- new_vi = kzalloc(sizeof(struct afs_vol_interest), GFP_KERNEL);
- if (!new_vi)
- return NULL;
-
- new = kzalloc(sizeof(struct afs_cb_interest), GFP_KERNEL);
- if (!new) {
- kfree(new_vi);
- return NULL;
- }
-
- new_vi->usage = 1;
- new_vi->vid = vnode->volume->vid;
- INIT_HLIST_NODE(&new_vi->srv_link);
- INIT_HLIST_HEAD(&new_vi->cb_interests);
-
- refcount_set(&new->usage, 1);
- new->sb = vnode->vfs_inode.i_sb;
- new->vid = vnode->volume->vid;
- new->server = afs_get_server(server, afs_server_trace_get_new_cbi);
- INIT_HLIST_NODE(&new->cb_vlink);
-
- write_lock(&server->cb_break_lock);
-
- for (pp = &server->cb_volumes.first; *pp; pp = &(*pp)->next) {
- vi = hlist_entry(*pp, struct afs_vol_interest, srv_link);
- if (vi->vid < new_vi->vid)
- continue;
- if (vi->vid > new_vi->vid)
- break;
- vi->usage++;
- goto found_vi;
- }
-
- new_vi->srv_link.pprev = pp;
- new_vi->srv_link.next = *pp;
- if (*pp)
- (*pp)->pprev = &new_vi->srv_link.next;
- *pp = &new_vi->srv_link;
- vi = new_vi;
- new_vi = NULL;
-found_vi:
-
- new->vol_interest = vi;
- hlist_add_head(&new->cb_vlink, &vi->cb_interests);
-
- write_unlock(&server->cb_break_lock);
- kfree(new_vi);
- return new;
-}
-
-/*
- * Set up an interest-in-callbacks record for a volume on a server and
- * register it with the server.
- * - Called with vnode->io_lock held.
- */
-int afs_register_server_cb_interest(struct afs_vnode *vnode,
- struct afs_server_list *slist,
- unsigned int index)
-{
- struct afs_server_entry *entry = &slist->servers[index];
- struct afs_cb_interest *cbi, *vcbi, *new, *old;
- struct afs_server *server = entry->server;
-
-again:
- vcbi = rcu_dereference_protected(vnode->cb_interest,
- lockdep_is_held(&vnode->io_lock));
- if (vcbi && likely(vcbi == entry->cb_interest))
- return 0;
-
- read_lock(&slist->lock);
- cbi = afs_get_cb_interest(entry->cb_interest);
- read_unlock(&slist->lock);
-
- if (vcbi) {
- if (vcbi == cbi) {
- afs_put_cb_interest(afs_v2net(vnode), cbi);
- return 0;
- }
-
- /* Use a new interest in the server list for the same server
- * rather than an old one that's still attached to a vnode.
- */
- if (cbi && vcbi->server == cbi->server) {
- write_seqlock(&vnode->cb_lock);
- old = rcu_dereference_protected(vnode->cb_interest,
- lockdep_is_held(&vnode->cb_lock.lock));
- rcu_assign_pointer(vnode->cb_interest, cbi);
- write_sequnlock(&vnode->cb_lock);
- afs_put_cb_interest(afs_v2net(vnode), old);
- return 0;
- }
-
- /* Re-use the one attached to the vnode. */
- if (!cbi && vcbi->server == server) {
- write_lock(&slist->lock);
- if (entry->cb_interest) {
- write_unlock(&slist->lock);
- afs_put_cb_interest(afs_v2net(vnode), cbi);
- goto again;
- }
-
- entry->cb_interest = cbi;
- write_unlock(&slist->lock);
- return 0;
- }
- }
-
- if (!cbi) {
- new = afs_create_interest(server, vnode);
- if (!new)
- return -ENOMEM;
-
- write_lock(&slist->lock);
- if (!entry->cb_interest) {
- entry->cb_interest = afs_get_cb_interest(new);
- cbi = new;
- new = NULL;
- } else {
- cbi = afs_get_cb_interest(entry->cb_interest);
- }
- write_unlock(&slist->lock);
- afs_put_cb_interest(afs_v2net(vnode), new);
- }
-
- ASSERT(cbi);
-
- /* Change the server the vnode is using. This entails scrubbing any
- * interest the vnode had in the previous server it was using.
- */
- write_seqlock(&vnode->cb_lock);
-
- old = rcu_dereference_protected(vnode->cb_interest,
- lockdep_is_held(&vnode->cb_lock.lock));
- rcu_assign_pointer(vnode->cb_interest, cbi);
- vnode->cb_s_break = cbi->server->cb_s_break;
- vnode->cb_v_break = vnode->volume->cb_v_break;
- clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
-
- write_sequnlock(&vnode->cb_lock);
- afs_put_cb_interest(afs_v2net(vnode), old);
- return 0;
-}
-
-/*
- * Remove an interest on a server.
- */
-void afs_put_cb_interest(struct afs_net *net, struct afs_cb_interest *cbi)
-{
- struct afs_vol_interest *vi;
-
- if (cbi && refcount_dec_and_test(&cbi->usage)) {
- if (!hlist_unhashed(&cbi->cb_vlink)) {
- write_lock(&cbi->server->cb_break_lock);
-
- hlist_del_init(&cbi->cb_vlink);
- vi = cbi->vol_interest;
- cbi->vol_interest = NULL;
- if (--vi->usage == 0)
- hlist_del(&vi->srv_link);
- else
- vi = NULL;
-
- write_unlock(&cbi->server->cb_break_lock);
- if (vi)
- kfree_rcu(vi, rcu);
- afs_put_server(net, cbi->server, afs_server_trace_put_cbi);
- }
- kfree_rcu(cbi, rcu);
- }
-}
-
-/*
- * allow the fileserver to request callback state (re-)initialisation
+ * Allow the fileserver to request callback state (re-)initialisation.
+ * Unfortunately, UUIDs are not guaranteed unique.
*/
void afs_init_callback_state(struct afs_server *server)
{
- server->cb_s_break++;
+ rcu_read_lock();
+ do {
+ server->cb_s_break++;
+ server = rcu_dereference(server->uuid_next);
+ } while (0);
+ rcu_read_unlock();
}
/*
@@ -238,69 +63,109 @@ void afs_break_callback(struct afs_vnode *vnode, enum afs_cb_break_reason reason
}
/*
+ * Look up a volume by volume ID under RCU conditions.
+ */
+static struct afs_volume *afs_lookup_volume_rcu(struct afs_cell *cell,
+ afs_volid_t vid)
+{
+ struct afs_volume *volume = NULL;
+ struct rb_node *p;
+ int seq = 0;
+
+ do {
+ /* Unfortunately, rbtree walking doesn't give reliable results
+ * under just the RCU read lock, so we have to check for
+ * changes.
+ */
+ read_seqbegin_or_lock(&cell->volume_lock, &seq);
+
+ p = rcu_dereference_raw(cell->volumes.rb_node);
+ while (p) {
+ volume = rb_entry(p, struct afs_volume, cell_node);
+
+ if (volume->vid < vid)
+ p = rcu_dereference_raw(p->rb_left);
+ else if (volume->vid > vid)
+ p = rcu_dereference_raw(p->rb_right);
+ else
+ break;
+ volume = NULL;
+ }
+
+ } while (need_seqretry(&cell->volume_lock, seq));
+
+ done_seqretry(&cell->volume_lock, seq);
+ return volume;
+}
+
+/*
* allow the fileserver to explicitly break one callback
* - happens when
* - the backing file is changed
* - a lock is released
*/
-static void afs_break_one_callback(struct afs_server *server,
+static void afs_break_one_callback(struct afs_volume *volume,
struct afs_fid *fid)
{
- struct afs_vol_interest *vi;
- struct afs_cb_interest *cbi;
- struct afs_iget_data data;
+ struct super_block *sb;
struct afs_vnode *vnode;
struct inode *inode;
- read_lock(&server->cb_break_lock);
- hlist_for_each_entry(vi, &server->cb_volumes, srv_link) {
- if (vi->vid < fid->vid)
- continue;
- if (vi->vid > fid->vid) {
- vi = NULL;
- break;
- }
- //atomic_inc(&vi->usage);
- break;
+ if (fid->vnode == 0 && fid->unique == 0) {
+ /* The callback break applies to an entire volume. */
+ write_lock(&volume->cb_v_break_lock);
+ volume->cb_v_break++;
+ trace_afs_cb_break(fid, volume->cb_v_break,
+ afs_cb_break_for_volume_callback, false);
+ write_unlock(&volume->cb_v_break_lock);
+ return;
}
- /* TODO: Find all matching volumes if we couldn't match the server and
- * break them anyway.
+ /* See if we can find a matching inode - even an I_NEW inode needs to
+ * be marked as it can have its callback broken before we finish
+ * setting up the local inode.
*/
- if (!vi)
- goto out;
+ sb = rcu_dereference(volume->sb);
+ if (!sb)
+ return;
+
+ inode = find_inode_rcu(sb, fid->vnode, afs_ilookup5_test_by_fid, fid);
+ if (inode) {
+ vnode = AFS_FS_I(inode);
+ afs_break_callback(vnode, afs_cb_break_for_callback);
+ } else {
+ trace_afs_cb_miss(fid, afs_cb_break_for_callback);
+ }
+}
+
+static void afs_break_some_callbacks(struct afs_server *server,
+ struct afs_callback_break *cbb,
+ size_t *_count)
+{
+ struct afs_callback_break *residue = cbb;
+ struct afs_volume *volume;
+ afs_volid_t vid = cbb->fid.vid;
+ size_t i;
- /* Step through all interested superblocks. There may be more than one
- * because of cell aliasing.
+ volume = afs_lookup_volume_rcu(server->cell, vid);
+
+ /* TODO: Find all matching volumes if we couldn't match the server and
+ * break them anyway.
*/
- hlist_for_each_entry(cbi, &vi->cb_interests, cb_vlink) {
- if (fid->vnode == 0 && fid->unique == 0) {
- /* The callback break applies to an entire volume. */
- struct afs_super_info *as = AFS_FS_S(cbi->sb);
- struct afs_volume *volume = as->volume;
- write_lock(&volume->cb_v_break_lock);
- volume->cb_v_break++;
- trace_afs_cb_break(fid, volume->cb_v_break,
- afs_cb_break_for_volume_callback, false);
- write_unlock(&volume->cb_v_break_lock);
+ for (i = *_count; i > 0; cbb++, i--) {
+ if (cbb->fid.vid == vid) {
+ _debug("- Fid { vl=%08llx n=%llu u=%u }",
+ cbb->fid.vid,
+ cbb->fid.vnode,
+ cbb->fid.unique);
+ --*_count;
+ if (volume)
+ afs_break_one_callback(volume, &cbb->fid);
} else {
- data.volume = NULL;
- data.fid = *fid;
- inode = ilookup5_nowait(cbi->sb, fid->vnode,
- afs_iget5_test, &data);
- if (inode) {
- vnode = AFS_FS_I(inode);
- afs_break_callback(vnode, afs_cb_break_for_callback);
- iput(inode);
- } else {
- trace_afs_cb_miss(fid, afs_cb_break_for_callback);
- }
+ *residue++ = *cbb;
}
}
-
-out:
- read_unlock(&server->cb_break_lock);
}
/*
@@ -313,29 +178,11 @@ void afs_break_callbacks(struct afs_server *server, size_t count,
ASSERT(server != NULL);
- /* TODO: Sort the callback break list by volume ID */
+ rcu_read_lock();
- for (; count > 0; callbacks++, count--) {
- _debug("- Fid { vl=%08llx n=%llu u=%u }",
- callbacks->fid.vid,
- callbacks->fid.vnode,
- callbacks->fid.unique);
- afs_break_one_callback(server, &callbacks->fid);
- }
+ while (count > 0)
+ afs_break_some_callbacks(server, callbacks, &count);
- _leave("");
+ rcu_read_unlock();
return;
}
-
-/*
- * Clear the callback interests in a server list.
- */
-void afs_clear_callback_interests(struct afs_net *net, struct afs_server_list *slist)
-{
- int i;
-
- for (i = 0; i < slist->nr_servers; i++) {
- afs_put_cb_interest(net, slist->servers[i].cb_interest);
- slist->servers[i].cb_interest = NULL;
- }
-}
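The new afs_lookup_volume_rcu() above uses the kernel's seqlock retry idiom: the rbtree is walked locklessly under RCU first, and cell->volume_lock is only taken for a stable second pass if need_seqretry() reports a concurrent writer. A minimal sketch of the idiom in isolation, with hypothetical names (struct item, lookup_item_rcu) rather than the AFS types, assuming the caller holds rcu_read_lock():

#include <linux/rbtree.h>
#include <linux/rcupdate.h>
#include <linux/seqlock.h>

struct item {
	struct rb_node	node;
	u64		id;
};

/* Walk an rbtree keyed by id: lockless first pass, locked retry. */
static struct item *lookup_item_rcu(struct rb_root *root, seqlock_t *lock,
				    u64 id)
{
	struct item *found;
	struct rb_node *p;
	int seq = 0;

	do {
		/* First pass samples the seqcount; a retry takes the lock. */
		read_seqbegin_or_lock(lock, &seq);
		found = NULL;
		p = rcu_dereference_raw(root->rb_node);
		while (p) {
			struct item *it = rb_entry(p, struct item, node);

			if (id < it->id)
				p = rcu_dereference_raw(p->rb_left);
			else if (id > it->id)
				p = rcu_dereference_raw(p->rb_right);
			else {
				found = it;
				break;
			}
		}
	} while (need_seqretry(lock, seq));
	done_seqretry(lock, seq);
	return found;
}

The lockless pass is cheap for the common read case; the locked retry guarantees forward progress when the tree is being rebalanced underneath the reader.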
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index 78ba5f932287..005921e3b38d 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -161,9 +161,13 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
atomic_set(&cell->usage, 2);
INIT_WORK(&cell->manager, afs_manage_cell);
- INIT_LIST_HEAD(&cell->proc_volumes);
- rwlock_init(&cell->proc_lock);
+ cell->volumes = RB_ROOT;
+ INIT_HLIST_HEAD(&cell->proc_volumes);
+ seqlock_init(&cell->volume_lock);
+ cell->fs_servers = RB_ROOT;
+ seqlock_init(&cell->fs_lock);
rwlock_init(&cell->vl_servers_lock);
+ cell->flags = (1 << AFS_CELL_FL_CHECK_ALIAS);
/* Provide a VL server list, filling it in if we were given a list of
* addresses to use.
@@ -481,7 +485,9 @@ static void afs_cell_destroy(struct rcu_head *rcu)
ASSERTCMP(atomic_read(&cell->usage), ==, 0);
+ afs_put_volume(cell->net, cell->root_volume, afs_volume_trace_put_cell_root);
afs_put_vlserverlist(cell->net, rcu_access_pointer(cell->vl_servers));
+ afs_put_cell(cell->net, cell->alias_of);
key_put(cell->anonymous_key);
kfree(cell);
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index 380ad5ace7cf..bef413818af7 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -118,8 +118,6 @@ bool afs_cm_incoming_call(struct afs_call *call)
{
_enter("{%u, CB.OP %u}", call->service_id, call->operation_ID);
- call->epoch = rxrpc_kernel_get_epoch(call->net->socket, call->rxcall);
-
switch (call->operation_ID) {
case CBCallBack:
call->type = &afs_SRXCBCallBack;
@@ -150,49 +148,6 @@ bool afs_cm_incoming_call(struct afs_call *call)
}
/*
- * Record a probe to the cache manager from a server.
- */
-static int afs_record_cm_probe(struct afs_call *call, struct afs_server *server)
-{
- _enter("");
-
- if (test_bit(AFS_SERVER_FL_HAVE_EPOCH, &server->flags) &&
- !test_bit(AFS_SERVER_FL_PROBING, &server->flags)) {
- if (server->cm_epoch == call->epoch)
- return 0;
-
- if (!server->probe.said_rebooted) {
- pr_notice("kAFS: FS rebooted %pU\n", &server->uuid);
- server->probe.said_rebooted = true;
- }
- }
-
- spin_lock(&server->probe_lock);
-
- if (!test_and_set_bit(AFS_SERVER_FL_HAVE_EPOCH, &server->flags)) {
- server->cm_epoch = call->epoch;
- server->probe.cm_epoch = call->epoch;
- goto out;
- }
-
- if (server->probe.cm_probed &&
- call->epoch != server->probe.cm_epoch &&
- !server->probe.said_inconsistent) {
- pr_notice("kAFS: FS endpoints inconsistent %pU\n",
- &server->uuid);
- server->probe.said_inconsistent = true;
- }
-
- if (!server->probe.cm_probed || call->epoch == server->cm_epoch)
- server->probe.cm_epoch = server->cm_epoch;
-
-out:
- server->probe.cm_probed = true;
- spin_unlock(&server->probe_lock);
- return 0;
-}
-
-/*
* Find the server record by peer address and record a probe to the cache
* manager from a server.
*/
@@ -210,7 +165,7 @@ static int afs_find_cm_server_by_peer(struct afs_call *call)
}
call->server = server;
- return afs_record_cm_probe(call, server);
+ return 0;
}
/*
@@ -231,7 +186,7 @@ static int afs_find_cm_server_by_uuid(struct afs_call *call,
}
call->server = server;
- return afs_record_cm_probe(call, server);
+ return 0;
}
/*
@@ -268,7 +223,9 @@ static void SRXAFSCB_CallBack(struct work_struct *work)
* to maintain cache coherency.
*/
if (call->server) {
- trace_afs_server(call->server, atomic_read(&call->server->usage),
+ trace_afs_server(call->server,
+ atomic_read(&call->server->ref),
+ atomic_read(&call->server->active),
afs_server_trace_callback);
afs_break_callbacks(call->server, call->count, call->request);
}
@@ -305,8 +262,7 @@ static int afs_deliver_cb_callback(struct afs_call *call)
call->count = ntohl(call->tmp);
_debug("FID count: %u", call->count);
if (call->count > AFSCBMAX)
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_cb_fid_count);
+ return afs_protocol_error(call, afs_eproto_cb_fid_count);
call->buffer = kmalloc(array3_size(call->count, 3, 4),
GFP_KERNEL);
@@ -351,8 +307,7 @@ static int afs_deliver_cb_callback(struct afs_call *call)
call->count2 = ntohl(call->tmp);
_debug("CB count: %u", call->count2);
if (call->count2 != call->count && call->count2 != 0)
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_cb_count);
+ return afs_protocol_error(call, afs_eproto_cb_count);
call->iter = &call->def_iter;
iov_iter_discard(&call->def_iter, READ, call->count2 * 3 * 4);
call->unmarshall++;
@@ -509,7 +464,8 @@ static int afs_deliver_cb_probe(struct afs_call *call)
}
/*
- * allow the fileserver to quickly find out if the fileserver has been rebooted
+ * Allow the fileserver to quickly find out if the cache manager has been
+ * rebooted.
*/
static void SRXAFSCB_ProbeUuid(struct work_struct *work)
{
@@ -581,7 +537,7 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call)
if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
return afs_io_error(call, afs_io_error_cm_reply);
- return afs_find_cm_server_by_uuid(call, call->request);
+ return afs_find_cm_server_by_peer(call);
}
/*
@@ -672,8 +628,7 @@ static int afs_deliver_yfs_cb_callback(struct afs_call *call)
call->count = ntohl(call->tmp);
_debug("FID count: %u", call->count);
if (call->count > YFSCBMAX)
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_cb_fid_count);
+ return afs_protocol_error(call, afs_eproto_cb_fid_count);
size = array_size(call->count, sizeof(struct yfs_xdr_YFSFid));
call->buffer = kmalloc(size, GFP_KERNEL);
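The fs/afs/dir.c conversion below moves each directory operation onto the new afs_operation model added in fs_operation.c: the caller allocates an operation, points op->ops at a table of callbacks (issue_afs_rpc/issue_yfs_rpc, success, edit_dir, put), and lets afs_do_sync_operation() drive server rotation and completion. A much-reduced sketch of that dispatch shape, using hypothetical names rather than the real fs_operation.c code:

/* Hypothetical, much-simplified version of the ops-table dispatch. */
struct op;

struct op_ops {
	void (*issue_rpc)(struct op *op);	/* marshal and send the RPC */
	void (*success)(struct op *op);		/* commit returned status */
	void (*edit_dir)(struct op *op);	/* mirror the change locally */
	void (*put)(struct op *op);		/* type-specific cleanup */
};

struct op {
	const struct op_ops *ops;
	int error;
};

static int do_sync_operation(struct op *op)
{
	/* The real code retries the issue step across the volume's servers. */
	op->ops->issue_rpc(op);
	if (op->error == 0) {
		op->ops->success(op);
		if (op->ops->edit_dir)
			op->ops->edit_dir(op);
	}
	if (op->ops->put)
		op->ops->put(op);
	return op->error;
}

This is why the per-operation functions below (afs_mkdir, afs_rmdir, afs_unlink, afs_create) shrink to little more than filling in the operation and its ops table.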
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index d1e1caa23c8b..aa1d34141ea3 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -99,8 +99,6 @@ struct afs_lookup_cookie {
bool found;
bool one_only;
unsigned short nr_fids;
- struct inode **inodes;
- struct afs_status_cb *statuses;
struct afs_fid fids[50];
};
@@ -618,8 +616,8 @@ static int afs_lookup_filldir(struct dir_context *ctx, const char *name,
}
} else if (cookie->name.len == nlen &&
memcmp(cookie->name.name, name, nlen) == 0) {
- cookie->fids[0].vnode = ino;
- cookie->fids[0].unique = dtype;
+ cookie->fids[1].vnode = ino;
+ cookie->fids[1].unique = dtype;
cookie->found = 1;
if (cookie->one_only)
return -1;
@@ -631,6 +629,111 @@ static int afs_lookup_filldir(struct dir_context *ctx, const char *name,
}
/*
+ * Deal with the result of a successful lookup operation. Turn all the files
+ * into inodes and save the first one - which is the one we actually want.
+ */
+static void afs_do_lookup_success(struct afs_operation *op)
+{
+ struct afs_vnode_param *vp;
+ struct afs_vnode *vnode;
+ struct inode *inode;
+ u32 abort_code;
+ int i;
+
+ _enter("");
+
+ for (i = 0; i < op->nr_files; i++) {
+ switch (i) {
+ case 0:
+ vp = &op->file[0];
+ abort_code = vp->scb.status.abort_code;
+ if (abort_code != 0) {
+ op->abort_code = abort_code;
+ op->error = afs_abort_to_error(abort_code);
+ }
+ break;
+
+ case 1:
+ vp = &op->file[1];
+ break;
+
+ default:
+ vp = &op->more_files[i - 2];
+ break;
+ }
+
+ if (!vp->scb.have_status && !vp->scb.have_error)
+ continue;
+
+ _debug("do [%u]", i);
+ if (vp->vnode) {
+ if (!test_bit(AFS_VNODE_UNSET, &vp->vnode->flags))
+ afs_vnode_commit_status(op, vp);
+ } else if (vp->scb.status.abort_code == 0) {
+ inode = afs_iget(op, vp);
+ if (!IS_ERR(inode)) {
+ vnode = AFS_FS_I(inode);
+ afs_cache_permit(vnode, op->key,
+ 0 /* Assume vnode->cb_break is 0 */ +
+ op->cb_v_break,
+ &vp->scb);
+ vp->vnode = vnode;
+ vp->put_vnode = true;
+ }
+ } else {
+ _debug("- abort %d %llx:%llx.%x",
+ vp->scb.status.abort_code,
+ vp->fid.vid, vp->fid.vnode, vp->fid.unique);
+ }
+ }
+
+ _leave("");
+}
+
+static const struct afs_operation_ops afs_inline_bulk_status_operation = {
+ .issue_afs_rpc = afs_fs_inline_bulk_status,
+ .issue_yfs_rpc = yfs_fs_inline_bulk_status,
+ .success = afs_do_lookup_success,
+};
+
+static const struct afs_operation_ops afs_fetch_status_operation = {
+ .issue_afs_rpc = afs_fs_fetch_status,
+ .issue_yfs_rpc = yfs_fs_fetch_status,
+ .success = afs_do_lookup_success,
+};
+
+/*
+ * See if we know that the server we expect to use doesn't support
+ * FS.InlineBulkStatus.
+ */
+static bool afs_server_supports_ibulk(struct afs_vnode *dvnode)
+{
+ struct afs_server_list *slist;
+ struct afs_volume *volume = dvnode->volume;
+ struct afs_server *server;
+ bool ret = true;
+ int i;
+
+ if (!test_bit(AFS_VOLUME_MAYBE_NO_IBULK, &volume->flags))
+ return true;
+
+ rcu_read_lock();
+ slist = rcu_dereference(volume->servers);
+
+ for (i = 0; i < slist->nr_servers; i++) {
+ server = slist->servers[i].server;
+ if (server == dvnode->cb_server) {
+ if (test_bit(AFS_SERVER_FL_NO_IBULK, &server->flags))
+ ret = false;
+ break;
+ }
+ }
+
+ rcu_read_unlock();
+ return ret;
+}
+
+/*
* Do a lookup in a directory. We make use of bulk lookup to query a slew of
* files in one go and create inodes for them. The inode of the file we were
* asked for is returned.
@@ -639,16 +742,13 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
struct key *key)
{
struct afs_lookup_cookie *cookie;
- struct afs_cb_interest *dcbi, *cbi = NULL;
- struct afs_super_info *as = dir->i_sb->s_fs_info;
- struct afs_status_cb *scb;
- struct afs_iget_data iget_data;
- struct afs_fs_cursor fc;
- struct afs_server *server;
+ struct afs_vnode_param *vp;
+ struct afs_operation *op;
struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode;
struct inode *inode = NULL, *ti;
afs_dataversion_t data_version = READ_ONCE(dvnode->status.data_version);
- int ret, i;
+ long ret;
+ int i;
_enter("{%lu},%p{%pd},", dir->i_ino, dentry, dentry);
@@ -656,72 +756,74 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
if (!cookie)
return ERR_PTR(-ENOMEM);
+ for (i = 0; i < ARRAY_SIZE(cookie->fids); i++)
+ cookie->fids[i].vid = dvnode->fid.vid;
cookie->ctx.actor = afs_lookup_filldir;
cookie->name = dentry->d_name;
- cookie->nr_fids = 1; /* slot 0 is saved for the fid we actually want */
-
- read_seqlock_excl(&dvnode->cb_lock);
- dcbi = rcu_dereference_protected(dvnode->cb_interest,
- lockdep_is_held(&dvnode->cb_lock.lock));
- if (dcbi) {
- server = dcbi->server;
- if (server &&
- test_bit(AFS_SERVER_FL_NO_IBULK, &server->flags))
- cookie->one_only = true;
- }
- read_sequnlock_excl(&dvnode->cb_lock);
+ cookie->nr_fids = 2; /* slot 1 is saved for the fid we actually want
+ * and slot 0 for the directory */
- for (i = 0; i < 50; i++)
- cookie->fids[i].vid = as->volume->vid;
+ if (!afs_server_supports_ibulk(dvnode))
+ cookie->one_only = true;
/* search the directory */
ret = afs_dir_iterate(dir, &cookie->ctx, key, &data_version);
- if (ret < 0) {
- inode = ERR_PTR(ret);
+ if (ret < 0)
goto out;
- }
dentry->d_fsdata = (void *)(unsigned long)data_version;
- inode = ERR_PTR(-ENOENT);
+ ret = -ENOENT;
if (!cookie->found)
goto out;
/* Check to see if we already have an inode for the primary fid. */
- iget_data.fid = cookie->fids[0];
- iget_data.volume = dvnode->volume;
- iget_data.cb_v_break = dvnode->volume->cb_v_break;
- iget_data.cb_s_break = 0;
- inode = ilookup5(dir->i_sb, cookie->fids[0].vnode,
- afs_iget5_test, &iget_data);
+ inode = ilookup5(dir->i_sb, cookie->fids[1].vnode,
+ afs_ilookup5_test_by_fid, &cookie->fids[1]);
if (inode)
- goto out;
+ goto out; /* We do */
- /* Need space for examining all the selected files */
- inode = ERR_PTR(-ENOMEM);
- cookie->statuses = kvcalloc(cookie->nr_fids, sizeof(struct afs_status_cb),
- GFP_KERNEL);
- if (!cookie->statuses)
+ /* Okay, we didn't find it. We need to query the server - and whilst
+ * we're doing that, we're going to attempt to look up a bunch of other
+ * vnodes also.
+ */
+ op = afs_alloc_operation(NULL, dvnode->volume);
+ if (IS_ERR(op)) {
+ ret = PTR_ERR(op);
goto out;
+ }
- cookie->inodes = kcalloc(cookie->nr_fids, sizeof(struct inode *),
- GFP_KERNEL);
- if (!cookie->inodes)
- goto out_s;
+ afs_op_set_vnode(op, 0, dvnode);
+ afs_op_set_fid(op, 1, &cookie->fids[1]);
- for (i = 1; i < cookie->nr_fids; i++) {
- scb = &cookie->statuses[i];
+ op->nr_files = cookie->nr_fids;
+ _debug("nr_files %u", op->nr_files);
- /* Find any inodes that already exist and get their
- * callback counters.
- */
- iget_data.fid = cookie->fids[i];
- ti = ilookup5_nowait(dir->i_sb, iget_data.fid.vnode,
- afs_iget5_test, &iget_data);
- if (!IS_ERR_OR_NULL(ti)) {
- vnode = AFS_FS_I(ti);
- scb->cb_break = afs_calc_vnode_cb_break(vnode);
- cookie->inodes[i] = ti;
+ /* Need space for examining all the selected files */
+ op->error = -ENOMEM;
+ if (op->nr_files > 2) {
+ op->more_files = kvcalloc(op->nr_files - 2,
+ sizeof(struct afs_vnode_param),
+ GFP_KERNEL);
+ if (!op->more_files)
+ goto out_op;
+
+ for (i = 2; i < op->nr_files; i++) {
+ vp = &op->more_files[i - 2];
+ vp->fid = cookie->fids[i];
+
+ /* Find any inodes that already exist and get their
+ * callback counters.
+ */
+ ti = ilookup5_nowait(dir->i_sb, vp->fid.vnode,
+ afs_ilookup5_test_by_fid, &vp->fid);
+ if (!IS_ERR_OR_NULL(ti)) {
+ vnode = AFS_FS_I(ti);
+ vp->dv_before = vnode->status.data_version;
+ vp->cb_break_before = afs_calc_vnode_cb_break(vnode);
+ vp->vnode = vnode;
+ vp->put_vnode = true;
+ }
}
}
@@ -729,120 +831,40 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
* lookups contained therein are stored in the reply without aborting
* the whole operation.
*/
- if (cookie->one_only)
- goto no_inline_bulk_status;
-
- inode = ERR_PTR(-ERESTARTSYS);
- if (afs_begin_vnode_operation(&fc, dvnode, key, true)) {
- while (afs_select_fileserver(&fc)) {
- if (test_bit(AFS_SERVER_FL_NO_IBULK,
- &fc.cbi->server->flags)) {
- fc.ac.abort_code = RX_INVALID_OPERATION;
- fc.ac.error = -ECONNABORTED;
- break;
- }
- iget_data.cb_v_break = dvnode->volume->cb_v_break;
- iget_data.cb_s_break = fc.cbi->server->cb_s_break;
- afs_fs_inline_bulk_status(&fc,
- afs_v2net(dvnode),
- cookie->fids,
- cookie->statuses,
- cookie->nr_fids, NULL);
- }
-
- if (fc.ac.error == 0)
- cbi = afs_get_cb_interest(fc.cbi);
- if (fc.ac.abort_code == RX_INVALID_OPERATION)
- set_bit(AFS_SERVER_FL_NO_IBULK, &fc.cbi->server->flags);
- inode = ERR_PTR(afs_end_vnode_operation(&fc));
+ op->error = -ENOTSUPP;
+ if (!cookie->one_only) {
+ op->ops = &afs_inline_bulk_status_operation;
+ afs_begin_vnode_operation(op);
+ afs_wait_for_operation(op);
}
- if (!IS_ERR(inode))
- goto success;
- if (fc.ac.abort_code != RX_INVALID_OPERATION)
- goto out_c;
-
-no_inline_bulk_status:
- /* We could try FS.BulkStatus next, but this aborts the entire op if
- * any of the lookups fails - so, for the moment, revert to
- * FS.FetchStatus for just the primary fid.
- */
- inode = ERR_PTR(-ERESTARTSYS);
- if (afs_begin_vnode_operation(&fc, dvnode, key, true)) {
- while (afs_select_fileserver(&fc)) {
- iget_data.cb_v_break = dvnode->volume->cb_v_break;
- iget_data.cb_s_break = fc.cbi->server->cb_s_break;
- scb = &cookie->statuses[0];
- afs_fs_fetch_status(&fc,
- afs_v2net(dvnode),
- cookie->fids,
- scb,
- NULL);
- }
-
- if (fc.ac.error == 0)
- cbi = afs_get_cb_interest(fc.cbi);
- inode = ERR_PTR(afs_end_vnode_operation(&fc));
+ if (op->error == -ENOTSUPP) {
+ /* We could try FS.BulkStatus next, but this aborts the entire
+ * op if any of the lookups fails - so, for the moment, revert
+ * to FS.FetchStatus for op->file[1].
+ */
+ op->fetch_status.which = 1;
+ op->ops = &afs_fetch_status_operation;
+ afs_begin_vnode_operation(op);
+ afs_wait_for_operation(op);
}
+ inode = ERR_PTR(op->error);
- if (IS_ERR(inode))
- goto out_c;
-
-success:
- /* Turn all the files into inodes and save the first one - which is the
- * one we actually want.
- */
- scb = &cookie->statuses[0];
- if (scb->status.abort_code != 0)
- inode = ERR_PTR(afs_abort_to_error(scb->status.abort_code));
-
- for (i = 0; i < cookie->nr_fids; i++) {
- struct afs_status_cb *scb = &cookie->statuses[i];
-
- if (!scb->have_status && !scb->have_error)
- continue;
-
- if (cookie->inodes[i]) {
- struct afs_vnode *iv = AFS_FS_I(cookie->inodes[i]);
-
- if (test_bit(AFS_VNODE_UNSET, &iv->flags))
- continue;
-
- afs_vnode_commit_status(&fc, iv,
- scb->cb_break, NULL, scb);
- continue;
- }
-
- if (scb->status.abort_code != 0)
- continue;
-
- iget_data.fid = cookie->fids[i];
- ti = afs_iget(dir->i_sb, key, &iget_data, scb, cbi, dvnode);
- if (!IS_ERR(ti))
- afs_cache_permit(AFS_FS_I(ti), key,
- 0 /* Assume vnode->cb_break is 0 */ +
- iget_data.cb_v_break,
- scb);
- if (i == 0) {
- inode = ti;
- } else {
- if (!IS_ERR(ti))
- iput(ti);
- }
+out_op:
+ if (op->error == 0) {
+ inode = &op->file[1].vnode->vfs_inode;
+ op->file[1].vnode = NULL;
}
-out_c:
- afs_put_cb_interest(afs_v2net(dvnode), cbi);
- if (cookie->inodes) {
- for (i = 0; i < cookie->nr_fids; i++)
- iput(cookie->inodes[i]);
- kfree(cookie->inodes);
- }
-out_s:
- kvfree(cookie->statuses);
+ if (op->file[0].scb.have_status)
+ dentry->d_fsdata = (void *)(unsigned long)op->file[0].scb.status.data_version;
+ else
+ dentry->d_fsdata = (void *)(unsigned long)op->file[0].dv_before;
+ ret = afs_put_operation(op);
out:
kfree(cookie);
- return inode;
+ _leave("");
+ return inode ?: ERR_PTR(ret);
}
/*
@@ -958,6 +980,7 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
if (!IS_ERR_OR_NULL(inode))
fid = AFS_FS_I(inode)->fid;
+ _debug("splice %p", dentry->d_inode);
d = d_splice_alias(inode, dentry);
if (!IS_ERR_OR_NULL(d)) {
d->d_fsdata = dentry->d_fsdata;
@@ -965,6 +988,7 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
} else {
trace_afs_lookup(dvnode, &dentry->d_name, &fid);
}
+ _leave("");
return d;
}
@@ -1215,130 +1239,97 @@ void afs_d_release(struct dentry *dentry)
/*
* Create a new inode for create/mkdir/symlink
*/
-static void afs_vnode_new_inode(struct afs_fs_cursor *fc,
- struct dentry *new_dentry,
- struct afs_iget_data *new_data,
- struct afs_status_cb *new_scb)
+static void afs_vnode_new_inode(struct afs_operation *op)
{
+ struct afs_vnode_param *vp = &op->file[1];
struct afs_vnode *vnode;
struct inode *inode;
- if (fc->ac.error < 0)
- return;
+ _enter("");
+
+ ASSERTCMP(op->error, ==, 0);
- inode = afs_iget(fc->vnode->vfs_inode.i_sb, fc->key,
- new_data, new_scb, fc->cbi, fc->vnode);
+ inode = afs_iget(op, vp);
if (IS_ERR(inode)) {
/* ENOMEM or EINTR at a really inconvenient time - just abandon
* the new directory on the server.
*/
- fc->ac.error = PTR_ERR(inode);
+ op->error = PTR_ERR(inode);
return;
}
vnode = AFS_FS_I(inode);
set_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
- if (fc->ac.error == 0)
- afs_cache_permit(vnode, fc->key, vnode->cb_break, new_scb);
- d_instantiate(new_dentry, inode);
+ if (!op->error)
+ afs_cache_permit(vnode, op->key, vnode->cb_break, &vp->scb);
+ d_instantiate(op->dentry, inode);
}
-static void afs_prep_for_new_inode(struct afs_fs_cursor *fc,
- struct afs_iget_data *iget_data)
+static void afs_create_success(struct afs_operation *op)
{
- iget_data->volume = fc->vnode->volume;
- iget_data->cb_v_break = fc->vnode->volume->cb_v_break;
- iget_data->cb_s_break = fc->cbi->server->cb_s_break;
+ _enter("op=%08x", op->debug_id);
+ afs_check_for_remote_deletion(op, op->file[0].vnode);
+ afs_vnode_commit_status(op, &op->file[0]);
+ afs_update_dentry_version(op, &op->file[0], op->dentry);
+ afs_vnode_new_inode(op);
}
-/*
- * Note that a dentry got changed. We need to set d_fsdata to the data version
- * number derived from the result of the operation. It doesn't matter if
- * d_fsdata goes backwards as we'll just revalidate.
- */
-static void afs_update_dentry_version(struct afs_fs_cursor *fc,
- struct dentry *dentry,
- struct afs_status_cb *scb)
+static void afs_create_edit_dir(struct afs_operation *op)
+{
+ struct afs_vnode_param *dvp = &op->file[0];
+ struct afs_vnode_param *vp = &op->file[1];
+ struct afs_vnode *dvnode = dvp->vnode;
+
+ _enter("op=%08x", op->debug_id);
+
+ down_write(&dvnode->validate_lock);
+ if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
+ dvnode->status.data_version == dvp->dv_before + dvp->dv_delta)
+ afs_edit_dir_add(dvnode, &op->dentry->d_name, &vp->fid,
+ op->create.reason);
+ up_write(&dvnode->validate_lock);
+}
+
+static void afs_create_put(struct afs_operation *op)
{
- if (fc->ac.error == 0)
- dentry->d_fsdata =
- (void *)(unsigned long)scb->status.data_version;
+ _enter("op=%08x", op->debug_id);
+
+ if (op->error)
+ d_drop(op->dentry);
}
+static const struct afs_operation_ops afs_mkdir_operation = {
+ .issue_afs_rpc = afs_fs_make_dir,
+ .issue_yfs_rpc = yfs_fs_make_dir,
+ .success = afs_create_success,
+ .edit_dir = afs_create_edit_dir,
+ .put = afs_create_put,
+};
+
/*
* create a directory on an AFS filesystem
*/
static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
- struct afs_iget_data iget_data;
- struct afs_status_cb *scb;
- struct afs_fs_cursor fc;
+ struct afs_operation *op;
struct afs_vnode *dvnode = AFS_FS_I(dir);
- struct key *key;
- afs_dataversion_t data_version;
- int ret;
-
- mode |= S_IFDIR;
_enter("{%llx:%llu},{%pd},%ho",
dvnode->fid.vid, dvnode->fid.vnode, dentry, mode);
- ret = -ENOMEM;
- scb = kcalloc(2, sizeof(struct afs_status_cb), GFP_KERNEL);
- if (!scb)
- goto error;
-
- key = afs_request_key(dvnode->volume->cell);
- if (IS_ERR(key)) {
- ret = PTR_ERR(key);
- goto error_scb;
+ op = afs_alloc_operation(NULL, dvnode->volume);
+ if (IS_ERR(op)) {
+ d_drop(dentry);
+ return PTR_ERR(op);
}
- ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, dvnode, key, true)) {
- data_version = dvnode->status.data_version + 1;
-
- while (afs_select_fileserver(&fc)) {
- fc.cb_break = afs_calc_vnode_cb_break(dvnode);
- afs_prep_for_new_inode(&fc, &iget_data);
- afs_fs_create(&fc, dentry->d_name.name, mode,
- &scb[0], &iget_data.fid, &scb[1]);
- }
-
- afs_check_for_remote_deletion(&fc, dvnode);
- afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
- &data_version, &scb[0]);
- afs_update_dentry_version(&fc, dentry, &scb[0]);
- afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]);
- ret = afs_end_vnode_operation(&fc);
- if (ret < 0)
- goto error_key;
- } else {
- goto error_key;
- }
-
- if (ret == 0) {
- down_write(&dvnode->validate_lock);
- if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
- dvnode->status.data_version == data_version)
- afs_edit_dir_add(dvnode, &dentry->d_name, &iget_data.fid,
- afs_edit_dir_for_create);
- up_write(&dvnode->validate_lock);
- }
-
- key_put(key);
- kfree(scb);
- _leave(" = 0");
- return 0;
-
-error_key:
- key_put(key);
-error_scb:
- kfree(scb);
-error:
- d_drop(dentry);
- _leave(" = %d", ret);
- return ret;
+ afs_op_set_vnode(op, 0, dvnode);
+ op->file[0].dv_delta = 1;
+ op->dentry = dentry;
+ op->create.mode = S_IFDIR | mode;
+ op->create.reason = afs_edit_dir_for_mkdir;
+ op->ops = &afs_mkdir_operation;
+ return afs_do_sync_operation(op);
}
/*
@@ -1356,76 +1347,86 @@ static void afs_dir_remove_subdir(struct dentry *dentry)
}
}
+static void afs_rmdir_success(struct afs_operation *op)
+{
+ _enter("op=%08x", op->debug_id);
+ afs_check_for_remote_deletion(op, op->file[0].vnode);
+ afs_vnode_commit_status(op, &op->file[0]);
+ afs_update_dentry_version(op, &op->file[0], op->dentry);
+}
+
+static void afs_rmdir_edit_dir(struct afs_operation *op)
+{
+ struct afs_vnode_param *dvp = &op->file[0];
+ struct afs_vnode *dvnode = dvp->vnode;
+
+ _enter("op=%08x", op->debug_id);
+ afs_dir_remove_subdir(op->dentry);
+
+ down_write(&dvnode->validate_lock);
+ if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
+ dvnode->status.data_version == dvp->dv_before + dvp->dv_delta)
+ afs_edit_dir_remove(dvnode, &op->dentry->d_name,
+ afs_edit_dir_for_rmdir);
+ up_write(&dvnode->validate_lock);
+}
+
+static void afs_rmdir_put(struct afs_operation *op)
+{
+ _enter("op=%08x", op->debug_id);
+ if (op->file[1].vnode)
+ up_write(&op->file[1].vnode->rmdir_lock);
+}
+
+static const struct afs_operation_ops afs_rmdir_operation = {
+ .issue_afs_rpc = afs_fs_remove_dir,
+ .issue_yfs_rpc = yfs_fs_remove_dir,
+ .success = afs_rmdir_success,
+ .edit_dir = afs_rmdir_edit_dir,
+ .put = afs_rmdir_put,
+};
+
/*
* remove a directory from an AFS filesystem
*/
static int afs_rmdir(struct inode *dir, struct dentry *dentry)
{
- struct afs_status_cb *scb;
- struct afs_fs_cursor fc;
+ struct afs_operation *op;
struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode = NULL;
- struct key *key;
- afs_dataversion_t data_version;
int ret;
_enter("{%llx:%llu},{%pd}",
dvnode->fid.vid, dvnode->fid.vnode, dentry);
- scb = kzalloc(sizeof(struct afs_status_cb), GFP_KERNEL);
- if (!scb)
- return -ENOMEM;
+ op = afs_alloc_operation(NULL, dvnode->volume);
+ if (IS_ERR(op))
+ return PTR_ERR(op);
- key = afs_request_key(dvnode->volume->cell);
- if (IS_ERR(key)) {
- ret = PTR_ERR(key);
- goto error;
- }
+ afs_op_set_vnode(op, 0, dvnode);
+ op->file[0].dv_delta = 1;
+
+ op->dentry = dentry;
+ op->ops = &afs_rmdir_operation;
/* Try to make sure we have a callback promise on the victim. */
if (d_really_is_positive(dentry)) {
vnode = AFS_FS_I(d_inode(dentry));
- ret = afs_validate(vnode, key);
+ ret = afs_validate(vnode, op->key);
if (ret < 0)
- goto error_key;
+ goto error;
}
if (vnode) {
ret = down_write_killable(&vnode->rmdir_lock);
if (ret < 0)
- goto error_key;
+ goto error;
+ op->file[1].vnode = vnode;
}
- ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, dvnode, key, true)) {
- data_version = dvnode->status.data_version + 1;
-
- while (afs_select_fileserver(&fc)) {
- fc.cb_break = afs_calc_vnode_cb_break(dvnode);
- afs_fs_remove(&fc, vnode, dentry->d_name.name, true, scb);
- }
-
- afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
- &data_version, scb);
- afs_update_dentry_version(&fc, dentry, scb);
- ret = afs_end_vnode_operation(&fc);
- if (ret == 0) {
- afs_dir_remove_subdir(dentry);
- down_write(&dvnode->validate_lock);
- if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
- dvnode->status.data_version == data_version)
- afs_edit_dir_remove(dvnode, &dentry->d_name,
- afs_edit_dir_for_rmdir);
- up_write(&dvnode->validate_lock);
- }
- }
+ return afs_do_sync_operation(op);
- if (vnode)
- up_write(&vnode->rmdir_lock);
-error_key:
- key_put(key);
error:
- kfree(scb);
- return ret;
+ return afs_put_operation(op);
}
/*
@@ -1438,52 +1439,90 @@ error:
* However, if we didn't have a callback promise outstanding, or it was
* outstanding on a different server, then it won't break it either...
*/
-static int afs_dir_remove_link(struct afs_vnode *dvnode, struct dentry *dentry,
- struct key *key)
+static void afs_dir_remove_link(struct afs_operation *op)
{
- int ret = 0;
+ struct afs_vnode *dvnode = op->file[0].vnode;
+ struct afs_vnode *vnode = op->file[1].vnode;
+ struct dentry *dentry = op->dentry;
+ int ret;
- if (d_really_is_positive(dentry)) {
- struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry));
+ if (op->error != 0 ||
+ (op->file[1].scb.have_status && op->file[1].scb.have_error))
+ return;
+ if (d_really_is_positive(dentry))
+ return;
- if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
- /* Already done */
- } else if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) {
- write_seqlock(&vnode->cb_lock);
- drop_nlink(&vnode->vfs_inode);
- if (vnode->vfs_inode.i_nlink == 0) {
- set_bit(AFS_VNODE_DELETED, &vnode->flags);
- __afs_break_callback(vnode, afs_cb_break_for_unlink);
- }
- write_sequnlock(&vnode->cb_lock);
- ret = 0;
- } else {
- afs_break_callback(vnode, afs_cb_break_for_unlink);
+ if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
+ /* Already done */
+ } else if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) {
+ write_seqlock(&vnode->cb_lock);
+ drop_nlink(&vnode->vfs_inode);
+ if (vnode->vfs_inode.i_nlink == 0) {
+ set_bit(AFS_VNODE_DELETED, &vnode->flags);
+ __afs_break_callback(vnode, afs_cb_break_for_unlink);
+ }
+ write_sequnlock(&vnode->cb_lock);
+ } else {
+ afs_break_callback(vnode, afs_cb_break_for_unlink);
- if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
- kdebug("AFS_VNODE_DELETED");
+ if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
+ _debug("AFS_VNODE_DELETED");
- ret = afs_validate(vnode, key);
- if (ret == -ESTALE)
- ret = 0;
- }
- _debug("nlink %d [val %d]", vnode->vfs_inode.i_nlink, ret);
+ ret = afs_validate(vnode, op->key);
+ if (ret != -ESTALE)
+ op->error = ret;
}
- return ret;
+ _debug("nlink %d [val %d]", vnode->vfs_inode.i_nlink, op->error);
+}
+
+static void afs_unlink_success(struct afs_operation *op)
+{
+ _enter("op=%08x", op->debug_id);
+ afs_check_for_remote_deletion(op, op->file[0].vnode);
+ afs_vnode_commit_status(op, &op->file[0]);
+ afs_vnode_commit_status(op, &op->file[1]);
+ afs_update_dentry_version(op, &op->file[0], op->dentry);
+ afs_dir_remove_link(op);
+}
+
+static void afs_unlink_edit_dir(struct afs_operation *op)
+{
+ struct afs_vnode_param *dvp = &op->file[0];
+ struct afs_vnode *dvnode = dvp->vnode;
+
+ _enter("op=%08x", op->debug_id);
+ down_write(&dvnode->validate_lock);
+ if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
+ dvnode->status.data_version == dvp->dv_before + dvp->dv_delta)
+ afs_edit_dir_remove(dvnode, &op->dentry->d_name,
+ afs_edit_dir_for_unlink);
+ up_write(&dvnode->validate_lock);
+}
+
+static void afs_unlink_put(struct afs_operation *op)
+{
+ _enter("op=%08x", op->debug_id);
+ if (op->unlink.need_rehash && op->error < 0 && op->error != -ENOENT)
+ d_rehash(op->dentry);
}
+static const struct afs_operation_ops afs_unlink_operation = {
+ .issue_afs_rpc = afs_fs_remove_file,
+ .issue_yfs_rpc = yfs_fs_remove_file,
+ .success = afs_unlink_success,
+ .edit_dir = afs_unlink_edit_dir,
+ .put = afs_unlink_put,
+};
+
/*
* Remove a file or symlink from an AFS filesystem.
*/
static int afs_unlink(struct inode *dir, struct dentry *dentry)
{
- struct afs_fs_cursor fc;
- struct afs_status_cb *scb;
+ struct afs_operation *op;
struct afs_vnode *dvnode = AFS_FS_I(dir);
struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry));
- struct key *key;
- bool need_rehash = false;
int ret;
_enter("{%llx:%llu},{%pd}",
@@ -1492,269 +1531,176 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
if (dentry->d_name.len >= AFSNAMEMAX)
return -ENAMETOOLONG;
- ret = -ENOMEM;
- scb = kcalloc(2, sizeof(struct afs_status_cb), GFP_KERNEL);
- if (!scb)
- goto error;
+ op = afs_alloc_operation(NULL, dvnode->volume);
+ if (IS_ERR(op))
+ return PTR_ERR(op);
- key = afs_request_key(dvnode->volume->cell);
- if (IS_ERR(key)) {
- ret = PTR_ERR(key);
- goto error_scb;
- }
+ afs_op_set_vnode(op, 0, dvnode);
+ op->file[0].dv_delta = 1;
/* Try to make sure we have a callback promise on the victim. */
- ret = afs_validate(vnode, key);
- if (ret < 0)
- goto error_key;
+ ret = afs_validate(vnode, op->key);
+ if (ret < 0) {
+ op->error = ret;
+ goto error;
+ }
spin_lock(&dentry->d_lock);
if (d_count(dentry) > 1) {
spin_unlock(&dentry->d_lock);
/* Start asynchronous writeout of the inode */
write_inode_now(d_inode(dentry), 0);
- ret = afs_sillyrename(dvnode, vnode, dentry, key);
- goto error_key;
+ op->error = afs_sillyrename(dvnode, vnode, dentry, op->key);
+ goto error;
}
if (!d_unhashed(dentry)) {
/* Prevent a race with RCU lookup. */
__d_drop(dentry);
- need_rehash = true;
+ op->unlink.need_rehash = true;
}
spin_unlock(&dentry->d_lock);
- ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, dvnode, key, true)) {
- afs_dataversion_t data_version = dvnode->status.data_version + 1;
- afs_dataversion_t data_version_2 = vnode->status.data_version;
-
- while (afs_select_fileserver(&fc)) {
- fc.cb_break = afs_calc_vnode_cb_break(dvnode);
- fc.cb_break_2 = afs_calc_vnode_cb_break(vnode);
-
- if (test_bit(AFS_SERVER_FL_IS_YFS, &fc.cbi->server->flags) &&
- !test_bit(AFS_SERVER_FL_NO_RM2, &fc.cbi->server->flags)) {
- yfs_fs_remove_file2(&fc, vnode, dentry->d_name.name,
- &scb[0], &scb[1]);
- if (fc.ac.error != -ECONNABORTED ||
- fc.ac.abort_code != RXGEN_OPCODE)
- continue;
- set_bit(AFS_SERVER_FL_NO_RM2, &fc.cbi->server->flags);
- }
-
- afs_fs_remove(&fc, vnode, dentry->d_name.name, false, &scb[0]);
- }
+ op->file[1].vnode = vnode;
+ op->dentry = dentry;
+ op->ops = &afs_unlink_operation;
+ return afs_do_sync_operation(op);
- afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
- &data_version, &scb[0]);
- afs_vnode_commit_status(&fc, vnode, fc.cb_break_2,
- &data_version_2, &scb[1]);
- afs_update_dentry_version(&fc, dentry, &scb[0]);
- ret = afs_end_vnode_operation(&fc);
- if (ret == 0 && !(scb[1].have_status || scb[1].have_error))
- ret = afs_dir_remove_link(dvnode, dentry, key);
-
- if (ret == 0) {
- down_write(&dvnode->validate_lock);
- if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
- dvnode->status.data_version == data_version)
- afs_edit_dir_remove(dvnode, &dentry->d_name,
- afs_edit_dir_for_unlink);
- up_write(&dvnode->validate_lock);
- }
- }
-
- if (need_rehash && ret < 0 && ret != -ENOENT)
- d_rehash(dentry);
-
-error_key:
- key_put(key);
-error_scb:
- kfree(scb);
error:
- _leave(" = %d", ret);
- return ret;
+ return afs_put_operation(op);
}
+static const struct afs_operation_ops afs_create_operation = {
+ .issue_afs_rpc = afs_fs_create_file,
+ .issue_yfs_rpc = yfs_fs_create_file,
+ .success = afs_create_success,
+ .edit_dir = afs_create_edit_dir,
+ .put = afs_create_put,
+};
+
/*
* create a regular file on an AFS filesystem
*/
static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
bool excl)
{
- struct afs_iget_data iget_data;
- struct afs_fs_cursor fc;
- struct afs_status_cb *scb;
+ struct afs_operation *op;
struct afs_vnode *dvnode = AFS_FS_I(dir);
- struct key *key;
- afs_dataversion_t data_version;
- int ret;
-
- mode |= S_IFREG;
+ int ret = -ENAMETOOLONG;
- _enter("{%llx:%llu},{%pd},%ho,",
+ _enter("{%llx:%llu},{%pd},%ho",
dvnode->fid.vid, dvnode->fid.vnode, dentry, mode);
- ret = -ENAMETOOLONG;
if (dentry->d_name.len >= AFSNAMEMAX)
goto error;
- key = afs_request_key(dvnode->volume->cell);
- if (IS_ERR(key)) {
- ret = PTR_ERR(key);
+ op = afs_alloc_operation(NULL, dvnode->volume);
+ if (IS_ERR(op)) {
+ ret = PTR_ERR(op);
goto error;
}
- ret = -ENOMEM;
- scb = kcalloc(2, sizeof(struct afs_status_cb), GFP_KERNEL);
- if (!scb)
- goto error_scb;
+ afs_op_set_vnode(op, 0, dvnode);
+ op->file[0].dv_delta = 1;
- ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, dvnode, key, true)) {
- data_version = dvnode->status.data_version + 1;
-
- while (afs_select_fileserver(&fc)) {
- fc.cb_break = afs_calc_vnode_cb_break(dvnode);
- afs_prep_for_new_inode(&fc, &iget_data);
- afs_fs_create(&fc, dentry->d_name.name, mode,
- &scb[0], &iget_data.fid, &scb[1]);
- }
+ op->dentry = dentry;
+ op->create.mode = S_IFREG | mode;
+ op->create.reason = afs_edit_dir_for_create;
+ op->ops = &afs_create_operation;
+ return afs_do_sync_operation(op);
- afs_check_for_remote_deletion(&fc, dvnode);
- afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
- &data_version, &scb[0]);
- afs_update_dentry_version(&fc, dentry, &scb[0]);
- afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]);
- ret = afs_end_vnode_operation(&fc);
- if (ret < 0)
- goto error_key;
- } else {
- goto error_key;
- }
-
- down_write(&dvnode->validate_lock);
- if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
- dvnode->status.data_version == data_version)
- afs_edit_dir_add(dvnode, &dentry->d_name, &iget_data.fid,
- afs_edit_dir_for_create);
- up_write(&dvnode->validate_lock);
-
- kfree(scb);
- key_put(key);
- _leave(" = 0");
- return 0;
-
-error_scb:
- kfree(scb);
-error_key:
- key_put(key);
error:
d_drop(dentry);
_leave(" = %d", ret);
return ret;
}
+static void afs_link_success(struct afs_operation *op)
+{
+ struct afs_vnode_param *dvp = &op->file[0];
+ struct afs_vnode_param *vp = &op->file[1];
+
+ _enter("op=%08x", op->debug_id);
+ afs_vnode_commit_status(op, dvp);
+ afs_vnode_commit_status(op, vp);
+ afs_update_dentry_version(op, dvp, op->dentry);
+ if (op->dentry_2->d_parent == op->dentry->d_parent)
+ afs_update_dentry_version(op, dvp, op->dentry_2);
+ ihold(&vp->vnode->vfs_inode);
+ d_instantiate(op->dentry, &vp->vnode->vfs_inode);
+}
+
+static void afs_link_put(struct afs_operation *op)
+{
+ _enter("op=%08x", op->debug_id);
+ if (op->error)
+ d_drop(op->dentry);
+}
+
+static const struct afs_operation_ops afs_link_operation = {
+ .issue_afs_rpc = afs_fs_link,
+ .issue_yfs_rpc = yfs_fs_link,
+ .success = afs_link_success,
+ .edit_dir = afs_create_edit_dir,
+ .put = afs_link_put,
+};
+
/*
* create a hard link between files in an AFS filesystem
*/
static int afs_link(struct dentry *from, struct inode *dir,
struct dentry *dentry)
{
- struct afs_fs_cursor fc;
- struct afs_status_cb *scb;
+ struct afs_operation *op;
struct afs_vnode *dvnode = AFS_FS_I(dir);
struct afs_vnode *vnode = AFS_FS_I(d_inode(from));
- struct key *key;
- afs_dataversion_t data_version;
- int ret;
+ int ret = -ENAMETOOLONG;
_enter("{%llx:%llu},{%llx:%llu},{%pd}",
vnode->fid.vid, vnode->fid.vnode,
dvnode->fid.vid, dvnode->fid.vnode,
dentry);
- ret = -ENAMETOOLONG;
if (dentry->d_name.len >= AFSNAMEMAX)
goto error;
- ret = -ENOMEM;
- scb = kcalloc(2, sizeof(struct afs_status_cb), GFP_KERNEL);
- if (!scb)
+ op = afs_alloc_operation(NULL, dvnode->volume);
+ if (IS_ERR(op)) {
+ ret = PTR_ERR(op);
goto error;
-
- key = afs_request_key(dvnode->volume->cell);
- if (IS_ERR(key)) {
- ret = PTR_ERR(key);
- goto error_scb;
- }
-
- ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, dvnode, key, true)) {
- data_version = dvnode->status.data_version + 1;
-
- if (mutex_lock_interruptible_nested(&vnode->io_lock, 1) < 0) {
- afs_end_vnode_operation(&fc);
- goto error_key;
- }
-
- while (afs_select_fileserver(&fc)) {
- fc.cb_break = afs_calc_vnode_cb_break(dvnode);
- fc.cb_break_2 = afs_calc_vnode_cb_break(vnode);
- afs_fs_link(&fc, vnode, dentry->d_name.name,
- &scb[0], &scb[1]);
- }
-
- afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
- &data_version, &scb[0]);
- afs_vnode_commit_status(&fc, vnode, fc.cb_break_2,
- NULL, &scb[1]);
- ihold(&vnode->vfs_inode);
- afs_update_dentry_version(&fc, dentry, &scb[0]);
- d_instantiate(dentry, &vnode->vfs_inode);
-
- mutex_unlock(&vnode->io_lock);
- ret = afs_end_vnode_operation(&fc);
- if (ret < 0)
- goto error_key;
- } else {
- goto error_key;
}
- down_write(&dvnode->validate_lock);
- if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
- dvnode->status.data_version == data_version)
- afs_edit_dir_add(dvnode, &dentry->d_name, &vnode->fid,
- afs_edit_dir_for_link);
- up_write(&dvnode->validate_lock);
+ afs_op_set_vnode(op, 0, dvnode);
+ afs_op_set_vnode(op, 1, vnode);
+ op->file[0].dv_delta = 1;
- key_put(key);
- kfree(scb);
- _leave(" = 0");
- return 0;
+ op->dentry = dentry;
+ op->dentry_2 = from;
+ op->ops = &afs_link_operation;
+ op->create.reason = afs_edit_dir_for_link;
+ return afs_do_sync_operation(op);
-error_key:
- key_put(key);
-error_scb:
- kfree(scb);
error:
d_drop(dentry);
_leave(" = %d", ret);
return ret;
}
+static const struct afs_operation_ops afs_symlink_operation = {
+ .issue_afs_rpc = afs_fs_symlink,
+ .issue_yfs_rpc = yfs_fs_symlink,
+ .success = afs_create_success,
+ .edit_dir = afs_create_edit_dir,
+ .put = afs_create_put,
+};
+
/*
* create a symlink in an AFS filesystem
*/
static int afs_symlink(struct inode *dir, struct dentry *dentry,
const char *content)
{
- struct afs_iget_data iget_data;
- struct afs_fs_cursor fc;
- struct afs_status_cb *scb;
+ struct afs_operation *op;
struct afs_vnode *dvnode = AFS_FS_I(dir);
- struct key *key;
- afs_dataversion_t data_version;
int ret;
_enter("{%llx:%llu},{%pd},%s",
@@ -1769,62 +1715,115 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry,
if (strlen(content) >= AFSPATHMAX)
goto error;
- ret = -ENOMEM;
- scb = kcalloc(2, sizeof(struct afs_status_cb), GFP_KERNEL);
- if (!scb)
+ op = afs_alloc_operation(NULL, dvnode->volume);
+ if (IS_ERR(op)) {
+ ret = PTR_ERR(op);
goto error;
-
- key = afs_request_key(dvnode->volume->cell);
- if (IS_ERR(key)) {
- ret = PTR_ERR(key);
- goto error_scb;
}
- ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, dvnode, key, true)) {
- data_version = dvnode->status.data_version + 1;
-
- while (afs_select_fileserver(&fc)) {
- fc.cb_break = afs_calc_vnode_cb_break(dvnode);
- afs_prep_for_new_inode(&fc, &iget_data);
- afs_fs_symlink(&fc, dentry->d_name.name, content,
- &scb[0], &iget_data.fid, &scb[1]);
- }
+ afs_op_set_vnode(op, 0, dvnode);
+ op->file[0].dv_delta = 1;
- afs_check_for_remote_deletion(&fc, dvnode);
- afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
- &data_version, &scb[0]);
- afs_update_dentry_version(&fc, dentry, &scb[0]);
- afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]);
- ret = afs_end_vnode_operation(&fc);
- if (ret < 0)
- goto error_key;
- } else {
- goto error_key;
- }
-
- down_write(&dvnode->validate_lock);
- if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
- dvnode->status.data_version == data_version)
- afs_edit_dir_add(dvnode, &dentry->d_name, &iget_data.fid,
- afs_edit_dir_for_symlink);
- up_write(&dvnode->validate_lock);
+ op->dentry = dentry;
+ op->ops = &afs_symlink_operation;
+ op->create.reason = afs_edit_dir_for_symlink;
+ op->create.symlink = content;
+ return afs_do_sync_operation(op);
- key_put(key);
- kfree(scb);
- _leave(" = 0");
- return 0;
-
-error_key:
- key_put(key);
-error_scb:
- kfree(scb);
error:
d_drop(dentry);
_leave(" = %d", ret);
return ret;
}
+static void afs_rename_success(struct afs_operation *op)
+{
+ _enter("op=%08x", op->debug_id);
+
+ afs_vnode_commit_status(op, &op->file[0]);
+ if (op->file[1].vnode != op->file[0].vnode)
+ afs_vnode_commit_status(op, &op->file[1]);
+}
+
+static void afs_rename_edit_dir(struct afs_operation *op)
+{
+ struct afs_vnode_param *orig_dvp = &op->file[0];
+ struct afs_vnode_param *new_dvp = &op->file[1];
+ struct afs_vnode *orig_dvnode = orig_dvp->vnode;
+ struct afs_vnode *new_dvnode = new_dvp->vnode;
+ struct afs_vnode *vnode = AFS_FS_I(d_inode(op->dentry));
+ struct dentry *old_dentry = op->dentry;
+ struct dentry *new_dentry = op->dentry_2;
+ struct inode *new_inode;
+
+ _enter("op=%08x", op->debug_id);
+
+ if (op->rename.rehash) {
+ d_rehash(op->rename.rehash);
+ op->rename.rehash = NULL;
+ }
+
+ down_write(&orig_dvnode->validate_lock);
+ if (test_bit(AFS_VNODE_DIR_VALID, &orig_dvnode->flags) &&
+ orig_dvnode->status.data_version == orig_dvp->dv_before + orig_dvp->dv_delta)
+ afs_edit_dir_remove(orig_dvnode, &old_dentry->d_name,
+ afs_edit_dir_for_rename_0);
+
+ if (new_dvnode != orig_dvnode) {
+ up_write(&orig_dvnode->validate_lock);
+ down_write(&new_dvnode->validate_lock);
+ }
+
+ if (test_bit(AFS_VNODE_DIR_VALID, &new_dvnode->flags) &&
+ new_dvnode->status.data_version == new_dvp->dv_before + new_dvp->dv_delta) {
+ if (!op->rename.new_negative)
+ afs_edit_dir_remove(new_dvnode, &new_dentry->d_name,
+ afs_edit_dir_for_rename_1);
+
+ afs_edit_dir_add(new_dvnode, &new_dentry->d_name,
+ &vnode->fid, afs_edit_dir_for_rename_2);
+ }
+
+ new_inode = d_inode(new_dentry);
+ if (new_inode) {
+ spin_lock(&new_inode->i_lock);
+ if (new_inode->i_nlink > 0)
+ drop_nlink(new_inode);
+ spin_unlock(&new_inode->i_lock);
+ }
+
+ /* Now we can update d_fsdata on the dentries to reflect their
+ * new parent's data_version.
+ *
+ * Note that if we ever implement RENAME_EXCHANGE, we'll have
+ * to update both dentries with opposing dir versions.
+ */
+ afs_update_dentry_version(op, new_dvp, op->dentry);
+ afs_update_dentry_version(op, new_dvp, op->dentry_2);
+
+ d_move(old_dentry, new_dentry);
+
+ up_write(&new_dvnode->validate_lock);
+}
+
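Note that when source and destination directories differ, their validate_locks are never held simultaneously: the lock is handed off from the old parent to the new one, so no ordering rule between directory locks is needed. The discipline in isolation:

/* Sketch: edit two cached directory images with a lock handoff. */
down_write(&orig_dvnode->validate_lock);
/* ... remove the old name from orig_dvnode's cached image ... */
if (new_dvnode != orig_dvnode) {
	up_write(&orig_dvnode->validate_lock);
	down_write(&new_dvnode->validate_lock);
}
/* ... add the new name to new_dvnode's cached image ... */
up_write(&new_dvnode->validate_lock);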
+static void afs_rename_put(struct afs_operation *op)
+{
+ _enter("op=%08x", op->debug_id);
+ if (op->rename.rehash)
+ d_rehash(op->rename.rehash);
+ dput(op->rename.tmp);
+ if (op->error)
+ d_rehash(op->dentry);
+}
+
+static const struct afs_operation_ops afs_rename_operation = {
+ .issue_afs_rpc = afs_fs_rename,
+ .issue_yfs_rpc = yfs_fs_rename,
+ .success = afs_rename_success,
+ .edit_dir = afs_rename_edit_dir,
+ .put = afs_rename_put,
+};
+
/*
* rename a file in an AFS filesystem and/or move it between directories
*/
@@ -1832,15 +1831,8 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
{
- struct afs_fs_cursor fc;
- struct afs_status_cb *scb;
+ struct afs_operation *op;
struct afs_vnode *orig_dvnode, *new_dvnode, *vnode;
- struct dentry *tmp = NULL, *rehash = NULL;
- struct inode *new_inode;
- struct key *key;
- afs_dataversion_t orig_data_version;
- afs_dataversion_t new_data_version;
- bool new_negative = d_is_negative(new_dentry);
int ret;
if (flags)
@@ -1860,16 +1852,19 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
new_dvnode->fid.vid, new_dvnode->fid.vnode,
new_dentry);
- ret = -ENOMEM;
- scb = kcalloc(2, sizeof(struct afs_status_cb), GFP_KERNEL);
- if (!scb)
- goto error;
+ op = afs_alloc_operation(NULL, orig_dvnode->volume);
+ if (IS_ERR(op))
+ return PTR_ERR(op);
- key = afs_request_key(orig_dvnode->volume->cell);
- if (IS_ERR(key)) {
- ret = PTR_ERR(key);
- goto error_scb;
- }
+ afs_op_set_vnode(op, 0, orig_dvnode);
+ afs_op_set_vnode(op, 1, new_dvnode); /* May be same as orig_dvnode */
+ op->file[0].dv_delta = 1;
+ op->file[1].dv_delta = 1;
+
+ op->dentry = old_dentry;
+ op->dentry_2 = new_dentry;
+ op->rename.new_negative = d_is_negative(new_dentry);
+ op->ops = &afs_rename_operation;
/* For non-directories, check whether the target is busy and if so,
* make a copy of the dentry and then do a silly-rename. If the
@@ -1882,26 +1877,26 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
*/
if (!d_unhashed(new_dentry)) {
d_drop(new_dentry);
- rehash = new_dentry;
+ op->rename.rehash = new_dentry;
}
if (d_count(new_dentry) > 2) {
/* copy the target dentry's name */
ret = -ENOMEM;
- tmp = d_alloc(new_dentry->d_parent,
- &new_dentry->d_name);
- if (!tmp)
- goto error_rehash;
+ op->rename.tmp = d_alloc(new_dentry->d_parent,
+ &new_dentry->d_name);
+ if (!op->rename.tmp)
+ goto error;
ret = afs_sillyrename(new_dvnode,
AFS_FS_I(d_inode(new_dentry)),
- new_dentry, key);
+ new_dentry, op->key);
if (ret)
- goto error_rehash;
+ goto error;
- new_dentry = tmp;
- rehash = NULL;
- new_negative = true;
+ op->dentry_2 = op->rename.tmp;
+ op->rename.rehash = NULL;
+ op->rename.new_negative = true;
}
}
@@ -1916,98 +1911,10 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
*/
d_drop(old_dentry);
- ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, orig_dvnode, key, true)) {
- orig_data_version = orig_dvnode->status.data_version + 1;
-
- if (orig_dvnode != new_dvnode) {
- if (mutex_lock_interruptible_nested(&new_dvnode->io_lock, 1) < 0) {
- afs_end_vnode_operation(&fc);
- goto error_rehash_old;
- }
- new_data_version = new_dvnode->status.data_version + 1;
- } else {
- new_data_version = orig_data_version;
- }
-
- while (afs_select_fileserver(&fc)) {
- fc.cb_break = afs_calc_vnode_cb_break(orig_dvnode);
- fc.cb_break_2 = afs_calc_vnode_cb_break(new_dvnode);
- afs_fs_rename(&fc, old_dentry->d_name.name,
- new_dvnode, new_dentry->d_name.name,
- &scb[0], &scb[1]);
- }
+ return afs_do_sync_operation(op);
- afs_vnode_commit_status(&fc, orig_dvnode, fc.cb_break,
- &orig_data_version, &scb[0]);
- if (new_dvnode != orig_dvnode) {
- afs_vnode_commit_status(&fc, new_dvnode, fc.cb_break_2,
- &new_data_version, &scb[1]);
- mutex_unlock(&new_dvnode->io_lock);
- }
- ret = afs_end_vnode_operation(&fc);
- if (ret < 0)
- goto error_rehash_old;
- }
-
- if (ret == 0) {
- if (rehash)
- d_rehash(rehash);
- down_write(&orig_dvnode->validate_lock);
- if (test_bit(AFS_VNODE_DIR_VALID, &orig_dvnode->flags) &&
- orig_dvnode->status.data_version == orig_data_version)
- afs_edit_dir_remove(orig_dvnode, &old_dentry->d_name,
- afs_edit_dir_for_rename_0);
- if (orig_dvnode != new_dvnode) {
- up_write(&orig_dvnode->validate_lock);
-
- down_write(&new_dvnode->validate_lock);
- }
- if (test_bit(AFS_VNODE_DIR_VALID, &new_dvnode->flags) &&
- orig_dvnode->status.data_version == new_data_version) {
- if (!new_negative)
- afs_edit_dir_remove(new_dvnode, &new_dentry->d_name,
- afs_edit_dir_for_rename_1);
-
- afs_edit_dir_add(new_dvnode, &new_dentry->d_name,
- &vnode->fid, afs_edit_dir_for_rename_2);
- }
-
- new_inode = d_inode(new_dentry);
- if (new_inode) {
- spin_lock(&new_inode->i_lock);
- if (new_inode->i_nlink > 0)
- drop_nlink(new_inode);
- spin_unlock(&new_inode->i_lock);
- }
-
- /* Now we can update d_fsdata on the dentries to reflect their
- * new parent's data_version.
- *
- * Note that if we ever implement RENAME_EXCHANGE, we'll have
- * to update both dentries with opposing dir versions.
- */
- afs_update_dentry_version(&fc, old_dentry, &scb[1]);
- afs_update_dentry_version(&fc, new_dentry, &scb[1]);
- d_move(old_dentry, new_dentry);
- up_write(&new_dvnode->validate_lock);
- goto error_tmp;
- }
-
-error_rehash_old:
- d_rehash(new_dentry);
-error_rehash:
- if (rehash)
- d_rehash(rehash);
-error_tmp:
- if (tmp)
- dput(tmp);
- key_put(key);
-error_scb:
- kfree(scb);
error:
- _leave(" = %d", ret);
- return ret;
+ return afs_put_operation(op);
}
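Both exits from the rewritten rename converge on the operation, so cleanup that previously needed a ladder of error labels now lives in the ->put hook. The calling convention, in brief (the failure condition is illustrative):

if (ret < 0) {				/* some local setup failure */
	op->error = ret;
	return afs_put_operation(op);	/* runs ->put, returns op->error */
}
return afs_do_sync_operation(op);	/* begin + wait + put */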
/*
diff --git a/fs/afs/dir_silly.c b/fs/afs/dir_silly.c
index d94e2b7cddff..b14e3d9a25e2 100644
--- a/fs/afs/dir_silly.c
+++ b/fs/afs/dir_silly.c
@@ -12,6 +12,47 @@
#include <linux/fsnotify.h>
#include "internal.h"
+static void afs_silly_rename_success(struct afs_operation *op)
+{
+ _enter("op=%08x", op->debug_id);
+
+ afs_vnode_commit_status(op, &op->file[0]);
+}
+
+static void afs_silly_rename_edit_dir(struct afs_operation *op)
+{
+ struct afs_vnode_param *dvp = &op->file[0];
+ struct afs_vnode *dvnode = dvp->vnode;
+ struct afs_vnode *vnode = AFS_FS_I(d_inode(op->dentry));
+ struct dentry *old = op->dentry;
+ struct dentry *new = op->dentry_2;
+
+ spin_lock(&old->d_lock);
+ old->d_flags |= DCACHE_NFSFS_RENAMED;
+ spin_unlock(&old->d_lock);
+ if (dvnode->silly_key != op->key) {
+ key_put(dvnode->silly_key);
+ dvnode->silly_key = key_get(op->key);
+ }
+
+ down_write(&dvnode->validate_lock);
+ if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
+ dvnode->status.data_version == dvp->dv_before + dvp->dv_delta) {
+ afs_edit_dir_remove(dvnode, &old->d_name,
+ afs_edit_dir_for_silly_0);
+ afs_edit_dir_add(dvnode, &new->d_name,
+ &vnode->fid, afs_edit_dir_for_silly_1);
+ }
+ up_write(&dvnode->validate_lock);
+}
+
+static const struct afs_operation_ops afs_silly_rename_operation = {
+ .issue_afs_rpc = afs_fs_rename,
+ .issue_yfs_rpc = yfs_fs_rename,
+ .success = afs_silly_rename_success,
+ .edit_dir = afs_silly_rename_edit_dir,
+};
+
/*
* Actually perform the silly rename step.
*/
@@ -19,56 +60,22 @@ static int afs_do_silly_rename(struct afs_vnode *dvnode, struct afs_vnode *vnode
struct dentry *old, struct dentry *new,
struct key *key)
{
- struct afs_fs_cursor fc;
- struct afs_status_cb *scb;
- afs_dataversion_t dir_data_version;
- int ret = -ERESTARTSYS;
+ struct afs_operation *op;
_enter("%pd,%pd", old, new);
- scb = kzalloc(sizeof(struct afs_status_cb), GFP_KERNEL);
- if (!scb)
- return -ENOMEM;
+ op = afs_alloc_operation(key, dvnode->volume);
+ if (IS_ERR(op))
+ return PTR_ERR(op);
- trace_afs_silly_rename(vnode, false);
- if (afs_begin_vnode_operation(&fc, dvnode, key, true)) {
- dir_data_version = dvnode->status.data_version + 1;
-
- while (afs_select_fileserver(&fc)) {
- fc.cb_break = afs_calc_vnode_cb_break(dvnode);
- afs_fs_rename(&fc, old->d_name.name,
- dvnode, new->d_name.name,
- scb, scb);
- }
-
- afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
- &dir_data_version, scb);
- ret = afs_end_vnode_operation(&fc);
- }
+ afs_op_set_vnode(op, 0, dvnode);
- if (ret == 0) {
- spin_lock(&old->d_lock);
- old->d_flags |= DCACHE_NFSFS_RENAMED;
- spin_unlock(&old->d_lock);
- if (dvnode->silly_key != key) {
- key_put(dvnode->silly_key);
- dvnode->silly_key = key_get(key);
- }
-
- down_write(&dvnode->validate_lock);
- if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
- dvnode->status.data_version == dir_data_version) {
- afs_edit_dir_remove(dvnode, &old->d_name,
- afs_edit_dir_for_silly_0);
- afs_edit_dir_add(dvnode, &new->d_name,
- &vnode->fid, afs_edit_dir_for_silly_1);
- }
- up_write(&dvnode->validate_lock);
- }
+ op->dentry = old;
+ op->dentry_2 = new;
+ op->ops = &afs_silly_rename_operation;
- kfree(scb);
- _leave(" = %d", ret);
- return ret;
+ trace_afs_silly_rename(vnode, false);
+ return afs_do_sync_operation(op);
}
/**
@@ -139,65 +146,66 @@ out:
return ret;
}
+static void afs_silly_unlink_success(struct afs_operation *op)
+{
+ struct afs_vnode *vnode = op->file[1].vnode;
+
+ _enter("op=%08x", op->debug_id);
+ afs_check_for_remote_deletion(op, op->file[0].vnode);
+ afs_vnode_commit_status(op, &op->file[0]);
+ afs_vnode_commit_status(op, &op->file[1]);
+ afs_update_dentry_version(op, &op->file[0], op->dentry);
+
+ drop_nlink(&vnode->vfs_inode);
+ if (vnode->vfs_inode.i_nlink == 0) {
+ set_bit(AFS_VNODE_DELETED, &vnode->flags);
+ clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
+ }
+}
+
+static void afs_silly_unlink_edit_dir(struct afs_operation *op)
+{
+ struct afs_vnode_param *dvp = &op->file[0];
+ struct afs_vnode *dvnode = dvp->vnode;
+
+ _enter("op=%08x", op->debug_id);
+ down_write(&dvnode->validate_lock);
+ if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
+ dvnode->status.data_version == dvp->dv_before + dvp->dv_delta)
+ afs_edit_dir_remove(dvnode, &op->dentry->d_name,
+ afs_edit_dir_for_unlink);
+ up_write(&dvnode->validate_lock);
+}
+
+static const struct afs_operation_ops afs_silly_unlink_operation = {
+ .issue_afs_rpc = afs_fs_remove_file,
+ .issue_yfs_rpc = yfs_fs_remove_file,
+ .success = afs_silly_unlink_success,
+ .edit_dir = afs_silly_unlink_edit_dir,
+};
+
/*
* Tell the server to remove a sillyrename file.
*/
static int afs_do_silly_unlink(struct afs_vnode *dvnode, struct afs_vnode *vnode,
struct dentry *dentry, struct key *key)
{
- struct afs_fs_cursor fc;
- struct afs_status_cb *scb;
- int ret = -ERESTARTSYS;
+ struct afs_operation *op;
_enter("");
- scb = kcalloc(2, sizeof(struct afs_status_cb), GFP_KERNEL);
- if (!scb)
- return -ENOMEM;
+ op = afs_alloc_operation(NULL, dvnode->volume);
+ if (IS_ERR(op))
+ return PTR_ERR(op);
- trace_afs_silly_rename(vnode, true);
- if (afs_begin_vnode_operation(&fc, dvnode, key, false)) {
- afs_dataversion_t dir_data_version = dvnode->status.data_version + 1;
-
- while (afs_select_fileserver(&fc)) {
- fc.cb_break = afs_calc_vnode_cb_break(dvnode);
-
- if (test_bit(AFS_SERVER_FL_IS_YFS, &fc.cbi->server->flags) &&
- !test_bit(AFS_SERVER_FL_NO_RM2, &fc.cbi->server->flags)) {
- yfs_fs_remove_file2(&fc, vnode, dentry->d_name.name,
- &scb[0], &scb[1]);
- if (fc.ac.error != -ECONNABORTED ||
- fc.ac.abort_code != RXGEN_OPCODE)
- continue;
- set_bit(AFS_SERVER_FL_NO_RM2, &fc.cbi->server->flags);
- }
-
- afs_fs_remove(&fc, vnode, dentry->d_name.name, false, &scb[0]);
- }
+ afs_op_set_vnode(op, 0, dvnode);
+ afs_op_set_vnode(op, 1, vnode);
- afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
- &dir_data_version, &scb[0]);
- ret = afs_end_vnode_operation(&fc);
- if (ret == 0) {
- drop_nlink(&vnode->vfs_inode);
- if (vnode->vfs_inode.i_nlink == 0) {
- set_bit(AFS_VNODE_DELETED, &vnode->flags);
- clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
- }
- }
- if (ret == 0) {
- down_write(&dvnode->validate_lock);
- if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags) &&
- dvnode->status.data_version == dir_data_version)
- afs_edit_dir_remove(dvnode, &dentry->d_name,
- afs_edit_dir_for_unlink);
- up_write(&dvnode->validate_lock);
- }
- }
+ op->dentry = dentry;
+ op->ops = &afs_silly_unlink_operation;
- kfree(scb);
- _leave(" = %d", ret);
- return ret;
+ trace_afs_silly_rename(vnode, true);
+ return afs_do_sync_operation(op);
}
/*
diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
index 7503899c0a1b..b79879aacc02 100644
--- a/fs/afs/dynroot.c
+++ b/fs/afs/dynroot.c
@@ -10,6 +10,99 @@
#include <linux/dns_resolver.h>
#include "internal.h"
+static atomic_t afs_autocell_ino;
+
+/*
+ * iget5() comparator for inodes created by autocell operations
+ *
+ * These pseudo inodes don't match anything.
+ */
+static int afs_iget5_pseudo_test(struct inode *inode, void *opaque)
+{
+ return 0;
+}
+
+/*
+ * iget5() inode initialiser
+ */
+static int afs_iget5_pseudo_set(struct inode *inode, void *opaque)
+{
+ struct afs_super_info *as = AFS_FS_S(inode->i_sb);
+ struct afs_vnode *vnode = AFS_FS_I(inode);
+ struct afs_fid *fid = opaque;
+
+ vnode->volume = as->volume;
+ vnode->fid = *fid;
+ inode->i_ino = fid->vnode;
+ inode->i_generation = fid->unique;
+ return 0;
+}
+
+/*
+ * Create an inode for a dynamic root directory or an autocell dynamic
+ * automount dir.
+ */
+struct inode *afs_iget_pseudo_dir(struct super_block *sb, bool root)
+{
+ struct afs_super_info *as = AFS_FS_S(sb);
+ struct afs_vnode *vnode;
+ struct inode *inode;
+ struct afs_fid fid = {};
+
+ _enter("");
+
+ if (as->volume)
+ fid.vid = as->volume->vid;
+ if (root) {
+ fid.vnode = 1;
+ fid.unique = 1;
+ } else {
+ fid.vnode = atomic_inc_return(&afs_autocell_ino);
+ fid.unique = 0;
+ }
+
+ inode = iget5_locked(sb, fid.vnode,
+ afs_iget5_pseudo_test, afs_iget5_pseudo_set, &fid);
+ if (!inode) {
+ _leave(" = -ENOMEM");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ _debug("GOT INODE %p { ino=%lu, vl=%llx, vn=%llx, u=%x }",
+ inode, inode->i_ino, fid.vid, fid.vnode, fid.unique);
+
+ vnode = AFS_FS_I(inode);
+
+ /* there shouldn't be an existing inode */
+ BUG_ON(!(inode->i_state & I_NEW));
+
+ inode->i_size = 0;
+ inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO;
+ if (root) {
+ inode->i_op = &afs_dynroot_inode_operations;
+ inode->i_fop = &simple_dir_operations;
+ } else {
+ inode->i_op = &afs_autocell_inode_operations;
+ }
+ set_nlink(inode, 2);
+ inode->i_uid = GLOBAL_ROOT_UID;
+ inode->i_gid = GLOBAL_ROOT_GID;
+ inode->i_ctime = inode->i_atime = inode->i_mtime = current_time(inode);
+ inode->i_blocks = 0;
+ inode->i_generation = 0;
+
+ set_bit(AFS_VNODE_PSEUDODIR, &vnode->flags);
+ if (!root) {
+ set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags);
+ inode->i_flags |= S_AUTOMOUNT;
+ }
+
+ inode->i_flags |= S_NOATIME;
+ unlock_new_inode(inode);
+ _leave(" = %p", inode);
+ return inode;
+}
+
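Because the iget5 test always fails, every call allocates a fresh inode: the root gets vnode 1/unique 1 and autocell mountpoints draw vnode numbers from the global counter, so the I_NEW assertion holds. A hedged sketch of a fill-super-time caller (the d_make_root step is the usual VFS pattern, not a hunk shown here):

struct inode *inode = afs_iget_pseudo_dir(sb, true);

if (IS_ERR(inode))
	return PTR_ERR(inode);
sb->s_root = d_make_root(inode);	/* consumes the inode reference */
if (!sb->s_root)
	return -ENOMEM;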
/*
* Probe to see if a cell may exist. This prevents positive dentries from
* being created unnecessarily.
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 8415733f7bc1..506c47471b42 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -69,7 +69,7 @@ static const struct vm_operations_struct afs_vm_ops = {
*/
void afs_put_wb_key(struct afs_wb_key *wbk)
{
- if (refcount_dec_and_test(&wbk->usage)) {
+ if (wbk && refcount_dec_and_test(&wbk->usage)) {
key_put(wbk->key);
kfree(wbk);
}
@@ -220,14 +220,35 @@ static void afs_file_readpage_read_complete(struct page *page,
}
#endif
+static void afs_fetch_data_success(struct afs_operation *op)
+{
+ struct afs_vnode *vnode = op->file[0].vnode;
+
+ _enter("op=%08x", op->debug_id);
+ afs_check_for_remote_deletion(op, vnode);
+ afs_vnode_commit_status(op, &op->file[0]);
+ afs_stat_v(vnode, n_fetches);
+ atomic_long_add(op->fetch.req->actual_len, &op->net->n_fetch_bytes);
+}
+
+static void afs_fetch_data_put(struct afs_operation *op)
+{
+ afs_put_read(op->fetch.req);
+}
+
+static const struct afs_operation_ops afs_fetch_data_operation = {
+ .issue_afs_rpc = afs_fs_fetch_data,
+ .issue_yfs_rpc = yfs_fs_fetch_data,
+ .success = afs_fetch_data_success,
+ .put = afs_fetch_data_put,
+};
+
/*
* Fetch file data from the volume.
*/
int afs_fetch_data(struct afs_vnode *vnode, struct key *key, struct afs_read *req)
{
- struct afs_fs_cursor fc;
- struct afs_status_cb *scb;
- int ret;
+ struct afs_operation *op;
_enter("%s{%llx:%llu.%u},%x,,,",
vnode->volume->name,
@@ -236,34 +257,15 @@ int afs_fetch_data(struct afs_vnode *vnode, struct key *key, struct afs_read *re
vnode->fid.unique,
key_serial(key));
- scb = kzalloc(sizeof(struct afs_status_cb), GFP_KERNEL);
- if (!scb)
- return -ENOMEM;
-
- ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, vnode, key, true)) {
- afs_dataversion_t data_version = vnode->status.data_version;
+ op = afs_alloc_operation(key, vnode->volume);
+ if (IS_ERR(op))
+ return PTR_ERR(op);
- while (afs_select_fileserver(&fc)) {
- fc.cb_break = afs_calc_vnode_cb_break(vnode);
- afs_fs_fetch_data(&fc, scb, req);
- }
-
- afs_check_for_remote_deletion(&fc, vnode);
- afs_vnode_commit_status(&fc, vnode, fc.cb_break,
- &data_version, scb);
- ret = afs_end_vnode_operation(&fc);
- }
+ afs_op_set_vnode(op, 0, vnode);
- if (ret == 0) {
- afs_stat_v(vnode, n_fetches);
- atomic_long_add(req->actual_len,
- &afs_v2net(vnode)->n_fetch_bytes);
- }
-
- kfree(scb);
- _leave(" = %d", ret);
- return ret;
+ op->fetch.req = afs_get_read(req);
+ op->ops = &afs_fetch_data_operation;
+ return afs_do_sync_operation(op);
}
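Ownership of the read request moves from the call to the operation: one extra reference is taken at setup and the ->put hook drops it exactly once, however many times the wait loop reissued the RPC (the per-call afs_fetch_data_destructor is deleted from fsclient.c further down). As a general rule of the new model:

/* Sketch: resources an operation pins at setup are released in ->put,
 * which afs_put_operation() invokes on every path, success or failure.
 */
static void example_put(struct afs_operation *op)
{
	afs_put_read(op->fetch.req);	/* pairs with afs_get_read() */
}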
/*
diff --git a/fs/afs/flock.c b/fs/afs/flock.c
index 0f2a94ba73cb..71eea2a908c7 100644
--- a/fs/afs/flock.c
+++ b/fs/afs/flock.c
@@ -70,7 +70,8 @@ static void afs_schedule_lock_extension(struct afs_vnode *vnode)
*/
void afs_lock_op_done(struct afs_call *call)
{
- struct afs_vnode *vnode = call->lvnode;
+ struct afs_operation *op = call->op;
+ struct afs_vnode *vnode = op->file[0].vnode;
if (call->error == 0) {
spin_lock(&vnode->lock);
@@ -172,15 +173,28 @@ static void afs_kill_lockers_enoent(struct afs_vnode *vnode)
vnode->lock_key = NULL;
}
+static void afs_lock_success(struct afs_operation *op)
+{
+ struct afs_vnode *vnode = op->file[0].vnode;
+
+ _enter("op=%08x", op->debug_id);
+ afs_check_for_remote_deletion(op, vnode);
+ afs_vnode_commit_status(op, &op->file[0]);
+}
+
+static const struct afs_operation_ops afs_set_lock_operation = {
+ .issue_afs_rpc = afs_fs_set_lock,
+ .issue_yfs_rpc = yfs_fs_set_lock,
+ .success = afs_lock_success,
+};
+
/*
* Get a lock on a file
*/
static int afs_set_lock(struct afs_vnode *vnode, struct key *key,
afs_lock_type_t type)
{
- struct afs_status_cb *scb;
- struct afs_fs_cursor fc;
- int ret;
+ struct afs_operation *op;
_enter("%s{%llx:%llu.%u},%x,%u",
vnode->volume->name,
@@ -189,35 +203,29 @@ static int afs_set_lock(struct afs_vnode *vnode, struct key *key,
vnode->fid.unique,
key_serial(key), type);
- scb = kzalloc(sizeof(struct afs_status_cb), GFP_KERNEL);
- if (!scb)
- return -ENOMEM;
+ op = afs_alloc_operation(key, vnode->volume);
+ if (IS_ERR(op))
+ return PTR_ERR(op);
- ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, vnode, key, true)) {
- while (afs_select_fileserver(&fc)) {
- fc.cb_break = afs_calc_vnode_cb_break(vnode);
- afs_fs_set_lock(&fc, type, scb);
- }
-
- afs_check_for_remote_deletion(&fc, vnode);
- afs_vnode_commit_status(&fc, vnode, fc.cb_break, NULL, scb);
- ret = afs_end_vnode_operation(&fc);
- }
+ afs_op_set_vnode(op, 0, vnode);
- kfree(scb);
- _leave(" = %d", ret);
- return ret;
+ op->lock.type = type;
+ op->ops = &afs_set_lock_operation;
+ return afs_do_sync_operation(op);
}
+static const struct afs_operation_ops afs_extend_lock_operation = {
+ .issue_afs_rpc = afs_fs_extend_lock,
+ .issue_yfs_rpc = yfs_fs_extend_lock,
+ .success = afs_lock_success,
+};
+
/*
* Extend a lock on a file
*/
static int afs_extend_lock(struct afs_vnode *vnode, struct key *key)
{
- struct afs_status_cb *scb;
- struct afs_fs_cursor fc;
- int ret;
+ struct afs_operation *op;
_enter("%s{%llx:%llu.%u},%x",
vnode->volume->name,
@@ -226,35 +234,29 @@ static int afs_extend_lock(struct afs_vnode *vnode, struct key *key)
vnode->fid.unique,
key_serial(key));
- scb = kzalloc(sizeof(struct afs_status_cb), GFP_KERNEL);
- if (!scb)
- return -ENOMEM;
-
- ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, vnode, key, false)) {
- while (afs_select_current_fileserver(&fc)) {
- fc.cb_break = afs_calc_vnode_cb_break(vnode);
- afs_fs_extend_lock(&fc, scb);
- }
+ op = afs_alloc_operation(key, vnode->volume);
+ if (IS_ERR(op))
+ return PTR_ERR(op);
- afs_check_for_remote_deletion(&fc, vnode);
- afs_vnode_commit_status(&fc, vnode, fc.cb_break, NULL, scb);
- ret = afs_end_vnode_operation(&fc);
- }
+ afs_op_set_vnode(op, 0, vnode);
- kfree(scb);
- _leave(" = %d", ret);
- return ret;
+ op->flags |= AFS_OPERATION_UNINTR;
+ op->ops = &afs_extend_lock_operation;
+ return afs_do_sync_operation(op);
}
+static const struct afs_operation_ops afs_release_lock_operation = {
+ .issue_afs_rpc = afs_fs_release_lock,
+ .issue_yfs_rpc = yfs_fs_release_lock,
+ .success = afs_lock_success,
+};
+
/*
* Release a lock on a file
*/
static int afs_release_lock(struct afs_vnode *vnode, struct key *key)
{
- struct afs_status_cb *scb;
- struct afs_fs_cursor fc;
- int ret;
+ struct afs_operation *op;
_enter("%s{%llx:%llu.%u},%x",
vnode->volume->name,
@@ -263,25 +265,15 @@ static int afs_release_lock(struct afs_vnode *vnode, struct key *key)
vnode->fid.unique,
key_serial(key));
- scb = kzalloc(sizeof(struct afs_status_cb), GFP_KERNEL);
- if (!scb)
- return -ENOMEM;
+ op = afs_alloc_operation(key, vnode->volume);
+ if (IS_ERR(op))
+ return PTR_ERR(op);
- ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, vnode, key, false)) {
- while (afs_select_current_fileserver(&fc)) {
- fc.cb_break = afs_calc_vnode_cb_break(vnode);
- afs_fs_release_lock(&fc, scb);
- }
+ afs_op_set_vnode(op, 0, vnode);
- afs_check_for_remote_deletion(&fc, vnode);
- afs_vnode_commit_status(&fc, vnode, fc.cb_break, NULL, scb);
- ret = afs_end_vnode_operation(&fc);
- }
-
- kfree(scb);
- _leave(" = %d", ret);
- return ret;
+ op->flags |= AFS_OPERATION_UNINTR;
+ op->ops = &afs_release_lock_operation;
+ return afs_do_sync_operation(op);
}
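Extend and release must complete against the server that already holds the lock, so they opt out of interruptible waiting; pinning to the current server itself now falls out of afs_prepare_vnode() in fs_operation.c below, which sets AFS_OPERATION_CUR_ONLY whenever the vnode has a lock outstanding, replacing the old afs_select_current_fileserver() loop. In caller terms:

/* Sketch: lock-maintenance ops must not be aborted by signals, and
 * rotation stays on the current server via AFS_OPERATION_CUR_ONLY
 * (set automatically while vnode->lock_state != AFS_VNODE_LOCK_NONE).
 */
op->flags |= AFS_OPERATION_UNINTR;
op->ops = &afs_release_lock_operation;
return afs_do_sync_operation(op);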
/*
diff --git a/fs/afs/fs_operation.c b/fs/afs/fs_operation.c
new file mode 100644
index 000000000000..2d2dff5688a4
--- /dev/null
+++ b/fs/afs/fs_operation.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Fileserver-directed operation handling.
+ *
+ * Copyright (C) 2020 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include "internal.h"
+
+static atomic_t afs_operation_debug_counter;
+
+/*
+ * Create an operation against a volume.
+ */
+struct afs_operation *afs_alloc_operation(struct key *key, struct afs_volume *volume)
+{
+ struct afs_operation *op;
+
+ _enter("");
+
+ op = kzalloc(sizeof(*op), GFP_KERNEL);
+ if (!op)
+ return ERR_PTR(-ENOMEM);
+
+ if (!key) {
+ key = afs_request_key(volume->cell);
+ if (IS_ERR(key)) {
+ kfree(op);
+ return ERR_CAST(key);
+ }
+ } else {
+ key_get(key);
+ }
+
+ op->key = key;
+ op->volume = afs_get_volume(volume, afs_volume_trace_get_new_op);
+ op->net = volume->cell->net;
+ op->cb_v_break = volume->cb_v_break;
+ op->debug_id = atomic_inc_return(&afs_operation_debug_counter);
+ op->error = -EDESTADDRREQ;
+ op->ac.error = SHRT_MAX;
+
+ _leave(" = [op=%08x]", op->debug_id);
+ return op;
+}
+
+/*
+ * Lock the vnode(s) being operated upon.
+ */
+static bool afs_get_io_locks(struct afs_operation *op)
+{
+ struct afs_vnode *vnode = op->file[0].vnode;
+ struct afs_vnode *vnode2 = op->file[1].vnode;
+
+ _enter("");
+
+ if (op->flags & AFS_OPERATION_UNINTR) {
+ mutex_lock(&vnode->io_lock);
+ op->flags |= AFS_OPERATION_LOCK_0;
+ _leave(" = t [1]");
+ return true;
+ }
+
+ if (!vnode2 || !op->file[1].need_io_lock || vnode == vnode2)
+ vnode2 = NULL;
+
+ if (vnode2 > vnode)
+ swap(vnode, vnode2);
+
+ if (mutex_lock_interruptible(&vnode->io_lock) < 0) {
+ op->error = -EINTR;
+ op->flags |= AFS_OPERATION_STOP;
+ _leave(" = f [I 0]");
+ return false;
+ }
+ op->flags |= AFS_OPERATION_LOCK_0;
+
+ if (vnode2) {
+ if (mutex_lock_interruptible_nested(&vnode2->io_lock, 1) < 0) {
+ op->error = -EINTR;
+ op->flags |= AFS_OPERATION_STOP;
+ mutex_unlock(&vnode->io_lock);
+ op->flags &= ~AFS_OPERATION_LOCK_0;
+ _leave(" = f [I 1]");
+ return false;
+ }
+ op->flags |= AFS_OPERATION_LOCK_1;
+ }
+
+ _leave(" = t [2]");
+ return true;
+}
+
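Two-vnode operations always take the io_locks in a fixed address order — the swap() normalises which vnode is locked first — which rules out ABBA deadlock between, for example, rename's two directories. The same discipline in standalone form:

/* Sketch: acquire two mutexes in a consistent global (address) order. */
static int lock_pair(struct mutex *mtx_a, struct mutex *mtx_b)
{
	if (mtx_b == mtx_a)
		mtx_b = NULL;
	if (mtx_b > mtx_a)
		swap(mtx_a, mtx_b);	/* same normalisation as above */
	if (mutex_lock_interruptible(mtx_a) < 0)
		return -EINTR;
	if (mtx_b && mutex_lock_interruptible_nested(mtx_b, 1) < 0) {
		mutex_unlock(mtx_a);
		return -EINTR;
	}
	return 0;
}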
+static void afs_drop_io_locks(struct afs_operation *op)
+{
+ struct afs_vnode *vnode = op->file[0].vnode;
+ struct afs_vnode *vnode2 = op->file[1].vnode;
+
+ _enter("");
+
+ if (op->flags & AFS_OPERATION_LOCK_1)
+ mutex_unlock(&vnode2->io_lock);
+ if (op->flags & AFS_OPERATION_LOCK_0)
+ mutex_unlock(&vnode->io_lock);
+}
+
+static void afs_prepare_vnode(struct afs_operation *op, struct afs_vnode_param *vp,
+ unsigned int index)
+{
+ struct afs_vnode *vnode = vp->vnode;
+
+ if (vnode) {
+ vp->fid = vnode->fid;
+ vp->dv_before = vnode->status.data_version;
+ vp->cb_break_before = afs_calc_vnode_cb_break(vnode);
+ if (vnode->lock_state != AFS_VNODE_LOCK_NONE)
+ op->flags |= AFS_OPERATION_CUR_ONLY;
+ }
+
+ if (vp->fid.vnode)
+ _debug("PREP[%u] {%llx:%llu.%u}",
+ index, vp->fid.vid, vp->fid.vnode, vp->fid.unique);
+}
+
+/*
+ * Begin an operation on the fileserver.
+ *
+ * Fileserver operations are serialised on the server by vnode, so we serialise
+ * them here also using the io_lock.
+ */
+bool afs_begin_vnode_operation(struct afs_operation *op)
+{
+ struct afs_vnode *vnode = op->file[0].vnode;
+
+ ASSERT(vnode);
+
+ _enter("");
+
+ if (op->file[0].need_io_lock)
+ if (!afs_get_io_locks(op))
+ return false;
+
+ afs_prepare_vnode(op, &op->file[0], 0);
+ afs_prepare_vnode(op, &op->file[1], 1);
+ op->cb_v_break = op->volume->cb_v_break;
+ _leave(" = true");
+ return true;
+}
+
+/*
+ * Tidy up an operation and unlock the vnode(s) once the RPC phase is done.
+ */
+static void afs_end_vnode_operation(struct afs_operation *op)
+{
+ _enter("");
+
+ if (op->error == -EDESTADDRREQ ||
+ op->error == -EADDRNOTAVAIL ||
+ op->error == -ENETUNREACH ||
+ op->error == -EHOSTUNREACH)
+ afs_dump_edestaddrreq(op);
+
+ afs_drop_io_locks(op);
+
+ if (op->error == -ECONNABORTED)
+ op->error = afs_abort_to_error(op->ac.abort_code);
+}
+
+/*
+ * Wait for an in-progress operation to complete.
+ */
+void afs_wait_for_operation(struct afs_operation *op)
+{
+ _enter("");
+
+ while (afs_select_fileserver(op)) {
+ op->cb_s_break = op->server->cb_s_break;
+ if (test_bit(AFS_SERVER_FL_IS_YFS, &op->server->flags) &&
+ op->ops->issue_yfs_rpc)
+ op->ops->issue_yfs_rpc(op);
+ else
+ op->ops->issue_afs_rpc(op);
+
+ op->error = afs_wait_for_call_to_complete(op->call, &op->ac);
+ }
+
+ if (op->error == 0) {
+ _debug("success");
+ op->ops->success(op);
+ }
+
+ afs_end_vnode_operation(op);
+
+ if (op->error == 0 && op->ops->edit_dir) {
+ _debug("edit_dir");
+ op->ops->edit_dir(op);
+ }
+ _leave("");
+}
+
+/*
+ * Dispose of an operation.
+ */
+int afs_put_operation(struct afs_operation *op)
+{
+ int i, ret = op->error;
+
+ _enter("op=%08x,%d", op->debug_id, ret);
+
+ if (op->ops && op->ops->put)
+ op->ops->put(op);
+ if (op->file[0].put_vnode)
+ iput(&op->file[0].vnode->vfs_inode);
+ if (op->file[1].put_vnode)
+ iput(&op->file[1].vnode->vfs_inode);
+
+ if (op->more_files) {
+ for (i = 0; i < op->nr_files - 2; i++)
+ if (op->more_files[i].put_vnode)
+ iput(&op->more_files[i].vnode->vfs_inode);
+ kfree(op->more_files);
+ }
+
+ afs_end_cursor(&op->ac);
+ afs_put_serverlist(op->net, op->server_list);
+ afs_put_volume(op->net, op->volume, afs_volume_trace_put_put_op);
+ kfree(op);
+ return ret;
+}
+
+int afs_do_sync_operation(struct afs_operation *op)
+{
+ afs_begin_vnode_operation(op);
+ afs_wait_for_operation(op);
+ return afs_put_operation(op);
+}
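afs_do_sync_operation() covers the common case, but the phases are deliberately separable: a caller that needs to pull results out of the op before it is freed can drive begin and wait itself. A hedged sketch of that style — the ops table name is illustrative; such callers appear in the inode status-fetch conversions elsewhere in this series:

op->ops = &afs_fetch_status_operation;	/* illustrative table */
afs_begin_vnode_operation(op);
afs_wait_for_operation(op);
ret = op->error;			/* inspect before disposal */
/* ... copy anything needed out of op->file[0].scb ... */
afs_put_operation(op);
return ret;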
diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c
index 37d1bba57b00..b34f74b0f319 100644
--- a/fs/afs/fs_probe.c
+++ b/fs/afs/fs_probe.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/* AFS fileserver probing
*
- * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2018, 2020 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
@@ -11,15 +11,86 @@
#include "internal.h"
#include "protocol_yfs.h"
-static bool afs_fs_probe_done(struct afs_server *server)
+static unsigned int afs_fs_probe_fast_poll_interval = 30 * HZ;
+static unsigned int afs_fs_probe_slow_poll_interval = 5 * 60 * HZ;
+
+/*
+ * Start the probe polling timer. The armed timer holds an increment on the
+ * outstanding server count, so take one before arming it and drop it again
+ * if the timer was already pending.
+ */
+static void afs_schedule_fs_probe(struct afs_net *net,
+ struct afs_server *server, bool fast)
+{
+ unsigned long atj;
+
+ if (!net->live)
+ return;
+
+ atj = server->probed_at;
+ atj += fast ? afs_fs_probe_fast_poll_interval : afs_fs_probe_slow_poll_interval;
+
+ afs_inc_servers_outstanding(net);
+ if (timer_reduce(&net->fs_probe_timer, atj))
+ afs_dec_servers_outstanding(net);
+}
+
+/*
+ * Handle the completion of a set of probes.
+ */
+static void afs_finished_fs_probe(struct afs_net *net, struct afs_server *server)
+{
+ bool responded = server->probe.responded;
+
+ write_seqlock(&net->fs_lock);
+ if (responded) {
+ list_add_tail(&server->probe_link, &net->fs_probe_slow);
+ } else {
+ server->rtt = UINT_MAX;
+ clear_bit(AFS_SERVER_FL_RESPONDING, &server->flags);
+ list_add_tail(&server->probe_link, &net->fs_probe_fast);
+ }
+ write_sequnlock(&net->fs_lock);
+
+ afs_schedule_fs_probe(net, server, !responded);
+}
+
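Servers now sit on one of two polling queues: responders go to the slow queue (five-minute NAT-keepalive interval), non-responders to the fast queue (thirty seconds), with the next poll time derived from probed_at. The arithmetic the dispatcher and timer share:

/* Sketch: when is this server next due, and arm the timer for it? */
unsigned long poll_at = server->probed_at +
	(fast ? 30 * HZ : 5 * 60 * HZ);	/* fast vs slow interval */

if (time_before(jiffies, poll_at))
	timer_reduce(&net->fs_probe_timer, poll_at); /* only moves earlier */
/* (The real code brackets this with afs_inc/dec_servers_outstanding so
 * that net teardown can't race a pending timer.)
 */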
+/*
+ * Handle the completion of a probe.
+ */
+static void afs_done_one_fs_probe(struct afs_net *net, struct afs_server *server)
+{
+ _enter("");
+
+ if (atomic_dec_and_test(&server->probe_outstanding))
+ afs_finished_fs_probe(net, server);
+
+ wake_up_all(&server->probe_wq);
+}
+
+/*
+ * Handle inability to send a probe due to ENOMEM when trying to allocate a
+ * call struct.
+ */
+static void afs_fs_probe_not_done(struct afs_net *net,
+ struct afs_server *server,
+ struct afs_addr_cursor *ac)
{
- if (!atomic_dec_and_test(&server->probe_outstanding))
- return false;
+ struct afs_addr_list *alist = ac->alist;
+ unsigned int index = ac->index;
+
+ _enter("");
+
+ trace_afs_io_error(0, -ENOMEM, afs_io_error_fs_probe_fail);
+ spin_lock(&server->probe_lock);
- wake_up_var(&server->probe_outstanding);
- clear_bit_unlock(AFS_SERVER_FL_PROBING, &server->flags);
- wake_up_bit(&server->flags, AFS_SERVER_FL_PROBING);
- return true;
+ server->probe.local_failure = true;
+ if (server->probe.error == 0)
+ server->probe.error = -ENOMEM;
+
+ set_bit(index, &alist->failed);
+
+ spin_unlock(&server->probe_lock);
+ return afs_done_one_fs_probe(net, server);
}
/*
@@ -30,10 +101,8 @@ void afs_fileserver_probe_result(struct afs_call *call)
{
struct afs_addr_list *alist = call->alist;
struct afs_server *server = call->server;
- unsigned int server_index = call->server_index;
unsigned int index = call->addr_ix;
unsigned int rtt_us = 0;
- bool have_result = false;
int ret = call->error;
_enter("%pU,%u", &server->uuid, index);
@@ -52,8 +121,9 @@ void afs_fileserver_probe_result(struct afs_call *call)
goto responded;
case -ENOMEM:
case -ENONET:
+ clear_bit(index, &alist->responded);
server->probe.local_failure = true;
- afs_io_error(call, afs_io_error_fs_probe_fail);
+ trace_afs_io_error(call->debug_id, ret, afs_io_error_fs_probe_fail);
goto out;
case -ECONNRESET: /* Responded, but call expired. */
case -ERFKILL:
@@ -72,12 +142,11 @@ void afs_fileserver_probe_result(struct afs_call *call)
server->probe.error == -ETIMEDOUT ||
server->probe.error == -ETIME))
server->probe.error = ret;
- afs_io_error(call, afs_io_error_fs_probe_fail);
+ trace_afs_io_error(call->debug_id, ret, afs_io_error_fs_probe_fail);
goto out;
}
responded:
- set_bit(index, &alist->responded);
clear_bit(index, &alist->failed);
if (call->service_id == YFS_FS_SERVICE) {
@@ -95,39 +164,34 @@ responded:
rtt_us = rxrpc_kernel_get_srtt(call->net->socket, call->rxcall);
if (rtt_us < server->probe.rtt) {
server->probe.rtt = rtt_us;
+ server->rtt = rtt_us;
alist->preferred = index;
- have_result = true;
}
smp_wmb(); /* Set rtt before responded. */
server->probe.responded = true;
- set_bit(AFS_SERVER_FL_PROBED, &server->flags);
+ set_bit(index, &alist->responded);
+ set_bit(AFS_SERVER_FL_RESPONDING, &server->flags);
out:
spin_unlock(&server->probe_lock);
- _debug("probe [%u][%u] %pISpc rtt=%u ret=%d",
- server_index, index, &alist->addrs[index].transport, rtt_us, ret);
+ _debug("probe %pU [%u] %pISpc rtt=%u ret=%d",
+ &server->uuid, index, &alist->addrs[index].transport,
+ rtt_us, ret);
- have_result |= afs_fs_probe_done(server);
- if (have_result)
- wake_up_all(&server->probe_wq);
+ return afs_done_one_fs_probe(call->net, server);
}
/*
- * Probe all of a fileserver's addresses to find out the best route and to
- * query its capabilities.
+ * Probe one or all of a fileserver's addresses to find out the best route and
+ * to query its capabilities.
*/
-static int afs_do_probe_fileserver(struct afs_net *net,
- struct afs_server *server,
- struct key *key,
- unsigned int server_index,
- struct afs_error *_e)
+void afs_fs_probe_fileserver(struct afs_net *net, struct afs_server *server,
+ struct key *key, bool all)
{
struct afs_addr_cursor ac = {
.index = 0,
};
- struct afs_call *call;
- bool in_progress = false;
_enter("%pU", &server->uuid);
@@ -137,50 +201,25 @@ static int afs_do_probe_fileserver(struct afs_net *net,
afs_get_addrlist(ac.alist);
read_unlock(&server->fs_lock);
- atomic_set(&server->probe_outstanding, ac.alist->nr_addrs);
+ server->probed_at = jiffies;
+ atomic_set(&server->probe_outstanding, all ? ac.alist->nr_addrs : 1);
memset(&server->probe, 0, sizeof(server->probe));
server->probe.rtt = UINT_MAX;
- for (ac.index = 0; ac.index < ac.alist->nr_addrs; ac.index++) {
- call = afs_fs_get_capabilities(net, server, &ac, key, server_index);
- if (!IS_ERR(call)) {
- afs_put_call(call);
- in_progress = true;
- } else {
- afs_prioritise_error(_e, PTR_ERR(call), ac.abort_code);
- }
- }
-
- if (!in_progress)
- afs_fs_probe_done(server);
- afs_put_addrlist(ac.alist);
- return in_progress;
-}
+ ac.index = ac.alist->preferred;
+ if (ac.index < 0 || ac.index >= ac.alist->nr_addrs)
+ all = true;
-/*
- * Send off probes to all unprobed servers.
- */
-int afs_probe_fileservers(struct afs_net *net, struct key *key,
- struct afs_server_list *list)
-{
- struct afs_server *server;
- struct afs_error e;
- bool in_progress = false;
- int i;
-
- e.error = 0;
- e.responded = false;
- for (i = 0; i < list->nr_servers; i++) {
- server = list->servers[i].server;
- if (test_bit(AFS_SERVER_FL_PROBED, &server->flags))
- continue;
-
- if (!test_and_set_bit_lock(AFS_SERVER_FL_PROBING, &server->flags) &&
- afs_do_probe_fileserver(net, server, key, i, &e))
- in_progress = true;
+ if (all) {
+ for (ac.index = 0; ac.index < ac.alist->nr_addrs; ac.index++)
+ if (!afs_fs_get_capabilities(net, server, &ac, key))
+ afs_fs_probe_not_done(net, server, &ac);
+ } else {
+ if (!afs_fs_get_capabilities(net, server, &ac, key))
+ afs_fs_probe_not_done(net, server, &ac);
}
- return in_progress ? 0 : e.error;
+ afs_put_addrlist(ac.alist);
}
/*
@@ -190,7 +229,7 @@ int afs_wait_for_fs_probes(struct afs_server_list *slist, unsigned long untried)
{
struct wait_queue_entry *waits;
struct afs_server *server;
- unsigned int rtt = UINT_MAX;
+ unsigned int rtt = UINT_MAX, rtt_s;
bool have_responders = false;
int pref = -1, i;
@@ -200,7 +239,7 @@ int afs_wait_for_fs_probes(struct afs_server_list *slist, unsigned long untried)
for (i = 0; i < slist->nr_servers; i++) {
if (test_bit(i, &untried)) {
server = slist->servers[i].server;
- if (!test_bit(AFS_SERVER_FL_PROBING, &server->flags))
+ if (!atomic_read(&server->probe_outstanding))
__clear_bit(i, &untried);
if (server->probe.responded)
have_responders = true;
@@ -230,7 +269,7 @@ int afs_wait_for_fs_probes(struct afs_server_list *slist, unsigned long untried)
server = slist->servers[i].server;
if (server->probe.responded)
goto stop;
- if (test_bit(AFS_SERVER_FL_PROBING, &server->flags))
+ if (atomic_read(&server->probe_outstanding))
still_probing = true;
}
}
@@ -246,10 +285,11 @@ stop:
for (i = 0; i < slist->nr_servers; i++) {
if (test_bit(i, &untried)) {
server = slist->servers[i].server;
- if (server->probe.responded &&
- server->probe.rtt < rtt) {
+ rtt_s = READ_ONCE(server->rtt);
+ if (test_bit(AFS_SERVER_FL_RESPONDING, &server->flags) &&
+ rtt_s < rtt) {
pref = i;
- rtt = server->probe.rtt;
+ rtt = rtt_s;
}
remove_wait_queue(&server->probe_wq, &waits[i]);
@@ -265,3 +305,156 @@ stop:
slist->preferred = pref;
return 0;
}
+
+/*
+ * Probe timer. The timer holds an increment on the outstanding-servers
+ * count that must be passed along to the work item.
+ */
+void afs_fs_probe_timer(struct timer_list *timer)
+{
+ struct afs_net *net = container_of(timer, struct afs_net, fs_probe_timer);
+
+ if (!queue_work(afs_wq, &net->fs_prober))
+ afs_dec_servers_outstanding(net);
+}
+
+/*
+ * Dispatch a probe to a server.
+ */
+static void afs_dispatch_fs_probe(struct afs_net *net, struct afs_server *server, bool all)
+ __releases(&net->fs_lock)
+{
+ struct key *key = NULL;
+
+ /* We remove it from the queues here - it will be added back to
+ * one of the queues on the completion of the probe.
+ */
+ list_del_init(&server->probe_link);
+
+ afs_get_server(server, afs_server_trace_get_probe);
+ write_sequnlock(&net->fs_lock);
+
+ afs_fs_probe_fileserver(net, server, key, all);
+ afs_put_server(net, server, afs_server_trace_put_probe);
+}
+
+/*
+ * Probe a server immediately without waiting for its due time to come
+ * round. This is used when all of the addresses have been tried.
+ */
+void afs_probe_fileserver(struct afs_net *net, struct afs_server *server)
+{
+ write_seqlock(&net->fs_lock);
+ if (!list_empty(&server->probe_link))
+ return afs_dispatch_fs_probe(net, server, true);
+ write_sequnlock(&net->fs_lock);
+}
+
+/*
+ * Probe dispatcher: periodically sends probes to keep NAT mappings alive.
+ */
+void afs_fs_probe_dispatcher(struct work_struct *work)
+{
+ struct afs_net *net = container_of(work, struct afs_net, fs_prober);
+ struct afs_server *fast, *slow, *server;
+ unsigned long nowj, timer_at, poll_at;
+ bool first_pass = true, set_timer = false;
+
+ if (!net->live)
+ return;
+
+ _enter("");
+
+ if (list_empty(&net->fs_probe_fast) && list_empty(&net->fs_probe_slow)) {
+ _leave(" [none]");
+ return;
+ }
+
+again:
+ write_seqlock(&net->fs_lock);
+
+ fast = slow = server = NULL;
+ nowj = jiffies;
+ timer_at = nowj + MAX_JIFFY_OFFSET;
+
+ if (!list_empty(&net->fs_probe_fast)) {
+ fast = list_first_entry(&net->fs_probe_fast, struct afs_server, probe_link);
+ poll_at = fast->probed_at + afs_fs_probe_fast_poll_interval;
+ if (time_before(nowj, poll_at)) {
+ timer_at = poll_at;
+ set_timer = true;
+ fast = NULL;
+ }
+ }
+
+ if (!list_empty(&net->fs_probe_slow)) {
+ slow = list_first_entry(&net->fs_probe_slow, struct afs_server, probe_link);
+ poll_at = slow->probed_at + afs_fs_probe_slow_poll_interval;
+ if (time_before(nowj, poll_at)) {
+ if (time_before(poll_at, timer_at))
+ timer_at = poll_at;
+ set_timer = true;
+ slow = NULL;
+ }
+ }
+
+ server = fast ?: slow;
+ if (server)
+ _debug("probe %pU", &server->uuid);
+
+ if (server && (first_pass || !need_resched())) {
+ afs_dispatch_fs_probe(net, server, server == fast);
+ first_pass = false;
+ goto again;
+ }
+
+ write_sequnlock(&net->fs_lock);
+
+ if (server) {
+ if (!queue_work(afs_wq, &net->fs_prober))
+ afs_dec_servers_outstanding(net);
+ _leave(" [requeue]");
+ } else if (set_timer) {
+ if (timer_reduce(&net->fs_probe_timer, timer_at))
+ afs_dec_servers_outstanding(net);
+ _leave(" [timer]");
+ } else {
+ afs_dec_servers_outstanding(net);
+ _leave(" [quiesce]");
+ }
+}
+
+/*
+ * Wait up to 2s for a probe on a particular fileserver to complete.
+ */
+int afs_wait_for_one_fs_probe(struct afs_server *server, bool is_intr)
+{
+ struct wait_queue_entry wait;
+ unsigned long timo = 2 * HZ;
+
+ if (atomic_read(&server->probe_outstanding) == 0)
+ goto dont_wait;
+
+ init_wait_entry(&wait, 0);
+ for (;;) {
+ prepare_to_wait_event(&server->probe_wq, &wait,
+ is_intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
+ if (timo == 0 ||
+ server->probe.responded ||
+ atomic_read(&server->probe_outstanding) == 0 ||
+ (is_intr && signal_pending(current)))
+ break;
+ timo = schedule_timeout(timo);
+ }
+
+ finish_wait(&server->probe_wq, &wait);
+
+dont_wait:
+ if (server->probe.responded)
+ return 0;
+ if (is_intr && signal_pending(current))
+ return -ERESTARTSYS;
+ if (timo == 0)
+ return -ETIME;
+ return -EDESTADDRREQ;
+}
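The return codes map onto rotation decisions: 0 means the server answered and is worth trying, -ERESTARTSYS aborts an interruptible operation, and -ETIME or -EDESTADDRREQ mean move on. A hedged sketch of a rotation-side caller (control flow illustrative; the real consumer is afs_select_fileserver() in rotate.c):

switch (afs_wait_for_one_fs_probe(server, !(op->flags & AFS_OPERATION_UNINTR))) {
case 0:			/* responded: (re)try this server */
	break;
case -ERESTARTSYS:	/* interrupted: fail the operation */
	op->error = -ERESTARTSYS;
	return false;
default:		/* -ETIME / -EDESTADDRREQ: next server */
	break;
}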
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index d2b3798c1932..acb4d0ca2649 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -13,12 +13,6 @@
#include "internal.h"
#include "afs_fs.h"
#include "xdr_fs.h"
-#include "protocol_yfs.h"
-
-static inline void afs_use_fs_server(struct afs_call *call, struct afs_cb_interest *cbi)
-{
- call->cbi = afs_get_cb_interest(cbi);
-}
/*
* decode an AFSFid block
@@ -56,16 +50,15 @@ static void xdr_dump_bad(const __be32 *bp)
/*
* decode an AFSFetchStatus block
*/
-static int xdr_decode_AFSFetchStatus(const __be32 **_bp,
- struct afs_call *call,
- struct afs_status_cb *scb)
+static void xdr_decode_AFSFetchStatus(const __be32 **_bp,
+ struct afs_call *call,
+ struct afs_status_cb *scb)
{
const struct afs_xdr_AFSFetchStatus *xdr = (const void *)*_bp;
struct afs_file_status *status = &scb->status;
bool inline_error = (call->operation_ID == afs_FS_InlineBulkStatus);
u64 data_version, size;
u32 type, abort_code;
- int ret;
abort_code = ntohl(xdr->abort_code);
@@ -79,7 +72,7 @@ static int xdr_decode_AFSFetchStatus(const __be32 **_bp,
*/
status->abort_code = abort_code;
scb->have_error = true;
- goto good;
+ goto advance;
}
pr_warn("Unknown AFSFetchStatus version %u\n", ntohl(xdr->if_version));
@@ -89,7 +82,7 @@ static int xdr_decode_AFSFetchStatus(const __be32 **_bp,
if (abort_code != 0 && inline_error) {
status->abort_code = abort_code;
scb->have_error = true;
- goto good;
+ goto advance;
}
type = ntohl(xdr->type);
@@ -125,15 +118,13 @@ static int xdr_decode_AFSFetchStatus(const __be32 **_bp,
data_version |= (u64)ntohl(xdr->data_version_hi) << 32;
status->data_version = data_version;
scb->have_status = true;
-good:
- ret = 0;
advance:
*_bp = (const void *)*_bp + sizeof(*xdr);
- return ret;
+ return;
bad:
xdr_dump_bad(*_bp);
- ret = afs_protocol_error(call, -EBADMSG, afs_eproto_bad_status);
+ afs_protocol_error(call, afs_eproto_bad_status);
goto advance;
}
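With the decoder made void, a malformed status is recorded against the call by afs_protocol_error() and decoding still advances past the block, so parsing stays in step and the error surfaces when the call completes — one bad entry in a bulk reply no longer aborts delivery of the rest. Call sites shrink accordingly:

/* Before: every decode had to be error-checked ... */
ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
if (ret < 0)
	return ret;

/* ... after: decode straight into the operation's per-file status. */
xdr_decode_AFSFetchStatus(&bp, call, &vp->scb);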
@@ -243,8 +234,10 @@ static void xdr_decode_AFSFetchVolumeStatus(const __be32 **_bp,
/*
* deliver reply data to an FS.FetchStatus
*/
-static int afs_deliver_fs_fetch_status_vnode(struct afs_call *call)
+static int afs_deliver_fs_fetch_status(struct afs_call *call)
{
+ struct afs_operation *op = call->op;
+ struct afs_vnode_param *vp = &op->file[op->fetch_status.which];
const __be32 *bp;
int ret;
@@ -254,11 +247,9 @@ static int afs_deliver_fs_fetch_status_vnode(struct afs_call *call)
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
- if (ret < 0)
- return ret;
- xdr_decode_AFSCallBack(&bp, call, call->out_scb);
- xdr_decode_AFSVolSync(&bp, call->out_volsync);
+ xdr_decode_AFSFetchStatus(&bp, call, &vp->scb);
+ xdr_decode_AFSCallBack(&bp, call, &vp->scb);
+ xdr_decode_AFSVolSync(&bp, &op->volsync);
_leave(" = 0 [done]");
return 0;
@@ -267,54 +258,39 @@ static int afs_deliver_fs_fetch_status_vnode(struct afs_call *call)
/*
* FS.FetchStatus operation type
*/
-static const struct afs_call_type afs_RXFSFetchStatus_vnode = {
- .name = "FS.FetchStatus(vnode)",
+static const struct afs_call_type afs_RXFSFetchStatus = {
+ .name = "FS.FetchStatus",
.op = afs_FS_FetchStatus,
- .deliver = afs_deliver_fs_fetch_status_vnode,
+ .deliver = afs_deliver_fs_fetch_status,
.destructor = afs_flat_call_destructor,
};
/*
* fetch the status information for a file
*/
-int afs_fs_fetch_file_status(struct afs_fs_cursor *fc, struct afs_status_cb *scb,
- struct afs_volsync *volsync)
+void afs_fs_fetch_status(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[op->fetch_status.which];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
- if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
- return yfs_fs_fetch_file_status(fc, scb, volsync);
-
_enter(",%x,{%llx:%llu},,",
- key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
+ key_serial(op->key), vp->fid.vid, vp->fid.vnode);
- call = afs_alloc_flat_call(net, &afs_RXFSFetchStatus_vnode,
+ call = afs_alloc_flat_call(op->net, &afs_RXFSFetchStatus,
16, (21 + 3 + 6) * 4);
- if (!call) {
- fc->ac.error = -ENOMEM;
- return -ENOMEM;
- }
-
- call->key = fc->key;
- call->out_scb = scb;
- call->out_volsync = volsync;
+ if (!call)
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp[0] = htonl(FSFETCHSTATUS);
- bp[1] = htonl(vnode->fid.vid);
- bp[2] = htonl(vnode->fid.vnode);
- bp[3] = htonl(vnode->fid.unique);
+ bp[1] = htonl(vp->fid.vid);
+ bp[2] = htonl(vp->fid.vnode);
+ bp[3] = htonl(vp->fid.unique);
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, &vnode->fid);
-
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_NOFS);
}
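/*
 * afs_op_nomem() replaces the old "fc->ac.error = -ENOMEM; return -ENOMEM;"
 * pattern now that these RPC functions return void.  A sketch of the assumed
 * helper (defined elsewhere in this series; shown here only to make the
 * pattern above readable):
 *
 *	static inline void afs_op_nomem(struct afs_operation *op)
 *	{
 *		op->error = -ENOMEM;
 *	}
 */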
/*
@@ -322,7 +298,9 @@ int afs_fs_fetch_file_status(struct afs_fs_cursor *fc, struct afs_status_cb *scb
*/
static int afs_deliver_fs_fetch_data(struct afs_call *call)
{
- struct afs_read *req = call->read_request;
+ struct afs_operation *op = call->op;
+ struct afs_vnode_param *vp = &op->file[0];
+ struct afs_read *req = op->fetch.req;
const __be32 *bp;
unsigned int size;
int ret;
@@ -419,14 +397,12 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
return ret;
bp = call->buffer;
- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
- if (ret < 0)
- return ret;
- xdr_decode_AFSCallBack(&bp, call, call->out_scb);
- xdr_decode_AFSVolSync(&bp, call->out_volsync);
+ xdr_decode_AFSFetchStatus(&bp, call, &vp->scb);
+ xdr_decode_AFSCallBack(&bp, call, &vp->scb);
+ xdr_decode_AFSVolSync(&bp, &op->volsync);
- req->data_version = call->out_scb->status.data_version;
- req->file_size = call->out_scb->status.size;
+ req->data_version = vp->scb.status.data_version;
+ req->file_size = vp->scb.status.size;
call->unmarshall++;
@@ -449,14 +425,6 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
return 0;
}
-static void afs_fetch_data_destructor(struct afs_call *call)
-{
- struct afs_read *req = call->read_request;
-
- afs_put_read(req);
- afs_flat_call_destructor(call);
-}
-
/*
* FS.FetchData operation type
*/
@@ -464,102 +432,79 @@ static const struct afs_call_type afs_RXFSFetchData = {
.name = "FS.FetchData",
.op = afs_FS_FetchData,
.deliver = afs_deliver_fs_fetch_data,
- .destructor = afs_fetch_data_destructor,
+ .destructor = afs_flat_call_destructor,
};
static const struct afs_call_type afs_RXFSFetchData64 = {
.name = "FS.FetchData64",
.op = afs_FS_FetchData64,
.deliver = afs_deliver_fs_fetch_data,
- .destructor = afs_fetch_data_destructor,
+ .destructor = afs_flat_call_destructor,
};
/*
* fetch data from a very large file
*/
-static int afs_fs_fetch_data64(struct afs_fs_cursor *fc,
- struct afs_status_cb *scb,
- struct afs_read *req)
+static void afs_fs_fetch_data64(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
+ struct afs_read *req = op->fetch.req;
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
_enter("");
- call = afs_alloc_flat_call(net, &afs_RXFSFetchData64, 32, (21 + 3 + 6) * 4);
+ call = afs_alloc_flat_call(op->net, &afs_RXFSFetchData64, 32, (21 + 3 + 6) * 4);
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->out_scb = scb;
- call->out_volsync = NULL;
- call->read_request = afs_get_read(req);
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp[0] = htonl(FSFETCHDATA64);
- bp[1] = htonl(vnode->fid.vid);
- bp[2] = htonl(vnode->fid.vnode);
- bp[3] = htonl(vnode->fid.unique);
+ bp[1] = htonl(vp->fid.vid);
+ bp[2] = htonl(vp->fid.vnode);
+ bp[3] = htonl(vp->fid.unique);
bp[4] = htonl(upper_32_bits(req->pos));
bp[5] = htonl(lower_32_bits(req->pos));
bp[6] = 0;
bp[7] = htonl(lower_32_bits(req->len));
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, &vnode->fid);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
* fetch data from a file
*/
-int afs_fs_fetch_data(struct afs_fs_cursor *fc,
- struct afs_status_cb *scb,
- struct afs_read *req)
+void afs_fs_fetch_data(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
+ struct afs_read *req = op->fetch.req;
__be32 *bp;
- if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
- return yfs_fs_fetch_data(fc, scb, req);
-
if (upper_32_bits(req->pos) ||
upper_32_bits(req->len) ||
upper_32_bits(req->pos + req->len))
- return afs_fs_fetch_data64(fc, scb, req);
+ return afs_fs_fetch_data64(op);
_enter("");
- call = afs_alloc_flat_call(net, &afs_RXFSFetchData, 24, (21 + 3 + 6) * 4);
+ call = afs_alloc_flat_call(op->net, &afs_RXFSFetchData, 24, (21 + 3 + 6) * 4);
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->out_scb = scb;
- call->out_volsync = NULL;
- call->read_request = afs_get_read(req);
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp[0] = htonl(FSFETCHDATA);
- bp[1] = htonl(vnode->fid.vid);
- bp[2] = htonl(vnode->fid.vnode);
- bp[3] = htonl(vnode->fid.unique);
+ bp[1] = htonl(vp->fid.vid);
+ bp[2] = htonl(vp->fid.vnode);
+ bp[3] = htonl(vp->fid.unique);
bp[4] = htonl(lower_32_bits(req->pos));
bp[5] = htonl(lower_32_bits(req->len));
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, &vnode->fid);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
@@ -567,6 +512,9 @@ int afs_fs_fetch_data(struct afs_fs_cursor *fc,
*/
static int afs_deliver_fs_create_vnode(struct afs_call *call)
{
+ struct afs_operation *op = call->op;
+ struct afs_vnode_param *dvp = &op->file[0];
+ struct afs_vnode_param *vp = &op->file[1];
const __be32 *bp;
int ret;
@@ -576,15 +524,11 @@ static int afs_deliver_fs_create_vnode(struct afs_call *call)
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
- xdr_decode_AFSFid(&bp, call->out_fid);
- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
- if (ret < 0)
- return ret;
- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb);
- if (ret < 0)
- return ret;
- xdr_decode_AFSCallBack(&bp, call, call->out_scb);
- xdr_decode_AFSVolSync(&bp, call->out_volsync);
+ xdr_decode_AFSFid(&bp, &op->file[1].fid);
+ xdr_decode_AFSFetchStatus(&bp, call, &vp->scb);
+ xdr_decode_AFSFetchStatus(&bp, call, &dvp->scb);
+ xdr_decode_AFSCallBack(&bp, call, &vp->scb);
+ xdr_decode_AFSVolSync(&bp, &op->volsync);
_leave(" = 0 [done]");
return 0;
@@ -600,6 +544,52 @@ static const struct afs_call_type afs_RXFSCreateFile = {
.destructor = afs_flat_call_destructor,
};
+/*
+ * Create a file.
+ */
+void afs_fs_create_file(struct afs_operation *op)
+{
+ const struct qstr *name = &op->dentry->d_name;
+ struct afs_vnode_param *dvp = &op->file[0];
+ struct afs_call *call;
+ size_t namesz, reqsz, padsz;
+ __be32 *bp;
+
+ _enter("");
+
+ namesz = name->len;
+ padsz = (4 - (namesz & 3)) & 3;
+ reqsz = (5 * 4) + namesz + padsz + (6 * 4);
+
+ call = afs_alloc_flat_call(op->net, &afs_RXFSCreateFile,
+ reqsz, (3 + 21 + 21 + 3 + 6) * 4);
+ if (!call)
+ return afs_op_nomem(op);
+
+ /* marshall the parameters */
+ bp = call->request;
+ *bp++ = htonl(FSCREATEFILE);
+ *bp++ = htonl(dvp->fid.vid);
+ *bp++ = htonl(dvp->fid.vnode);
+ *bp++ = htonl(dvp->fid.unique);
+ *bp++ = htonl(namesz);
+ memcpy(bp, name->name, namesz);
+ bp = (void *) bp + namesz;
+ if (padsz > 0) {
+ memset(bp, 0, padsz);
+ bp = (void *) bp + padsz;
+ }
+ *bp++ = htonl(AFS_SET_MODE | AFS_SET_MTIME);
+ *bp++ = htonl(op->mtime.tv_sec); /* mtime */
+ *bp++ = 0; /* owner */
+ *bp++ = 0; /* group */
+ *bp++ = htonl(op->create.mode & S_IALLUGO); /* unix mode */
+ *bp++ = 0; /* segment size */
+
+ trace_afs_make_fs_call1(call, &dvp->fid, name);
+ afs_make_op_call(op, call, GFP_NOFS);
+}
+
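/*
 * A worked example of the XDR padding arithmetic used when marshalling the
 * name above (hypothetical name lengths; the wire format pads strings to a
 * 4-byte boundary):
 *
 *	namesz = 5  ->  padsz = (4 - (5 & 3)) & 3 = 3	(5 + 3 = 8 bytes)
 *	namesz = 8  ->  padsz = (4 - (8 & 3)) & 3 = 0	(already aligned)
 */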
static const struct afs_call_type afs_RXFSMakeDir = {
.name = "FS.MakeDir",
.op = afs_FS_MakeDir,
@@ -608,80 +598,58 @@ static const struct afs_call_type afs_RXFSMakeDir = {
};
/*
- * create a file or make a directory
+ * Create a new directory
*/
-int afs_fs_create(struct afs_fs_cursor *fc,
- const char *name,
- umode_t mode,
- struct afs_status_cb *dvnode_scb,
- struct afs_fid *newfid,
- struct afs_status_cb *new_scb)
+void afs_fs_make_dir(struct afs_operation *op)
{
- struct afs_vnode *dvnode = fc->vnode;
+ const struct qstr *name = &op->dentry->d_name;
+ struct afs_vnode_param *dvp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(dvnode);
size_t namesz, reqsz, padsz;
__be32 *bp;
- if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags)){
- if (S_ISDIR(mode))
- return yfs_fs_make_dir(fc, name, mode, dvnode_scb,
- newfid, new_scb);
- else
- return yfs_fs_create_file(fc, name, mode, dvnode_scb,
- newfid, new_scb);
- }
-
_enter("");
- namesz = strlen(name);
+ namesz = name->len;
padsz = (4 - (namesz & 3)) & 3;
reqsz = (5 * 4) + namesz + padsz + (6 * 4);
- call = afs_alloc_flat_call(
- net, S_ISDIR(mode) ? &afs_RXFSMakeDir : &afs_RXFSCreateFile,
- reqsz, (3 + 21 + 21 + 3 + 6) * 4);
+ call = afs_alloc_flat_call(op->net, &afs_RXFSMakeDir,
+ reqsz, (3 + 21 + 21 + 3 + 6) * 4);
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->out_dir_scb = dvnode_scb;
- call->out_fid = newfid;
- call->out_scb = new_scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
- *bp++ = htonl(S_ISDIR(mode) ? FSMAKEDIR : FSCREATEFILE);
- *bp++ = htonl(dvnode->fid.vid);
- *bp++ = htonl(dvnode->fid.vnode);
- *bp++ = htonl(dvnode->fid.unique);
+ *bp++ = htonl(FSMAKEDIR);
+ *bp++ = htonl(dvp->fid.vid);
+ *bp++ = htonl(dvp->fid.vnode);
+ *bp++ = htonl(dvp->fid.unique);
*bp++ = htonl(namesz);
- memcpy(bp, name, namesz);
+ memcpy(bp, name->name, namesz);
bp = (void *) bp + namesz;
if (padsz > 0) {
memset(bp, 0, padsz);
bp = (void *) bp + padsz;
}
*bp++ = htonl(AFS_SET_MODE | AFS_SET_MTIME);
- *bp++ = htonl(dvnode->vfs_inode.i_mtime.tv_sec); /* mtime */
+ *bp++ = htonl(op->mtime.tv_sec); /* mtime */
*bp++ = 0; /* owner */
*bp++ = 0; /* group */
- *bp++ = htonl(mode & S_IALLUGO); /* unix mode */
+ *bp++ = htonl(op->create.mode & S_IALLUGO); /* unix mode */
*bp++ = 0; /* segment size */
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call1(call, &dvnode->fid, name);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call1(call, &dvp->fid, name);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
- * Deliver reply data to any operation that returns directory status and volume
- * sync.
+ * Deliver reply data to any operation that returns status and volume sync.
*/
-static int afs_deliver_fs_dir_status_and_vol(struct afs_call *call)
+static int afs_deliver_fs_file_status_and_vol(struct afs_call *call)
{
+ struct afs_operation *op = call->op;
+ struct afs_vnode_param *vp = &op->file[0];
const __be32 *bp;
int ret;
@@ -691,81 +659,108 @@ static int afs_deliver_fs_dir_status_and_vol(struct afs_call *call)
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb);
- if (ret < 0)
- return ret;
- xdr_decode_AFSVolSync(&bp, call->out_volsync);
+ xdr_decode_AFSFetchStatus(&bp, call, &vp->scb);
+ xdr_decode_AFSVolSync(&bp, &op->volsync);
_leave(" = 0 [done]");
return 0;
}
/*
- * FS.RemoveDir/FS.RemoveFile operation type
+ * FS.RemoveFile operation type
*/
static const struct afs_call_type afs_RXFSRemoveFile = {
.name = "FS.RemoveFile",
.op = afs_FS_RemoveFile,
- .deliver = afs_deliver_fs_dir_status_and_vol,
+ .deliver = afs_deliver_fs_file_status_and_vol,
.destructor = afs_flat_call_destructor,
};
+/*
+ * Remove a file.
+ */
+void afs_fs_remove_file(struct afs_operation *op)
+{
+ const struct qstr *name = &op->dentry->d_name;
+ struct afs_vnode_param *dvp = &op->file[0];
+ struct afs_call *call;
+ size_t namesz, reqsz, padsz;
+ __be32 *bp;
+
+ _enter("");
+
+ namesz = name->len;
+ padsz = (4 - (namesz & 3)) & 3;
+ reqsz = (5 * 4) + namesz + padsz;
+
+ call = afs_alloc_flat_call(op->net, &afs_RXFSRemoveFile,
+ reqsz, (21 + 6) * 4);
+ if (!call)
+ return afs_op_nomem(op);
+
+ /* marshall the parameters */
+ bp = call->request;
+ *bp++ = htonl(FSREMOVEFILE);
+ *bp++ = htonl(dvp->fid.vid);
+ *bp++ = htonl(dvp->fid.vnode);
+ *bp++ = htonl(dvp->fid.unique);
+ *bp++ = htonl(namesz);
+ memcpy(bp, name->name, namesz);
+ bp = (void *) bp + namesz;
+ if (padsz > 0) {
+ memset(bp, 0, padsz);
+ bp = (void *) bp + padsz;
+ }
+
+ trace_afs_make_fs_call1(call, &dvp->fid, name);
+ afs_make_op_call(op, call, GFP_NOFS);
+}
+
static const struct afs_call_type afs_RXFSRemoveDir = {
.name = "FS.RemoveDir",
.op = afs_FS_RemoveDir,
- .deliver = afs_deliver_fs_dir_status_and_vol,
+ .deliver = afs_deliver_fs_file_status_and_vol,
.destructor = afs_flat_call_destructor,
};
/*
- * remove a file or directory
+ * Remove a directory.
*/
-int afs_fs_remove(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
- const char *name, bool isdir, struct afs_status_cb *dvnode_scb)
+void afs_fs_remove_dir(struct afs_operation *op)
{
- struct afs_vnode *dvnode = fc->vnode;
+ const struct qstr *name = &op->dentry->d_name;
+ struct afs_vnode_param *dvp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(dvnode);
size_t namesz, reqsz, padsz;
__be32 *bp;
- if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
- return yfs_fs_remove(fc, vnode, name, isdir, dvnode_scb);
-
_enter("");
- namesz = strlen(name);
+ namesz = name->len;
padsz = (4 - (namesz & 3)) & 3;
reqsz = (5 * 4) + namesz + padsz;
- call = afs_alloc_flat_call(
- net, isdir ? &afs_RXFSRemoveDir : &afs_RXFSRemoveFile,
- reqsz, (21 + 6) * 4);
+ call = afs_alloc_flat_call(op->net, &afs_RXFSRemoveDir,
+ reqsz, (21 + 6) * 4);
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->out_dir_scb = dvnode_scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
- *bp++ = htonl(isdir ? FSREMOVEDIR : FSREMOVEFILE);
- *bp++ = htonl(dvnode->fid.vid);
- *bp++ = htonl(dvnode->fid.vnode);
- *bp++ = htonl(dvnode->fid.unique);
+ *bp++ = htonl(FSREMOVEDIR);
+ *bp++ = htonl(dvp->fid.vid);
+ *bp++ = htonl(dvp->fid.vnode);
+ *bp++ = htonl(dvp->fid.unique);
*bp++ = htonl(namesz);
- memcpy(bp, name, namesz);
+ memcpy(bp, name->name, namesz);
bp = (void *) bp + namesz;
if (padsz > 0) {
memset(bp, 0, padsz);
bp = (void *) bp + padsz;
}
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call1(call, &dvnode->fid, name);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call1(call, &dvp->fid, name);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
@@ -773,6 +768,9 @@ int afs_fs_remove(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
*/
static int afs_deliver_fs_link(struct afs_call *call)
{
+ struct afs_operation *op = call->op;
+ struct afs_vnode_param *dvp = &op->file[0];
+ struct afs_vnode_param *vp = &op->file[1];
const __be32 *bp;
int ret;
@@ -784,13 +782,9 @@ static int afs_deliver_fs_link(struct afs_call *call)
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
- if (ret < 0)
- return ret;
- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb);
- if (ret < 0)
- return ret;
- xdr_decode_AFSVolSync(&bp, call->out_volsync);
+ xdr_decode_AFSFetchStatus(&bp, call, &vp->scb);
+ xdr_decode_AFSFetchStatus(&bp, call, &dvp->scb);
+ xdr_decode_AFSVolSync(&bp, &op->volsync);
_leave(" = 0 [done]");
return 0;
@@ -809,56 +803,44 @@ static const struct afs_call_type afs_RXFSLink = {
/*
* make a hard link
*/
-int afs_fs_link(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
- const char *name,
- struct afs_status_cb *dvnode_scb,
- struct afs_status_cb *vnode_scb)
+void afs_fs_link(struct afs_operation *op)
{
- struct afs_vnode *dvnode = fc->vnode;
+ const struct qstr *name = &op->dentry->d_name;
+ struct afs_vnode_param *dvp = &op->file[0];
+ struct afs_vnode_param *vp = &op->file[1];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
size_t namesz, reqsz, padsz;
__be32 *bp;
- if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
- return yfs_fs_link(fc, vnode, name, dvnode_scb, vnode_scb);
-
_enter("");
- namesz = strlen(name);
+ namesz = name->len;
padsz = (4 - (namesz & 3)) & 3;
reqsz = (5 * 4) + namesz + padsz + (3 * 4);
- call = afs_alloc_flat_call(net, &afs_RXFSLink, reqsz, (21 + 21 + 6) * 4);
+ call = afs_alloc_flat_call(op->net, &afs_RXFSLink, reqsz, (21 + 21 + 6) * 4);
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->out_dir_scb = dvnode_scb;
- call->out_scb = vnode_scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
*bp++ = htonl(FSLINK);
- *bp++ = htonl(dvnode->fid.vid);
- *bp++ = htonl(dvnode->fid.vnode);
- *bp++ = htonl(dvnode->fid.unique);
+ *bp++ = htonl(dvp->fid.vid);
+ *bp++ = htonl(dvp->fid.vnode);
+ *bp++ = htonl(dvp->fid.unique);
*bp++ = htonl(namesz);
- memcpy(bp, name, namesz);
+ memcpy(bp, name->name, namesz);
bp = (void *) bp + namesz;
if (padsz > 0) {
memset(bp, 0, padsz);
bp = (void *) bp + padsz;
}
- *bp++ = htonl(vnode->fid.vid);
- *bp++ = htonl(vnode->fid.vnode);
- *bp++ = htonl(vnode->fid.unique);
-
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call1(call, &vnode->fid, name);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ *bp++ = htonl(vp->fid.vid);
+ *bp++ = htonl(vp->fid.vnode);
+ *bp++ = htonl(vp->fid.unique);
+
+ trace_afs_make_fs_call1(call, &vp->fid, name);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
@@ -866,6 +848,9 @@ int afs_fs_link(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
*/
static int afs_deliver_fs_symlink(struct afs_call *call)
{
+ struct afs_operation *op = call->op;
+ struct afs_vnode_param *dvp = &op->file[0];
+ struct afs_vnode_param *vp = &op->file[1];
const __be32 *bp;
int ret;
@@ -877,14 +862,10 @@ static int afs_deliver_fs_symlink(struct afs_call *call)
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
- xdr_decode_AFSFid(&bp, call->out_fid);
- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
- if (ret < 0)
- return ret;
- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb);
- if (ret < 0)
- return ret;
- xdr_decode_AFSVolSync(&bp, call->out_volsync);
+ xdr_decode_AFSFid(&bp, &vp->fid);
+ xdr_decode_AFSFetchStatus(&bp, call, &vp->scb);
+ xdr_decode_AFSFetchStatus(&bp, call, &dvp->scb);
+ xdr_decode_AFSVolSync(&bp, &op->volsync);
_leave(" = 0 [done]");
return 0;
@@ -903,75 +884,58 @@ static const struct afs_call_type afs_RXFSSymlink = {
/*
* create a symbolic link
*/
-int afs_fs_symlink(struct afs_fs_cursor *fc,
- const char *name,
- const char *contents,
- struct afs_status_cb *dvnode_scb,
- struct afs_fid *newfid,
- struct afs_status_cb *new_scb)
+void afs_fs_symlink(struct afs_operation *op)
{
- struct afs_vnode *dvnode = fc->vnode;
+ const struct qstr *name = &op->dentry->d_name;
+ struct afs_vnode_param *dvp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(dvnode);
size_t namesz, reqsz, padsz, c_namesz, c_padsz;
__be32 *bp;
- if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
- return yfs_fs_symlink(fc, name, contents, dvnode_scb,
- newfid, new_scb);
-
_enter("");
- namesz = strlen(name);
+ namesz = name->len;
padsz = (4 - (namesz & 3)) & 3;
- c_namesz = strlen(contents);
+ c_namesz = strlen(op->create.symlink);
c_padsz = (4 - (c_namesz & 3)) & 3;
reqsz = (6 * 4) + namesz + padsz + c_namesz + c_padsz + (6 * 4);
- call = afs_alloc_flat_call(net, &afs_RXFSSymlink, reqsz,
+ call = afs_alloc_flat_call(op->net, &afs_RXFSSymlink, reqsz,
(3 + 21 + 21 + 6) * 4);
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->out_dir_scb = dvnode_scb;
- call->out_fid = newfid;
- call->out_scb = new_scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
*bp++ = htonl(FSSYMLINK);
- *bp++ = htonl(dvnode->fid.vid);
- *bp++ = htonl(dvnode->fid.vnode);
- *bp++ = htonl(dvnode->fid.unique);
+ *bp++ = htonl(dvp->fid.vid);
+ *bp++ = htonl(dvp->fid.vnode);
+ *bp++ = htonl(dvp->fid.unique);
*bp++ = htonl(namesz);
- memcpy(bp, name, namesz);
+ memcpy(bp, name->name, namesz);
bp = (void *) bp + namesz;
if (padsz > 0) {
memset(bp, 0, padsz);
bp = (void *) bp + padsz;
}
*bp++ = htonl(c_namesz);
- memcpy(bp, contents, c_namesz);
+ memcpy(bp, op->create.symlink, c_namesz);
bp = (void *) bp + c_namesz;
if (c_padsz > 0) {
memset(bp, 0, c_padsz);
bp = (void *) bp + c_padsz;
}
*bp++ = htonl(AFS_SET_MODE | AFS_SET_MTIME);
- *bp++ = htonl(dvnode->vfs_inode.i_mtime.tv_sec); /* mtime */
+ *bp++ = htonl(op->mtime.tv_sec); /* mtime */
*bp++ = 0; /* owner */
*bp++ = 0; /* group */
*bp++ = htonl(S_IRWXUGO); /* unix mode */
*bp++ = 0; /* segment size */
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call1(call, &dvnode->fid, name);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call1(call, &dvp->fid, name);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
@@ -979,6 +943,9 @@ int afs_fs_symlink(struct afs_fs_cursor *fc,
*/
static int afs_deliver_fs_rename(struct afs_call *call)
{
+ struct afs_operation *op = call->op;
+ struct afs_vnode_param *orig_dvp = &op->file[0];
+ struct afs_vnode_param *new_dvp = &op->file[1];
const __be32 *bp;
int ret;
@@ -986,17 +953,13 @@ static int afs_deliver_fs_rename(struct afs_call *call)
if (ret < 0)
return ret;
+ bp = call->buffer;
/* If the two dirs are the same, we have two copies of the same status
* report, so we just decode it twice.
*/
- bp = call->buffer;
- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb);
- if (ret < 0)
- return ret;
- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
- if (ret < 0)
- return ret;
- xdr_decode_AFSVolSync(&bp, call->out_volsync);
+ xdr_decode_AFSFetchStatus(&bp, call, &orig_dvp->scb);
+ xdr_decode_AFSFetchStatus(&bp, call, &new_dvp->scb);
+ xdr_decode_AFSVolSync(&bp, &op->volsync);
_leave(" = 0 [done]");
return 0;
@@ -1015,31 +978,22 @@ static const struct afs_call_type afs_RXFSRename = {
/*
* Rename/move a file or directory.
*/
-int afs_fs_rename(struct afs_fs_cursor *fc,
- const char *orig_name,
- struct afs_vnode *new_dvnode,
- const char *new_name,
- struct afs_status_cb *orig_dvnode_scb,
- struct afs_status_cb *new_dvnode_scb)
+void afs_fs_rename(struct afs_operation *op)
{
- struct afs_vnode *orig_dvnode = fc->vnode;
+ struct afs_vnode_param *orig_dvp = &op->file[0];
+ struct afs_vnode_param *new_dvp = &op->file[1];
+ const struct qstr *orig_name = &op->dentry->d_name;
+ const struct qstr *new_name = &op->dentry_2->d_name;
struct afs_call *call;
- struct afs_net *net = afs_v2net(orig_dvnode);
size_t reqsz, o_namesz, o_padsz, n_namesz, n_padsz;
__be32 *bp;
- if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
- return yfs_fs_rename(fc, orig_name,
- new_dvnode, new_name,
- orig_dvnode_scb,
- new_dvnode_scb);
-
_enter("");
- o_namesz = strlen(orig_name);
+ o_namesz = orig_name->len;
o_padsz = (4 - (o_namesz & 3)) & 3;
- n_namesz = strlen(new_name);
+ n_namesz = new_name->len;
n_padsz = (4 - (n_namesz & 3)) & 3;
reqsz = (4 * 4) +
@@ -1047,51 +1001,46 @@ int afs_fs_rename(struct afs_fs_cursor *fc,
(3 * 4) +
4 + n_namesz + n_padsz;
- call = afs_alloc_flat_call(net, &afs_RXFSRename, reqsz, (21 + 21 + 6) * 4);
+ call = afs_alloc_flat_call(op->net, &afs_RXFSRename, reqsz, (21 + 21 + 6) * 4);
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->out_dir_scb = orig_dvnode_scb;
- call->out_scb = new_dvnode_scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
*bp++ = htonl(FSRENAME);
- *bp++ = htonl(orig_dvnode->fid.vid);
- *bp++ = htonl(orig_dvnode->fid.vnode);
- *bp++ = htonl(orig_dvnode->fid.unique);
+ *bp++ = htonl(orig_dvp->fid.vid);
+ *bp++ = htonl(orig_dvp->fid.vnode);
+ *bp++ = htonl(orig_dvp->fid.unique);
*bp++ = htonl(o_namesz);
- memcpy(bp, orig_name, o_namesz);
+ memcpy(bp, orig_name->name, o_namesz);
bp = (void *) bp + o_namesz;
if (o_padsz > 0) {
memset(bp, 0, o_padsz);
bp = (void *) bp + o_padsz;
}
- *bp++ = htonl(new_dvnode->fid.vid);
- *bp++ = htonl(new_dvnode->fid.vnode);
- *bp++ = htonl(new_dvnode->fid.unique);
+ *bp++ = htonl(new_dvp->fid.vid);
+ *bp++ = htonl(new_dvp->fid.vnode);
+ *bp++ = htonl(new_dvp->fid.unique);
*bp++ = htonl(n_namesz);
- memcpy(bp, new_name, n_namesz);
+ memcpy(bp, new_name->name, n_namesz);
bp = (void *) bp + n_namesz;
if (n_padsz > 0) {
memset(bp, 0, n_padsz);
bp = (void *) bp + n_padsz;
}
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call2(call, &orig_dvnode->fid, orig_name, new_name);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call2(call, &orig_dvp->fid, orig_name, new_name);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
- * deliver reply data to an FS.StoreData
+ * Deliver reply data to FS.StoreData or FS.StoreStatus
*/
static int afs_deliver_fs_store_data(struct afs_call *call)
{
+ struct afs_operation *op = call->op;
+ struct afs_vnode_param *vp = &op->file[0];
const __be32 *bp;
int ret;
@@ -1103,10 +1052,8 @@ static int afs_deliver_fs_store_data(struct afs_call *call)
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
- if (ret < 0)
- return ret;
- xdr_decode_AFSVolSync(&bp, call->out_volsync);
+ xdr_decode_AFSFetchStatus(&bp, call, &vp->scb);
+ xdr_decode_AFSVolSync(&bp, &op->volsync);
_leave(" = 0 [done]");
return 0;
@@ -1132,90 +1079,69 @@ static const struct afs_call_type afs_RXFSStoreData64 = {
/*
* store a set of pages to a very large file
*/
-static int afs_fs_store_data64(struct afs_fs_cursor *fc,
- struct address_space *mapping,
- pgoff_t first, pgoff_t last,
- unsigned offset, unsigned to,
- loff_t size, loff_t pos, loff_t i_size,
- struct afs_status_cb *scb)
+static void afs_fs_store_data64(struct afs_operation *op,
+ loff_t pos, loff_t size, loff_t i_size)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
_enter(",%x,{%llx:%llu},,",
- key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
+ key_serial(op->key), vp->fid.vid, vp->fid.vnode);
- call = afs_alloc_flat_call(net, &afs_RXFSStoreData64,
+ call = afs_alloc_flat_call(op->net, &afs_RXFSStoreData64,
(4 + 6 + 3 * 2) * 4,
(21 + 6) * 4);
if (!call)
- return -ENOMEM;
+ return afs_op_nomem(op);
- call->key = fc->key;
- call->mapping = mapping;
- call->first = first;
- call->last = last;
- call->first_offset = offset;
- call->last_to = to;
call->send_pages = true;
- call->out_scb = scb;
/* marshall the parameters */
bp = call->request;
*bp++ = htonl(FSSTOREDATA64);
- *bp++ = htonl(vnode->fid.vid);
- *bp++ = htonl(vnode->fid.vnode);
- *bp++ = htonl(vnode->fid.unique);
+ *bp++ = htonl(vp->fid.vid);
+ *bp++ = htonl(vp->fid.vnode);
+ *bp++ = htonl(vp->fid.unique);
*bp++ = htonl(AFS_SET_MTIME); /* mask */
- *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
+ *bp++ = htonl(op->mtime.tv_sec); /* mtime */
*bp++ = 0; /* owner */
*bp++ = 0; /* group */
*bp++ = 0; /* unix mode */
*bp++ = 0; /* segment size */
- *bp++ = htonl(pos >> 32);
- *bp++ = htonl((u32) pos);
- *bp++ = htonl(size >> 32);
- *bp++ = htonl((u32) size);
- *bp++ = htonl(i_size >> 32);
- *bp++ = htonl((u32) i_size);
-
- trace_afs_make_fs_call(call, &vnode->fid);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ *bp++ = htonl(upper_32_bits(pos));
+ *bp++ = htonl(lower_32_bits(pos));
+ *bp++ = htonl(upper_32_bits(size));
+ *bp++ = htonl(lower_32_bits(size));
+ *bp++ = htonl(upper_32_bits(i_size));
+ *bp++ = htonl(lower_32_bits(i_size));
+
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
* store a set of pages
*/
-int afs_fs_store_data(struct afs_fs_cursor *fc, struct address_space *mapping,
- pgoff_t first, pgoff_t last,
- unsigned offset, unsigned to,
- struct afs_status_cb *scb)
+void afs_fs_store_data(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
loff_t size, pos, i_size;
__be32 *bp;
- if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
- return yfs_fs_store_data(fc, mapping, first, last, offset, to, scb);
-
_enter(",%x,{%llx:%llu},,",
- key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
+ key_serial(op->key), vp->fid.vid, vp->fid.vnode);
- size = (loff_t)to - (loff_t)offset;
- if (first != last)
- size += (loff_t)(last - first) << PAGE_SHIFT;
- pos = (loff_t)first << PAGE_SHIFT;
- pos += offset;
+ size = (loff_t)op->store.last_to - (loff_t)op->store.first_offset;
+ if (op->store.first != op->store.last)
+ size += (loff_t)(op->store.last - op->store.first) << PAGE_SHIFT;
+ pos = (loff_t)op->store.first << PAGE_SHIFT;
+ pos += op->store.first_offset;
- i_size = i_size_read(&vnode->vfs_inode);
+ i_size = i_size_read(&vp->vnode->vfs_inode);
if (pos + size > i_size)
i_size = size + pos;
@@ -1223,73 +1149,38 @@ int afs_fs_store_data(struct afs_fs_cursor *fc, struct address_space *mapping,
(unsigned long long) size, (unsigned long long) pos,
(unsigned long long) i_size);
- if (pos >> 32 || i_size >> 32 || size >> 32 || (pos + size) >> 32)
- return afs_fs_store_data64(fc, mapping, first, last, offset, to,
- size, pos, i_size, scb);
+ if (upper_32_bits(pos) || upper_32_bits(i_size) || upper_32_bits(size) ||
+ upper_32_bits(pos + size))
+ return afs_fs_store_data64(op, pos, size, i_size);
- call = afs_alloc_flat_call(net, &afs_RXFSStoreData,
+ call = afs_alloc_flat_call(op->net, &afs_RXFSStoreData,
(4 + 6 + 3) * 4,
(21 + 6) * 4);
if (!call)
- return -ENOMEM;
+ return afs_op_nomem(op);
- call->key = fc->key;
- call->mapping = mapping;
- call->first = first;
- call->last = last;
- call->first_offset = offset;
- call->last_to = to;
call->send_pages = true;
- call->out_scb = scb;
/* marshall the parameters */
bp = call->request;
*bp++ = htonl(FSSTOREDATA);
- *bp++ = htonl(vnode->fid.vid);
- *bp++ = htonl(vnode->fid.vnode);
- *bp++ = htonl(vnode->fid.unique);
+ *bp++ = htonl(vp->fid.vid);
+ *bp++ = htonl(vp->fid.vnode);
+ *bp++ = htonl(vp->fid.unique);
*bp++ = htonl(AFS_SET_MTIME); /* mask */
- *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
+ *bp++ = htonl(op->mtime.tv_sec); /* mtime */
*bp++ = 0; /* owner */
*bp++ = 0; /* group */
*bp++ = 0; /* unix mode */
*bp++ = 0; /* segment size */
- *bp++ = htonl(pos);
- *bp++ = htonl(size);
- *bp++ = htonl(i_size);
-
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, &vnode->fid);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
-}
-
-/*
- * deliver reply data to an FS.StoreStatus
- */
-static int afs_deliver_fs_store_status(struct afs_call *call)
-{
- const __be32 *bp;
- int ret;
-
- _enter("");
+ *bp++ = htonl(lower_32_bits(pos));
+ *bp++ = htonl(lower_32_bits(size));
+ *bp++ = htonl(lower_32_bits(i_size));
- ret = afs_transfer_reply(call);
- if (ret < 0)
- return ret;
-
- /* unmarshall the reply once we've received all of it */
- bp = call->buffer;
- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
- if (ret < 0)
- return ret;
- xdr_decode_AFSVolSync(&bp, call->out_volsync);
-
- _leave(" = 0 [done]");
- return 0;
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_NOFS);
}
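/*
 * A worked example of the size/pos computation above, assuming 4KiB pages
 * (PAGE_SHIFT = 12) and hypothetical values op->store.first = 2,
 * op->store.last = 4, op->store.first_offset = 100, op->store.last_to = 512:
 *
 *	size = 512 - 100 + ((4 - 2) << 12) = 8604 bytes
 *	pos  = (2 << 12) + 100             = 8292 (file offset of first byte)
 *
 * and if pos + size exceeds i_size_read(), the new file length sent to the
 * server is pos + size.
 */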
/*
@@ -1298,21 +1189,21 @@ static int afs_deliver_fs_store_status(struct afs_call *call)
static const struct afs_call_type afs_RXFSStoreStatus = {
.name = "FS.StoreStatus",
.op = afs_FS_StoreStatus,
- .deliver = afs_deliver_fs_store_status,
+ .deliver = afs_deliver_fs_store_data,
.destructor = afs_flat_call_destructor,
};
static const struct afs_call_type afs_RXFSStoreData_as_Status = {
.name = "FS.StoreData",
.op = afs_FS_StoreData,
- .deliver = afs_deliver_fs_store_status,
+ .deliver = afs_deliver_fs_store_data,
.destructor = afs_flat_call_destructor,
};
static const struct afs_call_type afs_RXFSStoreData64_as_Status = {
.name = "FS.StoreData64",
.op = afs_FS_StoreData64,
- .deliver = afs_deliver_fs_store_status,
+ .deliver = afs_deliver_fs_store_data,
.destructor = afs_flat_call_destructor,
};
@@ -1320,85 +1211,74 @@ static const struct afs_call_type afs_RXFSStoreData64_as_Status = {
* set the attributes on a very large file, using FS.StoreData rather than
* FS.StoreStatus so as to alter the file size also
*/
-static int afs_fs_setattr_size64(struct afs_fs_cursor *fc, struct iattr *attr,
- struct afs_status_cb *scb)
+static void afs_fs_setattr_size64(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
+ struct iattr *attr = op->setattr.attr;
__be32 *bp;
_enter(",%x,{%llx:%llu},,",
- key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
+ key_serial(op->key), vp->fid.vid, vp->fid.vnode);
ASSERT(attr->ia_valid & ATTR_SIZE);
- call = afs_alloc_flat_call(net, &afs_RXFSStoreData64_as_Status,
+ call = afs_alloc_flat_call(op->net, &afs_RXFSStoreData64_as_Status,
(4 + 6 + 3 * 2) * 4,
(21 + 6) * 4);
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->out_scb = scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
*bp++ = htonl(FSSTOREDATA64);
- *bp++ = htonl(vnode->fid.vid);
- *bp++ = htonl(vnode->fid.vnode);
- *bp++ = htonl(vnode->fid.unique);
+ *bp++ = htonl(vp->fid.vid);
+ *bp++ = htonl(vp->fid.vnode);
+ *bp++ = htonl(vp->fid.unique);
xdr_encode_AFS_StoreStatus(&bp, attr);
- *bp++ = htonl(attr->ia_size >> 32); /* position of start of write */
- *bp++ = htonl((u32) attr->ia_size);
- *bp++ = 0; /* size of write */
+ *bp++ = htonl(upper_32_bits(attr->ia_size)); /* position of start of write */
+ *bp++ = htonl(lower_32_bits(attr->ia_size));
+ *bp++ = 0; /* size of write */
*bp++ = 0;
- *bp++ = htonl(attr->ia_size >> 32); /* new file length */
- *bp++ = htonl((u32) attr->ia_size);
-
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, &vnode->fid);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ *bp++ = htonl(upper_32_bits(attr->ia_size)); /* new file length */
+ *bp++ = htonl(lower_32_bits(attr->ia_size));
+
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
* set the attributes on a file, using FS.StoreData rather than FS.StoreStatus
* so as to alter the file size also
*/
-static int afs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr,
- struct afs_status_cb *scb)
+static void afs_fs_setattr_size(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
+ struct iattr *attr = op->setattr.attr;
__be32 *bp;
_enter(",%x,{%llx:%llu},,",
- key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
+ key_serial(op->key), vp->fid.vid, vp->fid.vnode);
ASSERT(attr->ia_valid & ATTR_SIZE);
- if (attr->ia_size >> 32)
- return afs_fs_setattr_size64(fc, attr, scb);
+ if (upper_32_bits(attr->ia_size))
+ return afs_fs_setattr_size64(op);
- call = afs_alloc_flat_call(net, &afs_RXFSStoreData_as_Status,
+ call = afs_alloc_flat_call(op->net, &afs_RXFSStoreData_as_Status,
(4 + 6 + 3) * 4,
(21 + 6) * 4);
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->out_scb = scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
*bp++ = htonl(FSSTOREDATA);
- *bp++ = htonl(vnode->fid.vid);
- *bp++ = htonl(vnode->fid.vnode);
- *bp++ = htonl(vnode->fid.unique);
+ *bp++ = htonl(vp->fid.vid);
+ *bp++ = htonl(vp->fid.vnode);
+ *bp++ = htonl(vp->fid.unique);
xdr_encode_AFS_StoreStatus(&bp, attr);
@@ -1406,57 +1286,44 @@ static int afs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr,
*bp++ = 0; /* size of write */
*bp++ = htonl(attr->ia_size); /* new file length */
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, &vnode->fid);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_NOFS);
}
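/*
 * A minimal example of the 32/64-bit split above, with a hypothetical size:
 * for attr->ia_size = 5 GiB (0x140000000), upper_32_bits() returns 0x1, so
 * the FS.StoreData64 variant is selected; any size below 4 GiB fits the
 * 32-bit FS.StoreData RPC.
 */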
/*
* set the attributes on a file, using FS.StoreData if there's a change in file
* size, and FS.StoreStatus otherwise
*/
-int afs_fs_setattr(struct afs_fs_cursor *fc, struct iattr *attr,
- struct afs_status_cb *scb)
+void afs_fs_setattr(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
+ struct iattr *attr = op->setattr.attr;
__be32 *bp;
- if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
- return yfs_fs_setattr(fc, attr, scb);
-
if (attr->ia_valid & ATTR_SIZE)
- return afs_fs_setattr_size(fc, attr, scb);
+ return afs_fs_setattr_size(op);
_enter(",%x,{%llx:%llu},,",
- key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
+ key_serial(op->key), vp->fid.vid, vp->fid.vnode);
- call = afs_alloc_flat_call(net, &afs_RXFSStoreStatus,
+ call = afs_alloc_flat_call(op->net, &afs_RXFSStoreStatus,
(4 + 6) * 4,
(21 + 6) * 4);
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->out_scb = scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
*bp++ = htonl(FSSTORESTATUS);
- *bp++ = htonl(vnode->fid.vid);
- *bp++ = htonl(vnode->fid.vnode);
- *bp++ = htonl(vnode->fid.unique);
+ *bp++ = htonl(vp->fid.vid);
+ *bp++ = htonl(vp->fid.vnode);
+ *bp++ = htonl(vp->fid.unique);
- xdr_encode_AFS_StoreStatus(&bp, attr);
+ xdr_encode_AFS_StoreStatus(&bp, op->setattr.attr);
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, &vnode->fid);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
@@ -1464,6 +1331,7 @@ int afs_fs_setattr(struct afs_fs_cursor *fc, struct iattr *attr,
*/
static int afs_deliver_fs_get_volume_status(struct afs_call *call)
{
+ struct afs_operation *op = call->op;
const __be32 *bp;
char *p;
u32 size;
@@ -1485,7 +1353,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call)
return ret;
bp = call->buffer;
- xdr_decode_AFSFetchVolumeStatus(&bp, call->out_volstatus);
+ xdr_decode_AFSFetchVolumeStatus(&bp, &op->volstatus.vs);
call->unmarshall++;
afs_extract_to_tmp(call);
/* Fall through */
@@ -1499,8 +1367,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call)
call->count = ntohl(call->tmp);
_debug("volname length: %u", call->count);
if (call->count >= AFSNAMEMAX)
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_volname_len);
+ return afs_protocol_error(call, afs_eproto_volname_len);
size = (call->count + 3) & ~3; /* It's padded */
afs_extract_to_buf(call, size);
call->unmarshall++;
@@ -1529,8 +1396,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call)
call->count = ntohl(call->tmp);
_debug("offline msg length: %u", call->count);
if (call->count >= AFSNAMEMAX)
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_offline_msg_len);
+ return afs_protocol_error(call, afs_eproto_offline_msg_len);
size = (call->count + 3) & ~3; /* It's padded */
afs_extract_to_buf(call, size);
call->unmarshall++;
@@ -1560,8 +1426,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call)
call->count = ntohl(call->tmp);
_debug("motd length: %u", call->count);
if (call->count >= AFSNAMEMAX)
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_motd_len);
+ return afs_protocol_error(call, afs_eproto_motd_len);
size = (call->count + 3) & ~3; /* It's padded */
afs_extract_to_buf(call, size);
call->unmarshall++;
@@ -1601,37 +1466,26 @@ static const struct afs_call_type afs_RXFSGetVolumeStatus = {
/*
* fetch the status of a volume
*/
-int afs_fs_get_volume_status(struct afs_fs_cursor *fc,
- struct afs_volume_status *vs)
+void afs_fs_get_volume_status(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
- if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
- return yfs_fs_get_volume_status(fc, vs);
-
_enter("");
- call = afs_alloc_flat_call(net, &afs_RXFSGetVolumeStatus, 2 * 4,
+ call = afs_alloc_flat_call(op->net, &afs_RXFSGetVolumeStatus, 2 * 4,
max(12 * 4, AFSOPAQUEMAX + 1));
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->out_volstatus = vs;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp[0] = htonl(FSGETVOLUMESTATUS);
- bp[1] = htonl(vnode->fid.vid);
+ bp[1] = htonl(vp->fid.vid);
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, &vnode->fid);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
@@ -1639,6 +1493,7 @@ int afs_fs_get_volume_status(struct afs_fs_cursor *fc,
*/
static int afs_deliver_fs_xxxx_lock(struct afs_call *call)
{
+ struct afs_operation *op = call->op;
const __be32 *bp;
int ret;
@@ -1650,7 +1505,7 @@ static int afs_deliver_fs_xxxx_lock(struct afs_call *call)
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
- xdr_decode_AFSVolSync(&bp, call->out_volsync);
+ xdr_decode_AFSVolSync(&bp, &op->volsync);
_leave(" = 0 [done]");
return 0;
@@ -1691,114 +1546,80 @@ static const struct afs_call_type afs_RXFSReleaseLock = {
/*
* Set a lock on a file
*/
-int afs_fs_set_lock(struct afs_fs_cursor *fc, afs_lock_type_t type,
- struct afs_status_cb *scb)
+void afs_fs_set_lock(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
- if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
- return yfs_fs_set_lock(fc, type, scb);
-
_enter("");
- call = afs_alloc_flat_call(net, &afs_RXFSSetLock, 5 * 4, 6 * 4);
+ call = afs_alloc_flat_call(op->net, &afs_RXFSSetLock, 5 * 4, 6 * 4);
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->lvnode = vnode;
- call->out_scb = scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
*bp++ = htonl(FSSETLOCK);
- *bp++ = htonl(vnode->fid.vid);
- *bp++ = htonl(vnode->fid.vnode);
- *bp++ = htonl(vnode->fid.unique);
- *bp++ = htonl(type);
-
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_calli(call, &vnode->fid, type);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ *bp++ = htonl(vp->fid.vid);
+ *bp++ = htonl(vp->fid.vnode);
+ *bp++ = htonl(vp->fid.unique);
+ *bp++ = htonl(op->lock.type);
+
+ trace_afs_make_fs_calli(call, &vp->fid, op->lock.type);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
* extend a lock on a file
*/
-int afs_fs_extend_lock(struct afs_fs_cursor *fc, struct afs_status_cb *scb)
+void afs_fs_extend_lock(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
- if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
- return yfs_fs_extend_lock(fc, scb);
-
_enter("");
- call = afs_alloc_flat_call(net, &afs_RXFSExtendLock, 4 * 4, 6 * 4);
+ call = afs_alloc_flat_call(op->net, &afs_RXFSExtendLock, 4 * 4, 6 * 4);
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->lvnode = vnode;
- call->out_scb = scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
*bp++ = htonl(FSEXTENDLOCK);
- *bp++ = htonl(vnode->fid.vid);
- *bp++ = htonl(vnode->fid.vnode);
- *bp++ = htonl(vnode->fid.unique);
-
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, &vnode->fid);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ *bp++ = htonl(vp->fid.vid);
+ *bp++ = htonl(vp->fid.vnode);
+ *bp++ = htonl(vp->fid.unique);
+
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
* release a lock on a file
*/
-int afs_fs_release_lock(struct afs_fs_cursor *fc, struct afs_status_cb *scb)
+void afs_fs_release_lock(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
- if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
- return yfs_fs_release_lock(fc, scb);
-
_enter("");
- call = afs_alloc_flat_call(net, &afs_RXFSReleaseLock, 4 * 4, 6 * 4);
+ call = afs_alloc_flat_call(op->net, &afs_RXFSReleaseLock, 4 * 4, 6 * 4);
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->lvnode = vnode;
- call->out_scb = scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
*bp++ = htonl(FSRELEASELOCK);
- *bp++ = htonl(vnode->fid.vid);
- *bp++ = htonl(vnode->fid.vnode);
- *bp++ = htonl(vnode->fid.unique);
-
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, &vnode->fid);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ *bp++ = htonl(vp->fid.vid);
+ *bp++ = htonl(vp->fid.vnode);
+ *bp++ = htonl(vp->fid.unique);
+
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
@@ -1842,7 +1663,7 @@ int afs_fs_give_up_all_callbacks(struct afs_net *net,
bp = call->request;
*bp++ = htonl(FSGIVEUPALLCALLBACKS);
- /* Can't take a ref on server */
+ call->server = afs_use_server(server, afs_server_trace_give_up_cb);
afs_make_call(ac, call, GFP_NOFS);
return afs_wait_for_call_to_complete(call, ac);
}
@@ -1905,14 +1726,13 @@ static const struct afs_call_type afs_RXFSGetCapabilities = {
};
/*
- * Probe a fileserver for the capabilities that it supports. This can
- * return up to 196 words.
+ * Probe a fileserver for the capabilities that it supports. This RPC can
+ * reply with up to 196 words. The operation is asynchronous and, if we
+ * managed to allocate a call, true is returned and the result is delivered
+ * through ->done() - otherwise we return false to indicate we didn't even try.
*/
-struct afs_call *afs_fs_get_capabilities(struct afs_net *net,
- struct afs_server *server,
- struct afs_addr_cursor *ac,
- struct key *key,
- unsigned int server_index)
+bool afs_fs_get_capabilities(struct afs_net *net, struct afs_server *server,
+ struct afs_addr_cursor *ac, struct key *key)
{
struct afs_call *call;
__be32 *bp;
@@ -1921,11 +1741,10 @@ struct afs_call *afs_fs_get_capabilities(struct afs_net *net,
call = afs_alloc_flat_call(net, &afs_RXFSGetCapabilities, 1 * 4, 16 * 4);
if (!call)
- return ERR_PTR(-ENOMEM);
+ return false;
call->key = key;
- call->server = afs_get_server(server, afs_server_trace_get_caps);
- call->server_index = server_index;
+ call->server = afs_use_server(server, afs_server_trace_get_caps);
call->upgrade = true;
call->async = true;
call->max_lifespan = AFS_PROBE_MAX_LIFESPAN;
@@ -1934,87 +1753,10 @@ struct afs_call *afs_fs_get_capabilities(struct afs_net *net,
bp = call->request;
*bp++ = htonl(FSGETCAPABILITIES);
- /* Can't take a ref on server */
trace_afs_make_fs_call(call, NULL);
afs_make_call(ac, call, GFP_NOFS);
- return call;
-}
-
-/*
- * Deliver reply data to an FS.FetchStatus with no vnode.
- */
-static int afs_deliver_fs_fetch_status(struct afs_call *call)
-{
- const __be32 *bp;
- int ret;
-
- ret = afs_transfer_reply(call);
- if (ret < 0)
- return ret;
-
- /* unmarshall the reply once we've received all of it */
- bp = call->buffer;
- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
- if (ret < 0)
- return ret;
- xdr_decode_AFSCallBack(&bp, call, call->out_scb);
- xdr_decode_AFSVolSync(&bp, call->out_volsync);
-
- _leave(" = 0 [done]");
- return 0;
-}
-
-/*
- * FS.FetchStatus operation type
- */
-static const struct afs_call_type afs_RXFSFetchStatus = {
- .name = "FS.FetchStatus",
- .op = afs_FS_FetchStatus,
- .deliver = afs_deliver_fs_fetch_status,
- .destructor = afs_flat_call_destructor,
-};
-
-/*
- * Fetch the status information for a fid without needing a vnode handle.
- */
-int afs_fs_fetch_status(struct afs_fs_cursor *fc,
- struct afs_net *net,
- struct afs_fid *fid,
- struct afs_status_cb *scb,
- struct afs_volsync *volsync)
-{
- struct afs_call *call;
- __be32 *bp;
-
- if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
- return yfs_fs_fetch_status(fc, net, fid, scb, volsync);
-
- _enter(",%x,{%llx:%llu},,",
- key_serial(fc->key), fid->vid, fid->vnode);
-
- call = afs_alloc_flat_call(net, &afs_RXFSFetchStatus, 16, (21 + 3 + 6) * 4);
- if (!call) {
- fc->ac.error = -ENOMEM;
- return -ENOMEM;
- }
-
- call->key = fc->key;
- call->out_fid = fid;
- call->out_scb = scb;
- call->out_volsync = volsync;
-
- /* marshall the parameters */
- bp = call->request;
- bp[0] = htonl(FSFETCHSTATUS);
- bp[1] = htonl(fid->vid);
- bp[2] = htonl(fid->vnode);
- bp[3] = htonl(fid->unique);
-
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, fid);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ afs_put_call(call);
+ return true;
}
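/*
 * A sketch of how a caller might use the new bool return (hypothetical
 * caller and helper name; the probe result itself arrives asynchronously via
 * the call's ->done() handler, so false only means the call could not be
 * started):
 *
 *	if (!afs_fs_get_capabilities(net, server, &ac, key))
 *		note_probe_not_started(server);	/* hypothetical failure path */
 */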
/*
@@ -2022,6 +1764,7 @@ int afs_fs_fetch_status(struct afs_fs_cursor *fc,
*/
static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
{
+ struct afs_operation *op = call->op;
struct afs_status_cb *scb;
const __be32 *bp;
u32 tmp;
@@ -2043,10 +1786,9 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
return ret;
tmp = ntohl(call->tmp);
- _debug("status count: %u/%u", tmp, call->count2);
- if (tmp != call->count2)
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_ibulkst_count);
+ _debug("status count: %u/%u", tmp, op->nr_files);
+ if (tmp != op->nr_files)
+ return afs_protocol_error(call, afs_eproto_ibulkst_count);
call->count = 0;
call->unmarshall++;
@@ -2060,14 +1802,23 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
if (ret < 0)
return ret;
+ switch (call->count) {
+ case 0:
+ scb = &op->file[0].scb;
+ break;
+ case 1:
+ scb = &op->file[1].scb;
+ break;
+ default:
+ scb = &op->more_files[call->count - 2].scb;
+ break;
+ }
+
bp = call->buffer;
- scb = &call->out_scb[call->count];
- ret = xdr_decode_AFSFetchStatus(&bp, call, scb);
- if (ret < 0)
- return ret;
+ xdr_decode_AFSFetchStatus(&bp, call, scb);
call->count++;
- if (call->count < call->count2)
+ if (call->count < op->nr_files)
goto more_counts;
call->count = 0;
@@ -2084,9 +1835,8 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
tmp = ntohl(call->tmp);
_debug("CB count: %u", tmp);
- if (tmp != call->count2)
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_ibulkst_cb_count);
+ if (tmp != op->nr_files)
+ return afs_protocol_error(call, afs_eproto_ibulkst_cb_count);
call->count = 0;
call->unmarshall++;
more_cbs:
@@ -2100,11 +1850,22 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
return ret;
_debug("unmarshall CB array");
+ switch (call->count) {
+ case 0:
+ scb = &op->file[0].scb;
+ break;
+ case 1:
+ scb = &op->file[1].scb;
+ break;
+ default:
+ scb = &op->more_files[call->count - 2].scb;
+ break;
+ }
+
bp = call->buffer;
- scb = &call->out_scb[call->count];
xdr_decode_AFSCallBack(&bp, call, scb);
call->count++;
- if (call->count < call->count2)
+ if (call->count < op->nr_files)
goto more_cbs;
afs_extract_to_buf(call, 6 * sizeof(__be32));
@@ -2117,7 +1878,7 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
return ret;
bp = call->buffer;
- xdr_decode_AFSVolSync(&bp, call->out_volsync);
+ xdr_decode_AFSVolSync(&bp, &op->volsync);
call->unmarshall++;
@@ -2129,6 +1890,16 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
return 0;
}
+static void afs_done_fs_inline_bulk_status(struct afs_call *call)
+{
+ if (call->error == -ECONNABORTED &&
+ call->abort_code == RX_INVALID_OPERATION) {
+ set_bit(AFS_SERVER_FL_NO_IBULK, &call->server->flags);
+ if (call->op)
+ set_bit(AFS_VOLUME_MAYBE_NO_IBULK, &call->op->volume->flags);
+ }
+}
+
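/*
 * The reply arrays are indexed the same way the request was marshalled:
 * slot 0 is op->file[0], slot 1 is op->file[1], and slots 2..n-1 map to
 * op->more_files[slot - 2].  A sketch of a helper equivalent to the two
 * switch statements in the deliver routine above (hypothetical name, for
 * illustration only):
 *
 *	static struct afs_status_cb *ibulk_scb(struct afs_operation *op,
 *					       unsigned int i)
 *	{
 *		return i < 2 ? &op->file[i].scb : &op->more_files[i - 2].scb;
 *	}
 */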
/*
* FS.InlineBulkStatus operation type
*/
@@ -2136,58 +1907,53 @@ static const struct afs_call_type afs_RXFSInlineBulkStatus = {
.name = "FS.InlineBulkStatus",
.op = afs_FS_InlineBulkStatus,
.deliver = afs_deliver_fs_inline_bulk_status,
+ .done = afs_done_fs_inline_bulk_status,
.destructor = afs_flat_call_destructor,
};
/*
* Fetch the status information for up to 50 files
*/
-int afs_fs_inline_bulk_status(struct afs_fs_cursor *fc,
- struct afs_net *net,
- struct afs_fid *fids,
- struct afs_status_cb *statuses,
- unsigned int nr_fids,
- struct afs_volsync *volsync)
+void afs_fs_inline_bulk_status(struct afs_operation *op)
{
+ struct afs_vnode_param *dvp = &op->file[0];
+ struct afs_vnode_param *vp = &op->file[1];
struct afs_call *call;
__be32 *bp;
int i;
- if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
- return yfs_fs_inline_bulk_status(fc, net, fids, statuses,
- nr_fids, volsync);
+ if (test_bit(AFS_SERVER_FL_NO_IBULK, &op->server->flags)) {
+ op->error = -ENOTSUPP;
+ return;
+ }
_enter(",%x,{%llx:%llu},%u",
- key_serial(fc->key), fids[0].vid, fids[1].vnode, nr_fids);
+ key_serial(op->key), vp->fid.vid, vp->fid.vnode, op->nr_files);
- call = afs_alloc_flat_call(net, &afs_RXFSInlineBulkStatus,
- (2 + nr_fids * 3) * 4,
+ call = afs_alloc_flat_call(op->net, &afs_RXFSInlineBulkStatus,
+ (2 + op->nr_files * 3) * 4,
21 * 4);
- if (!call) {
- fc->ac.error = -ENOMEM;
- return -ENOMEM;
- }
-
- call->key = fc->key;
- call->out_scb = statuses;
- call->out_volsync = volsync;
- call->count2 = nr_fids;
+ if (!call)
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
*bp++ = htonl(FSINLINEBULKSTATUS);
- *bp++ = htonl(nr_fids);
- for (i = 0; i < nr_fids; i++) {
- *bp++ = htonl(fids[i].vid);
- *bp++ = htonl(fids[i].vnode);
- *bp++ = htonl(fids[i].unique);
+ *bp++ = htonl(op->nr_files);
+ *bp++ = htonl(dvp->fid.vid);
+ *bp++ = htonl(dvp->fid.vnode);
+ *bp++ = htonl(dvp->fid.unique);
+ *bp++ = htonl(vp->fid.vid);
+ *bp++ = htonl(vp->fid.vnode);
+ *bp++ = htonl(vp->fid.unique);
+ for (i = 0; i < op->nr_files - 2; i++) {
+ *bp++ = htonl(op->more_files[i].fid.vid);
+ *bp++ = htonl(op->more_files[i].fid.vnode);
+ *bp++ = htonl(op->more_files[i].fid.unique);
}
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, &fids[0]);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
@@ -2195,6 +1961,8 @@ int afs_fs_inline_bulk_status(struct afs_fs_cursor *fc,
*/
static int afs_deliver_fs_fetch_acl(struct afs_call *call)
{
+ struct afs_operation *op = call->op;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_acl *acl;
const __be32 *bp;
unsigned int size;
@@ -2220,7 +1988,7 @@ static int afs_deliver_fs_fetch_acl(struct afs_call *call)
acl = kmalloc(struct_size(acl, data, size), GFP_KERNEL);
if (!acl)
return -ENOMEM;
- call->ret_acl = acl;
+ op->acl = acl;
acl->size = call->count2;
afs_extract_begin(call, acl->data, size);
call->unmarshall++;
@@ -2243,10 +2011,8 @@ static int afs_deliver_fs_fetch_acl(struct afs_call *call)
return ret;
bp = call->buffer;
- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
- if (ret < 0)
- return ret;
- xdr_decode_AFSVolSync(&bp, call->out_volsync);
+ xdr_decode_AFSFetchStatus(&bp, call, &vp->scb);
+ xdr_decode_AFSVolSync(&bp, &op->volsync);
call->unmarshall++;
@@ -2258,12 +2024,6 @@ static int afs_deliver_fs_fetch_acl(struct afs_call *call)
return 0;
}
-static void afs_destroy_fs_fetch_acl(struct afs_call *call)
-{
- kfree(call->ret_acl);
- afs_flat_call_destructor(call);
-}
-
/*
* FS.FetchACL operation type
*/
@@ -2271,68 +2031,33 @@ static const struct afs_call_type afs_RXFSFetchACL = {
.name = "FS.FetchACL",
.op = afs_FS_FetchACL,
.deliver = afs_deliver_fs_fetch_acl,
- .destructor = afs_destroy_fs_fetch_acl,
};
/*
* Fetch the ACL for a file.
*/
-struct afs_acl *afs_fs_fetch_acl(struct afs_fs_cursor *fc,
- struct afs_status_cb *scb)
+void afs_fs_fetch_acl(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
_enter(",%x,{%llx:%llu},,",
- key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
+ key_serial(op->key), vp->fid.vid, vp->fid.vnode);
- call = afs_alloc_flat_call(net, &afs_RXFSFetchACL, 16, (21 + 6) * 4);
- if (!call) {
- fc->ac.error = -ENOMEM;
- return ERR_PTR(-ENOMEM);
- }
-
- call->key = fc->key;
- call->ret_acl = NULL;
- call->out_scb = scb;
- call->out_volsync = NULL;
+ call = afs_alloc_flat_call(op->net, &afs_RXFSFetchACL, 16, (21 + 6) * 4);
+ if (!call)
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp[0] = htonl(FSFETCHACL);
- bp[1] = htonl(vnode->fid.vid);
- bp[2] = htonl(vnode->fid.vnode);
- bp[3] = htonl(vnode->fid.unique);
-
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, &vnode->fid);
- afs_make_call(&fc->ac, call, GFP_KERNEL);
- return (struct afs_acl *)afs_wait_for_call_to_complete(call, &fc->ac);
-}
-
-/*
- * Deliver reply data to any operation that returns file status and volume
- * sync.
- */
-static int afs_deliver_fs_file_status_and_vol(struct afs_call *call)
-{
- const __be32 *bp;
- int ret;
+ bp[1] = htonl(vp->fid.vid);
+ bp[2] = htonl(vp->fid.vnode);
+ bp[3] = htonl(vp->fid.unique);
- ret = afs_transfer_reply(call);
- if (ret < 0)
- return ret;
-
- bp = call->buffer;
- ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
- if (ret < 0)
- return ret;
- xdr_decode_AFSVolSync(&bp, call->out_volsync);
-
- _leave(" = 0 [done]");
- return 0;
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_KERNEL);
}
/*
@@ -2348,42 +2073,34 @@ static const struct afs_call_type afs_RXFSStoreACL = {
/*
 * Store the ACL for a file.
*/
-int afs_fs_store_acl(struct afs_fs_cursor *fc, const struct afs_acl *acl,
- struct afs_status_cb *scb)
+void afs_fs_store_acl(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
+ const struct afs_acl *acl = op->acl;
size_t size;
__be32 *bp;
_enter(",%x,{%llx:%llu},,",
- key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
+ key_serial(op->key), vp->fid.vid, vp->fid.vnode);
size = round_up(acl->size, 4);
- call = afs_alloc_flat_call(net, &afs_RXFSStoreACL,
+ call = afs_alloc_flat_call(op->net, &afs_RXFSStoreACL,
5 * 4 + size, (21 + 6) * 4);
- if (!call) {
- fc->ac.error = -ENOMEM;
- return -ENOMEM;
- }
-
- call->key = fc->key;
- call->out_scb = scb;
- call->out_volsync = NULL;
+ if (!call)
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp[0] = htonl(FSSTOREACL);
- bp[1] = htonl(vnode->fid.vid);
- bp[2] = htonl(vnode->fid.vnode);
- bp[3] = htonl(vnode->fid.unique);
+ bp[1] = htonl(vp->fid.vid);
+ bp[2] = htonl(vp->fid.vnode);
+ bp[3] = htonl(vp->fid.unique);
bp[4] = htonl(acl->size);
memcpy(&bp[5], acl->data, acl->size);
if (acl->size != size)
memset((void *)&bp[5] + acl->size, 0, size - acl->size);
- trace_afs_make_fs_call(call, &vnode->fid);
- afs_make_call(&fc->ac, call, GFP_KERNEL);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_KERNEL);
}
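FS.StoreACL rounds the opaque ACL payload up to a 32-bit boundary and zero-fills the tail, as XDR requires for opaque data. The same padding step as a standalone sketch (illustrative helper, assuming size == round_up(acl_size, 4) as above):

#include <linux/string.h>

/* Copy opaque data into an XDR buffer, zero-padding to a 4-byte boundary. */
static void xdr_copy_padded(void *buf, const void *data,
			    size_t acl_size, size_t size)
{
	memcpy(buf, data, acl_size);
	if (acl_size != size)
		memset((char *)buf + acl_size, 0, size - acl_size);
}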
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 281470fe1183..cd0a0060950b 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -67,16 +67,18 @@ static void afs_set_i_size(struct afs_vnode *vnode, u64 size)
/*
* Initialise an inode from the vnode status.
*/
-static int afs_inode_init_from_status(struct afs_vnode *vnode, struct key *key,
- struct afs_cb_interest *cbi,
- struct afs_vnode *parent_vnode,
- struct afs_status_cb *scb)
+static int afs_inode_init_from_status(struct afs_operation *op,
+ struct afs_vnode_param *vp,
+ struct afs_vnode *vnode)
{
- struct afs_cb_interest *old_cbi = NULL;
- struct afs_file_status *status = &scb->status;
+ struct afs_file_status *status = &vp->scb.status;
struct inode *inode = AFS_VNODE_TO_I(vnode);
struct timespec64 t;
+ _enter("{%llx:%llu.%u} %s",
+ vp->fid.vid, vp->fid.vnode, vp->fid.unique,
+ op->type ? op->type->name : "???");
+
_debug("FS: ft=%d lk=%d sz=%llu ver=%Lu mod=%hu",
status->type,
status->nlink,
@@ -86,12 +88,15 @@ static int afs_inode_init_from_status(struct afs_vnode *vnode, struct key *key,
write_seqlock(&vnode->cb_lock);
+ vnode->cb_v_break = op->cb_v_break;
+ vnode->cb_s_break = op->cb_s_break;
vnode->status = *status;
t = status->mtime_client;
inode->i_ctime = t;
inode->i_mtime = t;
inode->i_atime = t;
+ inode->i_flags |= S_NOATIME;
inode->i_uid = make_kuid(&init_user_ns, status->owner);
inode->i_gid = make_kgid(&init_user_ns, status->group);
set_nlink(&vnode->vfs_inode, status->nlink);
@@ -128,9 +133,9 @@ static int afs_inode_init_from_status(struct afs_vnode *vnode, struct key *key,
inode_nohighmem(inode);
break;
default:
- dump_vnode(vnode, parent_vnode);
+ dump_vnode(vnode, op->file[0].vnode != vnode ? op->file[0].vnode : NULL);
write_sequnlock(&vnode->cb_lock);
- return afs_protocol_error(NULL, -EBADMSG, afs_eproto_file_type);
+ return afs_protocol_error(NULL, afs_eproto_file_type);
}
afs_set_i_size(vnode, status->size);
@@ -138,39 +143,36 @@ static int afs_inode_init_from_status(struct afs_vnode *vnode, struct key *key,
vnode->invalid_before = status->data_version;
inode_set_iversion_raw(&vnode->vfs_inode, status->data_version);
- if (!scb->have_cb) {
+ if (!vp->scb.have_cb) {
/* it's a symlink we just created (the fileserver
* didn't give us a callback) */
vnode->cb_expires_at = ktime_get_real_seconds();
} else {
- vnode->cb_expires_at = scb->callback.expires_at;
- old_cbi = rcu_dereference_protected(vnode->cb_interest,
- lockdep_is_held(&vnode->cb_lock.lock));
- if (cbi != old_cbi)
- rcu_assign_pointer(vnode->cb_interest, afs_get_cb_interest(cbi));
- else
- old_cbi = NULL;
+ vnode->cb_expires_at = vp->scb.callback.expires_at;
+ vnode->cb_server = op->server;
set_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
}
write_sequnlock(&vnode->cb_lock);
- afs_put_cb_interest(afs_v2net(vnode), old_cbi);
return 0;
}
/*
* Update the core inode struct from a returned status record.
*/
-static void afs_apply_status(struct afs_fs_cursor *fc,
- struct afs_vnode *vnode,
- struct afs_status_cb *scb,
- const afs_dataversion_t *expected_version)
+static void afs_apply_status(struct afs_operation *op,
+ struct afs_vnode_param *vp)
{
- struct afs_file_status *status = &scb->status;
+ struct afs_file_status *status = &vp->scb.status;
+ struct afs_vnode *vnode = vp->vnode;
struct timespec64 t;
umode_t mode;
bool data_changed = false;
+ _enter("{%llx:%llu.%u} %s",
+ vp->fid.vid, vp->fid.vnode, vp->fid.unique,
+ op->type ? op->type->name : "???");
+
BUG_ON(test_bit(AFS_VNODE_UNSET, &vnode->flags));
if (status->type != vnode->status.type) {
@@ -179,7 +181,7 @@ static void afs_apply_status(struct afs_fs_cursor *fc,
vnode->fid.vnode,
vnode->fid.unique,
status->type, vnode->status.type);
- afs_protocol_error(NULL, -EBADMSG, afs_eproto_bad_status);
+ afs_protocol_error(NULL, afs_eproto_bad_status);
return;
}
@@ -209,14 +211,13 @@ static void afs_apply_status(struct afs_fs_cursor *fc,
vnode->status = *status;
- if (expected_version &&
- *expected_version != status->data_version) {
+ if (vp->dv_before + vp->dv_delta != status->data_version) {
if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags))
pr_warn("kAFS: vnode modified {%llx:%llu} %llx->%llx %s\n",
vnode->fid.vid, vnode->fid.vnode,
- (unsigned long long)*expected_version,
+ (unsigned long long)vp->dv_before + vp->dv_delta,
(unsigned long long)status->data_version,
- fc->type ? fc->type->name : "???");
+ op->type ? op->type->name : "???");
vnode->invalid_before = status->data_version;
if (vnode->status.type == AFS_FTYPE_DIR) {
@@ -243,22 +244,15 @@ static void afs_apply_status(struct afs_fs_cursor *fc,
/*
* Apply a callback to a vnode.
*/
-static void afs_apply_callback(struct afs_fs_cursor *fc,
- struct afs_vnode *vnode,
- struct afs_status_cb *scb,
- unsigned int cb_break)
+static void afs_apply_callback(struct afs_operation *op,
+ struct afs_vnode_param *vp)
{
- struct afs_cb_interest *old;
- struct afs_callback *cb = &scb->callback;
+ struct afs_callback *cb = &vp->scb.callback;
+ struct afs_vnode *vnode = vp->vnode;
- if (!afs_cb_is_broken(cb_break, vnode, fc->cbi)) {
+ if (!afs_cb_is_broken(vp->cb_break_before, vnode)) {
vnode->cb_expires_at = cb->expires_at;
- old = rcu_dereference_protected(vnode->cb_interest,
- lockdep_is_held(&vnode->cb_lock.lock));
- if (old != fc->cbi) {
- rcu_assign_pointer(vnode->cb_interest, afs_get_cb_interest(fc->cbi));
- afs_put_cb_interest(afs_v2net(vnode), old);
- }
+ vnode->cb_server = op->server;
set_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
}
}
@@ -267,106 +261,108 @@ static void afs_apply_callback(struct afs_fs_cursor *fc,
* Apply the received status and callback to an inode all in the same critical
* section to avoid races with afs_validate().
*/
-void afs_vnode_commit_status(struct afs_fs_cursor *fc,
- struct afs_vnode *vnode,
- unsigned int cb_break,
- const afs_dataversion_t *expected_version,
- struct afs_status_cb *scb)
+void afs_vnode_commit_status(struct afs_operation *op, struct afs_vnode_param *vp)
{
- if (fc->ac.error != 0)
- return;
+ struct afs_vnode *vnode = vp->vnode;
+
+ _enter("");
+
+ ASSERTCMP(op->error, ==, 0);
write_seqlock(&vnode->cb_lock);
- if (scb->have_error) {
- if (scb->status.abort_code == VNOVNODE) {
+ if (vp->scb.have_error) {
+ if (vp->scb.status.abort_code == VNOVNODE) {
set_bit(AFS_VNODE_DELETED, &vnode->flags);
clear_nlink(&vnode->vfs_inode);
__afs_break_callback(vnode, afs_cb_break_for_deleted);
}
} else {
- if (scb->have_status)
- afs_apply_status(fc, vnode, scb, expected_version);
- if (scb->have_cb)
- afs_apply_callback(fc, vnode, scb, cb_break);
+ if (vp->scb.have_status)
+ afs_apply_status(op, vp);
+ if (vp->scb.have_cb)
+ afs_apply_callback(op, vp);
}
write_sequnlock(&vnode->cb_lock);
- if (fc->ac.error == 0 && scb->have_status)
- afs_cache_permit(vnode, fc->key, cb_break, scb);
+ if (op->error == 0 && vp->scb.have_status)
+ afs_cache_permit(vnode, op->key, vp->cb_break_before, &vp->scb);
}
+static void afs_fetch_status_success(struct afs_operation *op)
+{
+ struct afs_vnode_param *vp = &op->file[0];
+ struct afs_vnode *vnode = vp->vnode;
+ int ret;
+
+ if (vnode->vfs_inode.i_state & I_NEW) {
+ ret = afs_inode_init_from_status(op, vp, vnode);
+ op->error = ret;
+ if (ret == 0)
+ afs_cache_permit(vnode, op->key, vp->cb_break_before, &vp->scb);
+ } else {
+ afs_vnode_commit_status(op, vp);
+ }
+}
+
+static const struct afs_operation_ops afs_fetch_status_operation = {
+ .issue_afs_rpc = afs_fs_fetch_status,
+ .issue_yfs_rpc = yfs_fs_fetch_status,
+ .success = afs_fetch_status_success,
+};
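The ops table splits protocol marshalling from result handling: the dispatcher in fs/afs/fs_operation.c (added elsewhere in this series) issues ->issue_afs_rpc or ->issue_yfs_rpc according to the protocol the chosen fileserver speaks, then runs ->success once the reply has been unmarshalled. A hedged sketch of the dispatch, assuming the YFS test is the AFS_SERVER_FL_IS_YFS flag carried over from the cursor code:

/* Illustrative only; the real dispatcher lives in fs/afs/fs_operation.c. */
static void afs_issue_rpc(struct afs_operation *op)
{
	if (test_bit(AFS_SERVER_FL_IS_YFS, &op->server->flags))
		op->ops->issue_yfs_rpc(op);
	else
		op->ops->issue_afs_rpc(op);
}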
+
/*
* Fetch file status from the volume.
*/
int afs_fetch_status(struct afs_vnode *vnode, struct key *key, bool is_new,
afs_access_t *_caller_access)
{
- struct afs_status_cb *scb;
- struct afs_fs_cursor fc;
- int ret;
+ struct afs_operation *op;
_enter("%s,{%llx:%llu.%u,S=%lx}",
vnode->volume->name,
vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique,
vnode->flags);
- scb = kzalloc(sizeof(struct afs_status_cb), GFP_KERNEL);
- if (!scb)
- return -ENOMEM;
+ op = afs_alloc_operation(key, vnode->volume);
+ if (IS_ERR(op))
+ return PTR_ERR(op);
- ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, vnode, key, true)) {
- afs_dataversion_t data_version = vnode->status.data_version;
+ afs_op_set_vnode(op, 0, vnode);
- while (afs_select_fileserver(&fc)) {
- fc.cb_break = afs_calc_vnode_cb_break(vnode);
- afs_fs_fetch_file_status(&fc, scb, NULL);
- }
-
- if (fc.error) {
- /* Do nothing. */
- } else if (is_new) {
- ret = afs_inode_init_from_status(vnode, key, fc.cbi,
- NULL, scb);
- fc.error = ret;
- if (ret == 0)
- afs_cache_permit(vnode, key, fc.cb_break, scb);
- } else {
- afs_vnode_commit_status(&fc, vnode, fc.cb_break,
- &data_version, scb);
- }
- afs_check_for_remote_deletion(&fc, vnode);
- ret = afs_end_vnode_operation(&fc);
- }
+ op->nr_files = 1;
+ op->ops = &afs_fetch_status_operation;
+ afs_begin_vnode_operation(op);
+ afs_wait_for_operation(op);
- if (ret == 0 && _caller_access)
- *_caller_access = scb->status.caller_access;
- kfree(scb);
- _leave(" = %d", ret);
- return ret;
+ if (_caller_access)
+ *_caller_access = op->file[0].scb.status.caller_access;
+ return afs_put_operation(op);
}
/*
- * iget5() comparator
+ * ilookup() comparator
*/
-int afs_iget5_test(struct inode *inode, void *opaque)
+int afs_ilookup5_test_by_fid(struct inode *inode, void *opaque)
{
- struct afs_iget_data *iget_data = opaque;
struct afs_vnode *vnode = AFS_FS_I(inode);
+ struct afs_fid *fid = opaque;
- return memcmp(&vnode->fid, &iget_data->fid, sizeof(iget_data->fid)) == 0;
+ return (fid->vnode == vnode->fid.vnode &&
+ fid->vnode_hi == vnode->fid.vnode_hi &&
+ fid->unique == vnode->fid.unique);
}
/*
- * iget5() comparator for inode created by autocell operations
- *
- * These pseudo inodes don't match anything.
+ * iget5() comparator
*/
-static int afs_iget5_pseudo_dir_test(struct inode *inode, void *opaque)
+static int afs_iget5_test(struct inode *inode, void *opaque)
{
- return 0;
+ struct afs_vnode_param *vp = opaque;
+
+ return afs_ilookup5_test_by_fid(inode, &vp->fid);
}
/*
@@ -374,99 +370,22 @@ static int afs_iget5_pseudo_dir_test(struct inode *inode, void *opaque)
*/
static int afs_iget5_set(struct inode *inode, void *opaque)
{
- struct afs_iget_data *iget_data = opaque;
+ struct afs_vnode_param *vp = opaque;
+ struct afs_super_info *as = AFS_FS_S(inode->i_sb);
struct afs_vnode *vnode = AFS_FS_I(inode);
- vnode->fid = iget_data->fid;
- vnode->volume = iget_data->volume;
- vnode->cb_v_break = iget_data->cb_v_break;
- vnode->cb_s_break = iget_data->cb_s_break;
+ vnode->volume = as->volume;
+ vnode->fid = vp->fid;
/* YFS supports 96-bit vnode IDs, but Linux only supports
* 64-bit inode numbers.
*/
- inode->i_ino = iget_data->fid.vnode;
- inode->i_generation = iget_data->fid.unique;
+ inode->i_ino = vnode->fid.vnode;
+ inode->i_generation = vnode->fid.unique;
return 0;
}
/*
- * Create an inode for a dynamic root directory or an autocell dynamic
- * automount dir.
- */
-struct inode *afs_iget_pseudo_dir(struct super_block *sb, bool root)
-{
- struct afs_super_info *as;
- struct afs_vnode *vnode;
- struct inode *inode;
- static atomic_t afs_autocell_ino;
-
- struct afs_iget_data iget_data = {
- .cb_v_break = 0,
- .cb_s_break = 0,
- };
-
- _enter("");
-
- as = sb->s_fs_info;
- if (as->volume) {
- iget_data.volume = as->volume;
- iget_data.fid.vid = as->volume->vid;
- }
- if (root) {
- iget_data.fid.vnode = 1;
- iget_data.fid.unique = 1;
- } else {
- iget_data.fid.vnode = atomic_inc_return(&afs_autocell_ino);
- iget_data.fid.unique = 0;
- }
-
- inode = iget5_locked(sb, iget_data.fid.vnode,
- afs_iget5_pseudo_dir_test, afs_iget5_set,
- &iget_data);
- if (!inode) {
- _leave(" = -ENOMEM");
- return ERR_PTR(-ENOMEM);
- }
-
- _debug("GOT INODE %p { ino=%lu, vl=%llx, vn=%llx, u=%x }",
- inode, inode->i_ino, iget_data.fid.vid, iget_data.fid.vnode,
- iget_data.fid.unique);
-
- vnode = AFS_FS_I(inode);
-
- /* there shouldn't be an existing inode */
- BUG_ON(!(inode->i_state & I_NEW));
-
- inode->i_size = 0;
- inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO;
- if (root) {
- inode->i_op = &afs_dynroot_inode_operations;
- inode->i_fop = &simple_dir_operations;
- } else {
- inode->i_op = &afs_autocell_inode_operations;
- }
- set_nlink(inode, 2);
- inode->i_uid = GLOBAL_ROOT_UID;
- inode->i_gid = GLOBAL_ROOT_GID;
- inode->i_ctime = inode->i_atime = inode->i_mtime = current_time(inode);
- inode->i_blocks = 0;
- inode_set_iversion_raw(inode, 0);
- inode->i_generation = 0;
-
- set_bit(AFS_VNODE_PSEUDODIR, &vnode->flags);
- if (!root) {
- set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags);
- inode->i_flags |= S_AUTOMOUNT;
- }
-
- inode->i_flags |= S_NOATIME;
- unlock_new_inode(inode);
- _leave(" = %p", inode);
- return inode;
-}
-
-/*
* Get a cache cookie for an inode.
*/
static void afs_get_inode_cache(struct afs_vnode *vnode)
@@ -501,58 +420,41 @@ static void afs_get_inode_cache(struct afs_vnode *vnode)
/*
* inode retrieval
*/
-struct inode *afs_iget(struct super_block *sb, struct key *key,
- struct afs_iget_data *iget_data,
- struct afs_status_cb *scb,
- struct afs_cb_interest *cbi,
- struct afs_vnode *parent_vnode)
+struct inode *afs_iget(struct afs_operation *op, struct afs_vnode_param *vp)
{
- struct afs_super_info *as;
+ struct afs_vnode_param *dvp = &op->file[0];
+ struct super_block *sb = dvp->vnode->vfs_inode.i_sb;
struct afs_vnode *vnode;
- struct afs_fid *fid = &iget_data->fid;
struct inode *inode;
int ret;
- _enter(",{%llx:%llu.%u},,", fid->vid, fid->vnode, fid->unique);
+ _enter(",{%llx:%llu.%u},,", vp->fid.vid, vp->fid.vnode, vp->fid.unique);
- as = sb->s_fs_info;
- iget_data->volume = as->volume;
-
- inode = iget5_locked(sb, fid->vnode, afs_iget5_test, afs_iget5_set,
- iget_data);
+ inode = iget5_locked(sb, vp->fid.vnode, afs_iget5_test, afs_iget5_set, vp);
if (!inode) {
_leave(" = -ENOMEM");
return ERR_PTR(-ENOMEM);
}
- _debug("GOT INODE %p { vl=%llx vn=%llx, u=%x }",
- inode, fid->vid, fid->vnode, fid->unique);
-
vnode = AFS_FS_I(inode);
+ _debug("GOT INODE %p { vl=%llx vn=%llx, u=%x }",
+ inode, vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique);
+
/* deal with an existing inode */
if (!(inode->i_state & I_NEW)) {
_leave(" = %p", inode);
return inode;
}
- if (!scb) {
- /* it's a remotely extant inode */
- ret = afs_fetch_status(vnode, key, true, NULL);
- if (ret < 0)
- goto bad_inode;
- } else {
- ret = afs_inode_init_from_status(vnode, key, cbi, parent_vnode,
- scb);
- if (ret < 0)
- goto bad_inode;
- }
+ ret = afs_inode_init_from_status(op, vp, vnode);
+ if (ret < 0)
+ goto bad_inode;
afs_get_inode_cache(vnode);
/* success */
clear_bit(AFS_VNODE_UNSET, &vnode->flags);
- inode->i_flags |= S_NOATIME;
unlock_new_inode(inode);
_leave(" = %p", inode);
return inode;
@@ -564,11 +466,79 @@ bad_inode:
return ERR_PTR(ret);
}
+static int afs_iget5_set_root(struct inode *inode, void *opaque)
+{
+ struct afs_super_info *as = AFS_FS_S(inode->i_sb);
+ struct afs_vnode *vnode = AFS_FS_I(inode);
+
+ vnode->volume = as->volume;
+ vnode->fid.vid = as->volume->vid;
+ vnode->fid.vnode = 1;
+ vnode->fid.unique = 1;
+ inode->i_ino = 1;
+ inode->i_generation = 1;
+ return 0;
+}
+
+/*
+ * Set up the root inode for a volume. This is always vnode 1, unique 1 within
+ * the volume.
+ */
+struct inode *afs_root_iget(struct super_block *sb, struct key *key)
+{
+ struct afs_super_info *as = AFS_FS_S(sb);
+ struct afs_operation *op;
+ struct afs_vnode *vnode;
+ struct inode *inode;
+ int ret;
+
+ _enter(",{%llx},,", as->volume->vid);
+
+ inode = iget5_locked(sb, 1, NULL, afs_iget5_set_root, NULL);
+ if (!inode) {
+ _leave(" = -ENOMEM");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ _debug("GOT ROOT INODE %p { vl=%llx }", inode, as->volume->vid);
+
+ BUG_ON(!(inode->i_state & I_NEW));
+
+ vnode = AFS_FS_I(inode);
+ vnode->cb_v_break = as->volume->cb_v_break;
+
+ op = afs_alloc_operation(key, as->volume);
+ if (IS_ERR(op)) {
+ ret = PTR_ERR(op);
+ goto error;
+ }
+
+ afs_op_set_vnode(op, 0, vnode);
+
+ op->nr_files = 1;
+ op->ops = &afs_fetch_status_operation;
+ ret = afs_do_sync_operation(op);
+ if (ret < 0)
+ goto error;
+
+ afs_get_inode_cache(vnode);
+
+ clear_bit(AFS_VNODE_UNSET, &vnode->flags);
+ unlock_new_inode(inode);
+ _leave(" = %p", inode);
+ return inode;
+
+error:
+ iget_failed(inode);
+ _leave(" = %d [bad]", ret);
+ return ERR_PTR(ret);
+}
+
/*
* mark the data attached to an inode as obsolete due to a write on the server
* - might also want to ditch all the outstanding writes and dirty pages
*/
-void afs_zap_data(struct afs_vnode *vnode)
+static void afs_zap_data(struct afs_vnode *vnode)
{
_enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode);
@@ -586,12 +556,30 @@ void afs_zap_data(struct afs_vnode *vnode)
}
/*
+ * Get the server reinit counter for a vnode's current server.
+ */
+static bool afs_get_s_break_rcu(struct afs_vnode *vnode, unsigned int *_s_break)
+{
+ struct afs_server_list *slist = rcu_dereference(vnode->volume->servers);
+ struct afs_server *server;
+ int i;
+
+ for (i = 0; i < slist->nr_servers; i++) {
+ server = slist->servers[i].server;
+ if (server == vnode->cb_server) {
+ *_s_break = READ_ONCE(server->cb_s_break);
+ return true;
+ }
+ }
+
+ return false;
+}
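Since afs_get_s_break_rcu() walks the volume's RCU-protected server list, it must be called inside an RCU read-side critical section; afs_check_validity() below is reached that way by its callers. An illustrative wrapper making the locking explicit (sketch only):

/* Sketch: sample the server reinit counter under the RCU read lock. */
static bool afs_sample_s_break(struct afs_vnode *vnode, unsigned int *_s_break)
{
	bool ok;

	rcu_read_lock();
	ok = afs_get_s_break_rcu(vnode, _s_break);
	rcu_read_unlock();
	return ok;
}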
+
+/*
* Check the validity of a vnode/inode.
*/
bool afs_check_validity(struct afs_vnode *vnode)
{
- struct afs_cb_interest *cbi;
- struct afs_server *server;
struct afs_volume *volume = vnode->volume;
enum afs_cb_break_reason need_clear = afs_cb_break_no_break;
time64_t now = ktime_get_real_seconds();
@@ -604,11 +592,8 @@ bool afs_check_validity(struct afs_vnode *vnode)
cb_v_break = READ_ONCE(volume->cb_v_break);
cb_break = vnode->cb_break;
- if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) {
- cbi = rcu_dereference(vnode->cb_interest);
- server = rcu_dereference(cbi->server);
- cb_s_break = READ_ONCE(server->cb_s_break);
-
+ if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags) &&
+ afs_get_s_break_rcu(vnode, &cb_s_break)) {
if (vnode->cb_s_break != cb_s_break ||
vnode->cb_v_break != cb_v_break) {
vnode->cb_s_break = cb_s_break;
@@ -755,7 +740,6 @@ int afs_drop_inode(struct inode *inode)
*/
void afs_evict_inode(struct inode *inode)
{
- struct afs_cb_interest *cbi;
struct afs_vnode *vnode;
vnode = AFS_FS_I(inode);
@@ -772,15 +756,6 @@ void afs_evict_inode(struct inode *inode)
truncate_inode_pages_final(&inode->i_data);
clear_inode(inode);
- write_seqlock(&vnode->cb_lock);
- cbi = rcu_dereference_protected(vnode->cb_interest,
- lockdep_is_held(&vnode->cb_lock.lock));
- if (cbi) {
- afs_put_cb_interest(afs_i2net(inode), cbi);
- rcu_assign_pointer(vnode->cb_interest, NULL);
- }
- write_sequnlock(&vnode->cb_lock);
-
while (!list_empty(&vnode->wb_keys)) {
struct afs_wb_key *wbk = list_entry(vnode->wb_keys.next,
struct afs_wb_key, vnode_link);
@@ -808,16 +783,24 @@ void afs_evict_inode(struct inode *inode)
_leave("");
}
+static void afs_setattr_success(struct afs_operation *op)
+{
+ afs_vnode_commit_status(op, &op->file[0]);
+}
+
+static const struct afs_operation_ops afs_setattr_operation = {
+ .issue_afs_rpc = afs_fs_setattr,
+ .issue_yfs_rpc = yfs_fs_setattr,
+ .success = afs_setattr_success,
+};
+
/*
* set the attributes of an inode
*/
int afs_setattr(struct dentry *dentry, struct iattr *attr)
{
- struct afs_fs_cursor fc;
- struct afs_status_cb *scb;
+ struct afs_operation *op;
struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry));
- struct key *key;
- int ret = -ENOMEM;
_enter("{%llx:%llu},{n=%pd},%x",
vnode->fid.vid, vnode->fid.vnode, dentry,
@@ -829,48 +812,22 @@ int afs_setattr(struct dentry *dentry, struct iattr *attr)
return 0;
}
- scb = kzalloc(sizeof(struct afs_status_cb), GFP_KERNEL);
- if (!scb)
- goto error;
-
/* flush any dirty data outstanding on a regular file */
if (S_ISREG(vnode->vfs_inode.i_mode))
filemap_write_and_wait(vnode->vfs_inode.i_mapping);
- if (attr->ia_valid & ATTR_FILE) {
- key = afs_file_key(attr->ia_file);
- } else {
- key = afs_request_key(vnode->volume->cell);
- if (IS_ERR(key)) {
- ret = PTR_ERR(key);
- goto error_scb;
- }
- }
-
- ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, vnode, key, false)) {
- afs_dataversion_t data_version = vnode->status.data_version;
+ op = afs_alloc_operation(((attr->ia_valid & ATTR_FILE) ?
+ afs_file_key(attr->ia_file) : NULL),
+ vnode->volume);
+ if (IS_ERR(op))
+ return PTR_ERR(op);
- if (attr->ia_valid & ATTR_SIZE)
- data_version++;
+ afs_op_set_vnode(op, 0, vnode);
+ op->setattr.attr = attr;
- while (afs_select_fileserver(&fc)) {
- fc.cb_break = afs_calc_vnode_cb_break(vnode);
- afs_fs_setattr(&fc, attr, scb);
- }
-
- afs_check_for_remote_deletion(&fc, vnode);
- afs_vnode_commit_status(&fc, vnode, fc.cb_break,
- &data_version, scb);
- ret = afs_end_vnode_operation(&fc);
- }
+ if (attr->ia_valid & ATTR_SIZE)
+ op->file[0].dv_delta = 1;
- if (!(attr->ia_valid & ATTR_FILE))
- key_put(key);
-
-error_scb:
- kfree(scb);
-error:
- _leave(" = %d", ret);
- return ret;
+ op->ops = &afs_setattr_operation;
+ return afs_do_sync_operation(op);
}
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 80255513e230..0c9806ef2a19 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -59,13 +59,6 @@ struct afs_fs_context {
struct key *key; /* key to use for secure mounting */
};
-struct afs_iget_data {
- struct afs_fid fid;
- struct afs_volume *volume; /* volume on which resides */
- unsigned int cb_v_break; /* Pre-fetch volume break count */
- unsigned int cb_s_break; /* Pre-fetch server break count */
-};
-
enum afs_call_state {
AFS_CALL_CL_REQUESTING, /* Client: Request is being sent */
AFS_CALL_CL_AWAIT_REPLY, /* Client: Awaiting reply */
@@ -90,7 +83,6 @@ struct afs_addr_list {
unsigned char nr_ipv4; /* Number of IPv4 addresses */
enum dns_record_source source:8;
enum dns_lookup_status status:8;
- unsigned long probed; /* Mask of servers that have been probed */
unsigned long failed; /* Mask of addrs that failed locally/ICMP */
unsigned long responded; /* Mask of addrs that responded */
struct sockaddr_rxrpc addrs[];
@@ -111,10 +103,7 @@ struct afs_call {
struct afs_net *net; /* The network namespace */
struct afs_server *server; /* The fileserver record if fs op (pins ref) */
struct afs_vlserver *vlserver; /* The vlserver record if vl op */
- struct afs_cb_interest *cbi; /* Callback interest for server used */
- struct afs_vnode *lvnode; /* vnode being locked */
void *request; /* request data (first part) */
- struct address_space *mapping; /* Pages being written from */
struct iov_iter def_iter; /* Default buffer/data iterator */
struct iov_iter *iter; /* Iterator currently in use */
union { /* Convenience for ->def_iter */
@@ -126,32 +115,19 @@ struct afs_call {
long ret0; /* Value to reply with instead of 0 */
struct afs_addr_list *ret_alist;
struct afs_vldb_entry *ret_vldb;
- struct afs_acl *ret_acl;
+ char *ret_str;
};
- struct afs_fid *out_fid;
- struct afs_status_cb *out_dir_scb;
- struct afs_status_cb *out_scb;
- struct yfs_acl *out_yacl;
- struct afs_volsync *out_volsync;
- struct afs_volume_status *out_volstatus;
- struct afs_read *read_request;
+ struct afs_operation *op;
unsigned int server_index;
- pgoff_t first; /* first page in mapping to deal with */
- pgoff_t last; /* last page in mapping to deal with */
atomic_t usage;
enum afs_call_state state;
spinlock_t state_lock;
int error; /* error code */
u32 abort_code; /* Remote abort ID or 0 */
- u32 epoch;
unsigned int max_lifespan; /* Maximum lifespan to set if not 0 */
unsigned request_size; /* size of request data */
unsigned reply_max; /* maximum size of reply */
- unsigned first_offset; /* offset into mapping[first] */
- union {
- unsigned last_to; /* amount of mapping[last] */
- unsigned count2; /* count used in unmarshalling */
- };
+ unsigned count2; /* count used in unmarshalling */
unsigned char unmarshall; /* unmarshalling phase */
unsigned char addr_ix; /* Address in ->alist */
bool drop_ref; /* T if need to drop ref for incoming call */
@@ -161,6 +137,7 @@ struct afs_call {
bool upgrade; /* T to request service upgrade */
bool have_reply_time; /* T if have got reply_time */
bool intr; /* T if interruptible */
+ bool unmarshalling_error; /* T if an unmarshalling error occurred */
u16 service_id; /* Actual service ID (after upgrade) */
unsigned int debug_id; /* Trace ID */
u32 operation_ID; /* operation ID for an incoming call */
@@ -291,6 +268,7 @@ struct afs_net {
struct timer_list cells_timer;
atomic_t cells_outstanding;
seqlock_t cells_lock;
+ struct mutex cells_alias_lock;
struct mutex proc_cells_lock;
struct hlist_head proc_cells;
@@ -299,9 +277,10 @@ struct afs_net {
* cell, but in practice, people create aliases and subsets and there's
* no easy way to distinguish them.
*/
- seqlock_t fs_lock; /* For fs_servers */
+ seqlock_t fs_lock; /* For fs_servers, fs_probe_*, fs_proc */
struct rb_root fs_servers; /* afs_server (by server UUID or address) */
- struct list_head fs_updates; /* afs_server (by update_at) */
+ struct list_head fs_probe_fast; /* List of afs_server to probe at 30s intervals */
+ struct list_head fs_probe_slow; /* List of afs_server to probe at 5m intervals */
struct hlist_head fs_proc; /* procfs servers list */
struct hlist_head fs_addresses4; /* afs_server (by lowest IPv4 addr) */
@@ -310,6 +289,9 @@ struct afs_net {
struct work_struct fs_manager;
struct timer_list fs_timer;
+
+ struct work_struct fs_prober;
+ struct timer_list fs_probe_timer;
atomic_t servers_outstanding;
/* File locking renewal management */
@@ -360,8 +342,10 @@ enum afs_cell_state {
* for authentication and encryption. The cell name is not typically used in
* the protocol.
*
- * There is no easy way to determine if two cells are aliases or one is a
- * subset of another.
+ * Two cells are determined to be aliases if they have an explicit alias (YFS
+ * only), share any VL servers, or have at least one volume in common.  "In
+ * common" means that the address lists of the VL servers or of the
+ * fileservers share at least one endpoint.
*/
struct afs_cell {
union {
@@ -369,6 +353,8 @@ struct afs_cell {
struct rb_node net_node; /* Node in net->cells */
};
struct afs_net *net;
+ struct afs_cell *alias_of; /* The cell this is an alias of */
+ struct afs_volume *root_volume; /* The root.cell volume if there is one */
struct key *anonymous_key; /* anonymous user key for this cell */
struct work_struct manager; /* Manager for init/deinit/dns */
struct hlist_node proc_link; /* /proc cell list link */
@@ -381,15 +367,21 @@ struct afs_cell {
unsigned long flags;
#define AFS_CELL_FL_NO_GC 0 /* The cell was added manually, don't auto-gc */
#define AFS_CELL_FL_DO_LOOKUP 1 /* DNS lookup requested */
+#define AFS_CELL_FL_CHECK_ALIAS 2 /* Need to check for aliases */
enum afs_cell_state state;
short error;
enum dns_record_source dns_source:8; /* Latest source of data from lookup */
enum dns_lookup_status dns_status:8; /* Latest status of data from lookup */
unsigned int dns_lookup_count; /* Counter of DNS lookups */
+ /* The volumes belonging to this cell */
+ struct rb_root volumes; /* Tree of volumes in this cell */
+ struct hlist_head proc_volumes; /* procfs volume list */
+ seqlock_t volume_lock; /* For volumes */
+
/* Active fileserver interaction state. */
- struct list_head proc_volumes; /* procfs volume list */
- rwlock_t proc_lock;
+ struct rb_root fs_servers; /* afs_server (by server UUID) */
+ seqlock_t fs_lock; /* For fs_servers */
/* VL server list. */
rwlock_t vl_servers_lock; /* Lock on vl_servers */
@@ -471,6 +463,7 @@ struct afs_vldb_entry {
#define AFS_VLDB_QUERY_ERROR 4 /* - VL server returned error */
uuid_t fs_server[AFS_NMAXNSERVERS];
+ u32 addr_version[AFS_NMAXNSERVERS]; /* Registration change counters */
u8 fs_mask[AFS_NMAXNSERVERS];
#define AFS_VOL_VTM_RW 0x01 /* R/W version of the volume is available (on this server) */
#define AFS_VOL_VTM_RO 0x02 /* R/O version of the volume is available (on this server) */
@@ -492,94 +485,64 @@ struct afs_server {
};
struct afs_addr_list __rcu *addresses;
- struct rb_node uuid_rb; /* Link in net->servers */
+ struct afs_cell *cell; /* Cell to which it belongs (pins ref) */
+ struct rb_node uuid_rb; /* Link in net->fs_servers */
+ struct afs_server __rcu *uuid_next; /* Next server with same UUID */
+ struct afs_server *uuid_prev; /* Previous server with same UUID */
+ struct list_head probe_link; /* Link in net->fs_probe_fast/slow */
struct hlist_node addr4_link; /* Link in net->fs_addresses4 */
struct hlist_node addr6_link; /* Link in net->fs_addresses6 */
struct hlist_node proc_link; /* Link in net->fs_proc */
struct afs_server *gc_next; /* Next server in manager's list */
- time64_t put_time; /* Time at which last put */
- time64_t update_at; /* Time at which to next update the record */
+ time64_t unuse_time; /* Time at which last unused */
unsigned long flags;
-#define AFS_SERVER_FL_NOT_READY 1 /* The record is not ready for use */
-#define AFS_SERVER_FL_NOT_FOUND 2 /* VL server says no such server */
-#define AFS_SERVER_FL_VL_FAIL 3 /* Failed to access VL server */
-#define AFS_SERVER_FL_UPDATING 4
-#define AFS_SERVER_FL_PROBED 5 /* The fileserver has been probed */
-#define AFS_SERVER_FL_PROBING 6 /* Fileserver is being probed */
-#define AFS_SERVER_FL_NO_IBULK 7 /* Fileserver doesn't support FS.InlineBulkStatus */
+#define AFS_SERVER_FL_RESPONDING 0 /* The server is responding */
+#define AFS_SERVER_FL_UPDATING 1
+#define AFS_SERVER_FL_NEEDS_UPDATE 2 /* Fileserver address list is out of date */
+#define AFS_SERVER_FL_NOT_READY 4 /* The record is not ready for use */
+#define AFS_SERVER_FL_NOT_FOUND 5 /* VL server says no such server */
+#define AFS_SERVER_FL_VL_FAIL 6 /* Failed to access VL server */
#define AFS_SERVER_FL_MAY_HAVE_CB 8 /* May have callbacks on this fileserver */
-#define AFS_SERVER_FL_IS_YFS 9 /* Server is YFS not AFS */
-#define AFS_SERVER_FL_NO_RM2 10 /* Fileserver doesn't support YFS.RemoveFile2 */
-#define AFS_SERVER_FL_HAVE_EPOCH 11 /* ->epoch is valid */
- atomic_t usage;
+#define AFS_SERVER_FL_IS_YFS 16 /* Server is YFS not AFS */
+#define AFS_SERVER_FL_NO_IBULK 17 /* Fileserver doesn't support FS.InlineBulkStatus */
+#define AFS_SERVER_FL_NO_RM2 18 /* Fileserver doesn't support YFS.RemoveFile2 */
+ atomic_t ref; /* Object refcount */
+ atomic_t active; /* Active user count */
u32 addr_version; /* Address list version */
- u32 cm_epoch; /* Server RxRPC epoch */
+ unsigned int rtt; /* Server's current RTT in uS */
unsigned int debug_id; /* Debugging ID for traces */
/* file service access */
rwlock_t fs_lock; /* access lock */
/* callback promise management */
- struct hlist_head cb_volumes; /* List of volume interests on this server */
unsigned cb_s_break; /* Break-everything counter. */
- rwlock_t cb_break_lock; /* Volume finding lock */
/* Probe state */
+ unsigned long probed_at; /* Time last probe was dispatched (jiffies) */
wait_queue_head_t probe_wq;
atomic_t probe_outstanding;
spinlock_t probe_lock;
struct {
- unsigned int rtt; /* RTT as ktime/64 */
+ unsigned int rtt; /* RTT in uS */
u32 abort_code;
- u32 cm_epoch;
short error;
bool responded:1;
bool is_yfs:1;
bool not_yfs:1;
bool local_failure:1;
- bool cm_probed:1;
- bool said_rebooted:1;
- bool said_inconsistent:1;
} probe;
};
/*
- * Volume collation in the server's callback interest list.
- */
-struct afs_vol_interest {
- struct hlist_node srv_link; /* Link in server->cb_volumes */
- struct hlist_head cb_interests; /* List of callback interests on the server */
- union {
- struct rcu_head rcu;
- afs_volid_t vid; /* Volume ID to match */
- };
- unsigned int usage;
-};
-
-/*
- * Interest by a superblock on a server.
- */
-struct afs_cb_interest {
- struct hlist_node cb_vlink; /* Link in vol_interest->cb_interests */
- struct afs_vol_interest *vol_interest;
- struct afs_server *server; /* Server on which this interest resides */
- struct super_block *sb; /* Superblock on which inodes reside */
- union {
- struct rcu_head rcu;
- afs_volid_t vid; /* Volume ID to match */
- };
- refcount_t usage;
-};
-
-/*
- * Replaceable server list.
+ * Replaceable volume server list.
*/
struct afs_server_entry {
struct afs_server *server;
- struct afs_cb_interest *cb_interest;
};
struct afs_server_list {
+ afs_volid_t vids[AFS_MAXTYPES]; /* Volume IDs */
refcount_t usage;
unsigned char nr_servers;
unsigned char preferred; /* Preferred server */
@@ -593,11 +556,16 @@ struct afs_server_list {
* Live AFS volume management.
*/
struct afs_volume {
- afs_volid_t vid; /* volume ID */
+ union {
+ struct rcu_head rcu;
+ afs_volid_t vid; /* volume ID */
+ };
atomic_t usage;
time64_t update_at; /* Time at which to next update */
 struct afs_cell *cell; /* Cell to which it belongs (pins ref) */
- struct list_head proc_link; /* Link in cell->vl_proc */
+ struct rb_node cell_node; /* Link in cell->volumes */
+ struct hlist_node proc_link; /* Link in cell->proc_volumes */
+ struct super_block __rcu *sb; /* Superblock on which inodes reside */
unsigned long flags;
#define AFS_VOLUME_NEEDS_UPDATE 0 /* - T if an update needs performing */
#define AFS_VOLUME_UPDATING 1 /* - T if an update is in progress */
@@ -605,10 +573,11 @@ struct afs_volume {
#define AFS_VOLUME_DELETED 3 /* - T if volume appears deleted */
#define AFS_VOLUME_OFFLINE 4 /* - T if volume offline notice given */
#define AFS_VOLUME_BUSY 5 /* - T if volume busy notice given */
+#define AFS_VOLUME_MAYBE_NO_IBULK 6 /* - T if some servers don't have InlineBulkStatus */
#ifdef CONFIG_AFS_FSCACHE
struct fscache_cookie *cache; /* caching cookie */
#endif
- struct afs_server_list *servers; /* List of servers on which volume resides */
+ struct afs_server_list __rcu *servers; /* List of servers on which volume resides */
rwlock_t servers_lock; /* Lock for ->servers */
unsigned int servers_seq; /* Incremented each time ->servers changes */
@@ -616,7 +585,6 @@ struct afs_volume {
rwlock_t cb_v_break_lock;
afs_voltype_t type; /* type of volume */
- short error;
char type_force; /* force volume type (suppress R/O -> R/W) */
u8 name_len;
u8 name[AFS_MAXVOLNAME + 1]; /* NUL-padded volume name */
@@ -677,11 +645,11 @@ struct afs_vnode {
afs_lock_type_t lock_type : 8;
/* outstanding callback notification on this file */
- struct afs_cb_interest __rcu *cb_interest; /* Server on which this resides */
+ void *cb_server; /* Server with callback/filelock */
unsigned int cb_s_break; /* Mass break counter on ->server */
unsigned int cb_v_break; /* Mass break counter on ->volume */
unsigned int cb_break; /* Break counter on vnode */
- seqlock_t cb_lock; /* Lock for ->cb_interest, ->status, ->cb_*break */
+ seqlock_t cb_lock; /* Lock for ->cb_server, ->status, ->cb_*break */
time64_t cb_expires_at; /* time at which callback expires */
};
@@ -758,29 +726,117 @@ struct afs_vl_cursor {
};
/*
- * Cursor for iterating over a set of fileservers.
+ * Fileserver operation methods.
+ */
+struct afs_operation_ops {
+ void (*issue_afs_rpc)(struct afs_operation *op);
+ void (*issue_yfs_rpc)(struct afs_operation *op);
+ void (*success)(struct afs_operation *op);
+ void (*aborted)(struct afs_operation *op);
+ void (*edit_dir)(struct afs_operation *op);
+ void (*put)(struct afs_operation *op);
+};
+
+struct afs_vnode_param {
+ struct afs_vnode *vnode;
+ struct afs_fid fid; /* Fid to access */
+ struct afs_status_cb scb; /* Returned status and callback promise */
+ afs_dataversion_t dv_before; /* Data version before the call */
+ unsigned int cb_break_before; /* cb_break + cb_s_break before the call */
+ u8 dv_delta; /* Expected change in data version */
+ bool put_vnode; /* T if we have a ref on the vnode */
+ bool need_io_lock; /* T if we need the I/O lock on this */
+};
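dv_before and dv_delta encode the expected data-version change: the issuer snapshots the version before the call and states how far a successful operation should advance it, and afs_apply_status() (see the inode.c hunks above) warns and invalidates when the server's answer disagrees. afs_setattr(), for instance, sets dv_delta = 1 when the file size changes. The check itself, as a minimal sketch:

/* Sketch of the data-version check made in afs_apply_status(). */
static bool afs_dv_as_expected(const struct afs_vnode_param *vp,
			       const struct afs_file_status *status)
{
	return vp->dv_before + vp->dv_delta == status->data_version;
}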
+
+/*
+ * Fileserver operation wrapper, handling server and address rotation
+ * asynchronously. May make simultaneous calls to multiple servers.
*/
-struct afs_fs_cursor {
+struct afs_operation {
+ struct afs_net *net; /* Network namespace */
+ struct key *key; /* Key for the cell */
const struct afs_call_type *type; /* Type of call done */
+ const struct afs_operation_ops *ops;
+
+ /* Parameters/results for the operation */
+ struct afs_volume *volume; /* Volume being accessed */
+ struct afs_vnode_param file[2];
+ struct afs_vnode_param *more_files;
+ struct afs_volsync volsync;
+ struct dentry *dentry; /* Dentry to be altered */
+ struct dentry *dentry_2; /* Second dentry to be altered */
+ struct timespec64 mtime; /* Modification time to record */
+ short nr_files; /* Number of entries in file[], more_files */
+ short error;
+ unsigned int abort_code;
+ unsigned int debug_id;
+
+ unsigned int cb_v_break; /* Volume break counter before op */
+ unsigned int cb_s_break; /* Server break counter before op */
+
+ union {
+ struct {
+ int which; /* Which ->file[] to fetch for */
+ } fetch_status;
+ struct {
+ int reason; /* enum afs_edit_dir_reason */
+ mode_t mode;
+ const char *symlink;
+ } create;
+ struct {
+ bool need_rehash;
+ } unlink;
+ struct {
+ struct dentry *rehash;
+ struct dentry *tmp;
+ bool new_negative;
+ } rename;
+ struct {
+ struct afs_read *req;
+ } fetch;
+ struct {
+ afs_lock_type_t type;
+ } lock;
+ struct {
+ struct address_space *mapping; /* Pages being written from */
+ pgoff_t first; /* first page in mapping to deal with */
+ pgoff_t last; /* last page in mapping to deal with */
+ unsigned first_offset; /* offset into mapping[first] */
+ unsigned last_to; /* amount of mapping[last] */
+ } store;
+ struct {
+ struct iattr *attr;
+ } setattr;
+ struct afs_acl *acl;
+ struct yfs_acl *yacl;
+ struct {
+ struct afs_volume_status vs;
+ struct kstatfs *buf;
+ } volstatus;
+ };
+
+ /* Fileserver iteration state */
struct afs_addr_cursor ac;
- struct afs_vnode *vnode;
struct afs_server_list *server_list; /* Current server list (pins ref) */
- struct afs_cb_interest *cbi; /* Server on which this resides (pins ref) */
- struct key *key; /* Key for the server */
+ struct afs_server *server; /* Server we're using (ref pinned by server_list) */
+ struct afs_call *call;
unsigned long untried; /* Bitmask of untried servers */
- unsigned int cb_break; /* cb_break + cb_s_break before the call */
- unsigned int cb_break_2; /* cb_break + cb_s_break (2nd vnode) */
short index; /* Current server */
- short error;
- unsigned short flags;
-#define AFS_FS_CURSOR_STOP 0x0001 /* Set to cease iteration */
-#define AFS_FS_CURSOR_VBUSY 0x0002 /* Set if seen VBUSY */
-#define AFS_FS_CURSOR_VMOVED 0x0004 /* Set if seen VMOVED */
-#define AFS_FS_CURSOR_VNOVOL 0x0008 /* Set if seen VNOVOL */
-#define AFS_FS_CURSOR_CUR_ONLY 0x0010 /* Set if current server only (file lock held) */
-#define AFS_FS_CURSOR_NO_VSLEEP 0x0020 /* Set to prevent sleep on VBUSY, VOFFLINE, ... */
-#define AFS_FS_CURSOR_INTR 0x0040 /* Set if op is interruptible */
unsigned short nr_iterations; /* Number of server iterations */
+
+ unsigned int flags;
+#define AFS_OPERATION_STOP 0x0001 /* Set to cease iteration */
+#define AFS_OPERATION_VBUSY 0x0002 /* Set if seen VBUSY */
+#define AFS_OPERATION_VMOVED 0x0004 /* Set if seen VMOVED */
+#define AFS_OPERATION_VNOVOL 0x0008 /* Set if seen VNOVOL */
+#define AFS_OPERATION_CUR_ONLY 0x0010 /* Set if current server only (file lock held) */
+#define AFS_OPERATION_NO_VSLEEP 0x0020 /* Set to prevent sleep on VBUSY, VOFFLINE, ... */
+#define AFS_OPERATION_UNINTR 0x0040 /* Set if op is uninterruptible */
+#define AFS_OPERATION_DOWNGRADE 0x0080 /* Set to retry with downgraded opcode */
+#define AFS_OPERATION_LOCK_0 0x0100 /* Set if have io_lock on file[0] */
+#define AFS_OPERATION_LOCK_1 0x0200 /* Set if have io_lock on file[1] */
+#define AFS_OPERATION_TRIED_ALL 0x0400 /* Set if we've tried all the fileservers */
+#define AFS_OPERATION_RETRY_SERVER 0x0800 /* Set if we should retry the current server */
};
/*
@@ -838,29 +894,15 @@ extern void __afs_break_callback(struct afs_vnode *, enum afs_cb_break_reason);
extern void afs_break_callback(struct afs_vnode *, enum afs_cb_break_reason);
extern void afs_break_callbacks(struct afs_server *, size_t, struct afs_callback_break *);
-extern int afs_register_server_cb_interest(struct afs_vnode *,
- struct afs_server_list *, unsigned int);
-extern void afs_put_cb_interest(struct afs_net *, struct afs_cb_interest *);
-extern void afs_clear_callback_interests(struct afs_net *, struct afs_server_list *);
-
-static inline struct afs_cb_interest *afs_get_cb_interest(struct afs_cb_interest *cbi)
-{
- if (cbi)
- refcount_inc(&cbi->usage);
- return cbi;
-}
-
static inline unsigned int afs_calc_vnode_cb_break(struct afs_vnode *vnode)
{
return vnode->cb_break + vnode->cb_v_break;
}
static inline bool afs_cb_is_broken(unsigned int cb_break,
- const struct afs_vnode *vnode,
- const struct afs_cb_interest *cbi)
+ const struct afs_vnode *vnode)
{
- return !cbi || cb_break != (vnode->cb_break +
- vnode->volume->cb_v_break);
+ return cb_break != (vnode->cb_break + vnode->volume->cb_v_break);
}
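The break counters are used optimistically: cb_break + cb_v_break is snapshotted before an RPC is issued (stored in cb_break_before), and the returned callback promise is only installed if no break arrived in the meantime, as afs_apply_callback() does in the inode.c hunk above. The pattern as a sketch, with an illustrative name:

/* Illustrative: commit a callback promise unless a break raced with the RPC. */
static void afs_commit_promise(struct afs_vnode *vnode,
			       unsigned int cb_break_before,
			       const struct afs_callback *cb)
{
	if (!afs_cb_is_broken(cb_break_before, vnode))
		vnode->cb_expires_at = cb->expires_at;
}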
/*
@@ -952,72 +994,81 @@ extern int afs_flock(struct file *, int, struct file_lock *);
/*
* fsclient.c
*/
-extern int afs_fs_fetch_file_status(struct afs_fs_cursor *, struct afs_status_cb *,
- struct afs_volsync *);
-extern int afs_fs_give_up_callbacks(struct afs_net *, struct afs_server *);
-extern int afs_fs_fetch_data(struct afs_fs_cursor *, struct afs_status_cb *, struct afs_read *);
-extern int afs_fs_create(struct afs_fs_cursor *, const char *, umode_t,
- struct afs_status_cb *, struct afs_fid *, struct afs_status_cb *);
-extern int afs_fs_remove(struct afs_fs_cursor *, struct afs_vnode *, const char *, bool,
- struct afs_status_cb *);
-extern int afs_fs_link(struct afs_fs_cursor *, struct afs_vnode *, const char *,
- struct afs_status_cb *, struct afs_status_cb *);
-extern int afs_fs_symlink(struct afs_fs_cursor *, const char *, const char *,
- struct afs_status_cb *, struct afs_fid *, struct afs_status_cb *);
-extern int afs_fs_rename(struct afs_fs_cursor *, const char *,
- struct afs_vnode *, const char *,
- struct afs_status_cb *, struct afs_status_cb *);
-extern int afs_fs_store_data(struct afs_fs_cursor *, struct address_space *,
- pgoff_t, pgoff_t, unsigned, unsigned, struct afs_status_cb *);
-extern int afs_fs_setattr(struct afs_fs_cursor *, struct iattr *, struct afs_status_cb *);
-extern int afs_fs_get_volume_status(struct afs_fs_cursor *, struct afs_volume_status *);
-extern int afs_fs_set_lock(struct afs_fs_cursor *, afs_lock_type_t, struct afs_status_cb *);
-extern int afs_fs_extend_lock(struct afs_fs_cursor *, struct afs_status_cb *);
-extern int afs_fs_release_lock(struct afs_fs_cursor *, struct afs_status_cb *);
+extern void afs_fs_fetch_status(struct afs_operation *);
+extern void afs_fs_fetch_data(struct afs_operation *);
+extern void afs_fs_create_file(struct afs_operation *);
+extern void afs_fs_make_dir(struct afs_operation *);
+extern void afs_fs_remove_file(struct afs_operation *);
+extern void afs_fs_remove_dir(struct afs_operation *);
+extern void afs_fs_link(struct afs_operation *);
+extern void afs_fs_symlink(struct afs_operation *);
+extern void afs_fs_rename(struct afs_operation *);
+extern void afs_fs_store_data(struct afs_operation *);
+extern void afs_fs_setattr(struct afs_operation *);
+extern void afs_fs_get_volume_status(struct afs_operation *);
+extern void afs_fs_set_lock(struct afs_operation *);
+extern void afs_fs_extend_lock(struct afs_operation *);
+extern void afs_fs_release_lock(struct afs_operation *);
extern int afs_fs_give_up_all_callbacks(struct afs_net *, struct afs_server *,
struct afs_addr_cursor *, struct key *);
-extern struct afs_call *afs_fs_get_capabilities(struct afs_net *, struct afs_server *,
- struct afs_addr_cursor *, struct key *,
- unsigned int);
-extern int afs_fs_inline_bulk_status(struct afs_fs_cursor *, struct afs_net *,
- struct afs_fid *, struct afs_status_cb *,
- unsigned int, struct afs_volsync *);
-extern int afs_fs_fetch_status(struct afs_fs_cursor *, struct afs_net *,
- struct afs_fid *, struct afs_status_cb *,
- struct afs_volsync *);
+extern bool afs_fs_get_capabilities(struct afs_net *, struct afs_server *,
+ struct afs_addr_cursor *, struct key *);
+extern void afs_fs_inline_bulk_status(struct afs_operation *);
struct afs_acl {
u32 size;
u8 data[];
};
-extern struct afs_acl *afs_fs_fetch_acl(struct afs_fs_cursor *, struct afs_status_cb *);
-extern int afs_fs_store_acl(struct afs_fs_cursor *, const struct afs_acl *,
- struct afs_status_cb *);
+extern void afs_fs_fetch_acl(struct afs_operation *);
+extern void afs_fs_store_acl(struct afs_operation *);
+
+/*
+ * fs_operation.c
+ */
+extern struct afs_operation *afs_alloc_operation(struct key *, struct afs_volume *);
+extern int afs_put_operation(struct afs_operation *);
+extern bool afs_begin_vnode_operation(struct afs_operation *);
+extern void afs_wait_for_operation(struct afs_operation *);
+extern int afs_do_sync_operation(struct afs_operation *);
+
+static inline void afs_op_nomem(struct afs_operation *op)
+{
+ op->error = -ENOMEM;
+}
+
+static inline void afs_op_set_vnode(struct afs_operation *op, unsigned int n,
+ struct afs_vnode *vnode)
+{
+ op->file[n].vnode = vnode;
+ op->file[n].need_io_lock = true;
+}
+
+static inline void afs_op_set_fid(struct afs_operation *op, unsigned int n,
+ const struct afs_fid *fid)
+{
+ op->file[n].fid = *fid;
+}
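Together these helpers give the canonical call pattern that the converted call sites follow — afs_fetch_status() and afs_setattr() in the inode.c hunks above are the in-tree examples: allocate the operation, attach the vnode(s), point ->ops at an operation table and run it synchronously. A condensed sketch (error handling elided):

/* Condensed from afs_setattr() above; illustrative only. */
static int afs_example_op(struct afs_vnode *vnode, struct key *key,
			  const struct afs_operation_ops *ops)
{
	struct afs_operation *op;

	op = afs_alloc_operation(key, vnode->volume);
	if (IS_ERR(op))
		return PTR_ERR(op);

	afs_op_set_vnode(op, 0, vnode);	/* file[0] is the primary vnode */
	op->ops = ops;
	return afs_do_sync_operation(op);	/* begin, wait, put */
}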
/*
* fs_probe.c
*/
extern void afs_fileserver_probe_result(struct afs_call *);
-extern int afs_probe_fileservers(struct afs_net *, struct key *, struct afs_server_list *);
+extern void afs_fs_probe_fileserver(struct afs_net *, struct afs_server *, struct key *, bool);
extern int afs_wait_for_fs_probes(struct afs_server_list *, unsigned long);
+extern void afs_probe_fileserver(struct afs_net *, struct afs_server *);
+extern void afs_fs_probe_dispatcher(struct work_struct *);
+extern int afs_wait_for_one_fs_probe(struct afs_server *, bool);
/*
* inode.c
*/
-extern void afs_vnode_commit_status(struct afs_fs_cursor *,
- struct afs_vnode *,
- unsigned int,
- const afs_dataversion_t *,
- struct afs_status_cb *);
+extern void afs_vnode_commit_status(struct afs_operation *, struct afs_vnode_param *);
extern int afs_fetch_status(struct afs_vnode *, struct key *, bool, afs_access_t *);
-extern int afs_iget5_test(struct inode *, void *);
+extern int afs_ilookup5_test_by_fid(struct inode *, void *);
extern struct inode *afs_iget_pseudo_dir(struct super_block *, bool);
-extern struct inode *afs_iget(struct super_block *, struct key *,
- struct afs_iget_data *, struct afs_status_cb *,
- struct afs_cb_interest *,
- struct afs_vnode *);
-extern void afs_zap_data(struct afs_vnode *);
+extern struct inode *afs_iget(struct afs_operation *, struct afs_vnode_param *);
+extern struct inode *afs_root_iget(struct super_block *, struct key *);
extern bool afs_check_validity(struct afs_vnode *);
extern int afs_validate(struct afs_vnode *, struct key *);
extern int afs_getattr(const struct path *, struct kstat *, u32, unsigned int);
@@ -1104,11 +1155,8 @@ static inline void afs_put_sysnames(struct afs_sysnames *sysnames) {}
/*
* rotate.c
*/
-extern bool afs_begin_vnode_operation(struct afs_fs_cursor *, struct afs_vnode *,
- struct key *, bool);
-extern bool afs_select_fileserver(struct afs_fs_cursor *);
-extern bool afs_select_current_fileserver(struct afs_fs_cursor *);
-extern int afs_end_vnode_operation(struct afs_fs_cursor *);
+extern bool afs_select_fileserver(struct afs_operation *);
+extern void afs_dump_edestaddrreq(const struct afs_operation *);
/*
* rxrpc.c
@@ -1128,12 +1176,17 @@ extern void afs_flat_call_destructor(struct afs_call *);
extern void afs_send_empty_reply(struct afs_call *);
extern void afs_send_simple_reply(struct afs_call *, const void *, size_t);
extern int afs_extract_data(struct afs_call *, bool);
-extern int afs_protocol_error(struct afs_call *, int, enum afs_eproto_cause);
+extern int afs_protocol_error(struct afs_call *, enum afs_eproto_cause);
-static inline void afs_set_fc_call(struct afs_call *call, struct afs_fs_cursor *fc)
+static inline void afs_make_op_call(struct afs_operation *op, struct afs_call *call,
+ gfp_t gfp)
{
- call->intr = fc->flags & AFS_FS_CURSOR_INTR;
- fc->type = call->type;
+ op->call = call;
+ op->type = call->type;
+ call->op = op;
+ call->key = op->key;
+ call->intr = !(op->flags & AFS_OPERATION_UNINTR);
+ afs_make_call(&op->ac, call, gfp);
}
static inline void afs_extract_begin(struct afs_call *call, void *buf, size_t size)
@@ -1241,13 +1294,33 @@ extern spinlock_t afs_server_peer_lock;
extern struct afs_server *afs_find_server(struct afs_net *,
const struct sockaddr_rxrpc *);
extern struct afs_server *afs_find_server_by_uuid(struct afs_net *, const uuid_t *);
-extern struct afs_server *afs_lookup_server(struct afs_cell *, struct key *, const uuid_t *);
+extern struct afs_server *afs_lookup_server(struct afs_cell *, struct key *, const uuid_t *, u32);
extern struct afs_server *afs_get_server(struct afs_server *, enum afs_server_trace);
+extern struct afs_server *afs_use_server(struct afs_server *, enum afs_server_trace);
+extern void afs_unuse_server(struct afs_net *, struct afs_server *, enum afs_server_trace);
+extern void afs_unuse_server_notime(struct afs_net *, struct afs_server *, enum afs_server_trace);
extern void afs_put_server(struct afs_net *, struct afs_server *, enum afs_server_trace);
extern void afs_manage_servers(struct work_struct *);
extern void afs_servers_timer(struct timer_list *);
+extern void afs_fs_probe_timer(struct timer_list *);
extern void __net_exit afs_purge_servers(struct afs_net *);
-extern bool afs_check_server_record(struct afs_fs_cursor *, struct afs_server *);
+extern bool afs_check_server_record(struct afs_operation *, struct afs_server *);
+
+static inline void afs_inc_servers_outstanding(struct afs_net *net)
+{
+ atomic_inc(&net->servers_outstanding);
+}
+
+static inline void afs_dec_servers_outstanding(struct afs_net *net)
+{
+ if (atomic_dec_and_test(&net->servers_outstanding))
+ wake_up_var(&net->servers_outstanding);
+}
+
+static inline bool afs_is_probing_server(struct afs_server *server)
+{
+ return list_empty(&server->probe_link);
+}
/*
* server_list.c
@@ -1279,6 +1352,12 @@ extern struct afs_addr_list *afs_vl_get_addrs_u(struct afs_vl_cursor *, const uu
extern struct afs_call *afs_vl_get_capabilities(struct afs_net *, struct afs_addr_cursor *,
struct key *, struct afs_vlserver *, unsigned int);
extern struct afs_addr_list *afs_yfsvl_get_endpoints(struct afs_vl_cursor *, const uuid_t *);
+extern char *afs_yfsvl_get_cell_name(struct afs_vl_cursor *);
+
+/*
+ * vl_alias.c
+ */
+extern int afs_cell_detect_alias(struct afs_cell *, struct key *);
/*
* vl_probe.c
@@ -1322,18 +1401,12 @@ extern struct afs_vlserver_list *afs_extract_vlserver_list(struct afs_cell *,
/*
* volume.c
*/
-static inline struct afs_volume *__afs_get_volume(struct afs_volume *volume)
-{
- if (volume)
- atomic_inc(&volume->usage);
- return volume;
-}
-
extern struct afs_volume *afs_create_volume(struct afs_fs_context *);
extern void afs_activate_volume(struct afs_volume *);
extern void afs_deactivate_volume(struct afs_volume *);
-extern void afs_put_volume(struct afs_cell *, struct afs_volume *);
-extern int afs_check_volume_status(struct afs_volume *, struct afs_fs_cursor *);
+extern struct afs_volume *afs_get_volume(struct afs_volume *, enum afs_volume_trace);
+extern void afs_put_volume(struct afs_net *, struct afs_volume *, enum afs_volume_trace);
+extern int afs_check_volume_status(struct afs_volume *, struct afs_operation *);
/*
* write.c
@@ -1362,36 +1435,24 @@ extern ssize_t afs_listxattr(struct dentry *, char *, size_t);
/*
* yfsclient.c
*/
-extern int yfs_fs_fetch_file_status(struct afs_fs_cursor *, struct afs_status_cb *,
- struct afs_volsync *);
-extern int yfs_fs_fetch_data(struct afs_fs_cursor *, struct afs_status_cb *, struct afs_read *);
-extern int yfs_fs_create_file(struct afs_fs_cursor *, const char *, umode_t, struct afs_status_cb *,
- struct afs_fid *, struct afs_status_cb *);
-extern int yfs_fs_make_dir(struct afs_fs_cursor *, const char *, umode_t, struct afs_status_cb *,
- struct afs_fid *, struct afs_status_cb *);
-extern int yfs_fs_remove_file2(struct afs_fs_cursor *, struct afs_vnode *, const char *,
- struct afs_status_cb *, struct afs_status_cb *);
-extern int yfs_fs_remove(struct afs_fs_cursor *, struct afs_vnode *, const char *, bool,
- struct afs_status_cb *);
-extern int yfs_fs_link(struct afs_fs_cursor *, struct afs_vnode *, const char *,
- struct afs_status_cb *, struct afs_status_cb *);
-extern int yfs_fs_symlink(struct afs_fs_cursor *, const char *, const char *,
- struct afs_status_cb *, struct afs_fid *, struct afs_status_cb *);
-extern int yfs_fs_rename(struct afs_fs_cursor *, const char *, struct afs_vnode *, const char *,
- struct afs_status_cb *, struct afs_status_cb *);
-extern int yfs_fs_store_data(struct afs_fs_cursor *, struct address_space *,
- pgoff_t, pgoff_t, unsigned, unsigned, struct afs_status_cb *);
-extern int yfs_fs_setattr(struct afs_fs_cursor *, struct iattr *, struct afs_status_cb *);
-extern int yfs_fs_get_volume_status(struct afs_fs_cursor *, struct afs_volume_status *);
-extern int yfs_fs_set_lock(struct afs_fs_cursor *, afs_lock_type_t, struct afs_status_cb *);
-extern int yfs_fs_extend_lock(struct afs_fs_cursor *, struct afs_status_cb *);
-extern int yfs_fs_release_lock(struct afs_fs_cursor *, struct afs_status_cb *);
-extern int yfs_fs_fetch_status(struct afs_fs_cursor *, struct afs_net *,
- struct afs_fid *, struct afs_status_cb *,
- struct afs_volsync *);
-extern int yfs_fs_inline_bulk_status(struct afs_fs_cursor *, struct afs_net *,
- struct afs_fid *, struct afs_status_cb *,
- unsigned int, struct afs_volsync *);
+extern void yfs_fs_fetch_file_status(struct afs_operation *);
+extern void yfs_fs_fetch_data(struct afs_operation *);
+extern void yfs_fs_create_file(struct afs_operation *);
+extern void yfs_fs_make_dir(struct afs_operation *);
+extern void yfs_fs_remove_file2(struct afs_operation *);
+extern void yfs_fs_remove_file(struct afs_operation *);
+extern void yfs_fs_remove_dir(struct afs_operation *);
+extern void yfs_fs_link(struct afs_operation *);
+extern void yfs_fs_symlink(struct afs_operation *);
+extern void yfs_fs_rename(struct afs_operation *);
+extern void yfs_fs_store_data(struct afs_operation *);
+extern void yfs_fs_setattr(struct afs_operation *);
+extern void yfs_fs_get_volume_status(struct afs_operation *);
+extern void yfs_fs_set_lock(struct afs_operation *);
+extern void yfs_fs_extend_lock(struct afs_operation *);
+extern void yfs_fs_release_lock(struct afs_operation *);
+extern void yfs_fs_fetch_status(struct afs_operation *);
+extern void yfs_fs_inline_bulk_status(struct afs_operation *);
struct yfs_acl {
struct afs_acl *acl; /* Dir/file/symlink ACL */
@@ -1404,10 +1465,8 @@ struct yfs_acl {
};
extern void yfs_free_opaque_acl(struct yfs_acl *);
-extern struct yfs_acl *yfs_fs_fetch_opaque_acl(struct afs_fs_cursor *, struct yfs_acl *,
- struct afs_status_cb *);
-extern int yfs_fs_store_opaque_acl2(struct afs_fs_cursor *, const struct afs_acl *,
- struct afs_status_cb *);
+extern void yfs_fs_fetch_opaque_acl(struct afs_operation *);
+extern void yfs_fs_store_opaque_acl2(struct afs_operation *);
/*
* Miscellaneous inline functions.
@@ -1422,15 +1481,29 @@ static inline struct inode *AFS_VNODE_TO_I(struct afs_vnode *vnode)
return &vnode->vfs_inode;
}
-static inline void afs_check_for_remote_deletion(struct afs_fs_cursor *fc,
+static inline void afs_check_for_remote_deletion(struct afs_operation *op,
struct afs_vnode *vnode)
{
- if (fc->ac.error == -ENOENT) {
+ if (op->error == -ENOENT) {
set_bit(AFS_VNODE_DELETED, &vnode->flags);
afs_break_callback(vnode, afs_cb_break_for_deleted);
}
}
+/*
+ * Note that a dentry got changed. We need to set d_fsdata to the data version
+ * number derived from the result of the operation. It doesn't matter if
+ * d_fsdata goes backwards as we'll just revalidate.
+ */
+static inline void afs_update_dentry_version(struct afs_operation *op,
+ struct afs_vnode_param *dir_vp,
+ struct dentry *dentry)
+{
+ if (!op->error)
+ dentry->d_fsdata =
+ (void *)(unsigned long)dir_vp->scb.status.data_version;
+}
+
static inline int afs_io_error(struct afs_call *call, enum afs_io_error where)
{
trace_afs_io_error(call->debug_id, -EIO, where);
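[Annotation] afs_update_dentry_version() above stuffs the directory's data version into the pointer-sized d_fsdata slot; ->d_revalidate can then compare it against the directory's current version. A self-contained userspace sketch of the same trick (the struct and function names here are illustrative, not kernel API):

#include <stdint.h>
#include <stdio.h>

struct toy_dentry { void *d_fsdata; };

/* Mirror of afs_update_dentry_version(): only record the version on success. */
static void update_dentry_version(struct toy_dentry *d, int error, uint64_t dv)
{
	if (!error)
		d->d_fsdata = (void *)(unsigned long)dv;
}

/* What a revalidate check can then do: any mismatch forces revalidation,
 * which is why it doesn't matter if the stored value goes backwards. */
static int needs_revalidate(const struct toy_dentry *d, uint64_t dir_dv)
{
	return (unsigned long)d->d_fsdata != (unsigned long)dir_dv;
}

int main(void)
{
	struct toy_dentry d = { 0 };

	update_dentry_version(&d, 0, 5);
	printf("same version -> revalidate? %d\n", needs_revalidate(&d, 5));
	printf("dir changed  -> revalidate? %d\n", needs_revalidate(&d, 6));
	return 0;
}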
diff --git a/fs/afs/main.c b/fs/afs/main.c
index c9c45d7078bd..9c79c91e8005 100644
--- a/fs/afs/main.c
+++ b/fs/afs/main.c
@@ -82,12 +82,14 @@ static int __net_init afs_net_init(struct net *net_ns)
INIT_WORK(&net->cells_manager, afs_manage_cells);
timer_setup(&net->cells_timer, afs_cells_timer, 0);
+ mutex_init(&net->cells_alias_lock);
mutex_init(&net->proc_cells_lock);
INIT_HLIST_HEAD(&net->proc_cells);
seqlock_init(&net->fs_lock);
net->fs_servers = RB_ROOT;
- INIT_LIST_HEAD(&net->fs_updates);
+ INIT_LIST_HEAD(&net->fs_probe_fast);
+ INIT_LIST_HEAD(&net->fs_probe_slow);
INIT_HLIST_HEAD(&net->fs_proc);
INIT_HLIST_HEAD(&net->fs_addresses4);
@@ -96,6 +98,8 @@ static int __net_init afs_net_init(struct net *net_ns)
INIT_WORK(&net->fs_manager, afs_manage_servers);
timer_setup(&net->fs_timer, afs_servers_timer, 0);
+ INIT_WORK(&net->fs_prober, afs_fs_probe_dispatcher);
+ timer_setup(&net->fs_probe_timer, afs_fs_probe_timer, 0);
ret = -ENOMEM;
sysnames = kzalloc(sizeof(*sysnames), GFP_KERNEL);
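[Annotation] The new fs_prober/fs_probe_timer pairing follows the usual kernel pattern: the timer handler only queues the work item so the probing itself runs in process context. A hedged sketch of what the timer callback plausibly looks like; the body is an assumption, only the field names come from the hunk above:

void afs_fs_probe_timer(struct timer_list *timer)
{
	struct afs_net *net = container_of(timer, struct afs_net, fs_probe_timer);

	/* Defer the actual probing to the fs_prober work item. */
	queue_work(afs_wq, &net->fs_prober);
}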
diff --git a/fs/afs/proc.c b/fs/afs/proc.c
index 468e1713bce1..e817fc740ba0 100644
--- a/fs/afs/proc.c
+++ b/fs/afs/proc.c
@@ -38,7 +38,7 @@ static int afs_proc_cells_show(struct seq_file *m, void *v)
if (v == SEQ_START_TOKEN) {
/* display header on line 1 */
- seq_puts(m, "USE TTL SV NAME\n");
+ seq_puts(m, "USE TTL SV ST NAME\n");
return 0;
}
@@ -46,10 +46,11 @@ static int afs_proc_cells_show(struct seq_file *m, void *v)
vllist = rcu_dereference(cell->vl_servers);
/* display one cell per line on subsequent lines */
- seq_printf(m, "%3u %6lld %2u %s\n",
+ seq_printf(m, "%3u %6lld %2u %2u %s\n",
atomic_read(&cell->usage),
cell->dns_expiry - ktime_get_real_seconds(),
vllist->nr_servers,
+ cell->state,
cell->name);
return 0;
}
@@ -208,11 +209,10 @@ static const char afs_vol_types[3][3] = {
*/
static int afs_proc_cell_volumes_show(struct seq_file *m, void *v)
{
- struct afs_cell *cell = PDE_DATA(file_inode(m->file));
- struct afs_volume *vol = list_entry(v, struct afs_volume, proc_link);
+ struct afs_volume *vol = hlist_entry(v, struct afs_volume, proc_link);
/* Display header on line 1 */
- if (v == &cell->proc_volumes) {
+ if (v == SEQ_START_TOKEN) {
seq_puts(m, "USE VID TY NAME\n");
return 0;
}
@@ -230,8 +230,8 @@ static void *afs_proc_cell_volumes_start(struct seq_file *m, loff_t *_pos)
{
struct afs_cell *cell = PDE_DATA(file_inode(m->file));
- read_lock(&cell->proc_lock);
- return seq_list_start_head(&cell->proc_volumes, *_pos);
+ rcu_read_lock();
+ return seq_hlist_start_head_rcu(&cell->proc_volumes, *_pos);
}
static void *afs_proc_cell_volumes_next(struct seq_file *m, void *v,
@@ -239,15 +239,13 @@ static void *afs_proc_cell_volumes_next(struct seq_file *m, void *v,
{
struct afs_cell *cell = PDE_DATA(file_inode(m->file));
- return seq_list_next(v, &cell->proc_volumes, _pos);
+ return seq_hlist_next_rcu(v, &cell->proc_volumes, _pos);
}
static void afs_proc_cell_volumes_stop(struct seq_file *m, void *v)
__releases(cell->proc_lock)
{
- struct afs_cell *cell = PDE_DATA(file_inode(m->file));
-
- read_unlock(&cell->proc_lock);
+ rcu_read_unlock();
}
static const struct seq_operations afs_proc_cell_volumes_ops = {
@@ -378,20 +376,26 @@ static int afs_proc_servers_show(struct seq_file *m, void *v)
int i;
if (v == SEQ_START_TOKEN) {
- seq_puts(m, "UUID USE ADDR\n");
+ seq_puts(m, "UUID REF ACT\n");
return 0;
}
server = list_entry(v, struct afs_server, proc_link);
alist = rcu_dereference(server->addresses);
- seq_printf(m, "%pU %3d %pISpc%s\n",
+ seq_printf(m, "%pU %3d %3d\n",
&server->uuid,
- atomic_read(&server->usage),
- &alist->addrs[0].transport,
- alist->preferred == 0 ? "*" : "");
- for (i = 1; i < alist->nr_addrs; i++)
- seq_printf(m, " %pISpc%s\n",
- &alist->addrs[i].transport,
+ atomic_read(&server->ref),
+ atomic_read(&server->active));
+ seq_printf(m, " - info: fl=%lx rtt=%u brk=%x\n",
+ server->flags, server->rtt, server->cb_s_break);
+ seq_printf(m, " - probe: last=%d out=%d\n",
+ (int)(jiffies - server->probed_at) / HZ,
+ atomic_read(&server->probe_outstanding));
+ seq_printf(m, " - ALIST v=%u rsp=%lx f=%lx\n",
+ alist->version, alist->responded, alist->failed);
+ for (i = 0; i < alist->nr_addrs; i++)
+ seq_printf(m, " [%x] %pISpc%s\n",
+ i, &alist->addrs[i].transport,
alist->preferred == i ? "*" : "");
return 0;
}
@@ -563,6 +567,7 @@ void afs_put_sysnames(struct afs_sysnames *sysnames)
if (sysnames->subs[i] != afs_init_sysname &&
sysnames->subs[i] != sysnames->blank)
kfree(sysnames->subs[i]);
+ kfree(sysnames);
}
}
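[Annotation] For reference, the reworked /proc/net/afs/servers entry expands from one line per server to a small block. Plugging hypothetical values into the format strings above would render roughly as (all values made up):

UUID                                 REF ACT
00000001-0002-0003-0004-000000000005   3   1
 - info: fl=4 rtt=823 brk=1
 - probe: last=2 out=0
 - ALIST v=1 rsp=1 f=0
 [0] 192.168.0.10+7000*
 [1] 192.168.0.11+7000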
diff --git a/fs/afs/protocol_yfs.h b/fs/afs/protocol_yfs.h
index 32be9c698348..b5bd03b1d3c7 100644
--- a/fs/afs/protocol_yfs.h
+++ b/fs/afs/protocol_yfs.h
@@ -8,7 +8,7 @@
#define YFS_FS_SERVICE 2500
#define YFS_CM_SERVICE 2501
-#define YFSCBMAX 1024
+#define YFSCBMAX 1024
enum YFS_CM_Operations {
YFSCBProbe = 206, /* probe client */
diff --git a/fs/afs/rotate.c b/fs/afs/rotate.c
index 2a3305e42b14..6a0935cb822f 100644
--- a/fs/afs/rotate.c
+++ b/fs/afs/rotate.c
@@ -15,60 +15,32 @@
#include "afs_fs.h"
/*
- * Begin an operation on the fileserver.
- *
- * Fileserver operations are serialised on the server by vnode, so we serialise
- * them here also using the io_lock.
- */
-bool afs_begin_vnode_operation(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
- struct key *key, bool intr)
-{
- memset(fc, 0, sizeof(*fc));
- fc->vnode = vnode;
- fc->key = key;
- fc->ac.error = SHRT_MAX;
- fc->error = -EDESTADDRREQ;
-
- if (intr) {
- fc->flags |= AFS_FS_CURSOR_INTR;
- if (mutex_lock_interruptible(&vnode->io_lock) < 0) {
- fc->error = -EINTR;
- fc->flags |= AFS_FS_CURSOR_STOP;
- return false;
- }
- } else {
- mutex_lock(&vnode->io_lock);
- }
-
- if (vnode->lock_state != AFS_VNODE_LOCK_NONE)
- fc->flags |= AFS_FS_CURSOR_CUR_ONLY;
- return true;
-}
-
-/*
* Begin iteration through a server list, starting with the vnode's last used
* server if possible, or the last recorded good server if not.
*/
-static bool afs_start_fs_iteration(struct afs_fs_cursor *fc,
+static bool afs_start_fs_iteration(struct afs_operation *op,
struct afs_vnode *vnode)
{
- struct afs_cb_interest *cbi;
+ struct afs_server *server;
+ void *cb_server;
int i;
- read_lock(&vnode->volume->servers_lock);
- fc->server_list = afs_get_serverlist(vnode->volume->servers);
- read_unlock(&vnode->volume->servers_lock);
+ read_lock(&op->volume->servers_lock);
+ op->server_list = afs_get_serverlist(
+ rcu_dereference_protected(op->volume->servers,
+ lockdep_is_held(&op->volume->servers_lock)));
+ read_unlock(&op->volume->servers_lock);
- fc->untried = (1UL << fc->server_list->nr_servers) - 1;
- fc->index = READ_ONCE(fc->server_list->preferred);
+ op->untried = (1UL << op->server_list->nr_servers) - 1;
+ op->index = READ_ONCE(op->server_list->preferred);
- cbi = rcu_dereference_protected(vnode->cb_interest,
- lockdep_is_held(&vnode->io_lock));
- if (cbi) {
+ cb_server = vnode->cb_server;
+ if (cb_server) {
/* See if the vnode's preferred record is still available */
- for (i = 0; i < fc->server_list->nr_servers; i++) {
- if (fc->server_list->servers[i].cb_interest == cbi) {
- fc->index = i;
+ for (i = 0; i < op->server_list->nr_servers; i++) {
+ server = op->server_list->servers[i].server;
+ if (server == cb_server) {
+ op->index = i;
goto found_interest;
}
}
@@ -77,21 +49,18 @@ static bool afs_start_fs_iteration(struct afs_fs_cursor *fc,
* serving this vnode, then we can't switch to another server
* and have to return an error.
*/
- if (fc->flags & AFS_FS_CURSOR_CUR_ONLY) {
- fc->error = -ESTALE;
+ if (op->flags & AFS_OPERATION_CUR_ONLY) {
+ op->error = -ESTALE;
return false;
}
/* Note that the callback promise is effectively broken */
write_seqlock(&vnode->cb_lock);
- ASSERTCMP(cbi, ==, rcu_access_pointer(vnode->cb_interest));
- rcu_assign_pointer(vnode->cb_interest, NULL);
+ ASSERTCMP(cb_server, ==, vnode->cb_server);
+ vnode->cb_server = NULL;
if (test_and_clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags))
vnode->cb_break++;
write_sequnlock(&vnode->cb_lock);
-
- afs_put_cb_interest(afs_v2net(vnode), cbi);
- cbi = NULL;
}
found_interest:
@@ -118,12 +87,12 @@ static void afs_busy(struct afs_volume *volume, u32 abort_code)
/*
* Sleep and retry the operation to the same fileserver.
*/
-static bool afs_sleep_and_retry(struct afs_fs_cursor *fc)
+static bool afs_sleep_and_retry(struct afs_operation *op)
{
- if (fc->flags & AFS_FS_CURSOR_INTR) {
+ if (!(op->flags & AFS_OPERATION_UNINTR)) {
msleep_interruptible(1000);
if (signal_pending(current)) {
- fc->error = -ERESTARTSYS;
+ op->error = -ERESTARTSYS;
return false;
}
} else {
@@ -137,26 +106,26 @@ static bool afs_sleep_and_retry(struct afs_fs_cursor *fc)
* Select the fileserver to use. May be called multiple times to rotate
* through the fileservers.
*/
-bool afs_select_fileserver(struct afs_fs_cursor *fc)
+bool afs_select_fileserver(struct afs_operation *op)
{
struct afs_addr_list *alist;
struct afs_server *server;
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode *vnode = op->file[0].vnode;
struct afs_error e;
u32 rtt;
- int error = fc->ac.error, i;
+ int error = op->ac.error, i;
_enter("%lx[%d],%lx[%d],%d,%d",
- fc->untried, fc->index,
- fc->ac.tried, fc->ac.index,
- error, fc->ac.abort_code);
+ op->untried, op->index,
+ op->ac.tried, op->ac.index,
+ error, op->ac.abort_code);
- if (fc->flags & AFS_FS_CURSOR_STOP) {
+ if (op->flags & AFS_OPERATION_STOP) {
_leave(" = f [stopped]");
return false;
}
- fc->nr_iterations++;
+ op->nr_iterations++;
/* Evaluate the result of the previous operation, if there was one. */
switch (error) {
@@ -166,8 +135,8 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
case 0:
default:
/* Success or local failure. Stop. */
- fc->error = error;
- fc->flags |= AFS_FS_CURSOR_STOP;
+ op->error = error;
+ op->flags |= AFS_OPERATION_STOP;
_leave(" = f [okay/local %d]", error);
return false;
@@ -175,42 +144,42 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
/* The far side rejected the operation on some grounds. This
* might involve the server being busy or the volume having been moved.
*/
- switch (fc->ac.abort_code) {
+ switch (op->ac.abort_code) {
case VNOVOL:
/* This fileserver doesn't know about the volume.
* - May indicate that the VL is wrong - retry once and compare
* the results.
* - May indicate that the fileserver couldn't attach to the vol.
*/
- if (fc->flags & AFS_FS_CURSOR_VNOVOL) {
- fc->error = -EREMOTEIO;
+ if (op->flags & AFS_OPERATION_VNOVOL) {
+ op->error = -EREMOTEIO;
goto next_server;
}
- write_lock(&vnode->volume->servers_lock);
- fc->server_list->vnovol_mask |= 1 << fc->index;
- write_unlock(&vnode->volume->servers_lock);
+ write_lock(&op->volume->servers_lock);
+ op->server_list->vnovol_mask |= 1 << op->index;
+ write_unlock(&op->volume->servers_lock);
- set_bit(AFS_VOLUME_NEEDS_UPDATE, &vnode->volume->flags);
- error = afs_check_volume_status(vnode->volume, fc);
+ set_bit(AFS_VOLUME_NEEDS_UPDATE, &op->volume->flags);
+ error = afs_check_volume_status(op->volume, op);
if (error < 0)
goto failed_set_error;
- if (test_bit(AFS_VOLUME_DELETED, &vnode->volume->flags)) {
- fc->error = -ENOMEDIUM;
+ if (test_bit(AFS_VOLUME_DELETED, &op->volume->flags)) {
+ op->error = -ENOMEDIUM;
goto failed;
}
/* If the server list didn't change, then assume that
* it's the fileserver having trouble.
*/
- if (vnode->volume->servers == fc->server_list) {
- fc->error = -EREMOTEIO;
+ if (rcu_access_pointer(op->volume->servers) == op->server_list) {
+ op->error = -EREMOTEIO;
goto next_server;
}
/* Try again */
- fc->flags |= AFS_FS_CURSOR_VNOVOL;
+ op->flags |= AFS_OPERATION_VNOVOL;
_leave(" = t [vnovol]");
return true;
@@ -220,20 +189,20 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
case VONLINE:
case VDISKFULL:
case VOVERQUOTA:
- fc->error = afs_abort_to_error(fc->ac.abort_code);
+ op->error = afs_abort_to_error(op->ac.abort_code);
goto next_server;
case VOFFLINE:
- if (!test_and_set_bit(AFS_VOLUME_OFFLINE, &vnode->volume->flags)) {
- afs_busy(vnode->volume, fc->ac.abort_code);
- clear_bit(AFS_VOLUME_BUSY, &vnode->volume->flags);
+ if (!test_and_set_bit(AFS_VOLUME_OFFLINE, &op->volume->flags)) {
+ afs_busy(op->volume, op->ac.abort_code);
+ clear_bit(AFS_VOLUME_BUSY, &op->volume->flags);
}
- if (fc->flags & AFS_FS_CURSOR_NO_VSLEEP) {
- fc->error = -EADV;
+ if (op->flags & AFS_OPERATION_NO_VSLEEP) {
+ op->error = -EADV;
goto failed;
}
- if (fc->flags & AFS_FS_CURSOR_CUR_ONLY) {
- fc->error = -ESTALE;
+ if (op->flags & AFS_OPERATION_CUR_ONLY) {
+ op->error = -ESTALE;
goto failed;
}
goto busy;
@@ -244,17 +213,17 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
/* Retry after going round all the servers unless we
* have a file lock we need to maintain.
*/
- if (fc->flags & AFS_FS_CURSOR_NO_VSLEEP) {
- fc->error = -EBUSY;
+ if (op->flags & AFS_OPERATION_NO_VSLEEP) {
+ op->error = -EBUSY;
goto failed;
}
- if (!test_and_set_bit(AFS_VOLUME_BUSY, &vnode->volume->flags)) {
- afs_busy(vnode->volume, fc->ac.abort_code);
- clear_bit(AFS_VOLUME_OFFLINE, &vnode->volume->flags);
+ if (!test_and_set_bit(AFS_VOLUME_BUSY, &op->volume->flags)) {
+ afs_busy(op->volume, op->ac.abort_code);
+ clear_bit(AFS_VOLUME_OFFLINE, &op->volume->flags);
}
busy:
- if (fc->flags & AFS_FS_CURSOR_CUR_ONLY) {
- if (!afs_sleep_and_retry(fc))
+ if (op->flags & AFS_OPERATION_CUR_ONLY) {
+ if (!afs_sleep_and_retry(op))
goto failed;
/* Retry with same server & address */
@@ -262,7 +231,7 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
return true;
}
- fc->flags |= AFS_FS_CURSOR_VBUSY;
+ op->flags |= AFS_OPERATION_VBUSY;
goto next_server;
case VMOVED:
@@ -273,15 +242,15 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
* We also limit the number of VMOVED hops we will
* honour, just in case someone sets up a loop.
*/
- if (fc->flags & AFS_FS_CURSOR_VMOVED) {
- fc->error = -EREMOTEIO;
+ if (op->flags & AFS_OPERATION_VMOVED) {
+ op->error = -EREMOTEIO;
goto failed;
}
- fc->flags |= AFS_FS_CURSOR_VMOVED;
+ op->flags |= AFS_OPERATION_VMOVED;
- set_bit(AFS_VOLUME_WAIT, &vnode->volume->flags);
- set_bit(AFS_VOLUME_NEEDS_UPDATE, &vnode->volume->flags);
- error = afs_check_volume_status(vnode->volume, fc);
+ set_bit(AFS_VOLUME_WAIT, &op->volume->flags);
+ set_bit(AFS_VOLUME_NEEDS_UPDATE, &op->volume->flags);
+ error = afs_check_volume_status(op->volume, op);
if (error < 0)
goto failed_set_error;
@@ -294,23 +263,23 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
*
* TODO: Retry a few times with sleeps.
*/
- if (vnode->volume->servers == fc->server_list) {
- fc->error = -ENOMEDIUM;
+ if (rcu_access_pointer(op->volume->servers) == op->server_list) {
+ op->error = -ENOMEDIUM;
goto failed;
}
goto restart_from_beginning;
default:
- clear_bit(AFS_VOLUME_OFFLINE, &vnode->volume->flags);
- clear_bit(AFS_VOLUME_BUSY, &vnode->volume->flags);
- fc->error = afs_abort_to_error(fc->ac.abort_code);
+ clear_bit(AFS_VOLUME_OFFLINE, &op->volume->flags);
+ clear_bit(AFS_VOLUME_BUSY, &op->volume->flags);
+ op->error = afs_abort_to_error(op->ac.abort_code);
goto failed;
}
case -ETIMEDOUT:
case -ETIME:
- if (fc->error != -EDESTADDRREQ)
+ if (op->error != -EDESTADDRREQ)
goto iterate_address;
/* Fall through */
case -ERFKILL:
@@ -320,103 +289,94 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
case -EHOSTDOWN:
case -ECONNREFUSED:
_debug("no conn");
- fc->error = error;
+ op->error = error;
goto iterate_address;
case -ECONNRESET:
_debug("call reset");
- fc->error = error;
+ op->error = error;
goto failed;
}
restart_from_beginning:
_debug("restart");
- afs_end_cursor(&fc->ac);
- afs_put_cb_interest(afs_v2net(vnode), fc->cbi);
- fc->cbi = NULL;
- afs_put_serverlist(afs_v2net(vnode), fc->server_list);
- fc->server_list = NULL;
+ afs_end_cursor(&op->ac);
+ op->server = NULL;
+ afs_put_serverlist(op->net, op->server_list);
+ op->server_list = NULL;
start:
_debug("start");
/* See if we need to do an update of the volume record. Note that the
* volume may have moved or even have been deleted.
*/
- error = afs_check_volume_status(vnode->volume, fc);
+ error = afs_check_volume_status(op->volume, op);
if (error < 0)
goto failed_set_error;
- if (!afs_start_fs_iteration(fc, vnode))
+ if (!afs_start_fs_iteration(op, vnode))
goto failed;
- _debug("__ VOL %llx __", vnode->volume->vid);
- error = afs_probe_fileservers(afs_v2net(vnode), fc->key, fc->server_list);
- if (error < 0)
- goto failed_set_error;
+ _debug("__ VOL %llx __", op->volume->vid);
pick_server:
- _debug("pick [%lx]", fc->untried);
+ _debug("pick [%lx]", op->untried);
- error = afs_wait_for_fs_probes(fc->server_list, fc->untried);
+ error = afs_wait_for_fs_probes(op->server_list, op->untried);
if (error < 0)
goto failed_set_error;
/* Pick the untried server with the lowest RTT. If we have outstanding
* callbacks, we stick with the server we're already using if we can.
*/
- if (fc->cbi) {
- _debug("cbi %u", fc->index);
- if (test_bit(fc->index, &fc->untried))
+ if (op->server) {
+ _debug("server %u", op->index);
+ if (test_bit(op->index, &op->untried))
goto selected_server;
- afs_put_cb_interest(afs_v2net(vnode), fc->cbi);
- fc->cbi = NULL;
- _debug("nocbi");
+ op->server = NULL;
+ _debug("no server");
}
- fc->index = -1;
+ op->index = -1;
rtt = U32_MAX;
- for (i = 0; i < fc->server_list->nr_servers; i++) {
- struct afs_server *s = fc->server_list->servers[i].server;
+ for (i = 0; i < op->server_list->nr_servers; i++) {
+ struct afs_server *s = op->server_list->servers[i].server;
- if (!test_bit(i, &fc->untried) || !s->probe.responded)
+ if (!test_bit(i, &op->untried) ||
+ !test_bit(AFS_SERVER_FL_RESPONDING, &s->flags))
continue;
if (s->probe.rtt < rtt) {
- fc->index = i;
+ op->index = i;
rtt = s->probe.rtt;
}
}
- if (fc->index == -1)
+ if (op->index == -1)
goto no_more_servers;
selected_server:
- _debug("use %d", fc->index);
- __clear_bit(fc->index, &fc->untried);
+ _debug("use %d", op->index);
+ __clear_bit(op->index, &op->untried);
/* We're starting on a different fileserver from the list. We need to
* check it, create a callback intercept, find its address list and
* probe its capabilities before we use it.
*/
- ASSERTCMP(fc->ac.alist, ==, NULL);
- server = fc->server_list->servers[fc->index].server;
+ ASSERTCMP(op->ac.alist, ==, NULL);
+ server = op->server_list->servers[op->index].server;
- if (!afs_check_server_record(fc, server))
+ if (!afs_check_server_record(op, server))
goto failed;
_debug("USING SERVER: %pU", &server->uuid);
- /* Make sure we've got a callback interest record for this server. We
- * have to link it in before we send the request as we can be sent a
- * break request before we've finished decoding the reply and
- * installing the vnode.
- */
- error = afs_register_server_cb_interest(vnode, fc->server_list,
- fc->index);
- if (error < 0)
- goto failed_set_error;
-
- fc->cbi = afs_get_cb_interest(
- rcu_dereference_protected(vnode->cb_interest,
- lockdep_is_held(&vnode->io_lock)));
+ op->flags |= AFS_OPERATION_RETRY_SERVER;
+ op->server = server;
+ if (vnode->cb_server != server) {
+ vnode->cb_server = server;
+ vnode->cb_s_break = server->cb_s_break;
+ vnode->cb_v_break = vnode->volume->cb_v_break;
+ clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
+ }
read_lock(&server->fs_lock);
alist = rcu_dereference_protected(server->addresses,
@@ -424,44 +384,68 @@ selected_server:
afs_get_addrlist(alist);
read_unlock(&server->fs_lock);
- memset(&fc->ac, 0, sizeof(fc->ac));
+retry_server:
+ memset(&op->ac, 0, sizeof(op->ac));
- if (!fc->ac.alist)
- fc->ac.alist = alist;
+ if (!op->ac.alist)
+ op->ac.alist = alist;
else
afs_put_addrlist(alist);
- fc->ac.index = -1;
+ op->ac.index = -1;
iterate_address:
- ASSERT(fc->ac.alist);
+ ASSERT(op->ac.alist);
/* Iterate over the current server's address list to try and find an
* address on which it will respond to us.
*/
- if (!afs_iterate_addresses(&fc->ac))
- goto next_server;
+ if (!afs_iterate_addresses(&op->ac))
+ goto out_of_addresses;
- _debug("address [%u] %u/%u", fc->index, fc->ac.index, fc->ac.alist->nr_addrs);
+ _debug("address [%u] %u/%u %pISp",
+ op->index, op->ac.index, op->ac.alist->nr_addrs,
+ &op->ac.alist->addrs[op->ac.index].transport);
_leave(" = t");
return true;
+out_of_addresses:
+ /* We've now had a failure to respond on all of a server's addresses -
+ * immediately probe them again and consider retrying the server.
+ */
+ afs_probe_fileserver(op->net, op->server);
+ if (op->flags & AFS_OPERATION_RETRY_SERVER) {
+ alist = op->ac.alist;
+ error = afs_wait_for_one_fs_probe(
+ op->server, !(op->flags & AFS_OPERATION_UNINTR));
+ switch (error) {
+ case 0:
+ op->flags &= ~AFS_OPERATION_RETRY_SERVER;
+ goto retry_server;
+ case -ERESTARTSYS:
+ goto failed_set_error;
+ case -ETIME:
+ case -EDESTADDRREQ:
+ goto next_server;
+ }
+ }
+
next_server:
_debug("next");
- afs_end_cursor(&fc->ac);
+ afs_end_cursor(&op->ac);
goto pick_server;
no_more_servers:
/* That's all the servers poked to no good effect. Try again if some
* of them were busy.
*/
- if (fc->flags & AFS_FS_CURSOR_VBUSY)
+ if (op->flags & AFS_OPERATION_VBUSY)
goto restart_from_beginning;
e.error = -EDESTADDRREQ;
e.responded = false;
- for (i = 0; i < fc->server_list->nr_servers; i++) {
- struct afs_server *s = fc->server_list->servers[i].server;
+ for (i = 0; i < op->server_list->nr_servers; i++) {
+ struct afs_server *s = op->server_list->servers[i].server;
afs_prioritise_error(&e, READ_ONCE(s->probe.error),
s->probe.abort_code);
@@ -470,101 +454,18 @@ no_more_servers:
error = e.error;
failed_set_error:
- fc->error = error;
+ op->error = error;
failed:
- fc->flags |= AFS_FS_CURSOR_STOP;
- afs_end_cursor(&fc->ac);
- _leave(" = f [failed %d]", fc->error);
- return false;
-}
-
-/*
- * Select the same fileserver we used for a vnode before and only that
- * fileserver. We use this when we have a lock on that file, which is backed
- * only by the fileserver we obtained it from.
- */
-bool afs_select_current_fileserver(struct afs_fs_cursor *fc)
-{
- struct afs_vnode *vnode = fc->vnode;
- struct afs_cb_interest *cbi;
- struct afs_addr_list *alist;
- int error = fc->ac.error;
-
- _enter("");
-
- cbi = rcu_dereference_protected(vnode->cb_interest,
- lockdep_is_held(&vnode->io_lock));
-
- switch (error) {
- case SHRT_MAX:
- if (!cbi) {
- fc->error = -ESTALE;
- fc->flags |= AFS_FS_CURSOR_STOP;
- return false;
- }
-
- fc->cbi = afs_get_cb_interest(cbi);
-
- read_lock(&cbi->server->fs_lock);
- alist = rcu_dereference_protected(cbi->server->addresses,
- lockdep_is_held(&cbi->server->fs_lock));
- afs_get_addrlist(alist);
- read_unlock(&cbi->server->fs_lock);
- if (!alist) {
- fc->error = -ESTALE;
- fc->flags |= AFS_FS_CURSOR_STOP;
- return false;
- }
-
- memset(&fc->ac, 0, sizeof(fc->ac));
- fc->ac.alist = alist;
- fc->ac.index = -1;
- goto iterate_address;
-
- case 0:
- default:
- /* Success or local failure. Stop. */
- fc->error = error;
- fc->flags |= AFS_FS_CURSOR_STOP;
- _leave(" = f [okay/local %d]", error);
- return false;
-
- case -ECONNABORTED:
- fc->error = afs_abort_to_error(fc->ac.abort_code);
- fc->flags |= AFS_FS_CURSOR_STOP;
- _leave(" = f [abort]");
- return false;
-
- case -ERFKILL:
- case -EADDRNOTAVAIL:
- case -ENETUNREACH:
- case -EHOSTUNREACH:
- case -EHOSTDOWN:
- case -ECONNREFUSED:
- case -ETIMEDOUT:
- case -ETIME:
- _debug("no conn");
- fc->error = error;
- goto iterate_address;
- }
-
-iterate_address:
- /* Iterate over the current server's address list to try and find an
- * address on which it will respond to us.
- */
- if (afs_iterate_addresses(&fc->ac)) {
- _leave(" = t");
- return true;
- }
-
- afs_end_cursor(&fc->ac);
+ op->flags |= AFS_OPERATION_STOP;
+ afs_end_cursor(&op->ac);
+ _leave(" = f [failed %d]", op->error);
return false;
}
/*
* Dump cursor state in the case of the error being EDESTADDRREQ.
*/
-static void afs_dump_edestaddrreq(const struct afs_fs_cursor *fc)
+void afs_dump_edestaddrreq(const struct afs_operation *op)
{
static int count;
int i;
@@ -576,13 +477,14 @@ static void afs_dump_edestaddrreq(const struct afs_fs_cursor *fc)
rcu_read_lock();
pr_notice("EDESTADDR occurred\n");
- pr_notice("FC: cbb=%x cbb2=%x fl=%hx err=%hd\n",
- fc->cb_break, fc->cb_break_2, fc->flags, fc->error);
+ pr_notice("FC: cbb=%x cbb2=%x fl=%x err=%hd\n",
+ op->file[0].cb_break_before,
+ op->file[1].cb_break_before, op->flags, op->error);
pr_notice("FC: ut=%lx ix=%d ni=%u\n",
- fc->untried, fc->index, fc->nr_iterations);
+ op->untried, op->index, op->nr_iterations);
- if (fc->server_list) {
- const struct afs_server_list *sl = fc->server_list;
+ if (op->server_list) {
+ const struct afs_server_list *sl = op->server_list;
pr_notice("FC: SL nr=%u pr=%u vnov=%hx\n",
sl->nr_servers, sl->preferred, sl->vnovol_mask);
for (i = 0; i < sl->nr_servers; i++) {
@@ -596,41 +498,16 @@ static void afs_dump_edestaddrreq(const struct afs_fs_cursor *fc)
a->version,
a->nr_ipv4, a->nr_addrs, a->max_addrs,
a->preferred);
- pr_notice("FC: - pr=%lx R=%lx F=%lx\n",
- a->probed, a->responded, a->failed);
- if (a == fc->ac.alist)
+ pr_notice("FC: - R=%lx F=%lx\n",
+ a->responded, a->failed);
+ if (a == op->ac.alist)
pr_notice("FC: - current\n");
}
}
}
pr_notice("AC: t=%lx ax=%u ac=%d er=%d r=%u ni=%u\n",
- fc->ac.tried, fc->ac.index, fc->ac.abort_code, fc->ac.error,
- fc->ac.responded, fc->ac.nr_iterations);
+ op->ac.tried, op->ac.index, op->ac.abort_code, op->ac.error,
+ op->ac.responded, op->ac.nr_iterations);
rcu_read_unlock();
}
-
-/*
- * Tidy up a filesystem cursor and unlock the vnode.
- */
-int afs_end_vnode_operation(struct afs_fs_cursor *fc)
-{
- struct afs_net *net = afs_v2net(fc->vnode);
-
- if (fc->error == -EDESTADDRREQ ||
- fc->error == -EADDRNOTAVAIL ||
- fc->error == -ENETUNREACH ||
- fc->error == -EHOSTUNREACH)
- afs_dump_edestaddrreq(fc);
-
- mutex_unlock(&fc->vnode->io_lock);
-
- afs_end_cursor(&fc->ac);
- afs_put_cb_interest(net, fc->cbi);
- afs_put_serverlist(net, fc->server_list);
-
- if (fc->error == -ECONNABORTED)
- fc->error = afs_abort_to_error(fc->ac.abort_code);
-
- return fc->error;
-}
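[Annotation] The core of the pick_server step above (take the lowest-RTT server that is both untried and responding) is easy to check in isolation. A self-contained userspace rendering, where the bitmask stands in for op->untried and everything else is illustrative:

#include <limits.h>
#include <stdio.h>

struct toy_server { unsigned int rtt; int responded; };

static int pick_server(const struct toy_server *s, int n, unsigned long *untried)
{
	unsigned int best_rtt = UINT_MAX;
	int i, index = -1;

	for (i = 0; i < n; i++) {
		if (!(*untried & (1UL << i)) || !s[i].responded)
			continue;
		if (s[i].rtt < best_rtt) {
			index = i;
			best_rtt = s[i].rtt;
		}
	}
	if (index >= 0)
		*untried &= ~(1UL << index);	/* the __clear_bit() equivalent */
	return index;
}

int main(void)
{
	struct toy_server s[] = { { 900, 1 }, { 120, 1 }, { 50, 0 } };
	unsigned long untried = (1UL << 3) - 1;

	printf("picked %d\n", pick_server(s, 3, &untried)); /* 1: lowest responding RTT */
	printf("picked %d\n", pick_server(s, 3, &untried)); /* 0: next best */
	return 0;
}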
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 1ecc67da6c1a..8fc8fb406a5a 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -37,7 +37,6 @@ int afs_open_socket(struct afs_net *net)
{
struct sockaddr_rxrpc srx;
struct socket *socket;
- unsigned int min_level;
int ret;
_enter("");
@@ -57,9 +56,8 @@ int afs_open_socket(struct afs_net *net)
srx.transport.sin6.sin6_family = AF_INET6;
srx.transport.sin6.sin6_port = htons(AFS_CM_PORT);
- min_level = RXRPC_SECURITY_ENCRYPT;
- ret = kernel_setsockopt(socket, SOL_RXRPC, RXRPC_MIN_SECURITY_LEVEL,
- (void *)&min_level, sizeof(min_level));
+ ret = rxrpc_sock_set_min_security_level(socket->sk,
+ RXRPC_SECURITY_ENCRYPT);
if (ret < 0)
goto error_2;
@@ -183,8 +181,7 @@ void afs_put_call(struct afs_call *call)
if (call->type->destructor)
call->type->destructor(call);
- afs_put_server(call->net, call->server, afs_server_trace_put_call);
- afs_put_cb_interest(call->net, call->cbi);
+ afs_unuse_server_notime(call->net, call->server, afs_server_trace_put_call);
afs_put_addrlist(call->alist);
kfree(call->request);
@@ -283,18 +280,19 @@ static void afs_load_bvec(struct afs_call *call, struct msghdr *msg,
struct bio_vec *bv, pgoff_t first, pgoff_t last,
unsigned offset)
{
+ struct afs_operation *op = call->op;
struct page *pages[AFS_BVEC_MAX];
unsigned int nr, n, i, to, bytes = 0;
nr = min_t(pgoff_t, last - first + 1, AFS_BVEC_MAX);
- n = find_get_pages_contig(call->mapping, first, nr, pages);
+ n = find_get_pages_contig(op->store.mapping, first, nr, pages);
ASSERTCMP(n, ==, nr);
msg->msg_flags |= MSG_MORE;
for (i = 0; i < nr; i++) {
to = PAGE_SIZE;
if (first + i >= last) {
- to = call->last_to;
+ to = op->store.last_to;
msg->msg_flags &= ~MSG_MORE;
}
bv[i].bv_page = pages[i];
@@ -324,13 +322,14 @@ static void afs_notify_end_request_tx(struct sock *sock,
*/
static int afs_send_pages(struct afs_call *call, struct msghdr *msg)
{
+ struct afs_operation *op = call->op;
struct bio_vec bv[AFS_BVEC_MAX];
unsigned int bytes, nr, loop, offset;
- pgoff_t first = call->first, last = call->last;
+ pgoff_t first = op->store.first, last = op->store.last;
int ret;
- offset = call->first_offset;
- call->first_offset = 0;
+ offset = op->store.first_offset;
+ op->store.first_offset = 0;
do {
afs_load_bvec(call, msg, bv, first, last, offset);
@@ -340,7 +339,7 @@ static int afs_send_pages(struct afs_call *call, struct msghdr *msg)
bytes = msg->msg_iter.count;
nr = msg->msg_iter.nr_segs;
- ret = rxrpc_kernel_send_data(call->net->socket, call->rxcall, msg,
+ ret = rxrpc_kernel_send_data(op->net->socket, call->rxcall, msg,
bytes, afs_notify_end_request_tx);
for (loop = 0; loop < nr; loop++)
put_page(bv[loop].bv_page);
@@ -350,7 +349,7 @@ static int afs_send_pages(struct afs_call *call, struct msghdr *msg)
first += nr;
} while (first <= last);
- trace_afs_sent_pages(call, call->first, last, first, ret);
+ trace_afs_sent_pages(call, op->store.first, last, first, ret);
return ret;
}
@@ -385,16 +384,18 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp)
*/
tx_total_len = call->request_size;
if (call->send_pages) {
- if (call->last == call->first) {
- tx_total_len += call->last_to - call->first_offset;
+ struct afs_operation *op = call->op;
+
+ if (op->store.last == op->store.first) {
+ tx_total_len += op->store.last_to - op->store.first_offset;
} else {
/* It looks mathematically like you should be able to
* combine the following lines with the ones above, but
* unsigned arithmetic is fun when it wraps...
*/
- tx_total_len += PAGE_SIZE - call->first_offset;
- tx_total_len += call->last_to;
- tx_total_len += (call->last - call->first - 1) * PAGE_SIZE;
+ tx_total_len += PAGE_SIZE - op->store.first_offset;
+ tx_total_len += op->store.last_to;
+ tx_total_len += (op->store.last - op->store.first - 1) * PAGE_SIZE;
}
}
@@ -540,13 +541,15 @@ static void afs_deliver_to_call(struct afs_call *call)
ret = call->type->deliver(call);
state = READ_ONCE(call->state);
+ if (ret == 0 && call->unmarshalling_error)
+ ret = -EBADMSG;
switch (ret) {
case 0:
afs_queue_call_work(call);
if (state == AFS_CALL_CL_PROC_REPLY) {
- if (call->cbi)
+ if (call->op)
set_bit(AFS_SERVER_FL_MAY_HAVE_CB,
- &call->cbi->server->flags);
+ &call->op->server->flags);
goto call_complete;
}
ASSERTCMP(state, >, AFS_CALL_CL_PROC_REPLY);
@@ -959,9 +962,11 @@ int afs_extract_data(struct afs_call *call, bool want_more)
/*
* Log protocol error production.
*/
-noinline int afs_protocol_error(struct afs_call *call, int error,
+noinline int afs_protocol_error(struct afs_call *call,
enum afs_eproto_cause cause)
{
- trace_afs_protocol_error(call, error, cause);
- return error;
+ trace_afs_protocol_error(call, cause);
+ if (call)
+ call->unmarshalling_error = true;
+ return -EBADMSG;
}
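[Annotation] The "unsigned arithmetic is fun when it wraps" comment in afs_make_call() is worth making concrete: pgoff_t is unsigned, so folding the single-page case into the multi-page formula would evaluate (last - first - 1) as a wrapped huge value whenever last == first. A quick userspace check with made-up sizes:

#include <stdio.h>

#define TOY_PAGE_SIZE 4096UL

static unsigned long tx_len(unsigned long first, unsigned long last,
			    unsigned long first_offset, unsigned long last_to)
{
	if (last == first)
		return last_to - first_offset;
	/* Tail of the first page, whole middle pages, head of the last page. */
	return (TOY_PAGE_SIZE - first_offset) + last_to +
	       (last - first - 1) * TOY_PAGE_SIZE;
}

int main(void)
{
	printf("single page: %lu\n", tx_len(3, 3, 100, 600));
	printf("three pages: %lu\n", tx_len(3, 5, 100, 600));
	/* The naive combined formula would include this wrapped term: */
	printf("wrapped term: %lu\n", (3UL - 3UL - 1UL) * TOY_PAGE_SIZE);
	return 0;
}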
diff --git a/fs/afs/security.c b/fs/afs/security.c
index ce9de1e6742b..90d852704328 100644
--- a/fs/afs/security.c
+++ b/fs/afs/security.c
@@ -170,8 +170,7 @@ void afs_cache_permit(struct afs_vnode *vnode, struct key *key,
break;
}
- if (afs_cb_is_broken(cb_break, vnode,
- rcu_dereference(vnode->cb_interest))) {
+ if (afs_cb_is_broken(cb_break, vnode)) {
changed = true;
break;
}
@@ -201,7 +200,7 @@ void afs_cache_permit(struct afs_vnode *vnode, struct key *key,
}
}
- if (afs_cb_is_broken(cb_break, vnode, rcu_dereference(vnode->cb_interest)))
+ if (afs_cb_is_broken(cb_break, vnode))
goto someone_else_changed_it;
/* We need a ref on any permits list we want to copy as we'll have to
@@ -281,8 +280,7 @@ found:
rcu_read_lock();
spin_lock(&vnode->lock);
zap = rcu_access_pointer(vnode->permit_cache);
- if (!afs_cb_is_broken(cb_break, vnode, rcu_dereference(vnode->cb_interest)) &&
- zap == permits)
+ if (!afs_cb_is_broken(cb_break, vnode) && zap == permits)
rcu_assign_pointer(vnode->permit_cache, replacement);
else
zap = replacement;
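[Annotation] afs_cb_is_broken() now needs only the vnode, since the break state lives on the vnode and its volume rather than on a separate interest record. A hedged userspace sketch of the snapshot-and-compare pattern; how the counters combine is an assumption here, not lifted from this patch:

#include <stdio.h>

struct toy_vnode { unsigned int cb_break; unsigned int cb_v_break; };

/* Assumed composition: per-vnode breaks plus per-volume breaks. */
static unsigned int calc_cb_break(const struct toy_vnode *v)
{
	return v->cb_break + v->cb_v_break;
}

/* Snapshot before an operation; any break in the meantime invalidates it. */
static int cb_is_broken(unsigned int snapshot, const struct toy_vnode *v)
{
	return snapshot != calc_cb_break(v);
}

int main(void)
{
	struct toy_vnode v = { 2, 7 };
	unsigned int snap = calc_cb_break(&v);

	v.cb_break++;		/* server broke the callback mid-operation */
	printf("broken? %d\n", cb_is_broken(snap, &v));
	return 0;
}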
diff --git a/fs/afs/server.c b/fs/afs/server.c
index 11b90ac7ea30..039e3488511c 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -12,19 +12,11 @@
#include "protocol_yfs.h"
static unsigned afs_server_gc_delay = 10; /* Server record timeout in seconds */
-static unsigned afs_server_update_delay = 30; /* Time till VLDB recheck in secs */
static atomic_t afs_server_debug_id;
-static void afs_inc_servers_outstanding(struct afs_net *net)
-{
- atomic_inc(&net->servers_outstanding);
-}
-
-static void afs_dec_servers_outstanding(struct afs_net *net)
-{
- if (atomic_dec_and_test(&net->servers_outstanding))
- wake_up_var(&net->servers_outstanding);
-}
+static struct afs_server *afs_maybe_use_server(struct afs_server *,
+ enum afs_server_trace);
+static void __afs_put_server(struct afs_net *, struct afs_server *);
/*
* Find a server by one of its addresses.
@@ -41,7 +33,7 @@ struct afs_server *afs_find_server(struct afs_net *net,
do {
if (server)
- afs_put_server(net, server, afs_server_trace_put_find_rsq);
+ afs_unuse_server_notime(net, server, afs_server_trace_put_find_rsq);
server = NULL;
read_seqbegin_or_lock(&net->fs_addr_lock, &seq);
@@ -79,9 +71,9 @@ struct afs_server *afs_find_server(struct afs_net *net,
}
server = NULL;
+ continue;
found:
- if (server && !atomic_inc_not_zero(&server->usage))
- server = NULL;
+ server = afs_maybe_use_server(server, afs_server_trace_get_by_addr);
} while (need_seqretry(&net->fs_addr_lock, seq));
@@ -92,7 +84,7 @@ struct afs_server *afs_find_server(struct afs_net *net,
}
/*
- * Look up a server by its UUID
+ * Look up a server by its UUID and mark it active.
*/
struct afs_server *afs_find_server_by_uuid(struct afs_net *net, const uuid_t *uuid)
{
@@ -108,7 +100,7 @@ struct afs_server *afs_find_server_by_uuid(struct afs_net *net, const uuid_t *uu
* changes.
*/
if (server)
- afs_put_server(net, server, afs_server_trace_put_uuid_rsq);
+ afs_unuse_server(net, server, afs_server_trace_put_uuid_rsq);
server = NULL;
read_seqbegin_or_lock(&net->fs_lock, &seq);
@@ -123,7 +115,7 @@ struct afs_server *afs_find_server_by_uuid(struct afs_net *net, const uuid_t *uu
} else if (diff > 0) {
p = p->rb_right;
} else {
- afs_get_server(server, afs_server_trace_get_by_uuid);
+ afs_use_server(server, afs_server_trace_get_by_uuid);
break;
}
@@ -138,13 +130,16 @@ struct afs_server *afs_find_server_by_uuid(struct afs_net *net, const uuid_t *uu
}
/*
- * Install a server record in the namespace tree
+ * Install a server record in the namespace tree. If there's a clash, we stick
+ * it into a list anchored on whichever afs_server struct is actually in the
+ * tree.
*/
-static struct afs_server *afs_install_server(struct afs_net *net,
+static struct afs_server *afs_install_server(struct afs_cell *cell,
struct afs_server *candidate)
{
const struct afs_addr_list *alist;
- struct afs_server *server;
+ struct afs_server *server, *next;
+ struct afs_net *net = cell->net;
struct rb_node **pp, *p;
int diff;
@@ -160,12 +155,30 @@ static struct afs_server *afs_install_server(struct afs_net *net,
_debug("- consider %p", p);
server = rb_entry(p, struct afs_server, uuid_rb);
diff = memcmp(&candidate->uuid, &server->uuid, sizeof(uuid_t));
- if (diff < 0)
+ if (diff < 0) {
pp = &(*pp)->rb_left;
- else if (diff > 0)
+ } else if (diff > 0) {
pp = &(*pp)->rb_right;
- else
- goto exists;
+ } else {
+ if (server->cell == cell)
+ goto exists;
+
+ /* We have the same UUID representing servers in
+ * different cells. Append the new server to the list.
+ */
+ for (;;) {
+ next = rcu_dereference_protected(
+ server->uuid_next,
+ lockdep_is_held(&net->fs_lock.lock));
+ if (!next)
+ break;
+ server = next;
+ }
+ rcu_assign_pointer(server->uuid_next, candidate);
+ candidate->uuid_prev = server;
+ server = candidate;
+ goto added_dup;
+ }
}
server = candidate;
@@ -173,6 +186,7 @@ static struct afs_server *afs_install_server(struct afs_net *net,
rb_insert_color(&server->uuid_rb, &net->fs_servers);
hlist_add_head_rcu(&server->proc_link, &net->fs_proc);
+added_dup:
write_seqlock(&net->fs_addr_lock);
alist = rcu_dereference_protected(server->addresses,
lockdep_is_held(&net->fs_addr_lock.lock));
@@ -199,13 +213,14 @@ exists:
}
/*
- * allocate a new server record
+ * Allocate a new server record and mark it active.
*/
-static struct afs_server *afs_alloc_server(struct afs_net *net,
+static struct afs_server *afs_alloc_server(struct afs_cell *cell,
const uuid_t *uuid,
struct afs_addr_list *alist)
{
struct afs_server *server;
+ struct afs_net *net = cell->net;
_enter("");
@@ -213,20 +228,21 @@ static struct afs_server *afs_alloc_server(struct afs_net *net,
if (!server)
goto enomem;
- atomic_set(&server->usage, 1);
+ atomic_set(&server->ref, 1);
+ atomic_set(&server->active, 1);
server->debug_id = atomic_inc_return(&afs_server_debug_id);
RCU_INIT_POINTER(server->addresses, alist);
server->addr_version = alist->version;
server->uuid = *uuid;
- server->update_at = ktime_get_real_seconds() + afs_server_update_delay;
rwlock_init(&server->fs_lock);
- INIT_HLIST_HEAD(&server->cb_volumes);
- rwlock_init(&server->cb_break_lock);
init_waitqueue_head(&server->probe_wq);
+ INIT_LIST_HEAD(&server->probe_link);
spin_lock_init(&server->probe_lock);
+ server->cell = cell;
+ server->rtt = UINT_MAX;
afs_inc_servers_outstanding(net);
- trace_afs_server(server, 1, afs_server_trace_alloc);
+ trace_afs_server(server, 1, 1, afs_server_trace_alloc);
_leave(" = %p", server);
return server;
@@ -264,7 +280,7 @@ static struct afs_addr_list *afs_vl_lookup_addrs(struct afs_cell *cell,
* Get or create a fileserver record.
*/
struct afs_server *afs_lookup_server(struct afs_cell *cell, struct key *key,
- const uuid_t *uuid)
+ const uuid_t *uuid, u32 addr_version)
{
struct afs_addr_list *alist;
struct afs_server *server, *candidate;
@@ -272,26 +288,34 @@ struct afs_server *afs_lookup_server(struct afs_cell *cell, struct key *key,
_enter("%p,%pU", cell->net, uuid);
server = afs_find_server_by_uuid(cell->net, uuid);
- if (server)
+ if (server) {
+ if (server->addr_version != addr_version)
+ set_bit(AFS_SERVER_FL_NEEDS_UPDATE, &server->flags);
return server;
+ }
alist = afs_vl_lookup_addrs(cell, key, uuid);
if (IS_ERR(alist))
return ERR_CAST(alist);
- candidate = afs_alloc_server(cell->net, uuid, alist);
+ candidate = afs_alloc_server(cell, uuid, alist);
if (!candidate) {
afs_put_addrlist(alist);
return ERR_PTR(-ENOMEM);
}
- server = afs_install_server(cell->net, candidate);
+ server = afs_install_server(cell, candidate);
if (server != candidate) {
afs_put_addrlist(alist);
kfree(candidate);
+ } else {
+ /* Immediately dispatch an asynchronous probe to each interface
+ * on the fileserver. This will make sure the repeat-probing
+ * service is started.
+ */
+ afs_fs_probe_fileserver(cell->net, server, key, true);
}
- _leave(" = %p{%d}", server, atomic_read(&server->usage));
return server;
}
@@ -327,9 +351,38 @@ void afs_servers_timer(struct timer_list *timer)
struct afs_server *afs_get_server(struct afs_server *server,
enum afs_server_trace reason)
{
- unsigned int u = atomic_inc_return(&server->usage);
+ unsigned int u = atomic_inc_return(&server->ref);
+
+ trace_afs_server(server, u, atomic_read(&server->active), reason);
+ return server;
+}
+
+/*
+ * Try to get a reference on a server object.
+ */
+static struct afs_server *afs_maybe_use_server(struct afs_server *server,
+ enum afs_server_trace reason)
+{
+ unsigned int r = atomic_fetch_add_unless(&server->ref, 1, 0);
+ unsigned int a;
+
+ if (r == 0)
+ return NULL;
+
+ a = atomic_inc_return(&server->active);
+ trace_afs_server(server, r, a, reason);
+ return server;
+}
+
+/*
+ * Get an active count on a server object.
+ */
+struct afs_server *afs_use_server(struct afs_server *server, enum afs_server_trace reason)
+{
+ unsigned int r = atomic_inc_return(&server->ref);
+ unsigned int a = atomic_inc_return(&server->active);
- trace_afs_server(server, u, reason);
+ trace_afs_server(server, r, a, reason);
return server;
}
@@ -344,32 +397,57 @@ void afs_put_server(struct afs_net *net, struct afs_server *server,
if (!server)
return;
- server->put_time = ktime_get_real_seconds();
-
- usage = atomic_dec_return(&server->usage);
+ usage = atomic_dec_return(&server->ref);
+ trace_afs_server(server, usage, atomic_read(&server->active), reason);
+ if (unlikely(usage == 0))
+ __afs_put_server(net, server);
+}
- trace_afs_server(server, usage, reason);
+/*
+ * Drop an active count on a server object without updating the last-unused
+ * time.
+ */
+void afs_unuse_server_notime(struct afs_net *net, struct afs_server *server,
+ enum afs_server_trace reason)
+{
+ if (server) {
+ unsigned int active = atomic_dec_return(&server->active);
- if (likely(usage > 0))
- return;
+ if (active == 0)
+ afs_set_server_timer(net, afs_server_gc_delay);
+ afs_put_server(net, server, reason);
+ }
+}
- afs_set_server_timer(net, afs_server_gc_delay);
+/*
+ * Drop an active count on a server object.
+ */
+void afs_unuse_server(struct afs_net *net, struct afs_server *server,
+ enum afs_server_trace reason)
+{
+ if (server) {
+ server->unuse_time = ktime_get_real_seconds();
+ afs_unuse_server_notime(net, server, reason);
+ }
}
static void afs_server_rcu(struct rcu_head *rcu)
{
struct afs_server *server = container_of(rcu, struct afs_server, rcu);
- trace_afs_server(server, atomic_read(&server->usage),
- afs_server_trace_free);
+ trace_afs_server(server, atomic_read(&server->ref),
+ atomic_read(&server->active), afs_server_trace_free);
afs_put_addrlist(rcu_access_pointer(server->addresses));
kfree(server);
}
-/*
- * destroy a dead server
- */
-static void afs_destroy_server(struct afs_net *net, struct afs_server *server)
+static void __afs_put_server(struct afs_net *net, struct afs_server *server)
+{
+ call_rcu(&server->rcu, afs_server_rcu);
+ afs_dec_servers_outstanding(net);
+}
+
+static void afs_give_up_callbacks(struct afs_net *net, struct afs_server *server)
{
struct afs_addr_list *alist = rcu_access_pointer(server->addresses);
struct afs_addr_cursor ac = {
@@ -378,19 +456,18 @@ static void afs_destroy_server(struct afs_net *net, struct afs_server *server)
.error = 0,
};
- trace_afs_server(server, atomic_read(&server->usage),
- afs_server_trace_give_up_cb);
+ afs_fs_give_up_all_callbacks(net, server, &ac, NULL);
+}
+/*
+ * destroy a dead server
+ */
+static void afs_destroy_server(struct afs_net *net, struct afs_server *server)
+{
if (test_bit(AFS_SERVER_FL_MAY_HAVE_CB, &server->flags))
- afs_fs_give_up_all_callbacks(net, server, &ac, NULL);
+ afs_give_up_callbacks(net, server);
- wait_var_event(&server->probe_outstanding,
- atomic_read(&server->probe_outstanding) == 0);
-
- trace_afs_server(server, atomic_read(&server->usage),
- afs_server_trace_destroy);
- call_rcu(&server->rcu, afs_server_rcu);
- afs_dec_servers_outstanding(net);
+ afs_put_server(net, server, afs_server_trace_destroy);
}
/*
@@ -398,32 +475,49 @@ static void afs_destroy_server(struct afs_net *net, struct afs_server *server)
*/
static void afs_gc_servers(struct afs_net *net, struct afs_server *gc_list)
{
- struct afs_server *server;
- bool deleted;
- int usage;
+ struct afs_server *server, *next, *prev;
+ int active;
while ((server = gc_list)) {
gc_list = server->gc_next;
write_seqlock(&net->fs_lock);
- usage = 1;
- deleted = atomic_try_cmpxchg(&server->usage, &usage, 0);
- trace_afs_server(server, usage, afs_server_trace_gc);
- if (deleted) {
- rb_erase(&server->uuid_rb, &net->fs_servers);
- hlist_del_rcu(&server->proc_link);
- }
- write_sequnlock(&net->fs_lock);
- if (deleted) {
- write_seqlock(&net->fs_addr_lock);
+ active = atomic_read(&server->active);
+ if (active == 0) {
+ trace_afs_server(server, atomic_read(&server->ref),
+ active, afs_server_trace_gc);
+ next = rcu_dereference_protected(
+ server->uuid_next, lockdep_is_held(&net->fs_lock.lock));
+ prev = server->uuid_prev;
+ if (!prev) {
+ /* The one at the front is in the tree */
+ if (!next) {
+ rb_erase(&server->uuid_rb, &net->fs_servers);
+ } else {
+ rb_replace_node_rcu(&server->uuid_rb,
+ &next->uuid_rb,
+ &net->fs_servers);
+ next->uuid_prev = NULL;
+ }
+ } else {
+ /* This server is not at the front */
+ rcu_assign_pointer(prev->uuid_next, next);
+ if (next)
+ next->uuid_prev = prev;
+ }
+
+ list_del(&server->probe_link);
+ hlist_del_rcu(&server->proc_link);
if (!hlist_unhashed(&server->addr4_link))
hlist_del_rcu(&server->addr4_link);
if (!hlist_unhashed(&server->addr6_link))
hlist_del_rcu(&server->addr6_link);
- write_sequnlock(&net->fs_addr_lock);
- afs_destroy_server(net, server);
}
+ write_sequnlock(&net->fs_lock);
+
+ if (active == 0)
+ afs_destroy_server(net, server);
}
}
@@ -452,15 +546,14 @@ void afs_manage_servers(struct work_struct *work)
for (cursor = rb_first(&net->fs_servers); cursor; cursor = rb_next(cursor)) {
struct afs_server *server =
rb_entry(cursor, struct afs_server, uuid_rb);
- int usage = atomic_read(&server->usage);
+ int active = atomic_read(&server->active);
- _debug("manage %pU %u", &server->uuid, usage);
+ _debug("manage %pU %u", &server->uuid, active);
- ASSERTCMP(usage, >=, 1);
- ASSERTIFCMP(purging, usage, ==, 1);
+ ASSERTIFCMP(purging, active, ==, 0);
- if (usage == 1) {
- time64_t expire_at = server->put_time;
+ if (active == 0) {
+ time64_t expire_at = server->unuse_time;
if (!test_bit(AFS_SERVER_FL_VL_FAIL, &server->flags) &&
!test_bit(AFS_SERVER_FL_NOT_FOUND, &server->flags))
@@ -525,26 +618,27 @@ void afs_purge_servers(struct afs_net *net)
/*
* Get an update for a server's address list.
*/
-static noinline bool afs_update_server_record(struct afs_fs_cursor *fc, struct afs_server *server)
+static noinline bool afs_update_server_record(struct afs_operation *op,
+ struct afs_server *server)
{
struct afs_addr_list *alist, *discard;
_enter("");
- trace_afs_server(server, atomic_read(&server->usage), afs_server_trace_update);
+ trace_afs_server(server, atomic_read(&server->ref), atomic_read(&server->active),
+ afs_server_trace_update);
- alist = afs_vl_lookup_addrs(fc->vnode->volume->cell, fc->key,
- &server->uuid);
+ alist = afs_vl_lookup_addrs(op->volume->cell, op->key, &server->uuid);
if (IS_ERR(alist)) {
if ((PTR_ERR(alist) == -ERESTARTSYS ||
PTR_ERR(alist) == -EINTR) &&
- !(fc->flags & AFS_FS_CURSOR_INTR) &&
+ (op->flags & AFS_OPERATION_UNINTR) &&
server->addresses) {
_leave(" = t [intr]");
return true;
}
- fc->error = PTR_ERR(alist);
- _leave(" = f [%d]", fc->error);
+ op->error = PTR_ERR(alist);
+ _leave(" = f [%d]", op->error);
return false;
}
@@ -558,7 +652,6 @@ static noinline bool afs_update_server_record(struct afs_fs_cursor *fc, struct a
write_unlock(&server->fs_lock);
}
- server->update_at = ktime_get_real_seconds() + afs_server_update_delay;
afs_put_addrlist(discard);
_leave(" = t");
return true;
@@ -567,10 +660,8 @@ static noinline bool afs_update_server_record(struct afs_fs_cursor *fc, struct a
/*
* See if a server's address list needs updating.
*/
-bool afs_check_server_record(struct afs_fs_cursor *fc, struct afs_server *server)
+bool afs_check_server_record(struct afs_operation *op, struct afs_server *server)
{
- time64_t now = ktime_get_real_seconds();
- long diff;
bool success;
int ret, retries = 0;
@@ -579,25 +670,29 @@ bool afs_check_server_record(struct afs_fs_cursor *fc, struct afs_server *server
ASSERT(server);
retry:
- diff = READ_ONCE(server->update_at) - now;
- if (diff > 0) {
- _leave(" = t [not now %ld]", diff);
- return true;
- }
+ if (test_bit(AFS_SERVER_FL_UPDATING, &server->flags))
+ goto wait;
+ if (test_bit(AFS_SERVER_FL_NEEDS_UPDATE, &server->flags))
+ goto update;
+ _leave(" = t [good]");
+ return true;
+update:
if (!test_and_set_bit_lock(AFS_SERVER_FL_UPDATING, &server->flags)) {
- success = afs_update_server_record(fc, server);
+ clear_bit(AFS_SERVER_FL_NEEDS_UPDATE, &server->flags);
+ success = afs_update_server_record(op, server);
clear_bit_unlock(AFS_SERVER_FL_UPDATING, &server->flags);
wake_up_bit(&server->flags, AFS_SERVER_FL_UPDATING);
_leave(" = %d", success);
return success;
}
+wait:
ret = wait_on_bit(&server->flags, AFS_SERVER_FL_UPDATING,
- (fc->flags & AFS_FS_CURSOR_INTR) ?
- TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
+ (op->flags & AFS_OPERATION_UNINTR) ?
+ TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
if (ret == -ERESTARTSYS) {
- fc->error = ret;
+ op->error = ret;
_leave(" = f [intr]");
return false;
}
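[Annotation] server.c now tracks two counts: ->ref keeps the memory alive, while ->active says whether anything is using the server, and only active==0 records become GC candidates. A self-contained sketch of that split using C11 atomics; the function names echo the helpers above and the prints are illustrative:

#include <stdatomic.h>
#include <stdio.h>

struct toy_server { atomic_int ref; atomic_int active; };

static void use_server(struct toy_server *s)	/* afs_use_server() shape */
{
	atomic_fetch_add(&s->ref, 1);
	atomic_fetch_add(&s->active, 1);
}

static void unuse_server(struct toy_server *s)	/* afs_unuse_server() shape */
{
	if (atomic_fetch_sub(&s->active, 1) == 1)
		printf("active hit 0: GC candidate after the expiry delay\n");
	if (atomic_fetch_sub(&s->ref, 1) == 1)
		printf("ref hit 0: actually freed\n");
}

int main(void)
{
	struct toy_server s = { 1, 1 };	/* as afs_alloc_server() leaves it */

	use_server(&s);
	unuse_server(&s);
	unuse_server(&s);	/* drop the allocation-time counts too */
	return 0;
}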
diff --git a/fs/afs/server_list.c b/fs/afs/server_list.c
index 888d91d195d9..ed9056703505 100644
--- a/fs/afs/server_list.c
+++ b/fs/afs/server_list.c
@@ -14,11 +14,9 @@ void afs_put_serverlist(struct afs_net *net, struct afs_server_list *slist)
int i;
if (slist && refcount_dec_and_test(&slist->usage)) {
- for (i = 0; i < slist->nr_servers; i++) {
- afs_put_cb_interest(net, slist->servers[i].cb_interest);
- afs_put_server(net, slist->servers[i].server,
- afs_server_trace_put_slist);
- }
+ for (i = 0; i < slist->nr_servers; i++)
+ afs_unuse_server(net, slist->servers[i].server,
+ afs_server_trace_put_slist);
kfree(slist);
}
}
@@ -46,12 +44,16 @@ struct afs_server_list *afs_alloc_server_list(struct afs_cell *cell,
refcount_set(&slist->usage, 1);
rwlock_init(&slist->lock);
+ for (i = 0; i < AFS_MAXTYPES; i++)
+ slist->vids[i] = vldb->vid[i];
+
/* Make sure a record exists for each server in the list. */
for (i = 0; i < vldb->nr_servers; i++) {
if (!(vldb->fs_mask[i] & type_mask))
continue;
- server = afs_lookup_server(cell, key, &vldb->fs_server[i]);
+ server = afs_lookup_server(cell, key, &vldb->fs_server[i],
+ vldb->addr_version[i]);
if (IS_ERR(server)) {
ret = PTR_ERR(server);
if (ret == -ENOENT ||
@@ -123,31 +125,5 @@ changed:
}
}
- /* Keep the old callback interest records where possible so that we
- * maintain callback interception.
- */
- i = 0;
- j = 0;
- while (i < old->nr_servers && j < new->nr_servers) {
- if (new->servers[j].server == old->servers[i].server) {
- struct afs_cb_interest *cbi = old->servers[i].cb_interest;
- if (cbi) {
- new->servers[j].cb_interest = cbi;
- refcount_inc(&cbi->usage);
- }
- i++;
- j++;
- continue;
- }
-
- if (new->servers[j].server < old->servers[i].server) {
- j++;
- continue;
- }
-
- i++;
- continue;
- }
-
return true;
}
diff --git a/fs/afs/super.c b/fs/afs/super.c
index dda7a9a66848..b552357b1d13 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -352,7 +352,9 @@ static int afs_validate_fc(struct fs_context *fc)
{
struct afs_fs_context *ctx = fc->fs_private;
struct afs_volume *volume;
+ struct afs_cell *cell;
struct key *key;
+ int ret;
if (!ctx->dyn_root) {
if (ctx->no_cell) {
@@ -365,6 +367,7 @@ static int afs_validate_fc(struct fs_context *fc)
return -EDESTADDRREQ;
}
+ reget_key:
/* We try to do the mount securely. */
key = afs_request_key(ctx->cell);
if (IS_ERR(key))
@@ -373,10 +376,26 @@ static int afs_validate_fc(struct fs_context *fc)
ctx->key = key;
if (ctx->volume) {
- afs_put_volume(ctx->cell, ctx->volume);
+ afs_put_volume(ctx->net, ctx->volume,
+ afs_volume_trace_put_validate_fc);
ctx->volume = NULL;
}
+ if (test_bit(AFS_CELL_FL_CHECK_ALIAS, &ctx->cell->flags)) {
+ ret = afs_cell_detect_alias(ctx->cell, key);
+ if (ret < 0)
+ return ret;
+ if (ret == 1) {
+ _debug("switch to alias");
+ key_put(ctx->key);
+ ctx->key = NULL;
+ cell = afs_get_cell(ctx->cell->alias_of);
+ afs_put_cell(ctx->net, ctx->cell);
+ ctx->cell = cell;
+ goto reget_key;
+ }
+ }
+
volume = afs_create_volume(ctx);
if (IS_ERR(volume))
return PTR_ERR(volume);
@@ -421,7 +440,6 @@ static int afs_set_super(struct super_block *sb, struct fs_context *fc)
static int afs_fill_super(struct super_block *sb, struct afs_fs_context *ctx)
{
struct afs_super_info *as = AFS_FS_S(sb);
- struct afs_iget_data iget_data;
struct inode *inode = NULL;
int ret;
@@ -446,13 +464,7 @@ static int afs_fill_super(struct super_block *sb, struct afs_fs_context *ctx)
} else {
sprintf(sb->s_id, "%llu", as->volume->vid);
afs_activate_volume(as->volume);
- iget_data.fid.vid = as->volume->vid;
- iget_data.fid.vnode = 1;
- iget_data.fid.vnode_hi = 0;
- iget_data.fid.unique = 1;
- iget_data.cb_v_break = as->volume->cb_v_break;
- iget_data.cb_s_break = 0;
- inode = afs_iget(sb, ctx->key, &iget_data, NULL, NULL, NULL);
+ inode = afs_root_iget(sb, ctx->key);
}
if (IS_ERR(inode))
@@ -473,6 +485,7 @@ static int afs_fill_super(struct super_block *sb, struct afs_fs_context *ctx)
goto error;
} else {
sb->s_d_op = &afs_fs_dentry_operations;
+ rcu_assign_pointer(as->volume->sb, sb);
}
_leave(" = 0");
@@ -496,7 +509,8 @@ static struct afs_super_info *afs_alloc_sbi(struct fs_context *fc)
as->dyn_root = true;
} else {
as->cell = afs_get_cell(ctx->cell);
- as->volume = __afs_get_volume(ctx->volume);
+ as->volume = afs_get_volume(ctx->volume,
+ afs_volume_trace_get_alloc_sbi);
}
}
return as;
@@ -505,8 +519,9 @@ static struct afs_super_info *afs_alloc_sbi(struct fs_context *fc)
static void afs_destroy_sbi(struct afs_super_info *as)
{
if (as) {
- afs_put_volume(as->cell, as->volume);
- afs_put_cell(afs_net(as->net_ns), as->cell);
+ struct afs_net *net = afs_net(as->net_ns);
+ afs_put_volume(net, as->volume, afs_volume_trace_put_destroy_sbi);
+ afs_put_cell(net, as->cell);
put_net(as->net_ns);
kfree(as);
}
@@ -515,7 +530,6 @@ static void afs_destroy_sbi(struct afs_super_info *as)
static void afs_kill_super(struct super_block *sb)
{
struct afs_super_info *as = AFS_FS_S(sb);
- struct afs_net *net = afs_net(as->net_ns);
if (as->dyn_root)
afs_dynroot_depopulate(sb);
@@ -524,7 +538,7 @@ static void afs_kill_super(struct super_block *sb)
* deactivating the superblock.
*/
if (as->volume)
- afs_clear_callback_interests(net, as->volume->servers);
+ rcu_assign_pointer(as->volume->sb, NULL);
kill_anon_super(sb);
if (as->volume)
afs_deactivate_volume(as->volume);
@@ -592,7 +606,7 @@ static void afs_free_fc(struct fs_context *fc)
struct afs_fs_context *ctx = fc->fs_private;
afs_destroy_sbi(fc->s_fs_info);
- afs_put_volume(ctx->cell, ctx->volume);
+ afs_put_volume(ctx->net, ctx->volume, afs_volume_trace_put_free_fc);
afs_put_cell(ctx->net, ctx->cell);
key_put(ctx->key);
kfree(ctx);
@@ -674,7 +688,6 @@ static struct inode *afs_alloc_inode(struct super_block *sb)
vnode->volume = NULL;
vnode->lock_key = NULL;
vnode->permit_cache = NULL;
- RCU_INIT_POINTER(vnode->cb_interest, NULL);
#ifdef CONFIG_AFS_FSCACHE
vnode->cache = NULL;
#endif
@@ -704,22 +717,38 @@ static void afs_destroy_inode(struct inode *inode)
_debug("DESTROY INODE %p", inode);
- ASSERTCMP(rcu_access_pointer(vnode->cb_interest), ==, NULL);
-
atomic_dec(&afs_count_active_inodes);
}
+static void afs_get_volume_status_success(struct afs_operation *op)
+{
+ struct afs_volume_status *vs = &op->volstatus.vs;
+ struct kstatfs *buf = op->volstatus.buf;
+
+ if (vs->max_quota == 0)
+ buf->f_blocks = vs->part_max_blocks;
+ else
+ buf->f_blocks = vs->max_quota;
+
+ if (buf->f_blocks > vs->blocks_in_use)
+ buf->f_bavail = buf->f_bfree =
+ buf->f_blocks - vs->blocks_in_use;
+}
+
+static const struct afs_operation_ops afs_get_volume_status_operation = {
+ .issue_afs_rpc = afs_fs_get_volume_status,
+ .issue_yfs_rpc = yfs_fs_get_volume_status,
+ .success = afs_get_volume_status_success,
+};
+
/*
* return information about an AFS volume
*/
static int afs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct afs_super_info *as = AFS_FS_S(dentry->d_sb);
- struct afs_fs_cursor fc;
- struct afs_volume_status vs;
+ struct afs_operation *op;
struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry));
- struct key *key;
- int ret;
buf->f_type = dentry->d_sb->s_magic;
buf->f_bsize = AFS_BLOCK_SIZE;
@@ -732,31 +761,13 @@ static int afs_statfs(struct dentry *dentry, struct kstatfs *buf)
return 0;
}
- key = afs_request_key(vnode->volume->cell);
- if (IS_ERR(key))
- return PTR_ERR(key);
+ op = afs_alloc_operation(NULL, as->volume);
+ if (IS_ERR(op))
+ return PTR_ERR(op);
- ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, vnode, key, true)) {
- fc.flags |= AFS_FS_CURSOR_NO_VSLEEP;
- while (afs_select_fileserver(&fc)) {
- fc.cb_break = afs_calc_vnode_cb_break(vnode);
- afs_fs_get_volume_status(&fc, &vs);
- }
-
- afs_check_for_remote_deletion(&fc, fc.vnode);
- ret = afs_end_vnode_operation(&fc);
- }
-
- key_put(key);
-
- if (ret == 0) {
- if (vs.max_quota == 0)
- buf->f_blocks = vs.part_max_blocks;
- else
- buf->f_blocks = vs.max_quota;
- buf->f_bavail = buf->f_bfree = buf->f_blocks - vs.blocks_in_use;
- }
-
- return ret;
+ afs_op_set_vnode(op, 0, vnode);
+ op->nr_files = 1;
+ op->volstatus.buf = buf;
+ op->ops = &afs_get_volume_status_operation;
+ return afs_do_sync_operation(op);
}
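
The statfs conversion above shows the shape of the new afs_operation API: allocate an operation, attach the vnode, point op->ops at a table of callbacks, and let afs_do_sync_operation() drive the RPC and invoke .success afterwards. Below is a minimal userspace sketch of that ops-table dispatch pattern; every name in it is invented for illustration and none of it is kernel code.

#include <stdio.h>

struct op;

struct op_ops {
	int  (*issue)(struct op *op);     /* like .issue_afs_rpc */
	void (*success)(struct op *op);   /* like .success */
};

struct op {
	const struct op_ops *ops;
	long long max_quota;              /* stand-ins for volstatus fields */
	long long blocks_in_use;
	long long f_blocks;               /* stand-in for struct kstatfs */
};

/* Generic runner: issue the "RPC", then let the op post-process it,
 * the way afs_do_sync_operation() calls the .success hook. */
static int run_sync_operation(struct op *op)
{
	int ret = op->ops->issue(op);

	if (ret == 0)
		op->ops->success(op);
	return ret;
}

static int fake_issue(struct op *op)
{
	op->max_quota = 1000;             /* pretend the server replied */
	op->blocks_in_use = 250;
	return 0;
}

static void fake_success(struct op *op)
{
	op->f_blocks = op->max_quota;     /* translate reply into statfs */
}

static const struct op_ops statfs_like_ops = {
	.issue   = fake_issue,
	.success = fake_success,
};

int main(void)
{
	struct op op = { .ops = &statfs_like_ops };
	int ret = run_sync_operation(&op);

	printf("ret=%d f_blocks=%lld\n", ret, op.f_blocks);
	return ret;
}
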
diff --git a/fs/afs/vl_alias.c b/fs/afs/vl_alias.c
new file mode 100644
index 000000000000..5082ef04e99c
--- /dev/null
+++ b/fs/afs/vl_alias.c
@@ -0,0 +1,383 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* AFS cell alias detection
+ *
+ * Copyright (C) 2020 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/namei.h>
+#include <keys/rxrpc-type.h>
+#include "internal.h"
+
+/*
+ * Sample a volume.
+ */
+static struct afs_volume *afs_sample_volume(struct afs_cell *cell, struct key *key,
+ const char *name, unsigned int namelen)
+{
+ struct afs_volume *volume;
+ struct afs_fs_context fc = {
+ .type = 0, /* Explicitly leave it to the VLDB */
+ .volnamesz = namelen,
+ .volname = name,
+ .net = cell->net,
+ .cell = cell,
+ .key = key, /* This might need to be something */
+ };
+
+ volume = afs_create_volume(&fc);
+ _leave(" = %p", volume);
+ return volume;
+}
+
+/*
+ * Compare two addresses.
+ */
+static int afs_compare_addrs(const struct sockaddr_rxrpc *srx_a,
+ const struct sockaddr_rxrpc *srx_b)
+{
+ short port_a, port_b;
+ int addr_a, addr_b, diff;
+
+ diff = (short)srx_a->transport_type - (short)srx_b->transport_type;
+ if (diff)
+ goto out;
+
+ switch (srx_a->transport_type) {
+ case AF_INET: {
+ const struct sockaddr_in *a = &srx_a->transport.sin;
+ const struct sockaddr_in *b = &srx_b->transport.sin;
+ addr_a = ntohl(a->sin_addr.s_addr);
+ addr_b = ntohl(b->sin_addr.s_addr);
+ diff = addr_a - addr_b;
+ if (diff == 0) {
+ port_a = ntohs(a->sin_port);
+ port_b = ntohs(b->sin_port);
+ diff = port_a - port_b;
+ }
+ break;
+ }
+
+ case AF_INET6: {
+ const struct sockaddr_in6 *a = &srx_a->transport.sin6;
+ const struct sockaddr_in6 *b = &srx_b->transport.sin6;
+ diff = memcmp(&a->sin6_addr, &b->sin6_addr, 16);
+ if (diff == 0) {
+ port_a = ntohs(a->sin6_port);
+ port_b = ntohs(b->sin6_port);
+ diff = port_a - port_b;
+ }
+ break;
+ }
+
+ default:
+ WARN_ON(1);
+ diff = 1;
+ }
+
+out:
+ return diff;
+}
+
+/*
+ * Compare the address lists of a pair of fileservers.
+ */
+static int afs_compare_fs_alists(const struct afs_server *server_a,
+ const struct afs_server *server_b)
+{
+ const struct afs_addr_list *la, *lb;
+ int a = 0, b = 0, addr_matches = 0;
+
+ la = rcu_dereference(server_a->addresses);
+ lb = rcu_dereference(server_b->addresses);
+
+ while (a < la->nr_addrs && b < lb->nr_addrs) {
+ const struct sockaddr_rxrpc *srx_a = &la->addrs[a];
+ const struct sockaddr_rxrpc *srx_b = &lb->addrs[b];
+ int diff = afs_compare_addrs(srx_a, srx_b);
+
+ if (diff < 0) {
+ a++;
+ } else if (diff > 0) {
+ b++;
+ } else {
+ addr_matches++;
+ a++;
+ b++;
+ }
+ }
+
+ return addr_matches;
+}
+
+/*
+ * Compare the fileserver lists of two volumes. The server lists are sorted in
+ * order of ascending UUID.
+ */
+static int afs_compare_volume_slists(const struct afs_volume *vol_a,
+ const struct afs_volume *vol_b)
+{
+ const struct afs_server_list *la, *lb;
+ int i, a = 0, b = 0, uuid_matches = 0, addr_matches = 0;
+
+ la = rcu_dereference(vol_a->servers);
+ lb = rcu_dereference(vol_b->servers);
+
+ for (i = 0; i < AFS_MAXTYPES; i++)
+ if (la->vids[i] != lb->vids[i])
+ return 0;
+
+ while (a < la->nr_servers && b < lb->nr_servers) {
+ const struct afs_server *server_a = la->servers[a].server;
+ const struct afs_server *server_b = lb->servers[b].server;
+ int diff = memcmp(&server_a->uuid, &server_b->uuid, sizeof(uuid_t));
+
+ if (diff < 0) {
+ a++;
+ } else if (diff > 0) {
+ b++;
+ } else {
+ uuid_matches++;
+ addr_matches += afs_compare_fs_alists(server_a, server_b);
+ a++;
+ b++;
+ }
+ }
+
+ _leave(" = %d [um %d]", addr_matches, uuid_matches);
+ return addr_matches;
+}
+
+/*
+ * Compare root.cell volumes.
+ */
+static int afs_compare_cell_roots(struct afs_cell *cell)
+{
+ struct afs_cell *p;
+
+ _enter("");
+
+ rcu_read_lock();
+
+ hlist_for_each_entry_rcu(p, &cell->net->proc_cells, proc_link) {
+ if (p == cell || p->alias_of)
+ continue;
+ if (!p->root_volume)
+ continue; /* Ignore cells that don't have a root.cell volume. */
+
+ if (afs_compare_volume_slists(cell->root_volume, p->root_volume) != 0)
+ goto is_alias;
+ }
+
+ rcu_read_unlock();
+ _leave(" = 0");
+ return 0;
+
+is_alias:
+ rcu_read_unlock();
+ cell->alias_of = afs_get_cell(p);
+ return 1;
+}
+
+/*
+ * Query the new cell for a volume from a cell we're already using.
+ */
+static int afs_query_for_alias_one(struct afs_cell *cell, struct key *key,
+ struct afs_cell *p)
+{
+ struct afs_volume *volume, *pvol = NULL;
+ int ret;
+
+ /* Arbitrarily pick a volume from the list. */
+ read_seqlock_excl(&p->volume_lock);
+ if (!RB_EMPTY_ROOT(&p->volumes))
+ pvol = afs_get_volume(rb_entry(p->volumes.rb_node,
+ struct afs_volume, cell_node),
+ afs_volume_trace_get_query_alias);
+ read_sequnlock_excl(&p->volume_lock);
+ if (!pvol)
+ return 0;
+
+ _enter("%s:%s", cell->name, pvol->name);
+
+ /* And see if it's in the new cell. */
+ volume = afs_sample_volume(cell, key, pvol->name, pvol->name_len);
+ if (IS_ERR(volume)) {
+ afs_put_volume(cell->net, pvol, afs_volume_trace_put_query_alias);
+ if (PTR_ERR(volume) != -ENOMEDIUM)
+ return PTR_ERR(volume);
+ /* That volume is not in the new cell, so not an alias */
+ return 0;
+ }
+
+ /* The new cell has a like-named volume also - compare volume ID,
+ * server and address lists.
+ */
+ ret = 0;
+ if (pvol->vid == volume->vid) {
+ rcu_read_lock();
+ if (afs_compare_volume_slists(volume, pvol))
+ ret = 1;
+ rcu_read_unlock();
+ }
+
+ afs_put_volume(cell->net, volume, afs_volume_trace_put_query_alias);
+ afs_put_volume(cell->net, pvol, afs_volume_trace_put_query_alias);
+ return ret;
+}
+
+/*
+ * Query the new cell for volumes we know exist in cells we're already using.
+ */
+static int afs_query_for_alias(struct afs_cell *cell, struct key *key)
+{
+ struct afs_cell *p;
+
+ _enter("%s", cell->name);
+
+ if (mutex_lock_interruptible(&cell->net->proc_cells_lock) < 0)
+ return -ERESTARTSYS;
+
+ hlist_for_each_entry(p, &cell->net->proc_cells, proc_link) {
+ if (p == cell || p->alias_of)
+ continue;
+ if (RB_EMPTY_ROOT(&p->volumes))
+ continue;
+ if (p->root_volume)
+ continue; /* Ignore cells that have a root.cell volume. */
+ afs_get_cell(p);
+ mutex_unlock(&cell->net->proc_cells_lock);
+
+ if (afs_query_for_alias_one(cell, key, p) != 0)
+ goto is_alias;
+
+ if (mutex_lock_interruptible(&cell->net->proc_cells_lock) < 0) {
+ afs_put_cell(cell->net, p);
+ return -ERESTARTSYS;
+ }
+
+ afs_put_cell(cell->net, p);
+ }
+
+ mutex_unlock(&cell->net->proc_cells_lock);
+ _leave(" = 0");
+ return 0;
+
+is_alias:
+ cell->alias_of = p; /* Transfer our ref */
+ return 1;
+}
+
+/*
+ * Ask the cell's VL servers for the canonical cell name.
+ */
+static char *afs_vl_get_cell_name(struct afs_cell *cell, struct key *key)
+{
+ struct afs_vl_cursor vc;
+ char *cell_name = ERR_PTR(-EDESTADDRREQ);
+ bool skipped = false, not_skipped = false;
+ int ret;
+
+ if (!afs_begin_vlserver_operation(&vc, cell, key))
+ return ERR_PTR(-ERESTARTSYS);
+
+ while (afs_select_vlserver(&vc)) {
+ if (!test_bit(AFS_VLSERVER_FL_IS_YFS, &vc.server->flags)) {
+ vc.ac.error = -EOPNOTSUPP;
+ skipped = true;
+ continue;
+ }
+ not_skipped = true;
+ cell_name = afs_yfsvl_get_cell_name(&vc);
+ }
+
+ ret = afs_end_vlserver_operation(&vc);
+ if (skipped && !not_skipped)
+ ret = -EOPNOTSUPP;
+ return ret < 0 ? ERR_PTR(ret) : cell_name;
+}
+
+static int yfs_check_canonical_cell_name(struct afs_cell *cell, struct key *key)
+{
+ struct afs_cell *master;
+ char *cell_name;
+
+ cell_name = afs_vl_get_cell_name(cell, key);
+ if (IS_ERR(cell_name))
+ return PTR_ERR(cell_name);
+
+ if (strcmp(cell_name, cell->name) == 0) {
+ kfree(cell_name);
+ return 0;
+ }
+
+ master = afs_lookup_cell(cell->net, cell_name, strlen(cell_name),
+ NULL, false);
+ kfree(cell_name);
+ if (IS_ERR(master))
+ return PTR_ERR(master);
+
+ cell->alias_of = master; /* Transfer our ref */
+ return 1;
+}
+
+static int afs_do_cell_detect_alias(struct afs_cell *cell, struct key *key)
+{
+ struct afs_volume *root_volume;
+ int ret;
+
+ _enter("%s", cell->name);
+
+ ret = yfs_check_canonical_cell_name(cell, key);
+ if (ret != -EOPNOTSUPP)
+ return ret;
+
+ /* Try and get the root.cell volume for comparison with other cells */
+ root_volume = afs_sample_volume(cell, key, "root.cell", 9);
+ if (!IS_ERR(root_volume)) {
+ cell->root_volume = root_volume;
+ return afs_compare_cell_roots(cell);
+ }
+
+ if (PTR_ERR(root_volume) != -ENOMEDIUM)
+ return PTR_ERR(root_volume);
+
+	/* Okay, this cell doesn't have a root.cell volume. We need to
+ * locate some other random volume and use that to check.
+ */
+ return afs_query_for_alias(cell, key);
+}
+
+/*
+ * Check to see if a new cell is an alias of a cell we already have. At this
+ * point we have the cell's volume server list.
+ *
+ * Returns 0 if we didn't detect an alias, 1 if we found an alias and an error
+ * if we had problems gathering the data required. In the case that we did
+ * detect an alias, cell->alias_of is set to point to the assumed master.
+ */
+int afs_cell_detect_alias(struct afs_cell *cell, struct key *key)
+{
+ struct afs_net *net = cell->net;
+ int ret;
+
+ if (mutex_lock_interruptible(&net->cells_alias_lock) < 0)
+ return -ERESTARTSYS;
+
+ if (test_bit(AFS_CELL_FL_CHECK_ALIAS, &cell->flags)) {
+ ret = afs_do_cell_detect_alias(cell, key);
+ if (ret >= 0)
+ clear_bit_unlock(AFS_CELL_FL_CHECK_ALIAS, &cell->flags);
+ } else {
+ ret = cell->alias_of ? 1 : 0;
+ }
+
+ mutex_unlock(&net->cells_alias_lock);
+
+ if (ret == 1)
+ pr_notice("kAFS: Cell %s is an alias of %s\n",
+ cell->name, cell->alias_of->name);
+ return ret;
+}
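
afs_compare_fs_alists() and afs_compare_volume_slists() both depend on their inputs being sorted (addresses by value, servers by ascending UUID) so that a single two-pointer pass can count the entries the two lists share. A standalone sketch of that merge-style intersection count, using plain ints in place of addresses:

#include <stdio.h>

/* Count elements common to two ascending-sorted arrays in one linear
 * pass: advance whichever side holds the smaller element, count and
 * advance both on a match. */
static int count_matches(const int *a, int na, const int *b, int nb)
{
	int i = 0, j = 0, matches = 0;

	while (i < na && j < nb) {
		int diff = a[i] - b[j];

		if (diff < 0)
			i++;            /* a[i] too small, advance a */
		else if (diff > 0)
			j++;            /* b[j] too small, advance b */
		else {
			matches++;      /* equal: count, advance both */
			i++;
			j++;
		}
	}
	return matches;
}

int main(void)
{
	int a[] = { 1, 3, 5, 7 };
	int b[] = { 3, 4, 5, 8 };

	printf("matches=%d\n", count_matches(a, 4, b, 4)); /* prints 2 */
	return 0;
}
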
diff --git a/fs/afs/vl_rotate.c b/fs/afs/vl_rotate.c
index 72eacc14e6e1..f405ca8b240a 100644
--- a/fs/afs/vl_rotate.c
+++ b/fs/afs/vl_rotate.c
@@ -151,6 +151,10 @@ bool afs_select_vlserver(struct afs_vl_cursor *vc)
vc->error = error;
vc->flags |= AFS_VL_CURSOR_RETRY;
goto next_server;
+
+ case -EOPNOTSUPP:
+ _debug("notsupp");
+ goto next_server;
}
restart_from_beginning:
diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c
index 516e9a3bb5b4..fd82850cd424 100644
--- a/fs/afs/vlclient.c
+++ b/fs/afs/vlclient.c
@@ -82,6 +82,7 @@ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call)
for (j = 0; j < 6; j++)
uuid->node[j] = (u8)ntohl(xdr->node[j]);
+ entry->addr_version[n] = ntohl(uvldb->serverUnique[i]);
entry->nr_servers++;
}
@@ -447,8 +448,7 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call)
call->count2 = ntohl(*bp); /* Type or next count */
if (call->count > YFS_MAXENDPOINTS)
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_yvl_fsendpt_num);
+ return afs_protocol_error(call, afs_eproto_yvl_fsendpt_num);
alist = afs_alloc_addrlist(call->count, FS_SERVICE, AFS_FS_PORT);
if (!alist)
@@ -468,8 +468,7 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call)
size = sizeof(__be32) * (1 + 4 + 1);
break;
default:
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_yvl_fsendpt_type);
+ return afs_protocol_error(call, afs_eproto_yvl_fsendpt_type);
}
size += sizeof(__be32);
@@ -487,21 +486,20 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call)
switch (call->count2) {
case YFS_ENDPOINT_IPV4:
if (ntohl(bp[0]) != sizeof(__be32) * 2)
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_yvl_fsendpt4_len);
+ return afs_protocol_error(
+ call, afs_eproto_yvl_fsendpt4_len);
afs_merge_fs_addr4(alist, bp[1], ntohl(bp[2]));
bp += 3;
break;
case YFS_ENDPOINT_IPV6:
if (ntohl(bp[0]) != sizeof(__be32) * 5)
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_yvl_fsendpt6_len);
+ return afs_protocol_error(
+ call, afs_eproto_yvl_fsendpt6_len);
afs_merge_fs_addr6(alist, bp + 1, ntohl(bp[5]));
bp += 6;
break;
default:
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_yvl_fsendpt_type);
+ return afs_protocol_error(call, afs_eproto_yvl_fsendpt_type);
}
/* Got either the type of the next entry or the count of
@@ -519,8 +517,7 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call)
if (!call->count)
goto end;
if (call->count > YFS_MAXENDPOINTS)
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_yvl_vlendpt_type);
+ return afs_protocol_error(call, afs_eproto_yvl_vlendpt_type);
afs_extract_to_buf(call, 1 * sizeof(__be32));
call->unmarshall = 3;
@@ -547,8 +544,7 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call)
size = sizeof(__be32) * (1 + 4 + 1);
break;
default:
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_yvl_vlendpt_type);
+ return afs_protocol_error(call, afs_eproto_yvl_vlendpt_type);
}
if (call->count > 1)
@@ -566,19 +562,18 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call)
switch (call->count2) {
case YFS_ENDPOINT_IPV4:
if (ntohl(bp[0]) != sizeof(__be32) * 2)
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_yvl_vlendpt4_len);
+ return afs_protocol_error(
+ call, afs_eproto_yvl_vlendpt4_len);
bp += 3;
break;
case YFS_ENDPOINT_IPV6:
if (ntohl(bp[0]) != sizeof(__be32) * 5)
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_yvl_vlendpt6_len);
+ return afs_protocol_error(
+ call, afs_eproto_yvl_vlendpt6_len);
bp += 6;
break;
default:
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_yvl_vlendpt_type);
+ return afs_protocol_error(call, afs_eproto_yvl_vlendpt_type);
}
/* Got either the type of the next entry or the count of
@@ -650,3 +645,114 @@ struct afs_addr_list *afs_yfsvl_get_endpoints(struct afs_vl_cursor *vc,
afs_make_call(&vc->ac, call, GFP_KERNEL);
return (struct afs_addr_list *)afs_wait_for_call_to_complete(call, &vc->ac);
}
+
+/*
+ * Deliver reply data to a YFSVL.GetCellName operation.
+ */
+static int afs_deliver_yfsvl_get_cell_name(struct afs_call *call)
+{
+ char *cell_name;
+ u32 namesz, paddedsz;
+ int ret;
+
+ _enter("{%u,%zu/%u}",
+ call->unmarshall, iov_iter_count(call->iter), call->count);
+
+ switch (call->unmarshall) {
+ case 0:
+ afs_extract_to_tmp(call);
+ call->unmarshall++;
+
+ /* Fall through - and extract the cell name length */
+ case 1:
+ ret = afs_extract_data(call, true);
+ if (ret < 0)
+ return ret;
+
+ namesz = ntohl(call->tmp);
+ if (namesz > AFS_MAXCELLNAME)
+ return afs_protocol_error(call, afs_eproto_cellname_len);
+ paddedsz = (namesz + 3) & ~3;
+ call->count = namesz;
+ call->count2 = paddedsz - namesz;
+
+ cell_name = kmalloc(namesz + 1, GFP_KERNEL);
+ if (!cell_name)
+ return -ENOMEM;
+ cell_name[namesz] = 0;
+ call->ret_str = cell_name;
+
+ afs_extract_begin(call, cell_name, namesz);
+ call->unmarshall++;
+
+ /* Fall through - and extract cell name */
+ case 2:
+ ret = afs_extract_data(call, true);
+ if (ret < 0)
+ return ret;
+
+ afs_extract_discard(call, call->count2);
+ call->unmarshall++;
+
+ /* Fall through - and extract padding */
+ case 3:
+ ret = afs_extract_data(call, false);
+ if (ret < 0)
+ return ret;
+
+ call->unmarshall++;
+ break;
+ }
+
+ _leave(" = 0 [done]");
+ return 0;
+}
+
+static void afs_destroy_yfsvl_get_cell_name(struct afs_call *call)
+{
+ kfree(call->ret_str);
+ afs_flat_call_destructor(call);
+}
+
+/*
+ * VL.GetCapabilities operation type
+ */
+static const struct afs_call_type afs_YFSVLGetCellName = {
+ .name = "YFSVL.GetCellName",
+ .op = afs_YFSVL_GetCellName,
+ .deliver = afs_deliver_yfsvl_get_cell_name,
+ .destructor = afs_destroy_yfsvl_get_cell_name,
+};
+
+/*
+ * Fetch the name of the cell that a volume location server believes it is
+ * in, using the YFS-only YFSVL.GetCellName RPC.
+ *
+ * We use this to check whether the cell name we were given is the canonical
+ * name or an alias of it.
+ */
+char *afs_yfsvl_get_cell_name(struct afs_vl_cursor *vc)
+{
+ struct afs_call *call;
+ struct afs_net *net = vc->cell->net;
+ __be32 *bp;
+
+ _enter("");
+
+ call = afs_alloc_flat_call(net, &afs_YFSVLGetCellName, 1 * 4, 0);
+ if (!call)
+ return ERR_PTR(-ENOMEM);
+
+ call->key = vc->key;
+ call->ret_str = NULL;
+ call->max_lifespan = AFS_VL_MAX_LIFESPAN;
+
+ /* marshall the parameters */
+ bp = call->request;
+ *bp++ = htonl(YVLGETCELLNAME);
+
+ /* Can't take a ref on server */
+ trace_afs_make_vl_call(call);
+ afs_make_call(&vc->ac, call, GFP_KERNEL);
+ return (char *)afs_wait_for_call_to_complete(call, &vc->ac);
+}
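
The GetCellName unmarshaller reads the XDR length word, then the name itself, then discards the bytes that pad the string out to a 4-byte boundary, computing paddedsz = (namesz + 3) & ~3. A small standalone program showing that padding arithmetic:

#include <stdio.h>
#include <stdint.h>

/* XDR strings are padded to a 4-byte boundary; the receiver reads the
 * length word, then the name, then discards paddedsz - namesz bytes,
 * exactly as the unmarshalling state machine above does. */
int main(void)
{
	uint32_t sizes[] = { 0, 1, 3, 4, 5, 8, 11 };

	for (int i = 0; i < 7; i++) {
		uint32_t namesz = sizes[i];
		uint32_t paddedsz = (namesz + 3) & ~3u;

		printf("namesz=%2u paddedsz=%2u discard=%u\n",
		       namesz, paddedsz, paddedsz - namesz);
	}
	return 0;
}
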
diff --git a/fs/afs/volume.c b/fs/afs/volume.c
index 4310336b9bb8..9bc0509e3634 100644
--- a/fs/afs/volume.c
+++ b/fs/afs/volume.c
@@ -13,6 +13,56 @@ unsigned __read_mostly afs_volume_gc_delay = 10;
unsigned __read_mostly afs_volume_record_life = 60 * 60;
/*
+ * Insert a volume into a cell. If there's an existing volume record, that is
+ * returned instead with a ref held.
+ */
+static struct afs_volume *afs_insert_volume_into_cell(struct afs_cell *cell,
+ struct afs_volume *volume)
+{
+ struct afs_volume *p;
+ struct rb_node *parent = NULL, **pp;
+
+ write_seqlock(&cell->volume_lock);
+
+ pp = &cell->volumes.rb_node;
+ while (*pp) {
+ parent = *pp;
+ p = rb_entry(parent, struct afs_volume, cell_node);
+		if (p->vid < volume->vid) {
+			pp = &(*pp)->rb_right;
+		} else if (p->vid > volume->vid) {
+			pp = &(*pp)->rb_left;
+ } else {
+ volume = afs_get_volume(p, afs_volume_trace_get_cell_insert);
+ goto found;
+ }
+ }
+
+ rb_link_node_rcu(&volume->cell_node, parent, pp);
+ rb_insert_color(&volume->cell_node, &cell->volumes);
+ hlist_add_head_rcu(&volume->proc_link, &cell->proc_volumes);
+
+found:
+ write_sequnlock(&cell->volume_lock);
+ return volume;
+}
+
+static void afs_remove_volume_from_cell(struct afs_volume *volume)
+{
+ struct afs_cell *cell = volume->cell;
+
+ if (!hlist_unhashed(&volume->proc_link)) {
+ trace_afs_volume(volume->vid, atomic_read(&volume->usage),
+ afs_volume_trace_remove);
+ write_seqlock(&cell->volume_lock);
+ hlist_del_rcu(&volume->proc_link);
+ rb_erase(&volume->cell_node, &cell->volumes);
+ write_sequnlock(&cell->volume_lock);
+ }
+}
+
+/*
* Allocate a volume record and load it up from a vldb record.
*/
static struct afs_volume *afs_alloc_volume(struct afs_fs_context *params,
@@ -39,7 +89,7 @@ static struct afs_volume *afs_alloc_volume(struct afs_fs_context *params,
volume->name_len = vldb->name_len;
atomic_set(&volume->usage, 1);
- INIT_LIST_HEAD(&volume->proc_link);
+ INIT_HLIST_NODE(&volume->proc_link);
rwlock_init(&volume->servers_lock);
rwlock_init(&volume->cb_v_break_lock);
memcpy(volume->name, vldb->name, vldb->name_len + 1);
@@ -51,7 +101,8 @@ static struct afs_volume *afs_alloc_volume(struct afs_fs_context *params,
}
refcount_set(&slist->usage, 1);
- volume->servers = slist;
+ rcu_assign_pointer(volume->servers, slist);
+ trace_afs_volume(volume->vid, 1, afs_volume_trace_alloc);
return volume;
error_1:
@@ -62,6 +113,25 @@ error_0:
}
/*
+ * Look up or allocate a volume record.
+ */
+static struct afs_volume *afs_lookup_volume(struct afs_fs_context *params,
+ struct afs_vldb_entry *vldb,
+ unsigned long type_mask)
+{
+ struct afs_volume *candidate, *volume;
+
+ candidate = afs_alloc_volume(params, vldb, type_mask);
+ if (IS_ERR(candidate))
+ return candidate;
+
+ volume = afs_insert_volume_into_cell(params->cell, candidate);
+ if (volume != candidate)
+ afs_put_volume(params->net, candidate, afs_volume_trace_put_cell_dup);
+ return volume;
+}
+
+/*
* Look up a VLDB record for a volume.
*/
static struct afs_vldb_entry *afs_vl_lookup_vldb(struct afs_cell *cell,
@@ -138,7 +208,7 @@ struct afs_volume *afs_create_volume(struct afs_fs_context *params)
}
type_mask = 1UL << params->type;
- volume = afs_alloc_volume(params, vldb, type_mask);
+ volume = afs_lookup_volume(params, vldb, type_mask);
error:
kfree(vldb);
@@ -156,23 +226,42 @@ static void afs_destroy_volume(struct afs_net *net, struct afs_volume *volume)
ASSERTCMP(volume->cache, ==, NULL);
#endif
- afs_put_serverlist(net, volume->servers);
+ afs_remove_volume_from_cell(volume);
+ afs_put_serverlist(net, rcu_access_pointer(volume->servers));
afs_put_cell(net, volume->cell);
- kfree(volume);
+ trace_afs_volume(volume->vid, atomic_read(&volume->usage),
+ afs_volume_trace_free);
+ kfree_rcu(volume, rcu);
_leave(" [destroyed]");
}
/*
- * Drop a reference on a volume record.
+ * Get a reference on a volume record.
*/
-void afs_put_volume(struct afs_cell *cell, struct afs_volume *volume)
+struct afs_volume *afs_get_volume(struct afs_volume *volume,
+ enum afs_volume_trace reason)
{
if (volume) {
- _enter("%s", volume->name);
+ int u = atomic_inc_return(&volume->usage);
+ trace_afs_volume(volume->vid, u, reason);
+ }
+ return volume;
+}
+
- if (atomic_dec_and_test(&volume->usage))
- afs_destroy_volume(cell->net, volume);
+/*
+ * Drop a reference on a volume record.
+ */
+void afs_put_volume(struct afs_net *net, struct afs_volume *volume,
+ enum afs_volume_trace reason)
+{
+ if (volume) {
+ afs_volid_t vid = volume->vid;
+ int u = atomic_dec_return(&volume->usage);
+ trace_afs_volume(vid, u, reason);
+ if (u == 0)
+ afs_destroy_volume(net, volume);
}
}
@@ -188,10 +277,6 @@ void afs_activate_volume(struct afs_volume *volume)
NULL, 0,
volume, 0, true);
#endif
-
- write_lock(&volume->cell->proc_lock);
- list_add_tail(&volume->proc_link, &volume->cell->proc_volumes);
- write_unlock(&volume->cell->proc_lock);
}
/*
@@ -201,10 +286,6 @@ void afs_deactivate_volume(struct afs_volume *volume)
{
_enter("%s", volume->name);
- write_lock(&volume->cell->proc_lock);
- list_del_init(&volume->proc_link);
- write_unlock(&volume->cell->proc_lock);
-
#ifdef CONFIG_AFS_FSCACHE
fscache_relinquish_cookie(volume->cache, NULL,
test_bit(AFS_VOLUME_DELETED, &volume->flags));
@@ -256,17 +337,17 @@ static int afs_update_volume_status(struct afs_volume *volume, struct key *key)
write_lock(&volume->servers_lock);
discard = new;
- old = volume->servers;
+ old = rcu_dereference_protected(volume->servers,
+ lockdep_is_held(&volume->servers_lock));
if (afs_annotate_server_list(new, old)) {
new->seq = volume->servers_seq + 1;
- volume->servers = new;
+ rcu_assign_pointer(volume->servers, new);
smp_wmb();
volume->servers_seq++;
discard = old;
}
volume->update_at = ktime_get_real_seconds() + afs_volume_record_life;
- clear_bit(AFS_VOLUME_NEEDS_UPDATE, &volume->flags);
write_unlock(&volume->servers_lock);
ret = 0;
@@ -281,25 +362,27 @@ error:
/*
* Make sure the volume record is up to date.
*/
-int afs_check_volume_status(struct afs_volume *volume, struct afs_fs_cursor *fc)
+int afs_check_volume_status(struct afs_volume *volume, struct afs_operation *op)
{
- time64_t now = ktime_get_real_seconds();
int ret, retries = 0;
_enter("");
- if (volume->update_at <= now)
- set_bit(AFS_VOLUME_NEEDS_UPDATE, &volume->flags);
-
retry:
- if (!test_bit(AFS_VOLUME_NEEDS_UPDATE, &volume->flags) &&
- !test_bit(AFS_VOLUME_WAIT, &volume->flags)) {
- _leave(" = 0");
- return 0;
- }
-
+ if (test_bit(AFS_VOLUME_WAIT, &volume->flags))
+ goto wait;
+ if (volume->update_at <= ktime_get_real_seconds() ||
+ test_bit(AFS_VOLUME_NEEDS_UPDATE, &volume->flags))
+ goto update;
+ _leave(" = 0");
+ return 0;
+
+update:
if (!test_and_set_bit_lock(AFS_VOLUME_UPDATING, &volume->flags)) {
- ret = afs_update_volume_status(volume, fc->key);
+ clear_bit(AFS_VOLUME_NEEDS_UPDATE, &volume->flags);
+ ret = afs_update_volume_status(volume, op->key);
+ if (ret < 0)
+ set_bit(AFS_VOLUME_NEEDS_UPDATE, &volume->flags);
clear_bit_unlock(AFS_VOLUME_WAIT, &volume->flags);
clear_bit_unlock(AFS_VOLUME_UPDATING, &volume->flags);
wake_up_bit(&volume->flags, AFS_VOLUME_WAIT);
@@ -307,14 +390,15 @@ retry:
return ret;
}
+wait:
if (!test_bit(AFS_VOLUME_WAIT, &volume->flags)) {
_leave(" = 0 [no wait]");
return 0;
}
ret = wait_on_bit(&volume->flags, AFS_VOLUME_WAIT,
- (fc->flags & AFS_FS_CURSOR_INTR) ?
- TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
+ (op->flags & AFS_OPERATION_UNINTR) ?
+ TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
if (ret == -ERESTARTSYS) {
_leave(" = %d", ret);
return ret;
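
afs_insert_volume_into_cell() above is an insert-or-reuse walk: descend the rb-tree keyed on volume ID, return the existing record (with a ref taken) on a match, otherwise link the candidate at the point where the search bottomed out. A simplified userspace analog using a plain binary search tree; all names here are invented and no locking or refcounting is shown:

#include <stdio.h>

struct vol {
	unsigned long long vid;
	struct vol *left, *right;
};

/* Walk the tree via a pointer-to-link so the insertion point is already
 * in hand when the search falls off the tree. */
static struct vol *insert_or_reuse(struct vol **root, struct vol *candidate)
{
	struct vol **pp = root;

	while (*pp) {
		struct vol *p = *pp;

		if (p->vid < candidate->vid)
			pp = &p->right;     /* candidate is larger: go right */
		else if (p->vid > candidate->vid)
			pp = &p->left;      /* candidate is smaller: go left */
		else
			return p;           /* duplicate: hand back existing */
	}
	*pp = candidate;                    /* new key: link it in */
	return candidate;
}

int main(void)
{
	struct vol *root = NULL;
	struct vol a = { .vid = 10 }, b = { .vid = 5 }, dup = { .vid = 10 };

	insert_or_reuse(&root, &a);
	insert_or_reuse(&root, &b);
	printf("dup reused: %s\n",
	       insert_or_reuse(&root, &dup) == &a ? "yes" : "no");
	return 0;
}
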
diff --git a/fs/afs/write.c b/fs/afs/write.c
index cb76566763db..768497f82aee 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -349,82 +349,112 @@ static void afs_pages_written_back(struct afs_vnode *vnode,
}
/*
- * write to a file
+ * Find a key to use for the writeback. We cached the keys used to author the
+ * writes on the vnode. *_wbk will contain the last writeback key used or NULL
+ * and we need to start from there if it's set.
*/
-static int afs_store_data(struct address_space *mapping,
- pgoff_t first, pgoff_t last,
- unsigned offset, unsigned to)
+static int afs_get_writeback_key(struct afs_vnode *vnode,
+ struct afs_wb_key **_wbk)
{
- struct afs_vnode *vnode = AFS_FS_I(mapping->host);
- struct afs_fs_cursor fc;
- struct afs_status_cb *scb;
struct afs_wb_key *wbk = NULL;
struct list_head *p;
int ret = -ENOKEY, ret2;
- _enter("%s{%llx:%llu.%u},%lx,%lx,%x,%x",
- vnode->volume->name,
- vnode->fid.vid,
- vnode->fid.vnode,
- vnode->fid.unique,
- first, last, offset, to);
-
- scb = kzalloc(sizeof(struct afs_status_cb), GFP_NOFS);
- if (!scb)
- return -ENOMEM;
-
spin_lock(&vnode->wb_lock);
- p = vnode->wb_keys.next;
+ if (*_wbk)
+ p = (*_wbk)->vnode_link.next;
+ else
+ p = vnode->wb_keys.next;
- /* Iterate through the list looking for a valid key to use. */
-try_next_key:
while (p != &vnode->wb_keys) {
wbk = list_entry(p, struct afs_wb_key, vnode_link);
_debug("wbk %u", key_serial(wbk->key));
ret2 = key_validate(wbk->key);
- if (ret2 == 0)
- goto found_key;
+ if (ret2 == 0) {
+ refcount_inc(&wbk->usage);
+ _debug("USE WB KEY %u", key_serial(wbk->key));
+ break;
+ }
+
+ wbk = NULL;
if (ret == -ENOKEY)
ret = ret2;
p = p->next;
}
spin_unlock(&vnode->wb_lock);
- afs_put_wb_key(wbk);
- kfree(scb);
- _leave(" = %d [no keys]", ret);
- return ret;
+	if (*_wbk)
+		afs_put_wb_key(*_wbk);
+	if (!wbk)
+		return ret;
+	*_wbk = wbk;
+	return 0;
+}
-found_key:
- refcount_inc(&wbk->usage);
- spin_unlock(&vnode->wb_lock);
+static void afs_store_data_success(struct afs_operation *op)
+{
+ struct afs_vnode *vnode = op->file[0].vnode;
- _debug("USE WB KEY %u", key_serial(wbk->key));
+ afs_vnode_commit_status(op, &op->file[0]);
+ if (op->error == 0) {
+ afs_pages_written_back(vnode, op->store.first, op->store.last);
+ afs_stat_v(vnode, n_stores);
+ atomic_long_add((op->store.last * PAGE_SIZE + op->store.last_to) -
+ (op->store.first * PAGE_SIZE + op->store.first_offset),
+ &afs_v2net(vnode)->n_store_bytes);
+ }
+}
- ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, vnode, wbk->key, false)) {
- afs_dataversion_t data_version = vnode->status.data_version + 1;
+static const struct afs_operation_ops afs_store_data_operation = {
+ .issue_afs_rpc = afs_fs_store_data,
+ .issue_yfs_rpc = yfs_fs_store_data,
+ .success = afs_store_data_success,
+};
- while (afs_select_fileserver(&fc)) {
- fc.cb_break = afs_calc_vnode_cb_break(vnode);
- afs_fs_store_data(&fc, mapping, first, last, offset, to, scb);
- }
+/*
+ * write to a file
+ */
+static int afs_store_data(struct address_space *mapping,
+ pgoff_t first, pgoff_t last,
+ unsigned offset, unsigned to)
+{
+ struct afs_vnode *vnode = AFS_FS_I(mapping->host);
+ struct afs_operation *op;
+ struct afs_wb_key *wbk = NULL;
+ int ret;
- afs_check_for_remote_deletion(&fc, vnode);
- afs_vnode_commit_status(&fc, vnode, fc.cb_break,
- &data_version, scb);
- if (fc.ac.error == 0)
- afs_pages_written_back(vnode, first, last);
- ret = afs_end_vnode_operation(&fc);
+ _enter("%s{%llx:%llu.%u},%lx,%lx,%x,%x",
+ vnode->volume->name,
+ vnode->fid.vid,
+ vnode->fid.vnode,
+ vnode->fid.unique,
+ first, last, offset, to);
+
+ ret = afs_get_writeback_key(vnode, &wbk);
+ if (ret) {
+ _leave(" = %d [no keys]", ret);
+ return ret;
}
- switch (ret) {
- case 0:
- afs_stat_v(vnode, n_stores);
- atomic_long_add((last * PAGE_SIZE + to) -
- (first * PAGE_SIZE + offset),
- &afs_v2net(vnode)->n_store_bytes);
- break;
+ op = afs_alloc_operation(wbk->key, vnode->volume);
+ if (IS_ERR(op)) {
+ afs_put_wb_key(wbk);
+ return -ENOMEM;
+ }
+
+ afs_op_set_vnode(op, 0, vnode);
+ op->file[0].dv_delta = 1;
+ op->store.mapping = mapping;
+ op->store.first = first;
+ op->store.last = last;
+ op->store.first_offset = offset;
+ op->store.last_to = to;
+ op->mtime = vnode->vfs_inode.i_mtime;
+ op->ops = &afs_store_data_operation;
+
+try_next_key:
+ afs_begin_vnode_operation(op);
+ afs_wait_for_operation(op);
+
+ switch (op->error) {
case -EACCES:
case -EPERM:
case -ENOKEY:
@@ -432,16 +462,19 @@ found_key:
case -EKEYREJECTED:
case -EKEYREVOKED:
_debug("next");
- spin_lock(&vnode->wb_lock);
- p = wbk->vnode_link.next;
- afs_put_wb_key(wbk);
- goto try_next_key;
+
+ ret = afs_get_writeback_key(vnode, &wbk);
+ if (ret == 0) {
+ key_put(op->key);
+ op->key = key_get(wbk->key);
+ goto try_next_key;
+ }
+ break;
}
afs_put_wb_key(wbk);
- kfree(scb);
- _leave(" = %d", ret);
- return ret;
+ _leave(" = %d", op->error);
+ return afs_put_operation(op);
}
/*
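
The writeback path above rotates through the keys cached on the vnode: it retries the store with the next key whenever the server rejects the current one with a permission-class error, and gives up once the list is exhausted. A standalone sketch of that rotation loop, with the key check stubbed out and keys represented as plain ints (ENOKEY is taken from Linux errno.h):

#include <stdio.h>
#include <errno.h>

static int try_store_with_key(int key)
{
	return key == 3 ? 0 : -EACCES;  /* pretend only key 3 is accepted */
}

/* Try each cached key in turn; permission-class failures advance to the
 * next key, success or a hard error stops the rotation. */
static int store_with_key_rotation(const int *keys, int nr_keys)
{
	int ret = -ENOKEY;

	for (int i = 0; i < nr_keys; i++) {
		ret = try_store_with_key(keys[i]);
		if (ret != -EACCES && ret != -EPERM)
			break;          /* success or non-permission error */
	}
	return ret;
}

int main(void)
{
	int keys[] = { 1, 2, 3 };

	printf("ret=%d\n", store_with_key_rotation(keys, 3)); /* prints 0 */
	return 0;
}
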
diff --git a/fs/afs/xattr.c b/fs/afs/xattr.c
index 7af41fd5f3ee..84f3c4f57531 100644
--- a/fs/afs/xattr.c
+++ b/fs/afs/xattr.c
@@ -35,6 +35,25 @@ ssize_t afs_listxattr(struct dentry *dentry, char *buffer, size_t size)
}
/*
+ * Deal with the result of a successful fetch ACL operation.
+ */
+static void afs_acl_success(struct afs_operation *op)
+{
+ afs_vnode_commit_status(op, &op->file[0]);
+}
+
+static void afs_acl_put(struct afs_operation *op)
+{
+ kfree(op->acl);
+}
+
+static const struct afs_operation_ops afs_fetch_acl_operation = {
+ .issue_afs_rpc = afs_fs_fetch_acl,
+ .success = afs_acl_success,
+ .put = afs_acl_put,
+};
+
+/*
* Get a file's ACL.
*/
static int afs_xattr_get_acl(const struct xattr_handler *handler,
@@ -42,37 +61,23 @@ static int afs_xattr_get_acl(const struct xattr_handler *handler,
struct inode *inode, const char *name,
void *buffer, size_t size)
{
- struct afs_fs_cursor fc;
- struct afs_status_cb *scb;
+ struct afs_operation *op;
struct afs_vnode *vnode = AFS_FS_I(inode);
struct afs_acl *acl = NULL;
- struct key *key;
- int ret = -ENOMEM;
+ int ret;
- scb = kzalloc(sizeof(struct afs_status_cb), GFP_NOFS);
- if (!scb)
- goto error;
-
- key = afs_request_key(vnode->volume->cell);
- if (IS_ERR(key)) {
- ret = PTR_ERR(key);
- goto error_scb;
- }
+ op = afs_alloc_operation(NULL, vnode->volume);
+ if (IS_ERR(op))
+ return -ENOMEM;
- ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, vnode, key, true)) {
- afs_dataversion_t data_version = vnode->status.data_version;
-
- while (afs_select_fileserver(&fc)) {
- fc.cb_break = afs_calc_vnode_cb_break(vnode);
- acl = afs_fs_fetch_acl(&fc, scb);
- }
+ afs_op_set_vnode(op, 0, vnode);
+ op->ops = &afs_fetch_acl_operation;
- afs_check_for_remote_deletion(&fc, fc.vnode);
- afs_vnode_commit_status(&fc, vnode, fc.cb_break,
- &data_version, scb);
- ret = afs_end_vnode_operation(&fc);
- }
+ afs_begin_vnode_operation(op);
+ afs_wait_for_operation(op);
+ acl = op->acl;
+ op->acl = NULL;
+ ret = afs_put_operation(op);
if (ret == 0) {
ret = acl->size;
@@ -80,18 +85,37 @@ static int afs_xattr_get_acl(const struct xattr_handler *handler,
if (acl->size <= size)
memcpy(buffer, acl->data, acl->size);
else
- ret = -ERANGE;
+				ret = -ERANGE;
}
- kfree(acl);
}
- key_put(key);
-error_scb:
- kfree(scb);
-error:
+ kfree(acl);
return ret;
}
+static bool afs_make_acl(struct afs_operation *op,
+ const void *buffer, size_t size)
+{
+ struct afs_acl *acl;
+
+ acl = kmalloc(sizeof(*acl) + size, GFP_KERNEL);
+ if (!acl) {
+ afs_op_nomem(op);
+ return false;
+ }
+
+ acl->size = size;
+ memcpy(acl->data, buffer, size);
+ op->acl = acl;
+ return true;
+}
+
+static const struct afs_operation_ops afs_store_acl_operation = {
+ .issue_afs_rpc = afs_fs_store_acl,
+ .success = afs_acl_success,
+ .put = afs_acl_put,
+};
+
/*
* Set a file's AFS3 ACL.
*/
@@ -100,55 +124,22 @@ static int afs_xattr_set_acl(const struct xattr_handler *handler,
struct inode *inode, const char *name,
const void *buffer, size_t size, int flags)
{
- struct afs_fs_cursor fc;
- struct afs_status_cb *scb;
+ struct afs_operation *op;
struct afs_vnode *vnode = AFS_FS_I(inode);
- struct afs_acl *acl = NULL;
- struct key *key;
- int ret = -ENOMEM;
if (flags == XATTR_CREATE)
return -EINVAL;
- scb = kzalloc(sizeof(struct afs_status_cb), GFP_NOFS);
- if (!scb)
- goto error;
-
- acl = kmalloc(sizeof(*acl) + size, GFP_KERNEL);
- if (!acl)
- goto error_scb;
-
- key = afs_request_key(vnode->volume->cell);
- if (IS_ERR(key)) {
- ret = PTR_ERR(key);
- goto error_acl;
- }
-
- acl->size = size;
- memcpy(acl->data, buffer, size);
-
- ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, vnode, key, true)) {
- afs_dataversion_t data_version = vnode->status.data_version;
+ op = afs_alloc_operation(NULL, vnode->volume);
+ if (IS_ERR(op))
+ return -ENOMEM;
- while (afs_select_fileserver(&fc)) {
- fc.cb_break = afs_calc_vnode_cb_break(vnode);
- afs_fs_store_acl(&fc, acl, scb);
- }
+ afs_op_set_vnode(op, 0, vnode);
+ if (!afs_make_acl(op, buffer, size))
+ return afs_put_operation(op);
- afs_check_for_remote_deletion(&fc, fc.vnode);
- afs_vnode_commit_status(&fc, vnode, fc.cb_break,
- &data_version, scb);
- ret = afs_end_vnode_operation(&fc);
- }
-
- key_put(key);
-error_acl:
- kfree(acl);
-error_scb:
- kfree(scb);
-error:
- return ret;
+ op->ops = &afs_store_acl_operation;
+ return afs_do_sync_operation(op);
}
static const struct xattr_handler afs_xattr_afs_acl_handler = {
@@ -157,6 +148,17 @@ static const struct xattr_handler afs_xattr_afs_acl_handler = {
.set = afs_xattr_set_acl,
};
+static void yfs_acl_put(struct afs_operation *op)
+{
+ yfs_free_opaque_acl(op->yacl);
+}
+
+static const struct afs_operation_ops yfs_fetch_opaque_acl_operation = {
+ .issue_yfs_rpc = yfs_fs_fetch_opaque_acl,
+ .success = afs_acl_success,
+ /* Don't free op->yacl in .put here */
+};
+
/*
* Get a file's YFS ACL.
*/
@@ -165,11 +167,9 @@ static int afs_xattr_get_yfs(const struct xattr_handler *handler,
struct inode *inode, const char *name,
void *buffer, size_t size)
{
- struct afs_fs_cursor fc;
- struct afs_status_cb *scb;
+ struct afs_operation *op;
struct afs_vnode *vnode = AFS_FS_I(inode);
struct yfs_acl *yacl = NULL;
- struct key *key;
char buf[16], *data;
int which = 0, dsize, ret = -ENOMEM;
@@ -193,75 +193,62 @@ static int afs_xattr_get_yfs(const struct xattr_handler *handler,
else if (which == 3)
yacl->flags |= YFS_ACL_WANT_VOL_ACL;
- scb = kzalloc(sizeof(struct afs_status_cb), GFP_NOFS);
- if (!scb)
+ op = afs_alloc_operation(NULL, vnode->volume);
+ if (IS_ERR(op))
goto error_yacl;
- key = afs_request_key(vnode->volume->cell);
- if (IS_ERR(key)) {
- ret = PTR_ERR(key);
- goto error_scb;
- }
+ afs_op_set_vnode(op, 0, vnode);
+ op->yacl = yacl;
+ op->ops = &yfs_fetch_opaque_acl_operation;
- ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, vnode, key, true)) {
- afs_dataversion_t data_version = vnode->status.data_version;
+ afs_begin_vnode_operation(op);
+ afs_wait_for_operation(op);
+ ret = afs_put_operation(op);
- while (afs_select_fileserver(&fc)) {
- fc.cb_break = afs_calc_vnode_cb_break(vnode);
- yfs_fs_fetch_opaque_acl(&fc, yacl, scb);
+ if (ret == 0) {
+ switch (which) {
+ case 0:
+ data = yacl->acl->data;
+ dsize = yacl->acl->size;
+ break;
+ case 1:
+ data = buf;
+ dsize = scnprintf(buf, sizeof(buf), "%u", yacl->inherit_flag);
+ break;
+ case 2:
+ data = buf;
+ dsize = scnprintf(buf, sizeof(buf), "%u", yacl->num_cleaned);
+ break;
+ case 3:
+ data = yacl->vol_acl->data;
+ dsize = yacl->vol_acl->size;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ goto error_yacl;
}
- afs_check_for_remote_deletion(&fc, fc.vnode);
- afs_vnode_commit_status(&fc, vnode, fc.cb_break,
- &data_version, scb);
- ret = afs_end_vnode_operation(&fc);
- }
-
- if (ret < 0)
- goto error_key;
-
- switch (which) {
- case 0:
- data = yacl->acl->data;
- dsize = yacl->acl->size;
- break;
- case 1:
- data = buf;
- dsize = scnprintf(buf, sizeof(buf), "%u", yacl->inherit_flag);
- break;
- case 2:
- data = buf;
- dsize = scnprintf(buf, sizeof(buf), "%u", yacl->num_cleaned);
- break;
- case 3:
- data = yacl->vol_acl->data;
- dsize = yacl->vol_acl->size;
- break;
- default:
- ret = -EOPNOTSUPP;
- goto error_key;
- }
-
- ret = dsize;
- if (size > 0) {
- if (dsize > size) {
- ret = -ERANGE;
- goto error_key;
+ ret = dsize;
+ if (size > 0) {
+ if (dsize <= size)
+ memcpy(buffer, data, dsize);
+ else
+ ret = -ERANGE;
}
- memcpy(buffer, data, dsize);
}
-error_key:
- key_put(key);
-error_scb:
- kfree(scb);
error_yacl:
yfs_free_opaque_acl(yacl);
error:
return ret;
}
+static const struct afs_operation_ops yfs_store_opaque_acl2_operation = {
+ .issue_yfs_rpc = yfs_fs_store_opaque_acl2,
+ .success = afs_acl_success,
+ .put = yfs_acl_put,
+};
+
/*
* Set a file's YFS ACL.
*/
@@ -270,56 +257,23 @@ static int afs_xattr_set_yfs(const struct xattr_handler *handler,
struct inode *inode, const char *name,
const void *buffer, size_t size, int flags)
{
- struct afs_fs_cursor fc;
- struct afs_status_cb *scb;
+ struct afs_operation *op;
struct afs_vnode *vnode = AFS_FS_I(inode);
- struct afs_acl *acl = NULL;
- struct key *key;
- int ret = -ENOMEM;
if (flags == XATTR_CREATE ||
strcmp(name, "acl") != 0)
return -EINVAL;
- scb = kzalloc(sizeof(struct afs_status_cb), GFP_NOFS);
- if (!scb)
- goto error;
-
- acl = kmalloc(sizeof(*acl) + size, GFP_KERNEL);
- if (!acl)
- goto error_scb;
+ op = afs_alloc_operation(NULL, vnode->volume);
+ if (IS_ERR(op))
+ return -ENOMEM;
- acl->size = size;
- memcpy(acl->data, buffer, size);
+ afs_op_set_vnode(op, 0, vnode);
+ if (!afs_make_acl(op, buffer, size))
+ return afs_put_operation(op);
- key = afs_request_key(vnode->volume->cell);
- if (IS_ERR(key)) {
- ret = PTR_ERR(key);
- goto error_acl;
- }
-
- ret = -ERESTARTSYS;
- if (afs_begin_vnode_operation(&fc, vnode, key, true)) {
- afs_dataversion_t data_version = vnode->status.data_version;
-
- while (afs_select_fileserver(&fc)) {
- fc.cb_break = afs_calc_vnode_cb_break(vnode);
- yfs_fs_store_opaque_acl2(&fc, acl, scb);
- }
-
- afs_check_for_remote_deletion(&fc, fc.vnode);
- afs_vnode_commit_status(&fc, vnode, fc.cb_break,
- &data_version, scb);
- ret = afs_end_vnode_operation(&fc);
- }
-
-error_acl:
- kfree(acl);
- key_put(key);
-error_scb:
- kfree(scb);
-error:
- return ret;
+ op->ops = &yfs_store_opaque_acl2_operation;
+ return afs_do_sync_operation(op);
}
static const struct xattr_handler afs_xattr_yfs_handler = {
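
Both ACL getters follow the usual getxattr contract that the handlers above implement: a zero-size call reports the space the value needs, and a buffer that is too small yields -ERANGE. A minimal userspace sketch of that copy-out convention, with invented names:

#include <stdio.h>
#include <string.h>
#include <errno.h>

/* Probe-or-copy: size == 0 asks for the required length; otherwise copy
 * the value out only if it fits, else report -ERANGE. */
static int xattr_copyout(const char *value, size_t vsize,
			 char *buffer, size_t size)
{
	if (size == 0)
		return (int)vsize;      /* probe: report required size */
	if (vsize > size)
		return -ERANGE;         /* caller's buffer is too small */
	memcpy(buffer, value, vsize);
	return (int)vsize;
}

int main(void)
{
	char buf[4];
	const char *acl = "rlidwk";

	printf("probe=%d\n", xattr_copyout(acl, 6, NULL, 0)); /* 6 */
	printf("small=%d\n", xattr_copyout(acl, 6, buf, 4));  /* -ERANGE */
	return 0;
}
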
diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
index fe413e7a5cf4..52d5af5fcd44 100644
--- a/fs/afs/yfsclient.c
+++ b/fs/afs/yfsclient.c
@@ -15,13 +15,6 @@
#include "xdr_fs.h"
#include "protocol_yfs.h"
-static const struct afs_fid afs_zero_fid;
-
-static inline void afs_use_fs_server(struct afs_call *call, struct afs_cb_interest *cbi)
-{
- call->cbi = afs_get_cb_interest(cbi);
-}
-
#define xdr_size(x) (sizeof(*x) / sizeof(__be32))
static void xdr_decode_YFSFid(const __be32 **_bp, struct afs_fid *fid)
@@ -79,6 +72,11 @@ static __be32 *xdr_encode_string(__be32 *bp, const char *p, unsigned int len)
return bp + len / sizeof(__be32);
}
+static __be32 *xdr_encode_name(__be32 *bp, const struct qstr *p)
+{
+ return xdr_encode_string(bp, p->name, p->len);
+}
+
static s64 linux_to_yfs_time(const struct timespec64 *t)
{
/* Convert to 100ns intervals. */
@@ -179,21 +177,20 @@ static void xdr_dump_bad(const __be32 *bp)
/*
* Decode a YFSFetchStatus block
*/
-static int xdr_decode_YFSFetchStatus(const __be32 **_bp,
- struct afs_call *call,
- struct afs_status_cb *scb)
+static void xdr_decode_YFSFetchStatus(const __be32 **_bp,
+ struct afs_call *call,
+ struct afs_status_cb *scb)
{
const struct yfs_xdr_YFSFetchStatus *xdr = (const void *)*_bp;
struct afs_file_status *status = &scb->status;
u32 type;
- int ret;
status->abort_code = ntohl(xdr->abort_code);
if (status->abort_code != 0) {
if (status->abort_code == VNOVNODE)
status->nlink = 0;
scb->have_error = true;
- goto good;
+ goto advance;
}
type = ntohl(xdr->type);
@@ -221,15 +218,13 @@ static int xdr_decode_YFSFetchStatus(const __be32 **_bp,
status->size = xdr_to_u64(xdr->size);
status->data_version = xdr_to_u64(xdr->data_version);
scb->have_status = true;
-good:
- ret = 0;
advance:
*_bp += xdr_size(xdr);
- return ret;
+ return;
bad:
xdr_dump_bad(*_bp);
- ret = afs_protocol_error(call, -EBADMSG, afs_eproto_bad_status);
+ afs_protocol_error(call, afs_eproto_bad_status);
goto advance;
}
@@ -339,6 +334,7 @@ static void xdr_decode_YFSFetchVolumeStatus(const __be32 **_bp,
*/
static int yfs_deliver_fs_status_cb_and_volsync(struct afs_call *call)
{
+ struct afs_operation *op = call->op;
const __be32 *bp;
int ret;
@@ -348,11 +344,9 @@ static int yfs_deliver_fs_status_cb_and_volsync(struct afs_call *call)
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb);
- if (ret < 0)
- return ret;
- xdr_decode_YFSCallBack(&bp, call, call->out_scb);
- xdr_decode_YFSVolSync(&bp, call->out_volsync);
+ xdr_decode_YFSFetchStatus(&bp, call, &op->file[0].scb);
+ xdr_decode_YFSCallBack(&bp, call, &op->file[0].scb);
+ xdr_decode_YFSVolSync(&bp, &op->volsync);
_leave(" = 0 [done]");
return 0;
@@ -364,6 +358,7 @@ static int yfs_deliver_fs_status_cb_and_volsync(struct afs_call *call)
*/
static int yfs_deliver_status_and_volsync(struct afs_call *call)
{
+ struct afs_operation *op = call->op;
const __be32 *bp;
int ret;
@@ -372,10 +367,8 @@ static int yfs_deliver_status_and_volsync(struct afs_call *call)
return ret;
bp = call->buffer;
- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb);
- if (ret < 0)
- return ret;
- xdr_decode_YFSVolSync(&bp, call->out_volsync);
+ xdr_decode_YFSFetchStatus(&bp, call, &op->file[0].scb);
+ xdr_decode_YFSVolSync(&bp, &op->volsync);
_leave(" = 0 [done]");
return 0;
@@ -394,44 +387,33 @@ static const struct afs_call_type yfs_RXYFSFetchStatus_vnode = {
/*
* Fetch the status information for a file.
*/
-int yfs_fs_fetch_file_status(struct afs_fs_cursor *fc, struct afs_status_cb *scb,
- struct afs_volsync *volsync)
+void yfs_fs_fetch_file_status(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
_enter(",%x,{%llx:%llu},,",
- key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
+ key_serial(op->key), vp->fid.vid, vp->fid.vnode);
- call = afs_alloc_flat_call(net, &yfs_RXYFSFetchStatus_vnode,
+ call = afs_alloc_flat_call(op->net, &yfs_RXYFSFetchStatus_vnode,
sizeof(__be32) * 2 +
sizeof(struct yfs_xdr_YFSFid),
sizeof(struct yfs_xdr_YFSFetchStatus) +
sizeof(struct yfs_xdr_YFSCallBack) +
sizeof(struct yfs_xdr_YFSVolSync));
- if (!call) {
- fc->ac.error = -ENOMEM;
- return -ENOMEM;
- }
-
- call->key = fc->key;
- call->out_scb = scb;
- call->out_volsync = volsync;
+ if (!call)
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp = xdr_encode_u32(bp, YFSFETCHSTATUS);
bp = xdr_encode_u32(bp, 0); /* RPC flags */
- bp = xdr_encode_YFSFid(bp, &vnode->fid);
+ bp = xdr_encode_YFSFid(bp, &vp->fid);
yfs_check_req(call, bp);
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, &vnode->fid);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
@@ -439,7 +421,9 @@ int yfs_fs_fetch_file_status(struct afs_fs_cursor *fc, struct afs_status_cb *scb
*/
static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
{
- struct afs_read *req = call->read_request;
+ struct afs_operation *op = call->op;
+ struct afs_vnode_param *vp = &op->file[0];
+ struct afs_read *req = op->fetch.req;
const __be32 *bp;
unsigned int size;
int ret;
@@ -534,14 +518,12 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
return ret;
bp = call->buffer;
- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb);
- if (ret < 0)
- return ret;
- xdr_decode_YFSCallBack(&bp, call, call->out_scb);
- xdr_decode_YFSVolSync(&bp, call->out_volsync);
+ xdr_decode_YFSFetchStatus(&bp, call, &vp->scb);
+ xdr_decode_YFSCallBack(&bp, call, &vp->scb);
+ xdr_decode_YFSVolSync(&bp, &op->volsync);
- req->data_version = call->out_scb->status.data_version;
- req->file_size = call->out_scb->status.size;
+ req->data_version = vp->scb.status.data_version;
+ req->file_size = vp->scb.status.size;
call->unmarshall++;
/* Fall through */
@@ -565,12 +547,6 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
return 0;
}
-static void yfs_fetch_data_destructor(struct afs_call *call)
-{
- afs_put_read(call->read_request);
- afs_flat_call_destructor(call);
-}
-
/*
* YFS.FetchData64 operation type
*/
@@ -578,25 +554,24 @@ static const struct afs_call_type yfs_RXYFSFetchData64 = {
.name = "YFS.FetchData64",
.op = yfs_FS_FetchData64,
.deliver = yfs_deliver_fs_fetch_data64,
- .destructor = yfs_fetch_data_destructor,
+ .destructor = afs_flat_call_destructor,
};
/*
* Fetch data from a file.
*/
-int yfs_fs_fetch_data(struct afs_fs_cursor *fc, struct afs_status_cb *scb,
- struct afs_read *req)
+void yfs_fs_fetch_data(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
+ struct afs_read *req = op->fetch.req;
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
_enter(",%x,{%llx:%llu},%llx,%llx",
- key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode,
+ key_serial(op->key), vp->fid.vid, vp->fid.vnode,
req->pos, req->len);
- call = afs_alloc_flat_call(net, &yfs_RXYFSFetchData64,
+ call = afs_alloc_flat_call(op->net, &yfs_RXYFSFetchData64,
sizeof(__be32) * 2 +
sizeof(struct yfs_xdr_YFSFid) +
sizeof(struct yfs_xdr_u64) * 2,
@@ -604,27 +579,19 @@ int yfs_fs_fetch_data(struct afs_fs_cursor *fc, struct afs_status_cb *scb,
sizeof(struct yfs_xdr_YFSCallBack) +
sizeof(struct yfs_xdr_YFSVolSync));
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->out_scb = scb;
- call->out_volsync = NULL;
- call->read_request = afs_get_read(req);
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp = xdr_encode_u32(bp, YFSFETCHDATA64);
bp = xdr_encode_u32(bp, 0); /* RPC flags */
- bp = xdr_encode_YFSFid(bp, &vnode->fid);
+ bp = xdr_encode_YFSFid(bp, &vp->fid);
bp = xdr_encode_u64(bp, req->pos);
bp = xdr_encode_u64(bp, req->len);
yfs_check_req(call, bp);
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, &vnode->fid);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
@@ -632,6 +599,9 @@ int yfs_fs_fetch_data(struct afs_fs_cursor *fc, struct afs_status_cb *scb,
*/
static int yfs_deliver_fs_create_vnode(struct afs_call *call)
{
+ struct afs_operation *op = call->op;
+ struct afs_vnode_param *dvp = &op->file[0];
+ struct afs_vnode_param *vp = &op->file[1];
const __be32 *bp;
int ret;
@@ -643,15 +613,11 @@ static int yfs_deliver_fs_create_vnode(struct afs_call *call)
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
- xdr_decode_YFSFid(&bp, call->out_fid);
- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb);
- if (ret < 0)
- return ret;
- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb);
- if (ret < 0)
- return ret;
- xdr_decode_YFSCallBack(&bp, call, call->out_scb);
- xdr_decode_YFSVolSync(&bp, call->out_volsync);
+ xdr_decode_YFSFid(&bp, &op->file[1].fid);
+ xdr_decode_YFSFetchStatus(&bp, call, &vp->scb);
+ xdr_decode_YFSFetchStatus(&bp, call, &dvp->scb);
+ xdr_decode_YFSCallBack(&bp, call, &vp->scb);
+ xdr_decode_YFSVolSync(&bp, &op->volsync);
_leave(" = 0 [done]");
return 0;
@@ -670,26 +636,20 @@ static const struct afs_call_type afs_RXFSCreateFile = {
/*
* Create a file.
*/
-int yfs_fs_create_file(struct afs_fs_cursor *fc,
- const char *name,
- umode_t mode,
- struct afs_status_cb *dvnode_scb,
- struct afs_fid *newfid,
- struct afs_status_cb *new_scb)
+void yfs_fs_create_file(struct afs_operation *op)
{
- struct afs_vnode *dvnode = fc->vnode;
+ const struct qstr *name = &op->dentry->d_name;
+ struct afs_vnode_param *dvp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(dvnode);
- size_t namesz, reqsz, rplsz;
+ size_t reqsz, rplsz;
__be32 *bp;
_enter("");
- namesz = strlen(name);
reqsz = (sizeof(__be32) +
sizeof(__be32) +
sizeof(struct yfs_xdr_YFSFid) +
- xdr_strlen(namesz) +
+ xdr_strlen(name->len) +
sizeof(struct yfs_xdr_YFSStoreStatus) +
sizeof(__be32));
rplsz = (sizeof(struct yfs_xdr_YFSFid) +
@@ -698,30 +658,22 @@ int yfs_fs_create_file(struct afs_fs_cursor *fc,
sizeof(struct yfs_xdr_YFSCallBack) +
sizeof(struct yfs_xdr_YFSVolSync));
- call = afs_alloc_flat_call(net, &afs_RXFSCreateFile, reqsz, rplsz);
+ call = afs_alloc_flat_call(op->net, &afs_RXFSCreateFile, reqsz, rplsz);
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->out_dir_scb = dvnode_scb;
- call->out_fid = newfid;
- call->out_scb = new_scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp = xdr_encode_u32(bp, YFSCREATEFILE);
bp = xdr_encode_u32(bp, 0); /* RPC flags */
- bp = xdr_encode_YFSFid(bp, &dvnode->fid);
- bp = xdr_encode_string(bp, name, namesz);
- bp = xdr_encode_YFSStoreStatus_mode(bp, mode);
+ bp = xdr_encode_YFSFid(bp, &dvp->fid);
+ bp = xdr_encode_name(bp, name);
+ bp = xdr_encode_YFSStoreStatus_mode(bp, op->create.mode);
bp = xdr_encode_u32(bp, yfs_LockNone); /* ViceLockType */
yfs_check_req(call, bp);
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call1(call, &dvnode->fid, name);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call1(call, &dvp->fid, name);
+ afs_make_op_call(op, call, GFP_NOFS);
}
static const struct afs_call_type yfs_RXFSMakeDir = {
@@ -734,26 +686,20 @@ static const struct afs_call_type yfs_RXFSMakeDir = {
/*
* Make a directory.
*/
-int yfs_fs_make_dir(struct afs_fs_cursor *fc,
- const char *name,
- umode_t mode,
- struct afs_status_cb *dvnode_scb,
- struct afs_fid *newfid,
- struct afs_status_cb *new_scb)
+void yfs_fs_make_dir(struct afs_operation *op)
{
- struct afs_vnode *dvnode = fc->vnode;
+ const struct qstr *name = &op->dentry->d_name;
+ struct afs_vnode_param *dvp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(dvnode);
- size_t namesz, reqsz, rplsz;
+ size_t reqsz, rplsz;
__be32 *bp;
_enter("");
- namesz = strlen(name);
reqsz = (sizeof(__be32) +
sizeof(struct yfs_xdr_RPCFlags) +
sizeof(struct yfs_xdr_YFSFid) +
- xdr_strlen(namesz) +
+ xdr_strlen(name->len) +
sizeof(struct yfs_xdr_YFSStoreStatus));
rplsz = (sizeof(struct yfs_xdr_YFSFid) +
sizeof(struct yfs_xdr_YFSFetchStatus) +
@@ -761,29 +707,21 @@ int yfs_fs_make_dir(struct afs_fs_cursor *fc,
sizeof(struct yfs_xdr_YFSCallBack) +
sizeof(struct yfs_xdr_YFSVolSync));
- call = afs_alloc_flat_call(net, &yfs_RXFSMakeDir, reqsz, rplsz);
+ call = afs_alloc_flat_call(op->net, &yfs_RXFSMakeDir, reqsz, rplsz);
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->out_dir_scb = dvnode_scb;
- call->out_fid = newfid;
- call->out_scb = new_scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp = xdr_encode_u32(bp, YFSMAKEDIR);
bp = xdr_encode_u32(bp, 0); /* RPC flags */
- bp = xdr_encode_YFSFid(bp, &dvnode->fid);
- bp = xdr_encode_string(bp, name, namesz);
- bp = xdr_encode_YFSStoreStatus_mode(bp, mode);
+ bp = xdr_encode_YFSFid(bp, &dvp->fid);
+ bp = xdr_encode_name(bp, name);
+ bp = xdr_encode_YFSStoreStatus_mode(bp, op->create.mode);
yfs_check_req(call, bp);
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call1(call, &dvnode->fid, name);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call1(call, &dvp->fid, name);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
@@ -791,6 +729,9 @@ int yfs_fs_make_dir(struct afs_fs_cursor *fc,
*/
static int yfs_deliver_fs_remove_file2(struct afs_call *call)
{
+ struct afs_operation *op = call->op;
+ struct afs_vnode_param *dvp = &op->file[0];
+ struct afs_vnode_param *vp = &op->file[1];
struct afs_fid fid;
const __be32 *bp;
int ret;
@@ -802,20 +743,24 @@ static int yfs_deliver_fs_remove_file2(struct afs_call *call)
return ret;
bp = call->buffer;
- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb);
- if (ret < 0)
- return ret;
-
+ xdr_decode_YFSFetchStatus(&bp, call, &dvp->scb);
xdr_decode_YFSFid(&bp, &fid);
- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb);
- if (ret < 0)
- return ret;
+ xdr_decode_YFSFetchStatus(&bp, call, &vp->scb);
/* Was deleted if vnode->status.abort_code == VNOVNODE. */
- xdr_decode_YFSVolSync(&bp, call->out_volsync);
+ xdr_decode_YFSVolSync(&bp, &op->volsync);
return 0;
}
+static void yfs_done_fs_remove_file2(struct afs_call *call)
+{
+ if (call->error == -ECONNABORTED &&
+ call->abort_code == RX_INVALID_OPERATION) {
+ set_bit(AFS_SERVER_FL_NO_RM2, &call->server->flags);
+ call->op->flags |= AFS_OPERATION_DOWNGRADE;
+ }
+}
+
/*
* YFS.RemoveFile2 operation type.
*/
@@ -823,55 +768,44 @@ static const struct afs_call_type yfs_RXYFSRemoveFile2 = {
.name = "YFS.RemoveFile2",
.op = yfs_FS_RemoveFile2,
.deliver = yfs_deliver_fs_remove_file2,
+ .done = yfs_done_fs_remove_file2,
.destructor = afs_flat_call_destructor,
};
/*
* Remove a file and retrieve new file status.
*/
-int yfs_fs_remove_file2(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
- const char *name, struct afs_status_cb *dvnode_scb,
- struct afs_status_cb *vnode_scb)
+void yfs_fs_remove_file2(struct afs_operation *op)
{
- struct afs_vnode *dvnode = fc->vnode;
+ struct afs_vnode_param *dvp = &op->file[0];
+ const struct qstr *name = &op->dentry->d_name;
struct afs_call *call;
- struct afs_net *net = afs_v2net(dvnode);
- size_t namesz;
__be32 *bp;
_enter("");
- namesz = strlen(name);
-
- call = afs_alloc_flat_call(net, &yfs_RXYFSRemoveFile2,
+ call = afs_alloc_flat_call(op->net, &yfs_RXYFSRemoveFile2,
sizeof(__be32) +
sizeof(struct yfs_xdr_RPCFlags) +
sizeof(struct yfs_xdr_YFSFid) +
- xdr_strlen(namesz),
+ xdr_strlen(name->len),
sizeof(struct yfs_xdr_YFSFetchStatus) +
sizeof(struct yfs_xdr_YFSFid) +
sizeof(struct yfs_xdr_YFSFetchStatus) +
sizeof(struct yfs_xdr_YFSVolSync));
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->out_dir_scb = dvnode_scb;
- call->out_scb = vnode_scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp = xdr_encode_u32(bp, YFSREMOVEFILE2);
bp = xdr_encode_u32(bp, 0); /* RPC flags */
- bp = xdr_encode_YFSFid(bp, &dvnode->fid);
- bp = xdr_encode_string(bp, name, namesz);
+ bp = xdr_encode_YFSFid(bp, &dvp->fid);
+ bp = xdr_encode_name(bp, name);
yfs_check_req(call, bp);
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call1(call, &dvnode->fid, name);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call1(call, &dvp->fid, name);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
@@ -879,6 +813,8 @@ int yfs_fs_remove_file2(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
*/
static int yfs_deliver_fs_remove(struct afs_call *call)
{
+ struct afs_operation *op = call->op;
+ struct afs_vnode_param *dvp = &op->file[0];
const __be32 *bp;
int ret;
@@ -889,11 +825,8 @@ static int yfs_deliver_fs_remove(struct afs_call *call)
return ret;
bp = call->buffer;
- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb);
- if (ret < 0)
- return ret;
-
- xdr_decode_YFSVolSync(&bp, call->out_volsync);
+ xdr_decode_YFSFetchStatus(&bp, call, &dvp->scb);
+ xdr_decode_YFSVolSync(&bp, &op->volsync);
return 0;
}
@@ -907,6 +840,43 @@ static const struct afs_call_type yfs_RXYFSRemoveFile = {
.destructor = afs_flat_call_destructor,
};
+/*
+ * Remove a file.
+ */
+void yfs_fs_remove_file(struct afs_operation *op)
+{
+ const struct qstr *name = &op->dentry->d_name;
+ struct afs_vnode_param *dvp = &op->file[0];
+ struct afs_call *call;
+ __be32 *bp;
+
+ _enter("");
+
+ if (!test_bit(AFS_SERVER_FL_NO_RM2, &op->server->flags))
+ return yfs_fs_remove_file2(op);
+
+ call = afs_alloc_flat_call(op->net, &yfs_RXYFSRemoveFile,
+ sizeof(__be32) +
+ sizeof(struct yfs_xdr_RPCFlags) +
+ sizeof(struct yfs_xdr_YFSFid) +
+ xdr_strlen(name->len),
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSVolSync));
+ if (!call)
+ return afs_op_nomem(op);
+
+ /* marshall the parameters */
+ bp = call->request;
+ bp = xdr_encode_u32(bp, YFSREMOVEFILE);
+ bp = xdr_encode_u32(bp, 0); /* RPC flags */
+ bp = xdr_encode_YFSFid(bp, &dvp->fid);
+ bp = xdr_encode_name(bp, name);
+ yfs_check_req(call, bp);
+
+ trace_afs_make_fs_call1(call, &dvp->fid, name);
+ afs_make_op_call(op, call, GFP_NOFS);
+}
+
static const struct afs_call_type yfs_RXYFSRemoveDir = {
.name = "YFS.RemoveDir",
.op = yfs_FS_RemoveDir,
@@ -915,48 +885,37 @@ static const struct afs_call_type yfs_RXYFSRemoveDir = {
};
/*
- * remove a file or directory
+ * Remove a directory.
*/
-int yfs_fs_remove(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
- const char *name, bool isdir,
- struct afs_status_cb *dvnode_scb)
+void yfs_fs_remove_dir(struct afs_operation *op)
{
- struct afs_vnode *dvnode = fc->vnode;
+ const struct qstr *name = &op->dentry->d_name;
+ struct afs_vnode_param *dvp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(dvnode);
- size_t namesz;
__be32 *bp;
_enter("");
- namesz = strlen(name);
- call = afs_alloc_flat_call(
- net, isdir ? &yfs_RXYFSRemoveDir : &yfs_RXYFSRemoveFile,
- sizeof(__be32) +
- sizeof(struct yfs_xdr_RPCFlags) +
- sizeof(struct yfs_xdr_YFSFid) +
- xdr_strlen(namesz),
- sizeof(struct yfs_xdr_YFSFetchStatus) +
- sizeof(struct yfs_xdr_YFSVolSync));
+ call = afs_alloc_flat_call(op->net, &yfs_RXYFSRemoveDir,
+ sizeof(__be32) +
+ sizeof(struct yfs_xdr_RPCFlags) +
+ sizeof(struct yfs_xdr_YFSFid) +
+ xdr_strlen(name->len),
+ sizeof(struct yfs_xdr_YFSFetchStatus) +
+ sizeof(struct yfs_xdr_YFSVolSync));
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->out_dir_scb = dvnode_scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
- bp = xdr_encode_u32(bp, isdir ? YFSREMOVEDIR : YFSREMOVEFILE);
+ bp = xdr_encode_u32(bp, YFSREMOVEDIR);
bp = xdr_encode_u32(bp, 0); /* RPC flags */
- bp = xdr_encode_YFSFid(bp, &dvnode->fid);
- bp = xdr_encode_string(bp, name, namesz);
+ bp = xdr_encode_YFSFid(bp, &dvp->fid);
+ bp = xdr_encode_name(bp, name);
yfs_check_req(call, bp);
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call1(call, &dvnode->fid, name);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call1(call, &dvp->fid, name);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
@@ -964,6 +923,9 @@ int yfs_fs_remove(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
*/
static int yfs_deliver_fs_link(struct afs_call *call)
{
+ struct afs_operation *op = call->op;
+ struct afs_vnode_param *dvp = &op->file[0];
+ struct afs_vnode_param *vp = &op->file[1];
const __be32 *bp;
int ret;
@@ -974,13 +936,9 @@ static int yfs_deliver_fs_link(struct afs_call *call)
return ret;
bp = call->buffer;
- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb);
- if (ret < 0)
- return ret;
- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb);
- if (ret < 0)
- return ret;
- xdr_decode_YFSVolSync(&bp, call->out_volsync);
+ xdr_decode_YFSFetchStatus(&bp, call, &vp->scb);
+ xdr_decode_YFSFetchStatus(&bp, call, &dvp->scb);
+ xdr_decode_YFSVolSync(&bp, &op->volsync);
_leave(" = 0 [done]");
return 0;
}
@@ -998,50 +956,39 @@ static const struct afs_call_type yfs_RXYFSLink = {
/*
* Make a hard link.
*/
-int yfs_fs_link(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
- const char *name,
- struct afs_status_cb *dvnode_scb,
- struct afs_status_cb *vnode_scb)
+void yfs_fs_link(struct afs_operation *op)
{
- struct afs_vnode *dvnode = fc->vnode;
+ const struct qstr *name = &op->dentry->d_name;
+ struct afs_vnode_param *dvp = &op->file[0];
+ struct afs_vnode_param *vp = &op->file[1];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
- size_t namesz;
__be32 *bp;
_enter("");
- namesz = strlen(name);
- call = afs_alloc_flat_call(net, &yfs_RXYFSLink,
+ call = afs_alloc_flat_call(op->net, &yfs_RXYFSLink,
sizeof(__be32) +
sizeof(struct yfs_xdr_RPCFlags) +
sizeof(struct yfs_xdr_YFSFid) +
- xdr_strlen(namesz) +
+ xdr_strlen(name->len) +
sizeof(struct yfs_xdr_YFSFid),
sizeof(struct yfs_xdr_YFSFetchStatus) +
sizeof(struct yfs_xdr_YFSFetchStatus) +
sizeof(struct yfs_xdr_YFSVolSync));
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->out_dir_scb = dvnode_scb;
- call->out_scb = vnode_scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp = xdr_encode_u32(bp, YFSLINK);
bp = xdr_encode_u32(bp, 0); /* RPC flags */
- bp = xdr_encode_YFSFid(bp, &dvnode->fid);
- bp = xdr_encode_string(bp, name, namesz);
- bp = xdr_encode_YFSFid(bp, &vnode->fid);
+ bp = xdr_encode_YFSFid(bp, &dvp->fid);
+ bp = xdr_encode_name(bp, name);
+ bp = xdr_encode_YFSFid(bp, &vp->fid);
yfs_check_req(call, bp);
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call1(call, &vnode->fid, name);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call1(call, &vp->fid, name);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
@@ -1049,6 +996,9 @@ int yfs_fs_link(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
*/
static int yfs_deliver_fs_symlink(struct afs_call *call)
{
+ struct afs_operation *op = call->op;
+ struct afs_vnode_param *dvp = &op->file[0];
+ struct afs_vnode_param *vp = &op->file[1];
const __be32 *bp;
int ret;
@@ -1060,14 +1010,10 @@ static int yfs_deliver_fs_symlink(struct afs_call *call)
/* unmarshall the reply once we've received all of it */
bp = call->buffer;
- xdr_decode_YFSFid(&bp, call->out_fid);
- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb);
- if (ret < 0)
- return ret;
- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb);
- if (ret < 0)
- return ret;
- xdr_decode_YFSVolSync(&bp, call->out_volsync);
+ xdr_decode_YFSFid(&bp, &vp->fid);
+ xdr_decode_YFSFetchStatus(&bp, call, &vp->scb);
+ xdr_decode_YFSFetchStatus(&bp, call, &dvp->scb);
+ xdr_decode_YFSVolSync(&bp, &op->volsync);
_leave(" = 0 [done]");
return 0;
@@ -1086,28 +1032,22 @@ static const struct afs_call_type yfs_RXYFSSymlink = {
/*
* Create a symbolic link.
*/
-int yfs_fs_symlink(struct afs_fs_cursor *fc,
- const char *name,
- const char *contents,
- struct afs_status_cb *dvnode_scb,
- struct afs_fid *newfid,
- struct afs_status_cb *vnode_scb)
+void yfs_fs_symlink(struct afs_operation *op)
{
- struct afs_vnode *dvnode = fc->vnode;
+ const struct qstr *name = &op->dentry->d_name;
+ struct afs_vnode_param *dvp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(dvnode);
- size_t namesz, contents_sz;
+ size_t contents_sz;
__be32 *bp;
_enter("");
- namesz = strlen(name);
- contents_sz = strlen(contents);
- call = afs_alloc_flat_call(net, &yfs_RXYFSSymlink,
+ contents_sz = strlen(op->create.symlink);
+ call = afs_alloc_flat_call(op->net, &yfs_RXYFSSymlink,
sizeof(__be32) +
sizeof(struct yfs_xdr_RPCFlags) +
sizeof(struct yfs_xdr_YFSFid) +
- xdr_strlen(namesz) +
+ xdr_strlen(name->len) +
xdr_strlen(contents_sz) +
sizeof(struct yfs_xdr_YFSStoreStatus),
sizeof(struct yfs_xdr_YFSFid) +
@@ -1115,28 +1055,20 @@ int yfs_fs_symlink(struct afs_fs_cursor *fc,
sizeof(struct yfs_xdr_YFSFetchStatus) +
sizeof(struct yfs_xdr_YFSVolSync));
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->out_dir_scb = dvnode_scb;
- call->out_fid = newfid;
- call->out_scb = vnode_scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp = xdr_encode_u32(bp, YFSSYMLINK);
bp = xdr_encode_u32(bp, 0); /* RPC flags */
- bp = xdr_encode_YFSFid(bp, &dvnode->fid);
- bp = xdr_encode_string(bp, name, namesz);
- bp = xdr_encode_string(bp, contents, contents_sz);
+ bp = xdr_encode_YFSFid(bp, &dvp->fid);
+ bp = xdr_encode_name(bp, name);
+ bp = xdr_encode_string(bp, op->create.symlink, contents_sz);
bp = xdr_encode_YFSStoreStatus_mode(bp, S_IRWXUGO);
yfs_check_req(call, bp);
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call1(call, &dvnode->fid, name);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call1(call, &dvp->fid, name);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
@@ -1144,6 +1076,9 @@ int yfs_fs_symlink(struct afs_fs_cursor *fc,
*/
static int yfs_deliver_fs_rename(struct afs_call *call)
{
+ struct afs_operation *op = call->op;
+ struct afs_vnode_param *orig_dvp = &op->file[0];
+ struct afs_vnode_param *new_dvp = &op->file[1];
const __be32 *bp;
int ret;
@@ -1154,14 +1089,12 @@ static int yfs_deliver_fs_rename(struct afs_call *call)
return ret;
bp = call->buffer;
- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb);
- if (ret < 0)
- return ret;
- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb);
- if (ret < 0)
- return ret;
-
- xdr_decode_YFSVolSync(&bp, call->out_volsync);
+ /* If the two dirs are the same, we have two copies of the same status
+ * report, so we just decode it twice.
+ */
+ xdr_decode_YFSFetchStatus(&bp, call, &orig_dvp->scb);
+ xdr_decode_YFSFetchStatus(&bp, call, &new_dvp->scb);
+ xdr_decode_YFSVolSync(&bp, &op->volsync);
_leave(" = 0 [done]");
return 0;
}
@@ -1179,55 +1112,42 @@ static const struct afs_call_type yfs_RXYFSRename = {
/*
* Rename a file or directory.
*/
-int yfs_fs_rename(struct afs_fs_cursor *fc,
- const char *orig_name,
- struct afs_vnode *new_dvnode,
- const char *new_name,
- struct afs_status_cb *orig_dvnode_scb,
- struct afs_status_cb *new_dvnode_scb)
+void yfs_fs_rename(struct afs_operation *op)
{
- struct afs_vnode *orig_dvnode = fc->vnode;
+ struct afs_vnode_param *orig_dvp = &op->file[0];
+ struct afs_vnode_param *new_dvp = &op->file[1];
+ const struct qstr *orig_name = &op->dentry->d_name;
+ const struct qstr *new_name = &op->dentry_2->d_name;
struct afs_call *call;
- struct afs_net *net = afs_v2net(orig_dvnode);
- size_t o_namesz, n_namesz;
__be32 *bp;
_enter("");
- o_namesz = strlen(orig_name);
- n_namesz = strlen(new_name);
- call = afs_alloc_flat_call(net, &yfs_RXYFSRename,
+ call = afs_alloc_flat_call(op->net, &yfs_RXYFSRename,
sizeof(__be32) +
sizeof(struct yfs_xdr_RPCFlags) +
sizeof(struct yfs_xdr_YFSFid) +
- xdr_strlen(o_namesz) +
+ xdr_strlen(orig_name->len) +
sizeof(struct yfs_xdr_YFSFid) +
- xdr_strlen(n_namesz),
+ xdr_strlen(new_name->len),
sizeof(struct yfs_xdr_YFSFetchStatus) +
sizeof(struct yfs_xdr_YFSFetchStatus) +
sizeof(struct yfs_xdr_YFSVolSync));
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->out_dir_scb = orig_dvnode_scb;
- call->out_scb = new_dvnode_scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp = xdr_encode_u32(bp, YFSRENAME);
bp = xdr_encode_u32(bp, 0); /* RPC flags */
- bp = xdr_encode_YFSFid(bp, &orig_dvnode->fid);
- bp = xdr_encode_string(bp, orig_name, o_namesz);
- bp = xdr_encode_YFSFid(bp, &new_dvnode->fid);
- bp = xdr_encode_string(bp, new_name, n_namesz);
+ bp = xdr_encode_YFSFid(bp, &orig_dvp->fid);
+ bp = xdr_encode_name(bp, orig_name);
+ bp = xdr_encode_YFSFid(bp, &new_dvp->fid);
+ bp = xdr_encode_name(bp, new_name);
yfs_check_req(call, bp);
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call2(call, &orig_dvnode->fid, orig_name, new_name);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call2(call, &orig_dvp->fid, orig_name, new_name);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
@@ -1243,27 +1163,23 @@ static const struct afs_call_type yfs_RXYFSStoreData64 = {
/*
* Store a set of pages to a large file.
*/
-int yfs_fs_store_data(struct afs_fs_cursor *fc, struct address_space *mapping,
- pgoff_t first, pgoff_t last,
- unsigned offset, unsigned to,
- struct afs_status_cb *scb)
+void yfs_fs_store_data(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
loff_t size, pos, i_size;
__be32 *bp;
_enter(",%x,{%llx:%llu},,",
- key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
+ key_serial(op->key), vp->fid.vid, vp->fid.vnode);
- size = (loff_t)to - (loff_t)offset;
- if (first != last)
- size += (loff_t)(last - first) << PAGE_SHIFT;
- pos = (loff_t)first << PAGE_SHIFT;
- pos += offset;
+ size = (loff_t)op->store.last_to - (loff_t)op->store.first_offset;
+ if (op->store.first != op->store.last)
+ size += (loff_t)(op->store.last - op->store.first) << PAGE_SHIFT;
+ pos = (loff_t)op->store.first << PAGE_SHIFT;
+ pos += op->store.first_offset;
- i_size = i_size_read(&vnode->vfs_inode);
+ i_size = i_size_read(&vp->vnode->vfs_inode);
if (pos + size > i_size)
i_size = size + pos;
@@ -1271,7 +1187,7 @@ int yfs_fs_store_data(struct afs_fs_cursor *fc, struct address_space *mapping,
(unsigned long long)size, (unsigned long long)pos,
(unsigned long long)i_size);
- call = afs_alloc_flat_call(net, &yfs_RXYFSStoreData64,
+ call = afs_alloc_flat_call(op->net, &yfs_RXYFSStoreData64,
sizeof(__be32) +
sizeof(__be32) +
sizeof(struct yfs_xdr_YFSFid) +
@@ -1280,33 +1196,24 @@ int yfs_fs_store_data(struct afs_fs_cursor *fc, struct address_space *mapping,
sizeof(struct yfs_xdr_YFSFetchStatus) +
sizeof(struct yfs_xdr_YFSVolSync));
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->mapping = mapping;
- call->first = first;
- call->last = last;
- call->first_offset = offset;
- call->last_to = to;
+ return afs_op_nomem(op);
+
+ call->key = op->key;
call->send_pages = true;
- call->out_scb = scb;
/* marshall the parameters */
bp = call->request;
bp = xdr_encode_u32(bp, YFSSTOREDATA64);
bp = xdr_encode_u32(bp, 0); /* RPC flags */
- bp = xdr_encode_YFSFid(bp, &vnode->fid);
- bp = xdr_encode_YFSStoreStatus_mtime(bp, &vnode->vfs_inode.i_mtime);
+ bp = xdr_encode_YFSFid(bp, &vp->fid);
+ bp = xdr_encode_YFSStoreStatus_mtime(bp, &op->mtime);
bp = xdr_encode_u64(bp, pos);
bp = xdr_encode_u64(bp, size);
bp = xdr_encode_u64(bp, i_size);
yfs_check_req(call, bp);
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, &vnode->fid);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_NOFS);
}
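The size/pos arithmetic above spans from first_offset within the first page to last_to within the last page, adding one whole page for each page in between. A quick worked example, assuming 4 KiB pages (PAGE_SHIFT == 12):

	/* Writing pages 2..4 inclusive, starting at byte 100 of page 2
	 * and ending at byte 300 of page 4:
	 *
	 *   size = 300 - 100 + ((4 - 2) << 12) = 200 + 8192 = 8392
	 *   pos  = (2 << 12) + 100             = 8292
	 *
	 * i.e. the RPC stores 8392 bytes at file offset 8292, ending at
	 * byte 16684 = (4 << 12) + 300.
	 */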
/*
@@ -1330,18 +1237,17 @@ static const struct afs_call_type yfs_RXYFSStoreData64_as_Status = {
* Set the attributes on a file, using YFS.StoreData64 rather than
* YFS.StoreStatus so as to alter the file size also.
*/
-static int yfs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr,
- struct afs_status_cb *scb)
+static void yfs_fs_setattr_size(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
+ struct iattr *attr = op->setattr.attr;
__be32 *bp;
_enter(",%x,{%llx:%llu},,",
- key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
+ key_serial(op->key), vp->fid.vid, vp->fid.vnode);
- call = afs_alloc_flat_call(net, &yfs_RXYFSStoreData64_as_Status,
+ call = afs_alloc_flat_call(op->net, &yfs_RXYFSStoreData64_as_Status,
sizeof(__be32) * 2 +
sizeof(struct yfs_xdr_YFSFid) +
sizeof(struct yfs_xdr_YFSStoreStatus) +
@@ -1349,72 +1255,59 @@ static int yfs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr,
sizeof(struct yfs_xdr_YFSFetchStatus) +
sizeof(struct yfs_xdr_YFSVolSync));
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->out_scb = scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp = xdr_encode_u32(bp, YFSSTOREDATA64);
bp = xdr_encode_u32(bp, 0); /* RPC flags */
- bp = xdr_encode_YFSFid(bp, &vnode->fid);
+ bp = xdr_encode_YFSFid(bp, &vp->fid);
bp = xdr_encode_YFS_StoreStatus(bp, attr);
bp = xdr_encode_u64(bp, attr->ia_size); /* position of start of write */
bp = xdr_encode_u64(bp, 0); /* size of write */
bp = xdr_encode_u64(bp, attr->ia_size); /* new file length */
yfs_check_req(call, bp);
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, &vnode->fid);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
* Set the attributes on a file, using YFS.StoreData64 if there's a change in
* file size, and YFS.StoreStatus otherwise.
*/
-int yfs_fs_setattr(struct afs_fs_cursor *fc, struct iattr *attr,
- struct afs_status_cb *scb)
+void yfs_fs_setattr(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
+ struct iattr *attr = op->setattr.attr;
__be32 *bp;
if (attr->ia_valid & ATTR_SIZE)
- return yfs_fs_setattr_size(fc, attr, scb);
+ return yfs_fs_setattr_size(op);
_enter(",%x,{%llx:%llu},,",
- key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
+ key_serial(op->key), vp->fid.vid, vp->fid.vnode);
- call = afs_alloc_flat_call(net, &yfs_RXYFSStoreStatus,
+ call = afs_alloc_flat_call(op->net, &yfs_RXYFSStoreStatus,
sizeof(__be32) * 2 +
sizeof(struct yfs_xdr_YFSFid) +
sizeof(struct yfs_xdr_YFSStoreStatus),
sizeof(struct yfs_xdr_YFSFetchStatus) +
sizeof(struct yfs_xdr_YFSVolSync));
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->out_scb = scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp = xdr_encode_u32(bp, YFSSTORESTATUS);
bp = xdr_encode_u32(bp, 0); /* RPC flags */
- bp = xdr_encode_YFSFid(bp, &vnode->fid);
+ bp = xdr_encode_YFSFid(bp, &vp->fid);
bp = xdr_encode_YFS_StoreStatus(bp, attr);
yfs_check_req(call, bp);
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, &vnode->fid);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
@@ -1422,6 +1315,7 @@ int yfs_fs_setattr(struct afs_fs_cursor *fc, struct iattr *attr,
*/
static int yfs_deliver_fs_get_volume_status(struct afs_call *call)
{
+ struct afs_operation *op = call->op;
const __be32 *bp;
char *p;
u32 size;
@@ -1443,7 +1337,7 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call)
return ret;
bp = call->buffer;
- xdr_decode_YFSFetchVolumeStatus(&bp, call->out_volstatus);
+ xdr_decode_YFSFetchVolumeStatus(&bp, &op->volstatus.vs);
call->unmarshall++;
afs_extract_to_tmp(call);
/* Fall through */
@@ -1457,8 +1351,7 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call)
call->count = ntohl(call->tmp);
_debug("volname length: %u", call->count);
if (call->count >= AFSNAMEMAX)
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_volname_len);
+ return afs_protocol_error(call, afs_eproto_volname_len);
size = (call->count + 3) & ~3; /* It's padded */
afs_extract_to_buf(call, size);
call->unmarshall++;
@@ -1487,8 +1380,7 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call)
call->count = ntohl(call->tmp);
_debug("offline msg length: %u", call->count);
if (call->count >= AFSNAMEMAX)
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_offline_msg_len);
+ return afs_protocol_error(call, afs_eproto_offline_msg_len);
size = (call->count + 3) & ~3; /* It's padded */
afs_extract_to_buf(call, size);
call->unmarshall++;
@@ -1518,8 +1410,7 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call)
call->count = ntohl(call->tmp);
_debug("motd length: %u", call->count);
if (call->count >= AFSNAMEMAX)
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_motd_len);
+ return afs_protocol_error(call, afs_eproto_motd_len);
size = (call->count + 3) & ~3; /* It's padded */
afs_extract_to_buf(call, size);
call->unmarshall++;
@@ -1560,17 +1451,15 @@ static const struct afs_call_type yfs_RXYFSGetVolumeStatus = {
/*
* fetch the status of a volume
*/
-int yfs_fs_get_volume_status(struct afs_fs_cursor *fc,
- struct afs_volume_status *vs)
+void yfs_fs_get_volume_status(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
_enter("");
- call = afs_alloc_flat_call(net, &yfs_RXYFSGetVolumeStatus,
+ call = afs_alloc_flat_call(op->net, &yfs_RXYFSGetVolumeStatus,
sizeof(__be32) * 2 +
sizeof(struct yfs_xdr_u64),
max_t(size_t,
@@ -1578,23 +1467,17 @@ int yfs_fs_get_volume_status(struct afs_fs_cursor *fc,
sizeof(__be32),
AFSOPAQUEMAX + 1));
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->out_volstatus = vs;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp = xdr_encode_u32(bp, YFSGETVOLUMESTATUS);
bp = xdr_encode_u32(bp, 0); /* RPC flags */
- bp = xdr_encode_u64(bp, vnode->fid.vid);
+ bp = xdr_encode_u64(bp, vp->fid.vid);
yfs_check_req(call, bp);
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, &vnode->fid);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
@@ -1632,118 +1515,93 @@ static const struct afs_call_type yfs_RXYFSReleaseLock = {
/*
* Set a lock on a file
*/
-int yfs_fs_set_lock(struct afs_fs_cursor *fc, afs_lock_type_t type,
- struct afs_status_cb *scb)
+void yfs_fs_set_lock(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
_enter("");
- call = afs_alloc_flat_call(net, &yfs_RXYFSSetLock,
+ call = afs_alloc_flat_call(op->net, &yfs_RXYFSSetLock,
sizeof(__be32) * 2 +
sizeof(struct yfs_xdr_YFSFid) +
sizeof(__be32),
sizeof(struct yfs_xdr_YFSFetchStatus) +
sizeof(struct yfs_xdr_YFSVolSync));
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->lvnode = vnode;
- call->out_scb = scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp = xdr_encode_u32(bp, YFSSETLOCK);
bp = xdr_encode_u32(bp, 0); /* RPC flags */
- bp = xdr_encode_YFSFid(bp, &vnode->fid);
- bp = xdr_encode_u32(bp, type);
+ bp = xdr_encode_YFSFid(bp, &vp->fid);
+ bp = xdr_encode_u32(bp, op->lock.type);
yfs_check_req(call, bp);
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_calli(call, &vnode->fid, type);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_calli(call, &vp->fid, op->lock.type);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
* extend a lock on a file
*/
-int yfs_fs_extend_lock(struct afs_fs_cursor *fc, struct afs_status_cb *scb)
+void yfs_fs_extend_lock(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
_enter("");
- call = afs_alloc_flat_call(net, &yfs_RXYFSExtendLock,
+ call = afs_alloc_flat_call(op->net, &yfs_RXYFSExtendLock,
sizeof(__be32) * 2 +
sizeof(struct yfs_xdr_YFSFid),
sizeof(struct yfs_xdr_YFSFetchStatus) +
sizeof(struct yfs_xdr_YFSVolSync));
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->lvnode = vnode;
- call->out_scb = scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp = xdr_encode_u32(bp, YFSEXTENDLOCK);
bp = xdr_encode_u32(bp, 0); /* RPC flags */
- bp = xdr_encode_YFSFid(bp, &vnode->fid);
+ bp = xdr_encode_YFSFid(bp, &vp->fid);
yfs_check_req(call, bp);
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, &vnode->fid);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
* release a lock on a file
*/
-int yfs_fs_release_lock(struct afs_fs_cursor *fc, struct afs_status_cb *scb)
+void yfs_fs_release_lock(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
_enter("");
- call = afs_alloc_flat_call(net, &yfs_RXYFSReleaseLock,
+ call = afs_alloc_flat_call(op->net, &yfs_RXYFSReleaseLock,
sizeof(__be32) * 2 +
sizeof(struct yfs_xdr_YFSFid),
sizeof(struct yfs_xdr_YFSFetchStatus) +
sizeof(struct yfs_xdr_YFSVolSync));
if (!call)
- return -ENOMEM;
-
- call->key = fc->key;
- call->lvnode = vnode;
- call->out_scb = scb;
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp = xdr_encode_u32(bp, YFSRELEASELOCK);
bp = xdr_encode_u32(bp, 0); /* RPC flags */
- bp = xdr_encode_YFSFid(bp, &vnode->fid);
+ bp = xdr_encode_YFSFid(bp, &vp->fid);
yfs_check_req(call, bp);
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, &vnode->fid);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
@@ -1759,45 +1617,33 @@ static const struct afs_call_type yfs_RXYFSFetchStatus = {
/*
* Fetch the status information for a fid without needing a vnode handle.
*/
-int yfs_fs_fetch_status(struct afs_fs_cursor *fc,
- struct afs_net *net,
- struct afs_fid *fid,
- struct afs_status_cb *scb,
- struct afs_volsync *volsync)
+void yfs_fs_fetch_status(struct afs_operation *op)
{
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
__be32 *bp;
_enter(",%x,{%llx:%llu},,",
- key_serial(fc->key), fid->vid, fid->vnode);
+ key_serial(op->key), vp->fid.vid, vp->fid.vnode);
- call = afs_alloc_flat_call(net, &yfs_RXYFSFetchStatus,
+ call = afs_alloc_flat_call(op->net, &yfs_RXYFSFetchStatus,
sizeof(__be32) * 2 +
sizeof(struct yfs_xdr_YFSFid),
sizeof(struct yfs_xdr_YFSFetchStatus) +
sizeof(struct yfs_xdr_YFSCallBack) +
sizeof(struct yfs_xdr_YFSVolSync));
- if (!call) {
- fc->ac.error = -ENOMEM;
- return -ENOMEM;
- }
-
- call->key = fc->key;
- call->out_scb = scb;
- call->out_volsync = volsync;
+ if (!call)
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp = xdr_encode_u32(bp, YFSFETCHSTATUS);
bp = xdr_encode_u32(bp, 0); /* RPC flags */
- bp = xdr_encode_YFSFid(bp, fid);
+ bp = xdr_encode_YFSFid(bp, &vp->fid);
yfs_check_req(call, bp);
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, fid);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
@@ -1805,6 +1651,7 @@ int yfs_fs_fetch_status(struct afs_fs_cursor *fc,
*/
static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call)
{
+ struct afs_operation *op = call->op;
struct afs_status_cb *scb;
const __be32 *bp;
u32 tmp;
@@ -1826,10 +1673,9 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call)
return ret;
tmp = ntohl(call->tmp);
- _debug("status count: %u/%u", tmp, call->count2);
- if (tmp != call->count2)
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_ibulkst_count);
+ _debug("status count: %u/%u", tmp, op->nr_files);
+ if (tmp != op->nr_files)
+ return afs_protocol_error(call, afs_eproto_ibulkst_count);
call->count = 0;
call->unmarshall++;
@@ -1843,14 +1689,23 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call)
if (ret < 0)
return ret;
+ switch (call->count) {
+ case 0:
+ scb = &op->file[0].scb;
+ break;
+ case 1:
+ scb = &op->file[1].scb;
+ break;
+ default:
+ scb = &op->more_files[call->count - 2].scb;
+ break;
+ }
+
bp = call->buffer;
- scb = &call->out_scb[call->count];
- ret = xdr_decode_YFSFetchStatus(&bp, call, scb);
- if (ret < 0)
- return ret;
+ xdr_decode_YFSFetchStatus(&bp, call, scb);
call->count++;
- if (call->count < call->count2)
+ if (call->count < op->nr_files)
goto more_counts;
call->count = 0;
@@ -1867,9 +1722,8 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call)
tmp = ntohl(call->tmp);
_debug("CB count: %u", tmp);
- if (tmp != call->count2)
- return afs_protocol_error(call, -EBADMSG,
- afs_eproto_ibulkst_cb_count);
+ if (tmp != op->nr_files)
+ return afs_protocol_error(call, afs_eproto_ibulkst_cb_count);
call->count = 0;
call->unmarshall++;
more_cbs:
@@ -1883,11 +1737,22 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call)
return ret;
_debug("unmarshall CB array");
+ switch (call->count) {
+ case 0:
+ scb = &op->file[0].scb;
+ break;
+ case 1:
+ scb = &op->file[1].scb;
+ break;
+ default:
+ scb = &op->more_files[call->count - 2].scb;
+ break;
+ }
+
bp = call->buffer;
- scb = &call->out_scb[call->count];
xdr_decode_YFSCallBack(&bp, call, scb);
call->count++;
- if (call->count < call->count2)
+ if (call->count < op->nr_files)
goto more_cbs;
afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSVolSync));
@@ -1900,7 +1765,7 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call)
return ret;
bp = call->buffer;
- xdr_decode_YFSVolSync(&bp, call->out_volsync);
+ xdr_decode_YFSVolSync(&bp, &op->volsync);
call->unmarshall++;
/* Fall through */
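The two identical switch blocks above map the running reply index onto the operation's vnode slots: entries 0 and 1 land in op->file[], everything after that in op->more_files[]. A small helper could factor that out; a sketch (yfs_op_scb is a hypothetical name, not in the patch):

	/* Hypothetical helper: pick the status/callback buffer for the
	 * Nth entry of a YFS.InlineBulkStatus reply.
	 */
	static struct afs_status_cb *yfs_op_scb(struct afs_operation *op,
						unsigned int n)
	{
		if (n < 2)
			return &op->file[n].scb;
		return &op->more_files[n - 2].scb;
	}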
@@ -1926,50 +1791,39 @@ static const struct afs_call_type yfs_RXYFSInlineBulkStatus = {
/*
* Fetch the status information for up to 1024 files
*/
-int yfs_fs_inline_bulk_status(struct afs_fs_cursor *fc,
- struct afs_net *net,
- struct afs_fid *fids,
- struct afs_status_cb *statuses,
- unsigned int nr_fids,
- struct afs_volsync *volsync)
+void yfs_fs_inline_bulk_status(struct afs_operation *op)
{
+ struct afs_vnode_param *dvp = &op->file[0];
+ struct afs_vnode_param *vp = &op->file[1];
struct afs_call *call;
__be32 *bp;
int i;
_enter(",%x,{%llx:%llu},%u",
- key_serial(fc->key), fids[0].vid, fids[1].vnode, nr_fids);
+ key_serial(op->key), vp->fid.vid, vp->fid.vnode, op->nr_files);
- call = afs_alloc_flat_call(net, &yfs_RXYFSInlineBulkStatus,
+ call = afs_alloc_flat_call(op->net, &yfs_RXYFSInlineBulkStatus,
sizeof(__be32) +
sizeof(__be32) +
sizeof(__be32) +
- sizeof(struct yfs_xdr_YFSFid) * nr_fids,
+ sizeof(struct yfs_xdr_YFSFid) * op->nr_files,
sizeof(struct yfs_xdr_YFSFetchStatus));
- if (!call) {
- fc->ac.error = -ENOMEM;
- return -ENOMEM;
- }
-
- call->key = fc->key;
- call->out_scb = statuses;
- call->out_volsync = volsync;
- call->count2 = nr_fids;
+ if (!call)
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp = xdr_encode_u32(bp, YFSINLINEBULKSTATUS);
bp = xdr_encode_u32(bp, 0); /* RPCFlags */
- bp = xdr_encode_u32(bp, nr_fids);
- for (i = 0; i < nr_fids; i++)
- bp = xdr_encode_YFSFid(bp, &fids[i]);
+ bp = xdr_encode_u32(bp, op->nr_files);
+ bp = xdr_encode_YFSFid(bp, &dvp->fid);
+ bp = xdr_encode_YFSFid(bp, &vp->fid);
+ for (i = 0; i < op->nr_files - 2; i++)
+ bp = xdr_encode_YFSFid(bp, &op->more_files[i].fid);
yfs_check_req(call, bp);
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, &fids[0]);
- afs_set_fc_call(call, fc);
- afs_make_call(&fc->ac, call, GFP_NOFS);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_NOFS);
}
/*
@@ -1977,7 +1831,9 @@ int yfs_fs_inline_bulk_status(struct afs_fs_cursor *fc,
*/
static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call)
{
- struct yfs_acl *yacl = call->out_yacl;
+ struct afs_operation *op = call->op;
+ struct afs_vnode_param *vp = &op->file[0];
+ struct yfs_acl *yacl = op->yacl;
struct afs_acl *acl;
const __be32 *bp;
unsigned int size;
@@ -2067,10 +1923,8 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call)
bp = call->buffer;
yacl->inherit_flag = ntohl(*bp++);
yacl->num_cleaned = ntohl(*bp++);
- ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb);
- if (ret < 0)
- return ret;
- xdr_decode_YFSVolSync(&bp, call->out_volsync);
+ xdr_decode_YFSFetchStatus(&bp, call, &vp->scb);
+ xdr_decode_YFSVolSync(&bp, &op->volsync);
call->unmarshall++;
/* Fall through */
@@ -2105,45 +1959,33 @@ static const struct afs_call_type yfs_RXYFSFetchOpaqueACL = {
/*
* Fetch the YFS advanced ACLs for a file.
*/
-struct yfs_acl *yfs_fs_fetch_opaque_acl(struct afs_fs_cursor *fc,
- struct yfs_acl *yacl,
- struct afs_status_cb *scb)
+void yfs_fs_fetch_opaque_acl(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
__be32 *bp;
_enter(",%x,{%llx:%llu},,",
- key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
+ key_serial(op->key), vp->fid.vid, vp->fid.vnode);
- call = afs_alloc_flat_call(net, &yfs_RXYFSFetchOpaqueACL,
+ call = afs_alloc_flat_call(op->net, &yfs_RXYFSFetchOpaqueACL,
sizeof(__be32) * 2 +
sizeof(struct yfs_xdr_YFSFid),
sizeof(__be32) * 2 +
sizeof(struct yfs_xdr_YFSFetchStatus) +
sizeof(struct yfs_xdr_YFSVolSync));
- if (!call) {
- fc->ac.error = -ENOMEM;
- return ERR_PTR(-ENOMEM);
- }
-
- call->key = fc->key;
- call->out_yacl = yacl;
- call->out_scb = scb;
- call->out_volsync = NULL;
+ if (!call)
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp = xdr_encode_u32(bp, YFSFETCHOPAQUEACL);
bp = xdr_encode_u32(bp, 0); /* RPC flags */
- bp = xdr_encode_YFSFid(bp, &vnode->fid);
+ bp = xdr_encode_YFSFid(bp, &vp->fid);
yfs_check_req(call, bp);
- afs_use_fs_server(call, fc->cbi);
- trace_afs_make_fs_call(call, &vnode->fid);
- afs_make_call(&fc->ac, call, GFP_KERNEL);
- return (struct yfs_acl *)afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_KERNEL);
}
/*
@@ -2159,46 +2001,38 @@ static const struct afs_call_type yfs_RXYFSStoreOpaqueACL2 = {
/*
 * Store the YFS ACL for a file.
*/
-int yfs_fs_store_opaque_acl2(struct afs_fs_cursor *fc, const struct afs_acl *acl,
- struct afs_status_cb *scb)
+void yfs_fs_store_opaque_acl2(struct afs_operation *op)
{
- struct afs_vnode *vnode = fc->vnode;
+ struct afs_vnode_param *vp = &op->file[0];
struct afs_call *call;
- struct afs_net *net = afs_v2net(vnode);
+ struct afs_acl *acl = op->acl;
size_t size;
__be32 *bp;
_enter(",%x,{%llx:%llu},,",
- key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
+ key_serial(op->key), vp->fid.vid, vp->fid.vnode);
size = round_up(acl->size, 4);
- call = afs_alloc_flat_call(net, &yfs_RXYFSStoreOpaqueACL2,
+ call = afs_alloc_flat_call(op->net, &yfs_RXYFSStoreOpaqueACL2,
sizeof(__be32) * 2 +
sizeof(struct yfs_xdr_YFSFid) +
sizeof(__be32) + size,
sizeof(struct yfs_xdr_YFSFetchStatus) +
sizeof(struct yfs_xdr_YFSVolSync));
- if (!call) {
- fc->ac.error = -ENOMEM;
- return -ENOMEM;
- }
-
- call->key = fc->key;
- call->out_scb = scb;
- call->out_volsync = NULL;
+ if (!call)
+ return afs_op_nomem(op);
/* marshall the parameters */
bp = call->request;
bp = xdr_encode_u32(bp, YFSSTOREOPAQUEACL2);
bp = xdr_encode_u32(bp, 0); /* RPC flags */
- bp = xdr_encode_YFSFid(bp, &vnode->fid);
+ bp = xdr_encode_YFSFid(bp, &vp->fid);
bp = xdr_encode_u32(bp, acl->size);
memcpy(bp, acl->data, acl->size);
if (acl->size != size)
memset((void *)bp + acl->size, 0, size - acl->size);
yfs_check_req(call, bp);
- trace_afs_make_fs_call(call, &vnode->fid);
- afs_make_call(&fc->ac, call, GFP_KERNEL);
- return afs_wait_for_call_to_complete(call, &fc->ac);
+ trace_afs_make_fs_call(call, &vp->fid);
+ afs_make_op_call(op, call, GFP_KERNEL);
}
diff --git a/fs/aio.c b/fs/aio.c
index 6483f9274d5e..7ecddc2f38db 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -27,7 +27,6 @@
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/mman.h>
-#include <linux/mmu_context.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/timer.h>
@@ -520,7 +519,7 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
ctx->mmap_size = nr_pages * PAGE_SIZE;
pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size);
- if (down_write_killable(&mm->mmap_sem)) {
+ if (mmap_write_lock_killable(mm)) {
ctx->mmap_size = 0;
aio_free_ring(ctx);
return -EINTR;
@@ -529,7 +528,7 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size,
PROT_READ | PROT_WRITE,
MAP_SHARED, 0, &unused, NULL);
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
if (IS_ERR((void *)ctx->mmap_base)) {
ctx->mmap_size = 0;
aio_free_ring(ctx);
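This hunk is part of the tree-wide mmap locking API conversion: raw rwsem operations on mm->mmap_sem become mmap_*_lock() wrappers from linux/mmap_lock.h. The before/after pattern, shown as illustrative fragments:

	/* Old style: direct rwsem operations on mm->mmap_sem. */
	static int lock_mm_old(struct mm_struct *mm)
	{
		if (down_write_killable(&mm->mmap_sem))
			return -EINTR;
		/* ... modify the address space ... */
		up_write(&mm->mmap_sem);
		return 0;
	}

	/* New style: the wrapper API hides the underlying lock. */
	static int lock_mm_new(struct mm_struct *mm)
	{
		if (mmap_write_lock_killable(mm))
			return -EINTR;
		/* ... modify the address space ... */
		mmap_write_unlock(mm);
		return 0;
	}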
diff --git a/fs/bad_inode.c b/fs/bad_inode.c
index 8035d2a44561..54f0ce444272 100644
--- a/fs/bad_inode.c
+++ b/fs/bad_inode.c
@@ -15,6 +15,7 @@
#include <linux/time.h>
#include <linux/namei.h>
#include <linux/poll.h>
+#include <linux/fiemap.h>
static int bad_file_open(struct inode *inode, struct file *filp)
{
diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
index 8e8346a81723..3e84e9bb9084 100644
--- a/fs/binfmt_aout.c
+++ b/fs/binfmt_aout.c
@@ -151,7 +151,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
return -ENOMEM;
/* Flush all traces of the currently running executable */
- retval = flush_old_exec(bprm);
+ retval = begin_new_exec(bprm);
if (retval)
return retval;
@@ -174,7 +174,6 @@ static int load_aout_binary(struct linux_binprm * bprm)
if (retval < 0)
return retval;
- install_exec_creds(bprm);
if (N_MAGIC(ex) == OMAGIC) {
unsigned long text_addr, map_size;
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 8945671fe0e5..9fe3b51c116a 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -208,7 +208,7 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
size_t len = strlen(k_platform) + 1;
u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
- if (__copy_to_user(u_platform, k_platform, len))
+ if (copy_to_user(u_platform, k_platform, len))
return -EFAULT;
}
@@ -221,7 +221,7 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
size_t len = strlen(k_base_platform) + 1;
u_base_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
- if (__copy_to_user(u_base_platform, k_base_platform, len))
+ if (copy_to_user(u_base_platform, k_base_platform, len))
return -EFAULT;
}
@@ -231,7 +231,7 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
u_rand_bytes = (elf_addr_t __user *)
STACK_ALLOC(p, sizeof(k_rand_bytes));
- if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
+ if (copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
return -EFAULT;
/* Create the ELF interpreter info */
@@ -279,8 +279,8 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
NEW_AUX_ENT(AT_BASE_PLATFORM,
(elf_addr_t)(unsigned long)u_base_platform);
}
- if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
- NEW_AUX_ENT(AT_EXECFD, bprm->interp_data);
+ if (bprm->have_execfd) {
+ NEW_AUX_ENT(AT_EXECFD, bprm->execfd);
}
#undef NEW_AUX_ENT
/* AT_NULL is zero; clear the rest too */
@@ -314,21 +314,21 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
return -EFAULT;
/* Now, let's put argc (and argv, envp if appropriate) on the stack */
- if (__put_user(argc, sp++))
+ if (put_user(argc, sp++))
return -EFAULT;
/* Populate list of argv pointers back to argv strings. */
p = mm->arg_end = mm->arg_start;
while (argc-- > 0) {
size_t len;
- if (__put_user((elf_addr_t)p, sp++))
+ if (put_user((elf_addr_t)p, sp++))
return -EFAULT;
len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
if (!len || len > MAX_ARG_STRLEN)
return -EINVAL;
p += len;
}
- if (__put_user(0, sp++))
+ if (put_user(0, sp++))
return -EFAULT;
mm->arg_end = p;
@@ -336,14 +336,14 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
mm->env_end = mm->env_start = p;
while (envc-- > 0) {
size_t len;
- if (__put_user((elf_addr_t)p, sp++))
+ if (put_user((elf_addr_t)p, sp++))
return -EFAULT;
len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
if (!len || len > MAX_ARG_STRLEN)
return -EINVAL;
p += len;
}
- if (__put_user(0, sp++))
+ if (put_user(0, sp++))
return -EFAULT;
mm->env_end = p;
@@ -353,8 +353,6 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
return 0;
}
-#ifndef elf_map
-
static unsigned long elf_map(struct file *filep, unsigned long addr,
const struct elf_phdr *eppnt, int prot, int type,
unsigned long total_size)
@@ -394,8 +392,6 @@ static unsigned long elf_map(struct file *filep, unsigned long addr,
return(map_addr);
}
-#endif /* !elf_map */
-
static unsigned long total_mapping_size(const struct elf_phdr *cmds, int nr)
{
int i, first_idx = -1, last_idx = -1;
@@ -975,7 +971,7 @@ out_free_interp:
goto out_free_dentry;
/* Flush all traces of the currently running executable */
- retval = flush_old_exec(bprm);
+ retval = begin_new_exec(bprm);
if (retval)
goto out_free_dentry;
@@ -989,7 +985,6 @@ out_free_interp:
current->flags |= PF_RANDOMIZE;
setup_new_exec(bprm);
- install_exec_creds(bprm);
/* Do this so that we can load the interpreter, if need be. We will
change some of these later */
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index d9501a86cec9..0f45521b237c 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -338,7 +338,7 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm)
interp_params.flags |= ELF_FDPIC_FLAG_CONSTDISP;
/* flush all traces of the currently running executable */
- retval = flush_old_exec(bprm);
+ retval = begin_new_exec(bprm);
if (retval)
goto error;
@@ -434,7 +434,6 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm)
current->mm->start_stack = current->mm->start_brk + stack_size;
#endif
- install_exec_creds(bprm);
if (create_elf_fdpic_tables(bprm, current->mm,
&exec_params, &interp_params) < 0)
goto error;
@@ -537,7 +536,7 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
platform_len = strlen(k_platform) + 1;
sp -= platform_len;
u_platform = (char __user *) sp;
- if (__copy_to_user(u_platform, k_platform, platform_len) != 0)
+ if (copy_to_user(u_platform, k_platform, platform_len) != 0)
return -EFAULT;
}
@@ -552,7 +551,7 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
platform_len = strlen(k_base_platform) + 1;
sp -= platform_len;
u_base_platform = (char __user *) sp;
- if (__copy_to_user(u_base_platform, k_base_platform, platform_len) != 0)
+ if (copy_to_user(u_base_platform, k_base_platform, platform_len) != 0)
return -EFAULT;
}
@@ -589,7 +588,7 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
nitems = 1 + DLINFO_ITEMS + (k_platform ? 1 : 0) +
(k_base_platform ? 1 : 0) + AT_VECTOR_SIZE_ARCH;
- if (bprm->interp_flags & BINPRM_FLAGS_EXECFD)
+ if (bprm->have_execfd)
nitems++;
csp = sp;
@@ -604,11 +603,13 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
/* put the ELF interpreter info on the stack */
#define NEW_AUX_ENT(id, val) \
do { \
- struct { unsigned long _id, _val; } __user *ent; \
+ struct { unsigned long _id, _val; } __user *ent, v; \
\
ent = (void __user *) csp; \
- __put_user((id), &ent[nr]._id); \
- __put_user((val), &ent[nr]._val); \
+ v._id = (id); \
+ v._val = (val); \
+ if (copy_to_user(ent + nr, &v, sizeof(v))) \
+ return -EFAULT; \
nr++; \
} while (0)
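The rewritten macro replaces two unchecked __put_user() calls with a single checked copy_to_user() of a local struct, so a fault on the user stack now propagates -EFAULT instead of being silently ignored. Expanded for one entry, the new form is roughly:

	/* What NEW_AUX_ENT(AT_PAGESZ, PAGE_SIZE) now expands to, roughly: */
	struct { unsigned long _id, _val; } __user *ent, v;

	ent = (void __user *) csp;
	v._id  = AT_PAGESZ;
	v._val = PAGE_SIZE;
	if (copy_to_user(ent + nr, &v, sizeof(v)))
		return -EFAULT;
	nr++;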
@@ -629,10 +630,10 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
(elf_addr_t) (unsigned long) u_base_platform);
}
- if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
+ if (bprm->have_execfd) {
nr = 0;
csp -= 2 * sizeof(unsigned long);
- NEW_AUX_ENT(AT_EXECFD, bprm->interp_data);
+ NEW_AUX_ENT(AT_EXECFD, bprm->execfd);
}
nr = 0;
@@ -675,7 +676,8 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
/* stack argc */
csp -= sizeof(unsigned long);
- __put_user(bprm->argc, (unsigned long __user *) csp);
+ if (put_user(bprm->argc, (unsigned long __user *) csp))
+ return -EFAULT;
BUG_ON(csp != sp);
@@ -689,25 +691,29 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
p = (char __user *) current->mm->arg_start;
for (loop = bprm->argc; loop > 0; loop--) {
- __put_user((elf_caddr_t) p, argv++);
+ if (put_user((elf_caddr_t) p, argv++))
+ return -EFAULT;
len = strnlen_user(p, MAX_ARG_STRLEN);
if (!len || len > MAX_ARG_STRLEN)
return -EINVAL;
p += len;
}
- __put_user(NULL, argv);
+ if (put_user(NULL, argv))
+ return -EFAULT;
current->mm->arg_end = (unsigned long) p;
/* fill in the envv[] array */
current->mm->env_start = (unsigned long) p;
for (loop = bprm->envc; loop > 0; loop--) {
- __put_user((elf_caddr_t)(unsigned long) p, envp++);
+ if (put_user((elf_caddr_t)(unsigned long) p, envp++))
+ return -EFAULT;
len = strnlen_user(p, MAX_ARG_STRLEN);
if (!len || len > MAX_ARG_STRLEN)
return -EINVAL;
p += len;
}
- __put_user(NULL, envp);
+ if (put_user(NULL, envp))
+ return -EFAULT;
current->mm->env_end = (unsigned long) p;
mm->start_stack = (unsigned long) sp;
@@ -849,8 +855,8 @@ static int elf_fdpic_map_file(struct elf_fdpic_params *params,
tmp = phdr->p_memsz / sizeof(Elf32_Dyn);
dyn = (Elf32_Dyn __user *)params->dynamic_addr;
- __get_user(d_tag, &dyn[tmp - 1].d_tag);
- if (d_tag != 0)
+ if (get_user(d_tag, &dyn[tmp - 1].d_tag) ||
+ d_tag != 0)
goto dynamic_error;
break;
}
diff --git a/fs/binfmt_em86.c b/fs/binfmt_em86.c
index 466497860c62..06b9b9fddf70 100644
--- a/fs/binfmt_em86.c
+++ b/fs/binfmt_em86.c
@@ -48,10 +48,6 @@ static int load_em86(struct linux_binprm *bprm)
if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE)
return -ENOENT;
- allow_write_access(bprm->file);
- fput(bprm->file);
- bprm->file = NULL;
-
/* Unlike in the script case, we don't have to do any hairy
* parsing to find our interpreter... it's hardcoded!
*/
@@ -68,15 +64,15 @@ static int load_em86(struct linux_binprm *bprm)
* user environment and arguments are stored.
*/
remove_arg_zero(bprm);
- retval = copy_strings_kernel(1, &bprm->filename, bprm);
+ retval = copy_string_kernel(bprm->filename, bprm);
if (retval < 0) return retval;
bprm->argc++;
if (i_arg) {
- retval = copy_strings_kernel(1, &i_arg, bprm);
+ retval = copy_string_kernel(i_arg, bprm);
if (retval < 0) return retval;
bprm->argc++;
}
- retval = copy_strings_kernel(1, &i_name, bprm);
+ retval = copy_string_kernel(i_name, bprm);
if (retval < 0) return retval;
bprm->argc++;
@@ -89,13 +85,8 @@ static int load_em86(struct linux_binprm *bprm)
if (IS_ERR(file))
return PTR_ERR(file);
- bprm->file = file;
-
- retval = prepare_binprm(bprm);
- if (retval < 0)
- return retval;
-
- return search_binary_handler(bprm);
+ bprm->interpreter = file;
+ return 0;
}
static struct linux_binfmt em86_format = {
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index 831a2b25ba79..f2f9086ebe98 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -138,35 +138,40 @@ static int create_flat_tables(struct linux_binprm *bprm, unsigned long arg_start
current->mm->start_stack = (unsigned long)sp & -FLAT_STACK_ALIGN;
sp = (unsigned long __user *)current->mm->start_stack;
- __put_user(bprm->argc, sp++);
+ if (put_user(bprm->argc, sp++))
+ return -EFAULT;
if (IS_ENABLED(CONFIG_BINFMT_FLAT_ARGVP_ENVP_ON_STACK)) {
unsigned long argv, envp;
argv = (unsigned long)(sp + 2);
envp = (unsigned long)(sp + 2 + bprm->argc + 1);
- __put_user(argv, sp++);
- __put_user(envp, sp++);
+ if (put_user(argv, sp++) || put_user(envp, sp++))
+ return -EFAULT;
}
current->mm->arg_start = (unsigned long)p;
for (i = bprm->argc; i > 0; i--) {
- __put_user((unsigned long)p, sp++);
+ if (put_user((unsigned long)p, sp++))
+ return -EFAULT;
len = strnlen_user(p, MAX_ARG_STRLEN);
if (!len || len > MAX_ARG_STRLEN)
return -EINVAL;
p += len;
}
- __put_user(0, sp++);
+ if (put_user(0, sp++))
+ return -EFAULT;
current->mm->arg_end = (unsigned long)p;
current->mm->env_start = (unsigned long) p;
for (i = bprm->envc; i > 0; i--) {
- __put_user((unsigned long)p, sp++);
+ if (put_user((unsigned long)p, sp++))
+ return -EFAULT;
len = strnlen_user(p, MAX_ARG_STRLEN);
if (!len || len > MAX_ARG_STRLEN)
return -EINVAL;
p += len;
}
- __put_user(0, sp++);
+ if (put_user(0, sp++))
+ return -EFAULT;
current->mm->env_end = (unsigned long)p;
return 0;
@@ -534,7 +539,7 @@ static int load_flat_file(struct linux_binprm *bprm,
/* Flush all traces of the currently running executable */
if (id == 0) {
- ret = flush_old_exec(bprm);
+ ret = begin_new_exec(bprm);
if (ret)
goto err;
@@ -854,7 +859,7 @@ static int load_flat_file(struct linux_binprm *bprm,
#endif /* CONFIG_BINFMT_FLAT_OLD */
}
- flush_icache_range(start_code, end_code);
+ flush_icache_user_range(start_code, end_code);
/* zero the BSS, BRK and stack areas */
if (clear_user((void __user *)(datapos + data_len), bss_len +
@@ -963,8 +968,6 @@ static int load_flat_binary(struct linux_binprm *bprm)
}
}
- install_exec_creds(bprm);
-
set_binfmt(&flat_format);
#ifdef CONFIG_MMU
@@ -998,7 +1001,8 @@ static int load_flat_binary(struct linux_binprm *bprm)
unsigned long __user *sp;
current->mm->start_stack -= sizeof(unsigned long);
sp = (unsigned long __user *)current->mm->start_stack;
- __put_user(start_addr, sp);
+ if (put_user(start_addr, sp))
+ return -EFAULT;
start_addr = libinfo.lib_list[i].entry;
}
}
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index cdb45829354d..3880a82da1dc 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -134,7 +134,6 @@ static int load_misc_binary(struct linux_binprm *bprm)
Node *fmt;
struct file *interp_file = NULL;
int retval;
- int fd_binary = -1;
retval = -ENOEXEC;
if (!enabled)
@@ -160,51 +159,25 @@ static int load_misc_binary(struct linux_binprm *bprm)
goto ret;
}
- if (fmt->flags & MISC_FMT_OPEN_BINARY) {
+ if (fmt->flags & MISC_FMT_OPEN_BINARY)
+ bprm->have_execfd = 1;
- /* if the binary should be opened on behalf of the
- * interpreter than keep it open and assign descriptor
- * to it
- */
- fd_binary = get_unused_fd_flags(0);
- if (fd_binary < 0) {
- retval = fd_binary;
- goto ret;
- }
- fd_install(fd_binary, bprm->file);
-
- /* if the binary is not readable than enforce mm->dumpable=0
- regardless of the interpreter's permissions */
- would_dump(bprm, bprm->file);
-
- allow_write_access(bprm->file);
- bprm->file = NULL;
-
- /* mark the bprm that fd should be passed to interp */
- bprm->interp_flags |= BINPRM_FLAGS_EXECFD;
- bprm->interp_data = fd_binary;
-
- } else {
- allow_write_access(bprm->file);
- fput(bprm->file);
- bprm->file = NULL;
- }
/* make argv[1] be the path to the binary */
- retval = copy_strings_kernel(1, &bprm->interp, bprm);
+ retval = copy_string_kernel(bprm->interp, bprm);
if (retval < 0)
- goto error;
+ goto ret;
bprm->argc++;
/* add the interp as argv[0] */
- retval = copy_strings_kernel(1, &fmt->interpreter, bprm);
+ retval = copy_string_kernel(fmt->interpreter, bprm);
if (retval < 0)
- goto error;
+ goto ret;
bprm->argc++;
/* Update interp in case binfmt_script needs it. */
retval = bprm_change_interp(fmt->interpreter, bprm);
if (retval < 0)
- goto error;
+ goto ret;
if (fmt->flags & MISC_FMT_OPEN_FILE) {
interp_file = file_clone_open(fmt->interp_file);
@@ -215,38 +188,16 @@ static int load_misc_binary(struct linux_binprm *bprm)
}
retval = PTR_ERR(interp_file);
if (IS_ERR(interp_file))
- goto error;
-
- bprm->file = interp_file;
- if (fmt->flags & MISC_FMT_CREDENTIALS) {
- loff_t pos = 0;
-
- /*
- * No need to call prepare_binprm(), it's already been
- * done. bprm->buf is stale, update from interp_file.
- */
- memset(bprm->buf, 0, BINPRM_BUF_SIZE);
- retval = kernel_read(bprm->file, bprm->buf, BINPRM_BUF_SIZE,
- &pos);
- } else
- retval = prepare_binprm(bprm);
-
- if (retval < 0)
- goto error;
+ goto ret;
- retval = search_binary_handler(bprm);
- if (retval < 0)
- goto error;
+ bprm->interpreter = interp_file;
+ if (fmt->flags & MISC_FMT_CREDENTIALS)
+ bprm->execfd_creds = 1;
+ retval = 0;
ret:
dput(fmt->dentry);
return retval;
-error:
- if (fd_binary > 0)
- ksys_close(fd_binary);
- bprm->interp_flags = 0;
- bprm->interp_data = 0;
- goto ret;
}
/* Command parsers */
diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c
index e9e6a6f4a35f..1b6625e95958 100644
--- a/fs/binfmt_script.c
+++ b/fs/binfmt_script.c
@@ -16,14 +16,14 @@
#include <linux/fs.h>
static inline bool spacetab(char c) { return c == ' ' || c == '\t'; }
-static inline char *next_non_spacetab(char *first, const char *last)
+static inline const char *next_non_spacetab(const char *first, const char *last)
{
for (; first <= last; first++)
if (!spacetab(*first))
return first;
return NULL;
}
-static inline char *next_terminator(char *first, const char *last)
+static inline const char *next_terminator(const char *first, const char *last)
{
for (; first <= last; first++)
if (spacetab(*first) || !*first)
@@ -33,8 +33,7 @@ static inline char *next_terminator(char *first, const char *last)
static int load_script(struct linux_binprm *bprm)
{
- const char *i_arg, *i_name;
- char *cp, *buf_end;
+ const char *i_name, *i_sep, *i_arg, *i_end, *buf_end;
struct file *file;
int retval;
@@ -43,20 +42,6 @@ static int load_script(struct linux_binprm *bprm)
return -ENOEXEC;
/*
- * If the script filename will be inaccessible after exec, typically
- * because it is a "/dev/fd/<fd>/.." path against an O_CLOEXEC fd, give
- * up now (on the assumption that the interpreter will want to load
- * this file).
- */
- if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE)
- return -ENOENT;
-
- /* Release since we are not mapping a binary into memory. */
- allow_write_access(bprm->file);
- fput(bprm->file);
- bprm->file = NULL;
-
- /*
* This section handles parsing the #! line into separate
* interpreter path and argument strings. We must be careful
* because bprm->buf is not yet guaranteed to be NUL-terminated
@@ -71,39 +56,43 @@ static int load_script(struct linux_binprm *bprm)
* parse them on its own.
*/
buf_end = bprm->buf + sizeof(bprm->buf) - 1;
- cp = strnchr(bprm->buf, sizeof(bprm->buf), '\n');
- if (!cp) {
- cp = next_non_spacetab(bprm->buf + 2, buf_end);
- if (!cp)
+ i_end = strnchr(bprm->buf, sizeof(bprm->buf), '\n');
+ if (!i_end) {
+ i_end = next_non_spacetab(bprm->buf + 2, buf_end);
+ if (!i_end)
return -ENOEXEC; /* Entire buf is spaces/tabs */
/*
* If there is no later space/tab/NUL we must assume the
* interpreter path is truncated.
*/
- if (!next_terminator(cp, buf_end))
+ if (!next_terminator(i_end, buf_end))
return -ENOEXEC;
- cp = buf_end;
+ i_end = buf_end;
}
- /* NUL-terminate the buffer and any trailing spaces/tabs. */
- *cp = '\0';
- while (cp > bprm->buf) {
- cp--;
- if ((*cp == ' ') || (*cp == '\t'))
- *cp = '\0';
- else
- break;
- }
- for (cp = bprm->buf+2; (*cp == ' ') || (*cp == '\t'); cp++);
- if (*cp == '\0')
+ /* Trim any trailing spaces/tabs from i_end */
+ while (spacetab(i_end[-1]))
+ i_end--;
+
+ /* Skip over leading spaces/tabs */
+ i_name = next_non_spacetab(bprm->buf+2, i_end);
+ if (!i_name || (i_name == i_end))
return -ENOEXEC; /* No interpreter name found */
- i_name = cp;
+
+ /* Is there an optional argument? */
i_arg = NULL;
- for ( ; *cp && (*cp != ' ') && (*cp != '\t'); cp++)
- /* nothing */ ;
- while ((*cp == ' ') || (*cp == '\t'))
- *cp++ = '\0';
- if (*cp)
- i_arg = cp;
+ i_sep = next_terminator(i_name, i_end);
+ if (i_sep && (*i_sep != '\0'))
+ i_arg = next_non_spacetab(i_sep, i_end);
+
+ /*
+ * If the script filename will be inaccessible after exec, typically
+ * because it is a "/dev/fd/<fd>/.." path against an O_CLOEXEC fd, give
+ * up now (on the assumption that the interpreter will want to load
+ * this file).
+ */
+ if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE)
+ return -ENOENT;
+
/*
* OK, we've parsed out the interpreter name and
* (optional) argument.
@@ -117,17 +106,19 @@ static int load_script(struct linux_binprm *bprm)
retval = remove_arg_zero(bprm);
if (retval)
return retval;
- retval = copy_strings_kernel(1, &bprm->interp, bprm);
+ retval = copy_string_kernel(bprm->interp, bprm);
if (retval < 0)
return retval;
bprm->argc++;
+ *((char *)i_end) = '\0';
if (i_arg) {
- retval = copy_strings_kernel(1, &i_arg, bprm);
+ *((char *)i_sep) = '\0';
+ retval = copy_string_kernel(i_arg, bprm);
if (retval < 0)
return retval;
bprm->argc++;
}
- retval = copy_strings_kernel(1, &i_name, bprm);
+ retval = copy_string_kernel(i_name, bprm);
if (retval)
return retval;
bprm->argc++;
@@ -142,11 +133,8 @@ static int load_script(struct linux_binprm *bprm)
if (IS_ERR(file))
return PTR_ERR(file);
- bprm->file = file;
- retval = prepare_binprm(bprm);
- if (retval < 0)
- return retval;
- return search_binary_handler(bprm);
+ bprm->interpreter = file;
+ return 0;
}
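load_script() (like load_em86() and load_misc_binary() above) no longer recurses into search_binary_handler(); it hands the opened interpreter back in bprm->interpreter and returns 0, leaving the exec core to run another format-matching pass. A hedged sketch of the consuming side (the function name and loop details are assumptions for illustration, not part of this patch):

	/* Hypothetical sketch of the exec core consuming bprm->interpreter:
	 * each binfmt either completes the exec or nominates an interpreter
	 * file for the next bounded iteration.
	 */
	static int exec_binprm_sketch(struct linux_binprm *bprm)
	{
		int ret, depth;

		for (depth = 0; depth < 5; depth++) {	/* bounded recursion */
			ret = search_binary_handler(bprm);
			if (ret < 0 || !bprm->interpreter)
				return ret;

			/* A loader set bprm->interpreter: restart with it. */
			bprm->file = bprm->interpreter;
			bprm->interpreter = NULL;
		}
		return -ELOOP;
	}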
static struct linux_binfmt script_format = {
diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
index 575636f6491e..68b95ad82126 100644
--- a/fs/btrfs/Kconfig
+++ b/fs/btrfs/Kconfig
@@ -14,6 +14,7 @@ config BTRFS_FS
select LZO_DECOMPRESS
select ZSTD_COMPRESS
select ZSTD_DECOMPRESS
+ select FS_IOMAP
select RAID6_PQ
select XOR_BLOCKS
select SRCU
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 0cc02577577b..d888e71e66b6 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -13,6 +13,7 @@
#include "transaction.h"
#include "delayed-ref.h"
#include "locking.h"
+#include "misc.h"
/* Just an arbitrary number so we can be sure this happened */
#define BACKREF_FOUND_SHARED 6
@@ -537,18 +538,13 @@ static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
const u64 *extent_item_pos, bool ignore_offset)
{
struct btrfs_root *root;
- struct btrfs_key root_key;
struct extent_buffer *eb;
int ret = 0;
int root_level;
int level = ref->level;
struct btrfs_key search_key = ref->key_for_search;
- root_key.objectid = ref->root_id;
- root_key.type = BTRFS_ROOT_ITEM_KEY;
- root_key.offset = (u64)-1;
-
- root = btrfs_get_fs_root(fs_info, &root_key, false);
+ root = btrfs_get_fs_root(fs_info, ref->root_id, false);
if (IS_ERR(root)) {
ret = PTR_ERR(root);
goto out_free;
@@ -2295,3 +2291,832 @@ void free_ipath(struct inode_fs_paths *ipath)
kvfree(ipath->fspath);
kfree(ipath);
}
+
+struct btrfs_backref_iter *btrfs_backref_iter_alloc(
+ struct btrfs_fs_info *fs_info, gfp_t gfp_flag)
+{
+ struct btrfs_backref_iter *ret;
+
+ ret = kzalloc(sizeof(*ret), gfp_flag);
+ if (!ret)
+ return NULL;
+
+ ret->path = btrfs_alloc_path();
+	if (!ret->path) {
+ kfree(ret);
+ return NULL;
+ }
+
+ /* Current backref iterator only supports iteration in commit root */
+ ret->path->search_commit_root = 1;
+ ret->path->skip_locking = 1;
+ ret->fs_info = fs_info;
+
+ return ret;
+}
+
+int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)
+{
+ struct btrfs_fs_info *fs_info = iter->fs_info;
+ struct btrfs_path *path = iter->path;
+ struct btrfs_extent_item *ei;
+ struct btrfs_key key;
+ int ret;
+
+ key.objectid = bytenr;
+ key.type = BTRFS_METADATA_ITEM_KEY;
+ key.offset = (u64)-1;
+ iter->bytenr = bytenr;
+
+ ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
+ if (ret < 0)
+ return ret;
+ if (ret == 0) {
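+		/*
+		 * An exact match on offset (u64)-1 cannot exist in a valid
+		 * extent tree, so ret == 0 means the tree is corrupted.
+		 */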
+ ret = -EUCLEAN;
+ goto release;
+ }
+ if (path->slots[0] == 0) {
+ WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
+ ret = -EUCLEAN;
+ goto release;
+ }
+ path->slots[0]--;
+
+ btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+ if ((key.type != BTRFS_EXTENT_ITEM_KEY &&
+ key.type != BTRFS_METADATA_ITEM_KEY) || key.objectid != bytenr) {
+ ret = -ENOENT;
+ goto release;
+ }
+ memcpy(&iter->cur_key, &key, sizeof(key));
+ iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
+ path->slots[0]);
+ iter->end_ptr = (u32)(iter->item_ptr +
+ btrfs_item_size_nr(path->nodes[0], path->slots[0]));
+ ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ struct btrfs_extent_item);
+
+ /*
+	 * Only iteration of tree backrefs is supported for now.
+	 *
+	 * This is an extra precaution for non-skinny-metadata, where
+	 * EXTENT_ITEM is also used for tree blocks; in that case we can only
+	 * use the extent flags to determine if it's a tree block.
+ */
+ if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) {
+ ret = -ENOTSUPP;
+ goto release;
+ }
+ iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei));
+
+ /* If there is no inline backref, go search for keyed backref */
+ if (iter->cur_ptr >= iter->end_ptr) {
+ ret = btrfs_next_item(fs_info->extent_root, path);
+
+ /* No inline nor keyed ref */
+ if (ret > 0) {
+ ret = -ENOENT;
+ goto release;
+ }
+ if (ret < 0)
+ goto release;
+
+ btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key,
+ path->slots[0]);
+ if (iter->cur_key.objectid != bytenr ||
+ (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
+ iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) {
+ ret = -ENOENT;
+ goto release;
+ }
+ iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
+ path->slots[0]);
+ iter->item_ptr = iter->cur_ptr;
+ iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size_nr(
+ path->nodes[0], path->slots[0]));
+ }
+
+ return 0;
+release:
+ btrfs_backref_iter_release(iter);
+ return ret;
+}
+
+/*
+ * Go to the next backref item of the current bytenr; it can be either inlined
+ * or keyed.
+ *
+ * Caller needs to check whether it's an inline ref or not via iter->cur_key.
+ *
+ * Return 0 if we got the next backref without problem.
+ * Return >0 if there is no extra backref for this bytenr.
+ * Return <0 if something went wrong.
+ */
+int btrfs_backref_iter_next(struct btrfs_backref_iter *iter)
+{
+ struct extent_buffer *eb = btrfs_backref_get_eb(iter);
+ struct btrfs_path *path = iter->path;
+ struct btrfs_extent_inline_ref *iref;
+ int ret;
+ u32 size;
+
+ if (btrfs_backref_iter_is_inline_ref(iter)) {
+ /* We're still inside the inline refs */
+ ASSERT(iter->cur_ptr < iter->end_ptr);
+
+ if (btrfs_backref_has_tree_block_info(iter)) {
+ /* First tree block info */
+ size = sizeof(struct btrfs_tree_block_info);
+ } else {
+ /* Use inline ref type to determine the size */
+ int type;
+
+ iref = (struct btrfs_extent_inline_ref *)
+ ((unsigned long)iter->cur_ptr);
+ type = btrfs_extent_inline_ref_type(eb, iref);
+
+ size = btrfs_extent_inline_ref_size(type);
+ }
+ iter->cur_ptr += size;
+ if (iter->cur_ptr < iter->end_ptr)
+ return 0;
+
+ /* All inline items iterated, fall through */
+ }
+
+ /* We're at keyed items, there is no inline item, go to the next one */
+ ret = btrfs_next_item(iter->fs_info->extent_root, iter->path);
+ if (ret)
+ return ret;
+
+ btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]);
+ if (iter->cur_key.objectid != iter->bytenr ||
+ (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
+ iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY))
+ return 1;
+ iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
+ path->slots[0]);
+ iter->cur_ptr = iter->item_ptr;
+ iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size_nr(path->nodes[0],
+ path->slots[0]);
+ return 0;
+}
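
Together with btrfs_backref_iter_start() above, this yields the iteration pattern that btrfs_backref_add_tree_node() below is built on. A condensed sketch of a caller (fs_info and bytenr are the caller's; error handling elided):

	/* Sketch: walk all tree backrefs of one extent */
	struct btrfs_backref_iter *iter;
	int ret;

	iter = btrfs_backref_iter_alloc(fs_info, GFP_NOFS);
	if (!iter)
		return -ENOMEM;

	ret = btrfs_backref_iter_start(iter, bytenr);
	for (; ret == 0; ret = btrfs_backref_iter_next(iter)) {
		/*
		 * iter->cur_key says whether the current ref is inline
		 * (EXTENT_ITEM/METADATA_ITEM key) or keyed
		 * (TREE_BLOCK_REF/SHARED_BLOCK_REF key).
		 */
	}
	/* ret > 0: all backrefs visited; ret < 0: error */

	btrfs_backref_iter_release(iter);
	btrfs_backref_iter_free(iter);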
+
+void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
+ struct btrfs_backref_cache *cache, int is_reloc)
+{
+ int i;
+
+ cache->rb_root = RB_ROOT;
+ for (i = 0; i < BTRFS_MAX_LEVEL; i++)
+ INIT_LIST_HEAD(&cache->pending[i]);
+ INIT_LIST_HEAD(&cache->changed);
+ INIT_LIST_HEAD(&cache->detached);
+ INIT_LIST_HEAD(&cache->leaves);
+ INIT_LIST_HEAD(&cache->pending_edge);
+ INIT_LIST_HEAD(&cache->useless_node);
+ cache->fs_info = fs_info;
+ cache->is_reloc = is_reloc;
+}
+
+struct btrfs_backref_node *btrfs_backref_alloc_node(
+ struct btrfs_backref_cache *cache, u64 bytenr, int level)
+{
+ struct btrfs_backref_node *node;
+
+ ASSERT(level >= 0 && level < BTRFS_MAX_LEVEL);
+ node = kzalloc(sizeof(*node), GFP_NOFS);
+ if (!node)
+ return node;
+
+ INIT_LIST_HEAD(&node->list);
+ INIT_LIST_HEAD(&node->upper);
+ INIT_LIST_HEAD(&node->lower);
+ RB_CLEAR_NODE(&node->rb_node);
+ cache->nr_nodes++;
+ node->level = level;
+ node->bytenr = bytenr;
+
+ return node;
+}
+
+struct btrfs_backref_edge *btrfs_backref_alloc_edge(
+ struct btrfs_backref_cache *cache)
+{
+ struct btrfs_backref_edge *edge;
+
+ edge = kzalloc(sizeof(*edge), GFP_NOFS);
+ if (edge)
+ cache->nr_edges++;
+ return edge;
+}
+
+/*
+ * Drop the backref node from cache, also cleaning up all its
+ * upper edges and any uncached nodes in the path.
+ *
+ * This cleanup happens bottom up, thus the node should either
+ * be the lowest node in the cache or a detached node.
+ */
+void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
+ struct btrfs_backref_node *node)
+{
+ struct btrfs_backref_node *upper;
+ struct btrfs_backref_edge *edge;
+
+ if (!node)
+ return;
+
+ BUG_ON(!node->lowest && !node->detached);
+ while (!list_empty(&node->upper)) {
+ edge = list_entry(node->upper.next, struct btrfs_backref_edge,
+ list[LOWER]);
+ upper = edge->node[UPPER];
+ list_del(&edge->list[LOWER]);
+ list_del(&edge->list[UPPER]);
+ btrfs_backref_free_edge(cache, edge);
+
+ if (RB_EMPTY_NODE(&upper->rb_node)) {
+ BUG_ON(!list_empty(&node->upper));
+ btrfs_backref_drop_node(cache, node);
+ node = upper;
+ node->lowest = 1;
+ continue;
+ }
+ /*
+		 * Add the node to the leaf node list if no other child block
+		 * is cached.
+ */
+ if (list_empty(&upper->lower)) {
+ list_add_tail(&upper->lower, &cache->leaves);
+ upper->lowest = 1;
+ }
+ }
+
+ btrfs_backref_drop_node(cache, node);
+}
+
+/*
+ * Release all nodes/edges from current cache
+ */
+void btrfs_backref_release_cache(struct btrfs_backref_cache *cache)
+{
+ struct btrfs_backref_node *node;
+ int i;
+
+ while (!list_empty(&cache->detached)) {
+ node = list_entry(cache->detached.next,
+ struct btrfs_backref_node, list);
+ btrfs_backref_cleanup_node(cache, node);
+ }
+
+ while (!list_empty(&cache->leaves)) {
+ node = list_entry(cache->leaves.next,
+ struct btrfs_backref_node, lower);
+ btrfs_backref_cleanup_node(cache, node);
+ }
+
+ cache->last_trans = 0;
+
+ for (i = 0; i < BTRFS_MAX_LEVEL; i++)
+ ASSERT(list_empty(&cache->pending[i]));
+ ASSERT(list_empty(&cache->pending_edge));
+ ASSERT(list_empty(&cache->useless_node));
+ ASSERT(list_empty(&cache->changed));
+ ASSERT(list_empty(&cache->detached));
+ ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
+ ASSERT(!cache->nr_nodes);
+ ASSERT(!cache->nr_edges);
+}
+
+/*
+ * Handle direct tree backref
+ *
+ * A direct tree backref means the backref item shows its parent bytenr
+ * directly. This is the case for SHARED_BLOCK_REF backrefs (keyed or inlined).
+ *
+ * @ref_key: The converted backref key.
+ * For keyed backref, it's the item key.
+ * For inlined backref, objectid is the bytenr,
+ * type is btrfs_inline_ref_type, offset is
+ * btrfs_inline_ref_offset.
+ */
+static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
+ struct btrfs_key *ref_key,
+ struct btrfs_backref_node *cur)
+{
+ struct btrfs_backref_edge *edge;
+ struct btrfs_backref_node *upper;
+ struct rb_node *rb_node;
+
+ ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY);
+
+ /* Only reloc root uses backref pointing to itself */
+ if (ref_key->objectid == ref_key->offset) {
+ struct btrfs_root *root;
+
+ cur->is_reloc_root = 1;
+ /* Only reloc backref cache cares about a specific root */
+ if (cache->is_reloc) {
+ root = find_reloc_root(cache->fs_info, cur->bytenr);
+ if (WARN_ON(!root))
+ return -ENOENT;
+ cur->root = root;
+ } else {
+ /*
+ * For generic purpose backref cache, reloc root node
+ * is useless.
+ */
+ list_add(&cur->list, &cache->useless_node);
+ }
+ return 0;
+ }
+
+ edge = btrfs_backref_alloc_edge(cache);
+ if (!edge)
+ return -ENOMEM;
+
+ rb_node = rb_simple_search(&cache->rb_root, ref_key->offset);
+ if (!rb_node) {
+ /* Parent node not yet cached */
+ upper = btrfs_backref_alloc_node(cache, ref_key->offset,
+ cur->level + 1);
+ if (!upper) {
+ btrfs_backref_free_edge(cache, edge);
+ return -ENOMEM;
+ }
+
+ /*
+		 * Backrefs for the upper level block aren't cached, add the
+		 * block to the pending list.
+ */
+ list_add_tail(&edge->list[UPPER], &cache->pending_edge);
+ } else {
+ /* Parent node already cached */
+ upper = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
+ ASSERT(upper->checked);
+ INIT_LIST_HEAD(&edge->list[UPPER]);
+ }
+ btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER);
+ return 0;
+}
+
+/*
+ * Handle indirect tree backref
+ *
+ * An indirect tree backref means we only know which tree the node belongs to.
+ * We still need to do a tree search to find out the parents. This is the case
+ * for TREE_BLOCK_REF backrefs (keyed or inlined).
+ *
+ * @ref_key: The same as @ref_key in handle_direct_tree_backref()
+ * @tree_key: The first key of this tree block.
+ * @path:	A clean (released) path, to avoid allocating a path every time
+ *		the function gets called.
+ */
+static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
+ struct btrfs_path *path,
+ struct btrfs_key *ref_key,
+ struct btrfs_key *tree_key,
+ struct btrfs_backref_node *cur)
+{
+ struct btrfs_fs_info *fs_info = cache->fs_info;
+ struct btrfs_backref_node *upper;
+ struct btrfs_backref_node *lower;
+ struct btrfs_backref_edge *edge;
+ struct extent_buffer *eb;
+ struct btrfs_root *root;
+ struct rb_node *rb_node;
+ int level;
+ bool need_check = true;
+ int ret;
+
+ root = btrfs_get_fs_root(fs_info, ref_key->offset, false);
+ if (IS_ERR(root))
+ return PTR_ERR(root);
+ if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
+ cur->cowonly = 1;
+
+ if (btrfs_root_level(&root->root_item) == cur->level) {
+ /* Tree root */
+ ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr);
+ /*
+ * For reloc backref cache, we may ignore reloc root. But for
+ * general purpose backref cache, we can't rely on
+ * btrfs_should_ignore_reloc_root() as it may conflict with
+		 * a currently running relocation and lead to a missing root.
+		 *
+		 * For the general purpose backref cache, reloc root detection
+		 * relies completely on the direct backref (key->offset is the
+		 * parent bytenr), thus only do such a check for the reloc cache.
+ */
+ if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) {
+ btrfs_put_root(root);
+ list_add(&cur->list, &cache->useless_node);
+ } else {
+ cur->root = root;
+ }
+ return 0;
+ }
+
+ level = cur->level + 1;
+
+ /* Search the tree to find parent blocks referring to the block */
+ path->search_commit_root = 1;
+ path->skip_locking = 1;
+ path->lowest_level = level;
+ ret = btrfs_search_slot(NULL, root, tree_key, path, 0, 0);
+ path->lowest_level = 0;
+ if (ret < 0) {
+ btrfs_put_root(root);
+ return ret;
+ }
+ if (ret > 0 && path->slots[level] > 0)
+ path->slots[level]--;
+
+ eb = path->nodes[level];
+ if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) {
+ btrfs_err(fs_info,
+"couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
+ cur->bytenr, level - 1, root->root_key.objectid,
+ tree_key->objectid, tree_key->type, tree_key->offset);
+ btrfs_put_root(root);
+ ret = -ENOENT;
+ goto out;
+ }
+ lower = cur;
+
+ /* Add all nodes and edges in the path */
+ for (; level < BTRFS_MAX_LEVEL; level++) {
+ if (!path->nodes[level]) {
+ ASSERT(btrfs_root_bytenr(&root->root_item) ==
+ lower->bytenr);
+ /* Same as previous should_ignore_reloc_root() call */
+ if (btrfs_should_ignore_reloc_root(root) &&
+ cache->is_reloc) {
+ btrfs_put_root(root);
+ list_add(&lower->list, &cache->useless_node);
+ } else {
+ lower->root = root;
+ }
+ break;
+ }
+
+ edge = btrfs_backref_alloc_edge(cache);
+ if (!edge) {
+ btrfs_put_root(root);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ eb = path->nodes[level];
+ rb_node = rb_simple_search(&cache->rb_root, eb->start);
+ if (!rb_node) {
+ upper = btrfs_backref_alloc_node(cache, eb->start,
+ lower->level + 1);
+ if (!upper) {
+ btrfs_put_root(root);
+ btrfs_backref_free_edge(cache, edge);
+ ret = -ENOMEM;
+ goto out;
+ }
+ upper->owner = btrfs_header_owner(eb);
+ if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
+ upper->cowonly = 1;
+
+ /*
+ * If we know the block isn't shared we can avoid
+ * checking its backrefs.
+ */
+ if (btrfs_block_can_be_shared(root, eb))
+ upper->checked = 0;
+ else
+ upper->checked = 1;
+
+ /*
+ * Add the block to pending list if we need to check its
+ * backrefs, we only do this once while walking up a
+ * tree as we will catch anything else later on.
+ */
+ if (!upper->checked && need_check) {
+ need_check = false;
+ list_add_tail(&edge->list[UPPER],
+ &cache->pending_edge);
+ } else {
+ if (upper->checked)
+ need_check = true;
+ INIT_LIST_HEAD(&edge->list[UPPER]);
+ }
+ } else {
+ upper = rb_entry(rb_node, struct btrfs_backref_node,
+ rb_node);
+ ASSERT(upper->checked);
+ INIT_LIST_HEAD(&edge->list[UPPER]);
+ if (!upper->owner)
+ upper->owner = btrfs_header_owner(eb);
+ }
+ btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER);
+
+ if (rb_node) {
+ btrfs_put_root(root);
+ break;
+ }
+ lower = upper;
+ upper = NULL;
+ }
+out:
+ btrfs_release_path(path);
+ return ret;
+}
+
+/*
+ * Add backref node @cur into @cache.
+ *
+ * NOTE: Even if the function returned 0, @cur is not yet cached as its upper
+ *	 links aren't yet bi-directional; use
+ *	 btrfs_backref_finish_upper_links() to finish the linkage.
+ *
+ * @path: Released path for indirect tree backref lookup
+ * @iter: Released backref iter for extent tree search
+ * @node_key: The first key of the tree block
+ */
+int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
+ struct btrfs_path *path,
+ struct btrfs_backref_iter *iter,
+ struct btrfs_key *node_key,
+ struct btrfs_backref_node *cur)
+{
+ struct btrfs_fs_info *fs_info = cache->fs_info;
+ struct btrfs_backref_edge *edge;
+ struct btrfs_backref_node *exist;
+ int ret;
+
+ ret = btrfs_backref_iter_start(iter, cur->bytenr);
+ if (ret < 0)
+ return ret;
+ /*
+ * We skip the first btrfs_tree_block_info, as we don't use the key
+ * stored in it, but fetch it from the tree block
+ */
+ if (btrfs_backref_has_tree_block_info(iter)) {
+ ret = btrfs_backref_iter_next(iter);
+ if (ret < 0)
+ goto out;
+ /* No extra backref? This means the tree block is corrupted */
+ if (ret > 0) {
+ ret = -EUCLEAN;
+ goto out;
+ }
+ }
+ WARN_ON(cur->checked);
+ if (!list_empty(&cur->upper)) {
+ /*
+ * The backref was added previously when processing backref of
+ * type BTRFS_TREE_BLOCK_REF_KEY
+ */
+ ASSERT(list_is_singular(&cur->upper));
+ edge = list_entry(cur->upper.next, struct btrfs_backref_edge,
+ list[LOWER]);
+ ASSERT(list_empty(&edge->list[UPPER]));
+ exist = edge->node[UPPER];
+ /*
+		 * Add the upper level block to the pending list if we need to
+		 * check its backrefs.
+ */
+ if (!exist->checked)
+ list_add_tail(&edge->list[UPPER], &cache->pending_edge);
+ } else {
+ exist = NULL;
+ }
+
+ for (; ret == 0; ret = btrfs_backref_iter_next(iter)) {
+ struct extent_buffer *eb;
+ struct btrfs_key key;
+ int type;
+
+ cond_resched();
+ eb = btrfs_backref_get_eb(iter);
+
+ key.objectid = iter->bytenr;
+ if (btrfs_backref_iter_is_inline_ref(iter)) {
+ struct btrfs_extent_inline_ref *iref;
+
+ /* Update key for inline backref */
+ iref = (struct btrfs_extent_inline_ref *)
+ ((unsigned long)iter->cur_ptr);
+ type = btrfs_get_extent_inline_ref_type(eb, iref,
+ BTRFS_REF_TYPE_BLOCK);
+ if (type == BTRFS_REF_TYPE_INVALID) {
+ ret = -EUCLEAN;
+ goto out;
+ }
+ key.type = type;
+ key.offset = btrfs_extent_inline_ref_offset(eb, iref);
+ } else {
+ key.type = iter->cur_key.type;
+ key.offset = iter->cur_key.offset;
+ }
+
+ /*
+ * Parent node found and matches current inline ref, no need to
+ * rebuild this node for this inline ref
+ */
+ if (exist &&
+ ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
+ exist->owner == key.offset) ||
+ (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
+ exist->bytenr == key.offset))) {
+ exist = NULL;
+ continue;
+ }
+
+ /* SHARED_BLOCK_REF means key.offset is the parent bytenr */
+ if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
+ ret = handle_direct_tree_backref(cache, &key, cur);
+ if (ret < 0)
+ goto out;
+ continue;
+ } else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
+ ret = -EINVAL;
+ btrfs_print_v0_err(fs_info);
+ btrfs_handle_fs_error(fs_info, ret, NULL);
+ goto out;
+ } else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
+ continue;
+ }
+
+ /*
+ * key.type == BTRFS_TREE_BLOCK_REF_KEY, inline ref offset
+ * means the root objectid. We need to search the tree to get
+ * its parent bytenr.
+ */
+ ret = handle_indirect_tree_backref(cache, path, &key, node_key,
+ cur);
+ if (ret < 0)
+ goto out;
+ }
+ ret = 0;
+ cur->checked = 1;
+ WARN_ON(exist);
+out:
+ btrfs_backref_iter_release(iter);
+ return ret;
+}
+
+/*
+ * Finish the upwards linkage created by btrfs_backref_add_tree_node()
+ */
+int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
+ struct btrfs_backref_node *start)
+{
+ struct list_head *useless_node = &cache->useless_node;
+ struct btrfs_backref_edge *edge;
+ struct rb_node *rb_node;
+ LIST_HEAD(pending_edge);
+
+ ASSERT(start->checked);
+
+ /* Insert this node to cache if it's not COW-only */
+ if (!start->cowonly) {
+ rb_node = rb_simple_insert(&cache->rb_root, start->bytenr,
+ &start->rb_node);
+ if (rb_node)
+ btrfs_backref_panic(cache->fs_info, start->bytenr,
+ -EEXIST);
+ list_add_tail(&start->lower, &cache->leaves);
+ }
+
+ /*
+	 * Use breadth-first search to iterate over all related edges.
+	 *
+	 * The starting points are all the edges of this node.
+ */
+ list_for_each_entry(edge, &start->upper, list[LOWER])
+ list_add_tail(&edge->list[UPPER], &pending_edge);
+
+ while (!list_empty(&pending_edge)) {
+ struct btrfs_backref_node *upper;
+ struct btrfs_backref_node *lower;
+ struct rb_node *rb_node;
+
+ edge = list_first_entry(&pending_edge,
+ struct btrfs_backref_edge, list[UPPER]);
+ list_del_init(&edge->list[UPPER]);
+ upper = edge->node[UPPER];
+ lower = edge->node[LOWER];
+
+ /* Parent is detached, no need to keep any edges */
+ if (upper->detached) {
+ list_del(&edge->list[LOWER]);
+ btrfs_backref_free_edge(cache, edge);
+
+ /* Lower node is orphan, queue for cleanup */
+ if (list_empty(&lower->upper))
+ list_add(&lower->list, useless_node);
+ continue;
+ }
+
+ /*
+ * All new nodes added in current build_backref_tree() haven't
+ * been linked to the cache rb tree.
+ * So if we have upper->rb_node populated, this means a cache
+ * hit. We only need to link the edge, as @upper and all its
+ * parents have already been linked.
+ */
+ if (!RB_EMPTY_NODE(&upper->rb_node)) {
+ if (upper->lowest) {
+ list_del_init(&upper->lower);
+ upper->lowest = 0;
+ }
+
+ list_add_tail(&edge->list[UPPER], &upper->lower);
+ continue;
+ }
+
+ /* Sanity check, we shouldn't have any unchecked nodes */
+ if (!upper->checked) {
+ ASSERT(0);
+ return -EUCLEAN;
+ }
+
+ /* Sanity check, COW-only node has non-COW-only parent */
+ if (start->cowonly != upper->cowonly) {
+ ASSERT(0);
+ return -EUCLEAN;
+ }
+
+ /* Only cache non-COW-only (subvolume trees) tree blocks */
+ if (!upper->cowonly) {
+ rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr,
+ &upper->rb_node);
+ if (rb_node) {
+ btrfs_backref_panic(cache->fs_info,
+ upper->bytenr, -EEXIST);
+ return -EUCLEAN;
+ }
+ }
+
+ list_add_tail(&edge->list[UPPER], &upper->lower);
+
+ /*
+ * Also queue all the parent edges of this uncached node
+ * to finish the upper linkage
+ */
+ list_for_each_entry(edge, &upper->upper, list[LOWER])
+ list_add_tail(&edge->list[UPPER], &pending_edge);
+ }
+ return 0;
+}
+
+void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
+ struct btrfs_backref_node *node)
+{
+ struct btrfs_backref_node *lower;
+ struct btrfs_backref_node *upper;
+ struct btrfs_backref_edge *edge;
+
+ while (!list_empty(&cache->useless_node)) {
+ lower = list_first_entry(&cache->useless_node,
+ struct btrfs_backref_node, list);
+ list_del_init(&lower->list);
+ }
+ while (!list_empty(&cache->pending_edge)) {
+ edge = list_first_entry(&cache->pending_edge,
+ struct btrfs_backref_edge, list[UPPER]);
+ list_del(&edge->list[UPPER]);
+ list_del(&edge->list[LOWER]);
+ lower = edge->node[LOWER];
+ upper = edge->node[UPPER];
+ btrfs_backref_free_edge(cache, edge);
+
+ /*
+ * Lower is no longer linked to any upper backref nodes and
+ * isn't in the cache, we can free it ourselves.
+ */
+ if (list_empty(&lower->upper) &&
+ RB_EMPTY_NODE(&lower->rb_node))
+ list_add(&lower->list, &cache->useless_node);
+
+ if (!RB_EMPTY_NODE(&upper->rb_node))
+ continue;
+
+ /* Add this guy's upper edges to the list to process */
+ list_for_each_entry(edge, &upper->upper, list[LOWER])
+ list_add_tail(&edge->list[UPPER],
+ &cache->pending_edge);
+ if (list_empty(&upper->upper))
+ list_add(&upper->list, &cache->useless_node);
+ }
+
+ while (!list_empty(&cache->useless_node)) {
+ lower = list_first_entry(&cache->useless_node,
+ struct btrfs_backref_node, list);
+ list_del_init(&lower->list);
+ if (lower == node)
+ node = NULL;
+ btrfs_backref_free_node(cache, lower);
+ }
+
+ btrfs_backref_cleanup_node(cache, node);
+ ASSERT(list_empty(&cache->useless_node) &&
+ list_empty(&cache->pending_edge));
+}
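
The three exported helpers above are meant to be driven together. A rough sketch of the expected calling sequence, modeled on the build_backref_tree() conversion in relocation.c elsewhere in this diff (node is the starting block, cur the block currently being processed; the pending_edge loop is elided):

	ret = btrfs_backref_add_tree_node(cache, path, iter, &node_key, cur);
	if (ret < 0)
		goto out;

	/* ...repeat for each block on cache->pending_edge until all checked... */

	/* Then make the collected upper links bi-directional */
	ret = btrfs_backref_finish_upper_links(cache, node);
out:
	if (ret) {
		/* Tear down everything built so far */
		btrfs_backref_error_cleanup(cache, node);
		node = NULL;
	}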
diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
index 723d6da99114..ff705cc564a9 100644
--- a/fs/btrfs/backref.h
+++ b/fs/btrfs/backref.h
@@ -8,6 +8,7 @@
#include <linux/btrfs.h>
#include "ulist.h"
+#include "disk-io.h"
#include "extent_io.h"
struct inode_fs_paths {
@@ -78,4 +79,300 @@ struct prelim_ref {
u64 wanted_disk_byte;
};
+/*
+ * Iterate backrefs of one extent.
+ *
+ * Currently it only supports iterating tree blocks in the commit root.
+ */
+struct btrfs_backref_iter {
+ u64 bytenr;
+ struct btrfs_path *path;
+ struct btrfs_fs_info *fs_info;
+ struct btrfs_key cur_key;
+ u32 item_ptr;
+ u32 cur_ptr;
+ u32 end_ptr;
+};
+
+struct btrfs_backref_iter *btrfs_backref_iter_alloc(
+ struct btrfs_fs_info *fs_info, gfp_t gfp_flag);
+
+static inline void btrfs_backref_iter_free(struct btrfs_backref_iter *iter)
+{
+ if (!iter)
+ return;
+ btrfs_free_path(iter->path);
+ kfree(iter);
+}
+
+static inline struct extent_buffer *btrfs_backref_get_eb(
+ struct btrfs_backref_iter *iter)
+{
+ if (!iter)
+ return NULL;
+ return iter->path->nodes[0];
+}
+
+/*
+ * For the metadata with EXTENT_ITEM key (non-skinny metadata) case, the first
+ * inline data is a btrfs_tree_block_info, without a btrfs_extent_inline_ref
+ * header.
+ *
+ * This helper determines if that's the case.
+ */
+static inline bool btrfs_backref_has_tree_block_info(
+ struct btrfs_backref_iter *iter)
+{
+ if (iter->cur_key.type == BTRFS_EXTENT_ITEM_KEY &&
+ iter->cur_ptr - iter->item_ptr == sizeof(struct btrfs_extent_item))
+ return true;
+ return false;
+}
+
+int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr);
+
+int btrfs_backref_iter_next(struct btrfs_backref_iter *iter);
+
+static inline bool btrfs_backref_iter_is_inline_ref(
+ struct btrfs_backref_iter *iter)
+{
+ if (iter->cur_key.type == BTRFS_EXTENT_ITEM_KEY ||
+ iter->cur_key.type == BTRFS_METADATA_ITEM_KEY)
+ return true;
+ return false;
+}
+
+static inline void btrfs_backref_iter_release(struct btrfs_backref_iter *iter)
+{
+ iter->bytenr = 0;
+ iter->item_ptr = 0;
+ iter->cur_ptr = 0;
+ iter->end_ptr = 0;
+ btrfs_release_path(iter->path);
+ memset(&iter->cur_key, 0, sizeof(iter->cur_key));
+}
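
The three u32 offsets in the iterator index into the extent buffer. A sketch of the assumed item layout they walk, for the non-skinny EXTENT_ITEM case (with METADATA_ITEM keys the btrfs_tree_block_info part is absent):

	/*
	 * item_ptr         item_ptr + sizeof(btrfs_extent_item)     end_ptr
	 *    |                          |                               |
	 *    v                          v                               v
	 *    +--------------------+-----------------------+------------+
	 *    | btrfs_extent_item  | btrfs_tree_block_info | inline     |
	 *    |                    | (EXTENT_ITEM keys     | refs ...   |
	 *    |                    |  only)                |            |
	 *    +--------------------+-----------------------+------------+
	 *
	 * cur_ptr starts right after the extent item and advances by one
	 * inline ref (or by the tree_block_info) per btrfs_backref_iter_next()
	 * call; once it reaches end_ptr the iterator switches to keyed
	 * backref items in the extent tree.
	 */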
+
+/*
+ * Backref cache related structures
+ *
+ * The whole objective of backref_cache is to build a bi-directional map
+ * of tree blocks (represented by backref_node) and all their parents.
+ */
+
+/*
+ * Represent a tree block in the backref cache
+ */
+struct btrfs_backref_node {
+ struct {
+ struct rb_node rb_node;
+ u64 bytenr;
+ }; /* Use rb_simple_node for search/insert */
+
+ u64 new_bytenr;
+	/* Objectid of the tree block owner, may not be up to date */
+ u64 owner;
+ /* Link to pending, changed or detached list */
+ struct list_head list;
+
+ /* List of upper level edges, which link this node to its parents */
+ struct list_head upper;
+ /* List of lower level edges, which link this node to its children */
+ struct list_head lower;
+
+ /* NULL if this node is not tree root */
+ struct btrfs_root *root;
+ /* Extent buffer got by COWing the block */
+ struct extent_buffer *eb;
+ /* Level of the tree block */
+ unsigned int level:8;
+ /* Is the block in a non-shareable tree */
+ unsigned int cowonly:1;
+ /* 1 if no child node is in the cache */
+ unsigned int lowest:1;
+ /* Is the extent buffer locked */
+ unsigned int locked:1;
+ /* Has the block been processed */
+ unsigned int processed:1;
+ /* Have backrefs of this block been checked */
+ unsigned int checked:1;
+ /*
+ * 1 if corresponding block has been COWed but some upper level block
+ * pointers may not point to the new location
+ */
+ unsigned int pending:1;
+ /* 1 if the backref node isn't connected to any other backref node */
+ unsigned int detached:1;
+
+ /*
+	 * For the generic purpose backref cache, we only care whether it's a
+	 * reloc root, not which source subvolume id it came from.
+ */
+ unsigned int is_reloc_root:1;
+};
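
The anonymous struct at the top matches the rb_simple_node layout from misc.h (newly included by this patch), which is what lets nodes be indexed by bytenr with the rb_simple_*() helpers seen in backref.c. A sketch of the assumed search helper:

	/* Assumed shape of rb_simple_search() from misc.h */
	static inline struct rb_node *rb_simple_search(struct rb_root *root,
						       u64 bytenr)
	{
		struct rb_node *node = root->rb_node;
		struct rb_simple_node *entry;

		while (node) {
			entry = rb_entry(node, struct rb_simple_node, rb_node);

			if (bytenr < entry->bytenr)
				node = node->rb_left;
			else if (bytenr > entry->bytenr)
				node = node->rb_right;
			else
				return node;
		}
		return NULL;
	}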
+
+#define LOWER 0
+#define UPPER 1
+
+/*
+ * Represent an edge connecting upper and lower backref nodes.
+ */
+struct btrfs_backref_edge {
+ /*
+ * list[LOWER] is linked to btrfs_backref_node::upper of lower level
+ * node, and list[UPPER] is linked to btrfs_backref_node::lower of
+ * upper level node.
+ *
+ * Also, build_backref_tree() uses list[UPPER] for pending edges, before
+ * linking list[UPPER] to its upper level nodes.
+ */
+ struct list_head list[2];
+
+ /* Two related nodes */
+ struct btrfs_backref_node *node[2];
+};
+
+struct btrfs_backref_cache {
+ /* Red black tree of all backref nodes in the cache */
+ struct rb_root rb_root;
+ /* For passing backref nodes to btrfs_reloc_cow_block */
+ struct btrfs_backref_node *path[BTRFS_MAX_LEVEL];
+ /*
+ * List of blocks that have been COWed but some block pointers in upper
+ * level blocks may not reflect the new location
+ */
+ struct list_head pending[BTRFS_MAX_LEVEL];
+ /* List of backref nodes with no child node */
+ struct list_head leaves;
+ /* List of blocks that have been COWed in current transaction */
+ struct list_head changed;
+	/* List of detached backref nodes. */
+ struct list_head detached;
+
+ u64 last_trans;
+
+ int nr_nodes;
+ int nr_edges;
+
+ /* List of unchecked backref edges during backref cache build */
+ struct list_head pending_edge;
+
+ /* List of useless backref nodes during backref cache build */
+ struct list_head useless_node;
+
+ struct btrfs_fs_info *fs_info;
+
+ /*
+ * Whether this cache is for relocation
+ *
+	 * The relocation backref cache requires more info for reloc roots
+	 * compared to the generic backref cache.
+ */
+ unsigned int is_reloc;
+};
+
+void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
+ struct btrfs_backref_cache *cache, int is_reloc);
+struct btrfs_backref_node *btrfs_backref_alloc_node(
+ struct btrfs_backref_cache *cache, u64 bytenr, int level);
+struct btrfs_backref_edge *btrfs_backref_alloc_edge(
+ struct btrfs_backref_cache *cache);
+
+#define LINK_LOWER (1 << 0)
+#define LINK_UPPER (1 << 1)
+static inline void btrfs_backref_link_edge(struct btrfs_backref_edge *edge,
+ struct btrfs_backref_node *lower,
+ struct btrfs_backref_node *upper,
+ int link_which)
+{
+ ASSERT(upper && lower && upper->level == lower->level + 1);
+ edge->node[LOWER] = lower;
+ edge->node[UPPER] = upper;
+ if (link_which & LINK_LOWER)
+ list_add_tail(&edge->list[LOWER], &lower->upper);
+ if (link_which & LINK_UPPER)
+ list_add_tail(&edge->list[UPPER], &upper->lower);
+}
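
As handle_direct_tree_backref() earlier in this diff shows, linking happens in two phases: only the lower side is linked while the cache is being built, and the upper side is completed later. A condensed sketch:

	/* Phase 1, during cache build: link the lower side only */
	btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER);
	list_add_tail(&edge->list[UPPER], &cache->pending_edge);

	/* Phase 2, in btrfs_backref_finish_upper_links(): complete the link */
	list_del_init(&edge->list[UPPER]);
	list_add_tail(&edge->list[UPPER], &upper->lower);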
+
+static inline void btrfs_backref_free_node(struct btrfs_backref_cache *cache,
+ struct btrfs_backref_node *node)
+{
+ if (node) {
+ cache->nr_nodes--;
+ btrfs_put_root(node->root);
+ kfree(node);
+ }
+}
+
+static inline void btrfs_backref_free_edge(struct btrfs_backref_cache *cache,
+ struct btrfs_backref_edge *edge)
+{
+ if (edge) {
+ cache->nr_edges--;
+ kfree(edge);
+ }
+}
+
+static inline void btrfs_backref_unlock_node_buffer(
+ struct btrfs_backref_node *node)
+{
+ if (node->locked) {
+ btrfs_tree_unlock(node->eb);
+ node->locked = 0;
+ }
+}
+
+static inline void btrfs_backref_drop_node_buffer(
+ struct btrfs_backref_node *node)
+{
+ if (node->eb) {
+ btrfs_backref_unlock_node_buffer(node);
+ free_extent_buffer(node->eb);
+ node->eb = NULL;
+ }
+}
+
+/*
+ * Drop the backref node from cache without cleaning up its children
+ * edges.
+ *
+ * This can only be called on a node without parent edges.
+ * The child edges are still kept as is.
+ */
+static inline void btrfs_backref_drop_node(struct btrfs_backref_cache *tree,
+ struct btrfs_backref_node *node)
+{
+ BUG_ON(!list_empty(&node->upper));
+
+ btrfs_backref_drop_node_buffer(node);
+ list_del(&node->list);
+ list_del(&node->lower);
+ if (!RB_EMPTY_NODE(&node->rb_node))
+ rb_erase(&node->rb_node, &tree->rb_root);
+ btrfs_backref_free_node(tree, node);
+}
+
+void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
+ struct btrfs_backref_node *node);
+
+void btrfs_backref_release_cache(struct btrfs_backref_cache *cache);
+
+static inline void btrfs_backref_panic(struct btrfs_fs_info *fs_info,
+ u64 bytenr, int errno)
+{
+ btrfs_panic(fs_info, errno,
+ "Inconsistency in backref cache found at offset %llu",
+ bytenr);
+}
+
+int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
+ struct btrfs_path *path,
+ struct btrfs_backref_iter *iter,
+ struct btrfs_key *node_key,
+ struct btrfs_backref_node *cur);
+
+int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
+ struct btrfs_backref_node *start);
+
+void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
+ struct btrfs_backref_node *node);
+
#endif
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index 696f47103cfc..176e8a292fd1 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -7,7 +7,6 @@
#include "disk-io.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
-#include "disk-io.h"
#include "volumes.h"
#include "transaction.h"
#include "ref-verify.h"
@@ -161,6 +160,8 @@ static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
struct rb_node *parent = NULL;
struct btrfs_block_group *cache;
+ ASSERT(block_group->length != 0);
+
spin_lock(&info->block_group_cache_lock);
p = &info->block_group_cache_tree.rb_node;
@@ -863,11 +864,34 @@ static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags)
}
}
+static int remove_block_group_item(struct btrfs_trans_handle *trans,
+ struct btrfs_path *path,
+ struct btrfs_block_group *block_group)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_root *root;
+ struct btrfs_key key;
+ int ret;
+
+ root = fs_info->extent_root;
+ key.objectid = block_group->start;
+ key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
+ key.offset = block_group->length;
+
+ ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+ if (ret > 0)
+ ret = -ENOENT;
+ if (ret < 0)
+ return ret;
+
+ ret = btrfs_del_item(trans, root, path);
+ return ret;
+}
+
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
u64 group_start, struct extent_map *em)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
- struct btrfs_root *root = fs_info->extent_root;
struct btrfs_path *path;
struct btrfs_block_group *block_group;
struct btrfs_free_cluster *cluster;
@@ -1065,26 +1089,25 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
spin_unlock(&block_group->space_info->lock);
- key.objectid = block_group->start;
- key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
- key.offset = block_group->length;
-
mutex_lock(&fs_info->chunk_mutex);
spin_lock(&block_group->lock);
block_group->removed = 1;
/*
- * At this point trimming can't start on this block group, because we
- * removed the block group from the tree fs_info->block_group_cache_tree
- * so no one can't find it anymore and even if someone already got this
- * block group before we removed it from the rbtree, they have already
- * incremented block_group->trimming - if they didn't, they won't find
- * any free space entries because we already removed them all when we
- * called btrfs_remove_free_space_cache().
+ * At this point trimming or scrub can't start on this block group,
+ * because we removed the block group from the rbtree
+	 * fs_info->block_group_cache_tree so no one can find it anymore and
+ * even if someone already got this block group before we removed it
+ * from the rbtree, they have already incremented block_group->frozen -
+ * if they didn't, for the trimming case they won't find any free space
+ * entries because we already removed them all when we called
+ * btrfs_remove_free_space_cache().
*
* And we must not remove the extent map from the fs_info->mapping_tree
* to prevent the same logical address range and physical device space
- * ranges from being reused for a new block group. This is because our
- * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
+ * ranges from being reused for a new block group. This is needed to
+ * avoid races with trimming and scrub.
+ *
+ * An fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
* completely transactionless, so while it is trimming a range the
* currently running transaction might finish and a new one start,
* allowing for new block groups to be created that can reuse the same
@@ -1095,7 +1118,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
* in place until the extents have been discarded completely when
* the transaction commit has completed.
*/
- remove_em = (atomic_read(&block_group->trimming) == 0);
+ remove_em = (atomic_read(&block_group->frozen) == 0);
spin_unlock(&block_group->lock);
mutex_unlock(&fs_info->chunk_mutex);
@@ -1107,16 +1130,10 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
/* Once for the block groups rbtree */
btrfs_put_block_group(block_group);
- ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
- if (ret > 0)
- ret = -EIO;
+ ret = remove_block_group_item(trans, path, block_group);
if (ret < 0)
goto out;
- ret = btrfs_del_item(trans, root, path);
- if (ret)
- goto out;
-
if (remove_em) {
struct extent_map_tree *em_tree;
@@ -1175,7 +1192,7 @@ struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
free_extent_map(em);
return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root,
- num_items, 1);
+ num_items);
}
/*
@@ -1284,25 +1301,17 @@ static bool clean_pinned_extents(struct btrfs_trans_handle *trans,
ret = clear_extent_bits(&prev_trans->pinned_extents, start, end,
EXTENT_DIRTY);
if (ret)
- goto err;
+ goto out;
}
ret = clear_extent_bits(&trans->transaction->pinned_extents, start, end,
EXTENT_DIRTY);
- if (ret)
- goto err;
+out:
mutex_unlock(&fs_info->unused_bg_unpin_mutex);
if (prev_trans)
btrfs_put_transaction(prev_trans);
- return true;
-
-err:
- mutex_unlock(&fs_info->unused_bg_unpin_mutex);
- if (prev_trans)
- btrfs_put_transaction(prev_trans);
- btrfs_dec_block_group_ro(bg);
- return false;
+ return ret == 0;
}
/*
@@ -1400,8 +1409,10 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
* We could have pending pinned extents for this block group,
* just delete them, we don't care about them anymore.
*/
- if (!clean_pinned_extents(trans, block_group))
+ if (!clean_pinned_extents(trans, block_group)) {
+ btrfs_dec_block_group_ro(block_group);
goto end_trans;
+ }
/*
* At this point, the block_group is read only and should fail
@@ -1450,7 +1461,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
/* Implicit trim during transaction commit. */
if (trimming)
- btrfs_get_block_group_trimming(block_group);
+ btrfs_freeze_block_group(block_group);
/*
* Btrfs_remove_chunk will abort the transaction if things go
@@ -1460,7 +1471,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
if (ret) {
if (trimming)
- btrfs_put_block_group_trimming(block_group);
+ btrfs_unfreeze_block_group(block_group);
goto end_trans;
}
@@ -1774,7 +1785,7 @@ static void link_block_group(struct btrfs_block_group *cache)
}
static struct btrfs_block_group *btrfs_create_block_group_cache(
- struct btrfs_fs_info *fs_info, u64 start, u64 size)
+ struct btrfs_fs_info *fs_info, u64 start)
{
struct btrfs_block_group *cache;
@@ -1790,7 +1801,6 @@ static struct btrfs_block_group *btrfs_create_block_group_cache(
}
cache->start = start;
- cache->length = size;
cache->fs_info = fs_info;
cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start);
@@ -1809,7 +1819,7 @@ static struct btrfs_block_group *btrfs_create_block_group_cache(
INIT_LIST_HEAD(&cache->dirty_list);
INIT_LIST_HEAD(&cache->io_list);
btrfs_init_free_space_ctl(cache);
- atomic_set(&cache->trimming, 0);
+ atomic_set(&cache->frozen, 0);
mutex_init(&cache->free_space_lock);
btrfs_init_full_stripe_locks_tree(&cache->full_stripe_locks_root);
@@ -1870,25 +1880,44 @@ static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
return ret;
}
+static int read_block_group_item(struct btrfs_block_group *cache,
+ struct btrfs_path *path,
+ const struct btrfs_key *key)
+{
+ struct extent_buffer *leaf = path->nodes[0];
+ struct btrfs_block_group_item bgi;
+ int slot = path->slots[0];
+
+ cache->length = key->offset;
+
+ read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
+ sizeof(bgi));
+ cache->used = btrfs_stack_block_group_used(&bgi);
+ cache->flags = btrfs_stack_block_group_flags(&bgi);
+
+ return 0;
+}
+
static int read_one_block_group(struct btrfs_fs_info *info,
struct btrfs_path *path,
const struct btrfs_key *key,
int need_clear)
{
- struct extent_buffer *leaf = path->nodes[0];
struct btrfs_block_group *cache;
struct btrfs_space_info *space_info;
- struct btrfs_block_group_item bgi;
const bool mixed = btrfs_fs_incompat(info, MIXED_GROUPS);
- int slot = path->slots[0];
int ret;
ASSERT(key->type == BTRFS_BLOCK_GROUP_ITEM_KEY);
- cache = btrfs_create_block_group_cache(info, key->objectid, key->offset);
+ cache = btrfs_create_block_group_cache(info, key->objectid);
if (!cache)
return -ENOMEM;
+ ret = read_block_group_item(cache, path, key);
+ if (ret < 0)
+ goto error;
+
if (need_clear) {
/*
* When we mount with old space cache, we need to
@@ -1903,10 +1932,6 @@ static int read_one_block_group(struct btrfs_fs_info *info,
if (btrfs_test_opt(info, SPACE_CACHE))
cache->disk_cache_state = BTRFS_DC_CLEAR;
}
- read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
- sizeof(bgi));
- cache->used = btrfs_stack_block_group_used(&bgi);
- cache->flags = btrfs_stack_block_group_flags(&bgi);
if (!mixed && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
(cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
btrfs_err(info,
@@ -1934,15 +1959,15 @@ static int read_one_block_group(struct btrfs_fs_info *info,
* are empty, and we can just add all the space in and be done with it.
* This saves us _a_lot_ of time, particularly in the full case.
*/
- if (key->offset == cache->used) {
+ if (cache->length == cache->used) {
cache->last_byte_to_unpin = (u64)-1;
cache->cached = BTRFS_CACHE_FINISHED;
btrfs_free_excluded_extents(cache);
} else if (cache->used == 0) {
cache->last_byte_to_unpin = (u64)-1;
cache->cached = BTRFS_CACHE_FINISHED;
- add_new_free_space(cache, key->objectid,
- key->objectid + key->offset);
+ add_new_free_space(cache, cache->start,
+ cache->start + cache->length);
btrfs_free_excluded_extents(cache);
}
@@ -1952,7 +1977,7 @@ static int read_one_block_group(struct btrfs_fs_info *info,
goto error;
}
trace_btrfs_add_block_group(info, cache, 0);
- btrfs_update_space_info(info, cache->flags, key->offset,
+ btrfs_update_space_info(info, cache->flags, cache->length,
cache->used, cache->bytes_super, &space_info);
cache->space_info = space_info;
@@ -1991,7 +2016,6 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
- path->reada = READA_FORWARD;
cache_gen = btrfs_super_cache_generation(info->super_copy);
if (btrfs_test_opt(info, SPACE_CACHE) &&
@@ -2046,13 +2070,32 @@ error:
return ret;
}
+static int insert_block_group_item(struct btrfs_trans_handle *trans,
+ struct btrfs_block_group *block_group)
+{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+ struct btrfs_block_group_item bgi;
+ struct btrfs_root *root;
+ struct btrfs_key key;
+
+ spin_lock(&block_group->lock);
+ btrfs_set_stack_block_group_used(&bgi, block_group->used);
+ btrfs_set_stack_block_group_chunk_objectid(&bgi,
+ BTRFS_FIRST_CHUNK_TREE_OBJECTID);
+ btrfs_set_stack_block_group_flags(&bgi, block_group->flags);
+ key.objectid = block_group->start;
+ key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
+ key.offset = block_group->length;
+ spin_unlock(&block_group->lock);
+
+ root = fs_info->extent_root;
+ return btrfs_insert_item(trans, root, &key, &bgi, sizeof(bgi));
+}
+
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_block_group *block_group;
- struct btrfs_root *extent_root = fs_info->extent_root;
- struct btrfs_block_group_item item;
- struct btrfs_key key;
int ret = 0;
if (!trans->can_flush_pending_bgs)
@@ -2065,21 +2108,11 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
if (ret)
goto next;
- spin_lock(&block_group->lock);
- btrfs_set_stack_block_group_used(&item, block_group->used);
- btrfs_set_stack_block_group_chunk_objectid(&item,
- BTRFS_FIRST_CHUNK_TREE_OBJECTID);
- btrfs_set_stack_block_group_flags(&item, block_group->flags);
- key.objectid = block_group->start;
- key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
- key.offset = block_group->length;
- spin_unlock(&block_group->lock);
-
- ret = btrfs_insert_item(trans, extent_root, &key, &item,
- sizeof(item));
+ ret = insert_block_group_item(trans, block_group);
if (ret)
btrfs_abort_transaction(trans, ret);
- ret = btrfs_finish_chunk_alloc(trans, key.objectid, key.offset);
+ ret = btrfs_finish_chunk_alloc(trans, block_group->start,
+ block_group->length);
if (ret)
btrfs_abort_transaction(trans, ret);
add_block_group_free_space(trans, block_group);
@@ -2100,10 +2133,11 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
btrfs_set_log_full_commit(trans);
- cache = btrfs_create_block_group_cache(fs_info, chunk_offset, size);
+ cache = btrfs_create_block_group_cache(fs_info, chunk_offset);
if (!cache)
return -ENOMEM;
+ cache->length = size;
cache->used = bytes_used;
cache->flags = type;
cache->last_byte_to_unpin = (u64)-1;
@@ -2314,13 +2348,13 @@ void btrfs_dec_block_group_ro(struct btrfs_block_group *cache)
spin_unlock(&sinfo->lock);
}
-static int write_one_cache_group(struct btrfs_trans_handle *trans,
- struct btrfs_path *path,
- struct btrfs_block_group *cache)
+static int update_block_group_item(struct btrfs_trans_handle *trans,
+ struct btrfs_path *path,
+ struct btrfs_block_group *cache)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
int ret;
- struct btrfs_root *extent_root = fs_info->extent_root;
+ struct btrfs_root *root = fs_info->extent_root;
unsigned long bi;
struct extent_buffer *leaf;
struct btrfs_block_group_item bgi;
@@ -2330,7 +2364,7 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans,
key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
key.offset = cache->length;
- ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 1);
+ ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
if (ret) {
if (ret > 0)
ret = -ENOENT;
@@ -2642,7 +2676,7 @@ again:
}
}
if (!ret) {
- ret = write_one_cache_group(trans, path, cache);
+ ret = update_block_group_item(trans, path, cache);
/*
* Our block group might still be attached to the list
* of new block groups in the transaction handle of some
@@ -2791,7 +2825,7 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
}
}
if (!ret) {
- ret = write_one_cache_group(trans, path, cache);
+ ret = update_block_group_item(trans, path, cache);
/*
* One of the free space endio workers might have
* created a new block group while updating a free space
@@ -2808,7 +2842,7 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
if (ret == -ENOENT) {
wait_event(cur_trans->writer_wait,
atomic_read(&cur_trans->num_writers) == 1);
- ret = write_one_cache_group(trans, path, cache);
+ ret = update_block_group_item(trans, path, cache);
}
if (ret)
btrfs_abort_transaction(trans, ret);
@@ -3384,3 +3418,44 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
}
return 0;
}
+
+void btrfs_freeze_block_group(struct btrfs_block_group *cache)
+{
+ atomic_inc(&cache->frozen);
+}
+
+void btrfs_unfreeze_block_group(struct btrfs_block_group *block_group)
+{
+ struct btrfs_fs_info *fs_info = block_group->fs_info;
+ struct extent_map_tree *em_tree;
+ struct extent_map *em;
+ bool cleanup;
+
+ spin_lock(&block_group->lock);
+ cleanup = (atomic_dec_and_test(&block_group->frozen) &&
+ block_group->removed);
+ spin_unlock(&block_group->lock);
+
+ if (cleanup) {
+ mutex_lock(&fs_info->chunk_mutex);
+ em_tree = &fs_info->mapping_tree;
+ write_lock(&em_tree->lock);
+		em = lookup_extent_mapping(em_tree, block_group->start, 1);
+ BUG_ON(!em); /* logic error, can't happen */
+ remove_extent_mapping(em_tree, em);
+ write_unlock(&em_tree->lock);
+ mutex_unlock(&fs_info->chunk_mutex);
+
+ /* once for us and once for the tree */
+ free_extent_map(em);
+ free_extent_map(em);
+
+ /*
+		 * We may have left one free space entry, and other tasks
+		 * trimming this block group may each have left one entry as
+		 * well. Free them if any.
+ */
+ __btrfs_remove_free_space_cache(block_group->free_space_ctl);
+ }
+}
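
The freeze/unfreeze pair generalizes the old trimming counter to any task that needs a deleted block group's ranges to stay reserved. The usage pattern, sketched after the btrfs_delete_unused_bgs() hunk above (error paths abbreviated):

	/* Sketch: hold the ranges of a group being deleted during a trim */
	if (trimming)
		btrfs_freeze_block_group(block_group);

	ret = btrfs_remove_chunk(trans, block_group->start);
	if (ret) {
		if (trimming)
			btrfs_unfreeze_block_group(block_group);
		goto end_trans;
	}

	/*
	 * The matching btrfs_unfreeze_block_group() runs once the discard
	 * work is done; the last unfreeze of a removed group drops its
	 * extent map so the logical/physical ranges become reusable.
	 */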
diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h
index 107bb557ca8d..b6ee70a039c7 100644
--- a/fs/btrfs/block-group.h
+++ b/fs/btrfs/block-group.h
@@ -129,8 +129,17 @@ struct btrfs_block_group {
/* For read-only block groups */
struct list_head ro_list;
+ /*
+ * When non-zero it means the block group's logical address and its
+ * device extents can not be reused for future block group allocations
+ * until the counter goes down to 0. This is to prevent them from being
+ * reused while some task is still using the block group after it was
+ * deleted - we want to make sure they can only be reused for new block
+ * groups after that task is done with the deleted block group.
+ */
+ atomic_t frozen;
+
/* For discard operations */
- atomic_t trimming;
struct list_head discard_list;
int discard_index;
u64 discard_eligible_time;
@@ -283,6 +292,9 @@ static inline int btrfs_block_group_done(struct btrfs_block_group *cache)
cache->cached == BTRFS_CACHE_ERROR;
}
+void btrfs_freeze_block_group(struct btrfs_block_group *cache);
+void btrfs_unfreeze_block_group(struct btrfs_block_group *cache);
+
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
u64 physical, u64 **logical, int *naddrs, int *stripe_len);
diff --git a/fs/btrfs/block-rsv.c b/fs/btrfs/block-rsv.c
index 27efec8f7c5b..7e1549a84fcc 100644
--- a/fs/btrfs/block-rsv.c
+++ b/fs/btrfs/block-rsv.c
@@ -5,6 +5,7 @@
#include "block-rsv.h"
#include "space-info.h"
#include "transaction.h"
+#include "block-group.h"
/*
* HOW DO BLOCK RESERVES WORK
@@ -405,6 +406,8 @@ void btrfs_update_global_block_rsv(struct btrfs_fs_info *fs_info)
else
block_rsv->full = 0;
+ if (block_rsv->size >= sinfo->total_bytes)
+ sinfo->force_alloc = CHUNK_ALLOC_FORCE;
spin_unlock(&block_rsv->lock);
spin_unlock(&sinfo->lock);
}
@@ -455,7 +458,7 @@ static struct btrfs_block_rsv *get_block_rsv(
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_rsv *block_rsv = NULL;
- if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
+ if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) ||
(root == fs_info->csum_root && trans->adding_csums) ||
(root == fs_info->uuid_root))
block_rsv = trans->block_rsv;
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 27a1fefce508..aeff56a0e105 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -7,6 +7,7 @@
#define BTRFS_INODE_H
#include <linux/hash.h>
+#include <linux/refcount.h>
#include "extent_map.h"
#include "extent_io.h"
#include "ordered-data.h"
@@ -27,7 +28,6 @@ enum {
BTRFS_INODE_NEEDS_FULL_SYNC,
BTRFS_INODE_COPY_EVERYTHING,
BTRFS_INODE_IN_DELALLOC_LIST,
- BTRFS_INODE_READDIO_NEED_LOCK,
BTRFS_INODE_HAS_PROPS,
BTRFS_INODE_SNAPSHOT_FLUSH,
};
@@ -293,53 +293,25 @@ static inline int btrfs_inode_in_log(struct btrfs_inode *inode, u64 generation)
return ret;
}
-#define BTRFS_DIO_ORIG_BIO_SUBMITTED 0x1
-
struct btrfs_dio_private {
struct inode *inode;
- unsigned long flags;
u64 logical_offset;
u64 disk_bytenr;
u64 bytes;
- void *private;
-
- /* number of bios pending for this dio */
- atomic_t pending_bios;
- /* IO errors */
- int errors;
-
- /* orig_bio is our btrfs_io_bio */
- struct bio *orig_bio;
+ /*
+ * References to this structure. There is one reference per in-flight
+ * bio plus one while we're still setting up.
+ */
+ refcount_t refs;
/* dio_bio came from fs/direct-io.c */
struct bio *dio_bio;
- /*
- * The original bio may be split to several sub-bios, this is
- * done during endio of sub-bios
- */
- blk_status_t (*subio_endio)(struct inode *, struct btrfs_io_bio *,
- blk_status_t);
+ /* Array of checksums */
+ u8 csums[];
};
-/*
- * Disable DIO read nolock optimization, so new dio readers will be forced
- * to grab i_mutex. It is used to avoid the endless truncate due to
- * nonlocked dio read.
- */
-static inline void btrfs_inode_block_unlocked_dio(struct btrfs_inode *inode)
-{
- set_bit(BTRFS_INODE_READDIO_NEED_LOCK, &inode->runtime_flags);
- smp_mb();
-}
-
-static inline void btrfs_inode_resume_unlocked_dio(struct btrfs_inode *inode)
-{
- smp_mb__before_atomic();
- clear_bit(BTRFS_INODE_READDIO_NEED_LOCK, &inode->runtime_flags);
-}
-
/* Array of bytes with variable length, hexadecimal format 0x1234 */
#define CSUM_FMT "0x%*phN"
#define CSUM_FMT_VALUE(size, bytes) size, bytes
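
With the refcount in place, the assumed lifetime of a btrfs_dio_private becomes the standard setup-reference pattern (complete_the_dio() is a hypothetical stand-in for the real completion path):

	dip = kzalloc(sizeof(*dip) + csum_bytes, GFP_NOFS);
	refcount_set(&dip->refs, 1);		/* setup reference */

	while (more_bios_to_submit) {
		refcount_inc(&dip->refs);	/* one ref per in-flight bio */
		/* submit the bio... */
	}

	/* Dropped once by the setup path and once per bio completion: */
	if (refcount_dec_and_test(&dip->refs))
		complete_the_dio(dip);		/* hypothetical helper */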
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 9ab610cc9114..c6e648603f85 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -194,11 +194,9 @@ static int check_compressed_csum(struct btrfs_inode *inode,
for (i = 0; i < cb->nr_pages; i++) {
page = cb->compressed_pages[i];
- crypto_shash_init(shash);
kaddr = kmap_atomic(page);
- crypto_shash_update(shash, kaddr, PAGE_SIZE);
+ crypto_shash_digest(shash, kaddr, PAGE_SIZE, csum);
kunmap_atomic(kaddr);
- crypto_shash_final(shash, (u8 *)&csum);
if (memcmp(&csum, cb_sum, csum_size)) {
btrfs_print_data_csum_error(inode, disk_start,
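
crypto_shash_digest() is the one-shot form of the kernel shash API; for a single contiguous buffer it yields the same digest as the init/update/final triple it replaces, saving two indirect calls per page:

	/* Before: three calls per page */
	crypto_shash_init(shash);
	crypto_shash_update(shash, kaddr, PAGE_SIZE);
	crypto_shash_final(shash, (u8 *)&csum);

	/* After: one call, same digest */
	crypto_shash_digest(shash, kaddr, PAGE_SIZE, csum);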
@@ -1142,6 +1140,22 @@ static void put_workspace(int type, struct list_head *ws)
}
/*
+ * Adjust @level according to the limits of the compression algorithm or
+ * fallback to default
+ */
+static unsigned int btrfs_compress_set_level(int type, unsigned level)
+{
+ const struct btrfs_compress_op *ops = btrfs_compress_op[type];
+
+ if (level == 0)
+ level = ops->default_level;
+ else
+ level = min(level, ops->max_level);
+
+ return level;
+}
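
The clamping behavior, illustrated with assumed zstd limits (default_level 3, max_level 15) purely as an example:

	btrfs_compress_set_level(BTRFS_COMPRESS_ZSTD, 0);	/* -> 3, default */
	btrfs_compress_set_level(BTRFS_COMPRESS_ZSTD, 7);	/* -> 7 */
	btrfs_compress_set_level(BTRFS_COMPRESS_ZSTD, 42);	/* -> 15, clamped */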
+
+/*
* Given an address space and start and length, compress the bytes into @pages
* that are allocated on demand.
*
@@ -1748,19 +1762,3 @@ unsigned int btrfs_compress_str2level(unsigned int type, const char *str)
return level;
}
-
-/*
- * Adjust @level according to the limits of the compression algorithm or
- * fallback to default
- */
-unsigned int btrfs_compress_set_level(int type, unsigned level)
-{
- const struct btrfs_compress_op *ops = btrfs_compress_op[type];
-
- if (level == 0)
- level = ops->default_level;
- else
- level = min(level, ops->max_level);
-
- return level;
-}
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index d253f7aa8ed5..284a3ad31350 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -140,8 +140,6 @@ extern const struct btrfs_compress_op btrfs_zstd_compress;
const char* btrfs_compress_type2str(enum btrfs_compression_type type);
bool btrfs_compress_is_valid_type(const char *str, size_t len);
-unsigned int btrfs_compress_set_level(int type, unsigned level);
-
int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end);
#endif
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index bfedbbe2311f..3a7648bff42c 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -144,9 +144,10 @@ struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
return eb;
}
-/* cowonly root (everything not a reference counted cow subvolume), just get
- * put onto a simple dirty list. transaction.c walks this to make sure they
- * get properly updated on disk.
+/*
+ * Cowonly roots (non-shareable trees, everything that is not a subvolume or
+ * reloc root) just get put onto a simple dirty list. The transaction code
+ * walks this list to make sure they get properly updated on disk.
*/
static void add_root_to_dirty_list(struct btrfs_root *root)
{
@@ -185,9 +186,9 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
int level;
struct btrfs_disk_key disk_key;
- WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
+ WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
trans->transid != fs_info->running_transaction->transid);
- WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
+ WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
trans->transid != root->last_trans);
level = btrfs_header_level(buf);
@@ -826,12 +827,11 @@ int btrfs_block_can_be_shared(struct btrfs_root *root,
struct extent_buffer *buf)
{
/*
- * Tree blocks not in reference counted trees and tree roots
- * are never shared. If a block was allocated after the last
- * snapshot and the block was not allocated by tree relocation,
- * we know the block is not shared.
+ * Tree blocks not in shareable trees and tree roots are never shared.
+ * If a block was allocated after the last snapshot and the block was
+ * not allocated by tree relocation, we know the block is not shared.
*/
- if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
+ if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
buf != root->node && buf != root->commit_root &&
(btrfs_header_generation(buf) <=
btrfs_root_last_snapshot(&root->root_item) ||
@@ -1024,9 +1024,9 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
btrfs_assert_tree_locked(buf);
- WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
+ WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
trans->transid != fs_info->running_transaction->transid);
- WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
+ WARN_ON(test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
trans->transid != root->last_trans);
level = btrfs_header_level(buf);
@@ -1065,7 +1065,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
return ret;
}
- if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
+ if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
ret = btrfs_reloc_cow_block(trans, root, buf, cow);
if (ret) {
btrfs_abort_transaction(trans, ret);
@@ -1668,15 +1668,8 @@ static noinline int generic_bin_search(struct extent_buffer *eb,
{
int low = 0;
int high = max;
- int mid;
int ret;
- struct btrfs_disk_key *tmp = NULL;
- struct btrfs_disk_key unaligned;
- unsigned long offset;
- char *kaddr = NULL;
- unsigned long map_start = 0;
- unsigned long map_len = 0;
- int err;
+ const int key_size = sizeof(struct btrfs_disk_key);
if (low > high) {
btrfs_err(eb->fs_info,
@@ -1687,32 +1680,26 @@ static noinline int generic_bin_search(struct extent_buffer *eb,
}
while (low < high) {
+ unsigned long oip;
+ unsigned long offset;
+ struct btrfs_disk_key *tmp;
+ struct btrfs_disk_key unaligned;
+ int mid;
+
mid = (low + high) / 2;
offset = p + mid * item_size;
+ oip = offset_in_page(offset);
- if (!kaddr || offset < map_start ||
- (offset + sizeof(struct btrfs_disk_key)) >
- map_start + map_len) {
-
- err = map_private_extent_buffer(eb, offset,
- sizeof(struct btrfs_disk_key),
- &kaddr, &map_start, &map_len);
-
- if (!err) {
- tmp = (struct btrfs_disk_key *)(kaddr + offset -
- map_start);
- } else if (err == 1) {
- read_extent_buffer(eb, &unaligned,
- offset, sizeof(unaligned));
- tmp = &unaligned;
- } else {
- return err;
- }
+ if (oip + key_size <= PAGE_SIZE) {
+ const unsigned long idx = offset >> PAGE_SHIFT;
+ char *kaddr = page_address(eb->pages[idx]);
+ tmp = (struct btrfs_disk_key *)(kaddr + oip);
} else {
- tmp = (struct btrfs_disk_key *)(kaddr + offset -
- map_start);
+ read_extent_buffer(eb, &unaligned, offset, key_size);
+ tmp = &unaligned;
}
+
ret = comp_keys(tmp, key);
if (ret < 0)
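
The rework above drops map_private_extent_buffer() in favour of an open-coded check: when the disk key fits entirely inside one page it is read in place through page_address(), and only a key straddling a page boundary is copied into the on-stack "unaligned" buffer. A standalone sketch of that fast/slow-path split (userspace C; a plain page array stands in for eb->pages):

#include <stdint.h>
#include <string.h>

#define PAGE_SIZE 4096UL

struct disk_key {		/* stands in for the 17-byte btrfs_disk_key */
	uint64_t objectid;
	uint8_t type;
	uint64_t offset;
} __attribute__((packed));

/* Cross-page copy, standing in for read_extent_buffer() */
static void read_bytes(char * const *pages, unsigned long off, void *dst,
		       size_t len)
{
	while (len) {
		unsigned long oip = off % PAGE_SIZE;
		size_t cur = len < PAGE_SIZE - oip ? len : PAGE_SIZE - oip;

		memcpy(dst, pages[off / PAGE_SIZE] + oip, cur);
		dst = (char *)dst + cur;
		off += cur;
		len -= cur;
	}
}

static const struct disk_key *key_at(char * const *pages, unsigned long off,
				     struct disk_key *unaligned)
{
	unsigned long oip = off % PAGE_SIZE;	/* offset_in_page(off) */

	/* Fast path: the whole key lives inside a single page */
	if (oip + sizeof(struct disk_key) <= PAGE_SIZE)
		return (const struct disk_key *)(pages[off / PAGE_SIZE] + oip);

	/* Slow path: the key straddles a boundary, copy it out first */
	read_bytes(pages, off, unaligned, sizeof(*unaligned));
	return unaligned;
}

int main(void)
{
	static char p0[PAGE_SIZE], p1[PAGE_SIZE];
	char *pages[] = { p0, p1 };
	struct disk_key unaligned;

	/* A key starting 8 bytes before the boundary takes the slow path */
	return key_at(pages, PAGE_SIZE - 8, &unaligned) == &unaligned ? 0 : 1;
}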
@@ -1733,9 +1720,9 @@ static noinline int generic_bin_search(struct extent_buffer *eb,
* leaves vs nodes
*/
int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
- int level, int *slot)
+ int *slot)
{
- if (level == 0)
+ if (btrfs_header_level(eb) == 0)
return generic_bin_search(eb,
offsetof(struct btrfs_leaf, items),
sizeof(struct btrfs_item),
@@ -2348,16 +2335,15 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
struct btrfs_fs_info *fs_info = root->fs_info;
u64 blocknr;
u64 gen;
- struct extent_buffer *b = *eb_ret;
struct extent_buffer *tmp;
struct btrfs_key first_key;
int ret;
int parent_level;
- blocknr = btrfs_node_blockptr(b, slot);
- gen = btrfs_node_ptr_generation(b, slot);
- parent_level = btrfs_header_level(b);
- btrfs_node_key_to_cpu(b, &first_key, slot);
+ blocknr = btrfs_node_blockptr(*eb_ret, slot);
+ gen = btrfs_node_ptr_generation(*eb_ret, slot);
+ parent_level = btrfs_header_level(*eb_ret);
+ btrfs_node_key_to_cpu(*eb_ret, &first_key, slot);
tmp = find_extent_buffer(fs_info, blocknr);
if (tmp) {
@@ -2501,19 +2487,6 @@ done:
return ret;
}
-static int key_search(struct extent_buffer *b, const struct btrfs_key *key,
- int level, int *prev_cmp, int *slot)
-{
- if (*prev_cmp != 0) {
- *prev_cmp = btrfs_bin_search(b, key, level, slot);
- return *prev_cmp;
- }
-
- *slot = 0;
-
- return 0;
-}
-
int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
u64 iobjectid, u64 ioff, u8 key_type,
struct btrfs_key *found_key)
@@ -2783,9 +2756,23 @@ cow_done:
}
}
- ret = key_search(b, key, level, &prev_cmp, &slot);
- if (ret < 0)
- goto done;
+ /*
+ * If btrfs_bin_search() returned an exact match (prev_cmp == 0), the
+ * target key is guaranteed to be in slot 0 of every lower level. This
+ * follows from the btree invariant that a btrfs_key_ptr entry always
+ * points to the lowest key in its child node, so an exact match here
+ * lets us skip searching the lower levels entirely.
+ */
+ if (prev_cmp == 0) {
+ slot = 0;
+ ret = 0;
+ } else {
+ ret = btrfs_bin_search(b, key, &slot);
+ prev_cmp = ret;
+ if (ret < 0)
+ goto done;
+ }
if (level == 0) {
p->slots[level] = slot;
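
A worked illustration of that invariant, with hypothetical keys: searching for 37, btrfs_bin_search() matches exactly at level 1, and because each key_ptr carries the lowest key of its child, 37 must occupy slot 0 of every node on the way down:

	level 1:  [ 10 | 37 | 80 ]   <- exact match at slot 1
	                 |
	level 0:  [ 37 | 40 | 55 ]   <- lowest key == parent's key_ptr key,
	                                so the match is slot 0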
@@ -2909,7 +2896,6 @@ int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
int level;
int lowest_unlock = 1;
u8 lowest_level = 0;
- int prev_cmp = -1;
lowest_level = p->lowest_level;
WARN_ON(p->nodes[0] != NULL);
@@ -2942,12 +2928,7 @@ again:
*/
btrfs_unlock_up_safe(p, level + 1);
- /*
- * Since we can unwind ebs we want to do a real search every
- * time.
- */
- prev_cmp = -1;
- ret = key_search(b, key, level, &prev_cmp, &slot);
+ ret = btrfs_bin_search(b, key, &slot);
if (ret < 0)
goto done;
@@ -3507,19 +3488,17 @@ static int leaf_space_used(struct extent_buffer *l, int start, int nr)
{
struct btrfs_item *start_item;
struct btrfs_item *end_item;
- struct btrfs_map_token token;
int data_len;
int nritems = btrfs_header_nritems(l);
int end = min(nritems, start + nr) - 1;
if (!nr)
return 0;
- btrfs_init_map_token(&token, l);
start_item = btrfs_item_nr(start);
end_item = btrfs_item_nr(end);
- data_len = btrfs_token_item_offset(l, start_item, &token) +
- btrfs_token_item_size(l, start_item, &token);
- data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
+ data_len = btrfs_item_offset(l, start_item) +
+ btrfs_item_size(l, start_item);
+ data_len = data_len - btrfs_item_offset(l, end_item);
data_len += sizeof(struct btrfs_item) * nr;
WARN_ON(data_len < 0);
return data_len;
@@ -3650,8 +3629,8 @@ static noinline int __push_leaf_right(struct btrfs_path *path,
push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
for (i = 0; i < right_nritems; i++) {
item = btrfs_item_nr(i);
- push_space -= btrfs_token_item_size(right, item, &token);
- btrfs_set_token_item_offset(right, item, push_space, &token);
+ push_space -= btrfs_token_item_size(&token, item);
+ btrfs_set_token_item_offset(&token, item, push_space);
}
left_nritems -= push_items;
@@ -3859,10 +3838,9 @@ static noinline int __push_leaf_left(struct btrfs_path *path, int data_size,
item = btrfs_item_nr(i);
- ioff = btrfs_token_item_offset(left, item, &token);
- btrfs_set_token_item_offset(left, item,
- ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size),
- &token);
+ ioff = btrfs_token_item_offset(&token, item);
+ btrfs_set_token_item_offset(&token, item,
+ ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size));
}
btrfs_set_header_nritems(left, old_left_nritems + push_items);
@@ -3892,9 +3870,8 @@ static noinline int __push_leaf_left(struct btrfs_path *path, int data_size,
for (i = 0; i < right_nritems; i++) {
item = btrfs_item_nr(i);
- push_space = push_space - btrfs_token_item_size(right,
- item, &token);
- btrfs_set_token_item_offset(right, item, push_space, &token);
+ push_space = push_space - btrfs_token_item_size(&token, item);
+ btrfs_set_token_item_offset(&token, item, push_space);
}
btrfs_mark_buffer_dirty(left);
@@ -4036,9 +4013,8 @@ static noinline void copy_for_split(struct btrfs_trans_handle *trans,
struct btrfs_item *item = btrfs_item_nr(i);
u32 ioff;
- ioff = btrfs_token_item_offset(right, item, &token);
- btrfs_set_token_item_offset(right, item,
- ioff + rt_data_off, &token);
+ ioff = btrfs_token_item_offset(&token, item);
+ btrfs_set_token_item_offset(&token, item, ioff + rt_data_off);
}
btrfs_set_header_nritems(l, mid);
@@ -4541,9 +4517,8 @@ void btrfs_truncate_item(struct btrfs_path *path, u32 new_size, int from_end)
u32 ioff;
item = btrfs_item_nr(i);
- ioff = btrfs_token_item_offset(leaf, item, &token);
- btrfs_set_token_item_offset(leaf, item,
- ioff + size_diff, &token);
+ ioff = btrfs_token_item_offset(&token, item);
+ btrfs_set_token_item_offset(&token, item, ioff + size_diff);
}
/* shift the data */
@@ -4640,9 +4615,8 @@ void btrfs_extend_item(struct btrfs_path *path, u32 data_size)
u32 ioff;
item = btrfs_item_nr(i);
- ioff = btrfs_token_item_offset(leaf, item, &token);
- btrfs_set_token_item_offset(leaf, item,
- ioff - data_size, &token);
+ ioff = btrfs_token_item_offset(&token, item);
+ btrfs_set_token_item_offset(&token, item, ioff - data_size);
}
/* shift the data */
@@ -4718,9 +4692,9 @@ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
u32 ioff;
item = btrfs_item_nr(i);
- ioff = btrfs_token_item_offset(leaf, item, &token);
- btrfs_set_token_item_offset(leaf, item,
- ioff - total_data, &token);
+ ioff = btrfs_token_item_offset(&token, item);
+ btrfs_set_token_item_offset(&token, item,
+ ioff - total_data);
}
/* shift the items */
memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
@@ -4739,10 +4713,9 @@ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
btrfs_set_item_key(leaf, &disk_key, slot + i);
item = btrfs_item_nr(slot + i);
- btrfs_set_token_item_offset(leaf, item,
- data_end - data_size[i], &token);
+ btrfs_set_token_item_offset(&token, item, data_end - data_size[i]);
data_end -= data_size[i];
- btrfs_set_token_item_size(leaf, item, data_size[i], &token);
+ btrfs_set_token_item_size(&token, item, data_size[i]);
}
btrfs_set_header_nritems(leaf, nritems + nr);
@@ -4930,9 +4903,8 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
u32 ioff;
item = btrfs_item_nr(i);
- ioff = btrfs_token_item_offset(leaf, item, &token);
- btrfs_set_token_item_offset(leaf, item,
- ioff + dsize, &token);
+ ioff = btrfs_token_item_offset(&token, item);
+ btrfs_set_token_item_offset(&token, item, ioff + dsize);
}
memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
@@ -5103,7 +5075,7 @@ again:
while (1) {
nritems = btrfs_header_nritems(cur);
level = btrfs_header_level(cur);
- sret = btrfs_bin_search(cur, min_key, level, &slot);
+ sret = btrfs_bin_search(cur, min_key, &slot);
if (sret < 0) {
ret = sret;
goto out;
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 8aa7b9dac405..161533040978 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -28,6 +28,7 @@
#include <linux/dynamic_debug.h>
#include <linux/refcount.h>
#include <linux/crc32c.h>
+#include <linux/iomap.h>
#include "extent-io-tree.h"
#include "extent_io.h"
#include "extent_map.h"
@@ -582,6 +583,7 @@ struct btrfs_fs_info {
struct btrfs_root *quota_root;
struct btrfs_root *uuid_root;
struct btrfs_root *free_space_root;
+ struct btrfs_root *data_reloc_root;
/* the log root tree is a directory of all the other log roots */
struct btrfs_root *log_root_tree;
@@ -758,7 +760,6 @@ struct btrfs_fs_info {
struct btrfs_workqueue *endio_workers;
struct btrfs_workqueue *endio_meta_workers;
struct btrfs_workqueue *endio_raid56_workers;
- struct btrfs_workqueue *endio_repair_workers;
struct btrfs_workqueue *rmw_workers;
struct btrfs_workqueue *endio_meta_write_workers;
struct btrfs_workqueue *endio_write_workers;
@@ -970,7 +971,28 @@ enum {
* is used to tell us when more checks are required
*/
BTRFS_ROOT_IN_TRANS_SETUP,
- BTRFS_ROOT_REF_COWS,
+
+ /*
+ * Set if tree blocks of this root can be shared by other roots.
+ * Only subvolume trees and their reloc trees have this bit set.
+ * Conflicts with TRACK_DIRTY bit.
+ *
+ * This affects two things:
+ *
+ * - How balance works
+ * For shareable roots, we need to use a reloc tree and do path
+ * replacement for balance, and need various pre/post hooks for
+ * snapshot creation to handle them.
+ *
+ * For non-shareable trees, we simply do a tree search with COW.
+ *
+ * - How dirty roots are tracked
+ * For shareable roots, btrfs_record_root_in_trans() is needed to
+ * track them. Non-subvolume roots have the TRACK_DIRTY bit instead,
+ * so they don't need this manual step.
+ */
+ BTRFS_ROOT_SHAREABLE,
BTRFS_ROOT_TRACK_DIRTY,
BTRFS_ROOT_IN_RADIX,
BTRFS_ROOT_ORPHAN_ITEM_INSERTED,
@@ -1056,7 +1078,7 @@ struct btrfs_root {
struct btrfs_key defrag_progress;
struct btrfs_key defrag_max;
- /* the dirty list is only used by non-reference counted roots */
+ /* The dirty list is only used by non-shareable roots */
struct list_head dirty_list;
struct list_head root_list;
@@ -1146,6 +1168,9 @@ struct btrfs_root {
/* Record pairs of swapped blocks for qgroup */
struct btrfs_qgroup_swapped_blocks swapped_blocks;
+ /* Used only by log trees, when logging csum items */
+ struct extent_io_tree log_csum_range;
+
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
u64 alloc_bytenr;
#endif
@@ -1341,7 +1366,7 @@ do { \
BTRFS_INODE_ROOT_ITEM_INIT)
struct btrfs_map_token {
- const struct extent_buffer *eb;
+ struct extent_buffer *eb;
char *kaddr;
unsigned long offset;
};
@@ -1353,7 +1378,8 @@ static inline void btrfs_init_map_token(struct btrfs_map_token *token,
struct extent_buffer *eb)
{
token->eb = eb;
- token->kaddr = NULL;
+ token->kaddr = page_address(eb->pages[0]);
+ token->offset = 0;
}
/* some macros to generate set/get functions for the struct fields. This
@@ -1377,15 +1403,14 @@ static inline void btrfs_init_map_token(struct btrfs_map_token *token,
sizeof(((type *)0)->member)))
#define DECLARE_BTRFS_SETGET_BITS(bits) \
-u##bits btrfs_get_token_##bits(const struct extent_buffer *eb, \
- const void *ptr, unsigned long off, \
- struct btrfs_map_token *token); \
-void btrfs_set_token_##bits(struct extent_buffer *eb, const void *ptr, \
- unsigned long off, u##bits val, \
- struct btrfs_map_token *token); \
+u##bits btrfs_get_token_##bits(struct btrfs_map_token *token, \
+ const void *ptr, unsigned long off); \
+void btrfs_set_token_##bits(struct btrfs_map_token *token, \
+ const void *ptr, unsigned long off, \
+ u##bits val); \
u##bits btrfs_get_##bits(const struct extent_buffer *eb, \
const void *ptr, unsigned long off); \
-void btrfs_set_##bits(struct extent_buffer *eb, void *ptr, \
+void btrfs_set_##bits(const struct extent_buffer *eb, void *ptr, \
unsigned long off, u##bits val);
DECLARE_BTRFS_SETGET_BITS(8)
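
Expanding the reworked macro for one width makes the new calling convention concrete; DECLARE_BTRFS_SETGET_BITS(16), for example, now emits these prototypes:

	u16 btrfs_get_token_16(struct btrfs_map_token *token,
			       const void *ptr, unsigned long off);
	void btrfs_set_token_16(struct btrfs_map_token *token,
				const void *ptr, unsigned long off, u16 val);
	u16 btrfs_get_16(const struct extent_buffer *eb,
			 const void *ptr, unsigned long off);
	void btrfs_set_16(const struct extent_buffer *eb, void *ptr,
			  unsigned long off, u16 val);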
@@ -1400,25 +1425,23 @@ static inline u##bits btrfs_##name(const struct extent_buffer *eb, \
BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \
return btrfs_get_##bits(eb, s, offsetof(type, member)); \
} \
-static inline void btrfs_set_##name(struct extent_buffer *eb, type *s, \
+static inline void btrfs_set_##name(const struct extent_buffer *eb, type *s, \
u##bits val) \
{ \
BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \
btrfs_set_##bits(eb, s, offsetof(type, member), val); \
} \
-static inline u##bits btrfs_token_##name(const struct extent_buffer *eb,\
- const type *s, \
- struct btrfs_map_token *token) \
+static inline u##bits btrfs_token_##name(struct btrfs_map_token *token, \
+ const type *s) \
{ \
BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \
- return btrfs_get_token_##bits(eb, s, offsetof(type, member), token); \
+ return btrfs_get_token_##bits(token, s, offsetof(type, member));\
} \
-static inline void btrfs_set_token_##name(struct extent_buffer *eb, \
- type *s, u##bits val, \
- struct btrfs_map_token *token) \
+static inline void btrfs_set_token_##name(struct btrfs_map_token *token,\
+ type *s, u##bits val) \
{ \
BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \
- btrfs_set_token_##bits(eb, s, offsetof(type, member), val, token); \
+ btrfs_set_token_##bits(token, s, offsetof(type, member), val); \
}
#define BTRFS_SETGET_HEADER_FUNCS(name, type, member, bits) \
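
With the token initialized eagerly (kaddr starts at the first page) and passed as the first argument, the call pattern repeated throughout the ctree.c hunks above reduces to the following sketch, where leaf, nritems and shift are placeholders; the token caches the last mapped page, so consecutive accesses to nearby items skip the page lookup:

	struct btrfs_map_token token;
	int i;

	btrfs_init_map_token(&token, leaf);
	for (i = 0; i < nritems; i++) {
		struct btrfs_item *item = btrfs_item_nr(i);
		u32 ioff = btrfs_token_item_offset(&token, item);

		btrfs_set_token_item_offset(&token, item, ioff + shift);
	}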
@@ -1428,7 +1451,7 @@ static inline u##bits btrfs_##name(const struct extent_buffer *eb) \
u##bits res = le##bits##_to_cpu(p->member); \
return res; \
} \
-static inline void btrfs_set_##name(struct extent_buffer *eb, \
+static inline void btrfs_set_##name(const struct extent_buffer *eb, \
u##bits val) \
{ \
type *p = page_address(eb->pages[0]); \
@@ -1446,7 +1469,7 @@ static inline void btrfs_set_##name(type *s, u##bits val) \
}
-static inline u64 btrfs_device_total_bytes(struct extent_buffer *eb,
+static inline u64 btrfs_device_total_bytes(const struct extent_buffer *eb,
struct btrfs_dev_item *s)
{
BUILD_BUG_ON(sizeof(u64) !=
@@ -1454,7 +1477,7 @@ static inline u64 btrfs_device_total_bytes(struct extent_buffer *eb,
return btrfs_get_64(eb, s, offsetof(struct btrfs_dev_item,
total_bytes));
}
-static inline void btrfs_set_device_total_bytes(struct extent_buffer *eb,
+static inline void btrfs_set_device_total_bytes(const struct extent_buffer *eb,
struct btrfs_dev_item *s,
u64 val)
{
@@ -1558,13 +1581,13 @@ static inline char *btrfs_stripe_dev_uuid_nr(struct btrfs_chunk *c, int nr)
return btrfs_stripe_dev_uuid(btrfs_stripe_nr(c, nr));
}
-static inline u64 btrfs_stripe_offset_nr(struct extent_buffer *eb,
+static inline u64 btrfs_stripe_offset_nr(const struct extent_buffer *eb,
struct btrfs_chunk *c, int nr)
{
return btrfs_stripe_offset(eb, btrfs_stripe_nr(c, nr));
}
-static inline u64 btrfs_stripe_devid_nr(struct extent_buffer *eb,
+static inline u64 btrfs_stripe_devid_nr(const struct extent_buffer *eb,
struct btrfs_chunk *c, int nr)
{
return btrfs_stripe_devid(eb, btrfs_stripe_nr(c, nr));
@@ -1644,31 +1667,21 @@ BTRFS_SETGET_FUNCS(dev_extent_chunk_objectid, struct btrfs_dev_extent,
BTRFS_SETGET_FUNCS(dev_extent_chunk_offset, struct btrfs_dev_extent,
chunk_offset, 64);
BTRFS_SETGET_FUNCS(dev_extent_length, struct btrfs_dev_extent, length, 64);
-
-static inline unsigned long btrfs_dev_extent_chunk_tree_uuid(struct btrfs_dev_extent *dev)
-{
- unsigned long ptr = offsetof(struct btrfs_dev_extent, chunk_tree_uuid);
- return (unsigned long)dev + ptr;
-}
-
BTRFS_SETGET_FUNCS(extent_refs, struct btrfs_extent_item, refs, 64);
BTRFS_SETGET_FUNCS(extent_generation, struct btrfs_extent_item,
generation, 64);
BTRFS_SETGET_FUNCS(extent_flags, struct btrfs_extent_item, flags, 64);
-BTRFS_SETGET_FUNCS(extent_refs_v0, struct btrfs_extent_item_v0, refs, 32);
-
-
BTRFS_SETGET_FUNCS(tree_block_level, struct btrfs_tree_block_info, level, 8);
-static inline void btrfs_tree_block_key(struct extent_buffer *eb,
+static inline void btrfs_tree_block_key(const struct extent_buffer *eb,
struct btrfs_tree_block_info *item,
struct btrfs_disk_key *key)
{
read_eb_member(eb, item, struct btrfs_tree_block_info, key, key);
}
-static inline void btrfs_set_tree_block_key(struct extent_buffer *eb,
+static inline void btrfs_set_tree_block_key(const struct extent_buffer *eb,
struct btrfs_tree_block_info *item,
struct btrfs_disk_key *key)
{
@@ -1706,12 +1719,6 @@ static inline u32 btrfs_extent_inline_ref_size(int type)
return 0;
}
-BTRFS_SETGET_FUNCS(ref_root_v0, struct btrfs_extent_ref_v0, root, 64);
-BTRFS_SETGET_FUNCS(ref_generation_v0, struct btrfs_extent_ref_v0,
- generation, 64);
-BTRFS_SETGET_FUNCS(ref_objectid_v0, struct btrfs_extent_ref_v0, objectid, 64);
-BTRFS_SETGET_FUNCS(ref_count_v0, struct btrfs_extent_ref_v0, count, 32);
-
/* struct btrfs_node */
BTRFS_SETGET_FUNCS(key_blockptr, struct btrfs_key_ptr, blockptr, 64);
BTRFS_SETGET_FUNCS(key_generation, struct btrfs_key_ptr, generation, 64);
@@ -1720,7 +1727,7 @@ BTRFS_SETGET_STACK_FUNCS(stack_key_blockptr, struct btrfs_key_ptr,
BTRFS_SETGET_STACK_FUNCS(stack_key_generation, struct btrfs_key_ptr,
generation, 64);
-static inline u64 btrfs_node_blockptr(struct extent_buffer *eb, int nr)
+static inline u64 btrfs_node_blockptr(const struct extent_buffer *eb, int nr)
{
unsigned long ptr;
ptr = offsetof(struct btrfs_node, ptrs) +
@@ -1728,7 +1735,7 @@ static inline u64 btrfs_node_blockptr(struct extent_buffer *eb, int nr)
return btrfs_key_blockptr(eb, (struct btrfs_key_ptr *)ptr);
}
-static inline void btrfs_set_node_blockptr(struct extent_buffer *eb,
+static inline void btrfs_set_node_blockptr(const struct extent_buffer *eb,
int nr, u64 val)
{
unsigned long ptr;
@@ -1737,7 +1744,7 @@ static inline void btrfs_set_node_blockptr(struct extent_buffer *eb,
btrfs_set_key_blockptr(eb, (struct btrfs_key_ptr *)ptr, val);
}
-static inline u64 btrfs_node_ptr_generation(struct extent_buffer *eb, int nr)
+static inline u64 btrfs_node_ptr_generation(const struct extent_buffer *eb, int nr)
{
unsigned long ptr;
ptr = offsetof(struct btrfs_node, ptrs) +
@@ -1745,7 +1752,7 @@ static inline u64 btrfs_node_ptr_generation(struct extent_buffer *eb, int nr)
return btrfs_key_generation(eb, (struct btrfs_key_ptr *)ptr);
}
-static inline void btrfs_set_node_ptr_generation(struct extent_buffer *eb,
+static inline void btrfs_set_node_ptr_generation(const struct extent_buffer *eb,
int nr, u64 val)
{
unsigned long ptr;
@@ -1763,7 +1770,7 @@ static inline unsigned long btrfs_node_key_ptr_offset(int nr)
void btrfs_node_key(const struct extent_buffer *eb,
struct btrfs_disk_key *disk_key, int nr);
-static inline void btrfs_set_node_key(struct extent_buffer *eb,
+static inline void btrfs_set_node_key(const struct extent_buffer *eb,
struct btrfs_disk_key *disk_key, int nr)
{
unsigned long ptr;
@@ -2498,8 +2505,6 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
struct btrfs_ref *generic_ref);
int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr);
-void btrfs_get_block_group_trimming(struct btrfs_block_group *cache);
-void btrfs_put_block_group_trimming(struct btrfs_block_group *cache);
void btrfs_clear_space_info_full(struct btrfs_fs_info *info);
enum btrfs_reserve_flush_enum {
@@ -2512,6 +2517,7 @@ enum btrfs_reserve_flush_enum {
BTRFS_RESERVE_FLUSH_LIMIT,
BTRFS_RESERVE_FLUSH_EVICT,
BTRFS_RESERVE_FLUSH_ALL,
+ BTRFS_RESERVE_FLUSH_ALL_STEAL,
};
enum btrfs_flush_state {
@@ -2551,7 +2557,7 @@ void btrfs_wait_for_snapshot_creation(struct btrfs_root *root);
/* ctree.c */
int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
- int level, int *slot);
+ int *slot);
int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2);
int btrfs_previous_item(struct btrfs_root *root,
struct btrfs_path *path, u64 min_objectid,
@@ -2896,10 +2902,9 @@ void btrfs_free_inode(struct inode *inode);
int btrfs_drop_inode(struct inode *inode);
int __init btrfs_init_cachep(void);
void __cold btrfs_destroy_cachep(void);
-struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location,
+struct inode *btrfs_iget_path(struct super_block *s, u64 ino,
struct btrfs_root *root, struct btrfs_path *path);
-struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
- struct btrfs_root *root);
+struct inode *btrfs_iget(struct super_block *s, u64 ino, struct btrfs_root *root);
struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
struct page *page, size_t pg_offset,
u64 start, u64 end);
@@ -2929,6 +2934,9 @@ int btrfs_writepage_cow_fixup(struct page *page, u64 start, u64 end);
void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start,
u64 end, int uptodate);
extern const struct dentry_operations btrfs_dentry_operations;
+ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter);
+extern const struct iomap_ops btrfs_dio_iomap_ops;
+extern const struct iomap_dio_ops btrfs_dops;
/* ioctl.c */
long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
@@ -3381,6 +3389,9 @@ void btrfs_reloc_pre_snapshot(struct btrfs_pending_snapshot *pending,
int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
struct btrfs_pending_snapshot *pending);
int btrfs_should_cancel_balance(struct btrfs_fs_info *fs_info);
+struct btrfs_root *find_reloc_root(struct btrfs_fs_info *fs_info,
+ u64 bytenr);
+int btrfs_should_ignore_reloc_root(struct btrfs_root *root);
/* scrub.c */
int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 7278789ff8a7..7c6f0bbb54a5 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -358,16 +358,14 @@ static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
shash->tfm = fs_info->csum_shash;
- crypto_shash_init(shash);
/*
* The super_block structure does not span the whole
* BTRFS_SUPER_INFO_SIZE range, we expect that the unused space is
* filled with zeros and is included in the checksum.
*/
- crypto_shash_update(shash, raw_disk_sb + BTRFS_CSUM_SIZE,
- BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
- crypto_shash_final(shash, result);
+ crypto_shash_digest(shash, raw_disk_sb + BTRFS_CSUM_SIZE,
+ BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, result);
if (memcmp(disk_sb->csum, result, btrfs_super_csum_size(disk_sb)))
return 1;
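
Both checksum sites in this file (here and in write_dev_supers() below) collapse the init/update/final triple into one call; for a single contiguous buffer the two forms are equivalent:

	/* before: three calls */
	crypto_shash_init(shash);
	crypto_shash_update(shash, data, len);
	crypto_shash_final(shash, result);

	/* after: one call */
	crypto_shash_digest(shash, data, len, result);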
@@ -709,9 +707,7 @@ static void end_workqueue_bio(struct bio *bio)
else
wq = fs_info->endio_write_workers;
} else {
- if (unlikely(end_io_wq->metadata == BTRFS_WQ_ENDIO_DIO_REPAIR))
- wq = fs_info->endio_repair_workers;
- else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
+ if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
wq = fs_info->endio_raid56_workers;
else if (end_io_wq->metadata)
wq = fs_info->endio_meta_workers;
@@ -1135,9 +1131,12 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
root->log_transid = 0;
root->log_transid_committed = -1;
root->last_log_commit = 0;
- if (!dummy)
+ if (!dummy) {
extent_io_tree_init(fs_info, &root->dirty_log_pages,
IO_TREE_ROOT_DIRTY_LOG_PAGES, NULL);
+ extent_io_tree_init(fs_info, &root->log_csum_range,
+ IO_TREE_LOG_CSUM_RANGE, NULL);
+ }
memset(&root->root_key, 0, sizeof(root->root_key));
memset(&root->root_item, 0, sizeof(root->root_item));
@@ -1275,12 +1274,13 @@ static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
/*
- * DON'T set REF_COWS for log trees
+ * DON'T set SHAREABLE bit for log trees.
*
- * log trees do not get reference counted because they go away
- * before a real commit is actually done. They do store pointers
- * to file data extents, and those reference counts still get
- * updated (along with back refs to the log tree).
+ * Log trees are not exposed to user space and thus can't be snapshotted,
+ * and they go away before a real commit is actually done.
+ *
+ * They do store pointers to file data extents, and those reference
+ * counts still get updated (along with back refs to the log tree).
*/
leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
@@ -1418,8 +1418,9 @@ static int btrfs_init_fs_root(struct btrfs_root *root)
if (ret)
goto fail;
- if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
- set_bit(BTRFS_ROOT_REF_COWS, &root->state);
+ if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID &&
+ root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) {
+ set_bit(BTRFS_ROOT_SHAREABLE, &root->state);
btrfs_check_and_init_root_item(&root->root_item);
}
@@ -1524,6 +1525,7 @@ void btrfs_free_fs_info(struct btrfs_fs_info *fs_info)
btrfs_put_root(fs_info->uuid_root);
btrfs_put_root(fs_info->free_space_root);
btrfs_put_root(fs_info->fs_root);
+ btrfs_put_root(fs_info->data_reloc_root);
btrfs_check_leaked_roots(fs_info);
btrfs_extent_buffer_leak_debug_check(fs_info);
kfree(fs_info->super_copy);
@@ -1533,35 +1535,34 @@ void btrfs_free_fs_info(struct btrfs_fs_info *fs_info)
struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
- struct btrfs_key *location,
- bool check_ref)
+ u64 objectid, bool check_ref)
{
struct btrfs_root *root;
struct btrfs_path *path;
struct btrfs_key key;
int ret;
- if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
+ if (objectid == BTRFS_ROOT_TREE_OBJECTID)
return btrfs_grab_root(fs_info->tree_root);
- if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
+ if (objectid == BTRFS_EXTENT_TREE_OBJECTID)
return btrfs_grab_root(fs_info->extent_root);
- if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
+ if (objectid == BTRFS_CHUNK_TREE_OBJECTID)
return btrfs_grab_root(fs_info->chunk_root);
- if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
+ if (objectid == BTRFS_DEV_TREE_OBJECTID)
return btrfs_grab_root(fs_info->dev_root);
- if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
+ if (objectid == BTRFS_CSUM_TREE_OBJECTID)
return btrfs_grab_root(fs_info->csum_root);
- if (location->objectid == BTRFS_QUOTA_TREE_OBJECTID)
+ if (objectid == BTRFS_QUOTA_TREE_OBJECTID)
return btrfs_grab_root(fs_info->quota_root) ?
fs_info->quota_root : ERR_PTR(-ENOENT);
- if (location->objectid == BTRFS_UUID_TREE_OBJECTID)
+ if (objectid == BTRFS_UUID_TREE_OBJECTID)
return btrfs_grab_root(fs_info->uuid_root) ?
fs_info->uuid_root : ERR_PTR(-ENOENT);
- if (location->objectid == BTRFS_FREE_SPACE_TREE_OBJECTID)
+ if (objectid == BTRFS_FREE_SPACE_TREE_OBJECTID)
return btrfs_grab_root(fs_info->free_space_root) ?
fs_info->free_space_root : ERR_PTR(-ENOENT);
again:
- root = btrfs_lookup_fs_root(fs_info, location->objectid);
+ root = btrfs_lookup_fs_root(fs_info, objectid);
if (root) {
if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
btrfs_put_root(root);
@@ -1570,7 +1571,10 @@ again:
return root;
}
- root = btrfs_read_tree_root(fs_info->tree_root, location);
+ key.objectid = objectid;
+ key.type = BTRFS_ROOT_ITEM_KEY;
+ key.offset = (u64)-1;
+ root = btrfs_read_tree_root(fs_info->tree_root, &key);
if (IS_ERR(root))
return root;
@@ -1590,7 +1594,7 @@ again:
}
key.objectid = BTRFS_ORPHAN_OBJECTID;
key.type = BTRFS_ORPHAN_ITEM_KEY;
- key.offset = location->objectid;
+ key.offset = objectid;
ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
btrfs_free_path(path);
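
The new signature removes boilerplate at every caller: the key's type and offset were effectively fixed, so only the objectid ever varied in practice. Compare (taken from the export.c hunk further down):

	/* before */
	key.objectid = root_objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;
	root = btrfs_get_fs_root(fs_info, &key, true);

	/* after */
	root = btrfs_get_fs_root(fs_info, root_objectid, true);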
@@ -1940,7 +1944,6 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
btrfs_destroy_workqueue(fs_info->workers);
btrfs_destroy_workqueue(fs_info->endio_workers);
btrfs_destroy_workqueue(fs_info->endio_raid56_workers);
- btrfs_destroy_workqueue(fs_info->endio_repair_workers);
btrfs_destroy_workqueue(fs_info->rmw_workers);
btrfs_destroy_workqueue(fs_info->endio_write_workers);
btrfs_destroy_workqueue(fs_info->endio_freespace_worker);
@@ -1981,6 +1984,7 @@ static void free_root_pointers(struct btrfs_fs_info *info, bool free_chunk_root)
free_root_extent_buffers(info->quota_root);
free_root_extent_buffers(info->uuid_root);
free_root_extent_buffers(info->fs_root);
+ free_root_extent_buffers(info->data_reloc_root);
if (free_chunk_root)
free_root_extent_buffers(info->chunk_root);
free_root_extent_buffers(info->free_space_root);
@@ -1993,6 +1997,7 @@ void btrfs_put_root(struct btrfs_root *root)
if (refcount_dec_and_test(&root->refs)) {
WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
+ WARN_ON(test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state));
if (root->anon_dev)
free_anon_bdev(root->anon_dev);
btrfs_drew_lock_destroy(&root->snapshot_lock);
@@ -2143,8 +2148,6 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
fs_info->endio_raid56_workers =
btrfs_alloc_workqueue(fs_info, "endio-raid56", flags,
max_active, 4);
- fs_info->endio_repair_workers =
- btrfs_alloc_workqueue(fs_info, "endio-repair", flags, 1, 0);
fs_info->rmw_workers =
btrfs_alloc_workqueue(fs_info, "rmw", flags, max_active, 2);
fs_info->endio_write_workers =
@@ -2168,7 +2171,6 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
fs_info->flush_workers &&
fs_info->endio_workers && fs_info->endio_meta_workers &&
fs_info->endio_meta_write_workers &&
- fs_info->endio_repair_workers &&
fs_info->endio_write_workers && fs_info->endio_raid56_workers &&
fs_info->endio_freespace_worker && fs_info->rmw_workers &&
fs_info->caching_workers && fs_info->readahead_workers &&
@@ -2290,6 +2292,19 @@ static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
fs_info->csum_root = root;
+ /*
+ * This tree can share blocks with some other fs tree during relocation,
+ * so it needs the full setup done by btrfs_get_fs_root().
+ */
+ root = btrfs_get_fs_root(tree_root->fs_info,
+ BTRFS_DATA_RELOC_TREE_OBJECTID, true);
+ if (IS_ERR(root)) {
+ ret = PTR_ERR(root);
+ goto out;
+ }
+ set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
+ fs_info->data_reloc_root = root;
+
location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
root = btrfs_read_tree_root(tree_root, &location);
if (!IS_ERR(root)) {
@@ -2827,7 +2842,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
u64 generation;
u64 features;
u16 csum_type;
- struct btrfs_key location;
struct btrfs_super_block *disk_super;
struct btrfs_fs_info *fs_info = btrfs_sb(sb);
struct btrfs_root *tree_root;
@@ -3241,11 +3255,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
}
}
- location.objectid = BTRFS_FS_TREE_OBJECTID;
- location.type = BTRFS_ROOT_ITEM_KEY;
- location.offset = 0;
-
- fs_info->fs_root = btrfs_get_fs_root(fs_info, &location, true);
+ fs_info->fs_root = btrfs_get_fs_root(fs_info, BTRFS_FS_TREE_OBJECTID, true);
if (IS_ERR(fs_info->fs_root)) {
err = PTR_ERR(fs_info->fs_root);
btrfs_warn(fs_info, "failed to read fs tree: %d", err);
@@ -3508,10 +3518,9 @@ static int write_dev_supers(struct btrfs_device *device,
btrfs_set_super_bytenr(sb, bytenr);
- crypto_shash_init(shash);
- crypto_shash_update(shash, (const char *)sb + BTRFS_CSUM_SIZE,
- BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
- crypto_shash_final(shash, sb->csum);
+ crypto_shash_digest(shash, (const char *)sb + BTRFS_CSUM_SIZE,
+ BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE,
+ sb->csum);
page = find_or_create_page(mapping, bytenr >> PAGE_SHIFT,
GFP_NOFS);
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index cd629113f61c..bf43245406c4 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -25,7 +25,6 @@ enum btrfs_wq_endio_type {
BTRFS_WQ_ENDIO_METADATA,
BTRFS_WQ_ENDIO_FREE_SPACE,
BTRFS_WQ_ENDIO_RAID56,
- BTRFS_WQ_ENDIO_DIO_REPAIR,
};
static inline u64 btrfs_sb_offset(int mirror)
@@ -67,8 +66,7 @@ int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info);
struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
- struct btrfs_key *key,
- bool check_ref);
+ u64 objectid, bool check_ref);
void btrfs_free_fs_info(struct btrfs_fs_info *fs_info);
int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info);
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index 2bb25d2dc44b..1a8d419d9e1f 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -64,24 +64,15 @@ struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
struct btrfs_fs_info *fs_info = btrfs_sb(sb);
struct btrfs_root *root;
struct inode *inode;
- struct btrfs_key key;
if (objectid < BTRFS_FIRST_FREE_OBJECTID)
return ERR_PTR(-ESTALE);
- key.objectid = root_objectid;
- key.type = BTRFS_ROOT_ITEM_KEY;
- key.offset = (u64)-1;
-
- root = btrfs_get_fs_root(fs_info, &key, true);
+ root = btrfs_get_fs_root(fs_info, root_objectid, true);
if (IS_ERR(root))
return ERR_CAST(root);
- key.objectid = objectid;
- key.type = BTRFS_INODE_ITEM_KEY;
- key.offset = 0;
-
- inode = btrfs_iget(sb, &key, root);
+ inode = btrfs_iget(sb, objectid, root);
btrfs_put_root(root);
if (IS_ERR(inode))
return ERR_CAST(inode);
@@ -200,9 +191,7 @@ struct dentry *btrfs_get_parent(struct dentry *child)
found_key.offset, 0, 0);
}
- key.type = BTRFS_INODE_ITEM_KEY;
- key.offset = 0;
- return d_obtain_alias(btrfs_iget(fs_info->sb, &key, root));
+ return d_obtain_alias(btrfs_iget(fs_info->sb, key.objectid, root));
fail:
btrfs_free_path(path);
return ERR_PTR(ret);
diff --git a/fs/btrfs/extent-io-tree.h b/fs/btrfs/extent-io-tree.h
index b4a7bad3e82e..b6561455b3c4 100644
--- a/fs/btrfs/extent-io-tree.h
+++ b/fs/btrfs/extent-io-tree.h
@@ -44,6 +44,7 @@ enum {
IO_TREE_TRANS_DIRTY_PAGES,
IO_TREE_ROOT_DIRTY_LOG_PAGES,
IO_TREE_INODE_FILE_EXTENT,
+ IO_TREE_LOG_CSUM_RANGE,
IO_TREE_SELFTEST,
};
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 54a64d1e18c6..c0bc35f932bf 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2114,22 +2114,6 @@ static u64 find_middle(struct rb_root *root)
}
#endif
-static inline u64 heads_to_leaves(struct btrfs_fs_info *fs_info, u64 heads)
-{
- u64 num_bytes;
-
- num_bytes = heads * (sizeof(struct btrfs_extent_item) +
- sizeof(struct btrfs_extent_inline_ref));
- if (!btrfs_fs_incompat(fs_info, SKINNY_METADATA))
- num_bytes += heads * sizeof(struct btrfs_tree_block_info);
-
- /*
- * We don't ever fill up leaves all the way so multiply by 2 just to be
- * closer to what we're really going to want to use.
- */
- return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(fs_info));
-}
-
/*
* Takes the number of bytes to be csumm'ed and figures out how many leaves it
* would require to store the csums for that many bytes.
@@ -2442,7 +2426,7 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
nritems = btrfs_header_nritems(buf);
level = btrfs_header_level(buf);
- if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0)
+ if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && level == 0)
return 0;
if (full_backref)
@@ -2932,7 +2916,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans)
&trimmed);
list_del_init(&block_group->bg_list);
- btrfs_put_block_group_trimming(block_group);
+ btrfs_unfreeze_block_group(block_group);
btrfs_put_block_group(block_group);
if (ret) {
@@ -3369,6 +3353,7 @@ static struct btrfs_block_group *btrfs_lock_cluster(
struct btrfs_block_group *block_group,
struct btrfs_free_cluster *cluster,
int delalloc)
+ __acquires(&cluster->refill_lock)
{
struct btrfs_block_group *used_bg = NULL;
@@ -5501,8 +5486,6 @@ out:
*/
if (!for_reloc && !root_dropped)
btrfs_add_dead_root(root);
- if (err && err != -EAGAIN)
- btrfs_handle_fs_error(fs_info, err, NULL);
return err;
}
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index e12eb32d9e17..68c96057ad2d 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2333,7 +2333,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
return 0;
}
-int btrfs_repair_eb_io_failure(struct extent_buffer *eb, int mirror_num)
+int btrfs_repair_eb_io_failure(const struct extent_buffer *eb, int mirror_num)
{
struct btrfs_fs_info *fs_info = eb->fs_info;
u64 start = eb->start;
@@ -2537,8 +2537,9 @@ int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
return 0;
}
-bool btrfs_check_repairable(struct inode *inode, unsigned failed_bio_pages,
- struct io_failure_record *failrec, int failed_mirror)
+static bool btrfs_check_repairable(struct inode *inode, bool needs_validation,
+ struct io_failure_record *failrec,
+ int failed_mirror)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
int num_copies;
@@ -2561,7 +2562,7 @@ bool btrfs_check_repairable(struct inode *inode, unsigned failed_bio_pages,
* a) deliver good data to the caller
* b) correct the bad sectors on disk
*/
- if (failed_bio_pages > 1) {
+ if (needs_validation) {
/*
* to fulfill b), we need to know the exact failing sectors, as
* we don't want to rewrite any more than the failed ones. thus,
@@ -2600,94 +2601,115 @@ bool btrfs_check_repairable(struct inode *inode, unsigned failed_bio_pages,
return true;
}
-
-struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
- struct io_failure_record *failrec,
- struct page *page, int pg_offset, int icsum,
- bio_end_io_t *endio_func, void *data)
+static bool btrfs_io_needs_validation(struct inode *inode, struct bio *bio)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- struct bio *bio;
- struct btrfs_io_bio *btrfs_failed_bio;
- struct btrfs_io_bio *btrfs_bio;
+ u64 len = 0;
+ const u32 blocksize = inode->i_sb->s_blocksize;
- bio = btrfs_io_bio_alloc(1);
- bio->bi_end_io = endio_func;
- bio->bi_iter.bi_sector = failrec->logical >> 9;
- bio->bi_iter.bi_size = 0;
- bio->bi_private = data;
+ /*
+ * If bi_status is BLK_STS_OK, then this was a checksum error, not an
+ * I/O error. In this case, we already know exactly which sector was
+ * bad, so we don't need to validate.
+ */
+ if (bio->bi_status == BLK_STS_OK)
+ return false;
- btrfs_failed_bio = btrfs_io_bio(failed_bio);
- if (btrfs_failed_bio->csum) {
- u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
+ /*
+ * We need to validate each sector individually if the failed I/O was
+ * for multiple sectors.
+ *
+ * There are a few possible bios that can end up here:
+ * 1. A buffered read bio, which is not cloned.
+ * 2. A direct I/O read bio, which is cloned.
+ * 3. A (buffered or direct) repair bio, which is not cloned.
+ *
+ * For cloned bios (case 2), we can get the size from
+ * btrfs_io_bio->iter; for non-cloned bios (cases 1 and 3), we can get
+ * it from the bvecs.
+ */
+ if (bio_flagged(bio, BIO_CLONED)) {
+ if (btrfs_io_bio(bio)->iter.bi_size > blocksize)
+ return true;
+ } else {
+ struct bio_vec *bvec;
+ int i;
- btrfs_bio = btrfs_io_bio(bio);
- btrfs_bio->csum = btrfs_bio->csum_inline;
- icsum *= csum_size;
- memcpy(btrfs_bio->csum, btrfs_failed_bio->csum + icsum,
- csum_size);
+ bio_for_each_bvec_all(bvec, bio, i) {
+ len += bvec->bv_len;
+ if (len > blocksize)
+ return true;
+ }
}
-
- bio_add_page(bio, page, failrec->len, pg_offset);
-
- return bio;
+ return false;
}
-/*
- * This is a generic handler for readpage errors. If other copies exist, read
- * those and write back good data to the failed position. Does not investigate
- * in remapping the failed extent elsewhere, hoping the device will be smart
- * enough to do this as needed
- */
-static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
- struct page *page, u64 start, u64 end,
- int failed_mirror)
+blk_status_t btrfs_submit_read_repair(struct inode *inode,
+ struct bio *failed_bio, u64 phy_offset,
+ struct page *page, unsigned int pgoff,
+ u64 start, u64 end, int failed_mirror,
+ submit_bio_hook_t *submit_bio_hook)
{
struct io_failure_record *failrec;
- struct inode *inode = page->mapping->host;
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
- struct bio *bio;
- int read_mode = 0;
+ struct btrfs_io_bio *failed_io_bio = btrfs_io_bio(failed_bio);
+ const int icsum = phy_offset >> inode->i_sb->s_blocksize_bits;
+ bool need_validation;
+ struct bio *repair_bio;
+ struct btrfs_io_bio *repair_io_bio;
blk_status_t status;
int ret;
- unsigned failed_bio_pages = failed_bio->bi_iter.bi_size >> PAGE_SHIFT;
+
+ btrfs_debug(fs_info,
+ "repair read error: read error at %llu", start);
BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
if (ret)
- return ret;
+ return errno_to_blk_status(ret);
+
+ need_validation = btrfs_io_needs_validation(inode, failed_bio);
- if (!btrfs_check_repairable(inode, failed_bio_pages, failrec,
+ if (!btrfs_check_repairable(inode, need_validation, failrec,
failed_mirror)) {
free_io_failure(failure_tree, tree, failrec);
- return -EIO;
+ return BLK_STS_IOERR;
}
- if (failed_bio_pages > 1)
- read_mode |= REQ_FAILFAST_DEV;
+ repair_bio = btrfs_io_bio_alloc(1);
+ repair_io_bio = btrfs_io_bio(repair_bio);
+ repair_bio->bi_opf = REQ_OP_READ;
+ if (need_validation)
+ repair_bio->bi_opf |= REQ_FAILFAST_DEV;
+ repair_bio->bi_end_io = failed_bio->bi_end_io;
+ repair_bio->bi_iter.bi_sector = failrec->logical >> 9;
+ repair_bio->bi_private = failed_bio->bi_private;
- phy_offset >>= inode->i_sb->s_blocksize_bits;
- bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
- start - page_offset(page),
- (int)phy_offset, failed_bio->bi_end_io,
- NULL);
- bio->bi_opf = REQ_OP_READ | read_mode;
+ if (failed_io_bio->csum) {
+ const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
+
+ repair_io_bio->csum = repair_io_bio->csum_inline;
+ memcpy(repair_io_bio->csum,
+ failed_io_bio->csum + csum_size * icsum, csum_size);
+ }
+
+ bio_add_page(repair_bio, page, failrec->len, pgoff);
+ repair_io_bio->logical = failrec->start;
+ repair_io_bio->iter = repair_bio->bi_iter;
btrfs_debug(btrfs_sb(inode->i_sb),
- "Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d",
- read_mode, failrec->this_mirror, failrec->in_validation);
+"repair read error: submitting new read to mirror %d, in_validation=%d",
+ failrec->this_mirror, failrec->in_validation);
- status = tree->ops->submit_bio_hook(tree->private_data, bio, failrec->this_mirror,
- failrec->bio_flags);
+ status = submit_bio_hook(inode, repair_bio, failrec->this_mirror,
+ failrec->bio_flags);
if (status) {
free_io_failure(failure_tree, tree, failrec);
- bio_put(bio);
- ret = blk_status_to_errno(status);
+ bio_put(repair_bio);
}
-
- return ret;
+ return status;
}
/* lots and lots of room for performance fixes in the end_bio funcs */
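
The validation decision in btrfs_io_needs_validation() boils down to "did the failed bio cover more than one block?". For the non-cloned case that is a running sum over the bvecs; a standalone sketch with a minimal bvec stand-in:

#include <stdbool.h>
#include <stdint.h>

struct bvec {			/* stands in for struct bio_vec */
	uint32_t bv_len;
};

static bool spans_multiple_blocks(const struct bvec *bvecs, int nr,
				  uint32_t blocksize)
{
	uint64_t len = 0;
	int i;

	for (i = 0; i < nr; i++) {
		len += bvecs[i].bv_len;
		if (len > blocksize)	/* more than one block: must validate */
			return true;
	}
	return false;
}

int main(void)
{
	struct bvec one[] = { { 4096 } };
	struct bvec two[] = { { 4096 }, { 4096 } };

	/* A single 4K block never needs per-sector validation */
	return spans_multiple_blocks(one, 1, 4096) == false &&
	       spans_multiple_blocks(two, 2, 4096) == true ? 0 : 1;
}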
@@ -2859,9 +2881,10 @@ static void end_bio_extent_readpage(struct bio *bio)
* If it can't handle the error it will return -EIO and
* we remain responsible for that page.
*/
- ret = bio_readpage_error(bio, offset, page, start, end,
- mirror);
- if (ret == 0) {
+ if (!btrfs_submit_read_repair(inode, bio, offset, page,
+ start - page_offset(page),
+ start, end, mirror,
+ tree->ops->submit_bio_hook)) {
uptodate = !bio->bi_status;
offset += len;
continue;
@@ -4862,7 +4885,7 @@ static void __free_extent_buffer(struct extent_buffer *eb)
kmem_cache_free(extent_buffer_cache, eb);
}
-int extent_buffer_under_io(struct extent_buffer *eb)
+int extent_buffer_under_io(const struct extent_buffer *eb)
{
return (atomic_read(&eb->io_pages) ||
test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
@@ -4967,7 +4990,7 @@ __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
return eb;
}
-struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
+struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src)
{
int i;
struct page *p;
@@ -5373,7 +5396,7 @@ void free_extent_buffer_stale(struct extent_buffer *eb)
release_extent_buffer(eb);
}
-void clear_extent_buffer_dirty(struct extent_buffer *eb)
+void clear_extent_buffer_dirty(const struct extent_buffer *eb)
{
int i;
int num_pages;
@@ -5571,8 +5594,7 @@ void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
struct page *page;
char *kaddr;
char *dst = (char *)dstv;
- size_t start_offset = offset_in_page(eb->start);
- unsigned long i = (start_offset + start) >> PAGE_SHIFT;
+ unsigned long i = start >> PAGE_SHIFT;
if (start + len > eb->len) {
WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, wanted %lu %lu\n",
@@ -5581,7 +5603,7 @@ void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
return;
}
- offset = offset_in_page(start_offset + start);
+ offset = offset_in_page(start);
while (len > 0) {
page = eb->pages[i];
@@ -5606,14 +5628,13 @@ int read_extent_buffer_to_user(const struct extent_buffer *eb,
struct page *page;
char *kaddr;
char __user *dst = (char __user *)dstv;
- size_t start_offset = offset_in_page(eb->start);
- unsigned long i = (start_offset + start) >> PAGE_SHIFT;
+ unsigned long i = start >> PAGE_SHIFT;
int ret = 0;
WARN_ON(start > eb->len);
WARN_ON(start + len > eb->start + eb->len);
- offset = offset_in_page(start_offset + start);
+ offset = offset_in_page(start);
while (len > 0) {
page = eb->pages[i];
@@ -5634,48 +5655,6 @@ int read_extent_buffer_to_user(const struct extent_buffer *eb,
return ret;
}
-/*
- * return 0 if the item is found within a page.
- * return 1 if the item spans two pages.
- * return -EINVAL otherwise.
- */
-int map_private_extent_buffer(const struct extent_buffer *eb,
- unsigned long start, unsigned long min_len,
- char **map, unsigned long *map_start,
- unsigned long *map_len)
-{
- size_t offset;
- char *kaddr;
- struct page *p;
- size_t start_offset = offset_in_page(eb->start);
- unsigned long i = (start_offset + start) >> PAGE_SHIFT;
- unsigned long end_i = (start_offset + start + min_len - 1) >>
- PAGE_SHIFT;
-
- if (start + min_len > eb->len) {
- WARN(1, KERN_ERR "btrfs bad mapping eb start %llu len %lu, wanted %lu %lu\n",
- eb->start, eb->len, start, min_len);
- return -EINVAL;
- }
-
- if (i != end_i)
- return 1;
-
- if (i == 0) {
- offset = start_offset;
- *map_start = 0;
- } else {
- offset = 0;
- *map_start = ((u64)i << PAGE_SHIFT) - start_offset;
- }
-
- p = eb->pages[i];
- kaddr = page_address(p);
- *map = kaddr + offset;
- *map_len = PAGE_SIZE - offset;
- return 0;
-}
-
int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
unsigned long start, unsigned long len)
{
@@ -5684,14 +5663,13 @@ int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
struct page *page;
char *kaddr;
char *ptr = (char *)ptrv;
- size_t start_offset = offset_in_page(eb->start);
- unsigned long i = (start_offset + start) >> PAGE_SHIFT;
+ unsigned long i = start >> PAGE_SHIFT;
int ret = 0;
WARN_ON(start > eb->len);
WARN_ON(start + len > eb->start + eb->len);
- offset = offset_in_page(start_offset + start);
+ offset = offset_in_page(start);
while (len > 0) {
page = eb->pages[i];
@@ -5711,7 +5689,7 @@ int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
return ret;
}
-void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb,
+void write_extent_buffer_chunk_tree_uuid(const struct extent_buffer *eb,
const void *srcv)
{
char *kaddr;
@@ -5722,7 +5700,7 @@ void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb,
BTRFS_FSID_SIZE);
}
-void write_extent_buffer_fsid(struct extent_buffer *eb, const void *srcv)
+void write_extent_buffer_fsid(const struct extent_buffer *eb, const void *srcv)
{
char *kaddr;
@@ -5732,7 +5710,7 @@ void write_extent_buffer_fsid(struct extent_buffer *eb, const void *srcv)
BTRFS_FSID_SIZE);
}
-void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
+void write_extent_buffer(const struct extent_buffer *eb, const void *srcv,
unsigned long start, unsigned long len)
{
size_t cur;
@@ -5740,13 +5718,12 @@ void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
struct page *page;
char *kaddr;
char *src = (char *)srcv;
- size_t start_offset = offset_in_page(eb->start);
- unsigned long i = (start_offset + start) >> PAGE_SHIFT;
+ unsigned long i = start >> PAGE_SHIFT;
WARN_ON(start > eb->len);
WARN_ON(start + len > eb->start + eb->len);
- offset = offset_in_page(start_offset + start);
+ offset = offset_in_page(start);
while (len > 0) {
page = eb->pages[i];
@@ -5763,20 +5740,19 @@ void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
}
}
-void memzero_extent_buffer(struct extent_buffer *eb, unsigned long start,
+void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
unsigned long len)
{
size_t cur;
size_t offset;
struct page *page;
char *kaddr;
- size_t start_offset = offset_in_page(eb->start);
- unsigned long i = (start_offset + start) >> PAGE_SHIFT;
+ unsigned long i = start >> PAGE_SHIFT;
WARN_ON(start > eb->len);
WARN_ON(start + len > eb->start + eb->len);
- offset = offset_in_page(start_offset + start);
+ offset = offset_in_page(start);
while (len > 0) {
page = eb->pages[i];
@@ -5792,8 +5768,8 @@ void memzero_extent_buffer(struct extent_buffer *eb, unsigned long start,
}
}
-void copy_extent_buffer_full(struct extent_buffer *dst,
- struct extent_buffer *src)
+void copy_extent_buffer_full(const struct extent_buffer *dst,
+ const struct extent_buffer *src)
{
int i;
int num_pages;
@@ -5806,7 +5782,8 @@ void copy_extent_buffer_full(struct extent_buffer *dst,
page_address(src->pages[i]));
}
-void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
+void copy_extent_buffer(const struct extent_buffer *dst,
+ const struct extent_buffer *src,
unsigned long dst_offset, unsigned long src_offset,
unsigned long len)
{
@@ -5815,12 +5792,11 @@ void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
size_t offset;
struct page *page;
char *kaddr;
- size_t start_offset = offset_in_page(dst->start);
- unsigned long i = (start_offset + dst_offset) >> PAGE_SHIFT;
+ unsigned long i = dst_offset >> PAGE_SHIFT;
WARN_ON(src->len != dst_len);
- offset = offset_in_page(start_offset + dst_offset);
+ offset = offset_in_page(dst_offset);
while (len > 0) {
page = dst->pages[i];
@@ -5851,12 +5827,11 @@ void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
* This helper hides the ugliness of finding the byte in an extent buffer which
* contains a given bit.
*/
-static inline void eb_bitmap_offset(struct extent_buffer *eb,
+static inline void eb_bitmap_offset(const struct extent_buffer *eb,
unsigned long start, unsigned long nr,
unsigned long *page_index,
size_t *page_offset)
{
- size_t start_offset = offset_in_page(eb->start);
size_t byte_offset = BIT_BYTE(nr);
size_t offset;
@@ -5865,7 +5840,7 @@ static inline void eb_bitmap_offset(struct extent_buffer *eb,
* the bitmap item in the extent buffer + the offset of the byte in the
* bitmap item.
*/
- offset = start_offset + start + byte_offset;
+ offset = start + byte_offset;
*page_index = offset >> PAGE_SHIFT;
*page_offset = offset_in_page(offset);
@@ -5877,7 +5852,7 @@ static inline void eb_bitmap_offset(struct extent_buffer *eb,
* @start: offset of the bitmap item in the extent buffer
* @nr: bit number to test
*/
-int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
+int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
unsigned long nr)
{
u8 *kaddr;
@@ -5899,7 +5874,7 @@ int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
* @pos: bit number of the first bit
* @len: number of bits to set
*/
-void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
+void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start,
unsigned long pos, unsigned long len)
{
u8 *kaddr;
@@ -5941,8 +5916,9 @@ void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
* @pos: bit number of the first bit
* @len: number of bits to clear
*/
-void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
- unsigned long pos, unsigned long len)
+void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
+ unsigned long start, unsigned long pos,
+ unsigned long len)
{
u8 *kaddr;
struct page *page;
@@ -6003,14 +5979,14 @@ static void copy_pages(struct page *dst_page, struct page *src_page,
memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
}
-void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
- unsigned long src_offset, unsigned long len)
+void memcpy_extent_buffer(const struct extent_buffer *dst,
+ unsigned long dst_offset, unsigned long src_offset,
+ unsigned long len)
{
struct btrfs_fs_info *fs_info = dst->fs_info;
size_t cur;
size_t dst_off_in_page;
size_t src_off_in_page;
- size_t start_offset = offset_in_page(dst->start);
unsigned long dst_i;
unsigned long src_i;
@@ -6028,11 +6004,11 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
}
while (len > 0) {
- dst_off_in_page = offset_in_page(start_offset + dst_offset);
- src_off_in_page = offset_in_page(start_offset + src_offset);
+ dst_off_in_page = offset_in_page(dst_offset);
+ src_off_in_page = offset_in_page(src_offset);
- dst_i = (start_offset + dst_offset) >> PAGE_SHIFT;
- src_i = (start_offset + src_offset) >> PAGE_SHIFT;
+ dst_i = dst_offset >> PAGE_SHIFT;
+ src_i = src_offset >> PAGE_SHIFT;
cur = min(len, (unsigned long)(PAGE_SIZE -
src_off_in_page));
@@ -6048,8 +6024,9 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
}
}
-void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
- unsigned long src_offset, unsigned long len)
+void memmove_extent_buffer(const struct extent_buffer *dst,
+ unsigned long dst_offset, unsigned long src_offset,
+ unsigned long len)
{
struct btrfs_fs_info *fs_info = dst->fs_info;
size_t cur;
@@ -6057,7 +6034,6 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
size_t src_off_in_page;
unsigned long dst_end = dst_offset + len - 1;
unsigned long src_end = src_offset + len - 1;
- size_t start_offset = offset_in_page(dst->start);
unsigned long dst_i;
unsigned long src_i;
@@ -6078,11 +6054,11 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
return;
}
while (len > 0) {
- dst_i = (start_offset + dst_end) >> PAGE_SHIFT;
- src_i = (start_offset + src_end) >> PAGE_SHIFT;
+ dst_i = dst_end >> PAGE_SHIFT;
+ src_i = src_end >> PAGE_SHIFT;
- dst_off_in_page = offset_in_page(start_offset + dst_end);
- src_off_in_page = offset_in_page(start_offset + src_end);
+ dst_off_in_page = offset_in_page(dst_end);
+ src_off_in_page = offset_in_page(src_end);
cur = min_t(unsigned long, len, src_off_in_page + 1);
cur = min(cur, dst_off_in_page + 1);
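
All of the start_offset removals in this file lean on the same assumption: an extent buffer's start is page aligned, so locating byte "start" within it is plain shift/mask arithmetic with no adjustment term. In sketch form:

	unsigned long i = start >> PAGE_SHIFT;	  /* index into eb->pages[] */
	size_t offset = offset_in_page(start);	  /* byte offset in that page */
	char *kaddr = page_address(eb->pages[i]); /* kaddr + offset = the byte */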
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 25594e09fdcd..87f60a48f750 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -5,6 +5,7 @@
#include <linux/rbtree.h>
#include <linux/refcount.h>
+#include <linux/fiemap.h>
#include "ulist.h"
/*
@@ -66,6 +67,10 @@ struct btrfs_io_bio;
struct io_failure_record;
struct extent_io_tree;
+typedef blk_status_t (submit_bio_hook_t)(struct inode *inode, struct bio *bio,
+ int mirror_num,
+ unsigned long bio_flags);
+
typedef blk_status_t (extent_submit_bio_start_t)(void *private_data,
struct bio *bio, u64 bio_offset);
@@ -74,8 +79,7 @@ struct extent_io_ops {
* The following callbacks must be always defined, the function
* pointer will be called unconditionally.
*/
- blk_status_t (*submit_bio_hook)(struct inode *inode, struct bio *bio,
- int mirror_num, unsigned long bio_flags);
+ submit_bio_hook_t *submit_bio_hook;
int (*readpage_end_io_hook)(struct btrfs_io_bio *io_bio, u64 phy_offset,
struct page *page, u64 start, u64 end,
int mirror);
@@ -209,7 +213,7 @@ struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start, unsigned long len);
struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start);
-struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src);
+struct extent_buffer *btrfs_clone_extent_buffer(const struct extent_buffer *src);
struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start);
void free_extent_buffer(struct extent_buffer *eb);
@@ -227,7 +231,7 @@ static inline int num_extent_pages(const struct extent_buffer *eb)
(eb->start >> PAGE_SHIFT);
}
-static inline int extent_buffer_uptodate(struct extent_buffer *eb)
+static inline int extent_buffer_uptodate(const struct extent_buffer *eb)
{
return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
}
@@ -240,37 +244,37 @@ void read_extent_buffer(const struct extent_buffer *eb, void *dst,
int read_extent_buffer_to_user(const struct extent_buffer *eb,
void __user *dst, unsigned long start,
unsigned long len);
-void write_extent_buffer_fsid(struct extent_buffer *eb, const void *src);
-void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb,
+void write_extent_buffer_fsid(const struct extent_buffer *eb, const void *src);
+void write_extent_buffer_chunk_tree_uuid(const struct extent_buffer *eb,
const void *src);
-void write_extent_buffer(struct extent_buffer *eb, const void *src,
+void write_extent_buffer(const struct extent_buffer *eb, const void *src,
unsigned long start, unsigned long len);
-void copy_extent_buffer_full(struct extent_buffer *dst,
- struct extent_buffer *src);
-void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
+void copy_extent_buffer_full(const struct extent_buffer *dst,
+ const struct extent_buffer *src);
+void copy_extent_buffer(const struct extent_buffer *dst,
+ const struct extent_buffer *src,
unsigned long dst_offset, unsigned long src_offset,
unsigned long len);
-void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
- unsigned long src_offset, unsigned long len);
-void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
- unsigned long src_offset, unsigned long len);
-void memzero_extent_buffer(struct extent_buffer *eb, unsigned long start,
+void memcpy_extent_buffer(const struct extent_buffer *dst,
+ unsigned long dst_offset, unsigned long src_offset,
+ unsigned long len);
+void memmove_extent_buffer(const struct extent_buffer *dst,
+ unsigned long dst_offset, unsigned long src_offset,
+ unsigned long len);
+void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start,
unsigned long len);
-int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
+int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start,
unsigned long pos);
-void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
+void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start,
unsigned long pos, unsigned long len);
-void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
- unsigned long pos, unsigned long len);
-void clear_extent_buffer_dirty(struct extent_buffer *eb);
+void extent_buffer_bitmap_clear(const struct extent_buffer *eb,
+ unsigned long start, unsigned long pos,
+ unsigned long len);
+void clear_extent_buffer_dirty(const struct extent_buffer *eb);
bool set_extent_buffer_dirty(struct extent_buffer *eb);
void set_extent_buffer_uptodate(struct extent_buffer *eb);
void clear_extent_buffer_uptodate(struct extent_buffer *eb);
-int extent_buffer_under_io(struct extent_buffer *eb);
-int map_private_extent_buffer(const struct extent_buffer *eb,
- unsigned long offset, unsigned long min_len,
- char **map, unsigned long *map_start,
- unsigned long *map_len);
+int extent_buffer_under_io(const struct extent_buffer *eb);
void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
@@ -289,7 +293,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
u64 length, u64 logical, struct page *page,
unsigned int pg_offset, int mirror_num);
void end_extent_writepage(struct page *page, int err, u64 start, u64 end);
-int btrfs_repair_eb_io_failure(struct extent_buffer *eb, int mirror_num);
+int btrfs_repair_eb_io_failure(const struct extent_buffer *eb, int mirror_num);
/*
* When IO fails, either with EIO or csum verification fails, we
@@ -311,12 +315,12 @@ struct io_failure_record {
};
-bool btrfs_check_repairable(struct inode *inode, unsigned failed_bio_pages,
- struct io_failure_record *failrec, int fail_mirror);
-struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
- struct io_failure_record *failrec,
- struct page *page, int pg_offset, int icsum,
- bio_end_io_t *endio_func, void *data);
+blk_status_t btrfs_submit_read_repair(struct inode *inode,
+ struct bio *failed_bio, u64 phy_offset,
+ struct page *page, unsigned int pgoff,
+ u64 start, u64 end, int failed_mirror,
+ submit_bio_hook_t *submit_bio_hook);
+
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
bool find_lock_delalloc_range(struct inode *inode,
struct page *locked_page, u64 *start,
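The new submit_bio_hook_t typedef exists so the read-repair path declared above can be handed the same submission routine the failed bio originally went through, whether that was the buffered or the direct I/O hook. A hedged sketch of the indirection, with simplified stand-in types in place of the real inode/bio structures:

typedef int (submit_hook_t)(void *inode, void *bio, int mirror_num,
			    unsigned long bio_flags);

static int resubmit_for_repair(void *inode, void *repair_bio,
			       int next_mirror, submit_hook_t *submit_hook)
{
	/* Re-drive the repair bio through the caller's original hook. */
	return submit_hook(inode, repair_bio, next_mirror, 0);
}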
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index b618ad5339ba..706a3128e192 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -242,11 +242,13 @@ int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
/**
* btrfs_lookup_bio_sums - Look up checksums for a bio.
* @inode: inode that the bio is for.
- * @bio: bio embedded in btrfs_io_bio.
+ * @bio: bio to look up.
* @offset: Unless (u64)-1, look up checksums for this offset in the file.
* If (u64)-1, use the page offsets from the bio instead.
- * @dst: Buffer of size btrfs_super_csum_size() used to return checksum. If
- * NULL, the checksum is returned in btrfs_io_bio(bio)->csum instead.
+ * @dst: Buffer of size nblocks * btrfs_super_csum_size() used to return
+ * checksum (nblocks = bio->bi_iter.bi_size / fs_info->sectorsize). If
+ * NULL, the checksum buffer is allocated and returned in
+ * btrfs_io_bio(bio)->csum instead.
*
* Return: BLK_STS_RESOURCE if allocating memory fails, BLK_STS_OK otherwise.
*/
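To make the @dst sizing rule above concrete, a small sketch of the arithmetic (csum_buffer_bytes() is an illustrative helper, not part of the patch): a 16 KiB bio on a 4 KiB sector filesystem with 4-byte crc32c checksums needs a 16-byte buffer.

/* Bytes a caller-supplied dst buffer must hold for one bio. */
static unsigned int csum_buffer_bytes(unsigned int bio_size,
				      unsigned int sectorsize,
				      unsigned int csum_size)
{
	unsigned int nblocks = bio_size / sectorsize;

	return nblocks * csum_size;	/* e.g. (16384 / 4096) * 4 = 16 */
}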
@@ -256,7 +258,6 @@ blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct bio_vec bvec;
struct bvec_iter iter;
- struct btrfs_io_bio *btrfs_bio = btrfs_io_bio(bio);
struct btrfs_csum_item *item = NULL;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct btrfs_path *path;
@@ -277,6 +278,8 @@ blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;
if (!dst) {
+ struct btrfs_io_bio *btrfs_bio = btrfs_io_bio(bio);
+
if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
btrfs_bio->csum = kmalloc_array(nblocks, csum_size,
GFP_NOFS);
@@ -598,13 +601,12 @@ blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
index = 0;
}
- crypto_shash_init(shash);
data = kmap_atomic(bvec.bv_page);
- crypto_shash_update(shash, data + bvec.bv_offset
+ crypto_shash_digest(shash, data + bvec.bv_offset
+ (i * fs_info->sectorsize),
- fs_info->sectorsize);
+ fs_info->sectorsize,
+ sums->sums + index);
kunmap_atomic(data);
- crypto_shash_final(shash, (char *)(sums->sums + index));
index += csum_size;
offset += fs_info->sectorsize;
this_sum_bytes += fs_info->sectorsize;
@@ -869,7 +871,7 @@ again:
}
ret = PTR_ERR(item);
if (ret != -EFBIG && ret != -ENOENT)
- goto fail_unlock;
+ goto out;
if (ret == -EFBIG) {
u32 item_size;
@@ -887,10 +889,12 @@ again:
nritems = btrfs_header_nritems(path->nodes[0]);
if (!nritems || (path->slots[0] >= nritems - 1)) {
ret = btrfs_next_leaf(root, path);
- if (ret == 1)
+ if (ret < 0) {
+ goto out;
+ } else if (ret > 0) {
found_next = 1;
- if (ret != 0)
goto insert;
+ }
slot = path->slots[0];
}
btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
@@ -905,14 +909,27 @@ again:
}
/*
- * at this point, we know the tree has an item, but it isn't big
- * enough yet to put our csum in. Grow it
+ * At this point, we know the tree has a checksum item that ends at an
+ * offset matching the start of the checksum range we want to insert.
+ * We try to extend that item as much as possible and then add as many
+ * checksums to it as will fit.
+ *
+ * First check if the leaf has enough free space for at least one
+ * checksum. If it does, go directly to the item extension code, otherwise
+ * release the path and do a search for insertion before the extension.
*/
+ if (btrfs_leaf_free_space(leaf) >= csum_size) {
+ btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
+ csum_offset = (bytenr - found_key.offset) >>
+ fs_info->sb->s_blocksize_bits;
+ goto extend_csum;
+ }
+
btrfs_release_path(path);
ret = btrfs_search_slot(trans, root, &file_key, path,
csum_size, 1);
if (ret < 0)
- goto fail_unlock;
+ goto out;
if (ret > 0) {
if (path->slots[0] == 0)
@@ -931,19 +948,13 @@ again:
goto insert;
}
+extend_csum:
if (csum_offset == btrfs_item_size_nr(leaf, path->slots[0]) /
csum_size) {
int extend_nr;
u64 tmp;
u32 diff;
- u32 free_space;
-
- if (btrfs_leaf_free_space(leaf) <
- sizeof(struct btrfs_item) + csum_size * 2)
- goto insert;
- free_space = btrfs_leaf_free_space(leaf) -
- sizeof(struct btrfs_item) - csum_size;
tmp = sums->len - total_bytes;
tmp >>= fs_info->sb->s_blocksize_bits;
WARN_ON(tmp < 1);
@@ -954,7 +965,7 @@ again:
MAX_CSUM_ITEMS(fs_info, csum_size) * csum_size);
diff = diff - btrfs_item_size_nr(leaf, path->slots[0]);
- diff = min(free_space, diff);
+ diff = min_t(u32, btrfs_leaf_free_space(leaf), diff);
diff /= csum_size;
diff *= csum_size;
@@ -985,9 +996,9 @@ insert:
ins_size);
path->leave_spinning = 0;
if (ret < 0)
- goto fail_unlock;
+ goto out;
if (WARN_ON(ret != 0))
- goto fail_unlock;
+ goto out;
leaf = path->nodes[0];
csum:
item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
@@ -1017,9 +1028,6 @@ found:
out:
btrfs_free_path(path);
return ret;
-
-fail_unlock:
- goto out;
}
void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
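The csum_one_bio hunk above folds the init/update/final triple into a single call; for one contiguous buffer crypto_shash_digest() is defined to produce the same result. A sketch of the two forms side by side, with descriptor setup omitted (shash is assumed to be an initialized descriptor):

#include <crypto/hash.h>

/* Three-call form the patch removes. */
static int sum_sector_threecall(struct shash_desc *shash, const u8 *data,
				unsigned int len, u8 *out)
{
	int ret;

	ret = crypto_shash_init(shash);
	if (ret)
		return ret;
	ret = crypto_shash_update(shash, data, len);
	if (ret)
		return ret;
	return crypto_shash_final(shash, out);
}

/* One-shot form the patch switches to; same digest for one buffer. */
static int sum_sector_oneshot(struct shash_desc *shash, const u8 *data,
			      unsigned int len, u8 *out)
{
	return crypto_shash_digest(shash, data, len, out);
}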
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 719e68ab552c..fde125616687 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -275,26 +275,18 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
{
struct btrfs_root *inode_root;
struct inode *inode;
- struct btrfs_key key;
struct btrfs_ioctl_defrag_range_args range;
int num_defrag;
int ret;
/* get the inode */
- key.objectid = defrag->root;
- key.type = BTRFS_ROOT_ITEM_KEY;
- key.offset = (u64)-1;
-
- inode_root = btrfs_get_fs_root(fs_info, &key, true);
+ inode_root = btrfs_get_fs_root(fs_info, defrag->root, true);
if (IS_ERR(inode_root)) {
ret = PTR_ERR(inode_root);
goto cleanup;
}
- key.objectid = defrag->ino;
- key.type = BTRFS_INODE_ITEM_KEY;
- key.offset = 0;
- inode = btrfs_iget(fs_info->sb, &key, inode_root);
+ inode = btrfs_iget(fs_info->sb, defrag->ino, inode_root);
btrfs_put_root(inode_root);
if (IS_ERR(inode)) {
ret = PTR_ERR(inode);
@@ -775,7 +767,7 @@ int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
if (start >= BTRFS_I(inode)->disk_i_size && !replace_extent)
modify_tree = 0;
- update_refs = (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
+ update_refs = (test_bit(BTRFS_ROOT_SHAREABLE, &root->state) ||
root == fs_info->tree_root);
while (1) {
recow = 0;
@@ -1817,21 +1809,61 @@ again:
return num_written ? num_written : ret;
}
-static ssize_t __btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
+static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info,
+ const struct iov_iter *iter, loff_t offset)
+{
+ const unsigned int blocksize_mask = fs_info->sectorsize - 1;
+
+ if (offset & blocksize_mask)
+ return -EINVAL;
+
+ if (iov_iter_alignment(iter) & blocksize_mask)
+ return -EINVAL;
+
+ return 0;
+}
+
+static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
- loff_t pos;
- ssize_t written;
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ loff_t pos = iocb->ki_pos;
+ ssize_t written = 0;
ssize_t written_buffered;
loff_t endbyte;
int err;
+ size_t count = 0;
+ bool relock = false;
- written = generic_file_direct_write(iocb, from);
+ if (check_direct_IO(fs_info, from, pos))
+ goto buffered;
+
+ count = iov_iter_count(from);
+ /*
+	 * If the write DIO is beyond the EOF, we need to update the isize, but
+	 * it is protected by i_mutex. So we can not unlock the i_mutex in this
+	 * case.
+ */
+ if (pos + count <= inode->i_size) {
+ inode_unlock(inode);
+ relock = true;
+ } else if (iocb->ki_flags & IOCB_NOWAIT) {
+ return -EAGAIN;
+ }
+
+ down_read(&BTRFS_I(inode)->dio_sem);
+ written = iomap_dio_rw(iocb, from, &btrfs_dio_iomap_ops, &btrfs_dops,
+ is_sync_kiocb(iocb));
+ up_read(&BTRFS_I(inode)->dio_sem);
+
+ if (relock)
+ inode_lock(inode);
if (written < 0 || !iov_iter_count(from))
return written;
+buffered:
pos = iocb->ki_pos;
written_buffered = btrfs_buffered_write(iocb, from);
if (written_buffered < 0) {
@@ -1970,7 +2002,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
atomic_inc(&BTRFS_I(inode)->sync_writers);
if (iocb->ki_flags & IOCB_DIRECT) {
- num_written = __btrfs_direct_write(iocb, from);
+ num_written = btrfs_direct_write(iocb, from);
} else {
num_written = btrfs_buffered_write(iocb, from);
if (num_written > 0)
@@ -3484,9 +3516,54 @@ static int btrfs_file_open(struct inode *inode, struct file *filp)
return generic_file_open(inode, filp);
}
+static int check_direct_read(struct btrfs_fs_info *fs_info,
+ const struct iov_iter *iter, loff_t offset)
+{
+ int ret;
+ int i, seg;
+
+ ret = check_direct_IO(fs_info, iter, offset);
+ if (ret < 0)
+ return ret;
+
+ for (seg = 0; seg < iter->nr_segs; seg++)
+ for (i = seg + 1; i < iter->nr_segs; i++)
+ if (iter->iov[seg].iov_base == iter->iov[i].iov_base)
+ return -EINVAL;
+ return 0;
+}
+
+static ssize_t btrfs_direct_read(struct kiocb *iocb, struct iov_iter *to)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+ ssize_t ret;
+
+ if (check_direct_read(btrfs_sb(inode->i_sb), to, iocb->ki_pos))
+ return 0;
+
+ inode_lock_shared(inode);
+ ret = iomap_dio_rw(iocb, to, &btrfs_dio_iomap_ops, &btrfs_dops,
+ is_sync_kiocb(iocb));
+ inode_unlock_shared(inode);
+ return ret;
+}
+
+static ssize_t btrfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+ ssize_t ret = 0;
+
+ if (iocb->ki_flags & IOCB_DIRECT) {
+ ret = btrfs_direct_read(iocb, to);
+ if (ret < 0)
+ return ret;
+ }
+
+ return generic_file_buffered_read(iocb, to, ret);
+}
+
const struct file_operations btrfs_file_operations = {
.llseek = btrfs_file_llseek,
- .read_iter = generic_file_read_iter,
+ .read_iter = btrfs_file_read_iter,
.splice_read = generic_file_splice_read,
.write_iter = btrfs_file_write_iter,
.mmap = btrfs_file_mmap,
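The buffered fallback hinges on the check_direct_IO() helper added above: both the file offset and the iovec alignment must be sector aligned, or the write is routed through the page cache. A standalone sketch of the mask test, assuming sectorsize is a power of two:

#include <stdbool.h>
#include <stdint.h>

static bool dio_aligned(uint64_t offset, uint64_t iov_alignment,
			uint32_t sectorsize)
{
	const uint64_t mask = sectorsize - 1;

	/* Any low bit set in either value breaks sector alignment. */
	return ((offset | iov_alignment) & mask) == 0;
}

For example, dio_aligned(8192, 0, 4096) passes, while dio_aligned(8200, 0, 4096) would send the write down the buffered path.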
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 3613da065a73..55955bd424d7 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -82,7 +82,7 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
* sure NOFS is set to keep us from deadlocking.
*/
nofs_flag = memalloc_nofs_save();
- inode = btrfs_iget_path(fs_info->sb, &location, root, path);
+ inode = btrfs_iget_path(fs_info->sb, location.objectid, root, path);
btrfs_release_path(path);
memalloc_nofs_restore(nofs_flag);
if (IS_ERR(inode))
@@ -1190,13 +1190,10 @@ out:
if (ret) {
invalidate_inode_pages2(inode->i_mapping);
BTRFS_I(inode)->generation = 0;
- if (block_group) {
-#ifdef CONFIG_BTRFS_DEBUG
- btrfs_err(root->fs_info,
- "failed to write free space cache for block group %llu",
- block_group->start);
-#endif
- }
+ if (block_group)
+ btrfs_debug(root->fs_info,
+ "failed to write free space cache for block group %llu error %d",
+ block_group->start, ret);
}
btrfs_update_inode(trans, root, inode);
@@ -1415,11 +1412,9 @@ int btrfs_write_out_cache(struct btrfs_trans_handle *trans,
ret = __btrfs_write_out_cache(fs_info->tree_root, inode, ctl,
block_group, &block_group->io_ctl, trans);
if (ret) {
-#ifdef CONFIG_BTRFS_DEBUG
- btrfs_err(fs_info,
- "failed to write free space cache for block group %llu",
- block_group->start);
-#endif
+ btrfs_debug(fs_info,
+ "failed to write free space cache for block group %llu error %d",
+ block_group->start, ret);
spin_lock(&block_group->lock);
block_group->disk_cache_state = BTRFS_DC_ERROR;
spin_unlock(&block_group->lock);
@@ -3762,46 +3757,6 @@ out:
return ret;
}
-void btrfs_get_block_group_trimming(struct btrfs_block_group *cache)
-{
- atomic_inc(&cache->trimming);
-}
-
-void btrfs_put_block_group_trimming(struct btrfs_block_group *block_group)
-{
- struct btrfs_fs_info *fs_info = block_group->fs_info;
- struct extent_map_tree *em_tree;
- struct extent_map *em;
- bool cleanup;
-
- spin_lock(&block_group->lock);
- cleanup = (atomic_dec_and_test(&block_group->trimming) &&
- block_group->removed);
- spin_unlock(&block_group->lock);
-
- if (cleanup) {
- mutex_lock(&fs_info->chunk_mutex);
- em_tree = &fs_info->mapping_tree;
- write_lock(&em_tree->lock);
- em = lookup_extent_mapping(em_tree, block_group->start,
- 1);
- BUG_ON(!em); /* logic error, can't happen */
- remove_extent_mapping(em_tree, em);
- write_unlock(&em_tree->lock);
- mutex_unlock(&fs_info->chunk_mutex);
-
- /* once for us and once for the tree */
- free_extent_map(em);
- free_extent_map(em);
-
- /*
- * We've left one free space entry and other tasks trimming
- * this block group have left 1 entry each one. Free them.
- */
- __btrfs_remove_free_space_cache(block_group->free_space_ctl);
- }
-}
-
int btrfs_trim_block_group(struct btrfs_block_group *block_group,
u64 *trimmed, u64 start, u64 end, u64 minlen)
{
@@ -3816,7 +3771,7 @@ int btrfs_trim_block_group(struct btrfs_block_group *block_group,
spin_unlock(&block_group->lock);
return 0;
}
- btrfs_get_block_group_trimming(block_group);
+ btrfs_freeze_block_group(block_group);
spin_unlock(&block_group->lock);
ret = trim_no_bitmap(block_group, trimmed, start, end, minlen, false);
@@ -3829,7 +3784,7 @@ int btrfs_trim_block_group(struct btrfs_block_group *block_group,
if (rem)
reset_trimming_bitmap(ctl, offset_to_bitmap(ctl, end));
out:
- btrfs_put_block_group_trimming(block_group);
+ btrfs_unfreeze_block_group(block_group);
return ret;
}
@@ -3846,11 +3801,11 @@ int btrfs_trim_block_group_extents(struct btrfs_block_group *block_group,
spin_unlock(&block_group->lock);
return 0;
}
- btrfs_get_block_group_trimming(block_group);
+ btrfs_freeze_block_group(block_group);
spin_unlock(&block_group->lock);
ret = trim_no_bitmap(block_group, trimmed, start, end, minlen, async);
- btrfs_put_block_group_trimming(block_group);
+ btrfs_unfreeze_block_group(block_group);
return ret;
}
@@ -3868,13 +3823,13 @@ int btrfs_trim_block_group_bitmaps(struct btrfs_block_group *block_group,
spin_unlock(&block_group->lock);
return 0;
}
- btrfs_get_block_group_trimming(block_group);
+ btrfs_freeze_block_group(block_group);
spin_unlock(&block_group->lock);
ret = trim_bitmaps(block_group, trimmed, start, end, minlen, maxlen,
async);
- btrfs_put_block_group_trimming(block_group);
+ btrfs_unfreeze_block_group(block_group);
return ret;
}
@@ -4035,11 +3990,9 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root,
if (release_metadata)
btrfs_delalloc_release_metadata(BTRFS_I(inode),
inode->i_size, true);
-#ifdef CONFIG_BTRFS_DEBUG
- btrfs_err(fs_info,
- "failed to write free ino cache for root %llu",
- root->root_key.objectid);
-#endif
+ btrfs_debug(fs_info,
+ "failed to write free ino cache for root %llu error %d",
+ root->root_key.objectid, ret);
}
return ret;
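The trimming-to-freeze rename keeps the calling pattern shown in the hunks above intact. A sketch of that pattern, where do_trim() is a stand-in for the actual trim work:

static int do_trim(struct btrfs_block_group *bg);	/* stand-in */

static int trim_pattern(struct btrfs_block_group *bg)
{
	int ret;

	spin_lock(&bg->lock);
	if (bg->removed) {
		/* Raced with block group removal, nothing to trim. */
		spin_unlock(&bg->lock);
		return 0;
	}
	btrfs_freeze_block_group(bg);	/* defer removal while we work */
	spin_unlock(&bg->lock);

	ret = do_trim(bg);

	btrfs_unfreeze_block_group(bg);	/* may run the deferred cleanup */
	return ret;
}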
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 8b3489f229c7..31ac8c682f19 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -5,7 +5,6 @@
#include <linux/kernel.h>
#include <linux/bio.h>
-#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
@@ -49,17 +48,18 @@
#include "qgroup.h"
#include "delalloc-space.h"
#include "block-group.h"
+#include "space-info.h"
struct btrfs_iget_args {
- struct btrfs_key *location;
+ u64 ino;
struct btrfs_root *root;
};
struct btrfs_dio_data {
u64 reserve;
- u64 unsubmitted_oe_range_start;
- u64 unsubmitted_oe_range_end;
- int overwrite;
+ loff_t length;
+ ssize_t submitted;
+ struct extent_changeset *data_reserved;
};
static const struct inode_operations btrfs_dir_inode_operations;
@@ -1142,7 +1142,7 @@ out_unlock:
*/
if (extent_reserved) {
extent_clear_unlock_delalloc(inode, start,
- start + cur_alloc_size,
+ start + cur_alloc_size - 1,
locked_page,
clear_bits,
page_ops);
@@ -1355,6 +1355,66 @@ static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
return 1;
}
+static int fallback_to_cow(struct inode *inode, struct page *locked_page,
+ const u64 start, const u64 end,
+ int *page_started, unsigned long *nr_written)
+{
+ const bool is_space_ino = btrfs_is_free_space_inode(BTRFS_I(inode));
+ const u64 range_bytes = end + 1 - start;
+ struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+ u64 range_start = start;
+ u64 count;
+
+ /*
+	 * If EXTENT_NORESERVE is set it means that when the buffered write was
+	 * made we did not have enough available data space and therefore did
+	 * not reserve data space for it, since we thought we could do NOCOW
+	 * for the respective file range (either there is a prealloc extent or
+	 * the inode has the NOCOW bit set).
+ *
+	 * However when we need to fall back to COW mode (because for example the
+ * block group for the corresponding extent was turned to RO mode by a
+ * scrub or relocation) we need to do the following:
+ *
+ * 1) We increment the bytes_may_use counter of the data space info.
+ * If COW succeeds, it allocates a new data extent and after doing
+ * that it decrements the space info's bytes_may_use counter and
+ * increments its bytes_reserved counter by the same amount (we do
+ * this at btrfs_add_reserved_bytes()). So we need to increment the
+ * bytes_may_use counter to compensate (when space is reserved at
+ * buffered write time, the bytes_may_use counter is incremented);
+ *
+ * 2) We clear the EXTENT_NORESERVE bit from the range. We do this so
+ * that if the COW path fails for any reason, it decrements (through
+ * extent_clear_unlock_delalloc()) the bytes_may_use counter of the
+ * data space info, which we incremented in the step above.
+ *
+	 * If we need to fall back to COW and the inode corresponds to a free
+ * space cache inode, we must also increment bytes_may_use of the data
+ * space_info for the same reason. Space caches always get a prealloc
+ * extent for them, however scrub or balance may have set the block
+ * group that contains that extent to RO mode.
+ */
+ count = count_range_bits(io_tree, &range_start, end, range_bytes,
+ EXTENT_NORESERVE, 0);
+ if (count > 0 || is_space_ino) {
+ const u64 bytes = is_space_ino ? range_bytes : count;
+ struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
+ struct btrfs_space_info *sinfo = fs_info->data_sinfo;
+
+ spin_lock(&sinfo->lock);
+ btrfs_space_info_update_bytes_may_use(fs_info, sinfo, bytes);
+ spin_unlock(&sinfo->lock);
+
+ if (count > 0)
+ clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE,
+ 0, 0, NULL);
+ }
+
+ return cow_file_range(inode, locked_page, start, end, page_started,
+ nr_written, 1);
+}
+
/*
* when nocow writeback is called back. This checks for snapshots or COW copies
* of the extents that exist in the file, and COWs the file as required.
@@ -1602,9 +1662,9 @@ out_check:
* NOCOW, following one which needs to be COW'ed
*/
if (cow_start != (u64)-1) {
- ret = cow_file_range(inode, locked_page,
- cow_start, found_key.offset - 1,
- page_started, nr_written, 1);
+ ret = fallback_to_cow(inode, locked_page, cow_start,
+ found_key.offset - 1,
+ page_started, nr_written);
if (ret) {
if (nocow)
btrfs_dec_nocow_writers(fs_info,
@@ -1693,8 +1753,8 @@ out_check:
if (cow_start != (u64)-1) {
cur_offset = end;
- ret = cow_file_range(inode, locked_page, cow_start, end,
- page_started, nr_written, 1);
+ ret = fallback_to_cow(inode, locked_page, cow_start, end,
+ page_started, nr_written);
if (ret)
goto error;
}
@@ -2726,10 +2786,9 @@ void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start,
btrfs_queue_work(wq, &ordered_extent->work);
}
-static int __readpage_endio_check(struct inode *inode,
- struct btrfs_io_bio *io_bio,
- int icsum, struct page *page,
- int pgoff, u64 start, size_t len)
+static int check_data_csum(struct inode *inode, struct btrfs_io_bio *io_bio,
+ int icsum, struct page *page, int pgoff, u64 start,
+ size_t len)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
@@ -2743,9 +2802,7 @@ static int __readpage_endio_check(struct inode *inode,
kaddr = kmap_atomic(page);
shash->tfm = fs_info->csum_shash;
- crypto_shash_init(shash);
- crypto_shash_update(shash, kaddr + pgoff, len);
- crypto_shash_final(shash, csum);
+ crypto_shash_digest(shash, kaddr + pgoff, len, csum);
if (memcmp(csum, csum_expected, csum_size))
goto zeroit;
@@ -2790,8 +2847,8 @@ static int btrfs_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
}
phy_offset >>= inode->i_sb->s_blocksize_bits;
- return __readpage_endio_check(inode, io_bio, phy_offset, page, offset,
- start, (size_t)(end - start + 1));
+ return check_data_csum(inode, io_bio, phy_offset, page, offset, start,
+ (size_t)(end - start + 1));
}
/*
@@ -2981,7 +3038,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
found_key.objectid = found_key.offset;
found_key.type = BTRFS_INODE_ITEM_KEY;
found_key.offset = 0;
- inode = btrfs_iget(fs_info->sb, &found_key, root);
+ inode = btrfs_iget(fs_info->sb, last_objectid, root);
ret = PTR_ERR_OR_ZERO(inode);
if (ret && ret != -ENOENT)
goto out;
@@ -3000,18 +3057,16 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
* orphan must not get deleted.
* find_dead_roots already ran before us, so if this
* is a snapshot deletion, we should find the root
- * in the dead_roots list
+ * in the fs_roots radix tree.
*/
- spin_lock(&fs_info->trans_lock);
- list_for_each_entry(dead_root, &fs_info->dead_roots,
- root_list) {
- if (dead_root->root_key.objectid ==
- found_key.objectid) {
- is_dead_root = 1;
- break;
- }
- }
- spin_unlock(&fs_info->trans_lock);
+
+ spin_lock(&fs_info->fs_roots_radix_lock);
+ dead_root = radix_tree_lookup(&fs_info->fs_roots_radix,
+ (unsigned long)found_key.objectid);
+ if (dead_root && btrfs_root_refs(&dead_root->root_item) == 0)
+ is_dead_root = 1;
+ spin_unlock(&fs_info->fs_roots_radix_lock);
+
if (is_dead_root) {
/* prevent this orphan from being found again */
key.offset = found_key.objectid - 1;
@@ -3357,43 +3412,40 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
btrfs_init_map_token(&token, leaf);
- btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
- btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
- btrfs_set_token_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size,
- &token);
- btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
- btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
-
- btrfs_set_token_timespec_sec(leaf, &item->atime,
- inode->i_atime.tv_sec, &token);
- btrfs_set_token_timespec_nsec(leaf, &item->atime,
- inode->i_atime.tv_nsec, &token);
-
- btrfs_set_token_timespec_sec(leaf, &item->mtime,
- inode->i_mtime.tv_sec, &token);
- btrfs_set_token_timespec_nsec(leaf, &item->mtime,
- inode->i_mtime.tv_nsec, &token);
-
- btrfs_set_token_timespec_sec(leaf, &item->ctime,
- inode->i_ctime.tv_sec, &token);
- btrfs_set_token_timespec_nsec(leaf, &item->ctime,
- inode->i_ctime.tv_nsec, &token);
-
- btrfs_set_token_timespec_sec(leaf, &item->otime,
- BTRFS_I(inode)->i_otime.tv_sec, &token);
- btrfs_set_token_timespec_nsec(leaf, &item->otime,
- BTRFS_I(inode)->i_otime.tv_nsec, &token);
-
- btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
- &token);
- btrfs_set_token_inode_generation(leaf, item, BTRFS_I(inode)->generation,
- &token);
- btrfs_set_token_inode_sequence(leaf, item, inode_peek_iversion(inode),
- &token);
- btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
- btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
- btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
- btrfs_set_token_inode_block_group(leaf, item, 0, &token);
+ btrfs_set_token_inode_uid(&token, item, i_uid_read(inode));
+ btrfs_set_token_inode_gid(&token, item, i_gid_read(inode));
+ btrfs_set_token_inode_size(&token, item, BTRFS_I(inode)->disk_i_size);
+ btrfs_set_token_inode_mode(&token, item, inode->i_mode);
+ btrfs_set_token_inode_nlink(&token, item, inode->i_nlink);
+
+ btrfs_set_token_timespec_sec(&token, &item->atime,
+ inode->i_atime.tv_sec);
+ btrfs_set_token_timespec_nsec(&token, &item->atime,
+ inode->i_atime.tv_nsec);
+
+ btrfs_set_token_timespec_sec(&token, &item->mtime,
+ inode->i_mtime.tv_sec);
+ btrfs_set_token_timespec_nsec(&token, &item->mtime,
+ inode->i_mtime.tv_nsec);
+
+ btrfs_set_token_timespec_sec(&token, &item->ctime,
+ inode->i_ctime.tv_sec);
+ btrfs_set_token_timespec_nsec(&token, &item->ctime,
+ inode->i_ctime.tv_nsec);
+
+ btrfs_set_token_timespec_sec(&token, &item->otime,
+ BTRFS_I(inode)->i_otime.tv_sec);
+ btrfs_set_token_timespec_nsec(&token, &item->otime,
+ BTRFS_I(inode)->i_otime.tv_nsec);
+
+ btrfs_set_token_inode_nbytes(&token, item, inode_get_bytes(inode));
+ btrfs_set_token_inode_generation(&token, item,
+ BTRFS_I(inode)->generation);
+ btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode));
+ btrfs_set_token_inode_transid(&token, item, trans->transid);
+ btrfs_set_token_inode_rdev(&token, item, inode->i_rdev);
+ btrfs_set_token_inode_flags(&token, item, BTRFS_I(inode)->flags);
+ btrfs_set_token_inode_block_group(&token, item, 0);
}
/*
@@ -3618,7 +3670,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir)
* 1 for the inode ref
* 1 for the inode
*/
- return btrfs_start_transaction_fallback_global_rsv(root, 5, 5);
+ return btrfs_start_transaction_fallback_global_rsv(root, 5);
}
static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
@@ -4108,11 +4160,12 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
/*
- * for non-free space inodes and ref cows, we want to back off from
- * time to time
+	 * For non-free space inodes and shareable roots, we want to back
+ * off from time to time. This means all inodes in subvolume roots,
+ * reloc roots, and data reloc roots.
*/
if (!btrfs_is_free_space_inode(BTRFS_I(inode)) &&
- test_bit(BTRFS_ROOT_REF_COWS, &root->state))
+ test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
be_nice = true;
path = btrfs_alloc_path();
@@ -4120,20 +4173,19 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
return -ENOMEM;
path->reada = READA_BACK;
- if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
+ if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, (u64)-1,
&cached_state);
- /*
- * We want to drop from the next block forward in case this new size is
- * not block aligned since we will be keeping the last block of the
- * extent just the way it is.
- */
- if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
- root == fs_info->tree_root)
+ /*
+ * We want to drop from the next block forward in case this
+ * new size is not block aligned since we will be keeping the
+ * last block of the extent just the way it is.
+ */
btrfs_drop_extent_cache(BTRFS_I(inode), ALIGN(new_size,
fs_info->sectorsize),
(u64)-1, 0);
+ }
/*
* This function is also used to drop the items in the log tree before
@@ -4241,7 +4293,7 @@ search_again:
extent_num_bytes);
num_dec = (orig_num_bytes -
extent_num_bytes);
- if (test_bit(BTRFS_ROOT_REF_COWS,
+ if (test_bit(BTRFS_ROOT_SHAREABLE,
&root->state) &&
extent_start != 0)
inode_sub_bytes(inode, num_dec);
@@ -4257,7 +4309,7 @@ search_again:
num_dec = btrfs_file_extent_num_bytes(leaf, fi);
if (extent_start != 0) {
found_extent = 1;
- if (test_bit(BTRFS_ROOT_REF_COWS,
+ if (test_bit(BTRFS_ROOT_SHAREABLE,
&root->state))
inode_sub_bytes(inode, num_dec);
}
@@ -4293,7 +4345,7 @@ search_again:
clear_len = fs_info->sectorsize;
}
- if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
+ if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
inode_sub_bytes(inode, item_end + 1 - new_size);
}
delete:
@@ -4334,8 +4386,7 @@ delete:
should_throttle = false;
if (found_extent &&
- (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
- root == fs_info->tree_root)) {
+ root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
struct btrfs_ref ref = { 0 };
bytes_deleted += extent_num_bytes;
@@ -4759,10 +4810,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
truncate_setsize(inode, newsize);
- /* Disable nonlocked read DIO to avoid the endless truncate */
- btrfs_inode_block_unlocked_dio(BTRFS_I(inode));
inode_dio_wait(inode);
- btrfs_inode_resume_unlocked_dio(BTRFS_I(inode));
ret = btrfs_truncate(inode, newsize == oldsize);
if (ret && inode->i_nlink) {
@@ -5154,7 +5202,7 @@ static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
btrfs_release_path(path);
- new_root = btrfs_get_fs_root(fs_info, location, true);
+ new_root = btrfs_get_fs_root(fs_info, location->objectid, true);
if (IS_ERR(new_root)) {
err = PTR_ERR(new_root);
goto out;
@@ -5232,9 +5280,11 @@ static void inode_tree_del(struct inode *inode)
static int btrfs_init_locked_inode(struct inode *inode, void *p)
{
struct btrfs_iget_args *args = p;
- inode->i_ino = args->location->objectid;
- memcpy(&BTRFS_I(inode)->location, args->location,
- sizeof(*args->location));
+
+ inode->i_ino = args->ino;
+ BTRFS_I(inode)->location.objectid = args->ino;
+ BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
+ BTRFS_I(inode)->location.offset = 0;
BTRFS_I(inode)->root = btrfs_grab_root(args->root);
BUG_ON(args->root && !BTRFS_I(inode)->root);
return 0;
@@ -5243,19 +5293,19 @@ static int btrfs_init_locked_inode(struct inode *inode, void *p)
static int btrfs_find_actor(struct inode *inode, void *opaque)
{
struct btrfs_iget_args *args = opaque;
- return args->location->objectid == BTRFS_I(inode)->location.objectid &&
+
+ return args->ino == BTRFS_I(inode)->location.objectid &&
args->root == BTRFS_I(inode)->root;
}
-static struct inode *btrfs_iget_locked(struct super_block *s,
- struct btrfs_key *location,
+static struct inode *btrfs_iget_locked(struct super_block *s, u64 ino,
struct btrfs_root *root)
{
struct inode *inode;
struct btrfs_iget_args args;
- unsigned long hashval = btrfs_inode_hash(location->objectid, root);
+ unsigned long hashval = btrfs_inode_hash(ino, root);
- args.location = location;
+ args.ino = ino;
args.root = root;
inode = iget5_locked(s, hashval, btrfs_find_actor,
@@ -5265,17 +5315,17 @@ static struct inode *btrfs_iget_locked(struct super_block *s,
}
/*
- * Get an inode object given its location and corresponding root.
+ * Get an inode object given its inode number and corresponding root.
* Path can be preallocated to prevent recursing back to iget through
* the allocator. NULL is also valid but may require an additional
* allocation later.
*/
-struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location,
+struct inode *btrfs_iget_path(struct super_block *s, u64 ino,
struct btrfs_root *root, struct btrfs_path *path)
{
struct inode *inode;
- inode = btrfs_iget_locked(s, location, root);
+ inode = btrfs_iget_locked(s, ino, root);
if (!inode)
return ERR_PTR(-ENOMEM);
@@ -5302,10 +5352,9 @@ struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location,
return inode;
}
-struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
- struct btrfs_root *root)
+struct inode *btrfs_iget(struct super_block *s, u64 ino, struct btrfs_root *root)
{
- return btrfs_iget_path(s, location, root, NULL);
+ return btrfs_iget_path(s, ino, root, NULL);
}
static struct inode *new_simple_dir(struct super_block *s,
@@ -5374,7 +5423,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
return ERR_PTR(ret);
if (location.type == BTRFS_INODE_ITEM_KEY) {
- inode = btrfs_iget(dir->i_sb, &location, root);
+ inode = btrfs_iget(dir->i_sb, location.objectid, root);
if (IS_ERR(inode))
return inode;
@@ -5398,7 +5447,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
else
inode = new_simple_dir(dir->i_sb, &location, sub_root);
} else {
- inode = btrfs_iget(dir->i_sb, &location, sub_root);
+ inode = btrfs_iget(dir->i_sb, location.objectid, sub_root);
}
if (root != sub_root)
btrfs_put_root(sub_root);
@@ -5779,7 +5828,8 @@ int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index)
static int btrfs_insert_inode_locked(struct inode *inode)
{
struct btrfs_iget_args args;
- args.location = &BTRFS_I(inode)->location;
+
+ args.ino = BTRFS_I(inode)->location.objectid;
args.root = BTRFS_I(inode)->root;
return insert_inode_locked4(inode,
@@ -6991,7 +7041,7 @@ out:
}
static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
- struct extent_state **cached_state, int writing)
+ struct extent_state **cached_state, bool writing)
{
struct btrfs_ordered_extent *ordered;
int ret = 0;
@@ -7129,30 +7179,7 @@ static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len,
}
-static int btrfs_get_blocks_direct_read(struct extent_map *em,
- struct buffer_head *bh_result,
- struct inode *inode,
- u64 start, u64 len)
-{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
-
- if (em->block_start == EXTENT_MAP_HOLE ||
- test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
- return -ENOENT;
-
- len = min(len, em->len - (start - em->start));
-
- bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
- inode->i_blkbits;
- bh_result->b_size = len;
- bh_result->b_bdev = fs_info->fs_devices->latest_bdev;
- set_buffer_mapped(bh_result);
-
- return 0;
-}
-
static int btrfs_get_blocks_direct_write(struct extent_map **map,
- struct buffer_head *bh_result,
struct inode *inode,
struct btrfs_dio_data *dio_data,
u64 start, u64 len)
@@ -7214,7 +7241,6 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map,
}
/* this will cow the extent */
- len = bh_result->b_size;
free_extent_map(em);
*map = em = btrfs_new_extent_direct(inode, start, len);
if (IS_ERR(em)) {
@@ -7225,64 +7251,73 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map,
len = min(len, em->len - (start - em->start));
skip_cow:
- bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
- inode->i_blkbits;
- bh_result->b_size = len;
- bh_result->b_bdev = fs_info->fs_devices->latest_bdev;
- set_buffer_mapped(bh_result);
-
- if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
- set_buffer_new(bh_result);
-
/*
* Need to update the i_size under the extent lock so buffered
* readers will get the updated i_size when we unlock.
*/
- if (!dio_data->overwrite && start + len > i_size_read(inode))
+ if (start + len > i_size_read(inode))
i_size_write(inode, start + len);
- WARN_ON(dio_data->reserve < len);
dio_data->reserve -= len;
- dio_data->unsubmitted_oe_range_end = start + len;
- current->journal_info = dio_data;
out:
return ret;
}
-static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create)
+static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start,
+ loff_t length, unsigned flags, struct iomap *iomap,
+ struct iomap *srcmap)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct extent_map *em;
struct extent_state *cached_state = NULL;
struct btrfs_dio_data *dio_data = NULL;
- u64 start = iblock << inode->i_blkbits;
u64 lockstart, lockend;
- u64 len = bh_result->b_size;
+ const bool write = !!(flags & IOMAP_WRITE);
int ret = 0;
+ u64 len = length;
+ bool unlock_extents = false;
- if (!create)
+ if (!write)
len = min_t(u64, len, fs_info->sectorsize);
lockstart = start;
lockend = start + len - 1;
- if (current->journal_info) {
- /*
- * Need to pull our outstanding extents and set journal_info to NULL so
- * that anything that needs to check if there's a transaction doesn't get
- * confused.
- */
- dio_data = current->journal_info;
- current->journal_info = NULL;
+ /*
+ * The generic stuff only does filemap_write_and_wait_range, which
+ * isn't enough if we've written compressed pages to this area, so we
+ * need to flush the dirty pages again to make absolutely sure that any
+ * outstanding dirty pages are on disk.
+ */
+ if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
+ &BTRFS_I(inode)->runtime_flags))
+ ret = filemap_fdatawrite_range(inode->i_mapping, start,
+ start + length - 1);
+
+ dio_data = kzalloc(sizeof(*dio_data), GFP_NOFS);
+ if (!dio_data)
+ return -ENOMEM;
+
+ dio_data->length = length;
+ if (write) {
+ dio_data->reserve = round_up(length, fs_info->sectorsize);
+ ret = btrfs_delalloc_reserve_space(inode,
+ &dio_data->data_reserved,
+ start, dio_data->reserve);
+ if (ret) {
+ extent_changeset_free(dio_data->data_reserved);
+ kfree(dio_data);
+ return ret;
+ }
}
+ iomap->private = dio_data;
+
/*
* If this errors out it's because we couldn't invalidate pagecache for
* this range and we need to fallback to buffered.
*/
- if (lock_extent_direct(inode, lockstart, lockend, &cached_state,
- create)) {
+ if (lock_extent_direct(inode, lockstart, lockend, &cached_state, write)) {
ret = -ENOTBLK;
goto err;
}
@@ -7314,36 +7349,48 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
goto unlock_err;
}
- if (create) {
- ret = btrfs_get_blocks_direct_write(&em, bh_result, inode,
- dio_data, start, len);
+ len = min(len, em->len - (start - em->start));
+ if (write) {
+ ret = btrfs_get_blocks_direct_write(&em, inode, dio_data,
+ start, len);
if (ret < 0)
goto unlock_err;
-
- unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
- lockend, &cached_state);
+ unlock_extents = true;
+ /* Recalc len in case the new em is smaller than requested */
+ len = min(len, em->len - (start - em->start));
} else {
- ret = btrfs_get_blocks_direct_read(em, bh_result, inode,
- start, len);
- /* Can be negative only if we read from a hole */
- if (ret < 0) {
- ret = 0;
- free_extent_map(em);
- goto unlock_err;
- }
/*
* We need to unlock only the end area that we aren't using.
* The rest is going to be unlocked by the endio routine.
*/
- lockstart = start + bh_result->b_size;
- if (lockstart < lockend) {
- unlock_extent_cached(&BTRFS_I(inode)->io_tree,
- lockstart, lockend, &cached_state);
- } else {
- free_extent_state(cached_state);
- }
+ lockstart = start + len;
+ if (lockstart < lockend)
+ unlock_extents = true;
}
+ if (unlock_extents)
+ unlock_extent_cached(&BTRFS_I(inode)->io_tree,
+ lockstart, lockend, &cached_state);
+ else
+ free_extent_state(cached_state);
+
+ /*
+ * Translate extent map information to iomap.
+ * We trim the extents (and move the addr) even though iomap code does
+ * that, since we have locked only the parts we are performing I/O in.
+ */
+ if ((em->block_start == EXTENT_MAP_HOLE) ||
+ (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) && !write)) {
+ iomap->addr = IOMAP_NULL_ADDR;
+ iomap->type = IOMAP_HOLE;
+ } else {
+ iomap->addr = em->block_start + (start - em->start);
+ iomap->type = IOMAP_MAPPED;
+ }
+ iomap->offset = start;
+ iomap->bdev = fs_info->fs_devices->latest_bdev;
+ iomap->length = len;
+
free_extent_map(em);
return 0;
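The translation block above is the whole contract between btrfs and iomap on this path. A condensed sketch of just that mapping step; em_to_iomap() is a hypothetical helper (the patch open-codes this), while EXTENT_MAP_HOLE, EXTENT_FLAG_PREALLOC and the IOMAP_* constants follow their kernel meanings:

static void em_to_iomap(const struct extent_map *em, u64 start, u64 len,
			bool write, struct iomap *iomap)
{
	if (em->block_start == EXTENT_MAP_HOLE ||
	    (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) && !write)) {
		/* Holes (and prealloc on reads) expose no data blocks. */
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->type = IOMAP_HOLE;
	} else {
		/* Point at the locked subrange within the extent. */
		iomap->addr = em->block_start + (start - em->start);
		iomap->type = IOMAP_MAPPED;
	}
	iomap->offset = start;
	iomap->length = len;
}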
@@ -7352,370 +7399,152 @@ unlock_err:
unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
&cached_state);
err:
- if (dio_data)
- current->journal_info = dio_data;
+ if (dio_data) {
+ btrfs_delalloc_release_space(inode, dio_data->data_reserved,
+ start, dio_data->reserve, true);
+ btrfs_delalloc_release_extents(BTRFS_I(inode), dio_data->reserve);
+ extent_changeset_free(dio_data->data_reserved);
+ kfree(dio_data);
+ }
return ret;
}
-static inline blk_status_t submit_dio_repair_bio(struct inode *inode,
- struct bio *bio,
- int mirror_num)
+static int btrfs_dio_iomap_end(struct inode *inode, loff_t pos, loff_t length,
+ ssize_t written, unsigned flags, struct iomap *iomap)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- blk_status_t ret;
+ int ret = 0;
+ struct btrfs_dio_data *dio_data = iomap->private;
+ size_t submitted = dio_data->submitted;
+ const bool write = !!(flags & IOMAP_WRITE);
- BUG_ON(bio_op(bio) == REQ_OP_WRITE);
+ if (!write && (iomap->type == IOMAP_HOLE)) {
+ /* If reading from a hole, unlock and return */
+ unlock_extent(&BTRFS_I(inode)->io_tree, pos, pos + length - 1);
+ goto out;
+ }
- ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DIO_REPAIR);
- if (ret)
- return ret;
+ if (submitted < length) {
+ pos += submitted;
+ length -= submitted;
+ if (write)
+ __endio_write_update_ordered(inode, pos, length, false);
+ else
+ unlock_extent(&BTRFS_I(inode)->io_tree, pos,
+ pos + length - 1);
+ ret = -ENOTBLK;
+ }
- ret = btrfs_map_bio(fs_info, bio, mirror_num);
+ if (write) {
+ if (dio_data->reserve)
+ btrfs_delalloc_release_space(inode,
+ dio_data->data_reserved, pos,
+ dio_data->reserve, true);
+ btrfs_delalloc_release_extents(BTRFS_I(inode), dio_data->length);
+ extent_changeset_free(dio_data->data_reserved);
+ }
+out:
+ kfree(dio_data);
+ iomap->private = NULL;
return ret;
}
-static int btrfs_check_dio_repairable(struct inode *inode,
- struct bio *failed_bio,
- struct io_failure_record *failrec,
- int failed_mirror)
+static void btrfs_dio_private_put(struct btrfs_dio_private *dip)
{
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- int num_copies;
-
- num_copies = btrfs_num_copies(fs_info, failrec->logical, failrec->len);
- if (num_copies == 1) {
- /*
- * we only have a single copy of the data, so don't bother with
- * all the retry and error correction code that follows. no
- * matter what the error is, it is very likely to persist.
- */
- btrfs_debug(fs_info,
- "Check DIO Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d",
- num_copies, failrec->this_mirror, failed_mirror);
- return 0;
- }
-
- failrec->failed_mirror = failed_mirror;
- failrec->this_mirror++;
- if (failrec->this_mirror == failed_mirror)
- failrec->this_mirror++;
+ /*
+ * This implies a barrier so that stores to dio_bio->bi_status before
+ * this and loads of dio_bio->bi_status after this are fully ordered.
+ */
+ if (!refcount_dec_and_test(&dip->refs))
+ return;
- if (failrec->this_mirror > num_copies) {
- btrfs_debug(fs_info,
- "Check DIO Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d",
- num_copies, failrec->this_mirror, failed_mirror);
- return 0;
+ if (bio_op(dip->dio_bio) == REQ_OP_WRITE) {
+ __endio_write_update_ordered(dip->inode, dip->logical_offset,
+ dip->bytes,
+ !dip->dio_bio->bi_status);
+ } else {
+ unlock_extent(&BTRFS_I(dip->inode)->io_tree,
+ dip->logical_offset,
+ dip->logical_offset + dip->bytes - 1);
}
- return 1;
+ bio_endio(dip->dio_bio);
+ kfree(dip);
}
-static blk_status_t dio_read_error(struct inode *inode, struct bio *failed_bio,
- struct page *page, unsigned int pgoff,
- u64 start, u64 end, int failed_mirror,
- bio_end_io_t *repair_endio, void *repair_arg)
+static blk_status_t submit_dio_repair_bio(struct inode *inode, struct bio *bio,
+ int mirror_num,
+ unsigned long bio_flags)
{
- struct io_failure_record *failrec;
- struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
- struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
- struct bio *bio;
- int isector;
- unsigned int read_mode = 0;
- int segs;
- int ret;
- blk_status_t status;
- struct bio_vec bvec;
+ struct btrfs_dio_private *dip = bio->bi_private;
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ blk_status_t ret;
- BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
+ BUG_ON(bio_op(bio) == REQ_OP_WRITE);
- ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
+ ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
if (ret)
- return errno_to_blk_status(ret);
-
- ret = btrfs_check_dio_repairable(inode, failed_bio, failrec,
- failed_mirror);
- if (!ret) {
- free_io_failure(failure_tree, io_tree, failrec);
- return BLK_STS_IOERR;
- }
-
- segs = bio_segments(failed_bio);
- bio_get_first_bvec(failed_bio, &bvec);
- if (segs > 1 ||
- (bvec.bv_len > btrfs_inode_sectorsize(inode)))
- read_mode |= REQ_FAILFAST_DEV;
-
- isector = start - btrfs_io_bio(failed_bio)->logical;
- isector >>= inode->i_sb->s_blocksize_bits;
- bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
- pgoff, isector, repair_endio, repair_arg);
- bio->bi_opf = REQ_OP_READ | read_mode;
-
- btrfs_debug(BTRFS_I(inode)->root->fs_info,
- "repair DIO read error: submitting new dio read[%#x] to this_mirror=%d, in_validation=%d",
- read_mode, failrec->this_mirror, failrec->in_validation);
-
- status = submit_dio_repair_bio(inode, bio, failrec->this_mirror);
- if (status) {
- free_io_failure(failure_tree, io_tree, failrec);
- bio_put(bio);
- }
-
- return status;
-}
-
-struct btrfs_retry_complete {
- struct completion done;
- struct inode *inode;
- u64 start;
- int uptodate;
-};
+ return ret;
-static void btrfs_retry_endio_nocsum(struct bio *bio)
-{
- struct btrfs_retry_complete *done = bio->bi_private;
- struct inode *inode = done->inode;
- struct bio_vec *bvec;
- struct extent_io_tree *io_tree, *failure_tree;
- struct bvec_iter_all iter_all;
-
- if (bio->bi_status)
- goto end;
-
- ASSERT(bio->bi_vcnt == 1);
- io_tree = &BTRFS_I(inode)->io_tree;
- failure_tree = &BTRFS_I(inode)->io_failure_tree;
- ASSERT(bio_first_bvec_all(bio)->bv_len == btrfs_inode_sectorsize(inode));
-
- done->uptodate = 1;
- ASSERT(!bio_flagged(bio, BIO_CLONED));
- bio_for_each_segment_all(bvec, bio, iter_all)
- clean_io_failure(BTRFS_I(inode)->root->fs_info, failure_tree,
- io_tree, done->start, bvec->bv_page,
- btrfs_ino(BTRFS_I(inode)), 0);
-end:
- complete(&done->done);
- bio_put(bio);
+ refcount_inc(&dip->refs);
+ ret = btrfs_map_bio(fs_info, bio, mirror_num);
+ if (ret)
+ refcount_dec(&dip->refs);
+ return ret;
}
-static blk_status_t __btrfs_correct_data_nocsum(struct inode *inode,
- struct btrfs_io_bio *io_bio)
+static blk_status_t btrfs_check_read_dio_bio(struct inode *inode,
+ struct btrfs_io_bio *io_bio,
+ const bool uptodate)
{
- struct btrfs_fs_info *fs_info;
+ struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
+ const u32 sectorsize = fs_info->sectorsize;
+ struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
+ struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+ const bool csum = !(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM);
struct bio_vec bvec;
struct bvec_iter iter;
- struct btrfs_retry_complete done;
- u64 start;
- unsigned int pgoff;
- u32 sectorsize;
- int nr_sectors;
- blk_status_t ret;
+ u64 start = io_bio->logical;
+ int icsum = 0;
blk_status_t err = BLK_STS_OK;
- fs_info = BTRFS_I(inode)->root->fs_info;
- sectorsize = fs_info->sectorsize;
+ __bio_for_each_segment(bvec, &io_bio->bio, iter, io_bio->iter) {
+ unsigned int i, nr_sectors, pgoff;
- start = io_bio->logical;
- done.inode = inode;
- io_bio->bio.bi_iter = io_bio->iter;
-
- bio_for_each_segment(bvec, &io_bio->bio, iter) {
nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec.bv_len);
pgoff = bvec.bv_offset;
-
-next_block_or_try_again:
- done.uptodate = 0;
- done.start = start;
- init_completion(&done.done);
-
- ret = dio_read_error(inode, &io_bio->bio, bvec.bv_page,
- pgoff, start, start + sectorsize - 1,
- io_bio->mirror_num,
- btrfs_retry_endio_nocsum, &done);
- if (ret) {
- err = ret;
- goto next;
- }
-
- wait_for_completion_io(&done.done);
-
- if (!done.uptodate) {
- /* We might have another mirror, so try again */
- goto next_block_or_try_again;
- }
-
-next:
- start += sectorsize;
-
- nr_sectors--;
- if (nr_sectors) {
- pgoff += sectorsize;
+ for (i = 0; i < nr_sectors; i++) {
ASSERT(pgoff < PAGE_SIZE);
- goto next_block_or_try_again;
- }
- }
-
- return err;
-}
-
-static void btrfs_retry_endio(struct bio *bio)
-{
- struct btrfs_retry_complete *done = bio->bi_private;
- struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
- struct extent_io_tree *io_tree, *failure_tree;
- struct inode *inode = done->inode;
- struct bio_vec *bvec;
- int uptodate;
- int ret;
- int i = 0;
- struct bvec_iter_all iter_all;
-
- if (bio->bi_status)
- goto end;
-
- uptodate = 1;
-
- ASSERT(bio->bi_vcnt == 1);
- ASSERT(bio_first_bvec_all(bio)->bv_len == btrfs_inode_sectorsize(done->inode));
-
- io_tree = &BTRFS_I(inode)->io_tree;
- failure_tree = &BTRFS_I(inode)->io_failure_tree;
-
- ASSERT(!bio_flagged(bio, BIO_CLONED));
- bio_for_each_segment_all(bvec, bio, iter_all) {
- ret = __readpage_endio_check(inode, io_bio, i, bvec->bv_page,
- bvec->bv_offset, done->start,
- bvec->bv_len);
- if (!ret)
- clean_io_failure(BTRFS_I(inode)->root->fs_info,
- failure_tree, io_tree, done->start,
- bvec->bv_page,
- btrfs_ino(BTRFS_I(inode)),
- bvec->bv_offset);
- else
- uptodate = 0;
- i++;
- }
-
- done->uptodate = uptodate;
-end:
- complete(&done->done);
- bio_put(bio);
-}
-
-static blk_status_t __btrfs_subio_endio_read(struct inode *inode,
- struct btrfs_io_bio *io_bio, blk_status_t err)
-{
- struct btrfs_fs_info *fs_info;
- struct bio_vec bvec;
- struct bvec_iter iter;
- struct btrfs_retry_complete done;
- u64 start;
- u64 offset = 0;
- u32 sectorsize;
- int nr_sectors;
- unsigned int pgoff;
- int csum_pos;
- bool uptodate = (err == 0);
- int ret;
- blk_status_t status;
-
- fs_info = BTRFS_I(inode)->root->fs_info;
- sectorsize = fs_info->sectorsize;
-
- err = BLK_STS_OK;
- start = io_bio->logical;
- done.inode = inode;
- io_bio->bio.bi_iter = io_bio->iter;
-
- bio_for_each_segment(bvec, &io_bio->bio, iter) {
- nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec.bv_len);
-
- pgoff = bvec.bv_offset;
-next_block:
- if (uptodate) {
- csum_pos = BTRFS_BYTES_TO_BLKS(fs_info, offset);
- ret = __readpage_endio_check(inode, io_bio, csum_pos,
- bvec.bv_page, pgoff, start, sectorsize);
- if (likely(!ret))
- goto next;
- }
-try_again:
- done.uptodate = 0;
- done.start = start;
- init_completion(&done.done);
-
- status = dio_read_error(inode, &io_bio->bio, bvec.bv_page,
- pgoff, start, start + sectorsize - 1,
- io_bio->mirror_num, btrfs_retry_endio,
- &done);
- if (status) {
- err = status;
- goto next;
- }
-
- wait_for_completion_io(&done.done);
-
- if (!done.uptodate) {
- /* We might have another mirror, so try again */
- goto try_again;
- }
-next:
- offset += sectorsize;
- start += sectorsize;
-
- ASSERT(nr_sectors);
-
- nr_sectors--;
- if (nr_sectors) {
+ if (uptodate &&
+ (!csum || !check_data_csum(inode, io_bio, icsum,
+ bvec.bv_page, pgoff,
+ start, sectorsize))) {
+ clean_io_failure(fs_info, failure_tree, io_tree,
+ start, bvec.bv_page,
+ btrfs_ino(BTRFS_I(inode)),
+ pgoff);
+ } else {
+ blk_status_t status;
+
+ status = btrfs_submit_read_repair(inode,
+ &io_bio->bio,
+ start - io_bio->logical,
+ bvec.bv_page, pgoff,
+ start,
+ start + sectorsize - 1,
+ io_bio->mirror_num,
+ submit_dio_repair_bio);
+ if (status)
+ err = status;
+ }
+ start += sectorsize;
+ icsum++;
pgoff += sectorsize;
- ASSERT(pgoff < PAGE_SIZE);
- goto next_block;
}
}
-
return err;
}
-static blk_status_t btrfs_subio_endio_read(struct inode *inode,
- struct btrfs_io_bio *io_bio, blk_status_t err)
-{
- bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
-
- if (skip_csum) {
- if (unlikely(err))
- return __btrfs_correct_data_nocsum(inode, io_bio);
- else
- return BLK_STS_OK;
- } else {
- return __btrfs_subio_endio_read(inode, io_bio, err);
- }
-}
-
-static void btrfs_endio_direct_read(struct bio *bio)
-{
- struct btrfs_dio_private *dip = bio->bi_private;
- struct inode *inode = dip->inode;
- struct bio *dio_bio;
- struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
- blk_status_t err = bio->bi_status;
-
- if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED)
- err = btrfs_subio_endio_read(inode, io_bio, err);
-
- unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
- dip->logical_offset + dip->bytes - 1);
- dio_bio = dip->dio_bio;
-
- kfree(dip);
-
- dio_bio->bi_status = err;
- dio_end_io(dio_bio);
- btrfs_io_bio_free_csum(io_bio);
- bio_put(bio);
-}
-
static void __endio_write_update_ordered(struct inode *inode,
const u64 offset, const u64 bytes,
const bool uptodate)
@@ -7759,21 +7588,6 @@ static void __endio_write_update_ordered(struct inode *inode,
}
}
-static void btrfs_endio_direct_write(struct bio *bio)
-{
- struct btrfs_dio_private *dip = bio->bi_private;
- struct bio *dio_bio = dip->dio_bio;
-
- __endio_write_update_ordered(dip->inode, dip->logical_offset,
- dip->bytes, !bio->bi_status);
-
- kfree(dip);
-
- dio_bio->bi_status = bio->bi_status;
- dio_end_io(dio_bio);
- bio_put(bio);
-}
-
static blk_status_t btrfs_submit_bio_start_direct_io(void *private_data,
struct bio *bio, u64 offset)
{
@@ -7797,64 +7611,16 @@ static void btrfs_end_dio_bio(struct bio *bio)
(unsigned long long)bio->bi_iter.bi_sector,
bio->bi_iter.bi_size, err);
- if (dip->subio_endio)
- err = dip->subio_endio(dip->inode, btrfs_io_bio(bio), err);
-
- if (err) {
- /*
- * We want to perceive the errors flag being set before
- * decrementing the reference count. We don't need a barrier
- * since atomic operations with a return value are fully
- * ordered as per atomic_t.txt
- */
- dip->errors = 1;
+ if (bio_op(bio) == REQ_OP_READ) {
+ err = btrfs_check_read_dio_bio(dip->inode, btrfs_io_bio(bio),
+ !err);
}
- /* if there are more bios still pending for this dio, just exit */
- if (!atomic_dec_and_test(&dip->pending_bios))
- goto out;
+ if (err)
+ dip->dio_bio->bi_status = err;
- if (dip->errors) {
- bio_io_error(dip->orig_bio);
- } else {
- dip->dio_bio->bi_status = BLK_STS_OK;
- bio_endio(dip->orig_bio);
- }
-out:
bio_put(bio);
-}
-
-static inline blk_status_t btrfs_lookup_and_bind_dio_csum(struct inode *inode,
- struct btrfs_dio_private *dip,
- struct bio *bio,
- u64 file_offset)
-{
- struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
- struct btrfs_io_bio *orig_io_bio = btrfs_io_bio(dip->orig_bio);
- u16 csum_size;
- blk_status_t ret;
-
- /*
- * We load all the csum data we need when we submit
- * the first bio to reduce the csum tree search and
- * contention.
- */
- if (dip->logical_offset == file_offset) {
- ret = btrfs_lookup_bio_sums(inode, dip->orig_bio, file_offset,
- NULL);
- if (ret)
- return ret;
- }
-
- if (bio == dip->orig_bio)
- return 0;
-
- file_offset -= dip->logical_offset;
- file_offset >>= inode->i_sb->s_blocksize_bits;
- csum_size = btrfs_super_csum_size(btrfs_sb(inode->i_sb)->super_copy);
- io_bio->csum = orig_io_bio->csum + csum_size * file_offset;
-
- return 0;
+ btrfs_dio_private_put(dip);
}
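
This completion handler replaces the old pending_bios counter and errors flag with a plain refcount on the dip: every cloned bio holds one reference, and btrfs_dio_private_put() is expected to complete dio_bio and free the dip once the count reaches zero (its body is outside this hunk, so treat that as an assumption). A minimal userspace sketch of the lifetime rule, with all names hypothetical:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct dip {
	atomic_int refs;	/* like refcount_set(&dip->refs, 1) */
};

static void dip_put(struct dip *d)
{
	/* The last put completes the parent bio and frees the dip. */
	if (atomic_fetch_sub(&d->refs, 1) == 1) {
		printf("all clones done: complete dio_bio, free dip\n");
		free(d);
	}
}

int main(void)
{
	struct dip *d = malloc(sizeof(*d));
	int clones = 3, i;

	if (!d)
		return 1;
	atomic_init(&d->refs, 1);	/* initial ref, owned by submitter */
	/* Every clone except the last takes an extra reference; the
	 * initial reference is handed to the last clone. */
	for (i = 0; i < clones - 1; i++)
		atomic_fetch_add(&d->refs, 1);
	for (i = 0; i < clones; i++)	/* each end_io does one put */
		dip_put(d);
	return 0;
}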
static inline blk_status_t btrfs_submit_dio_bio(struct bio *bio,
@@ -7892,10 +7658,12 @@ static inline blk_status_t btrfs_submit_dio_bio(struct bio *bio,
if (ret)
goto err;
} else {
- ret = btrfs_lookup_and_bind_dio_csum(inode, dip, bio,
- file_offset);
- if (ret)
- goto err;
+ u64 csum_offset;
+
+ csum_offset = file_offset - dip->logical_offset;
+ csum_offset >>= inode->i_sb->s_blocksize_bits;
+ csum_offset *= btrfs_super_csum_size(fs_info->super_copy);
+ btrfs_io_bio(bio)->csum = dip->csums + csum_offset;
}
map:
ret = btrfs_map_bio(fs_info, bio, 0);
@@ -7903,14 +7671,53 @@ err:
return ret;
}
-static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip)
+/*
+ * If this succeeds, the btrfs_dio_private is responsible for cleaning up the
+ * locked extent range (reads) or the ordered extents (writes), whether or not
+ * we submit any bios.
+ */
+static struct btrfs_dio_private *btrfs_create_dio_private(struct bio *dio_bio,
+ struct inode *inode,
+ loff_t file_offset)
{
- struct inode *inode = dip->inode;
+ const bool write = (bio_op(dio_bio) == REQ_OP_WRITE);
+ const bool csum = !(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM);
+ size_t dip_size;
+ struct btrfs_dio_private *dip;
+
+ dip_size = sizeof(*dip);
+ if (!write && csum) {
+ struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
+ size_t nblocks;
+
+ nblocks = dio_bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;
+ dip_size += csum_size * nblocks;
+ }
+
+ dip = kzalloc(dip_size, GFP_NOFS);
+ if (!dip)
+ return NULL;
+
+ dip->inode = inode;
+ dip->logical_offset = file_offset;
+ dip->bytes = dio_bio->bi_iter.bi_size;
+ dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9;
+ dip->dio_bio = dio_bio;
+ refcount_set(&dip->refs, 1);
+ return dip;
+}
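
The sizing above implies that the per-block checksums live in a flexible array at the tail of struct btrfs_dio_private, so dip->csums needs no second allocation; the struct layout is not shown in this hunk, so the sketch below is an assumption about it. A userspace sketch of the same sizing trick:

#include <stdint.h>
#include <stdlib.h>

struct dio_private {
	uint64_t logical_offset;
	uint64_t bytes;
	uint8_t csums[];	/* one csum_size entry per block, assumed */
};

static struct dio_private *create_dio_private(uint64_t bytes,
					      unsigned blocksize_bits,
					      uint16_t csum_size)
{
	size_t nblocks = bytes >> blocksize_bits;
	struct dio_private *dip;

	/* One allocation covers the struct plus all csum slots. */
	dip = calloc(1, sizeof(*dip) + (size_t)csum_size * nblocks);
	if (!dip)
		return NULL;
	dip->bytes = bytes;
	return dip;
}

int main(void)
{
	/* 1 MiB read, 4K blocks, 4-byte crc32c: 256 csum slots inline. */
	struct dio_private *dip = create_dio_private(1 << 20, 12, 4);

	free(dip);
	return 0;
}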
+
+static blk_qc_t btrfs_submit_direct(struct inode *inode, struct iomap *iomap,
+ struct bio *dio_bio, loff_t file_offset)
+{
+ const bool write = (bio_op(dio_bio) == REQ_OP_WRITE);
+ const bool csum = !(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM);
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ const bool raid56 = (btrfs_data_alloc_profile(fs_info) &
+ BTRFS_BLOCK_GROUP_RAID56_MASK);
+ struct btrfs_dio_private *dip;
struct bio *bio;
- struct bio *orig_bio = dip->orig_bio;
- u64 start_sector = orig_bio->bi_iter.bi_sector;
- u64 file_offset = dip->logical_offset;
+ u64 start_sector;
int async_submit = 0;
u64 submit_len;
int clone_offset = 0;
@@ -7918,339 +7725,115 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip)
int ret;
blk_status_t status;
struct btrfs_io_geometry geom;
+ struct btrfs_dio_data *dio_data = iomap->private;
- submit_len = orig_bio->bi_iter.bi_size;
- ret = btrfs_get_io_geometry(fs_info, btrfs_op(orig_bio),
- start_sector << 9, submit_len, &geom);
- if (ret)
- return -EIO;
+ dip = btrfs_create_dio_private(dio_bio, inode, file_offset);
+ if (!dip) {
+ if (!write) {
+ unlock_extent(&BTRFS_I(inode)->io_tree, file_offset,
+ file_offset + dio_bio->bi_iter.bi_size - 1);
+ }
+ dio_bio->bi_status = BLK_STS_RESOURCE;
+ bio_endio(dio_bio);
+ return BLK_QC_T_NONE;
+ }
- if (geom.len >= submit_len) {
- bio = orig_bio;
- dip->flags |= BTRFS_DIO_ORIG_BIO_SUBMITTED;
- goto submit;
+ if (!write && csum) {
+ /*
+ * Load the csums up front to reduce csum tree searches and
+ * contention when submitting bios.
+ */
+ status = btrfs_lookup_bio_sums(inode, dio_bio, file_offset,
+ dip->csums);
+ if (status != BLK_STS_OK)
+ goto out_err;
}
- /* async crcs make it difficult to collect full stripe writes. */
- if (btrfs_data_alloc_profile(fs_info) & BTRFS_BLOCK_GROUP_RAID56_MASK)
- async_submit = 0;
- else
- async_submit = 1;
+ start_sector = dio_bio->bi_iter.bi_sector;
+ submit_len = dio_bio->bi_iter.bi_size;
- /* bio split */
- ASSERT(geom.len <= INT_MAX);
- atomic_inc(&dip->pending_bios);
do {
+ ret = btrfs_get_io_geometry(fs_info, btrfs_op(dio_bio),
+ start_sector << 9, submit_len,
+ &geom);
+ if (ret) {
+ status = errno_to_blk_status(ret);
+ goto out_err;
+ }
+ ASSERT(geom.len <= INT_MAX);
+
clone_len = min_t(int, submit_len, geom.len);
/*
* This will never fail as it's passing GFP_NOFS and
* the allocation is backed by btrfs_bioset.
*/
- bio = btrfs_bio_clone_partial(orig_bio, clone_offset,
- clone_len);
+ bio = btrfs_bio_clone_partial(dio_bio, clone_offset, clone_len);
bio->bi_private = dip;
bio->bi_end_io = btrfs_end_dio_bio;
btrfs_io_bio(bio)->logical = file_offset;
ASSERT(submit_len >= clone_len);
submit_len -= clone_len;
- if (submit_len == 0)
- break;
/*
* Increase the count before we submit the bio so we know
* the end IO handler won't happen before we increase the
* count. Otherwise, the dip might get freed before we're
* done setting it up.
+ *
+ * We transfer the initial reference to the last bio, so we
+ * don't need to increment the reference count for the last one.
*/
- atomic_inc(&dip->pending_bios);
+ if (submit_len > 0) {
+ refcount_inc(&dip->refs);
+ /*
+ * If we are submitting more than one bio, submit them
+ * all asynchronously. The exception is RAID 5 or 6, as
+ * asynchronous checksums make it difficult to collect
+ * full stripe writes.
+ */
+ if (!raid56)
+ async_submit = 1;
+ }
status = btrfs_submit_dio_bio(bio, inode, file_offset,
async_submit);
if (status) {
bio_put(bio);
- atomic_dec(&dip->pending_bios);
+ if (submit_len > 0)
+ refcount_dec(&dip->refs);
goto out_err;
}
+ dio_data->submitted += clone_len;
clone_offset += clone_len;
start_sector += clone_len >> 9;
file_offset += clone_len;
-
- ret = btrfs_get_io_geometry(fs_info, btrfs_op(orig_bio),
- start_sector << 9, submit_len, &geom);
- if (ret)
- goto out_err;
} while (submit_len > 0);
+ return BLK_QC_T_NONE;
-submit:
- status = btrfs_submit_dio_bio(bio, inode, file_offset, async_submit);
- if (!status)
- return 0;
-
- bio_put(bio);
out_err:
- dip->errors = 1;
- /*
- * Before atomic variable goto zero, we must make sure dip->errors is
- * perceived to be set. This ordering is ensured by the fact that an
- * atomic operations with a return value are fully ordered as per
- * atomic_t.txt
- */
- if (atomic_dec_and_test(&dip->pending_bios))
- bio_io_error(dip->orig_bio);
-
- /* bio_end_io() will handle error, so we needn't return it */
- return 0;
-}
-
-static void btrfs_submit_direct(struct bio *dio_bio, struct inode *inode,
- loff_t file_offset)
-{
- struct btrfs_dio_private *dip = NULL;
- struct bio *bio = NULL;
- struct btrfs_io_bio *io_bio;
- bool write = (bio_op(dio_bio) == REQ_OP_WRITE);
- int ret = 0;
-
- bio = btrfs_bio_clone(dio_bio);
-
- dip = kzalloc(sizeof(*dip), GFP_NOFS);
- if (!dip) {
- ret = -ENOMEM;
- goto free_ordered;
- }
-
- dip->private = dio_bio->bi_private;
- dip->inode = inode;
- dip->logical_offset = file_offset;
- dip->bytes = dio_bio->bi_iter.bi_size;
- dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9;
- bio->bi_private = dip;
- dip->orig_bio = bio;
- dip->dio_bio = dio_bio;
- atomic_set(&dip->pending_bios, 0);
- io_bio = btrfs_io_bio(bio);
- io_bio->logical = file_offset;
-
- if (write) {
- bio->bi_end_io = btrfs_endio_direct_write;
- } else {
- bio->bi_end_io = btrfs_endio_direct_read;
- dip->subio_endio = btrfs_subio_endio_read;
- }
-
- /*
- * Reset the range for unsubmitted ordered extents (to a 0 length range)
- * even if we fail to submit a bio, because in such case we do the
- * corresponding error handling below and it must not be done a second
- * time by btrfs_direct_IO().
- */
- if (write) {
- struct btrfs_dio_data *dio_data = current->journal_info;
-
- dio_data->unsubmitted_oe_range_end = dip->logical_offset +
- dip->bytes;
- dio_data->unsubmitted_oe_range_start =
- dio_data->unsubmitted_oe_range_end;
- }
-
- ret = btrfs_submit_direct_hook(dip);
- if (!ret)
- return;
-
- btrfs_io_bio_free_csum(io_bio);
-
-free_ordered:
- /*
- * If we arrived here it means either we failed to submit the dip
- * or we either failed to clone the dio_bio or failed to allocate the
- * dip. If we cloned the dio_bio and allocated the dip, we can just
- * call bio_endio against our io_bio so that we get proper resource
- * cleanup if we fail to submit the dip, otherwise, we must do the
- * same as btrfs_endio_direct_[write|read] because we can't call these
- * callbacks - they require an allocated dip and a clone of dio_bio.
- */
- if (bio && dip) {
- bio_io_error(bio);
- /*
- * The end io callbacks free our dip, do the final put on bio
- * and all the cleanup and final put for dio_bio (through
- * dio_end_io()).
- */
- dip = NULL;
- bio = NULL;
- } else {
- if (write)
- __endio_write_update_ordered(inode,
- file_offset,
- dio_bio->bi_iter.bi_size,
- false);
- else
- unlock_extent(&BTRFS_I(inode)->io_tree, file_offset,
- file_offset + dio_bio->bi_iter.bi_size - 1);
-
- dio_bio->bi_status = BLK_STS_IOERR;
- /*
- * Releases and cleans up our dio_bio, no need to bio_put()
- * nor bio_endio()/bio_io_error() against dio_bio.
- */
- dio_end_io(dio_bio);
- }
- if (bio)
- bio_put(bio);
- kfree(dip);
+ dip->dio_bio->bi_status = status;
+ btrfs_dio_private_put(dip);
+ return BLK_QC_T_NONE;
}
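
The submission loop now re-queries btrfs_get_io_geometry() before every clone so that each partial bio ends at a stripe boundary. A runnable sketch of the clone-length arithmetic under an assumed simple striping layout (the real geometry depends on the chunk type):

#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	const uint64_t stripe_len = 65536;	/* BTRFS_STRIPE_LEN */
	uint64_t start = 5 * stripe_len - 8192;	/* unaligned start */
	uint64_t submit_len = 200 * 1024;	/* whole dio_bio size */
	uint64_t clone_offset = 0;

	while (submit_len > 0) {
		/* geom.len stand-in: bytes left in the current stripe */
		uint64_t geom_len = stripe_len - (start % stripe_len);
		uint64_t clone_len = MIN(submit_len, geom_len);

		printf("clone at offset %llu, len %llu\n",
		       (unsigned long long)clone_offset,
		       (unsigned long long)clone_len);
		clone_offset += clone_len;
		start += clone_len;
		submit_len -= clone_len;
	}
	return 0;
}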
-static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info,
- const struct iov_iter *iter, loff_t offset)
-{
- int seg;
- int i;
- unsigned int blocksize_mask = fs_info->sectorsize - 1;
- ssize_t retval = -EINVAL;
-
- if (offset & blocksize_mask)
- goto out;
-
- if (iov_iter_alignment(iter) & blocksize_mask)
- goto out;
-
- /* If this is a write we don't need to check anymore */
- if (iov_iter_rw(iter) != READ || !iter_is_iovec(iter))
- return 0;
- /*
- * Check to make sure we don't have duplicate iov_base's in this
- * iovec, if so return EINVAL, otherwise we'll get csum errors
- * when reading back.
- */
- for (seg = 0; seg < iter->nr_segs; seg++) {
- for (i = seg + 1; i < iter->nr_segs; i++) {
- if (iter->iov[seg].iov_base == iter->iov[i].iov_base)
- goto out;
- }
- }
- retval = 0;
-out:
- return retval;
-}
-
-static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
-{
- struct file *file = iocb->ki_filp;
- struct inode *inode = file->f_mapping->host;
- struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- struct btrfs_dio_data dio_data = { 0 };
- struct extent_changeset *data_reserved = NULL;
- loff_t offset = iocb->ki_pos;
- size_t count = 0;
- int flags = 0;
- bool wakeup = true;
- bool relock = false;
- ssize_t ret;
-
- if (check_direct_IO(fs_info, iter, offset))
- return 0;
-
- inode_dio_begin(inode);
-
- /*
- * The generic stuff only does filemap_write_and_wait_range, which
- * isn't enough if we've written compressed pages to this area, so
- * we need to flush the dirty pages again to make absolutely sure
- * that any outstanding dirty pages are on disk.
- */
- count = iov_iter_count(iter);
- if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
- &BTRFS_I(inode)->runtime_flags))
- filemap_fdatawrite_range(inode->i_mapping, offset,
- offset + count - 1);
-
- if (iov_iter_rw(iter) == WRITE) {
- /*
- * If the write DIO is beyond the EOF, we need update
- * the isize, but it is protected by i_mutex. So we can
- * not unlock the i_mutex at this case.
- */
- if (offset + count <= inode->i_size) {
- dio_data.overwrite = 1;
- inode_unlock(inode);
- relock = true;
- } else if (iocb->ki_flags & IOCB_NOWAIT) {
- ret = -EAGAIN;
- goto out;
- }
- ret = btrfs_delalloc_reserve_space(inode, &data_reserved,
- offset, count);
- if (ret)
- goto out;
-
- /*
- * We need to know how many extents we reserved so that we can
- * do the accounting properly if we go over the number we
- * originally calculated. Abuse current->journal_info for this.
- */
- dio_data.reserve = round_up(count,
- fs_info->sectorsize);
- dio_data.unsubmitted_oe_range_start = (u64)offset;
- dio_data.unsubmitted_oe_range_end = (u64)offset;
- current->journal_info = &dio_data;
- down_read(&BTRFS_I(inode)->dio_sem);
- } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
- &BTRFS_I(inode)->runtime_flags)) {
- inode_dio_end(inode);
- flags = DIO_LOCKING | DIO_SKIP_HOLES;
- wakeup = false;
- }
-
- ret = __blockdev_direct_IO(iocb, inode,
- fs_info->fs_devices->latest_bdev,
- iter, btrfs_get_blocks_direct, NULL,
- btrfs_submit_direct, flags);
- if (iov_iter_rw(iter) == WRITE) {
- up_read(&BTRFS_I(inode)->dio_sem);
- current->journal_info = NULL;
- if (ret < 0 && ret != -EIOCBQUEUED) {
- if (dio_data.reserve)
- btrfs_delalloc_release_space(inode, data_reserved,
- offset, dio_data.reserve, true);
- /*
- * On error we might have left some ordered extents
- * without submitting corresponding bios for them, so
- * cleanup them up to avoid other tasks getting them
- * and waiting for them to complete forever.
- */
- if (dio_data.unsubmitted_oe_range_start <
- dio_data.unsubmitted_oe_range_end)
- __endio_write_update_ordered(inode,
- dio_data.unsubmitted_oe_range_start,
- dio_data.unsubmitted_oe_range_end -
- dio_data.unsubmitted_oe_range_start,
- false);
- } else if (ret >= 0 && (size_t)ret < count)
- btrfs_delalloc_release_space(inode, data_reserved,
- offset, count - (size_t)ret, true);
- btrfs_delalloc_release_extents(BTRFS_I(inode), count);
- }
-out:
- if (wakeup)
- inode_dio_end(inode);
- if (relock)
- inode_lock(inode);
-
- extent_changeset_free(data_reserved);
- return ret;
-}
+const struct iomap_ops btrfs_dio_iomap_ops = {
+ .iomap_begin = btrfs_dio_iomap_begin,
+ .iomap_end = btrfs_dio_iomap_end,
+};
-#define BTRFS_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC)
+const struct iomap_dio_ops btrfs_dops = {
+ .submit_io = btrfs_submit_direct,
+};
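
With these two tables, direct I/O flows through the generic iomap framework: iomap_begin/iomap_end map and clean up each extent range, and submit_io hands every mapped bio to btrfs_submit_direct(). The callers that pass the tables to iomap_dio_rw() live in fs/btrfs/file.c and are not part of this hunk, so the following wiring sketch is an assumption based on the iomap API of this era:

/* Hypothetical caller, assuming the 5-argument iomap_dio_rw() signature. */
static ssize_t btrfs_direct_read_sketch(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock_shared(inode);
	ret = iomap_dio_rw(iocb, to, &btrfs_dio_iomap_ops, &btrfs_dops,
			   is_sync_kiocb(iocb));
	inode_unlock_shared(inode);
	return ret;
}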
static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len)
{
int ret;
- ret = fiemap_check_flags(fieinfo, BTRFS_FIEMAP_FLAGS);
+ ret = fiemap_prep(inode, fieinfo, start, &len, 0);
if (ret)
return ret;
@@ -10539,7 +10122,7 @@ static const struct address_space_operations btrfs_aops = {
.writepage = btrfs_writepage,
.writepages = btrfs_writepages,
.readahead = btrfs_readahead,
- .direct_IO = btrfs_direct_IO,
+ .direct_IO = noop_direct_IO,
.invalidatepage = btrfs_invalidatepage,
.releasepage = btrfs_releasepage,
#ifdef CONFIG_MIGRATION
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 40b729dce91c..168deb8ef68a 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -660,7 +660,7 @@ static noinline int create_subvol(struct inode *dir,
goto fail;
key.offset = (u64)-1;
- new_root = btrfs_get_fs_root(fs_info, &key, true);
+ new_root = btrfs_get_fs_root(fs_info, objectid, true);
if (IS_ERR(new_root)) {
ret = PTR_ERR(new_root);
btrfs_abort_transaction(trans, ret);
@@ -748,9 +748,8 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
struct btrfs_pending_snapshot *pending_snapshot;
struct btrfs_trans_handle *trans;
int ret;
- bool snapshot_force_cow = false;
- if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
+ if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
return -EINVAL;
if (atomic_read(&root->nr_swapfiles)) {
@@ -771,27 +770,6 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
goto free_pending;
}
- /*
- * Force new buffered writes to reserve space even when NOCOW is
- * possible. This is to avoid later writeback (running dealloc) to
- * fallback to COW mode and unexpectedly fail with ENOSPC.
- */
- btrfs_drew_read_lock(&root->snapshot_lock);
-
- ret = btrfs_start_delalloc_snapshot(root);
- if (ret)
- goto dec_and_free;
-
- /*
- * All previous writes have started writeback in NOCOW mode, so now
- * we force future writes to fallback to COW mode during snapshot
- * creation.
- */
- atomic_inc(&root->snapshot_force_cow);
- snapshot_force_cow = true;
-
- btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
-
btrfs_init_block_rsv(&pending_snapshot->block_rsv,
BTRFS_BLOCK_RSV_TEMP);
/*
@@ -806,7 +784,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
&pending_snapshot->block_rsv, 8,
false);
if (ret)
- goto dec_and_free;
+ goto free_pending;
pending_snapshot->dentry = dentry;
pending_snapshot->root = root;
@@ -848,11 +826,6 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
fail:
btrfs_put_root(pending_snapshot->snap);
btrfs_subvolume_release_metadata(fs_info, &pending_snapshot->block_rsv);
-dec_and_free:
- if (snapshot_force_cow)
- atomic_dec(&root->snapshot_force_cow);
- btrfs_drew_read_unlock(&root->snapshot_lock);
-
free_pending:
kfree(pending_snapshot->root_item);
btrfs_free_path(pending_snapshot->path);
@@ -983,6 +956,45 @@ out_unlock:
return error;
}
+static noinline int btrfs_mksnapshot(const struct path *parent,
+ const char *name, int namelen,
+ struct btrfs_root *root,
+ bool readonly,
+ struct btrfs_qgroup_inherit *inherit)
+{
+ int ret;
+ bool snapshot_force_cow = false;
+
+ /*
+ * Force new buffered writes to reserve space even when NOCOW is
+ * possible. This is to avoid later writeback (running delalloc)
+ * falling back to COW mode and unexpectedly failing with ENOSPC.
+ */
+ btrfs_drew_read_lock(&root->snapshot_lock);
+
+ ret = btrfs_start_delalloc_snapshot(root);
+ if (ret)
+ goto out;
+
+ /*
+ * All previous writes have started writeback in NOCOW mode, so now
+ * we force future writes to fall back to COW mode during snapshot
+ * creation.
+ */
+ atomic_inc(&root->snapshot_force_cow);
+ snapshot_force_cow = true;
+
+ btrfs_wait_ordered_extents(root, U64_MAX, 0, (u64)-1);
+
+ ret = btrfs_mksubvol(parent, name, namelen,
+ root, readonly, inherit);
+out:
+ if (snapshot_force_cow)
+ atomic_dec(&root->snapshot_force_cow);
+ btrfs_drew_read_unlock(&root->snapshot_lock);
+ return ret;
+}
+
/*
* When we're defragging a range, we don't want to kick it off again
* if it is really just waiting for delalloc to send it down.
@@ -1762,7 +1774,7 @@ static noinline int __btrfs_ioctl_snap_create(struct file *file,
*/
ret = -EPERM;
} else {
- ret = btrfs_mksubvol(&file->f_path, name, namelen,
+ ret = btrfs_mksnapshot(&file->f_path, name, namelen,
BTRFS_I(src_inode)->root,
readonly, inherit);
}
@@ -2127,10 +2139,7 @@ static noinline int search_ioctl(struct inode *inode,
/* search the root of the inode that was passed */
root = btrfs_grab_root(BTRFS_I(inode)->root);
} else {
- key.objectid = sk->tree_id;
- key.type = BTRFS_ROOT_ITEM_KEY;
- key.offset = (u64)-1;
- root = btrfs_get_fs_root(info, &key, true);
+ root = btrfs_get_fs_root(info, sk->tree_id, true);
if (IS_ERR(root)) {
btrfs_free_path(path);
return PTR_ERR(root);
@@ -2263,10 +2272,7 @@ static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info,
ptr = &name[BTRFS_INO_LOOKUP_PATH_MAX - 1];
- key.objectid = tree_id;
- key.type = BTRFS_ROOT_ITEM_KEY;
- key.offset = (u64)-1;
- root = btrfs_get_fs_root(info, &key, true);
+ root = btrfs_get_fs_root(info, tree_id, true);
if (IS_ERR(root)) {
ret = PTR_ERR(root);
root = NULL;
@@ -2359,10 +2365,7 @@ static int btrfs_search_path_in_tree_user(struct inode *inode,
if (dirid != upper_limit.objectid) {
ptr = &args->path[BTRFS_INO_LOOKUP_USER_PATH_MAX - 1];
- key.objectid = treeid;
- key.type = BTRFS_ROOT_ITEM_KEY;
- key.offset = (u64)-1;
- root = btrfs_get_fs_root(fs_info, &key, true);
+ root = btrfs_get_fs_root(fs_info, treeid, true);
if (IS_ERR(root)) {
ret = PTR_ERR(root);
goto out;
@@ -2421,7 +2424,7 @@ static int btrfs_search_path_in_tree_user(struct inode *inode,
goto out_put;
}
- temp_inode = btrfs_iget(sb, &key2, root);
+ temp_inode = btrfs_iget(sb, key2.objectid, root);
if (IS_ERR(temp_inode)) {
ret = PTR_ERR(temp_inode);
goto out_put;
@@ -2608,9 +2611,7 @@ static int btrfs_ioctl_get_subvol_info(struct file *file, void __user *argp)
/* Get root_item of inode's subvolume */
key.objectid = BTRFS_I(inode)->root->root_key.objectid;
- key.type = BTRFS_ROOT_ITEM_KEY;
- key.offset = (u64)-1;
- root = btrfs_get_fs_root(fs_info, &key, true);
+ root = btrfs_get_fs_root(fs_info, key.objectid, true);
if (IS_ERR(root)) {
ret = PTR_ERR(root);
goto out_free;
@@ -3278,7 +3279,6 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
struct btrfs_dir_item *di;
struct btrfs_trans_handle *trans;
struct btrfs_path *path = NULL;
- struct btrfs_key location;
struct btrfs_disk_key disk_key;
u64 objectid = 0;
u64 dir_id;
@@ -3299,11 +3299,7 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
if (!objectid)
objectid = BTRFS_FS_TREE_OBJECTID;
- location.objectid = objectid;
- location.type = BTRFS_ROOT_ITEM_KEY;
- location.offset = (u64)-1;
-
- new_root = btrfs_get_fs_root(fs_info, &location, true);
+ new_root = btrfs_get_fs_root(fs_info, objectid, true);
if (IS_ERR(new_root)) {
ret = PTR_ERR(new_root);
goto out;
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index fb647d8cf527..f75612e18a82 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -410,6 +410,7 @@ void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
* The rwlock is held for write upon exit.
*/
void btrfs_tree_lock(struct extent_buffer *eb)
+ __acquires(&eb->lock)
{
u64 start_ns = 0;
diff --git a/fs/btrfs/misc.h b/fs/btrfs/misc.h
index 72bab64ecf60..6461ebc3a1c1 100644
--- a/fs/btrfs/misc.h
+++ b/fs/btrfs/misc.h
@@ -6,6 +6,7 @@
#include <linux/sched.h>
#include <linux/wait.h>
#include <asm/div64.h>
+#include <linux/rbtree.h>
#define in_range(b, first, len) ((b) >= (first) && (b) < (first) + (len))
@@ -58,4 +59,57 @@ static inline bool has_single_bit_set(u64 n)
return is_power_of_two_u64(n);
}
+/*
+ * Simple bytenr-based rb_tree related structures
+ *
+ * Any structure that wants to use bytenr as a single search index should
+ * have its definition start with these members.
+ */
+struct rb_simple_node {
+ struct rb_node rb_node;
+ u64 bytenr;
+};
+
+static inline struct rb_node *rb_simple_search(struct rb_root *root, u64 bytenr)
+{
+ struct rb_node *node = root->rb_node;
+ struct rb_simple_node *entry;
+
+ while (node) {
+ entry = rb_entry(node, struct rb_simple_node, rb_node);
+
+ if (bytenr < entry->bytenr)
+ node = node->rb_left;
+ else if (bytenr > entry->bytenr)
+ node = node->rb_right;
+ else
+ return node;
+ }
+ return NULL;
+}
+
+static inline struct rb_node *rb_simple_insert(struct rb_root *root, u64 bytenr,
+ struct rb_node *node)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct rb_simple_node *entry;
+
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct rb_simple_node, rb_node);
+
+ if (bytenr < entry->bytenr)
+ p = &(*p)->rb_left;
+ else if (bytenr > entry->bytenr)
+ p = &(*p)->rb_right;
+ else
+ return parent;
+ }
+
+ rb_link_node(node, parent, p);
+ rb_insert_color(node, root);
+ return NULL;
+}
+
#endif
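
A short usage sketch for these helpers: any structure whose leading members mirror rb_simple_node can be indexed by bytenr without its own tree boilerplate. my_block and its payload are hypothetical:

struct my_block {
	struct rb_node rb_node;
	u64 bytenr;		/* must match the rb_simple_node layout */
	int level;		/* hypothetical payload */
};

static struct my_block *my_block_search(struct rb_root *root, u64 bytenr)
{
	struct rb_node *node = rb_simple_search(root, bytenr);

	return node ? rb_entry(node, struct my_block, rb_node) : NULL;
}

static bool my_block_insert(struct rb_root *root, struct my_block *block)
{
	/* rb_simple_insert() returns the colliding node, or NULL on success */
	return rb_simple_insert(root, block->bytenr, &block->rb_node) == NULL;
}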
diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c
index ff1ff90e48b1..2dcb1cb21634 100644
--- a/fs/btrfs/props.c
+++ b/fs/btrfs/props.c
@@ -408,19 +408,14 @@ int btrfs_subvol_inherit_props(struct btrfs_trans_handle *trans,
struct btrfs_root *parent_root)
{
struct super_block *sb = root->fs_info->sb;
- struct btrfs_key key;
struct inode *parent_inode, *child_inode;
int ret;
- key.objectid = BTRFS_FIRST_FREE_OBJECTID;
- key.type = BTRFS_INODE_ITEM_KEY;
- key.offset = 0;
-
- parent_inode = btrfs_iget(sb, &key, parent_root);
+ parent_inode = btrfs_iget(sb, BTRFS_FIRST_FREE_OBJECTID, parent_root);
if (IS_ERR(parent_inode))
return PTR_ERR(parent_inode);
- child_inode = btrfs_iget(sb, &key, root);
+ child_inode = btrfs_iget(sb, BTRFS_FIRST_FREE_OBJECTID, root);
if (IS_ERR(child_inode)) {
iput(parent_inode);
return PTR_ERR(child_inode);
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index c3888fb367e7..5bd4089ad0e1 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -2622,6 +2622,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
struct btrfs_root *quota_root;
struct btrfs_qgroup *srcgroup;
struct btrfs_qgroup *dstgroup;
+ bool need_rescan = false;
u32 level_size = 0;
u64 nums;
@@ -2765,6 +2766,13 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
goto unlock;
}
++i_qgroups;
+
+ /*
+ * If we're doing a snapshot, and adding the snapshot to a new
+ * qgroup, the numbers are guaranteed to be incorrect.
+ */
+ if (srcid)
+ need_rescan = true;
}
for (i = 0; i < inherit->num_ref_copies; ++i, i_qgroups += 2) {
@@ -2784,6 +2792,9 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
dst->rfer = src->rfer - level_size;
dst->rfer_cmpr = src->rfer_cmpr - level_size;
+
+ /* Manually tweaking numbers certainly needs a rescan */
+ need_rescan = true;
}
for (i = 0; i < inherit->num_excl_copies; ++i, i_qgroups += 2) {
struct btrfs_qgroup *src;
@@ -2802,6 +2813,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
dst->excl = src->excl + level_size;
dst->excl_cmpr = src->excl_cmpr + level_size;
+ need_rescan = true;
}
unlock:
@@ -2809,6 +2821,8 @@ unlock:
out:
if (!committing)
mutex_unlock(&fs_info->qgroup_ioctl_lock);
+ if (need_rescan)
+ fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
return ret;
}
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 03bc7134e8cb..3bbae80c752f 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -24,6 +24,7 @@
#include "delalloc-space.h"
#include "block-group.h"
#include "backref.h"
+#include "misc.h"
/*
* Relocation overview
@@ -72,100 +73,15 @@
* The entry point of relocation is the relocate_block_group() function.
*/
-/*
- * backref_node, mapping_node and tree_block start with this
- */
-struct tree_entry {
- struct rb_node rb_node;
- u64 bytenr;
-};
-
-/*
- * present a tree block in the backref cache
- */
-struct backref_node {
- struct rb_node rb_node;
- u64 bytenr;
-
- u64 new_bytenr;
- /* objectid of tree block owner, can be not uptodate */
- u64 owner;
- /* link to pending, changed or detached list */
- struct list_head list;
- /* list of upper level blocks reference this block */
- struct list_head upper;
- /* list of child blocks in the cache */
- struct list_head lower;
- /* NULL if this node is not tree root */
- struct btrfs_root *root;
- /* extent buffer got by COW the block */
- struct extent_buffer *eb;
- /* level of tree block */
- unsigned int level:8;
- /* is the block in non-reference counted tree */
- unsigned int cowonly:1;
- /* 1 if no child node in the cache */
- unsigned int lowest:1;
- /* is the extent buffer locked */
- unsigned int locked:1;
- /* has the block been processed */
- unsigned int processed:1;
- /* have backrefs of this block been checked */
- unsigned int checked:1;
- /*
- * 1 if corresponding block has been cowed but some upper
- * level block pointers may not point to the new location
- */
- unsigned int pending:1;
- /*
- * 1 if the backref node isn't connected to any other
- * backref node.
- */
- unsigned int detached:1;
-};
-
-/*
- * present a block pointer in the backref cache
- */
-struct backref_edge {
- struct list_head list[2];
- struct backref_node *node[2];
-};
-
-#define LOWER 0
-#define UPPER 1
#define RELOCATION_RESERVED_NODES 256
-
-struct backref_cache {
- /* red black tree of all backref nodes in the cache */
- struct rb_root rb_root;
- /* for passing backref nodes to btrfs_reloc_cow_block */
- struct backref_node *path[BTRFS_MAX_LEVEL];
- /*
- * list of blocks that have been cowed but some block
- * pointers in upper level blocks may not reflect the
- * new location
- */
- struct list_head pending[BTRFS_MAX_LEVEL];
- /* list of backref nodes with no child node */
- struct list_head leaves;
- /* list of blocks that have been cowed in current transaction */
- struct list_head changed;
- /* list of detached backref node. */
- struct list_head detached;
-
- u64 last_trans;
-
- int nr_nodes;
- int nr_edges;
-};
-
/*
* map address of tree root to tree
*/
struct mapping_node {
- struct rb_node rb_node;
- u64 bytenr;
+ struct {
+ struct rb_node rb_node;
+ u64 bytenr;
+ }; /* Use rb_simple_node for search/insert */
void *data;
};
@@ -178,8 +94,10 @@ struct mapping_tree {
* present a tree block to process
*/
struct tree_block {
- struct rb_node rb_node;
- u64 bytenr;
+ struct {
+ struct rb_node rb_node;
+ u64 bytenr;
+ }; /* Use rb_simple_node for search/insert */
struct btrfs_key key;
unsigned int level:8;
unsigned int key_ready:1;
@@ -204,7 +122,7 @@ struct reloc_control {
struct btrfs_block_rsv *block_rsv;
- struct backref_cache backref_cache;
+ struct btrfs_backref_cache backref_cache;
struct file_extent_cluster cluster;
/* tree blocks have been processed */
@@ -235,168 +153,41 @@ struct reloc_control {
#define MOVE_DATA_EXTENTS 0
#define UPDATE_DATA_PTRS 1
-static void remove_backref_node(struct backref_cache *cache,
- struct backref_node *node);
-static void __mark_block_processed(struct reloc_control *rc,
- struct backref_node *node);
-
-static void mapping_tree_init(struct mapping_tree *tree)
-{
- tree->rb_root = RB_ROOT;
- spin_lock_init(&tree->lock);
-}
-
-static void backref_cache_init(struct backref_cache *cache)
-{
- int i;
- cache->rb_root = RB_ROOT;
- for (i = 0; i < BTRFS_MAX_LEVEL; i++)
- INIT_LIST_HEAD(&cache->pending[i]);
- INIT_LIST_HEAD(&cache->changed);
- INIT_LIST_HEAD(&cache->detached);
- INIT_LIST_HEAD(&cache->leaves);
-}
-
-static void backref_cache_cleanup(struct backref_cache *cache)
-{
- struct backref_node *node;
- int i;
-
- while (!list_empty(&cache->detached)) {
- node = list_entry(cache->detached.next,
- struct backref_node, list);
- remove_backref_node(cache, node);
- }
-
- while (!list_empty(&cache->leaves)) {
- node = list_entry(cache->leaves.next,
- struct backref_node, lower);
- remove_backref_node(cache, node);
- }
-
- cache->last_trans = 0;
-
- for (i = 0; i < BTRFS_MAX_LEVEL; i++)
- ASSERT(list_empty(&cache->pending[i]));
- ASSERT(list_empty(&cache->changed));
- ASSERT(list_empty(&cache->detached));
- ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
- ASSERT(!cache->nr_nodes);
- ASSERT(!cache->nr_edges);
-}
-
-static struct backref_node *alloc_backref_node(struct backref_cache *cache)
-{
- struct backref_node *node;
-
- node = kzalloc(sizeof(*node), GFP_NOFS);
- if (node) {
- INIT_LIST_HEAD(&node->list);
- INIT_LIST_HEAD(&node->upper);
- INIT_LIST_HEAD(&node->lower);
- RB_CLEAR_NODE(&node->rb_node);
- cache->nr_nodes++;
- }
- return node;
-}
-
-static void free_backref_node(struct backref_cache *cache,
- struct backref_node *node)
-{
- if (node) {
- cache->nr_nodes--;
- btrfs_put_root(node->root);
- kfree(node);
- }
-}
-
-static struct backref_edge *alloc_backref_edge(struct backref_cache *cache)
-{
- struct backref_edge *edge;
-
- edge = kzalloc(sizeof(*edge), GFP_NOFS);
- if (edge)
- cache->nr_edges++;
- return edge;
-}
-
-static void free_backref_edge(struct backref_cache *cache,
- struct backref_edge *edge)
+static void mark_block_processed(struct reloc_control *rc,
+ struct btrfs_backref_node *node)
{
- if (edge) {
- cache->nr_edges--;
- kfree(edge);
- }
-}
+ u32 blocksize;
-static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
- struct rb_node *node)
-{
- struct rb_node **p = &root->rb_node;
- struct rb_node *parent = NULL;
- struct tree_entry *entry;
-
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct tree_entry, rb_node);
-
- if (bytenr < entry->bytenr)
- p = &(*p)->rb_left;
- else if (bytenr > entry->bytenr)
- p = &(*p)->rb_right;
- else
- return parent;
+ if (node->level == 0 ||
+ in_range(node->bytenr, rc->block_group->start,
+ rc->block_group->length)) {
+ blocksize = rc->extent_root->fs_info->nodesize;
+ set_extent_bits(&rc->processed_blocks, node->bytenr,
+ node->bytenr + blocksize - 1, EXTENT_DIRTY);
}
-
- rb_link_node(node, parent, p);
- rb_insert_color(node, root);
- return NULL;
+ node->processed = 1;
}
-static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
-{
- struct rb_node *n = root->rb_node;
- struct tree_entry *entry;
-
- while (n) {
- entry = rb_entry(n, struct tree_entry, rb_node);
- if (bytenr < entry->bytenr)
- n = n->rb_left;
- else if (bytenr > entry->bytenr)
- n = n->rb_right;
- else
- return n;
- }
- return NULL;
-}
-
-static void backref_tree_panic(struct rb_node *rb_node, int errno, u64 bytenr)
+static void mapping_tree_init(struct mapping_tree *tree)
{
-
- struct btrfs_fs_info *fs_info = NULL;
- struct backref_node *bnode = rb_entry(rb_node, struct backref_node,
- rb_node);
- if (bnode->root)
- fs_info = bnode->root->fs_info;
- btrfs_panic(fs_info, errno,
- "Inconsistency in backref cache found at offset %llu",
- bytenr);
+ tree->rb_root = RB_ROOT;
+ spin_lock_init(&tree->lock);
}
/*
* walk up backref nodes until we reach a node that presents a tree root
*/
-static struct backref_node *walk_up_backref(struct backref_node *node,
- struct backref_edge *edges[],
- int *index)
+static struct btrfs_backref_node *walk_up_backref(
+ struct btrfs_backref_node *node,
+ struct btrfs_backref_edge *edges[], int *index)
{
- struct backref_edge *edge;
+ struct btrfs_backref_edge *edge;
int idx = *index;
while (!list_empty(&node->upper)) {
edge = list_entry(node->upper.next,
- struct backref_edge, list[LOWER]);
+ struct btrfs_backref_edge, list[LOWER]);
edges[idx++] = edge;
node = edge->node[UPPER];
}
@@ -408,11 +199,11 @@ static struct backref_node *walk_up_backref(struct backref_node *node,
/*
* walk down backref nodes to find start of next reference path
*/
-static struct backref_node *walk_down_backref(struct backref_edge *edges[],
- int *index)
+static struct btrfs_backref_node *walk_down_backref(
+ struct btrfs_backref_edge *edges[], int *index)
{
- struct backref_edge *edge;
- struct backref_node *lower;
+ struct btrfs_backref_edge *edge;
+ struct btrfs_backref_node *lower;
int idx = *index;
while (idx > 0) {
@@ -423,7 +214,7 @@ static struct backref_node *walk_down_backref(struct backref_edge *edges[],
continue;
}
edge = list_entry(edge->list[LOWER].next,
- struct backref_edge, list[LOWER]);
+ struct btrfs_backref_edge, list[LOWER]);
edges[idx - 1] = edge;
*index = idx;
return edge->node[UPPER];
@@ -432,95 +223,24 @@ static struct backref_node *walk_down_backref(struct backref_edge *edges[],
return NULL;
}
-static void unlock_node_buffer(struct backref_node *node)
-{
- if (node->locked) {
- btrfs_tree_unlock(node->eb);
- node->locked = 0;
- }
-}
-
-static void drop_node_buffer(struct backref_node *node)
-{
- if (node->eb) {
- unlock_node_buffer(node);
- free_extent_buffer(node->eb);
- node->eb = NULL;
- }
-}
-
-static void drop_backref_node(struct backref_cache *tree,
- struct backref_node *node)
-{
- BUG_ON(!list_empty(&node->upper));
-
- drop_node_buffer(node);
- list_del(&node->list);
- list_del(&node->lower);
- if (!RB_EMPTY_NODE(&node->rb_node))
- rb_erase(&node->rb_node, &tree->rb_root);
- free_backref_node(tree, node);
-}
-
-/*
- * remove a backref node from the backref cache
- */
-static void remove_backref_node(struct backref_cache *cache,
- struct backref_node *node)
-{
- struct backref_node *upper;
- struct backref_edge *edge;
-
- if (!node)
- return;
-
- BUG_ON(!node->lowest && !node->detached);
- while (!list_empty(&node->upper)) {
- edge = list_entry(node->upper.next, struct backref_edge,
- list[LOWER]);
- upper = edge->node[UPPER];
- list_del(&edge->list[LOWER]);
- list_del(&edge->list[UPPER]);
- free_backref_edge(cache, edge);
-
- if (RB_EMPTY_NODE(&upper->rb_node)) {
- BUG_ON(!list_empty(&node->upper));
- drop_backref_node(cache, node);
- node = upper;
- node->lowest = 1;
- continue;
- }
- /*
- * add the node to leaf node list if no other
- * child block cached.
- */
- if (list_empty(&upper->lower)) {
- list_add_tail(&upper->lower, &cache->leaves);
- upper->lowest = 1;
- }
- }
-
- drop_backref_node(cache, node);
-}
-
-static void update_backref_node(struct backref_cache *cache,
- struct backref_node *node, u64 bytenr)
+static void update_backref_node(struct btrfs_backref_cache *cache,
+ struct btrfs_backref_node *node, u64 bytenr)
{
struct rb_node *rb_node;
rb_erase(&node->rb_node, &cache->rb_root);
node->bytenr = bytenr;
- rb_node = tree_insert(&cache->rb_root, node->bytenr, &node->rb_node);
+ rb_node = rb_simple_insert(&cache->rb_root, node->bytenr, &node->rb_node);
if (rb_node)
- backref_tree_panic(rb_node, -EEXIST, bytenr);
+ btrfs_backref_panic(cache->fs_info, bytenr, -EEXIST);
}
/*
* update backref cache after a transaction commit
*/
static int update_backref_cache(struct btrfs_trans_handle *trans,
- struct backref_cache *cache)
+ struct btrfs_backref_cache *cache)
{
- struct backref_node *node;
+ struct btrfs_backref_node *node;
int level = 0;
if (cache->last_trans == 0) {
@@ -538,13 +258,13 @@ static int update_backref_cache(struct btrfs_trans_handle *trans,
*/
while (!list_empty(&cache->detached)) {
node = list_entry(cache->detached.next,
- struct backref_node, list);
- remove_backref_node(cache, node);
+ struct btrfs_backref_node, list);
+ btrfs_backref_cleanup_node(cache, node);
}
while (!list_empty(&cache->changed)) {
node = list_entry(cache->changed.next,
- struct backref_node, list);
+ struct btrfs_backref_node, list);
list_del_init(&node->list);
BUG_ON(node->pending);
update_backref_node(cache, node, node->new_bytenr);
@@ -585,7 +305,8 @@ static bool reloc_root_is_dead(struct btrfs_root *root)
*
* Reloc tree after swap is considered dead, thus not considered as valid.
* This is enough for most callers, as they don't distinguish dead reloc root
- * from no reloc root. But should_ignore_root() below is a special case.
+ * from no reloc root. But btrfs_should_ignore_reloc_root() below is a
+ * special case.
*/
static bool have_reloc_root(struct btrfs_root *root)
{
@@ -596,11 +317,11 @@ static bool have_reloc_root(struct btrfs_root *root)
return true;
}
-static int should_ignore_root(struct btrfs_root *root)
+int btrfs_should_ignore_reloc_root(struct btrfs_root *root)
{
struct btrfs_root *reloc_root;
- if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
+ if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
return 0;
/* This root has been merged with its reloc tree, we can ignore it */
@@ -622,18 +343,20 @@ static int should_ignore_root(struct btrfs_root *root)
*/
return 1;
}
+
/*
* find reloc tree by address of tree root
*/
-static struct btrfs_root *find_reloc_root(struct reloc_control *rc,
- u64 bytenr)
+struct btrfs_root *find_reloc_root(struct btrfs_fs_info *fs_info, u64 bytenr)
{
+ struct reloc_control *rc = fs_info->reloc_ctl;
struct rb_node *rb_node;
struct mapping_node *node;
struct btrfs_root *root = NULL;
+ ASSERT(rc);
spin_lock(&rc->reloc_root_tree.lock);
- rb_node = tree_search(&rc->reloc_root_tree.rb_root, bytenr);
+ rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root, bytenr);
if (rb_node) {
node = rb_entry(rb_node, struct mapping_node, rb_node);
root = (struct btrfs_root *)node->data;
@@ -642,594 +365,165 @@ static struct btrfs_root *find_reloc_root(struct reloc_control *rc,
return btrfs_grab_root(root);
}
-static struct btrfs_root *read_fs_root(struct btrfs_fs_info *fs_info,
- u64 root_objectid)
+/*
+ * For useless nodes, do two major cleanups:
+ *
+ * - Clean up the child edges and nodes
+ *   If a child node is also an orphan (has no parent) during cleanup, the
+ *   child node will be cleaned up as well.
+ *
+ * - Free up leaves (level 0), keeping non-leaf nodes detached
+ *   Non-leaf nodes stay cached as "detached".
+ *
+ * Return false if @node is not in the @useless_node list.
+ * Return true if @node is in the @useless_node list.
+ */
+static bool handle_useless_nodes(struct reloc_control *rc,
+ struct btrfs_backref_node *node)
{
- struct btrfs_key key;
+ struct btrfs_backref_cache *cache = &rc->backref_cache;
+ struct list_head *useless_node = &cache->useless_node;
+ bool ret = false;
- key.objectid = root_objectid;
- key.type = BTRFS_ROOT_ITEM_KEY;
- key.offset = (u64)-1;
+ while (!list_empty(useless_node)) {
+ struct btrfs_backref_node *cur;
- return btrfs_get_fs_root(fs_info, &key, false);
-}
+ cur = list_first_entry(useless_node, struct btrfs_backref_node,
+ list);
+ list_del_init(&cur->list);
-static noinline_for_stack
-int find_inline_backref(struct extent_buffer *leaf, int slot,
- unsigned long *ptr, unsigned long *end)
-{
- struct btrfs_key key;
- struct btrfs_extent_item *ei;
- struct btrfs_tree_block_info *bi;
- u32 item_size;
+ /* Only tree root nodes can be added to @useless_node */
+ ASSERT(list_empty(&cur->upper));
- btrfs_item_key_to_cpu(leaf, &key, slot);
+ if (cur == node)
+ ret = true;
- item_size = btrfs_item_size_nr(leaf, slot);
- if (item_size < sizeof(*ei)) {
- btrfs_print_v0_err(leaf->fs_info);
- btrfs_handle_fs_error(leaf->fs_info, -EINVAL, NULL);
- return 1;
- }
- ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
- WARN_ON(!(btrfs_extent_flags(leaf, ei) &
- BTRFS_EXTENT_FLAG_TREE_BLOCK));
+ /* The node is the lowest node */
+ if (cur->lowest) {
+ list_del_init(&cur->lower);
+ cur->lowest = 0;
+ }
- if (key.type == BTRFS_EXTENT_ITEM_KEY &&
- item_size <= sizeof(*ei) + sizeof(*bi)) {
- WARN_ON(item_size < sizeof(*ei) + sizeof(*bi));
- return 1;
- }
- if (key.type == BTRFS_METADATA_ITEM_KEY &&
- item_size <= sizeof(*ei)) {
- WARN_ON(item_size < sizeof(*ei));
- return 1;
- }
+ /* Cleanup the lower edges */
+ while (!list_empty(&cur->lower)) {
+ struct btrfs_backref_edge *edge;
+ struct btrfs_backref_node *lower;
- if (key.type == BTRFS_EXTENT_ITEM_KEY) {
- bi = (struct btrfs_tree_block_info *)(ei + 1);
- *ptr = (unsigned long)(bi + 1);
- } else {
- *ptr = (unsigned long)(ei + 1);
+ edge = list_entry(cur->lower.next,
+ struct btrfs_backref_edge, list[UPPER]);
+ list_del(&edge->list[UPPER]);
+ list_del(&edge->list[LOWER]);
+ lower = edge->node[LOWER];
+ btrfs_backref_free_edge(cache, edge);
+
+ /* Child node is also orphan, queue for cleanup */
+ if (list_empty(&lower->upper))
+ list_add(&lower->list, useless_node);
+ }
+ /* Mark this block processed for relocation */
+ mark_block_processed(rc, cur);
+
+ /*
+ * Backref nodes for tree leaves are deleted from the cache.
+ * Backref nodes for upper level tree blocks are left in the
+ * cache to avoid unnecessary backref lookup.
+ */
+ if (cur->level > 0) {
+ list_add(&cur->list, &cache->detached);
+ cur->detached = 1;
+ } else {
+ rb_erase(&cur->rb_node, &cache->rb_root);
+ btrfs_backref_free_node(cache, cur);
+ }
}
- *end = (unsigned long)ei + item_size;
- return 0;
+ return ret;
}
/*
- * build backref tree for a given tree block. root of the backref tree
- * corresponds the tree block, leaves of the backref tree correspond
- * roots of b-trees that reference the tree block.
+ * Build backref tree for a given tree block. The root of the backref tree
+ * corresponds to the tree block, and the leaves of the backref tree
+ * correspond to roots of b-trees that reference the tree block.
*
- * the basic idea of this function is check backrefs of a given block
- * to find upper level blocks that reference the block, and then check
- * backrefs of these upper level blocks recursively. the recursion stop
- * when tree root is reached or backrefs for the block is cached.
+ * The basic idea of this function is to check backrefs of a given block to
+ * find upper level blocks that reference the block, and then check backrefs
+ * of these upper level blocks recursively. The recursion stops when the tree
+ * root is reached or the backrefs for the block are cached.
*
- * NOTE: if we find backrefs for a block are cached, we know backrefs
- * for all upper level blocks that directly/indirectly reference the
- * block are also cached.
+ * NOTE: if we find that backrefs for a block are cached, we know backrefs for
+ * all upper level blocks that directly/indirectly reference the block are also
+ * cached.
*/
-static noinline_for_stack
-struct backref_node *build_backref_tree(struct reloc_control *rc,
- struct btrfs_key *node_key,
- int level, u64 bytenr)
+static noinline_for_stack struct btrfs_backref_node *build_backref_tree(
+ struct reloc_control *rc, struct btrfs_key *node_key,
+ int level, u64 bytenr)
{
- struct backref_cache *cache = &rc->backref_cache;
- struct btrfs_path *path1; /* For searching extent root */
- struct btrfs_path *path2; /* For searching parent of TREE_BLOCK_REF */
- struct extent_buffer *eb;
- struct btrfs_root *root;
- struct backref_node *cur;
- struct backref_node *upper;
- struct backref_node *lower;
- struct backref_node *node = NULL;
- struct backref_node *exist = NULL;
- struct backref_edge *edge;
- struct rb_node *rb_node;
- struct btrfs_key key;
- unsigned long end;
- unsigned long ptr;
- LIST_HEAD(list); /* Pending edge list, upper node needs to be checked */
- LIST_HEAD(useless);
- int cowonly;
+ struct btrfs_backref_iter *iter;
+ struct btrfs_backref_cache *cache = &rc->backref_cache;
+ /* For searching parent of TREE_BLOCK_REF */
+ struct btrfs_path *path;
+ struct btrfs_backref_node *cur;
+ struct btrfs_backref_node *node = NULL;
+ struct btrfs_backref_edge *edge;
int ret;
int err = 0;
- bool need_check = true;
- path1 = btrfs_alloc_path();
- path2 = btrfs_alloc_path();
- if (!path1 || !path2) {
+ iter = btrfs_backref_iter_alloc(rc->extent_root->fs_info, GFP_NOFS);
+ if (!iter)
+ return ERR_PTR(-ENOMEM);
+ path = btrfs_alloc_path();
+ if (!path) {
err = -ENOMEM;
goto out;
}
- node = alloc_backref_node(cache);
+ node = btrfs_backref_alloc_node(cache, bytenr, level);
if (!node) {
err = -ENOMEM;
goto out;
}
- node->bytenr = bytenr;
- node->level = level;
node->lowest = 1;
cur = node;
-again:
- end = 0;
- ptr = 0;
- key.objectid = cur->bytenr;
- key.type = BTRFS_METADATA_ITEM_KEY;
- key.offset = (u64)-1;
-
- path1->search_commit_root = 1;
- path1->skip_locking = 1;
- ret = btrfs_search_slot(NULL, rc->extent_root, &key, path1,
- 0, 0);
- if (ret < 0) {
- err = ret;
- goto out;
- }
- ASSERT(ret);
- ASSERT(path1->slots[0]);
-
- path1->slots[0]--;
- WARN_ON(cur->checked);
- if (!list_empty(&cur->upper)) {
- /*
- * the backref was added previously when processing
- * backref of type BTRFS_TREE_BLOCK_REF_KEY
- */
- ASSERT(list_is_singular(&cur->upper));
- edge = list_entry(cur->upper.next, struct backref_edge,
- list[LOWER]);
- ASSERT(list_empty(&edge->list[UPPER]));
- exist = edge->node[UPPER];
- /*
- * add the upper level block to pending list if we need
- * check its backrefs
- */
- if (!exist->checked)
- list_add_tail(&edge->list[UPPER], &list);
- } else {
- exist = NULL;
- }
-
- while (1) {
- cond_resched();
- eb = path1->nodes[0];
-
- if (ptr >= end) {
- if (path1->slots[0] >= btrfs_header_nritems(eb)) {
- ret = btrfs_next_leaf(rc->extent_root, path1);
- if (ret < 0) {
- err = ret;
- goto out;
- }
- if (ret > 0)
- break;
- eb = path1->nodes[0];
- }
-
- btrfs_item_key_to_cpu(eb, &key, path1->slots[0]);
- if (key.objectid != cur->bytenr) {
- WARN_ON(exist);
- break;
- }
-
- if (key.type == BTRFS_EXTENT_ITEM_KEY ||
- key.type == BTRFS_METADATA_ITEM_KEY) {
- ret = find_inline_backref(eb, path1->slots[0],
- &ptr, &end);
- if (ret)
- goto next;
- }
- }
-
- if (ptr < end) {
- /* update key for inline back ref */
- struct btrfs_extent_inline_ref *iref;
- int type;
- iref = (struct btrfs_extent_inline_ref *)ptr;
- type = btrfs_get_extent_inline_ref_type(eb, iref,
- BTRFS_REF_TYPE_BLOCK);
- if (type == BTRFS_REF_TYPE_INVALID) {
- err = -EUCLEAN;
- goto out;
- }
- key.type = type;
- key.offset = btrfs_extent_inline_ref_offset(eb, iref);
-
- WARN_ON(key.type != BTRFS_TREE_BLOCK_REF_KEY &&
- key.type != BTRFS_SHARED_BLOCK_REF_KEY);
- }
-
- /*
- * Parent node found and matches current inline ref, no need to
- * rebuild this node for this inline ref.
- */
- if (exist &&
- ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
- exist->owner == key.offset) ||
- (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
- exist->bytenr == key.offset))) {
- exist = NULL;
- goto next;
- }
-
- /* SHARED_BLOCK_REF means key.offset is the parent bytenr */
- if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
- if (key.objectid == key.offset) {
- /*
- * Only root blocks of reloc trees use backref
- * pointing to itself.
- */
- root = find_reloc_root(rc, cur->bytenr);
- ASSERT(root);
- cur->root = root;
- break;
- }
-
- edge = alloc_backref_edge(cache);
- if (!edge) {
- err = -ENOMEM;
- goto out;
- }
- rb_node = tree_search(&cache->rb_root, key.offset);
- if (!rb_node) {
- upper = alloc_backref_node(cache);
- if (!upper) {
- free_backref_edge(cache, edge);
- err = -ENOMEM;
- goto out;
- }
- upper->bytenr = key.offset;
- upper->level = cur->level + 1;
- /*
- * backrefs for the upper level block isn't
- * cached, add the block to pending list
- */
- list_add_tail(&edge->list[UPPER], &list);
- } else {
- upper = rb_entry(rb_node, struct backref_node,
- rb_node);
- ASSERT(upper->checked);
- INIT_LIST_HEAD(&edge->list[UPPER]);
- }
- list_add_tail(&edge->list[LOWER], &cur->upper);
- edge->node[LOWER] = cur;
- edge->node[UPPER] = upper;
-
- goto next;
- } else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
- err = -EINVAL;
- btrfs_print_v0_err(rc->extent_root->fs_info);
- btrfs_handle_fs_error(rc->extent_root->fs_info, err,
- NULL);
- goto out;
- } else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
- goto next;
- }
-
- /*
- * key.type == BTRFS_TREE_BLOCK_REF_KEY, inline ref offset
- * means the root objectid. We need to search the tree to get
- * its parent bytenr.
- */
- root = read_fs_root(rc->extent_root->fs_info, key.offset);
- if (IS_ERR(root)) {
- err = PTR_ERR(root);
- goto out;
- }
-
- if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
- cur->cowonly = 1;
-
- if (btrfs_root_level(&root->root_item) == cur->level) {
- /* tree root */
- ASSERT(btrfs_root_bytenr(&root->root_item) ==
- cur->bytenr);
- if (should_ignore_root(root)) {
- btrfs_put_root(root);
- list_add(&cur->list, &useless);
- } else {
- cur->root = root;
- }
- break;
- }
-
- level = cur->level + 1;
-
- /* Search the tree to find parent blocks referring the block. */
- path2->search_commit_root = 1;
- path2->skip_locking = 1;
- path2->lowest_level = level;
- ret = btrfs_search_slot(NULL, root, node_key, path2, 0, 0);
- path2->lowest_level = 0;
+ /* Breadth-first search to build backref cache */
+ do {
+ ret = btrfs_backref_add_tree_node(cache, path, iter, node_key,
+ cur);
if (ret < 0) {
- btrfs_put_root(root);
err = ret;
goto out;
}
- if (ret > 0 && path2->slots[level] > 0)
- path2->slots[level]--;
-
- eb = path2->nodes[level];
- if (btrfs_node_blockptr(eb, path2->slots[level]) !=
- cur->bytenr) {
- btrfs_err(root->fs_info,
- "couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
- cur->bytenr, level - 1,
- root->root_key.objectid,
- node_key->objectid, node_key->type,
- node_key->offset);
- btrfs_put_root(root);
- err = -ENOENT;
- goto out;
- }
- lower = cur;
- need_check = true;
-
- /* Add all nodes and edges in the path */
- for (; level < BTRFS_MAX_LEVEL; level++) {
- if (!path2->nodes[level]) {
- ASSERT(btrfs_root_bytenr(&root->root_item) ==
- lower->bytenr);
- if (should_ignore_root(root)) {
- btrfs_put_root(root);
- list_add(&lower->list, &useless);
- } else {
- lower->root = root;
- }
- break;
- }
-
- edge = alloc_backref_edge(cache);
- if (!edge) {
- btrfs_put_root(root);
- err = -ENOMEM;
- goto out;
- }
-
- eb = path2->nodes[level];
- rb_node = tree_search(&cache->rb_root, eb->start);
- if (!rb_node) {
- upper = alloc_backref_node(cache);
- if (!upper) {
- btrfs_put_root(root);
- free_backref_edge(cache, edge);
- err = -ENOMEM;
- goto out;
- }
- upper->bytenr = eb->start;
- upper->owner = btrfs_header_owner(eb);
- upper->level = lower->level + 1;
- if (!test_bit(BTRFS_ROOT_REF_COWS,
- &root->state))
- upper->cowonly = 1;
-
- /*
- * if we know the block isn't shared
- * we can void checking its backrefs.
- */
- if (btrfs_block_can_be_shared(root, eb))
- upper->checked = 0;
- else
- upper->checked = 1;
-
- /*
- * add the block to pending list if we
- * need check its backrefs, we only do this once
- * while walking up a tree as we will catch
- * anything else later on.
- */
- if (!upper->checked && need_check) {
- need_check = false;
- list_add_tail(&edge->list[UPPER],
- &list);
- } else {
- if (upper->checked)
- need_check = true;
- INIT_LIST_HEAD(&edge->list[UPPER]);
- }
- } else {
- upper = rb_entry(rb_node, struct backref_node,
- rb_node);
- ASSERT(upper->checked);
- INIT_LIST_HEAD(&edge->list[UPPER]);
- if (!upper->owner)
- upper->owner = btrfs_header_owner(eb);
- }
- list_add_tail(&edge->list[LOWER], &lower->upper);
- edge->node[LOWER] = lower;
- edge->node[UPPER] = upper;
-
- if (rb_node) {
- btrfs_put_root(root);
- break;
- }
- lower = upper;
- upper = NULL;
- }
- btrfs_release_path(path2);
-next:
- if (ptr < end) {
- ptr += btrfs_extent_inline_ref_size(key.type);
- if (ptr >= end) {
- WARN_ON(ptr > end);
- ptr = 0;
- end = 0;
- }
- }
- if (ptr >= end)
- path1->slots[0]++;
- }
- btrfs_release_path(path1);
-
- cur->checked = 1;
- WARN_ON(exist);
-
- /* the pending list isn't empty, take the first block to process */
- if (!list_empty(&list)) {
- edge = list_entry(list.next, struct backref_edge, list[UPPER]);
- list_del_init(&edge->list[UPPER]);
- cur = edge->node[UPPER];
- goto again;
- }
-
- /*
- * everything goes well, connect backref nodes and insert backref nodes
- * into the cache.
- */
- ASSERT(node->checked);
- cowonly = node->cowonly;
- if (!cowonly) {
- rb_node = tree_insert(&cache->rb_root, node->bytenr,
- &node->rb_node);
- if (rb_node)
- backref_tree_panic(rb_node, -EEXIST, node->bytenr);
- list_add_tail(&node->lower, &cache->leaves);
- }
-
- list_for_each_entry(edge, &node->upper, list[LOWER])
- list_add_tail(&edge->list[UPPER], &list);
-
- while (!list_empty(&list)) {
- edge = list_entry(list.next, struct backref_edge, list[UPPER]);
- list_del_init(&edge->list[UPPER]);
- upper = edge->node[UPPER];
- if (upper->detached) {
- list_del(&edge->list[LOWER]);
- lower = edge->node[LOWER];
- free_backref_edge(cache, edge);
- if (list_empty(&lower->upper))
- list_add(&lower->list, &useless);
- continue;
- }
-
- if (!RB_EMPTY_NODE(&upper->rb_node)) {
- if (upper->lowest) {
- list_del_init(&upper->lower);
- upper->lowest = 0;
- }
-
- list_add_tail(&edge->list[UPPER], &upper->lower);
- continue;
- }
-
- if (!upper->checked) {
- /*
- * Still want to blow up for developers since this is a
- * logic bug.
- */
- ASSERT(0);
- err = -EINVAL;
- goto out;
- }
- if (cowonly != upper->cowonly) {
- ASSERT(0);
- err = -EINVAL;
- goto out;
- }
-
- if (!cowonly) {
- rb_node = tree_insert(&cache->rb_root, upper->bytenr,
- &upper->rb_node);
- if (rb_node)
- backref_tree_panic(rb_node, -EEXIST,
- upper->bytenr);
+ edge = list_first_entry_or_null(&cache->pending_edge,
+ struct btrfs_backref_edge, list[UPPER]);
+ /*
+ * If the pending list isn't empty, take the first block to
+ * process.
+ */
+ if (edge) {
+ list_del_init(&edge->list[UPPER]);
+ cur = edge->node[UPPER];
}
+ } while (edge);
- list_add_tail(&edge->list[UPPER], &upper->lower);
-
- list_for_each_entry(edge, &upper->upper, list[LOWER])
- list_add_tail(&edge->list[UPPER], &list);
+ /* Finish the upper linkage of newly added edges/nodes */
+ ret = btrfs_backref_finish_upper_links(cache, node);
+ if (ret < 0) {
+ err = ret;
+ goto out;
}
- /*
- * process useless backref nodes. backref nodes for tree leaves
- * are deleted from the cache. backref nodes for upper level
- * tree blocks are left in the cache to avoid unnecessary backref
- * lookup.
- */
- while (!list_empty(&useless)) {
- upper = list_entry(useless.next, struct backref_node, list);
- list_del_init(&upper->list);
- ASSERT(list_empty(&upper->upper));
- if (upper == node)
- node = NULL;
- if (upper->lowest) {
- list_del_init(&upper->lower);
- upper->lowest = 0;
- }
- while (!list_empty(&upper->lower)) {
- edge = list_entry(upper->lower.next,
- struct backref_edge, list[UPPER]);
- list_del(&edge->list[UPPER]);
- list_del(&edge->list[LOWER]);
- lower = edge->node[LOWER];
- free_backref_edge(cache, edge);
- if (list_empty(&lower->upper))
- list_add(&lower->list, &useless);
- }
- __mark_block_processed(rc, upper);
- if (upper->level > 0) {
- list_add(&upper->list, &cache->detached);
- upper->detached = 1;
- } else {
- rb_erase(&upper->rb_node, &cache->rb_root);
- free_backref_node(cache, upper);
- }
- }
+ if (handle_useless_nodes(rc, node))
+ node = NULL;
out:
- btrfs_free_path(path1);
- btrfs_free_path(path2);
+ btrfs_backref_iter_free(iter);
+ btrfs_free_path(path);
if (err) {
- while (!list_empty(&useless)) {
- lower = list_entry(useless.next,
- struct backref_node, list);
- list_del_init(&lower->list);
- }
- while (!list_empty(&list)) {
- edge = list_first_entry(&list, struct backref_edge,
- list[UPPER]);
- list_del(&edge->list[UPPER]);
- list_del(&edge->list[LOWER]);
- lower = edge->node[LOWER];
- upper = edge->node[UPPER];
- free_backref_edge(cache, edge);
-
- /*
- * Lower is no longer linked to any upper backref nodes
- * and isn't in the cache, we can free it ourselves.
- */
- if (list_empty(&lower->upper) &&
- RB_EMPTY_NODE(&lower->rb_node))
- list_add(&lower->list, &useless);
-
- if (!RB_EMPTY_NODE(&upper->rb_node))
- continue;
-
- /* Add this guy's upper edges to the list to process */
- list_for_each_entry(edge, &upper->upper, list[LOWER])
- list_add_tail(&edge->list[UPPER], &list);
- if (list_empty(&upper->upper))
- list_add(&upper->list, &useless);
- }
-
- while (!list_empty(&useless)) {
- lower = list_entry(useless.next,
- struct backref_node, list);
- list_del_init(&lower->list);
- if (lower == node)
- node = NULL;
- free_backref_node(cache, lower);
- }
-
- remove_backref_node(cache, node);
+ btrfs_backref_error_cleanup(cache, node);
return ERR_PTR(err);
}
ASSERT(!node || !node->detached);
+ ASSERT(list_empty(&cache->useless_node) &&
+ list_empty(&cache->pending_edge));
return node;
}
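/*
 * A side note on the iteration idiom adopted above, sketched from
 * include/linux/list.h: list_first_entry_or_null() folds the old
 * list_empty() + list_entry() pair into a single call, which is what lets
 * the rewritten loop terminate on a plain NULL test in "while (edge)".
 */
#define list_first_entry_or_null(ptr, type, member) ({ \
	struct list_head *head__ = (ptr); \
	struct list_head *pos__ = READ_ONCE(head__->next); \
	pos__ != head__ ? list_entry(pos__, type, member) : NULL; \
})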
@@ -1244,19 +538,19 @@ static int clone_backref_node(struct btrfs_trans_handle *trans,
struct btrfs_root *dest)
{
struct btrfs_root *reloc_root = src->reloc_root;
- struct backref_cache *cache = &rc->backref_cache;
- struct backref_node *node = NULL;
- struct backref_node *new_node;
- struct backref_edge *edge;
- struct backref_edge *new_edge;
+ struct btrfs_backref_cache *cache = &rc->backref_cache;
+ struct btrfs_backref_node *node = NULL;
+ struct btrfs_backref_node *new_node;
+ struct btrfs_backref_edge *edge;
+ struct btrfs_backref_edge *new_edge;
struct rb_node *rb_node;
if (cache->last_trans > 0)
update_backref_cache(trans, cache);
- rb_node = tree_search(&cache->rb_root, src->commit_root->start);
+ rb_node = rb_simple_search(&cache->rb_root, src->commit_root->start);
if (rb_node) {
- node = rb_entry(rb_node, struct backref_node, rb_node);
+ node = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
if (node->detached)
node = NULL;
else
@@ -1264,10 +558,10 @@ static int clone_backref_node(struct btrfs_trans_handle *trans,
}
if (!node) {
- rb_node = tree_search(&cache->rb_root,
- reloc_root->commit_root->start);
+ rb_node = rb_simple_search(&cache->rb_root,
+ reloc_root->commit_root->start);
if (rb_node) {
- node = rb_entry(rb_node, struct backref_node,
+ node = rb_entry(rb_node, struct btrfs_backref_node,
rb_node);
BUG_ON(node->detached);
}
@@ -1276,12 +570,11 @@ static int clone_backref_node(struct btrfs_trans_handle *trans,
if (!node)
return 0;
- new_node = alloc_backref_node(cache);
+ new_node = btrfs_backref_alloc_node(cache, dest->node->start,
+ node->level);
if (!new_node)
return -ENOMEM;
- new_node->bytenr = dest->node->start;
- new_node->level = node->level;
new_node->lowest = node->lowest;
new_node->checked = 1;
new_node->root = btrfs_grab_root(dest);
@@ -1289,23 +582,21 @@ static int clone_backref_node(struct btrfs_trans_handle *trans,
if (!node->lowest) {
list_for_each_entry(edge, &node->lower, list[UPPER]) {
- new_edge = alloc_backref_edge(cache);
+ new_edge = btrfs_backref_alloc_edge(cache);
if (!new_edge)
goto fail;
- new_edge->node[UPPER] = new_node;
- new_edge->node[LOWER] = edge->node[LOWER];
- list_add_tail(&new_edge->list[UPPER],
- &new_node->lower);
+ btrfs_backref_link_edge(new_edge, edge->node[LOWER],
+ new_node, LINK_UPPER);
}
} else {
list_add_tail(&new_node->lower, &cache->leaves);
}
- rb_node = tree_insert(&cache->rb_root, new_node->bytenr,
- &new_node->rb_node);
+ rb_node = rb_simple_insert(&cache->rb_root, new_node->bytenr,
+ &new_node->rb_node);
if (rb_node)
- backref_tree_panic(rb_node, -EEXIST, new_node->bytenr);
+ btrfs_backref_panic(trans->fs_info, new_node->bytenr, -EEXIST);
if (!new_node->lowest) {
list_for_each_entry(new_edge, &new_node->lower, list[UPPER]) {
@@ -1317,11 +608,11 @@ static int clone_backref_node(struct btrfs_trans_handle *trans,
fail:
while (!list_empty(&new_node->lower)) {
new_edge = list_entry(new_node->lower.next,
- struct backref_edge, list[UPPER]);
+ struct btrfs_backref_edge, list[UPPER]);
list_del(&new_edge->list[UPPER]);
- free_backref_edge(cache, new_edge);
+ btrfs_backref_free_edge(cache, new_edge);
}
- free_backref_node(cache, new_node);
+ btrfs_backref_free_node(cache, new_node);
return -ENOMEM;
}
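/*
 * A hedged sketch of btrfs_backref_link_edge() as used in the hunk above,
 * assuming LINK_LOWER/LINK_UPPER are flags selecting which side of the edge
 * gets hooked into its node's list (the open-coded version it replaces
 * linked only the upper side):
 */
static inline void btrfs_backref_link_edge(struct btrfs_backref_edge *edge,
					   struct btrfs_backref_node *lower,
					   struct btrfs_backref_node *upper,
					   int link_which)
{
	ASSERT(upper && lower && upper->level == lower->level + 1);
	edge->node[LOWER] = lower;
	edge->node[UPPER] = upper;
	if (link_which & LINK_LOWER)
		list_add_tail(&edge->list[LOWER], &lower->upper);
	if (link_which & LINK_UPPER)
		list_add_tail(&edge->list[UPPER], &upper->lower);
}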
@@ -1343,8 +634,8 @@ static int __must_check __add_reloc_root(struct btrfs_root *root)
node->data = root;
spin_lock(&rc->reloc_root_tree.lock);
- rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
- node->bytenr, &node->rb_node);
+ rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root,
+ node->bytenr, &node->rb_node);
spin_unlock(&rc->reloc_root_tree.lock);
if (rb_node) {
btrfs_panic(fs_info, -EEXIST,
@@ -1370,8 +661,8 @@ static void __del_reloc_root(struct btrfs_root *root)
if (rc && root->node) {
spin_lock(&rc->reloc_root_tree.lock);
- rb_node = tree_search(&rc->reloc_root_tree.rb_root,
- root->commit_root->start);
+ rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root,
+ root->commit_root->start);
if (rb_node) {
node = rb_entry(rb_node, struct mapping_node, rb_node);
rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
@@ -1414,8 +705,8 @@ static int __update_reloc_root(struct btrfs_root *root)
struct reloc_control *rc = fs_info->reloc_ctl;
spin_lock(&rc->reloc_root_tree.lock);
- rb_node = tree_search(&rc->reloc_root_tree.rb_root,
- root->commit_root->start);
+ rb_node = rb_simple_search(&rc->reloc_root_tree.rb_root,
+ root->commit_root->start);
if (rb_node) {
node = rb_entry(rb_node, struct mapping_node, rb_node);
rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
@@ -1428,11 +719,11 @@ static int __update_reloc_root(struct btrfs_root *root)
spin_lock(&rc->reloc_root_tree.lock);
node->bytenr = root->node->start;
- rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
- node->bytenr, &node->rb_node);
+ rb_node = rb_simple_insert(&rc->reloc_root_tree.rb_root,
+ node->bytenr, &node->rb_node);
spin_unlock(&rc->reloc_root_tree.lock);
if (rb_node)
- backref_tree_panic(rb_node, -EEXIST, node->bytenr);
+ btrfs_backref_panic(fs_info, node->bytenr, -EEXIST);
return 0;
}
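/*
 * tree_search()/tree_insert() above become generic bytenr-keyed rbtree
 * helpers. A sketch of rb_simple_insert(), assuming a struct rb_simple_node
 * that embeds an rb_node followed by a u64 bytenr (fs/btrfs/misc.h in the
 * diffstat); it returns the colliding node, or NULL on success:
 */
static inline struct rb_node *rb_simple_insert(struct rb_root *root,
					       u64 bytenr,
					       struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct rb_simple_node *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct rb_simple_node, rb_node);

		if (bytenr < entry->bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->bytenr)
			p = &(*p)->rb_right;
		else
			return parent;	/* key already present */
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}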
@@ -1505,7 +796,7 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
reloc_root = btrfs_read_tree_root(fs_info->tree_root, &root_key);
BUG_ON(IS_ERR(reloc_root));
- set_bit(BTRFS_ROOT_REF_COWS, &reloc_root->state);
+ set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
reloc_root->last_trans = trans->transid;
return reloc_root;
}
@@ -1679,14 +970,6 @@ again:
return NULL;
}
-static int in_block_group(u64 bytenr, struct btrfs_block_group *block_group)
-{
- if (bytenr >= block_group->start &&
- bytenr < block_group->start + block_group->length)
- return 1;
- return 0;
-}
-
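/*
 * The helper above is dropped in favor of the generic in_range() check, a
 * sketch of which (equivalent to the removed function) is:
 */
#define in_range(b, first, len)	((b) >= (first) && (b) < (first) + (len))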
/*
* get new location of data
*/
@@ -1784,7 +1067,8 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
if (bytenr == 0)
continue;
- if (!in_block_group(bytenr, rc->block_group))
+ if (!in_range(bytenr, rc->block_group->start,
+ rc->block_group->length))
continue;
/*
@@ -1940,7 +1224,7 @@ again:
level = btrfs_header_level(parent);
BUG_ON(level < lowest_level);
- ret = btrfs_bin_search(parent, &key, level, &slot);
+ ret = btrfs_bin_search(parent, &key, &slot);
if (ret < 0)
break;
if (ret && slot > 0)
@@ -2560,7 +1844,8 @@ again:
struct btrfs_root, root_list);
list_del_init(&reloc_root->root_list);
- root = read_fs_root(fs_info, reloc_root->root_key.offset);
+ root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
+ false);
BUG_ON(IS_ERR(root));
BUG_ON(root->reloc_root != reloc_root);
@@ -2588,13 +1873,10 @@ again:
static noinline_for_stack
void free_reloc_roots(struct list_head *list)
{
- struct btrfs_root *reloc_root;
+ struct btrfs_root *reloc_root, *tmp;
- while (!list_empty(list)) {
- reloc_root = list_entry(list->next, struct btrfs_root,
- root_list);
+ list_for_each_entry_safe(reloc_root, tmp, list, root_list)
__del_reloc_root(reloc_root);
- }
}
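/*
 * __del_reloc_root() unlinks the entry being visited, so the rewrite above
 * needs the _safe iterator, which caches the next entry before the loop
 * body runs. Expansion sketched from include/linux/list.h:
 */
#define list_for_each_entry_safe(pos, n, head, member)			\
	for (pos = list_first_entry(head, typeof(*pos), member),	\
		n = list_next_entry(pos, member);			\
	     &pos->member != (head);					\
	     pos = n, n = list_next_entry(n, member))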
static noinline_for_stack
@@ -2624,12 +1906,11 @@ again:
reloc_root = list_entry(reloc_roots.next,
struct btrfs_root, root_list);
+ root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
+ false);
if (btrfs_root_refs(&reloc_root->root_item) > 0) {
- root = read_fs_root(fs_info,
- reloc_root->root_key.offset);
BUG_ON(IS_ERR(root));
BUG_ON(root->reloc_root != reloc_root);
-
ret = merge_reloc_root(rc, root);
btrfs_put_root(root);
if (ret) {
@@ -2639,6 +1920,16 @@ again:
goto out;
}
} else {
+ if (!IS_ERR(root)) {
+ if (root->reloc_root == reloc_root) {
+ root->reloc_root = NULL;
+ btrfs_put_root(reloc_root);
+ }
+ clear_bit(BTRFS_ROOT_DEAD_RELOC_TREE,
+ &root->state);
+ btrfs_put_root(root);
+ }
+
list_del_init(&reloc_root->root_list);
/* Don't forget to queue this reloc root for cleanup */
list_add_tail(&reloc_root->reloc_dirty_list,
@@ -2653,15 +1944,13 @@ again:
out:
if (ret) {
btrfs_handle_fs_error(fs_info, ret, NULL);
- if (!list_empty(&reloc_roots))
- free_reloc_roots(&reloc_roots);
+ free_reloc_roots(&reloc_roots);
/* new reloc root may be added */
mutex_lock(&fs_info->reloc_mutex);
list_splice_init(&rc->reloc_roots, &reloc_roots);
mutex_unlock(&fs_info->reloc_mutex);
- if (!list_empty(&reloc_roots))
- free_reloc_roots(&reloc_roots);
+ free_reloc_roots(&reloc_roots);
}
/*
@@ -2702,7 +1991,7 @@ static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans,
if (reloc_root->last_trans == trans->transid)
return 0;
- root = read_fs_root(fs_info, reloc_root->root_key.offset);
+ root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, false);
BUG_ON(IS_ERR(root));
BUG_ON(root->reloc_root != reloc_root);
ret = btrfs_record_root_in_trans(trans, root);
@@ -2714,10 +2003,10 @@ static int record_reloc_root_in_trans(struct btrfs_trans_handle *trans,
static noinline_for_stack
struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans,
struct reloc_control *rc,
- struct backref_node *node,
- struct backref_edge *edges[])
+ struct btrfs_backref_node *node,
+ struct btrfs_backref_edge *edges[])
{
- struct backref_node *next;
+ struct btrfs_backref_node *next;
struct btrfs_root *root;
int index = 0;
@@ -2727,7 +2016,7 @@ struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans,
next = walk_up_backref(next, edges, &index);
root = next->root;
BUG_ON(!root);
- BUG_ON(!test_bit(BTRFS_ROOT_REF_COWS, &root->state));
+ BUG_ON(!test_bit(BTRFS_ROOT_SHAREABLE, &root->state));
if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
record_reloc_root_in_trans(trans, root);
@@ -2746,7 +2035,7 @@ struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans,
ASSERT(next->root);
list_add_tail(&next->list,
&rc->backref_cache.changed);
- __mark_block_processed(rc, next);
+ mark_block_processed(rc, next);
break;
}
@@ -2771,18 +2060,21 @@ struct btrfs_root *select_reloc_root(struct btrfs_trans_handle *trans,
}
/*
- * select a tree root for relocation. return NULL if the block
- * is reference counted. we should use do_relocation() in this
- * case. return a tree root pointer if the block isn't reference
- * counted. return -ENOENT if the block is root of reloc tree.
+ * Select a tree root for relocation.
+ *
+ * Return NULL if the block is not shareable. We should use do_relocation() in
+ * this case.
+ *
+ * Return a tree root pointer if the block is shareable.
+ * Return -ENOENT if the block is the root of a reloc tree.
*/
static noinline_for_stack
-struct btrfs_root *select_one_root(struct backref_node *node)
+struct btrfs_root *select_one_root(struct btrfs_backref_node *node)
{
- struct backref_node *next;
+ struct btrfs_backref_node *next;
struct btrfs_root *root;
struct btrfs_root *fs_root = NULL;
- struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
+ struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
int index = 0;
next = node;
@@ -2792,8 +2084,8 @@ struct btrfs_root *select_one_root(struct backref_node *node)
root = next->root;
BUG_ON(!root);
- /* no other choice for non-references counted tree */
- if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
+ /* No other choice for non-shareable tree */
+ if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
return root;
if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID)
@@ -2814,12 +2106,12 @@ struct btrfs_root *select_one_root(struct backref_node *node)
static noinline_for_stack
u64 calcu_metadata_size(struct reloc_control *rc,
- struct backref_node *node, int reserve)
+ struct btrfs_backref_node *node, int reserve)
{
struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
- struct backref_node *next = node;
- struct backref_edge *edge;
- struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
+ struct btrfs_backref_node *next = node;
+ struct btrfs_backref_edge *edge;
+ struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
u64 num_bytes = 0;
int index = 0;
@@ -2837,7 +2129,7 @@ u64 calcu_metadata_size(struct reloc_control *rc,
break;
edge = list_entry(next->upper.next,
- struct backref_edge, list[LOWER]);
+ struct btrfs_backref_edge, list[LOWER]);
edges[index++] = edge;
next = edge->node[UPPER];
}
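/*
 * The loop above mirrors walk_up_backref(), which climbs the backref tree
 * by always taking the first upper edge and records the path in edges[] so
 * the caller can walk back down. A hedged sketch of that helper:
 */
static struct btrfs_backref_node *walk_up_backref(
		struct btrfs_backref_node *node,
		struct btrfs_backref_edge *edges[], int *index)
{
	struct btrfs_backref_edge *edge;
	int idx = *index;

	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next,
				  struct btrfs_backref_edge, list[LOWER]);
		edges[idx++] = edge;
		node = edge->node[UPPER];
	}
	*index = idx;
	return node;
}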
@@ -2848,7 +2140,7 @@ u64 calcu_metadata_size(struct reloc_control *rc,
static int reserve_metadata_space(struct btrfs_trans_handle *trans,
struct reloc_control *rc,
- struct backref_node *node)
+ struct btrfs_backref_node *node)
{
struct btrfs_root *root = rc->extent_root;
struct btrfs_fs_info *fs_info = root->fs_info;
@@ -2896,14 +2188,14 @@ static int reserve_metadata_space(struct btrfs_trans_handle *trans,
*/
static int do_relocation(struct btrfs_trans_handle *trans,
struct reloc_control *rc,
- struct backref_node *node,
+ struct btrfs_backref_node *node,
struct btrfs_key *key,
struct btrfs_path *path, int lowest)
{
struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
- struct backref_node *upper;
- struct backref_edge *edge;
- struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
+ struct btrfs_backref_node *upper;
+ struct btrfs_backref_edge *edge;
+ struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
struct btrfs_root *root;
struct extent_buffer *eb;
u32 blocksize;
@@ -2929,8 +2221,7 @@ static int do_relocation(struct btrfs_trans_handle *trans,
if (upper->eb && !upper->locked) {
if (!lowest) {
- ret = btrfs_bin_search(upper->eb, key,
- upper->level, &slot);
+ ret = btrfs_bin_search(upper->eb, key, &slot);
if (ret < 0) {
err = ret;
goto next;
@@ -2940,7 +2231,7 @@ static int do_relocation(struct btrfs_trans_handle *trans,
if (node->eb->start == bytenr)
goto next;
}
- drop_node_buffer(upper);
+ btrfs_backref_drop_node_buffer(upper);
}
if (!upper->eb) {
@@ -2968,8 +2259,7 @@ static int do_relocation(struct btrfs_trans_handle *trans,
slot = path->slots[upper->level];
btrfs_release_path(path);
} else {
- ret = btrfs_bin_search(upper->eb, key, upper->level,
- &slot);
+ ret = btrfs_bin_search(upper->eb, key, &slot);
if (ret < 0) {
err = ret;
goto next;
@@ -3039,15 +2329,15 @@ static int do_relocation(struct btrfs_trans_handle *trans,
}
next:
if (!upper->pending)
- drop_node_buffer(upper);
+ btrfs_backref_drop_node_buffer(upper);
else
- unlock_node_buffer(upper);
+ btrfs_backref_unlock_node_buffer(upper);
if (err)
break;
}
if (!err && node->pending) {
- drop_node_buffer(node);
+ btrfs_backref_drop_node_buffer(node);
list_move_tail(&node->list, &rc->backref_cache.changed);
node->pending = 0;
}
@@ -3059,7 +2349,7 @@ next:
static int link_to_upper(struct btrfs_trans_handle *trans,
struct reloc_control *rc,
- struct backref_node *node,
+ struct btrfs_backref_node *node,
struct btrfs_path *path)
{
struct btrfs_key key;
@@ -3073,15 +2363,15 @@ static int finish_pending_nodes(struct btrfs_trans_handle *trans,
struct btrfs_path *path, int err)
{
LIST_HEAD(list);
- struct backref_cache *cache = &rc->backref_cache;
- struct backref_node *node;
+ struct btrfs_backref_cache *cache = &rc->backref_cache;
+ struct btrfs_backref_node *node;
int level;
int ret;
for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
while (!list_empty(&cache->pending[level])) {
node = list_entry(cache->pending[level].next,
- struct backref_node, list);
+ struct btrfs_backref_node, list);
list_move_tail(&node->list, &list);
BUG_ON(!node->pending);
@@ -3096,35 +2386,16 @@ static int finish_pending_nodes(struct btrfs_trans_handle *trans,
return err;
}
-static void mark_block_processed(struct reloc_control *rc,
- u64 bytenr, u32 blocksize)
-{
- set_extent_bits(&rc->processed_blocks, bytenr, bytenr + blocksize - 1,
- EXTENT_DIRTY);
-}
-
-static void __mark_block_processed(struct reloc_control *rc,
- struct backref_node *node)
-{
- u32 blocksize;
- if (node->level == 0 ||
- in_block_group(node->bytenr, rc->block_group)) {
- blocksize = rc->extent_root->fs_info->nodesize;
- mark_block_processed(rc, node->bytenr, blocksize);
- }
- node->processed = 1;
-}
-
/*
* mark a block and all blocks directly/indirectly reference the block
* as processed.
*/
static void update_processed_blocks(struct reloc_control *rc,
- struct backref_node *node)
+ struct btrfs_backref_node *node)
{
- struct backref_node *next = node;
- struct backref_edge *edge;
- struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
+ struct btrfs_backref_node *next = node;
+ struct btrfs_backref_edge *edge;
+ struct btrfs_backref_edge *edges[BTRFS_MAX_LEVEL - 1];
int index = 0;
while (next) {
@@ -3133,13 +2404,13 @@ static void update_processed_blocks(struct reloc_control *rc,
if (next->processed)
break;
- __mark_block_processed(rc, next);
+ mark_block_processed(rc, next);
if (list_empty(&next->upper))
break;
edge = list_entry(next->upper.next,
- struct backref_edge, list[LOWER]);
+ struct btrfs_backref_edge, list[LOWER]);
edges[index++] = edge;
next = edge->node[UPPER];
}
@@ -3184,7 +2455,7 @@ static int get_tree_block_key(struct btrfs_fs_info *fs_info,
*/
static int relocate_tree_block(struct btrfs_trans_handle *trans,
struct reloc_control *rc,
- struct backref_node *node,
+ struct btrfs_backref_node *node,
struct btrfs_key *key,
struct btrfs_path *path)
{
@@ -3210,7 +2481,7 @@ static int relocate_tree_block(struct btrfs_trans_handle *trans,
}
if (root) {
- if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
+ if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) {
BUG_ON(node->new_bytenr);
BUG_ON(!list_empty(&node->list));
btrfs_record_root_in_trans(trans, root);
@@ -3234,7 +2505,7 @@ static int relocate_tree_block(struct btrfs_trans_handle *trans,
}
out:
if (ret || node->level == 0 || node->cowonly)
- remove_backref_node(&rc->backref_cache, node);
+ btrfs_backref_cleanup_node(&rc->backref_cache, node);
return ret;
}
@@ -3246,7 +2517,7 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans,
struct reloc_control *rc, struct rb_root *blocks)
{
struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
- struct backref_node *node;
+ struct btrfs_backref_node *node;
struct btrfs_path *path;
struct tree_block *block;
struct tree_block *next;
@@ -3613,9 +2884,10 @@ static int add_tree_block(struct reloc_control *rc,
block->level = level;
block->key_ready = 0;
- rb_node = tree_insert(blocks, block->bytenr, &block->rb_node);
+ rb_node = rb_simple_insert(blocks, block->bytenr, &block->rb_node);
if (rb_node)
- backref_tree_panic(rb_node, -EEXIST, block->bytenr);
+ btrfs_backref_panic(rc->extent_root->fs_info, block->bytenr,
+ -EEXIST);
return 0;
}
@@ -3636,7 +2908,7 @@ static int __add_tree_block(struct reloc_control *rc,
if (tree_block_processed(bytenr, rc))
return 0;
- if (tree_search(blocks, bytenr))
+ if (rb_simple_search(blocks, bytenr))
return 0;
path = btrfs_alloc_path();
@@ -3698,7 +2970,6 @@ static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
struct inode *inode,
u64 ino)
{
- struct btrfs_key key;
struct btrfs_root *root = fs_info->tree_root;
struct btrfs_trans_handle *trans;
int ret = 0;
@@ -3706,11 +2977,7 @@ static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
if (inode)
goto truncate;
- key.objectid = ino;
- key.type = BTRFS_INODE_ITEM_KEY;
- key.offset = 0;
-
- inode = btrfs_iget(fs_info->sb, &key, root);
+ inode = btrfs_iget(fs_info->sb, ino, root);
if (IS_ERR(inode))
return -ENOENT;
@@ -4122,7 +3389,7 @@ restart:
rc->create_reloc_tree = 0;
set_reloc_control(rc);
- backref_cache_cleanup(&rc->backref_cache);
+ btrfs_backref_release_cache(&rc->backref_cache);
btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1, NULL);
/*
@@ -4198,14 +3465,10 @@ struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
struct inode *inode = NULL;
struct btrfs_trans_handle *trans;
struct btrfs_root *root;
- struct btrfs_key key;
u64 objectid;
int err = 0;
- root = read_fs_root(fs_info, BTRFS_DATA_RELOC_TREE_OBJECTID);
- if (IS_ERR(root))
- return ERR_CAST(root);
-
+ root = btrfs_grab_root(fs_info->data_reloc_root);
trans = btrfs_start_transaction(root, 6);
if (IS_ERR(trans)) {
btrfs_put_root(root);
@@ -4219,10 +3482,7 @@ struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
err = __insert_orphan_inode(trans, root, objectid);
BUG_ON(err);
- key.objectid = objectid;
- key.type = BTRFS_INODE_ITEM_KEY;
- key.offset = 0;
- inode = btrfs_iget(fs_info->sb, &key, root);
+ inode = btrfs_iget(fs_info->sb, objectid, root);
BUG_ON(IS_ERR(inode));
BTRFS_I(inode)->index_cnt = group->start;
@@ -4249,7 +3509,7 @@ static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info)
INIT_LIST_HEAD(&rc->reloc_roots);
INIT_LIST_HEAD(&rc->dirty_subvol_roots);
- backref_cache_init(&rc->backref_cache);
+ btrfs_backref_init_cache(fs_info, &rc->backref_cache, 1);
mapping_tree_init(&rc->reloc_root_tree);
extent_io_tree_init(fs_info, &rc->processed_blocks,
IO_TREE_RELOC_BLOCKS, NULL);
@@ -4494,12 +3754,12 @@ int btrfs_recover_relocation(struct btrfs_root *root)
goto out;
}
- set_bit(BTRFS_ROOT_REF_COWS, &reloc_root->state);
+ set_bit(BTRFS_ROOT_SHAREABLE, &reloc_root->state);
list_add(&reloc_root->root_list, &reloc_roots);
if (btrfs_root_refs(&reloc_root->root_item) > 0) {
- fs_root = read_fs_root(fs_info,
- reloc_root->root_key.offset);
+ fs_root = btrfs_get_fs_root(fs_info,
+ reloc_root->root_key.offset, false);
if (IS_ERR(fs_root)) {
ret = PTR_ERR(fs_root);
if (ret != -ENOENT) {
@@ -4555,7 +3815,8 @@ int btrfs_recover_relocation(struct btrfs_root *root)
continue;
}
- fs_root = read_fs_root(fs_info, reloc_root->root_key.offset);
+ fs_root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset,
+ false);
if (IS_ERR(fs_root)) {
err = PTR_ERR(fs_root);
list_add_tail(&reloc_root->root_list, &reloc_roots);
@@ -4591,20 +3852,16 @@ out_unset:
unset_reloc_control(rc);
free_reloc_control(rc);
out:
- if (!list_empty(&reloc_roots))
- free_reloc_roots(&reloc_roots);
+ free_reloc_roots(&reloc_roots);
btrfs_free_path(path);
if (err == 0) {
/* cleanup orphan inode in data relocation tree */
- fs_root = read_fs_root(fs_info, BTRFS_DATA_RELOC_TREE_OBJECTID);
- if (IS_ERR(fs_root)) {
- err = PTR_ERR(fs_root);
- } else {
- err = btrfs_orphan_cleanup(fs_root);
- btrfs_put_root(fs_root);
- }
+ fs_root = btrfs_grab_root(fs_info->data_reloc_root);
+ ASSERT(fs_root);
+ err = btrfs_orphan_cleanup(fs_root);
+ btrfs_put_root(fs_root);
}
return err;
}
@@ -4666,7 +3923,7 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct reloc_control *rc;
- struct backref_node *node;
+ struct btrfs_backref_node *node;
int first_cow = 0;
int level;
int ret = 0;
@@ -4691,7 +3948,7 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
BUG_ON(node->bytenr != buf->start &&
node->new_bytenr != buf->start);
- drop_node_buffer(node);
+ btrfs_backref_drop_node_buffer(node);
atomic_inc(&cow->refs);
node->eb = cow;
node->new_bytenr = cow->start;
@@ -4703,7 +3960,7 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
}
if (first_cow)
- __mark_block_processed(rc, node);
+ mark_block_processed(rc, node);
if (first_cow && level > 0)
rc->nodes_relocated += buf->len;
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 668f22844017..c89697486366 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -210,7 +210,6 @@ int btrfs_find_orphan_roots(struct btrfs_fs_info *fs_info)
struct extent_buffer *leaf;
struct btrfs_path *path;
struct btrfs_key key;
- struct btrfs_key root_key;
struct btrfs_root *root;
int err = 0;
int ret;
@@ -223,10 +222,9 @@ int btrfs_find_orphan_roots(struct btrfs_fs_info *fs_info)
key.type = BTRFS_ORPHAN_ITEM_KEY;
key.offset = 0;
- root_key.type = BTRFS_ROOT_ITEM_KEY;
- root_key.offset = (u64)-1;
-
while (1) {
+ u64 root_objectid;
+
ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0);
if (ret < 0) {
err = ret;
@@ -250,10 +248,10 @@ int btrfs_find_orphan_roots(struct btrfs_fs_info *fs_info)
key.type != BTRFS_ORPHAN_ITEM_KEY)
break;
- root_key.objectid = key.offset;
+ root_objectid = key.offset;
key.offset++;
- root = btrfs_get_fs_root(fs_info, &root_key, false);
+ root = btrfs_get_fs_root(fs_info, root_objectid, false);
err = PTR_ERR_OR_ZERO(root);
if (err && err != -ENOENT) {
break;
@@ -270,7 +268,7 @@ int btrfs_find_orphan_roots(struct btrfs_fs_info *fs_info)
break;
}
err = btrfs_del_orphan_item(trans, tree_root,
- root_key.objectid);
+ root_objectid);
btrfs_end_transaction(trans);
if (err) {
btrfs_handle_fs_error(fs_info, err,
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index adaf8ab694d5..016a025e36c7 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -647,13 +647,9 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
struct inode_fs_paths *ipath = NULL;
struct btrfs_root *local_root;
- struct btrfs_key root_key;
struct btrfs_key key;
- root_key.objectid = root;
- root_key.type = BTRFS_ROOT_ITEM_KEY;
- root_key.offset = (u64)-1;
- local_root = btrfs_get_fs_root(fs_info, &root_key, true);
+ local_root = btrfs_get_fs_root(fs_info, root, true);
if (IS_ERR(local_root)) {
ret = PTR_ERR(local_root);
goto err;
@@ -3046,7 +3042,8 @@ out:
static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
struct map_lookup *map,
struct btrfs_device *scrub_dev,
- int num, u64 base, u64 length)
+ int num, u64 base, u64 length,
+ struct btrfs_block_group *cache)
{
struct btrfs_path *path, *ppath;
struct btrfs_fs_info *fs_info = sctx->fs_info;
@@ -3284,6 +3281,20 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
break;
}
+ /*
+		 * If our block group was removed in the meantime, just
+		 * stop scrubbing since there is no point in continuing.
+ * Continuing would prevent reusing its device extents
+ * for new block groups for a long time.
+ */
+ spin_lock(&cache->lock);
+ if (cache->removed) {
+ spin_unlock(&cache->lock);
+ ret = 0;
+ goto out;
+ }
+ spin_unlock(&cache->lock);
+
extent = btrfs_item_ptr(l, slot,
struct btrfs_extent_item);
flags = btrfs_extent_flags(l, extent);
@@ -3328,13 +3339,14 @@ again:
&extent_dev,
&extent_mirror_num);
- ret = btrfs_lookup_csums_range(csum_root,
- extent_logical,
- extent_logical +
- extent_len - 1,
- &sctx->csum_list, 1);
- if (ret)
- goto out;
+ if (flags & BTRFS_EXTENT_FLAG_DATA) {
+ ret = btrfs_lookup_csums_range(csum_root,
+ extent_logical,
+ extent_logical + extent_len - 1,
+ &sctx->csum_list, 1);
+ if (ret)
+ goto out;
+ }
ret = scrub_extent(sctx, map, extent_logical, extent_len,
extent_physical, extent_dev, flags,
@@ -3457,7 +3469,7 @@ static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
map->stripes[i].physical == dev_offset) {
ret = scrub_stripe(sctx, map, scrub_dev, i,
- chunk_offset, length);
+ chunk_offset, length, cache);
if (ret)
goto out;
}
@@ -3555,6 +3567,23 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
goto skip;
/*
+	 * Make sure that, while we are scrubbing, the corresponding block
+	 * group doesn't get its logical address and its device extents
+ * reused for another block group, which can possibly be of a
+ * different type and different profile. We do this to prevent
+ * false error detections and crashes due to bogus attempts to
+ * repair extents.
+ */
+ spin_lock(&cache->lock);
+ if (cache->removed) {
+ spin_unlock(&cache->lock);
+ btrfs_put_block_group(cache);
+ goto skip;
+ }
+ btrfs_freeze_block_group(cache);
+ spin_unlock(&cache->lock);
+
+ /*
* we need call btrfs_inc_block_group_ro() with scrubs_paused,
* to avoid deadlock caused by:
* btrfs_inc_block_group_ro()
@@ -3609,6 +3638,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
} else {
btrfs_warn(fs_info,
"failed setting block group ro: %d", ret);
+ btrfs_unfreeze_block_group(cache);
btrfs_put_block_group(cache);
scrub_pause_off(fs_info);
break;
@@ -3695,6 +3725,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
spin_unlock(&cache->lock);
}
+ btrfs_unfreeze_block_group(cache);
btrfs_put_block_group(cache);
if (ret)
break;
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 6a92ecf9eaa2..d9813a5b075a 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -23,6 +23,7 @@
#include "btrfs_inode.h"
#include "transaction.h"
#include "compression.h"
+#include "xattr.h"
/*
* Maximum number of references an extent can have in order for us to attempt to
@@ -4545,6 +4546,10 @@ static int __process_new_xattr(int num, struct btrfs_key *di_key,
struct fs_path *p;
struct posix_acl_xattr_header dummy_acl;
+ /* Capabilities are emitted by finish_inode_if_needed */
+ if (!strncmp(name, XATTR_NAME_CAPS, name_len))
+ return 0;
+
p = fs_path_alloc();
if (!p)
return -ENOMEM;
@@ -4801,17 +4806,12 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
struct inode *inode;
struct page *page;
char *addr;
- struct btrfs_key key;
pgoff_t index = offset >> PAGE_SHIFT;
pgoff_t last_index;
unsigned pg_offset = offset_in_page(offset);
ssize_t ret = 0;
- key.objectid = sctx->cur_ino;
- key.type = BTRFS_INODE_ITEM_KEY;
- key.offset = 0;
-
- inode = btrfs_iget(fs_info->sb, &key, root);
+ inode = btrfs_iget(fs_info->sb, sctx->cur_ino, root);
if (IS_ERR(inode))
return PTR_ERR(inode);
@@ -5107,6 +5107,64 @@ static int send_extent_data(struct send_ctx *sctx,
return 0;
}
+/*
+ * Search for a capability xattr related to sctx->cur_ino. If the capability is
+ * found, call send_set_xattr() to emit it.
+ *
+ * Return 0 if there isn't a capability, or when the capability was emitted
+ * successfully, or < 0 if an error occurred.
+ */
+static int send_capabilities(struct send_ctx *sctx)
+{
+ struct fs_path *fspath = NULL;
+ struct btrfs_path *path;
+ struct btrfs_dir_item *di;
+ struct extent_buffer *leaf;
+ unsigned long data_ptr;
+ char *buf = NULL;
+ int buf_len;
+ int ret = 0;
+
+ path = alloc_path_for_send();
+ if (!path)
+ return -ENOMEM;
+
+ di = btrfs_lookup_xattr(NULL, sctx->send_root, path, sctx->cur_ino,
+ XATTR_NAME_CAPS, strlen(XATTR_NAME_CAPS), 0);
+ if (!di) {
+ /* There is no xattr for this inode */
+ goto out;
+ } else if (IS_ERR(di)) {
+ ret = PTR_ERR(di);
+ goto out;
+ }
+
+ leaf = path->nodes[0];
+ buf_len = btrfs_dir_data_len(leaf, di);
+
+ fspath = fs_path_alloc();
+ buf = kmalloc(buf_len, GFP_KERNEL);
+ if (!fspath || !buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath);
+ if (ret < 0)
+ goto out;
+
+ data_ptr = (unsigned long)(di + 1) + btrfs_dir_name_len(leaf, di);
+ read_extent_buffer(leaf, buf, data_ptr, buf_len);
+
+ ret = send_set_xattr(sctx, fspath, XATTR_NAME_CAPS,
+ strlen(XATTR_NAME_CAPS), buf, buf_len);
+out:
+ kfree(buf);
+ fs_path_free(fspath);
+ btrfs_free_path(path);
+ return ret;
+}
+
static int clone_range(struct send_ctx *sctx,
struct clone_root *clone_root,
const u64 disk_byte,
@@ -5972,6 +6030,10 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
goto out;
}
+ ret = send_capabilities(sctx);
+ if (ret < 0)
+ goto out;
+
/*
* If other directory inodes depended on our current directory
* inode's move/rename, now do their move/rename operations.
@@ -7021,7 +7083,6 @@ long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg)
struct btrfs_root *send_root = BTRFS_I(file_inode(mnt_file))->root;
struct btrfs_fs_info *fs_info = send_root->fs_info;
struct btrfs_root *clone_root;
- struct btrfs_key key;
struct send_ctx *sctx = NULL;
u32 i;
u64 *clone_sources_tmp = NULL;
@@ -7143,11 +7204,8 @@ long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg)
}
for (i = 0; i < arg->clone_sources_count; i++) {
- key.objectid = clone_sources_tmp[i];
- key.type = BTRFS_ROOT_ITEM_KEY;
- key.offset = (u64)-1;
-
- clone_root = btrfs_get_fs_root(fs_info, &key, true);
+ clone_root = btrfs_get_fs_root(fs_info,
+ clone_sources_tmp[i], true);
if (IS_ERR(clone_root)) {
ret = PTR_ERR(clone_root);
goto out;
@@ -7178,11 +7236,8 @@ long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg)
}
if (arg->parent_root) {
- key.objectid = arg->parent_root;
- key.type = BTRFS_ROOT_ITEM_KEY;
- key.offset = (u64)-1;
-
- sctx->parent_root = btrfs_get_fs_root(fs_info, &key, true);
+ sctx->parent_root = btrfs_get_fs_root(fs_info, arg->parent_root,
+ true);
if (IS_ERR(sctx->parent_root)) {
ret = PTR_ERR(sctx->parent_root);
goto out;
diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
index ff17a4420358..41ee88633769 100644
--- a/fs/btrfs/space-info.c
+++ b/fs/btrfs/space-info.c
@@ -626,6 +626,7 @@ static int may_commit_transaction(struct btrfs_fs_info *fs_info,
struct reserve_ticket *ticket = NULL;
struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_block_rsv;
struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
+ struct btrfs_block_rsv *trans_rsv = &fs_info->trans_block_rsv;
struct btrfs_trans_handle *trans;
u64 bytes_needed;
u64 reclaim_bytes = 0;
@@ -688,6 +689,11 @@ static int may_commit_transaction(struct btrfs_fs_info *fs_info,
spin_lock(&delayed_refs_rsv->lock);
reclaim_bytes += delayed_refs_rsv->reserved;
spin_unlock(&delayed_refs_rsv->lock);
+
+ spin_lock(&trans_rsv->lock);
+ reclaim_bytes += trans_rsv->reserved;
+ spin_unlock(&trans_rsv->lock);
+
if (reclaim_bytes >= bytes_needed)
goto commit;
bytes_needed -= reclaim_bytes;
@@ -856,6 +862,34 @@ static inline int need_do_async_reclaim(struct btrfs_fs_info *fs_info,
!test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
}
+static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info,
+ struct btrfs_space_info *space_info,
+ struct reserve_ticket *ticket)
+{
+ struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
+ u64 min_bytes;
+
+ if (global_rsv->space_info != space_info)
+ return false;
+
+ spin_lock(&global_rsv->lock);
+ min_bytes = div_factor(global_rsv->size, 1);
+ if (global_rsv->reserved < min_bytes + ticket->bytes) {
+ spin_unlock(&global_rsv->lock);
+ return false;
+ }
+ global_rsv->reserved -= ticket->bytes;
+ ticket->bytes = 0;
+ list_del_init(&ticket->list);
+ wake_up(&ticket->wait);
+ space_info->tickets_id++;
+ if (global_rsv->reserved < global_rsv->size)
+ global_rsv->full = 0;
+ spin_unlock(&global_rsv->lock);
+
+ return true;
+}
+
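/*
 * div_factor(size, 1) above computes 10% of the reserve size, so the steal
 * only succeeds when at least that much would remain afterwards. A sketch
 * of the helper, assuming the btrfs math.h definition (num * factor / 10):
 */
static inline u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	return div_u64(num, 10);
}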
/*
* maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets
* @fs_info - fs_info for this fs
@@ -888,6 +922,10 @@ static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info,
ticket = list_first_entry(&space_info->tickets,
struct reserve_ticket, list);
+ if (ticket->steal &&
+ steal_from_global_rsv(fs_info, space_info, ticket))
+ return true;
+
/*
* may_commit_transaction will avoid committing the transaction
* if it doesn't feel like the space reclaimed by the commit
@@ -1104,6 +1142,7 @@ static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
switch (flush) {
case BTRFS_RESERVE_FLUSH_ALL:
+ case BTRFS_RESERVE_FLUSH_ALL_STEAL:
wait_reserve_ticket(fs_info, space_info, ticket);
break;
case BTRFS_RESERVE_FLUSH_LIMIT:
@@ -1125,11 +1164,17 @@ static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
ret = ticket->error;
if (ticket->bytes || ticket->error) {
/*
- * Need to delete here for priority tickets. For regular tickets
- * either the async reclaim job deletes the ticket from the list
- * or we delete it ourselves at wait_reserve_ticket().
+ * We were a priority ticket, so we need to delete ourselves
+ * from the list. Because we could have other priority tickets
+ * behind us that require less space, run
+ * btrfs_try_granting_tickets() to see if their reservations can
+ * now be made.
*/
- remove_ticket(space_info, ticket);
+ if (!list_empty(&ticket->list)) {
+ remove_ticket(space_info, ticket);
+ btrfs_try_granting_tickets(fs_info, space_info);
+ }
+
if (!ret)
ret = -ENOSPC;
}
@@ -1145,6 +1190,16 @@ static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
return ret;
}
+/*
+ * This returns true if this flush state will go through the ordinary flushing
+ * code.
+ */
+static inline bool is_normal_flushing(enum btrfs_reserve_flush_enum flush)
+{
+ return (flush == BTRFS_RESERVE_FLUSH_ALL) ||
+ (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
+}
+
/**
* reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
* @root - the root we're allocating for
@@ -1175,8 +1230,17 @@ static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
spin_lock(&space_info->lock);
ret = -ENOSPC;
used = btrfs_space_info_used(space_info, true);
- pending_tickets = !list_empty(&space_info->tickets) ||
- !list_empty(&space_info->priority_tickets);
+
+ /*
+	 * We don't want NO_FLUSH allocations to jump ahead of everybody else;
+	 * they can generally handle ENOSPC in a different way, so treat them
+	 * the same as normal flushers when it comes to skipping pending tickets.
+ */
+ if (is_normal_flushing(flush) || (flush == BTRFS_RESERVE_NO_FLUSH))
+ pending_tickets = !list_empty(&space_info->tickets) ||
+ !list_empty(&space_info->priority_tickets);
+ else
+ pending_tickets = !list_empty(&space_info->priority_tickets);
/*
* Carry on if we have enough space (short-circuit) OR call
@@ -1198,12 +1262,13 @@ static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
* the list and we will do our own flushing further down.
*/
if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
- ASSERT(space_info->reclaim_size >= 0);
ticket.bytes = orig_bytes;
ticket.error = 0;
space_info->reclaim_size += ticket.bytes;
init_waitqueue_head(&ticket.wait);
- if (flush == BTRFS_RESERVE_FLUSH_ALL) {
+ ticket.steal = (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
+ if (flush == BTRFS_RESERVE_FLUSH_ALL ||
+ flush == BTRFS_RESERVE_FLUSH_ALL_STEAL) {
list_add_tail(&ticket.list, &space_info->tickets);
if (!space_info->flush) {
space_info->flush = 1;
diff --git a/fs/btrfs/space-info.h b/fs/btrfs/space-info.h
index 0a5001ef1481..c3c64019950a 100644
--- a/fs/btrfs/space-info.h
+++ b/fs/btrfs/space-info.h
@@ -78,6 +78,7 @@ struct btrfs_space_info {
struct reserve_ticket {
u64 bytes;
int error;
+ bool steal;
struct list_head list;
wait_queue_head_t wait;
};
diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c
index 73f7987143df..079b059818e9 100644
--- a/fs/btrfs/struct-funcs.c
+++ b/fs/btrfs/struct-funcs.c
@@ -17,151 +17,152 @@ static inline void put_unaligned_le8(u8 val, void *p)
*(u8 *)p = val;
}
+static bool check_setget_bounds(const struct extent_buffer *eb,
+ const void *ptr, unsigned off, int size)
+{
+ const unsigned long member_offset = (unsigned long)ptr + off;
+
+ if (member_offset > eb->len) {
+ btrfs_warn(eb->fs_info,
+ "bad eb member start: ptr 0x%lx start %llu member offset %lu size %d",
+ (unsigned long)ptr, eb->start, member_offset, size);
+ return false;
+ }
+ if (member_offset + size > eb->len) {
+ btrfs_warn(eb->fs_info,
+ "bad eb member end: ptr 0x%lx start %llu member offset %lu size %d",
+ (unsigned long)ptr, eb->start, member_offset, size);
+ return false;
+ }
+
+ return true;
+}
+
/*
- * this is some deeply nasty code.
+ * Macro templates that define helpers to read/write extent buffer data of a
+ * given size, that are also used via ctree.h for access to item members by
+ * specialized helpers.
*
- * The end result is that anyone who #includes ctree.h gets a
- * declaration for the btrfs_set_foo functions and btrfs_foo functions,
- * which are wrappers of btrfs_set_token_#bits functions and
- * btrfs_get_token_#bits functions, which are defined in this file.
+ * Generic helpers:
+ * - btrfs_set_8 (for 8/16/32/64)
+ * - btrfs_get_8 (for 8/16/32/64)
*
- * These setget functions do all the extent_buffer related mapping
- * required to efficiently read and write specific fields in the extent
- * buffers. Every pointer to metadata items in btrfs is really just
- * an unsigned long offset into the extent buffer which has been
- * cast to a specific type. This gives us all the gcc type checking.
+ * Generic helpers with a token (cached address of the most recently accessed
+ * page):
+ * - btrfs_set_token_8 (for 8/16/32/64)
+ * - btrfs_get_token_8 (for 8/16/32/64)
*
- * The extent buffer api is used to do the page spanning work required to
- * have a metadata blocksize different from the page size.
+ * The set/get functions handle data spanning two pages transparently, in
+ * case the metadata block size is larger than the page size. Every pointer
+ * to a metadata item is an offset into the extent buffer page array, cast
+ * to a specific type. This gives us all the type checking.
*
- * There are 2 variants defined, one with a token pointer and one without.
+ * The extent buffer pages stored in the pages array do not form a contiguous
+ * physical range, but the API functions assume a linear offset within the
+ * range from 0 to the metadata node size.
*/
#define DEFINE_BTRFS_SETGET_BITS(bits) \
-u##bits btrfs_get_token_##bits(const struct extent_buffer *eb, \
- const void *ptr, unsigned long off, \
- struct btrfs_map_token *token) \
+u##bits btrfs_get_token_##bits(struct btrfs_map_token *token, \
+ const void *ptr, unsigned long off) \
{ \
- unsigned long part_offset = (unsigned long)ptr; \
- unsigned long offset = part_offset + off; \
- void *p; \
- int err; \
- char *kaddr; \
- unsigned long map_start; \
- unsigned long map_len; \
- int size = sizeof(u##bits); \
- u##bits res; \
+ const unsigned long member_offset = (unsigned long)ptr + off; \
+ const unsigned long idx = member_offset >> PAGE_SHIFT; \
+ const unsigned long oip = offset_in_page(member_offset); \
+ const int size = sizeof(u##bits); \
+ u8 lebytes[sizeof(u##bits)]; \
+ const int part = PAGE_SIZE - oip; \
\
ASSERT(token); \
- ASSERT(token->eb == eb); \
- \
- if (token->kaddr && token->offset <= offset && \
- (token->offset + PAGE_SIZE >= offset + size)) { \
- kaddr = token->kaddr; \
- p = kaddr + part_offset - token->offset; \
- res = get_unaligned_le##bits(p + off); \
- return res; \
+ ASSERT(token->kaddr); \
+ ASSERT(check_setget_bounds(token->eb, ptr, off, size)); \
+ if (token->offset <= member_offset && \
+ member_offset + size <= token->offset + PAGE_SIZE) { \
+ return get_unaligned_le##bits(token->kaddr + oip); \
} \
- err = map_private_extent_buffer(eb, offset, size, \
- &kaddr, &map_start, &map_len); \
- if (err) { \
- __le##bits leres; \
+ token->kaddr = page_address(token->eb->pages[idx]); \
+ token->offset = idx << PAGE_SHIFT; \
+ if (oip + size <= PAGE_SIZE) \
+ return get_unaligned_le##bits(token->kaddr + oip); \
\
- read_extent_buffer(eb, &leres, offset, size); \
- return le##bits##_to_cpu(leres); \
- } \
- p = kaddr + part_offset - map_start; \
- res = get_unaligned_le##bits(p + off); \
- token->kaddr = kaddr; \
- token->offset = map_start; \
- return res; \
+ memcpy(lebytes, token->kaddr + oip, part); \
+ token->kaddr = page_address(token->eb->pages[idx + 1]); \
+ token->offset = (idx + 1) << PAGE_SHIFT; \
+ memcpy(lebytes + part, token->kaddr, size - part); \
+ return get_unaligned_le##bits(lebytes); \
} \
u##bits btrfs_get_##bits(const struct extent_buffer *eb, \
const void *ptr, unsigned long off) \
{ \
- unsigned long part_offset = (unsigned long)ptr; \
- unsigned long offset = part_offset + off; \
- void *p; \
- int err; \
- char *kaddr; \
- unsigned long map_start; \
- unsigned long map_len; \
- int size = sizeof(u##bits); \
- u##bits res; \
+ const unsigned long member_offset = (unsigned long)ptr + off; \
+ const unsigned long oip = offset_in_page(member_offset); \
+ const unsigned long idx = member_offset >> PAGE_SHIFT; \
+ char *kaddr = page_address(eb->pages[idx]); \
+ const int size = sizeof(u##bits); \
+ const int part = PAGE_SIZE - oip; \
+ u8 lebytes[sizeof(u##bits)]; \
\
- err = map_private_extent_buffer(eb, offset, size, \
- &kaddr, &map_start, &map_len); \
- if (err) { \
- __le##bits leres; \
+ ASSERT(check_setget_bounds(eb, ptr, off, size)); \
+ if (oip + size <= PAGE_SIZE) \
+ return get_unaligned_le##bits(kaddr + oip); \
\
- read_extent_buffer(eb, &leres, offset, size); \
- return le##bits##_to_cpu(leres); \
- } \
- p = kaddr + part_offset - map_start; \
- res = get_unaligned_le##bits(p + off); \
- return res; \
+ memcpy(lebytes, kaddr + oip, part); \
+ kaddr = page_address(eb->pages[idx + 1]); \
+ memcpy(lebytes + part, kaddr, size - part); \
+ return get_unaligned_le##bits(lebytes); \
} \
-void btrfs_set_token_##bits(struct extent_buffer *eb, \
+void btrfs_set_token_##bits(struct btrfs_map_token *token, \
const void *ptr, unsigned long off, \
- u##bits val, \
- struct btrfs_map_token *token) \
+ u##bits val) \
{ \
- unsigned long part_offset = (unsigned long)ptr; \
- unsigned long offset = part_offset + off; \
- void *p; \
- int err; \
- char *kaddr; \
- unsigned long map_start; \
- unsigned long map_len; \
- int size = sizeof(u##bits); \
+ const unsigned long member_offset = (unsigned long)ptr + off; \
+ const unsigned long idx = member_offset >> PAGE_SHIFT; \
+ const unsigned long oip = offset_in_page(member_offset); \
+ const int size = sizeof(u##bits); \
+ u8 lebytes[sizeof(u##bits)]; \
+ const int part = PAGE_SIZE - oip; \
\
ASSERT(token); \
- ASSERT(token->eb == eb); \
- \
- if (token->kaddr && token->offset <= offset && \
- (token->offset + PAGE_SIZE >= offset + size)) { \
- kaddr = token->kaddr; \
- p = kaddr + part_offset - token->offset; \
- put_unaligned_le##bits(val, p + off); \
+ ASSERT(token->kaddr); \
+ ASSERT(check_setget_bounds(token->eb, ptr, off, size)); \
+ if (token->offset <= member_offset && \
+ member_offset + size <= token->offset + PAGE_SIZE) { \
+ put_unaligned_le##bits(val, token->kaddr + oip); \
return; \
} \
- err = map_private_extent_buffer(eb, offset, size, \
- &kaddr, &map_start, &map_len); \
- if (err) { \
- __le##bits val2; \
- \
- val2 = cpu_to_le##bits(val); \
- write_extent_buffer(eb, &val2, offset, size); \
+ token->kaddr = page_address(token->eb->pages[idx]); \
+ token->offset = idx << PAGE_SHIFT; \
+ if (oip + size <= PAGE_SIZE) { \
+ put_unaligned_le##bits(val, token->kaddr + oip); \
return; \
} \
- p = kaddr + part_offset - map_start; \
- put_unaligned_le##bits(val, p + off); \
- token->kaddr = kaddr; \
- token->offset = map_start; \
+ put_unaligned_le##bits(val, lebytes); \
+ memcpy(token->kaddr + oip, lebytes, part); \
+ token->kaddr = page_address(token->eb->pages[idx + 1]); \
+ token->offset = (idx + 1) << PAGE_SHIFT; \
+ memcpy(token->kaddr, lebytes + part, size - part); \
} \
-void btrfs_set_##bits(struct extent_buffer *eb, void *ptr, \
+void btrfs_set_##bits(const struct extent_buffer *eb, void *ptr, \
unsigned long off, u##bits val) \
{ \
- unsigned long part_offset = (unsigned long)ptr; \
- unsigned long offset = part_offset + off; \
- void *p; \
- int err; \
- char *kaddr; \
- unsigned long map_start; \
- unsigned long map_len; \
- int size = sizeof(u##bits); \
- \
- err = map_private_extent_buffer(eb, offset, size, \
- &kaddr, &map_start, &map_len); \
- if (err) { \
- __le##bits val2; \
+ const unsigned long member_offset = (unsigned long)ptr + off; \
+ const unsigned long oip = offset_in_page(member_offset); \
+ const unsigned long idx = member_offset >> PAGE_SHIFT; \
+ char *kaddr = page_address(eb->pages[idx]); \
+ const int size = sizeof(u##bits); \
+ const int part = PAGE_SIZE - oip; \
+ u8 lebytes[sizeof(u##bits)]; \
\
- val2 = cpu_to_le##bits(val); \
- write_extent_buffer(eb, &val2, offset, size); \
+ ASSERT(check_setget_bounds(eb, ptr, off, size)); \
+ if (oip + size <= PAGE_SIZE) { \
+ put_unaligned_le##bits(val, kaddr + oip); \
return; \
} \
- p = kaddr + part_offset - map_start; \
- put_unaligned_le##bits(val, p + off); \
+ \
+ put_unaligned_le##bits(val, lebytes); \
+ memcpy(kaddr + oip, lebytes, part); \
+ kaddr = page_address(eb->pages[idx + 1]); \
+ memcpy(kaddr, lebytes + part, size - part); \
}
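/*
 * A standalone userspace sketch of the cross-page decode implemented by the
 * rewritten getters: the tail of one page and the head of the next are
 * copied into lebytes[] and decoded as little endian. Page size, data and
 * names below are made up for illustration.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PG 4096

static uint32_t get_le32_split(const uint8_t *pages[], unsigned long off)
{
	const unsigned long idx = off / PG;
	const unsigned long oip = off % PG;
	const int part = PG - oip;
	uint8_t lebytes[4];

	if (oip + 4 <= PG)
		memcpy(lebytes, pages[idx] + oip, 4);
	else {
		memcpy(lebytes, pages[idx] + oip, part);
		memcpy(lebytes + part, pages[idx + 1], 4 - part);
	}
	return (uint32_t)lebytes[0] | (uint32_t)lebytes[1] << 8 |
	       (uint32_t)lebytes[2] << 16 | (uint32_t)lebytes[3] << 24;
}

int main(void)
{
	static uint8_t a[PG], b[PG];
	const uint8_t *pages[] = { a, b };

	/* store 0x11223344 little endian across the page boundary */
	a[PG - 2] = 0x44; a[PG - 1] = 0x33; b[0] = 0x22; b[1] = 0x11;
	printf("0x%08x\n", get_le32_split(pages, PG - 2)); /* 0x11223344 */
	return 0;
}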
DEFINE_BTRFS_SETGET_BITS(8)
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 7932d8d07cff..bc73fd670702 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -72,23 +72,32 @@ const char * __attribute_const__ btrfs_decode_error(int errno)
char *errstr = "unknown";
switch (errno) {
- case -EIO:
+ case -ENOENT: /* -2 */
+ errstr = "No such entry";
+ break;
+ case -EIO: /* -5 */
errstr = "IO failure";
break;
- case -ENOMEM:
+	case -ENOMEM: /* -12 */
errstr = "Out of memory";
break;
- case -EROFS:
- errstr = "Readonly filesystem";
- break;
- case -EEXIST:
+ case -EEXIST: /* -17 */
errstr = "Object already exists";
break;
- case -ENOSPC:
+ case -ENOSPC: /* -28 */
errstr = "No space left";
break;
- case -ENOENT:
- errstr = "No such entry";
+ case -EROFS: /* -30 */
+ errstr = "Readonly filesystem";
+ break;
+ case -EOPNOTSUPP: /* -95 */
+ errstr = "Operation not supported";
+ break;
+ case -EUCLEAN: /* -117 */
+ errstr = "Filesystem corrupted";
+ break;
+ case -EDQUOT: /* -122 */
+ errstr = "Quota exceeded";
break;
}
@@ -1093,10 +1102,7 @@ char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
dirid = btrfs_root_ref_dirid(path->nodes[0], root_ref);
btrfs_release_path(path);
- key.objectid = subvol_objectid;
- key.type = BTRFS_ROOT_ITEM_KEY;
- key.offset = (u64)-1;
- fs_root = btrfs_get_fs_root(fs_info, &key, true);
+ fs_root = btrfs_get_fs_root(fs_info, subvol_objectid, true);
if (IS_ERR(fs_root)) {
ret = PTR_ERR(fs_root);
fs_root = NULL;
@@ -1211,7 +1217,6 @@ static int btrfs_fill_super(struct super_block *sb,
{
struct inode *inode;
struct btrfs_fs_info *fs_info = btrfs_sb(sb);
- struct btrfs_key key;
int err;
sb->s_maxbytes = MAX_LFS_FILESIZE;
@@ -1239,10 +1244,7 @@ static int btrfs_fill_super(struct super_block *sb,
return err;
}
- key.objectid = BTRFS_FIRST_FREE_OBJECTID;
- key.type = BTRFS_INODE_ITEM_KEY;
- key.offset = 0;
- inode = btrfs_iget(sb, &key, fs_info->fs_root);
+ inode = btrfs_iget(sb, BTRFS_FIRST_FREE_OBJECTID, fs_info->fs_root);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto fail_close;
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 2d5498136e5e..b359d4b17658 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -21,6 +21,7 @@
#include "dev-replace.h"
#include "qgroup.h"
#include "block-group.h"
+#include "space-info.h"
#define BTRFS_ROOT_TRANS_TAG 0
@@ -141,7 +142,7 @@ void btrfs_put_transaction(struct btrfs_transaction *transaction)
struct btrfs_block_group,
bg_list);
list_del_init(&cache->bg_list);
- btrfs_put_block_group_trimming(cache);
+ btrfs_unfreeze_block_group(cache);
btrfs_put_block_group(cache);
}
WARN_ON(!list_empty(&transaction->dev_update_list));
@@ -348,10 +349,10 @@ loop:
}
/*
- * this does all the record keeping required to make sure that a reference
- * counted root is properly recorded in a given transaction. This is required
- * to make sure the old root from before we joined the transaction is deleted
- * when the transaction commits
+ * This does all the record keeping required to make sure that a shareable root
+ * is properly recorded in a given transaction. This is required to make sure
+ * the old root from before we joined the transaction is deleted when the
+ * transaction commits.
*/
static int record_root_in_trans(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
@@ -359,7 +360,7 @@ static int record_root_in_trans(struct btrfs_trans_handle *trans,
{
struct btrfs_fs_info *fs_info = root->fs_info;
- if ((test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
+ if ((test_bit(BTRFS_ROOT_SHAREABLE, &root->state) &&
root->last_trans < trans->transid) || force) {
WARN_ON(root == fs_info->extent_root);
WARN_ON(!force && root->commit_root != root->node);
@@ -438,7 +439,7 @@ int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
{
struct btrfs_fs_info *fs_info = root->fs_info;
- if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
+ if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
return 0;
/*
@@ -503,7 +504,7 @@ static inline bool need_reserve_reloc_root(struct btrfs_root *root)
struct btrfs_fs_info *fs_info = root->fs_info;
if (!fs_info->reloc_ctl ||
- !test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
+ !test_bit(BTRFS_ROOT_SHAREABLE, &root->state) ||
root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
root->reloc_root)
return false;
@@ -523,6 +524,7 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
u64 num_bytes = 0;
u64 qgroup_reserved = 0;
bool reloc_reserved = false;
+ bool do_chunk_alloc = false;
int ret;
/* Send isn't supposed to start transactions. */
@@ -563,7 +565,8 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
* refill that amount for whatever is missing in the reserve.
*/
num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items);
- if (delayed_refs_rsv->full == 0) {
+ if (flush == BTRFS_RESERVE_FLUSH_ALL &&
+ delayed_refs_rsv->full == 0) {
delayed_refs_bytes = num_bytes;
num_bytes <<= 1;
}
@@ -584,6 +587,9 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
delayed_refs_bytes);
num_bytes -= delayed_refs_bytes;
}
+
+ if (rsv->space_info->force_alloc)
+ do_chunk_alloc = true;
} else if (num_items == 0 && flush == BTRFS_RESERVE_FLUSH_ALL &&
!delayed_refs_rsv->full) {
/*
@@ -666,6 +672,19 @@ got_it:
current->journal_info = h;
/*
+ * If the space_info is marked ALLOC_FORCE then we'll get upgraded to
+ * ALLOC_FORCE the first run through, and then we won't allocate for
+ * anybody else who races in later. We don't care about the return
+ * value here.
+ */
+ if (do_chunk_alloc && num_bytes) {
+ u64 flags = h->block_rsv->space_info->flags;
+
+ btrfs_chunk_alloc(h, btrfs_get_alloc_profile(fs_info, flags),
+ CHUNK_ALLOC_NO_FORCE);
+ }
+
+ /*
* btrfs_record_root_in_trans() needs to alloc new extents, and may
* call btrfs_join_transaction() while we're also starting a
* transaction.
@@ -699,43 +718,10 @@ struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
struct btrfs_root *root,
- unsigned int num_items,
- int min_factor)
+ unsigned int num_items)
{
- struct btrfs_fs_info *fs_info = root->fs_info;
- struct btrfs_trans_handle *trans;
- u64 num_bytes;
- int ret;
-
- /*
- * We have two callers: unlink and block group removal. The
- * former should succeed even if we will temporarily exceed
- * quota and the latter operates on the extent root so
- * qgroup enforcement is ignored anyway.
- */
- trans = start_transaction(root, num_items, TRANS_START,
- BTRFS_RESERVE_FLUSH_ALL, false);
- if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
- return trans;
-
- trans = btrfs_start_transaction(root, 0);
- if (IS_ERR(trans))
- return trans;
-
- num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_items);
- ret = btrfs_cond_migrate_bytes(fs_info, &fs_info->trans_block_rsv,
- num_bytes, min_factor);
- if (ret) {
- btrfs_end_transaction(trans);
- return ERR_PTR(ret);
- }
-
- trans->block_rsv = &fs_info->trans_block_rsv;
- trans->bytes_reserved = num_bytes;
- trace_btrfs_space_reservation(fs_info, "transaction",
- trans->transid, num_bytes, 1);
-
- return trans;
+ return start_transaction(root, num_items, TRANS_START,
+ BTRFS_RESERVE_FLUSH_ALL_STEAL, false);
}
struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
@@ -1644,7 +1630,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
}
key.offset = (u64)-1;
- pending->snap = btrfs_get_fs_root(fs_info, &key, true);
+ pending->snap = btrfs_get_fs_root(fs_info, objectid, true);
if (IS_ERR(pending->snap)) {
ret = PTR_ERR(pending->snap);
btrfs_abort_transaction(trans, ret);
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 31ae8d273065..bf102e64bfb2 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -193,8 +193,7 @@ struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
unsigned int num_items);
struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
struct btrfs_root *root,
- unsigned int num_items,
- int min_factor);
+ unsigned int num_items);
struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_join_transaction_spacecache(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root);
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index a92f8a6dd192..517b44300a05 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -957,10 +957,6 @@ static int check_dev_item(struct extent_buffer *leaf,
return 0;
}
-/* Inode item error output has the same format as dir_item_err() */
-#define inode_item_err(eb, slot, fmt, ...) \
- dir_item_err(eb, slot, fmt, __VA_ARGS__)
-
static int check_inode_item(struct extent_buffer *leaf,
struct btrfs_key *key, int slot)
{
diff --git a/fs/btrfs/tree-defrag.c b/fs/btrfs/tree-defrag.c
index 5f9e2dd413af..16c3a6d2586d 100644
--- a/fs/btrfs/tree-defrag.c
+++ b/fs/btrfs/tree-defrag.c
@@ -35,7 +35,7 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
goto out;
}
- if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
+ if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
goto out;
path = btrfs_alloc_path();
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 02ebdd9edc19..920cee312f4e 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -505,13 +505,8 @@ insert:
*/
if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) &&
- ino_size != 0) {
- struct btrfs_map_token token;
-
- btrfs_init_map_token(&token, dst_eb);
- btrfs_set_token_inode_size(dst_eb, dst_item,
- ino_size, &token);
- }
+ ino_size != 0)
+ btrfs_set_inode_size(dst_eb, dst_item, ino_size);
goto no_copy;
}
@@ -555,13 +550,9 @@ no_copy:
static noinline struct inode *read_one_inode(struct btrfs_root *root,
u64 objectid)
{
- struct btrfs_key key;
struct inode *inode;
- key.objectid = objectid;
- key.type = BTRFS_INODE_ITEM_KEY;
- key.offset = 0;
- inode = btrfs_iget(root->fs_info->sb, &key, root);
+ inode = btrfs_iget(root->fs_info->sb, objectid, root);
if (IS_ERR(inode))
inode = NULL;
return inode;
@@ -3299,6 +3290,7 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
clear_extent_bits(&log->dirty_log_pages, 0, (u64)-1,
EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT);
+ extent_io_tree_release(&log->log_csum_range);
btrfs_put_root(log);
}
@@ -3816,8 +3808,7 @@ static int drop_objectid_items(struct btrfs_trans_handle *trans,
found_key.offset = 0;
found_key.type = 0;
- ret = btrfs_bin_search(path->nodes[0], &found_key, 0,
- &start_slot);
+ ret = btrfs_bin_search(path->nodes[0], &found_key, &start_slot);
if (ret < 0)
break;
@@ -3853,44 +3844,41 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
* just to say 'this inode exists' and a logging
* to say 'update this inode with these values'
*/
- btrfs_set_token_inode_generation(leaf, item, 0, &token);
- btrfs_set_token_inode_size(leaf, item, logged_isize, &token);
+ btrfs_set_token_inode_generation(&token, item, 0);
+ btrfs_set_token_inode_size(&token, item, logged_isize);
} else {
- btrfs_set_token_inode_generation(leaf, item,
- BTRFS_I(inode)->generation,
- &token);
- btrfs_set_token_inode_size(leaf, item, inode->i_size, &token);
- }
-
- btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
- btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
- btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
- btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
-
- btrfs_set_token_timespec_sec(leaf, &item->atime,
- inode->i_atime.tv_sec, &token);
- btrfs_set_token_timespec_nsec(leaf, &item->atime,
- inode->i_atime.tv_nsec, &token);
-
- btrfs_set_token_timespec_sec(leaf, &item->mtime,
- inode->i_mtime.tv_sec, &token);
- btrfs_set_token_timespec_nsec(leaf, &item->mtime,
- inode->i_mtime.tv_nsec, &token);
-
- btrfs_set_token_timespec_sec(leaf, &item->ctime,
- inode->i_ctime.tv_sec, &token);
- btrfs_set_token_timespec_nsec(leaf, &item->ctime,
- inode->i_ctime.tv_nsec, &token);
-
- btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
- &token);
-
- btrfs_set_token_inode_sequence(leaf, item,
- inode_peek_iversion(inode), &token);
- btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
- btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
- btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
- btrfs_set_token_inode_block_group(leaf, item, 0, &token);
+ btrfs_set_token_inode_generation(&token, item,
+ BTRFS_I(inode)->generation);
+ btrfs_set_token_inode_size(&token, item, inode->i_size);
+ }
+
+ btrfs_set_token_inode_uid(&token, item, i_uid_read(inode));
+ btrfs_set_token_inode_gid(&token, item, i_gid_read(inode));
+ btrfs_set_token_inode_mode(&token, item, inode->i_mode);
+ btrfs_set_token_inode_nlink(&token, item, inode->i_nlink);
+
+ btrfs_set_token_timespec_sec(&token, &item->atime,
+ inode->i_atime.tv_sec);
+ btrfs_set_token_timespec_nsec(&token, &item->atime,
+ inode->i_atime.tv_nsec);
+
+ btrfs_set_token_timespec_sec(&token, &item->mtime,
+ inode->i_mtime.tv_sec);
+ btrfs_set_token_timespec_nsec(&token, &item->mtime,
+ inode->i_mtime.tv_nsec);
+
+ btrfs_set_token_timespec_sec(&token, &item->ctime,
+ inode->i_ctime.tv_sec);
+ btrfs_set_token_timespec_nsec(&token, &item->ctime,
+ inode->i_ctime.tv_nsec);
+
+ btrfs_set_token_inode_nbytes(&token, item, inode_get_bytes(inode));
+
+ btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode));
+ btrfs_set_token_inode_transid(&token, item, trans->transid);
+ btrfs_set_token_inode_rdev(&token, item, inode->i_rdev);
+ btrfs_set_token_inode_flags(&token, item, BTRFS_I(inode)->flags);
+ btrfs_set_token_inode_block_group(&token, item, 0);
}
static int log_inode_item(struct btrfs_trans_handle *trans,
@@ -3916,9 +3904,21 @@ static int log_csums(struct btrfs_trans_handle *trans,
struct btrfs_root *log_root,
struct btrfs_ordered_sum *sums)
{
+ const u64 lock_end = sums->bytenr + sums->len - 1;
+ struct extent_state *cached_state = NULL;
int ret;
/*
+ * Serialize logging for checksums. This is to avoid racing with the
+ * same checksum being logged by another task that is logging another
+ * file which happens to refer to the same extent as well. Such races
+ * can leave checksum items in the log with overlapping ranges.
+ */
+ ret = lock_extent_bits(&log_root->log_csum_range, sums->bytenr,
+ lock_end, &cached_state);
+ if (ret)
+ return ret;
+ /*
* Due to extent cloning, we might have logged a csum item that covers a
* subrange of a cloned extent, and later we can end up logging a csum
* item for a larger subrange of the same extent or the entire range.
@@ -3928,10 +3928,13 @@ static int log_csums(struct btrfs_trans_handle *trans,
* trim and adjust) any existing csum items in the log for this range.
*/
ret = btrfs_del_csums(trans, log_root, sums->bytenr, sums->len);
- if (ret)
- return ret;
+ if (!ret)
+ ret = btrfs_csum_file_blocks(trans, log_root, sums);
- return btrfs_csum_file_blocks(trans, log_root, sums);
+ unlock_extent_cached(&log_root->log_csum_range, sums->bytenr, lock_end,
+ &cached_state);
+
+ return ret;
}
static noinline int copy_items(struct btrfs_trans_handle *trans,
@@ -4164,43 +4167,35 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
- btrfs_set_token_file_extent_generation(leaf, fi, trans->transid,
- &token);
+ btrfs_set_token_file_extent_generation(&token, fi, trans->transid);
if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
- btrfs_set_token_file_extent_type(leaf, fi,
- BTRFS_FILE_EXTENT_PREALLOC,
- &token);
+ btrfs_set_token_file_extent_type(&token, fi,
+ BTRFS_FILE_EXTENT_PREALLOC);
else
- btrfs_set_token_file_extent_type(leaf, fi,
- BTRFS_FILE_EXTENT_REG,
- &token);
+ btrfs_set_token_file_extent_type(&token, fi,
+ BTRFS_FILE_EXTENT_REG);
block_len = max(em->block_len, em->orig_block_len);
if (em->compress_type != BTRFS_COMPRESS_NONE) {
- btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
- em->block_start,
- &token);
- btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
- &token);
+ btrfs_set_token_file_extent_disk_bytenr(&token, fi,
+ em->block_start);
+ btrfs_set_token_file_extent_disk_num_bytes(&token, fi, block_len);
} else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
- btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
+ btrfs_set_token_file_extent_disk_bytenr(&token, fi,
em->block_start -
- extent_offset, &token);
- btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
- &token);
+ extent_offset);
+ btrfs_set_token_file_extent_disk_num_bytes(&token, fi, block_len);
} else {
- btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token);
- btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0,
- &token);
- }
-
- btrfs_set_token_file_extent_offset(leaf, fi, extent_offset, &token);
- btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token);
- btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->ram_bytes, &token);
- btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type,
- &token);
- btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token);
- btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token);
+ btrfs_set_token_file_extent_disk_bytenr(&token, fi, 0);
+ btrfs_set_token_file_extent_disk_num_bytes(&token, fi, 0);
+ }
+
+ btrfs_set_token_file_extent_offset(&token, fi, extent_offset);
+ btrfs_set_token_file_extent_num_bytes(&token, fi, em->len);
+ btrfs_set_token_file_extent_ram_bytes(&token, fi, em->ram_bytes);
+ btrfs_set_token_file_extent_compression(&token, fi, em->compress_type);
+ btrfs_set_token_file_extent_encryption(&token, fi, 0);
+ btrfs_set_token_file_extent_other_encoding(&token, fi, 0);
btrfs_mark_buffer_dirty(leaf);
btrfs_release_path(path);
@@ -4336,12 +4331,9 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
}
}
}
- if (ins_nr > 0) {
+ if (ins_nr > 0)
ret = copy_items(trans, inode, dst_path, path,
start_slot, ins_nr, 1, 0);
- if (ret > 0)
- ret = 0;
- }
out:
btrfs_release_path(path);
btrfs_free_path(dst_path);
@@ -4835,10 +4827,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
btrfs_release_path(path);
- key.objectid = ino;
- key.type = BTRFS_INODE_ITEM_KEY;
- key.offset = 0;
- inode = btrfs_iget(fs_info->sb, &key, root);
+ inode = btrfs_iget(fs_info->sb, ino, root);
/*
* If the other inode that had a conflicting dir entry was
* deleted in the current transaction, we need to log its parent
@@ -4847,8 +4836,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
if (IS_ERR(inode)) {
ret = PTR_ERR(inode);
if (ret == -ENOENT) {
- key.objectid = parent;
- inode = btrfs_iget(fs_info->sb, &key, root);
+ inode = btrfs_iget(fs_info->sb, parent, root);
if (IS_ERR(inode)) {
ret = PTR_ERR(inode);
} else {
@@ -5587,7 +5575,7 @@ process_leaf:
continue;
btrfs_release_path(path);
- di_inode = btrfs_iget(fs_info->sb, &di_key, root);
+ di_inode = btrfs_iget(fs_info->sb, di_key.objectid, root);
if (IS_ERR(di_inode)) {
ret = PTR_ERR(di_inode);
goto next_dir_inode;
@@ -5713,7 +5701,8 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
cur_offset = item_size;
}
- dir_inode = btrfs_iget(fs_info->sb, &inode_key, root);
+ dir_inode = btrfs_iget(fs_info->sb, inode_key.objectid,
+ root);
/*
* If the parent inode was deleted, return an error to
* fallback to a transaction commit. This is to prevent
@@ -5780,14 +5769,17 @@ static int log_new_ancestors(struct btrfs_trans_handle *trans,
int slot = path->slots[0];
struct btrfs_key search_key;
struct inode *inode;
+ u64 ino;
int ret = 0;
btrfs_release_path(path);
+ ino = found_key.offset;
+
search_key.objectid = found_key.offset;
search_key.type = BTRFS_INODE_ITEM_KEY;
search_key.offset = 0;
- inode = btrfs_iget(fs_info->sb, &search_key, root);
+ inode = btrfs_iget(fs_info->sb, ino, root);
if (IS_ERR(inode))
return PTR_ERR(inode);
@@ -6132,7 +6124,6 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
struct btrfs_trans_handle *trans;
struct btrfs_key key;
struct btrfs_key found_key;
- struct btrfs_key tmp_key;
struct btrfs_root *log;
struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
struct walk_control wc = {
@@ -6194,11 +6185,8 @@ again:
goto error;
}
- tmp_key.objectid = found_key.offset;
- tmp_key.type = BTRFS_ROOT_ITEM_KEY;
- tmp_key.offset = (u64)-1;
-
- wc.replay_dest = btrfs_get_fs_root(fs_info, &tmp_key, true);
+ wc.replay_dest = btrfs_get_fs_root(fs_info, found_key.offset,
+ true);
if (IS_ERR(wc.replay_dest)) {
ret = PTR_ERR(wc.replay_dest);
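
The log_csums() change above takes a range lock so that the delete-then-insert of checksum items is atomic against another task logging the same extent. A simplified sketch of that idea, with one pthread mutex standing in for the per-range extent_io_tree lock (so it serializes all ranges, not just overlapping ones; all names here are hypothetical):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t csum_range_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-ins for btrfs_del_csums()/btrfs_csum_file_blocks(). */
static int del_overlapping_csums(unsigned long long start, unsigned long long len)
{
	printf("del  [%llu, %llu)\n", start, start + len);
	return 0;
}

static int insert_csums(unsigned long long start, unsigned long long len)
{
	printf("add  [%llu, %llu)\n", start, start + len);
	return 0;
}

/*
 * Without the lock, two loggers could both delete and then both insert,
 * leaving overlapping items. Holding the lock across both steps makes
 * the pair atomic, mirroring lock_extent_bits()/unlock_extent_cached().
 */
static int log_csums(unsigned long long start, unsigned long long len)
{
	int ret;

	pthread_mutex_lock(&csum_range_lock);
	ret = del_overlapping_csums(start, len);
	if (!ret)
		ret = insert_csums(start, len);
	pthread_mutex_unlock(&csum_range_lock);
	return ret;
}

int main(void)
{
	return log_csums(4096, 8192);
}
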
diff --git a/fs/btrfs/uuid-tree.c b/fs/btrfs/uuid-tree.c
index 76671a6bcb61..28525ad7ff8c 100644
--- a/fs/btrfs/uuid-tree.c
+++ b/fs/btrfs/uuid-tree.c
@@ -257,7 +257,6 @@ out:
static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info,
u8 *uuid, u8 type, u64 subvolid)
{
- struct btrfs_key key;
int ret = 0;
struct btrfs_root *subvol_root;
@@ -265,10 +264,7 @@ static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info,
type != BTRFS_UUID_KEY_RECEIVED_SUBVOL)
goto out;
- key.objectid = subvolid;
- key.type = BTRFS_ROOT_ITEM_KEY;
- key.offset = (u64)-1;
- subvol_root = btrfs_get_fs_root(fs_info, &key, true);
+ subvol_root = btrfs_get_fs_root(fs_info, subvolid, true);
if (IS_ERR(subvol_root)) {
ret = PTR_ERR(subvol_root);
if (ret == -ENOENT)
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index c1909e5f4506..0d6e785bcb98 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -280,10 +280,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
* ============
*
* uuid_mutex
- * volume_mutex
- * device_list_mutex
- * chunk_mutex
- * balance_mutex
+ * device_list_mutex
+ * chunk_mutex
+ * balance_mutex
*
*
* Exclusive operations, BTRFS_FS_EXCL_OP
@@ -1042,6 +1041,8 @@ again:
&device->dev_state)) {
if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
&device->dev_state) &&
+ !test_bit(BTRFS_DEV_STATE_MISSING,
+ &device->dev_state) &&
(!latest_dev ||
device->generation > latest_dev->generation)) {
latest_dev = device;
@@ -1185,7 +1186,6 @@ static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
{
struct btrfs_device *device;
struct btrfs_device *latest_dev = NULL;
- int ret = 0;
flags |= FMODE_EXCL;
@@ -1198,16 +1198,15 @@ static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
device->generation > latest_dev->generation)
latest_dev = device;
}
- if (fs_devices->open_devices == 0) {
- ret = -EINVAL;
- goto out;
- }
+ if (fs_devices->open_devices == 0)
+ return -EINVAL;
+
fs_devices->opened = 1;
fs_devices->latest_bdev = latest_dev->bdev;
fs_devices->total_rw_bytes = 0;
fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR;
-out:
- return ret;
+
+ return 0;
}
static int devid_cmp(void *priv, struct list_head *a, struct list_head *b)
@@ -1251,49 +1250,48 @@ void btrfs_release_disk_super(struct btrfs_super_block *super)
put_page(page);
}
-static int btrfs_read_disk_super(struct block_device *bdev, u64 bytenr,
- struct page **page,
- struct btrfs_super_block **disk_super)
+static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
+ u64 bytenr)
{
+ struct btrfs_super_block *disk_super;
+ struct page *page;
void *p;
pgoff_t index;
/* make sure our super fits in the device */
if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
- return 1;
+ return ERR_PTR(-EINVAL);
/* make sure our super fits in the page */
- if (sizeof(**disk_super) > PAGE_SIZE)
- return 1;
+ if (sizeof(*disk_super) > PAGE_SIZE)
+ return ERR_PTR(-EINVAL);
/* make sure our super doesn't straddle pages on disk */
index = bytenr >> PAGE_SHIFT;
- if ((bytenr + sizeof(**disk_super) - 1) >> PAGE_SHIFT != index)
- return 1;
+ if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
+ return ERR_PTR(-EINVAL);
/* pull in the page with our super */
- *page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
- index, GFP_KERNEL);
+ page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL);
- if (IS_ERR(*page))
- return 1;
+ if (IS_ERR(page))
+ return ERR_CAST(page);
- p = page_address(*page);
+ p = page_address(page);
/* align our pointer to the offset of the super block */
- *disk_super = p + offset_in_page(bytenr);
+ disk_super = p + offset_in_page(bytenr);
- if (btrfs_super_bytenr(*disk_super) != bytenr ||
- btrfs_super_magic(*disk_super) != BTRFS_MAGIC) {
+ if (btrfs_super_bytenr(disk_super) != bytenr ||
+ btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
btrfs_release_disk_super(p);
- return 1;
+ return ERR_PTR(-EINVAL);
}
- if ((*disk_super)->label[0] &&
- (*disk_super)->label[BTRFS_LABEL_SIZE - 1])
- (*disk_super)->label[BTRFS_LABEL_SIZE - 1] = '\0';
+ if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1])
+ disk_super->label[BTRFS_LABEL_SIZE - 1] = 0;
- return 0;
+ return disk_super;
}
int btrfs_forget_devices(const char *path)
@@ -1319,7 +1317,6 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
bool new_device_added = false;
struct btrfs_device *device = NULL;
struct block_device *bdev;
- struct page *page;
u64 bytenr;
lockdep_assert_held(&uuid_mutex);
@@ -1337,8 +1334,9 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
if (IS_ERR(bdev))
return ERR_CAST(bdev);
- if (btrfs_read_disk_super(bdev, bytenr, &page, &disk_super)) {
- device = ERR_PTR(-EINVAL);
+ disk_super = btrfs_read_disk_super(bdev, bytenr);
+ if (IS_ERR(disk_super)) {
+ device = ERR_CAST(disk_super);
goto error_bdev_put;
}
@@ -2663,8 +2661,18 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
ret = btrfs_commit_transaction(trans);
}
- /* Update ctime/mtime for libblkid */
+ /*
+ * Now that we have written a new super block to this device, check all
+ * other fs_devices lists to see whether device_path now alienates any
+ * other scanned device.
+ * We can ignore the return value as it typically returns -EINVAL and
+ * only succeeds if the device was an alien.
+ */
+ btrfs_forget_devices(device_path);
+
+ /* Update ctime/mtime for blkid or udev */
update_dev_time(device_path);
+
return ret;
error_sysfs:
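
btrfs_read_disk_super() above moves from an int return plus out-parameters to returning either a valid pointer or an encoded errno. A self-contained sketch of that ERR_PTR/IS_ERR convention, re-implemented for userspace (the kernel provides these helpers in <linux/err.h>):

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	/* Small negative errnos map to the top of the address space. */
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

struct super { int magic; };

/* Return a valid object or an encoded errno, never NULL. */
static struct super *read_super(int ok)
{
	struct super *s;

	if (!ok)
		return ERR_PTR(-EINVAL);
	s = malloc(sizeof(*s));
	if (!s)
		return ERR_PTR(-ENOMEM);
	s->magic = 0x4d5f53;
	return s;
}

int main(void)
{
	struct super *s = read_super(0);

	if (IS_ERR(s)) {
		printf("error %ld\n", PTR_ERR(s)); /* prints -22 */
		s = read_super(1);
	}
	if (!IS_ERR(s)) {
		printf("magic %#x\n", s->magic);
		free(s);
	}
	return 0;
}
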
diff --git a/fs/ceph/Makefile b/fs/ceph/Makefile
index 0a0823d378db..50c635dc7f71 100644
--- a/fs/ceph/Makefile
+++ b/fs/ceph/Makefile
@@ -8,7 +8,7 @@ obj-$(CONFIG_CEPH_FS) += ceph.o
ceph-y := super.o inode.o dir.o file.o locks.o addr.o ioctl.o \
export.o caps.o snap.o xattr.o quota.o io.o \
mds_client.o mdsmap.o strings.o ceph_frag.o \
- debugfs.o util.o
+ debugfs.o util.o metric.o
ceph-$(CONFIG_CEPH_FSCACHE) += cache.o
ceph-$(CONFIG_CEPH_FS_POSIX_ACL) += acl.o
diff --git a/fs/ceph/acl.c b/fs/ceph/acl.c
index 26be6520d3fb..e0465741c591 100644
--- a/fs/ceph/acl.c
+++ b/fs/ceph/acl.c
@@ -22,7 +22,7 @@ static inline void ceph_set_cached_acl(struct inode *inode,
struct ceph_inode_info *ci = ceph_inode(inode);
spin_lock(&ci->i_ceph_lock);
- if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 0))
+ if (__ceph_caps_issued_mask_metric(ci, CEPH_CAP_XATTR_SHARED, 0))
set_cached_acl(inode, type, acl);
else
forget_cached_acl(inode, type);
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 6f4678d98df7..01ad09733ac7 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -11,10 +11,12 @@
#include <linux/task_io_accounting_ops.h>
#include <linux/signal.h>
#include <linux/iversion.h>
+#include <linux/ktime.h>
#include "super.h"
#include "mds_client.h"
#include "cache.h"
+#include "metric.h"
#include <linux/ceph/osd_client.h>
#include <linux/ceph/striper.h>
@@ -216,6 +218,9 @@ static int ceph_sync_readpages(struct ceph_fs_client *fsc,
if (!rc)
rc = ceph_osdc_wait_request(osdc, req);
+ ceph_update_read_latency(&fsc->mdsc->metric, req->r_start_latency,
+ req->r_end_latency, rc);
+
ceph_osdc_put_request(req);
dout("readpages result %d\n", rc);
return rc;
@@ -299,6 +304,7 @@ static int ceph_readpage(struct file *filp, struct page *page)
static void finish_read(struct ceph_osd_request *req)
{
struct inode *inode = req->r_inode;
+ struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_osd_data *osd_data;
int rc = req->r_result <= 0 ? req->r_result : 0;
int bytes = req->r_result >= 0 ? req->r_result : 0;
@@ -336,6 +342,10 @@ unlock:
put_page(page);
bytes -= PAGE_SIZE;
}
+
+ ceph_update_read_latency(&fsc->mdsc->metric, req->r_start_latency,
+ req->r_end_latency, rc);
+
kfree(osd_data->pages);
}
@@ -643,6 +653,9 @@ static int ceph_sync_writepages(struct ceph_fs_client *fsc,
if (!rc)
rc = ceph_osdc_wait_request(osdc, req);
+ ceph_update_write_latency(&fsc->mdsc->metric, req->r_start_latency,
+ req->r_end_latency, rc);
+
ceph_osdc_put_request(req);
if (rc == 0)
rc = len;
@@ -794,6 +807,9 @@ static void writepages_finish(struct ceph_osd_request *req)
ceph_clear_error_write(ci);
}
+ ceph_update_write_latency(&fsc->mdsc->metric, req->r_start_latency,
+ req->r_end_latency, rc);
+
/*
* We lost the cache cap, need to truncate the page before
* it is unlocked, otherwise we'd truncate it later in the
@@ -1852,6 +1868,10 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page)
err = ceph_osdc_start_request(&fsc->client->osdc, req, false);
if (!err)
err = ceph_osdc_wait_request(&fsc->client->osdc, req);
+
+ ceph_update_write_latency(&fsc->mdsc->metric, req->r_start_latency,
+ req->r_end_latency, err);
+
out_put:
ceph_osdc_put_request(req);
if (err == -ECANCELED)
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index f1acde6fb9a6..972c13aa4225 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -597,6 +597,27 @@ static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
}
}
+/**
+ * change_auth_cap_ses - move inode to appropriate lists when auth caps change
+ * @ci: inode to be moved
+ * @session: new auth caps session
+ */
+static void change_auth_cap_ses(struct ceph_inode_info *ci,
+ struct ceph_mds_session *session)
+{
+ lockdep_assert_held(&ci->i_ceph_lock);
+
+ if (list_empty(&ci->i_dirty_item) && list_empty(&ci->i_flushing_item))
+ return;
+
+ spin_lock(&session->s_mdsc->cap_dirty_lock);
+ if (!list_empty(&ci->i_dirty_item))
+ list_move(&ci->i_dirty_item, &session->s_cap_dirty);
+ if (!list_empty(&ci->i_flushing_item))
+ list_move_tail(&ci->i_flushing_item, &session->s_cap_flushing);
+ spin_unlock(&session->s_mdsc->cap_dirty_lock);
+}
+
/*
* Add a capability under the given MDS session.
*
@@ -727,6 +748,9 @@ void ceph_add_cap(struct inode *inode,
if (flags & CEPH_CAP_FLAG_AUTH) {
if (!ci->i_auth_cap ||
ceph_seq_cmp(ci->i_auth_cap->mseq, mseq) < 0) {
+ if (ci->i_auth_cap &&
+ ci->i_auth_cap->session != cap->session)
+ change_auth_cap_ses(ci, cap->session);
ci->i_auth_cap = cap;
cap->mds_wanted = wanted;
}
@@ -912,6 +936,20 @@ int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
return 0;
}
+int __ceph_caps_issued_mask_metric(struct ceph_inode_info *ci, int mask,
+ int touch)
+{
+ struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
+ int r;
+
+ r = __ceph_caps_issued_mask(ci, mask, touch);
+ if (r)
+ ceph_update_cap_hit(&fsc->mdsc->metric);
+ else
+ ceph_update_cap_mis(&fsc->mdsc->metric);
+ return r;
+}
+
/*
* Return true if mask caps are currently being revoked by an MDS.
*/
@@ -1109,8 +1147,10 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
/* remove from inode's cap rbtree, and clear auth cap */
rb_erase(&cap->ci_node, &ci->i_caps);
- if (ci->i_auth_cap == cap)
+ if (ci->i_auth_cap == cap) {
+ WARN_ON_ONCE(!list_empty(&ci->i_dirty_item));
ci->i_auth_cap = NULL;
+ }
/* remove from session list */
spin_lock(&session->s_cap_lock);
@@ -1167,6 +1207,7 @@ struct cap_msg_args {
u64 xattr_version;
u64 change_attr;
struct ceph_buffer *xattr_buf;
+ struct ceph_buffer *old_xattr_buf;
struct timespec64 atime, mtime, ctime, btime;
int op, caps, wanted, dirty;
u32 seq, issue_seq, mseq, time_warp_seq;
@@ -1175,6 +1216,7 @@ struct cap_msg_args {
kgid_t gid;
umode_t mode;
bool inline_data;
+ bool wake;
};
/*
@@ -1304,44 +1346,29 @@ void __ceph_remove_caps(struct ceph_inode_info *ci)
}
/*
- * Send a cap msg on the given inode. Update our caps state, then
- * drop i_ceph_lock and send the message.
+ * Prepare to send a cap message to an MDS. Update the cap state, and populate
+ * the arg struct with the parameters that will need to be sent. This should
+ * be done under the i_ceph_lock to guard against changes to cap state.
*
* Make note of max_size reported/requested from mds, revoked caps
* that have now been implemented.
- *
- * Return non-zero if delayed release, or we experienced an error
- * such that the caller should requeue + retry later.
- *
- * called with i_ceph_lock, then drops it.
- * caller should hold snap_rwsem (read), s_mutex.
*/
-static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
- int op, int flags, int used, int want, int retain,
- int flushing, u64 flush_tid, u64 oldest_flush_tid)
- __releases(cap->ci->i_ceph_lock)
+static void __prep_cap(struct cap_msg_args *arg, struct ceph_cap *cap,
+ int op, int flags, int used, int want, int retain,
+ int flushing, u64 flush_tid, u64 oldest_flush_tid)
{
struct ceph_inode_info *ci = cap->ci;
struct inode *inode = &ci->vfs_inode;
- struct ceph_buffer *old_blob = NULL;
- struct cap_msg_args arg;
int held, revoking;
- int wake = 0;
- int ret;
- /* Don't send anything if it's still being created. Return delayed */
- if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE) {
- spin_unlock(&ci->i_ceph_lock);
- dout("%s async create in flight for %p\n", __func__, inode);
- return 1;
- }
+ lockdep_assert_held(&ci->i_ceph_lock);
held = cap->issued | cap->implemented;
revoking = cap->implemented & ~cap->issued;
retain &= ~revoking;
- dout("__send_cap %p cap %p session %p %s -> %s (revoking %s)\n",
- inode, cap, cap->session,
+ dout("%s %p cap %p session %p %s -> %s (revoking %s)\n",
+ __func__, inode, cap, cap->session,
ceph_cap_string(held), ceph_cap_string(held & retain),
ceph_cap_string(revoking));
BUG_ON((retain & CEPH_CAP_PIN) == 0);
@@ -1349,60 +1376,62 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
ci->i_ceph_flags &= ~CEPH_I_FLUSH;
cap->issued &= retain; /* drop bits we don't want */
- if (cap->implemented & ~cap->issued) {
- /*
- * Wake up any waiters on wanted -> needed transition.
- * This is due to the weird transition from buffered
- * to sync IO... we need to flush dirty pages _before_
- * allowing sync writes to avoid reordering.
- */
- wake = 1;
- }
+ /*
+ * Wake up any waiters on wanted -> needed transition. This is due to
+ * the weird transition from buffered to sync IO... we need to flush
+ * dirty pages _before_ allowing sync writes to avoid reordering.
+ */
+ arg->wake = cap->implemented & ~cap->issued;
cap->implemented &= cap->issued | used;
cap->mds_wanted = want;
- arg.session = cap->session;
- arg.ino = ceph_vino(inode).ino;
- arg.cid = cap->cap_id;
- arg.follows = flushing ? ci->i_head_snapc->seq : 0;
- arg.flush_tid = flush_tid;
- arg.oldest_flush_tid = oldest_flush_tid;
-
- arg.size = inode->i_size;
- ci->i_reported_size = arg.size;
- arg.max_size = ci->i_wanted_max_size;
- if (cap == ci->i_auth_cap)
- ci->i_requested_max_size = arg.max_size;
+ arg->session = cap->session;
+ arg->ino = ceph_vino(inode).ino;
+ arg->cid = cap->cap_id;
+ arg->follows = flushing ? ci->i_head_snapc->seq : 0;
+ arg->flush_tid = flush_tid;
+ arg->oldest_flush_tid = oldest_flush_tid;
+
+ arg->size = inode->i_size;
+ ci->i_reported_size = arg->size;
+ arg->max_size = ci->i_wanted_max_size;
+ if (cap == ci->i_auth_cap) {
+ if (want & CEPH_CAP_ANY_FILE_WR)
+ ci->i_requested_max_size = arg->max_size;
+ else
+ ci->i_requested_max_size = 0;
+ }
if (flushing & CEPH_CAP_XATTR_EXCL) {
- old_blob = __ceph_build_xattrs_blob(ci);
- arg.xattr_version = ci->i_xattrs.version;
- arg.xattr_buf = ci->i_xattrs.blob;
+ arg->old_xattr_buf = __ceph_build_xattrs_blob(ci);
+ arg->xattr_version = ci->i_xattrs.version;
+ arg->xattr_buf = ci->i_xattrs.blob;
} else {
- arg.xattr_buf = NULL;
+ arg->xattr_buf = NULL;
+ arg->old_xattr_buf = NULL;
}
- arg.mtime = inode->i_mtime;
- arg.atime = inode->i_atime;
- arg.ctime = inode->i_ctime;
- arg.btime = ci->i_btime;
- arg.change_attr = inode_peek_iversion_raw(inode);
+ arg->mtime = inode->i_mtime;
+ arg->atime = inode->i_atime;
+ arg->ctime = inode->i_ctime;
+ arg->btime = ci->i_btime;
+ arg->change_attr = inode_peek_iversion_raw(inode);
- arg.op = op;
- arg.caps = cap->implemented;
- arg.wanted = want;
- arg.dirty = flushing;
+ arg->op = op;
+ arg->caps = cap->implemented;
+ arg->wanted = want;
+ arg->dirty = flushing;
- arg.seq = cap->seq;
- arg.issue_seq = cap->issue_seq;
- arg.mseq = cap->mseq;
- arg.time_warp_seq = ci->i_time_warp_seq;
+ arg->seq = cap->seq;
+ arg->issue_seq = cap->issue_seq;
+ arg->mseq = cap->mseq;
+ arg->time_warp_seq = ci->i_time_warp_seq;
- arg.uid = inode->i_uid;
- arg.gid = inode->i_gid;
- arg.mode = inode->i_mode;
+ arg->uid = inode->i_uid;
+ arg->gid = inode->i_gid;
+ arg->mode = inode->i_mode;
- arg.inline_data = ci->i_inline_version != CEPH_INLINE_NONE;
+ arg->inline_data = ci->i_inline_version != CEPH_INLINE_NONE;
if (!(flags & CEPH_CLIENT_CAPS_PENDING_CAPSNAP) &&
!list_empty(&ci->i_cap_snaps)) {
struct ceph_cap_snap *capsnap;
@@ -1415,27 +1444,35 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
}
}
}
- arg.flags = flags;
-
- spin_unlock(&ci->i_ceph_lock);
+ arg->flags = flags;
+}
- ceph_buffer_put(old_blob);
+/*
+ * Send a cap msg on the given inode.
+ *
+ * Caller should hold snap_rwsem (read), s_mutex.
+ */
+static void __send_cap(struct ceph_mds_client *mdsc, struct cap_msg_args *arg,
+ struct ceph_inode_info *ci)
+{
+ struct inode *inode = &ci->vfs_inode;
+ int ret;
- ret = send_cap_msg(&arg);
+ ret = send_cap_msg(arg);
if (ret < 0) {
pr_err("error sending cap msg, ino (%llx.%llx) "
"flushing %s tid %llu, requeue\n",
- ceph_vinop(inode), ceph_cap_string(flushing),
- flush_tid);
+ ceph_vinop(inode), ceph_cap_string(arg->dirty),
+ arg->flush_tid);
spin_lock(&ci->i_ceph_lock);
__cap_delay_requeue(mdsc, ci);
spin_unlock(&ci->i_ceph_lock);
}
- if (wake)
- wake_up_all(&ci->i_cap_wq);
+ ceph_buffer_put(arg->old_xattr_buf);
- return ret;
+ if (arg->wake)
+ wake_up_all(&ci->i_cap_wq);
}
static inline int __send_flush_snap(struct inode *inode,
@@ -1456,6 +1493,7 @@ static inline int __send_flush_snap(struct inode *inode,
arg.max_size = 0;
arg.xattr_version = capsnap->xattr_version;
arg.xattr_buf = capsnap->xattr_blob;
+ arg.old_xattr_buf = NULL;
arg.atime = capsnap->atime;
arg.mtime = capsnap->mtime;
@@ -1479,6 +1517,7 @@ static inline int __send_flush_snap(struct inode *inode,
arg.inline_data = capsnap->inline_data;
arg.flags = 0;
+ arg.wake = false;
return send_cap_msg(&arg);
}
@@ -1676,6 +1715,8 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
ceph_cap_string(was | mask));
ci->i_dirty_caps |= mask;
if (was == 0) {
+ struct ceph_mds_session *session = ci->i_auth_cap->session;
+
WARN_ON_ONCE(ci->i_prealloc_cap_flush);
swap(ci->i_prealloc_cap_flush, *pcf);
@@ -1688,7 +1729,7 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
&ci->vfs_inode, ci->i_head_snapc, ci->i_auth_cap);
BUG_ON(!list_empty(&ci->i_dirty_item));
spin_lock(&mdsc->cap_dirty_lock);
- list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
+ list_add(&ci->i_dirty_item, &session->s_cap_dirty);
spin_unlock(&mdsc->cap_dirty_lock);
if (ci->i_flushing_caps == 0) {
ihold(inode);
@@ -1731,30 +1772,33 @@ static u64 __get_oldest_flush_tid(struct ceph_mds_client *mdsc)
* Remove cap_flush from the mdsc's or inode's flushing cap list.
* Return true if caller needs to wake up flush waiters.
*/
-static bool __finish_cap_flush(struct ceph_mds_client *mdsc,
- struct ceph_inode_info *ci,
- struct ceph_cap_flush *cf)
+static bool __detach_cap_flush_from_mdsc(struct ceph_mds_client *mdsc,
+ struct ceph_cap_flush *cf)
{
struct ceph_cap_flush *prev;
bool wake = cf->wake;
- if (mdsc) {
- /* are there older pending cap flushes? */
- if (wake && cf->g_list.prev != &mdsc->cap_flush_list) {
- prev = list_prev_entry(cf, g_list);
- prev->wake = true;
- wake = false;
- }
- list_del(&cf->g_list);
- } else if (ci) {
- if (wake && cf->i_list.prev != &ci->i_cap_flush_list) {
- prev = list_prev_entry(cf, i_list);
- prev->wake = true;
- wake = false;
- }
- list_del(&cf->i_list);
- } else {
- BUG_ON(1);
+
+ if (wake && cf->g_list.prev != &mdsc->cap_flush_list) {
+ prev = list_prev_entry(cf, g_list);
+ prev->wake = true;
+ wake = false;
}
+ list_del(&cf->g_list);
+ return wake;
+}
+
+static bool __detach_cap_flush_from_ci(struct ceph_inode_info *ci,
+ struct ceph_cap_flush *cf)
+{
+ struct ceph_cap_flush *prev;
+ bool wake = cf->wake;
+
+ if (wake && cf->i_list.prev != &ci->i_cap_flush_list) {
+ prev = list_prev_entry(cf, i_list);
+ prev->wake = true;
+ wake = false;
+ }
+ list_del(&cf->i_list);
return wake;
}
@@ -1953,6 +1997,9 @@ retry_locked:
}
for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
+ int mflags = 0;
+ struct cap_msg_args arg;
+
cap = rb_entry(p, struct ceph_cap, ci_node);
/* avoid looping forever */
@@ -2030,12 +2077,24 @@ ack:
if (mutex_trylock(&session->s_mutex) == 0) {
dout("inverting session/ino locks on %p\n",
session);
+ session = ceph_get_mds_session(session);
spin_unlock(&ci->i_ceph_lock);
if (took_snap_rwsem) {
up_read(&mdsc->snap_rwsem);
took_snap_rwsem = 0;
}
- mutex_lock(&session->s_mutex);
+ if (session) {
+ mutex_lock(&session->s_mutex);
+ ceph_put_mds_session(session);
+ } else {
+ /*
+ * Because we take the reference while
+ * holding the i_ceph_lock, it should
+ * never be NULL. Throw a warning if it
+ * ever is.
+ */
+ WARN_ON_ONCE(true);
+ }
goto retry;
}
}
@@ -2070,6 +2129,9 @@ ack:
flushing = ci->i_dirty_caps;
flush_tid = __mark_caps_flushing(inode, session, false,
&oldest_flush_tid);
+ if (flags & CHECK_CAPS_FLUSH &&
+ list_empty(&session->s_cap_dirty))
+ mflags |= CEPH_CLIENT_CAPS_SYNC;
} else {
flushing = 0;
flush_tid = 0;
@@ -2080,9 +2142,12 @@ ack:
mds = cap->mds; /* remember mds, so we don't repeat */
- /* __send_cap drops i_ceph_lock */
- __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, 0, cap_used, want,
- retain, flushing, flush_tid, oldest_flush_tid);
+ __prep_cap(&arg, cap, CEPH_CAP_OP_UPDATE, mflags, cap_used,
+ want, retain, flushing, flush_tid, oldest_flush_tid);
+ spin_unlock(&ci->i_ceph_lock);
+
+ __send_cap(mdsc, &arg, ci);
+
goto retry; /* retake i_ceph_lock and restart our cap scan. */
}
@@ -2121,6 +2186,7 @@ retry:
retry_locked:
if (ci->i_dirty_caps && ci->i_auth_cap) {
struct ceph_cap *cap = ci->i_auth_cap;
+ struct cap_msg_args arg;
if (session != cap->session) {
spin_unlock(&ci->i_ceph_lock);
@@ -2148,11 +2214,13 @@ retry_locked:
flush_tid = __mark_caps_flushing(inode, session, true,
&oldest_flush_tid);
- /* __send_cap drops i_ceph_lock */
- __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, CEPH_CLIENT_CAPS_SYNC,
+ __prep_cap(&arg, cap, CEPH_CAP_OP_FLUSH, CEPH_CLIENT_CAPS_SYNC,
__ceph_caps_used(ci), __ceph_caps_wanted(ci),
(cap->issued | cap->implemented),
flushing, flush_tid, oldest_flush_tid);
+ spin_unlock(&ci->i_ceph_lock);
+
+ __send_cap(mdsc, &arg, ci);
} else {
if (!list_empty(&ci->i_cap_flush_list)) {
struct ceph_cap_flush *cf =
@@ -2354,15 +2422,19 @@ static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
first_tid = cf->tid + 1;
if (cf->caps) {
+ struct cap_msg_args arg;
+
dout("kick_flushing_caps %p cap %p tid %llu %s\n",
inode, cap, cf->tid, ceph_cap_string(cf->caps));
- __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH,
+ __prep_cap(&arg, cap, CEPH_CAP_OP_FLUSH,
(cf->tid < last_snap_flush ?
CEPH_CLIENT_CAPS_PENDING_CAPSNAP : 0),
__ceph_caps_used(ci),
__ceph_caps_wanted(ci),
(cap->issued | cap->implemented),
cf->caps, cf->tid, oldest_flush_tid);
+ spin_unlock(&ci->i_ceph_lock);
+ __send_cap(mdsc, &arg, ci);
} else {
struct ceph_cap_snap *capsnap =
container_of(cf, struct ceph_cap_snap,
@@ -2446,6 +2518,8 @@ void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
struct ceph_cap *cap;
u64 oldest_flush_tid;
+ lockdep_assert_held(&session->s_mutex);
+
dout("kick_flushing_caps mds%d\n", session->s_mds);
spin_lock(&mdsc->cap_dirty_lock);
@@ -2685,6 +2759,11 @@ out_unlock:
if (snap_rwsem_locked)
up_read(&mdsc->snap_rwsem);
+ if (!ret)
+ ceph_update_cap_mis(&mdsc->metric);
+ else if (ret == 1)
+ ceph_update_cap_hit(&mdsc->metric);
+
dout("get_cap_refs %p ret %d got %s\n", inode,
ret, ceph_cap_string(*got));
return ret;
@@ -2937,7 +3016,8 @@ static int ceph_try_drop_cap_snap(struct ceph_inode_info *ci,
* If we are releasing a WR cap (from a sync write), finalize any affected
* cap_snap, and wake up any waiters.
*/
-void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
+static void __ceph_put_cap_refs(struct ceph_inode_info *ci, int had,
+ bool skip_checking_caps)
{
struct inode *inode = &ci->vfs_inode;
int last = 0, put = 0, flushsnaps = 0, wake = 0;
@@ -2993,7 +3073,7 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had),
last ? " last" : "", put ? " put" : "");
- if (last)
+ if (last && !skip_checking_caps)
ceph_check_caps(ci, 0, NULL);
else if (flushsnaps)
ceph_flush_snaps(ci, NULL);
@@ -3003,6 +3083,16 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
iput(inode);
}
+void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
+{
+ __ceph_put_cap_refs(ci, had, false);
+}
+
+void ceph_put_cap_refs_no_check_caps(struct ceph_inode_info *ci, int had)
+{
+ __ceph_put_cap_refs(ci, had, true);
+}
+
/*
* Release @nr WRBUFFER refs on dirty pages for the given @snapc snap
* context. Adjust per-snap dirty page accounting as appropriate.
@@ -3301,10 +3391,6 @@ static void handle_cap_grant(struct inode *inode,
ci->i_requested_max_size = 0;
}
wake = true;
- } else if (ci->i_wanted_max_size > ci->i_max_size &&
- ci->i_wanted_max_size > ci->i_requested_max_size) {
- /* CEPH_CAP_OP_IMPORT */
- wake = true;
}
}
@@ -3380,9 +3466,18 @@ static void handle_cap_grant(struct inode *inode,
fill_inline = true;
}
- if (le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT) {
+ if (ci->i_auth_cap == cap &&
+ le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT) {
if (newcaps & ~extra_info->issued)
wake = true;
+
+ if (ci->i_requested_max_size > max_size ||
+ !(le32_to_cpu(grant->wanted) & CEPH_CAP_ANY_FILE_WR)) {
+ /* re-request max_size if necessary */
+ ci->i_requested_max_size = 0;
+ wake = true;
+ }
+
ceph_kick_flushing_inode_caps(session, ci);
spin_unlock(&ci->i_ceph_lock);
up_read(&session->s_mdsc->snap_rwsem);
@@ -3442,15 +3537,26 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
bool wake_mdsc = false;
list_for_each_entry_safe(cf, tmp_cf, &ci->i_cap_flush_list, i_list) {
+ /* Is this the one that was flushed? */
if (cf->tid == flush_tid)
cleaned = cf->caps;
- if (cf->caps == 0) /* capsnap */
+
+ /* Is this a capsnap? */
+ if (cf->caps == 0)
continue;
+
if (cf->tid <= flush_tid) {
- if (__finish_cap_flush(NULL, ci, cf))
- wake_ci = true;
+ /*
+ * An earlier or current tid. The FLUSH_ACK should
+ * represent a superset of this flush's caps.
+ */
+ wake_ci |= __detach_cap_flush_from_ci(ci, cf);
list_add_tail(&cf->i_list, &to_remove);
} else {
+ /*
+ * This is a later one. Any caps in it are still dirty
+ * so don't count them as cleaned.
+ */
cleaned &= ~cf->caps;
if (!cleaned)
break;
@@ -3470,10 +3576,8 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
spin_lock(&mdsc->cap_dirty_lock);
- list_for_each_entry(cf, &to_remove, i_list) {
- if (__finish_cap_flush(mdsc, NULL, cf))
- wake_mdsc = true;
- }
+ list_for_each_entry(cf, &to_remove, i_list)
+ wake_mdsc |= __detach_cap_flush_from_mdsc(mdsc, cf);
if (ci->i_flushing_caps == 0) {
if (list_empty(&ci->i_cap_flush_list)) {
@@ -3565,17 +3669,15 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
dout(" removing %p cap_snap %p follows %lld\n",
inode, capsnap, follows);
list_del(&capsnap->ci_item);
- if (__finish_cap_flush(NULL, ci, &capsnap->cap_flush))
- wake_ci = true;
+ wake_ci |= __detach_cap_flush_from_ci(ci, &capsnap->cap_flush);
spin_lock(&mdsc->cap_dirty_lock);
if (list_empty(&ci->i_cap_flush_list))
list_del_init(&ci->i_flushing_item);
- if (__finish_cap_flush(mdsc, NULL, &capsnap->cap_flush))
- wake_mdsc = true;
-
+ wake_mdsc |= __detach_cap_flush_from_mdsc(mdsc,
+ &capsnap->cap_flush);
spin_unlock(&mdsc->cap_dirty_lock);
}
spin_unlock(&ci->i_ceph_lock);
@@ -3595,10 +3697,9 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
*
* caller hold s_mutex.
*/
-static void handle_cap_trunc(struct inode *inode,
+static bool handle_cap_trunc(struct inode *inode,
struct ceph_mds_caps *trunc,
struct ceph_mds_session *session)
- __releases(ci->i_ceph_lock)
{
struct ceph_inode_info *ci = ceph_inode(inode);
int mds = session->s_mds;
@@ -3609,7 +3710,9 @@ static void handle_cap_trunc(struct inode *inode,
int implemented = 0;
int dirty = __ceph_caps_dirty(ci);
int issued = __ceph_caps_issued(ceph_inode(inode), &implemented);
- int queue_trunc = 0;
+ bool queue_trunc = false;
+
+ lockdep_assert_held(&ci->i_ceph_lock);
issued |= implemented | dirty;
@@ -3617,10 +3720,7 @@ static void handle_cap_trunc(struct inode *inode,
inode, mds, seq, truncate_size, truncate_seq);
queue_trunc = ceph_fill_file_size(inode, issued,
truncate_seq, truncate_size, size);
- spin_unlock(&ci->i_ceph_lock);
-
- if (queue_trunc)
- ceph_queue_vmtruncate(inode);
+ return queue_trunc;
}
/*
@@ -3694,15 +3794,9 @@ retry:
tcap->issue_seq = t_seq - 1;
tcap->issued |= issued;
tcap->implemented |= issued;
- if (cap == ci->i_auth_cap)
+ if (cap == ci->i_auth_cap) {
ci->i_auth_cap = tcap;
-
- if (!list_empty(&ci->i_cap_flush_list) &&
- ci->i_auth_cap == tcap) {
- spin_lock(&mdsc->cap_dirty_lock);
- list_move_tail(&ci->i_flushing_item,
- &tcap->session->s_cap_flushing);
- spin_unlock(&mdsc->cap_dirty_lock);
+ change_auth_cap_ses(ci, tcap->session);
}
}
__ceph_remove_cap(cap, false);
@@ -3771,7 +3865,6 @@ static void handle_cap_import(struct ceph_mds_client *mdsc,
struct ceph_mds_cap_peer *ph,
struct ceph_mds_session *session,
struct ceph_cap **target_cap, int *old_issued)
- __acquires(ci->i_ceph_lock)
{
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_cap *cap, *ocap, *new_cap = NULL;
@@ -3796,14 +3889,13 @@ static void handle_cap_import(struct ceph_mds_client *mdsc,
dout("handle_cap_import inode %p ci %p mds%d mseq %d peer %d\n",
inode, ci, mds, mseq, peer);
-
retry:
- spin_lock(&ci->i_ceph_lock);
cap = __get_cap_for_mds(ci, mds);
if (!cap) {
if (!new_cap) {
spin_unlock(&ci->i_ceph_lock);
new_cap = ceph_get_cap(mdsc, NULL);
+ spin_lock(&ci->i_ceph_lock);
goto retry;
}
cap = new_cap;
@@ -3838,9 +3930,6 @@ retry:
__ceph_remove_cap(ocap, (ph->flags & CEPH_CAP_FLAG_RELEASE));
}
- /* make sure we re-request max_size, if necessary */
- ci->i_requested_max_size = 0;
-
*old_issued = issued;
*target_cap = cap;
}
@@ -3869,6 +3958,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
size_t snaptrace_len;
void *p, *end;
struct cap_extra_info extra_info = {};
+ bool queue_trunc;
dout("handle_caps from mds%d\n", session->s_mds);
@@ -4016,6 +4106,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
} else {
down_read(&mdsc->snap_rwsem);
}
+ spin_lock(&ci->i_ceph_lock);
handle_cap_import(mdsc, inode, h, peer, session,
&cap, &extra_info.issued);
handle_cap_grant(inode, session, cap,
@@ -4052,7 +4143,10 @@ void ceph_handle_caps(struct ceph_mds_session *session,
break;
case CEPH_CAP_OP_TRUNC:
- handle_cap_trunc(inode, h, session);
+ queue_trunc = handle_cap_trunc(inode, h, session);
+ spin_unlock(&ci->i_ceph_lock);
+ if (queue_trunc)
+ ceph_queue_vmtruncate(inode);
break;
default:
@@ -4121,15 +4215,16 @@ void ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
/*
* Flush all dirty caps to the mds
*/
-void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
+static void flush_dirty_session_caps(struct ceph_mds_session *s)
{
+ struct ceph_mds_client *mdsc = s->s_mdsc;
struct ceph_inode_info *ci;
struct inode *inode;
dout("flush_dirty_caps\n");
spin_lock(&mdsc->cap_dirty_lock);
- while (!list_empty(&mdsc->cap_dirty)) {
- ci = list_first_entry(&mdsc->cap_dirty, struct ceph_inode_info,
+ while (!list_empty(&s->s_cap_dirty)) {
+ ci = list_first_entry(&s->s_cap_dirty, struct ceph_inode_info,
i_dirty_item);
inode = &ci->vfs_inode;
ihold(inode);
@@ -4143,6 +4238,35 @@ void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
dout("flush_dirty_caps done\n");
}
+static void iterate_sessions(struct ceph_mds_client *mdsc,
+ void (*cb)(struct ceph_mds_session *))
+{
+ int mds;
+
+ mutex_lock(&mdsc->mutex);
+ for (mds = 0; mds < mdsc->max_sessions; ++mds) {
+ struct ceph_mds_session *s;
+
+ if (!mdsc->sessions[mds])
+ continue;
+
+ s = ceph_get_mds_session(mdsc->sessions[mds]);
+ if (!s)
+ continue;
+
+ mutex_unlock(&mdsc->mutex);
+ cb(s);
+ ceph_put_mds_session(s);
+ mutex_lock(&mdsc->mutex);
+ }
+ mutex_unlock(&mdsc->mutex);
+}
+
+void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
+{
+ iterate_sessions(mdsc, flush_dirty_session_caps);
+}
+
void __ceph_touch_fmode(struct ceph_inode_info *ci,
struct ceph_mds_client *mdsc, int fmode)
{
@@ -4269,6 +4393,9 @@ int ceph_encode_inode_release(void **p, struct inode *inode,
cap->issued &= ~drop;
cap->implemented &= ~drop;
cap->mds_wanted = wanted;
+ if (cap == ci->i_auth_cap &&
+ !(wanted & CEPH_CAP_ANY_FILE_WR))
+ ci->i_requested_max_size = 0;
} else {
dout("encode_inode_release %p cap %p %s"
" (force)\n", inode, cap,
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index dcaed75de9e6..070ed8481340 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -7,6 +7,8 @@
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include <linux/math64.h>
+#include <linux/ktime.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/mon_client.h>
@@ -18,6 +20,7 @@
#ifdef CONFIG_DEBUG_FS
#include "mds_client.h"
+#include "metric.h"
static int mdsmap_show(struct seq_file *s, void *p)
{
@@ -124,6 +127,87 @@ static int mdsc_show(struct seq_file *s, void *p)
return 0;
}
+#define CEPH_METRIC_SHOW(name, total, avg, min, max, sq) { \
+ s64 _total, _avg, _min, _max, _sq, _st; \
+ _avg = ktime_to_us(avg); \
+ _min = ktime_to_us(min == KTIME_MAX ? 0 : min); \
+ _max = ktime_to_us(max); \
+ _total = total - 1; \
+ _sq = _total > 0 ? DIV64_U64_ROUND_CLOSEST(sq, _total) : 0; \
+ _st = int_sqrt64(_sq); \
+ _st = ktime_to_us(_st); \
+ seq_printf(s, "%-14s%-12lld%-16lld%-16lld%-16lld%lld\n", \
+ name, total, _avg, _min, _max, _st); \
+}
+
+static int metric_show(struct seq_file *s, void *p)
+{
+ struct ceph_fs_client *fsc = s->private;
+ struct ceph_mds_client *mdsc = fsc->mdsc;
+ struct ceph_client_metric *m = &mdsc->metric;
+ int i, nr_caps = 0;
+ s64 total, sum, avg, min, max, sq;
+
+ seq_printf(s, "item total avg_lat(us) min_lat(us) max_lat(us) stdev(us)\n");
+ seq_printf(s, "-----------------------------------------------------------------------------------\n");
+
+ spin_lock(&m->read_latency_lock);
+ total = m->total_reads;
+ sum = m->read_latency_sum;
+ avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
+ min = m->read_latency_min;
+ max = m->read_latency_max;
+ sq = m->read_latency_sq_sum;
+ spin_unlock(&m->read_latency_lock);
+ CEPH_METRIC_SHOW("read", total, avg, min, max, sq);
+
+ spin_lock(&m->write_latency_lock);
+ total = m->total_writes;
+ sum = m->write_latency_sum;
+ avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
+ min = m->write_latency_min;
+ max = m->write_latency_max;
+ sq = m->write_latency_sq_sum;
+ spin_unlock(&m->write_latency_lock);
+ CEPH_METRIC_SHOW("write", total, avg, min, max, sq);
+
+ spin_lock(&m->metadata_latency_lock);
+ total = m->total_metadatas;
+ sum = m->metadata_latency_sum;
+ avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
+ min = m->metadata_latency_min;
+ max = m->metadata_latency_max;
+ sq = m->metadata_latency_sq_sum;
+ spin_unlock(&m->metadata_latency_lock);
+ CEPH_METRIC_SHOW("metadata", total, avg, min, max, sq);
+
+ seq_printf(s, "\n");
+ seq_printf(s, "item total miss hit\n");
+ seq_printf(s, "-------------------------------------------------\n");
+
+ seq_printf(s, "%-14s%-16lld%-16lld%lld\n", "d_lease",
+ atomic64_read(&m->total_dentries),
+ percpu_counter_sum(&m->d_lease_mis),
+ percpu_counter_sum(&m->d_lease_hit));
+
+ mutex_lock(&mdsc->mutex);
+ for (i = 0; i < mdsc->max_sessions; i++) {
+ struct ceph_mds_session *s;
+
+ s = __ceph_lookup_mds_session(mdsc, i);
+ if (!s)
+ continue;
+ nr_caps += s->s_nr_caps;
+ ceph_put_mds_session(s);
+ }
+ mutex_unlock(&mdsc->mutex);
+ seq_printf(s, "%-14s%-16d%-16lld%lld\n", "caps", nr_caps,
+ percpu_counter_sum(&m->i_caps_mis),
+ percpu_counter_sum(&m->i_caps_hit));
+
+ return 0;
+}
+
static int caps_show_cb(struct inode *inode, struct ceph_cap *cap, void *p)
{
struct seq_file *s = p;
@@ -222,6 +306,7 @@ DEFINE_SHOW_ATTRIBUTE(mdsmap);
DEFINE_SHOW_ATTRIBUTE(mdsc);
DEFINE_SHOW_ATTRIBUTE(caps);
DEFINE_SHOW_ATTRIBUTE(mds_sessions);
+DEFINE_SHOW_ATTRIBUTE(metric);
/*
@@ -255,6 +340,7 @@ void ceph_fs_debugfs_cleanup(struct ceph_fs_client *fsc)
debugfs_remove(fsc->debugfs_mdsmap);
debugfs_remove(fsc->debugfs_mds_sessions);
debugfs_remove(fsc->debugfs_caps);
+ debugfs_remove(fsc->debugfs_metric);
debugfs_remove(fsc->debugfs_mdsc);
}
@@ -295,11 +381,17 @@ void ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
fsc,
&mdsc_fops);
+ fsc->debugfs_metric = debugfs_create_file("metrics",
+ 0400,
+ fsc->client->debugfs_dir,
+ fsc,
+ &metric_fops);
+
fsc->debugfs_caps = debugfs_create_file("caps",
- 0400,
- fsc->client->debugfs_dir,
- fsc,
- &caps_fops);
+ 0400,
+ fsc->client->debugfs_dir,
+ fsc,
+ &caps_fops);
}
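
The metrics file above derives a standard deviation from a running sum of squared deviations (sq divided by n - 1, then a square root). A sketch of the online Welford-style update that such a sum-of-squares counter implies, using doubles in place of ktime_t (build with -lm):

#include <math.h>
#include <stdio.h>

struct metric {
	long   n;
	double mean;
	double sq_sum; /* running sum of squared deviations from the mean */
	double min, max;
};

static void metric_update(struct metric *m, double x)
{
	double old_mean = m->mean;

	if (m->n == 0 || x < m->min)
		m->min = x;
	if (m->n == 0 || x > m->max)
		m->max = x;

	m->n++;
	m->mean += (x - old_mean) / m->n;
	/* Welford: sq_sum += (x - old_mean) * (x - new_mean) */
	m->sq_sum += (x - old_mean) * (x - m->mean);
}

static double metric_stdev(const struct metric *m)
{
	return m->n > 1 ? sqrt(m->sq_sum / (m->n - 1)) : 0.0;
}

int main(void)
{
	struct metric m = { 0 };
	double samples[] = { 120, 80, 100, 140, 60 }; /* e.g. latencies in us */

	for (int i = 0; i < 5; i++)
		metric_update(&m, samples[i]);

	printf("n=%ld avg=%.1f min=%.1f max=%.1f stdev=%.1f\n",
	       m.n, m.mean, m.min, m.max, metric_stdev(&m));
	return 0;
}
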
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 4c4202c93b71..39f5311404b0 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -38,6 +38,8 @@ static int __dir_lease_try_check(const struct dentry *dentry);
static int ceph_d_init(struct dentry *dentry)
{
struct ceph_dentry_info *di;
+ struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
+ struct ceph_mds_client *mdsc = fsc->mdsc;
di = kmem_cache_zalloc(ceph_dentry_cachep, GFP_KERNEL);
if (!di)
@@ -48,6 +50,9 @@ static int ceph_d_init(struct dentry *dentry)
di->time = jiffies;
dentry->d_fsdata = di;
INIT_LIST_HEAD(&di->lease_list);
+
+ atomic64_inc(&mdsc->metric.total_dentries);
+
return 0;
}
@@ -344,8 +349,9 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
!ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
ceph_snap(inode) != CEPH_SNAPDIR &&
__ceph_dir_is_complete_ordered(ci) &&
- __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
+ __ceph_caps_issued_mask_metric(ci, CEPH_CAP_FILE_SHARED, 1)) {
int shared_gen = atomic_read(&ci->i_shared_gen);
+
spin_unlock(&ci->i_ceph_lock);
err = __dcache_readdir(file, ctx, shared_gen);
if (err != -EAGAIN)
@@ -762,7 +768,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
!is_root_ceph_dentry(dir, dentry) &&
ceph_test_mount_opt(fsc, DCACHE) &&
__ceph_dir_is_complete(ci) &&
- (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
+ __ceph_caps_issued_mask_metric(ci, CEPH_CAP_FILE_SHARED, 1)) {
__ceph_touch_fmode(ci, mdsc, CEPH_FILE_MODE_RD);
spin_unlock(&ci->i_ceph_lock);
dout(" dir %p complete, -ENOENT\n", dir);
@@ -1203,11 +1209,12 @@ static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
op = CEPH_MDS_OP_RENAMESNAP;
else
return -EROFS;
+ } else if (old_dir != new_dir) {
+ err = ceph_quota_check_rename(mdsc, d_inode(old_dentry),
+ new_dir);
+ if (err)
+ return err;
}
- /* don't allow cross-quota renames */
- if ((old_dir != new_dir) &&
- (!ceph_quota_is_same_realm(old_dir, new_dir)))
- return -EXDEV;
dout("rename dir %p dentry %p to dir %p dentry %p\n",
old_dir, old_dentry, new_dir, new_dentry);
@@ -1709,6 +1716,8 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
if (flags & LOOKUP_RCU)
return -ECHILD;
+ percpu_counter_inc(&mdsc->metric.d_lease_mis);
+
op = ceph_snap(dir) == CEPH_SNAPDIR ?
CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
@@ -1740,6 +1749,8 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
dout("d_revalidate %p lookup result=%d\n",
dentry, err);
}
+ } else {
+ percpu_counter_inc(&mdsc->metric.d_lease_hit);
}
dout("d_revalidate %p %s\n", dentry, valid ? "valid" : "invalid");
@@ -1782,9 +1793,12 @@ static int ceph_d_delete(const struct dentry *dentry)
static void ceph_d_release(struct dentry *dentry)
{
struct ceph_dentry_info *di = ceph_dentry(dentry);
+ struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
dout("d_release %p\n", dentry);
+ atomic64_dec(&fsc->mdsc->metric.total_dentries);
+
spin_lock(&dentry->d_lock);
__dentry_lease_unlist(di);
dentry->d_fsdata = NULL;
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index 79dc06881e78..e088843a7734 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -172,9 +172,16 @@ struct inode *ceph_lookup_inode(struct super_block *sb, u64 ino)
static struct dentry *__fh_to_dentry(struct super_block *sb, u64 ino)
{
struct inode *inode = __lookup_inode(sb, ino);
+ int err;
+
if (IS_ERR(inode))
return ERR_CAST(inode);
- if (inode->i_nlink == 0) {
+ /* We need LINK caps to reliably check i_nlink */
+ err = ceph_do_getattr(inode, CEPH_CAP_LINK_SHARED, false);
+ if (err)
+ return ERR_PTR(err);
+ /* -ESTALE if inode has been unlinked and no file is open */
+ if ((inode->i_nlink == 0) && (atomic_read(&inode->i_count) == 1)) {
iput(inode);
return ERR_PTR(-ESTALE);
}
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index afdfca965a7f..160644ddaeed 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -11,11 +11,13 @@
#include <linux/writeback.h>
#include <linux/falloc.h>
#include <linux/iversion.h>
+#include <linux/ktime.h>
#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include "io.h"
+#include "metric.h"
static __le32 ceph_flags_sys2wire(u32 flags)
{
@@ -906,6 +908,12 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
ret = ceph_osdc_start_request(osdc, req, false);
if (!ret)
ret = ceph_osdc_wait_request(osdc, req);
+
+ ceph_update_read_latency(&fsc->mdsc->metric,
+ req->r_start_latency,
+ req->r_end_latency,
+ ret);
+
ceph_osdc_put_request(req);
i_size = i_size_read(inode);
@@ -1044,6 +1052,8 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req)
struct inode *inode = req->r_inode;
struct ceph_aio_request *aio_req = req->r_priv;
struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
+ struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_client_metric *metric = &fsc->mdsc->metric;
BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS);
BUG_ON(!osd_data->num_bvecs);
@@ -1051,6 +1061,16 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req)
dout("ceph_aio_complete_req %p rc %d bytes %u\n",
inode, rc, osd_data->bvec_pos.iter.bi_size);
+ /* r_start_latency == 0 means the request was not submitted */
+ if (req->r_start_latency) {
+ if (aio_req->write)
+ ceph_update_write_latency(metric, req->r_start_latency,
+ req->r_end_latency, rc);
+ else
+ ceph_update_read_latency(metric, req->r_start_latency,
+ req->r_end_latency, rc);
+ }
+
if (rc == -EOLDSNAPC) {
struct ceph_aio_work *aio_work;
BUG_ON(!aio_req->write);
@@ -1179,6 +1199,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_client_metric *metric = &fsc->mdsc->metric;
struct ceph_vino vino;
struct ceph_osd_request *req;
struct bio_vec *bvecs;
@@ -1295,6 +1316,13 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
if (!ret)
ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
+ if (write)
+ ceph_update_write_latency(metric, req->r_start_latency,
+ req->r_end_latency, ret);
+ else
+ ceph_update_read_latency(metric, req->r_start_latency,
+ req->r_end_latency, ret);
+
size = i_size_read(inode);
if (!write) {
if (ret == -ENOENT)
@@ -1466,6 +1494,8 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
if (!ret)
ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
+ ceph_update_write_latency(&fsc->mdsc->metric, req->r_start_latency,
+ req->r_end_latency, ret);
out:
ceph_osdc_put_request(req);
if (ret != 0) {
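
The file.c hooks above share one template: stamp a start time when the request is created, an end time at completion, and skip the metric update when the request was never submitted (a zero r_start_latency is the sentinel). A hedged userspace sketch of the same pattern with clock_gettime():

#include <stdio.h>
#include <time.h>

struct request {
	long long start_ns; /* 0 => never submitted */
	long long end_ns;
};

static long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (long long)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

static void submit(struct request *req)
{
	req->start_ns = now_ns();
	/* ... hand off to the transport ... */
}

static void complete(struct request *req)
{
	req->end_ns = now_ns();
}

static void update_latency_metric(const struct request *req, int rc)
{
	if (!req->start_ns)
		return; /* never submitted, nothing to record */
	printf("rc=%d latency=%lld ns\n", rc, req->end_ns - req->start_ns);
}

int main(void)
{
	struct request req = { 0 };

	submit(&req);
	complete(&req);
	update_latency_metric(&req, 0);

	struct request unsubmitted = { 0 };
	update_latency_metric(&unsubmitted, -125); /* e.g. -ECANCELED */
	return 0;
}
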
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 7fef94fd1e55..357c937699d5 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -2288,8 +2288,8 @@ int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
dout("do_getattr inode %p mask %s mode 0%o\n",
inode, ceph_cap_string(mask), inode->i_mode);
- if (!force && ceph_caps_issued_mask(ceph_inode(inode), mask, 1))
- return 0;
+ if (!force && ceph_caps_issued_mask_metric(ceph_inode(inode), mask, 1))
+ return 0;
mode = (mask & CEPH_STAT_RSTAT) ? USE_AUTH_MDS : USE_ANY_MDS;
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, mode);
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 7c63abf5bea9..a50497142e59 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -10,6 +10,7 @@
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include <linux/bits.h>
+#include <linux/ktime.h>
#include "super.h"
#include "mds_client.h"
@@ -658,6 +659,7 @@ void ceph_put_mds_session(struct ceph_mds_session *s)
if (refcount_dec_and_test(&s->s_ref)) {
if (s->s_auth.authorizer)
ceph_auth_destroy_authorizer(s->s_auth.authorizer);
+ WARN_ON(mutex_is_locked(&s->s_mutex));
xa_destroy(&s->s_delegated_inos);
kfree(s);
}
@@ -753,6 +755,7 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
INIT_LIST_HEAD(&s->s_cap_releases);
INIT_WORK(&s->s_cap_release_work, ceph_cap_release_work);
+ INIT_LIST_HEAD(&s->s_cap_dirty);
INIT_LIST_HEAD(&s->s_cap_flushing);
mdsc->sessions[mds] = s;
@@ -801,7 +804,7 @@ void ceph_mdsc_release_request(struct kref *kref)
struct ceph_mds_request *req = container_of(kref,
struct ceph_mds_request,
r_kref);
- ceph_mdsc_release_dir_caps(req);
+ ceph_mdsc_release_dir_caps_no_check(req);
destroy_reply_info(&req->r_reply_info);
if (req->r_request)
ceph_msg_put(req->r_request);
@@ -2201,6 +2204,7 @@ ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
mutex_init(&req->r_fill_mutex);
req->r_mdsc = mdsc;
req->r_started = jiffies;
+ req->r_start_latency = ktime_get();
req->r_resend_mds = -1;
INIT_LIST_HEAD(&req->r_unsafe_dir_item);
INIT_LIST_HEAD(&req->r_unsafe_target_item);
@@ -2547,6 +2551,8 @@ out:
static void complete_request(struct ceph_mds_client *mdsc,
struct ceph_mds_request *req)
{
+ req->r_end_latency = ktime_get();
+
if (req->r_callback)
req->r_callback(mdsc, req);
complete_all(&req->r_completion);
@@ -3155,6 +3161,9 @@ out_err:
/* kick calling process */
complete_request(mdsc, req);
+
+ ceph_update_metadata_latency(&mdsc->metric, req->r_start_latency,
+ req->r_end_latency, err);
out:
ceph_mdsc_put_request(req);
return;
@@ -3393,6 +3402,18 @@ void ceph_mdsc_release_dir_caps(struct ceph_mds_request *req)
}
}
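+
+/*
+ * As above, but drop the cap references without triggering a cap
+ * check back to the MDS. Used when a request is being freed or
+ * replayed, where kicking off check_caps is unnecessary or unsafe.
+ */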
+void ceph_mdsc_release_dir_caps_no_check(struct ceph_mds_request *req)
+{
+ int dcaps;
+
+ dcaps = xchg(&req->r_dir_caps, 0);
+ if (dcaps) {
+ dout("releasing r_dir_caps=%s\n", ceph_cap_string(dcaps));
+ ceph_put_cap_refs_no_check_caps(ceph_inode(req->r_parent),
+ dcaps);
+ }
+}
+
/*
* called under session->mutex.
*/
@@ -3425,7 +3446,7 @@ static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
if (req->r_session->s_mds != session->s_mds)
continue;
- ceph_mdsc_release_dir_caps(req);
+ ceph_mdsc_release_dir_caps_no_check(req);
__send_request(mdsc, session, req, true);
}
@@ -3760,8 +3781,6 @@ fail:
* recovering MDS might have.
*
* This is a relatively heavyweight operation, but it's rare.
- *
- * called with mdsc->mutex held.
*/
static void send_mds_reconnect(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session)
@@ -4015,7 +4034,11 @@ static void check_new_map(struct ceph_mds_client *mdsc,
oldstate != CEPH_MDS_STATE_STARTING)
pr_info("mds%d recovery completed\n", s->s_mds);
kick_requests(mdsc, i);
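+ /*
+ * ceph_kick_flushing_caps must run under the session mutex,
+ * and s_mutex is taken before mdsc->mutex elsewhere, so drop
+ * mdsc->mutex and reacquire it once s_mutex is held.
+ */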
+ mutex_unlock(&mdsc->mutex);
+ mutex_lock(&s->s_mutex);
+ mutex_lock(&mdsc->mutex);
ceph_kick_flushing_caps(mdsc, s);
+ mutex_unlock(&s->s_mutex);
wake_up_session_caps(s, RECONNECT);
}
}
@@ -4323,6 +4346,7 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
{
struct ceph_mds_client *mdsc;
+ int err;
mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS);
if (!mdsc)
@@ -4331,8 +4355,8 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
mutex_init(&mdsc->mutex);
mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
if (!mdsc->mdsmap) {
- kfree(mdsc);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto err_mdsc;
}
fsc->mdsc = mdsc;
@@ -4364,13 +4388,15 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
spin_lock_init(&mdsc->snap_flush_lock);
mdsc->last_cap_flush_tid = 1;
INIT_LIST_HEAD(&mdsc->cap_flush_list);
- INIT_LIST_HEAD(&mdsc->cap_dirty);
INIT_LIST_HEAD(&mdsc->cap_dirty_migrating);
mdsc->num_cap_flushing = 0;
spin_lock_init(&mdsc->cap_dirty_lock);
init_waitqueue_head(&mdsc->cap_flushing_wq);
INIT_WORK(&mdsc->cap_reclaim_work, ceph_cap_reclaim_work);
atomic_set(&mdsc->cap_reclaim_pending, 0);
+ err = ceph_metric_init(&mdsc->metric);
+ if (err)
+ goto err_mdsmap;
spin_lock_init(&mdsc->dentry_list_lock);
INIT_LIST_HEAD(&mdsc->dentry_leases);
@@ -4389,6 +4415,12 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
strscpy(mdsc->nodename, utsname()->nodename,
sizeof(mdsc->nodename));
return 0;
+
+err_mdsmap:
+ kfree(mdsc->mdsmap);
+err_mdsc:
+ kfree(mdsc);
+ return err;
}
/*
@@ -4646,6 +4678,8 @@ void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
ceph_mdsc_stop(mdsc);
+ ceph_metric_destroy(&mdsc->metric);
+
fsc->mdsc = NULL;
kfree(mdsc);
dout("mdsc_destroy %p done\n", mdsc);
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index 903d9edfd4bf..5e0c4073a6be 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -10,12 +10,15 @@
#include <linux/spinlock.h>
#include <linux/refcount.h>
#include <linux/utsname.h>
+#include <linux/ktime.h>
#include <linux/ceph/types.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/mdsmap.h>
#include <linux/ceph/auth.h>
+#include "metric.h"
+
/* The first 8 bits are reserved for old ceph releases */
enum ceph_feature_type {
CEPHFS_FEATURE_MIMIC = 8,
@@ -196,8 +199,12 @@ struct ceph_mds_session {
struct list_head s_cap_releases; /* waiting cap_release messages */
struct work_struct s_cap_release_work;
- /* protected by mutex */
+ /* See ceph_inode_info->i_dirty_item. */
+ struct list_head s_cap_dirty; /* inodes w/ dirty caps */
+
+ /* See ceph_inode_info->i_flushing_item. */
struct list_head s_cap_flushing; /* inodes w/ flushing caps */
+
unsigned long s_renew_requested; /* last time we sent a renew req */
u64 s_renew_seq;
@@ -297,6 +304,8 @@ struct ceph_mds_request {
unsigned long r_timeout; /* optional. jiffies, 0 is "wait forever" */
unsigned long r_started; /* start time to measure timeout against */
+ ktime_t r_start_latency; /* start time to measure latency */
+ ktime_t r_end_latency; /* finish time to measure latency */
unsigned long r_request_started; /* start time for mds request only,
used to measure lease durations */
@@ -419,7 +428,6 @@ struct ceph_mds_client {
u64 last_cap_flush_tid;
struct list_head cap_flush_list;
- struct list_head cap_dirty; /* inodes with dirty caps */
struct list_head cap_dirty_migrating; /* ...that are migration... */
int num_cap_flushing; /* # caps we are flushing */
spinlock_t cap_dirty_lock; /* protects above items */
@@ -454,6 +462,8 @@ struct ceph_mds_client {
struct list_head dentry_leases; /* fifo list */
struct list_head dentry_dir_leases; /* lru list */
+ struct ceph_client_metric metric;
+
spinlock_t snapid_map_lock;
struct rb_root snapid_map_tree;
struct list_head snapid_map_lru;
@@ -497,6 +507,7 @@ extern int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
struct inode *dir,
struct ceph_mds_request *req);
extern void ceph_mdsc_release_dir_caps(struct ceph_mds_request *req);
+extern void ceph_mdsc_release_dir_caps_no_check(struct ceph_mds_request *req);
static inline void ceph_mdsc_get_request(struct ceph_mds_request *req)
{
kref_get(&req->r_kref);
diff --git a/fs/ceph/metric.c b/fs/ceph/metric.c
new file mode 100644
index 000000000000..9217f35bc2b9
--- /dev/null
+++ b/fs/ceph/metric.c
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/types.h>
+#include <linux/percpu_counter.h>
+#include <linux/math64.h>
+
+#include "metric.h"
+
+int ceph_metric_init(struct ceph_client_metric *m)
+{
+ int ret;
+
+ if (!m)
+ return -EINVAL;
+
+ atomic64_set(&m->total_dentries, 0);
+ ret = percpu_counter_init(&m->d_lease_hit, 0, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ ret = percpu_counter_init(&m->d_lease_mis, 0, GFP_KERNEL);
+ if (ret)
+ goto err_d_lease_mis;
+
+ ret = percpu_counter_init(&m->i_caps_hit, 0, GFP_KERNEL);
+ if (ret)
+ goto err_i_caps_hit;
+
+ ret = percpu_counter_init(&m->i_caps_mis, 0, GFP_KERNEL);
+ if (ret)
+ goto err_i_caps_mis;
+
+ spin_lock_init(&m->read_latency_lock);
+ m->read_latency_sq_sum = 0;
+ m->read_latency_min = KTIME_MAX;
+ m->read_latency_max = 0;
+ m->total_reads = 0;
+ m->read_latency_sum = 0;
+
+ spin_lock_init(&m->write_latency_lock);
+ m->write_latency_sq_sum = 0;
+ m->write_latency_min = KTIME_MAX;
+ m->write_latency_max = 0;
+ m->total_writes = 0;
+ m->write_latency_sum = 0;
+
+ spin_lock_init(&m->metadata_latency_lock);
+ m->metadata_latency_sq_sum = 0;
+ m->metadata_latency_min = KTIME_MAX;
+ m->metadata_latency_max = 0;
+ m->total_metadatas = 0;
+ m->metadata_latency_sum = 0;
+
+ return 0;
+
+err_i_caps_mis:
+ percpu_counter_destroy(&m->i_caps_hit);
+err_i_caps_hit:
+ percpu_counter_destroy(&m->d_lease_mis);
+err_d_lease_mis:
+ percpu_counter_destroy(&m->d_lease_hit);
+
+ return ret;
+}
+
+void ceph_metric_destroy(struct ceph_client_metric *m)
+{
+ if (!m)
+ return;
+
+ percpu_counter_destroy(&m->i_caps_mis);
+ percpu_counter_destroy(&m->i_caps_hit);
+ percpu_counter_destroy(&m->d_lease_mis);
+ percpu_counter_destroy(&m->d_lease_hit);
+}
+
+static inline void __update_latency(ktime_t *totalp, ktime_t *lsump,
+ ktime_t *min, ktime_t *max,
+ ktime_t *sq_sump, ktime_t lat)
+{
+ ktime_t total, avg, sq, lsum;
+
+ total = ++(*totalp);
+ lsum = (*lsump += lat);
+
+ if (unlikely(lat < *min))
+ *min = lat;
+ if (unlikely(lat > *max))
+ *max = lat;
+
+ if (unlikely(total == 1))
+ return;
+
+ /* the sq is (lat - old_avg) * (lat - new_avg) */
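+ /*
+ * This is Welford's running-variance update: with avg_n the mean
+ * after n samples, the sum of squared deviations satisfies
+ * S_n = S_{n-1} + (lat - avg_{n-1}) * (lat - avg_n),
+ * so a variance estimate can be derived later from sq_sum and
+ * total without storing the individual samples.
+ */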
+ avg = DIV64_U64_ROUND_CLOSEST((lsum - lat), (total - 1));
+ sq = lat - avg;
+ avg = DIV64_U64_ROUND_CLOSEST(lsum, total);
+ sq = sq * (lat - avg);
+ *sq_sump += sq;
+}
+
+void ceph_update_read_latency(struct ceph_client_metric *m,
+ ktime_t r_start, ktime_t r_end,
+ int rc)
+{
+ ktime_t lat = ktime_sub(r_end, r_start);
+
+ if (unlikely(rc < 0 && rc != -ENOENT && rc != -ETIMEDOUT))
+ return;
+
+ spin_lock(&m->read_latency_lock);
+ __update_latency(&m->total_reads, &m->read_latency_sum,
+ &m->read_latency_min, &m->read_latency_max,
+ &m->read_latency_sq_sum, lat);
+ spin_unlock(&m->read_latency_lock);
+}
+
+void ceph_update_write_latency(struct ceph_client_metric *m,
+ ktime_t r_start, ktime_t r_end,
+ int rc)
+{
+ ktime_t lat = ktime_sub(r_end, r_start);
+
+ if (unlikely(rc && rc != -ETIMEDOUT))
+ return;
+
+ spin_lock(&m->write_latency_lock);
+ __update_latency(&m->total_writes, &m->write_latency_sum,
+ &m->write_latency_min, &m->write_latency_max,
+ &m->write_latency_sq_sum, lat);
+ spin_unlock(&m->write_latency_lock);
+}
+
+void ceph_update_metadata_latency(struct ceph_client_metric *m,
+ ktime_t r_start, ktime_t r_end,
+ int rc)
+{
+ ktime_t lat = ktime_sub(r_end, r_start);
+
+ if (unlikely(rc && rc != -ENOENT))
+ return;
+
+ spin_lock(&m->metadata_latency_lock);
+ __update_latency(&m->total_metadatas, &m->metadata_latency_sum,
+ &m->metadata_latency_min, &m->metadata_latency_max,
+ &m->metadata_latency_sq_sum, lat);
+ spin_unlock(&m->metadata_latency_lock);
+}
diff --git a/fs/ceph/metric.h b/fs/ceph/metric.h
new file mode 100644
index 000000000000..ccd81285a450
--- /dev/null
+++ b/fs/ceph/metric.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _FS_CEPH_MDS_METRIC_H
+#define _FS_CEPH_MDS_METRIC_H
+
+#include <linux/types.h>
+#include <linux/percpu_counter.h>
+#include <linux/ktime.h>
+
+/* This is the global metrics structure */
+struct ceph_client_metric {
+ atomic64_t total_dentries;
+ struct percpu_counter d_lease_hit;
+ struct percpu_counter d_lease_mis;
+
+ struct percpu_counter i_caps_hit;
+ struct percpu_counter i_caps_mis;
+
+ spinlock_t read_latency_lock;
+ u64 total_reads;
+ ktime_t read_latency_sum;
+ ktime_t read_latency_sq_sum;
+ ktime_t read_latency_min;
+ ktime_t read_latency_max;
+
+ spinlock_t write_latency_lock;
+ u64 total_writes;
+ ktime_t write_latency_sum;
+ ktime_t write_latency_sq_sum;
+ ktime_t write_latency_min;
+ ktime_t write_latency_max;
+
+ spinlock_t metadata_latency_lock;
+ u64 total_metadatas;
+ ktime_t metadata_latency_sum;
+ ktime_t metadata_latency_sq_sum;
+ ktime_t metadata_latency_min;
+ ktime_t metadata_latency_max;
+};
+
+extern int ceph_metric_init(struct ceph_client_metric *m);
+extern void ceph_metric_destroy(struct ceph_client_metric *m);
+
+static inline void ceph_update_cap_hit(struct ceph_client_metric *m)
+{
+ percpu_counter_inc(&m->i_caps_hit);
+}
+
+static inline void ceph_update_cap_mis(struct ceph_client_metric *m)
+{
+ percpu_counter_inc(&m->i_caps_mis);
+}
+
+extern void ceph_update_read_latency(struct ceph_client_metric *m,
+ ktime_t r_start, ktime_t r_end,
+ int rc);
+extern void ceph_update_write_latency(struct ceph_client_metric *m,
+ ktime_t r_start, ktime_t r_end,
+ int rc);
+extern void ceph_update_metadata_latency(struct ceph_client_metric *m,
+ ktime_t r_start, ktime_t r_end,
+ int rc);
+#endif /* _FS_CEPH_MDS_METRIC_H */
diff --git a/fs/ceph/quota.c b/fs/ceph/quota.c
index 19507e2fdb57..198ddde5c1e6 100644
--- a/fs/ceph/quota.c
+++ b/fs/ceph/quota.c
@@ -264,7 +264,7 @@ restart:
return NULL;
}
-bool ceph_quota_is_same_realm(struct inode *old, struct inode *new)
+static bool ceph_quota_is_same_realm(struct inode *old, struct inode *new)
{
struct ceph_mds_client *mdsc = ceph_inode_to_client(old)->mdsc;
struct ceph_snap_realm *old_realm, *new_realm;
@@ -361,8 +361,6 @@ restart:
spin_unlock(&ci->i_ceph_lock);
switch (op) {
case QUOTA_CHECK_MAX_FILES_OP:
- exceeded = (max && (rvalue >= max));
- break;
case QUOTA_CHECK_MAX_BYTES_OP:
exceeded = (max && (rvalue + delta > max));
break;
@@ -417,7 +415,7 @@ bool ceph_quota_is_max_files_exceeded(struct inode *inode)
WARN_ON(!S_ISDIR(inode->i_mode));
- return check_quota_exceeded(inode, QUOTA_CHECK_MAX_FILES_OP, 0);
+ return check_quota_exceeded(inode, QUOTA_CHECK_MAX_FILES_OP, 1);
}
/*
@@ -518,3 +516,59 @@ bool ceph_quota_update_statfs(struct ceph_fs_client *fsc, struct kstatfs *buf)
return is_updated;
}
+/*
+ * ceph_quota_check_rename - check if a rename can be executed
+ * @mdsc: MDS client instance
+ * @old: inode to be copied
+ * @new: destination inode (directory)
+ *
+ * This function verifies if a rename (e.g. moving a file or directory) can be
+ * executed. It forces an rstat update in the @new target directory (and in the
+ * source @old as well, if it's a directory). The actual check is done both for
+ * max_files and max_bytes.
+ *
+ * This function returns 0 if it's OK to do the rename, or, if quotas are
+ * exceeded, -EXDEV (if @old is a directory) or -EDQUOT.
+ */
+int ceph_quota_check_rename(struct ceph_mds_client *mdsc,
+ struct inode *old, struct inode *new)
+{
+ struct ceph_inode_info *ci_old = ceph_inode(old);
+ int ret = 0;
+
+ if (ceph_quota_is_same_realm(old, new))
+ return 0;
+
+ /*
+ * Get the latest rstat for target directory (and for source, if a
+ * directory)
+ */
+ ret = ceph_do_getattr(new, CEPH_STAT_RSTAT, false);
+ if (ret)
+ return ret;
+
+ if (S_ISDIR(old->i_mode)) {
+ ret = ceph_do_getattr(old, CEPH_STAT_RSTAT, false);
+ if (ret)
+ return ret;
+ ret = check_quota_exceeded(new, QUOTA_CHECK_MAX_BYTES_OP,
+ ci_old->i_rbytes);
+ if (!ret)
+ ret = check_quota_exceeded(new,
+ QUOTA_CHECK_MAX_FILES_OP,
+ ci_old->i_rfiles +
+ ci_old->i_rsubdirs);
+ if (ret)
+ ret = -EXDEV;
+ } else {
+ ret = check_quota_exceeded(new, QUOTA_CHECK_MAX_BYTES_OP,
+ i_size_read(old));
+ if (!ret)
+ ret = check_quota_exceeded(new,
+ QUOTA_CHECK_MAX_FILES_OP, 1);
+ if (ret)
+ ret = -EDQUOT;
+ }
+
+ return ret;
+}
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 60aac3aee055..5a6cdd39bc10 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -128,6 +128,7 @@ struct ceph_fs_client {
struct dentry *debugfs_congestion_kb;
struct dentry *debugfs_bdi;
struct dentry *debugfs_mdsc, *debugfs_mdsmap;
+ struct dentry *debugfs_metric;
struct dentry *debugfs_mds_sessions;
#endif
@@ -350,7 +351,25 @@ struct ceph_inode_info {
struct rb_root i_caps; /* cap list */
struct ceph_cap *i_auth_cap; /* authoritative cap, if any */
unsigned i_dirty_caps, i_flushing_caps; /* mask of dirtied fields */
- struct list_head i_dirty_item, i_flushing_item;
+
+ /*
+ * Link to the auth cap's session's s_cap_dirty list. s_cap_dirty
+ * is protected by the mdsc->cap_dirty_lock, but each individual item
+ * is also protected by the inode's i_ceph_lock. Walking s_cap_dirty
+ * requires the mdsc->cap_dirty_lock. List presence for an item can
+ * be tested under the i_ceph_lock. Changing anything requires both.
+ */
+ struct list_head i_dirty_item;
+
+ /*
+ * Link to session's s_cap_flushing list. Protected in a similar
+ * fashion to i_dirty_item, but also by the s_mutex for changes. The
+ * s_cap_flushing list can be walked while holding either the s_mutex
+ * or mdsc->cap_dirty_lock. List presence can also be checked while
+ * holding the i_ceph_lock for this inode.
+ */
+ struct list_head i_flushing_item;
+
/* we need to track cap writeback on a per-cap-bit basis, to allow
* overlapping, pipelined cap flushes to the mds. we can probably
* reduce the tid to 8 bits if we're concerned about inode size. */
@@ -644,6 +663,8 @@ static inline bool __ceph_is_any_real_caps(struct ceph_inode_info *ci)
extern int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented);
extern int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int t);
+extern int __ceph_caps_issued_mask_metric(struct ceph_inode_info *ci, int mask,
+ int t);
extern int __ceph_caps_issued_other(struct ceph_inode_info *ci,
struct ceph_cap *cap);
@@ -656,12 +677,12 @@ static inline int ceph_caps_issued(struct ceph_inode_info *ci)
return issued;
}
-static inline int ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask,
- int touch)
+static inline int ceph_caps_issued_mask_metric(struct ceph_inode_info *ci,
+ int mask, int touch)
{
int r;
spin_lock(&ci->i_ceph_lock);
- r = __ceph_caps_issued_mask(ci, mask, touch);
+ r = __ceph_caps_issued_mask_metric(ci, mask, touch);
spin_unlock(&ci->i_ceph_lock);
return r;
}
@@ -1074,6 +1095,8 @@ extern void ceph_take_cap_refs(struct ceph_inode_info *ci, int caps,
bool snap_rwsem_locked);
extern void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps);
extern void ceph_put_cap_refs(struct ceph_inode_info *ci, int had);
+extern void ceph_put_cap_refs_no_check_caps(struct ceph_inode_info *ci,
+ int had);
extern void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
struct ceph_snap_context *snapc);
extern void ceph_flush_snaps(struct ceph_inode_info *ci,
@@ -1189,13 +1212,14 @@ extern void ceph_handle_quota(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session,
struct ceph_msg *msg);
extern bool ceph_quota_is_max_files_exceeded(struct inode *inode);
-extern bool ceph_quota_is_same_realm(struct inode *old, struct inode *new);
extern bool ceph_quota_is_max_bytes_exceeded(struct inode *inode,
loff_t newlen);
extern bool ceph_quota_is_max_bytes_approaching(struct inode *inode,
loff_t newlen);
extern bool ceph_quota_update_statfs(struct ceph_fs_client *fsc,
struct kstatfs *buf);
+extern int ceph_quota_check_rename(struct ceph_mds_client *mdsc,
+ struct inode *old, struct inode *new);
extern void ceph_cleanup_quotarealms_inodes(struct ceph_mds_client *mdsc);
#endif /* _FS_CEPH_SUPER_H */
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 7b8a070a782d..71ee34d160c3 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -856,7 +856,7 @@ ssize_t __ceph_getxattr(struct inode *inode, const char *name, void *value,
if (ci->i_xattrs.version == 0 ||
!((req_mask & CEPH_CAP_XATTR_SHARED) ||
- __ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1))) {
+ __ceph_caps_issued_mask_metric(ci, CEPH_CAP_XATTR_SHARED, 1))) {
spin_unlock(&ci->i_ceph_lock);
/* security module gets xattr while filling trace */
@@ -914,7 +914,7 @@ ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
ci->i_xattrs.version, ci->i_xattrs.index_version);
if (ci->i_xattrs.version == 0 ||
- !__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1)) {
+ !__ceph_caps_issued_mask_metric(ci, CEPH_CAP_XATTR_SHARED, 1)) {
spin_unlock(&ci->i_ceph_lock);
err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR, true);
if (err)
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 916567d770f5..3ad1a98fd567 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -221,6 +221,8 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
struct cifs_ses *ses;
struct cifs_tcon *tcon;
int i, j;
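+ /* indexed by enum securityEnum, which ->select_sectype() returns */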
+ static const char * const security_types[] = {"Unspecified", "LANMAN", "NTLM",
+ "NTLMv2", "RawNTLMSSP", "Kerberos"};
seq_puts(m,
"Display Internal CIFS Data Structures for Debugging\n"
@@ -375,6 +377,10 @@ skip_rdma:
ses->ses_count, ses->serverOS, ses->serverNOS,
ses->capabilities, ses->status);
}
+
+ seq_printf(m,"Security type: %s\n",
+ security_types[server->ops->select_sectype(server, ses->sectype)]);
+
if (server->rdma)
seq_printf(m, "RDMA\n\t");
seq_printf(m, "TCP status: %d Instance: %d\n\tLocal Users To "
diff --git a/fs/cifs/cifs_debug.h b/fs/cifs/cifs_debug.h
index 100b0056a369..5e66dab712d0 100644
--- a/fs/cifs/cifs_debug.h
+++ b/fs/cifs/cifs_debug.h
@@ -8,6 +8,12 @@
#ifndef _H_CIFS_DEBUG
#define _H_CIFS_DEBUG
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) "CIFS: " fmt
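+
+/*
+ * With pr_fmt defined, the pr_*() helpers used by the macros below
+ * prefix every message with "CIFS: ", so call sites no longer spell
+ * the prefix out themselves.
+ */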
+
void cifs_dump_mem(char *label, void *data, int length);
void cifs_dump_detail(void *buf, struct TCP_Server_Info *ptcp_info);
void cifs_dump_mids(struct TCP_Server_Info *);
@@ -46,92 +52,81 @@ extern int cifsFYI;
*/
/* Information level messages, minor events */
-#define cifs_info_func(ratefunc, fmt, ...) \
-do { \
- pr_info_ ## ratefunc("CIFS: " fmt, ##__VA_ARGS__); \
-} while (0)
+#define cifs_info_func(ratefunc, fmt, ...) \
+ pr_info_ ## ratefunc(fmt, ##__VA_ARGS__)
-#define cifs_info(fmt, ...) \
-do { \
- cifs_info_func(ratelimited, fmt, ##__VA_ARGS__); \
-} while (0)
+#define cifs_info(fmt, ...) \
+ cifs_info_func(ratelimited, fmt, ##__VA_ARGS__)
/* information message: e.g., configuration, major event */
-#define cifs_dbg_func(ratefunc, type, fmt, ...) \
-do { \
- if ((type) & FYI && cifsFYI & CIFS_INFO) { \
- pr_debug_ ## ratefunc("%s: " \
- fmt, __FILE__, ##__VA_ARGS__); \
- } else if ((type) & VFS) { \
- pr_err_ ## ratefunc("CIFS VFS: " \
- fmt, ##__VA_ARGS__); \
- } else if ((type) & NOISY && (NOISY != 0)) { \
- pr_debug_ ## ratefunc(fmt, ##__VA_ARGS__); \
- } \
+#define cifs_dbg_func(ratefunc, type, fmt, ...) \
+do { \
+ if ((type) & FYI && cifsFYI & CIFS_INFO) { \
+ pr_debug_ ## ratefunc("%s: " fmt, \
+ __FILE__, ##__VA_ARGS__); \
+ } else if ((type) & VFS) { \
+ pr_err_ ## ratefunc("VFS: " fmt, ##__VA_ARGS__); \
+ } else if ((type) & NOISY && (NOISY != 0)) { \
+ pr_debug_ ## ratefunc(fmt, ##__VA_ARGS__); \
+ } \
} while (0)
-#define cifs_dbg(type, fmt, ...) \
-do { \
- if ((type) & ONCE) \
- cifs_dbg_func(once, \
- type, fmt, ##__VA_ARGS__); \
- else \
- cifs_dbg_func(ratelimited, \
- type, fmt, ##__VA_ARGS__); \
+#define cifs_dbg(type, fmt, ...) \
+do { \
+ if ((type) & ONCE) \
+ cifs_dbg_func(once, type, fmt, ##__VA_ARGS__); \
+ else \
+ cifs_dbg_func(ratelimited, type, fmt, ##__VA_ARGS__); \
} while (0)
-#define cifs_server_dbg_func(ratefunc, type, fmt, ...) \
-do { \
- const char *sn = ""; \
- if (server && server->hostname) \
- sn = server->hostname; \
- if ((type) & FYI && cifsFYI & CIFS_INFO) { \
- pr_debug_ ## ratefunc("%s: \\\\%s " fmt, \
- __FILE__, sn, ##__VA_ARGS__); \
- } else if ((type) & VFS) { \
- pr_err_ ## ratefunc("CIFS VFS: \\\\%s " fmt, \
- sn, ##__VA_ARGS__); \
- } else if ((type) & NOISY && (NOISY != 0)) { \
- pr_debug_ ## ratefunc("\\\\%s " fmt, \
- sn, ##__VA_ARGS__); \
- } \
+#define cifs_server_dbg_func(ratefunc, type, fmt, ...) \
+do { \
+ const char *sn = ""; \
+ if (server && server->hostname) \
+ sn = server->hostname; \
+ if ((type) & FYI && cifsFYI & CIFS_INFO) { \
+ pr_debug_ ## ratefunc("%s: \\\\%s " fmt, \
+ __FILE__, sn, ##__VA_ARGS__); \
+ } else if ((type) & VFS) { \
+ pr_err_ ## ratefunc("VFS: \\\\%s " fmt, \
+ sn, ##__VA_ARGS__); \
+ } else if ((type) & NOISY && (NOISY != 0)) { \
+ pr_debug_ ## ratefunc("\\\\%s " fmt, \
+ sn, ##__VA_ARGS__); \
+ } \
} while (0)
-#define cifs_server_dbg(type, fmt, ...) \
-do { \
- if ((type) & ONCE) \
- cifs_server_dbg_func(once, \
- type, fmt, ##__VA_ARGS__); \
- else \
- cifs_server_dbg_func(ratelimited, \
- type, fmt, ##__VA_ARGS__); \
+#define cifs_server_dbg(type, fmt, ...) \
+do { \
+ if ((type) & ONCE) \
+ cifs_server_dbg_func(once, type, fmt, ##__VA_ARGS__); \
+ else \
+ cifs_server_dbg_func(ratelimited, type, fmt, \
+ ##__VA_ARGS__); \
} while (0)
-#define cifs_tcon_dbg_func(ratefunc, type, fmt, ...) \
-do { \
- const char *tn = ""; \
- if (tcon && tcon->treeName) \
- tn = tcon->treeName; \
- if ((type) & FYI && cifsFYI & CIFS_INFO) { \
- pr_debug_ ## ratefunc("%s: %s " fmt, \
- __FILE__, tn, ##__VA_ARGS__); \
- } else if ((type) & VFS) { \
- pr_err_ ## ratefunc("CIFS VFS: %s " fmt, \
- tn, ##__VA_ARGS__); \
- } else if ((type) & NOISY && (NOISY != 0)) { \
- pr_debug_ ## ratefunc("%s " fmt, \
- tn, ##__VA_ARGS__); \
- } \
+#define cifs_tcon_dbg_func(ratefunc, type, fmt, ...) \
+do { \
+ const char *tn = ""; \
+ if (tcon && tcon->treeName) \
+ tn = tcon->treeName; \
+ if ((type) & FYI && cifsFYI & CIFS_INFO) { \
+ pr_debug_ ## ratefunc("%s: %s " fmt, \
+ __FILE__, tn, ##__VA_ARGS__); \
+ } else if ((type) & VFS) { \
+ pr_err_ ## ratefunc("VFS: %s " fmt, tn, ##__VA_ARGS__); \
+ } else if ((type) & NOISY && (NOISY != 0)) { \
+ pr_debug_ ## ratefunc("%s " fmt, tn, ##__VA_ARGS__); \
+ } \
} while (0)
-#define cifs_tcon_dbg(type, fmt, ...) \
-do { \
- if ((type) & ONCE) \
- cifs_tcon_dbg_func(once, \
- type, fmt, ##__VA_ARGS__); \
- else \
- cifs_tcon_dbg_func(ratelimited, \
- type, fmt, ##__VA_ARGS__); \
+#define cifs_tcon_dbg(type, fmt, ...) \
+do { \
+ if ((type) & ONCE) \
+ cifs_tcon_dbg_func(once, type, fmt, ##__VA_ARGS__); \
+ else \
+ cifs_tcon_dbg_func(ratelimited, type, fmt, \
+ ##__VA_ARGS__); \
} while (0)
/*
@@ -159,9 +154,7 @@ do { \
} while (0)
#define cifs_info(fmt, ...) \
-do { \
- pr_info("CIFS: "fmt, ##__VA_ARGS__); \
-} while (0)
+ pr_info(fmt, ##__VA_ARGS__)
#endif
#endif /* _H_CIFS_DEBUG */
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 97b7497c13ef..874a551f339c 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -520,7 +520,7 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
rc = crypto_shash_init(&ses->server->secmech.sdeschmacmd5->shash);
if (rc) {
- cifs_dbg(VFS, "%s: could not init hmacmd5\n", __func__);
+ cifs_dbg(VFS, "%s: Could not init hmacmd5\n", __func__);
return rc;
}
@@ -624,7 +624,7 @@ CalcNTLMv2_response(const struct cifs_ses *ses, char *ntlmv2_hash)
rc = crypto_shash_init(&ses->server->secmech.sdeschmacmd5->shash);
if (rc) {
- cifs_dbg(VFS, "%s: could not init hmacmd5\n", __func__);
+ cifs_dbg(VFS, "%s: Could not init hmacmd5\n", __func__);
return rc;
}
@@ -723,7 +723,7 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
/* calculate ntlmv2_hash */
rc = calc_ntlmv2_hash(ses, ntlmv2_hash, nls_cp);
if (rc) {
- cifs_dbg(VFS, "could not get v2 hash rc %d\n", rc);
+ cifs_dbg(VFS, "Could not get v2 hash rc %d\n", rc);
goto unlock;
}
@@ -783,7 +783,7 @@ calc_seckey(struct cifs_ses *ses)
ctx_arc4 = kmalloc(sizeof(*ctx_arc4), GFP_KERNEL);
if (!ctx_arc4) {
- cifs_dbg(VFS, "could not allocate arc4 context\n");
+ cifs_dbg(VFS, "Could not allocate arc4 context\n");
return -ENOMEM;
}
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index c31f362fa098..889f9c71049b 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -534,6 +534,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
seq_puts(s, ",signloosely");
if (tcon->nocase)
seq_puts(s, ",nocase");
+ if (tcon->nodelete)
+ seq_puts(s, ",nodelete");
if (tcon->local_lease)
seq_puts(s, ",locallease");
if (tcon->retry)
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index c9e2e6bbca13..c7a311d28d3d 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -156,5 +156,5 @@ extern int cifs_truncate_page(struct address_space *mapping, loff_t from);
extern const struct export_operations cifs_export_ops;
#endif /* CONFIG_CIFS_NFSD_EXPORT */
-#define CIFS_VERSION "2.26"
+#define CIFS_VERSION "2.27"
#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 39b708d9d86d..e133bb3e172f 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -562,6 +562,7 @@ struct smb_vol {
bool override_gid:1;
bool dynperm:1;
bool noperm:1;
+ bool nodelete:1;
bool mode_ace:1;
bool no_psx_acl:1; /* set if posix acl support should be disabled */
bool cifs_acl:1;
@@ -1029,6 +1030,7 @@ struct cifs_ses {
#define CIFS_MAX_CHANNELS 16
struct cifs_chan chans[CIFS_MAX_CHANNELS];
+ struct cifs_chan *binding_chan;
size_t chan_count;
size_t chan_max;
atomic_t chan_seq; /* round robin state */
@@ -1036,23 +1038,31 @@ struct cifs_ses {
/*
* When binding a new channel, we need to access the channel which isn't fully
- * established yet (one past the established count)
+ * established yet.
*/
static inline
struct cifs_chan *cifs_ses_binding_channel(struct cifs_ses *ses)
{
if (ses->binding)
- return &ses->chans[ses->chan_count];
+ return ses->binding_chan;
else
return NULL;
}
+/*
+ * Returns the server pointer of the session. When binding a new
+ * channel this returns the last channel which isn't fully established
+ * yet.
+ *
+ * This function should be used for negprot/sess.setup codepaths. For
+ * the other requests see cifs_pick_channel().
+ */
static inline
struct TCP_Server_Info *cifs_ses_server(struct cifs_ses *ses)
{
if (ses->binding)
- return ses->chans[ses->chan_count].server;
+ return ses->binding_chan->server;
else
return ses->server;
}
@@ -1136,6 +1146,7 @@ struct cifs_tcon {
bool retry:1;
bool nocase:1;
bool nohandlecache:1; /* if strange server resource prob can turn off */
+ bool nodelete:1;
bool seal:1; /* transport encryption for this mounted share */
bool unix_ext:1; /* if false disable Linux extensions to CIFS protocol
for this mount even if server would support */
@@ -1333,6 +1344,7 @@ struct cifs_io_parms {
__u64 offset;
unsigned int length;
struct cifs_tcon *tcon;
+ struct TCP_Server_Info *server;
};
struct cifs_aio_ctx {
@@ -1380,6 +1392,7 @@ struct cifs_readdata {
struct cifs_readdata *rdata,
struct iov_iter *iter);
struct kvec iov[2];
+ struct TCP_Server_Info *server;
#ifdef CONFIG_CIFS_SMB_DIRECT
struct smbd_mr *mr;
#endif
@@ -1406,6 +1419,7 @@ struct cifs_writedata {
pid_t pid;
unsigned int bytes;
int result;
+ struct TCP_Server_Info *server;
#ifdef CONFIG_CIFS_SMB_DIRECT
struct smbd_mr *mr;
#endif
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 12a895e02db4..bd92070ca30c 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -45,25 +45,25 @@ extern int smb_send(struct TCP_Server_Info *, struct smb_hdr *,
unsigned int /* length */);
extern unsigned int _get_xid(void);
extern void _free_xid(unsigned int);
-#define get_xid() \
-({ \
+#define get_xid() \
+({ \
unsigned int __xid = _get_xid(); \
- cifs_dbg(FYI, "CIFS VFS: in %s as Xid: %u with uid: %d\n", \
+ cifs_dbg(FYI, "VFS: in %s as Xid: %u with uid: %d\n", \
__func__, __xid, \
from_kuid(&init_user_ns, current_fsuid())); \
- trace_smb3_enter(__xid, __func__); \
- __xid; \
+ trace_smb3_enter(__xid, __func__); \
+ __xid; \
})
-#define free_xid(curr_xid) \
-do { \
- _free_xid(curr_xid); \
- cifs_dbg(FYI, "CIFS VFS: leaving %s (xid = %u) rc = %d\n", \
- __func__, curr_xid, (int)rc); \
- if (rc) \
+#define free_xid(curr_xid) \
+do { \
+ _free_xid(curr_xid); \
+ cifs_dbg(FYI, "VFS: leaving %s (xid = %u) rc = %d\n", \
+ __func__, curr_xid, (int)rc); \
+ if (rc) \
trace_smb3_exit_err(curr_xid, __func__, (int)rc); \
- else \
- trace_smb3_exit_done(curr_xid, __func__); \
+ else \
+ trace_smb3_exit_done(curr_xid, __func__); \
} while (0)
extern int init_cifs_idmap(void);
extern void exit_cifs_idmap(void);
@@ -89,16 +89,20 @@ extern void cifs_mid_q_entry_release(struct mid_q_entry *midEntry);
extern void cifs_wake_up_task(struct mid_q_entry *mid);
extern int cifs_handle_standard(struct TCP_Server_Info *server,
struct mid_q_entry *mid);
+extern bool cifs_match_ipaddr(struct sockaddr *srcaddr, struct sockaddr *rhs);
extern int cifs_discard_remaining_data(struct TCP_Server_Info *server);
extern int cifs_call_async(struct TCP_Server_Info *server,
struct smb_rqst *rqst,
mid_receive_t *receive, mid_callback_t *callback,
mid_handle_t *handle, void *cbdata, const int flags,
const struct cifs_credits *exist_credits);
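+/* pick an established channel of @ses to send a regular request on */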
+extern struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses);
extern int cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
+ struct TCP_Server_Info *server,
struct smb_rqst *rqst, int *resp_buf_type,
const int flags, struct kvec *resp_iov);
extern int compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
+ struct TCP_Server_Info *server,
const int flags, const int num_rqst,
struct smb_rqst *rqst, int *resp_buf_type,
struct kvec *resp_iov);
@@ -589,6 +593,8 @@ void cifs_free_hash(struct crypto_shash **shash, struct sdesc **sdesc);
extern void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page,
unsigned int *len, unsigned int *offset);
+struct cifs_chan *
+cifs_ses_find_chan(struct cifs_ses *ses, struct TCP_Server_Info *server);
int cifs_try_adding_channels(struct cifs_ses *ses);
int cifs_ses_add_channel(struct cifs_ses *ses,
struct cifs_server_iface *iface);
@@ -616,6 +622,10 @@ static inline int get_dfs_path(const unsigned int xid, struct cifs_ses *ses,
return dfs_cache_find(xid, ses, nls_codepage, remap, old_path,
referral, NULL);
}
+
+int match_target_ip(struct TCP_Server_Info *server,
+ const char *share, size_t share_len,
+ bool *result);
#endif
static inline int cifs_create_options(struct cifs_sb_info *cifs_sb, int options)
diff --git a/fs/cifs/cifsroot.c b/fs/cifs/cifsroot.c
index 37edbfb8e096..9e91a5a40aae 100644
--- a/fs/cifs/cifsroot.c
+++ b/fs/cifs/cifsroot.c
@@ -56,7 +56,7 @@ static int __init cifs_root_setup(char *line)
/* len is strlen(unc) + '\0' */
len = s - line + 1;
if (len > sizeof(root_dev)) {
- printk(KERN_ERR "Root-CIFS: UNC path too long\n");
+ pr_err("Root-CIFS: UNC path too long\n");
return 1;
}
strlcpy(root_dev, line, len);
@@ -66,7 +66,7 @@ static int __init cifs_root_setup(char *line)
sizeof(root_opts), "%s,%s",
DEFAULT_MNT_OPTS, s + 1);
if (n >= sizeof(root_opts)) {
- printk(KERN_ERR "Root-CIFS: mount options string too long\n");
+ pr_err("Root-CIFS: mount options string too long\n");
root_opts[sizeof(root_opts)-1] = '\0';
return 1;
}
@@ -83,7 +83,7 @@ __setup("cifsroot=", cifs_root_setup);
int __init cifs_root_data(char **dev, char **opts)
{
if (!root_dev[0] || root_server_addr == htonl(INADDR_NONE)) {
- printk(KERN_ERR "Root-CIFS: no SMB server address\n");
+ pr_err("Root-CIFS: no SMB server address\n");
return -1;
}
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 5014a82391ff..bf41ee048396 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -129,6 +129,7 @@ static int __cifs_reconnect_tcon(const struct nls_table *nlsc,
struct cifs_tcon *tcon)
{
int rc;
+ struct TCP_Server_Info *server = tcon->ses->server;
struct dfs_cache_tgt_list tl;
struct dfs_cache_tgt_iterator *it = NULL;
char *tree;
@@ -141,15 +142,14 @@ static int __cifs_reconnect_tcon(const struct nls_table *nlsc,
if (!tree)
return -ENOMEM;
- if (tcon->ipc) {
- scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$",
- tcon->ses->server->hostname);
- rc = CIFSTCon(0, tcon->ses, tree, tcon, nlsc);
- goto out;
- }
-
if (!tcon->dfs_path) {
- rc = CIFSTCon(0, tcon->ses, tcon->treeName, tcon, nlsc);
+ if (tcon->ipc) {
+ scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$",
+ server->hostname);
+ rc = CIFSTCon(0, tcon->ses, tree, tcon, nlsc);
+ } else {
+ rc = CIFSTCon(0, tcon->ses, tcon->treeName, tcon, nlsc);
+ }
goto out;
}
@@ -157,13 +157,13 @@ static int __cifs_reconnect_tcon(const struct nls_table *nlsc,
if (rc)
goto out;
- extract_unc_hostname(tcon->ses->server->hostname, &tcp_host,
- &tcp_host_len);
+ extract_unc_hostname(server->hostname, &tcp_host, &tcp_host_len);
for (it = dfs_cache_get_tgt_iterator(&tl); it;
it = dfs_cache_get_next_tgt(&tl, it)) {
const char *share, *prefix;
size_t share_len, prefix_len;
+ bool target_match;
rc = dfs_cache_get_tgt_share(it, &share, &share_len, &prefix,
&prefix_len);
@@ -177,19 +177,38 @@ static int __cifs_reconnect_tcon(const struct nls_table *nlsc,
if (dfs_host_len != tcp_host_len
|| strncasecmp(dfs_host, tcp_host, dfs_host_len) != 0) {
- cifs_dbg(FYI, "%s: skipping %.*s, doesn't match %.*s",
+ cifs_dbg(FYI, "%s: %.*s doesn't match %.*s\n",
__func__,
(int)dfs_host_len, dfs_host,
(int)tcp_host_len, tcp_host);
- continue;
- }
- scnprintf(tree, MAX_TREE_SIZE, "\\%.*s", (int)share_len, share);
+ rc = match_target_ip(server, dfs_host, dfs_host_len,
+ &target_match);
+ if (rc) {
+ cifs_dbg(VFS, "%s: failed to match target ip: %d\n",
+ __func__, rc);
+ break;
+ }
+
+ if (!target_match) {
+ cifs_dbg(FYI, "%s: skipping target\n", __func__);
+ continue;
+ }
+ }
- rc = CIFSTCon(0, tcon->ses, tree, tcon, nlsc);
- if (!rc) {
- rc = update_super_prepath(tcon, prefix, prefix_len);
- break;
+ if (tcon->ipc) {
+ scnprintf(tree, MAX_TREE_SIZE, "\\\\%.*s\\IPC$",
+ (int)share_len, share);
+ rc = CIFSTCon(0, tcon->ses, tree, tcon, nlsc);
+ } else {
+ scnprintf(tree, MAX_TREE_SIZE, "\\%.*s", (int)share_len,
+ share);
+ rc = CIFSTCon(0, tcon->ses, tree, tcon, nlsc);
+ if (!rc) {
+ rc = update_super_prepath(tcon, prefix,
+ prefix_len);
+ break;
+ }
}
if (rc == -EREMOTE)
break;
@@ -262,8 +281,8 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
(server->tcpStatus != CifsNeedReconnect),
10 * HZ);
if (rc < 0) {
- cifs_dbg(FYI, "%s: aborting reconnect due to a received"
- " signal by the process\n", __func__);
+ cifs_dbg(FYI, "%s: aborting reconnect due to a received signal by the process\n",
+ __func__);
return -ERESTARTSYS;
}
@@ -324,7 +343,7 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc);
if (rc) {
- printk_once(KERN_WARNING "reconnect tcon failed rc = %d\n", rc);
+ pr_warn_once("reconnect tcon failed rc = %d\n", rc);
goto out;
}
@@ -557,7 +576,7 @@ cifs_enable_signing(struct TCP_Server_Info *server, bool mnt_sign_required)
/* If server requires signing, does client allow it? */
if (srv_sign_required) {
if (!mnt_sign_enabled) {
- cifs_dbg(VFS, "Server requires signing, but it's disabled in SecurityFlags!");
+ cifs_dbg(VFS, "Server requires signing, but it's disabled in SecurityFlags!\n");
return -ENOTSUPP;
}
server->sign = true;
@@ -566,14 +585,14 @@ cifs_enable_signing(struct TCP_Server_Info *server, bool mnt_sign_required)
/* If client requires signing, does server allow it? */
if (mnt_sign_required) {
if (!srv_sign_enabled) {
- cifs_dbg(VFS, "Server does not support signing!");
+ cifs_dbg(VFS, "Server does not support signing!\n");
return -ENOTSUPP;
}
server->sign = true;
}
if (cifs_rdma_enabled(server) && server->sign)
- cifs_dbg(VFS, "Signing is enabled, and RDMA read/write will be disabled");
+ cifs_dbg(VFS, "Signing is enabled, and RDMA read/write will be disabled\n");
return 0;
}
@@ -703,7 +722,7 @@ CIFSSMBNegotiate(const unsigned int xid, struct cifs_ses *ses)
pSMB->hdr.Flags2 |= (SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS);
if (should_set_ext_sec_flag(ses->sectype)) {
- cifs_dbg(FYI, "Requesting extended security.");
+ cifs_dbg(FYI, "Requesting extended security\n");
pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC;
}
@@ -2375,7 +2394,7 @@ int
CIFSSMBWrite2(const unsigned int xid, struct cifs_io_parms *io_parms,
unsigned int *nbytes, struct kvec *iov, int n_vec)
{
- int rc = -EACCES;
+ int rc;
WRITE_REQ *pSMB = NULL;
int wct;
int smb_hdr_len;
@@ -3868,7 +3887,7 @@ GetExtAttrRetry:
struct file_chattr_info *pfinfo;
/* BB Do we need a cast or hash here ? */
if (count != 16) {
- cifs_dbg(FYI, "Illegal size ret in GetExtAttr\n");
+ cifs_dbg(FYI, "Invalid size ret in GetExtAttr\n");
rc = -EIO;
goto GetExtAttrOut;
}
@@ -4244,7 +4263,7 @@ QFileInfoRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cifs_dbg(FYI, "Send error in QFileInfo = %d", rc);
+ cifs_dbg(FYI, "Send error in QFileInfo = %d\n", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
@@ -4411,7 +4430,7 @@ UnixQFileInfoRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cifs_dbg(FYI, "Send error in UnixQFileInfo = %d", rc);
+ cifs_dbg(FYI, "Send error in UnixQFileInfo = %d\n", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
@@ -4493,7 +4512,7 @@ UnixQPathInfoRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cifs_dbg(FYI, "Send error in UnixQPathInfo = %d", rc);
+ cifs_dbg(FYI, "Send error in UnixQPathInfo = %d\n", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
@@ -4913,7 +4932,7 @@ GetInodeNumberRetry:
struct file_internal_info *pfinfo;
/* BB Do we need a cast or hash here ? */
if (count < 8) {
- cifs_dbg(FYI, "Illegal size ret in QryIntrnlInf\n");
+ cifs_dbg(FYI, "Invalid size ret in QryIntrnlInf\n");
rc = -EIO;
goto GetInodeNumOut;
}
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 28268ed461b8..5fac34f192af 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -75,7 +75,7 @@ enum {
Opt_forceuid, Opt_noforceuid,
Opt_forcegid, Opt_noforcegid,
Opt_noblocksend, Opt_noautotune, Opt_nolease,
- Opt_hard, Opt_soft, Opt_perm, Opt_noperm,
+ Opt_hard, Opt_soft, Opt_perm, Opt_noperm, Opt_nodelete,
Opt_mapposix, Opt_nomapposix,
Opt_mapchars, Opt_nomapchars, Opt_sfu,
Opt_nosfu, Opt_nodfs, Opt_posixpaths,
@@ -141,6 +141,7 @@ static const match_table_t cifs_mount_option_tokens = {
{ Opt_soft, "soft" },
{ Opt_perm, "perm" },
{ Opt_noperm, "noperm" },
+ { Opt_nodelete, "nodelete" },
{ Opt_mapchars, "mapchars" }, /* SFU style */
{ Opt_nomapchars, "nomapchars" },
{ Opt_mapposix, "mapposix" }, /* SFM style */
@@ -426,8 +427,7 @@ static void reconn_inval_dfs_target(struct TCP_Server_Info *server,
}
static inline int reconn_setup_dfs_targets(struct cifs_sb_info *cifs_sb,
- struct dfs_cache_tgt_list *tl,
- struct dfs_cache_tgt_iterator **it)
+ struct dfs_cache_tgt_list *tl)
{
if (!cifs_sb->origin_fullpath)
return -EOPNOTSUPP;
@@ -472,7 +472,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
} else {
cifs_sb = CIFS_SB(sb);
- rc = reconn_setup_dfs_targets(cifs_sb, &tgt_list, &tgt_it);
+ rc = reconn_setup_dfs_targets(cifs_sb, &tgt_list);
if (rc && (rc != -EOPNOTSUPP)) {
cifs_server_dbg(VFS, "%s: no target servers for DFS failover\n",
__func__);
@@ -572,26 +572,26 @@ cifs_reconnect(struct TCP_Server_Info *server)
try_to_freeze();
mutex_lock(&server->srv_mutex);
+#ifdef CONFIG_CIFS_DFS_UPCALL
/*
* Set up next DFS target server (if any) for reconnect. If DFS
* feature is disabled, then we will retry last server we
* connected to before.
*/
+ reconn_inval_dfs_target(server, cifs_sb, &tgt_list, &tgt_it);
+#endif
+ rc = reconn_set_ipaddr(server);
+ if (rc) {
+ cifs_dbg(FYI, "%s: failed to resolve hostname: %d\n",
+ __func__, rc);
+ }
+
if (cifs_rdma_enabled(server))
rc = smbd_reconnect(server);
else
rc = generic_ip_connect(server);
if (rc) {
cifs_dbg(FYI, "reconnect error %d\n", rc);
-#ifdef CONFIG_CIFS_DFS_UPCALL
- reconn_inval_dfs_target(server, cifs_sb, &tgt_list,
- &tgt_it);
-#endif
- rc = reconn_set_ipaddr(server);
- if (rc) {
- cifs_dbg(FYI, "%s: failed to resolve hostname: %d\n",
- __func__, rc);
- }
mutex_unlock(&server->srv_mutex);
msleep(3000);
} else {
@@ -879,8 +879,7 @@ dequeue_mid(struct mid_q_entry *mid, bool malformed)
* function has finished processing it is a bug.
*/
if (mid->mid_flags & MID_DELETED)
- printk_once(KERN_WARNING
- "trying to dequeue a deleted mid\n");
+ pr_warn_once("trying to dequeue a deleted mid\n");
else {
list_del_init(&mid->qhead);
mid->mid_flags |= MID_DELETED;
@@ -1229,9 +1228,8 @@ next_pdu:
smb2_add_credits_from_hdr(bufs[i], server);
cifs_dbg(FYI, "Received oplock break\n");
} else {
- cifs_server_dbg(VFS, "No task to wake, unknown frame "
- "received! NumMids %d\n",
- atomic_read(&midCount));
+ cifs_server_dbg(VFS, "No task to wake, unknown frame received! NumMids %d\n",
+ atomic_read(&midCount));
cifs_dump_mem("Received Data is: ", bufs[i],
HEADER_SIZE(server));
smb2_add_credits_from_hdr(bufs[i], server);
@@ -1476,9 +1474,7 @@ cifs_parse_smb_version(char *value, struct smb_vol *vol, bool is_smb3)
cifs_dbg(VFS, "vers=1.0 (cifs) not permitted when mounting with smb3\n");
return 1;
}
- cifs_dbg(VFS, "Use of the less secure dialect vers=1.0 "
- "is not recommended unless required for "
- "access to very old servers\n");
+ cifs_dbg(VFS, "Use of the less secure dialect vers=1.0 is not recommended unless required for access to very old servers\n");
vol->ops = &smb1_operations;
vol->vals = &smb1_values;
break;
@@ -1545,7 +1541,7 @@ cifs_parse_devname(const char *devname, struct smb_vol *vol)
size_t len;
if (unlikely(!devname || !*devname)) {
- cifs_dbg(VFS, "Device name not specified.\n");
+ cifs_dbg(VFS, "Device name not specified\n");
return -EINVAL;
}
@@ -1695,13 +1691,13 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
case 0:
break;
case -ENOMEM:
- cifs_dbg(VFS, "Unable to allocate memory for devname.\n");
+ cifs_dbg(VFS, "Unable to allocate memory for devname\n");
goto cifs_parse_mount_err;
case -EINVAL:
- cifs_dbg(VFS, "Malformed UNC in devname.\n");
+ cifs_dbg(VFS, "Malformed UNC in devname\n");
goto cifs_parse_mount_err;
default:
- cifs_dbg(VFS, "Unknown error parsing devname.\n");
+ cifs_dbg(VFS, "Unknown error parsing devname\n");
goto cifs_parse_mount_err;
}
@@ -1761,6 +1757,9 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
case Opt_noperm:
vol->noperm = 1;
break;
+ case Opt_nodelete:
+ vol->nodelete = 1;
+ break;
case Opt_mapchars:
vol->sfu_remap = true;
vol->remap = false; /* disable SFM mapping */
@@ -1909,7 +1908,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
vol->seal = 1;
break;
case Opt_noac:
- pr_warn("CIFS: Mount option noac not supported. Instead set /proc/fs/cifs/LookupCacheEnabled to 0\n");
+ pr_warn("Mount option noac not supported. Instead set /proc/fs/cifs/LookupCacheEnabled to 0\n");
break;
case Opt_fsc:
#ifndef CONFIG_CIFS_FSCACHE
@@ -1965,9 +1964,13 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
break;
case Opt_multichannel:
vol->multichannel = true;
+ /* if number of channels not specified, default to 2 */
+ if (vol->max_channels < 2)
+ vol->max_channels = 2;
break;
case Opt_nomultichannel:
vol->multichannel = false;
+ vol->max_channels = 1;
break;
case Opt_compress:
vol->compression = UNKNOWN_TYPE;
@@ -2156,7 +2159,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
if (strnlen(string, CIFS_MAX_USERNAME_LEN) >
CIFS_MAX_USERNAME_LEN) {
- pr_warn("CIFS: username too long\n");
+ pr_warn("username too long\n");
goto cifs_parse_mount_err;
}
@@ -2222,7 +2225,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
temp_len = strlen(value);
vol->password = kzalloc(temp_len+1, GFP_KERNEL);
if (vol->password == NULL) {
- pr_warn("CIFS: no memory for password\n");
+ pr_warn("no memory for password\n");
goto cifs_parse_mount_err;
}
@@ -2246,7 +2249,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
if (!cifs_convert_address(dstaddr, string,
strlen(string))) {
- pr_err("CIFS: bad ip= option (%s).\n", string);
+ pr_err("bad ip= option (%s)\n", string);
goto cifs_parse_mount_err;
}
got_ip = true;
@@ -2258,14 +2261,14 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
if (strnlen(string, CIFS_MAX_DOMAINNAME_LEN)
== CIFS_MAX_DOMAINNAME_LEN) {
- pr_warn("CIFS: domain name too long\n");
+ pr_warn("domain name too long\n");
goto cifs_parse_mount_err;
}
kfree(vol->domainname);
vol->domainname = kstrdup(string, GFP_KERNEL);
if (!vol->domainname) {
- pr_warn("CIFS: no memory for domainname\n");
+ pr_warn("no memory for domainname\n");
goto cifs_parse_mount_err;
}
cifs_dbg(FYI, "Domain name set\n");
@@ -2278,7 +2281,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
if (!cifs_convert_address(
(struct sockaddr *)&vol->srcaddr,
string, strlen(string))) {
- pr_warn("CIFS: Could not parse srcaddr: %s\n",
+ pr_warn("Could not parse srcaddr: %s\n",
string);
goto cifs_parse_mount_err;
}
@@ -2289,7 +2292,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
goto out_nomem;
if (strnlen(string, 1024) >= 65) {
- pr_warn("CIFS: iocharset name too long.\n");
+ pr_warn("iocharset name too long\n");
goto cifs_parse_mount_err;
}
@@ -2298,7 +2301,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
vol->iocharset = kstrdup(string,
GFP_KERNEL);
if (!vol->iocharset) {
- pr_warn("CIFS: no memory for charset\n");
+ pr_warn("no memory for charset\n");
goto cifs_parse_mount_err;
}
}
@@ -2329,7 +2332,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
* set at top of the function
*/
if (i == RFC1001_NAME_LEN && string[i] != 0)
- pr_warn("CIFS: netbiosname longer than 15 truncated.\n");
+ pr_warn("netbiosname longer than 15 truncated\n");
break;
case Opt_servern:
/* servernetbiosname specified override *SMBSERVER */
@@ -2355,7 +2358,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
/* The string has 16th byte zero still from
set at top of the function */
if (i == RFC1001_NAME_LEN && string[i] != 0)
- pr_warn("CIFS: server netbiosname longer than 15 truncated.\n");
+ pr_warn("server netbiosname longer than 15 truncated\n");
break;
case Opt_ver:
/* version of mount userspace tools, not dialect */
@@ -2366,17 +2369,15 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
/* If interface changes in mount.cifs bump to new ver */
if (strncasecmp(string, "1", 1) == 0) {
if (strlen(string) > 1) {
- pr_warn("Bad mount helper ver=%s. Did "
- "you want SMB1 (CIFS) dialect "
- "and mean to type vers=1.0 "
- "instead?\n", string);
+ pr_warn("Bad mount helper ver=%s. Did you want SMB1 (CIFS) dialect and mean to type vers=1.0 instead?\n",
+ string);
goto cifs_parse_mount_err;
}
/* This is the default */
break;
}
/* For all other value, error */
- pr_warn("CIFS: Invalid mount helper version specified\n");
+ pr_warn("Invalid mount helper version specified\n");
goto cifs_parse_mount_err;
case Opt_vers:
/* protocol version (dialect) */
@@ -2419,7 +2420,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
}
if (!sloppy && invalid) {
- pr_err("CIFS: Unknown mount option \"%s\"\n", invalid);
+ pr_err("Unknown mount option \"%s\"\n", invalid);
goto cifs_parse_mount_err;
}
@@ -2455,7 +2456,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
slash = strchr(&vol->UNC[2], '\\');
len = slash - &vol->UNC[2];
if (!cifs_convert_address(dstaddr, &vol->UNC[2], len)) {
- pr_err("Unable to determine destination address.\n");
+ pr_err("Unable to determine destination address\n");
goto cifs_parse_mount_err;
}
}
@@ -2466,20 +2467,15 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
if (uid_specified)
vol->override_uid = override_uid;
else if (override_uid == 1)
- pr_notice("CIFS: ignoring forceuid mount option specified with no uid= option.\n");
+ pr_notice("ignoring forceuid mount option specified with no uid= option\n");
if (gid_specified)
vol->override_gid = override_gid;
else if (override_gid == 1)
- pr_notice("CIFS: ignoring forcegid mount option specified with no gid= option.\n");
+ pr_notice("ignoring forcegid mount option specified with no gid= option\n");
if (got_version == false)
- pr_warn_once("No dialect specified on mount. Default has changed"
- " to a more secure dialect, SMB2.1 or later (e.g. "
- "SMB3.1.1), from CIFS (SMB1). To use the less secure "
- "SMB1 dialect to access old servers which do not "
- "support SMB3.1.1 (or even SMB3 or SMB2.1) specify "
- "vers=1.0 on mount.\n");
+ pr_warn_once("No dialect specified on mount. Default has changed to a more secure dialect, SMB2.1 or later (e.g. SMB3.1.1), from CIFS (SMB1). To use the less secure SMB1 dialect to access old servers which do not support SMB3.1.1 (or even SMB3 or SMB2.1) specify vers=1.0 on mount.\n");
kfree(mountdata_copy);
return 0;
@@ -2496,8 +2492,8 @@ cifs_parse_mount_err:
* specified, or if srcaddr is specified and
* matches the IP address of the rhs argument.
*/
-static bool
-srcip_matches(struct sockaddr *srcaddr, struct sockaddr *rhs)
+bool
+cifs_match_ipaddr(struct sockaddr *srcaddr, struct sockaddr *rhs)
{
switch (srcaddr->sa_family) {
case AF_UNSPEC:
@@ -2588,7 +2584,7 @@ match_address(struct TCP_Server_Info *server, struct sockaddr *addr,
return false; /* don't expect to be here */
}
- if (!srcip_matches(srcaddr, (struct sockaddr *)&server->srcaddr))
+ if (!cifs_match_ipaddr(srcaddr, (struct sockaddr *)&server->srcaddr))
return false;
return true;
@@ -3197,8 +3193,8 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
strlen(ses->domainName),
GFP_KERNEL);
if (!vol->domainname) {
- cifs_dbg(FYI, "Unable to allocate %zd bytes for "
- "domain\n", len);
+ cifs_dbg(FYI, "Unable to allocate %zd bytes for domain\n",
+ len);
rc = -ENOMEM;
kfree(vol->username);
vol->username = NULL;
@@ -3363,6 +3359,8 @@ static int match_tcon(struct cifs_tcon *tcon, struct smb_vol *volume_info)
return 0;
if (tcon->no_lease != volume_info->no_lease)
return 0;
+ if (tcon->nodelete != volume_info->nodelete)
+ return 0;
return 1;
}
@@ -3519,10 +3517,9 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
if (volume_info->linux_ext) {
if (ses->server->posix_ext_supported) {
tcon->posix_extensions = true;
- printk_once(KERN_WARNING
- "SMB3.11 POSIX Extensions are experimental\n");
+ pr_warn_once("SMB3.11 POSIX Extensions are experimental\n");
} else {
- cifs_dbg(VFS, "Server does not support mounting with posix SMB3.11 extensions.\n");
+ cifs_dbg(VFS, "Server does not support mounting with posix SMB3.11 extensions\n");
rc = -EOPNOTSUPP;
goto out_fail;
}
@@ -3580,6 +3577,16 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
cifs_dbg(VFS, "cache=singleclient requested on mount but NO_CACHING flag set on share\n");
}
+ if (volume_info->no_lease) {
+ if (ses->server->vals->protocol_id == 0) {
+ cifs_dbg(VFS,
+ "SMB2 or later required for nolease option\n");
+ rc = -EOPNOTSUPP;
+ goto out_fail;
+ } else
+ tcon->no_lease = volume_info->no_lease;
+ }
+
/*
* We can have only one retry value for a connection to a share so for
* resources mounted more than once to the same server share the last
@@ -3588,8 +3595,8 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
tcon->retry = volume_info->retry;
tcon->nocase = volume_info->nocase;
tcon->nohandlecache = volume_info->nohandlecache;
+ tcon->nodelete = volume_info->nodelete;
tcon->local_lease = volume_info->local_lease;
- tcon->no_lease = volume_info->no_lease;
INIT_LIST_HEAD(&tcon->pending_opens);
spin_lock(&cifs_tcp_ses_lock);
@@ -3929,14 +3936,8 @@ generic_ip_connect(struct TCP_Server_Info *server)
socket->sk->sk_rcvbuf = 140 * 1024;
}
- if (server->tcp_nodelay) {
- int val = 1;
- rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY,
- (char *)&val, sizeof(val));
- if (rc)
- cifs_dbg(FYI, "set TCP_NODELAY socket option error %d\n",
- rc);
- }
+ if (server->tcp_nodelay)
+ tcp_sock_set_nodelay(socket->sk);
cifs_dbg(FYI, "sndbuf %d rcvbuf %d rcvtimeo 0x%lx\n",
socket->sk->sk_sndbuf,
@@ -4742,8 +4743,7 @@ static int is_path_remote(struct cifs_sb_info *cifs_sb, struct smb_vol *vol,
rc = cifs_are_all_path_components_accessible(server, xid, tcon,
cifs_sb, full_path, tcon->Flags & SMB_SHARE_IS_IN_DFS);
if (rc != 0) {
- cifs_server_dbg(VFS, "cannot query dirs between root and final path, "
- "enabling CIFS_MOUNT_USE_PREFIX_PATH\n");
+ cifs_server_dbg(VFS, "cannot query dirs between root and final path, enabling CIFS_MOUNT_USE_PREFIX_PATH\n");
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
rc = 0;
}
diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
index a67f88bf7ae1..df81c718d2fa 100644
--- a/fs/cifs/dfs_cache.c
+++ b/fs/cifs/dfs_cache.c
@@ -198,7 +198,7 @@ static ssize_t dfscache_proc_write(struct file *file, const char __user *buffer,
if (c != '0')
return -EINVAL;
- cifs_dbg(FYI, "clearing dfs cache");
+ cifs_dbg(FYI, "clearing dfs cache\n");
down_write(&htable_rw_lock);
flush_cache_ents();
@@ -234,8 +234,8 @@ static inline void dump_tgts(const struct cache_entry *ce)
static inline void dump_ce(const struct cache_entry *ce)
{
- cifs_dbg(FYI, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,"
- "interlink=%s,path_consumed=%d,expired=%s\n", ce->path,
+ cifs_dbg(FYI, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,interlink=%s,path_consumed=%d,expired=%s\n",
+ ce->path,
ce->srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ttl,
ce->etime.tv_nsec,
IS_INTERLINK_SET(ce->flags) ? "yes" : "no",
@@ -453,11 +453,11 @@ static void remove_oldest_entry(void)
}
if (!to_del) {
- cifs_dbg(FYI, "%s: no entry to remove", __func__);
+ cifs_dbg(FYI, "%s: no entry to remove\n", __func__);
return;
}
- cifs_dbg(FYI, "%s: removing entry", __func__);
+ cifs_dbg(FYI, "%s: removing entry\n", __func__);
dump_ce(to_del);
flush_cache_ent(to_del);
}
@@ -696,8 +696,8 @@ static int __dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
}
if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES) {
- cifs_dbg(FYI, "%s: reached max cache size (%d)", __func__,
- CACHE_MAX_ENTRIES);
+ cifs_dbg(FYI, "%s: reached max cache size (%d)\n",
+ __func__, CACHE_MAX_ENTRIES);
down_write(&htable_rw_lock);
remove_oldest_entry();
up_write(&htable_rw_lock);
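
The __dfs_cache_find() hunk above shows the cache's bound being enforced: once cache_count reaches CACHE_MAX_ENTRIES, one entry is evicted under the exclusive side of htable_rw_lock. A sketch of the locking discipline, where lookup_cache_entry() is a hypothetical stand-in for the shared-side lookup that this diff does not show:

        /* Lookups take the rwsem shared ... */
        down_read(&htable_rw_lock);
        ce = lookup_cache_entry(path);          /* hypothetical helper */
        up_read(&htable_rw_lock);

        /* ... eviction takes it exclusive before mutating the table. */
        if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES) {
                down_write(&htable_rw_lock);
                remove_oldest_entry();
                up_write(&htable_rw_lock);
        }
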
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 75ddce8ef456..8277859d12a3 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -857,7 +857,7 @@ cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
tcon->need_reopen_files = false;
- cifs_dbg(FYI, "Reopen persistent handles");
+ cifs_dbg(FYI, "Reopen persistent handles\n");
INIT_LIST_HEAD(&tmp_list);
/* list all files open on tree connection, reopen resilient handles */
@@ -1853,7 +1853,7 @@ cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
unsigned int xid;
struct dentry *dentry = open_file->dentry;
struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
- struct cifs_io_parms io_parms;
+ struct cifs_io_parms io_parms = {0};
cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
write_size, *offset, dentry);
@@ -2056,7 +2056,7 @@ find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
if (rc)
- cifs_dbg(FYI, "couldn't find writable handle rc=%d", rc);
+ cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc);
return cfile;
}
@@ -2292,8 +2292,6 @@ wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
struct address_space *mapping, struct writeback_control *wbc)
{
int rc;
- struct TCP_Server_Info *server =
- tlink_tcon(wdata->cfile->tlink)->ses->server;
wdata->sync_mode = wbc->sync_mode;
wdata->nr_pages = nr_pages;
@@ -2305,14 +2303,15 @@ wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;
wdata->pid = wdata->cfile->pid;
- rc = adjust_credits(server, &wdata->credits, wdata->bytes);
+ rc = adjust_credits(wdata->server, &wdata->credits, wdata->bytes);
if (rc)
return rc;
if (wdata->cfile->invalidHandle)
rc = -EAGAIN;
else
- rc = server->ops->async_writev(wdata, cifs_writedata_release);
+ rc = wdata->server->ops->async_writev(wdata,
+ cifs_writedata_release);
return rc;
}
@@ -2349,7 +2348,8 @@ static int cifs_writepages(struct address_space *mapping,
range_whole = true;
scanned = true;
}
- server = cifs_sb_master_tcon(cifs_sb)->ses->server;
+ server = cifs_pick_channel(cifs_sb_master_tcon(cifs_sb)->ses);
+
retry:
while (!done && index <= end) {
unsigned int i, nr_pages, found_pages, wsize;
@@ -2403,6 +2403,7 @@ retry:
wdata->credits = credits_on_stack;
wdata->cfile = cfile;
+ wdata->server = server;
cfile = NULL;
if (!wdata->cfile) {
@@ -2806,8 +2807,7 @@ cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list,
unsigned int wsize;
struct cifs_credits credits;
int rc;
- struct TCP_Server_Info *server =
- tlink_tcon(wdata->cfile->tlink)->ses->server;
+ struct TCP_Server_Info *server = wdata->server;
do {
if (wdata->cfile->invalidHandle) {
@@ -2893,7 +2893,7 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
else
pid = current->tgid;
- server = tlink_tcon(open_file->tlink)->ses->server;
+ server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
xid = get_xid();
do {
@@ -2923,11 +2923,9 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
from, &pagevec, cur_len, &start);
if (result < 0) {
cifs_dbg(VFS,
- "direct_writev couldn't get user pages "
- "(rc=%zd) iter type %d iov_offset %zd "
- "count %zd\n",
- result, iov_iter_type(from),
- from->iov_offset, from->count);
+ "direct_writev couldn't get user pages (rc=%zd) iter type %d iov_offset %zd count %zd\n",
+ result, iov_iter_type(from),
+ from->iov_offset, from->count);
dump_stack();
rc = result;
@@ -2999,6 +2997,7 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
wdata->nr_pages = nr_pages;
wdata->offset = (__u64)offset;
wdata->cfile = cifsFileInfo_get(open_file);
+ wdata->server = server;
wdata->pid = pid;
wdata->bytes = cur_len;
wdata->pagesz = PAGE_SIZE;
@@ -3540,8 +3539,10 @@ static int cifs_resend_rdata(struct cifs_readdata *rdata,
unsigned int rsize;
struct cifs_credits credits;
int rc;
- struct TCP_Server_Info *server =
- tlink_tcon(rdata->cfile->tlink)->ses->server;
+ struct TCP_Server_Info *server;
+
+ /* XXX: should we pick a new channel here? */
+ server = rdata->server;
do {
if (rdata->cfile->invalidHandle) {
@@ -3620,7 +3621,7 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
size_t start;
struct iov_iter direct_iov = ctx->iter;
- server = tlink_tcon(open_file->tlink)->ses->server;
+ server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
pid = open_file->pid;
@@ -3654,12 +3655,10 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
cur_len, &start);
if (result < 0) {
cifs_dbg(VFS,
- "couldn't get user pages (rc=%zd)"
- " iter type %d"
- " iov_offset %zd count %zd\n",
- result, iov_iter_type(&direct_iov),
- direct_iov.iov_offset,
- direct_iov.count);
+ "Couldn't get user pages (rc=%zd) iter type %d iov_offset %zd count %zd\n",
+ result, iov_iter_type(&direct_iov),
+ direct_iov.iov_offset,
+ direct_iov.count);
dump_stack();
rc = result;
@@ -3706,6 +3705,7 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
rdata->tailsz = PAGE_SIZE;
}
+ rdata->server = server;
rdata->cfile = cifsFileInfo_get(open_file);
rdata->nr_pages = npages;
rdata->offset = offset;
@@ -4018,7 +4018,7 @@ cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
unsigned int xid;
char *cur_offset;
struct cifsFileInfo *open_file;
- struct cifs_io_parms io_parms;
+ struct cifs_io_parms io_parms = {0};
int buf_type = CIFS_NO_BUFFER;
__u32 pid;
@@ -4035,7 +4035,7 @@ cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
}
open_file = file->private_data;
tcon = tlink_tcon(open_file->tlink);
- server = tcon->ses->server;
+ server = cifs_pick_channel(tcon->ses);
if (!server->ops->sync_read) {
free_xid(xid);
@@ -4074,6 +4074,7 @@ cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
io_parms.tcon = tcon;
io_parms.offset = *offset;
io_parms.length = current_read_size;
+ io_parms.server = server;
rc = server->ops->sync_read(xid, &open_file->fid, &io_parms,
&bytes_read, &cur_offset,
&buf_type);
@@ -4162,7 +4163,7 @@ cifs_readv_complete(struct work_struct *work)
for (i = 0; i < rdata->nr_pages; i++) {
struct page *page = rdata->pages[i];
- lru_cache_add_file(page);
+ lru_cache_add(page);
if (rdata->result == 0 ||
(rdata->result == -EAGAIN && got_bytes)) {
@@ -4232,7 +4233,7 @@ readpages_fill_pages(struct TCP_Server_Info *server,
* fill them until the writes are flushed.
*/
zero_user(page, 0, PAGE_SIZE);
- lru_cache_add_file(page);
+ lru_cache_add(page);
flush_dcache_page(page);
SetPageUptodate(page);
unlock_page(page);
@@ -4242,7 +4243,7 @@ readpages_fill_pages(struct TCP_Server_Info *server,
continue;
} else {
/* no need to hold page hostage */
- lru_cache_add_file(page);
+ lru_cache_add(page);
unlock_page(page);
put_page(page);
rdata->pages[i] = NULL;
@@ -4376,7 +4377,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
pid = current->tgid;
rc = 0;
- server = tlink_tcon(open_file->tlink)->ses->server;
+ server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
__func__, file, mapping, num_pages);
@@ -4437,7 +4438,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
/* best to give up if we're out of mem */
list_for_each_entry_safe(page, tpage, &tmplist, lru) {
list_del(&page->lru);
- lru_cache_add_file(page);
+ lru_cache_add(page);
unlock_page(page);
put_page(page);
}
@@ -4447,6 +4448,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
}
rdata->cfile = cifsFileInfo_get(open_file);
+ rdata->server = server;
rdata->mapping = mapping;
rdata->offset = offset;
rdata->bytes = bytes;
@@ -4475,7 +4477,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
add_credits_and_wake_if(server, &rdata->credits, 0);
for (i = 0; i < rdata->nr_pages; i++) {
page = rdata->pages[i];
- lru_cache_add_file(page);
+ lru_cache_add(page);
unlock_page(page);
put_page(page);
}
@@ -4828,7 +4830,7 @@ static int cifs_swap_activate(struct swap_info_struct *sis,
}
*span = sis->pages;
- printk_once(KERN_WARNING "Swap support over SMB3 is experimental\n");
+ pr_warn_once("Swap support over SMB3 is experimental\n");
/*
* TODO: consider adding ACL (or documenting how) to prevent other
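
Several of the file.c hunks above replace direct ses->server dereferences with cifs_pick_channel(), spreading reads and writes across a multichannel session. A simplified sketch of what that helper does, paraphrased from the upstream implementation (fs/cifs/transport.c) rather than copied; the binding short-circuit uses the binding_chan field this series introduces:

struct TCP_Server_Info *pick_channel_sketch(struct cifs_ses *ses)
{
        unsigned int index = 0;

        if (!ses)
                return NULL;

        /* While a secondary channel is being bound, route traffic to
         * that channel instead of rotating. */
        if (ses->binding)
                return ses->binding_chan->server;

        /* Otherwise round-robin across established channels. */
        if (ses->chan_count > 1)
                index = (unsigned int)atomic_inc_return(&ses->chan_seq)
                        % ses->chan_count;
        return ses->chans[index].server;
}
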
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 5d2965a23730..5072bcaf4be1 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -25,6 +25,7 @@
#include <linux/freezer.h>
#include <linux/sched/signal.h>
#include <linux/wait_bit.h>
+#include <linux/fiemap.h>
#include <asm/div64.h>
#include "cifsfs.h"
@@ -447,7 +448,7 @@ cifs_sfu_type(struct cifs_fattr *fattr, const char *path,
struct cifs_tcon *tcon;
struct cifs_fid fid;
struct cifs_open_parms oparms;
- struct cifs_io_parms io_parms;
+ struct cifs_io_parms io_parms = {0};
char buf[24];
unsigned int bytes_read;
char *pbuf;
@@ -1155,7 +1156,7 @@ struct inode *cifs_root_iget(struct super_block *sb)
/* some servers mistakenly claim POSIX support */
if (rc != -EOPNOTSUPP)
goto iget_no_retry;
- cifs_dbg(VFS, "server does not support POSIX extensions");
+ cifs_dbg(VFS, "server does not support POSIX extensions\n");
tcon->unix_ext = false;
}
@@ -1418,6 +1419,11 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry)
xid = get_xid();
+ if (tcon->nodelete) {
+ rc = -EACCES;
+ goto unlink_out;
+ }
+
/* Unlink can be called from rename so we can not take the
* sb->s_vfs_rename_mutex here */
full_path = build_path_from_dentry(dentry);
@@ -1746,6 +1752,12 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry)
goto rmdir_exit;
}
+ if (tcon->nodelete) {
+ rc = -EACCES;
+ cifs_put_tlink(tlink);
+ goto rmdir_exit;
+ }
+
rc = server->ops->rmdir(xid, tcon, full_path, cifs_sb);
cifs_put_tlink(tlink);
@@ -1999,7 +2011,7 @@ cifs_invalidate_mapping(struct inode *inode)
if (inode->i_mapping && inode->i_mapping->nrpages != 0) {
rc = invalidate_inode_pages2(inode->i_mapping);
if (rc)
- cifs_dbg(VFS, "%s: could not invalidate inode %p\n",
+ cifs_dbg(VFS, "%s: Could not invalidate inode %p\n",
__func__, inode);
}
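
Taken together with the match_tcon()/cifs_get_tcon() hunks in connect.c earlier, the whole nodelete feature condenses to three small steps; the sketch below is a condensation, not a literal copy of any one hunk:

        /* 1. Mount: copy the parsed option onto the tree connection. */
        tcon->nodelete = volume_info->nodelete;

        /* 2. Sharing: match_tcon() refuses to reuse a tcon whose
         *    nodelete setting differs from the new mount's. */
        if (tcon->nodelete != volume_info->nodelete)
                return 0;

        /* 3. Enforcement: cifs_unlink() and cifs_rmdir() fail with
         *    -EACCES before any request reaches the server. */
        if (tcon->nodelete) {
                rc = -EACCES;
                goto unlink_out;
        }
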
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index a25ef35b023e..c381d2d03ef6 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -308,7 +308,7 @@ cifs_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
int oplock = 0;
struct cifs_fid fid;
struct cifs_open_parms oparms;
- struct cifs_io_parms io_parms;
+ struct cifs_io_parms io_parms = {0};
int buf_type = CIFS_NO_BUFFER;
FILE_ALL_INFO file_info;
@@ -352,7 +352,7 @@ cifs_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
int oplock = 0;
struct cifs_fid fid;
struct cifs_open_parms oparms;
- struct cifs_io_parms io_parms;
+ struct cifs_io_parms io_parms = {0};
oparms.tcon = tcon;
oparms.cifs_sb = cifs_sb;
@@ -389,7 +389,7 @@ smb3_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
int rc;
struct cifs_fid fid;
struct cifs_open_parms oparms;
- struct cifs_io_parms io_parms;
+ struct cifs_io_parms io_parms = {0};
int buf_type = CIFS_NO_BUFFER;
__le16 *utf16_path;
__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
@@ -450,7 +450,7 @@ smb3_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
int rc;
struct cifs_fid fid;
struct cifs_open_parms oparms;
- struct cifs_io_parms io_parms;
+ struct cifs_io_parms io_parms = {0};
__le16 *utf16_path;
__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
struct kvec iov[2];
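
On the recurring "struct cifs_io_parms io_parms = {0};" changes in file.c, inode.c and the link.c hunks above: this series routes I/O through an explicitly chosen server (see io_parms.server in cifs_read()), and zero-initialization presumably keeps stack instances that never set the new member from passing an uninitialized pointer down to the transport layer. The fully initialized shape, mirroring the cifs_read() hunk:

        struct cifs_io_parms io_parms = {0};    /* unset members stay NULL/0 */

        io_parms.tcon   = tcon;
        io_parms.offset = *offset;
        io_parms.length = current_read_size;
        io_parms.server = server;               /* from cifs_pick_channel() */
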
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 550ce9020a3e..56791a692c8b 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -32,6 +32,9 @@
#include "cifs_unicode.h"
#include "smb2pdu.h"
#include "cifsfs.h"
+#ifdef CONFIG_CIFS_DFS_UPCALL
+#include "dns_resolve.h"
+#endif
extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
@@ -421,7 +424,7 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
if (data_offset >
len - sizeof(struct file_notify_information)) {
- cifs_dbg(FYI, "invalid data_offset %u\n",
+ cifs_dbg(FYI, "Invalid data_offset %u\n",
data_offset);
return true;
}
@@ -449,7 +452,7 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
large dirty files cached on the client */
if ((NT_STATUS_INVALID_HANDLE) ==
le32_to_cpu(pSMB->hdr.Status.CifsError)) {
- cifs_dbg(FYI, "invalid handle on oplock break\n");
+ cifs_dbg(FYI, "Invalid handle on oplock break\n");
return true;
} else if (ERRbadfid ==
le16_to_cpu(pSMB->hdr.Status.DosError.Error)) {
@@ -530,9 +533,9 @@ cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
cifs_sb->mnt_cifs_serverino_autodisabled = true;
- cifs_dbg(VFS, "Autodisabling the use of server inode numbers on %s.\n",
+ cifs_dbg(VFS, "Autodisabling the use of server inode numbers on %s\n",
tcon ? tcon->treeName : "new server");
- cifs_dbg(VFS, "The server doesn't seem to support them properly or the files might be on different servers (DFS).\n");
+ cifs_dbg(VFS, "The server doesn't seem to support them properly or the files might be on different servers (DFS)\n");
cifs_dbg(VFS, "Hardlinks will not be recognized on this mount. Consider mounting with the \"noserverino\" option to silence this message.\n");
}
@@ -874,7 +877,7 @@ setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
while (count && npages < max_pages) {
rc = iov_iter_get_pages(iter, pages, count, max_pages, &start);
if (rc < 0) {
- cifs_dbg(VFS, "couldn't get user pages (rc=%zd)\n", rc);
+ cifs_dbg(VFS, "Couldn't get user pages (rc=%zd)\n", rc);
break;
}
@@ -933,7 +936,7 @@ cifs_alloc_hash(const char *name,
*shash = crypto_alloc_shash(name, 0, 0);
if (IS_ERR(*shash)) {
- cifs_dbg(VFS, "could not allocate crypto %s\n", name);
+ cifs_dbg(VFS, "Could not allocate crypto %s\n", name);
rc = PTR_ERR(*shash);
*shash = NULL;
*sdesc = NULL;
@@ -1083,6 +1086,51 @@ void cifs_put_tcp_super(struct super_block *sb)
}
#ifdef CONFIG_CIFS_DFS_UPCALL
+int match_target_ip(struct TCP_Server_Info *server,
+ const char *share, size_t share_len,
+ bool *result)
+{
+ int rc;
+ char *target, *tip = NULL;
+ struct sockaddr tipaddr;
+
+ *result = false;
+
+ target = kzalloc(share_len + 3, GFP_KERNEL);
+ if (!target) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ scnprintf(target, share_len + 3, "\\\\%.*s", (int)share_len, share);
+
+ cifs_dbg(FYI, "%s: target name: %s\n", __func__, target + 2);
+
+ rc = dns_resolve_server_name_to_ip(target, &tip);
+ if (rc < 0)
+ goto out;
+
+ cifs_dbg(FYI, "%s: target ip: %s\n", __func__, tip);
+
+ if (!cifs_convert_address(&tipaddr, tip, strlen(tip))) {
+ cifs_dbg(VFS, "%s: failed to convert target ip address\n",
+ __func__);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ *result = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr,
+ &tipaddr);
+ cifs_dbg(FYI, "%s: ip addresses match: %u\n", __func__, *result);
+ rc = 0;
+
+out:
+ kfree(target);
+ kfree(tip);
+
+ return rc;
+}
+
static void tcon_super_cb(struct super_block *sb, void *arg)
{
struct super_cb_data *sd = arg;
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index 9b41436fb8db..b7ca4960d4ca 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -957,15 +957,15 @@ struct timespec64 cnvrtDosUnixTm(__le16 le_date, __le16 le_time, int offset)
sec = 2 * st->TwoSeconds;
min = st->Minutes;
if ((sec > 59) || (min > 59))
- cifs_dbg(VFS, "illegal time min %d sec %lld\n", min, sec);
+ cifs_dbg(VFS, "Invalid time min %d sec %lld\n", min, sec);
sec += (min * 60);
sec += 60 * 60 * st->Hours;
if (st->Hours > 24)
- cifs_dbg(VFS, "illegal hours %d\n", st->Hours);
+ cifs_dbg(VFS, "Invalid hours %d\n", st->Hours);
day = sd->Day;
month = sd->Month;
if (day < 1 || day > 31 || month < 1 || month > 12) {
- cifs_dbg(VFS, "illegal date, month %d day: %d\n", month, day);
+ cifs_dbg(VFS, "Invalid date, month %d day: %d\n", month, day);
day = clamp(day, 1, 31);
month = clamp(month, 1, 12);
}
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 50f776a8d4ba..6df0922e7e30 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -53,7 +53,7 @@ static void dump_cifs_file_struct(struct file *file, char *label)
return;
}
if (cf->invalidHandle)
- cifs_dbg(FYI, "invalid handle\n");
+ cifs_dbg(FYI, "Invalid handle\n");
if (cf->srch_inf.endOfSearch)
cifs_dbg(FYI, "end of search\n");
if (cf->srch_inf.emptyDir)
@@ -246,7 +246,7 @@ cifs_posix_to_fattr(struct cifs_fattr *fattr, struct smb2_posix_info *info,
*/
fattr->cf_mode = le32_to_cpu(info->Mode) & ~S_IFMT;
- cifs_dbg(FYI, "posix fattr: dev %d, reparse %d, mode %o",
+ cifs_dbg(FYI, "posix fattr: dev %d, reparse %d, mode %o\n",
le32_to_cpu(info->DeviceId),
le32_to_cpu(info->ReparseTag),
le32_to_cpu(info->Mode));
@@ -478,7 +478,7 @@ static char *nxt_dir_entry(char *old_entry, char *end_of_smb, int level)
u32 next_offset = le32_to_cpu(pDirInfo->NextEntryOffset);
if (old_entry + next_offset < old_entry) {
- cifs_dbg(VFS, "invalid offset %u\n", next_offset);
+ cifs_dbg(VFS, "Invalid offset %u\n", next_offset);
return NULL;
}
new_entry = old_entry + next_offset;
@@ -515,7 +515,7 @@ static void cifs_fill_dirent_posix(struct cifs_dirent *de,
/* payload should have already been checked at this point */
if (posix_info_parse(info, NULL, &parsed) < 0) {
- cifs_dbg(VFS, "invalid POSIX info payload");
+ cifs_dbg(VFS, "Invalid POSIX info payload\n");
return;
}
@@ -968,7 +968,7 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
} else if (current_entry != NULL) {
cifs_dbg(FYI, "entry %lld found\n", ctx->pos);
} else {
- cifs_dbg(FYI, "could not find entry\n");
+ cifs_dbg(FYI, "Could not find entry\n");
goto rddir2_exit;
}
cifs_dbg(FYI, "loop through %d times filling dir for net buf %p\n",
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 43a88e26d26b..5d05bd2822d2 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -122,7 +122,7 @@ int cifs_try_adding_channels(struct cifs_ses *ses)
tries++;
if (tries > 3*ses->chan_max) {
- cifs_dbg(FYI, "too many attempt at opening channels (%d channels left to open)\n",
+ cifs_dbg(FYI, "too many channel open attempts (%d channels left to open)\n",
left);
break;
}
@@ -150,6 +150,22 @@ int cifs_try_adding_channels(struct cifs_ses *ses)
return ses->chan_count - old_chan_count;
}
+/*
+ * If server is a channel of ses, return the corresponding enclosing
+ * cifs_chan, otherwise return NULL.
+ */
+struct cifs_chan *
+cifs_ses_find_chan(struct cifs_ses *ses, struct TCP_Server_Info *server)
+{
+ int i;
+
+ for (i = 0; i < ses->chan_count; i++) {
+ if (ses->chans[i].server == server)
+ return &ses->chans[i];
+ }
+ return NULL;
+}
+
int
cifs_ses_add_channel(struct cifs_ses *ses, struct cifs_server_iface *iface)
{
@@ -162,12 +178,14 @@ cifs_ses_add_channel(struct cifs_ses *ses, struct cifs_server_iface *iface)
int rc;
unsigned int xid = get_xid();
- cifs_dbg(FYI, "adding channel to ses %p (speed:%zu bps rdma:%s ",
- ses, iface->speed, iface->rdma_capable ? "yes" : "no");
if (iface->sockaddr.ss_family == AF_INET)
- cifs_dbg(FYI, "ip:%pI4)\n", &ipv4->sin_addr);
+ cifs_dbg(FYI, "adding channel to ses %p (speed:%zu bps rdma:%s ip:%pI4)\n",
+ ses, iface->speed, iface->rdma_capable ? "yes" : "no",
+ &ipv4->sin_addr);
else
- cifs_dbg(FYI, "ip:%pI6)\n", &ipv6->sin6_addr);
+ cifs_dbg(FYI, "adding channel to ses %p (speed:%zu bps rdma:%s ip:%pI4)\n",
+ ses, iface->speed, iface->rdma_capable ? "yes" : "no",
+ &ipv6->sin6_addr);
/*
* Setup a smb_vol with mostly the same info as the existing
@@ -198,7 +216,7 @@ cifs_ses_add_channel(struct cifs_ses *ses, struct cifs_server_iface *iface)
vol.UNC = unc;
vol.prepath = "";
- /* Re-use same version as master connection */
+ /* Reuse same version as master connection */
vol.vals = ses->server->vals;
vol.ops = ses->server->ops;
@@ -229,7 +247,7 @@ cifs_ses_add_channel(struct cifs_ses *ses, struct cifs_server_iface *iface)
mutex_lock(&ses->session_mutex);
- chan = &ses->chans[ses->chan_count];
+ chan = ses->binding_chan = &ses->chans[ses->chan_count];
chan->server = cifs_get_tcp_session(&vol);
if (IS_ERR(chan->server)) {
rc = PTR_ERR(chan->server);
@@ -261,7 +279,7 @@ cifs_ses_add_channel(struct cifs_ses *ses, struct cifs_server_iface *iface)
goto out;
/* success, put it on the list
- * XXX: sharing ses between 2 tcp server is not possible, the
+ * XXX: sharing ses between 2 tcp servers is not possible, the
* way "internal" linked lists works in linux makes element
* only able to belong to one list
*
@@ -274,6 +292,7 @@ cifs_ses_add_channel(struct cifs_ses *ses, struct cifs_server_iface *iface)
atomic_set(&ses->chan_seq, 0);
out:
ses->binding = false;
+ ses->binding_chan = NULL;
mutex_unlock(&ses->session_mutex);
if (rc && chan->server)
@@ -569,15 +588,15 @@ int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
tioffset = le32_to_cpu(pblob->TargetInfoArray.BufferOffset);
tilen = le16_to_cpu(pblob->TargetInfoArray.Length);
if (tioffset > blob_len || tioffset + tilen > blob_len) {
- cifs_dbg(VFS, "tioffset + tilen too high %u + %u",
- tioffset, tilen);
+ cifs_dbg(VFS, "tioffset + tilen too high %u + %u\n",
+ tioffset, tilen);
return -EINVAL;
}
if (tilen) {
ses->auth_key.response = kmemdup(bcc_ptr + tioffset, tilen,
GFP_KERNEL);
if (!ses->auth_key.response) {
- cifs_dbg(VFS, "Challenge target info alloc failure");
+ cifs_dbg(VFS, "Challenge target info alloc failure\n");
return -ENOMEM;
}
ses->auth_key.len = tilen;
@@ -970,7 +989,7 @@ sess_auth_lanman(struct sess_data *sess_data)
/* Calculate hash with password and copy into bcc_ptr.
* Encryption Key (stored as in cryptkey) gets used if the
- * security mode bit in Negottiate Protocol response states
+ * security mode bit in Negotiate Protocol response states
* to use challenge/response method (i.e. Password bit is 1).
*/
rc = calc_lanman_hash(ses->password, ses->server->cryptkey,
@@ -1303,9 +1322,8 @@ sess_auth_kerberos(struct sess_data *sess_data)
* sending us a response in an expected form
*/
if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) {
- cifs_dbg(VFS,
- "incorrect version of cifs.upcall (expected %d but got %d)",
- CIFS_SPNEGO_UPCALL_VERSION, msg->version);
+ cifs_dbg(VFS, "incorrect version of cifs.upcall (expected %d but got %d)\n",
+ CIFS_SPNEGO_UPCALL_VERSION, msg->version);
rc = -EKEYREJECTED;
goto out_put_spnego_key;
}
@@ -1313,8 +1331,8 @@ sess_auth_kerberos(struct sess_data *sess_data)
ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
GFP_KERNEL);
if (!ses->auth_key.response) {
- cifs_dbg(VFS, "Kerberos can't allocate (%u bytes) memory",
- msg->sesskey_len);
+ cifs_dbg(VFS, "Kerberos can't allocate (%u bytes) memory\n",
+ msg->sesskey_len);
rc = -ENOMEM;
goto out_put_spnego_key;
}
@@ -1657,8 +1675,7 @@ static int select_sec(struct cifs_ses *ses, struct sess_data *sess_data)
type = cifs_select_sectype(ses->server, ses->sectype);
cifs_dbg(FYI, "sess setup type %d\n", type);
if (type == Unspecified) {
- cifs_dbg(VFS,
- "Unable to select appropriate authentication method!");
+ cifs_dbg(VFS, "Unable to select appropriate authentication method!\n");
return -EINVAL;
}
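
cifs_ses_find_chan() added above gets its main caller in the smb2_reconnect() hunk near the end of this diff; the binding handshake it participates in reduces to the sketch below. This is a simplification: the real smb2_reconnect() holds ses->session_mutex across the sequence and has extra error paths that also clear the flags.

        mutex_lock(&ses->session_mutex);

        /* Reconnecting a secondary channel: mark the session as binding
         * so setup traffic is routed to that channel. */
        if (server->is_channel) {
                ses->binding = true;
                ses->binding_chan = cifs_ses_find_chan(ses, server);
        }

        rc = cifs_negotiate_protocol(0, ses);
        if (!rc && ses->need_reconnect)
                rc = cifs_setup_session(0, ses, nls_codepage);

        /* End of channel binding: back to round-robin delivery. */
        ses->binding = false;
        ses->binding_chan = NULL;

        mutex_unlock(&ses->session_mutex);
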
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index b130efaf8feb..197ed455e657 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -247,7 +247,7 @@ check2ndT2(char *buf)
/* check for plausible wct, bcc and t2 data and parm sizes */
/* check for parm and data offset going beyond end of smb */
if (pSMB->WordCount != 10) { /* coalesce_t2 depends on this */
- cifs_dbg(FYI, "invalid transact2 word count\n");
+ cifs_dbg(FYI, "Invalid transact2 word count\n");
return -EINVAL;
}
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
index a8c301ae00ed..0a116fc490a9 100644
--- a/fs/cifs/smb2inode.c
+++ b/fs/cifs/smb2inode.c
@@ -47,6 +47,18 @@ free_set_inf_compound(struct smb_rqst *rqst)
}
+struct cop_vars {
+ struct cifs_open_parms oparms;
+ struct kvec rsp_iov[3];
+ struct smb_rqst rqst[3];
+ struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
+ struct kvec qi_iov[1];
+ struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
+ struct kvec close_iov[1];
+ struct smb2_file_rename_info rename_info;
+ struct smb2_file_link_info link_info;
+};
+
static int
smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb, const char *full_path,
@@ -54,35 +66,36 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
__u32 create_options, umode_t mode, void *ptr, int command,
struct cifsFileInfo *cfile)
{
+ struct cop_vars *vars = NULL;
+ struct kvec *rsp_iov;
+ struct smb_rqst *rqst;
int rc;
__le16 *utf16_path = NULL;
__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
- struct cifs_open_parms oparms;
struct cifs_fid fid;
struct cifs_ses *ses = tcon->ses;
+ struct TCP_Server_Info *server;
int num_rqst = 0;
- struct smb_rqst rqst[3];
int resp_buftype[3];
- struct kvec rsp_iov[3];
- struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
- struct kvec qi_iov[1];
- struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
- struct kvec close_iov[1];
struct smb2_query_info_rsp *qi_rsp = NULL;
int flags = 0;
__u8 delete_pending[8] = {1, 0, 0, 0, 0, 0, 0, 0};
unsigned int size[2];
void *data[2];
- struct smb2_file_rename_info rename_info;
- struct smb2_file_link_info link_info;
int len;
+ vars = kzalloc(sizeof(*vars), GFP_ATOMIC);
+ if (vars == NULL)
+ return -ENOMEM;
+ rqst = &vars->rqst[0];
+ rsp_iov = &vars->rsp_iov[0];
+
+ server = cifs_pick_channel(ses);
+
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
- memset(rqst, 0, sizeof(rqst));
resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
- memset(rsp_iov, 0, sizeof(rsp_iov));
/* We already have a handle so we can skip the open */
if (cfile)
@@ -95,19 +108,18 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
goto finished;
}
- memset(&oparms, 0, sizeof(struct cifs_open_parms));
- oparms.tcon = tcon;
- oparms.desired_access = desired_access;
- oparms.disposition = create_disposition;
- oparms.create_options = cifs_create_options(cifs_sb, create_options);
- oparms.fid = &fid;
- oparms.reconnect = false;
- oparms.mode = mode;
-
- memset(&open_iov, 0, sizeof(open_iov));
- rqst[num_rqst].rq_iov = open_iov;
+ vars->oparms.tcon = tcon;
+ vars->oparms.desired_access = desired_access;
+ vars->oparms.disposition = create_disposition;
+ vars->oparms.create_options = cifs_create_options(cifs_sb, create_options);
+ vars->oparms.fid = &fid;
+ vars->oparms.reconnect = false;
+ vars->oparms.mode = mode;
+
+ rqst[num_rqst].rq_iov = &vars->open_iov[0];
rqst[num_rqst].rq_nvec = SMB2_CREATE_IOV_SIZE;
- rc = SMB2_open_init(tcon, &rqst[num_rqst], &oplock, &oparms,
+ rc = SMB2_open_init(tcon, server,
+ &rqst[num_rqst], &oplock, &vars->oparms,
utf16_path);
kfree(utf16_path);
if (rc)
@@ -121,12 +133,12 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
/* Operation */
switch (command) {
case SMB2_OP_QUERY_INFO:
- memset(&qi_iov, 0, sizeof(qi_iov));
- rqst[num_rqst].rq_iov = qi_iov;
+ rqst[num_rqst].rq_iov = &vars->qi_iov[0];
rqst[num_rqst].rq_nvec = 1;
if (cfile)
- rc = SMB2_query_info_init(tcon, &rqst[num_rqst],
+ rc = SMB2_query_info_init(tcon, server,
+ &rqst[num_rqst],
cfile->fid.persistent_fid,
cfile->fid.volatile_fid,
FILE_ALL_INFORMATION,
@@ -134,10 +146,11 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
sizeof(struct smb2_file_all_info) +
PATH_MAX * 2, 0, NULL);
else {
- rc = SMB2_query_info_init(tcon, &rqst[num_rqst],
+ rc = SMB2_query_info_init(tcon, server,
+ &rqst[num_rqst],
COMPOUND_FID,
COMPOUND_FID,
- FILE_ALL_INFORMATION,
+ FILE_ALL_INFORMATION,
SMB2_O_INFO_FILE, 0,
sizeof(struct smb2_file_all_info) +
PATH_MAX * 2, 0, NULL);
@@ -164,14 +177,14 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
trace_smb3_mkdir_enter(xid, ses->Suid, tcon->tid, full_path);
break;
case SMB2_OP_RMDIR:
- memset(&si_iov, 0, sizeof(si_iov));
- rqst[num_rqst].rq_iov = si_iov;
+ rqst[num_rqst].rq_iov = &vars->si_iov[0];
rqst[num_rqst].rq_nvec = 1;
size[0] = 1; /* sizeof __u8 See MS-FSCC section 2.4.11 */
data[0] = &delete_pending[0];
- rc = SMB2_set_info_init(tcon, &rqst[num_rqst], COMPOUND_FID,
+ rc = SMB2_set_info_init(tcon, server,
+ &rqst[num_rqst], COMPOUND_FID,
COMPOUND_FID, current->tgid,
FILE_DISPOSITION_INFORMATION,
SMB2_O_INFO_FILE, 0, data, size);
@@ -182,14 +195,14 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
trace_smb3_rmdir_enter(xid, ses->Suid, tcon->tid, full_path);
break;
case SMB2_OP_SET_EOF:
- memset(&si_iov, 0, sizeof(si_iov));
- rqst[num_rqst].rq_iov = si_iov;
+ rqst[num_rqst].rq_iov = &vars->si_iov[0];
rqst[num_rqst].rq_nvec = 1;
size[0] = 8; /* sizeof __le64 */
data[0] = ptr;
- rc = SMB2_set_info_init(tcon, &rqst[num_rqst], COMPOUND_FID,
+ rc = SMB2_set_info_init(tcon, server,
+ &rqst[num_rqst], COMPOUND_FID,
COMPOUND_FID, current->tgid,
FILE_END_OF_FILE_INFORMATION,
SMB2_O_INFO_FILE, 0, data, size);
@@ -200,8 +213,7 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
trace_smb3_set_eof_enter(xid, ses->Suid, tcon->tid, full_path);
break;
case SMB2_OP_SET_INFO:
- memset(&si_iov, 0, sizeof(si_iov));
- rqst[num_rqst].rq_iov = si_iov;
+ rqst[num_rqst].rq_iov = &vars->si_iov[0];
rqst[num_rqst].rq_nvec = 1;
@@ -209,13 +221,15 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
data[0] = ptr;
if (cfile)
- rc = SMB2_set_info_init(tcon, &rqst[num_rqst],
+ rc = SMB2_set_info_init(tcon, server,
+ &rqst[num_rqst],
cfile->fid.persistent_fid,
cfile->fid.volatile_fid, current->tgid,
FILE_BASIC_INFORMATION,
SMB2_O_INFO_FILE, 0, data, size);
else {
- rc = SMB2_set_info_init(tcon, &rqst[num_rqst],
+ rc = SMB2_set_info_init(tcon, server,
+ &rqst[num_rqst],
COMPOUND_FID,
COMPOUND_FID, current->tgid,
FILE_BASIC_INFORMATION,
@@ -233,30 +247,31 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
full_path);
break;
case SMB2_OP_RENAME:
- memset(&si_iov, 0, sizeof(si_iov));
- rqst[num_rqst].rq_iov = si_iov;
+ rqst[num_rqst].rq_iov = &vars->si_iov[0];
rqst[num_rqst].rq_nvec = 2;
len = (2 * UniStrnlen((wchar_t *)ptr, PATH_MAX));
- rename_info.ReplaceIfExists = 1;
- rename_info.RootDirectory = 0;
- rename_info.FileNameLength = cpu_to_le32(len);
+ vars->rename_info.ReplaceIfExists = 1;
+ vars->rename_info.RootDirectory = 0;
+ vars->rename_info.FileNameLength = cpu_to_le32(len);
size[0] = sizeof(struct smb2_file_rename_info);
- data[0] = &rename_info;
+ data[0] = &vars->rename_info;
size[1] = len + 2 /* null */;
data[1] = (__le16 *)ptr;
if (cfile)
- rc = SMB2_set_info_init(tcon, &rqst[num_rqst],
+ rc = SMB2_set_info_init(tcon, server,
+ &rqst[num_rqst],
cfile->fid.persistent_fid,
cfile->fid.volatile_fid,
current->tgid, FILE_RENAME_INFORMATION,
SMB2_O_INFO_FILE, 0, data, size);
else {
- rc = SMB2_set_info_init(tcon, &rqst[num_rqst],
+ rc = SMB2_set_info_init(tcon, server,
+ &rqst[num_rqst],
COMPOUND_FID, COMPOUND_FID,
current->tgid, FILE_RENAME_INFORMATION,
SMB2_O_INFO_FILE, 0, data, size);
@@ -271,23 +286,23 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
trace_smb3_rename_enter(xid, ses->Suid, tcon->tid, full_path);
break;
case SMB2_OP_HARDLINK:
- memset(&si_iov, 0, sizeof(si_iov));
- rqst[num_rqst].rq_iov = si_iov;
+ rqst[num_rqst].rq_iov = &vars->si_iov[0];
rqst[num_rqst].rq_nvec = 2;
len = (2 * UniStrnlen((wchar_t *)ptr, PATH_MAX));
- link_info.ReplaceIfExists = 0;
- link_info.RootDirectory = 0;
- link_info.FileNameLength = cpu_to_le32(len);
+ vars->link_info.ReplaceIfExists = 0;
+ vars->link_info.RootDirectory = 0;
+ vars->link_info.FileNameLength = cpu_to_le32(len);
size[0] = sizeof(struct smb2_file_link_info);
- data[0] = &link_info;
+ data[0] = &vars->link_info;
size[1] = len + 2 /* null */;
data[1] = (__le16 *)ptr;
- rc = SMB2_set_info_init(tcon, &rqst[num_rqst], COMPOUND_FID,
+ rc = SMB2_set_info_init(tcon, server,
+ &rqst[num_rqst], COMPOUND_FID,
COMPOUND_FID, current->tgid,
FILE_LINK_INFORMATION,
SMB2_O_INFO_FILE, 0, data, size);
@@ -308,10 +323,10 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
if (cfile)
goto after_close;
/* Close */
- memset(&close_iov, 0, sizeof(close_iov));
- rqst[num_rqst].rq_iov = close_iov;
+ rqst[num_rqst].rq_iov = &vars->close_iov[0];
rqst[num_rqst].rq_nvec = 1;
- rc = SMB2_close_init(tcon, &rqst[num_rqst], COMPOUND_FID,
+ rc = SMB2_close_init(tcon, server,
+ &rqst[num_rqst], COMPOUND_FID,
COMPOUND_FID, false);
smb2_set_related(&rqst[num_rqst]);
if (rc)
@@ -322,11 +337,13 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
if (cfile) {
cifsFileInfo_put(cfile);
cfile = NULL;
- rc = compound_send_recv(xid, ses, flags, num_rqst - 2,
+ rc = compound_send_recv(xid, ses, server,
+ flags, num_rqst - 2,
&rqst[1], &resp_buftype[1],
&rsp_iov[1]);
} else
- rc = compound_send_recv(xid, ses, flags, num_rqst,
+ rc = compound_send_recv(xid, ses, server,
+ flags, num_rqst,
rqst, resp_buftype,
rsp_iov);
@@ -336,8 +353,7 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
SMB2_open_free(&rqst[0]);
if (rc == -EREMCHG) {
- printk_once(KERN_WARNING "server share %s deleted\n",
- tcon->treeName);
+ pr_warn_once("server share %s deleted\n", tcon->treeName);
tcon->need_reconnect = true;
}
@@ -420,6 +436,7 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
+ kfree(vars);
return rc;
}
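
The smb2_compound_op() rewrite above is mostly mechanical: three smb_rqst/kvec sets plus the rename and link info blobs move from the stack into one kzalloc'd struct cop_vars, shrinking a frame that had outgrown the kernel stack. The skeleton of the pattern (a sketch; GFP_ATOMIC mirrors the hunk, and whether a sleeping GFP_KERNEL allocation would also be safe here is not answered by this diff):

static int compound_op_sketch(void)
{
        struct cop_vars *vars;
        struct smb_rqst *rqst;
        struct kvec *rsp_iov;
        int rc = 0;

        vars = kzalloc(sizeof(*vars), GFP_ATOMIC);
        if (vars == NULL)
                return -ENOMEM;

        /* Aliases preserve the body's rqst[n]/rsp_iov[n] indexing, so
         * most call sites only change from &x to &vars->x. */
        rqst = &vars->rqst[0];
        rsp_iov = &vars->rsp_iov[0];

        /* ... build and send the compound request as before ... */

        kfree(vars);            /* freed on every exit path */
        return rc;
}
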
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index 497afb0b9960..6a39451973f8 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -110,14 +110,14 @@ static __u32 get_neg_ctxt_len(struct smb2_sync_hdr *hdr, __u32 len,
/* Make sure that negotiate contexts start after gss security blob */
nc_offset = le32_to_cpu(pneg_rsp->NegotiateContextOffset);
if (nc_offset < non_ctxlen) {
- printk_once(KERN_WARNING "invalid negotiate context offset\n");
+ pr_warn_once("Invalid negotiate context offset\n");
return 0;
}
size_of_pad_before_neg_ctxts = nc_offset - non_ctxlen;
/* Verify that at least minimal negotiate contexts fit within frame */
if (len < nc_offset + (neg_count * sizeof(struct smb2_neg_context))) {
- printk_once(KERN_WARNING "negotiate context goes beyond end\n");
+ pr_warn_once("negotiate context goes beyond end\n");
return 0;
}
@@ -190,14 +190,14 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr)
return 1;
if (shdr->StructureSize != SMB2_HEADER_STRUCTURE_SIZE) {
- cifs_dbg(VFS, "Illegal structure size %u\n",
+ cifs_dbg(VFS, "Invalid structure size %u\n",
le16_to_cpu(shdr->StructureSize));
return 1;
}
command = le16_to_cpu(shdr->Command);
if (command >= NUMBER_OF_SMB2_COMMANDS) {
- cifs_dbg(VFS, "Illegal SMB2 command %d\n", command);
+ cifs_dbg(VFS, "Invalid SMB2 command %d\n", command);
return 1;
}
@@ -205,7 +205,7 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr)
if (command != SMB2_OPLOCK_BREAK_HE && (shdr->Status == 0 ||
pdu->StructureSize2 != SMB2_ERROR_STRUCTURE_SIZE2)) {
/* error packets have 9 byte structure size */
- cifs_dbg(VFS, "Illegal response size %u for command %d\n",
+ cifs_dbg(VFS, "Invalid response size %u for command %d\n",
le16_to_cpu(pdu->StructureSize2), command);
return 1;
} else if (command == SMB2_OPLOCK_BREAK_HE
@@ -213,7 +213,7 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr)
&& (le16_to_cpu(pdu->StructureSize2) != 44)
&& (le16_to_cpu(pdu->StructureSize2) != 36)) {
/* special case for SMB2.1 lease break message */
- cifs_dbg(VFS, "Illegal response size %d for oplock break\n",
+ cifs_dbg(VFS, "Invalid response size %d for oplock break\n",
le16_to_cpu(pdu->StructureSize2));
return 1;
}
@@ -864,14 +864,14 @@ ok:
d = server->secmech.sdescsha512;
rc = crypto_shash_init(&d->shash);
if (rc) {
- cifs_dbg(VFS, "%s: could not init sha512 shash\n", __func__);
+ cifs_dbg(VFS, "%s: Could not init sha512 shash\n", __func__);
return rc;
}
rc = crypto_shash_update(&d->shash, ses->preauth_sha_hash,
SMB2_PREAUTH_HASH_SIZE);
if (rc) {
- cifs_dbg(VFS, "%s: could not update sha512 shash\n", __func__);
+ cifs_dbg(VFS, "%s: Could not update sha512 shash\n", __func__);
return rc;
}
@@ -879,7 +879,7 @@ ok:
rc = crypto_shash_update(&d->shash,
iov[i].iov_base, iov[i].iov_len);
if (rc) {
- cifs_dbg(VFS, "%s: could not update sha512 shash\n",
+ cifs_dbg(VFS, "%s: Could not update sha512 shash\n",
__func__);
return rc;
}
@@ -887,7 +887,7 @@ ok:
rc = crypto_shash_final(&d->shash, ses->preauth_sha_hash);
if (rc) {
- cifs_dbg(VFS, "%s: could not finalize sha512 shash\n",
+ cifs_dbg(VFS, "%s: Could not finalize sha512 shash\n",
__func__);
return rc;
}
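
The preauth-hash hunk above (the one using server->secmech.sdescsha512) only recapitalizes its error strings, but the surrounding code is the standard crypto_shash init/update/final pattern. A self-contained sketch of the chained SMB 3.1.1 preauth hash, with the descriptor and hash buffer passed in rather than pulled from server->secmech as the real code does:

#include <crypto/hash.h>
#include <linux/uio.h>

static int preauth_hash_sketch(struct shash_desc *shash, u8 *hash,
                               struct kvec *iov, int nvec)
{
        int i, rc;

        rc = crypto_shash_init(shash);
        if (rc)
                return rc;
        /* Chain: H(n) = SHA-512(H(n-1) || message); 'hash' holds the
         * previous value on entry. */
        rc = crypto_shash_update(shash, hash, SMB2_PREAUTH_HASH_SIZE);
        if (rc)
                return rc;
        for (i = 0; i < nvec; i++) {
                rc = crypto_shash_update(shash, iov[i].iov_base,
                                         iov[i].iov_len);
                if (rc)
                        return rc;
        }
        /* Overwrites 'hash' in place, as ses->preauth_sha_hash is. */
        return crypto_shash_final(shash, hash);
}
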
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index f829f4165d38..736d86b8a910 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -12,6 +12,7 @@
#include <linux/uuid.h>
#include <linux/sort.h>
#include <crypto/aead.h>
+#include <linux/fiemap.h>
#include "cifsfs.h"
#include "cifsglob.h"
#include "smb2pdu.h"
@@ -79,7 +80,7 @@ smb2_add_credits(struct TCP_Server_Info *server,
if (*val > 65000) {
*val = 65000; /* Don't get near 64K credits, avoid srv bugs */
- printk_once(KERN_WARNING "server overflowed SMB3 credits\n");
+ pr_warn_once("server overflowed SMB3 credits\n");
}
server->in_flight--;
if (server->in_flight == 0 && (optype & CIFS_OP_MASK) != CIFS_NEG_OP)
@@ -708,7 +709,8 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon,
oparms.fid = pfid;
oparms.reconnect = false;
- rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, &utf16_path);
+ rc = SMB2_open_init(tcon, server,
+ &rqst[0], &oplock, &oparms, &utf16_path);
if (rc)
goto oshr_free;
smb2_set_next_command(tcon, &rqst[0]);
@@ -717,7 +719,8 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon,
rqst[1].rq_iov = qi_iov;
rqst[1].rq_nvec = 1;
- rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID,
+ rc = SMB2_query_info_init(tcon, server,
+ &rqst[1], COMPOUND_FID,
COMPOUND_FID, FILE_ALL_INFORMATION,
SMB2_O_INFO_FILE, 0,
sizeof(struct smb2_file_all_info) +
@@ -727,7 +730,8 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon,
smb2_set_related(&rqst[1]);
- rc = compound_send_recv(xid, ses, flags, 2, rqst,
+ rc = compound_send_recv(xid, ses, server,
+ flags, 2, rqst,
resp_buftype, rsp_iov);
mutex_lock(&tcon->crfid.fid_mutex);
@@ -767,8 +771,8 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon,
if (rc) {
if (rc == -EREMCHG) {
tcon->need_reconnect = true;
- printk_once(KERN_WARNING "server share %s deleted\n",
- tcon->treeName);
+ pr_warn_once("server share %s deleted\n",
+ tcon->treeName);
}
goto oshr_exit;
}
@@ -1102,6 +1106,7 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb)
{
struct cifs_ses *ses = tcon->ses;
+ struct TCP_Server_Info *server = cifs_pick_channel(ses);
__le16 *utf16_path = NULL;
int ea_name_len = strlen(ea_name);
int flags = 0;
@@ -1190,7 +1195,8 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
oparms.fid = &fid;
oparms.reconnect = false;
- rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
+ rc = SMB2_open_init(tcon, server,
+ &rqst[0], &oplock, &oparms, utf16_path);
if (rc)
goto sea_exit;
smb2_set_next_command(tcon, &rqst[0]);
@@ -1216,7 +1222,8 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
size[0] = len;
data[0] = ea;
- rc = SMB2_set_info_init(tcon, &rqst[1], COMPOUND_FID,
+ rc = SMB2_set_info_init(tcon, server,
+ &rqst[1], COMPOUND_FID,
COMPOUND_FID, current->tgid,
FILE_FULL_EA_INFORMATION,
SMB2_O_INFO_FILE, 0, data, size);
@@ -1228,10 +1235,12 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
memset(&close_iov, 0, sizeof(close_iov));
rqst[2].rq_iov = close_iov;
rqst[2].rq_nvec = 1;
- rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
+ rc = SMB2_close_init(tcon, server,
+ &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
smb2_set_related(&rqst[2]);
- rc = compound_send_recv(xid, ses, flags, 3, rqst,
+ rc = compound_send_recv(xid, ses, server,
+ flags, 3, rqst,
resp_buftype, rsp_iov);
/* no need to bump num_remote_opens because handle immediately closed */
@@ -1452,6 +1461,16 @@ req_res_key_exit:
return rc;
}
+struct iqi_vars {
+ struct smb_rqst rqst[3];
+ struct kvec rsp_iov[3];
+ struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
+ struct kvec qi_iov[1];
+ struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
+ struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
+ struct kvec close_iov[1];
+};
+
static int
smb2_ioctl_query_info(const unsigned int xid,
struct cifs_tcon *tcon,
@@ -1459,7 +1478,11 @@ smb2_ioctl_query_info(const unsigned int xid,
__le16 *path, int is_dir,
unsigned long p)
{
+ struct iqi_vars *vars;
+ struct smb_rqst *rqst;
+ struct kvec *rsp_iov;
struct cifs_ses *ses = tcon->ses;
+ struct TCP_Server_Info *server = cifs_pick_channel(ses);
char __user *arg = (char __user *)p;
struct smb_query_info qi;
struct smb_query_info __user *pqi;
@@ -1468,45 +1491,47 @@ smb2_ioctl_query_info(const unsigned int xid,
struct smb2_query_info_rsp *qi_rsp = NULL;
struct smb2_ioctl_rsp *io_rsp = NULL;
void *buffer = NULL;
- struct smb_rqst rqst[3];
int resp_buftype[3];
- struct kvec rsp_iov[3];
- struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
struct cifs_open_parms oparms;
u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
struct cifs_fid fid;
- struct kvec qi_iov[1];
- struct kvec io_iov[SMB2_IOCTL_IOV_SIZE];
- struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
- struct kvec close_iov[1];
unsigned int size[2];
void *data[2];
int create_options = is_dir ? CREATE_NOT_FILE : CREATE_NOT_DIR;
- memset(rqst, 0, sizeof(rqst));
+ vars = kzalloc(sizeof(*vars), GFP_ATOMIC);
+ if (vars == NULL)
+ return -ENOMEM;
+ rqst = &vars->rqst[0];
+ rsp_iov = &vars->rsp_iov[0];
+
resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
- memset(rsp_iov, 0, sizeof(rsp_iov));
if (copy_from_user(&qi, arg, sizeof(struct smb_query_info)))
- return -EFAULT;
+ goto e_fault;
- if (qi.output_buffer_length > 1024)
+ if (qi.output_buffer_length > 1024) {
+ kfree(vars);
return -EINVAL;
+ }
- if (!ses || !(ses->server))
+ if (!ses || !server) {
+ kfree(vars);
return -EIO;
+ }
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
buffer = memdup_user(arg + sizeof(struct smb_query_info),
qi.output_buffer_length);
- if (IS_ERR(buffer))
+ if (IS_ERR(buffer)) {
+ kfree(vars);
return PTR_ERR(buffer);
+ }
/* Open */
- memset(&open_iov, 0, sizeof(open_iov));
- rqst[0].rq_iov = open_iov;
+ rqst[0].rq_iov = &vars->open_iov[0];
rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
memset(&oparms, 0, sizeof(oparms));
@@ -1537,7 +1562,8 @@ smb2_ioctl_query_info(const unsigned int xid,
oparms.desired_access = FILE_READ_ATTRIBUTES | READ_CONTROL;
}
- rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, path);
+ rc = SMB2_open_init(tcon, server,
+ &rqst[0], &oplock, &oparms, path);
if (rc)
goto iqinf_exit;
smb2_set_next_command(tcon, &rqst[0]);
@@ -1548,11 +1574,11 @@ smb2_ioctl_query_info(const unsigned int xid,
if (!capable(CAP_SYS_ADMIN))
rc = -EPERM;
else {
- memset(&io_iov, 0, sizeof(io_iov));
- rqst[1].rq_iov = io_iov;
+ rqst[1].rq_iov = &vars->io_iov[0];
rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
- rc = SMB2_ioctl_init(tcon, &rqst[1],
+ rc = SMB2_ioctl_init(tcon, server,
+ &rqst[1],
COMPOUND_FID, COMPOUND_FID,
qi.info_type, true, buffer,
qi.output_buffer_length,
@@ -1565,31 +1591,32 @@ smb2_ioctl_query_info(const unsigned int xid,
if (!capable(CAP_SYS_ADMIN))
rc = -EPERM;
else {
- memset(&si_iov, 0, sizeof(si_iov));
- rqst[1].rq_iov = si_iov;
+ rqst[1].rq_iov = &vars->si_iov[0];
rqst[1].rq_nvec = 1;
size[0] = 8;
data[0] = buffer;
- rc = SMB2_set_info_init(tcon, &rqst[1],
+ rc = SMB2_set_info_init(tcon, server,
+ &rqst[1],
COMPOUND_FID, COMPOUND_FID,
current->tgid,
FILE_END_OF_FILE_INFORMATION,
SMB2_O_INFO_FILE, 0, data, size);
}
} else if (qi.flags == PASSTHRU_QUERY_INFO) {
- memset(&qi_iov, 0, sizeof(qi_iov));
- rqst[1].rq_iov = qi_iov;
+ rqst[1].rq_iov = &vars->qi_iov[0];
rqst[1].rq_nvec = 1;
- rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID,
+ rc = SMB2_query_info_init(tcon, server,
+ &rqst[1], COMPOUND_FID,
COMPOUND_FID, qi.file_info_class,
qi.info_type, qi.additional_information,
qi.input_buffer_length,
qi.output_buffer_length, buffer);
} else { /* unknown flags */
- cifs_tcon_dbg(VFS, "invalid passthru query flags: 0x%x\n", qi.flags);
+ cifs_tcon_dbg(VFS, "Invalid passthru query flags: 0x%x\n",
+ qi.flags);
rc = -EINVAL;
}
@@ -1599,16 +1626,17 @@ smb2_ioctl_query_info(const unsigned int xid,
smb2_set_related(&rqst[1]);
/* Close */
- memset(&close_iov, 0, sizeof(close_iov));
- rqst[2].rq_iov = close_iov;
+ rqst[2].rq_iov = &vars->close_iov[0];
rqst[2].rq_nvec = 1;
- rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
+ rc = SMB2_close_init(tcon, server,
+ &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
if (rc)
goto iqinf_exit;
smb2_set_related(&rqst[2]);
- rc = compound_send_recv(xid, ses, flags, 3, rqst,
+ rc = compound_send_recv(xid, ses, server,
+ flags, 3, rqst,
resp_buftype, rsp_iov);
if (rc)
goto iqinf_exit;
@@ -1649,6 +1677,7 @@ smb2_ioctl_query_info(const unsigned int xid,
}
iqinf_exit:
+ kfree(vars);
kfree(buffer);
SMB2_open_free(&rqst[0]);
if (qi.flags & PASSTHRU_FSCTL)
@@ -1719,7 +1748,7 @@ smb2_copychunk_range(const unsigned int xid,
if (rc == 0) {
if (ret_data_len !=
sizeof(struct copychunk_ioctl_rsp)) {
- cifs_tcon_dbg(VFS, "invalid cchunk response size\n");
+ cifs_tcon_dbg(VFS, "Invalid cchunk response size\n");
rc = -EIO;
goto cchunk_out;
}
@@ -1733,12 +1762,12 @@ smb2_copychunk_range(const unsigned int xid,
*/
if (le32_to_cpu(retbuf->TotalBytesWritten) >
le32_to_cpu(pcchunk->Length)) {
- cifs_tcon_dbg(VFS, "invalid copy chunk response\n");
+ cifs_tcon_dbg(VFS, "Invalid copy chunk response\n");
rc = -EIO;
goto cchunk_out;
}
if (le32_to_cpu(retbuf->ChunksWritten) != 1) {
- cifs_tcon_dbg(VFS, "invalid num chunks written\n");
+ cifs_tcon_dbg(VFS, "Invalid num chunks written\n");
rc = -EIO;
goto cchunk_out;
}
@@ -2159,6 +2188,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_open_parms oparms;
struct smb2_query_directory_rsp *qd_rsp = NULL;
struct smb2_create_rsp *op_rsp = NULL;
+ struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
if (!utf16_path)
@@ -2183,7 +2213,8 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
oparms.fid = fid;
oparms.reconnect = false;
- rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
+ rc = SMB2_open_init(tcon, server,
+ &rqst[0], &oplock, &oparms, utf16_path);
if (rc)
goto qdf_free;
smb2_set_next_command(tcon, &rqst[0]);
@@ -2196,7 +2227,8 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
rqst[1].rq_iov = qd_iov;
rqst[1].rq_nvec = SMB2_QUERY_DIRECTORY_IOV_SIZE;
- rc = SMB2_query_directory_init(xid, tcon, &rqst[1],
+ rc = SMB2_query_directory_init(xid, tcon, server,
+ &rqst[1],
COMPOUND_FID, COMPOUND_FID,
0, srch_inf->info_level);
if (rc)
@@ -2204,7 +2236,8 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
smb2_set_related(&rqst[1]);
- rc = compound_send_recv(xid, tcon->ses, flags, 2, rqst,
+ rc = compound_send_recv(xid, tcon->ses, server,
+ flags, 2, rqst,
resp_buftype, rsp_iov);
/* If the open failed there is nothing to do */
@@ -2409,6 +2442,7 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb)
{
struct cifs_ses *ses = tcon->ses;
+ struct TCP_Server_Info *server = cifs_pick_channel(ses);
int flags = 0;
struct smb_rqst rqst[3];
int resp_buftype[3];
@@ -2439,7 +2473,8 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
oparms.fid = &fid;
oparms.reconnect = false;
- rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
+ rc = SMB2_open_init(tcon, server,
+ &rqst[0], &oplock, &oparms, utf16_path);
if (rc)
goto qic_exit;
smb2_set_next_command(tcon, &rqst[0]);
@@ -2448,7 +2483,8 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
rqst[1].rq_iov = qi_iov;
rqst[1].rq_nvec = 1;
- rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID, COMPOUND_FID,
+ rc = SMB2_query_info_init(tcon, server,
+ &rqst[1], COMPOUND_FID, COMPOUND_FID,
class, type, 0,
output_len, 0,
NULL);
@@ -2461,19 +2497,21 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
rqst[2].rq_iov = close_iov;
rqst[2].rq_nvec = 1;
- rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
+ rc = SMB2_close_init(tcon, server,
+ &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
if (rc)
goto qic_exit;
smb2_set_related(&rqst[2]);
- rc = compound_send_recv(xid, ses, flags, 3, rqst,
+ rc = compound_send_recv(xid, ses, server,
+ flags, 3, rqst,
resp_buftype, rsp_iov);
if (rc) {
free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
if (rc == -EREMCHG) {
tcon->need_reconnect = true;
- printk_once(KERN_WARNING "server share %s deleted\n",
- tcon->treeName);
+ pr_warn_once("server share %s deleted\n",
+ tcon->treeName);
}
goto qic_exit;
}
@@ -2753,15 +2791,15 @@ parse_reparse_point(struct reparse_data_buffer *buf,
struct cifs_sb_info *cifs_sb)
{
if (plen < sizeof(struct reparse_data_buffer)) {
- cifs_dbg(VFS, "reparse buffer is too small. Must be "
- "at least 8 bytes but was %d\n", plen);
+ cifs_dbg(VFS, "reparse buffer is too small. Must be at least 8 bytes but was %d\n",
+ plen);
return -EIO;
}
if (plen < le16_to_cpu(buf->ReparseDataLength) +
sizeof(struct reparse_data_buffer)) {
- cifs_dbg(VFS, "srv returned invalid reparse buf "
- "length: %d\n", plen);
+ cifs_dbg(VFS, "srv returned invalid reparse buf length: %d\n",
+ plen);
return -EIO;
}
@@ -2776,8 +2814,8 @@ parse_reparse_point(struct reparse_data_buffer *buf,
(struct reparse_symlink_data_buffer *)buf,
plen, target_path, cifs_sb);
default:
- cifs_dbg(VFS, "srv returned unknown symlink buffer "
- "tag:0x%08x\n", le32_to_cpu(buf->ReparseTag));
+ cifs_dbg(VFS, "srv returned unknown symlink buffer tag:0x%08x\n",
+ le32_to_cpu(buf->ReparseTag));
return -EOPNOTSUPP;
}
}
@@ -2798,6 +2836,7 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
struct kvec err_iov = {NULL, 0};
struct smb2_err_rsp *err_buf = NULL;
struct smb2_symlink_err_rsp *symlink;
+ struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
unsigned int sub_len;
unsigned int sub_offset;
unsigned int print_len;
@@ -2843,7 +2882,8 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
oparms.fid = &fid;
oparms.reconnect = false;
- rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, utf16_path);
+ rc = SMB2_open_init(tcon, server,
+ &rqst[0], &oplock, &oparms, utf16_path);
if (rc)
goto querty_exit;
smb2_set_next_command(tcon, &rqst[0]);
@@ -2854,7 +2894,8 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
rqst[1].rq_iov = io_iov;
rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
- rc = SMB2_ioctl_init(tcon, &rqst[1], fid.persistent_fid,
+ rc = SMB2_ioctl_init(tcon, server,
+ &rqst[1], fid.persistent_fid,
fid.volatile_fid, FSCTL_GET_REPARSE_POINT,
true /* is_fctl */, NULL, 0,
CIFSMaxBufSize -
@@ -2872,13 +2913,15 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
rqst[2].rq_iov = close_iov;
rqst[2].rq_nvec = 1;
- rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
+ rc = SMB2_close_init(tcon, server,
+ &rqst[2], COMPOUND_FID, COMPOUND_FID, false);
if (rc)
goto querty_exit;
smb2_set_related(&rqst[2]);
- rc = compound_send_recv(xid, tcon->ses, flags, 3, rqst,
+ rc = compound_send_recv(xid, tcon->ses, server,
+ flags, 3, rqst,
resp_buftype, rsp_iov);
create_rsp = rsp_iov[0].iov_base;
@@ -3407,8 +3450,9 @@ static int smb3_fiemap(struct cifs_tcon *tcon,
int i, num, rc, flags, last_blob;
u64 next;
- if (fiemap_check_flags(fei, FIEMAP_FLAG_SYNC))
- return -EBADR;
+ rc = fiemap_prep(d_inode(cfile->dentry), fei, start, &len, 0);
+ if (rc)
+ return rc;
xid = get_xid();
again:
@@ -4571,7 +4615,7 @@ smb2_make_node(unsigned int xid, struct inode *inode,
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
int rc = -EPERM;
FILE_ALL_INFO *buf = NULL;
- struct cifs_io_parms io_parms;
+ struct cifs_io_parms io_parms = {0};
__u32 oplock = 0;
struct cifs_fid fid;
struct cifs_open_parms oparms;
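
On the smb3_fiemap() hunk above: fiemap_check_flags() was replaced tree-wide by fiemap_prep(), which validates the caller's flags against what the filesystem supports, clamps the requested range, and performs the FIEMAP_FLAG_SYNC writeback itself. A minimal caller, assuming the 5.8-era signature in <linux/fiemap.h>; the extent walk is elided:

#include <linux/fiemap.h>
#include <linux/fs.h>

static int fiemap_sketch(struct inode *inode,
                         struct fiemap_extent_info *fei,
                         u64 start, u64 len)
{
        int rc;

        /* 0: this filesystem supports no optional fiemap flags */
        rc = fiemap_prep(inode, fei, start, &len, 0);
        if (rc)
                return rc;

        /* ... enumerate extents via fiemap_fill_next_extent() ... */
        return 0;
}
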
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index b30aa3cdd845..ded96b529a4d 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -85,7 +85,7 @@ static const int smb2_req_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
int smb3_encryption_required(const struct cifs_tcon *tcon)
{
- if (!tcon)
+ if (!tcon || !tcon->ses)
return 0;
if ((tcon->ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) ||
(tcon->share_flags & SHI1005_FLAGS_ENCRYPT_DATA))
@@ -98,14 +98,13 @@ int smb3_encryption_required(const struct cifs_tcon *tcon)
static void
smb2_hdr_assemble(struct smb2_sync_hdr *shdr, __le16 smb2_cmd,
- const struct cifs_tcon *tcon)
+ const struct cifs_tcon *tcon,
+ struct TCP_Server_Info *server)
{
shdr->ProtocolId = SMB2_PROTO_NUMBER;
shdr->StructureSize = cpu_to_le16(64);
shdr->Command = smb2_cmd;
- if (tcon && tcon->ses && tcon->ses->server) {
- struct TCP_Server_Info *server = tcon->ses->server;
-
+ if (server) {
spin_lock(&server->req_lock);
/* Request up to 10 credits but don't go over the limit. */
if (server->credits >= server->max_credits)
@@ -125,8 +124,7 @@ smb2_hdr_assemble(struct smb2_sync_hdr *shdr, __le16 smb2_cmd,
/* GLOBAL_CAP_LARGE_MTU will only be set if dialect > SMB2.02 */
/* See sections 2.2.4 and 3.2.4.1.5 of MS-SMB2 */
- if ((tcon->ses) && (tcon->ses->server) &&
- (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
+ if (server && (server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
shdr->CreditCharge = cpu_to_le16(1);
/* else CreditCharge MBZ */
@@ -148,8 +146,7 @@ smb2_hdr_assemble(struct smb2_sync_hdr *shdr, __le16 smb2_cmd,
/* if (tcon->share_flags & SHI1005_FLAGS_DFS)
shdr->Flags |= SMB2_FLAGS_DFS_OPERATIONS; */
- if (tcon->ses && tcon->ses->server && tcon->ses->server->sign &&
- !smb3_encryption_required(tcon))
+ if (server && server->sign && !smb3_encryption_required(tcon))
shdr->Flags |= SMB2_FLAGS_SIGNED;
out:
return;
@@ -160,6 +157,7 @@ static int __smb2_reconnect(const struct nls_table *nlsc,
struct cifs_tcon *tcon)
{
int rc;
+ struct TCP_Server_Info *server = tcon->ses->server;
struct dfs_cache_tgt_list tl;
struct dfs_cache_tgt_iterator *it = NULL;
char *tree;
@@ -172,15 +170,15 @@ static int __smb2_reconnect(const struct nls_table *nlsc,
if (!tree)
return -ENOMEM;
- if (tcon->ipc) {
- scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$",
- tcon->ses->server->hostname);
- rc = SMB2_tcon(0, tcon->ses, tree, tcon, nlsc);
- goto out;
- }
-
if (!tcon->dfs_path) {
- rc = SMB2_tcon(0, tcon->ses, tcon->treeName, tcon, nlsc);
+ if (tcon->ipc) {
+ scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$",
+ server->hostname);
+ rc = SMB2_tcon(0, tcon->ses, tree, tcon, nlsc);
+ } else {
+ rc = SMB2_tcon(0, tcon->ses, tcon->treeName, tcon,
+ nlsc);
+ }
goto out;
}
@@ -188,13 +186,13 @@ static int __smb2_reconnect(const struct nls_table *nlsc,
if (rc)
goto out;
- extract_unc_hostname(tcon->ses->server->hostname, &tcp_host,
- &tcp_host_len);
+ extract_unc_hostname(server->hostname, &tcp_host, &tcp_host_len);
for (it = dfs_cache_get_tgt_iterator(&tl); it;
it = dfs_cache_get_next_tgt(&tl, it)) {
const char *share, *prefix;
size_t share_len, prefix_len;
+ bool target_match;
rc = dfs_cache_get_tgt_share(it, &share, &share_len, &prefix,
&prefix_len);
@@ -208,19 +206,38 @@ static int __smb2_reconnect(const struct nls_table *nlsc,
if (dfs_host_len != tcp_host_len
|| strncasecmp(dfs_host, tcp_host, dfs_host_len) != 0) {
- cifs_dbg(FYI, "%s: skipping %.*s, doesn't match %.*s",
+ cifs_dbg(FYI, "%s: %.*s doesn't match %.*s\n",
__func__,
(int)dfs_host_len, dfs_host,
(int)tcp_host_len, tcp_host);
- continue;
- }
- scnprintf(tree, MAX_TREE_SIZE, "\\%.*s", (int)share_len, share);
+ rc = match_target_ip(server, dfs_host, dfs_host_len,
+ &target_match);
+ if (rc) {
+ cifs_dbg(VFS, "%s: failed to match target ip: %d\n",
+ __func__, rc);
+ break;
+ }
- rc = SMB2_tcon(0, tcon->ses, tree, tcon, nlsc);
- if (!rc) {
- rc = update_super_prepath(tcon, prefix, prefix_len);
- break;
+ if (!target_match) {
+ cifs_dbg(FYI, "%s: skipping target\n", __func__);
+ continue;
+ }
+ }
+
+ if (tcon->ipc) {
+ scnprintf(tree, MAX_TREE_SIZE, "\\\\%.*s\\IPC$",
+ (int)share_len, share);
+ rc = SMB2_tcon(0, tcon->ses, tree, tcon, nlsc);
+ } else {
+ scnprintf(tree, MAX_TREE_SIZE, "\\%.*s", (int)share_len,
+ share);
+ rc = SMB2_tcon(0, tcon->ses, tree, tcon, nlsc);
+ if (!rc) {
+ rc = update_super_prepath(tcon, prefix,
+ prefix_len);
+ break;
+ }
}
if (rc == -EREMOTE)
break;
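The reworked loop above no longer discards a DFS target merely because its hostname differs from the mounted server's: it also asks, via the newly used match_target_ip(), whether the target name resolves to the address of the existing TCP session. Condensed into one hedged helper (the signature is illustrative; only match_target_ip() comes from the hunk):

	/*
	 * Sketch: a referral target is usable over the current connection if
	 * its hostname matches, or if it resolves to the same IP (one server
	 * reachable under several names). A negative return aborts the
	 * rotation, matching the "break" on error above.
	 */
	static int target_usable_sketch(struct TCP_Server_Info *server,
					const char *tcp_host, size_t tcp_host_len,
					const char *dfs_host, size_t dfs_host_len,
					bool *usable)
	{
		*usable = (dfs_host_len == tcp_host_len) &&
			  !strncasecmp(dfs_host, tcp_host, dfs_host_len);
		if (*usable)
			return 0;
		return match_target_ip(server, dfs_host, dfs_host_len, usable);
	}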
@@ -247,12 +264,12 @@ static inline int __smb2_reconnect(const struct nls_table *nlsc,
#endif
static int
-smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
+smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
+ struct TCP_Server_Info *server)
{
int rc;
struct nls_table *nls_codepage;
struct cifs_ses *ses;
- struct TCP_Server_Info *server;
int retries;
/*
@@ -281,12 +298,10 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
}
}
if ((!tcon->ses) || (tcon->ses->status == CifsExiting) ||
- (!tcon->ses->server))
+ (!tcon->ses->server) || !server)
return -EIO;
ses = tcon->ses;
- server = ses->server;
-
retries = server->nr_targets;
/*
@@ -314,8 +329,8 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
(server->tcpStatus != CifsNeedReconnect),
10 * HZ);
if (rc < 0) {
- cifs_dbg(FYI, "%s: aborting reconnect due to a received"
- " signal by the process\n", __func__);
+ cifs_dbg(FYI, "%s: aborting reconnect due to a received signal by the process\n",
+ __func__);
return -ERESTARTSYS;
}
@@ -360,15 +375,31 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
goto out;
}
+ /*
+ * If we are reconnecting an extra channel, bind
+ */
+ if (server->is_channel) {
+ ses->binding = true;
+ ses->binding_chan = cifs_ses_find_chan(ses, server);
+ }
+
rc = cifs_negotiate_protocol(0, tcon->ses);
if (!rc && tcon->ses->need_reconnect) {
rc = cifs_setup_session(0, tcon->ses, nls_codepage);
if ((rc == -EACCES) && !tcon->retry) {
rc = -EHOSTDOWN;
+ ses->binding = false;
+ ses->binding_chan = NULL;
mutex_unlock(&tcon->ses->session_mutex);
goto failed;
}
}
+ /*
+ * End of channel binding
+ */
+ ses->binding = false;
+ ses->binding_chan = NULL;
+
if (rc || !tcon->need_reconnect) {
mutex_unlock(&tcon->ses->session_mutex);
goto out;
@@ -384,7 +415,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc);
if (rc) {
/* If sess reconnected but tcon didn't, something strange ... */
- printk_once(KERN_WARNING "reconnect tcon failed rc = %d\n", rc);
+ pr_warn_once("reconnect tcon failed rc = %d\n", rc);
goto out;
}
@@ -419,7 +450,9 @@ failed:
}
static void
-fill_small_buf(__le16 smb2_command, struct cifs_tcon *tcon, void *buf,
+fill_small_buf(__le16 smb2_command, struct cifs_tcon *tcon,
+ struct TCP_Server_Info *server,
+ void *buf,
unsigned int *total_len)
{
struct smb2_sync_pdu *spdu = (struct smb2_sync_pdu *)buf;
@@ -432,7 +465,7 @@ fill_small_buf(__le16 smb2_command, struct cifs_tcon *tcon, void *buf,
*/
memset(buf, 0, 256);
- smb2_hdr_assemble(&spdu->sync_hdr, smb2_command, tcon);
+ smb2_hdr_assemble(&spdu->sync_hdr, smb2_command, tcon, server);
spdu->StructureSize2 = cpu_to_le16(parmsize);
*total_len = parmsize + sizeof(struct smb2_sync_hdr);
@@ -444,7 +477,8 @@ fill_small_buf(__le16 smb2_command, struct cifs_tcon *tcon, void *buf,
* function must have filled in request_buf pointer.
*/
static int __smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
- void **request_buf, unsigned int *total_len)
+ struct TCP_Server_Info *server,
+ void **request_buf, unsigned int *total_len)
{
/* BB eventually switch this to SMB2 specific small buf size */
if (smb2_command == SMB2_SET_INFO)
@@ -456,7 +490,7 @@ static int __smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
return -ENOMEM;
}
- fill_small_buf(smb2_command, tcon,
+ fill_small_buf(smb2_command, tcon, server,
(struct smb2_sync_hdr *)(*request_buf),
total_len);
@@ -470,27 +504,30 @@ static int __smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
}
static int smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
+ struct TCP_Server_Info *server,
void **request_buf, unsigned int *total_len)
{
int rc;
- rc = smb2_reconnect(smb2_command, tcon);
+ rc = smb2_reconnect(smb2_command, tcon, server);
if (rc)
return rc;
- return __smb2_plain_req_init(smb2_command, tcon, request_buf,
+ return __smb2_plain_req_init(smb2_command, tcon, server, request_buf,
total_len);
}
static int smb2_ioctl_req_init(u32 opcode, struct cifs_tcon *tcon,
+ struct TCP_Server_Info *server,
void **request_buf, unsigned int *total_len)
{
/* Skip reconnect only for FSCTL_VALIDATE_NEGOTIATE_INFO IOCTLs */
if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO) {
- return __smb2_plain_req_init(SMB2_IOCTL, tcon, request_buf,
- total_len);
+ return __smb2_plain_req_init(SMB2_IOCTL, tcon, server,
+ request_buf, total_len);
}
- return smb2_plain_req_init(SMB2_IOCTL, tcon, request_buf, total_len);
+ return smb2_plain_req_init(SMB2_IOCTL, tcon, server,
+ request_buf, total_len);
}
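From this point on, every request-building helper carries the same extra struct TCP_Server_Info *server argument: one channel is chosen once and then used for the whole request lifetime, so credit accounting, dialect checks, and the eventual send all agree. The resulting calling convention, sketched with error paths trimmed (SMB2_WRITE is only a stand-in command):

	struct TCP_Server_Info *server = cifs_pick_channel(ses);

	if (!server)
		return -EIO;

	/* build the PDU against the chosen channel ... */
	rc = smb2_plain_req_init(SMB2_WRITE, tcon, server,
				 (void **)&req, &total_len);
	if (rc)
		return rc;

	/* ... and send it on that same channel */
	rc = cifs_send_recv(xid, ses, server, &rqst,
			    &resp_buftype, flags, &rsp_iov);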
/* For explanation of negotiate contexts see MS-SMB2 section 2.2.3.1 */
@@ -626,13 +663,13 @@ static void decode_preauth_context(struct smb2_preauth_neg_context *ctxt)
/* If invalid preauth context warn but use what we requested, SHA-512 */
if (len < MIN_PREAUTH_CTXT_DATA_LEN) {
- printk_once(KERN_WARNING "server sent bad preauth context\n");
+ pr_warn_once("server sent bad preauth context\n");
return;
}
if (le16_to_cpu(ctxt->HashAlgorithmCount) != 1)
- printk_once(KERN_WARNING "illegal SMB3 hash algorithm count\n");
+ pr_warn_once("Invalid SMB3 hash algorithm count\n");
if (ctxt->HashAlgorithms != SMB2_PREAUTH_INTEGRITY_SHA512)
- printk_once(KERN_WARNING "unknown SMB3 hash algorithm\n");
+ pr_warn_once("unknown SMB3 hash algorithm\n");
}
static void decode_compress_ctx(struct TCP_Server_Info *server,
@@ -642,15 +679,15 @@ static void decode_compress_ctx(struct TCP_Server_Info *server,
/* sizeof compress context is a one element compression capability struct */
if (len < 10) {
- printk_once(KERN_WARNING "server sent bad compression cntxt\n");
+ pr_warn_once("server sent bad compression cntxt\n");
return;
}
if (le16_to_cpu(ctxt->CompressionAlgorithmCount) != 1) {
- printk_once(KERN_WARNING "illegal SMB3 compress algorithm count\n");
+ pr_warn_once("Invalid SMB3 compress algorithm count\n");
return;
}
if (le16_to_cpu(ctxt->CompressionAlgorithms[0]) > 3) {
- printk_once(KERN_WARNING "unknown compression algorithm\n");
+ pr_warn_once("unknown compression algorithm\n");
return;
}
server->compress_algorithm = ctxt->CompressionAlgorithms[0];
@@ -663,18 +700,18 @@ static int decode_encrypt_ctx(struct TCP_Server_Info *server,
cifs_dbg(FYI, "decode SMB3.11 encryption neg context of len %d\n", len);
if (len < MIN_ENCRYPT_CTXT_DATA_LEN) {
- printk_once(KERN_WARNING "server sent bad crypto ctxt len\n");
+ pr_warn_once("server sent bad crypto ctxt len\n");
return -EINVAL;
}
if (le16_to_cpu(ctxt->CipherCount) != 1) {
- printk_once(KERN_WARNING "illegal SMB3.11 cipher count\n");
+ pr_warn_once("Invalid SMB3.11 cipher count\n");
return -EINVAL;
}
cifs_dbg(FYI, "SMB311 cipher type:%d\n", le16_to_cpu(ctxt->Ciphers[0]));
if ((ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES128_CCM) &&
(ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES128_GCM)) {
- printk_once(KERN_WARNING "invalid SMB3.11 cipher returned\n");
+ pr_warn_once("Invalid SMB3.11 cipher returned\n");
return -EINVAL;
}
server->cipher_type = ctxt->Ciphers[0];
@@ -774,7 +811,7 @@ create_posix_buf(umode_t mode)
buf->Name[14] = 0xCD;
buf->Name[15] = 0x7C;
buf->Mode = cpu_to_le32(mode);
- cifs_dbg(FYI, "mode on posix create 0%o", mode);
+ cifs_dbg(FYI, "mode on posix create 0%o\n", mode);
return buf;
}
@@ -786,7 +823,7 @@ add_posix_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode)
iov[num].iov_base = create_posix_buf(mode);
if (mode == ACL_NO_MODE)
- cifs_dbg(FYI, "illegal mode\n");
+ cifs_dbg(FYI, "Invalid mode\n");
if (iov[num].iov_base == NULL)
return -ENOMEM;
iov[num].iov_len = sizeof(struct create_posix);
@@ -838,7 +875,8 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
return -EIO;
}
- rc = smb2_plain_req_init(SMB2_NEGOTIATE, NULL, (void **) &req, &total_len);
+ rc = smb2_plain_req_init(SMB2_NEGOTIATE, NULL, server,
+ (void **) &req, &total_len);
if (rc)
return rc;
@@ -896,7 +934,8 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
rqst.rq_iov = iov;
rqst.rq_nvec = 1;
- rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_negotiate_rsp *)rsp_iov.iov_base;
/*
@@ -904,9 +943,7 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
* cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
*/
if (rc == -EOPNOTSUPP) {
- cifs_server_dbg(VFS, "Dialect not supported by server. Consider "
- "specifying vers=1.0 or vers=2.0 on mount for accessing"
- " older servers\n");
+ cifs_server_dbg(VFS, "Dialect not supported by server. Consider specifying vers=1.0 or vers=2.0 on mount for accessing older servers\n");
goto neg_exit;
} else if (rc != 0)
goto neg_exit;
@@ -939,8 +976,8 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
} else if (le16_to_cpu(rsp->DialectRevision) !=
server->vals->protocol_id) {
/* if requested single dialect ensure returned dialect matched */
- cifs_server_dbg(VFS, "Illegal 0x%x dialect returned: not requested\n",
- le16_to_cpu(rsp->DialectRevision));
+ cifs_server_dbg(VFS, "Invalid 0x%x dialect returned: not requested\n",
+ le16_to_cpu(rsp->DialectRevision));
return -EIO;
}
@@ -957,8 +994,8 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID))
cifs_dbg(FYI, "negotiated smb3.1.1 dialect\n");
else {
- cifs_server_dbg(VFS, "Illegal dialect returned by server 0x%x\n",
- le16_to_cpu(rsp->DialectRevision));
+ cifs_server_dbg(VFS, "Invalid dialect returned by server 0x%x\n",
+ le16_to_cpu(rsp->DialectRevision));
rc = -EIO;
goto neg_exit;
}
@@ -1116,15 +1153,16 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
rc = 0;
goto out_free_inbuf;
} else if (rc != 0) {
- cifs_tcon_dbg(VFS, "validate protocol negotiate failed: %d\n", rc);
+ cifs_tcon_dbg(VFS, "validate protocol negotiate failed: %d\n",
+ rc);
rc = -EIO;
goto out_free_inbuf;
}
rc = -EIO;
if (rsplen != sizeof(*pneg_rsp)) {
- cifs_tcon_dbg(VFS, "invalid protocol negotiate response size: %d\n",
- rsplen);
+ cifs_tcon_dbg(VFS, "Invalid protocol negotiate response size: %d\n",
+ rsplen);
/* relax check since Mac returns max bufsize allowed on ioctl */
if (rsplen > CIFSMaxBufSize || rsplen < sizeof(*pneg_rsp))
@@ -1208,8 +1246,9 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
struct TCP_Server_Info *server = cifs_ses_server(ses);
unsigned int total_len;
- rc = smb2_plain_req_init(SMB2_SESSION_SETUP, NULL, (void **) &req,
- &total_len);
+ rc = smb2_plain_req_init(SMB2_SESSION_SETUP, NULL, server,
+ (void **) &req,
+ &total_len);
if (rc)
return rc;
@@ -1286,6 +1325,7 @@ SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data)
/* BB add code to build os and lm fields */
rc = cifs_send_recv(sess_data->xid, sess_data->ses,
+ cifs_ses_server(sess_data->ses),
&rqst,
&sess_data->buf0_type,
CIFS_LOG_ERROR | CIFS_NEG_OP, &rsp_iov);
@@ -1357,9 +1397,8 @@ SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
* sending us a response in an expected form
*/
if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) {
- cifs_dbg(VFS,
- "bad cifs.upcall version. Expected %d got %d",
- CIFS_SPNEGO_UPCALL_VERSION, msg->version);
+ cifs_dbg(VFS, "bad cifs.upcall version. Expected %d got %d\n",
+ CIFS_SPNEGO_UPCALL_VERSION, msg->version);
rc = -EKEYREJECTED;
goto out_put_spnego_key;
}
@@ -1369,8 +1408,7 @@ SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
GFP_KERNEL);
if (!ses->auth_key.response) {
- cifs_dbg(VFS,
- "Kerberos can't allocate (%u bytes) memory",
+ cifs_dbg(VFS, "Kerberos can't allocate (%u bytes) memory\n",
msg->sesskey_len);
rc = -ENOMEM;
goto out_put_spnego_key;
@@ -1584,8 +1622,7 @@ SMB2_select_sec(struct cifs_ses *ses, struct SMB2_sess_data *sess_data)
type = smb2_select_sectype(cifs_ses_server(ses), ses->sectype);
cifs_dbg(FYI, "sess setup type %d\n", type);
if (type == Unspecified) {
- cifs_dbg(VFS,
- "Unable to select appropriate authentication method!");
+ cifs_dbg(VFS, "Unable to select appropriate authentication method!\n");
return -EINVAL;
}
@@ -1673,7 +1710,8 @@ SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
if (ses->need_reconnect)
goto smb2_session_already_dead;
- rc = smb2_plain_req_init(SMB2_LOGOFF, NULL, (void **) &req, &total_len);
+ rc = smb2_plain_req_init(SMB2_LOGOFF, NULL, ses->server,
+ (void **) &req, &total_len);
if (rc)
return rc;
@@ -1694,7 +1732,8 @@ SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
rqst.rq_iov = iov;
rqst.rq_nvec = 1;
- rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
+ rc = cifs_send_recv(xid, ses, ses->server,
+ &rqst, &resp_buf_type, flags, &rsp_iov);
cifs_small_buf_release(req);
/*
* No tcon so can't do
@@ -1735,7 +1774,10 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
__le16 *unc_path = NULL;
int flags = 0;
unsigned int total_len;
- struct TCP_Server_Info *server = ses->server;
+ struct TCP_Server_Info *server;
+
+ /* always use master channel */
+ server = ses->server;
cifs_dbg(FYI, "TCON\n");
@@ -1756,8 +1798,8 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
/* SMB2 TREE_CONNECT request must be called with TreeId == 0 */
tcon->tid = 0;
atomic_set(&tcon->num_remote_opens, 0);
- rc = smb2_plain_req_init(SMB2_TREE_CONNECT, tcon, (void **) &req,
- &total_len);
+ rc = smb2_plain_req_init(SMB2_TREE_CONNECT, tcon, server,
+ (void **) &req, &total_len);
if (rc) {
kfree(unc_path);
return rc;
@@ -1796,7 +1838,8 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
/* Need 64 for max size write so ask for more in case not there yet */
req->sync_hdr.CreditRequest = cpu_to_le16(64);
- rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_tree_connect_rsp *)rsp_iov.iov_base;
trace_smb3_tcon(xid, tcon->tid, ses->Suid, tree, rc);
@@ -1881,8 +1924,9 @@ SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
close_shroot_lease(&tcon->crfid);
- rc = smb2_plain_req_init(SMB2_TREE_DISCONNECT, tcon, (void **) &req,
- &total_len);
+ rc = smb2_plain_req_init(SMB2_TREE_DISCONNECT, tcon, ses->server,
+ (void **) &req,
+ &total_len);
if (rc)
return rc;
@@ -1898,7 +1942,8 @@ SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
rqst.rq_iov = iov;
rqst.rq_nvec = 1;
- rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
+ rc = cifs_send_recv(xid, ses, ses->server,
+ &rqst, &resp_buf_type, flags, &rsp_iov);
cifs_small_buf_release(req);
if (rc)
cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE);
@@ -2452,6 +2497,7 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
int flags = 0;
unsigned int total_len;
__le16 *utf16_path = NULL;
+ struct TCP_Server_Info *server = cifs_pick_channel(ses);
cifs_dbg(FYI, "mkdir\n");
@@ -2460,13 +2506,14 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
if (!utf16_path)
return -ENOMEM;
- if (!ses || !(ses->server)) {
+ if (!ses || !server) {
rc = -EIO;
goto err_free_path;
}
/* resource #2: request */
- rc = smb2_plain_req_init(SMB2_CREATE, tcon, (void **) &req, &total_len);
+ rc = smb2_plain_req_init(SMB2_CREATE, tcon, server,
+ (void **) &req, &total_len);
if (rc)
goto err_free_path;
@@ -2552,7 +2599,8 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
trace_smb3_posix_mkdir_enter(xid, tcon->tid, ses->Suid, CREATE_NOT_FILE,
FILE_WRITE_ATTRIBUTES);
/* resource #4: response buffer */
- rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buftype, flags, &rsp_iov);
if (rc) {
cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
trace_smb3_posix_mkdir_err(xid, tcon->tid, ses->Suid,
@@ -2581,10 +2629,10 @@ err_free_path:
}
int
-SMB2_open_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, __u8 *oplock,
+SMB2_open_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
+ struct smb_rqst *rqst, __u8 *oplock,
struct cifs_open_parms *oparms, __le16 *path)
{
- struct TCP_Server_Info *server = tcon->ses->server;
struct smb2_create_req *req;
unsigned int n_iov = 2;
__u32 file_attributes = 0;
@@ -2595,7 +2643,8 @@ SMB2_open_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, __u8 *oplock,
__le16 *copy_path;
int rc;
- rc = smb2_plain_req_init(SMB2_CREATE, tcon, (void **) &req, &total_len);
+ rc = smb2_plain_req_init(SMB2_CREATE, tcon, server,
+ (void **) &req, &total_len);
if (rc)
return rc;
@@ -2767,9 +2816,9 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
{
struct smb_rqst rqst;
struct smb2_create_rsp *rsp = NULL;
- struct TCP_Server_Info *server;
struct cifs_tcon *tcon = oparms->tcon;
struct cifs_ses *ses = tcon->ses;
+ struct TCP_Server_Info *server = cifs_pick_channel(ses);
struct kvec iov[SMB2_CREATE_IOV_SIZE];
struct kvec rsp_iov = {NULL, 0};
int resp_buftype = CIFS_NO_BUFFER;
@@ -2777,9 +2826,7 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
int flags = 0;
cifs_dbg(FYI, "create/open\n");
- if (ses && (ses->server))
- server = ses->server;
- else
+ if (!ses || !server)
return -EIO;
if (smb3_encryption_required(tcon))
@@ -2790,14 +2837,16 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
rqst.rq_iov = iov;
rqst.rq_nvec = SMB2_CREATE_IOV_SIZE;
- rc = SMB2_open_init(tcon, &rqst, oplock, oparms, path);
+ rc = SMB2_open_init(tcon, server,
+ &rqst, oplock, oparms, path);
if (rc)
goto creat_exit;
trace_smb3_open_enter(xid, tcon->tid, tcon->ses->Suid,
oparms->create_options, oparms->desired_access);
- rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buftype, flags,
&rsp_iov);
rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
@@ -2812,8 +2861,8 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
trace_smb3_open_err(xid, tcon->tid, ses->Suid,
oparms->create_options, oparms->desired_access, rc);
if (rc == -EREMCHG) {
- printk_once(KERN_WARNING "server share %s deleted\n",
- tcon->treeName);
+ pr_warn_once("server share %s deleted\n",
+ tcon->treeName);
tcon->need_reconnect = true;
}
goto creat_exit;
@@ -2849,7 +2898,8 @@ creat_exit:
}
int
-SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
+SMB2_ioctl_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
+ struct smb_rqst *rqst,
u64 persistent_fid, u64 volatile_fid, u32 opcode,
bool is_fsctl, char *in_data, u32 indatalen,
__u32 max_response_size)
@@ -2860,7 +2910,8 @@ SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
int rc;
char *in_data_buf;
- rc = smb2_ioctl_req_init(opcode, tcon, (void **) &req, &total_len);
+ rc = smb2_ioctl_req_init(opcode, tcon, server,
+ (void **) &req, &total_len);
if (rc)
return rc;
@@ -2922,7 +2973,7 @@ SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
* response size smaller.
*/
req->MaxOutputResponse = cpu_to_le32(max_response_size);
-
+	req->sync_hdr.CreditCharge =
+		cpu_to_le16(DIV_ROUND_UP(max_response_size, SMB2_MAX_BUFFER_SIZE));
if (is_fsctl)
req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
else
@@ -2960,12 +3011,12 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
struct smb_rqst rqst;
struct smb2_ioctl_rsp *rsp = NULL;
struct cifs_ses *ses;
+ struct TCP_Server_Info *server;
struct kvec iov[SMB2_IOCTL_IOV_SIZE];
struct kvec rsp_iov = {NULL, 0};
int resp_buftype = CIFS_NO_BUFFER;
int rc = 0;
int flags = 0;
- struct TCP_Server_Info *server;
cifs_dbg(FYI, "SMB2 IOCTL\n");
@@ -2976,14 +3027,14 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
if (plen)
*plen = 0;
- if (tcon)
- ses = tcon->ses;
- else
+ if (!tcon)
return -EIO;
+ ses = tcon->ses;
if (!ses)
return -EIO;
- server = ses->server;
+
+ server = cifs_pick_channel(ses);
if (!server)
return -EIO;
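cifs_pick_channel() itself is not shown in this diff. For orientation only, a plausible round-robin picker is sketched below; the chans[]/chan_count fields and the rotating cursor are assumptions about the session layout, not a quote of the driver:

	/*
	 * Hedged sketch: return the primary server when no extra channels
	 * are bound, otherwise rotate across the bound channels.
	 */
	static struct TCP_Server_Info *pick_channel_sketch(struct cifs_ses *ses)
	{
		static unsigned int cursor;	/* assumed rotating index */

		if (!ses)
			return NULL;
		if (ses->chan_count <= 1)	/* only the primary channel */
			return ses->server;
		return ses->chans[cursor++ % ses->chan_count].server;
	}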
@@ -2995,12 +3046,14 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
rqst.rq_iov = iov;
rqst.rq_nvec = SMB2_IOCTL_IOV_SIZE;
- rc = SMB2_ioctl_init(tcon, &rqst, persistent_fid, volatile_fid, opcode,
+ rc = SMB2_ioctl_init(tcon, server,
+ &rqst, persistent_fid, volatile_fid, opcode,
is_fsctl, in_data, indatalen, max_out_data_len);
if (rc)
goto ioctl_exit;
- rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buftype, flags,
&rsp_iov);
rsp = (struct smb2_ioctl_rsp *)rsp_iov.iov_base;
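A few hunks up, SMB2_ioctl_init now charges credits in proportion to the reply it may receive instead of a flat charge, which is what MS-SMB2 crediting requires once a response can exceed 64KiB. Worked through with SMB2_MAX_BUFFER_SIZE taken as 65536 (the driver's value):

	/* DIV_ROUND_UP(max_response_size, 65536), so for example:	*/
	/*   max_response_size =     100  ->  CreditCharge = 1		*/
	/*   max_response_size =   65536  ->  CreditCharge = 1		*/
	/*   max_response_size =   65537  ->  CreditCharge = 2		*/
	/*   max_response_size = 1 << 20  ->  CreditCharge = 16	*/
	req->sync_hdr.CreditCharge =
		cpu_to_le16(DIV_ROUND_UP(max_response_size, SMB2_MAX_BUFFER_SIZE));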
@@ -3088,7 +3141,8 @@ SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
}
int
-SMB2_close_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
+SMB2_close_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
+ struct smb_rqst *rqst,
u64 persistent_fid, u64 volatile_fid, bool query_attrs)
{
struct smb2_close_req *req;
@@ -3096,7 +3150,8 @@ SMB2_close_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
unsigned int total_len;
int rc;
- rc = smb2_plain_req_init(SMB2_CLOSE, tcon, (void **) &req, &total_len);
+ rc = smb2_plain_req_init(SMB2_CLOSE, tcon, server,
+ (void **) &req, &total_len);
if (rc)
return rc;
@@ -3127,6 +3182,7 @@ __SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
struct smb_rqst rqst;
struct smb2_close_rsp *rsp = NULL;
struct cifs_ses *ses = tcon->ses;
+ struct TCP_Server_Info *server = cifs_pick_channel(ses);
struct kvec iov[1];
struct kvec rsp_iov;
int resp_buftype = CIFS_NO_BUFFER;
@@ -3136,7 +3192,7 @@ __SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
cifs_dbg(FYI, "Close\n");
- if (!ses || !(ses->server))
+ if (!ses || !server)
return -EIO;
if (smb3_encryption_required(tcon))
@@ -3152,12 +3208,14 @@ __SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
query_attrs = true;
trace_smb3_close_enter(xid, persistent_fid, tcon->tid, ses->Suid);
- rc = SMB2_close_init(tcon, &rqst, persistent_fid, volatile_fid,
+ rc = SMB2_close_init(tcon, server,
+ &rqst, persistent_fid, volatile_fid,
query_attrs);
if (rc)
goto close_exit;
- rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buftype, flags, &rsp_iov);
rsp = (struct smb2_close_rsp *)rsp_iov.iov_base;
if (rc != 0) {
@@ -3225,7 +3283,7 @@ smb2_validate_iov(unsigned int offset, unsigned int buffer_length,
}
if ((begin_of_buf > end_of_smb) || (end_of_buf > end_of_smb)) {
- cifs_dbg(VFS, "illegal server response, bad offset to data\n");
+ cifs_dbg(VFS, "Invalid server response, bad offset to data\n");
return -EINVAL;
}
@@ -3257,7 +3315,8 @@ smb2_validate_and_copy_iov(unsigned int offset, unsigned int buffer_length,
}
int
-SMB2_query_info_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
+SMB2_query_info_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
+ struct smb_rqst *rqst,
u64 persistent_fid, u64 volatile_fid,
u8 info_class, u8 info_type, u32 additional_info,
size_t output_len, size_t input_len, void *input)
@@ -3267,8 +3326,8 @@ SMB2_query_info_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
unsigned int total_len;
int rc;
- rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, (void **) &req,
- &total_len);
+ rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, server,
+ (void **) &req, &total_len);
if (rc)
return rc;
@@ -3320,7 +3379,7 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
if (!ses)
return -EIO;
- server = ses->server;
+ server = cifs_pick_channel(ses);
if (!server)
return -EIO;
@@ -3332,7 +3391,8 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
rqst.rq_iov = iov;
rqst.rq_nvec = 1;
- rc = SMB2_query_info_init(tcon, &rqst, persistent_fid, volatile_fid,
+ rc = SMB2_query_info_init(tcon, server,
+ &rqst, persistent_fid, volatile_fid,
info_class, info_type, additional_info,
output_len, 0, NULL);
if (rc)
@@ -3341,7 +3401,8 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
trace_smb3_query_info_enter(xid, persistent_fid, tcon->tid,
ses->Suid, info_class, (__u32)info_type);
- rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buftype, flags, &rsp_iov);
rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
if (rc) {
@@ -3426,15 +3487,17 @@ SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon,
static int
SMB2_notify_init(const unsigned int xid, struct smb_rqst *rqst,
- struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid,
- u32 completion_filter, bool watch_tree)
+ struct cifs_tcon *tcon, struct TCP_Server_Info *server,
+ u64 persistent_fid, u64 volatile_fid,
+ u32 completion_filter, bool watch_tree)
{
struct smb2_change_notify_req *req;
struct kvec *iov = rqst->rq_iov;
unsigned int total_len;
int rc;
- rc = smb2_plain_req_init(SMB2_CHANGE_NOTIFY, tcon, (void **) &req, &total_len);
+ rc = smb2_plain_req_init(SMB2_CHANGE_NOTIFY, tcon, server,
+ (void **) &req, &total_len);
if (rc)
return rc;
@@ -3461,6 +3524,7 @@ SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
u32 completion_filter)
{
struct cifs_ses *ses = tcon->ses;
+ struct TCP_Server_Info *server = cifs_pick_channel(ses);
struct smb_rqst rqst;
struct kvec iov[1];
struct kvec rsp_iov = {NULL, 0};
@@ -3469,7 +3533,7 @@ SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
int rc = 0;
cifs_dbg(FYI, "change notify\n");
- if (!ses || !(ses->server))
+ if (!ses || !server)
return -EIO;
if (smb3_encryption_required(tcon))
@@ -3480,14 +3544,16 @@ SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
rqst.rq_iov = iov;
rqst.rq_nvec = 1;
- rc = SMB2_notify_init(xid, &rqst, tcon, persistent_fid, volatile_fid,
+ rc = SMB2_notify_init(xid, &rqst, tcon, server,
+ persistent_fid, volatile_fid,
completion_filter, watch_tree);
if (rc)
goto cnotify_exit;
trace_smb3_notify_enter(xid, persistent_fid, tcon->tid, ses->Suid,
(u8)watch_tree, completion_filter);
- rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buftype, flags, &rsp_iov);
if (rc != 0) {
cifs_stats_fail_inc(tcon, SMB2_CHANGE_NOTIFY_HE);
@@ -3577,7 +3643,7 @@ void smb2_reconnect_server(struct work_struct *work)
spin_unlock(&cifs_tcp_ses_lock);
list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) {
- rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon);
+ rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server);
if (!rc)
cifs_reopen_persistent_handles(tcon);
else
@@ -3617,7 +3683,8 @@ SMB2_echo(struct TCP_Server_Info *server)
return rc;
}
- rc = smb2_plain_req_init(SMB2_ECHO, NULL, (void **)&req, &total_len);
+ rc = smb2_plain_req_init(SMB2_ECHO, NULL, server,
+ (void **)&req, &total_len);
if (rc)
return rc;
@@ -3644,14 +3711,16 @@ SMB2_flush_free(struct smb_rqst *rqst)
int
SMB2_flush_init(const unsigned int xid, struct smb_rqst *rqst,
- struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid)
+ struct cifs_tcon *tcon, struct TCP_Server_Info *server,
+ u64 persistent_fid, u64 volatile_fid)
{
struct smb2_flush_req *req;
struct kvec *iov = rqst->rq_iov;
unsigned int total_len;
int rc;
- rc = smb2_plain_req_init(SMB2_FLUSH, tcon, (void **) &req, &total_len);
+ rc = smb2_plain_req_init(SMB2_FLUSH, tcon, server,
+ (void **) &req, &total_len);
if (rc)
return rc;
@@ -3672,6 +3741,7 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
struct smb_rqst rqst;
struct kvec iov[1];
struct kvec rsp_iov = {NULL, 0};
+ struct TCP_Server_Info *server = cifs_pick_channel(ses);
int resp_buftype = CIFS_NO_BUFFER;
int flags = 0;
int rc = 0;
@@ -3688,12 +3758,14 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
rqst.rq_iov = iov;
rqst.rq_nvec = 1;
- rc = SMB2_flush_init(xid, &rqst, tcon, persistent_fid, volatile_fid);
+ rc = SMB2_flush_init(xid, &rqst, tcon, server,
+ persistent_fid, volatile_fid);
if (rc)
goto flush_exit;
trace_smb3_flush_enter(xid, persistent_fid, tcon->tid, ses->Suid);
- rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buftype, flags, &rsp_iov);
if (rc != 0) {
cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE);
@@ -3721,14 +3793,13 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
int rc = -EACCES;
struct smb2_read_plain_req *req = NULL;
struct smb2_sync_hdr *shdr;
- struct TCP_Server_Info *server;
+ struct TCP_Server_Info *server = io_parms->server;
- rc = smb2_plain_req_init(SMB2_READ, io_parms->tcon, (void **) &req,
- total_len);
+ rc = smb2_plain_req_init(SMB2_READ, io_parms->tcon, server,
+ (void **) &req, total_len);
if (rc)
return rc;
- server = io_parms->tcon->ses->server;
if (server == NULL)
return -ECONNABORTED;
@@ -3757,8 +3828,7 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
rdata->bytes >= server->smbd_conn->rdma_readwrite_threshold) {
struct smbd_buffer_descriptor_v1 *v1;
- bool need_invalidate =
- io_parms->tcon->ses->server->dialect == SMB30_PROT_ID;
+ bool need_invalidate = server->dialect == SMB30_PROT_ID;
rdata->mr = smbd_register_mr(
server->smbd_conn, rdata->pages,
@@ -3815,7 +3885,7 @@ smb2_readv_callback(struct mid_q_entry *mid)
{
struct cifs_readdata *rdata = mid->callback_data;
struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
- struct TCP_Server_Info *server = tcon->ses->server;
+ struct TCP_Server_Info *server = rdata->server;
struct smb2_sync_hdr *shdr =
(struct smb2_sync_hdr *)rdata->iov[0].iov_base;
struct cifs_credits credits = { .value = 0, .instance = 0 };
@@ -3827,6 +3897,10 @@ smb2_readv_callback(struct mid_q_entry *mid)
.rq_pagesz = rdata->pagesz,
.rq_tailsz = rdata->tailsz };
+ WARN_ONCE(rdata->server != mid->server,
+ "rdata server %p != mid server %p",
+ rdata->server, mid->server);
+
cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%u\n",
__func__, mid->mid, mid->mid_state, rdata->result,
rdata->bytes);
@@ -3904,20 +3978,23 @@ smb2_async_readv(struct cifs_readdata *rdata)
struct smb_rqst rqst = { .rq_iov = rdata->iov,
.rq_nvec = 1 };
struct TCP_Server_Info *server;
+ struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
unsigned int total_len;
cifs_dbg(FYI, "%s: offset=%llu bytes=%u\n",
__func__, rdata->offset, rdata->bytes);
+ if (!rdata->server)
+ rdata->server = cifs_pick_channel(tcon->ses);
+
io_parms.tcon = tlink_tcon(rdata->cfile->tlink);
+ io_parms.server = server = rdata->server;
io_parms.offset = rdata->offset;
io_parms.length = rdata->bytes;
io_parms.persistent_fid = rdata->cfile->fid.persistent_fid;
io_parms.volatile_fid = rdata->cfile->fid.volatile_fid;
io_parms.pid = rdata->pid;
- server = io_parms.tcon->ses->server;
-
rc = smb2_new_read_req(
(void **) &buf, &total_len, &io_parms, rdata, 0, 0);
if (rc)
@@ -3945,7 +4022,7 @@ smb2_async_readv(struct cifs_readdata *rdata)
}
kref_get(&rdata->refcount);
- rc = cifs_call_async(io_parms.tcon->ses->server, &rqst,
+ rc = cifs_call_async(server, &rqst,
cifs_readv_receive, smb2_readv_callback,
smb3_handle_read_data, rdata, flags,
&rdata->credits);
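Credits are accounted per channel, so an async read must return its response credits to the very channel that spent them. That is why rdata->server is pinned before the request is built and then cross-checked against mid->server in the completion callback. The invariant, reduced to its two ends (a condensed restatement of the hunks above, not new code):

	/* submit side: pin the channel once, before building the PDU */
	if (!rdata->server)
		rdata->server = cifs_pick_channel(tcon->ses);

	/* completion side: the response must arrive on that channel */
	WARN_ONCE(rdata->server != mid->server,
		  "rdata server %p != mid server %p",
		  rdata->server, mid->server);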
@@ -3977,6 +4054,9 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
int flags = CIFS_LOG_ERROR;
struct cifs_ses *ses = io_parms->tcon->ses;
+ if (!io_parms->server)
+ io_parms->server = cifs_pick_channel(io_parms->tcon->ses);
+
*nbytes = 0;
rc = smb2_new_read_req((void **)&req, &total_len, io_parms, NULL, 0, 0);
if (rc)
@@ -3992,7 +4072,8 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
rqst.rq_iov = iov;
rqst.rq_nvec = 1;
- rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
+ rc = cifs_send_recv(xid, ses, io_parms->server,
+ &rqst, &resp_buftype, flags, &rsp_iov);
rsp = (struct smb2_read_rsp *)rsp_iov.iov_base;
if (rc) {
@@ -4048,11 +4129,15 @@ smb2_writev_callback(struct mid_q_entry *mid)
{
struct cifs_writedata *wdata = mid->callback_data;
struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
- struct TCP_Server_Info *server = tcon->ses->server;
+ struct TCP_Server_Info *server = wdata->server;
unsigned int written;
struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf;
struct cifs_credits credits = { .value = 0, .instance = 0 };
+ WARN_ONCE(wdata->server != mid->server,
+ "wdata server %p != mid server %p",
+ wdata->server, mid->server);
+
switch (mid->mid_state) {
case MID_RESPONSE_RECEIVED:
credits.value = le16_to_cpu(rsp->sync_hdr.CreditRequest);
@@ -4108,8 +4193,8 @@ smb2_writev_callback(struct mid_q_entry *mid)
tcon->tid, tcon->ses->Suid, wdata->offset,
wdata->bytes, wdata->result);
if (wdata->result == -ENOSPC)
- printk_once(KERN_WARNING "Out of space writing to %s\n",
- tcon->treeName);
+ pr_warn_once("Out of space writing to %s\n",
+ tcon->treeName);
} else
trace_smb3_write_done(0 /* no xid */,
wdata->cfile->fid.persistent_fid,
@@ -4130,12 +4215,16 @@ smb2_async_writev(struct cifs_writedata *wdata,
struct smb2_write_req *req = NULL;
struct smb2_sync_hdr *shdr;
struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
- struct TCP_Server_Info *server = tcon->ses->server;
+ struct TCP_Server_Info *server = wdata->server;
struct kvec iov[1];
struct smb_rqst rqst = { };
unsigned int total_len;
- rc = smb2_plain_req_init(SMB2_WRITE, tcon, (void **) &req, &total_len);
+ if (!wdata->server)
+ server = wdata->server = cifs_pick_channel(tcon->ses);
+
+ rc = smb2_plain_req_init(SMB2_WRITE, tcon, server,
+ (void **) &req, &total_len);
if (rc)
return rc;
@@ -4274,20 +4363,24 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
struct kvec rsp_iov;
int flags = 0;
unsigned int total_len;
+ struct TCP_Server_Info *server;
*nbytes = 0;
if (n_vec < 1)
return rc;
- rc = smb2_plain_req_init(SMB2_WRITE, io_parms->tcon, (void **) &req,
- &total_len);
+ if (!io_parms->server)
+ io_parms->server = cifs_pick_channel(io_parms->tcon->ses);
+ server = io_parms->server;
+ if (server == NULL)
+ return -ECONNABORTED;
+
+ rc = smb2_plain_req_init(SMB2_WRITE, io_parms->tcon, server,
+ (void **) &req, &total_len);
if (rc)
return rc;
- if (io_parms->tcon->ses->server == NULL)
- return -ECONNABORTED;
-
if (smb3_encryption_required(io_parms->tcon))
flags |= CIFS_TRANSFORM_REQ;
@@ -4316,7 +4409,8 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
rqst.rq_iov = iov;
rqst.rq_nvec = n_vec + 1;
- rc = cifs_send_recv(xid, io_parms->tcon->ses, &rqst,
+ rc = cifs_send_recv(xid, io_parms->tcon->ses, server,
+ &rqst,
&resp_buftype, flags, &rsp_iov);
rsp = (struct smb2_write_rsp *)rsp_iov.iov_base;
@@ -4490,11 +4584,12 @@ num_entries(int infotype, char *bufstart, char *end_of_buf, char **lastentry,
* Readdir/FindFirst
*/
int SMB2_query_directory_init(const unsigned int xid,
- struct cifs_tcon *tcon, struct smb_rqst *rqst,
+ struct cifs_tcon *tcon,
+ struct TCP_Server_Info *server,
+ struct smb_rqst *rqst,
u64 persistent_fid, u64 volatile_fid,
int index, int info_level)
{
- struct TCP_Server_Info *server = tcon->ses->server;
struct smb2_query_directory_req *req;
unsigned char *bufptr;
__le16 asteriks = cpu_to_le16('*');
@@ -4505,8 +4600,8 @@ int SMB2_query_directory_init(const unsigned int xid,
struct kvec *iov = rqst->rq_iov;
int len, rc;
- rc = smb2_plain_req_init(SMB2_QUERY_DIRECTORY, tcon, (void **) &req,
- &total_len);
+ rc = smb2_plain_req_init(SMB2_QUERY_DIRECTORY, tcon, server,
+ (void **) &req, &total_len);
if (rc)
return rc;
@@ -4632,7 +4727,7 @@ smb2_parse_query_directory(struct cifs_tcon *tcon,
else if (resp_buftype == CIFS_SMALL_BUFFER)
srch_inf->smallBuf = true;
else
- cifs_tcon_dbg(VFS, "illegal search buffer type\n");
+ cifs_tcon_dbg(VFS, "Invalid search buffer type\n");
return 0;
}
@@ -4649,6 +4744,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
struct kvec rsp_iov;
int rc = 0;
struct cifs_ses *ses = tcon->ses;
+ struct TCP_Server_Info *server = cifs_pick_channel(ses);
int flags = 0;
if (!ses || !(ses->server))
@@ -4662,13 +4758,15 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
rqst.rq_iov = iov;
rqst.rq_nvec = SMB2_QUERY_DIRECTORY_IOV_SIZE;
- rc = SMB2_query_directory_init(xid, tcon, &rqst, persistent_fid,
+ rc = SMB2_query_directory_init(xid, tcon, server,
+ &rqst, persistent_fid,
volatile_fid, index,
srch_inf->info_level);
if (rc)
goto qdir_exit;
- rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buftype, flags, &rsp_iov);
rsp = (struct smb2_query_directory_rsp *)rsp_iov.iov_base;
if (rc) {
@@ -4705,17 +4803,19 @@ qdir_exit:
}
int
-SMB2_set_info_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
- u64 persistent_fid, u64 volatile_fid, u32 pid, u8 info_class,
- u8 info_type, u32 additional_info,
- void **data, unsigned int *size)
+SMB2_set_info_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
+ struct smb_rqst *rqst,
+ u64 persistent_fid, u64 volatile_fid, u32 pid,
+ u8 info_class, u8 info_type, u32 additional_info,
+ void **data, unsigned int *size)
{
struct smb2_set_info_req *req;
struct kvec *iov = rqst->rq_iov;
unsigned int i, total_len;
int rc;
- rc = smb2_plain_req_init(SMB2_SET_INFO, tcon, (void **) &req, &total_len);
+ rc = smb2_plain_req_init(SMB2_SET_INFO, tcon, server,
+ (void **) &req, &total_len);
if (rc)
return rc;
@@ -4766,9 +4866,10 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
int rc = 0;
int resp_buftype;
struct cifs_ses *ses = tcon->ses;
+ struct TCP_Server_Info *server = cifs_pick_channel(ses);
int flags = 0;
- if (!ses || !(ses->server))
+ if (!ses || !server)
return -EIO;
if (!num)
@@ -4785,7 +4886,8 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
rqst.rq_iov = iov;
rqst.rq_nvec = num;
- rc = SMB2_set_info_init(tcon, &rqst, persistent_fid, volatile_fid, pid,
+ rc = SMB2_set_info_init(tcon, server,
+ &rqst, persistent_fid, volatile_fid, pid,
info_class, info_type, additional_info,
data, size);
if (rc) {
@@ -4794,7 +4896,8 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
}
- rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buftype, flags,
&rsp_iov);
SMB2_set_info_free(&rqst);
rsp = (struct smb2_set_info_rsp *)rsp_iov.iov_base;
@@ -4857,6 +4960,7 @@ SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
int rc;
struct smb2_oplock_break *req = NULL;
struct cifs_ses *ses = tcon->ses;
+ struct TCP_Server_Info *server = cifs_pick_channel(ses);
int flags = CIFS_OBREAK_OP;
unsigned int total_len;
struct kvec iov[1];
@@ -4864,8 +4968,8 @@ SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
int resp_buf_type;
cifs_dbg(FYI, "SMB2_oplock_break\n");
- rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req,
- &total_len);
+ rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, server,
+ (void **) &req, &total_len);
if (rc)
return rc;
@@ -4886,7 +4990,8 @@ SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
rqst.rq_iov = iov;
rqst.rq_nvec = 1;
- rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buf_type, flags, &rsp_iov);
cifs_small_buf_release(req);
if (rc) {
@@ -4929,8 +5034,10 @@ copy_posix_fs_info_to_kstatfs(FILE_SYSTEM_POSIX_INFO *response_data,
}
static int
-build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon, int level,
- int outbuf_len, u64 persistent_fid, u64 volatile_fid)
+build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon,
+ struct TCP_Server_Info *server,
+ int level, int outbuf_len, u64 persistent_fid,
+ u64 volatile_fid)
{
int rc;
struct smb2_query_info_req *req;
@@ -4938,11 +5045,11 @@ build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon, int level,
cifs_dbg(FYI, "Query FSInfo level %d\n", level);
- if ((tcon->ses == NULL) || (tcon->ses->server == NULL))
+ if ((tcon->ses == NULL) || server == NULL)
return -EIO;
- rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, (void **) &req,
- &total_len);
+ rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, server,
+ (void **) &req, &total_len);
if (rc)
return rc;
@@ -4972,10 +5079,12 @@ SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
int rc = 0;
int resp_buftype;
struct cifs_ses *ses = tcon->ses;
+ struct TCP_Server_Info *server = cifs_pick_channel(ses);
FILE_SYSTEM_POSIX_INFO *info = NULL;
int flags = 0;
- rc = build_qfs_info_req(&iov, tcon, FS_POSIX_INFORMATION,
+ rc = build_qfs_info_req(&iov, tcon, server,
+ FS_POSIX_INFORMATION,
sizeof(FILE_SYSTEM_POSIX_INFO),
persistent_fid, volatile_fid);
if (rc)
@@ -4988,7 +5097,8 @@ SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
rqst.rq_iov = &iov;
rqst.rq_nvec = 1;
- rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(iov.iov_base);
if (rc) {
cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
@@ -5020,10 +5130,12 @@ SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
int rc = 0;
int resp_buftype;
struct cifs_ses *ses = tcon->ses;
+ struct TCP_Server_Info *server = cifs_pick_channel(ses);
struct smb2_fs_full_size_info *info = NULL;
int flags = 0;
- rc = build_qfs_info_req(&iov, tcon, FS_FULL_SIZE_INFORMATION,
+ rc = build_qfs_info_req(&iov, tcon, server,
+ FS_FULL_SIZE_INFORMATION,
sizeof(struct smb2_fs_full_size_info),
persistent_fid, volatile_fid);
if (rc)
@@ -5036,7 +5148,8 @@ SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
rqst.rq_iov = &iov;
rqst.rq_nvec = 1;
- rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(iov.iov_base);
if (rc) {
cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
@@ -5068,6 +5181,7 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
int rc = 0;
int resp_buftype, max_len, min_len;
struct cifs_ses *ses = tcon->ses;
+ struct TCP_Server_Info *server = cifs_pick_channel(ses);
unsigned int rsp_len, offset;
int flags = 0;
@@ -5088,7 +5202,8 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
return -EINVAL;
}
- rc = build_qfs_info_req(&iov, tcon, level, max_len,
+ rc = build_qfs_info_req(&iov, tcon, server,
+ level, max_len,
persistent_fid, volatile_fid);
if (rc)
return rc;
@@ -5100,7 +5215,8 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
rqst.rq_iov = &iov;
rqst.rq_nvec = 1;
- rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(iov.iov_base);
if (rc) {
cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
@@ -5153,10 +5269,12 @@ smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
unsigned int count;
int flags = CIFS_NO_RSP_BUF;
unsigned int total_len;
+ struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
cifs_dbg(FYI, "smb2_lockv num lock %d\n", num_lock);
- rc = smb2_plain_req_init(SMB2_LOCK, tcon, (void **) &req, &total_len);
+ rc = smb2_plain_req_init(SMB2_LOCK, tcon, server,
+ (void **) &req, &total_len);
if (rc)
return rc;
@@ -5182,7 +5300,8 @@ smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
rqst.rq_iov = iov;
rqst.rq_nvec = 2;
- rc = cifs_send_recv(xid, tcon->ses, &rqst, &resp_buf_type, flags,
+ rc = cifs_send_recv(xid, tcon->ses, server,
+ &rqst, &resp_buf_type, flags,
&rsp_iov);
cifs_small_buf_release(req);
if (rc) {
@@ -5227,10 +5346,11 @@ SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
int resp_buf_type;
__u64 *please_key_high;
__u64 *please_key_low;
+ struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
cifs_dbg(FYI, "SMB2_lease_break\n");
- rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req,
- &total_len);
+ rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, server,
+ (void **) &req, &total_len);
if (rc)
return rc;
@@ -5253,7 +5373,8 @@ SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
rqst.rq_iov = iov;
rqst.rq_nvec = 1;
- rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
+ rc = cifs_send_recv(xid, ses, server,
+ &rqst, &resp_buf_type, flags, &rsp_iov);
cifs_small_buf_release(req);
please_key_low = (__u64 *)lease_key;
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index 10acf90f858d..3b0e6acf9d7d 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -143,8 +143,17 @@ struct smb2_transform_hdr {
__u64 SessionId;
} __packed;
+/* See MS-SMB2 2.2.42 */
+struct smb2_compression_transform_hdr {
+ __le32 ProtocolId; /* 0xFC 'S' 'M' 'B' */
+ __le32 OriginalCompressedSegmentSize;
+ __le16 CompressionAlgorithm;
+ __le16 Flags;
+ __le16 Length; /* if chained it is length, else offset */
+} __packed;
+
/* See MS-SMB2 2.2.42.1 */
-struct compression_playload_header {
+struct compression_payload_header {
__le16 AlgorithmId;
__le16 Reserved;
__le32 Length;
@@ -333,7 +342,7 @@ struct smb2_encryption_neg_context {
#define SMB3_COMPRESS_LZ77 cpu_to_le16(0x0002)
#define SMB3_COMPRESS_LZ77_HUFF cpu_to_le16(0x0003)
/* Pattern scanning algorithm, see MS-SMB2 3.1.4.4.1 */
-#define SMB3_COMPRESS_PATTERN cpu_to_le16(0x0004)
+#define SMB3_COMPRESS_PATTERN cpu_to_le16(0x0004) /* Pattern_V1 */
/* Compression Flags */
#define SMB2_COMPRESSION_CAPABILITIES_FLAG_NONE cpu_to_le32(0x00000000)
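The new smb2_compression_transform_hdr gives the receive path enough to recognize a compressed frame before attempting any decompression. A hedged sketch of the first sanity check (0x424D53FC is the little-endian form of the 0xFC 'S' 'M' 'B' magic noted in the struct comment; the helper itself is illustrative):

	/* Sketch: does this buffer begin with a compression transform header? */
	static bool is_compress_hdr_sketch(const void *buf, size_t len)
	{
		const struct smb2_compression_transform_hdr *hdr = buf;

		if (len < sizeof(*hdr))
			return false;
		return hdr->ProtocolId == cpu_to_le32(0x424D53FC);
	}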
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index 087d5f14320b..71ba74792c9e 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -143,7 +143,9 @@ extern int SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms,
struct smb2_file_all_info *buf,
struct create_posix_rsp *posix,
struct kvec *err_iov, int *resp_buftype);
-extern int SMB2_open_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
+extern int SMB2_open_init(struct cifs_tcon *tcon,
+ struct TCP_Server_Info *server,
+ struct smb_rqst *rqst,
__u8 *oplock, struct cifs_open_parms *oparms,
__le16 *path);
extern void SMB2_open_free(struct smb_rqst *rqst);
@@ -151,7 +153,9 @@ extern int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, u32 opcode,
bool is_fsctl, char *in_data, u32 indatalen, u32 maxoutlen,
char **out_data, u32 *plen /* returned data len */);
-extern int SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
+extern int SMB2_ioctl_init(struct cifs_tcon *tcon,
+ struct TCP_Server_Info *server,
+ struct smb_rqst *rqst,
u64 persistent_fid, u64 volatile_fid, u32 opcode,
bool is_fsctl, char *in_data, u32 indatalen,
__u32 max_response_size);
@@ -165,19 +169,25 @@ extern int __SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
struct smb2_file_network_open_info *pbuf);
extern int SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_file_id, u64 volatile_file_id);
-extern int SMB2_close_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
- u64 persistent_fid, u64 volatile_fid, bool query_attrs);
+extern int SMB2_close_init(struct cifs_tcon *tcon,
+ struct TCP_Server_Info *server,
+ struct smb_rqst *rqst,
+ u64 persistent_fid, u64 volatile_fid,
+ bool query_attrs);
extern void SMB2_close_free(struct smb_rqst *rqst);
extern int SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_file_id, u64 volatile_file_id);
extern int SMB2_flush_init(const unsigned int xid, struct smb_rqst *rqst,
struct cifs_tcon *tcon,
+ struct TCP_Server_Info *server,
u64 persistent_file_id, u64 volatile_file_id);
extern void SMB2_flush_free(struct smb_rqst *rqst);
extern int SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_file_id, u64 volatile_file_id,
struct smb2_file_all_info *data);
-extern int SMB2_query_info_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
+extern int SMB2_query_info_init(struct cifs_tcon *tcon,
+ struct TCP_Server_Info *server,
+ struct smb_rqst *rqst,
u64 persistent_fid, u64 volatile_fid,
u8 info_class, u8 info_type,
u32 additional_info, size_t output_len,
@@ -201,6 +211,7 @@ extern int SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, int index,
struct cifs_search_info *srch_inf);
extern int SMB2_query_directory_init(unsigned int xid, struct cifs_tcon *tcon,
+ struct TCP_Server_Info *server,
struct smb_rqst *rqst,
u64 persistent_fid, u64 volatile_fid,
int index, int info_level);
@@ -208,7 +219,9 @@ extern void SMB2_query_directory_free(struct smb_rqst *rqst);
extern int SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, u32 pid,
__le64 *eof);
-extern int SMB2_set_info_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
+extern int SMB2_set_info_init(struct cifs_tcon *tcon,
+ struct TCP_Server_Info *server,
+ struct smb_rqst *rqst,
u64 persistent_fid, u64 volatile_fid, u32 pid,
u8 info_class, u8 info_type, u32 additional_info,
void **data, unsigned int *size);
diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
index 1a5834a5d597..b029ed31ef91 100644
--- a/fs/cifs/smbdirect.c
+++ b/fs/cifs/smbdirect.c
@@ -294,15 +294,12 @@ static void send_done(struct ib_cq *cq, struct ib_wc *wc)
static void dump_smbd_negotiate_resp(struct smbd_negotiate_resp *resp)
{
- log_rdma_event(INFO, "resp message min_version %u max_version %u "
- "negotiated_version %u credits_requested %u "
- "credits_granted %u status %u max_readwrite_size %u "
- "preferred_send_size %u max_receive_size %u "
- "max_fragmented_size %u\n",
- resp->min_version, resp->max_version, resp->negotiated_version,
- resp->credits_requested, resp->credits_granted, resp->status,
- resp->max_readwrite_size, resp->preferred_send_size,
- resp->max_receive_size, resp->max_fragmented_size);
+ log_rdma_event(INFO, "resp message min_version %u max_version %u negotiated_version %u credits_requested %u credits_granted %u status %u max_readwrite_size %u preferred_send_size %u max_receive_size %u max_fragmented_size %u\n",
+ resp->min_version, resp->max_version,
+ resp->negotiated_version, resp->credits_requested,
+ resp->credits_granted, resp->status,
+ resp->max_readwrite_size, resp->preferred_send_size,
+ resp->max_receive_size, resp->max_fragmented_size);
}
/*
@@ -450,10 +447,9 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
struct smbd_connection *info = response->info;
int data_length = 0;
- log_rdma_recv(INFO, "response=%p type=%d wc status=%d wc opcode %d "
- "byte_len=%d pkey_index=%x\n",
- response, response->type, wc->status, wc->opcode,
- wc->byte_len, wc->pkey_index);
+ log_rdma_recv(INFO, "response=%p type=%d wc status=%d wc opcode %d byte_len=%d pkey_index=%x\n",
+ response, response->type, wc->status, wc->opcode,
+ wc->byte_len, wc->pkey_index);
if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) {
log_rdma_recv(INFO, "wc->status=%d opcode=%d\n",
@@ -519,12 +515,11 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
wake_up_interruptible(&info->wait_send_queue);
}
- log_incoming(INFO, "data flags %d data_offset %d "
- "data_length %d remaining_data_length %d\n",
- le16_to_cpu(data_transfer->flags),
- le32_to_cpu(data_transfer->data_offset),
- le32_to_cpu(data_transfer->data_length),
- le32_to_cpu(data_transfer->remaining_data_length));
+ log_incoming(INFO, "data flags %d data_offset %d data_length %d remaining_data_length %d\n",
+ le16_to_cpu(data_transfer->flags),
+ le32_to_cpu(data_transfer->data_offset),
+ le32_to_cpu(data_transfer->data_length),
+ le32_to_cpu(data_transfer->remaining_data_length));
/* Send a KEEP_ALIVE response right away if requested */
info->keep_alive_requested = KEEP_ALIVE_NONE;
@@ -632,14 +627,10 @@ static int smbd_ia_open(
}
if (!frwr_is_supported(&info->id->device->attrs)) {
- log_rdma_event(ERR,
- "Fast Registration Work Requests "
- "(FRWR) is not supported\n");
- log_rdma_event(ERR,
- "Device capability flags = %llx "
- "max_fast_reg_page_list_len = %u\n",
- info->id->device->attrs.device_cap_flags,
- info->id->device->attrs.max_fast_reg_page_list_len);
+ log_rdma_event(ERR, "Fast Registration Work Requests (FRWR) is not supported\n");
+ log_rdma_event(ERR, "Device capability flags = %llx max_fast_reg_page_list_len = %u\n",
+ info->id->device->attrs.device_cap_flags,
+ info->id->device->attrs.max_fast_reg_page_list_len);
rc = -EPROTONOSUPPORT;
goto out2;
}
@@ -898,13 +889,12 @@ wait_send_queue:
packet->remaining_data_length = cpu_to_le32(remaining_data_length);
packet->padding = 0;
- log_outgoing(INFO, "credits_requested=%d credits_granted=%d "
- "data_offset=%d data_length=%d remaining_data_length=%d\n",
- le16_to_cpu(packet->credits_requested),
- le16_to_cpu(packet->credits_granted),
- le32_to_cpu(packet->data_offset),
- le32_to_cpu(packet->data_length),
- le32_to_cpu(packet->remaining_data_length));
+ log_outgoing(INFO, "credits_requested=%d credits_granted=%d data_offset=%d data_length=%d remaining_data_length=%d\n",
+ le16_to_cpu(packet->credits_requested),
+ le16_to_cpu(packet->credits_granted),
+ le32_to_cpu(packet->data_offset),
+ le32_to_cpu(packet->data_length),
+ le32_to_cpu(packet->remaining_data_length));
/* Map the packet to DMA */
header_length = sizeof(struct smbd_data_transfer);
@@ -1078,11 +1068,9 @@ static int smbd_negotiate(struct smbd_connection *info)
response->type = SMBD_NEGOTIATE_RESP;
rc = smbd_post_recv(info, response);
- log_rdma_event(INFO,
- "smbd_post_recv rc=%d iov.addr=%llx iov.length=%x "
- "iov.lkey=%x\n",
- rc, response->sge.addr,
- response->sge.length, response->sge.lkey);
+ log_rdma_event(INFO, "smbd_post_recv rc=%d iov.addr=%llx iov.length=%x iov.lkey=%x\n",
+ rc, response->sge.addr,
+ response->sge.length, response->sge.lkey);
if (rc)
return rc;
@@ -1540,25 +1528,19 @@ static struct smbd_connection *_smbd_get_connection(
if (smbd_send_credit_target > info->id->device->attrs.max_cqe ||
smbd_send_credit_target > info->id->device->attrs.max_qp_wr) {
- log_rdma_event(ERR,
- "consider lowering send_credit_target = %d. "
- "Possible CQE overrun, device "
- "reporting max_cpe %d max_qp_wr %d\n",
- smbd_send_credit_target,
- info->id->device->attrs.max_cqe,
- info->id->device->attrs.max_qp_wr);
+ log_rdma_event(ERR, "consider lowering send_credit_target = %d. Possible CQE overrun, device reporting max_cpe %d max_qp_wr %d\n",
+ smbd_send_credit_target,
+ info->id->device->attrs.max_cqe,
+ info->id->device->attrs.max_qp_wr);
goto config_failed;
}
if (smbd_receive_credit_max > info->id->device->attrs.max_cqe ||
smbd_receive_credit_max > info->id->device->attrs.max_qp_wr) {
- log_rdma_event(ERR,
- "consider lowering receive_credit_max = %d. "
- "Possible CQE overrun, device "
- "reporting max_cpe %d max_qp_wr %d\n",
- smbd_receive_credit_max,
- info->id->device->attrs.max_cqe,
- info->id->device->attrs.max_qp_wr);
+ log_rdma_event(ERR, "consider lowering receive_credit_max = %d. Possible CQE overrun, device reporting max_cpe %d max_qp_wr %d\n",
+ smbd_receive_credit_max,
+ info->id->device->attrs.max_cqe,
+ info->id->device->attrs.max_qp_wr);
goto config_failed;
}
@@ -1865,11 +1847,9 @@ again:
to_read -= to_copy;
data_read += to_copy;
- log_read(INFO, "_get_first_reassembly memcpy %d bytes "
- "data_transfer_length-offset=%d after that "
- "to_read=%d data_read=%d offset=%d\n",
- to_copy, data_length - offset,
- to_read, data_read, offset);
+ log_read(INFO, "_get_first_reassembly memcpy %d bytes data_transfer_length-offset=%d after that to_read=%d data_read=%d offset=%d\n",
+ to_copy, data_length - offset,
+ to_read, data_read, offset);
}
spin_lock_irq(&info->reassembly_queue_lock);
@@ -1878,10 +1858,9 @@ again:
spin_unlock_irq(&info->reassembly_queue_lock);
info->first_entry_offset = offset;
- log_read(INFO, "returning to thread data_read=%d "
- "reassembly_data_length=%d first_entry_offset=%d\n",
- data_read, info->reassembly_data_length,
- info->first_entry_offset);
+ log_read(INFO, "returning to thread data_read=%d reassembly_data_length=%d first_entry_offset=%d\n",
+ data_read, info->reassembly_data_length,
+ info->first_entry_offset);
read_rfc1002_done:
return data_read;
}
@@ -1952,7 +1931,7 @@ int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
if (iov_iter_rw(&msg->msg_iter) == WRITE) {
/* It's a bug in upper layer to get there */
- cifs_dbg(VFS, "CIFS: invalid msg iter dir %u\n",
+ cifs_dbg(VFS, "Invalid msg iter dir %u\n",
iov_iter_rw(&msg->msg_iter));
rc = -EINVAL;
goto out;
@@ -1974,7 +1953,7 @@ int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
default:
/* It's a bug in upper layer to get there */
- cifs_dbg(VFS, "CIFS: invalid msg type %d\n",
+ cifs_dbg(VFS, "Invalid msg type %d\n",
iov_iter_type(&msg->msg_iter));
rc = -EINVAL;
}
@@ -2043,10 +2022,9 @@ next_rqst:
dump_smb(iov[i].iov_base, iov[i].iov_len);
- log_write(INFO, "rqst_idx=%d nvec=%d rqst->rq_npages=%d rq_pagesz=%d "
- "rq_tailsz=%d buflen=%lu\n",
- rqst_idx, rqst->rq_nvec, rqst->rq_npages, rqst->rq_pagesz,
- rqst->rq_tailsz, smb_rqst_len(server, rqst));
+ log_write(INFO, "rqst_idx=%d nvec=%d rqst->rq_npages=%d rq_pagesz=%d rq_tailsz=%d buflen=%lu\n",
+ rqst_idx, rqst->rq_nvec, rqst->rq_npages, rqst->rq_pagesz,
+ rqst->rq_tailsz, smb_rqst_len(server, rqst));
start = i = 0;
buflen = 0;
@@ -2056,11 +2034,9 @@ next_rqst:
if (i > start) {
remaining_data_length -=
(buflen-iov[i].iov_len);
- log_write(INFO, "sending iov[] from start=%d "
- "i=%d nvecs=%d "
- "remaining_data_length=%d\n",
- start, i, i-start,
- remaining_data_length);
+ log_write(INFO, "sending iov[] from start=%d i=%d nvecs=%d remaining_data_length=%d\n",
+ start, i, i - start,
+ remaining_data_length);
rc = smbd_post_send_data(
info, &iov[start], i-start,
remaining_data_length);
@@ -2069,10 +2045,9 @@ next_rqst:
} else {
/* iov[start] is too big, break it */
nvecs = (buflen+max_iov_size-1)/max_iov_size;
- log_write(INFO, "iov[%d] iov_base=%p buflen=%d"
- " break to %d vectors\n",
- start, iov[start].iov_base,
- buflen, nvecs);
+ log_write(INFO, "iov[%d] iov_base=%p buflen=%d break to %d vectors\n",
+ start, iov[start].iov_base,
+ buflen, nvecs);
for (j = 0; j < nvecs; j++) {
vec.iov_base =
(char *)iov[start].iov_base +
@@ -2084,11 +2059,9 @@ next_rqst:
max_iov_size*(nvecs-1);
remaining_data_length -= vec.iov_len;
log_write(INFO,
- "sending vec j=%d iov_base=%p"
- " iov_len=%zu "
- "remaining_data_length=%d\n",
- j, vec.iov_base, vec.iov_len,
- remaining_data_length);
+ "sending vec j=%d iov_base=%p iov_len=%zu remaining_data_length=%d\n",
+ j, vec.iov_base, vec.iov_len,
+ remaining_data_length);
rc = smbd_post_send_data(
info, &vec, 1,
remaining_data_length);
@@ -2106,11 +2079,9 @@ next_rqst:
if (i == rqst->rq_nvec) {
/* send out all remaining vecs */
remaining_data_length -= buflen;
- log_write(INFO,
- "sending iov[] from start=%d i=%d "
- "nvecs=%d remaining_data_length=%d\n",
- start, i, i-start,
- remaining_data_length);
+ log_write(INFO, "sending iov[] from start=%d i=%d nvecs=%d remaining_data_length=%d\n",
+ start, i, i - start,
+ remaining_data_length);
rc = smbd_post_send_data(info, &iov[start],
i-start, remaining_data_length);
if (rc)
@@ -2134,10 +2105,9 @@ next_rqst:
if (j == nvecs-1)
size = buflen - j*max_iov_size;
remaining_data_length -= size;
- log_write(INFO, "sending pages i=%d offset=%d size=%d"
- " remaining_data_length=%d\n",
- i, j*max_iov_size+offset, size,
- remaining_data_length);
+ log_write(INFO, "sending pages i=%d offset=%d size=%d remaining_data_length=%d\n",
+ i, j * max_iov_size + offset, size,
+ remaining_data_length);
rc = smbd_post_send_page(
info, rqst->rq_pages[i],
j*max_iov_size + offset,
@@ -2211,11 +2181,9 @@ static void smbd_mr_recovery_work(struct work_struct *work)
info->pd, info->mr_type,
info->max_frmr_depth);
if (IS_ERR(smbdirect_mr->mr)) {
- log_rdma_mr(ERR,
- "ib_alloc_mr failed mr_type=%x "
- "max_frmr_depth=%x\n",
- info->mr_type,
- info->max_frmr_depth);
+ log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x max_frmr_depth=%x\n",
+ info->mr_type,
+ info->max_frmr_depth);
smbd_disconnect_rdma_connection(info);
continue;
}
@@ -2278,9 +2246,8 @@ static int allocate_mr_list(struct smbd_connection *info)
smbdirect_mr->mr = ib_alloc_mr(info->pd, info->mr_type,
info->max_frmr_depth);
if (IS_ERR(smbdirect_mr->mr)) {
- log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x "
- "max_frmr_depth=%x\n",
- info->mr_type, info->max_frmr_depth);
+ log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x max_frmr_depth=%x\n",
+ info->mr_type, info->max_frmr_depth);
goto out;
}
smbdirect_mr->sgl = kcalloc(
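
The smbdirect.c hunks above all make the same mechanical change: user-visible format strings are coalesced onto a single line, with only the argument list wrapped. This matches the kernel coding-style rule (and the checkpatch.pl warning) that quoted strings should never be split, so a message stays greppable even past 80 columns; roughly:

	/* one greppable format string; wrap arguments, never the string */
	log_rdma_event(ERR,
		       "consider lowering send_credit_target = %d. Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
		       smbd_send_credit_target,
		       info->id->device->attrs.max_cqe,
		       info->id->device->attrs.max_qp_wr);
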
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index c97570eb2c18..d11e31064679 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -112,7 +112,7 @@ static void _cifs_mid_q_entry_release(struct kref *refcount)
#ifdef CONFIG_CIFS_STATS2
now = jiffies;
if (now < midEntry->when_alloc)
- cifs_server_dbg(VFS, "invalid mid allocation time\n");
+ cifs_server_dbg(VFS, "Invalid mid allocation time\n");
roundtrip_time = now - midEntry->when_alloc;
if (smb_cmd < NUMBER_OF_SMB2_COMMANDS) {
@@ -151,12 +151,12 @@ static void _cifs_mid_q_entry_release(struct kref *refcount)
trace_smb3_slow_rsp(smb_cmd, midEntry->mid, midEntry->pid,
midEntry->when_sent, midEntry->when_received);
if (cifsFYI & CIFS_TIMER) {
- pr_debug(" CIFS slow rsp: cmd %d mid %llu",
- midEntry->command, midEntry->mid);
- cifs_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
- now - midEntry->when_alloc,
- now - midEntry->when_sent,
- now - midEntry->when_received);
+ pr_debug("slow rsp: cmd %d mid %llu",
+ midEntry->command, midEntry->mid);
+ cifs_info("A: 0x%lx S: 0x%lx R: 0x%lx\n",
+ now - midEntry->when_alloc,
+ now - midEntry->when_sent,
+ now - midEntry->when_received);
}
}
#endif
@@ -325,7 +325,6 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
size_t total_len = 0, sent, size;
struct socket *ssocket = server->ssocket;
struct msghdr smb_msg;
- int val = 1;
__be32 rfc1002_marker;
if (cifs_rdma_enabled(server)) {
@@ -345,8 +344,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
}
/* cork the socket */
- kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
- (char *)&val, sizeof(val));
+ tcp_sock_set_cork(ssocket->sk, true);
for (j = 0; j < num_rqst; j++)
send_length += smb_rqst_len(server, &rqst[j]);
@@ -435,9 +433,7 @@ unmask:
}
/* uncork it */
- val = 0;
- kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
- (char *)&val, sizeof(val));
+ tcp_sock_set_cork(ssocket->sk, false);
if ((total_len > 0) && (total_len != send_length)) {
cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
@@ -477,8 +473,7 @@ smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
return -ENOMEM;
if (!server->ops->init_transform_rq) {
- cifs_server_dbg(VFS, "Encryption requested but transform "
- "callback is missing\n");
+ cifs_server_dbg(VFS, "Encryption requested but transform callback is missing\n");
return -EIO;
}
@@ -993,8 +988,35 @@ cifs_cancelled_callback(struct mid_q_entry *mid)
DeleteMidQEntry(mid);
}
+/*
+ * Return a channel (master if none) of @ses that can be used to send
+ * regular requests.
+ *
+ * If we are currently binding a new channel (negprot/sess.setup),
+ * return the new incomplete channel.
+ */
+struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
+{
+ uint index = 0;
+
+ if (!ses)
+ return NULL;
+
+ if (!ses->binding) {
+ /* round robin */
+ if (ses->chan_count > 1) {
+ index = (uint)atomic_inc_return(&ses->chan_seq);
+ index %= ses->chan_count;
+ }
+ return ses->chans[index].server;
+ } else {
+ return cifs_ses_server(ses);
+ }
+}
+
int
compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
+ struct TCP_Server_Info *server,
const int flags, const int num_rqst, struct smb_rqst *rqst,
int *resp_buf_type, struct kvec *resp_iov)
{
@@ -1006,30 +1028,17 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
};
unsigned int instance;
char *buf;
- struct TCP_Server_Info *server;
optype = flags & CIFS_OP_MASK;
for (i = 0; i < num_rqst; i++)
resp_buf_type[i] = CIFS_NO_BUFFER; /* no response buf yet */
- if ((ses == NULL) || (ses->server == NULL)) {
+ if (!ses || !ses->server || !server) {
cifs_dbg(VFS, "Null session\n");
return -EIO;
}
- if (!ses->binding) {
- uint index = 0;
-
- if (ses->chan_count > 1) {
- index = (uint)atomic_inc_return(&ses->chan_seq);
- index %= ses->chan_count;
- }
- server = ses->chans[index].server;
- } else {
- server = cifs_ses_server(ses);
- }
-
if (server->tcpStatus == CifsExiting)
return -ENOENT;
@@ -1224,11 +1233,12 @@ out:
int
cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
+ struct TCP_Server_Info *server,
struct smb_rqst *rqst, int *resp_buf_type, const int flags,
struct kvec *resp_iov)
{
- return compound_send_recv(xid, ses, flags, 1, rqst, resp_buf_type,
- resp_iov);
+ return compound_send_recv(xid, ses, server, flags, 1,
+ rqst, resp_buf_type, resp_iov);
}
int
@@ -1263,7 +1273,8 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
rqst.rq_iov = new_iov;
rqst.rq_nvec = n_vec + 1;
- rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
+ rc = cifs_send_recv(xid, ses, ses->server,
+ &rqst, resp_buf_type, flags, resp_iov);
if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
kfree(new_iov);
return rc;
@@ -1300,8 +1311,8 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
use ses->maxReq */
if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
- cifs_server_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
- len);
+ cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
+ len);
return -EIO;
}
@@ -1441,8 +1452,8 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
use ses->maxReq */
if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
- cifs_tcon_dbg(VFS, "Illegal length, greater than maximum frame, %d\n",
- len);
+ cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
+ len);
return -EIO;
}
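
cifs_pick_channel() above centralizes the round-robin channel selection that compound_send_recv() used to open-code. A minimal standalone sketch of the same selection logic, with chans/chan_count/chan_seq as stand-ins for the session fields:

	#include <stdatomic.h>

	struct channel;	/* opaque stand-in for struct TCP_Server_Info */

	static struct channel *pick_channel(struct channel **chans,
					    unsigned int chan_count,
					    atomic_uint *chan_seq)
	{
		unsigned int index = 0;

		/* skip the atomic op entirely for single-channel sessions */
		if (chan_count > 1)
			index = atomic_fetch_add(chan_seq, 1) % chan_count;
		return chans[index];
	}
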
diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
index e61f3fe8e32a..2d24c765cbd7 100644
--- a/fs/compat_binfmt_elf.c
+++ b/fs/compat_binfmt_elf.c
@@ -117,6 +117,11 @@
#define arch_setup_additional_pages compat_arch_setup_additional_pages
#endif
+#ifdef compat_elf_read_implies_exec
+#undef elf_read_implies_exec
+#define elf_read_implies_exec compat_elf_read_implies_exec
+#endif
+
/*
* Rename a few of the symbols that binfmt_elf.c will define.
* These are all local so the names don't really matter, but it
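
compat_binfmt_elf.c builds binfmt_elf.c a second time with compat definitions, and each #ifdef/#undef/#define block lets an architecture substitute one symbol before the #include. The new hunk extends that pattern to elf_read_implies_exec(); a hypothetical arch header opting in would look something like this (names illustrative, not quoted from any tree):

	/* asm/elf.h (illustrative): compat tasks keep the legacy policy */
	#define compat_elf_read_implies_exec(ex, stack) \
		((stack) == EXSTACK_DEFAULT)
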
diff --git a/fs/coredump.c b/fs/coredump.c
index 478a0d810136..7237f07ff6be 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -393,7 +393,7 @@ static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
* of ->siglock provides a memory barrier.
*
* do_exit:
- * The caller holds mm->mmap_sem. This means that the task which
+ * The caller holds mm->mmap_lock. This means that the task which
* uses this mm can't pass exit_mm(), so it can't exit or clear
* its ->mm.
*
@@ -401,7 +401,7 @@ static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
* It does list_replace_rcu(&leader->tasks, &current->tasks),
* we must see either old or new leader, this does not matter.
* However, it can change p->sighand, so lock_task_sighand(p)
- * must be used. Since p->mm != NULL and we hold ->mmap_sem
+ * must be used. Since p->mm != NULL and we hold ->mmap_lock
* it can't fail.
*
* Note also that "g" can be the old leader with ->mm == NULL
@@ -445,12 +445,12 @@ static int coredump_wait(int exit_code, struct core_state *core_state)
core_state->dumper.task = tsk;
core_state->dumper.next = NULL;
- if (down_write_killable(&mm->mmap_sem))
+ if (mmap_write_lock_killable(mm))
return -EINTR;
if (!mm->core_state)
core_waiters = zap_threads(tsk, mm, core_state, exit_code);
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
if (core_waiters > 0) {
struct core_thread *ptr;
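
The coredump.c changes (like the exec.c ones further down) are part of the tree-wide conversion from open-coded mmap_sem rwsem calls to the mmap locking API wrappers. The correspondence used in these hunks:

	/* open-coded rwsem call                  mmap locking API wrapper      */
	/* down_read(&mm->mmap_sem)           ->  mmap_read_lock(mm)            */
	/* up_read(&mm->mmap_sem)             ->  mmap_read_unlock(mm)          */
	/* down_write_killable(&mm->mmap_sem) ->  mmap_write_lock_killable(mm)  */
	/* up_write(&mm->mmap_sem)            ->  mmap_write_unlock(mm)         */
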
diff --git a/fs/dcache.c b/fs/dcache.c
index b280e07e162b..361ea7ab30ea 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -165,7 +165,7 @@ static long get_nr_dentry_negative(void)
return sum < 0 ? 0 : sum;
}
-int proc_nr_dentry(struct ctl_table *table, int write, void __user *buffer,
+int proc_nr_dentry(struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
dentry_stat.nr_dentry = get_nr_dentry();
@@ -647,6 +647,10 @@ static inline bool retain_dentry(struct dentry *dentry)
if (dentry->d_op->d_delete(dentry))
return false;
}
+
+ if (unlikely(dentry->d_flags & DCACHE_DONTCACHE))
+ return false;
+
/* retain; LRU fodder */
dentry->d_lockref.count--;
if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
@@ -656,6 +660,21 @@ static inline bool retain_dentry(struct dentry *dentry)
return true;
}
+void d_mark_dontcache(struct inode *inode)
+{
+ struct dentry *de;
+
+ spin_lock(&inode->i_lock);
+ hlist_for_each_entry(de, &inode->i_dentry, d_u.d_alias) {
+ spin_lock(&de->d_lock);
+ de->d_flags |= DCACHE_DONTCACHE;
+ spin_unlock(&de->d_lock);
+ }
+ inode->i_state |= I_DONTCACHE;
+ spin_unlock(&inode->i_lock);
+}
+EXPORT_SYMBOL(d_mark_dontcache);
+
/*
* Finish off a dentry we've decided to kill.
* dentry->d_lock must be held, returns with it unlocked.
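
d_mark_dontcache() gives filesystems a way to flag a live inode and all of its aliased dentries for eviction at last use; setting I_DONTCACHE alone could miss dentries instantiated before the flag went up. A hedged usage sketch (the predicate and helper names are hypothetical):

	/* e.g. called when the backing storage is discovered to be bad */
	static void demo_stop_caching(struct inode *inode)
	{
		if (demo_inode_is_stale(inode))		/* hypothetical test */
			d_mark_dontcache(inode);	/* DCACHE_DONTCACHE on all
							 * aliases + I_DONTCACHE */
	}
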
diff --git a/fs/debugfs/internal.h b/fs/debugfs/internal.h
index f0d73d86cc1a..034e6973cead 100644
--- a/fs/debugfs/internal.h
+++ b/fs/debugfs/internal.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* internal.h - declarations internal to debugfs
*
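
The debugfs hunk is a comment-style fix only: per Documentation/process/license-rules.rst, C headers carry the SPDX tag in a /* */ block comment, while // line comments are reserved for .c source files:

	// SPDX-License-Identifier: GPL-2.0	(first line of a .c file)
	/* SPDX-License-Identifier: GPL-2.0 */	(first line of a .h file)
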
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 6d5370eac2a8..1543b5af400e 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -386,25 +386,6 @@ static void dio_bio_end_io(struct bio *bio)
spin_unlock_irqrestore(&dio->bio_lock, flags);
}
-/**
- * dio_end_io - handle the end io action for the given bio
- * @bio: The direct io bio thats being completed
- *
- * This is meant to be called by any filesystem that uses their own dio_submit_t
- * so that the DIO specific endio actions are dealt with after the filesystem
- * has done it's completion work.
- */
-void dio_end_io(struct bio *bio)
-{
- struct dio *dio = bio->bi_private;
-
- if (dio->is_async)
- dio_bio_end_aio(bio);
- else
- dio_bio_end_io(bio);
-}
-EXPORT_SYMBOL_GPL(dio_end_io);
-
static inline void
dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
struct block_device *bdev,
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index 416d9de35679..04fe9f525ac7 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -97,7 +97,6 @@ do { \
__LINE__, __FILE__, #x, jiffies); \
{do} \
printk("\n"); \
- BUG(); \
panic("DLM: Record message above and reboot.\n"); \
} \
}
@@ -421,7 +420,7 @@ struct dlm_message {
int m_bastmode;
int m_asts;
int m_result; /* 0 or -EXXX */
- char m_extra[0]; /* name or lvb */
+ char m_extra[]; /* name or lvb */
};
@@ -450,7 +449,7 @@ struct dlm_rcom {
uint64_t rc_id; /* match reply with request */
uint64_t rc_seq; /* sender's ls_recover_seq */
uint64_t rc_seq_reply; /* remote ls_recover_seq */
- char rc_buf[0];
+ char rc_buf[];
};
union dlm_packet {
@@ -506,7 +505,7 @@ struct rcom_lock {
__le16 rl_wait_type;
__le16 rl_namelen;
char rl_name[DLM_RESNAME_MAXLEN];
- char rl_lvb[0];
+ char rl_lvb[];
};
/*
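
The dlm [0] -> [] changes are part of the tree-wide replacement of GNU zero-length arrays with C99 flexible array members, which lets the compiler and helpers such as struct_size() reason about the trailing data. A minimal sketch of the allocation idiom, with made-up names:

	#include <linux/overflow.h>	/* struct_size() */
	#include <linux/slab.h>

	struct demo_msg {
		u16 namelen;
		char name[];		/* C99 flexible array member */
	};

	static struct demo_msg *demo_msg_alloc(u16 namelen)
	{
		struct demo_msg *m;

		/* sizeof(*m) + namelen, with overflow checking */
		m = kzalloc(struct_size(m, name, namelen), GFP_KERNEL);
		if (m)
			m->namelen = namelen;
		return m;
	}
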
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index afb8340918b8..e93670ecfae5 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -197,8 +197,6 @@ static struct kset *dlm_kset;
static int do_uevent(struct dlm_ls *ls, int in)
{
- int error;
-
if (in)
kobject_uevent(&ls->ls_kobj, KOBJ_ONLINE);
else
@@ -209,20 +207,12 @@ static int do_uevent(struct dlm_ls *ls, int in)
/* dlm_controld will see the uevent, do the necessary group management
and then write to sysfs to wake us */
- error = wait_event_interruptible(ls->ls_uevent_wait,
- test_and_clear_bit(LSFL_UEVENT_WAIT, &ls->ls_flags));
+ wait_event(ls->ls_uevent_wait,
+ test_and_clear_bit(LSFL_UEVENT_WAIT, &ls->ls_flags));
- log_rinfo(ls, "group event done %d %d", error, ls->ls_uevent_result);
-
- if (error)
- goto out;
+ log_rinfo(ls, "group event done %d", ls->ls_uevent_result);
- error = ls->ls_uevent_result;
- out:
- if (error)
- log_error(ls, "group %s failed %d %d", in ? "join" : "leave",
- error, ls->ls_uevent_result);
- return error;
+ return ls->ls_uevent_result;
}
static int dlm_uevent(struct kset *kset, struct kobject *kobj,
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index cdfaf4f0e11a..3543a8fec907 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -724,7 +724,7 @@ out_close:
}
/* Listening socket is busy, accept a connection */
-static int tcp_accept_from_sock(struct connection *con)
+static int accept_from_sock(struct connection *con)
{
int result;
struct sockaddr_storage peeraddr;
@@ -852,123 +852,6 @@ accept_err:
return result;
}
-static int sctp_accept_from_sock(struct connection *con)
-{
- /* Check that the new node is in the lockspace */
- struct sctp_prim prim;
- int nodeid;
- int prim_len, ret;
- int addr_len;
- struct connection *newcon;
- struct connection *addcon;
- struct socket *newsock;
-
- mutex_lock(&connections_lock);
- if (!dlm_allow_conn) {
- mutex_unlock(&connections_lock);
- return -1;
- }
- mutex_unlock(&connections_lock);
-
- mutex_lock_nested(&con->sock_mutex, 0);
-
- ret = kernel_accept(con->sock, &newsock, O_NONBLOCK);
- if (ret < 0)
- goto accept_err;
-
- memset(&prim, 0, sizeof(struct sctp_prim));
- prim_len = sizeof(struct sctp_prim);
-
- ret = kernel_getsockopt(newsock, IPPROTO_SCTP, SCTP_PRIMARY_ADDR,
- (char *)&prim, &prim_len);
- if (ret < 0) {
- log_print("getsockopt/sctp_primary_addr failed: %d", ret);
- goto accept_err;
- }
-
- make_sockaddr(&prim.ssp_addr, 0, &addr_len);
- ret = addr_to_nodeid(&prim.ssp_addr, &nodeid);
- if (ret) {
- unsigned char *b = (unsigned char *)&prim.ssp_addr;
-
- log_print("reject connect from unknown addr");
- print_hex_dump_bytes("ss: ", DUMP_PREFIX_NONE,
- b, sizeof(struct sockaddr_storage));
- goto accept_err;
- }
-
- newcon = nodeid2con(nodeid, GFP_NOFS);
- if (!newcon) {
- ret = -ENOMEM;
- goto accept_err;
- }
-
- mutex_lock_nested(&newcon->sock_mutex, 1);
-
- if (newcon->sock) {
- struct connection *othercon = newcon->othercon;
-
- if (!othercon) {
- othercon = kmem_cache_zalloc(con_cache, GFP_NOFS);
- if (!othercon) {
- log_print("failed to allocate incoming socket");
- mutex_unlock(&newcon->sock_mutex);
- ret = -ENOMEM;
- goto accept_err;
- }
- othercon->nodeid = nodeid;
- othercon->rx_action = receive_from_sock;
- mutex_init(&othercon->sock_mutex);
- INIT_LIST_HEAD(&othercon->writequeue);
- spin_lock_init(&othercon->writequeue_lock);
- INIT_WORK(&othercon->swork, process_send_sockets);
- INIT_WORK(&othercon->rwork, process_recv_sockets);
- set_bit(CF_IS_OTHERCON, &othercon->flags);
- }
- mutex_lock_nested(&othercon->sock_mutex, 2);
- if (!othercon->sock) {
- newcon->othercon = othercon;
- add_sock(newsock, othercon);
- addcon = othercon;
- mutex_unlock(&othercon->sock_mutex);
- } else {
- printk("Extra connection from node %d attempted\n", nodeid);
- ret = -EAGAIN;
- mutex_unlock(&othercon->sock_mutex);
- mutex_unlock(&newcon->sock_mutex);
- goto accept_err;
- }
- } else {
- newcon->rx_action = receive_from_sock;
- add_sock(newsock, newcon);
- addcon = newcon;
- }
-
- log_print("connected to %d", nodeid);
-
- mutex_unlock(&newcon->sock_mutex);
-
- /*
- * Add it to the active queue in case we got data
- * between processing the accept adding the socket
- * to the read_sockets list
- */
- if (!test_and_set_bit(CF_READ_PENDING, &addcon->flags))
- queue_work(recv_workqueue, &addcon->rwork);
- mutex_unlock(&con->sock_mutex);
-
- return 0;
-
-accept_err:
- mutex_unlock(&con->sock_mutex);
- if (newsock)
- sock_release(newsock);
- if (ret != -EAGAIN)
- log_print("error accepting connection from node: %d", ret);
-
- return ret;
-}
-
static void free_entry(struct writequeue_entry *e)
{
__free_page(e->page);
@@ -999,6 +882,7 @@ static void writequeue_entry_complete(struct writequeue_entry *e, int completed)
static int sctp_bind_addrs(struct connection *con, uint16_t port)
{
struct sockaddr_storage localaddr;
+ struct sockaddr *addr = (struct sockaddr *)&localaddr;
int i, addr_len, result = 0;
for (i = 0; i < dlm_local_count; i++) {
@@ -1006,13 +890,9 @@ static int sctp_bind_addrs(struct connection *con, uint16_t port)
make_sockaddr(&localaddr, port, &addr_len);
if (!i)
- result = kernel_bind(con->sock,
- (struct sockaddr *)&localaddr,
- addr_len);
+ result = kernel_bind(con->sock, addr, addr_len);
else
- result = kernel_setsockopt(con->sock, SOL_SCTP,
- SCTP_SOCKOPT_BINDX_ADD,
- (char *)&localaddr, addr_len);
+ result = sock_bind_add(con->sock->sk, addr, addr_len);
if (result < 0) {
log_print("Can't bind to %d addr number %d, %d.\n",
@@ -1031,11 +911,9 @@ static int sctp_bind_addrs(struct connection *con, uint16_t port)
static void sctp_connect_to_sock(struct connection *con)
{
struct sockaddr_storage daddr;
- int one = 1;
int result;
int addr_len;
struct socket *sock;
- struct __kernel_sock_timeval tv = { .tv_sec = 5, .tv_usec = 0 };
if (con->nodeid == 0) {
log_print("attempt to connect sock 0 foiled");
@@ -1079,21 +957,17 @@ static void sctp_connect_to_sock(struct connection *con)
log_print("connecting to %d", con->nodeid);
/* Turn off Nagle's algorithm */
- kernel_setsockopt(sock, SOL_SCTP, SCTP_NODELAY, (char *)&one,
- sizeof(one));
+ sctp_sock_set_nodelay(sock->sk);
/*
* Make sock->ops->connect() function return in specified time,
* since O_NONBLOCK argument in connect() function does not work here,
* then, we should restore the default value of this attribute.
*/
- kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO_NEW, (char *)&tv,
- sizeof(tv));
+ sock_set_sndtimeo(sock->sk, 5);
result = sock->ops->connect(sock, (struct sockaddr *)&daddr, addr_len,
0);
- memset(&tv, 0, sizeof(tv));
- kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO_NEW, (char *)&tv,
- sizeof(tv));
+ sock_set_sndtimeo(sock->sk, 0);
if (result == -EINPROGRESS)
result = 0;
@@ -1132,7 +1006,6 @@ static void tcp_connect_to_sock(struct connection *con)
struct sockaddr_storage saddr, src_addr;
int addr_len;
struct socket *sock = NULL;
- int one = 1;
int result;
if (con->nodeid == 0) {
@@ -1181,8 +1054,7 @@ static void tcp_connect_to_sock(struct connection *con)
log_print("connecting to %d", con->nodeid);
/* Turn off Nagle's algorithm */
- kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, (char *)&one,
- sizeof(one));
+ tcp_sock_set_nodelay(sock->sk);
result = sock->ops->connect(sock, (struct sockaddr *)&saddr, addr_len,
O_NONBLOCK);
@@ -1224,7 +1096,6 @@ static struct socket *tcp_create_listen_sock(struct connection *con,
{
struct socket *sock = NULL;
int result = 0;
- int one = 1;
int addr_len;
if (dlm_local_addr[0]->ss_family == AF_INET)
@@ -1241,19 +1112,14 @@ static struct socket *tcp_create_listen_sock(struct connection *con,
}
/* Turn off Nagle's algorithm */
- kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, (char *)&one,
- sizeof(one));
+ tcp_sock_set_nodelay(sock->sk);
- result = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
- (char *)&one, sizeof(one));
+ sock_set_reuseaddr(sock->sk);
- if (result < 0) {
- log_print("Failed to set SO_REUSEADDR on socket: %d", result);
- }
write_lock_bh(&sock->sk->sk_callback_lock);
sock->sk->sk_user_data = con;
save_listen_callbacks(sock);
- con->rx_action = tcp_accept_from_sock;
+ con->rx_action = accept_from_sock;
con->connect_action = tcp_connect_to_sock;
write_unlock_bh(&sock->sk->sk_callback_lock);
@@ -1267,11 +1133,7 @@ static struct socket *tcp_create_listen_sock(struct connection *con,
con->sock = NULL;
goto create_out;
}
- result = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
- (char *)&one, sizeof(one));
- if (result < 0) {
- log_print("Set keepalive failed: %d", result);
- }
+ sock_set_keepalive(sock->sk);
result = sock->ops->listen(sock, 5);
if (result < 0) {
@@ -1309,8 +1171,6 @@ static int sctp_listen_for_all(void)
struct socket *sock = NULL;
int result = -EINVAL;
struct connection *con = nodeid2con(0, GFP_NOFS);
- int bufsize = NEEDED_RMEM;
- int one = 1;
if (!con)
return -ENOMEM;
@@ -1324,15 +1184,8 @@ static int sctp_listen_for_all(void)
goto out;
}
- result = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVBUFFORCE,
- (char *)&bufsize, sizeof(bufsize));
- if (result)
- log_print("Error increasing buffer space on socket %d", result);
-
- result = kernel_setsockopt(sock, SOL_SCTP, SCTP_NODELAY, (char *)&one,
- sizeof(one));
- if (result < 0)
- log_print("Could not set SCTP NODELAY error %d\n", result);
+ sock_set_rcvbuf(sock->sk, NEEDED_RMEM);
+ sctp_sock_set_nodelay(sock->sk);
write_lock_bh(&sock->sk->sk_callback_lock);
/* Init con struct */
@@ -1340,7 +1193,7 @@ static int sctp_listen_for_all(void)
save_listen_callbacks(sock);
con->sock = sock;
con->sock->sk->sk_data_ready = lowcomms_data_ready;
- con->rx_action = sctp_accept_from_sock;
+ con->rx_action = accept_from_sock;
con->connect_action = sctp_connect_to_sock;
write_unlock_bh(&sock->sk->sk_callback_lock);
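
The lowcomms.c hunks (and the TCP_CORK change in transport.c above) replace kernel_setsockopt() with the typed in-kernel setsockopt helpers, dropping the (char *)&val casts and length arguments. The mapping used here:

	/* kernel_setsockopt(SOL_TCP,    TCP_NODELAY)     -> tcp_sock_set_nodelay(sk)     */
	/* kernel_setsockopt(SOL_TCP,    TCP_CORK)        -> tcp_sock_set_cork(sk, on)    */
	/* kernel_setsockopt(SOL_SCTP,   SCTP_NODELAY)    -> sctp_sock_set_nodelay(sk)    */
	/* kernel_setsockopt(SOL_SOCKET, SO_REUSEADDR)    -> sock_set_reuseaddr(sk)       */
	/* kernel_setsockopt(SOL_SOCKET, SO_KEEPALIVE)    -> sock_set_keepalive(sk)       */
	/* kernel_setsockopt(SOL_SOCKET, SO_RCVBUFFORCE)  -> sock_set_rcvbuf(sk, bytes)   */
	/* kernel_setsockopt(SOL_SOCKET, SO_SNDTIMEO_NEW) -> sock_set_sndtimeo(sk, secs)  */
	/* kernel_setsockopt(SOL_SCTP,   SCTP_SOCKOPT_BINDX_ADD)
						          -> sock_bind_add(sk, addr, len) */
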
diff --git a/fs/dlm/rcom.c b/fs/dlm/rcom.c
index e3d9f72c640d..4daf5dc2b51c 100644
--- a/fs/dlm/rcom.c
+++ b/fs/dlm/rcom.c
@@ -563,7 +563,7 @@ void dlm_receive_rcom(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid)
lock = 1;
reply = 1;
break;
- };
+ }
spin_lock(&ls->ls_recover_lock);
status = ls->ls_recover_status;
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index 5264bac75115..e5cefa90b1ce 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -46,7 +46,7 @@ struct dlm_lock_params32 {
__u32 bastaddr;
__u32 lksb;
char lvb[DLM_USER_LVB_LEN];
- char name[0];
+ char name[];
};
struct dlm_write_request32 {
diff --git a/fs/drop_caches.c b/fs/drop_caches.c
index dc1a1d5d825b..f00fcc4a4f72 100644
--- a/fs/drop_caches.c
+++ b/fs/drop_caches.c
@@ -47,7 +47,7 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
}
int drop_caches_sysctl_handler(struct ctl_table *table, int write,
- void __user *buffer, size_t *length, loff_t *ppos)
+ void *buffer, size_t *length, loff_t *ppos)
{
int ret;
diff --git a/fs/erofs/data.c b/fs/erofs/data.c
index d0542151e8c4..64b56c7df023 100644
--- a/fs/erofs/data.c
+++ b/fs/erofs/data.c
@@ -265,7 +265,7 @@ submit_bio_out:
*/
static int erofs_raw_access_readpage(struct file *file, struct page *page)
{
- erofs_off_t last_block;
+ erofs_off_t uninitialized_var(last_block);
struct bio *bio;
trace_erofs_readpage(page, true);
@@ -282,7 +282,7 @@ static int erofs_raw_access_readpage(struct file *file, struct page *page)
static void erofs_raw_access_readahead(struct readahead_control *rac)
{
- erofs_off_t last_block;
+ erofs_off_t uninitialized_var(last_block);
struct bio *bio = NULL;
struct page *page;
diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c
index 3350ab65d892..7dd4bbe9674f 100644
--- a/fs/erofs/inode.c
+++ b/fs/erofs/inode.c
@@ -311,27 +311,21 @@ int erofs_getattr(const struct path *path, struct kstat *stat,
const struct inode_operations erofs_generic_iops = {
.getattr = erofs_getattr,
-#ifdef CONFIG_EROFS_FS_XATTR
.listxattr = erofs_listxattr,
-#endif
.get_acl = erofs_get_acl,
};
const struct inode_operations erofs_symlink_iops = {
.get_link = page_get_link,
.getattr = erofs_getattr,
-#ifdef CONFIG_EROFS_FS_XATTR
.listxattr = erofs_listxattr,
-#endif
.get_acl = erofs_get_acl,
};
const struct inode_operations erofs_fast_symlink_iops = {
.get_link = simple_get_link,
.getattr = erofs_getattr,
-#ifdef CONFIG_EROFS_FS_XATTR
.listxattr = erofs_listxattr,
-#endif
.get_acl = erofs_get_acl,
};
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index 5eead7fdc7a6..1c077b7bb43d 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -46,6 +46,17 @@ typedef u64 erofs_off_t;
/* data type for filesystem-wide blocks number */
typedef u32 erofs_blk_t;
+struct erofs_fs_context {
+#ifdef CONFIG_EROFS_FS_ZIP
+ /* current strategy of how to use managed cache */
+ unsigned char cache_strategy;
+
+ /* threshold for synchronous decompression */
+ unsigned int max_sync_decompress_pages;
+#endif
+ unsigned int mount_opt;
+};
+
struct erofs_sb_info {
#ifdef CONFIG_EROFS_FS_ZIP
/* list for all registered superblocks, mainly for shrinker */
@@ -55,14 +66,8 @@ struct erofs_sb_info {
/* managed XArray arranged in physical block number */
struct xarray managed_pslots;
- /* threshold for decompression synchronously */
- unsigned int max_sync_decompress_pages;
-
unsigned int shrinker_run_no;
- /* current strategy of how to use managed cache */
- unsigned char cache_strategy;
-
/* pseudo inode to manage cached pages */
struct inode *managed_cache;
#endif /* CONFIG_EROFS_FS_ZIP */
@@ -88,7 +93,7 @@ struct erofs_sb_info {
u32 feature_compat;
u32 feature_incompat;
- unsigned int mount_opt;
+ struct erofs_fs_context ctx; /* options */
};
#define EROFS_SB(sb) ((struct erofs_sb_info *)(sb)->s_fs_info)
@@ -98,17 +103,17 @@ struct erofs_sb_info {
#define EROFS_MOUNT_XATTR_USER 0x00000010
#define EROFS_MOUNT_POSIX_ACL 0x00000020
-#define clear_opt(sbi, option) ((sbi)->mount_opt &= ~EROFS_MOUNT_##option)
-#define set_opt(sbi, option) ((sbi)->mount_opt |= EROFS_MOUNT_##option)
-#define test_opt(sbi, option) ((sbi)->mount_opt & EROFS_MOUNT_##option)
+#define clear_opt(ctx, option) ((ctx)->mount_opt &= ~EROFS_MOUNT_##option)
+#define set_opt(ctx, option) ((ctx)->mount_opt |= EROFS_MOUNT_##option)
+#define test_opt(ctx, option) ((ctx)->mount_opt & EROFS_MOUNT_##option)
-#ifdef CONFIG_EROFS_FS_ZIP
enum {
EROFS_ZIP_CACHE_DISABLED,
EROFS_ZIP_CACHE_READAHEAD,
EROFS_ZIP_CACHE_READAROUND
};
+#ifdef CONFIG_EROFS_FS_ZIP
#define EROFS_LOCKED_MAGIC (INT_MIN | 0xE0F510CCL)
/* basic unit of the workstation of a super_block */
diff --git a/fs/erofs/namei.c b/fs/erofs/namei.c
index 3abbecbf73de..52f201e03c62 100644
--- a/fs/erofs/namei.c
+++ b/fs/erofs/namei.c
@@ -244,9 +244,7 @@ static struct dentry *erofs_lookup(struct inode *dir,
const struct inode_operations erofs_dir_iops = {
.lookup = erofs_lookup,
.getattr = erofs_getattr,
-#ifdef CONFIG_EROFS_FS_XATTR
.listxattr = erofs_listxattr,
-#endif
.get_acl = erofs_get_acl,
};
diff --git a/fs/erofs/super.c b/fs/erofs/super.c
index b514c67e5fc2..7a13ffb07c23 100644
--- a/fs/erofs/super.c
+++ b/fs/erofs/super.c
@@ -10,6 +10,8 @@
#include <linux/parser.h>
#include <linux/seq_file.h>
#include <linux/crc32c.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
#include "xattr.h"
#define CREATE_TRACE_POINTS
@@ -192,53 +194,18 @@ out:
return ret;
}
-#ifdef CONFIG_EROFS_FS_ZIP
-static int erofs_build_cache_strategy(struct super_block *sb,
- substring_t *args)
-{
- struct erofs_sb_info *sbi = EROFS_SB(sb);
- const char *cs = match_strdup(args);
- int err = 0;
-
- if (!cs) {
- erofs_err(sb, "Not enough memory to store cache strategy");
- return -ENOMEM;
- }
-
- if (!strcmp(cs, "disabled")) {
- sbi->cache_strategy = EROFS_ZIP_CACHE_DISABLED;
- } else if (!strcmp(cs, "readahead")) {
- sbi->cache_strategy = EROFS_ZIP_CACHE_READAHEAD;
- } else if (!strcmp(cs, "readaround")) {
- sbi->cache_strategy = EROFS_ZIP_CACHE_READAROUND;
- } else {
- erofs_err(sb, "Unrecognized cache strategy \"%s\"", cs);
- err = -EINVAL;
- }
- kfree(cs);
- return err;
-}
-#else
-static int erofs_build_cache_strategy(struct super_block *sb,
- substring_t *args)
-{
- erofs_info(sb, "EROFS compression is disabled, so cache strategy is ignored");
- return 0;
-}
-#endif
-
/* set up default EROFS parameters */
-static void erofs_default_options(struct erofs_sb_info *sbi)
+static void erofs_default_options(struct erofs_fs_context *ctx)
{
#ifdef CONFIG_EROFS_FS_ZIP
- sbi->cache_strategy = EROFS_ZIP_CACHE_READAROUND;
- sbi->max_sync_decompress_pages = 3;
+ ctx->cache_strategy = EROFS_ZIP_CACHE_READAROUND;
+ ctx->max_sync_decompress_pages = 3;
#endif
#ifdef CONFIG_EROFS_FS_XATTR
- set_opt(sbi, XATTR_USER);
+ set_opt(ctx, XATTR_USER);
#endif
#ifdef CONFIG_EROFS_FS_POSIX_ACL
- set_opt(sbi, POSIX_ACL);
+ set_opt(ctx, POSIX_ACL);
#endif
}
@@ -251,73 +218,62 @@ enum {
Opt_err
};
-static match_table_t erofs_tokens = {
- {Opt_user_xattr, "user_xattr"},
- {Opt_nouser_xattr, "nouser_xattr"},
- {Opt_acl, "acl"},
- {Opt_noacl, "noacl"},
- {Opt_cache_strategy, "cache_strategy=%s"},
- {Opt_err, NULL}
+static const struct constant_table erofs_param_cache_strategy[] = {
+ {"disabled", EROFS_ZIP_CACHE_DISABLED},
+ {"readahead", EROFS_ZIP_CACHE_READAHEAD},
+ {"readaround", EROFS_ZIP_CACHE_READAROUND},
+ {}
};
-static int erofs_parse_options(struct super_block *sb, char *options)
-{
- substring_t args[MAX_OPT_ARGS];
- char *p;
- int err;
-
- if (!options)
- return 0;
-
- while ((p = strsep(&options, ","))) {
- int token;
+static const struct fs_parameter_spec erofs_fs_parameters[] = {
+ fsparam_flag_no("user_xattr", Opt_user_xattr),
+ fsparam_flag_no("acl", Opt_acl),
+ fsparam_enum("cache_strategy", Opt_cache_strategy,
+ erofs_param_cache_strategy),
+ {}
+};
- if (!*p)
- continue;
+static int erofs_fc_parse_param(struct fs_context *fc,
+ struct fs_parameter *param)
+{
+ struct erofs_fs_context *ctx __maybe_unused = fc->fs_private;
+ struct fs_parse_result result;
+ int opt;
- args[0].to = args[0].from = NULL;
- token = match_token(p, erofs_tokens, args);
+ opt = fs_parse(fc, erofs_fs_parameters, param, &result);
+ if (opt < 0)
+ return opt;
- switch (token) {
+ switch (opt) {
+ case Opt_user_xattr:
#ifdef CONFIG_EROFS_FS_XATTR
- case Opt_user_xattr:
- set_opt(EROFS_SB(sb), XATTR_USER);
- break;
- case Opt_nouser_xattr:
- clear_opt(EROFS_SB(sb), XATTR_USER);
- break;
+ if (result.boolean)
+ set_opt(ctx, XATTR_USER);
+ else
+ clear_opt(ctx, XATTR_USER);
#else
- case Opt_user_xattr:
- erofs_info(sb, "user_xattr options not supported");
- break;
- case Opt_nouser_xattr:
- erofs_info(sb, "nouser_xattr options not supported");
- break;
+ errorfc(fc, "{,no}user_xattr options not supported");
#endif
+ break;
+ case Opt_acl:
#ifdef CONFIG_EROFS_FS_POSIX_ACL
- case Opt_acl:
- set_opt(EROFS_SB(sb), POSIX_ACL);
- break;
- case Opt_noacl:
- clear_opt(EROFS_SB(sb), POSIX_ACL);
- break;
+ if (result.boolean)
+ set_opt(ctx, POSIX_ACL);
+ else
+ clear_opt(ctx, POSIX_ACL);
#else
- case Opt_acl:
- erofs_info(sb, "acl options not supported");
- break;
- case Opt_noacl:
- erofs_info(sb, "noacl options not supported");
- break;
+ errorfc(fc, "{,no}acl options not supported");
#endif
- case Opt_cache_strategy:
- err = erofs_build_cache_strategy(sb, args);
- if (err)
- return err;
- break;
- default:
- erofs_err(sb, "Unrecognized mount option \"%s\" or missing value", p);
- return -EINVAL;
- }
+ break;
+ case Opt_cache_strategy:
+#ifdef CONFIG_EROFS_FS_ZIP
+ ctx->cache_strategy = result.uint_32;
+#else
+ errorfc(fc, "compression not supported, cache_strategy ignored");
+#endif
+ break;
+ default:
+ return -ENOPARAM;
}
return 0;
}
@@ -381,10 +337,11 @@ static int erofs_init_managed_cache(struct super_block *sb)
static int erofs_init_managed_cache(struct super_block *sb) { return 0; }
#endif
-static int erofs_fill_super(struct super_block *sb, void *data, int silent)
+static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
{
struct inode *inode;
struct erofs_sb_info *sbi;
+ struct erofs_fs_context *ctx = fc->fs_private;
int err;
sb->s_magic = EROFS_SUPER_MAGIC;
@@ -408,22 +365,15 @@ static int erofs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_time_gran = 1;
sb->s_op = &erofs_sops;
-
-#ifdef CONFIG_EROFS_FS_XATTR
sb->s_xattr = erofs_xattr_handlers;
-#endif
- /* set erofs default mount options */
- erofs_default_options(sbi);
- err = erofs_parse_options(sb, data);
- if (err)
- return err;
-
- if (test_opt(sbi, POSIX_ACL))
+ if (test_opt(ctx, POSIX_ACL))
sb->s_flags |= SB_POSIXACL;
else
sb->s_flags &= ~SB_POSIXACL;
+ sbi->ctx = *ctx;
+
#ifdef CONFIG_EROFS_FS_ZIP
xa_init(&sbi->managed_pslots);
#endif
@@ -450,15 +400,58 @@ static int erofs_fill_super(struct super_block *sb, void *data, int silent)
if (err)
return err;
- erofs_info(sb, "mounted with opts: %s, root inode @ nid %llu.",
- (char *)data, ROOT_NID(sbi));
+ erofs_info(sb, "mounted with root inode @ nid %llu.", ROOT_NID(sbi));
+ return 0;
+}
+
+static int erofs_fc_get_tree(struct fs_context *fc)
+{
+ return get_tree_bdev(fc, erofs_fc_fill_super);
+}
+
+static int erofs_fc_reconfigure(struct fs_context *fc)
+{
+ struct super_block *sb = fc->root->d_sb;
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
+ struct erofs_fs_context *ctx = fc->fs_private;
+
+ DBG_BUGON(!sb_rdonly(sb));
+
+ if (test_opt(ctx, POSIX_ACL))
+ fc->sb_flags |= SB_POSIXACL;
+ else
+ fc->sb_flags &= ~SB_POSIXACL;
+
+ sbi->ctx = *ctx;
+
+ fc->sb_flags |= SB_RDONLY;
return 0;
}
-static struct dentry *erofs_mount(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data)
+static void erofs_fc_free(struct fs_context *fc)
{
- return mount_bdev(fs_type, flags, dev_name, data, erofs_fill_super);
+ kfree(fc->fs_private);
+}
+
+static const struct fs_context_operations erofs_context_ops = {
+ .parse_param = erofs_fc_parse_param,
+ .get_tree = erofs_fc_get_tree,
+ .reconfigure = erofs_fc_reconfigure,
+ .free = erofs_fc_free,
+};
+
+static int erofs_init_fs_context(struct fs_context *fc)
+{
+ fc->fs_private = kzalloc(sizeof(struct erofs_fs_context), GFP_KERNEL);
+ if (!fc->fs_private)
+ return -ENOMEM;
+
+ /* set default mount options */
+ erofs_default_options(fc->fs_private);
+
+ fc->ops = &erofs_context_ops;
+
+ return 0;
}
/*
@@ -497,7 +490,7 @@ static void erofs_put_super(struct super_block *sb)
static struct file_system_type erofs_fs_type = {
.owner = THIS_MODULE,
.name = "erofs",
- .mount = erofs_mount,
+ .init_fs_context = erofs_init_fs_context,
.kill_sb = erofs_kill_sb,
.fs_flags = FS_REQUIRES_DEV,
};
@@ -578,61 +571,37 @@ static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf)
static int erofs_show_options(struct seq_file *seq, struct dentry *root)
{
struct erofs_sb_info *sbi __maybe_unused = EROFS_SB(root->d_sb);
+ struct erofs_fs_context *ctx __maybe_unused = &sbi->ctx;
#ifdef CONFIG_EROFS_FS_XATTR
- if (test_opt(sbi, XATTR_USER))
+ if (test_opt(ctx, XATTR_USER))
seq_puts(seq, ",user_xattr");
else
seq_puts(seq, ",nouser_xattr");
#endif
#ifdef CONFIG_EROFS_FS_POSIX_ACL
- if (test_opt(sbi, POSIX_ACL))
+ if (test_opt(ctx, POSIX_ACL))
seq_puts(seq, ",acl");
else
seq_puts(seq, ",noacl");
#endif
#ifdef CONFIG_EROFS_FS_ZIP
- if (sbi->cache_strategy == EROFS_ZIP_CACHE_DISABLED) {
+ if (ctx->cache_strategy == EROFS_ZIP_CACHE_DISABLED)
seq_puts(seq, ",cache_strategy=disabled");
- } else if (sbi->cache_strategy == EROFS_ZIP_CACHE_READAHEAD) {
+ else if (ctx->cache_strategy == EROFS_ZIP_CACHE_READAHEAD)
seq_puts(seq, ",cache_strategy=readahead");
- } else if (sbi->cache_strategy == EROFS_ZIP_CACHE_READAROUND) {
+ else if (ctx->cache_strategy == EROFS_ZIP_CACHE_READAROUND)
seq_puts(seq, ",cache_strategy=readaround");
- }
#endif
return 0;
}
-static int erofs_remount(struct super_block *sb, int *flags, char *data)
-{
- struct erofs_sb_info *sbi = EROFS_SB(sb);
- unsigned int org_mnt_opt = sbi->mount_opt;
- int err;
-
- DBG_BUGON(!sb_rdonly(sb));
- err = erofs_parse_options(sb, data);
- if (err)
- goto out;
-
- if (test_opt(sbi, POSIX_ACL))
- sb->s_flags |= SB_POSIXACL;
- else
- sb->s_flags &= ~SB_POSIXACL;
-
- *flags |= SB_RDONLY;
- return 0;
-out:
- sbi->mount_opt = org_mnt_opt;
- return err;
-}
-
const struct super_operations erofs_sops = {
.put_super = erofs_put_super,
.alloc_inode = erofs_alloc_inode,
.free_inode = erofs_free_inode,
.statfs = erofs_statfs,
.show_options = erofs_show_options,
- .remount_fs = erofs_remount,
};
module_init(erofs_module_init);
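
erofs's switch from mount_bdev()/match_token() to the fs_context mount API follows the standard shape: a fs_parameter_spec table, a .parse_param callback, and fs_context_operations installed from .init_fs_context. A condensed, hypothetical skeleton of that wiring (the demo_* names are illustrative):

	#include <linux/fs_context.h>
	#include <linux/fs_parser.h>

	enum { Opt_verbose };

	static const struct fs_parameter_spec demo_fs_parameters[] = {
		fsparam_flag_no("verbose", Opt_verbose),
		{}
	};

	static int demo_fill_super(struct super_block *sb, struct fs_context *fc);

	static int demo_parse_param(struct fs_context *fc, struct fs_parameter *param)
	{
		struct fs_parse_result result;
		int opt = fs_parse(fc, demo_fs_parameters, param, &result);

		if (opt < 0)
			return opt;
		/* stash result.boolean etc. in fc->fs_private here */
		return 0;
	}

	static int demo_get_tree(struct fs_context *fc)
	{
		return get_tree_bdev(fc, demo_fill_super);
	}

	static const struct fs_context_operations demo_context_ops = {
		.parse_param	= demo_parse_param,
		.get_tree	= demo_get_tree,
	};

	static int demo_init_fs_context(struct fs_context *fc)
	{
		fc->ops = &demo_context_ops;	/* set via .init_fs_context */
		return 0;
	}
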
diff --git a/fs/erofs/xattr.c b/fs/erofs/xattr.c
index b766c3ee5fa8..87e437e7b34f 100644
--- a/fs/erofs/xattr.c
+++ b/fs/erofs/xattr.c
@@ -422,7 +422,7 @@ static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
static bool erofs_xattr_user_list(struct dentry *dentry)
{
- return test_opt(EROFS_SB(dentry->d_sb), XATTR_USER);
+ return test_opt(&EROFS_SB(dentry->d_sb)->ctx, XATTR_USER);
}
static bool erofs_xattr_trusted_list(struct dentry *dentry)
@@ -469,7 +469,7 @@ static int erofs_xattr_generic_get(const struct xattr_handler *handler,
switch (handler->flags) {
case EROFS_XATTR_INDEX_USER:
- if (!test_opt(sbi, XATTR_USER))
+ if (!test_opt(&sbi->ctx, XATTR_USER))
return -EOPNOTSUPP;
break;
case EROFS_XATTR_INDEX_TRUSTED:
diff --git a/fs/erofs/xattr.h b/fs/erofs/xattr.h
index 50966f1c676e..e4e5093f012c 100644
--- a/fs/erofs/xattr.h
+++ b/fs/erofs/xattr.h
@@ -76,11 +76,8 @@ static inline int erofs_getxattr(struct inode *inode, int index,
return -EOPNOTSUPP;
}
-static inline ssize_t erofs_listxattr(struct dentry *dentry,
- char *buffer, size_t buffer_size)
-{
- return -EOPNOTSUPP;
-}
+#define erofs_listxattr (NULL)
+#define erofs_xattr_handlers (NULL)
#endif /* !CONFIG_EROFS_FS_XATTR */
#ifdef CONFIG_EROFS_FS_POSIX_ACL
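
Defining erofs_listxattr and erofs_xattr_handlers to NULL for the !CONFIG_EROFS_FS_XATTR case is what lets the inode.c and namei.c hunks above drop their #ifdef guards: the VFS checks ->listxattr for NULL before calling it, and a NULL sb->s_xattr makes the generic xattr paths fail with -EOPNOTSUPP, so one plain initializer works in both configurations:

	const struct inode_operations erofs_generic_iops = {
		.getattr   = erofs_getattr,
		.listxattr = erofs_listxattr,	/* NULL when xattrs are off */
		.get_acl   = erofs_get_acl,
	};
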
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 187f93b4900e..be50a4d9d273 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -615,7 +615,7 @@ restart_now:
goto err_out;
/* preload all compressed pages (maybe downgrade role if necessary) */
- if (should_alloc_managed_pages(fe, sbi->cache_strategy, map->m_la))
+ if (should_alloc_managed_pages(fe, sbi->ctx.cache_strategy, map->m_la))
cache_strategy = DELAYEDALLOC;
else
cache_strategy = DONTALLOC;
@@ -1302,7 +1302,7 @@ static int z_erofs_readpage(struct file *file, struct page *page)
static bool should_decompress_synchronously(struct erofs_sb_info *sbi,
unsigned int nr)
{
- return nr <= sbi->max_sync_decompress_pages;
+ return nr <= sbi->ctx.max_sync_decompress_pages;
}
static void z_erofs_readahead(struct readahead_control *rac)
diff --git a/fs/eventfd.c b/fs/eventfd.c
index 78e41c7c3d05..df466ef81ddd 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -23,6 +23,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/idr.h>
+#include <linux/uio.h>
DEFINE_PER_CPU(int, eventfd_wake_count);
@@ -216,32 +217,32 @@ int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *w
}
EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue);
-static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
- loff_t *ppos)
+static ssize_t eventfd_read(struct kiocb *iocb, struct iov_iter *to)
{
+ struct file *file = iocb->ki_filp;
struct eventfd_ctx *ctx = file->private_data;
- ssize_t res;
__u64 ucnt = 0;
DECLARE_WAITQUEUE(wait, current);
- if (count < sizeof(ucnt))
+ if (iov_iter_count(to) < sizeof(ucnt))
return -EINVAL;
-
spin_lock_irq(&ctx->wqh.lock);
- res = -EAGAIN;
- if (ctx->count > 0)
- res = sizeof(ucnt);
- else if (!(file->f_flags & O_NONBLOCK)) {
+ if (!ctx->count) {
+ if ((file->f_flags & O_NONBLOCK) ||
+ (iocb->ki_flags & IOCB_NOWAIT)) {
+ spin_unlock_irq(&ctx->wqh.lock);
+ return -EAGAIN;
+ }
__add_wait_queue(&ctx->wqh, &wait);
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
- if (ctx->count > 0) {
- res = sizeof(ucnt);
+ if (ctx->count)
break;
- }
if (signal_pending(current)) {
- res = -ERESTARTSYS;
- break;
+ __remove_wait_queue(&ctx->wqh, &wait);
+ __set_current_state(TASK_RUNNING);
+ spin_unlock_irq(&ctx->wqh.lock);
+ return -ERESTARTSYS;
}
spin_unlock_irq(&ctx->wqh.lock);
schedule();
@@ -250,17 +251,14 @@ static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
__remove_wait_queue(&ctx->wqh, &wait);
__set_current_state(TASK_RUNNING);
}
- if (likely(res > 0)) {
- eventfd_ctx_do_read(ctx, &ucnt);
- if (waitqueue_active(&ctx->wqh))
- wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
- }
+ eventfd_ctx_do_read(ctx, &ucnt);
+ if (waitqueue_active(&ctx->wqh))
+ wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
spin_unlock_irq(&ctx->wqh.lock);
-
- if (res > 0 && put_user(ucnt, (__u64 __user *)buf))
+ if (unlikely(copy_to_iter(&ucnt, sizeof(ucnt), to) != sizeof(ucnt)))
return -EFAULT;
- return res;
+ return sizeof(ucnt);
}
static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count,
@@ -329,7 +327,7 @@ static const struct file_operations eventfd_fops = {
#endif
.release = eventfd_release,
.poll = eventfd_poll,
- .read = eventfd_read,
+ .read_iter = eventfd_read,
.write = eventfd_write,
.llseek = noop_llseek,
};
@@ -406,6 +404,7 @@ EXPORT_SYMBOL_GPL(eventfd_ctx_fileget);
static int do_eventfd(unsigned int count, int flags)
{
struct eventfd_ctx *ctx;
+ struct file *file;
int fd;
/* Check the EFD_* constants for consistency. */
@@ -425,11 +424,24 @@ static int do_eventfd(unsigned int count, int flags)
ctx->flags = flags;
ctx->id = ida_simple_get(&eventfd_ida, 0, 0, GFP_KERNEL);
- fd = anon_inode_getfd("[eventfd]", &eventfd_fops, ctx,
- O_RDWR | (flags & EFD_SHARED_FCNTL_FLAGS));
+ flags &= EFD_SHARED_FCNTL_FLAGS;
+ flags |= O_RDWR;
+ fd = get_unused_fd_flags(flags);
if (fd < 0)
- eventfd_free_ctx(ctx);
+ goto err;
+
+ file = anon_inode_getfile("[eventfd]", &eventfd_fops, ctx, flags);
+ if (IS_ERR(file)) {
+ put_unused_fd(fd);
+ fd = PTR_ERR(file);
+ goto err;
+ }
+ file->f_mode |= FMODE_NOWAIT;
+ fd_install(fd, file);
+ return fd;
+err:
+ eventfd_free_ctx(ctx);
return fd;
}
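
Besides the ->read_iter conversion, the FMODE_NOWAIT flag set above lets nonblocking submitters (IOCB_NOWAIT, as used by io_uring and preadv2() with RWF_NOWAIT) read an eventfd without punting to a worker. A user-space sketch of the call this enables:

	#define _GNU_SOURCE
	#include <sys/uio.h>
	#include <errno.h>
	#include <stdint.h>

	/* returns 1 with *val set, 0 if no events pending, -1 on error */
	static int try_read_event(int efd, uint64_t *val)
	{
		struct iovec iov = { .iov_base = val, .iov_len = sizeof(*val) };
		/* offset -1: eventfds are not seekable; RWF_NOWAIT means
		 * -EAGAIN instead of blocking (requires FMODE_NOWAIT) */
		ssize_t n = preadv2(efd, &iov, 1, -1, RWF_NOWAIT);

		if (n == sizeof(*val))
			return 1;
		return (n < 0 && errno == EAGAIN) ? 0 : -1;
	}
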
diff --git a/fs/exec.c b/fs/exec.c
index 2c465119affc..e6e8a9a70327 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -72,6 +72,8 @@
#include <trace/events/sched.h>
+static int bprm_creds_from_file(struct linux_binprm *bprm);
+
int suid_dumpable = 0;
static LIST_HEAD(formats);
@@ -250,7 +252,7 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
return -ENOMEM;
vma_set_anonymous(vma);
- if (down_write_killable(&mm->mmap_sem)) {
+ if (mmap_write_lock_killable(mm)) {
err = -EINTR;
goto err_free;
}
@@ -272,11 +274,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
goto err;
mm->stack_vm = mm->total_vm = 1;
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
bprm->p = vma->vm_end - sizeof(void *);
return 0;
err:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
err_free:
bprm->vma = NULL;
vm_area_free(vma);
@@ -588,24 +590,48 @@ out:
}
/*
- * Like copy_strings, but get argv and its values from kernel memory.
* Copy an argument/environment string from the kernel to the process's stack.
*/
-int copy_strings_kernel(int argc, const char *const *__argv,
- struct linux_binprm *bprm)
+int copy_string_kernel(const char *arg, struct linux_binprm *bprm)
{
- int r;
- mm_segment_t oldfs = get_fs();
- struct user_arg_ptr argv = {
- .ptr.native = (const char __user *const __user *)__argv,
- };
+ int len = strnlen(arg, MAX_ARG_STRLEN) + 1 /* terminating NUL */;
+ unsigned long pos = bprm->p;
+
+ if (len == 0)
+ return -EFAULT;
+ if (!valid_arg_len(bprm, len))
+ return -E2BIG;
+
+ /* We're going to work our way backwards. */
+ arg += len;
+ bprm->p -= len;
+ if (IS_ENABLED(CONFIG_MMU) && bprm->p < bprm->argmin)
+ return -E2BIG;
- set_fs(KERNEL_DS);
- r = copy_strings(argc, argv, bprm);
- set_fs(oldfs);
+ while (len > 0) {
+ unsigned int bytes_to_copy = min_t(unsigned int, len,
+ min_not_zero(offset_in_page(pos), PAGE_SIZE));
+ struct page *page;
+ char *kaddr;
+
+ pos -= bytes_to_copy;
+ arg -= bytes_to_copy;
+ len -= bytes_to_copy;
+
+ page = get_arg_page(bprm, pos, 1);
+ if (!page)
+ return -E2BIG;
+ kaddr = kmap_atomic(page);
+ flush_arg_page(bprm, pos & PAGE_MASK, page);
+ memcpy(kaddr + offset_in_page(pos), arg, bytes_to_copy);
+ flush_kernel_dcache_page(page);
+ kunmap_atomic(kaddr);
+ put_arg_page(page);
+ }
- return r;
+ return 0;
}
-EXPORT_SYMBOL(copy_strings_kernel);
+EXPORT_SYMBOL(copy_string_kernel);
#ifdef CONFIG_MMU
@@ -737,7 +763,7 @@ int setup_arg_pages(struct linux_binprm *bprm,
bprm->loader -= stack_shift;
bprm->exec -= stack_shift;
- if (down_write_killable(&mm->mmap_sem))
+ if (mmap_write_lock_killable(mm))
return -EINTR;
vm_flags = VM_STACK_FLAGS;
@@ -799,7 +825,7 @@ int setup_arg_pages(struct linux_binprm *bprm,
ret = -EFAULT;
out_unlock:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
return ret;
}
EXPORT_SYMBOL(setup_arg_pages);
@@ -1027,14 +1053,17 @@ out:
}
EXPORT_SYMBOL_GPL(kernel_read_file_from_fd);
+#if defined(CONFIG_HAVE_AOUT) || defined(CONFIG_BINFMT_FLAT) || \
+ defined(CONFIG_BINFMT_ELF_FDPIC)
ssize_t read_code(struct file *file, unsigned long addr, loff_t pos, size_t len)
{
ssize_t res = vfs_read(file, (void __user *)addr, len, &pos);
if (res > 0)
- flush_icache_range(addr, addr + len);
+ flush_icache_user_range(addr, addr + len);
return res;
}
EXPORT_SYMBOL(read_code);
+#endif
/*
* Maps the mm_struct mm into the current task struct.
@@ -1051,22 +1080,23 @@ static int exec_mmap(struct mm_struct *mm)
tsk = current;
old_mm = current->mm;
exec_mm_release(tsk, old_mm);
+ if (old_mm)
+ sync_mm_rss(old_mm);
ret = mutex_lock_killable(&tsk->signal->exec_update_mutex);
if (ret)
return ret;
if (old_mm) {
- sync_mm_rss(old_mm);
/*
* Make sure that if there is a core dump in progress
* for the old mm, we get out and die instead of going
- * through with the exec. We must hold mmap_sem around
+ * through with the exec. We must hold mmap_lock around
* checking core_state and changing tsk->mm.
*/
- down_read(&old_mm->mmap_sem);
+ mmap_read_lock(old_mm);
if (unlikely(old_mm->core_state)) {
- up_read(&old_mm->mmap_sem);
+ mmap_read_unlock(old_mm);
mutex_unlock(&tsk->signal->exec_update_mutex);
return -EINTR;
}
@@ -1082,7 +1112,7 @@ static int exec_mmap(struct mm_struct *mm)
vmacache_flush(tsk);
task_unlock(tsk);
if (old_mm) {
- up_read(&old_mm->mmap_sem);
+ mmap_read_unlock(old_mm);
BUG_ON(active_mm != old_mm);
setmax_mm_hiwater_rss(&tsk->signal->maxrss, old_mm);
mm_update_next_owner(old_mm);
@@ -1093,12 +1123,6 @@ static int exec_mmap(struct mm_struct *mm)
return 0;
}
-/*
- * This function makes sure the current process has its own signal table,
- * so that flush_signal_handlers can later reset the handlers without
- * disturbing other processes. (Other processes might share the signal
- * table via the CLONE_SIGHAND option to clone().)
- */
static int de_thread(struct task_struct *tsk)
{
struct signal_struct *sig = tsk->signal;
@@ -1176,7 +1200,6 @@ static int de_thread(struct task_struct *tsk)
tsk->start_boottime = leader->start_boottime;
BUG_ON(!same_thread_group(leader, tsk));
- BUG_ON(has_group_leader_pid(tsk));
/*
* An exec() starts a new thread group with the
* TGID of the previous thread group. Rehash the
@@ -1186,11 +1209,8 @@ static int de_thread(struct task_struct *tsk)
/* Become a process group leader with the old leader's pid.
* The old leader becomes a thread of the this thread group.
- * Note: The old leader also uses this pid until release_task
- * is called. Odd but simple and correct.
*/
- tsk->pid = leader->pid;
- change_pid(tsk, PIDTYPE_PID, task_pid(leader));
+ exchange_tids(tsk, leader);
transfer_pid(leader, tsk, PIDTYPE_TGID);
transfer_pid(leader, tsk, PIDTYPE_PGID);
transfer_pid(leader, tsk, PIDTYPE_SID);
@@ -1240,6 +1260,12 @@ killed:
}
+/*
+ * This function makes sure the current process has its own signal table,
+ * so that flush_signal_handlers can later reset the handlers without
+ * disturbing other processes. (Other processes might share the signal
+ * table via the CLONE_SIGHAND option to clone().)
+ */
static int unshare_sighand(struct task_struct *me)
{
struct sighand_struct *oldsighand = me->sighand;
@@ -1296,13 +1322,23 @@ void __set_task_comm(struct task_struct *tsk, const char *buf, bool exec)
* Calling this is the point of no return. None of the failures will be
* seen by userspace since either the process is already taking a fatal
* signal (via de_thread() or coredump), or will have SEGV raised
- * (after exec_mmap()) by search_binary_handlers (see below).
+ * (after exec_mmap()) by search_binary_handler (see below).
*/
-int flush_old_exec(struct linux_binprm * bprm)
+int begin_new_exec(struct linux_binprm * bprm)
{
struct task_struct *me = current;
int retval;
+ /* Once we are committed compute the creds */
+ retval = bprm_creds_from_file(bprm);
+ if (retval)
+ return retval;
+
+ /*
+ * Ensure all future errors are fatal.
+ */
+ bprm->point_of_no_return = true;
+
/*
* Make this the only thread in the thread group.
*/
@@ -1317,7 +1353,10 @@ int flush_old_exec(struct linux_binprm * bprm)
*/
set_mm_exe_file(bprm->mm, bprm->file);
+ /* If the binary is not readable then enforce mm->dumpable=0 */
would_dump(bprm, bprm->file);
+ if (bprm->have_execfd)
+ would_dump(bprm, bprm->executable);
/*
* Release all of the old mmap stuff
@@ -1327,13 +1366,6 @@ int flush_old_exec(struct linux_binprm * bprm)
if (retval)
goto out;
- /*
- * After setting bprm->called_exec_mmap (to mark that current is
- * using the prepared mm now), we have nothing left of the original
- * process. If anything from here on returns an error, the check
- * in search_binary_handler() will SEGV current.
- */
- bprm->called_exec_mmap = 1;
bprm->mm = NULL;
#ifdef CONFIG_POSIX_TIMERS
@@ -1346,7 +1378,7 @@ int flush_old_exec(struct linux_binprm * bprm)
*/
retval = unshare_sighand(me);
if (retval)
- goto out;
+ goto out_unlock;
set_fs(USER_DS);
me->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD |
@@ -1361,12 +1393,84 @@ int flush_old_exec(struct linux_binprm * bprm)
* undergoing exec(2).
*/
do_close_on_exec(me->files);
+
+ if (bprm->secureexec) {
+ /* Make sure parent cannot signal privileged process. */
+ me->pdeath_signal = 0;
+
+ /*
+ * For secureexec, reset the stack limit to sane default to
+ * avoid bad behavior from the prior rlimits. This has to
+ * happen before arch_pick_mmap_layout(), which examines
+ * RLIMIT_STACK, but after the point of no return to avoid
+ * needing to clean up the change on failure.
+ */
+ if (bprm->rlim_stack.rlim_cur > _STK_LIM)
+ bprm->rlim_stack.rlim_cur = _STK_LIM;
+ }
+
+ me->sas_ss_sp = me->sas_ss_size = 0;
+
+ /*
+ * Figure out dumpability. Note that this checking only of current
+ * is wrong, but userspace depends on it. This should be testing
+ * bprm->secureexec instead.
+ */
+ if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP ||
+ !(uid_eq(current_euid(), current_uid()) &&
+ gid_eq(current_egid(), current_gid())))
+ set_dumpable(current->mm, suid_dumpable);
+ else
+ set_dumpable(current->mm, SUID_DUMP_USER);
+
+ perf_event_exec();
+ __set_task_comm(me, kbasename(bprm->filename), true);
+
+ /* An exec changes our domain. We are no longer part of the thread
+ group */
+ WRITE_ONCE(me->self_exec_id, me->self_exec_id + 1);
+ flush_signal_handlers(me, 0);
+
+ /*
+ * install the new credentials for this executable
+ */
+ security_bprm_committing_creds(bprm);
+
+ commit_creds(bprm->cred);
+ bprm->cred = NULL;
+
+ /*
+ * Disable monitoring for regular users
+ * when executing setuid binaries. Must
+ * wait until new credentials are committed
+ * by commit_creds() above
+ */
+ if (get_dumpable(me->mm) != SUID_DUMP_USER)
+ perf_event_exit_task(me);
+ /*
+ * cred_guard_mutex must be held at least to this point to prevent
+ * ptrace_attach() from altering our determination of the task's
+ * credentials; any time after this it may be unlocked.
+ */
+ security_bprm_committed_creds(bprm);
+
+ /* Pass the opened binary to the interpreter. */
+ if (bprm->have_execfd) {
+ retval = get_unused_fd_flags(0);
+ if (retval < 0)
+ goto out_unlock;
+ fd_install(retval, bprm->executable);
+ bprm->executable = NULL;
+ bprm->execfd = retval;
+ }
return 0;
+out_unlock:
+ mutex_unlock(&me->signal->exec_update_mutex);
out:
return retval;
}
-EXPORT_SYMBOL(flush_old_exec);
+EXPORT_SYMBOL(begin_new_exec);
void would_dump(struct linux_binprm *bprm, struct file *file)
{
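
The begin_new_exec() rework above folds cred computation (bprm_creds_from_file()) and the old install_exec_creds()/flush_old_exec() work into one point-of-no-return function. Roughly the order a binfmt handler follows after this series, sketched with names from the hunks above:

	static int demo_load_binary(struct linux_binprm *bprm)	/* sketch */
	{
		int ret;

		/* ... validate headers from bprm->buf ... */

		ret = begin_new_exec(bprm);	/* creds committed; failures
						 * past this point are fatal */
		if (ret)
			return ret;

		setup_new_exec(bprm);		/* personality-dependent setup;
						 * drops exec_update_mutex and
						 * cred_guard_mutex */
		/* ... map segments, finalize_exec(), start_thread() ... */
		return 0;
	}
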
@@ -1391,58 +1495,20 @@ EXPORT_SYMBOL(would_dump);
void setup_new_exec(struct linux_binprm * bprm)
{
- /*
- * Once here, prepare_binrpm() will not be called any more, so
- * the final state of setuid/setgid/fscaps can be merged into the
- * secureexec flag.
- */
- bprm->secureexec |= bprm->cap_elevated;
-
- if (bprm->secureexec) {
- /* Make sure parent cannot signal privileged process. */
- current->pdeath_signal = 0;
-
- /*
- * For secureexec, reset the stack limit to sane default to
- * avoid bad behavior from the prior rlimits. This has to
- * happen before arch_pick_mmap_layout(), which examines
- * RLIMIT_STACK, but after the point of no return to avoid
- * needing to clean up the change on failure.
- */
- if (bprm->rlim_stack.rlim_cur > _STK_LIM)
- bprm->rlim_stack.rlim_cur = _STK_LIM;
- }
-
- arch_pick_mmap_layout(current->mm, &bprm->rlim_stack);
-
- current->sas_ss_sp = current->sas_ss_size = 0;
+ /* Setup things that can depend upon the personality */
+ struct task_struct *me = current;
- /*
- * Figure out dumpability. Note that this checking only of current
- * is wrong, but userspace depends on it. This should be testing
- * bprm->secureexec instead.
- */
- if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP ||
- !(uid_eq(current_euid(), current_uid()) &&
- gid_eq(current_egid(), current_gid())))
- set_dumpable(current->mm, suid_dumpable);
- else
- set_dumpable(current->mm, SUID_DUMP_USER);
+ arch_pick_mmap_layout(me->mm, &bprm->rlim_stack);
arch_setup_new_exec();
- perf_event_exec();
- __set_task_comm(current, kbasename(bprm->filename), true);
/* Set the new mm task size. We have to do that late because it may
* depend on TIF_32BIT which is only updated in flush_thread() on
* some architectures like powerpc
*/
- current->mm->task_size = TASK_SIZE;
-
- /* An exec changes our domain. We are no longer part of the thread
- group */
- WRITE_ONCE(current->self_exec_id, current->self_exec_id + 1);
- flush_signal_handlers(current, 0);
+ me->mm->task_size = TASK_SIZE;
+ mutex_unlock(&me->signal->exec_update_mutex);
+ mutex_unlock(&me->signal->cred_guard_mutex);
}
EXPORT_SYMBOL(setup_new_exec);
@@ -1458,7 +1524,7 @@ EXPORT_SYMBOL(finalize_exec);
/*
* Prepare credentials and lock ->cred_guard_mutex.
- * install_exec_creds() commits the new creds and drops the lock.
+ * setup_new_exec() commits the new creds and drops the lock.
* Or, if exec fails before, free_bprm() should release ->cred and
* and unlock.
*/
@@ -1479,8 +1545,6 @@ static void free_bprm(struct linux_binprm *bprm)
{
free_arg_pages(bprm);
if (bprm->cred) {
- if (bprm->called_exec_mmap)
- mutex_unlock(&current->signal->exec_update_mutex);
mutex_unlock(&current->signal->cred_guard_mutex);
abort_creds(bprm->cred);
}
@@ -1488,6 +1552,8 @@ static void free_bprm(struct linux_binprm *bprm)
allow_write_access(bprm->file);
fput(bprm->file);
}
+ if (bprm->executable)
+ fput(bprm->executable);
/* If a binfmt changed the interp, free it. */
if (bprm->interp != bprm->filename)
kfree(bprm->interp);
@@ -1507,35 +1573,6 @@ int bprm_change_interp(const char *interp, struct linux_binprm *bprm)
EXPORT_SYMBOL(bprm_change_interp);
/*
- * install the new credentials for this executable
- */
-void install_exec_creds(struct linux_binprm *bprm)
-{
- security_bprm_committing_creds(bprm);
-
- commit_creds(bprm->cred);
- bprm->cred = NULL;
-
- /*
- * Disable monitoring for regular users
- * when executing setuid binaries. Must
- * wait until new credentials are committed
- * by commit_creds() above
- */
- if (get_dumpable(current->mm) != SUID_DUMP_USER)
- perf_event_exit_task(current);
- /*
- * cred_guard_mutex must be held at least to this point to prevent
- * ptrace_attach() from altering our determination of the task's
- * credentials; any time after this it may be unlocked.
- */
- security_bprm_committed_creds(bprm);
- mutex_unlock(&current->signal->exec_update_mutex);
- mutex_unlock(&current->signal->cred_guard_mutex);
-}
-EXPORT_SYMBOL(install_exec_creds);
-
-/*
* determine how safe it is to execute the proposed program
* - the caller must hold ->cred_guard_mutex to protect against
* PTRACE_ATTACH or seccomp thread-sync
@@ -1572,29 +1609,21 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
spin_unlock(&p->fs->lock);
}
-static void bprm_fill_uid(struct linux_binprm *bprm)
+static void bprm_fill_uid(struct linux_binprm *bprm, struct file *file)
{
+ /* Handle suid and sgid on files */
struct inode *inode;
unsigned int mode;
kuid_t uid;
kgid_t gid;
- /*
- * Since this can be called multiple times (via prepare_binprm),
- * we must clear any previous work done when setting set[ug]id
- * bits from any earlier bprm->file uses (for example when run
- * first for a setuid script then again for its interpreter).
- */
- bprm->cred->euid = current_euid();
- bprm->cred->egid = current_egid();
-
- if (!mnt_may_suid(bprm->file->f_path.mnt))
+ if (!mnt_may_suid(file->f_path.mnt))
return;
if (task_no_new_privs(current))
return;
- inode = bprm->file->f_path.dentry->d_inode;
+ inode = file->f_path.dentry->d_inode;
mode = READ_ONCE(inode->i_mode);
if (!(mode & (S_ISUID|S_ISGID)))
return;
@@ -1625,30 +1654,31 @@ static void bprm_fill_uid(struct linux_binprm *bprm)
}
/*
+ * Compute bprm->cred based upon the final binary.
+ */
+static int bprm_creds_from_file(struct linux_binprm *bprm)
+{
+ /* Compute creds based on which file? */
+ struct file *file = bprm->execfd_creds ? bprm->executable : bprm->file;
+
+ bprm_fill_uid(bprm, file);
+ return security_bprm_creds_from_file(bprm, file);
+}
+
+/*
* Fill the binprm structure from the inode.
- * Check permissions, then read the first BINPRM_BUF_SIZE bytes
+ * Read the first BINPRM_BUF_SIZE bytes
*
* This may be called multiple times for binary chains (scripts for example).
*/
-int prepare_binprm(struct linux_binprm *bprm)
+static int prepare_binprm(struct linux_binprm *bprm)
{
- int retval;
loff_t pos = 0;
- bprm_fill_uid(bprm);
-
- /* fill in binprm security blob */
- retval = security_bprm_set_creds(bprm);
- if (retval)
- return retval;
- bprm->called_set_creds = 1;
-
memset(bprm->buf, 0, BINPRM_BUF_SIZE);
return kernel_read(bprm->file, bprm->buf, BINPRM_BUF_SIZE, &pos);
}
-EXPORT_SYMBOL(prepare_binprm);
-
/*
* Arguments are '\0' separated strings found at the location bprm->p
points to; chop off the first by relocating bprm->p to right after
@@ -1694,15 +1724,15 @@ EXPORT_SYMBOL(remove_arg_zero);
/*
* cycle the list of binary formats handler, until one recognizes the image
*/
-int search_binary_handler(struct linux_binprm *bprm)
+static int search_binary_handler(struct linux_binprm *bprm)
{
bool need_retry = IS_ENABLED(CONFIG_MODULES);
struct linux_binfmt *fmt;
int retval;
- /* This allows 4 levels of binfmt rewrites before failing hard. */
- if (bprm->recursion_depth > 5)
- return -ELOOP;
+ retval = prepare_binprm(bprm);
+ if (retval < 0)
+ return retval;
retval = security_bprm_check(bprm);
if (retval)
@@ -1716,19 +1746,11 @@ int search_binary_handler(struct linux_binprm *bprm)
continue;
read_unlock(&binfmt_lock);
- bprm->recursion_depth++;
retval = fmt->load_binary(bprm);
- bprm->recursion_depth--;
read_lock(&binfmt_lock);
put_binfmt(fmt);
- if (retval < 0 && bprm->called_exec_mmap) {
- /* we got to flush_old_exec() and failed after it */
- read_unlock(&binfmt_lock);
- force_sigsegv(SIGSEGV);
- return retval;
- }
- if (retval != -ENOEXEC || !bprm->file) {
+ if (bprm->point_of_no_return || (retval != -ENOEXEC)) {
read_unlock(&binfmt_lock);
return retval;
}
@@ -1747,12 +1769,11 @@ int search_binary_handler(struct linux_binprm *bprm)
return retval;
}
-EXPORT_SYMBOL(search_binary_handler);
static int exec_binprm(struct linux_binprm *bprm)
{
pid_t old_pid, old_vpid;
- int ret;
+ int ret, depth;
/* Need to fetch pid before load_binary changes it */
old_pid = current->pid;
@@ -1760,15 +1781,38 @@ static int exec_binprm(struct linux_binprm *bprm)
old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent));
rcu_read_unlock();
- ret = search_binary_handler(bprm);
- if (ret >= 0) {
- audit_bprm(bprm);
- trace_sched_process_exec(current, old_pid, bprm);
- ptrace_event(PTRACE_EVENT_EXEC, old_vpid);
- proc_exec_connector(current);
+ /* This allows 4 levels of binfmt rewrites before failing hard. */
+ for (depth = 0;; depth++) {
+ struct file *exec;
+ if (depth > 5)
+ return -ELOOP;
+
+ ret = search_binary_handler(bprm);
+ if (ret < 0)
+ return ret;
+ if (!bprm->interpreter)
+ break;
+
+ exec = bprm->file;
+ bprm->file = bprm->interpreter;
+ bprm->interpreter = NULL;
+
+ allow_write_access(exec);
+ if (unlikely(bprm->have_execfd)) {
+ if (bprm->executable) {
+ fput(exec);
+ return -ENOEXEC;
+ }
+ bprm->executable = exec;
+ } else
+ fput(exec);
}
- return ret;
+ audit_bprm(bprm);
+ trace_sched_process_exec(current, old_pid, bprm);
+ ptrace_event(PTRACE_EVENT_EXEC, old_vpid);
+ proc_exec_connector(current);
+ return 0;
}
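The loop above replaces the old recursion through search_binary_handler(). For a typical "#!/bin/sh" script the iterations run roughly as follows (an illustrative trace, assuming the usual binfmt_script and binfmt_elf handlers are registered):

	depth 0: binfmt_script parses "#!", opens /bin/sh, stores it in
	         bprm->interpreter and returns 0; exec_binprm() then swaps
	         bprm->file for the interpreter, keeping the script file in
	         bprm->executable when have_execfd is set.
	depth 1: binfmt_elf loads /bin/sh and leaves bprm->interpreter NULL,
	         so the loop breaks and the audit/trace/ptrace events fire
	         exactly once, for the final binary.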
/*
@@ -1861,11 +1905,12 @@ static int __do_execve_file(int fd, struct filename *filename,
if (retval < 0)
goto out;
- retval = prepare_binprm(bprm);
- if (retval < 0)
+ /* Set the unchanging part of bprm->cred */
+ retval = security_bprm_creds_for_exec(bprm);
+ if (retval)
goto out;
- retval = copy_strings_kernel(1, &bprm->filename, bprm);
+ retval = copy_string_kernel(bprm->filename, bprm);
if (retval < 0)
goto out;
@@ -1897,6 +1942,14 @@ static int __do_execve_file(int fd, struct filename *filename,
return retval;
out:
+ /*
+ * If past the point of no return, ensure the code never
+ * returns to the userspace process. Use an existing fatal
+ * signal if present, otherwise terminate the process with
+ * SIGSEGV.
+ */
+ if (bprm->point_of_no_return && !fatal_signal_pending(current))
+ force_sigsegv(SIGSEGV);
if (bprm->mm) {
acct_arg_size(bprm, 0);
mmput(bprm->mm);
diff --git a/fs/exfat/Kconfig b/fs/exfat/Kconfig
index 2d3636dc5b8c..5a65071b5ecf 100644
--- a/fs/exfat/Kconfig
+++ b/fs/exfat/Kconfig
@@ -16,6 +16,7 @@ config EXFAT_DEFAULT_IOCHARSET
depends on EXFAT_FS
help
Set this to the default input/output character set to use for
- converting between the encoding is used for user visible filename and
- UTF-16 character that exfat filesystem use, and can be overridden with
- the "iocharset" mount option for exFAT filesystems.
+ converting between the encoding that is used for user-visible
+ filenames and the UTF-16 character encoding that the exFAT
+ filesystem uses. This can be overridden with the "iocharset"
+ mount option for exFAT filesystems.
diff --git a/fs/exfat/balloc.c b/fs/exfat/balloc.c
index 6774a5a6ded8..4055eb00ea9b 100644
--- a/fs/exfat/balloc.c
+++ b/fs/exfat/balloc.c
@@ -58,9 +58,8 @@ static int exfat_allocate_bitmap(struct super_block *sb,
need_map_size = ((EXFAT_DATA_CLUSTER_COUNT(sbi) - 1) / BITS_PER_BYTE)
+ 1;
if (need_map_size != map_size) {
- exfat_msg(sb, KERN_ERR,
- "bogus allocation bitmap size(need : %u, cur : %lld)",
- need_map_size, map_size);
+ exfat_err(sb, "bogus allocation bitmap size(need : %u, cur : %lld)",
+ need_map_size, map_size);
/*
* Only allowed when bogus allocation
* bitmap size is large
@@ -192,8 +191,7 @@ void exfat_clear_bitmap(struct inode *inode, unsigned int clu)
(1 << sbi->sect_per_clus_bits), GFP_NOFS, 0);
if (ret_discard == -EOPNOTSUPP) {
- exfat_msg(sb, KERN_ERR,
- "discard not supported by device, disabling");
+ exfat_err(sb, "discard not supported by device, disabling");
opts->discard = 0;
}
}
diff --git a/fs/exfat/dir.c b/fs/exfat/dir.c
index 4b91afb0f051..de43534aa299 100644
--- a/fs/exfat/dir.c
+++ b/fs/exfat/dir.c
@@ -32,35 +32,30 @@ static void exfat_get_uniname_from_ext_entry(struct super_block *sb,
struct exfat_chain *p_dir, int entry, unsigned short *uniname)
{
int i;
- struct exfat_dentry *ep;
struct exfat_entry_set_cache *es;
- es = exfat_get_dentry_set(sb, p_dir, entry, ES_ALL_ENTRIES, &ep);
+ es = exfat_get_dentry_set(sb, p_dir, entry, ES_ALL_ENTRIES);
if (!es)
return;
- if (es->num_entries < 3)
- goto free_es;
-
- ep += 2;
-
/*
* First entry : file entry
* Second entry : stream-extension entry
* Third entry : first file-name entry
* So, the index of first file-name dentry should start from 2.
*/
- for (i = 2; i < es->num_entries; i++, ep++) {
+ for (i = 2; i < es->num_entries; i++) {
+ struct exfat_dentry *ep = exfat_get_dentry_cached(es, i);
+
/* end of name entry */
if (exfat_get_entry_type(ep) != TYPE_EXTEND)
- goto free_es;
+ break;
exfat_extract_uni_name(ep, uniname);
uniname += EXFAT_FILE_NAME_LEN;
}
-free_es:
- kfree(es);
+ exfat_free_dentry_set(es, false);
}
/* read a directory entry from the opened directory */
@@ -137,12 +132,12 @@ static int exfat_readdir(struct inode *inode, struct exfat_dir_entry *dir_entry)
ep->dentry.file.create_tz,
ep->dentry.file.create_time,
ep->dentry.file.create_date,
- ep->dentry.file.create_time_ms);
+ ep->dentry.file.create_time_cs);
exfat_get_entry_time(sbi, &dir_entry->mtime,
ep->dentry.file.modify_tz,
ep->dentry.file.modify_time,
ep->dentry.file.modify_date,
- ep->dentry.file.modify_time_ms);
+ ep->dentry.file.modify_time_cs);
exfat_get_entry_time(sbi, &dir_entry->atime,
ep->dentry.file.access_tz,
ep->dentry.file.access_time,
@@ -461,12 +456,12 @@ int exfat_init_dir_entry(struct inode *inode, struct exfat_chain *p_dir,
&ep->dentry.file.create_tz,
&ep->dentry.file.create_time,
&ep->dentry.file.create_date,
- &ep->dentry.file.create_time_ms);
+ &ep->dentry.file.create_time_cs);
exfat_set_entry_time(sbi, &ts,
&ep->dentry.file.modify_tz,
&ep->dentry.file.modify_time,
&ep->dentry.file.modify_date,
- &ep->dentry.file.modify_time_ms);
+ &ep->dentry.file.modify_time_cs);
exfat_set_entry_time(sbi, &ts,
&ep->dentry.file.access_tz,
&ep->dentry.file.access_time,
@@ -496,7 +491,7 @@ int exfat_update_dir_chksum(struct inode *inode, struct exfat_chain *p_dir,
int ret = 0;
int i, num_entries;
sector_t sector;
- unsigned short chksum;
+ u16 chksum;
struct exfat_dentry *ep, *fep;
struct buffer_head *fbh, *bh;
@@ -505,7 +500,7 @@ int exfat_update_dir_chksum(struct inode *inode, struct exfat_chain *p_dir,
return -EIO;
num_entries = fep->dentry.file.num_ext + 1;
- chksum = exfat_calc_chksum_2byte(fep, DENTRY_SIZE, 0, CS_DIR_ENTRY);
+ chksum = exfat_calc_chksum16(fep, DENTRY_SIZE, 0, CS_DIR_ENTRY);
for (i = 1; i < num_entries; i++) {
ep = exfat_get_dentry(sb, p_dir, entry + i, &bh, NULL);
@@ -513,7 +508,7 @@ int exfat_update_dir_chksum(struct inode *inode, struct exfat_chain *p_dir,
ret = -EIO;
goto release_fbh;
}
- chksum = exfat_calc_chksum_2byte(ep, DENTRY_SIZE, chksum,
+ chksum = exfat_calc_chksum16(ep, DENTRY_SIZE, chksum,
CS_DEFAULT);
brelse(bh);
}
@@ -590,62 +585,33 @@ int exfat_remove_entries(struct inode *inode, struct exfat_chain *p_dir,
return 0;
}
-int exfat_update_dir_chksum_with_entry_set(struct super_block *sb,
- struct exfat_entry_set_cache *es, int sync)
+void exfat_update_dir_chksum_with_entry_set(struct exfat_entry_set_cache *es)
{
- struct exfat_sb_info *sbi = EXFAT_SB(sb);
- struct buffer_head *bh;
- sector_t sec = es->sector;
- unsigned int off = es->offset;
- int chksum_type = CS_DIR_ENTRY, i, num_entries = es->num_entries;
- unsigned int buf_off = (off - es->offset);
- unsigned int remaining_byte_in_sector, copy_entries, clu;
+ int chksum_type = CS_DIR_ENTRY, i;
unsigned short chksum = 0;
+ struct exfat_dentry *ep;
- for (i = 0; i < num_entries; i++) {
- chksum = exfat_calc_chksum_2byte(&es->entries[i], DENTRY_SIZE,
- chksum, chksum_type);
+ for (i = 0; i < es->num_entries; i++) {
+ ep = exfat_get_dentry_cached(es, i);
+ chksum = exfat_calc_chksum16(ep, DENTRY_SIZE, chksum,
+ chksum_type);
chksum_type = CS_DEFAULT;
}
+ ep = exfat_get_dentry_cached(es, 0);
+ ep->dentry.file.checksum = cpu_to_le16(chksum);
+ es->modified = true;
+}
- es->entries[0].dentry.file.checksum = cpu_to_le16(chksum);
+void exfat_free_dentry_set(struct exfat_entry_set_cache *es, int sync)
+{
+ int i;
- while (num_entries) {
- /* write per sector base */
- remaining_byte_in_sector = (1 << sb->s_blocksize_bits) - off;
- copy_entries = min_t(int,
- EXFAT_B_TO_DEN(remaining_byte_in_sector),
- num_entries);
- bh = sb_bread(sb, sec);
- if (!bh)
- goto err_out;
- memcpy(bh->b_data + off,
- (unsigned char *)&es->entries[0] + buf_off,
- EXFAT_DEN_TO_B(copy_entries));
- exfat_update_bh(sb, bh, sync);
- brelse(bh);
- num_entries -= copy_entries;
-
- if (num_entries) {
- /* get next sector */
- if (exfat_is_last_sector_in_cluster(sbi, sec)) {
- clu = exfat_sector_to_cluster(sbi, sec);
- if (es->alloc_flag == ALLOC_NO_FAT_CHAIN)
- clu++;
- else if (exfat_get_next_cluster(sb, &clu))
- goto err_out;
- sec = exfat_cluster_to_sector(sbi, clu);
- } else {
- sec++;
- }
- off = 0;
- buf_off += EXFAT_DEN_TO_B(copy_entries);
- }
+ for (i = 0; i < es->num_bh; i++) {
+ if (es->modified)
+ exfat_update_bh(es->sb, es->bh[i], sync);
+ brelse(es->bh[i]);
}
-
- return 0;
-err_out:
- return -EIO;
+ kfree(es);
}
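Taken together, the new helpers give the entry-set API the following shape (a usage sketch condensed from the __exfat_truncate() and __exfat_write_inode() hunks later in this patch; error paths elided):

	struct exfat_entry_set_cache *es;
	struct exfat_dentry *ep;

	es = exfat_get_dentry_set(sb, p_dir, entry, ES_ALL_ENTRIES);
	if (!es)
		return -EIO;

	ep = exfat_get_dentry_cached(es, 0);        /* file entry, in bh */
	ep->dentry.file.attr = cpu_to_le16(attr);   /* edit in place */

	exfat_update_dir_chksum_with_entry_set(es); /* sets es->modified */
	exfat_free_dentry_set(es, sync);   /* writes dirty bh, brelse, kfree */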
static int exfat_walk_fat_chain(struct super_block *sb,
@@ -720,9 +686,8 @@ static int exfat_dir_readahead(struct super_block *sb, sector_t sec)
return 0;
if (sec < sbi->data_start_sector) {
- exfat_msg(sb, KERN_ERR,
- "requested sector is invalid(sect:%llu, root:%llu)",
- (unsigned long long)sec, sbi->data_start_sector);
+ exfat_err(sb, "requested sector is invalid(sect:%llu, root:%llu)",
+ (unsigned long long)sec, sbi->data_start_sector);
return -EIO;
}
@@ -750,7 +715,7 @@ struct exfat_dentry *exfat_get_dentry(struct super_block *sb,
sector_t sec;
if (p_dir->dir == DIR_DELETED) {
- exfat_msg(sb, KERN_ERR, "abnormal access to deleted dentry\n");
+ exfat_err(sb, "abnormal access to deleted dentry");
return NULL;
}
@@ -821,39 +786,45 @@ static bool exfat_validate_entry(unsigned int type,
}
}
+struct exfat_dentry *exfat_get_dentry_cached(
+ struct exfat_entry_set_cache *es, int num)
+{
+ int off = es->start_off + num * DENTRY_SIZE;
+ struct buffer_head *bh = es->bh[EXFAT_B_TO_BLK(off, es->sb)];
+ char *p = bh->b_data + EXFAT_BLK_OFFSET(off, es->sb);
+
+ return (struct exfat_dentry *)p;
+}
+
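A worked example of the arithmetic in exfat_get_dentry_cached(), assuming 512-byte blocks, the 32-byte exFAT dentry size, and that EXFAT_B_TO_BLK()/EXFAT_BLK_OFFSET() are the usual shift and mask helpers: with es->start_off = 96 and num = 20,

	off = 96 + 20 * 32 = 736
	bh  = es->bh[736 >> 9]           /* es->bh[1] */
	p   = bh->b_data + (736 & 511)   /* byte 224 of the second bh */

so the caller gets a pointer straight into the cached buffer head; no copy is made, which is why the 'modified' write-back flag below is needed.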
/*
* Returns a set of dentries for a file or dir.
*
- * Note that this is a copy (dump) of dentries so that user should
- * call write_entry_set() to apply changes made in this entry set
- * to the real device.
+ * Note: It provides a direct pointer to bh->data via exfat_get_dentry_cached().
+ * User should call exfat_free_dentry_set() after setting 'modified' to apply
+ * changes made in this entry set to the real device.
*
* in:
* sb+p_dir+entry: indicates a file/dir
* type: specifies how many dentries should be included.
- * out:
- * file_ep: will point the first dentry(= file dentry) on success
* return:
* pointer of entry set on success,
* NULL on failure.
*/
struct exfat_entry_set_cache *exfat_get_dentry_set(struct super_block *sb,
- struct exfat_chain *p_dir, int entry, unsigned int type,
- struct exfat_dentry **file_ep)
+ struct exfat_chain *p_dir, int entry, unsigned int type)
{
- int ret;
+ int ret, i, num_bh;
unsigned int off, byte_offset, clu = 0;
- unsigned int entry_type;
sector_t sec;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
struct exfat_entry_set_cache *es;
- struct exfat_dentry *ep, *pos;
- unsigned char num_entries;
+ struct exfat_dentry *ep;
+ int num_entries;
enum exfat_validate_dentry_mode mode = ES_MODE_STARTED;
struct buffer_head *bh;
if (p_dir->dir == DIR_DELETED) {
- exfat_msg(sb, KERN_ERR, "access to deleted dentry\n");
+ exfat_err(sb, "access to deleted dentry");
return NULL;
}
@@ -862,11 +833,18 @@ struct exfat_entry_set_cache *exfat_get_dentry_set(struct super_block *sb,
if (ret)
return NULL;
+ es = kzalloc(sizeof(*es), GFP_KERNEL);
+ if (!es)
+ return NULL;
+ es->sb = sb;
+ es->modified = false;
+
/* byte offset in cluster */
byte_offset = EXFAT_CLU_OFFSET(byte_offset, sbi);
/* byte offset in sector */
off = EXFAT_BLK_OFFSET(byte_offset, sb);
+ es->start_off = off;
/* sector offset in cluster */
sec = EXFAT_B_TO_BLK(byte_offset, sb);
@@ -874,72 +852,46 @@ struct exfat_entry_set_cache *exfat_get_dentry_set(struct super_block *sb,
bh = sb_bread(sb, sec);
if (!bh)
- return NULL;
-
- ep = (struct exfat_dentry *)(bh->b_data + off);
- entry_type = exfat_get_entry_type(ep);
+ goto free_es;
+ es->bh[es->num_bh++] = bh;
- if (entry_type != TYPE_FILE && entry_type != TYPE_DIR)
- goto release_bh;
+ ep = exfat_get_dentry_cached(es, 0);
+ if (!exfat_validate_entry(exfat_get_entry_type(ep), &mode))
+ goto free_es;
num_entries = type == ES_ALL_ENTRIES ?
ep->dentry.file.num_ext + 1 : type;
- es = kmalloc(struct_size(es, entries, num_entries), GFP_KERNEL);
- if (!es)
- goto release_bh;
-
es->num_entries = num_entries;
- es->sector = sec;
- es->offset = off;
- es->alloc_flag = p_dir->flags;
-
- pos = &es->entries[0];
-
- while (num_entries) {
- if (!exfat_validate_entry(exfat_get_entry_type(ep), &mode))
- goto free_es;
-
- /* copy dentry */
- memcpy(pos, ep, sizeof(struct exfat_dentry));
-
- if (--num_entries == 0)
- break;
-
- if (((off + DENTRY_SIZE) & (sb->s_blocksize - 1)) <
- (off & (sb->s_blocksize - 1))) {
- /* get the next sector */
- if (exfat_is_last_sector_in_cluster(sbi, sec)) {
- if (es->alloc_flag == ALLOC_NO_FAT_CHAIN)
- clu++;
- else if (exfat_get_next_cluster(sb, &clu))
- goto free_es;
- sec = exfat_cluster_to_sector(sbi, clu);
- } else {
- sec++;
- }
- brelse(bh);
- bh = sb_bread(sb, sec);
- if (!bh)
+ num_bh = EXFAT_B_TO_BLK_ROUND_UP(off + num_entries * DENTRY_SIZE, sb);
+ for (i = 1; i < num_bh; i++) {
+ /* get the next sector */
+ if (exfat_is_last_sector_in_cluster(sbi, sec)) {
+ if (p_dir->flags == ALLOC_NO_FAT_CHAIN)
+ clu++;
+ else if (exfat_get_next_cluster(sb, &clu))
goto free_es;
- off = 0;
- ep = (struct exfat_dentry *)bh->b_data;
+ sec = exfat_cluster_to_sector(sbi, clu);
} else {
- ep++;
- off += DENTRY_SIZE;
+ sec++;
}
- pos++;
+
+ bh = sb_bread(sb, sec);
+ if (!bh)
+ goto free_es;
+ es->bh[es->num_bh++] = bh;
}
- if (file_ep)
- *file_ep = &es->entries[0];
- brelse(bh);
+ /* validate cached dentries */
+ for (i = 1; i < num_entries; i++) {
+ ep = exfat_get_dentry_cached(es, i);
+ if (!exfat_validate_entry(exfat_get_entry_type(ep), &mode))
+ goto free_es;
+ }
return es;
free_es:
- kfree(es);
-release_bh:
- brelse(bh);
+ exfat_free_dentry_set(es, false);
return NULL;
}
@@ -1048,7 +1000,7 @@ rewind:
}
if (entry_type == TYPE_STREAM) {
- unsigned short name_hash;
+ u16 name_hash;
if (step != DIRENT_STEP_STRM) {
step = DIRENT_STEP_FILE;
diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h
index d67fb8a6f770..595f3117f492 100644
--- a/fs/exfat/exfat_fs.h
+++ b/fs/exfat/exfat_fs.h
@@ -71,10 +71,8 @@ enum {
#define MAX_NAME_LENGTH 255 /* max len of file name excluding NULL */
#define MAX_VFSNAME_BUF_SIZE ((MAX_NAME_LENGTH + 1) * MAX_CHARSET_SIZE)
-#define FAT_CACHE_SIZE 128
-#define FAT_CACHE_HASH_SIZE 64
-#define BUF_CACHE_SIZE 256
-#define BUF_CACHE_HASH_SIZE 64
+/* Enough space to hold 256 dentries (even with 512-byte sectors) */
+#define DIR_CACHE_SIZE (256*sizeof(struct exfat_dentry)/512+1)
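With the 32-byte exFAT dentry size this works out to 256 * 32 / 512 + 1 = 17 buffer heads per entry set, sized for the worst case of 512-byte sectors; larger sector sizes need fewer buffer heads, so the fixed array stays sufficient.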
#define EXFAT_HINT_NONE -1
#define EXFAT_MIN_SUBDIR 2
@@ -139,7 +137,7 @@ struct exfat_dentry_namebuf {
struct exfat_uni_name {
/* +3 for null and for converting */
unsigned short name[MAX_NAME_LENGTH + 3];
- unsigned short name_hash;
+ u16 name_hash;
unsigned char name_len;
};
@@ -170,14 +168,12 @@ struct exfat_hint {
};
struct exfat_entry_set_cache {
- /* sector number that contains file_entry */
- sector_t sector;
- /* byte offset in the sector */
- unsigned int offset;
- /* flag in stream entry. 01 for cluster chain, 03 for contig. */
- int alloc_flag;
+ struct super_block *sb;
+ bool modified;
+ unsigned int start_off;
+ int num_bh;
+ struct buffer_head *bh[DIR_CACHE_SIZE];
unsigned int num_entries;
- struct exfat_dentry entries[];
};
struct exfat_dir_entry {
@@ -231,7 +227,7 @@ struct exfat_sb_info {
unsigned int root_dir; /* root dir cluster */
unsigned int dentries_per_clu; /* num of dentries per cluster */
unsigned int vol_flag; /* volume dirty flag */
- struct buffer_head *pbr_bh; /* buffer_head of PBR sector */
+ struct buffer_head *boot_bh; /* buffer_head of BOOT sector */
unsigned int map_clu; /* allocation bitmap start cluster */
unsigned int map_sectors; /* num of allocation bitmap sectors */
@@ -451,8 +447,7 @@ int exfat_remove_entries(struct inode *inode, struct exfat_chain *p_dir,
int entry, int order, int num_entries);
int exfat_update_dir_chksum(struct inode *inode, struct exfat_chain *p_dir,
int entry);
-int exfat_update_dir_chksum_with_entry_set(struct super_block *sb,
- struct exfat_entry_set_cache *es, int sync);
+void exfat_update_dir_chksum_with_entry_set(struct exfat_entry_set_cache *es);
int exfat_calc_num_entries(struct exfat_uni_name *p_uniname);
int exfat_find_dir_entry(struct super_block *sb, struct exfat_inode_info *ei,
struct exfat_chain *p_dir, struct exfat_uni_name *p_uniname,
@@ -463,9 +458,11 @@ int exfat_find_location(struct super_block *sb, struct exfat_chain *p_dir,
struct exfat_dentry *exfat_get_dentry(struct super_block *sb,
struct exfat_chain *p_dir, int entry, struct buffer_head **bh,
sector_t *sector);
+struct exfat_dentry *exfat_get_dentry_cached(struct exfat_entry_set_cache *es,
+ int num);
struct exfat_entry_set_cache *exfat_get_dentry_set(struct super_block *sb,
- struct exfat_chain *p_dir, int entry, unsigned int type,
- struct exfat_dentry **file_ep);
+ struct exfat_chain *p_dir, int entry, unsigned int type);
+void exfat_free_dentry_set(struct exfat_entry_set_cache *es, int sync);
int exfat_count_dir_entries(struct super_block *sb, struct exfat_chain *p_dir);
/* inode.c */
@@ -492,8 +489,6 @@ int exfat_nls_to_utf16(struct super_block *sb,
struct exfat_uni_name *uniname, int *p_lossy);
int exfat_create_upcase_table(struct super_block *sb);
void exfat_free_upcase_table(struct exfat_sb_info *sbi);
-unsigned short exfat_high_surrogate(unicode_t u);
-unsigned short exfat_low_surrogate(unicode_t u);
/* exfat/misc.c */
void __exfat_fs_error(struct super_block *sb, int report, const char *fmt, ...)
@@ -505,13 +500,20 @@ void __exfat_fs_error(struct super_block *sb, int report, const char *fmt, ...)
fmt, ## args)
void exfat_msg(struct super_block *sb, const char *lv, const char *fmt, ...)
__printf(3, 4) __cold;
+#define exfat_err(sb, fmt, ...) \
+ exfat_msg(sb, KERN_ERR, fmt, ##__VA_ARGS__)
+#define exfat_warn(sb, fmt, ...) \
+ exfat_msg(sb, KERN_WARNING, fmt, ##__VA_ARGS__)
+#define exfat_info(sb, fmt, ...) \
+ exfat_msg(sb, KERN_INFO, fmt, ##__VA_ARGS__)
+
void exfat_get_entry_time(struct exfat_sb_info *sbi, struct timespec64 *ts,
- u8 tz, __le16 time, __le16 date, u8 time_ms);
+ u8 tz, __le16 time, __le16 date, u8 time_cs);
void exfat_truncate_atime(struct timespec64 *ts);
void exfat_set_entry_time(struct exfat_sb_info *sbi, struct timespec64 *ts,
- u8 *tz, __le16 *time, __le16 *date, u8 *time_ms);
-unsigned short exfat_calc_chksum_2byte(void *data, int len,
- unsigned short chksum, int type);
+ u8 *tz, __le16 *time, __le16 *date, u8 *time_cs);
+u16 exfat_calc_chksum16(void *data, int len, u16 chksum, int type);
+u32 exfat_calc_chksum32(void *data, int len, u32 chksum, int type);
void exfat_update_bh(struct super_block *sb, struct buffer_head *bh, int sync);
void exfat_chain_set(struct exfat_chain *ec, unsigned int dir,
unsigned int size, unsigned char flags);
diff --git a/fs/exfat/exfat_raw.h b/fs/exfat/exfat_raw.h
index 2a841010e649..350ce59cc324 100644
--- a/fs/exfat/exfat_raw.h
+++ b/fs/exfat/exfat_raw.h
@@ -8,12 +8,15 @@
#include <linux/types.h>
-#define PBR_SIGNATURE 0xAA55
+#define BOOT_SIGNATURE 0xAA55
+#define EXBOOT_SIGNATURE 0xAA550000
+#define STR_EXFAT "EXFAT " /* size should be 8 */
#define EXFAT_MAX_FILE_LEN 255
#define VOL_CLEAN 0x0000
#define VOL_DIRTY 0x0002
+#define ERR_MEDIUM 0x0004
#define EXFAT_EOF_CLUSTER 0xFFFFFFFFu
#define EXFAT_BAD_CLUSTER 0xFFFFFFF7u
@@ -55,7 +58,7 @@
/* checksum types */
#define CS_DIR_ENTRY 0
-#define CS_PBR_SECTOR 1
+#define CS_BOOT_SECTOR 1
#define CS_DEFAULT 2
/* file attributes */
@@ -69,57 +72,35 @@
#define ATTR_RWMASK (ATTR_HIDDEN | ATTR_SYSTEM | ATTR_VOLUME | \
ATTR_SUBDIR | ATTR_ARCHIVE)
-#define PBR64_JUMP_BOOT_LEN 3
-#define PBR64_OEM_NAME_LEN 8
-#define PBR64_RESERVED_LEN 53
+#define BOOTSEC_JUMP_BOOT_LEN 3
+#define BOOTSEC_FS_NAME_LEN 8
+#define BOOTSEC_OLDBPB_LEN 53
#define EXFAT_FILE_NAME_LEN 15
-/* EXFAT BIOS parameter block (64 bytes) */
-struct bpb64 {
- __u8 jmp_boot[PBR64_JUMP_BOOT_LEN];
- __u8 oem_name[PBR64_OEM_NAME_LEN];
- __u8 res_zero[PBR64_RESERVED_LEN];
-} __packed;
-
-/* EXFAT EXTEND BIOS parameter block (56 bytes) */
-struct bsx64 {
- __le64 vol_offset;
- __le64 vol_length;
- __le32 fat_offset;
- __le32 fat_length;
- __le32 clu_offset;
- __le32 clu_count;
- __le32 root_cluster;
- __le32 vol_serial;
- __u8 fs_version[2];
- __le16 vol_flags;
- __u8 sect_size_bits;
- __u8 sect_per_clus_bits;
- __u8 num_fats;
- __u8 phy_drv_no;
- __u8 perc_in_use;
- __u8 reserved2[7];
-} __packed;
-
-/* EXFAT PBR[BPB+BSX] (120 bytes) */
-struct pbr64 {
- struct bpb64 bpb;
- struct bsx64 bsx;
-} __packed;
-
-/* Common PBR[Partition Boot Record] (512 bytes) */
-struct pbr {
- union {
- __u8 raw[64];
- struct bpb64 f64;
- } bpb;
- union {
- __u8 raw[56];
- struct bsx64 f64;
- } bsx;
- __u8 boot_code[390];
- __le16 signature;
+/* EXFAT: Main and Backup Boot Sector (512 bytes) */
+struct boot_sector {
+ __u8 jmp_boot[BOOTSEC_JUMP_BOOT_LEN];
+ __u8 fs_name[BOOTSEC_FS_NAME_LEN];
+ __u8 must_be_zero[BOOTSEC_OLDBPB_LEN];
+ __le64 partition_offset;
+ __le64 vol_length;
+ __le32 fat_offset;
+ __le32 fat_length;
+ __le32 clu_offset;
+ __le32 clu_count;
+ __le32 root_cluster;
+ __le32 vol_serial;
+ __u8 fs_revision[2];
+ __le16 vol_flags;
+ __u8 sect_size_bits;
+ __u8 sect_per_clus_bits;
+ __u8 num_fats;
+ __u8 drv_sel;
+ __u8 percent_in_use;
+ __u8 reserved[7];
+ __u8 boot_code[390];
+ __le16 signature;
} __packed;
struct exfat_dentry {
@@ -136,8 +117,8 @@ struct exfat_dentry {
__le16 modify_date;
__le16 access_time;
__le16 access_date;
- __u8 create_time_ms;
- __u8 modify_time_ms;
+ __u8 create_time_cs;
+ __u8 modify_time_cs;
__u8 create_tz;
__u8 modify_tz;
__u8 access_tz;
diff --git a/fs/exfat/fatent.c b/fs/exfat/fatent.c
index a855b1769a96..4e5c5c9c0f2d 100644
--- a/fs/exfat/fatent.c
+++ b/fs/exfat/fatent.c
@@ -169,9 +169,8 @@ int exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain)
return 0;
/* check cluster validation */
- if (p_chain->dir < 2 && p_chain->dir >= sbi->num_clusters) {
- exfat_msg(sb, KERN_ERR, "invalid start cluster (%u)",
- p_chain->dir);
+ if (!is_valid_cluster(sbi, p_chain->dir)) {
+ exfat_err(sb, "invalid start cluster (%u)", p_chain->dir);
return -EIO;
}
@@ -305,8 +304,7 @@ int exfat_zeroed_cluster(struct inode *dir, unsigned int clu)
return 0;
release_bhs:
- exfat_msg(sb, KERN_ERR, "failed zeroed sect %llu\n",
- (unsigned long long)blknr);
+ exfat_err(sb, "failed zeroed sect %llu\n", (unsigned long long)blknr);
for (i = 0; i < n; i++)
bforget(bhs[i]);
return err;
@@ -337,9 +335,8 @@ int exfat_alloc_cluster(struct inode *inode, unsigned int num_alloc,
/* find new cluster */
if (hint_clu == EXFAT_EOF_CLUSTER) {
if (sbi->clu_srch_ptr < EXFAT_FIRST_CLUSTER) {
- exfat_msg(sb, KERN_ERR,
- "sbi->clu_srch_ptr is invalid (%u)\n",
- sbi->clu_srch_ptr);
+ exfat_err(sb, "sbi->clu_srch_ptr is invalid (%u)\n",
+ sbi->clu_srch_ptr);
sbi->clu_srch_ptr = EXFAT_FIRST_CLUSTER;
}
@@ -349,8 +346,8 @@ int exfat_alloc_cluster(struct inode *inode, unsigned int num_alloc,
}
/* check cluster validation */
- if (hint_clu < EXFAT_FIRST_CLUSTER && hint_clu >= sbi->num_clusters) {
- exfat_msg(sb, KERN_ERR, "hint_cluster is invalid (%u)\n",
+ if (!is_valid_cluster(sbi, hint_clu)) {
+ exfat_err(sb, "hint_cluster is invalid (%u)",
hint_clu);
hint_clu = EXFAT_FIRST_CLUSTER;
if (p_chain->flags == ALLOC_NO_FAT_CHAIN) {
diff --git a/fs/exfat/file.c b/fs/exfat/file.c
index c9db8eb0cfc3..fce03f318787 100644
--- a/fs/exfat/file.c
+++ b/fs/exfat/file.c
@@ -96,11 +96,9 @@ int __exfat_truncate(struct inode *inode, loff_t new_size)
unsigned int num_clusters_new, num_clusters_phys;
unsigned int last_clu = EXFAT_FREE_CLUSTER;
struct exfat_chain clu;
- struct exfat_dentry *ep, *ep2;
struct super_block *sb = inode->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
struct exfat_inode_info *ei = EXFAT_I(inode);
- struct exfat_entry_set_cache *es = NULL;
int evict = (ei->dir.dir == DIR_DELETED) ? 1 : 0;
/* check if the given file ID is opened */
@@ -153,28 +151,31 @@ int __exfat_truncate(struct inode *inode, loff_t new_size)
/* update the directory entry */
if (!evict) {
struct timespec64 ts;
+ struct exfat_dentry *ep, *ep2;
+ struct exfat_entry_set_cache *es;
es = exfat_get_dentry_set(sb, &(ei->dir), ei->entry,
- ES_ALL_ENTRIES, &ep);
+ ES_ALL_ENTRIES);
if (!es)
return -EIO;
- ep2 = ep + 1;
+ ep = exfat_get_dentry_cached(es, 0);
+ ep2 = exfat_get_dentry_cached(es, 1);
ts = current_time(inode);
exfat_set_entry_time(sbi, &ts,
&ep->dentry.file.modify_tz,
&ep->dentry.file.modify_time,
&ep->dentry.file.modify_date,
- &ep->dentry.file.modify_time_ms);
+ &ep->dentry.file.modify_time_cs);
ep->dentry.file.attr = cpu_to_le16(ei->attr);
/* File size should be zero if there is no cluster allocated */
if (ei->start_clu == EXFAT_EOF_CLUSTER) {
- ep->dentry.stream.valid_size = 0;
- ep->dentry.stream.size = 0;
+ ep2->dentry.stream.valid_size = 0;
+ ep2->dentry.stream.size = 0;
} else {
- ep->dentry.stream.valid_size = cpu_to_le64(new_size);
- ep->dentry.stream.size = ep->dentry.stream.valid_size;
+ ep2->dentry.stream.valid_size = cpu_to_le64(new_size);
+ ep2->dentry.stream.size = ep->dentry.stream.valid_size;
}
if (new_size == 0) {
@@ -185,10 +186,8 @@ int __exfat_truncate(struct inode *inode, loff_t new_size)
ep2->dentry.stream.start_clu = EXFAT_FREE_CLUSTER;
}
- if (exfat_update_dir_chksum_with_entry_set(sb, es,
- inode_needs_sync(inode)))
- return -EIO;
- kfree(es);
+ exfat_update_dir_chksum_with_entry_set(es);
+ exfat_free_dentry_set(es, inode_needs_sync(inode));
}
/* cut off from the FAT chain */
diff --git a/fs/exfat/inode.c b/fs/exfat/inode.c
index 785ead346543..cf9ca6c4d046 100644
--- a/fs/exfat/inode.c
+++ b/fs/exfat/inode.c
@@ -19,7 +19,6 @@
static int __exfat_write_inode(struct inode *inode, int sync)
{
- int ret = -EIO;
unsigned long long on_disk_size;
struct exfat_dentry *ep, *ep2;
struct exfat_entry_set_cache *es = NULL;
@@ -43,11 +42,11 @@ static int __exfat_write_inode(struct inode *inode, int sync)
exfat_set_vol_flags(sb, VOL_DIRTY);
/* get the directory entry of given file or directory */
- es = exfat_get_dentry_set(sb, &(ei->dir), ei->entry, ES_ALL_ENTRIES,
- &ep);
+ es = exfat_get_dentry_set(sb, &(ei->dir), ei->entry, ES_ALL_ENTRIES);
if (!es)
return -EIO;
- ep2 = ep + 1;
+ ep = exfat_get_dentry_cached(es, 0);
+ ep2 = exfat_get_dentry_cached(es, 1);
ep->dentry.file.attr = cpu_to_le16(exfat_make_attr(inode));
@@ -56,12 +55,12 @@ static int __exfat_write_inode(struct inode *inode, int sync)
&ep->dentry.file.create_tz,
&ep->dentry.file.create_time,
&ep->dentry.file.create_date,
- &ep->dentry.file.create_time_ms);
+ &ep->dentry.file.create_time_cs);
exfat_set_entry_time(sbi, &inode->i_mtime,
&ep->dentry.file.modify_tz,
&ep->dentry.file.modify_time,
&ep->dentry.file.modify_date,
- &ep->dentry.file.modify_time_ms);
+ &ep->dentry.file.modify_time_cs);
exfat_set_entry_time(sbi, &inode->i_atime,
&ep->dentry.file.access_tz,
&ep->dentry.file.access_time,
@@ -77,9 +76,9 @@ static int __exfat_write_inode(struct inode *inode, int sync)
ep2->dentry.stream.valid_size = cpu_to_le64(on_disk_size);
ep2->dentry.stream.size = ep2->dentry.stream.valid_size;
- ret = exfat_update_dir_chksum_with_entry_set(sb, es, sync);
- kfree(es);
- return ret;
+ exfat_update_dir_chksum_with_entry_set(es);
+ exfat_free_dentry_set(es, sync);
+ return 0;
}
int exfat_write_inode(struct inode *inode, struct writeback_control *wbc)
@@ -110,8 +109,6 @@ static int exfat_map_cluster(struct inode *inode, unsigned int clu_offset,
int ret, modified = false;
unsigned int last_clu;
struct exfat_chain new_clu;
- struct exfat_dentry *ep;
- struct exfat_entry_set_cache *es = NULL;
struct super_block *sb = inode->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
struct exfat_inode_info *ei = EXFAT_I(inode);
@@ -222,34 +219,28 @@ static int exfat_map_cluster(struct inode *inode, unsigned int clu_offset,
num_clusters += num_to_be_allocated;
*clu = new_clu.dir;
- if (ei->dir.dir != DIR_DELETED) {
+ if (ei->dir.dir != DIR_DELETED && modified) {
+ struct exfat_dentry *ep;
+ struct exfat_entry_set_cache *es;
+
es = exfat_get_dentry_set(sb, &(ei->dir), ei->entry,
- ES_ALL_ENTRIES, &ep);
+ ES_ALL_ENTRIES);
if (!es)
return -EIO;
/* get stream entry */
- ep++;
+ ep = exfat_get_dentry_cached(es, 1);
/* update directory entry */
- if (modified) {
- if (ep->dentry.stream.flags != ei->flags)
- ep->dentry.stream.flags = ei->flags;
-
- if (le32_to_cpu(ep->dentry.stream.start_clu) !=
- ei->start_clu)
- ep->dentry.stream.start_clu =
- cpu_to_le32(ei->start_clu);
-
- ep->dentry.stream.valid_size =
- cpu_to_le64(i_size_read(inode));
- ep->dentry.stream.size =
- ep->dentry.stream.valid_size;
- }
-
- if (exfat_update_dir_chksum_with_entry_set(sb, es,
- inode_needs_sync(inode)))
- return -EIO;
- kfree(es);
+ ep->dentry.stream.flags = ei->flags;
+ ep->dentry.stream.start_clu =
+ cpu_to_le32(ei->start_clu);
+ ep->dentry.stream.valid_size =
+ cpu_to_le64(i_size_read(inode));
+ ep->dentry.stream.size =
+ ep->dentry.stream.valid_size;
+
+ exfat_update_dir_chksum_with_entry_set(es);
+ exfat_free_dentry_set(es, inode_needs_sync(inode));
} /* end of if != DIR_DELETED */
diff --git a/fs/exfat/misc.c b/fs/exfat/misc.c
index ebd2cbe3cbc1..17d41f3d3709 100644
--- a/fs/exfat/misc.c
+++ b/fs/exfat/misc.c
@@ -32,7 +32,7 @@ void __exfat_fs_error(struct super_block *sb, int report, const char *fmt, ...)
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
- exfat_msg(sb, KERN_ERR, "error, %pV\n", &vaf);
+ exfat_err(sb, "error, %pV", &vaf);
va_end(args);
}
@@ -41,7 +41,7 @@ void __exfat_fs_error(struct super_block *sb, int report, const char *fmt, ...)
sb->s_id);
} else if (opts->errors == EXFAT_ERRORS_RO && !sb_rdonly(sb)) {
sb->s_flags |= SB_RDONLY;
- exfat_msg(sb, KERN_ERR, "Filesystem has been set read-only");
+ exfat_err(sb, "Filesystem has been set read-only");
}
}
@@ -75,7 +75,7 @@ static void exfat_adjust_tz(struct timespec64 *ts, u8 tz_off)
/* Convert a EXFAT time/date pair to a UNIX date (seconds since 1 1 70). */
void exfat_get_entry_time(struct exfat_sb_info *sbi, struct timespec64 *ts,
- u8 tz, __le16 time, __le16 date, u8 time_ms)
+ u8 tz, __le16 time, __le16 date, u8 time_cs)
{
u16 t = le16_to_cpu(time);
u16 d = le16_to_cpu(date);
@@ -84,10 +84,10 @@ void exfat_get_entry_time(struct exfat_sb_info *sbi, struct timespec64 *ts,
t >> 11, (t >> 5) & 0x003F, (t & 0x001F) << 1);
- /* time_ms field represent 0 ~ 199(1990 ms) */
- if (time_ms) {
- ts->tv_sec += time_ms / 100;
- ts->tv_nsec = (time_ms % 100) * 10 * NSEC_PER_MSEC;
+ /* time_cs field represents 0 ~ 199cs (1990 ms) */
+ if (time_cs) {
+ ts->tv_sec += time_cs / 100;
+ ts->tv_nsec = (time_cs % 100) * 10 * NSEC_PER_MSEC;
} else
ts->tv_nsec = 0;
@@ -101,7 +101,7 @@ void exfat_get_entry_time(struct exfat_sb_info *sbi, struct timespec64 *ts,
/* Convert linear UNIX date to a EXFAT time/date pair. */
void exfat_set_entry_time(struct exfat_sb_info *sbi, struct timespec64 *ts,
- u8 *tz, __le16 *time, __le16 *date, u8 *time_ms)
+ u8 *tz, __le16 *time, __le16 *date, u8 *time_cs)
{
struct tm tm;
u16 t, d;
@@ -113,9 +113,9 @@ void exfat_set_entry_time(struct exfat_sb_info *sbi, struct timespec64 *ts,
*time = cpu_to_le16(t);
*date = cpu_to_le16(d);
- /* time_ms field represent 0 ~ 199(1990 ms) */
- if (time_ms)
- *time_ms = (tm.tm_sec & 1) * 100 +
+ /* time_cs field represents 0 ~ 199cs (1990 ms) */
+ if (time_cs)
+ *time_cs = (tm.tm_sec & 1) * 100 +
ts->tv_nsec / (10 * NSEC_PER_MSEC);
/*
@@ -136,17 +136,29 @@ void exfat_truncate_atime(struct timespec64 *ts)
ts->tv_nsec = 0;
}
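A worked round trip of the renamed centisecond field: for a timestamp with an odd seconds value and tv_nsec equal to 990 ms, exfat_set_entry_time() stores time_cs = 1 * 100 + 99 = 199; exfat_get_entry_time() later adds 199 / 100 = 1 second and sets tv_nsec = (199 % 100) * 10 ms = 990 ms, recovering the precision that the 2-second DOS time granularity drops.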
-unsigned short exfat_calc_chksum_2byte(void *data, int len,
- unsigned short chksum, int type)
+u16 exfat_calc_chksum16(void *data, int len, u16 chksum, int type)
{
int i;
- unsigned char *c = (unsigned char *)data;
+ u8 *c = (u8 *)data;
for (i = 0; i < len; i++, c++) {
- if (((i == 2) || (i == 3)) && (type == CS_DIR_ENTRY))
+ if (unlikely(type == CS_DIR_ENTRY && (i == 2 || i == 3)))
continue;
- chksum = (((chksum & 1) << 15) | ((chksum & 0xFFFE) >> 1)) +
- (unsigned short)*c;
+ chksum = ((chksum << 15) | (chksum >> 1)) + *c;
+ }
+ return chksum;
+}
+
+u32 exfat_calc_chksum32(void *data, int len, u32 chksum, int type)
+{
+ int i;
+ u8 *c = (u8 *)data;
+
+ for (i = 0; i < len; i++, c++) {
+ if (unlikely(type == CS_BOOT_SECTOR &&
+ (i == 106 || i == 107 || i == 112)))
+ continue;
+ chksum = ((chksum << 31) | (chksum >> 1)) + *c;
}
return chksum;
}
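The simplified rotate in exfat_calc_chksum16() drops the explicit masks of the old exfat_calc_chksum_2byte() and relies on truncation back to u16 instead. A small user-space check of that equivalence (a sketch, not kernel code):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t c;

		for (c = 0; c <= 0xFFFF; c++) {
			/* old form: mask, then rotate right by one */
			uint16_t old_rot = ((c & 1) << 15) | ((c & 0xFFFE) >> 1);
			/* new form: rotate, let the u16 store truncate */
			uint16_t new_rot = (uint16_t)((c << 15) | (c >> 1));

			assert(old_rot == new_rot);
		}
		return 0;
	}

The same reasoning applies to the 31-bit shift in exfat_calc_chksum32(), where the accumulator is already a full u32.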
diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c
index a2659a8a68a1..5b0f35329d63 100644
--- a/fs/exfat/namei.c
+++ b/fs/exfat/namei.c
@@ -147,16 +147,10 @@ static int exfat_utf8_d_hash(const struct dentry *dentry, struct qstr *qstr)
return charlen;
/*
- * Convert to UTF-16: code points above U+FFFF are encoded as
- * surrogate pairs.
* exfat_toupper() works only for code points up to the U+FFFF.
*/
- if (u > 0xFFFF) {
- hash = partial_name_hash(exfat_high_surrogate(u), hash);
- hash = partial_name_hash(exfat_low_surrogate(u), hash);
- } else {
- hash = partial_name_hash(exfat_toupper(sb, u), hash);
- }
+ hash = partial_name_hash(u <= 0xFFFF ? exfat_toupper(sb, u) : u,
+ hash);
}
qstr->hash = end_name_hash(hash);
@@ -185,14 +179,9 @@ static int exfat_utf8_d_cmp(const struct dentry *dentry, unsigned int len,
if (u_a <= 0xFFFF && u_b <= 0xFFFF) {
if (exfat_toupper(sb, u_a) != exfat_toupper(sb, u_b))
return 1;
- } else if (u_a > 0xFFFF && u_b > 0xFFFF) {
- if (exfat_low_surrogate(u_a) !=
- exfat_low_surrogate(u_b) ||
- exfat_high_surrogate(u_a) !=
- exfat_high_surrogate(u_b))
- return 1;
} else {
- return 1;
+ if (u_a != u_b)
+ return 1;
}
}
@@ -611,8 +600,6 @@ static int exfat_find(struct inode *dir, struct qstr *qname,
int ret, dentry, num_entries, count;
struct exfat_chain cdir;
struct exfat_uni_name uni_name;
- struct exfat_dentry *ep, *ep2;
- struct exfat_entry_set_cache *es = NULL;
struct super_block *sb = dir->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
struct exfat_inode_info *ei = EXFAT_I(dir);
@@ -671,10 +658,14 @@ static int exfat_find(struct inode *dir, struct qstr *qname,
info->num_subdirs = count;
} else {
- es = exfat_get_dentry_set(sb, &cdir, dentry, ES_2_ENTRIES, &ep);
+ struct exfat_dentry *ep, *ep2;
+ struct exfat_entry_set_cache *es;
+
+ es = exfat_get_dentry_set(sb, &cdir, dentry, ES_2_ENTRIES);
if (!es)
return -EIO;
- ep2 = ep + 1;
+ ep = exfat_get_dentry_cached(es, 0);
+ ep2 = exfat_get_dentry_cached(es, 1);
info->type = exfat_get_entry_type(ep);
info->attr = le16_to_cpu(ep->dentry.file.attr);
@@ -692,7 +683,7 @@ static int exfat_find(struct inode *dir, struct qstr *qname,
exfat_fs_error(sb,
"non-zero size file starts with zero cluster (size : %llu, p_dir : %u, entry : 0x%08x)",
i_size_read(dir), ei->dir.dir, ei->entry);
- kfree(es);
+ exfat_free_dentry_set(es, false);
return -EIO;
}
@@ -700,18 +691,18 @@ static int exfat_find(struct inode *dir, struct qstr *qname,
ep->dentry.file.create_tz,
ep->dentry.file.create_time,
ep->dentry.file.create_date,
- ep->dentry.file.create_time_ms);
+ ep->dentry.file.create_time_cs);
exfat_get_entry_time(sbi, &info->mtime,
ep->dentry.file.modify_tz,
ep->dentry.file.modify_time,
ep->dentry.file.modify_date,
- ep->dentry.file.modify_time_ms);
+ ep->dentry.file.modify_time_cs);
exfat_get_entry_time(sbi, &info->atime,
ep->dentry.file.access_tz,
ep->dentry.file.access_time,
ep->dentry.file.access_date,
0);
- kfree(es);
+ exfat_free_dentry_set(es, false);
if (info->type == TYPE_DIR) {
exfat_chain_set(&cdir, info->start_clu,
@@ -778,8 +769,8 @@ static struct dentry *exfat_lookup(struct inode *dir, struct dentry *dentry,
if (d_unhashed(alias)) {
WARN_ON(alias->d_name.hash_len !=
dentry->d_name.hash_len);
- exfat_msg(sb, KERN_INFO,
- "rehashed a dentry(%p) in read lookup", alias);
+ exfat_info(sb, "rehashed a dentry(%p) in read lookup",
+ alias);
d_drop(dentry);
d_rehash(alias);
} else if (!S_ISDIR(i_mode)) {
@@ -824,7 +815,7 @@ static int exfat_unlink(struct inode *dir, struct dentry *dentry)
exfat_chain_dup(&cdir, &ei->dir);
entry = ei->entry;
if (ei->dir.dir == DIR_DELETED) {
- exfat_msg(sb, KERN_ERR, "abnormal access to deleted dentry");
+ exfat_err(sb, "abnormal access to deleted dentry");
err = -ENOENT;
goto unlock;
}
@@ -979,7 +970,7 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
entry = ei->entry;
if (ei->dir.dir == DIR_DELETED) {
- exfat_msg(sb, KERN_ERR, "abnormal access to deleted dentry");
+ exfat_err(sb, "abnormal access to deleted dentry");
err = -ENOENT;
goto unlock;
}
@@ -991,9 +982,8 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
err = exfat_check_dir_empty(sb, &clu_to_free);
if (err) {
if (err == -EIO)
- exfat_msg(sb, KERN_ERR,
- "failed to exfat_check_dir_empty : err(%d)",
- err);
+ exfat_err(sb, "failed to exfat_check_dir_empty : err(%d)",
+ err);
goto unlock;
}
@@ -1014,9 +1004,7 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
err = exfat_remove_entries(dir, &cdir, entry, 0, num_entries);
if (err) {
- exfat_msg(sb, KERN_ERR,
- "failed to exfat_remove_entries : err(%d)",
- err);
+ exfat_err(sb, "failed to exfat_remove_entries : err(%d)", err);
goto unlock;
}
ei->dir.dir = DIR_DELETED;
@@ -1245,8 +1233,7 @@ static int __exfat_rename(struct inode *old_parent_inode,
return -EINVAL;
if (ei->dir.dir == DIR_DELETED) {
- exfat_msg(sb, KERN_ERR,
- "abnormal access to deleted source dentry");
+ exfat_err(sb, "abnormal access to deleted source dentry");
return -ENOENT;
}
@@ -1268,8 +1255,7 @@ static int __exfat_rename(struct inode *old_parent_inode,
new_ei = EXFAT_I(new_inode);
if (new_ei->dir.dir == DIR_DELETED) {
- exfat_msg(sb, KERN_ERR,
- "abnormal access to deleted target dentry");
+ exfat_err(sb, "abnormal access to deleted target dentry");
goto out;
}
@@ -1431,8 +1417,7 @@ static int exfat_rename(struct inode *old_dir, struct dentry *old_dentry,
if (S_ISDIR(new_inode->i_mode))
drop_nlink(new_inode);
} else {
- exfat_msg(sb, KERN_WARNING,
- "abnormal access to an inode dropped");
+ exfat_warn(sb, "abnormal access to an inode dropped");
WARN_ON(new_inode->i_nlink == 0);
}
new_inode->i_ctime = EXFAT_I(new_inode)->i_crtime =
diff --git a/fs/exfat/nls.c b/fs/exfat/nls.c
index 6d1c3ae130ff..57b5a7a4d1f7 100644
--- a/fs/exfat/nls.c
+++ b/fs/exfat/nls.c
@@ -503,21 +503,17 @@ static int exfat_utf8_to_utf16(struct super_block *sb,
unilen = utf8s_to_utf16s(p_cstring, len, UTF16_HOST_ENDIAN,
(wchar_t *)uniname, MAX_NAME_LENGTH + 2);
if (unilen < 0) {
- exfat_msg(sb, KERN_ERR,
- "failed to %s (err : %d) nls len : %d",
- __func__, unilen, len);
+ exfat_err(sb, "failed to %s (err : %d) nls len : %d",
+ __func__, unilen, len);
return unilen;
}
if (unilen > MAX_NAME_LENGTH) {
- exfat_msg(sb, KERN_ERR,
- "failed to %s (estr:ENAMETOOLONG) nls len : %d, unilen : %d > %d",
- __func__, len, unilen, MAX_NAME_LENGTH);
+ exfat_err(sb, "failed to %s (estr:ENAMETOOLONG) nls len : %d, unilen : %d > %d",
+ __func__, len, unilen, MAX_NAME_LENGTH);
return -ENAMETOOLONG;
}
- p_uniname->name_len = unilen & 0xFF;
-
for (i = 0; i < unilen; i++) {
if (*uniname < 0x0020 ||
exfat_wstrchr(bad_uni_chars, *uniname))
@@ -529,7 +525,7 @@ static int exfat_utf8_to_utf16(struct super_block *sb,
*uniname = '\0';
p_uniname->name_len = unilen;
- p_uniname->name_hash = exfat_calc_chksum_2byte(upname, unilen << 1, 0,
+ p_uniname->name_hash = exfat_calc_chksum16(upname, unilen << 1, 0,
CS_DEFAULT);
if (p_lossy)
@@ -537,22 +533,9 @@ static int exfat_utf8_to_utf16(struct super_block *sb,
return unilen;
}
-#define PLANE_SIZE 0x00010000
#define SURROGATE_MASK 0xfffff800
#define SURROGATE_PAIR 0x0000d800
#define SURROGATE_LOW 0x00000400
-#define SURROGATE_BITS 0x000003ff
-
-unsigned short exfat_high_surrogate(unicode_t u)
-{
- return ((u - PLANE_SIZE) >> 10) + SURROGATE_PAIR;
-}
-
-unsigned short exfat_low_surrogate(unicode_t u)
-{
- return ((u - PLANE_SIZE) & SURROGATE_BITS) | SURROGATE_PAIR |
- SURROGATE_LOW;
-}
static int __exfat_utf16_to_nls(struct super_block *sb,
struct exfat_uni_name *p_uniname, unsigned char *p_cstring,
@@ -638,7 +621,7 @@ static int exfat_nls_to_ucs2(struct super_block *sb,
*uniname = '\0';
p_uniname->name_len = unilen;
- p_uniname->name_hash = exfat_calc_chksum_2byte(upname, unilen << 1, 0,
+ p_uniname->name_hash = exfat_calc_chksum16(upname, unilen << 1, 0,
CS_DEFAULT);
if (p_lossy)
@@ -670,7 +653,8 @@ static int exfat_load_upcase_table(struct super_block *sb,
{
struct exfat_sb_info *sbi = EXFAT_SB(sb);
unsigned int sect_size = sb->s_blocksize;
- unsigned int i, index = 0, checksum = 0;
+ unsigned int i, index = 0;
+ u32 chksum = 0;
int ret;
unsigned char skip = false;
unsigned short *upcase_table;
@@ -687,9 +671,8 @@ static int exfat_load_upcase_table(struct super_block *sb,
bh = sb_bread(sb, sector);
if (!bh) {
- exfat_msg(sb, KERN_ERR,
- "failed to read sector(0x%llx)\n",
- (unsigned long long)sector);
+ exfat_err(sb, "failed to read sector(0x%llx)\n",
+ (unsigned long long)sector);
ret = -EIO;
goto free_table;
}
@@ -697,13 +680,6 @@ static int exfat_load_upcase_table(struct super_block *sb,
for (i = 0; i < sect_size && index <= 0xFFFF; i += 2) {
unsigned short uni = get_unaligned_le16(bh->b_data + i);
- checksum = ((checksum & 1) ? 0x80000000 : 0) +
- (checksum >> 1) +
- *(((unsigned char *)bh->b_data) + i);
- checksum = ((checksum & 1) ? 0x80000000 : 0) +
- (checksum >> 1) +
- *(((unsigned char *)bh->b_data) + (i + 1));
-
if (skip) {
index += uni;
skip = false;
@@ -716,15 +692,15 @@ static int exfat_load_upcase_table(struct super_block *sb,
index++;
}
}
+ chksum = exfat_calc_chksum32(bh->b_data, i, chksum, CS_DEFAULT);
brelse(bh);
}
- if (index >= 0xFFFF && utbl_checksum == checksum)
+ if (index >= 0xFFFF && utbl_checksum == chksum)
return 0;
- exfat_msg(sb, KERN_ERR,
- "failed to load upcase table (idx : 0x%08x, chksum : 0x%08x, utbl_chksum : 0x%08x)\n",
- index, checksum, utbl_checksum);
+ exfat_err(sb, "failed to load upcase table (idx : 0x%08x, chksum : 0x%08x, utbl_chksum : 0x%08x)",
+ index, chksum, utbl_checksum);
ret = -EINVAL;
free_table:
exfat_free_upcase_table(sbi);
diff --git a/fs/exfat/super.c b/fs/exfat/super.c
index a846ff555656..e650e65536f8 100644
--- a/fs/exfat/super.c
+++ b/fs/exfat/super.c
@@ -49,7 +49,7 @@ static void exfat_put_super(struct super_block *sb)
sync_blockdev(sb->s_bdev);
exfat_set_vol_flags(sb, VOL_CLEAN);
exfat_free_bitmap(sbi);
- brelse(sbi->pbr_bh);
+ brelse(sbi->boot_bh);
mutex_unlock(&sbi->s_lock);
call_rcu(&sbi->rcu, exfat_delayed_free);
@@ -101,8 +101,8 @@ static int exfat_statfs(struct dentry *dentry, struct kstatfs *buf)
int exfat_set_vol_flags(struct super_block *sb, unsigned short new_flag)
{
struct exfat_sb_info *sbi = EXFAT_SB(sb);
- struct pbr64 *bpb = (struct pbr64 *)sbi->pbr_bh->b_data;
- bool sync = 0;
+ struct boot_sector *p_boot = (struct boot_sector *)sbi->boot_bh->b_data;
+ bool sync;
/* flags are not changed */
if (sbi->vol_flag == new_flag)
@@ -116,18 +116,18 @@ int exfat_set_vol_flags(struct super_block *sb, unsigned short new_flag)
if (sb_rdonly(sb))
return 0;
- bpb->bsx.vol_flags = cpu_to_le16(new_flag);
+ p_boot->vol_flags = cpu_to_le16(new_flag);
- if (new_flag == VOL_DIRTY && !buffer_dirty(sbi->pbr_bh))
+ if (new_flag == VOL_DIRTY && !buffer_dirty(sbi->boot_bh))
sync = true;
else
sync = false;
- set_buffer_uptodate(sbi->pbr_bh);
- mark_buffer_dirty(sbi->pbr_bh);
+ set_buffer_uptodate(sbi->boot_bh);
+ mark_buffer_dirty(sbi->boot_bh);
if (sync)
- sync_dirty_buffer(sbi->pbr_bh);
+ sync_dirty_buffer(sbi->boot_bh);
return 0;
}
@@ -273,9 +273,8 @@ static int exfat_parse_param(struct fs_context *fc, struct fs_parameter *param)
break;
case Opt_charset:
exfat_free_iocharset(sbi);
- opts->iocharset = kstrdup(param->string, GFP_KERNEL);
- if (!opts->iocharset)
- return -ENOMEM;
+ opts->iocharset = param->string;
+ param->string = NULL;
break;
case Opt_errors:
opts->errors = result.uint_32;
@@ -366,151 +365,208 @@ static int exfat_read_root(struct inode *inode)
return 0;
}
-static struct pbr *exfat_read_pbr_with_logical_sector(struct super_block *sb)
+static int exfat_calibrate_blocksize(struct super_block *sb, int logical_sect)
{
struct exfat_sb_info *sbi = EXFAT_SB(sb);
- struct pbr *p_pbr = (struct pbr *) (sbi->pbr_bh)->b_data;
- unsigned short logical_sect = 0;
-
- logical_sect = 1 << p_pbr->bsx.f64.sect_size_bits;
if (!is_power_of_2(logical_sect) ||
logical_sect < 512 || logical_sect > 4096) {
- exfat_msg(sb, KERN_ERR, "bogus logical sector size %u",
- logical_sect);
- return NULL;
+ exfat_err(sb, "bogus logical sector size %u", logical_sect);
+ return -EIO;
}
if (logical_sect < sb->s_blocksize) {
- exfat_msg(sb, KERN_ERR,
- "logical sector size too small for device (logical sector size = %u)",
- logical_sect);
- return NULL;
+ exfat_err(sb, "logical sector size too small for device (logical sector size = %u)",
+ logical_sect);
+ return -EIO;
}
if (logical_sect > sb->s_blocksize) {
- brelse(sbi->pbr_bh);
- sbi->pbr_bh = NULL;
+ brelse(sbi->boot_bh);
+ sbi->boot_bh = NULL;
if (!sb_set_blocksize(sb, logical_sect)) {
- exfat_msg(sb, KERN_ERR,
- "unable to set blocksize %u", logical_sect);
- return NULL;
+ exfat_err(sb, "unable to set blocksize %u",
+ logical_sect);
+ return -EIO;
}
- sbi->pbr_bh = sb_bread(sb, 0);
- if (!sbi->pbr_bh) {
- exfat_msg(sb, KERN_ERR,
- "unable to read boot sector (logical sector size = %lu)",
- sb->s_blocksize);
- return NULL;
+ sbi->boot_bh = sb_bread(sb, 0);
+ if (!sbi->boot_bh) {
+ exfat_err(sb, "unable to read boot sector (logical sector size = %lu)",
+ sb->s_blocksize);
+ return -EIO;
}
-
- p_pbr = (struct pbr *)sbi->pbr_bh->b_data;
}
- return p_pbr;
+ return 0;
}
-/* mount the file system volume */
-static int __exfat_fill_super(struct super_block *sb)
+static int exfat_read_boot_sector(struct super_block *sb)
{
- int ret;
- struct pbr *p_pbr;
- struct pbr64 *p_bpb;
+ struct boot_sector *p_boot;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
/* set block size to read super block */
sb_min_blocksize(sb, 512);
/* read boot sector */
- sbi->pbr_bh = sb_bread(sb, 0);
- if (!sbi->pbr_bh) {
- exfat_msg(sb, KERN_ERR, "unable to read boot sector");
+ sbi->boot_bh = sb_bread(sb, 0);
+ if (!sbi->boot_bh) {
+ exfat_err(sb, "unable to read boot sector");
return -EIO;
}
+ p_boot = (struct boot_sector *)sbi->boot_bh->b_data;
- /* PRB is read */
- p_pbr = (struct pbr *)sbi->pbr_bh->b_data;
-
- /* check the validity of PBR */
- if (le16_to_cpu((p_pbr->signature)) != PBR_SIGNATURE) {
- exfat_msg(sb, KERN_ERR, "invalid boot record signature");
- ret = -EINVAL;
- goto free_bh;
+ /* check the validity of BOOT */
+ if (le16_to_cpu((p_boot->signature)) != BOOT_SIGNATURE) {
+ exfat_err(sb, "invalid boot record signature");
+ return -EINVAL;
}
-
- /* check logical sector size */
- p_pbr = exfat_read_pbr_with_logical_sector(sb);
- if (!p_pbr) {
- ret = -EIO;
- goto free_bh;
+ if (memcmp(p_boot->fs_name, STR_EXFAT, BOOTSEC_FS_NAME_LEN)) {
+ exfat_err(sb, "invalid fs_name"); /* fs_name may unprintable */
+ return -EINVAL;
}
/*
- * res_zero field must be filled with zero to prevent mounting
+ * must_be_zero field must be filled with zero to prevent mounting
* from FAT volume.
*/
- if (memchr_inv(p_pbr->bpb.f64.res_zero, 0,
- sizeof(p_pbr->bpb.f64.res_zero))) {
- ret = -EINVAL;
- goto free_bh;
- }
+ if (memchr_inv(p_boot->must_be_zero, 0, sizeof(p_boot->must_be_zero)))
+ return -EINVAL;
- p_bpb = (struct pbr64 *)p_pbr;
- if (!p_bpb->bsx.num_fats) {
- exfat_msg(sb, KERN_ERR, "bogus number of FAT structure");
- ret = -EINVAL;
- goto free_bh;
+ if (p_boot->num_fats != 1 && p_boot->num_fats != 2) {
+ exfat_err(sb, "bogus number of FAT structure");
+ return -EINVAL;
}
- sbi->sect_per_clus = 1 << p_bpb->bsx.sect_per_clus_bits;
- sbi->sect_per_clus_bits = p_bpb->bsx.sect_per_clus_bits;
- sbi->cluster_size_bits = sbi->sect_per_clus_bits + sb->s_blocksize_bits;
+ sbi->sect_per_clus = 1 << p_boot->sect_per_clus_bits;
+ sbi->sect_per_clus_bits = p_boot->sect_per_clus_bits;
+ sbi->cluster_size_bits = p_boot->sect_per_clus_bits +
+ p_boot->sect_size_bits;
sbi->cluster_size = 1 << sbi->cluster_size_bits;
- sbi->num_FAT_sectors = le32_to_cpu(p_bpb->bsx.fat_length);
- sbi->FAT1_start_sector = le32_to_cpu(p_bpb->bsx.fat_offset);
- sbi->FAT2_start_sector = p_bpb->bsx.num_fats == 1 ?
- sbi->FAT1_start_sector :
- sbi->FAT1_start_sector + sbi->num_FAT_sectors;
- sbi->data_start_sector = le32_to_cpu(p_bpb->bsx.clu_offset);
- sbi->num_sectors = le64_to_cpu(p_bpb->bsx.vol_length);
+ sbi->num_FAT_sectors = le32_to_cpu(p_boot->fat_length);
+ sbi->FAT1_start_sector = le32_to_cpu(p_boot->fat_offset);
+ sbi->FAT2_start_sector = le32_to_cpu(p_boot->fat_offset);
+ if (p_boot->num_fats == 2)
+ sbi->FAT2_start_sector += sbi->num_FAT_sectors;
+ sbi->data_start_sector = le32_to_cpu(p_boot->clu_offset);
+ sbi->num_sectors = le64_to_cpu(p_boot->vol_length);
/* because the cluster index starts with 2 */
- sbi->num_clusters = le32_to_cpu(p_bpb->bsx.clu_count) +
+ sbi->num_clusters = le32_to_cpu(p_boot->clu_count) +
EXFAT_RESERVED_CLUSTERS;
- sbi->root_dir = le32_to_cpu(p_bpb->bsx.root_cluster);
+ sbi->root_dir = le32_to_cpu(p_boot->root_cluster);
sbi->dentries_per_clu = 1 <<
(sbi->cluster_size_bits - DENTRY_SIZE_BITS);
- sbi->vol_flag = le16_to_cpu(p_bpb->bsx.vol_flags);
+ sbi->vol_flag = le16_to_cpu(p_boot->vol_flags);
sbi->clu_srch_ptr = EXFAT_FIRST_CLUSTER;
sbi->used_clusters = EXFAT_CLUSTERS_UNTRACKED;
- if (le16_to_cpu(p_bpb->bsx.vol_flags) & VOL_DIRTY) {
- sbi->vol_flag |= VOL_DIRTY;
- exfat_msg(sb, KERN_WARNING,
- "Volume was not properly unmounted. Some data may be corrupt. Please run fsck.");
+ /* check consistencies */
+ if (sbi->num_FAT_sectors << p_boot->sect_size_bits <
+ sbi->num_clusters * 4) {
+ exfat_err(sb, "bogus fat length");
+ return -EINVAL;
}
+ if (sbi->data_start_sector <
+ sbi->FAT1_start_sector + sbi->num_FAT_sectors * p_boot->num_fats) {
+ exfat_err(sb, "bogus data start sector");
+ return -EINVAL;
+ }
+ if (sbi->vol_flag & VOL_DIRTY)
+ exfat_warn(sb, "Volume was not properly unmounted. Some data may be corrupt. Please run fsck.");
+ if (sbi->vol_flag & ERR_MEDIUM)
+ exfat_warn(sb, "Medium has reported failures. Some data may be lost.");
/* exFAT file size is limited by a disk volume size */
sb->s_maxbytes = (u64)(sbi->num_clusters - EXFAT_RESERVED_CLUSTERS) <<
sbi->cluster_size_bits;
+ /* check logical sector size */
+ if (exfat_calibrate_blocksize(sb, 1 << p_boot->sect_size_bits))
+ return -EIO;
+
+ return 0;
+}
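The new consistency checks are plain arithmetic bounds. A worked example with assumed values: for 512-byte sectors (sect_size_bits = 9), fat_length = 2048 and num_clusters = 262144, the FAT spans 2048 << 9 = 1 MiB, exactly the 262144 * 4 bytes of FAT entries required, so the first check passes; the second check then insists that clu_offset lie at or beyond fat_offset + num_fats * 2048 sectors, i.e. that the data area cannot overlap the FAT(s).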
+
+static int exfat_verify_boot_region(struct super_block *sb)
+{
+ struct buffer_head *bh = NULL;
+ u32 chksum = 0;
+ __le32 *p_sig, *p_chksum;
+ int sn, i;
+
+ /* read boot sector sub-regions */
+ for (sn = 0; sn < 11; sn++) {
+ bh = sb_bread(sb, sn);
+ if (!bh)
+ return -EIO;
+
+ if (sn != 0 && sn <= 8) {
+ /* extended boot sector sub-regions */
+ p_sig = (__le32 *)&bh->b_data[sb->s_blocksize - 4];
+ if (le32_to_cpu(*p_sig) != EXBOOT_SIGNATURE)
+ exfat_warn(sb, "Invalid exboot-signature(sector = %d): 0x%08x",
+ sn, le32_to_cpu(*p_sig));
+ }
+
+ chksum = exfat_calc_chksum32(bh->b_data, sb->s_blocksize,
+ chksum, sn ? CS_DEFAULT : CS_BOOT_SECTOR);
+ brelse(bh);
+ }
+
+ /* boot checksum sub-regions */
+ bh = sb_bread(sb, sn);
+ if (!bh)
+ return -EIO;
+
+ for (i = 0; i < sb->s_blocksize; i += sizeof(u32)) {
+ p_chksum = (__le32 *)&bh->b_data[i];
+ if (le32_to_cpu(*p_chksum) != chksum) {
+ exfat_err(sb, "Invalid boot checksum (boot checksum : 0x%08x, checksum : 0x%08x)",
+ le32_to_cpu(*p_chksum), chksum);
+ brelse(bh);
+ return -EINVAL;
+ }
+ }
+ brelse(bh);
+ return 0;
+}
+
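exfat_verify_boot_region() recomputes the checksum that every u32 slot of
sector 11 must repeat. A minimal user-space sketch of that fold, assuming
the exFAT BootChecksum algorithm (rotate-right-by-one then add over sectors
0-10, with offsets 106, 107 and 112 of the boot sector excluded because
VolumeFlags and PercentInUse are allowed to change after formatting):

#include <stdint.h>
#include <stdio.h>

static uint32_t exfat_boot_chksum(const uint8_t *region, size_t sector_size)
{
	uint32_t sum = 0;
	size_t i, len = 11 * sector_size;

	for (i = 0; i < len; i++) {
		/* the skipped offsets fall inside sector 0 only */
		if (i == 106 || i == 107 || i == 112)
			continue;
		sum = ((sum << 31) | (sum >> 1)) + region[i];
	}
	return sum;
}

int main(void)
{
	uint8_t region[11 * 512] = { 0 };

	printf("checksum of zeroed region: 0x%08x\n",
	       exfat_boot_chksum(region, 512));
	return 0;
}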
+/* mount the file system volume */
+static int __exfat_fill_super(struct super_block *sb)
+{
+ int ret;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+
+ ret = exfat_read_boot_sector(sb);
+ if (ret) {
+ exfat_err(sb, "failed to read boot sector");
+ goto free_bh;
+ }
+
+ ret = exfat_verify_boot_region(sb);
+ if (ret) {
+ exfat_err(sb, "invalid boot region");
+ goto free_bh;
+ }
+
ret = exfat_create_upcase_table(sb);
if (ret) {
- exfat_msg(sb, KERN_ERR, "failed to load upcase table");
+ exfat_err(sb, "failed to load upcase table");
goto free_bh;
}
ret = exfat_load_bitmap(sb);
if (ret) {
- exfat_msg(sb, KERN_ERR, "failed to load alloc-bitmap");
+ exfat_err(sb, "failed to load alloc-bitmap");
goto free_upcase_table;
}
ret = exfat_count_used_clusters(sb, &sbi->used_clusters);
if (ret) {
- exfat_msg(sb, KERN_ERR, "failed to scan clusters");
+ exfat_err(sb, "failed to scan clusters");
goto free_alloc_bitmap;
}
@@ -521,7 +577,7 @@ free_alloc_bitmap:
free_upcase_table:
exfat_free_upcase_table(sbi);
free_bh:
- brelse(sbi->pbr_bh);
+ brelse(sbi->boot_bh);
return ret;
}
@@ -539,8 +595,7 @@ static int exfat_fill_super(struct super_block *sb, struct fs_context *fc)
struct request_queue *q = bdev_get_queue(sb->s_bdev);
if (!blk_queue_discard(q)) {
- exfat_msg(sb, KERN_WARNING,
- "mounting with \"discard\" option, but the device does not support discard");
+ exfat_warn(sb, "mounting with \"discard\" option, but the device does not support discard");
opts->discard = 0;
}
}
@@ -555,7 +610,7 @@ static int exfat_fill_super(struct super_block *sb, struct fs_context *fc)
err = __exfat_fill_super(sb);
if (err) {
- exfat_msg(sb, KERN_ERR, "failed to recognize exfat type");
+ exfat_err(sb, "failed to recognize exfat type");
goto check_nls_io;
}
@@ -567,8 +622,8 @@ static int exfat_fill_super(struct super_block *sb, struct fs_context *fc)
else {
sbi->nls_io = load_nls(sbi->options.iocharset);
if (!sbi->nls_io) {
- exfat_msg(sb, KERN_ERR, "IO charset %s not found",
- sbi->options.iocharset);
+ exfat_err(sb, "IO charset %s not found",
+ sbi->options.iocharset);
err = -EINVAL;
goto free_table;
}
@@ -581,7 +636,7 @@ static int exfat_fill_super(struct super_block *sb, struct fs_context *fc)
root_inode = new_inode(sb);
if (!root_inode) {
- exfat_msg(sb, KERN_ERR, "failed to allocate root inode.");
+ exfat_err(sb, "failed to allocate root inode");
err = -ENOMEM;
goto free_table;
}
@@ -590,7 +645,7 @@ static int exfat_fill_super(struct super_block *sb, struct fs_context *fc)
inode_set_iversion(root_inode, 1);
err = exfat_read_root(root_inode);
if (err) {
- exfat_msg(sb, KERN_ERR, "failed to initialize root inode.");
+ exfat_err(sb, "failed to initialize root inode");
goto put_inode;
}
@@ -599,7 +654,7 @@ static int exfat_fill_super(struct super_block *sb, struct fs_context *fc)
sb->s_root = d_make_root(root_inode);
if (!sb->s_root) {
- exfat_msg(sb, KERN_ERR, "failed to get the root dentry");
+ exfat_err(sb, "failed to get the root dentry");
err = -ENOMEM;
goto put_inode;
}
@@ -613,7 +668,7 @@ put_inode:
free_table:
exfat_free_upcase_table(sbi);
exfat_free_bitmap(sbi);
- brelse(sbi->pbr_bh);
+ brelse(sbi->boot_bh);
check_nls_io:
unload_nls(sbi->nls_io);
@@ -630,7 +685,12 @@ static int exfat_get_tree(struct fs_context *fc)
static void exfat_free(struct fs_context *fc)
{
- kfree(fc->s_fs_info);
+ struct exfat_sb_info *sbi = fc->s_fs_info;
+
+ if (sbi) {
+ exfat_free_iocharset(sbi);
+ kfree(sbi);
+ }
}
static const struct fs_context_operations exfat_context_ops = {
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
index 39c4772e96c9..60378ddf1424 100644
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -79,7 +79,7 @@ out_unlock:
/*
* The lock ordering for ext2 DAX fault paths is:
*
- * mmap_sem (MM)
+ * mmap_lock (MM)
* sb_start_pagefault (vfs, freeze)
* ext2_inode_info->dax_sem
* address_space->i_mmap_rwsem or page_lock (mutually exclusive in DAX)
@@ -196,9 +196,7 @@ const struct file_operations ext2_file_operations = {
};
const struct inode_operations ext2_file_inode_operations = {
-#ifdef CONFIG_EXT2_FS_XATTR
.listxattr = ext2_listxattr,
-#endif
.getattr = ext2_getattr,
.setattr = ext2_setattr,
.get_acl = ext2_get_acl,
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 2875c0a705b5..c8b371c82b4f 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -36,6 +36,7 @@
#include <linux/iomap.h>
#include <linux/namei.h>
#include <linux/uio.h>
+#include <linux/fiemap.h>
#include "ext2.h"
#include "acl.h"
#include "xattr.h"
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index ccfbbf59e2fc..ba3e3e075891 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -136,9 +136,7 @@ static int ext2_mknod (struct inode * dir, struct dentry *dentry, umode_t mode,
err = PTR_ERR(inode);
if (!IS_ERR(inode)) {
init_special_inode(inode, inode->i_mode, rdev);
-#ifdef CONFIG_EXT2_FS_XATTR
inode->i_op = &ext2_special_inode_operations;
-#endif
mark_inode_dirty(inode);
err = ext2_add_nondir(dentry, inode);
}
@@ -413,9 +411,7 @@ const struct inode_operations ext2_dir_inode_operations = {
.rmdir = ext2_rmdir,
.mknod = ext2_mknod,
.rename = ext2_rename,
-#ifdef CONFIG_EXT2_FS_XATTR
.listxattr = ext2_listxattr,
-#endif
.getattr = ext2_getattr,
.setattr = ext2_setattr,
.get_acl = ext2_get_acl,
@@ -424,9 +420,7 @@ const struct inode_operations ext2_dir_inode_operations = {
};
const struct inode_operations ext2_special_inode_operations = {
-#ifdef CONFIG_EXT2_FS_XATTR
.listxattr = ext2_listxattr,
-#endif
.getattr = ext2_getattr,
.setattr = ext2_setattr,
.get_acl = ext2_get_acl,
diff --git a/fs/ext2/symlink.c b/fs/ext2/symlink.c
index 00cdb8679486..948d3a441403 100644
--- a/fs/ext2/symlink.c
+++ b/fs/ext2/symlink.c
@@ -25,16 +25,12 @@ const struct inode_operations ext2_symlink_inode_operations = {
.get_link = page_get_link,
.getattr = ext2_getattr,
.setattr = ext2_setattr,
-#ifdef CONFIG_EXT2_FS_XATTR
.listxattr = ext2_listxattr,
-#endif
};
const struct inode_operations ext2_fast_symlink_inode_operations = {
.get_link = simple_get_link,
.getattr = ext2_getattr,
.setattr = ext2_setattr,
-#ifdef CONFIG_EXT2_FS_XATTR
.listxattr = ext2_listxattr,
-#endif
};
diff --git a/fs/ext2/xattr.h b/fs/ext2/xattr.h
index 16272e6ddcf4..7925f596e8e2 100644
--- a/fs/ext2/xattr.h
+++ b/fs/ext2/xattr.h
@@ -100,6 +100,7 @@ static inline void ext2_xattr_destroy_cache(struct mb_cache *cache)
}
#define ext2_xattr_handlers NULL
+#define ext2_listxattr NULL
# endif /* CONFIG_EXT2_FS_XATTR */
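Defining ext2_listxattr to NULL when CONFIG_EXT2_FS_XATTR is disabled is
what lets the #ifdef blocks disappear from every inode_operations table
above: the VFS already treats a NULL ->listxattr as "not supported". The
same trick in a compilable user-space miniature (hypothetical names):

#include <stdio.h>

struct file_ops {
	int (*listxattr)(const char *name);
};

#ifdef WITH_XATTR
static int my_listxattr(const char *name)
{
	printf("listing xattrs of %s\n", name);
	return 0;
}
#else
#define my_listxattr NULL	/* table entry compiles away to NULL */
#endif

static const struct file_ops ops = {
	.listxattr = my_listxattr,	/* no #ifdef at the use site */
};

int main(void)
{
	if (ops.listxattr)
		return ops.listxattr("file");
	fprintf(stderr, "xattrs not supported\n");
	return 0;
}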
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index 2a592e38cdfe..1afa5a4bcb5f 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -99,13 +99,13 @@ config EXT4_DEBUG
Enables run-time debugging support for the ext4 filesystem.
If you select Y here, then you will be able to turn on debugging
- with a command such as:
- echo 1 > /sys/module/ext4/parameters/mballoc_debug
+ using dynamic debug control for mb_debug() / ext_debug() msgs.
config EXT4_KUNIT_TESTS
- tristate "KUnit tests for ext4"
+ tristate "KUnit tests for ext4" if !KUNIT_ALL_TESTS
select EXT4_FS
depends on KUNIT
+ default KUNIT_ALL_TESTS
help
This builds the ext4 KUnit tests.
diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
index 8c7bbf3e566d..76f634d185f1 100644
--- a/fs/ext4/acl.c
+++ b/fs/ext4/acl.c
@@ -215,9 +215,8 @@ __ext4_set_acl(handle_t *handle, struct inode *inode, int type,
value, size, xattr_flags);
kfree(value);
- if (!error) {
+ if (!error)
set_cached_acl(inode, type, acl);
- }
return error;
}
@@ -256,7 +255,7 @@ retry:
if (!error && update_mode) {
inode->i_mode = mode;
inode->i_ctime = current_time(inode);
- ext4_mark_inode_dirty(handle, inode);
+ error = ext4_mark_inode_dirty(handle, inode);
}
out_stop:
ext4_journal_stop(handle);
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index a32e5f7b5385..1ba46d87cdf1 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -903,10 +903,11 @@ ext4_fsblk_t ext4_inode_to_goal_block(struct inode *inode)
return bg_start;
if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
- colour = (current->pid % 16) *
+ colour = (task_pid_nr(current) % 16) *
(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
else
- colour = (current->pid % 16) * ((last_block - bg_start) / 16);
+ colour = (task_pid_nr(current) % 16) *
+ ((last_block - bg_start) / 16);
return bg_start + colour;
}
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 15b062efcff1..b08841f70b69 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -36,6 +36,7 @@
#include <crypto/hash.h>
#include <linux/falloc.h>
#include <linux/percpu-rwsem.h>
+#include <linux/fiemap.h>
#ifdef __KERNEL__
#include <linux/compat.h>
#endif
@@ -80,14 +81,22 @@
#define ext4_debug(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#endif
+ /*
+ * Turn on EXT_DEBUG to enable ext4_ext_show_path/leaf/move in extents.c
+ */
+#define EXT_DEBUG__
+
/*
- * Turn on EXT_DEBUG to get lots of info about extents operations.
+ * Dynamic printk for controlled extents debugging.
*/
-#define EXT_DEBUG__
-#ifdef EXT_DEBUG
-#define ext_debug(fmt, ...) printk(fmt, ##__VA_ARGS__)
+#ifdef CONFIG_EXT4_DEBUG
+#define ext_debug(ino, fmt, ...) \
+ pr_debug("[%s/%d] EXT4-fs (%s): ino %lu: (%s, %d): %s:" fmt, \
+ current->comm, task_pid_nr(current), \
+ ino->i_sb->s_id, ino->i_ino, __FILE__, __LINE__, \
+ __func__, ##__VA_ARGS__)
#else
-#define ext_debug(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
+#define ext_debug(ino, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#endif
/* data type for block offset of block group */
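With ext_debug() now routed through pr_debug(), the old EXT_DEBUG printk
flood becomes opt-in per call site. On a kernel built with
CONFIG_DYNAMIC_DEBUG the messages can be enabled at runtime, e.g.
"echo 'file extents.c +p' > /sys/kernel/debug/dynamic_debug/control"
(assuming debugfs is mounted at /sys/kernel/debug); without dynamic debug,
pr_debug() compiles to no_printk() unless DEBUG is defined.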
@@ -142,6 +151,8 @@ enum SHIFT_DIRECTION {
#define EXT4_MB_USE_ROOT_BLOCKS 0x1000
/* Use blocks from reserved pool */
#define EXT4_MB_USE_RESERVED 0x2000
+/* Do strict check for free blocks while retrying block allocation */
+#define EXT4_MB_STRICT_CHECK 0x4000
struct ext4_allocation_request {
/* target inode for block we're allocating */
@@ -171,10 +182,10 @@ struct ext4_allocation_request {
* well as to store the information returned by ext4_map_blocks(). It
* takes less room on the stack than a struct buffer_head.
*/
-#define EXT4_MAP_NEW (1 << BH_New)
-#define EXT4_MAP_MAPPED (1 << BH_Mapped)
-#define EXT4_MAP_UNWRITTEN (1 << BH_Unwritten)
-#define EXT4_MAP_BOUNDARY (1 << BH_Boundary)
+#define EXT4_MAP_NEW BIT(BH_New)
+#define EXT4_MAP_MAPPED BIT(BH_Mapped)
+#define EXT4_MAP_UNWRITTEN BIT(BH_Unwritten)
+#define EXT4_MAP_BOUNDARY BIT(BH_Boundary)
#define EXT4_MAP_FLAGS (EXT4_MAP_NEW | EXT4_MAP_MAPPED |\
EXT4_MAP_UNWRITTEN | EXT4_MAP_BOUNDARY)
@@ -417,7 +428,7 @@ struct flex_groups {
/* 0x00400000 was formerly EXT4_EOFBLOCKS_FL */
#define EXT4_INLINE_DATA_FL 0x10000000 /* Inode has inline data. */
#define EXT4_PROJINHERIT_FL 0x20000000 /* Create with parents projid */
-#define EXT4_CASEFOLD_FL 0x40000000 /* Casefolded file */
+#define EXT4_CASEFOLD_FL 0x40000000 /* Casefolded directory */
#define EXT4_RESERVED_FL 0x80000000 /* reserved for ext4 lib */
#define EXT4_FL_USER_VISIBLE 0x705BDFFF /* User visible flags */
@@ -490,6 +501,7 @@ enum {
/* 22 was formerly EXT4_INODE_EOFBLOCKS */
EXT4_INODE_INLINE_DATA = 28, /* Data in inode. */
EXT4_INODE_PROJINHERIT = 29, /* Create with parents projid */
+ EXT4_INODE_CASEFOLD = 30, /* Casefolded directory */
EXT4_INODE_RESERVED = 31, /* reserved for ext4 lib */
};
@@ -535,6 +547,7 @@ static inline void ext4_check_flag_values(void)
CHECK_FLAG_VALUE(EA_INODE);
CHECK_FLAG_VALUE(INLINE_DATA);
CHECK_FLAG_VALUE(PROJINHERIT);
+ CHECK_FLAG_VALUE(CASEFOLD);
CHECK_FLAG_VALUE(RESERVED);
}
@@ -609,8 +622,6 @@ enum {
#define EXT4_GET_BLOCKS_METADATA_NOFAIL 0x0020
/* Don't normalize allocation size (used for fallocate) */
#define EXT4_GET_BLOCKS_NO_NORMALIZE 0x0040
- /* Request will not result in inode size update (user for fallocate) */
-#define EXT4_GET_BLOCKS_KEEP_SIZE 0x0080
/* Convert written extents to unwritten */
#define EXT4_GET_BLOCKS_CONVERT_UNWRITTEN 0x0100
/* Write zeros to newly created written extents */
@@ -632,6 +643,7 @@ enum {
*/
#define EXT4_EX_NOCACHE 0x40000000
#define EXT4_EX_FORCE_CACHE 0x20000000
+#define EXT4_EX_NOFAIL 0x10000000
/*
* Flags used by ext4_free_blocks
@@ -2051,7 +2063,7 @@ struct ext4_dir_entry_2 {
__le32 inode; /* Inode number */
__le16 rec_len; /* Directory entry length */
__u8 name_len; /* Name length */
- __u8 file_type;
+ __u8 file_type; /* See file type macros EXT4_FT_* below */
char name[EXT4_NAME_LEN]; /* File name */
};
@@ -3354,7 +3366,7 @@ struct ext4_extent;
*/
#define EXT_MAX_BLOCKS 0xffffffff
-extern int ext4_ext_tree_init(handle_t *handle, struct inode *);
+extern void ext4_ext_tree_init(handle_t *handle, struct inode *inode);
extern int ext4_ext_index_trans_blocks(struct inode *inode, int extents);
extern int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map, int flags);
diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h
index 1c216fcc202a..44e59881a1f0 100644
--- a/fs/ext4/ext4_extents.h
+++ b/fs/ext4/ext4_extents.h
@@ -170,10 +170,13 @@ struct partial_cluster {
(EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_entries) - 1)
#define EXT_LAST_INDEX(__hdr__) \
(EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_entries) - 1)
-#define EXT_MAX_EXTENT(__hdr__) \
- (EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1)
+#define EXT_MAX_EXTENT(__hdr__) \
+ ((le16_to_cpu((__hdr__)->eh_max)) ? \
+ ((EXT_FIRST_EXTENT((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1)) \
+ : 0)
#define EXT_MAX_INDEX(__hdr__) \
- (EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1)
+ ((le16_to_cpu((__hdr__)->eh_max)) ? \
+ ((EXT_FIRST_INDEX((__hdr__)) + le16_to_cpu((__hdr__)->eh_max) - 1)) : 0)
static inline struct ext4_extent_header *ext_inode_hdr(struct inode *inode)
{
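The eh_max guard matters for corrupted (e.g. zeroed) extent headers: with
eh_max == 0, "first + eh_max - 1" forms a pointer one element before the
array, which is undefined behaviour before any comparison even runs.
Returning 0 makes the usual "ex <= EXT_MAX_EXTENT(hdr)" walks terminate
immediately instead. A toy demonstration with simplified structures (not
the on-disk ones):

#include <stdint.h>
#include <stdio.h>

struct hdr { uint16_t eh_max; };
struct ext { uint32_t ee_block; };

#define MAX_EXT(h, first) \
	((h)->eh_max ? (first) + (h)->eh_max - 1 : (struct ext *)0)

int main(void)
{
	struct hdr h = { .eh_max = 0 };		/* zeroed == corrupted */
	struct ext extents[4];
	struct ext *ex = &extents[0];

	while (ex <= MAX_EXT(&h, &extents[0])) {	/* never true now */
		puts("would walk a bogus entry");
		ex++;
	}
	puts("loop safely skipped");
	return 0;
}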
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index 4b9002f0e84c..00dc668e052b 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -222,7 +222,10 @@ ext4_mark_iloc_dirty(handle_t *handle,
int ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
struct ext4_iloc *iloc);
-int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode);
+#define ext4_mark_inode_dirty(__h, __i) \
+ __ext4_mark_inode_dirty((__h), (__i), __func__, __LINE__)
+int __ext4_mark_inode_dirty(handle_t *handle, struct inode *inode,
+ const char *func, unsigned int line);
int ext4_expand_extra_isize(struct inode *inode,
unsigned int new_extra_isize,
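Turning ext4_mark_inode_dirty() into a macro over __ext4_mark_inode_dirty()
is the usual caller-site capture pattern: __func__ and __LINE__ expand
where the macro is used, so diagnostics point at the caller rather than at
the helper. In miniature (hypothetical names, not the ext4 functions):

#include <stdio.h>

static int __mark_dirty(int handle, const char *func, unsigned int line)
{
	if (handle < 0) {
		fprintf(stderr, "mark_dirty failed at %s:%u\n", func, line);
		return -1;
	}
	return 0;
}

#define mark_dirty(h) __mark_dirty((h), __func__, __LINE__)

int main(void)
{
	return mark_dirty(-1) ? 1 : 0;	/* reports this line, not the helper */
}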
@@ -335,12 +338,6 @@ static inline handle_t *__ext4_journal_start(struct inode *inode,
handle_t *__ext4_journal_start_reserved(handle_t *handle, unsigned int line,
int type);
-static inline void ext4_journal_free_reserved(handle_t *handle)
-{
- if (ext4_handle_valid(handle))
- jbd2_journal_free_reserved(handle);
-}
-
static inline handle_t *ext4_journal_current_handle(void)
{
return journal_current_handle();
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 2b4b94542e34..7d088ff1e902 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -297,11 +297,14 @@ ext4_force_split_extent_at(handle_t *handle, struct inode *inode,
{
struct ext4_ext_path *path = *ppath;
int unwritten = ext4_ext_is_unwritten(path[path->p_depth].p_ext);
+ int flags = EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO;
+
+ if (nofail)
+ flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL | EXT4_EX_NOFAIL;
return ext4_split_extent_at(handle, inode, ppath, lblk, unwritten ?
EXT4_EXT_MARK_UNWRIT1|EXT4_EXT_MARK_UNWRIT2 : 0,
- EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO |
- (nofail ? EXT4_GET_BLOCKS_METADATA_NOFAIL:0));
+ flags);
}
static int
@@ -487,8 +490,12 @@ __read_extent_tree_block(const char *function, unsigned int line,
{
struct buffer_head *bh;
int err;
+ gfp_t gfp_flags = __GFP_MOVABLE | GFP_NOFS;
+
+ if (flags & EXT4_EX_NOFAIL)
+ gfp_flags |= __GFP_NOFAIL;
- bh = sb_getblk_gfp(inode->i_sb, pblk, __GFP_MOVABLE | GFP_NOFS);
+ bh = sb_getblk_gfp(inode->i_sb, pblk, gfp_flags);
if (unlikely(!bh))
return ERR_PTR(-ENOMEM);
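EXT4_EX_NOFAIL lets callers that cannot tolerate allocation failure (the
truncate/remove-space paths below) fold __GFP_NOFAIL into the gfp mask,
which makes the kernel allocator retry indefinitely instead of returning
NULL. A loose user-space analog of the plumbing, where a retry loop stands
in for __GFP_NOFAIL:

#include <stdlib.h>

static void *alloc_maybe_nofail(size_t sz, int nofail)
{
	void *p = malloc(sz);

	while (!p && nofail)	/* __GFP_NOFAIL analog: never give up */
		p = malloc(sz);
	return p;
}

int main(void)
{
	char *buf = alloc_maybe_nofail(4096, 1);

	free(buf);
	return 0;
}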
@@ -600,22 +607,22 @@ static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
int k, l = path->p_depth;
- ext_debug("path:");
+ ext_debug(inode, "path:");
for (k = 0; k <= l; k++, path++) {
if (path->p_idx) {
- ext_debug(" %d->%llu",
+ ext_debug(inode, " %d->%llu",
le32_to_cpu(path->p_idx->ei_block),
ext4_idx_pblock(path->p_idx));
} else if (path->p_ext) {
- ext_debug(" %d:[%d]%d:%llu ",
+ ext_debug(inode, " %d:[%d]%d:%llu ",
le32_to_cpu(path->p_ext->ee_block),
ext4_ext_is_unwritten(path->p_ext),
ext4_ext_get_actual_len(path->p_ext),
ext4_ext_pblock(path->p_ext));
} else
- ext_debug(" []");
+ ext_debug(inode, " []");
}
- ext_debug("\n");
+ ext_debug(inode, "\n");
}
static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
@@ -631,14 +638,14 @@ static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
eh = path[depth].p_hdr;
ex = EXT_FIRST_EXTENT(eh);
- ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);
+ ext_debug(inode, "Displaying leaf extents\n");
for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
- ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
+ ext_debug(inode, "%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
ext4_ext_is_unwritten(ex),
ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
}
- ext_debug("\n");
+ ext_debug(inode, "\n");
}
static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
@@ -651,10 +658,9 @@ static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
struct ext4_extent_idx *idx;
idx = path[level].p_idx;
while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
- ext_debug("%d: move %d:%llu in new index %llu\n", level,
- le32_to_cpu(idx->ei_block),
- ext4_idx_pblock(idx),
- newblock);
+ ext_debug(inode, "%d: move %d:%llu in new index %llu\n",
+ level, le32_to_cpu(idx->ei_block),
+ ext4_idx_pblock(idx), newblock);
idx++;
}
@@ -663,7 +669,7 @@ static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
ex = path[depth].p_ext;
while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
- ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
+ ext_debug(inode, "move %d:%llu:[%d]%d in new leaf %llu\n",
le32_to_cpu(ex->ee_block),
ext4_ext_pblock(ex),
ext4_ext_is_unwritten(ex),
@@ -707,7 +713,7 @@ ext4_ext_binsearch_idx(struct inode *inode,
struct ext4_extent_idx *r, *l, *m;
- ext_debug("binsearch for %u(idx): ", block);
+ ext_debug(inode, "binsearch for %u(idx): ", block);
l = EXT_FIRST_INDEX(eh) + 1;
r = EXT_LAST_INDEX(eh);
@@ -717,13 +723,13 @@ ext4_ext_binsearch_idx(struct inode *inode,
r = m - 1;
else
l = m + 1;
- ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
- m, le32_to_cpu(m->ei_block),
- r, le32_to_cpu(r->ei_block));
+ ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l,
+ le32_to_cpu(l->ei_block), m, le32_to_cpu(m->ei_block),
+ r, le32_to_cpu(r->ei_block));
}
path->p_idx = l - 1;
- ext_debug(" -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
+ ext_debug(inode, " -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
ext4_idx_pblock(path->p_idx));
#ifdef CHECK_BINSEARCH
@@ -774,7 +780,7 @@ ext4_ext_binsearch(struct inode *inode,
return;
}
- ext_debug("binsearch for %u: ", block);
+ ext_debug(inode, "binsearch for %u: ", block);
l = EXT_FIRST_EXTENT(eh) + 1;
r = EXT_LAST_EXTENT(eh);
@@ -785,13 +791,13 @@ ext4_ext_binsearch(struct inode *inode,
r = m - 1;
else
l = m + 1;
- ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
- m, le32_to_cpu(m->ee_block),
- r, le32_to_cpu(r->ee_block));
+ ext_debug(inode, "%p(%u):%p(%u):%p(%u) ", l,
+ le32_to_cpu(l->ee_block), m, le32_to_cpu(m->ee_block),
+ r, le32_to_cpu(r->ee_block));
}
path->p_ext = l - 1;
- ext_debug(" -> %d:%llu:[%d]%d ",
+ ext_debug(inode, " -> %d:%llu:[%d]%d ",
le32_to_cpu(path->p_ext->ee_block),
ext4_ext_pblock(path->p_ext),
ext4_ext_is_unwritten(path->p_ext),
@@ -816,7 +822,7 @@ ext4_ext_binsearch(struct inode *inode,
}
-int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
+void ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
struct ext4_extent_header *eh;
@@ -826,7 +832,6 @@ int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
eh->eh_magic = EXT4_EXT_MAGIC;
eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
ext4_mark_inode_dirty(handle, inode);
- return 0;
}
struct ext4_ext_path *
@@ -838,6 +843,10 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block,
struct ext4_ext_path *path = orig_path ? *orig_path : NULL;
short int depth, i, ppos = 0;
int ret;
+ gfp_t gfp_flags = GFP_NOFS;
+
+ if (flags & EXT4_EX_NOFAIL)
+ gfp_flags |= __GFP_NOFAIL;
eh = ext_inode_hdr(inode);
depth = ext_depth(inode);
@@ -858,7 +867,7 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block,
if (!path) {
/* account possible depth increase */
path = kcalloc(depth + 2, sizeof(struct ext4_ext_path),
- GFP_NOFS);
+ gfp_flags);
if (unlikely(!path))
return ERR_PTR(-ENOMEM);
path[0].p_maxdepth = depth + 1;
@@ -871,7 +880,7 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block,
ext4_cache_extents(inode, eh);
/* walk through the tree */
while (i) {
- ext_debug("depth %d: num %d, max %d\n",
+ ext_debug(inode, "depth %d: num %d, max %d\n",
ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
ext4_ext_binsearch_idx(inode, path + ppos, block);
@@ -948,18 +957,20 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
/* insert after */
- ext_debug("insert new index %d after: %llu\n", logical, ptr);
+ ext_debug(inode, "insert new index %d after: %llu\n",
+ logical, ptr);
ix = curp->p_idx + 1;
} else {
/* insert before */
- ext_debug("insert new index %d before: %llu\n", logical, ptr);
+ ext_debug(inode, "insert new index %d before: %llu\n",
+ logical, ptr);
ix = curp->p_idx;
}
len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
BUG_ON(len < 0);
if (len > 0) {
- ext_debug("insert new index %d: "
+ ext_debug(inode, "insert new index %d: "
"move %d indices from 0x%p to 0x%p\n",
logical, len, ix, ix + 1);
memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
@@ -1008,9 +1019,13 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
ext4_fsblk_t newblock, oldblock;
__le32 border;
ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
+ gfp_t gfp_flags = GFP_NOFS;
int err = 0;
size_t ext_size = 0;
+ if (flags & EXT4_EX_NOFAIL)
+ gfp_flags |= __GFP_NOFAIL;
+
/* make decision: where to split? */
/* FIXME: now decision is simplest: at current extent */
@@ -1022,12 +1037,12 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
}
if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
border = path[depth].p_ext[1].ee_block;
- ext_debug("leaf will be split."
+ ext_debug(inode, "leaf will be split."
" next leaf starts at %d\n",
le32_to_cpu(border));
} else {
border = newext->ee_block;
- ext_debug("leaf will be added."
+ ext_debug(inode, "leaf will be added."
" next leaf starts at %d\n",
le32_to_cpu(border));
}
@@ -1044,12 +1059,12 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
* We need this to handle errors and free blocks
* upon them.
*/
- ablocks = kcalloc(depth, sizeof(ext4_fsblk_t), GFP_NOFS);
+ ablocks = kcalloc(depth, sizeof(ext4_fsblk_t), gfp_flags);
if (!ablocks)
return -ENOMEM;
/* allocate all needed blocks */
- ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
+ ext_debug(inode, "allocate %d blocks for indexes/leaf\n", depth - at);
for (a = 0; a < depth - at; a++) {
newblock = ext4_ext_new_meta_block(handle, inode, path,
newext, &err, flags);
@@ -1135,7 +1150,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
goto cleanup;
}
if (k)
- ext_debug("create %d intermediate indices\n", k);
+ ext_debug(inode, "create %d intermediate indices\n", k);
/* insert new index into current index block */
/* current depth stored in i var */
i = depth - 1;
@@ -1162,7 +1177,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
fidx->ei_block = border;
ext4_idx_store_pblock(fidx, oldblock);
- ext_debug("int.index at %d (block %llu): %u -> %llu\n",
+ ext_debug(inode, "int.index at %d (block %llu): %u -> %llu\n",
i, newblock, le32_to_cpu(border), oldblock);
/* move remainder of path[i] to the new index block */
@@ -1176,7 +1191,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
}
/* start copy indexes */
m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
- ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
+ ext_debug(inode, "cur 0x%p, last 0x%p\n", path[i].p_idx,
EXT_MAX_INDEX(path[i].p_hdr));
ext4_ext_show_move(inode, path, newblock, i);
if (m) {
@@ -1313,13 +1328,13 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
EXT_FIRST_INDEX(neh)->ei_block =
EXT_FIRST_EXTENT(neh)->ee_block;
}
- ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
+ ext_debug(inode, "new root: num %d(%d), lblock %d, ptr %llu\n",
le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
ext4_idx_pblock(EXT_FIRST_INDEX(neh)));
le16_add_cpu(&neh->eh_depth, 1);
- ext4_mark_inode_dirty(handle, inode);
+ err = ext4_mark_inode_dirty(handle, inode);
out:
brelse(bh);
@@ -1955,7 +1970,7 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
/* Try to append newex to the ex */
if (ext4_can_extents_be_merged(inode, ex, newext)) {
- ext_debug("append [%d]%d block to %u:[%d]%d"
+ ext_debug(inode, "append [%d]%d block to %u:[%d]%d"
"(from %llu)\n",
ext4_ext_is_unwritten(newext),
ext4_ext_get_actual_len(newext),
@@ -1980,7 +1995,7 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
prepend:
/* Try to prepend newex to the ex */
if (ext4_can_extents_be_merged(inode, newext, ex)) {
- ext_debug("prepend %u[%d]%d block to %u:[%d]%d"
+ ext_debug(inode, "prepend %u[%d]%d block to %u:[%d]%d"
"(from %llu)\n",
le32_to_cpu(newext->ee_block),
ext4_ext_is_unwritten(newext),
@@ -2018,20 +2033,20 @@ prepend:
if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
next = ext4_ext_next_leaf_block(path);
if (next != EXT_MAX_BLOCKS) {
- ext_debug("next leaf block - %u\n", next);
+ ext_debug(inode, "next leaf block - %u\n", next);
BUG_ON(npath != NULL);
- npath = ext4_find_extent(inode, next, NULL, 0);
+ npath = ext4_find_extent(inode, next, NULL, gb_flags);
if (IS_ERR(npath))
return PTR_ERR(npath);
BUG_ON(npath->p_depth != path->p_depth);
eh = npath[depth].p_hdr;
if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
- ext_debug("next leaf isn't full(%d)\n",
+ ext_debug(inode, "next leaf isn't full(%d)\n",
le16_to_cpu(eh->eh_entries));
path = npath;
goto has_space;
}
- ext_debug("next leaf has no free space(%d,%d)\n",
+ ext_debug(inode, "next leaf has no free space(%d,%d)\n",
le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
}
@@ -2057,7 +2072,7 @@ has_space:
if (!nearex) {
/* there is no extent in this leaf, create first one */
- ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n",
+ ext_debug(inode, "first extent in the leaf: %u:%llu:[%d]%d\n",
le32_to_cpu(newext->ee_block),
ext4_ext_pblock(newext),
ext4_ext_is_unwritten(newext),
@@ -2067,7 +2082,7 @@ has_space:
if (le32_to_cpu(newext->ee_block)
> le32_to_cpu(nearex->ee_block)) {
/* Insert after */
- ext_debug("insert %u:%llu:[%d]%d before: "
+ ext_debug(inode, "insert %u:%llu:[%d]%d before: "
"nearest %p\n",
le32_to_cpu(newext->ee_block),
ext4_ext_pblock(newext),
@@ -2078,7 +2093,7 @@ has_space:
} else {
/* Insert before */
BUG_ON(newext->ee_block == nearex->ee_block);
- ext_debug("insert %u:%llu:[%d]%d after: "
+ ext_debug(inode, "insert %u:%llu:[%d]%d after: "
"nearest %p\n",
le32_to_cpu(newext->ee_block),
ext4_ext_pblock(newext),
@@ -2088,7 +2103,7 @@ has_space:
}
len = EXT_LAST_EXTENT(eh) - nearex + 1;
if (len > 0) {
- ext_debug("insert %u:%llu:[%d]%d: "
+ ext_debug(inode, "insert %u:%llu:[%d]%d: "
"move %d extents from 0x%p to 0x%p\n",
le32_to_cpu(newext->ee_block),
ext4_ext_pblock(newext),
@@ -2232,7 +2247,7 @@ ext4_ext_put_gap_in_cache(struct inode *inode, ext4_lblk_t hole_start,
return;
hole_len = min(es.es_lblk - hole_start, hole_len);
}
- ext_debug(" -> %u:%u\n", hole_start, hole_len);
+ ext_debug(inode, " -> %u:%u\n", hole_start, hole_len);
ext4_es_insert_extent(inode, hole_start, hole_len, ~0,
EXTENT_STATUS_HOLE);
}
@@ -2269,7 +2284,7 @@ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
err = ext4_ext_dirty(handle, inode, path);
if (err)
return err;
- ext_debug("index is empty, remove it, free block %llu\n", leaf);
+ ext_debug(inode, "index is empty, remove it, free block %llu\n", leaf);
trace_ext4_ext_rm_idx(inode, leaf);
ext4_free_blocks(handle, inode, NULL, leaf, 1,
@@ -2548,7 +2563,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
ext4_fsblk_t pblk;
/* the header must be checked already in ext4_ext_remove_space() */
- ext_debug("truncate since %u in leaf to %u\n", start, end);
+ ext_debug(inode, "truncate since %u in leaf to %u\n", start, end);
if (!path[depth].p_hdr)
path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
eh = path[depth].p_hdr;
@@ -2574,7 +2589,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
else
unwritten = 0;
- ext_debug("remove ext %u:[%d]%d\n", ex_ee_block,
+ ext_debug(inode, "remove ext %u:[%d]%d\n", ex_ee_block,
unwritten, ex_ee_len);
path[depth].p_ext = ex;
@@ -2582,7 +2597,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
b = ex_ee_block+ex_ee_len - 1 < end ?
ex_ee_block+ex_ee_len - 1 : end;
- ext_debug(" border %u:%u\n", a, b);
+ ext_debug(inode, " border %u:%u\n", a, b);
/* If this extent is beyond the end of the hole, skip it */
if (end < ex_ee_block) {
@@ -2691,7 +2706,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
if (err)
goto out;
- ext_debug("new extent: %u:%u:%llu\n", ex_ee_block, num,
+ ext_debug(inode, "new extent: %u:%u:%llu\n", ex_ee_block, num,
ext4_ext_pblock(ex));
ex--;
ex_ee_block = le32_to_cpu(ex->ee_block);
@@ -2768,7 +2783,7 @@ int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
partial.lblk = 0;
partial.state = initial;
- ext_debug("truncate since %u to %u\n", start, end);
+ ext_debug(inode, "truncate since %u to %u\n", start, end);
/* probably first extent we're gonna free will be last in block */
handle = ext4_journal_start_with_revoke(inode, EXT4_HT_TRUNCATE,
@@ -2793,7 +2808,8 @@ again:
ext4_fsblk_t pblk;
/* find extent for or closest extent to this block */
- path = ext4_find_extent(inode, end, NULL, EXT4_EX_NOCACHE);
+ path = ext4_find_extent(inode, end, NULL,
+ EXT4_EX_NOCACHE | EXT4_EX_NOFAIL);
if (IS_ERR(path)) {
ext4_journal_stop(handle);
return PTR_ERR(path);
@@ -2879,7 +2895,7 @@ again:
le16_to_cpu(path[k].p_hdr->eh_entries)+1;
} else {
path = kcalloc(depth + 1, sizeof(struct ext4_ext_path),
- GFP_NOFS);
+ GFP_NOFS | __GFP_NOFAIL);
if (path == NULL) {
ext4_journal_stop(handle);
return -ENOMEM;
@@ -2909,7 +2925,7 @@ again:
/* this is index block */
if (!path[i].p_hdr) {
- ext_debug("initialize header\n");
+ ext_debug(inode, "initialize header\n");
path[i].p_hdr = ext_block_hdr(path[i].p_bh);
}
@@ -2917,7 +2933,7 @@ again:
/* this level hasn't been touched yet */
path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
- ext_debug("init index ptr: hdr 0x%p, num %d\n",
+ ext_debug(inode, "init index ptr: hdr 0x%p, num %d\n",
path[i].p_hdr,
le16_to_cpu(path[i].p_hdr->eh_entries));
} else {
@@ -2925,13 +2941,13 @@ again:
path[i].p_idx--;
}
- ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
+ ext_debug(inode, "level %d - index, first 0x%p, cur 0x%p\n",
i, EXT_FIRST_INDEX(path[i].p_hdr),
path[i].p_idx);
if (ext4_ext_more_to_rm(path + i)) {
struct buffer_head *bh;
/* go to the next level */
- ext_debug("move to level %d (block %llu)\n",
+ ext_debug(inode, "move to level %d (block %llu)\n",
i + 1, ext4_idx_pblock(path[i].p_idx));
memset(path + i + 1, 0, sizeof(*path));
bh = read_extent_tree_block(inode,
@@ -2967,7 +2983,7 @@ again:
brelse(path[i].p_bh);
path[i].p_bh = NULL;
i--;
- ext_debug("return to level %d\n", i);
+ ext_debug(inode, "return to level %d\n", i);
}
}
@@ -3135,8 +3151,7 @@ static int ext4_split_extent_at(handle_t *handle,
BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) ==
(EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2));
- ext_debug("ext4_split_extents_at: inode %lu, logical"
- "block %llu\n", inode->i_ino, (unsigned long long)split);
+ ext_debug(inode, "logical block %llu\n", (unsigned long long)split);
ext4_ext_show_leaf(inode, path);
@@ -3244,6 +3259,10 @@ out:
fix_extent_len:
ex->ee_len = orig_ex.ee_len;
+ /*
+ * Ignore ext4_ext_dirty return value since we are already in error path
+ * and err is a non-zero error code.
+ */
ext4_ext_dirty(handle, inode, path + path->p_depth);
return err;
}
@@ -3300,7 +3319,7 @@ static int ext4_split_extent(handle_t *handle,
* Update path is required because previous ext4_split_extent_at() may
* result in split of original leaf or extent zeroout.
*/
- path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
+ path = ext4_find_extent(inode, map->m_lblk, ppath, flags);
if (IS_ERR(path))
return PTR_ERR(path);
depth = ext_depth(inode);
@@ -3369,9 +3388,8 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
int err = 0;
int split_flag = EXT4_EXT_DATA_VALID2;
- ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
- "block %llu, max_blocks %u\n", inode->i_ino,
- (unsigned long long)map->m_lblk, map_len);
+ ext_debug(inode, "logical block %llu, max_blocks %u\n",
+ (unsigned long long)map->m_lblk, map_len);
sbi = EXT4_SB(inode->i_sb);
eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1)
@@ -3503,7 +3521,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
}
if (allocated) {
/* Mark the block containing both extents as dirty */
- ext4_ext_dirty(handle, inode, path + depth);
+ err = ext4_ext_dirty(handle, inode, path + depth);
/* Update path to point to the right extent */
path[depth].p_ext = abut_ex;
@@ -3623,8 +3641,7 @@ static int ext4_split_convert_extents(handle_t *handle,
unsigned int ee_len;
int split_flag = 0, depth;
- ext_debug("%s: inode %lu, logical block %llu, max_blocks %u\n",
- __func__, inode->i_ino,
+ ext_debug(inode, "logical block %llu, max_blocks %u\n",
(unsigned long long)map->m_lblk, map->m_len);
eof_block = (EXT4_I(inode)->i_disksize + inode->i_sb->s_blocksize - 1)
@@ -3670,8 +3687,7 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle,
ee_block = le32_to_cpu(ex->ee_block);
ee_len = ext4_ext_get_actual_len(ex);
- ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical"
- "block %llu, max_blocks %u\n", inode->i_ino,
+ ext_debug(inode, "logical block %llu, max_blocks %u\n",
(unsigned long long)ee_block, ee_len);
/* If extent is larger than requested it is a clear sign that we still
@@ -3741,8 +3757,7 @@ convert_initialized_extent(handle_t *handle, struct inode *inode,
ee_block = le32_to_cpu(ex->ee_block);
ee_len = ext4_ext_get_actual_len(ex);
- ext_debug("%s: inode %lu, logical"
- "block %llu, max_blocks %u\n", __func__, inode->i_ino,
+ ext_debug(inode, "logical block %llu, max_blocks %u\n",
(unsigned long long)ee_block, ee_len);
if (ee_block != map->m_lblk || ee_len > map->m_len) {
@@ -3794,16 +3809,13 @@ ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
struct ext4_ext_path **ppath, int flags,
unsigned int allocated, ext4_fsblk_t newblock)
{
-#ifdef EXT_DEBUG
- struct ext4_ext_path *path = *ppath;
-#endif
+ struct ext4_ext_path __maybe_unused *path = *ppath;
int ret = 0;
int err = 0;
- ext_debug("ext4_ext_handle_unwritten_extents: inode %lu, logical "
- "block %llu, max_blocks %u, flags %x, allocated %u\n",
- inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
- flags, allocated);
+ ext_debug(inode, "logical block %llu, max_blocks %u, flags 0x%x, allocated %u\n",
+ (unsigned long long)map->m_lblk, map->m_len, flags,
+ allocated);
ext4_ext_show_leaf(inode, path);
/*
@@ -3815,39 +3827,38 @@ ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
trace_ext4_ext_handle_unwritten_extents(inode, map, flags,
allocated, newblock);
- /* get_block() before submit the IO, split the extent */
+ /* get_block() before submitting IO, split the extent */
if (flags & EXT4_GET_BLOCKS_PRE_IO) {
ret = ext4_split_convert_extents(handle, inode, map, ppath,
flags | EXT4_GET_BLOCKS_CONVERT);
- if (ret <= 0)
- goto out;
+ if (ret < 0) {
+ err = ret;
+ goto out2;
+ }
+ /*
+ * shouldn't get a 0 return when splitting an extent unless
+ * m_len is 0 (bug) or extent has been corrupted
+ */
+ if (unlikely(ret == 0)) {
+ EXT4_ERROR_INODE(inode,
+ "unexpected ret == 0, m_len = %u",
+ map->m_len);
+ err = -EFSCORRUPTED;
+ goto out2;
+ }
map->m_flags |= EXT4_MAP_UNWRITTEN;
goto out;
}
/* IO end_io complete, convert the filled extent to written */
if (flags & EXT4_GET_BLOCKS_CONVERT) {
- if (flags & EXT4_GET_BLOCKS_ZERO) {
- if (allocated > map->m_len)
- allocated = map->m_len;
- err = ext4_issue_zeroout(inode, map->m_lblk, newblock,
- allocated);
- if (err < 0)
- goto out2;
- }
- ret = ext4_convert_unwritten_extents_endio(handle, inode, map,
+ err = ext4_convert_unwritten_extents_endio(handle, inode, map,
ppath);
- if (ret >= 0)
- ext4_update_inode_fsync_trans(handle, inode, 1);
- else
- err = ret;
- map->m_flags |= EXT4_MAP_MAPPED;
- map->m_pblk = newblock;
- if (allocated > map->m_len)
- allocated = map->m_len;
- map->m_len = allocated;
- goto out2;
+ if (err < 0)
+ goto out2;
+ ext4_update_inode_fsync_trans(handle, inode, 1);
+ goto map_out;
}
- /* buffered IO case */
+ /* buffered IO cases */
/*
* repeat fallocate creation request
* we already have an unwritten extent
@@ -3870,29 +3881,39 @@ ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
goto out1;
}
- /* buffered write, writepage time, convert*/
+ /*
+ * Default case when (flags & EXT4_GET_BLOCKS_CREATE) == 1.
+ * For buffered writes, at writepage time, etc. Convert a
+ * discovered unwritten extent to written.
+ */
ret = ext4_ext_convert_to_initialized(handle, inode, map, ppath, flags);
- if (ret >= 0)
- ext4_update_inode_fsync_trans(handle, inode, 1);
-out:
- if (ret <= 0) {
+ if (ret < 0) {
err = ret;
goto out2;
- } else
- allocated = ret;
- map->m_flags |= EXT4_MAP_NEW;
- if (allocated > map->m_len)
- allocated = map->m_len;
- map->m_len = allocated;
+ }
+ ext4_update_inode_fsync_trans(handle, inode, 1);
+ /*
+ * shouldn't get a 0 return when converting an unwritten extent
+ * unless m_len is 0 (bug) or extent has been corrupted
+ */
+ if (unlikely(ret == 0)) {
+ EXT4_ERROR_INODE(inode, "unexpected ret == 0, m_len = %u",
+ map->m_len);
+ err = -EFSCORRUPTED;
+ goto out2;
+ }
+out:
+ allocated = ret;
+ map->m_flags |= EXT4_MAP_NEW;
map_out:
map->m_flags |= EXT4_MAP_MAPPED;
out1:
+ map->m_pblk = newblock;
if (allocated > map->m_len)
allocated = map->m_len;
- ext4_ext_show_leaf(inode, path);
- map->m_pblk = newblock;
map->m_len = allocated;
+ ext4_ext_show_leaf(inode, path);
out2:
return err ? err : allocated;
}
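The reworked unwritten-extent handling stops folding "no blocks processed"
into the generic "ret <= 0" error path: 0 from a split/convert helper is
impossible for a well-formed tree, so it is now reported as filesystem
corruption. The discrimination pattern in miniature (EFSCORRUPTED aliases
EUCLEAN, 117, as in the kernel):

#include <stdio.h>

#define EFSCORRUPTED 117	/* EUCLEAN */

static int check_ret(int ret, unsigned int m_len)
{
	if (ret < 0)
		return ret;			/* genuine error */
	if (ret == 0) {
		fprintf(stderr, "unexpected ret == 0, m_len = %u\n", m_len);
		return -EFSCORRUPTED;		/* corrupted extent tree */
	}
	return ret;				/* blocks processed */
}

int main(void)
{
	printf("%d\n", check_ret(0, 8));	/* prints -117 */
	return 0;
}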
@@ -4024,15 +4045,14 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
struct ext4_ext_path *path = NULL;
struct ext4_extent newex, *ex, *ex2;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- ext4_fsblk_t newblock = 0;
+ ext4_fsblk_t newblock = 0, pblk;
int err = 0, depth, ret;
unsigned int allocated = 0, offset = 0;
unsigned int allocated_clusters = 0;
struct ext4_allocation_request ar;
ext4_lblk_t cluster_offset;
- ext_debug("blocks %u/%u requested for inode %lu\n",
- map->m_lblk, map->m_len, inode->i_ino);
+ ext_debug(inode, "blocks %u/%u requested\n", map->m_lblk, map->m_len);
trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
/* find extent for this block */
@@ -4040,7 +4060,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
if (IS_ERR(path)) {
err = PTR_ERR(path);
path = NULL;
- goto out2;
+ goto out;
}
depth = ext_depth(inode);
@@ -4056,7 +4076,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
(unsigned long) map->m_lblk, depth,
path[depth].p_block);
err = -EFSCORRUPTED;
- goto out2;
+ goto out;
}
ex = path[depth].p_ext;
@@ -4079,8 +4099,8 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
newblock = map->m_lblk - ee_block + ee_start;
/* number of remaining blocks in the extent */
allocated = ee_len - (map->m_lblk - ee_block);
- ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
- ee_block, ee_len, newblock);
+ ext_debug(inode, "%u fit into %u:%d -> %llu\n",
+ map->m_lblk, ee_block, ee_len, newblock);
/*
* If the extent is initialized check whether the
@@ -4090,8 +4110,14 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) {
err = convert_initialized_extent(handle,
inode, map, &path, &allocated);
- goto out2;
+ goto out;
} else if (!ext4_ext_is_unwritten(ex)) {
+ map->m_flags |= EXT4_MAP_MAPPED;
+ map->m_pblk = newblock;
+ if (allocated > map->m_len)
+ allocated = map->m_len;
+ map->m_len = allocated;
+ ext4_ext_show_leaf(inode, path);
goto out;
}
@@ -4102,7 +4128,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
err = ret;
else
allocated = ret;
- goto out2;
+ goto out;
}
}
@@ -4127,7 +4153,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
map->m_pblk = 0;
map->m_len = min_t(unsigned int, map->m_len, hole_len);
- goto out2;
+ goto out;
}
/*
@@ -4151,12 +4177,12 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
ar.lleft = map->m_lblk;
err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
if (err)
- goto out2;
+ goto out;
ar.lright = map->m_lblk;
ex2 = NULL;
err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2);
if (err)
- goto out2;
+ goto out;
/* Check if the extent after searching to the right implies a
* cluster we can use. */
@@ -4217,17 +4243,18 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
ar.flags |= EXT4_MB_USE_RESERVED;
newblock = ext4_mb_new_blocks(handle, &ar, &err);
if (!newblock)
- goto out2;
- ext_debug("allocate new block: goal %llu, found %llu/%u\n",
- ar.goal, newblock, allocated);
+ goto out;
allocated_clusters = ar.len;
ar.len = EXT4_C2B(sbi, ar.len) - offset;
+ ext_debug(inode, "allocate new block: goal %llu, found %llu/%u, requested %u\n",
+ ar.goal, newblock, ar.len, allocated);
if (ar.len > allocated)
ar.len = allocated;
got_allocated_blocks:
/* try to insert new extent into found leaf and return */
- ext4_ext_store_pblock(&newex, newblock + offset);
+ pblk = newblock + offset;
+ ext4_ext_store_pblock(&newex, pblk);
newex.ee_len = cpu_to_le16(ar.len);
/* Mark unwritten */
if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) {
@@ -4252,16 +4279,9 @@ got_allocated_blocks:
EXT4_C2B(sbi, allocated_clusters),
fb_flags);
}
- goto out2;
+ goto out;
}
- /* previous routine could use block we allocated */
- newblock = ext4_ext_pblock(&newex);
- allocated = ext4_ext_get_actual_len(&newex);
- if (allocated > map->m_len)
- allocated = map->m_len;
- map->m_flags |= EXT4_MAP_NEW;
-
/*
* Reduce the reserved cluster count to reflect successful deferred
* allocation of delayed allocated clusters or direct allocation of
@@ -4307,14 +4327,14 @@ got_allocated_blocks:
ext4_update_inode_fsync_trans(handle, inode, 1);
else
ext4_update_inode_fsync_trans(handle, inode, 0);
-out:
- if (allocated > map->m_len)
- allocated = map->m_len;
+
+ map->m_flags |= (EXT4_MAP_NEW | EXT4_MAP_MAPPED);
+ map->m_pblk = pblk;
+ map->m_len = ar.len;
+ allocated = map->m_len;
ext4_ext_show_leaf(inode, path);
- map->m_flags |= EXT4_MAP_MAPPED;
- map->m_pblk = newblock;
- map->m_len = allocated;
-out2:
+
+out:
ext4_ext_drop_refs(path);
kfree(path);
@@ -4353,7 +4373,14 @@ retry:
}
if (err)
return err;
- return ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
+retry_remove_space:
+ err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
+ if (err == -ENOMEM) {
+ cond_resched();
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ goto retry_remove_space;
+ }
+ return err;
}
static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
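ext4_ext_truncate() now retries ext4_ext_remove_space() on -ENOMEM rather
than leaving the truncate half done: cond_resched() yields the CPU and
congestion_wait(BLK_RW_ASYNC, HZ/50) sleeps for up to ~20ms waiting for
writeback congestion to ease. The retry shape as a user-space sketch
(usleep stands in for congestion_wait):

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

static int calls;

static int fake_remove_space(void)
{
	return ++calls < 3 ? -ENOMEM : 0;	/* transient failures */
}

static int remove_space_with_retry(int (*op)(void))
{
	int err;

	do {
		err = op();
		if (err == -ENOMEM)
			usleep(20 * 1000);	/* ~ congestion_wait(HZ/50) */
	} while (err == -ENOMEM);
	return err;
}

int main(void)
{
	int err = remove_space_with_retry(fake_remove_space);

	printf("err=%d after %d calls\n", err, calls);
	return 0;
}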
@@ -4363,7 +4390,7 @@ static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
struct inode *inode = file_inode(file);
handle_t *handle;
int ret = 0;
- int ret2 = 0;
+ int ret2 = 0, ret3 = 0;
int retries = 0;
int depth = 0;
struct ext4_map_blocks map;
@@ -4423,10 +4450,11 @@ retry:
if (ext4_update_inode_size(inode, epos) & 0x1)
inode->i_mtime = inode->i_ctime;
}
- ext4_mark_inode_dirty(handle, inode);
+ ret2 = ext4_mark_inode_dirty(handle, inode);
ext4_update_inode_fsync_trans(handle, inode, 1);
- ret2 = ext4_journal_stop(handle);
- if (ret2)
+ ret3 = ext4_journal_stop(handle);
+ ret2 = ret3 ? ret3 : ret2;
+ if (unlikely(ret2))
break;
}
if (ret == -ENOSPC &&
@@ -4490,7 +4518,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
inode_lock(inode);
/*
- * Indirect files do not support unwritten extnets
+ * Indirect files do not support unwritten extents
*/
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
ret = -EOPNOTSUPP;
@@ -4507,8 +4535,6 @@ static long ext4_zero_range(struct file *file, loff_t offset,
}
flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
- if (mode & FALLOC_FL_KEEP_SIZE)
- flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
/* Wait all existing dio workers, newcomers will block on i_mutex */
inode_dio_wait(inode);
@@ -4577,7 +4603,9 @@ static long ext4_zero_range(struct file *file, loff_t offset,
inode->i_mtime = inode->i_ctime = current_time(inode);
if (new_size)
ext4_update_inode_size(inode, new_size);
- ext4_mark_inode_dirty(handle, inode);
+ ret = ext4_mark_inode_dirty(handle, inode);
+ if (unlikely(ret))
+ goto out_handle;
/* Zero out partial block at the edges of the range */
ret = ext4_zero_partial_blocks(handle, inode, offset, len);
@@ -4587,6 +4615,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
if (file->f_flags & O_SYNC)
ext4_handle_sync(handle);
+out_handle:
ext4_journal_stop(handle);
out_mutex:
inode_unlock(inode);
@@ -4647,8 +4676,6 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
max_blocks = EXT4_MAX_BLOCKS(len, offset, blkbits);
flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
- if (mode & FALLOC_FL_KEEP_SIZE)
- flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
inode_lock(inode);
@@ -4700,8 +4727,7 @@ int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
loff_t offset, ssize_t len)
{
unsigned int max_blocks;
- int ret = 0;
- int ret2 = 0;
+ int ret = 0, ret2 = 0, ret3 = 0;
struct ext4_map_blocks map;
unsigned int blkbits = inode->i_blkbits;
unsigned int credits = 0;
@@ -4734,9 +4760,13 @@ int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
"ext4_ext_map_blocks returned %d",
inode->i_ino, map.m_lblk,
map.m_len, ret);
- ext4_mark_inode_dirty(handle, inode);
- if (credits)
- ret2 = ext4_journal_stop(handle);
+ ret2 = ext4_mark_inode_dirty(handle, inode);
+ if (credits) {
+ ret3 = ext4_journal_stop(handle);
+ if (unlikely(ret3))
+ ret2 = ret3;
+ }
+
if (ret <= 0 || ret2)
break;
}
@@ -4854,11 +4884,9 @@ static int ext4_fiemap_check_ranges(struct inode *inode, u64 start, u64 *len)
return 0;
}
-static int _ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
- __u64 start, __u64 len, bool from_es_cache)
+int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ u64 start, u64 len)
{
- ext4_lblk_t start_blk;
- u32 ext4_fiemap_flags = FIEMAP_FLAG_SYNC | FIEMAP_FLAG_XATTR;
int error = 0;
if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
@@ -4868,12 +4896,6 @@ static int _ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE;
}
- if (from_es_cache)
- ext4_fiemap_flags &= FIEMAP_FLAG_XATTR;
-
- if (fiemap_check_flags(fieinfo, ext4_fiemap_flags))
- return -EBADR;
-
/*
* For bitmap files the maximum size limit could be smaller than
* s_maxbytes, so check len here manually instead of just relying on the
@@ -4885,40 +4907,20 @@ static int _ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
fieinfo->fi_flags &= ~FIEMAP_FLAG_XATTR;
- error = iomap_fiemap(inode, fieinfo, start, len,
- &ext4_iomap_xattr_ops);
- } else if (!from_es_cache) {
- error = iomap_fiemap(inode, fieinfo, start, len,
- &ext4_iomap_report_ops);
- } else {
- ext4_lblk_t len_blks;
- __u64 last_blk;
-
- start_blk = start >> inode->i_sb->s_blocksize_bits;
- last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
- if (last_blk >= EXT_MAX_BLOCKS)
- last_blk = EXT_MAX_BLOCKS-1;
- len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
-
- /*
- * Walk the extent tree gathering extent information
- * and pushing extents back to the user.
- */
- error = ext4_fill_es_cache_info(inode, start_blk, len_blks,
- fieinfo);
+ return iomap_fiemap(inode, fieinfo, start, len,
+ &ext4_iomap_xattr_ops);
}
- return error;
-}
-int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
- __u64 start, __u64 len)
-{
- return _ext4_fiemap(inode, fieinfo, start, len, false);
+ return iomap_fiemap(inode, fieinfo, start, len, &ext4_iomap_report_ops);
}
int ext4_get_es_cache(struct inode *inode, struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len)
{
+ ext4_lblk_t start_blk, len_blks;
+ __u64 last_blk;
+ int error = 0;
+
if (ext4_has_inline_data(inode)) {
int has_inline;
@@ -4929,9 +4931,33 @@ int ext4_get_es_cache(struct inode *inode, struct fiemap_extent_info *fieinfo,
return 0;
}
- return _ext4_fiemap(inode, fieinfo, start, len, true);
-}
+ if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
+ error = ext4_ext_precache(inode);
+ if (error)
+ return error;
+ fieinfo->fi_flags &= ~FIEMAP_FLAG_CACHE;
+ }
+
+ error = fiemap_prep(inode, fieinfo, start, &len, 0);
+ if (error)
+ return error;
+
+ error = ext4_fiemap_check_ranges(inode, start, &len);
+ if (error)
+ return error;
+ start_blk = start >> inode->i_sb->s_blocksize_bits;
+ last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
+ if (last_blk >= EXT_MAX_BLOCKS)
+ last_blk = EXT_MAX_BLOCKS-1;
+ len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
+
+ /*
+ * Walk the extent tree gathering extent information
+ * and pushing extents back to the user.
+ */
+ return ext4_fill_es_cache_info(inode, start_blk, len_blks, fieinfo);
+}
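ext4_get_es_cache() keeps the open-coded byte-to-block conversion that
ext4_fiemap() now leaves to iomap_fiemap(): a byte range [start, start+len)
becomes the inclusive logical-block range [start_blk, last_blk], clamped
below EXT_MAX_BLOCKS. The same arithmetic standalone (4K blocks chosen for
illustration):

#include <stdint.h>
#include <stdio.h>

#define EXT_MAX_BLOCKS 0xffffffffULL

int main(void)
{
	unsigned int blkbits = 12;		/* 4096-byte blocks */
	uint64_t start = 5000, len = 10000;	/* byte range */

	uint64_t start_blk = start >> blkbits;
	uint64_t last_blk = (start + len - 1) >> blkbits;

	if (last_blk >= EXT_MAX_BLOCKS)
		last_blk = EXT_MAX_BLOCKS - 1;

	printf("blocks %llu..%llu (%llu blocks)\n",
	       (unsigned long long)start_blk,
	       (unsigned long long)last_blk,
	       (unsigned long long)(last_blk - start_blk + 1));
	return 0;
}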
/*
* ext4_access_path:
@@ -5304,7 +5330,7 @@ static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
if (IS_SYNC(inode))
ext4_handle_sync(handle);
inode->i_mtime = inode->i_ctime = current_time(inode);
- ext4_mark_inode_dirty(handle, inode);
+ ret = ext4_mark_inode_dirty(handle, inode);
ext4_update_inode_fsync_trans(handle, inode, 1);
out_stop:
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
index d996b44d2265..e75171535375 100644
--- a/fs/ext4/extents_status.c
+++ b/fs/ext4/extents_status.c
@@ -1054,7 +1054,7 @@ static void count_rsvd(struct inode *inode, ext4_lblk_t lblk, long len,
end = (end > ext4_es_end(es)) ? ext4_es_end(es) : end;
/* record the first block of the first delonly extent seen */
- if (rc->first_do_lblk_found == false) {
+ if (!rc->first_do_lblk_found) {
rc->first_do_lblk = i;
rc->first_do_lblk_found = true;
}
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 0d624250a62b..2a01e31a032c 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -287,6 +287,7 @@ static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset,
bool truncate = false;
u8 blkbits = inode->i_blkbits;
ext4_lblk_t written_blk, end_blk;
+ int ret;
/*
* Note that EXT4_I(inode)->i_disksize can get extended up to
@@ -327,8 +328,14 @@ static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset,
goto truncate;
}
- if (ext4_update_inode_size(inode, offset + written))
- ext4_mark_inode_dirty(handle, inode);
+ if (ext4_update_inode_size(inode, offset + written)) {
+ ret = ext4_mark_inode_dirty(handle, inode);
+ if (unlikely(ret)) {
+ written = ret;
+ ext4_journal_stop(handle);
+ goto truncate;
+ }
+ }
/*
* We may need to truncate allocated but not written blocks beyond EOF.
@@ -495,6 +502,12 @@ static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (ret <= 0)
return ret;
+ /* if we're going to block and IOCB_NOWAIT is set, return -EAGAIN */
+ if ((iocb->ki_flags & IOCB_NOWAIT) && (unaligned_io || extend)) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
offset = iocb->ki_pos;
count = ret;
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index 35ff9a56db67..1d668c8f131f 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -44,30 +44,28 @@
*/
static int ext4_sync_parent(struct inode *inode)
{
- struct dentry *dentry = NULL;
- struct inode *next;
+ struct dentry *dentry, *next;
int ret = 0;
if (!ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY))
return 0;
- inode = igrab(inode);
+ dentry = d_find_any_alias(inode);
+ if (!dentry)
+ return 0;
while (ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) {
ext4_clear_inode_state(inode, EXT4_STATE_NEWENTRY);
- dentry = d_find_any_alias(inode);
- if (!dentry)
- break;
- next = igrab(d_inode(dentry->d_parent));
+
+ next = dget_parent(dentry);
dput(dentry);
- if (!next)
- break;
- iput(inode);
- inode = next;
+ dentry = next;
+ inode = dentry->d_inode;
+
/*
* The directory inode may have gone through rmdir by now. But
* the inode itself and its blocks are still allocated (we hold
- * a reference to the inode so it didn't go through
- * ext4_evict_inode()) and so we are safe to flush metadata
- * blocks and the inode.
+ * a reference to the inode via its dentry), so it didn't go
+ * through ext4_evict_inode() and so we are safe to flush
+ * metadata blocks and the inode.
*/
ret = sync_mapping_buffers(inode->i_mapping);
if (ret)
@@ -76,7 +74,7 @@ static int ext4_sync_parent(struct inode *inode)
if (ret)
break;
}
- iput(inode);
+ dput(dentry);
return ret;
}
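The rewritten ext4_sync_parent() walks upward by dentry instead of by
inode: dget_parent() pins the parent before the child reference is dropped
and guarantees a live d_inode, removing the igrab()/iput() juggling and the
mid-walk d_find_any_alias() lookups. The shape of that walk with toy
reference-counted types (not the real VFS ones):

#include <stdio.h>

struct dentry {
	struct dentry *parent;
	const char *name;
	int refcount;
};

static struct dentry *dget_parent(struct dentry *d)
{
	d->parent->refcount++;		/* pin parent before dropping child */
	return d->parent;
}

static void dput(struct dentry *d)
{
	d->refcount--;
}

static void sync_parents(struct dentry *d, int levels)
{
	d->refcount++;			/* d_find_any_alias() stand-in */
	while (levels--) {
		struct dentry *next = dget_parent(d);

		dput(d);
		d = next;
		printf("flushing %s\n", d->name);
	}
	dput(d);
}

int main(void)
{
	struct dentry root = { .parent = &root, .name = "/" };
	struct dentry dir  = { .parent = &root, .name = "dir" };
	struct dentry file = { .parent = &dir,  .name = "file" };

	sync_parents(&file, 2);
	return 0;
}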
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 499f08d8522e..54d324e80fe5 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -1246,6 +1246,7 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
ext4_error_err(sb, -err,
"couldn't read orphan inode %lu (err %d)",
ino, err);
+ brelse(bitmap_bh);
return inode;
}
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index 107f0043f67f..be2b66eb65f7 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -467,7 +467,9 @@ static int ext4_splice_branch(handle_t *handle,
/*
* OK, we spliced it into the inode itself on a direct block.
*/
- ext4_mark_inode_dirty(handle, ar->inode);
+ err = ext4_mark_inode_dirty(handle, ar->inode);
+ if (unlikely(err))
+ goto err_out;
jbd_debug(5, "splicing direct\n");
}
return err;
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index f35e289e17aa..c3a1ad2db122 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -1260,7 +1260,7 @@ out:
int ext4_try_add_inline_entry(handle_t *handle, struct ext4_filename *fname,
struct inode *dir, struct inode *inode)
{
- int ret, inline_size, no_expand;
+ int ret, ret2, inline_size, no_expand;
void *inline_start;
struct ext4_iloc iloc;
@@ -1314,7 +1314,9 @@ int ext4_try_add_inline_entry(handle_t *handle, struct ext4_filename *fname,
out:
ext4_write_unlock_xattr(dir, &no_expand);
- ext4_mark_inode_dirty(handle, dir);
+ ret2 = ext4_mark_inode_dirty(handle, dir);
+ if (unlikely(ret2 && !ret))
+ ret = ret2;
brelse(iloc.bh);
return ret;
}
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 52be85f96159..40ec5c7ef0d3 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -221,6 +221,16 @@ void ext4_evict_inode(struct inode *inode)
truncate_inode_pages_final(&inode->i_data);
/*
+ * For inodes with journalled data, transaction commit could have
+ * dirtied the inode. Flush worker is ignoring it because of I_FREEING
+ * flag but we still need to remove the inode from the writeback lists.
+ */
+ if (!list_empty_careful(&inode->i_io_list)) {
+ WARN_ON_ONCE(!ext4_should_journal_data(inode));
+ inode_io_list_del(inode);
+ }
+
+ /*
* Protect us against freezing - iput() caller didn't have to have any
* protection against it
*/
@@ -432,11 +442,9 @@ static void ext4_map_blocks_es_recheck(handle_t *handle,
*/
down_read(&EXT4_I(inode)->i_data_sem);
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
- retval = ext4_ext_map_blocks(handle, inode, map, flags &
- EXT4_GET_BLOCKS_KEEP_SIZE);
+ retval = ext4_ext_map_blocks(handle, inode, map, 0);
} else {
- retval = ext4_ind_map_blocks(handle, inode, map, flags &
- EXT4_GET_BLOCKS_KEEP_SIZE);
+ retval = ext4_ind_map_blocks(handle, inode, map, 0);
}
up_read((&EXT4_I(inode)->i_data_sem));
@@ -493,9 +501,8 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
#endif
map->m_flags = 0;
- ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
- "logical block %lu\n", inode->i_ino, flags, map->m_len,
- (unsigned long) map->m_lblk);
+ ext_debug(inode, "flag 0x%x, max_blocks %u, logical block %lu\n",
+ flags, map->m_len, (unsigned long) map->m_lblk);
/*
* ext4_map_blocks returns an int, and m_len is an unsigned int
@@ -541,11 +548,9 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
*/
down_read(&EXT4_I(inode)->i_data_sem);
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
- retval = ext4_ext_map_blocks(handle, inode, map, flags &
- EXT4_GET_BLOCKS_KEEP_SIZE);
+ retval = ext4_ext_map_blocks(handle, inode, map, 0);
} else {
- retval = ext4_ind_map_blocks(handle, inode, map, flags &
- EXT4_GET_BLOCKS_KEEP_SIZE);
+ retval = ext4_ind_map_blocks(handle, inode, map, 0);
}
if (retval > 0) {
unsigned int status;
@@ -726,6 +731,9 @@ out_sem:
return ret;
}
}
+
+ if (retval < 0)
+ ext_debug(inode, "failed with err %d\n", retval);
return retval;
}
@@ -1296,7 +1304,7 @@ static int ext4_write_end(struct file *file,
* filesystems.
*/
if (i_size_changed || inline_data)
- ext4_mark_inode_dirty(handle, inode);
+ ret = ext4_mark_inode_dirty(handle, inode);
if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
/* if we have allocated more blocks and copied
@@ -1526,6 +1534,7 @@ struct mpage_da_data {
struct ext4_map_blocks map;
struct ext4_io_submit io_submit; /* IO submission data */
unsigned int do_map:1;
+ unsigned int scanned_until_end:1;
};
static void mpage_release_unused_pages(struct mpage_da_data *mpd,
@@ -1541,6 +1550,7 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,
if (mpd->first_page >= mpd->next_page)
return;
+ mpd->scanned_until_end = 0;
index = mpd->first_page;
end = mpd->next_page - 1;
if (invalidate) {
@@ -1681,8 +1691,7 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
invalid_block = ~0;
map->m_flags = 0;
- ext_debug("ext4_da_map_blocks(): inode %lu, max_blocks %u,"
- "logical block %lu\n", inode->i_ino, map->m_len,
+ ext_debug(inode, "max_blocks %u, logical block %lu\n", map->m_len,
(unsigned long) map->m_lblk);
/* Lookup extent status tree firstly */
@@ -2078,7 +2087,7 @@ static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
return err;
}
-#define BH_FLAGS ((1 << BH_Unwritten) | (1 << BH_Delay))
+#define BH_FLAGS (BIT(BH_Unwritten) | BIT(BH_Delay))
/*
* mballoc gives us at most this number of blocks...
@@ -2188,7 +2197,11 @@ static int mpage_process_page_bufs(struct mpage_da_data *mpd,
if (err < 0)
return err;
}
- return lblk < blocks;
+ if (lblk >= blocks) {
+ mpd->scanned_until_end = 1;
+ return 0;
+ }
+ return 1;
}
/*
@@ -2311,7 +2324,7 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
* mapping, or maybe the page was submitted for IO.
* So we return to call further extent mapping.
*/
- if (err < 0 || map_bh == true)
+ if (err < 0 || map_bh)
goto out;
/* Page fully mapped - let IO run! */
err = mpage_submit_page(mpd, page);
@@ -2358,7 +2371,7 @@ static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
dioread_nolock = ext4_should_dioread_nolock(inode);
if (dioread_nolock)
get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
- if (map->m_flags & (1 << BH_Delay))
+ if (map->m_flags & BIT(BH_Delay))
get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
err = ext4_map_blocks(handle, inode, map, get_blocks_flags);
@@ -2546,7 +2559,7 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
tag);
if (nr_pages == 0)
- goto out;
+ break;
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
@@ -2601,6 +2614,7 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
pagevec_release(&pvec);
cond_resched();
}
+ mpd->scanned_until_end = 1;
return 0;
out:
pagevec_release(&pvec);
@@ -2619,7 +2633,6 @@ static int ext4_writepages(struct address_space *mapping,
struct inode *inode = mapping->host;
int needed_blocks, rsv_blocks = 0, ret = 0;
struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
- bool done;
struct blk_plug plug;
bool give_up_on_write = false;
@@ -2705,7 +2718,6 @@ static int ext4_writepages(struct address_space *mapping,
retry:
if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
tag_pages_for_writeback(mapping, mpd.first_page, mpd.last_page);
- done = false;
blk_start_plug(&plug);
/*
@@ -2715,6 +2727,7 @@ retry:
* started.
*/
mpd.do_map = 0;
+ mpd.scanned_until_end = 0;
mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
if (!mpd.io_submit.io_end) {
ret = -ENOMEM;
@@ -2730,7 +2743,7 @@ retry:
if (ret < 0)
goto unplug;
- while (!done && mpd.first_page <= mpd.last_page) {
+ while (!mpd.scanned_until_end && wbc->nr_to_write > 0) {
/* For each extent of pages we use new io_end */
mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
if (!mpd.io_submit.io_end) {
@@ -2765,20 +2778,9 @@ retry:
trace_ext4_da_write_pages(inode, mpd.first_page, mpd.wbc);
ret = mpage_prepare_extent_to_map(&mpd);
- if (!ret) {
- if (mpd.map.m_len)
- ret = mpage_map_and_submit_extent(handle, &mpd,
+ if (!ret && mpd.map.m_len)
+ ret = mpage_map_and_submit_extent(handle, &mpd,
&give_up_on_write);
- else {
- /*
- * We scanned the whole range (or exhausted
- * nr_to_write), submitted what was mapped and
- * didn't find anything needing mapping. We are
- * done.
- */
- done = true;
- }
- }
/*
* Caution: If the handle is synchronous,
* ext4_journal_stop() can wait for transaction commit
@@ -3077,7 +3079,7 @@ static int ext4_da_write_end(struct file *file,
* new_i_size is less than inode->i_size
* but greater than i_disksize. (hint: delalloc)
*/
- ext4_mark_inode_dirty(handle, inode);
+ ret = ext4_mark_inode_dirty(handle, inode);
}
}
@@ -3094,7 +3096,7 @@ static int ext4_da_write_end(struct file *file,
if (ret2 < 0)
ret = ret2;
ret2 = ext4_journal_stop(handle);
- if (!ret)
+ if (unlikely(ret2 && !ret))
ret = ret2;
return ret ? ret : copied;
@@ -3883,6 +3885,8 @@ int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
loff_t len)
{
handle_t *handle;
+ int ret;
+
loff_t size = i_size_read(inode);
WARN_ON(!inode_is_locked(inode));
@@ -3896,10 +3900,10 @@ int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
if (IS_ERR(handle))
return PTR_ERR(handle);
ext4_update_i_disksize(inode, size);
- ext4_mark_inode_dirty(handle, inode);
+ ret = ext4_mark_inode_dirty(handle, inode);
ext4_journal_stop(handle);
- return 0;
+ return ret;
}
static void ext4_wait_dax_page(struct ext4_inode_info *ei)
@@ -3951,7 +3955,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
loff_t first_block_offset, last_block_offset;
handle_t *handle;
unsigned int credits;
- int ret = 0;
+ int ret = 0, ret2 = 0;
trace_ext4_punch_hole(inode, offset, length, 0);
@@ -4074,7 +4078,9 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
ext4_handle_sync(handle);
inode->i_mtime = inode->i_ctime = current_time(inode);
- ext4_mark_inode_dirty(handle, inode);
+ ret2 = ext4_mark_inode_dirty(handle, inode);
+ if (unlikely(ret2))
+ ret = ret2;
if (ret >= 0)
ext4_update_inode_fsync_trans(handle, inode, 1);
out_stop:
@@ -4143,7 +4149,7 @@ int ext4_truncate(struct inode *inode)
{
struct ext4_inode_info *ei = EXT4_I(inode);
unsigned int credits;
- int err = 0;
+ int err = 0, err2;
handle_t *handle;
struct address_space *mapping = inode->i_mapping;
@@ -4231,7 +4237,9 @@ out_stop:
ext4_orphan_del(handle, inode);
inode->i_mtime = inode->i_ctime = current_time(inode);
- ext4_mark_inode_dirty(handle, inode);
+ err2 = ext4_mark_inode_dirty(handle, inode);
+ if (unlikely(err2 && !err))
+ err = err2;
ext4_journal_stop(handle);
trace_ext4_truncate_exit(inode);
@@ -4857,21 +4865,22 @@ static int ext4_inode_blocks_set(handle_t *handle,
return 0;
}
-struct other_inode {
- unsigned long orig_ino;
- struct ext4_inode *raw_inode;
-};
-
-static int other_inode_match(struct inode * inode, unsigned long ino,
- void *data)
+static void __ext4_update_other_inode_time(struct super_block *sb,
+ unsigned long orig_ino,
+ unsigned long ino,
+ struct ext4_inode *raw_inode)
{
- struct other_inode *oi = (struct other_inode *) data;
+ struct inode *inode;
+
+ inode = find_inode_by_ino_rcu(sb, ino);
+ if (!inode)
+ return;
- if ((inode->i_ino != ino) ||
- (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW |
+ if ((inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW |
I_DIRTY_INODE)) ||
((inode->i_state & I_DIRTY_TIME) == 0))
- return 0;
+ return;
+
spin_lock(&inode->i_lock);
if (((inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW |
I_DIRTY_INODE)) == 0) &&
@@ -4882,16 +4891,15 @@ static int other_inode_match(struct inode * inode, unsigned long ino,
spin_unlock(&inode->i_lock);
spin_lock(&ei->i_raw_lock);
- EXT4_INODE_SET_XTIME(i_ctime, inode, oi->raw_inode);
- EXT4_INODE_SET_XTIME(i_mtime, inode, oi->raw_inode);
- EXT4_INODE_SET_XTIME(i_atime, inode, oi->raw_inode);
- ext4_inode_csum_set(inode, oi->raw_inode, ei);
+ EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
+ EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
+ EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
+ ext4_inode_csum_set(inode, raw_inode, ei);
spin_unlock(&ei->i_raw_lock);
- trace_ext4_other_inode_update_time(inode, oi->orig_ino);
- return -1;
+ trace_ext4_other_inode_update_time(inode, orig_ino);
+ return;
}
spin_unlock(&inode->i_lock);
- return -1;
}
/*
@@ -4901,24 +4909,24 @@ static int other_inode_match(struct inode * inode, unsigned long ino,
static void ext4_update_other_inodes_time(struct super_block *sb,
unsigned long orig_ino, char *buf)
{
- struct other_inode oi;
unsigned long ino;
int i, inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
int inode_size = EXT4_INODE_SIZE(sb);
- oi.orig_ino = orig_ino;
/*
* Calculate the first inode in the inode table block. Inode
* numbers are one-based. That is, the first inode in a block
* (assuming 4k blocks and 256 byte inodes) is (n*16 + 1).
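 * For example, with 16 inodes per block, orig_ino 35 gives
 * ((35 - 1) & ~15) + 1 = 33, the first inode of the block that
 * covers inodes 33..48.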
*/
ino = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1;
+ rcu_read_lock();
for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) {
if (ino == orig_ino)
continue;
- oi.raw_inode = (struct ext4_inode *) buf;
- (void) find_inode_nowait(sb, ino, other_inode_match, &oi);
+ __ext4_update_other_inode_time(sb, orig_ino, ino,
+ (struct ext4_inode *)buf);
}
+ rcu_read_unlock();
}
/*
@@ -5289,6 +5297,8 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
inode->i_gid = attr->ia_gid;
error = ext4_mark_inode_dirty(handle, inode);
ext4_journal_stop(handle);
+ if (unlikely(error))
+ return error;
}
if (attr->ia_valid & ATTR_SIZE) {
@@ -5774,7 +5784,8 @@ out_unlock:
* Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
* we start and wait on commits.
*/
-int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
+int __ext4_mark_inode_dirty(handle_t *handle, struct inode *inode,
+ const char *func, unsigned int line)
{
struct ext4_iloc iloc;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
@@ -5784,13 +5795,18 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
trace_ext4_mark_inode_dirty(inode, _RET_IP_);
err = ext4_reserve_inode_write(handle, inode, &iloc);
if (err)
- return err;
+ goto out;
if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize)
ext4_try_to_expand_extra_isize(inode, sbi->s_want_extra_isize,
iloc, handle);
- return ext4_mark_iloc_dirty(handle, inode, &iloc);
+ err = ext4_mark_iloc_dirty(handle, inode, &iloc);
+out:
+ if (unlikely(err))
+ ext4_error_inode_err(inode, func, line, 0, err,
+ "mark_inode_dirty error");
+ return err;
}
/*
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 0746532ba463..2162db0c747d 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -754,14 +754,6 @@ static int ext4_ioctl_get_es_cache(struct file *filp, unsigned long arg)
fieinfo.fi_extents_max = fiemap.fm_extent_count;
fieinfo.fi_extents_start = ufiemap->fm_extents;
- if (fiemap.fm_extent_count != 0 &&
- !access_ok(fieinfo.fi_extents_start,
- fieinfo.fi_extents_max * sizeof(struct fiemap_extent)))
- return -EFAULT;
-
- if (fieinfo.fi_flags & FIEMAP_FLAG_SYNC)
- filemap_write_and_wait(inode->i_mapping);
-
error = ext4_get_es_cache(inode, &fieinfo, fiemap.fm_start,
fiemap.fm_length);
fiemap.fm_flags = fieinfo.fi_flags;
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 30d5d97548c4..a9083113a8c0 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -18,13 +18,6 @@
#include <linux/backing-dev.h>
#include <trace/events/ext4.h>
-#ifdef CONFIG_EXT4_DEBUG
-ushort ext4_mballoc_debug __read_mostly;
-
-module_param_named(mballoc_debug, ext4_mballoc_debug, ushort, 0644);
-MODULE_PARM_DESC(mballoc_debug, "Debugging level for ext4's mballoc");
-#endif
-
/*
* MUSTDO:
* - test ext4_ext_search_left() and ext4_ext_search_right()
@@ -356,6 +349,36 @@ static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
ext4_group_t group);
static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
ext4_group_t group);
+static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac);
+
+/*
+ * The algorithm using this percpu seq counter works as follows:
+ * 1. We sample the percpu discard_pa_seq counter before trying for block
+ *    allocation in ext4_mb_new_blocks().
+ * 2. We increment this percpu discard_pa_seq counter when we either allocate
+ *    or free these blocks, i.e. while marking those blocks as used/free in
+ *    mb_mark_used()/mb_free_blocks().
+ * 3. We also increment this percpu seq counter when we successfully identify
+ *    that the bb_prealloc_list is not empty and hence proceed to discard
+ *    those PAs inside ext4_mb_discard_group_preallocations().
+ *
+ * To make sure that the regular fast path of block allocation is not
+ * affected, as a small optimization we only sample the percpu seq counter
+ * on the local cpu. Only when the block allocation fails and no freed
+ * blocks were found do we sample the percpu seq counter for all cpus,
+ * using ext4_get_discard_pa_seq_sum() below. This happens after making
+ * sure that all the PAs on grp->bb_prealloc_list got freed, or that the
+ * list was empty to begin with.
+ */
+static DEFINE_PER_CPU(u64, discard_pa_seq);
+static inline u64 ext4_get_discard_pa_seq_sum(void)
+{
+ int __cpu;
+ u64 __seq = 0;
+
+ for_each_possible_cpu(__cpu)
+ __seq += per_cpu(discard_pa_seq, __cpu);
+ return __seq;
+}
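The effect of this counter can be illustrated outside the kernel. The sketch below is a hypothetical userspace reduction: a single C11 atomic stands in for the summed percpu counters, and the retry decision compares the pre-allocation snapshot against a re-read taken after a failed allocation, so a concurrent discard or free is noticed and the allocation retried.

        #include <stdatomic.h>
        #include <stdbool.h>
        #include <stdio.h>

        /* Stand-in for the summed percpu discard_pa_seq: bumped whenever
         * blocks are marked used/free or group PAs are discarded. */
        static atomic_ulong discard_seq;

        static void discard_group_pas(void)
        {
                atomic_fetch_add(&discard_seq, 1);
        }

        static bool should_retry(unsigned long snapshot)
        {
                /* The counter moved: someone freed or discarded PAs
                 * behind our back, so a retry may now succeed. */
                return atomic_load(&discard_seq) != snapshot;
        }

        int main(void)
        {
                unsigned long seq = atomic_load(&discard_seq);

                /* ... block allocation fails, nothing was freed ... */
                discard_group_pas();    /* a racing CPU releases PAs */

                printf("retry? %s\n", should_retry(seq) ? "yes" : "no");
                return 0;
        }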
static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
{
@@ -493,6 +516,8 @@ static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
+ if (unlikely(e4b->bd_info->bb_bitmap == NULL))
+ return;
if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
unsigned char *b1, *b2;
int i;
@@ -511,6 +536,31 @@ static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
}
}
+static void mb_group_bb_bitmap_alloc(struct super_block *sb,
+ struct ext4_group_info *grp, ext4_group_t group)
+{
+ struct buffer_head *bh;
+
+ grp->bb_bitmap = kmalloc(sb->s_blocksize, GFP_NOFS);
+ if (!grp->bb_bitmap)
+ return;
+
+ bh = ext4_read_block_bitmap(sb, group);
+ if (IS_ERR_OR_NULL(bh)) {
+ kfree(grp->bb_bitmap);
+ grp->bb_bitmap = NULL;
+ return;
+ }
+
+ memcpy(grp->bb_bitmap, bh->b_data, sb->s_blocksize);
+ put_bh(bh);
+}
+
+static void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
+{
+ kfree(grp->bb_bitmap);
+}
+
#else
static inline void mb_free_blocks_double(struct inode *inode,
struct ext4_buddy *e4b, int first, int count)
@@ -526,6 +576,17 @@ static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
return;
}
+
+static inline void mb_group_bb_bitmap_alloc(struct super_block *sb,
+ struct ext4_group_info *grp, ext4_group_t group)
+{
+ return;
+}
+
+static inline void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
+{
+ return;
+}
#endif
#ifdef AGGRESSIVE_CHECK
@@ -820,14 +881,14 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
char *bitmap;
struct ext4_group_info *grinfo;
- mb_debug(1, "init page %lu\n", page->index);
-
inode = page->mapping->host;
sb = inode->i_sb;
ngroups = ext4_get_groups_count(sb);
blocksize = i_blocksize(inode);
blocks_per_page = PAGE_SIZE / blocksize;
+ mb_debug(sb, "init page %lu\n", page->index);
+
groups_per_page = blocks_per_page >> 1;
if (groups_per_page == 0)
groups_per_page = 1;
@@ -867,7 +928,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
bh[i] = NULL;
goto out;
}
- mb_debug(1, "read bitmap for group %u\n", group);
+ mb_debug(sb, "read bitmap for group %u\n", group);
}
/* wait for I/O completion */
@@ -912,7 +973,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
if ((first_block + i) & 1) {
/* this is block of buddy */
BUG_ON(incore == NULL);
- mb_debug(1, "put buddy for group %u in page %lu/%x\n",
+ mb_debug(sb, "put buddy for group %u in page %lu/%x\n",
group, page->index, i * blocksize);
trace_ext4_mb_buddy_bitmap_load(sb, group);
grinfo = ext4_get_group_info(sb, group);
@@ -932,7 +993,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
} else {
/* this is block of bitmap */
BUG_ON(incore != NULL);
- mb_debug(1, "put bitmap for group %u in page %lu/%x\n",
+ mb_debug(sb, "put bitmap for group %u in page %lu/%x\n",
group, page->index, i * blocksize);
trace_ext4_mb_bitmap_load(sb, group);
@@ -1038,7 +1099,7 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
int ret = 0;
might_sleep();
- mb_debug(1, "init group %u\n", group);
+ mb_debug(sb, "init group %u\n", group);
this_grp = ext4_get_group_info(sb, group);
/*
* This ensures that we don't reinit the buddy cache
@@ -1110,7 +1171,7 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
struct inode *inode = sbi->s_buddy_cache;
might_sleep();
- mb_debug(1, "load group %u\n", group);
+ mb_debug(sb, "load group %u\n", group);
blocks_per_page = PAGE_SIZE / sb->s_blocksize;
grp = ext4_get_group_info(sb, group);
@@ -1430,6 +1491,7 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
mb_check_buddy(e4b);
mb_free_blocks_double(inode, e4b, first, count);
+ this_cpu_inc(discard_pa_seq);
e4b->bd_info->bb_free += count;
if (first < e4b->bd_info->bb_first_free)
e4b->bd_info->bb_first_free = first;
@@ -1571,6 +1633,7 @@ static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
mb_check_buddy(e4b);
mb_mark_used_double(e4b, start, len);
+ this_cpu_inc(discard_pa_seq);
e4b->bd_info->bb_free -= len;
if (e4b->bd_info->bb_first_free == start)
e4b->bd_info->bb_first_free += len;
@@ -1670,6 +1733,14 @@ static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
spin_unlock(&sbi->s_md_lock);
}
+ /*
+ * As we've just preallocated more space than the
+ * user originally requested, we store the allocated
+ * space in a special descriptor.
+ */
+ if (ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
+ ext4_mb_new_preallocation(ac);
+
}
/*
@@ -1918,7 +1989,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
ext4_mb_use_best_found(ac, e4b);
- BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
+ BUG_ON(ac->ac_f_ex.fe_len != ac->ac_g_ex.fe_len);
if (EXT4_SB(sb)->s_mb_stats)
atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
@@ -2035,15 +2106,14 @@ void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
}
/*
- * This is now called BEFORE we load the buddy bitmap.
+ * This is also called BEFORE we load the buddy bitmap.
* Returns either 1 or 0 indicating that the group is either suitable
- * for the allocation or not. In addition it can also return negative
- * error code when something goes wrong.
+ * for the allocation or not.
*/
-static int ext4_mb_good_group(struct ext4_allocation_context *ac,
+static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
ext4_group_t group, int cr)
{
- unsigned free, fragments;
+ ext4_grpblk_t free, fragments;
int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
@@ -2051,23 +2121,16 @@ static int ext4_mb_good_group(struct ext4_allocation_context *ac,
free = grp->bb_free;
if (free == 0)
- return 0;
+ return false;
if (cr <= 2 && free < ac->ac_g_ex.fe_len)
- return 0;
+ return false;
if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
- return 0;
-
- /* We only do this if the grp has never been initialized */
- if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
- int ret = ext4_mb_init_group(ac->ac_sb, group, GFP_NOFS);
- if (ret)
- return ret;
- }
+ return false;
fragments = grp->bb_fragments;
if (fragments == 0)
- return 0;
+ return false;
switch (cr) {
case 0:
@@ -2077,38 +2140,80 @@ static int ext4_mb_good_group(struct ext4_allocation_context *ac,
if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
(flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
((group % flex_size) == 0))
- return 0;
+ return false;
if ((ac->ac_2order > ac->ac_sb->s_blocksize_bits+1) ||
(free / fragments) >= ac->ac_g_ex.fe_len)
- return 1;
+ return true;
if (grp->bb_largest_free_order < ac->ac_2order)
- return 0;
+ return false;
- return 1;
+ return true;
case 1:
if ((free / fragments) >= ac->ac_g_ex.fe_len)
- return 1;
+ return true;
break;
case 2:
if (free >= ac->ac_g_ex.fe_len)
- return 1;
+ return true;
break;
case 3:
- return 1;
+ return true;
default:
BUG();
}
- return 0;
+ return false;
+}
+
+/*
+ * This could return a negative error code if something goes wrong
+ * during ext4_mb_init_group(). This should not be called with
+ * ext4_lock_group() held.
+ */
+static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
+ ext4_group_t group, int cr)
+{
+ struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
+ struct super_block *sb = ac->ac_sb;
+ bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK;
+ ext4_grpblk_t free;
+ int ret = 0;
+
+ if (should_lock)
+ ext4_lock_group(sb, group);
+ free = grp->bb_free;
+ if (free == 0)
+ goto out;
+ if (cr <= 2 && free < ac->ac_g_ex.fe_len)
+ goto out;
+ if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
+ goto out;
+ if (should_lock)
+ ext4_unlock_group(sb, group);
+
+ /* We only do this if the grp has never been initialized */
+ if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
+ ret = ext4_mb_init_group(ac->ac_sb, group, GFP_NOFS);
+ if (ret)
+ return ret;
+ }
+
+ if (should_lock)
+ ext4_lock_group(sb, group);
+ ret = ext4_mb_good_group(ac, group, cr);
+out:
+ if (should_lock)
+ ext4_unlock_group(sb, group);
+ return ret;
}
static noinline_for_stack int
ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
{
ext4_group_t ngroups, group, i;
- int cr;
+ int cr = -1;
int err = 0, first_err = 0;
struct ext4_sb_info *sbi;
struct super_block *sb;
@@ -2189,7 +2294,7 @@ repeat:
group = 0;
/* This now checks without needing the buddy page */
- ret = ext4_mb_good_group(ac, group, cr);
+ ret = ext4_mb_good_group_nolock(ac, group, cr);
if (ret <= 0) {
if (!first_err)
first_err = ret;
@@ -2207,11 +2312,9 @@ repeat:
* block group
*/
ret = ext4_mb_good_group(ac, group, cr);
- if (ret <= 0) {
+ if (ret == 0) {
ext4_unlock_group(sb, group);
ext4_mb_unload_buddy(&e4b);
- if (!first_err)
- first_err = ret;
continue;
}
@@ -2260,6 +2363,10 @@ repeat:
out:
if (!err && ac->ac_status != AC_STATUS_FOUND && first_err)
err = first_err;
+
+ mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n",
+ ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status,
+ ac->ac_flags, cr, err);
return err;
}
@@ -2452,20 +2559,7 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
meta_group_info[i]->bb_free_root = RB_ROOT;
meta_group_info[i]->bb_largest_free_order = -1; /* uninit */
-#ifdef DOUBLE_CHECK
- {
- struct buffer_head *bh;
- meta_group_info[i]->bb_bitmap =
- kmalloc(sb->s_blocksize, GFP_NOFS);
- BUG_ON(meta_group_info[i]->bb_bitmap == NULL);
- bh = ext4_read_block_bitmap(sb, group);
- BUG_ON(IS_ERR_OR_NULL(bh));
- memcpy(meta_group_info[i]->bb_bitmap, bh->b_data,
- sb->s_blocksize);
- put_bh(bh);
- }
-#endif
-
+ mb_group_bb_bitmap_alloc(sb, meta_group_info[i], group);
return 0;
exit_group_info:
@@ -2702,7 +2796,7 @@ out:
}
/* needs to be called with the ext4 group lock held */
-static void ext4_mb_cleanup_pa(struct ext4_group_info *grp)
+static int ext4_mb_cleanup_pa(struct ext4_group_info *grp)
{
struct ext4_prealloc_space *pa;
struct list_head *cur, *tmp;
@@ -2714,9 +2808,7 @@ static void ext4_mb_cleanup_pa(struct ext4_group_info *grp)
count++;
kmem_cache_free(ext4_pspace_cachep, pa);
}
- if (count)
- mb_debug(1, "mballoc: %u PAs left\n", count);
-
+ return count;
}
int ext4_mb_release(struct super_block *sb)
@@ -2727,16 +2819,18 @@ int ext4_mb_release(struct super_block *sb)
struct ext4_group_info *grinfo, ***group_info;
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
+ int count;
if (sbi->s_group_info) {
for (i = 0; i < ngroups; i++) {
cond_resched();
grinfo = ext4_get_group_info(sb, i);
-#ifdef DOUBLE_CHECK
- kfree(grinfo->bb_bitmap);
-#endif
+ mb_group_bb_bitmap_free(grinfo);
ext4_lock_group(sb, i);
- ext4_mb_cleanup_pa(grinfo);
+ count = ext4_mb_cleanup_pa(grinfo);
+ if (count)
+ mb_debug(sb, "mballoc: %d PAs left\n",
+ count);
ext4_unlock_group(sb, i);
kmem_cache_free(cachep, grinfo);
}
@@ -2809,7 +2903,7 @@ static void ext4_free_data_in_buddy(struct super_block *sb,
struct ext4_group_info *db;
int err, count = 0, count2 = 0;
- mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
+ mb_debug(sb, "gonna free %u blocks in group %u (0x%p):",
entry->efd_count, entry->efd_group, entry);
err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b);
@@ -2849,7 +2943,8 @@ static void ext4_free_data_in_buddy(struct super_block *sb,
kmem_cache_free(ext4_free_data_cachep, entry);
ext4_mb_unload_buddy(&e4b);
- mb_debug(1, "freed %u blocks in %u structures\n", count, count2);
+ mb_debug(sb, "freed %d blocks in %d structures\n", count,
+ count2);
}
/*
@@ -2909,23 +3004,26 @@ int __init ext4_init_mballoc(void)
ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space,
SLAB_RECLAIM_ACCOUNT);
if (ext4_pspace_cachep == NULL)
- return -ENOMEM;
+ goto out;
ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context,
SLAB_RECLAIM_ACCOUNT);
- if (ext4_ac_cachep == NULL) {
- kmem_cache_destroy(ext4_pspace_cachep);
- return -ENOMEM;
- }
+ if (ext4_ac_cachep == NULL)
+ goto out_pa_free;
ext4_free_data_cachep = KMEM_CACHE(ext4_free_data,
SLAB_RECLAIM_ACCOUNT);
- if (ext4_free_data_cachep == NULL) {
- kmem_cache_destroy(ext4_pspace_cachep);
- kmem_cache_destroy(ext4_ac_cachep);
- return -ENOMEM;
- }
+ if (ext4_free_data_cachep == NULL)
+ goto out_ac_free;
+
return 0;
+
+out_ac_free:
+ kmem_cache_destroy(ext4_ac_cachep);
+out_pa_free:
+ kmem_cache_destroy(ext4_pspace_cachep);
+out:
+ return -ENOMEM;
}
void ext4_exit_mballoc(void)
@@ -3077,8 +3175,7 @@ static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
BUG_ON(lg == NULL);
ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
- mb_debug(1, "#%u: goal %u blocks for locality group\n",
- current->pid, ac->ac_g_ex.fe_len);
+ mb_debug(sb, "goal %u blocks for locality group\n", ac->ac_g_ex.fe_len);
}
/*
@@ -3276,8 +3373,8 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
}
- mb_debug(1, "goal: %u(was %u) blocks at %u\n", (unsigned) size,
- (unsigned) orig_size, (unsigned) start);
+ mb_debug(ac->ac_sb, "goal: %lld(was %lld) blocks at %u\n", size,
+ orig_size, start);
}
static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
@@ -3366,7 +3463,7 @@ static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
BUG_ON(pa->pa_free < len);
pa->pa_free -= len;
- mb_debug(1, "use %llu/%u from inode pa %p\n", start, len, pa);
+ mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa);
}
/*
@@ -3390,7 +3487,8 @@ static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
* in on-disk bitmap -- see ext4_mb_release_context()
* Other CPUs are prevented from allocating from this pa by lg_mutex
*/
- mb_debug(1, "use %u/%u from group pa %p\n", pa->pa_lstart-len, len, pa);
+ mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n",
+ pa->pa_lstart-len, len, pa);
}
/*
@@ -3425,7 +3523,7 @@ ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
/*
* search goal blocks in preallocated space
*/
-static noinline_for_stack int
+static noinline_for_stack bool
ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
{
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
@@ -3437,7 +3535,7 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
/* only data can be preallocated */
if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
- return 0;
+ return false;
/* first, try per-file preallocation */
rcu_read_lock();
@@ -3464,7 +3562,7 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
spin_unlock(&pa->pa_lock);
ac->ac_criteria = 10;
rcu_read_unlock();
- return 1;
+ return true;
}
spin_unlock(&pa->pa_lock);
}
@@ -3472,12 +3570,12 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
/* can we use group allocation? */
if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
- return 0;
+ return false;
/* inode may have no locality group for some reason */
lg = ac->ac_lg;
if (lg == NULL)
- return 0;
+ return false;
order = fls(ac->ac_o_ex.fe_len) - 1;
if (order > PREALLOC_TB_SIZE - 1)
/* The max size of hash table is PREALLOC_TB_SIZE */
@@ -3506,9 +3604,9 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
if (cpa) {
ext4_mb_use_group_pa(ac, cpa);
ac->ac_criteria = 20;
- return 1;
+ return true;
}
- return 0;
+ return false;
}
/*
@@ -3573,7 +3671,7 @@ void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
ext4_set_bits(bitmap, start, len);
preallocated += len;
}
- mb_debug(1, "preallocated %u for group %u\n", preallocated, group);
+ mb_debug(sb, "preallocated %d for group %u\n", preallocated, group);
}
static void ext4_mb_pa_callback(struct rcu_head *head)
@@ -3649,7 +3747,7 @@ static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
/*
* creates new preallocated space for given inode
*/
-static noinline_for_stack int
+static noinline_for_stack void
ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
{
struct super_block *sb = ac->ac_sb;
@@ -3662,10 +3760,9 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
BUG_ON(ac->ac_status != AC_STATUS_FOUND);
BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
+ BUG_ON(ac->ac_pa == NULL);
- pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
- if (pa == NULL)
- return -ENOMEM;
+ pa = ac->ac_pa;
if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
int winl;
@@ -3709,15 +3806,14 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
pa->pa_len = ac->ac_b_ex.fe_len;
pa->pa_free = pa->pa_len;
- atomic_set(&pa->pa_count, 1);
spin_lock_init(&pa->pa_lock);
INIT_LIST_HEAD(&pa->pa_inode_list);
INIT_LIST_HEAD(&pa->pa_group_list);
pa->pa_deleted = 0;
pa->pa_type = MB_INODE_PA;
- mb_debug(1, "new inode pa %p: %llu/%u for %u\n", pa,
- pa->pa_pstart, pa->pa_len, pa->pa_lstart);
+ mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
+ pa->pa_len, pa->pa_lstart);
trace_ext4_mb_new_inode_pa(ac, pa);
ext4_mb_use_inode_pa(ac, pa);
@@ -3729,21 +3825,17 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
pa->pa_obj_lock = &ei->i_prealloc_lock;
pa->pa_inode = ac->ac_inode;
- ext4_lock_group(sb, ac->ac_b_ex.fe_group);
list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
- ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
spin_lock(pa->pa_obj_lock);
list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
spin_unlock(pa->pa_obj_lock);
-
- return 0;
}
/*
* creates new preallocated space for locality group inodes belongs to
*/
-static noinline_for_stack int
+static noinline_for_stack void
ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
{
struct super_block *sb = ac->ac_sb;
@@ -3755,11 +3847,9 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
BUG_ON(ac->ac_status != AC_STATUS_FOUND);
BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
+ BUG_ON(ac->ac_pa == NULL);
- BUG_ON(ext4_pspace_cachep == NULL);
- pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
- if (pa == NULL)
- return -ENOMEM;
+ pa = ac->ac_pa;
/* preallocation can change ac_b_ex, thus we store the actually
* allocated blocks for history */
@@ -3769,15 +3859,14 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
pa->pa_lstart = pa->pa_pstart;
pa->pa_len = ac->ac_b_ex.fe_len;
pa->pa_free = pa->pa_len;
- atomic_set(&pa->pa_count, 1);
spin_lock_init(&pa->pa_lock);
INIT_LIST_HEAD(&pa->pa_inode_list);
INIT_LIST_HEAD(&pa->pa_group_list);
pa->pa_deleted = 0;
pa->pa_type = MB_GROUP_PA;
- mb_debug(1, "new group pa %p: %llu/%u for %u\n", pa,
- pa->pa_pstart, pa->pa_len, pa->pa_lstart);
+ mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart,
+ pa->pa_len, pa->pa_lstart);
trace_ext4_mb_new_group_pa(ac, pa);
ext4_mb_use_group_pa(ac, pa);
@@ -3790,26 +3879,20 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
pa->pa_obj_lock = &lg->lg_prealloc_lock;
pa->pa_inode = NULL;
- ext4_lock_group(sb, ac->ac_b_ex.fe_group);
list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
- ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
/*
* We will later add the new pa to the right bucket
* after updating the pa_free in ext4_mb_release_context
*/
- return 0;
}
-static int ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
+static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
{
- int err;
-
if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
- err = ext4_mb_new_group_pa(ac);
+ ext4_mb_new_group_pa(ac);
else
- err = ext4_mb_new_inode_pa(ac);
- return err;
+ ext4_mb_new_inode_pa(ac);
}
/*
@@ -3844,7 +3927,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
if (bit >= end)
break;
next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
- mb_debug(1, " free preallocated %u/%u in group %u\n",
+ mb_debug(sb, "free preallocated %u/%u in group %u\n",
(unsigned) ext4_group_first_block_no(sb, group) + bit,
(unsigned) next - bit, (unsigned) group);
free += next - bit;
@@ -3858,10 +3941,10 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
}
if (free != pa->pa_free) {
ext4_msg(e4b->bd_sb, KERN_CRIT,
- "pa %p: logic %lu, phys. %lu, len %lu",
+ "pa %p: logic %lu, phys. %lu, len %d",
pa, (unsigned long) pa->pa_lstart,
(unsigned long) pa->pa_pstart,
- (unsigned long) pa->pa_len);
+ pa->pa_len);
ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u",
free, pa->pa_free);
/*
@@ -3915,10 +3998,9 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
int busy = 0;
int free = 0;
- mb_debug(1, "discard preallocation for group %u\n", group);
-
+ mb_debug(sb, "discard preallocation for group %u\n", group);
if (list_empty(&grp->bb_prealloc_list))
- return 0;
+ goto out_dbg;
bitmap_bh = ext4_read_block_bitmap(sb, group);
if (IS_ERR(bitmap_bh)) {
@@ -3926,7 +4008,7 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
ext4_error_err(sb, -err,
"Error %d reading block bitmap for %u",
err, group);
- return 0;
+ goto out_dbg;
}
err = ext4_mb_load_buddy(sb, group, &e4b);
@@ -3934,7 +4016,7 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
ext4_warning(sb, "Error %d loading buddy information for %u",
err, group);
put_bh(bitmap_bh);
- return 0;
+ goto out_dbg;
}
if (needed == 0)
@@ -3943,6 +4025,7 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
INIT_LIST_HEAD(&list);
repeat:
ext4_lock_group(sb, group);
+ this_cpu_inc(discard_pa_seq);
list_for_each_entry_safe(pa, tmp,
&grp->bb_prealloc_list, pa_group_list) {
spin_lock(&pa->pa_lock);
@@ -3979,6 +4062,8 @@ repeat:
/* found anything to free? */
if (list_empty(&list)) {
BUG_ON(free != 0);
+ mb_debug(sb, "Someone else may have freed PA for this group %u\n",
+ group);
goto out;
}
@@ -4003,6 +4088,9 @@ out:
ext4_unlock_group(sb, group);
ext4_mb_unload_buddy(&e4b);
put_bh(bitmap_bh);
+out_dbg:
+ mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n",
+ free, group, grp->bb_free);
return free;
}
@@ -4031,7 +4119,8 @@ void ext4_discard_preallocations(struct inode *inode)
return;
}
- mb_debug(1, "discard preallocation for inode %lu\n", inode->i_ino);
+ mb_debug(sb, "discard preallocation for inode %lu\n",
+ inode->i_ino);
trace_ext4_discard_preallocations(inode);
INIT_LIST_HEAD(&list);
@@ -4119,22 +4208,74 @@ repeat:
}
}
+static int ext4_mb_pa_alloc(struct ext4_allocation_context *ac)
+{
+ struct ext4_prealloc_space *pa;
+
+ BUG_ON(ext4_pspace_cachep == NULL);
+ pa = kmem_cache_zalloc(ext4_pspace_cachep, GFP_NOFS);
+ if (!pa)
+ return -ENOMEM;
+ atomic_set(&pa->pa_count, 1);
+ ac->ac_pa = pa;
+ return 0;
+}
+
+static void ext4_mb_pa_free(struct ext4_allocation_context *ac)
+{
+ struct ext4_prealloc_space *pa = ac->ac_pa;
+
+ BUG_ON(!pa);
+ ac->ac_pa = NULL;
+ WARN_ON(!atomic_dec_and_test(&pa->pa_count));
+ kmem_cache_free(ext4_pspace_cachep, pa);
+}
+
#ifdef CONFIG_EXT4_DEBUG
+static inline void ext4_mb_show_pa(struct super_block *sb)
+{
+ ext4_group_t i, ngroups;
+
+ if (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)
+ return;
+
+ ngroups = ext4_get_groups_count(sb);
+ mb_debug(sb, "groups: ");
+ for (i = 0; i < ngroups; i++) {
+ struct ext4_group_info *grp = ext4_get_group_info(sb, i);
+ struct ext4_prealloc_space *pa;
+ ext4_grpblk_t start;
+ struct list_head *cur;
+ ext4_lock_group(sb, i);
+ list_for_each(cur, &grp->bb_prealloc_list) {
+ pa = list_entry(cur, struct ext4_prealloc_space,
+ pa_group_list);
+ spin_lock(&pa->pa_lock);
+ ext4_get_group_no_and_offset(sb, pa->pa_pstart,
+ NULL, &start);
+ spin_unlock(&pa->pa_lock);
+ mb_debug(sb, "PA:%u:%d:%d\n", i, start,
+ pa->pa_len);
+ }
+ ext4_unlock_group(sb, i);
+ mb_debug(sb, "%u: %d/%d\n", i, grp->bb_free,
+ grp->bb_fragments);
+ }
+}
+
static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
{
struct super_block *sb = ac->ac_sb;
- ext4_group_t ngroups, i;
- if (!ext4_mballoc_debug ||
- (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED))
+ if (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)
return;
- ext4_msg(ac->ac_sb, KERN_ERR, "Can't allocate:"
+ mb_debug(sb, "Can't allocate:"
" Allocation context details:");
- ext4_msg(ac->ac_sb, KERN_ERR, "status %d flags %d",
+ mb_debug(sb, "status %u flags 0x%x",
ac->ac_status, ac->ac_flags);
- ext4_msg(ac->ac_sb, KERN_ERR, "orig %lu/%lu/%lu@%lu, "
- "goal %lu/%lu/%lu@%lu, "
+ mb_debug(sb, "orig %lu/%lu/%lu@%lu, "
+ "goal %lu/%lu/%lu@%lu, "
"best %lu/%lu/%lu@%lu cr %d",
(unsigned long)ac->ac_o_ex.fe_group,
(unsigned long)ac->ac_o_ex.fe_start,
@@ -4149,37 +4290,17 @@ static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
(unsigned long)ac->ac_b_ex.fe_len,
(unsigned long)ac->ac_b_ex.fe_logical,
(int)ac->ac_criteria);
- ext4_msg(ac->ac_sb, KERN_ERR, "%d found", ac->ac_found);
- ext4_msg(ac->ac_sb, KERN_ERR, "groups: ");
- ngroups = ext4_get_groups_count(sb);
- for (i = 0; i < ngroups; i++) {
- struct ext4_group_info *grp = ext4_get_group_info(sb, i);
- struct ext4_prealloc_space *pa;
- ext4_grpblk_t start;
- struct list_head *cur;
- ext4_lock_group(sb, i);
- list_for_each(cur, &grp->bb_prealloc_list) {
- pa = list_entry(cur, struct ext4_prealloc_space,
- pa_group_list);
- spin_lock(&pa->pa_lock);
- ext4_get_group_no_and_offset(sb, pa->pa_pstart,
- NULL, &start);
- spin_unlock(&pa->pa_lock);
- printk(KERN_ERR "PA:%u:%d:%u \n", i,
- start, pa->pa_len);
- }
- ext4_unlock_group(sb, i);
-
- if (grp->bb_free == 0)
- continue;
- printk(KERN_ERR "%u: %d/%d \n",
- i, grp->bb_free, grp->bb_fragments);
- }
- printk(KERN_ERR "\n");
+ mb_debug(sb, "%u found", ac->ac_found);
+ ext4_mb_show_pa(sb);
}
#else
+static inline void ext4_mb_show_pa(struct super_block *sb)
+{
+ return;
+}
static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
{
+ ext4_mb_show_pa(ac->ac_sb);
return;
}
#endif
@@ -4282,7 +4403,7 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac,
* locality group. this is a policy, actually */
ext4_mb_group_or_file(ac);
- mb_debug(1, "init ac: %u blocks @ %u, goal %u, flags %x, 2^%d, "
+ mb_debug(sb, "init ac: %u blocks @ %u, goal %u, flags 0x%x, 2^%d, "
"left: %u/%u, right %u/%u to %swritable\n",
(unsigned) ar->len, (unsigned) ar->logical,
(unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
@@ -4303,7 +4424,7 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb,
struct list_head discard_list;
struct ext4_prealloc_space *pa, *tmp;
- mb_debug(1, "discard locality group preallocation\n");
+ mb_debug(sb, "discard locality group preallocation\n");
INIT_LIST_HEAD(&discard_list);
@@ -4486,6 +4607,30 @@ static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
return freed;
}
+static bool ext4_mb_discard_preallocations_should_retry(struct super_block *sb,
+ struct ext4_allocation_context *ac, u64 *seq)
+{
+ int freed;
+ u64 seq_retry = 0;
+ bool ret = false;
+
+ freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
+ if (freed) {
+ ret = true;
+ goto out_dbg;
+ }
+ seq_retry = ext4_get_discard_pa_seq_sum();
+ if (!(ac->ac_flags & EXT4_MB_STRICT_CHECK) || seq_retry != *seq) {
+ ac->ac_flags |= EXT4_MB_STRICT_CHECK;
+ *seq = seq_retry;
+ ret = true;
+ }
+
+out_dbg:
+ mb_debug(sb, "freed %d, retry ? %s\n", freed, ret ? "yes" : "no");
+ return ret;
+}
+
/*
* Main entry point into mballoc to allocate blocks
* it tries to use preallocation first, then falls back
@@ -4494,13 +4639,13 @@ static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
struct ext4_allocation_request *ar, int *errp)
{
- int freed;
struct ext4_allocation_context *ac = NULL;
struct ext4_sb_info *sbi;
struct super_block *sb;
ext4_fsblk_t block = 0;
unsigned int inquota = 0;
unsigned int reserv_clstrs = 0;
+ u64 seq;
might_sleep();
sb = ar->inode->i_sb;
@@ -4525,6 +4670,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
ar->len = ar->len >> 1;
}
if (!ar->len) {
+ ext4_mb_show_pa(sb);
*errp = -ENOSPC;
return 0;
}
@@ -4562,26 +4708,32 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
}
ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
+ seq = *this_cpu_ptr(&discard_pa_seq);
if (!ext4_mb_use_preallocated(ac)) {
ac->ac_op = EXT4_MB_HISTORY_ALLOC;
ext4_mb_normalize_request(ac, ar);
+
+ *errp = ext4_mb_pa_alloc(ac);
+ if (*errp)
+ goto errout;
repeat:
/* allocate space in core */
*errp = ext4_mb_regular_allocator(ac);
- if (*errp)
- goto discard_and_exit;
-
- /* as we've just preallocated more space than
- * user requested originally, we store allocated
- * space in a special descriptor */
- if (ac->ac_status == AC_STATUS_FOUND &&
- ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
- *errp = ext4_mb_new_preallocation(ac);
+ /*
+ * The pa allocated above is added to grp->bb_prealloc_list only
+ * when we were able to allocate some blocks, i.e. when
+ * ac->ac_status == AC_STATUS_FOUND.
+ * An error from above means ac->ac_status != AC_STATUS_FOUND,
+ * so we have to free the pa here ourselves.
+ */
if (*errp) {
- discard_and_exit:
+ ext4_mb_pa_free(ac);
ext4_discard_allocated_blocks(ac);
goto errout;
}
+ if (ac->ac_status == AC_STATUS_FOUND &&
+ ac->ac_o_ex.fe_len >= ac->ac_f_ex.fe_len)
+ ext4_mb_pa_free(ac);
}
if (likely(ac->ac_status == AC_STATUS_FOUND)) {
*errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
@@ -4593,9 +4745,13 @@ repeat:
ar->len = ac->ac_b_ex.fe_len;
}
} else {
- freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
- if (freed)
+ if (ext4_mb_discard_preallocations_should_retry(sb, ac, &seq))
goto repeat;
+ /*
+ * If block allocation fails then the pa allocated above
+ * must be freed here as well.
+ */
+ ext4_mb_pa_free(ac);
*errp = -ENOSPC;
}
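The reworked pa lifecycle above follows a simple discipline: allocate the descriptor before entering the allocator, publish it to the group list only on success, and free it on every path where it was not published. A hedged userspace sketch of that shape (all names hypothetical):

        #include <stdio.h>
        #include <stdlib.h>

        struct pa { int len; };

        /* Allocate the descriptor up front so the failure paths never
         * need memory; publish it only when the allocation succeeds. */
        static int alloc_blocks(struct pa **out, int alloc_ok)
        {
                struct pa *pa = calloc(1, sizeof(*pa));

                if (!pa)
                        return -1;
                if (!alloc_ok) {
                        free(pa);       /* never published: free it here */
                        return -1;
                }
                *out = pa;              /* published, e.g. on a group list */
                return 0;
        }

        int main(void)
        {
                struct pa *pa = NULL;
                int ok = alloc_blocks(&pa, 1);
                int fail = alloc_blocks(&pa, 0);

                printf("ok=%d fail=%d\n", ok, fail);
                free(pa);
                return 0;
        }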
diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
index 88c98f17e3d9..6b4d17c2935d 100644
--- a/fs/ext4/mballoc.h
+++ b/fs/ext4/mballoc.h
@@ -24,19 +24,15 @@
#include "ext4.h"
/*
+ * mb_debug() dynamic printk messages can be used to debug mballoc code.
*/
#ifdef CONFIG_EXT4_DEBUG
-extern ushort ext4_mballoc_debug;
-
-#define mb_debug(n, fmt, ...) \
-do { \
- if ((n) <= ext4_mballoc_debug) { \
- printk(KERN_DEBUG "(%s, %d): %s: " fmt, \
- __FILE__, __LINE__, __func__, ##__VA_ARGS__); \
- } \
-} while (0)
+#define mb_debug(sb, fmt, ...) \
+ pr_debug("[%s/%d] EXT4-fs (%s): (%s, %d): %s: " fmt, \
+ current->comm, task_pid_nr(current), sb->s_id, \
+ __FILE__, __LINE__, __func__, ##__VA_ARGS__)
#else
-#define mb_debug(n, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
+#define mb_debug(sb, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#endif
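With this change mb_debug() output is emitted via pr_debug(), so on kernels built with CONFIG_DYNAMIC_DEBUG the messages can be enabled per call site at run time, e.g. by writing 'file mballoc.c +p' to /sys/kernel/debug/dynamic_debug/control (assuming debugfs is mounted there), replacing the mballoc_debug module parameter removed above.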
#define EXT4_MB_HISTORY_ALLOC 1 /* allocation */
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index fb6520f37135..c5e3fc998211 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -287,7 +287,7 @@ static int free_ind_block(handle_t *handle, struct inode *inode, __le32 *i_data)
static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
struct inode *tmp_inode)
{
- int retval;
+ int retval, retval2 = 0;
__le32 i_data[3];
struct ext4_inode_info *ei = EXT4_I(inode);
struct ext4_inode_info *tmp_ei = EXT4_I(tmp_inode);
@@ -342,7 +342,9 @@ static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
* i_blocks when freeing the indirect meta-data blocks
*/
retval = free_ind_block(handle, inode, i_data);
- ext4_mark_inode_dirty(handle, inode);
+ retval2 = ext4_mark_inode_dirty(handle, inode);
+ if (unlikely(retval2 && !retval))
+ retval = retval2;
err_out:
return retval;
@@ -601,7 +603,7 @@ int ext4_ind_migrate(struct inode *inode)
ext4_lblk_t start, end;
ext4_fsblk_t blk;
handle_t *handle;
- int ret;
+ int ret, ret2 = 0;
if (!ext4_has_feature_extents(inode->i_sb) ||
(!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
@@ -655,7 +657,9 @@ int ext4_ind_migrate(struct inode *inode)
memset(ei->i_data, 0, sizeof(ei->i_data));
for (i = start; i <= end; i++)
ei->i_data[i] = cpu_to_le32(blk++);
- ext4_mark_inode_dirty(handle, inode);
+ ret2 = ext4_mark_inode_dirty(handle, inode);
+ if (unlikely(ret2 && !ret))
+ ret = ret2;
errout:
ext4_journal_stop(handle);
up_write(&EXT4_I(inode)->i_data_sem);
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index a8aca4772aaa..56738b538ddf 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1993,7 +1993,7 @@ static int add_dirent_to_buf(handle_t *handle, struct ext4_filename *fname,
{
unsigned int blocksize = dir->i_sb->s_blocksize;
int csum_size = 0;
- int err;
+ int err, err2;
if (ext4_has_metadata_csum(inode->i_sb))
csum_size = sizeof(struct ext4_dir_entry_tail);
@@ -2028,12 +2028,12 @@ static int add_dirent_to_buf(handle_t *handle, struct ext4_filename *fname,
dir->i_mtime = dir->i_ctime = current_time(dir);
ext4_update_dx_flag(dir);
inode_inc_iversion(dir);
- ext4_mark_inode_dirty(handle, dir);
+ err2 = ext4_mark_inode_dirty(handle, dir);
BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
err = ext4_handle_dirty_dirblock(handle, dir, bh);
if (err)
ext4_std_error(dir->i_sb, err);
- return 0;
+ return err ? err : err2;
}
/*
@@ -2223,7 +2223,9 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
}
ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
dx_fallback++;
- ext4_mark_inode_dirty(handle, dir);
+ retval = ext4_mark_inode_dirty(handle, dir);
+ if (unlikely(retval))
+ goto out;
}
blocks = dir->i_size >> sb->s_blocksize_bits;
for (block = 0; block < blocks; block++) {
@@ -2576,12 +2578,12 @@ static int ext4_add_nondir(handle_t *handle,
struct inode *inode = *inodep;
int err = ext4_add_entry(handle, dentry, inode);
if (!err) {
- ext4_mark_inode_dirty(handle, inode);
+ err = ext4_mark_inode_dirty(handle, inode);
if (IS_DIRSYNC(dir))
ext4_handle_sync(handle);
d_instantiate_new(dentry, inode);
*inodep = NULL;
- return 0;
+ return err;
}
drop_nlink(inode);
ext4_orphan_add(handle, inode);
@@ -2775,7 +2777,7 @@ static int ext4_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
handle_t *handle;
struct inode *inode;
- int err, credits, retries = 0;
+ int err, err2 = 0, credits, retries = 0;
if (EXT4_DIR_LINK_MAX(dir))
return -EMLINK;
@@ -2808,7 +2810,9 @@ out_clear_inode:
clear_nlink(inode);
ext4_orphan_add(handle, inode);
unlock_new_inode(inode);
- ext4_mark_inode_dirty(handle, inode);
+ err2 = ext4_mark_inode_dirty(handle, inode);
+ if (unlikely(err2))
+ err = err2;
ext4_journal_stop(handle);
iput(inode);
goto out_retry;
@@ -3148,10 +3152,12 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
inode->i_size = 0;
ext4_orphan_add(handle, inode);
inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
- ext4_mark_inode_dirty(handle, inode);
+ retval = ext4_mark_inode_dirty(handle, inode);
+ if (retval)
+ goto end_rmdir;
ext4_dec_count(handle, dir);
ext4_update_dx_flag(dir);
- ext4_mark_inode_dirty(handle, dir);
+ retval = ext4_mark_inode_dirty(handle, dir);
#ifdef CONFIG_UNICODE
/* VFS negative dentries are incompatible with Encoding and
@@ -3221,7 +3227,9 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
goto end_unlink;
dir->i_ctime = dir->i_mtime = current_time(dir);
ext4_update_dx_flag(dir);
- ext4_mark_inode_dirty(handle, dir);
+ retval = ext4_mark_inode_dirty(handle, dir);
+ if (retval)
+ goto end_unlink;
if (inode->i_nlink == 0)
ext4_warning_inode(inode, "Deleting file '%.*s' with no links",
dentry->d_name.len, dentry->d_name.name);
@@ -3230,7 +3238,7 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
if (!inode->i_nlink)
ext4_orphan_add(handle, inode);
inode->i_ctime = current_time(inode);
- ext4_mark_inode_dirty(handle, inode);
+ retval = ext4_mark_inode_dirty(handle, inode);
#ifdef CONFIG_UNICODE
/* VFS negative dentries are incompatible with Encoding and
@@ -3419,7 +3427,7 @@ retry:
err = ext4_add_entry(handle, dentry, inode);
if (!err) {
- ext4_mark_inode_dirty(handle, inode);
+ err = ext4_mark_inode_dirty(handle, inode);
/* this can happen only for tmpfile being
* linked the first time
*/
@@ -3531,7 +3539,7 @@ static int ext4_rename_dir_finish(handle_t *handle, struct ext4_renament *ent,
static int ext4_setent(handle_t *handle, struct ext4_renament *ent,
unsigned ino, unsigned file_type)
{
- int retval;
+ int retval, retval2;
BUFFER_TRACE(ent->bh, "get write access");
retval = ext4_journal_get_write_access(handle, ent->bh);
@@ -3543,19 +3551,19 @@ static int ext4_setent(handle_t *handle, struct ext4_renament *ent,
inode_inc_iversion(ent->dir);
ent->dir->i_ctime = ent->dir->i_mtime =
current_time(ent->dir);
- ext4_mark_inode_dirty(handle, ent->dir);
+ retval = ext4_mark_inode_dirty(handle, ent->dir);
BUFFER_TRACE(ent->bh, "call ext4_handle_dirty_metadata");
if (!ent->inlined) {
- retval = ext4_handle_dirty_dirblock(handle, ent->dir, ent->bh);
- if (unlikely(retval)) {
- ext4_std_error(ent->dir->i_sb, retval);
- return retval;
+ retval2 = ext4_handle_dirty_dirblock(handle, ent->dir, ent->bh);
+ if (unlikely(retval2)) {
+ ext4_std_error(ent->dir->i_sb, retval2);
+ return retval2;
}
}
brelse(ent->bh);
ent->bh = NULL;
- return 0;
+ return retval;
}
static int ext4_find_delete_entry(handle_t *handle, struct inode *dir,
@@ -3790,7 +3798,9 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
EXT4_FT_CHRDEV);
if (retval)
goto end_rename;
- ext4_mark_inode_dirty(handle, whiteout);
+ retval = ext4_mark_inode_dirty(handle, whiteout);
+ if (unlikely(retval))
+ goto end_rename;
}
if (!new.bh) {
retval = ext4_add_entry(handle, new.dentry, old.inode);
@@ -3811,7 +3821,9 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
* rename.
*/
old.inode->i_ctime = current_time(old.inode);
- ext4_mark_inode_dirty(handle, old.inode);
+ retval = ext4_mark_inode_dirty(handle, old.inode);
+ if (unlikely(retval))
+ goto end_rename;
if (!whiteout) {
/*
@@ -3840,12 +3852,18 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
} else {
ext4_inc_count(handle, new.dir);
ext4_update_dx_flag(new.dir);
- ext4_mark_inode_dirty(handle, new.dir);
+ retval = ext4_mark_inode_dirty(handle, new.dir);
+ if (unlikely(retval))
+ goto end_rename;
}
}
- ext4_mark_inode_dirty(handle, old.dir);
+ retval = ext4_mark_inode_dirty(handle, old.dir);
+ if (unlikely(retval))
+ goto end_rename;
if (new.inode) {
- ext4_mark_inode_dirty(handle, new.inode);
+ retval = ext4_mark_inode_dirty(handle, new.inode);
+ if (unlikely(retval))
+ goto end_rename;
if (!new.inode->i_nlink)
ext4_orphan_add(handle, new.inode);
}
@@ -3979,8 +3997,12 @@ static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
ctime = current_time(old.inode);
old.inode->i_ctime = ctime;
new.inode->i_ctime = ctime;
- ext4_mark_inode_dirty(handle, old.inode);
- ext4_mark_inode_dirty(handle, new.inode);
+ retval = ext4_mark_inode_dirty(handle, old.inode);
+ if (unlikely(retval))
+ goto end_rename;
+ retval = ext4_mark_inode_dirty(handle, new.inode);
+ if (unlikely(retval))
+ goto end_rename;
if (old.dir_bh) {
retval = ext4_rename_dir_finish(handle, &old, new.dir->i_ino);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 9824cd8203e8..c668f6b42374 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -93,11 +93,11 @@ static struct inode *ext4_get_journal_inode(struct super_block *sb,
* i_mmap_rwsem (inode->i_mmap_rwsem)!
*
* page fault path:
- * mmap_sem -> sb_start_pagefault -> i_mmap_sem (r) -> transaction start ->
+ * mmap_lock -> sb_start_pagefault -> i_mmap_sem (r) -> transaction start ->
* page lock -> i_data_sem (rw)
*
* buffered write path:
- * sb_start_write -> i_mutex -> mmap_sem
+ * sb_start_write -> i_mutex -> mmap_lock
* sb_start_write -> i_mutex -> transaction start -> page lock ->
* i_data_sem (rw)
*
@@ -107,7 +107,7 @@ static struct inode *ext4_get_journal_inode(struct super_block *sb,
* i_data_sem (rw)
*
* direct IO:
- * sb_start_write -> i_mutex -> mmap_sem
+ * sb_start_write -> i_mutex -> mmap_lock
* sb_start_write -> i_mutex -> transaction start -> i_data_sem (rw)
*
* writepages:
@@ -3718,7 +3718,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
int blocksize, clustersize;
unsigned int db_count;
unsigned int i;
- int needs_recovery, has_huge_files, has_bigalloc;
+ int needs_recovery, has_huge_files;
__u64 blocks_count;
int err = 0;
unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
@@ -4010,17 +4010,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
printk_once(KERN_WARNING "EXT4-fs: Warning: mounting with data=journal disables delayed allocation, dioread_nolock, and O_DIRECT support!\n");
+ /* can't mount with both data=journal and dioread_nolock. */
clear_opt(sb, DIOREAD_NOLOCK);
if (test_opt2(sb, EXPLICIT_DELALLOC)) {
ext4_msg(sb, KERN_ERR, "can't mount with "
"both data=journal and delalloc");
goto failed_mount;
}
- if (test_opt(sb, DIOREAD_NOLOCK)) {
- ext4_msg(sb, KERN_ERR, "can't mount with "
- "both data=journal and dioread_nolock");
- goto failed_mount;
- }
if (test_opt(sb, DAX)) {
ext4_msg(sb, KERN_ERR, "can't mount with "
"both data=journal and dax");
@@ -4237,8 +4233,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
/* Handle clustersize */
clustersize = BLOCK_SIZE << le32_to_cpu(es->s_log_cluster_size);
- has_bigalloc = ext4_has_feature_bigalloc(sb);
- if (has_bigalloc) {
+ if (ext4_has_feature_bigalloc(sb)) {
if (clustersize < blocksize) {
ext4_msg(sb, KERN_ERR,
"cluster size (%d) smaller than "
@@ -5925,7 +5920,7 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
EXT4_I(inode)->i_flags |= EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL;
inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
S_NOATIME | S_IMMUTABLE);
- ext4_mark_inode_dirty(handle, inode);
+ err = ext4_mark_inode_dirty(handle, inode);
ext4_journal_stop(handle);
unlock_inode:
inode_unlock(inode);
@@ -6027,12 +6022,14 @@ static int ext4_quota_off(struct super_block *sb, int type)
* this is not a hard failure and quotas are already disabled.
*/
handle = ext4_journal_start(inode, EXT4_HT_QUOTA, 1);
- if (IS_ERR(handle))
+ if (IS_ERR(handle)) {
+ err = PTR_ERR(handle);
goto out_unlock;
+ }
EXT4_I(inode)->i_flags &= ~(EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL);
inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
inode->i_mtime = inode->i_ctime = current_time(inode);
- ext4_mark_inode_dirty(handle, inode);
+ err = ext4_mark_inode_dirty(handle, inode);
ext4_journal_stop(handle);
out_unlock:
inode_unlock(inode);
@@ -6090,7 +6087,7 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
{
struct inode *inode = sb_dqopt(sb)->files[type];
ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
- int err, offset = off & (sb->s_blocksize - 1);
+ int err = 0, err2 = 0, offset = off & (sb->s_blocksize - 1);
int retries = 0;
struct buffer_head *bh;
handle_t *handle = journal_current_handle();
@@ -6138,9 +6135,11 @@ out:
if (inode->i_size < off + len) {
i_size_write(inode, off + len);
EXT4_I(inode)->i_disksize = inode->i_size;
- ext4_mark_inode_dirty(handle, inode);
+ err2 = ext4_mark_inode_dirty(handle, inode);
+ if (unlikely(err2 && !err))
+ err = err2;
}
- return len;
+ return err ? err : len;
}
#endif
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 21df43a25328..9b29a40738ac 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -1327,7 +1327,7 @@ static int ext4_xattr_inode_write(handle_t *handle, struct inode *ea_inode,
int blocksize = ea_inode->i_sb->s_blocksize;
int max_blocks = (bufsize + blocksize - 1) >> ea_inode->i_blkbits;
int csize, wsize = 0;
- int ret = 0;
+ int ret = 0, ret2 = 0;
int retries = 0;
retry:
@@ -1385,7 +1385,9 @@ retry:
ext4_update_i_disksize(ea_inode, wsize);
inode_unlock(ea_inode);
- ext4_mark_inode_dirty(handle, ea_inode);
+ ret2 = ext4_mark_inode_dirty(handle, ea_inode);
+ if (unlikely(ret2 && !ret))
+ ret = ret2;
out:
brelse(bh);
@@ -1800,8 +1802,11 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
if (EXT4_I(inode)->i_file_acl) {
/* The inode already has an extended attribute block. */
bs->bh = ext4_sb_bread(sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
- if (IS_ERR(bs->bh))
- return PTR_ERR(bs->bh);
+ if (IS_ERR(bs->bh)) {
+ error = PTR_ERR(bs->bh);
+ bs->bh = NULL;
+ return error;
+ }
ea_bdebug(bs->bh, "b_count=%d, refcount=%d",
atomic_read(&(bs->bh->b_count)),
le32_to_cpu(BHDR(bs->bh)->h_refcount));
diff --git a/fs/f2fs/Kconfig b/fs/f2fs/Kconfig
index bb68d21e1f8c..d13c5c6a9787 100644
--- a/fs/f2fs/Kconfig
+++ b/fs/f2fs/Kconfig
@@ -127,3 +127,13 @@ config F2FS_FS_ZSTD
default y
help
Support ZSTD compress algorithm, if unsure, say Y.
+
+config F2FS_FS_LZORLE
+ bool "LZO-RLE compression support"
+ depends on F2FS_FS_COMPRESSION
+ depends on F2FS_FS_LZO
+ select LZO_COMPRESS
+ select LZO_DECOMPRESS
+ default y
+ help
+ Support LZO-RLE compress algorithm, if unsure, say Y.
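
For context, the new option slots in next to the existing LZO entry; a likely
configuration, with the mount option name assumed to follow the existing
compress_algorithm= convention:

	CONFIG_F2FS_FS_COMPRESSION=y
	CONFIG_F2FS_FS_LZO=y
	CONFIG_F2FS_FS_LZORLE=y

	# assumed mount usage:
	# mount -o compress_algorithm=lzo-rle /dev/sdX /mnt
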
diff --git a/fs/f2fs/acl.h b/fs/f2fs/acl.h
index b96823c59b15..124868c13f80 100644
--- a/fs/f2fs/acl.h
+++ b/fs/f2fs/acl.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* fs/f2fs/acl.h
*
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 852890b72d6a..236064930251 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -86,6 +86,8 @@ repeat:
return ERR_PTR(err);
}
+ f2fs_update_iostat(sbi, FS_META_READ_IO, F2FS_BLKSIZE);
+
lock_page(page);
if (unlikely(page->mapping != mapping)) {
f2fs_put_page(page, 1);
@@ -220,6 +222,7 @@ int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
.is_por = (type == META_POR),
};
struct blk_plug plug;
+ int err;
if (unlikely(type == META_POR))
fio.op_flags &= ~REQ_META;
@@ -263,8 +266,11 @@ int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
}
fio.page = page;
- f2fs_submit_page_bio(&fio);
- f2fs_put_page(page, 0);
+ err = f2fs_submit_page_bio(&fio);
+ f2fs_put_page(page, err ? 1 : 0);
+
+ if (!err)
+ f2fs_update_iostat(sbi, FS_META_READ_IO, F2FS_BLKSIZE);
}
out:
blk_finish_plug(&plug);
@@ -889,8 +895,8 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
int i;
int err;
- sbi->ckpt = f2fs_kzalloc(sbi, array_size(blk_size, cp_blks),
- GFP_KERNEL);
+ sbi->ckpt = f2fs_kvzalloc(sbi, array_size(blk_size, cp_blks),
+ GFP_KERNEL);
if (!sbi->ckpt)
return -ENOMEM;
/*
@@ -1160,10 +1166,12 @@ static int block_operations(struct f2fs_sb_info *sbi)
.nr_to_write = LONG_MAX,
.for_reclaim = 0,
};
- struct blk_plug plug;
int err = 0, cnt = 0;
- blk_start_plug(&plug);
+ /*
+ * Let's flush inline_data in dirty node pages.
+ */
+ f2fs_flush_inline_data(sbi);
retry_flush_quotas:
f2fs_lock_all(sbi);
@@ -1192,7 +1200,7 @@ retry_flush_dents:
f2fs_unlock_all(sbi);
err = f2fs_sync_dirty_inodes(sbi, DIR_INODE);
if (err)
- goto out;
+ return err;
cond_resched();
goto retry_flush_quotas;
}
@@ -1208,7 +1216,7 @@ retry_flush_dents:
f2fs_unlock_all(sbi);
err = f2fs_sync_inode_meta(sbi);
if (err)
- goto out;
+ return err;
cond_resched();
goto retry_flush_quotas;
}
@@ -1224,7 +1232,7 @@ retry_flush_nodes:
if (err) {
up_write(&sbi->node_change);
f2fs_unlock_all(sbi);
- goto out;
+ return err;
}
cond_resched();
goto retry_flush_nodes;
@@ -1236,8 +1244,6 @@ retry_flush_nodes:
*/
__prepare_cp_block(sbi);
up_write(&sbi->node_change);
-out:
- blk_finish_plug(&plug);
return err;
}
@@ -1260,6 +1266,9 @@ void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type)
if (unlikely(f2fs_cp_error(sbi)))
break;
+ if (type == F2FS_DIRTY_META)
+ f2fs_sync_meta_pages(sbi, META, LONG_MAX,
+ FS_CP_META_IO);
io_schedule_timeout(DEFAULT_IO_TIMEOUT);
}
finish_wait(&sbi->cp_wait, &wait);
@@ -1553,7 +1562,8 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
return 0;
f2fs_warn(sbi, "Start checkpoint disabled!");
}
- mutex_lock(&sbi->cp_mutex);
+ if (cpc->reason != CP_RESIZE)
+ mutex_lock(&sbi->cp_mutex);
if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) &&
((cpc->reason & CP_FASTBOOT) || (cpc->reason & CP_SYNC) ||
@@ -1622,7 +1632,8 @@ stop:
f2fs_update_time(sbi, CP_TIME);
trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint");
out:
- mutex_unlock(&sbi->cp_mutex);
+ if (cpc->reason != CP_RESIZE)
+ mutex_unlock(&sbi->cp_mutex);
return err;
}
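
The CP_RESIZE special-casing above implies a caller that already holds
cp_mutex; a hypothetical caller sketch (the resize path itself is not part of
this diff):

	struct cp_control cpc = { .reason = CP_RESIZE };
	int err;

	mutex_lock(&sbi->cp_mutex);		/* resize path owns the lock */
	err = f2fs_write_checkpoint(sbi, &cpc);	/* so it is not re-taken */
	mutex_unlock(&sbi->cp_mutex);
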
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index df7b2d15eacd..1e02a8c106b0 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -65,15 +65,6 @@ static void f2fs_set_compressed_page(struct page *page,
page->mapping = inode->i_mapping;
}
-static void f2fs_put_compressed_page(struct page *page)
-{
- set_page_private(page, (unsigned long)NULL);
- ClearPagePrivate(page);
- page->mapping = NULL;
- unlock_page(page);
- put_page(page);
-}
-
static void f2fs_drop_rpages(struct compress_ctx *cc, int len, bool unlock)
{
int i;
@@ -98,8 +89,7 @@ static void f2fs_unlock_rpages(struct compress_ctx *cc, int len)
f2fs_drop_rpages(cc, len, true);
}
-static void f2fs_put_rpages_mapping(struct compress_ctx *cc,
- struct address_space *mapping,
+static void f2fs_put_rpages_mapping(struct address_space *mapping,
pgoff_t start, int len)
{
int i;
@@ -236,7 +226,12 @@ static int lz4_init_compress_ctx(struct compress_ctx *cc)
if (!cc->private)
return -ENOMEM;
- cc->clen = LZ4_compressBound(PAGE_SIZE << cc->log_cluster_size);
+ /*
+ * We do not set cc->clen to LZ4_compressBound(inputsize) to cover
+ * the worst compress case, because the lz4 compressor handles the
+ * output budget properly.
+ */
+ cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
return 0;
}
@@ -252,11 +247,9 @@ static int lz4_compress_pages(struct compress_ctx *cc)
len = LZ4_compress_default(cc->rbuf, cc->cbuf->cdata, cc->rlen,
cc->clen, cc->private);
- if (!len) {
- printk_ratelimited("%sF2FS-fs (%s): lz4 compress failed\n",
- KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id);
- return -EIO;
- }
+ if (!len)
+ return -EAGAIN;
+
cc->clen = len;
return 0;
}
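
Reading the two lz4 hunks together: the context now reserves less than the
worst-case output size, and a zero return from LZ4_compress_default() (output
did not fit) maps to -EAGAIN instead of -EIO. A worked example, assuming a
4-page cluster and 4 KiB pages:

	/*
	 * rlen = 4 pages = 16384 bytes
	 * clen = rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE
	 *      = 16384 - 4096 - COMPRESS_HEADER_SIZE
	 *
	 * If the compressed output would exceed clen, LZ4_compress_default()
	 * returns 0; -EAGAIN then lets the caller fall back to writing the
	 * cluster uncompressed rather than failing the write with -EIO.
	 */
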
@@ -366,6 +359,13 @@ static int zstd_compress_pages(struct compress_ctx *cc)
return -EIO;
}
+ /*
+ * compressed data remains in the intermediate buffer because there
+ * is no more space in cbuf.cdata
+ */
+ if (ret)
+ return -EAGAIN;
+
cc->clen = outbuf.pos;
return 0;
}
@@ -451,6 +451,31 @@ static const struct f2fs_compress_ops f2fs_zstd_ops = {
};
#endif
+#ifdef CONFIG_F2FS_FS_LZO
+#ifdef CONFIG_F2FS_FS_LZORLE
+static int lzorle_compress_pages(struct compress_ctx *cc)
+{
+ int ret;
+
+ ret = lzorle1x_1_compress(cc->rbuf, cc->rlen, cc->cbuf->cdata,
+ &cc->clen, cc->private);
+ if (ret != LZO_E_OK) {
+ printk_ratelimited("%sF2FS-fs (%s): lzo-rle compress failed, ret:%d\n",
+ KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id, ret);
+ return -EIO;
+ }
+ return 0;
+}
+
+static const struct f2fs_compress_ops f2fs_lzorle_ops = {
+ .init_compress_ctx = lzo_init_compress_ctx,
+ .destroy_compress_ctx = lzo_destroy_compress_ctx,
+ .compress_pages = lzorle_compress_pages,
+ .decompress_pages = lzo_decompress_pages,
+};
+#endif
+#endif
+
static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
#ifdef CONFIG_F2FS_FS_LZO
&f2fs_lzo_ops,
@@ -467,6 +492,11 @@ static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
#else
NULL,
#endif
+#if defined(CONFIG_F2FS_FS_LZO) && defined(CONFIG_F2FS_FS_LZORLE)
+ &f2fs_lzorle_ops,
+#else
+ NULL,
+#endif
};
bool f2fs_is_compress_backend_ready(struct inode *inode)
@@ -476,17 +506,47 @@ bool f2fs_is_compress_backend_ready(struct inode *inode)
return f2fs_cops[F2FS_I(inode)->i_compress_algorithm];
}
-static struct page *f2fs_grab_page(void)
+static mempool_t *compress_page_pool;
+static unsigned int num_compress_pages = 512;
+module_param(num_compress_pages, uint, 0444);
+MODULE_PARM_DESC(num_compress_pages,
+ "Number of intermediate compress pages to preallocate");
+
+int f2fs_init_compress_mempool(void)
+{
+ compress_page_pool = mempool_create_page_pool(num_compress_pages, 0);
+ if (!compress_page_pool)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void f2fs_destroy_compress_mempool(void)
+{
+ mempool_destroy(compress_page_pool);
+}
+
+static struct page *f2fs_compress_alloc_page(void)
{
struct page *page;
- page = alloc_page(GFP_NOFS);
- if (!page)
- return NULL;
+ page = mempool_alloc(compress_page_pool, GFP_NOFS);
lock_page(page);
+
return page;
}
+static void f2fs_compress_free_page(struct page *page)
+{
+ if (!page)
+ return;
+ set_page_private(page, (unsigned long)NULL);
+ ClearPagePrivate(page);
+ page->mapping = NULL;
+ unlock_page(page);
+ mempool_free(page, compress_page_pool);
+}
+
static int f2fs_compress_pages(struct compress_ctx *cc)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
@@ -516,7 +576,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
}
for (i = 0; i < cc->nr_cpages; i++) {
- cc->cpages[i] = f2fs_grab_page();
+ cc->cpages[i] = f2fs_compress_alloc_page();
if (!cc->cpages[i]) {
ret = -ENOMEM;
goto out_free_cpages;
@@ -561,7 +621,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
vunmap(cc->rbuf);
for (i = nr_cpages; i < cc->nr_cpages; i++) {
- f2fs_put_compressed_page(cc->cpages[i]);
+ f2fs_compress_free_page(cc->cpages[i]);
cc->cpages[i] = NULL;
}
@@ -581,7 +641,7 @@ out_vunmap_rbuf:
out_free_cpages:
for (i = 0; i < cc->nr_cpages; i++) {
if (cc->cpages[i])
- f2fs_put_compressed_page(cc->cpages[i]);
+ f2fs_compress_free_page(cc->cpages[i]);
}
kfree(cc->cpages);
cc->cpages = NULL;
@@ -788,6 +848,8 @@ static bool cluster_may_compress(struct compress_ctx *cc)
return false;
if (!f2fs_cluster_is_full(cc))
return false;
+ if (unlikely(f2fs_cp_error(F2FS_I_SB(cc->inode))))
+ return false;
return __cluster_may_compress(cc);
}
@@ -879,7 +941,7 @@ retry:
if (!PageUptodate(page)) {
f2fs_unlock_rpages(cc, i + 1);
- f2fs_put_rpages_mapping(cc, mapping, start_idx,
+ f2fs_put_rpages_mapping(mapping, start_idx,
cc->cluster_size);
f2fs_destroy_compress_ctx(cc);
goto retry;
@@ -914,7 +976,7 @@ retry:
unlock_pages:
f2fs_unlock_rpages(cc, i);
release_pages:
- f2fs_put_rpages_mapping(cc, mapping, start_idx, i);
+ f2fs_put_rpages_mapping(mapping, start_idx, i);
f2fs_destroy_compress_ctx(cc);
return ret;
}
@@ -954,6 +1016,55 @@ bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
return first_index;
}
+int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock)
+{
+ void *fsdata = NULL;
+ struct page *pagep;
+ int log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
+ pgoff_t start_idx = from >> (PAGE_SHIFT + log_cluster_size) <<
+ log_cluster_size;
+ int err;
+
+ err = f2fs_is_compressed_cluster(inode, start_idx);
+ if (err < 0)
+ return err;
+
+ /* truncate normal cluster */
+ if (!err)
+ return f2fs_do_truncate_blocks(inode, from, lock);
+
+ /* truncate compressed cluster */
+ err = f2fs_prepare_compress_overwrite(inode, &pagep,
+ start_idx, &fsdata);
+
+ /* should not be a normal cluster */
+ f2fs_bug_on(F2FS_I_SB(inode), err == 0);
+
+ if (err <= 0)
+ return err;
+
+ if (err > 0) {
+ struct page **rpages = fsdata;
+ int cluster_size = F2FS_I(inode)->i_cluster_size;
+ int i;
+
+ for (i = cluster_size - 1; i >= 0; i--) {
+ loff_t start = rpages[i]->index << PAGE_SHIFT;
+
+ if (from <= start) {
+ zero_user_segment(rpages[i], 0, PAGE_SIZE);
+ } else {
+ zero_user_segment(rpages[i], from - start,
+ PAGE_SIZE);
+ break;
+ }
+ }
+
+ f2fs_compress_write_end(inode, fsdata, start_idx, true);
+ }
+ return 0;
+}
+
static int f2fs_write_compressed_pages(struct compress_ctx *cc,
int *submitted,
struct writeback_control *wbc,
@@ -985,7 +1096,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
loff_t psize;
int i, err;
- if (!f2fs_trylock_op(sbi))
+ if (!IS_NOQUOTA(inode) && !f2fs_trylock_op(sbi))
return -EAGAIN;
set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
@@ -1092,7 +1203,8 @@ unlock_continue:
set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
f2fs_put_dnode(&dn);
- f2fs_unlock_op(sbi);
+ if (!IS_NOQUOTA(inode))
+ f2fs_unlock_op(sbi);
spin_lock(&fi->i_size_lock);
if (fi->last_disk_size < psize)
@@ -1118,7 +1230,8 @@ out_put_cic:
out_put_dnode:
f2fs_put_dnode(&dn);
out_unlock_op:
- f2fs_unlock_op(sbi);
+ if (!IS_NOQUOTA(inode))
+ f2fs_unlock_op(sbi);
return -EAGAIN;
}
@@ -1132,7 +1245,7 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
if (unlikely(bio->bi_status))
mapping_set_error(cic->inode->i_mapping, -EIO);
- f2fs_put_compressed_page(page);
+ f2fs_compress_free_page(page);
dec_page_count(sbi, F2FS_WB_DATA);
@@ -1293,7 +1406,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
for (i = 0; i < dic->nr_cpages; i++) {
struct page *page;
- page = f2fs_grab_page();
+ page = f2fs_compress_alloc_page();
if (!page)
goto out_free;
@@ -1313,7 +1426,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
continue;
}
- dic->tpages[i] = f2fs_grab_page();
+ dic->tpages[i] = f2fs_compress_alloc_page();
if (!dic->tpages[i])
goto out_free;
}
@@ -1335,8 +1448,7 @@ void f2fs_free_dic(struct decompress_io_ctx *dic)
continue;
if (!dic->tpages[i])
continue;
- unlock_page(dic->tpages[i]);
- put_page(dic->tpages[i]);
+ f2fs_compress_free_page(dic->tpages[i]);
}
kfree(dic->tpages);
}
@@ -1345,7 +1457,7 @@ void f2fs_free_dic(struct decompress_io_ctx *dic)
for (i = 0; i < dic->nr_cpages; i++) {
if (!dic->cpages[i])
continue;
- f2fs_put_compressed_page(dic->cpages[i]);
+ f2fs_compress_free_page(dic->cpages[i]);
}
kfree(dic->cpages);
}
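
The f2fs_grab_page() -> mempool conversion above follows the standard page
mempool pattern; a self-contained sketch with illustrative names (not the
f2fs functions):

	#include <linux/mempool.h>
	#include <linux/mm.h>

	static mempool_t *page_pool;

	static int pool_init(void)
	{
		/* preallocate 512 order-0 pages to recycle under pressure */
		page_pool = mempool_create_page_pool(512, 0);
		return page_pool ? 0 : -ENOMEM;
	}

	static struct page *pool_get(void)
	{
		/* GFP_NOFS may block, so mempool_alloc() cannot return NULL */
		return mempool_alloc(page_pool, GFP_NOFS);
	}

	static void pool_put(struct page *page)
	{
		mempool_free(page, page_pool);
	}
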
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 03ec97f28235..326c63879ddc 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -19,6 +19,7 @@
#include <linux/uio.h>
#include <linux/cleancache.h>
#include <linux/sched/signal.h>
+#include <linux/fiemap.h>
#include "f2fs.h"
#include "node.h"
@@ -114,7 +115,8 @@ static enum count_type __read_io_type(struct page *page)
/* postprocessing steps for read bios */
enum bio_post_read_step {
STEP_DECRYPT,
- STEP_DECOMPRESS,
+ STEP_DECOMPRESS_NOWQ, /* handle normal cluster data inplace */
+ STEP_DECOMPRESS, /* handle compressed cluster data in workqueue */
STEP_VERITY,
};
@@ -513,6 +515,34 @@ void f2fs_submit_bio(struct f2fs_sb_info *sbi,
__submit_bio(sbi, bio, type);
}
+static void __attach_io_flag(struct f2fs_io_info *fio)
+{
+ struct f2fs_sb_info *sbi = fio->sbi;
+ unsigned int temp_mask = (1 << NR_TEMP_TYPE) - 1;
+ unsigned int io_flag, fua_flag, meta_flag;
+
+ if (fio->type == DATA)
+ io_flag = sbi->data_io_flag;
+ else if (fio->type == NODE)
+ io_flag = sbi->node_io_flag;
+ else
+ return;
+
+ fua_flag = io_flag & temp_mask;
+ meta_flag = (io_flag >> NR_TEMP_TYPE) & temp_mask;
+
+ /*
+ * data/node io flag bits per temp:
+ *      REQ_META     |      REQ_FUA      |
+ *    5 |    4 |   3 |    2 |    1 |   0 |
+ * Cold | Warm | Hot | Cold | Warm | Hot |
+ */
+ if ((1 << fio->temp) & meta_flag)
+ fio->op_flags |= REQ_META;
+ if ((1 << fio->temp) & fua_flag)
+ fio->op_flags |= REQ_FUA;
+}
+
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
struct f2fs_io_info *fio = &io->fio;
@@ -520,6 +550,7 @@ static void __submit_merged_bio(struct f2fs_bio_info *io)
if (!io->bio)
return;
+ __attach_io_flag(fio);
bio_set_op_attrs(io->bio, fio->op, fio->op_flags);
if (is_read_io(fio->op))
@@ -661,6 +692,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
if (fio->io_wbc && !is_read_io(fio->op))
wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);
+ __attach_io_flag(fio);
bio_set_op_attrs(bio, fio->op, fio->op_flags);
inc_page_count(fio->sbi, is_read_io(fio->op) ?
@@ -847,6 +879,7 @@ int f2fs_merge_page_bio(struct f2fs_io_info *fio)
alloc_new:
if (!bio) {
bio = __bio_alloc(fio, BIO_MAX_PAGES);
+ __attach_io_flag(fio);
bio_set_op_attrs(bio, fio->op, fio->op_flags);
add_bio_entry(fio->sbi, bio, page, fio->temp);
@@ -967,7 +1000,7 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
if (f2fs_encrypted_file(inode))
post_read_steps |= 1 << STEP_DECRYPT;
if (f2fs_compressed_file(inode))
- post_read_steps |= 1 << STEP_DECOMPRESS;
+ post_read_steps |= 1 << STEP_DECOMPRESS_NOWQ;
if (f2fs_need_verity(inode, first_idx))
post_read_steps |= 1 << STEP_VERITY;
@@ -1010,6 +1043,7 @@ static int f2fs_submit_page_read(struct inode *inode, struct page *page,
}
ClearPageError(page);
inc_page_count(sbi, F2FS_RD_DATA);
+ f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
__submit_bio(sbi, bio, DATA);
return 0;
}
@@ -1808,6 +1842,25 @@ static int f2fs_xattr_fiemap(struct inode *inode,
return (err < 0 ? err : 0);
}
+static loff_t max_inode_blocks(struct inode *inode)
+{
+ loff_t result = ADDRS_PER_INODE(inode);
+ loff_t leaf_count = ADDRS_PER_BLOCK(inode);
+
+ /* two direct node blocks */
+ result += (leaf_count * 2);
+
+ /* two indirect node blocks */
+ leaf_count *= NIDS_PER_BLOCK;
+ result += (leaf_count * 2);
+
+ /* one double indirect node block */
+ leaf_count *= NIDS_PER_BLOCK;
+ result += leaf_count;
+
+ return result;
+}
+
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 start, u64 len)
{
@@ -1817,6 +1870,8 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 logical = 0, phys = 0, size = 0;
u32 flags = 0;
int ret = 0;
+ bool compr_cluster = false;
+ unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
ret = f2fs_precache_extents(inode);
@@ -1824,7 +1879,7 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
return ret;
}
- ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC | FIEMAP_FLAG_XATTR);
+ ret = fiemap_prep(inode, fieinfo, start, &len, FIEMAP_FLAG_XATTR);
if (ret)
return ret;
@@ -1851,6 +1906,9 @@ next:
memset(&map_bh, 0, sizeof(struct buffer_head));
map_bh.b_size = len;
+ if (compr_cluster)
+ map_bh.b_size = blk_to_logical(inode, cluster_size - 1);
+
ret = get_data_block(inode, start_blk, &map_bh, 0,
F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
if (ret)
@@ -1861,7 +1919,7 @@ next:
start_blk = next_pgofs;
if (blk_to_logical(inode, start_blk) < blk_to_logical(inode,
- F2FS_I_SB(inode)->max_file_blocks))
+ max_inode_blocks(inode)))
goto prep_next;
flags |= FIEMAP_EXTENT_LAST;
@@ -1873,11 +1931,38 @@ next:
ret = fiemap_fill_next_extent(fieinfo, logical,
phys, size, flags);
+ if (ret)
+ goto out;
+ size = 0;
}
- if (start_blk > last_blk || ret)
+ if (start_blk > last_blk)
goto out;
+ if (compr_cluster) {
+ compr_cluster = false;
+
+ logical = blk_to_logical(inode, start_blk - 1);
+ phys = blk_to_logical(inode, map_bh.b_blocknr);
+ size = blk_to_logical(inode, cluster_size);
+
+ flags |= FIEMAP_EXTENT_ENCODED;
+
+ start_blk += cluster_size - 1;
+
+ if (start_blk > last_blk)
+ goto out;
+
+ goto prep_next;
+ }
+
+ if (map_bh.b_blocknr == COMPRESS_ADDR) {
+ compr_cluster = true;
+ start_blk++;
+ goto prep_next;
+ }
+
logical = blk_to_logical(inode, start_blk);
phys = blk_to_logical(inode, map_bh.b_blocknr);
size = map_bh.b_size;
@@ -2015,6 +2100,7 @@ submit_and_realloc:
goto submit_and_realloc;
inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
+ f2fs_update_iostat(F2FS_I_SB(inode), FS_DATA_READ_IO, F2FS_BLKSIZE);
ClearPageError(page);
*last_block_in_bio = block_nr;
goto out;
@@ -2113,6 +2199,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
for (i = 0; i < dic->nr_cpages; i++) {
struct page *page = dic->cpages[i];
block_t blkaddr;
+ struct bio_post_read_ctx *ctx;
blkaddr = data_blkaddr(dn.inode, dn.node_page,
dn.ofs_in_node + i + 1);
@@ -2130,16 +2217,16 @@ submit_and_realloc:
page->index, for_write);
if (IS_ERR(bio)) {
ret = PTR_ERR(bio);
- bio = NULL;
dic->failed = true;
if (refcount_sub_and_test(dic->nr_cpages - i,
- &dic->ref))
+ &dic->ref)) {
f2fs_decompress_end_io(dic->rpages,
cc->cluster_size, true,
false);
- f2fs_free_dic(dic);
+ f2fs_free_dic(dic);
+ }
f2fs_put_dnode(&dn);
- *bio_ret = bio;
+ *bio_ret = NULL;
return ret;
}
}
@@ -2149,7 +2236,14 @@ submit_and_realloc:
if (bio_add_page(bio, page, blocksize, 0) < blocksize)
goto submit_and_realloc;
+ /* tag STEP_DECOMPRESS to handle IO in wq */
+ ctx = bio->bi_private;
+ if (!(ctx->enabled_steps & (1 << STEP_DECOMPRESS)))
+ ctx->enabled_steps |= 1 << STEP_DECOMPRESS;
+
inc_page_count(sbi, F2FS_RD_DATA);
+ f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
+ f2fs_update_iostat(sbi, FS_CDATA_READ_IO, F2FS_BLKSIZE);
ClearPageError(page);
*last_block_in_bio = blkaddr;
}
@@ -2623,8 +2717,8 @@ write:
f2fs_available_free_memory(sbi, BASE_CHECK))))
goto redirty_out;
- /* Dentry blocks are controlled by checkpoint */
- if (S_ISDIR(inode->i_mode)) {
+ /* Dentry/quota blocks are controlled by checkpoint */
+ if (S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) {
fio.need_lock = LOCK_DONE;
err = f2fs_do_write_data_page(&fio);
goto done;
@@ -2766,7 +2860,6 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
pgoff_t index;
pgoff_t end; /* Inclusive */
pgoff_t done_index;
- int cycled;
int range_whole = 0;
xa_mark_t tag;
int nwritten = 0;
@@ -2784,17 +2877,12 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
if (wbc->range_cyclic) {
writeback_index = mapping->writeback_index; /* prev offset */
index = writeback_index;
- if (index == 0)
- cycled = 1;
- else
- cycled = 0;
end = -1;
} else {
index = wbc->range_start >> PAGE_SHIFT;
end = wbc->range_end >> PAGE_SHIFT;
if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
range_whole = 1;
- cycled = 1; /* ignore range_cyclic tests */
}
if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
tag = PAGECACHE_TAG_TOWRITE;
@@ -2959,12 +3047,13 @@ next:
}
}
#endif
- if ((!cycled && !done) || retry) {
- cycled = 1;
+ if (retry) {
index = 0;
- end = writeback_index - 1;
+ end = -1;
goto retry;
}
+ if (wbc->range_cyclic && !done)
+ done_index = 0;
if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
mapping->writeback_index = done_index;
@@ -3493,6 +3582,9 @@ static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
} else if (err < 0) {
f2fs_write_failed(mapping, offset + count);
}
+ } else {
+ if (err > 0)
+ f2fs_update_iostat(sbi, APP_DIRECT_READ_IO, err);
}
out:
@@ -3576,6 +3668,37 @@ static int f2fs_set_data_page_dirty(struct page *page)
return 0;
}
+
+static sector_t f2fs_bmap_compress(struct inode *inode, sector_t block)
+{
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ struct dnode_of_data dn;
+ sector_t start_idx, blknr = 0;
+ int ret;
+
+ start_idx = round_down(block, F2FS_I(inode)->i_cluster_size);
+
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
+ if (ret)
+ return 0;
+
+ if (dn.data_blkaddr != COMPRESS_ADDR) {
+ dn.ofs_in_node += block - start_idx;
+ blknr = f2fs_data_blkaddr(&dn);
+ if (!__is_valid_data_blkaddr(blknr))
+ blknr = 0;
+ }
+
+ f2fs_put_dnode(&dn);
+
+ return blknr;
+#else
+ return -EOPNOTSUPP;
+#endif
+}
+
static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
struct inode *inode = mapping->host;
@@ -3587,6 +3710,9 @@ static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
filemap_write_and_wait(mapping);
+ if (f2fs_compressed_file(inode))
+ return f2fs_bmap_compress(inode, block);
+
return generic_block_bmap(mapping, block, get_data_block_bmap);
}
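
A worked example of the __attach_io_flag() encoding added above, assuming
NR_TEMP_TYPE == 3 (HOT=0, WARM=1, COLD=2) and sbi->data_io_flag = 0x09:

	/*
	 * io_flag   = 0x09         = 0b001001
	 * temp_mask = (1 << 3) - 1 = 0b111
	 * fua_flag  = 0x09 & 0b111        = 0b001 -> REQ_FUA for HOT data
	 * meta_flag = (0x09 >> 3) & 0b111 = 0b001 -> REQ_META for HOT data
	 *
	 * A HOT data bio therefore gets REQ_META | REQ_FUA attached, while
	 * WARM and COLD bios are left untouched.
	 */
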
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 44bfc464df78..d35976785e8c 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -70,6 +70,111 @@ unsigned char f2fs_get_de_type(struct f2fs_dir_entry *de)
return DT_UNKNOWN;
}
+/* If @dir is casefolded, initialize @fname->cf_name from @fname->usr_fname. */
+int f2fs_init_casefolded_name(const struct inode *dir,
+ struct f2fs_filename *fname)
+{
+#ifdef CONFIG_UNICODE
+ struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
+
+ if (IS_CASEFOLDED(dir)) {
+ fname->cf_name.name = f2fs_kmalloc(sbi, F2FS_NAME_LEN,
+ GFP_NOFS);
+ if (!fname->cf_name.name)
+ return -ENOMEM;
+ fname->cf_name.len = utf8_casefold(sbi->s_encoding,
+ fname->usr_fname,
+ fname->cf_name.name,
+ F2FS_NAME_LEN);
+ if ((int)fname->cf_name.len <= 0) {
+ kfree(fname->cf_name.name);
+ fname->cf_name.name = NULL;
+ if (f2fs_has_strict_mode(sbi))
+ return -EINVAL;
+ /* fall back to treating name as opaque byte sequence */
+ }
+ }
+#endif
+ return 0;
+}
+
+static int __f2fs_setup_filename(const struct inode *dir,
+ const struct fscrypt_name *crypt_name,
+ struct f2fs_filename *fname)
+{
+ int err;
+
+ memset(fname, 0, sizeof(*fname));
+
+ fname->usr_fname = crypt_name->usr_fname;
+ fname->disk_name = crypt_name->disk_name;
+#ifdef CONFIG_FS_ENCRYPTION
+ fname->crypto_buf = crypt_name->crypto_buf;
+#endif
+ if (crypt_name->is_ciphertext_name) {
+ /* hash was decoded from the no-key name */
+ fname->hash = cpu_to_le32(crypt_name->hash);
+ } else {
+ err = f2fs_init_casefolded_name(dir, fname);
+ if (err) {
+ f2fs_free_filename(fname);
+ return err;
+ }
+ f2fs_hash_filename(dir, fname);
+ }
+ return 0;
+}
+
+/*
+ * Prepare to search for @iname in @dir. This is similar to
+ * fscrypt_setup_filename(), but this also handles computing the casefolded name
+ * and the f2fs dirhash if needed, then packing all the information about this
+ * filename up into a 'struct f2fs_filename'.
+ */
+int f2fs_setup_filename(struct inode *dir, const struct qstr *iname,
+ int lookup, struct f2fs_filename *fname)
+{
+ struct fscrypt_name crypt_name;
+ int err;
+
+ err = fscrypt_setup_filename(dir, iname, lookup, &crypt_name);
+ if (err)
+ return err;
+
+ return __f2fs_setup_filename(dir, &crypt_name, fname);
+}
+
+/*
+ * Prepare to look up @dentry in @dir. This is similar to
+ * fscrypt_prepare_lookup(), but this also handles computing the casefolded name
+ * and the f2fs dirhash if needed, then packing all the information about this
+ * filename up into a 'struct f2fs_filename'.
+ */
+int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry,
+ struct f2fs_filename *fname)
+{
+ struct fscrypt_name crypt_name;
+ int err;
+
+ err = fscrypt_prepare_lookup(dir, dentry, &crypt_name);
+ if (err)
+ return err;
+
+ return __f2fs_setup_filename(dir, &crypt_name, fname);
+}
+
+void f2fs_free_filename(struct f2fs_filename *fname)
+{
+#ifdef CONFIG_FS_ENCRYPTION
+ kfree(fname->crypto_buf.name);
+ fname->crypto_buf.name = NULL;
+#endif
+#ifdef CONFIG_UNICODE
+ kfree(fname->cf_name.name);
+ fname->cf_name.name = NULL;
+#endif
+}
+
static unsigned long dir_block_index(unsigned int level,
int dir_level, unsigned int idx)
{
@@ -84,8 +189,7 @@ static unsigned long dir_block_index(unsigned int level,
static struct f2fs_dir_entry *find_in_block(struct inode *dir,
struct page *dentry_page,
- struct fscrypt_name *fname,
- f2fs_hash_t namehash,
+ const struct f2fs_filename *fname,
int *max_slots,
struct page **res_page)
{
@@ -96,7 +200,7 @@ static struct f2fs_dir_entry *find_in_block(struct inode *dir,
dentry_blk = (struct f2fs_dentry_block *)page_address(dentry_page);
make_dentry_ptr_block(dir, &d, dentry_blk);
- de = f2fs_find_target_dentry(fname, namehash, max_slots, &d);
+ de = f2fs_find_target_dentry(&d, fname, max_slots);
if (de)
*res_page = dentry_page;
@@ -107,112 +211,57 @@ static struct f2fs_dir_entry *find_in_block(struct inode *dir,
/*
* Test whether a case-insensitive directory entry matches the filename
* being searched for.
- *
- * Returns: 0 if the directory entry matches, more than 0 if it
- * doesn't match or less than zero on error.
*/
-int f2fs_ci_compare(const struct inode *parent, const struct qstr *name,
- const struct qstr *entry, bool quick)
+static bool f2fs_match_ci_name(const struct inode *dir, const struct qstr *name,
+ const u8 *de_name, u32 de_name_len)
{
- const struct f2fs_sb_info *sbi = F2FS_SB(parent->i_sb);
+ const struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
const struct unicode_map *um = sbi->s_encoding;
- int ret;
-
- if (quick)
- ret = utf8_strncasecmp_folded(um, name, entry);
- else
- ret = utf8_strncasecmp(um, name, entry);
+ struct qstr entry = QSTR_INIT(de_name, de_name_len);
+ int res;
- if (ret < 0) {
- /* Handle invalid character sequence as either an error
- * or as an opaque byte sequence.
+ res = utf8_strncasecmp_folded(um, name, &entry);
+ if (res < 0) {
+ /*
+ * In strict mode, ignore invalid names. In non-strict mode,
+ * fall back to treating them as opaque byte sequences.
*/
- if (f2fs_has_strict_mode(sbi))
- return -EINVAL;
-
- if (name->len != entry->len)
- return 1;
-
- return !!memcmp(name->name, entry->name, name->len);
+ if (f2fs_has_strict_mode(sbi) || name->len != entry.len)
+ return false;
+ return !memcmp(name->name, entry.name, name->len);
}
-
- return ret;
+ return res == 0;
}
+#endif /* CONFIG_UNICODE */
-static void f2fs_fname_setup_ci_filename(struct inode *dir,
- const struct qstr *iname,
- struct fscrypt_str *cf_name)
+static inline bool f2fs_match_name(const struct inode *dir,
+ const struct f2fs_filename *fname,
+ const u8 *de_name, u32 de_name_len)
{
- struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
-
- if (!IS_CASEFOLDED(dir)) {
- cf_name->name = NULL;
- return;
- }
+ struct fscrypt_name f;
- cf_name->name = f2fs_kmalloc(sbi, F2FS_NAME_LEN, GFP_NOFS);
- if (!cf_name->name)
- return;
-
- cf_name->len = utf8_casefold(sbi->s_encoding,
- iname, cf_name->name,
- F2FS_NAME_LEN);
- if ((int)cf_name->len <= 0) {
- kvfree(cf_name->name);
- cf_name->name = NULL;
- }
-}
-#endif
-
-static inline bool f2fs_match_name(struct f2fs_dentry_ptr *d,
- struct f2fs_dir_entry *de,
- struct fscrypt_name *fname,
- struct fscrypt_str *cf_str,
- unsigned long bit_pos,
- f2fs_hash_t namehash)
-{
#ifdef CONFIG_UNICODE
- struct inode *parent = d->inode;
- struct f2fs_sb_info *sbi = F2FS_I_SB(parent);
- struct qstr entry;
-#endif
-
- if (de->hash_code != namehash)
- return false;
+ if (fname->cf_name.name) {
+ struct qstr cf = FSTR_TO_QSTR(&fname->cf_name);
-#ifdef CONFIG_UNICODE
- entry.name = d->filename[bit_pos];
- entry.len = de->name_len;
-
- if (sbi->s_encoding && IS_CASEFOLDED(parent)) {
- if (cf_str->name) {
- struct qstr cf = {.name = cf_str->name,
- .len = cf_str->len};
- return !f2fs_ci_compare(parent, &cf, &entry, true);
- }
- return !f2fs_ci_compare(parent, fname->usr_fname, &entry,
- false);
+ return f2fs_match_ci_name(dir, &cf, de_name, de_name_len);
}
#endif
- if (fscrypt_match_name(fname, d->filename[bit_pos],
- le16_to_cpu(de->name_len)))
- return true;
- return false;
+ f.usr_fname = fname->usr_fname;
+ f.disk_name = fname->disk_name;
+#ifdef CONFIG_FS_ENCRYPTION
+ f.crypto_buf = fname->crypto_buf;
+#endif
+ return fscrypt_match_name(&f, de_name, de_name_len);
}
-struct f2fs_dir_entry *f2fs_find_target_dentry(struct fscrypt_name *fname,
- f2fs_hash_t namehash, int *max_slots,
- struct f2fs_dentry_ptr *d)
+struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
+ const struct f2fs_filename *fname, int *max_slots)
{
struct f2fs_dir_entry *de;
- struct fscrypt_str cf_str = { .name = NULL, .len = 0 };
unsigned long bit_pos = 0;
int max_len = 0;
-#ifdef CONFIG_UNICODE
- f2fs_fname_setup_ci_filename(d->inode, fname->usr_fname, &cf_str);
-#endif
-
if (max_slots)
*max_slots = 0;
while (bit_pos < d->max) {
@@ -229,7 +278,9 @@ struct f2fs_dir_entry *f2fs_find_target_dentry(struct fscrypt_name *fname,
continue;
}
- if (f2fs_match_name(d, de, fname, &cf_str, bit_pos, namehash))
+ if (de->hash_code == fname->hash &&
+ f2fs_match_name(d->inode, fname, d->filename[bit_pos],
+ le16_to_cpu(de->name_len)))
goto found;
if (max_slots && max_len > *max_slots)
@@ -243,33 +294,27 @@ struct f2fs_dir_entry *f2fs_find_target_dentry(struct fscrypt_name *fname,
found:
if (max_slots && max_len > *max_slots)
*max_slots = max_len;
-
-#ifdef CONFIG_UNICODE
- kvfree(cf_str.name);
-#endif
return de;
}
static struct f2fs_dir_entry *find_in_level(struct inode *dir,
unsigned int level,
- struct fscrypt_name *fname,
+ const struct f2fs_filename *fname,
struct page **res_page)
{
- struct qstr name = FSTR_TO_QSTR(&fname->disk_name);
- int s = GET_DENTRY_SLOTS(name.len);
+ int s = GET_DENTRY_SLOTS(fname->disk_name.len);
unsigned int nbucket, nblock;
unsigned int bidx, end_block;
struct page *dentry_page;
struct f2fs_dir_entry *de = NULL;
bool room = false;
int max_slots;
- f2fs_hash_t namehash = f2fs_dentry_hash(dir, &name, fname);
nbucket = dir_buckets(level, F2FS_I(dir)->i_dir_level);
nblock = bucket_blocks(level);
bidx = dir_block_index(level, F2FS_I(dir)->i_dir_level,
- le32_to_cpu(namehash) % nbucket);
+ le32_to_cpu(fname->hash) % nbucket);
end_block = bidx + nblock;
for (; bidx < end_block; bidx++) {
@@ -285,8 +330,8 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
}
}
- de = find_in_block(dir, dentry_page, fname, namehash,
- &max_slots, res_page);
+ de = find_in_block(dir, dentry_page, fname, &max_slots,
+ res_page);
if (de)
break;
@@ -295,8 +340,8 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
f2fs_put_page(dentry_page, 0);
}
- if (!de && room && F2FS_I(dir)->chash != namehash) {
- F2FS_I(dir)->chash = namehash;
+ if (!de && room && F2FS_I(dir)->chash != fname->hash) {
+ F2FS_I(dir)->chash = fname->hash;
F2FS_I(dir)->clevel = level;
}
@@ -304,7 +349,8 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
}
struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
- struct fscrypt_name *fname, struct page **res_page)
+ const struct f2fs_filename *fname,
+ struct page **res_page)
{
unsigned long npages = dir_blocks(dir);
struct f2fs_dir_entry *de = NULL;
@@ -353,18 +399,10 @@ struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
const struct qstr *child, struct page **res_page)
{
struct f2fs_dir_entry *de = NULL;
- struct fscrypt_name fname;
+ struct f2fs_filename fname;
int err;
-#ifdef CONFIG_UNICODE
- if (f2fs_has_strict_mode(F2FS_I_SB(dir)) && IS_CASEFOLDED(dir) &&
- utf8_validate(F2FS_I_SB(dir)->s_encoding, child)) {
- *res_page = ERR_PTR(-EINVAL);
- return NULL;
- }
-#endif
-
- err = fscrypt_setup_filename(dir, child, 1, &fname);
+ err = f2fs_setup_filename(dir, child, 1, &fname);
if (err) {
if (err == -ENOENT)
*res_page = NULL;
@@ -375,7 +413,7 @@ struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
de = __f2fs_find_entry(dir, &fname, res_page);
- fscrypt_free_filename(&fname);
+ f2fs_free_filename(&fname);
return de;
}
@@ -416,7 +454,8 @@ void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
f2fs_put_page(page, 1);
}
-static void init_dent_inode(const struct qstr *name, struct page *ipage)
+static void init_dent_inode(const struct f2fs_filename *fname,
+ struct page *ipage)
{
struct f2fs_inode *ri;
@@ -424,16 +463,16 @@ static void init_dent_inode(const struct qstr *name, struct page *ipage)
/* copy name info. to this inode page */
ri = F2FS_INODE(ipage);
- ri->i_namelen = cpu_to_le32(name->len);
- memcpy(ri->i_name, name->name, name->len);
+ ri->i_namelen = cpu_to_le32(fname->disk_name.len);
+ memcpy(ri->i_name, fname->disk_name.name, fname->disk_name.len);
set_page_dirty(ipage);
}
void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent,
struct f2fs_dentry_ptr *d)
{
- struct qstr dot = QSTR_INIT(".", 1);
- struct qstr dotdot = QSTR_INIT("..", 2);
+ struct fscrypt_str dot = FSTR_INIT(".", 1);
+ struct fscrypt_str dotdot = FSTR_INIT("..", 2);
/* update dirent of "." */
f2fs_update_dentry(inode->i_ino, inode->i_mode, d, &dot, 0, 0);
@@ -467,8 +506,7 @@ static int make_empty_dir(struct inode *inode,
}
struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
- const struct qstr *new_name, const struct qstr *orig_name,
- struct page *dpage)
+ const struct f2fs_filename *fname, struct page *dpage)
{
struct page *page;
int err;
@@ -493,7 +531,8 @@ struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
if (err)
goto put_error;
- err = f2fs_init_security(inode, dir, orig_name, page);
+ err = f2fs_init_security(inode, dir,
+ fname ? fname->usr_fname : NULL, page);
if (err)
goto put_error;
@@ -508,8 +547,8 @@ struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
return page;
}
- if (new_name) {
- init_dent_inode(new_name, page);
+ if (fname) {
+ init_dent_inode(fname, page);
if (IS_ENCRYPTED(dir))
file_set_enc_name(inode);
}
@@ -577,11 +616,11 @@ next:
}
bool f2fs_has_enough_room(struct inode *dir, struct page *ipage,
- struct fscrypt_name *fname)
+ const struct f2fs_filename *fname)
{
struct f2fs_dentry_ptr d;
unsigned int bit_pos;
- int slots = GET_DENTRY_SLOTS(fname_len(fname));
+ int slots = GET_DENTRY_SLOTS(fname->disk_name.len);
make_dentry_ptr_inline(dir, &d, inline_data_addr(dir, ipage));
@@ -591,8 +630,8 @@ bool f2fs_has_enough_room(struct inode *dir, struct page *ipage,
}
void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
- const struct qstr *name, f2fs_hash_t name_hash,
- unsigned int bit_pos)
+ const struct fscrypt_str *name, f2fs_hash_t name_hash,
+ unsigned int bit_pos)
{
struct f2fs_dir_entry *de;
int slots = GET_DENTRY_SLOTS(name->len);
@@ -612,15 +651,13 @@ void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
}
}
-int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name,
- const struct qstr *orig_name,
- struct inode *inode, nid_t ino, umode_t mode)
+int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname,
+ struct inode *inode, nid_t ino, umode_t mode)
{
unsigned int bit_pos;
unsigned int level;
unsigned int current_depth;
unsigned long bidx, block;
- f2fs_hash_t dentry_hash;
unsigned int nbucket, nblock;
struct page *dentry_page = NULL;
struct f2fs_dentry_block *dentry_blk = NULL;
@@ -629,11 +666,10 @@ int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name,
int slots, err = 0;
level = 0;
- slots = GET_DENTRY_SLOTS(new_name->len);
- dentry_hash = f2fs_dentry_hash(dir, new_name, NULL);
+ slots = GET_DENTRY_SLOTS(fname->disk_name.len);
current_depth = F2FS_I(dir)->i_current_depth;
- if (F2FS_I(dir)->chash == dentry_hash) {
+ if (F2FS_I(dir)->chash == fname->hash) {
level = F2FS_I(dir)->clevel;
F2FS_I(dir)->chash = 0;
}
@@ -655,7 +691,7 @@ start:
nblock = bucket_blocks(level);
bidx = dir_block_index(level, F2FS_I(dir)->i_dir_level,
- (le32_to_cpu(dentry_hash) % nbucket));
+ (le32_to_cpu(fname->hash) % nbucket));
for (block = bidx; block <= (bidx + nblock - 1); block++) {
dentry_page = f2fs_get_new_data_page(dir, NULL, block, true);
@@ -679,8 +715,7 @@ add_dentry:
if (inode) {
down_write(&F2FS_I(inode)->i_sem);
- page = f2fs_init_inode_metadata(inode, dir, new_name,
- orig_name, NULL);
+ page = f2fs_init_inode_metadata(inode, dir, fname, NULL);
if (IS_ERR(page)) {
err = PTR_ERR(page);
goto fail;
@@ -688,7 +723,8 @@ add_dentry:
}
make_dentry_ptr_block(NULL, &d, dentry_blk);
- f2fs_update_dentry(ino, mode, &d, new_name, dentry_hash, bit_pos);
+ f2fs_update_dentry(ino, mode, &d, &fname->disk_name, fname->hash,
+ bit_pos);
set_page_dirty(dentry_page);
@@ -712,21 +748,15 @@ fail:
return err;
}
-int f2fs_add_dentry(struct inode *dir, struct fscrypt_name *fname,
- struct inode *inode, nid_t ino, umode_t mode)
+int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname,
+ struct inode *inode, nid_t ino, umode_t mode)
{
- struct qstr new_name;
int err = -EAGAIN;
- new_name.name = fname_name(fname);
- new_name.len = fname_len(fname);
-
if (f2fs_has_inline_dentry(dir))
- err = f2fs_add_inline_entry(dir, &new_name, fname->usr_fname,
- inode, ino, mode);
+ err = f2fs_add_inline_entry(dir, fname, inode, ino, mode);
if (err == -EAGAIN)
- err = f2fs_add_regular_entry(dir, &new_name, fname->usr_fname,
- inode, ino, mode);
+ err = f2fs_add_regular_entry(dir, fname, inode, ino, mode);
f2fs_update_time(F2FS_I_SB(dir), REQ_TIME);
return err;
@@ -739,12 +769,12 @@ int f2fs_add_dentry(struct inode *dir, struct fscrypt_name *fname,
int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
struct inode *inode, nid_t ino, umode_t mode)
{
- struct fscrypt_name fname;
+ struct f2fs_filename fname;
struct page *page = NULL;
struct f2fs_dir_entry *de = NULL;
int err;
- err = fscrypt_setup_filename(dir, name, 0, &fname);
+ err = f2fs_setup_filename(dir, name, 0, &fname);
if (err)
return err;
@@ -767,7 +797,7 @@ int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
} else {
err = f2fs_add_dentry(dir, &fname, inode, ino, mode);
}
- fscrypt_free_filename(&fname);
+ f2fs_free_filename(&fname);
return err;
}
@@ -777,7 +807,7 @@ int f2fs_do_tmpfile(struct inode *inode, struct inode *dir)
int err = 0;
down_write(&F2FS_I(inode)->i_sem);
- page = f2fs_init_inode_metadata(inode, dir, NULL, NULL, NULL);
+ page = f2fs_init_inode_metadata(inode, dir, NULL, NULL);
if (IS_ERR(page)) {
err = PTR_ERR(page);
goto fail;
@@ -1080,17 +1110,41 @@ const struct file_operations f2fs_dir_operations = {
static int f2fs_d_compare(const struct dentry *dentry, unsigned int len,
const char *str, const struct qstr *name)
{
- struct qstr qstr = {.name = str, .len = len };
const struct dentry *parent = READ_ONCE(dentry->d_parent);
- const struct inode *inode = READ_ONCE(parent->d_inode);
+ const struct inode *dir = READ_ONCE(parent->d_inode);
+ const struct f2fs_sb_info *sbi = F2FS_SB(dentry->d_sb);
+ struct qstr entry = QSTR_INIT(str, len);
+ char strbuf[DNAME_INLINE_LEN];
+ int res;
+
+ if (!dir || !IS_CASEFOLDED(dir))
+ goto fallback;
- if (!inode || !IS_CASEFOLDED(inode)) {
- if (len != name->len)
- return -1;
- return memcmp(str, name->name, len);
+ /*
+ * If the dentry name is stored in-line, then it may be concurrently
+ * modified by a rename. If this happens, the VFS will eventually retry
+ * the lookup, so it doesn't matter what ->d_compare() returns.
+ * However, it's unsafe to call utf8_strncasecmp() with an unstable
+ * string. Therefore, we have to copy the name into a temporary buffer.
+ */
+ if (len <= DNAME_INLINE_LEN - 1) {
+ memcpy(strbuf, str, len);
+ strbuf[len] = 0;
+ entry.name = strbuf;
+ /* prevent compiler from optimizing out the temporary buffer */
+ barrier();
}
- return f2fs_ci_compare(inode, name, &qstr, false);
+ res = utf8_strncasecmp(sbi->s_encoding, name, &entry);
+ if (res >= 0)
+ return res;
+
+ if (f2fs_has_strict_mode(sbi))
+ return -EINVAL;
+fallback:
+ if (len != name->len)
+ return 1;
+ return !!memcmp(str, name->name, len);
}
static int f2fs_d_hash(const struct dentry *dentry, struct qstr *str)
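
Taken together, the dir.c refactor turns lookup into a setup/match/free
lifecycle around struct f2fs_filename. A sketch assembled from the functions
in this diff, with error handling abbreviated:

	struct f2fs_filename fname;
	struct f2fs_dir_entry *de;
	struct page *page;
	int err;

	/* computes disk_name, hash and, if casefolded, cf_name once */
	err = f2fs_setup_filename(dir, child, 1 /* lookup */, &fname);
	if (err)
		return err;

	/* every block/level probe matches against the same struct */
	de = __f2fs_find_entry(dir, &fname, &page);

	f2fs_free_filename(&fname);
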
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 5c0149d2f46a..b35a50f4953c 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* fs/f2fs/f2fs.h
*
@@ -139,6 +139,7 @@ struct f2fs_mount_info {
int fs_mode; /* fs mode: LFS or ADAPTIVE */
int bggc_mode; /* bggc mode: off, on or sync */
struct fscrypt_dummy_context dummy_enc_ctx; /* test dummy encryption */
+ block_t unusable_cap_perc; /* percentage for cap */
block_t unusable_cap; /* Amount of space allowed to be
* unusable when disabling checkpoint
*/
@@ -194,6 +195,7 @@ enum {
#define CP_DISCARD 0x00000010
#define CP_TRIMMED 0x00000020
#define CP_PAUSE 0x00000040
+#define CP_RESIZE 0x00000080
#define MAX_DISCARD_BLOCKS(sbi) BLKS_PER_SEC(sbi)
#define DEF_MAX_DISCARD_REQUEST 8 /* issue 8 discards per round */
@@ -428,6 +430,10 @@ static inline bool __has_cursum_space(struct f2fs_journal *journal,
#define F2FS_IOC_PRECACHE_EXTENTS _IO(F2FS_IOCTL_MAGIC, 15)
#define F2FS_IOC_RESIZE_FS _IOW(F2FS_IOCTL_MAGIC, 16, __u64)
#define F2FS_IOC_GET_COMPRESS_BLOCKS _IOR(F2FS_IOCTL_MAGIC, 17, __u64)
+#define F2FS_IOC_RELEASE_COMPRESS_BLOCKS \
+ _IOR(F2FS_IOCTL_MAGIC, 18, __u64)
+#define F2FS_IOC_RESERVE_COMPRESS_BLOCKS \
+ _IOR(F2FS_IOCTL_MAGIC, 19, __u64)
#define F2FS_IOC_GET_VOLUME_NAME FS_IOC_GETFSLABEL
#define F2FS_IOC_SET_VOLUME_NAME FS_IOC_SETFSLABEL
@@ -506,6 +512,42 @@ static inline int get_inline_xattr_addrs(struct inode *inode);
* For INODE and NODE manager
*/
/* for directory operations */
+
+struct f2fs_filename {
+ /*
+ * The filename the user specified. This is NULL for some
+ * filesystem-internal operations, e.g. converting an inline directory
+ * to a non-inline one, or roll-forward recovering an encrypted dentry.
+ */
+ const struct qstr *usr_fname;
+
+ /*
+ * The on-disk filename. For encrypted directories, this is encrypted.
+ * This may be NULL for lookups in an encrypted dir without the key.
+ */
+ struct fscrypt_str disk_name;
+
+ /* The dirhash of this filename */
+ f2fs_hash_t hash;
+
+#ifdef CONFIG_FS_ENCRYPTION
+ /*
+ * For lookups in encrypted directories: either the buffer backing
+ * disk_name, or a buffer that holds the decoded no-key name.
+ */
+ struct fscrypt_str crypto_buf;
+#endif
+#ifdef CONFIG_UNICODE
+ /*
+ * For casefolded directories: the casefolded name, but it's left NULL
+ * if the original name is not valid Unicode or if the filesystem is
+ * doing an internal operation where usr_fname is also NULL. In these
+ * cases we fall back to treating the name as an opaque byte sequence.
+ */
+ struct fscrypt_str cf_name;
+#endif
+};
+
struct f2fs_dentry_ptr {
struct inode *inode;
void *bitmap;
@@ -1088,8 +1130,9 @@ enum cp_reason_type {
};
enum iostat_type {
- APP_DIRECT_IO, /* app direct IOs */
- APP_BUFFERED_IO, /* app buffered IOs */
+ /* WRITE IO */
+ APP_DIRECT_IO, /* app direct write IOs */
+ APP_BUFFERED_IO, /* app buffered write IOs */
APP_WRITE_IO, /* app write IOs */
APP_MAPPED_IO, /* app mapped IOs */
FS_DATA_IO, /* data IOs from kworker/fsync/reclaimer */
@@ -1100,6 +1143,19 @@ enum iostat_type {
FS_CP_DATA_IO, /* data IOs from checkpoint */
FS_CP_NODE_IO, /* node IOs from checkpoint */
FS_CP_META_IO, /* meta IOs from checkpoint */
+
+ /* READ IO */
+ APP_DIRECT_READ_IO, /* app direct read IOs */
+ APP_BUFFERED_READ_IO, /* app buffered read IOs */
+ APP_READ_IO, /* app read IOs */
+ APP_MAPPED_READ_IO, /* app mapped read IOs */
+ FS_DATA_READ_IO, /* data read IOs */
+ FS_GDATA_READ_IO, /* data read IOs from background gc */
+ FS_CDATA_READ_IO, /* compressed data read IOs */
+ FS_NODE_READ_IO, /* node read IOs */
+ FS_META_READ_IO, /* meta read IOs */
+
+ /* other */
FS_DISCARD, /* discard */
NR_IO_TYPE,
};
@@ -1269,6 +1325,7 @@ enum compress_algorithm_type {
COMPRESS_LZO,
COMPRESS_LZ4,
COMPRESS_ZSTD,
+ COMPRESS_LZORLE,
COMPRESS_MAX,
};
@@ -1418,7 +1475,6 @@ struct f2fs_sb_info {
unsigned int segs_per_sec; /* segments per section */
unsigned int secs_per_zone; /* sections per zone */
unsigned int total_sections; /* total section count */
- struct mutex resize_mutex; /* for resize exclusion */
unsigned int total_node_count; /* total node block count */
unsigned int total_valid_node_count; /* valid node block count */
loff_t max_file_blocks; /* max block index of file */
@@ -1504,8 +1560,15 @@ struct f2fs_sb_info {
/* For app/fs IO statistics */
spinlock_t iostat_lock;
- unsigned long long write_iostat[NR_IO_TYPE];
+ unsigned long long rw_iostat[NR_IO_TYPE];
+ unsigned long long prev_rw_iostat[NR_IO_TYPE];
bool iostat_enable;
+ unsigned long iostat_next_period;
+ unsigned int iostat_period_ms;
+
+ /* to attach REQ_META|REQ_FUA flags */
+ unsigned int data_io_flag;
+ unsigned int node_io_flag;
/* For sysfs support */
struct kobject s_kobj;
@@ -2902,12 +2965,12 @@ static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
return is_set_ckpt_flags(sbi, CP_ERROR_FLAG);
}
-static inline bool is_dot_dotdot(const struct qstr *str)
+static inline bool is_dot_dotdot(const u8 *name, size_t len)
{
- if (str->len == 1 && str->name[0] == '.')
+ if (len == 1 && name[0] == '.')
return true;
- if (str->len == 2 && str->name[0] == '.' && str->name[1] == '.')
+ if (len == 2 && name[0] == '.' && name[1] == '.')
return true;
return false;
@@ -2935,18 +2998,12 @@ static inline bool f2fs_may_extent_tree(struct inode *inode)
static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
size_t size, gfp_t flags)
{
- void *ret;
-
if (time_to_inject(sbi, FAULT_KMALLOC)) {
f2fs_show_injection_info(sbi, FAULT_KMALLOC);
return NULL;
}
- ret = kmalloc(size, flags);
- if (ret)
- return ret;
-
- return kvmalloc(size, flags);
+ return kmalloc(size, flags);
}
static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi,
@@ -2996,29 +3053,45 @@ static inline int get_inline_xattr_addrs(struct inode *inode)
sizeof((f2fs_inode)->field)) \
<= (F2FS_OLD_ATTRIBUTE_SIZE + (extra_isize))) \
+#define DEFAULT_IOSTAT_PERIOD_MS 3000
+#define MIN_IOSTAT_PERIOD_MS 100
+/* maximum period of iostat tracing is 2.4 hours */
+#define MAX_IOSTAT_PERIOD_MS 8640000
+
static inline void f2fs_reset_iostat(struct f2fs_sb_info *sbi)
{
int i;
spin_lock(&sbi->iostat_lock);
- for (i = 0; i < NR_IO_TYPE; i++)
- sbi->write_iostat[i] = 0;
+ for (i = 0; i < NR_IO_TYPE; i++) {
+ sbi->rw_iostat[i] = 0;
+ sbi->prev_rw_iostat[i] = 0;
+ }
spin_unlock(&sbi->iostat_lock);
}
+extern void f2fs_record_iostat(struct f2fs_sb_info *sbi);
+
static inline void f2fs_update_iostat(struct f2fs_sb_info *sbi,
enum iostat_type type, unsigned long long io_bytes)
{
if (!sbi->iostat_enable)
return;
spin_lock(&sbi->iostat_lock);
- sbi->write_iostat[type] += io_bytes;
+ sbi->rw_iostat[type] += io_bytes;
if (type == APP_WRITE_IO || type == APP_DIRECT_IO)
- sbi->write_iostat[APP_BUFFERED_IO] =
- sbi->write_iostat[APP_WRITE_IO] -
- sbi->write_iostat[APP_DIRECT_IO];
+ sbi->rw_iostat[APP_BUFFERED_IO] =
+ sbi->rw_iostat[APP_WRITE_IO] -
+ sbi->rw_iostat[APP_DIRECT_IO];
+
+ if (type == APP_READ_IO || type == APP_DIRECT_READ_IO)
+ sbi->rw_iostat[APP_BUFFERED_READ_IO] =
+ sbi->rw_iostat[APP_READ_IO] -
+ sbi->rw_iostat[APP_DIRECT_READ_IO];
spin_unlock(&sbi->iostat_lock);
+
+ f2fs_record_iostat(sbi);
}
#define __is_large_section(sbi) ((sbi)->segs_per_sec > 1)
@@ -3064,6 +3137,7 @@ static inline void f2fs_clear_page_private(struct page *page)
*/
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
void f2fs_truncate_data_blocks(struct dnode_of_data *dn);
+int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock);
int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock);
int f2fs_truncate(struct inode *inode);
int f2fs_getattr(const struct path *path, struct kstat *stat,
@@ -3099,31 +3173,32 @@ int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
bool hot, bool set);
struct dentry *f2fs_get_parent(struct dentry *child);
-extern int f2fs_ci_compare(const struct inode *parent,
- const struct qstr *name,
- const struct qstr *entry,
- bool quick);
-
/*
* dir.c
*/
unsigned char f2fs_get_de_type(struct f2fs_dir_entry *de);
-struct f2fs_dir_entry *f2fs_find_target_dentry(struct fscrypt_name *fname,
- f2fs_hash_t namehash, int *max_slots,
- struct f2fs_dentry_ptr *d);
+int f2fs_init_casefolded_name(const struct inode *dir,
+ struct f2fs_filename *fname);
+int f2fs_setup_filename(struct inode *dir, const struct qstr *iname,
+ int lookup, struct f2fs_filename *fname);
+int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry,
+ struct f2fs_filename *fname);
+void f2fs_free_filename(struct f2fs_filename *fname);
+struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
+ const struct f2fs_filename *fname, int *max_slots);
int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
unsigned int start_pos, struct fscrypt_str *fstr);
void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent,
struct f2fs_dentry_ptr *d);
struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
- const struct qstr *new_name,
- const struct qstr *orig_name, struct page *dpage);
+ const struct f2fs_filename *fname, struct page *dpage);
void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode,
unsigned int current_depth);
int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots);
void f2fs_drop_nlink(struct inode *dir, struct inode *inode);
struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
- struct fscrypt_name *fname, struct page **res_page);
+ const struct f2fs_filename *fname,
+ struct page **res_page);
struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
const struct qstr *child, struct page **res_page);
struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p);
@@ -3132,14 +3207,13 @@ ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr,
void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
struct page *page, struct inode *inode);
bool f2fs_has_enough_room(struct inode *dir, struct page *ipage,
- struct fscrypt_name *fname);
+ const struct f2fs_filename *fname);
void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
- const struct qstr *name, f2fs_hash_t name_hash,
+ const struct fscrypt_str *name, f2fs_hash_t name_hash,
unsigned int bit_pos);
-int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name,
- const struct qstr *orig_name,
+int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname,
struct inode *inode, nid_t ino, umode_t mode);
-int f2fs_add_dentry(struct inode *dir, struct fscrypt_name *fname,
+int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname,
struct inode *inode, nid_t ino, umode_t mode);
int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
struct inode *inode, nid_t ino, umode_t mode);
@@ -3169,8 +3243,7 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi);
/*
* hash.c
*/
-f2fs_hash_t f2fs_dentry_hash(const struct inode *dir,
- const struct qstr *name_info, struct fscrypt_name *fname);
+void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname);
/*
* node.c
@@ -3202,6 +3275,7 @@ void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
struct page *f2fs_get_node_page_ra(struct page *parent, int start);
int f2fs_move_node_page(struct page *node_page, int gc_type);
+int f2fs_flush_inline_data(struct f2fs_sb_info *sbi);
int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
struct writeback_control *wbc, bool atomic,
unsigned int *seq_id);
@@ -3645,7 +3719,7 @@ static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
static inline void __init f2fs_create_root_stats(void) { }
static inline void f2fs_destroy_root_stats(void) { }
-static inline void update_sit_info(struct f2fs_sb_info *sbi) {}
+static inline void f2fs_update_sit_info(struct f2fs_sb_info *sbi) {}
#endif
extern const struct file_operations f2fs_dir_operations;
@@ -3678,11 +3752,11 @@ int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry);
int f2fs_write_inline_data(struct inode *inode, struct page *page);
bool f2fs_recover_inline_data(struct inode *inode, struct page *npage);
struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
- struct fscrypt_name *fname, struct page **res_page);
+ const struct f2fs_filename *fname,
+ struct page **res_page);
int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
struct page *ipage);
-int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name,
- const struct qstr *orig_name,
+int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
struct inode *inode, nid_t ino, umode_t mode);
void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry,
struct page *page, struct inode *dir,
@@ -3781,8 +3855,11 @@ int f2fs_prepare_compress_overwrite(struct inode *inode,
struct page **pagep, pgoff_t index, void **fsdata);
bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
pgoff_t index, unsigned copied);
+int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock);
void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
bool f2fs_is_compress_backend_ready(struct inode *inode);
+int f2fs_init_compress_mempool(void);
+void f2fs_destroy_compress_mempool(void);
void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity);
bool f2fs_cluster_is_empty(struct compress_ctx *cc);
bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
@@ -3816,6 +3893,8 @@ static inline struct page *f2fs_compress_control_page(struct page *page)
WARN_ON_ONCE(1);
return ERR_PTR(-EINVAL);
}
+static inline int f2fs_init_compress_mempool(void) { return 0; }
+static inline void f2fs_destroy_compress_mempool(void) { }
#endif
static inline void set_compress_context(struct inode *inode)
@@ -3962,6 +4041,10 @@ static inline void f2fs_i_compr_blocks_update(struct inode *inode,
{
int diff = F2FS_I(inode)->i_cluster_size - blocks;
+ /* don't update i_compr_blocks if saved blocks were released */
+ if (!add && !F2FS_I(inode)->i_compr_blocks)
+ return;
+
if (add) {
F2FS_I(inode)->i_compr_blocks += diff;
stat_add_compr_blocks(inode, diff);
@@ -4003,8 +4086,6 @@ static inline bool f2fs_force_buffered_io(struct inode *inode,
return true;
if (f2fs_is_multi_device(sbi))
return true;
- if (f2fs_compressed_file(inode))
- return true;
/*
* for blkzoned device, fallback direct IO to buffered IO, so
* all IOs can be serialized by log-structured write.
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 6ab8f621a3c5..3268f8dd59bb 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -40,6 +40,10 @@ static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
ret = filemap_fault(vmf);
up_read(&F2FS_I(inode)->i_mmap_sem);
+ if (!ret)
+ f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO,
+ F2FS_BLKSIZE);
+
trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);
return ret;
@@ -165,9 +169,11 @@ static int get_parent_ino(struct inode *inode, nid_t *pino)
{
struct dentry *dentry;
- inode = igrab(inode);
- dentry = d_find_any_alias(inode);
- iput(inode);
+ /*
+ * Make sure to get the non-deleted alias. The alias associated with
+ * the open file descriptor being fsync()'ed may be deleted already.
+ */
+ dentry = d_find_alias(inode);
if (!dentry)
return 0;
@@ -557,6 +563,7 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
bool compressed_cluster = false;
int cluster_index = 0, valid_blocks = 0;
int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
+ bool released = !F2FS_I(dn->inode)->i_compr_blocks;
if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
base = get_extra_isize(dn->inode);
@@ -595,7 +602,9 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
f2fs_invalidate_blocks(sbi, blkaddr);
- nr_free++;
+
+ if (!released || blkaddr != COMPRESS_ADDR)
+ nr_free++;
}
if (compressed_cluster)
@@ -643,9 +652,6 @@ static int truncate_partial_data_page(struct inode *inode, u64 from,
return 0;
}
- if (f2fs_compressed_file(inode))
- return 0;
-
page = f2fs_get_lock_data_page(inode, index, true);
if (IS_ERR(page))
return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
@@ -661,7 +667,7 @@ truncate_out:
return 0;
}
-static int do_truncate_blocks(struct inode *inode, u64 from, bool lock)
+int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct dnode_of_data dn;
@@ -729,23 +735,28 @@ free_partial:
int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
u64 free_from = from;
+ int err;
+#ifdef CONFIG_F2FS_FS_COMPRESSION
/*
* for compressed file, only support cluster size
* aligned truncation.
*/
- if (f2fs_compressed_file(inode)) {
- size_t cluster_shift = PAGE_SHIFT +
- F2FS_I(inode)->i_log_cluster_size;
- size_t cluster_mask = (1 << cluster_shift) - 1;
+ if (f2fs_compressed_file(inode))
+ free_from = round_up(from,
+ F2FS_I(inode)->i_cluster_size << PAGE_SHIFT);
+#endif
- free_from = from >> cluster_shift;
- if (from & cluster_mask)
- free_from++;
- free_from <<= cluster_shift;
- }
+ err = f2fs_do_truncate_blocks(inode, free_from, lock);
+ if (err)
+ return err;
+
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+ if (from != free_from)
+ err = f2fs_truncate_partial_cluster(inode, from, lock);
+#endif
- return do_truncate_blocks(inode, free_from, lock);
+ return err;
}
int f2fs_truncate(struct inode *inode)
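
A quick illustration of the new alignment math above: for a compressed file, everything from the next cluster boundary up is freed directly by f2fs_do_truncate_blocks(), and only the tail cluster goes through f2fs_truncate_partial_cluster(). A minimal standalone sketch, assuming 4 KiB pages and a 4-page cluster; round_up() here mirrors the kernel macro for power-of-two boundaries:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define round_up(x, y)	((((x) - 1) | ((y) - 1)) + 1)	/* y: power of two */

int main(void)
{
	uint64_t cluster_size = 4;				/* pages per cluster */
	uint64_t cluster_bytes = cluster_size << PAGE_SHIFT;	/* 16 KiB */
	uint64_t from = 20000;					/* truncation point */
	uint64_t free_from = round_up(from, cluster_bytes);	/* -> 32768 */

	printf("from=%llu free_from=%llu\n",
	       (unsigned long long)from, (unsigned long long)free_from);
	/* from != free_from, so [from, free_from) is the partial cluster
	 * that f2fs_truncate_partial_cluster() must handle separately. */
	return 0;
}
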
@@ -968,9 +979,7 @@ const struct inode_operations f2fs_file_inode_operations = {
.setattr = f2fs_setattr,
.get_acl = f2fs_get_acl,
.set_acl = f2fs_set_acl,
-#ifdef CONFIG_F2FS_FS_XATTR
.listxattr = f2fs_listxattr,
-#endif
.fiemap = f2fs_fiemap,
};
@@ -1649,7 +1658,11 @@ next_alloc:
down_write(&sbi->pin_sem);
map.m_seg_type = CURSEG_COLD_DATA_PINNED;
+
+ f2fs_lock_op(sbi);
f2fs_allocate_new_segments(sbi, CURSEG_COLD_DATA);
+ f2fs_unlock_op(sbi);
+
err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
up_write(&sbi->pin_sem);
@@ -2219,8 +2232,15 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
if (in != F2FS_GOING_DOWN_FULLSYNC) {
ret = mnt_want_write_file(filp);
- if (ret)
+ if (ret) {
+ if (ret == -EROFS) {
+ ret = 0;
+ f2fs_stop_checkpoint(sbi, false);
+ set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
+ trace_f2fs_shutdown(sbi, in, ret);
+ }
return ret;
+ }
}
switch (in) {
@@ -3301,7 +3321,6 @@ static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
__u64 block_count;
- int ret;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -3313,9 +3332,7 @@ static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
sizeof(block_count)))
return -EFAULT;
- ret = f2fs_resize_fs(sbi, block_count);
-
- return ret;
+ return f2fs_resize_fs(sbi, block_count);
}
static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
@@ -3419,6 +3436,326 @@ static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg)
return put_user(blocks, (u64 __user *)arg);
}
+static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
+ unsigned int released_blocks = 0;
+ int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
+ block_t blkaddr;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ blkaddr = data_blkaddr(dn->inode, dn->node_page,
+ dn->ofs_in_node + i);
+
+ if (!__is_valid_data_blkaddr(blkaddr))
+ continue;
+ if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
+ DATA_GENERIC_ENHANCE)))
+ return -EFSCORRUPTED;
+ }
+
+ while (count) {
+ int compr_blocks = 0;
+
+ for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
+ blkaddr = f2fs_data_blkaddr(dn);
+
+ if (i == 0) {
+ if (blkaddr == COMPRESS_ADDR)
+ continue;
+ dn->ofs_in_node += cluster_size;
+ goto next;
+ }
+
+ if (__is_valid_data_blkaddr(blkaddr))
+ compr_blocks++;
+
+ if (blkaddr != NEW_ADDR)
+ continue;
+
+ dn->data_blkaddr = NULL_ADDR;
+ f2fs_set_data_blkaddr(dn);
+ }
+
+ f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false);
+ dec_valid_block_count(sbi, dn->inode,
+ cluster_size - compr_blocks);
+
+ released_blocks += cluster_size - compr_blocks;
+next:
+ count -= cluster_size;
+ }
+
+ return released_blocks;
+}
+
+static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
+{
+ struct inode *inode = file_inode(filp);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ pgoff_t page_idx = 0, last_idx;
+ unsigned int released_blocks = 0;
+ int ret;
+ int writecount;
+
+ if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
+ return -EOPNOTSUPP;
+
+ if (!f2fs_compressed_file(inode))
+ return -EINVAL;
+
+ if (f2fs_readonly(sbi->sb))
+ return -EROFS;
+
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
+
+ f2fs_balance_fs(F2FS_I_SB(inode), true);
+
+ inode_lock(inode);
+
+ writecount = atomic_read(&inode->i_writecount);
+	if ((filp->f_mode & FMODE_WRITE && writecount != 1) ||
+			(!(filp->f_mode & FMODE_WRITE) && writecount)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (IS_IMMUTABLE(inode)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
+ if (ret)
+ goto out;
+
+ if (!F2FS_I(inode)->i_compr_blocks)
+ goto out;
+
+ F2FS_I(inode)->i_flags |= F2FS_IMMUTABLE_FL;
+ f2fs_set_inode_flags(inode);
+ inode->i_ctime = current_time(inode);
+ f2fs_mark_inode_dirty_sync(inode, true);
+
+ down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ down_write(&F2FS_I(inode)->i_mmap_sem);
+
+ last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
+
+ while (page_idx < last_idx) {
+ struct dnode_of_data dn;
+ pgoff_t end_offset, count;
+
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
+ if (ret) {
+ if (ret == -ENOENT) {
+ page_idx = f2fs_get_next_page_offset(&dn,
+ page_idx);
+ ret = 0;
+ continue;
+ }
+ break;
+ }
+
+ end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+ count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
+ count = round_up(count, F2FS_I(inode)->i_cluster_size);
+
+ ret = release_compress_blocks(&dn, count);
+
+ f2fs_put_dnode(&dn);
+
+ if (ret < 0)
+ break;
+
+ page_idx += count;
+ released_blocks += ret;
+ }
+
+ up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ up_write(&F2FS_I(inode)->i_mmap_sem);
+out:
+ inode_unlock(inode);
+
+ mnt_drop_write_file(filp);
+
+ if (ret >= 0) {
+ ret = put_user(released_blocks, (u64 __user *)arg);
+ } else if (released_blocks && F2FS_I(inode)->i_compr_blocks) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
+ "iblocks=%llu, released=%u, compr_blocks=%llu, "
+ "run fsck to fix.",
+ __func__, inode->i_ino, inode->i_blocks,
+ released_blocks,
+ F2FS_I(inode)->i_compr_blocks);
+ }
+
+ return ret;
+}
+
+static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
+ unsigned int reserved_blocks = 0;
+ int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
+ block_t blkaddr;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ blkaddr = data_blkaddr(dn->inode, dn->node_page,
+ dn->ofs_in_node + i);
+
+ if (!__is_valid_data_blkaddr(blkaddr))
+ continue;
+ if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
+ DATA_GENERIC_ENHANCE)))
+ return -EFSCORRUPTED;
+ }
+
+ while (count) {
+ int compr_blocks = 0;
+ blkcnt_t reserved;
+ int ret;
+
+ for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
+ blkaddr = f2fs_data_blkaddr(dn);
+
+ if (i == 0) {
+ if (blkaddr == COMPRESS_ADDR)
+ continue;
+ dn->ofs_in_node += cluster_size;
+ goto next;
+ }
+
+ if (__is_valid_data_blkaddr(blkaddr)) {
+ compr_blocks++;
+ continue;
+ }
+
+ dn->data_blkaddr = NEW_ADDR;
+ f2fs_set_data_blkaddr(dn);
+ }
+
+ reserved = cluster_size - compr_blocks;
+ ret = inc_valid_block_count(sbi, dn->inode, &reserved);
+ if (ret)
+ return ret;
+
+ if (reserved != cluster_size - compr_blocks)
+ return -ENOSPC;
+
+ f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);
+
+ reserved_blocks += reserved;
+next:
+ count -= cluster_size;
+ }
+
+ return reserved_blocks;
+}
+
+static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
+{
+ struct inode *inode = file_inode(filp);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ pgoff_t page_idx = 0, last_idx;
+ unsigned int reserved_blocks = 0;
+ int ret;
+
+ if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
+ return -EOPNOTSUPP;
+
+ if (!f2fs_compressed_file(inode))
+ return -EINVAL;
+
+ if (f2fs_readonly(sbi->sb))
+ return -EROFS;
+
+ ret = mnt_want_write_file(filp);
+ if (ret)
+ return ret;
+
+ if (F2FS_I(inode)->i_compr_blocks)
+ goto out;
+
+ f2fs_balance_fs(F2FS_I_SB(inode), true);
+
+ inode_lock(inode);
+
+ if (!IS_IMMUTABLE(inode)) {
+ ret = -EINVAL;
+ goto unlock_inode;
+ }
+
+ down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ down_write(&F2FS_I(inode)->i_mmap_sem);
+
+ last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
+
+ while (page_idx < last_idx) {
+ struct dnode_of_data dn;
+ pgoff_t end_offset, count;
+
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
+ if (ret) {
+ if (ret == -ENOENT) {
+ page_idx = f2fs_get_next_page_offset(&dn,
+ page_idx);
+ ret = 0;
+ continue;
+ }
+ break;
+ }
+
+ end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+ count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
+ count = round_up(count, F2FS_I(inode)->i_cluster_size);
+
+ ret = reserve_compress_blocks(&dn, count);
+
+ f2fs_put_dnode(&dn);
+
+ if (ret < 0)
+ break;
+
+ page_idx += count;
+ reserved_blocks += ret;
+ }
+
+ up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ up_write(&F2FS_I(inode)->i_mmap_sem);
+
+ if (ret >= 0) {
+ F2FS_I(inode)->i_flags &= ~F2FS_IMMUTABLE_FL;
+ f2fs_set_inode_flags(inode);
+ inode->i_ctime = current_time(inode);
+ f2fs_mark_inode_dirty_sync(inode, true);
+ }
+unlock_inode:
+ inode_unlock(inode);
+out:
+ mnt_drop_write_file(filp);
+
+ if (ret >= 0) {
+ ret = put_user(reserved_blocks, (u64 __user *)arg);
+ } else if (reserved_blocks && F2FS_I(inode)->i_compr_blocks) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+		f2fs_warn(sbi, "%s: partial blocks were reserved i_ino=%lx "
+ "iblocks=%llu, reserved=%u, compr_blocks=%llu, "
+ "run fsck to fix.",
+ __func__, inode->i_ino, inode->i_blocks,
+ reserved_blocks,
+ F2FS_I(inode)->i_compr_blocks);
+ }
+
+ return ret;
+}
+
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
@@ -3501,6 +3838,10 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return f2fs_set_volume_name(filp, arg);
case F2FS_IOC_GET_COMPRESS_BLOCKS:
return f2fs_get_compress_blocks(filp, arg);
+ case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
+ return f2fs_release_compress_blocks(filp, arg);
+ case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
+ return f2fs_reserve_compress_blocks(filp, arg);
default:
return -ENOTTY;
}
@@ -3510,11 +3851,17 @@ static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
+ int ret;
if (!f2fs_is_compress_backend_ready(inode))
return -EOPNOTSUPP;
- return generic_file_read_iter(iocb, iter);
+ ret = generic_file_read_iter(iocb, iter);
+
+ if (ret > 0)
+ f2fs_update_iostat(F2FS_I_SB(inode), APP_READ_IO, ret);
+
+ return ret;
}
static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
@@ -3662,6 +4009,8 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case F2FS_IOC_GET_VOLUME_NAME:
case F2FS_IOC_SET_VOLUME_NAME:
case F2FS_IOC_GET_COMPRESS_BLOCKS:
+ case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
+ case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
break;
default:
return -ENOIOCTLCMD;
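
For context, a hypothetical userspace caller of the two ioctls wired up above. The magic (0xf5) and the request numbers (18 and 19, _IOR with a __u64 result) are assumptions based on this series; check fs/f2fs/f2fs.h for the authoritative definitions:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>

/* Assumed encodings; verify against fs/f2fs/f2fs.h. */
#define F2FS_IOCTL_MAGIC			0xf5
#define F2FS_IOC_RELEASE_COMPRESS_BLOCKS	_IOR(F2FS_IOCTL_MAGIC, 18, __u64)
#define F2FS_IOC_RESERVE_COMPRESS_BLOCKS	_IOR(F2FS_IOCTL_MAGIC, 19, __u64)

int main(int argc, char **argv)
{
	__u64 blocks;
	int fd;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0)
		return 1;
	/* Release frees the blocks saved by compression and sets the file
	 * immutable so nothing can dirty it while the space is gone. */
	if (ioctl(fd, F2FS_IOC_RELEASE_COMPRESS_BLOCKS, &blocks) == 0)
		printf("released %llu blocks\n", (unsigned long long)blocks);
	/* Reserve re-reserves those blocks and clears the immutable flag,
	 * making the file writable again. */
	if (ioctl(fd, F2FS_IOC_RESERVE_COMPRESS_BLOCKS, &blocks) == 0)
		printf("reserved %llu blocks\n", (unsigned long long)blocks);
	close(fd);
	return 0;
}
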
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 26248c8936db..5b95d5a146eb 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -13,6 +13,7 @@
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
+#include <linux/sched/signal.h>
#include "f2fs.h"
#include "node.h"
@@ -737,6 +738,10 @@ got_it:
goto put_encrypted_page;
f2fs_put_page(fio.encrypted_page, 0);
f2fs_put_page(page, 1);
+
+ f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
+ f2fs_update_iostat(sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE);
+
return 0;
put_encrypted_page:
f2fs_put_page(fio.encrypted_page, 1);
@@ -840,6 +845,10 @@ static int move_data_block(struct inode *inode, block_t bidx,
f2fs_put_page(mpage, 1);
goto up_out;
}
+
+ f2fs_update_iostat(fio.sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
+ f2fs_update_iostat(fio.sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE);
+
lock_page(mpage);
if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) ||
!PageUptodate(mpage))) {
@@ -1399,12 +1408,29 @@ void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
}
-static int free_segment_range(struct f2fs_sb_info *sbi, unsigned int start,
- unsigned int end)
+static int free_segment_range(struct f2fs_sb_info *sbi,
+ unsigned int secs, bool gc_only)
{
- int type;
- unsigned int segno, next_inuse;
+ unsigned int segno, next_inuse, start, end;
+ struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
+ int gc_mode, gc_type;
int err = 0;
+ int type;
+
+ /* Force block allocation for GC */
+ MAIN_SECS(sbi) -= secs;
+ start = MAIN_SECS(sbi) * sbi->segs_per_sec;
+ end = MAIN_SEGS(sbi) - 1;
+
+ mutex_lock(&DIRTY_I(sbi)->seglist_lock);
+ for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++)
+ if (SIT_I(sbi)->last_victim[gc_mode] >= start)
+ SIT_I(sbi)->last_victim[gc_mode] = 0;
+
+ for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++)
+ if (sbi->next_victim_seg[gc_type] >= start)
+ sbi->next_victim_seg[gc_type] = NULL_SEGNO;
+ mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
/* Move out cursegs from the target range */
for (type = CURSEG_HOT_DATA; type < NR_CURSEG_TYPE; type++)
@@ -1417,18 +1443,24 @@ static int free_segment_range(struct f2fs_sb_info *sbi, unsigned int start,
.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
};
- down_write(&sbi->gc_lock);
do_garbage_collect(sbi, segno, &gc_list, FG_GC);
- up_write(&sbi->gc_lock);
put_gc_inode(&gc_list);
- if (get_valid_blocks(sbi, segno, true))
- return -EAGAIN;
+ if (!gc_only && get_valid_blocks(sbi, segno, true)) {
+ err = -EAGAIN;
+ goto out;
+ }
+ if (fatal_signal_pending(current)) {
+ err = -ERESTARTSYS;
+ goto out;
+ }
}
+ if (gc_only)
+ goto out;
- err = f2fs_sync_fs(sbi->sb, 1);
+ err = f2fs_write_checkpoint(sbi, &cpc);
if (err)
- return err;
+ goto out;
next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start);
if (next_inuse <= end) {
@@ -1436,6 +1468,8 @@ static int free_segment_range(struct f2fs_sb_info *sbi, unsigned int start,
next_inuse);
f2fs_bug_on(sbi, 1);
}
+out:
+ MAIN_SECS(sbi) += secs;
return err;
}
@@ -1481,6 +1515,7 @@ static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs;
MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
+ MAIN_SECS(sbi) += secs;
FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks);
@@ -1502,8 +1537,8 @@ static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
{
__u64 old_block_count, shrunk_blocks;
+ struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
unsigned int secs;
- int gc_mode, gc_type;
int err = 0;
__u32 rem;
@@ -1538,10 +1573,27 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
return -EINVAL;
}
- freeze_bdev(sbi->sb->s_bdev);
-
shrunk_blocks = old_block_count - block_count;
secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));
+
+ /* stop other GC */
+ if (!down_write_trylock(&sbi->gc_lock))
+ return -EAGAIN;
+
+ /* stop CP to protect MAIN_SEC in free_segment_range */
+ f2fs_lock_op(sbi);
+ err = free_segment_range(sbi, secs, true);
+ f2fs_unlock_op(sbi);
+ up_write(&sbi->gc_lock);
+ if (err)
+ return err;
+
+ set_sbi_flag(sbi, SBI_IS_RESIZEFS);
+
+ freeze_super(sbi->sb);
+ down_write(&sbi->gc_lock);
+ mutex_lock(&sbi->cp_mutex);
+
spin_lock(&sbi->stat_lock);
if (shrunk_blocks + valid_user_blocks(sbi) +
sbi->current_reserved_blocks + sbi->unusable_block_count +
@@ -1550,69 +1602,44 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
else
sbi->user_block_count -= shrunk_blocks;
spin_unlock(&sbi->stat_lock);
- if (err) {
- thaw_bdev(sbi->sb->s_bdev, sbi->sb);
- return err;
- }
-
- mutex_lock(&sbi->resize_mutex);
- set_sbi_flag(sbi, SBI_IS_RESIZEFS);
-
- mutex_lock(&DIRTY_I(sbi)->seglist_lock);
-
- MAIN_SECS(sbi) -= secs;
-
- for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++)
- if (SIT_I(sbi)->last_victim[gc_mode] >=
- MAIN_SECS(sbi) * sbi->segs_per_sec)
- SIT_I(sbi)->last_victim[gc_mode] = 0;
-
- for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++)
- if (sbi->next_victim_seg[gc_type] >=
- MAIN_SECS(sbi) * sbi->segs_per_sec)
- sbi->next_victim_seg[gc_type] = NULL_SEGNO;
-
- mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
+ if (err)
+ goto out_err;
- err = free_segment_range(sbi, MAIN_SECS(sbi) * sbi->segs_per_sec,
- MAIN_SEGS(sbi) - 1);
+ err = free_segment_range(sbi, secs, false);
if (err)
- goto out;
+ goto recover_out;
update_sb_metadata(sbi, -secs);
err = f2fs_commit_super(sbi, false);
if (err) {
update_sb_metadata(sbi, secs);
- goto out;
+ goto recover_out;
}
- mutex_lock(&sbi->cp_mutex);
update_fs_metadata(sbi, -secs);
clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
set_sbi_flag(sbi, SBI_IS_DIRTY);
- mutex_unlock(&sbi->cp_mutex);
- err = f2fs_sync_fs(sbi->sb, 1);
+ err = f2fs_write_checkpoint(sbi, &cpc);
if (err) {
- mutex_lock(&sbi->cp_mutex);
update_fs_metadata(sbi, secs);
- mutex_unlock(&sbi->cp_mutex);
update_sb_metadata(sbi, secs);
f2fs_commit_super(sbi, false);
}
-out:
+recover_out:
if (err) {
set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_err(sbi, "resize_fs failed, should run fsck to repair!");
- MAIN_SECS(sbi) += secs;
spin_lock(&sbi->stat_lock);
sbi->user_block_count += shrunk_blocks;
spin_unlock(&sbi->stat_lock);
}
+out_err:
+ mutex_unlock(&sbi->cp_mutex);
+ up_write(&sbi->gc_lock);
+ thaw_super(sbi->sb);
clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
- mutex_unlock(&sbi->resize_mutex);
- thaw_bdev(sbi->sb->s_bdev, sbi->sb);
return err;
}
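
The resize rework is spread across several hunks; condensed, the new f2fs_resize_fs() control flow looks like this (a sketch using the names from the diff, with error rollback elided):

static int resize_fs_sketch(struct f2fs_sb_info *sbi, unsigned int secs)
{
	int err;

	/* Phase 1: evacuate the tail sections with GC only. */
	if (!down_write_trylock(&sbi->gc_lock))
		return -EAGAIN;
	f2fs_lock_op(sbi);	/* no checkpoint while MAIN_SECS is shrunk */
	err = free_segment_range(sbi, secs, true);	/* gc_only */
	f2fs_unlock_op(sbi);
	up_write(&sbi->gc_lock);
	if (err)
		return err;

	/* Phase 2: freeze the fs and commit the shrink for real. */
	set_sbi_flag(sbi, SBI_IS_RESIZEFS);
	freeze_super(sbi->sb);
	down_write(&sbi->gc_lock);
	mutex_lock(&sbi->cp_mutex);

	err = free_segment_range(sbi, secs, false);
	/* ... on success: update_sb_metadata(), f2fs_commit_super(),
	 *     update_fs_metadata(), f2fs_write_checkpoint();
	 *     on failure: roll each of those back and flag for fsck ... */

	mutex_unlock(&sbi->cp_mutex);
	up_write(&sbi->gc_lock);
	thaw_super(sbi->sb);
	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
	return err;
}
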
diff --git a/fs/f2fs/gc.h b/fs/f2fs/gc.h
index bbac9d3787bd..db3c61046aa4 100644
--- a/fs/f2fs/gc.h
+++ b/fs/f2fs/gc.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* fs/f2fs/gc.h
*
diff --git a/fs/f2fs/hash.c b/fs/f2fs/hash.c
index 8c4ea5003ef8..de841aaf3c43 100644
--- a/fs/f2fs/hash.c
+++ b/fs/f2fs/hash.c
@@ -67,22 +67,9 @@ static void str2hashbuf(const unsigned char *msg, size_t len,
*buf++ = pad;
}
-static f2fs_hash_t __f2fs_dentry_hash(const struct qstr *name_info,
- struct fscrypt_name *fname)
+static u32 TEA_hash_name(const u8 *p, size_t len)
{
- __u32 hash;
- f2fs_hash_t f2fs_hash;
- const unsigned char *p;
__u32 in[8], buf[4];
- const unsigned char *name = name_info->name;
- size_t len = name_info->len;
-
- /* encrypted bigname case */
- if (fname && !fname->disk_name.name)
- return cpu_to_le32(fname->hash);
-
- if (is_dot_dotdot(name_info))
- return 0;
/* Initialize the default seed for the hash checksum functions */
buf[0] = 0x67452301;
@@ -90,7 +77,6 @@ static f2fs_hash_t __f2fs_dentry_hash(const struct qstr *name_info,
buf[2] = 0x98badcfe;
buf[3] = 0x10325476;
- p = name;
while (1) {
str2hashbuf(p, len, in, 4);
TEA_transform(buf, in);
@@ -99,41 +85,43 @@ static f2fs_hash_t __f2fs_dentry_hash(const struct qstr *name_info,
break;
len -= 16;
}
- hash = buf[0];
- f2fs_hash = cpu_to_le32(hash & ~F2FS_HASH_COL_BIT);
- return f2fs_hash;
+ return buf[0] & ~F2FS_HASH_COL_BIT;
}
-f2fs_hash_t f2fs_dentry_hash(const struct inode *dir,
- const struct qstr *name_info, struct fscrypt_name *fname)
+/*
+ * Compute @fname->hash. For all directories, @fname->disk_name must be set.
+ * For casefolded directories, @fname->usr_fname must be set, and also
+ * @fname->cf_name if the filename is valid Unicode.
+ */
+void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname)
{
-#ifdef CONFIG_UNICODE
- struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
- const struct unicode_map *um = sbi->s_encoding;
- int r, dlen;
- unsigned char *buff;
- struct qstr folded;
+ const u8 *name = fname->disk_name.name;
+ size_t len = fname->disk_name.len;
- if (!name_info->len || !IS_CASEFOLDED(dir))
- goto opaque_seq;
+ WARN_ON_ONCE(!name);
- buff = f2fs_kzalloc(sbi, sizeof(char) * PATH_MAX, GFP_KERNEL);
- if (!buff)
- return -ENOMEM;
-
- dlen = utf8_casefold(um, name_info, buff, PATH_MAX);
- if (dlen < 0) {
- kvfree(buff);
- goto opaque_seq;
+ if (is_dot_dotdot(name, len)) {
+ fname->hash = 0;
+ return;
}
- folded.name = buff;
- folded.len = dlen;
- r = __f2fs_dentry_hash(&folded, fname);
-
- kvfree(buff);
- return r;
-opaque_seq:
+#ifdef CONFIG_UNICODE
+ if (IS_CASEFOLDED(dir)) {
+ /*
+ * If the casefolded name is provided, hash it instead of the
+ * on-disk name. If the casefolded name is *not* provided, that
+ * should only be because the name wasn't valid Unicode, so fall
+ * back to treating the name as an opaque byte sequence.
+ */
+ WARN_ON_ONCE(!fname->usr_fname->name);
+ if (fname->cf_name.name) {
+ name = fname->cf_name.name;
+ len = fname->cf_name.len;
+ } else {
+ name = fname->usr_fname->name;
+ len = fname->usr_fname->len;
+ }
+ }
#endif
- return __f2fs_dentry_hash(name_info, fname);
+ fname->hash = cpu_to_le32(TEA_hash_name(name, len));
}
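
f2fs_hash_filename() now special-cases "." and ".." through an is_dot_dotdot() that takes a raw name/length pair rather than a qstr. A plausible kernel-style sketch of that helper, assuming it moved into f2fs.h as part of this series:

static inline bool is_dot_dotdot(const unsigned char *name, size_t len)
{
	if (len == 1 && name[0] == '.')
		return true;
	if (len == 2 && name[0] == '.' && name[1] == '.')
		return true;
	return false;
}
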
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 4167e5408151..dbade310dc79 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -8,6 +8,7 @@
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
+#include <linux/fiemap.h>
#include "f2fs.h"
#include "node.h"
@@ -305,15 +306,14 @@ process_inline:
}
struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
- struct fscrypt_name *fname, struct page **res_page)
+ const struct f2fs_filename *fname,
+ struct page **res_page)
{
struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
- struct qstr name = FSTR_TO_QSTR(&fname->disk_name);
struct f2fs_dir_entry *de;
struct f2fs_dentry_ptr d;
struct page *ipage;
void *inline_dentry;
- f2fs_hash_t namehash;
ipage = f2fs_get_node_page(sbi, dir->i_ino);
if (IS_ERR(ipage)) {
@@ -321,12 +321,10 @@ struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
return NULL;
}
- namehash = f2fs_dentry_hash(dir, &name, fname);
-
inline_dentry = inline_data_addr(dir, ipage);
make_dentry_ptr_inline(dir, &d, inline_dentry);
- de = f2fs_find_target_dentry(fname, namehash, NULL, &d);
+ de = f2fs_find_target_dentry(&d, fname, NULL);
unlock_page(ipage);
if (de)
*res_page = ipage;
@@ -443,7 +441,7 @@ static int f2fs_add_inline_entries(struct inode *dir, void *inline_dentry)
while (bit_pos < d.max) {
struct f2fs_dir_entry *de;
- struct qstr new_name;
+ struct f2fs_filename fname;
nid_t ino;
umode_t fake_mode;
@@ -459,14 +457,19 @@ static int f2fs_add_inline_entries(struct inode *dir, void *inline_dentry)
continue;
}
- new_name.name = d.filename[bit_pos];
- new_name.len = le16_to_cpu(de->name_len);
+ /*
+ * We only need the disk_name and hash to move the dentry.
+ * We don't need the original or casefolded filenames.
+ */
+ memset(&fname, 0, sizeof(fname));
+ fname.disk_name.name = d.filename[bit_pos];
+ fname.disk_name.len = le16_to_cpu(de->name_len);
+ fname.hash = de->hash_code;
ino = le32_to_cpu(de->ino);
fake_mode = f2fs_get_de_type(de) << S_SHIFT;
- err = f2fs_add_regular_entry(dir, &new_name, NULL, NULL,
- ino, fake_mode);
+ err = f2fs_add_regular_entry(dir, &fname, NULL, ino, fake_mode);
if (err)
goto punch_dentry_pages;
@@ -543,7 +546,7 @@ int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
struct page *ipage;
- struct fscrypt_name fname;
+ struct f2fs_filename fname;
void *inline_dentry = NULL;
int err = 0;
@@ -552,19 +555,19 @@ int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry)
f2fs_lock_op(sbi);
- err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &fname);
+ err = f2fs_setup_filename(dir, &dentry->d_name, 0, &fname);
if (err)
goto out;
ipage = f2fs_get_node_page(sbi, dir->i_ino);
if (IS_ERR(ipage)) {
err = PTR_ERR(ipage);
- goto out;
+ goto out_fname;
}
if (f2fs_has_enough_room(dir, ipage, &fname)) {
f2fs_put_page(ipage, 1);
- goto out;
+ goto out_fname;
}
inline_dentry = inline_data_addr(dir, ipage);
@@ -572,22 +575,22 @@ int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry)
err = do_convert_inline_dir(dir, ipage, inline_dentry);
if (!err)
f2fs_put_page(ipage, 1);
+out_fname:
+ f2fs_free_filename(&fname);
out:
f2fs_unlock_op(sbi);
return err;
}
-int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name,
- const struct qstr *orig_name,
- struct inode *inode, nid_t ino, umode_t mode)
+int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
+ struct inode *inode, nid_t ino, umode_t mode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
struct page *ipage;
unsigned int bit_pos;
- f2fs_hash_t name_hash;
void *inline_dentry = NULL;
struct f2fs_dentry_ptr d;
- int slots = GET_DENTRY_SLOTS(new_name->len);
+ int slots = GET_DENTRY_SLOTS(fname->disk_name.len);
struct page *page = NULL;
int err = 0;
@@ -609,8 +612,7 @@ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name,
if (inode) {
down_write(&F2FS_I(inode)->i_sem);
- page = f2fs_init_inode_metadata(inode, dir, new_name,
- orig_name, ipage);
+ page = f2fs_init_inode_metadata(inode, dir, fname, ipage);
if (IS_ERR(page)) {
err = PTR_ERR(page);
goto fail;
@@ -619,8 +621,8 @@ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name,
f2fs_wait_on_page_writeback(ipage, NODE, true, true);
- name_hash = f2fs_dentry_hash(dir, new_name, NULL);
- f2fs_update_dentry(ino, mode, &d, new_name, name_hash, bit_pos);
+ f2fs_update_dentry(ino, mode, &d, &fname->disk_name, fname->hash,
+ bit_pos);
set_page_dirty(ipage);
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index f54119da2217..e94e02c6580a 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -482,7 +482,7 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
nid_t ino = -1;
int err = 0;
unsigned int root_ino = F2FS_ROOT_INO(F2FS_I_SB(dir));
- struct fscrypt_name fname;
+ struct f2fs_filename fname;
trace_f2fs_lookup_start(dir, dentry, flags);
@@ -491,19 +491,20 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
goto out;
}
- err = fscrypt_prepare_lookup(dir, dentry, &fname);
+ err = f2fs_prepare_lookup(dir, dentry, &fname);
if (err == -ENOENT)
goto out_splice;
if (err)
goto out;
de = __f2fs_find_entry(dir, &fname, &page);
- fscrypt_free_filename(&fname);
+ f2fs_free_filename(&fname);
if (!de) {
if (IS_ERR(page)) {
err = PTR_ERR(page);
goto out;
}
+ err = -ENOENT;
goto out_splice;
}
@@ -549,7 +550,7 @@ out_splice:
#endif
new = d_splice_alias(inode, dentry);
err = PTR_ERR_OR_ZERO(new);
- trace_f2fs_lookup_end(dir, dentry, ino, err);
+ trace_f2fs_lookup_end(dir, dentry, ino, !new ? -ENOENT : err);
return new;
out_iput:
iput(inode);
@@ -564,7 +565,7 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
struct inode *inode = d_inode(dentry);
struct f2fs_dir_entry *de;
struct page *page;
- int err = -ENOENT;
+ int err;
trace_f2fs_unlink_enter(dir, dentry);
@@ -1287,9 +1288,7 @@ const struct inode_operations f2fs_encrypted_symlink_inode_operations = {
.get_link = f2fs_encrypted_get_link,
.getattr = f2fs_getattr,
.setattr = f2fs_setattr,
-#ifdef CONFIG_F2FS_FS_XATTR
.listxattr = f2fs_listxattr,
-#endif
};
const struct inode_operations f2fs_dir_inode_operations = {
@@ -1307,9 +1306,7 @@ const struct inode_operations f2fs_dir_inode_operations = {
.setattr = f2fs_setattr,
.get_acl = f2fs_get_acl,
.set_acl = f2fs_set_acl,
-#ifdef CONFIG_F2FS_FS_XATTR
.listxattr = f2fs_listxattr,
-#endif
.fiemap = f2fs_fiemap,
};
@@ -1317,9 +1314,7 @@ const struct inode_operations f2fs_symlink_inode_operations = {
.get_link = f2fs_get_link,
.getattr = f2fs_getattr,
.setattr = f2fs_setattr,
-#ifdef CONFIG_F2FS_FS_XATTR
.listxattr = f2fs_listxattr,
-#endif
};
const struct inode_operations f2fs_special_inode_operations = {
@@ -1327,7 +1322,5 @@ const struct inode_operations f2fs_special_inode_operations = {
.setattr = f2fs_setattr,
.get_acl = f2fs_get_acl,
.set_acl = f2fs_set_acl,
-#ifdef CONFIG_F2FS_FS_XATTR
.listxattr = f2fs_listxattr,
-#endif
};
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index ecbd6bd14a49..03e24df1c84f 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1300,7 +1300,13 @@ static int read_node_page(struct page *page, int op_flags)
}
fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;
- return f2fs_submit_page_bio(&fio);
+
+ err = f2fs_submit_page_bio(&fio);
+
+ if (!err)
+ f2fs_update_iostat(sbi, FS_NODE_READ_IO, F2FS_BLKSIZE);
+
+ return err;
}
/*
@@ -1514,8 +1520,15 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
trace_f2fs_writepage(page, NODE);
- if (unlikely(f2fs_cp_error(sbi)))
+ if (unlikely(f2fs_cp_error(sbi))) {
+ if (is_sbi_flag_set(sbi, SBI_IS_CLOSE)) {
+ ClearPageUptodate(page);
+ dec_page_count(sbi, F2FS_DIRTY_NODES);
+ unlock_page(page);
+ return 0;
+ }
goto redirty_out;
+ }
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
goto redirty_out;
@@ -1801,6 +1814,53 @@ static bool flush_dirty_inode(struct page *page)
return true;
}
+int f2fs_flush_inline_data(struct f2fs_sb_info *sbi)
+{
+ pgoff_t index = 0;
+ struct pagevec pvec;
+ int nr_pages;
+ int ret = 0;
+
+ pagevec_init(&pvec);
+
+ while ((nr_pages = pagevec_lookup_tag(&pvec,
+ NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
+ int i;
+
+ for (i = 0; i < nr_pages; i++) {
+ struct page *page = pvec.pages[i];
+
+ if (!IS_DNODE(page))
+ continue;
+
+ lock_page(page);
+
+ if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
+continue_unlock:
+ unlock_page(page);
+ continue;
+ }
+
+ if (!PageDirty(page)) {
+ /* someone wrote it for us */
+ goto continue_unlock;
+ }
+
+			/* flush inline_data */
+ if (is_inline_node(page)) {
+ clear_inline_node(page);
+ unlock_page(page);
+ flush_inline_data(sbi, ino_of_node(page));
+ continue;
+ }
+ unlock_page(page);
+ }
+ pagevec_release(&pvec);
+ cond_resched();
+ }
+ return ret;
+}
+
int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
struct writeback_control *wbc,
bool do_balance, enum iostat_type io_type)
@@ -1864,8 +1924,8 @@ continue_unlock:
goto continue_unlock;
}
- /* flush inline_data */
- if (is_inline_node(page)) {
+ /* flush inline_data, if it's async context. */
+ if (do_balance && is_inline_node(page)) {
clear_inline_node(page);
unlock_page(page);
flush_inline_data(sbi, ino_of_node(page));
@@ -2482,7 +2542,6 @@ void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
- struct free_nid *i, *next;
int nr = nr_shrink;
if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
@@ -2491,17 +2550,23 @@ int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
if (!mutex_trylock(&nm_i->build_lock))
return 0;
- spin_lock(&nm_i->nid_list_lock);
- list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
- if (nr_shrink <= 0 ||
- nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
- break;
+ while (nr_shrink && nm_i->nid_cnt[FREE_NID] > MAX_FREE_NIDS) {
+ struct free_nid *i, *next;
+ unsigned int batch = SHRINK_NID_BATCH_SIZE;
- __remove_free_nid(sbi, i, FREE_NID);
- kmem_cache_free(free_nid_slab, i);
- nr_shrink--;
+ spin_lock(&nm_i->nid_list_lock);
+ list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
+ if (!nr_shrink || !batch ||
+ nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
+ break;
+ __remove_free_nid(sbi, i, FREE_NID);
+ kmem_cache_free(free_nid_slab, i);
+ nr_shrink--;
+ batch--;
+ }
+ spin_unlock(&nm_i->nid_list_lock);
}
- spin_unlock(&nm_i->nid_list_lock);
+
mutex_unlock(&nm_i->build_lock);
return nr - nr_shrink;
@@ -2928,7 +2993,7 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
return 0;
nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
- nm_i->nat_bits = f2fs_kzalloc(sbi,
+ nm_i->nat_bits = f2fs_kvzalloc(sbi,
nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL);
if (!nm_i->nat_bits)
return -ENOMEM;
@@ -3061,9 +3126,9 @@ static int init_free_nid_cache(struct f2fs_sb_info *sbi)
int i;
nm_i->free_nid_bitmap =
- f2fs_kzalloc(sbi, array_size(sizeof(unsigned char *),
- nm_i->nat_blocks),
- GFP_KERNEL);
+ f2fs_kvzalloc(sbi, array_size(sizeof(unsigned char *),
+ nm_i->nat_blocks),
+ GFP_KERNEL);
if (!nm_i->free_nid_bitmap)
return -ENOMEM;
diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
index e05af5df5648..69e5859e993c 100644
--- a/fs/f2fs/node.h
+++ b/fs/f2fs/node.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* fs/f2fs/node.h
*
@@ -15,6 +15,9 @@
#define FREE_NID_PAGES 8
#define MAX_FREE_NIDS (NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES)
+/* size of free nid batch when shrinking */
+#define SHRINK_NID_BATCH_SIZE 8
+
#define DEF_RA_NID_PAGES 0 /* # of nid pages to be readaheaded */
/* maximum readahead size for node during getting data blocks */
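
The reworked f2fs_try_to_free_nids() above trades one long nid_list_lock hold for SHRINK_NID_BATCH_SIZE-sized critical sections. The pattern in isolation, as a generic sketch (struct entry and BATCH are stand-ins, not f2fs names):

#define BATCH	8

struct entry {
	struct list_head list;
};

static int shrink_batched(struct list_head *head, spinlock_t *lock,
			  int nr_shrink)
{
	/* unlocked emptiness peek: racy, but benign for a shrinker */
	while (nr_shrink > 0 && !list_empty(head)) {
		struct entry *e, *next;
		unsigned int batch = BATCH;

		spin_lock(lock);
		list_for_each_entry_safe(e, next, head, list) {
			if (!nr_shrink || !batch)
				break;
			list_del(&e->list);
			kfree(e);
			nr_shrink--;
			batch--;
		}
		spin_unlock(lock);
		/* lock dropped here: waiters get a turn every BATCH frees */
	}
	return nr_shrink;
}
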
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index dd804c07eeb0..ae5310f02e7f 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -107,13 +107,51 @@ static void del_fsync_inode(struct fsync_inode_entry *entry, int drop)
kmem_cache_free(fsync_entry_slab, entry);
}
+static int init_recovered_filename(const struct inode *dir,
+ struct f2fs_inode *raw_inode,
+ struct f2fs_filename *fname,
+ struct qstr *usr_fname)
+{
+ int err;
+
+ memset(fname, 0, sizeof(*fname));
+ fname->disk_name.len = le32_to_cpu(raw_inode->i_namelen);
+ fname->disk_name.name = raw_inode->i_name;
+
+ if (WARN_ON(fname->disk_name.len > F2FS_NAME_LEN))
+ return -ENAMETOOLONG;
+
+ if (!IS_ENCRYPTED(dir)) {
+ usr_fname->name = fname->disk_name.name;
+ usr_fname->len = fname->disk_name.len;
+ fname->usr_fname = usr_fname;
+ }
+
+ /* Compute the hash of the filename */
+ if (IS_CASEFOLDED(dir)) {
+ err = f2fs_init_casefolded_name(dir, fname);
+ if (err)
+ return err;
+ f2fs_hash_filename(dir, fname);
+#ifdef CONFIG_UNICODE
+ /* Case-sensitive match is fine for recovery */
+ kfree(fname->cf_name.name);
+ fname->cf_name.name = NULL;
+#endif
+ } else {
+ f2fs_hash_filename(dir, fname);
+ }
+ return 0;
+}
+
static int recover_dentry(struct inode *inode, struct page *ipage,
struct list_head *dir_list)
{
struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
nid_t pino = le32_to_cpu(raw_inode->i_pino);
struct f2fs_dir_entry *de;
- struct fscrypt_name fname;
+ struct f2fs_filename fname;
+ struct qstr usr_fname;
struct page *page;
struct inode *dir, *einode;
struct fsync_inode_entry *entry;
@@ -132,16 +170,9 @@ static int recover_dentry(struct inode *inode, struct page *ipage,
}
dir = entry->inode;
-
- memset(&fname, 0, sizeof(struct fscrypt_name));
- fname.disk_name.len = le32_to_cpu(raw_inode->i_namelen);
- fname.disk_name.name = raw_inode->i_name;
-
- if (unlikely(fname.disk_name.len > F2FS_NAME_LEN)) {
- WARN_ON(1);
- err = -ENAMETOOLONG;
+ err = init_recovered_filename(dir, raw_inode, &fname, &usr_fname);
+ if (err)
goto out;
- }
retry:
de = __f2fs_find_entry(dir, &fname, &page);
if (de && inode->i_ino == le32_to_cpu(de->ino))
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index b7a9421472a7..196f31503511 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -1029,9 +1029,9 @@ static void f2fs_submit_discard_endio(struct bio *bio)
struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;
unsigned long flags;
- dc->error = blk_status_to_errno(bio->bi_status);
-
spin_lock_irqsave(&dc->lock, flags);
+ if (!dc->error)
+ dc->error = blk_status_to_errno(bio->bi_status);
dc->bio_ref--;
if (!dc->bio_ref && dc->state == D_SUBMIT) {
dc->state = D_DONE;
@@ -1101,7 +1101,6 @@ static void __init_discard_policy(struct f2fs_sb_info *sbi,
} else if (discard_type == DPOLICY_FSTRIM) {
dpolicy->io_aware = false;
} else if (discard_type == DPOLICY_UMOUNT) {
- dpolicy->max_requests = UINT_MAX;
dpolicy->io_aware = false;
/* we need to issue all to keep CP_TRIMMED_FLAG */
dpolicy->granularity = 1;
@@ -1215,12 +1214,14 @@ submit:
len = total_len;
}
- if (!err && len)
+ if (!err && len) {
+ dcc->undiscard_blks -= len;
__update_discard_tree_range(sbi, bdev, lstart, start, len);
+ }
return err;
}
-static struct discard_cmd *__insert_discard_tree(struct f2fs_sb_info *sbi,
+static void __insert_discard_tree(struct f2fs_sb_info *sbi,
struct block_device *bdev, block_t lstart,
block_t start, block_t len,
struct rb_node **insert_p,
@@ -1229,7 +1230,6 @@ static struct discard_cmd *__insert_discard_tree(struct f2fs_sb_info *sbi,
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
struct rb_node **p;
struct rb_node *parent = NULL;
- struct discard_cmd *dc = NULL;
bool leftmost = true;
if (insert_p && insert_parent) {
@@ -1241,12 +1241,8 @@ static struct discard_cmd *__insert_discard_tree(struct f2fs_sb_info *sbi,
p = f2fs_lookup_rb_tree_for_insert(sbi, &dcc->root, &parent,
lstart, &leftmost);
do_insert:
- dc = __attach_discard_cmd(sbi, bdev, lstart, start, len, parent,
+ __attach_discard_cmd(sbi, bdev, lstart, start, len, parent,
p, leftmost);
- if (!dc)
- return NULL;
-
- return dc;
}
static void __relocate_discard_cmd(struct discard_cmd_control *dcc,
@@ -1463,6 +1459,8 @@ next:
return issued;
}
+static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
+ struct discard_policy *dpolicy);
static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
struct discard_policy *dpolicy)
@@ -1471,12 +1469,14 @@ static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
struct list_head *pend_list;
struct discard_cmd *dc, *tmp;
struct blk_plug plug;
- int i, issued = 0;
+ int i, issued;
bool io_interrupted = false;
if (dpolicy->timeout)
f2fs_update_time(sbi, UMOUNT_DISCARD_TIMEOUT);
+retry:
+ issued = 0;
for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
if (dpolicy->timeout &&
f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
@@ -1523,6 +1523,11 @@ next:
break;
}
+ if (dpolicy->type == DPOLICY_UMOUNT && issued) {
+ __wait_all_discard_cmd(sbi, dpolicy);
+ goto retry;
+ }
+
if (!issued && io_interrupted)
issued = -1;
@@ -3102,6 +3107,14 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
type = CURSEG_COLD_DATA;
}
+ /*
+ * We need to wait for node_write to avoid block allocation during
+ * checkpoint. This can only happen to quota writes which can cause
+ * the below discard race condition.
+ */
+ if (IS_DATASEG(type))
+ down_write(&sbi->node_write);
+
down_read(&SM_I(sbi)->curseg_lock);
mutex_lock(&curseg->curseg_mutex);
@@ -3167,6 +3180,9 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
up_read(&SM_I(sbi)->curseg_lock);
+ if (IS_DATASEG(type))
+ up_write(&sbi->node_write);
+
if (put_pin_sem)
up_read(&sbi->pin_sem);
}
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index 7a83bd530812..cba16cca5189 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* fs/f2fs/segment.h
*
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 8a9955902d84..20e56b0fa46a 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -285,6 +285,22 @@ static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
F2FS_OPTION(sbi).s_resgid));
}
+static inline void adjust_unusable_cap_perc(struct f2fs_sb_info *sbi)
+{
+ if (!F2FS_OPTION(sbi).unusable_cap_perc)
+ return;
+
+ if (F2FS_OPTION(sbi).unusable_cap_perc == 100)
+ F2FS_OPTION(sbi).unusable_cap = sbi->user_block_count;
+ else
+ F2FS_OPTION(sbi).unusable_cap = (sbi->user_block_count / 100) *
+ F2FS_OPTION(sbi).unusable_cap_perc;
+
+ f2fs_info(sbi, "Adjust unusable cap for checkpoint=disable = %u / %u%%",
+ F2FS_OPTION(sbi).unusable_cap,
+ F2FS_OPTION(sbi).unusable_cap_perc);
+}
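+
+The == 100 special case in adjust_unusable_cap_perc() exists because
+(user_block_count / 100) * perc truncates and can undershoot a
+full-capacity request by up to 99 blocks. A quick demonstration:
+
+#include <stdio.h>
+
+int main(void)
+{
+	unsigned int user_block_count = 1000099, perc = 100;
+	unsigned int cap = (user_block_count / 100) * perc;
+
+	/* prints cap=1000000 lost=99: hence cap = user_block_count at 100% */
+	printf("cap=%u lost=%u\n", cap, user_block_count - cap);
+	return 0;
+}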
+
static void init_once(void *foo)
{
struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;
@@ -471,11 +487,11 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
if (!name)
return -ENOMEM;
- if (strlen(name) == 2 && !strncmp(name, "on", 2)) {
+ if (!strcmp(name, "on")) {
F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
- } else if (strlen(name) == 3 && !strncmp(name, "off", 3)) {
+ } else if (!strcmp(name, "off")) {
F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_OFF;
- } else if (strlen(name) == 4 && !strncmp(name, "sync", 4)) {
+ } else if (!strcmp(name, "sync")) {
F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_SYNC;
} else {
kvfree(name);
@@ -635,16 +651,14 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
if (!name)
return -ENOMEM;
- if (strlen(name) == 8 &&
- !strncmp(name, "adaptive", 8)) {
+ if (!strcmp(name, "adaptive")) {
if (f2fs_sb_has_blkzoned(sbi)) {
f2fs_warn(sbi, "adaptive mode is not allowed with zoned block device feature");
kvfree(name);
return -EINVAL;
}
F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
- } else if (strlen(name) == 3 &&
- !strncmp(name, "lfs", 3)) {
+ } else if (!strcmp(name, "lfs")) {
F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
} else {
kvfree(name);
@@ -769,14 +783,11 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
name = match_strdup(&args[0]);
if (!name)
return -ENOMEM;
- if (strlen(name) == 10 &&
- !strncmp(name, "user-based", 10)) {
+ if (!strcmp(name, "user-based")) {
F2FS_OPTION(sbi).whint_mode = WHINT_MODE_USER;
- } else if (strlen(name) == 3 &&
- !strncmp(name, "off", 3)) {
+ } else if (!strcmp(name, "off")) {
F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
- } else if (strlen(name) == 8 &&
- !strncmp(name, "fs-based", 8)) {
+ } else if (!strcmp(name, "fs-based")) {
F2FS_OPTION(sbi).whint_mode = WHINT_MODE_FS;
} else {
kvfree(name);
@@ -789,11 +800,9 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
if (!name)
return -ENOMEM;
- if (strlen(name) == 7 &&
- !strncmp(name, "default", 7)) {
+ if (!strcmp(name, "default")) {
F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
- } else if (strlen(name) == 5 &&
- !strncmp(name, "reuse", 5)) {
+ } else if (!strcmp(name, "reuse")) {
F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
} else {
kvfree(name);
@@ -805,14 +814,11 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
name = match_strdup(&args[0]);
if (!name)
return -ENOMEM;
- if (strlen(name) == 5 &&
- !strncmp(name, "posix", 5)) {
+ if (!strcmp(name, "posix")) {
F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
- } else if (strlen(name) == 6 &&
- !strncmp(name, "strict", 6)) {
+ } else if (!strcmp(name, "strict")) {
F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_STRICT;
- } else if (strlen(name) == 9 &&
- !strncmp(name, "nobarrier", 9)) {
+ } else if (!strcmp(name, "nobarrier")) {
F2FS_OPTION(sbi).fsync_mode =
FSYNC_MODE_NOBARRIER;
} else {
@@ -832,12 +838,7 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
return -EINVAL;
if (arg < 0 || arg > 100)
return -EINVAL;
- if (arg == 100)
- F2FS_OPTION(sbi).unusable_cap =
- sbi->user_block_count;
- else
- F2FS_OPTION(sbi).unusable_cap =
- (sbi->user_block_count / 100) * arg;
+ F2FS_OPTION(sbi).unusable_cap_perc = arg;
set_opt(sbi, DISABLE_CHECKPOINT);
break;
case Opt_checkpoint_disable_cap:
@@ -860,17 +861,18 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
name = match_strdup(&args[0]);
if (!name)
return -ENOMEM;
- if (strlen(name) == 3 && !strcmp(name, "lzo")) {
+ if (!strcmp(name, "lzo")) {
F2FS_OPTION(sbi).compress_algorithm =
COMPRESS_LZO;
- } else if (strlen(name) == 3 &&
- !strcmp(name, "lz4")) {
+ } else if (!strcmp(name, "lz4")) {
F2FS_OPTION(sbi).compress_algorithm =
COMPRESS_LZ4;
- } else if (strlen(name) == 4 &&
- !strcmp(name, "zstd")) {
+ } else if (!strcmp(name, "zstd")) {
F2FS_OPTION(sbi).compress_algorithm =
COMPRESS_ZSTD;
+ } else if (!strcmp(name, "lzo-rle")) {
+ F2FS_OPTION(sbi).compress_algorithm =
+ COMPRESS_LZORLE;
} else {
kfree(name);
return -EINVAL;
@@ -1330,7 +1332,8 @@ static int f2fs_statfs_project(struct super_block *sb,
limit >>= sb->s_blocksize_bits;
if (limit && buf->f_blocks > limit) {
- curblock = dquot->dq_dqb.dqb_curspace >> sb->s_blocksize_bits;
+ curblock = (dquot->dq_dqb.dqb_curspace +
+ dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
buf->f_blocks = limit;
buf->f_bfree = buf->f_bavail =
(buf->f_blocks > curblock) ?
@@ -1465,6 +1468,9 @@ static inline void f2fs_show_compress_options(struct seq_file *seq,
case COMPRESS_ZSTD:
algtype = "zstd";
break;
+ case COMPRESS_LZORLE:
+ algtype = "lzo-rle";
+ break;
}
seq_printf(seq, ",compress_algorithm=%s", algtype);
@@ -1880,6 +1886,7 @@ skip:
(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
limit_reserve_root(sbi);
+ adjust_unusable_cap_perc(sbi);
*flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
return 0;
restore_gc:
@@ -3062,7 +3069,7 @@ static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
FDEV(devi).nr_blkz++;
- FDEV(devi).blkz_seq = f2fs_kzalloc(sbi,
+ FDEV(devi).blkz_seq = f2fs_kvzalloc(sbi,
BITS_TO_LONGS(FDEV(devi).nr_blkz)
* sizeof(unsigned long),
GFP_KERNEL);
@@ -3449,7 +3456,6 @@ try_onemore:
init_rwsem(&sbi->gc_lock);
mutex_init(&sbi->writepages);
mutex_init(&sbi->cp_mutex);
- mutex_init(&sbi->resize_mutex);
init_rwsem(&sbi->node_write);
init_rwsem(&sbi->node_change);
@@ -3460,6 +3466,7 @@ try_onemore:
/* init iostat info */
spin_lock_init(&sbi->iostat_lock);
sbi->iostat_enable = false;
+ sbi->iostat_period_ms = DEFAULT_IOSTAT_PERIOD_MS;
for (i = 0; i < NR_PAGE_TYPE; i++) {
int n = (i == META) ? 1: NR_TEMP_TYPE;
@@ -3557,6 +3564,7 @@ try_onemore:
sbi->reserved_blocks = 0;
sbi->current_reserved_blocks = 0;
limit_reserve_root(sbi);
+ adjust_unusable_cap_perc(sbi);
for (i = 0; i < NR_INODE_TYPE; i++) {
INIT_LIST_HEAD(&sbi->inode_list[i]);
@@ -3927,7 +3935,12 @@ static int __init init_f2fs_fs(void)
err = f2fs_init_bioset();
if (err)
goto free_bio_enrty_cache;
+ err = f2fs_init_compress_mempool();
+ if (err)
+ goto free_bioset;
return 0;
+free_bioset:
+ f2fs_destroy_bioset();
free_bio_enrty_cache:
f2fs_destroy_bio_entry_cache();
free_post_read:
@@ -3955,6 +3968,7 @@ fail:
static void __exit exit_f2fs_fs(void)
{
+ f2fs_destroy_compress_mempool();
f2fs_destroy_bioset();
f2fs_destroy_bio_entry_cache();
f2fs_destroy_post_read_processing();
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index 3162f46b3c9b..e877c59b9fdb 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -15,6 +15,7 @@
#include "f2fs.h"
#include "segment.h"
#include "gc.h"
+#include <trace/events/f2fs.h>
static struct proc_dir_entry *f2fs_proc_root;
@@ -372,7 +373,6 @@ out:
return count;
}
-
if (!strcmp(a->attr.name, "iostat_enable")) {
sbi->iostat_enable = !!t;
if (!sbi->iostat_enable)
@@ -380,6 +380,15 @@ out:
return count;
}
+ if (!strcmp(a->attr.name, "iostat_period_ms")) {
+ if (t < MIN_IOSTAT_PERIOD_MS || t > MAX_IOSTAT_PERIOD_MS)
+ return -EINVAL;
+ spin_lock(&sbi->iostat_lock);
+ sbi->iostat_period_ms = (unsigned int)t;
+ spin_unlock(&sbi->iostat_lock);
+ return count;
+ }
+
*ui = (unsigned int)t;
return count;
@@ -538,6 +547,7 @@ F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_idle_interval, interval_time[GC_TIME]);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info,
umount_discard_timeout, interval_time[UMOUNT_DISCARD_TIMEOUT]);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, iostat_enable, iostat_enable);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, iostat_period_ms, iostat_period_ms);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, readdir_ra, readdir_ra);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_pin_file_thresh, gc_pin_file_threshold);
F2FS_RW_ATTR(F2FS_SBI, f2fs_super_block, extension_list, extension_list);
@@ -545,6 +555,8 @@ F2FS_RW_ATTR(F2FS_SBI, f2fs_super_block, extension_list, extension_list);
F2FS_RW_ATTR(FAULT_INFO_RATE, f2fs_fault_info, inject_rate, inject_rate);
F2FS_RW_ATTR(FAULT_INFO_TYPE, f2fs_fault_info, inject_type, inject_type);
#endif
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, data_io_flag, data_io_flag);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, node_io_flag, node_io_flag);
F2FS_GENERAL_RO_ATTR(dirty_segments);
F2FS_GENERAL_RO_ATTR(free_segments);
F2FS_GENERAL_RO_ATTR(lifetime_write_kbytes);
@@ -618,6 +630,7 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(gc_idle_interval),
ATTR_LIST(umount_discard_timeout),
ATTR_LIST(iostat_enable),
+ ATTR_LIST(iostat_period_ms),
ATTR_LIST(readdir_ra),
ATTR_LIST(gc_pin_file_thresh),
ATTR_LIST(extension_list),
@@ -625,6 +638,8 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(inject_rate),
ATTR_LIST(inject_type),
#endif
+ ATTR_LIST(data_io_flag),
+ ATTR_LIST(node_io_flag),
ATTR_LIST(dirty_segments),
ATTR_LIST(free_segments),
ATTR_LIST(unusable),
@@ -754,6 +769,33 @@ static int __maybe_unused segment_bits_seq_show(struct seq_file *seq,
return 0;
}
+void f2fs_record_iostat(struct f2fs_sb_info *sbi)
+{
+ unsigned long long iostat_diff[NR_IO_TYPE];
+ int i;
+
+ if (time_is_after_jiffies(sbi->iostat_next_period))
+ return;
+
+ /* Need double check under the lock */
+ spin_lock(&sbi->iostat_lock);
+ if (time_is_after_jiffies(sbi->iostat_next_period)) {
+ spin_unlock(&sbi->iostat_lock);
+ return;
+ }
+ sbi->iostat_next_period = jiffies +
+ msecs_to_jiffies(sbi->iostat_period_ms);
+
+ for (i = 0; i < NR_IO_TYPE; i++) {
+ iostat_diff[i] = sbi->rw_iostat[i] -
+ sbi->prev_rw_iostat[i];
+ sbi->prev_rw_iostat[i] = sbi->rw_iostat[i];
+ }
+ spin_unlock(&sbi->iostat_lock);
+
+ trace_f2fs_iostat(sbi, iostat_diff);
+}
+
static int __maybe_unused iostat_info_seq_show(struct seq_file *seq,
void *offset)
{
@@ -766,33 +808,58 @@ static int __maybe_unused iostat_info_seq_show(struct seq_file *seq,
seq_printf(seq, "time: %-16llu\n", now);
- /* print app IOs */
+ /* print app write IOs */
+ seq_puts(seq, "[WRITE]\n");
seq_printf(seq, "app buffered: %-16llu\n",
- sbi->write_iostat[APP_BUFFERED_IO]);
+ sbi->rw_iostat[APP_BUFFERED_IO]);
seq_printf(seq, "app direct: %-16llu\n",
- sbi->write_iostat[APP_DIRECT_IO]);
+ sbi->rw_iostat[APP_DIRECT_IO]);
seq_printf(seq, "app mapped: %-16llu\n",
- sbi->write_iostat[APP_MAPPED_IO]);
+ sbi->rw_iostat[APP_MAPPED_IO]);
- /* print fs IOs */
+ /* print fs write IOs */
seq_printf(seq, "fs data: %-16llu\n",
- sbi->write_iostat[FS_DATA_IO]);
+ sbi->rw_iostat[FS_DATA_IO]);
seq_printf(seq, "fs node: %-16llu\n",
- sbi->write_iostat[FS_NODE_IO]);
+ sbi->rw_iostat[FS_NODE_IO]);
seq_printf(seq, "fs meta: %-16llu\n",
- sbi->write_iostat[FS_META_IO]);
+ sbi->rw_iostat[FS_META_IO]);
seq_printf(seq, "fs gc data: %-16llu\n",
- sbi->write_iostat[FS_GC_DATA_IO]);
+ sbi->rw_iostat[FS_GC_DATA_IO]);
seq_printf(seq, "fs gc node: %-16llu\n",
- sbi->write_iostat[FS_GC_NODE_IO]);
+ sbi->rw_iostat[FS_GC_NODE_IO]);
seq_printf(seq, "fs cp data: %-16llu\n",
- sbi->write_iostat[FS_CP_DATA_IO]);
+ sbi->rw_iostat[FS_CP_DATA_IO]);
seq_printf(seq, "fs cp node: %-16llu\n",
- sbi->write_iostat[FS_CP_NODE_IO]);
+ sbi->rw_iostat[FS_CP_NODE_IO]);
seq_printf(seq, "fs cp meta: %-16llu\n",
- sbi->write_iostat[FS_CP_META_IO]);
+ sbi->rw_iostat[FS_CP_META_IO]);
+
+ /* print app read IOs */
+ seq_puts(seq, "[READ]\n");
+ seq_printf(seq, "app buffered: %-16llu\n",
+ sbi->rw_iostat[APP_BUFFERED_READ_IO]);
+ seq_printf(seq, "app direct: %-16llu\n",
+ sbi->rw_iostat[APP_DIRECT_READ_IO]);
+ seq_printf(seq, "app mapped: %-16llu\n",
+ sbi->rw_iostat[APP_MAPPED_READ_IO]);
+
+ /* print fs read IOs */
+ seq_printf(seq, "fs data: %-16llu\n",
+ sbi->rw_iostat[FS_DATA_READ_IO]);
+ seq_printf(seq, "fs gc data: %-16llu\n",
+ sbi->rw_iostat[FS_GDATA_READ_IO]);
+ seq_printf(seq, "fs compr_data: %-16llu\n",
+ sbi->rw_iostat[FS_CDATA_READ_IO]);
+ seq_printf(seq, "fs node: %-16llu\n",
+ sbi->rw_iostat[FS_NODE_READ_IO]);
+ seq_printf(seq, "fs meta: %-16llu\n",
+ sbi->rw_iostat[FS_META_READ_IO]);
+
+ /* print other IOs */
+ seq_puts(seq, "[OTHER]\n");
seq_printf(seq, "fs discard: %-16llu\n",
- sbi->write_iostat[FS_DISCARD]);
+ sbi->rw_iostat[FS_DISCARD]);
return 0;
}
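
f2fs_record_iostat() above uses a classic double-checked rate limit: the first time_is_after_jiffies() test is lock-free so the hot path stays cheap, and the test is repeated under iostat_lock so only one racing caller per period does the work. The shape of it, reduced to a sketch:

static void rate_limited_work(struct f2fs_sb_info *sbi)
{
	if (time_is_after_jiffies(sbi->iostat_next_period))
		return;				/* fast path: no lock taken */

	spin_lock(&sbi->iostat_lock);
	if (time_is_after_jiffies(sbi->iostat_next_period)) {
		spin_unlock(&sbi->iostat_lock);	/* lost the race: done */
		return;
	}
	sbi->iostat_next_period = jiffies +
			msecs_to_jiffies(sbi->iostat_period_ms);
	/* ... snapshot counters and emit the tracepoint ... */
	spin_unlock(&sbi->iostat_lock);
}
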
diff --git a/fs/f2fs/trace.h b/fs/f2fs/trace.h
index e8075fc5b228..789f6aa727fc 100644
--- a/fs/f2fs/trace.h
+++ b/fs/f2fs/trace.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* f2fs IO tracer
*
diff --git a/fs/f2fs/xattr.h b/fs/f2fs/xattr.h
index 938fcd20565d..416d652774a3 100644
--- a/fs/f2fs/xattr.h
+++ b/fs/f2fs/xattr.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* fs/f2fs/xattr.h
*
@@ -136,6 +136,7 @@ extern void f2fs_destroy_xattr_caches(struct f2fs_sb_info *);
#else
#define f2fs_xattr_handlers NULL
+#define f2fs_listxattr NULL
static inline int f2fs_setxattr(struct inode *inode, int index,
const char *name, const void *value, size_t size,
struct page *page, int flags)
@@ -148,11 +149,6 @@ static inline int f2fs_getxattr(struct inode *inode, int index,
{
return -EOPNOTSUPP;
}
-static inline ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer,
- size_t buffer_size)
-{
- return -EOPNOTSUPP;
-}
static inline int f2fs_init_xattr_caches(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_xattr_caches(struct f2fs_sb_info *sbi) { }
#endif
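
Defining f2fs_listxattr as NULL (rather than keeping an inline stub that returns -EOPNOTSUPP) is what lets the inode_operations tables in namei.c drop their CONFIG_F2FS_FS_XATTR #ifdefs: the VFS already treats a NULL ->listxattr as "not supported". Roughly, as a sketch rather than verbatim fs/xattr.c (the real path also consults the security layer):

ssize_t vfs_listxattr_sketch(struct dentry *dentry, char *list, size_t size)
{
	struct inode *inode = d_inode(dentry);

	if (!inode->i_op->listxattr)
		return -EOPNOTSUPP;
	return inode->i_op->listxattr(dentry, list, size);
}
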
diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
index 3647c65a0f48..bbfe18c07417 100644
--- a/fs/fat/fatent.c
+++ b/fs/fat/fatent.c
@@ -632,20 +632,80 @@ error:
}
EXPORT_SYMBOL_GPL(fat_free_clusters);
-/* 128kb is the whole sectors for FAT12 and FAT16 */
-#define FAT_READA_SIZE (128 * 1024)
+struct fatent_ra {
+ sector_t cur;
+ sector_t limit;
+
+ unsigned int ra_blocks;
+ sector_t ra_advance;
+ sector_t ra_next;
+ sector_t ra_limit;
+};
-static void fat_ent_reada(struct super_block *sb, struct fat_entry *fatent,
- unsigned long reada_blocks)
+static void fat_ra_init(struct super_block *sb, struct fatent_ra *ra,
+ struct fat_entry *fatent, int ent_limit)
{
- const struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
- sector_t blocknr;
- int i, offset;
+ struct msdos_sb_info *sbi = MSDOS_SB(sb);
+ const struct fatent_operations *ops = sbi->fatent_ops;
+ sector_t blocknr, block_end;
+ int offset;
+ /*
+	 * This is a sequential read, so use ra_pages * 2 (but try to
+	 * align it to the optimal hardware IO size).
+	 * [BTW, 128 KiB covers all of the FAT sectors for FAT12 and FAT16.]
+ */
+ unsigned long ra_pages = sb->s_bdi->ra_pages;
+ unsigned int reada_blocks;
+ if (ra_pages > sb->s_bdi->io_pages)
+ ra_pages = rounddown(ra_pages, sb->s_bdi->io_pages);
+ reada_blocks = ra_pages << (PAGE_SHIFT - sb->s_blocksize_bits + 1);
+
+ /* Initialize the range for sequential read */
ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr);
+ ops->ent_blocknr(sb, ent_limit - 1, &offset, &block_end);
+ ra->cur = 0;
+ ra->limit = (block_end + 1) - blocknr;
- for (i = 0; i < reada_blocks; i++)
- sb_breadahead(sb, blocknr + i);
+ /* Advancing the window at half size */
+ ra->ra_blocks = reada_blocks >> 1;
+ ra->ra_advance = ra->cur;
+ ra->ra_next = ra->cur;
+ ra->ra_limit = ra->cur + min_t(sector_t, reada_blocks, ra->limit);
+}
+
+/* Assumed to be called before reading a new block (increments ->cur). */
+static void fat_ent_reada(struct super_block *sb, struct fatent_ra *ra,
+ struct fat_entry *fatent)
+{
+ if (ra->ra_next >= ra->ra_limit)
+ return;
+
+ if (ra->cur >= ra->ra_advance) {
+ struct msdos_sb_info *sbi = MSDOS_SB(sb);
+ const struct fatent_operations *ops = sbi->fatent_ops;
+ struct blk_plug plug;
+ sector_t blocknr, diff;
+ int offset;
+
+ ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr);
+
+ diff = blocknr - ra->cur;
+ blk_start_plug(&plug);
+		/*
+		 * FIXME: we would want to use a bio with pages directly
+		 * to reduce the number of segments.
+		 */
+ for (; ra->ra_next < ra->ra_limit; ra->ra_next++)
+ sb_breadahead(sb, ra->ra_next + diff);
+ blk_finish_plug(&plug);
+
+ /* Advance the readahead window */
+ ra->ra_advance += ra->ra_blocks;
+ ra->ra_limit += min_t(sector_t,
+ ra->ra_blocks, ra->limit - ra->ra_limit);
+ }
+ ra->cur++;
}
int fat_count_free_clusters(struct super_block *sb)
@@ -653,27 +713,20 @@ int fat_count_free_clusters(struct super_block *sb)
struct msdos_sb_info *sbi = MSDOS_SB(sb);
const struct fatent_operations *ops = sbi->fatent_ops;
struct fat_entry fatent;
- unsigned long reada_blocks, reada_mask, cur_block;
+ struct fatent_ra fatent_ra;
int err = 0, free;
lock_fat(sbi);
if (sbi->free_clusters != -1 && sbi->free_clus_valid)
goto out;
- reada_blocks = FAT_READA_SIZE >> sb->s_blocksize_bits;
- reada_mask = reada_blocks - 1;
- cur_block = 0;
-
free = 0;
fatent_init(&fatent);
fatent_set_entry(&fatent, FAT_START_ENT);
+ fat_ra_init(sb, &fatent_ra, &fatent, sbi->max_cluster);
while (fatent.entry < sbi->max_cluster) {
/* readahead of fat blocks */
- if ((cur_block & reada_mask) == 0) {
- unsigned long rest = sbi->fat_length - cur_block;
- fat_ent_reada(sb, &fatent, min(reada_blocks, rest));
- }
- cur_block++;
+ fat_ent_reada(sb, &fatent_ra, &fatent);
err = fat_ent_read_block(sb, &fatent);
if (err)
@@ -707,9 +760,9 @@ int fat_trim_fs(struct inode *inode, struct fstrim_range *range)
struct msdos_sb_info *sbi = MSDOS_SB(sb);
const struct fatent_operations *ops = sbi->fatent_ops;
struct fat_entry fatent;
+ struct fatent_ra fatent_ra;
u64 ent_start, ent_end, minlen, trimmed = 0;
u32 free = 0;
- unsigned long reada_blocks, reada_mask, cur_block = 0;
int err = 0;
/*
@@ -727,19 +780,13 @@ int fat_trim_fs(struct inode *inode, struct fstrim_range *range)
if (ent_end >= sbi->max_cluster)
ent_end = sbi->max_cluster - 1;
- reada_blocks = FAT_READA_SIZE >> sb->s_blocksize_bits;
- reada_mask = reada_blocks - 1;
-
fatent_init(&fatent);
lock_fat(sbi);
fatent_set_entry(&fatent, ent_start);
+ fat_ra_init(sb, &fatent_ra, &fatent, ent_end + 1);
while (fatent.entry <= ent_end) {
/* readahead of fat blocks */
- if ((cur_block & reada_mask) == 0) {
- unsigned long rest = sbi->fat_length - cur_block;
- fat_ent_reada(sb, &fatent, min(reada_blocks, rest));
- }
- cur_block++;
+ fat_ent_reada(sb, &fatent_ra, &fatent);
err = fat_ent_read_block(sb, &fatent);
if (err)
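
The new FAT readahead is a sliding window: the whole window is primed up front, and each time the reader crosses the advance mark another half window is issued, clamped to the end of the FAT. A minimal userspace sketch of the same scheme, assuming block-sized units (breadahead() is a made-up stand-in for sb_breadahead()):

#include <stdio.h>

/* Hypothetical stand-in for sb_breadahead(): just log the request. */
static void breadahead(unsigned long block)
{
	printf("readahead block %lu\n", block);
}

struct ra {
	unsigned long cur, limit;	/* blocks consumed / total blocks */
	unsigned long ra_blocks;	/* advance step: half the window */
	unsigned long ra_advance, ra_next, ra_limit;
};

static void ra_init(struct ra *ra, unsigned long nr_blocks,
		    unsigned long window)
{
	ra->cur = 0;
	ra->limit = nr_blocks;
	ra->ra_blocks = window / 2;
	ra->ra_advance = ra->ra_next = 0;
	ra->ra_limit = window < nr_blocks ? window : nr_blocks;
}

/* Call once before reading each block, like fat_ent_reada(). */
static void ra_step(struct ra *ra)
{
	if (ra->ra_next < ra->ra_limit && ra->cur >= ra->ra_advance) {
		while (ra->ra_next < ra->ra_limit)
			breadahead(ra->ra_next++);
		ra->ra_advance += ra->ra_blocks;
		if (ra->limit - ra->ra_limit < ra->ra_blocks)
			ra->ra_limit = ra->limit;
		else
			ra->ra_limit += ra->ra_blocks;
	}
	ra->cur++;
}

int main(void)
{
	struct ra ra;
	unsigned long i;

	ra_init(&ra, 20, 8);	/* 20 FAT blocks, 8-block window */
	for (i = 0; i < 20; i++)
		ra_step(&ra);	/* ... then read block i */
	return 0;
}
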
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index e6e68b2274a5..a0cf99debb1e 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -1519,6 +1519,12 @@ static int fat_read_bpb(struct super_block *sb, struct fat_boot_sector *b,
goto out;
}
+ if (bpb->fat_fat_length == 0 && bpb->fat32_length == 0) {
+ if (!silent)
+ fat_msg(sb, KERN_ERR, "bogus number of FAT sectors");
+ goto out;
+ }
+
error = 0;
out:
diff --git a/fs/file_table.c b/fs/file_table.c
index 676e620948d2..656647f9575a 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -80,14 +80,14 @@ EXPORT_SYMBOL_GPL(get_max_files);
*/
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
int proc_nr_files(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
files_stat.nr_files = get_nr_files();
return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#else
int proc_nr_files(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
return -ENOSYS;
}
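
The __user annotation drops out here because sysctl proc handlers now receive a kernel buffer; the sysctl core performs the user-space copies. A hedged sketch of a handler written against the new convention (my_counter and the handler name are made up for illustration):

static unsigned long my_counter;	/* hypothetical statistic */

static int proc_my_counter(struct ctl_table *table, int write,
			   void *buffer, size_t *lenp, loff_t *ppos)
{
	/*
	 * buffer points at kernel memory now; the sysctl core already
	 * did the copy_from_user()/copy_to_user(), so the generic
	 * helpers operate on it directly.
	 */
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
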
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index a750381d554a..a605c3dddabc 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -1125,6 +1125,7 @@ void inode_io_list_del(struct inode *inode)
inode_io_list_del_locked(inode, wb);
spin_unlock(&wb->list_lock);
}
+EXPORT_SYMBOL(inode_io_list_del);
/*
* mark an inode as under writeback on the sb
diff --git a/fs/fscache/main.c b/fs/fscache/main.c
index 59c2494efda3..c1e6cc9091aa 100644
--- a/fs/fscache/main.c
+++ b/fs/fscache/main.c
@@ -51,8 +51,7 @@ static unsigned fscache_op_max_active = 2;
static struct ctl_table_header *fscache_sysctl_header;
static int fscache_max_active_sysctl(struct ctl_table *table, int write,
- void __user *buffer,
- size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
struct workqueue_struct **wqp = table->extra1;
unsigned int *datap = table->data;
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index c7a65cf2bcca..02b3c36b3676 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -342,7 +342,7 @@ static int queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
list_add_tail(&req->intr_entry, &fiq->interrupts);
/*
* Pairs with smp_mb() implied by test_and_set_bit()
- * from request_end().
+ * from fuse_request_end().
*/
smp_mb();
if (test_bit(FR_FINISHED, &req->flags)) {
@@ -764,16 +764,15 @@ static int fuse_check_page(struct page *page)
{
if (page_mapcount(page) ||
page->mapping != NULL ||
- page_count(page) != 1 ||
(page->flags & PAGE_FLAGS_CHECK_AT_PREP &
~(1 << PG_locked |
1 << PG_referenced |
1 << PG_uptodate |
1 << PG_lru |
1 << PG_active |
- 1 << PG_reclaim))) {
- pr_warn("trying to steal weird page\n");
- pr_warn(" page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
+ 1 << PG_reclaim |
+ 1 << PG_waiters))) {
+ dump_page(page, "fuse: trying to steal weird page");
return 1;
}
return 0;
@@ -805,7 +804,7 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
if (cs->len != PAGE_SIZE)
goto out_fallback;
- if (pipe_buf_steal(cs->pipe, buf) != 0)
+ if (!pipe_buf_try_steal(cs->pipe, buf))
goto out_fallback;
newpage = buf->page;
@@ -840,7 +839,7 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
get_page(newpage);
if (!(buf->flags & PIPE_BUF_FLAG_LRU))
- lru_cache_add_file(newpage);
+ lru_cache_add(newpage);
err = 0;
spin_lock(&cs->req->waitq.lock);
@@ -1977,8 +1976,9 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
struct pipe_buffer *ibuf;
struct pipe_buffer *obuf;
- BUG_ON(nbuf >= pipe->ring_size);
- BUG_ON(tail == head);
+ if (WARN_ON(nbuf >= count || tail == head))
+ goto out_free;
+
ibuf = &pipe->bufs[tail & mask];
obuf = &bufs[nbuf];
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index de1e2fde60bd..26f028bc760b 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1689,8 +1689,18 @@ static int fuse_getattr(const struct path *path, struct kstat *stat,
struct inode *inode = d_inode(path->dentry);
struct fuse_conn *fc = get_fuse_conn(inode);
- if (!fuse_allow_current_process(fc))
+ if (!fuse_allow_current_process(fc)) {
+ if (!request_mask) {
+		if (!request_mask) {
+			/*
+			 * If the user explicitly requested *nothing*, don't
+			 * error out; return st_dev only.
+			 */
+ stat->result_mask = 0;
+ stat->dev = inode->i_sb->s_dev;
+ return 0;
+ }
return -EACCES;
+ }
return fuse_update_get_attr(inode, NULL, stat, request_mask, flags);
}
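
The effect of the fuse_getattr() change is visible from userspace via statx(2): with a zero mask, a process that is otherwise not allowed to query the fuse mount still learns the device number. A small example, assuming Linux with a glibc new enough (2.28+) to provide the statx() wrapper:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct statx stx;

	if (argc < 2)
		return 1;
	/* mask == 0: request nothing; st_dev is reported regardless */
	if (statx(AT_FDCWD, argv[1], 0, 0, &stx) != 0) {
		perror("statx");
		return 1;
	}
	printf("result mask 0x%x, dev %u:%u\n", (unsigned)stx.stx_mask,
	       stx.stx_dev_major, stx.stx_dev_minor);
	return 0;
}
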
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index bac51c32d660..e573b0cd2737 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -357,7 +357,7 @@ u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
struct fuse_writepage_args {
struct fuse_io_args ia;
- struct list_head writepages_entry;
+ struct rb_node writepages_entry;
struct list_head queue_entry;
struct fuse_writepage_args *next;
struct inode *inode;
@@ -366,17 +366,23 @@ struct fuse_writepage_args {
static struct fuse_writepage_args *fuse_find_writeback(struct fuse_inode *fi,
pgoff_t idx_from, pgoff_t idx_to)
{
- struct fuse_writepage_args *wpa;
+ struct rb_node *n;
+
+ n = fi->writepages.rb_node;
- list_for_each_entry(wpa, &fi->writepages, writepages_entry) {
+ while (n) {
+ struct fuse_writepage_args *wpa;
pgoff_t curr_index;
+ wpa = rb_entry(n, struct fuse_writepage_args, writepages_entry);
WARN_ON(get_fuse_inode(wpa->inode) != fi);
curr_index = wpa->ia.write.in.offset >> PAGE_SHIFT;
- if (idx_from < curr_index + wpa->ia.ap.num_pages &&
- curr_index <= idx_to) {
+ if (idx_from >= curr_index + wpa->ia.ap.num_pages)
+ n = n->rb_right;
+ else if (idx_to < curr_index)
+ n = n->rb_left;
+ else
return wpa;
- }
}
return NULL;
}
@@ -445,9 +451,6 @@ static int fuse_flush(struct file *file, fl_owner_t id)
if (is_bad_inode(inode))
return -EIO;
- if (fc->no_flush)
- return 0;
-
err = write_inode_now(inode, 1);
if (err)
return err;
@@ -460,6 +463,10 @@ static int fuse_flush(struct file *file, fl_owner_t id)
if (err)
return err;
+ err = 0;
+ if (fc->no_flush)
+ goto inval_attr_out;
+
memset(&inarg, 0, sizeof(inarg));
inarg.fh = ff->fh;
inarg.lock_owner = fuse_lock_owner_id(fc, id);
@@ -475,6 +482,14 @@ static int fuse_flush(struct file *file, fl_owner_t id)
fc->no_flush = 1;
err = 0;
}
+
+inval_attr_out:
+	/*
+	 * The in-memory i_blocks is not maintained by fuse; if the writeback
+	 * cache is enabled, i_blocks from the cached attr may not be accurate.
+	 */
+ if (!err && fc->writeback_cache)
+ fuse_invalidate_attr(inode);
return err;
}
@@ -712,6 +727,7 @@ static ssize_t fuse_async_req_send(struct fuse_conn *fc,
spin_unlock(&io->lock);
ia->ap.args.end = fuse_aio_complete_req;
+ ia->ap.args.may_block = io->should_dirty;
err = fuse_simple_background(fc, &ia->ap.args, GFP_KERNEL);
if (err)
fuse_aio_complete_req(fc, &ia->ap.args, err);
@@ -1570,7 +1586,7 @@ static void fuse_writepage_finish(struct fuse_conn *fc,
struct backing_dev_info *bdi = inode_to_bdi(inode);
int i;
- list_del(&wpa->writepages_entry);
+ rb_erase(&wpa->writepages_entry, &fi->writepages);
for (i = 0; i < ap->num_pages; i++) {
dec_wb_stat(&bdi->wb, WB_WRITEBACK);
dec_node_page_state(ap->pages[i], NR_WRITEBACK_TEMP);
@@ -1658,6 +1674,36 @@ __acquires(fi->lock)
}
}
+static void tree_insert(struct rb_root *root, struct fuse_writepage_args *wpa)
+{
+ pgoff_t idx_from = wpa->ia.write.in.offset >> PAGE_SHIFT;
+ pgoff_t idx_to = idx_from + wpa->ia.ap.num_pages - 1;
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+
+ WARN_ON(!wpa->ia.ap.num_pages);
+ while (*p) {
+ struct fuse_writepage_args *curr;
+ pgoff_t curr_index;
+
+ parent = *p;
+ curr = rb_entry(parent, struct fuse_writepage_args,
+ writepages_entry);
+ WARN_ON(curr->inode != wpa->inode);
+ curr_index = curr->ia.write.in.offset >> PAGE_SHIFT;
+
+ if (idx_from >= curr_index + curr->ia.ap.num_pages)
+ p = &(*p)->rb_right;
+ else if (idx_to < curr_index)
+ p = &(*p)->rb_left;
+ else
+ return (void) WARN_ON(true);
+ }
+
+ rb_link_node(&wpa->writepages_entry, parent, p);
+ rb_insert_color(&wpa->writepages_entry, root);
+}
+
static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_args *args,
int error)
{
@@ -1676,7 +1722,7 @@ static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_args *args,
wpa->next = next->next;
next->next = NULL;
next->ia.ff = fuse_file_get(wpa->ia.ff);
- list_add(&next->writepages_entry, &fi->writepages);
+ tree_insert(&fi->writepages, next);
/*
* Skip fuse_flush_writepages() to make it easy to crop requests
@@ -1811,7 +1857,7 @@ static int fuse_writepage_locked(struct page *page)
inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP);
spin_lock(&fi->lock);
- list_add(&wpa->writepages_entry, &fi->writepages);
+ tree_insert(&fi->writepages, wpa);
list_add_tail(&wpa->queue_entry, &fi->queued_writes);
fuse_flush_writepages(inode);
spin_unlock(&fi->lock);
@@ -1923,10 +1969,10 @@ static bool fuse_writepage_in_flight(struct fuse_writepage_args *new_wpa,
WARN_ON(new_ap->num_pages != 0);
spin_lock(&fi->lock);
- list_del(&new_wpa->writepages_entry);
+ rb_erase(&new_wpa->writepages_entry, &fi->writepages);
old_wpa = fuse_find_writeback(fi, page->index, page->index);
if (!old_wpa) {
- list_add(&new_wpa->writepages_entry, &fi->writepages);
+ tree_insert(&fi->writepages, new_wpa);
spin_unlock(&fi->lock);
return false;
}
@@ -2041,7 +2087,7 @@ static int fuse_writepages_fill(struct page *page,
wpa->inode = inode;
spin_lock(&fi->lock);
- list_add(&wpa->writepages_entry, &fi->writepages);
+ tree_insert(&fi->writepages, wpa);
spin_unlock(&fi->lock);
data->wpa = wpa;
@@ -3235,13 +3281,11 @@ static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in,
if (file_inode(file_in)->i_sb != file_inode(file_out)->i_sb)
return -EXDEV;
- if (fc->writeback_cache) {
- inode_lock(inode_in);
- err = fuse_writeback_range(inode_in, pos_in, pos_in + len);
- inode_unlock(inode_in);
- if (err)
- return err;
- }
+ inode_lock(inode_in);
+ err = fuse_writeback_range(inode_in, pos_in, pos_in + len - 1);
+ inode_unlock(inode_in);
+ if (err)
+ return err;
inode_lock(inode_out);
@@ -3249,11 +3293,27 @@ static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in,
if (err)
goto out;
- if (fc->writeback_cache) {
- err = fuse_writeback_range(inode_out, pos_out, pos_out + len);
- if (err)
- goto out;
- }
+ /*
+ * Write out dirty pages in the destination file before sending the COPY
+ * request to userspace. After the request is completed, truncate off
+ * pages (including partial ones) from the cache that have been copied,
+ * since these contain stale data at that point.
+ *
+ * This should be mostly correct, but if the COPY writes to partial
+ * pages (at the start or end) and the parts not covered by the COPY are
+ * written through a memory map after calling fuse_writeback_range(),
+ * then these partial page modifications will be lost on truncation.
+ *
+	 * It is unlikely that anyone would rely on such mixed-style
+	 * modifications.  Yet this does give fewer guarantees than if the
+	 * copy were performed with write(2).
+	 *
+	 * To fix this, an i_mmap_sem-style lock could be used to prevent new
+	 * faults while the copy is ongoing.
+ */
+ err = fuse_writeback_range(inode_out, pos_out, pos_out + len - 1);
+ if (err)
+ goto out;
if (is_unstable)
set_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state);
@@ -3274,6 +3334,10 @@ static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in,
if (err)
goto out;
+ truncate_inode_pages_range(inode_out->i_mapping,
+ ALIGN_DOWN(pos_out, PAGE_SIZE),
+ ALIGN(pos_out + outarg.size, PAGE_SIZE) - 1);
+
if (fc->writeback_cache) {
fuse_write_update_size(inode_out, pos_out + outarg.size);
file_update_time(file_out);
@@ -3351,5 +3415,5 @@ void fuse_init_file_inode(struct inode *inode)
INIT_LIST_HEAD(&fi->queued_writes);
fi->writectr = 0;
init_waitqueue_head(&fi->page_waitq);
- INIT_LIST_HEAD(&fi->writepages);
+ fi->writepages = RB_ROOT;
}
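
The list-to-rbtree conversion works because the tracked writeback ranges never overlap, so a single left/right descent finds the one range (if any) intersecting [idx_from, idx_to]. A self-contained sketch of the same descent over an unbalanced binary search tree (no rebalancing, purely illustrative):

#include <stdio.h>
#include <stdlib.h>

struct range {
	unsigned long from, to;		/* inclusive, non-overlapping */
	struct range *left, *right;
};

static struct range *insert(struct range *root, unsigned long from,
			    unsigned long to)
{
	if (!root) {
		struct range *r = calloc(1, sizeof(*r));
		r->from = from;
		r->to = to;
		return r;
	}
	if (from > root->to)
		root->right = insert(root->right, from, to);
	else if (to < root->from)
		root->left = insert(root->left, from, to);
	/* else: overlap -- the kernel WARNs and drops it, as above */
	return root;
}

/* Mirrors fuse_find_writeback(): descend until a range intersects. */
static struct range *find(struct range *n, unsigned long from,
			  unsigned long to)
{
	while (n) {
		if (from > n->to)
			n = n->right;
		else if (to < n->from)
			n = n->left;
		else
			return n;
	}
	return NULL;
}

int main(void)
{
	struct range *root = NULL;

	root = insert(root, 0, 3);
	root = insert(root, 10, 12);
	printf("hit:  %p\n", (void *)find(root, 11, 20));
	printf("miss: %p\n", (void *)find(root, 4, 9));
	return 0;
}
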
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index ca344bf71404..740a8a7d7ae6 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -111,7 +111,7 @@ struct fuse_inode {
wait_queue_head_t page_waitq;
	/* List of writepage requests (pending or sent) */
- struct list_head writepages;
+ struct rb_root writepages;
};
/* readdir cache (directory only) */
@@ -249,6 +249,7 @@ struct fuse_args {
bool out_argvar:1;
bool page_zeroing:1;
bool page_replace:1;
+ bool may_block:1;
struct fuse_in_arg in_args[3];
struct fuse_arg out_args[2];
void (*end)(struct fuse_conn *fc, struct fuse_args *args, int error);
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 95d712d44ca1..5b4aebf5821f 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -321,6 +321,8 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid,
loff_t offset, loff_t len)
{
+ struct fuse_conn *fc = get_fuse_conn_super(sb);
+ struct fuse_inode *fi;
struct inode *inode;
pgoff_t pg_start;
pgoff_t pg_end;
@@ -329,6 +331,11 @@ int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid,
if (!inode)
return -ENOENT;
+ fi = get_fuse_inode(inode);
+ spin_lock(&fi->lock);
+ fi->attr_version = atomic64_inc_return(&fc->attr_version);
+ spin_unlock(&fi->lock);
+
fuse_invalidate_attr(inode);
forget_all_cached_acls(inode);
if (offset >= 0) {
@@ -1113,7 +1120,7 @@ EXPORT_SYMBOL_GPL(fuse_dev_free);
int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx)
{
- struct fuse_dev *fud;
+ struct fuse_dev *fud = NULL;
struct fuse_conn *fc = get_fuse_conn_super(sb);
struct inode *root;
struct dentry *root_dentry;
@@ -1155,9 +1162,12 @@ int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx)
if (sb->s_user_ns != &init_user_ns)
sb->s_xattr = fuse_no_acl_xattr_handlers;
- fud = fuse_dev_alloc_install(fc);
- if (!fud)
- goto err;
+ if (ctx->fudptr) {
+ err = -ENOMEM;
+ fud = fuse_dev_alloc_install(fc);
+ if (!fud)
+ goto err;
+ }
fc->dev = sb->s_dev;
fc->sb = sb;
@@ -1191,7 +1201,7 @@ int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx)
mutex_lock(&fuse_mutex);
err = -EINVAL;
- if (*ctx->fudptr)
+ if (ctx->fudptr && *ctx->fudptr)
goto err_unlock;
err = fuse_ctl_add_conn(fc);
@@ -1200,7 +1210,8 @@ int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx)
list_add_tail(&fc->entry, &fuse_conn_list);
sb->s_root = root_dentry;
- *ctx->fudptr = fud;
+ if (ctx->fudptr)
+ *ctx->fudptr = fud;
mutex_unlock(&fuse_mutex);
return 0;
@@ -1208,7 +1219,8 @@ int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx)
mutex_unlock(&fuse_mutex);
dput(root_dentry);
err_dev_free:
- fuse_dev_free(fud);
+ if (fud)
+ fuse_dev_free(fud);
err:
return err;
}
diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
index bade74768903..4c4ef5d69298 100644
--- a/fs/fuse/virtio_fs.c
+++ b/fs/fuse/virtio_fs.c
@@ -60,6 +60,12 @@ struct virtio_fs_forget {
struct virtio_fs_forget_req req;
};
+struct virtio_fs_req_work {
+ struct fuse_req *req;
+ struct virtio_fs_vq *fsvq;
+ struct work_struct done_work;
+};
+
static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
struct fuse_req *req, bool in_flight);
@@ -485,19 +491,67 @@ static void copy_args_from_argbuf(struct fuse_args *args, struct fuse_req *req)
}
/* Work function for request completion */
+static void virtio_fs_request_complete(struct fuse_req *req,
+ struct virtio_fs_vq *fsvq)
+{
+ struct fuse_pqueue *fpq = &fsvq->fud->pq;
+ struct fuse_conn *fc = fsvq->fud->fc;
+ struct fuse_args *args;
+ struct fuse_args_pages *ap;
+ unsigned int len, i, thislen;
+ struct page *page;
+
+ /*
+ * TODO verify that server properly follows FUSE protocol
+ * (oh.uniq, oh.len)
+ */
+ args = req->args;
+ copy_args_from_argbuf(args, req);
+
+ if (args->out_pages && args->page_zeroing) {
+ len = args->out_args[args->out_numargs - 1].size;
+ ap = container_of(args, typeof(*ap), args);
+ for (i = 0; i < ap->num_pages; i++) {
+ thislen = ap->descs[i].length;
+ if (len < thislen) {
+ WARN_ON(ap->descs[i].offset);
+ page = ap->pages[i];
+ zero_user_segment(page, len, thislen);
+ len = 0;
+ } else {
+ len -= thislen;
+ }
+ }
+ }
+
+ spin_lock(&fpq->lock);
+ clear_bit(FR_SENT, &req->flags);
+ spin_unlock(&fpq->lock);
+
+ fuse_request_end(fc, req);
+ spin_lock(&fsvq->lock);
+ dec_in_flight_req(fsvq);
+ spin_unlock(&fsvq->lock);
+}
+
+static void virtio_fs_complete_req_work(struct work_struct *work)
+{
+ struct virtio_fs_req_work *w =
+ container_of(work, typeof(*w), done_work);
+
+ virtio_fs_request_complete(w->req, w->fsvq);
+ kfree(w);
+}
+
static void virtio_fs_requests_done_work(struct work_struct *work)
{
struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
done_work);
struct fuse_pqueue *fpq = &fsvq->fud->pq;
- struct fuse_conn *fc = fsvq->fud->fc;
struct virtqueue *vq = fsvq->vq;
struct fuse_req *req;
- struct fuse_args_pages *ap;
struct fuse_req *next;
- struct fuse_args *args;
- unsigned int len, i, thislen;
- struct page *page;
+ unsigned int len;
LIST_HEAD(reqs);
/* Collect completed requests off the virtqueue */
@@ -515,38 +569,20 @@ static void virtio_fs_requests_done_work(struct work_struct *work)
/* End requests */
list_for_each_entry_safe(req, next, &reqs, list) {
- /*
- * TODO verify that server properly follows FUSE protocol
- * (oh.uniq, oh.len)
- */
- args = req->args;
- copy_args_from_argbuf(args, req);
-
- if (args->out_pages && args->page_zeroing) {
- len = args->out_args[args->out_numargs - 1].size;
- ap = container_of(args, typeof(*ap), args);
- for (i = 0; i < ap->num_pages; i++) {
- thislen = ap->descs[i].length;
- if (len < thislen) {
- WARN_ON(ap->descs[i].offset);
- page = ap->pages[i];
- zero_user_segment(page, len, thislen);
- len = 0;
- } else {
- len -= thislen;
- }
- }
- }
-
- spin_lock(&fpq->lock);
- clear_bit(FR_SENT, &req->flags);
list_del_init(&req->list);
- spin_unlock(&fpq->lock);
- fuse_request_end(fc, req);
- spin_lock(&fsvq->lock);
- dec_in_flight_req(fsvq);
- spin_unlock(&fsvq->lock);
+ /* blocking async request completes in a worker context */
+ if (req->args->may_block) {
+ struct virtio_fs_req_work *w;
+
+ w = kzalloc(sizeof(*w), GFP_NOFS | __GFP_NOFAIL);
+ INIT_WORK(&w->done_work, virtio_fs_complete_req_work);
+ w->fsvq = fsvq;
+ w->req = req;
+ schedule_work(&w->done_work);
+ } else {
+ virtio_fs_request_complete(req, fsvq);
+ }
}
}
@@ -1067,7 +1103,7 @@ static int virtio_fs_fill_super(struct super_block *sb)
err = -ENOMEM;
/* Allocate fuse_dev for hiprio and notification queues */
- for (i = 0; i < VQ_REQUEST; i++) {
+ for (i = 0; i < fs->nvqs; i++) {
struct virtio_fs_vq *fsvq = &fs->vqs[i];
fsvq->fud = fuse_dev_alloc();
@@ -1075,18 +1111,15 @@ static int virtio_fs_fill_super(struct super_block *sb)
goto err_free_fuse_devs;
}
- ctx.fudptr = (void **)&fs->vqs[VQ_REQUEST].fud;
+ /* virtiofs allocates and installs its own fuse devices */
+ ctx.fudptr = NULL;
err = fuse_fill_super_common(sb, &ctx);
if (err < 0)
goto err_free_fuse_devs;
- fc = fs->vqs[VQ_REQUEST].fud->fc;
-
for (i = 0; i < fs->nvqs; i++) {
struct virtio_fs_vq *fsvq = &fs->vqs[i];
- if (i == VQ_REQUEST)
- continue; /* already initialized */
fuse_dev_install(fsvq->fud, fc);
}
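
Completions that may block are bounced to process context with a per-request work item, a common kernel pattern. A hedged generic sketch (my_req and complete_my_req() are hypothetical):

struct my_done_work {
	struct work_struct work;
	struct my_req *req;		/* hypothetical request type */
};

static void my_done_fn(struct work_struct *work)
{
	struct my_done_work *w = container_of(work, typeof(*w), work);

	complete_my_req(w->req);	/* may sleep: we are in a worker */
	kfree(w);
}

/* Called from the (atomic) completion handler: */
static void my_complete(struct my_req *req)
{
	struct my_done_work *w;

	w = kzalloc(sizeof(*w), GFP_NOFS | __GFP_NOFAIL);
	INIT_WORK(&w->work, my_done_fn);
	w->req = req;
	schedule_work(&w->work);
}
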
diff --git a/fs/gfs2/export.c b/fs/gfs2/export.c
index 3f717285ee48..756d05779200 100644
--- a/fs/gfs2/export.c
+++ b/fs/gfs2/export.c
@@ -134,7 +134,9 @@ static struct dentry *gfs2_get_dentry(struct super_block *sb,
struct gfs2_sbd *sdp = sb->s_fs_info;
struct inode *inode;
- inode = gfs2_lookup_by_inum(sdp, inum->no_addr, &inum->no_formal_ino,
+ if (!inum->no_formal_ino)
+ return ERR_PTR(-ESTALE);
+ inode = gfs2_lookup_by_inum(sdp, inum->no_addr, inum->no_formal_ino,
GFS2_BLKST_DINODE);
if (IS_ERR(inode))
return ERR_CAST(inode);
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index bf70e3b14938..2299dcc417ea 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -125,12 +125,11 @@ static void gfs2_glock_dealloc(struct rcu_head *rcu)
{
struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
- if (gl->gl_ops->go_flags & GLOF_ASPACE) {
+ kfree(gl->gl_lksb.sb_lvbptr);
+ if (gl->gl_ops->go_flags & GLOF_ASPACE)
kmem_cache_free(gfs2_glock_aspace_cachep, gl);
- } else {
- kfree(gl->gl_lksb.sb_lvbptr);
+ else
kmem_cache_free(gfs2_glock_cachep, gl);
- }
}
/**
@@ -164,7 +163,7 @@ void gfs2_glock_free(struct gfs2_glock *gl)
{
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
- BUG_ON(atomic_read(&gl->gl_revokes));
+ gfs2_glock_assert_withdraw(gl, atomic_read(&gl->gl_revokes) == 0);
rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
smp_mb();
wake_up_glock(gl);
@@ -465,6 +464,15 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state)
gl->gl_tchange = jiffies;
}
+static void gfs2_set_demote(struct gfs2_glock *gl)
+{
+ struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+
+ set_bit(GLF_DEMOTE, &gl->gl_flags);
+ smp_mb();
+ wake_up(&sdp->sd_async_glock_wait);
+}
+
static void gfs2_demote_wake(struct gfs2_glock *gl)
{
gl->gl_demote_state = LM_ST_EXCLUSIVE;
@@ -626,7 +634,8 @@ __acquires(&gl->gl_lockref.lock)
*/
if ((atomic_read(&gl->gl_ail_count) != 0) &&
(!cmpxchg(&sdp->sd_log_error, 0, -EIO))) {
- gfs2_assert_warn(sdp, !atomic_read(&gl->gl_ail_count));
+ gfs2_glock_assert_warn(gl,
+ !atomic_read(&gl->gl_ail_count));
gfs2_dump_glock(NULL, gl, true);
}
glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
@@ -756,20 +765,127 @@ out_unlock:
return;
}
+void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation)
+{
+ struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr;
+
+ if (ri->ri_magic == 0)
+ ri->ri_magic = cpu_to_be32(GFS2_MAGIC);
+ if (ri->ri_magic == cpu_to_be32(GFS2_MAGIC))
+ ri->ri_generation_deleted = cpu_to_be64(generation);
+}
+
+bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation)
+{
+ struct gfs2_inode_lvb *ri = (void *)gl->gl_lksb.sb_lvbptr;
+
+ if (ri->ri_magic != cpu_to_be32(GFS2_MAGIC))
+ return false;
+ return generation <= be64_to_cpu(ri->ri_generation_deleted);
+}
+
+static void gfs2_glock_poke(struct gfs2_glock *gl)
+{
+ int flags = LM_FLAG_TRY_1CB | LM_FLAG_ANY | GL_SKIP;
+ struct gfs2_holder gh;
+ int error;
+
+ error = gfs2_glock_nq_init(gl, LM_ST_SHARED, flags, &gh);
+ if (!error)
+ gfs2_glock_dq(&gh);
+}
+
+static bool gfs2_try_evict(struct gfs2_glock *gl)
+{
+ struct gfs2_inode *ip;
+ bool evicted = false;
+
+ /*
+ * If there is contention on the iopen glock and we have an inode, try
+ * to grab and release the inode so that it can be evicted. This will
+ * allow the remote node to go ahead and delete the inode without us
+ * having to do it, which will avoid rgrp glock thrashing.
+ *
+ * The remote node is likely still holding the corresponding inode
+ * glock, so it will run before we get to verify that the delete has
+ * happened below.
+ */
+ spin_lock(&gl->gl_lockref.lock);
+ ip = gl->gl_object;
+ if (ip && !igrab(&ip->i_inode))
+ ip = NULL;
+ spin_unlock(&gl->gl_lockref.lock);
+ if (ip) {
+ struct gfs2_glock *inode_gl = NULL;
+
+ gl->gl_no_formal_ino = ip->i_no_formal_ino;
+ set_bit(GIF_DEFERRED_DELETE, &ip->i_flags);
+ d_prune_aliases(&ip->i_inode);
+ iput(&ip->i_inode);
+
+ /* If the inode was evicted, gl->gl_object will now be NULL. */
+ spin_lock(&gl->gl_lockref.lock);
+ ip = gl->gl_object;
+ if (ip) {
+ inode_gl = ip->i_gl;
+ lockref_get(&inode_gl->gl_lockref);
+ clear_bit(GIF_DEFERRED_DELETE, &ip->i_flags);
+ }
+ spin_unlock(&gl->gl_lockref.lock);
+ if (inode_gl) {
+ gfs2_glock_poke(inode_gl);
+ gfs2_glock_put(inode_gl);
+ }
+ evicted = !ip;
+ }
+ return evicted;
+}
+
static void delete_work_func(struct work_struct *work)
{
- struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct gfs2_glock *gl = container_of(dwork, struct gfs2_glock, gl_delete);
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
struct inode *inode;
u64 no_addr = gl->gl_name.ln_number;
+ spin_lock(&gl->gl_lockref.lock);
+ clear_bit(GLF_PENDING_DELETE, &gl->gl_flags);
+ spin_unlock(&gl->gl_lockref.lock);
+
/* If someone's using this glock to create a new dinode, the block must
have been freed by another node, then re-used, in which case our
iopen callback is too late after the fact. Ignore it. */
if (test_bit(GLF_INODE_CREATING, &gl->gl_flags))
goto out;
- inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
+ if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
+ /*
+ * If we can evict the inode, give the remote node trying to
+ * delete the inode some time before verifying that the delete
+ * has happened. Otherwise, if we cause contention on the inode glock
+ * immediately, the remote node will think that we still have
+ * the inode in use, and so it will give up waiting.
+ *
+ * If we can't evict the inode, signal to the remote node that
+ * the inode is still in use. We'll later try to delete the
+ * inode locally in gfs2_evict_inode.
+ *
+ * FIXME: We only need to verify that the remote node has
+ * deleted the inode because nodes before this remote delete
+ * rework won't cooperate. At a later time, when we no longer
+ * care about compatibility with such nodes, we can skip this
+ * step entirely.
+ */
+ if (gfs2_try_evict(gl)) {
+ if (gfs2_queue_delete_work(gl, 5 * HZ))
+ return;
+ }
+ goto out;
+ }
+
+ inode = gfs2_lookup_by_inum(sdp, no_addr, gl->gl_no_formal_ino,
+ GFS2_BLKST_UNLINKED);
if (!IS_ERR_OR_NULL(inode)) {
d_prune_aliases(inode);
iput(inode);
@@ -800,7 +916,7 @@ static void glock_work_func(struct work_struct *work)
if (!delay) {
clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
- set_bit(GLF_DEMOTE, &gl->gl_flags);
+ gfs2_set_demote(gl);
}
}
run_queue(gl, 0);
@@ -931,7 +1047,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
gl->gl_object = NULL;
gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
- INIT_WORK(&gl->gl_delete, delete_work_func);
+ INIT_DELAYED_WORK(&gl->gl_delete, delete_work_func);
mapping = gfs2_glock2aspace(gl);
if (mapping) {
@@ -1145,9 +1261,10 @@ wait_for_dlm:
static void handle_callback(struct gfs2_glock *gl, unsigned int state,
unsigned long delay, bool remote)
{
- int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;
-
- set_bit(bit, &gl->gl_flags);
+ if (delay)
+ set_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
+ else
+ gfs2_set_demote(gl);
if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
gl->gl_demote_state = state;
gl->gl_demote_time = jiffies;
@@ -1754,6 +1871,44 @@ static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
rhashtable_walk_exit(&iter);
}
+bool gfs2_queue_delete_work(struct gfs2_glock *gl, unsigned long delay)
+{
+ bool queued;
+
+ spin_lock(&gl->gl_lockref.lock);
+ queued = queue_delayed_work(gfs2_delete_workqueue,
+ &gl->gl_delete, delay);
+ if (queued)
+ set_bit(GLF_PENDING_DELETE, &gl->gl_flags);
+ spin_unlock(&gl->gl_lockref.lock);
+ return queued;
+}
+
+void gfs2_cancel_delete_work(struct gfs2_glock *gl)
+{
+ if (cancel_delayed_work_sync(&gl->gl_delete)) {
+ clear_bit(GLF_PENDING_DELETE, &gl->gl_flags);
+ gfs2_glock_put(gl);
+ }
+}
+
+bool gfs2_delete_work_queued(const struct gfs2_glock *gl)
+{
+ return test_bit(GLF_PENDING_DELETE, &gl->gl_flags);
+}
+
+static void flush_delete_work(struct gfs2_glock *gl)
+{
+ flush_delayed_work(&gl->gl_delete);
+ gfs2_glock_queue_work(gl, 0);
+}
+
+void gfs2_flush_delete_work(struct gfs2_sbd *sdp)
+{
+ glock_hash_walk(flush_delete_work, sdp);
+ flush_workqueue(gfs2_delete_workqueue);
+}
+
/**
* thaw_glock - thaw out a glock which has an unprocessed reply waiting
* @gl: The glock to thaw
@@ -1836,7 +1991,7 @@ void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
int ret;
ret = gfs2_truncatei_resume(ip);
- gfs2_assert_withdraw(gl->gl_name.ln_sbd, ret == 0);
+ gfs2_glock_assert_withdraw(gl, ret == 0);
spin_lock(&gl->gl_lockref.lock);
clear_bit(GLF_LOCK, &gl->gl_flags);
@@ -1978,7 +2133,13 @@ void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid)
char gflags_buf[32];
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
char fs_id_buf[sizeof(sdp->sd_fsname) + 7];
+ unsigned long nrpages = 0;
+
+ if (gl->gl_ops->go_flags & GLOF_ASPACE) {
+ struct address_space *mapping = gfs2_glock2aspace(gl);
+ nrpages = mapping->nrpages;
+ }
memset(fs_id_buf, 0, sizeof(fs_id_buf));
if (fsid && sdp) /* safety precaution */
sprintf(fs_id_buf, "fsid=%s: ", sdp->sd_fsname);
@@ -1987,15 +2148,16 @@ void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl, bool fsid)
if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
dtime = 0;
gfs2_print_dbg(seq, "%sG: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d "
- "v:%d r:%d m:%ld\n", fs_id_buf, state2str(gl->gl_state),
- gl->gl_name.ln_type,
- (unsigned long long)gl->gl_name.ln_number,
- gflags2str(gflags_buf, gl),
- state2str(gl->gl_target),
- state2str(gl->gl_demote_state), dtime,
- atomic_read(&gl->gl_ail_count),
- atomic_read(&gl->gl_revokes),
- (int)gl->gl_lockref.count, gl->gl_hold_time);
+ "v:%d r:%d m:%ld p:%lu\n",
+ fs_id_buf, state2str(gl->gl_state),
+ gl->gl_name.ln_type,
+ (unsigned long long)gl->gl_name.ln_number,
+ gflags2str(gflags_buf, gl),
+ state2str(gl->gl_target),
+ state2str(gl->gl_demote_state), dtime,
+ atomic_read(&gl->gl_ail_count),
+ atomic_read(&gl->gl_revokes),
+ (int)gl->gl_lockref.count, gl->gl_hold_time, nrpages);
list_for_each_entry(gh, &gl->gl_holders, gh_list)
dump_holder(seq, gh, fs_id_buf);
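
gfs2_queue_delete_work() and gfs2_cancel_delete_work() above are thin wrappers that keep a "pending" bit coherent with the delayed-work state. The generic shape of that pattern, sketched with made-up names:

/* Queue delayed work and remember that it is pending. */
static bool my_queue(struct my_obj *obj, unsigned long delay)
{
	bool queued;

	spin_lock(&obj->lock);
	queued = queue_delayed_work(my_wq, &obj->dwork, delay);
	if (queued)
		set_bit(MY_PENDING, &obj->flags);
	spin_unlock(&obj->lock);
	return queued;
}

static void my_cancel(struct my_obj *obj)
{
	/* Waits for a running callback; safe to clear the bit after. */
	if (cancel_delayed_work_sync(&obj->dwork))
		clear_bit(MY_PENDING, &obj->flags);
}
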
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index b8adaf80e4c5..53813364517b 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -205,6 +205,15 @@ extern void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl,
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { \
gfs2_dump_glock(NULL, gl, true); \
BUG(); } } while(0)
+#define gfs2_glock_assert_warn(gl, x) do { if (unlikely(!(x))) { \
+ gfs2_dump_glock(NULL, gl, true); \
+ gfs2_assert_warn((gl)->gl_name.ln_sbd, (x)); } } \
+ while (0)
+#define gfs2_glock_assert_withdraw(gl, x) do { if (unlikely(!(x))) { \
+ gfs2_dump_glock(NULL, gl, true); \
+ gfs2_assert_withdraw((gl)->gl_name.ln_sbd, (x)); } } \
+ while (0)
+
extern __printf(2, 3)
void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...);
@@ -235,6 +244,10 @@ static inline int gfs2_glock_nq_init(struct gfs2_glock *gl,
extern void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state);
extern void gfs2_glock_complete(struct gfs2_glock *gl, int ret);
+extern bool gfs2_queue_delete_work(struct gfs2_glock *gl, unsigned long delay);
+extern void gfs2_cancel_delete_work(struct gfs2_glock *gl);
+extern bool gfs2_delete_work_queued(const struct gfs2_glock *gl);
+extern void gfs2_flush_delete_work(struct gfs2_sbd *sdp);
extern void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
extern void gfs2_glock_finish_truncate(struct gfs2_inode *ip);
extern void gfs2_glock_thaw(struct gfs2_sbd *sdp);
@@ -306,4 +319,7 @@ static inline void glock_clear_object(struct gfs2_glock *gl, void *object)
spin_unlock(&gl->gl_lockref.lock);
}
+extern void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation);
+extern bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation);
+
#endif /* __GLOCK_DOT_H__ */
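
The remember-delete helpers declared here stamp the deleted inode's generation into the lock value block in big-endian form so that all cluster nodes agree on the layout. A userspace sketch of the same encode/check dance (the struct layout is illustrative, not the on-disk one):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MY_MAGIC 0x01161970u	/* GFS2_MAGIC, for flavor */

struct my_lvb {
	uint32_t magic;			/* big-endian */
	uint32_t pad;
	uint64_t generation_deleted;	/* big-endian */
};

static void remember_delete(struct my_lvb *lvb, uint64_t gen)
{
	if (lvb->magic == 0)
		lvb->magic = htobe32(MY_MAGIC);
	if (lvb->magic == htobe32(MY_MAGIC))
		lvb->generation_deleted = htobe64(gen);
}

static int already_deleted(const struct my_lvb *lvb, uint64_t gen)
{
	if (lvb->magic != htobe32(MY_MAGIC))
		return 0;
	return gen <= be64toh(lvb->generation_deleted);
}

int main(void)
{
	struct my_lvb lvb;

	memset(&lvb, 0, sizeof(lvb));
	remember_delete(&lvb, 42);
	printf("gen 41 deleted: %d\n", already_deleted(&lvb, 41)); /* 1 */
	printf("gen 43 deleted: %d\n", already_deleted(&lvb, 43)); /* 0 */
	return 0;
}
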
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 9e9c7a4b8c66..c84887769b5a 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -91,6 +91,8 @@ static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
memset(&tr, 0, sizeof(tr));
INIT_LIST_HEAD(&tr.tr_buf);
INIT_LIST_HEAD(&tr.tr_databuf);
+ INIT_LIST_HEAD(&tr.tr_ail1_list);
+ INIT_LIST_HEAD(&tr.tr_ail2_list);
tr.tr_revokes = atomic_read(&gl->gl_ail_count);
if (!tr.tr_revokes) {
@@ -268,7 +270,7 @@ static int inode_go_sync(struct gfs2_glock *gl)
struct gfs2_inode *ip = gfs2_glock2inode(gl);
int isreg = ip && S_ISREG(ip->i_inode.i_mode);
struct address_space *metamapping = gfs2_glock2aspace(gl);
- int error = 0;
+ int error = 0, ret;
if (isreg) {
if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
@@ -289,8 +291,10 @@ static int inode_go_sync(struct gfs2_glock *gl)
error = filemap_fdatawait(mapping);
mapping_set_error(mapping, error);
}
- error = filemap_fdatawait(metamapping);
- mapping_set_error(metamapping, error);
+ ret = filemap_fdatawait(metamapping);
+ mapping_set_error(metamapping, ret);
+ if (!error)
+ error = ret;
gfs2_ail_empty_gl(gl);
/*
* Writeback of the data mapping may cause the dirty flag to be set
@@ -608,11 +612,17 @@ static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
if (gl->gl_demote_state == LM_ST_UNLOCKED &&
gl->gl_state == LM_ST_SHARED && ip) {
gl->gl_lockref.count++;
- if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
+ if (!queue_delayed_work(gfs2_delete_workqueue,
+ &gl->gl_delete, 0))
gl->gl_lockref.count--;
}
}
+static int iopen_go_demote_ok(const struct gfs2_glock *gl)
+{
+ return !gfs2_delete_work_queued(gl);
+}
+
/**
* inode_go_free - wake up anyone waiting for dlm's unlock ast to free it
* @gl: glock being freed
@@ -692,7 +702,7 @@ const struct gfs2_glock_operations gfs2_inode_glops = {
.go_lock = inode_go_lock,
.go_dump = inode_go_dump,
.go_type = LM_TYPE_INODE,
- .go_flags = GLOF_ASPACE | GLOF_LRU,
+ .go_flags = GLOF_ASPACE | GLOF_LRU | GLOF_LVB,
.go_free = inode_go_free,
};
@@ -716,6 +726,7 @@ const struct gfs2_glock_operations gfs2_freeze_glops = {
const struct gfs2_glock_operations gfs2_iopen_glops = {
.go_type = LM_TYPE_IOPEN,
.go_callback = iopen_go_callback,
+ .go_demote_ok = iopen_go_demote_ok,
.go_flags = GLOF_LRU | GLOF_NONDISK,
};
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 84a824293a78..03ab11fab962 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -345,6 +345,7 @@ enum {
GLF_OBJECT = 14, /* Used only for tracing */
GLF_BLOCKING = 15,
GLF_INODE_CREATING = 16, /* Inode creation occurring */
+ GLF_PENDING_DELETE = 17,
GLF_FREEING = 18, /* Wait for glock to be freed */
};
@@ -378,8 +379,11 @@ struct gfs2_glock {
atomic_t gl_revokes;
struct delayed_work gl_work;
union {
- /* For inode and iopen glocks only */
- struct work_struct gl_delete;
+ /* For iopen glocks only */
+ struct {
+ struct delayed_work gl_delete;
+ u64 gl_no_formal_ino;
+ };
/* For rgrp glocks only */
struct {
loff_t start;
@@ -398,6 +402,7 @@ enum {
GIF_ORDERED = 4,
GIF_FREE_VFS_INODE = 5,
GIF_GLOP_PENDING = 6,
+ GIF_DEFERRED_DELETE = 7,
};
struct gfs2_inode {
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 5acd3ce30759..370c3a4b31ac 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -17,6 +17,7 @@
#include <linux/crc32.h>
#include <linux/iomap.h>
#include <linux/security.h>
+#include <linux/fiemap.h>
#include <linux/uaccess.h>
#include "gfs2.h"
@@ -114,6 +115,10 @@ static void gfs2_set_iop(struct inode *inode)
* placeholder because it doesn't otherwise make sense), the on-disk block type
* is verified to be @blktype.
*
+ * When @no_formal_ino is non-zero, this function will return ERR_PTR(-ESTALE)
+ * if it detects that @no_formal_ino doesn't match the actual inode generation
+ * number. However, it cannot always detect a mismatch unless @type is
+ * DT_UNKNOWN.
+ *
* Returns: A VFS inode, or an error
*/
@@ -157,6 +162,11 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
if (error)
goto fail;
+ error = -ESTALE;
+ if (no_formal_ino &&
+ gfs2_inode_already_deleted(ip->i_gl, no_formal_ino))
+ goto fail;
+
if (blktype != GFS2_BLKST_FREE) {
error = gfs2_check_blk_type(sdp, no_addr,
blktype);
@@ -170,6 +180,7 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
if (unlikely(error))
goto fail;
+ gfs2_cancel_delete_work(ip->i_iopen_gh.gh_gl);
glock_set_object(ip->i_iopen_gh.gh_gl, ip);
gfs2_glock_put(io_gl);
io_gl = NULL;
@@ -188,13 +199,23 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
inode->i_mode = DT2IF(type);
}
+ if (gfs2_holder_initialized(&i_gh))
+ gfs2_glock_dq_uninit(&i_gh);
+
gfs2_set_iop(inode);
+ }
- unlock_new_inode(inode);
+ if (no_formal_ino && ip->i_no_formal_ino &&
+ no_formal_ino != ip->i_no_formal_ino) {
+ if (inode->i_state & I_NEW)
+ goto fail;
+ iput(inode);
+ return ERR_PTR(-ESTALE);
}
- if (gfs2_holder_initialized(&i_gh))
- gfs2_glock_dq_uninit(&i_gh);
+ if (inode->i_state & I_NEW)
+ unlock_new_inode(inode);
+
return inode;
fail:
@@ -206,23 +227,26 @@ fail:
return ERR_PTR(error);
}
+/**
+ * gfs2_lookup_by_inum - look up an inode by inode number
+ * @sdp: The super block
+ * @no_addr: The inode number
+ * @no_formal_ino: The inode generation number (0 for any)
+ * @blktype: Requested block type (see gfs2_inode_lookup)
+ */
struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
- u64 *no_formal_ino, unsigned int blktype)
+ u64 no_formal_ino, unsigned int blktype)
{
struct super_block *sb = sdp->sd_vfs;
struct inode *inode;
int error;
- inode = gfs2_inode_lookup(sb, DT_UNKNOWN, no_addr, 0, blktype);
+ inode = gfs2_inode_lookup(sb, DT_UNKNOWN, no_addr, no_formal_ino,
+ blktype);
if (IS_ERR(inode))
return inode;
- /* Two extra checks for NFS only */
if (no_formal_ino) {
- error = -ESTALE;
- if (GFS2_I(inode)->i_no_formal_ino != *no_formal_ino)
- goto fail_iput;
-
error = -EIO;
if (GFS2_I(inode)->i_diskflags & GFS2_DIF_SYSTEM)
goto fail_iput;
@@ -724,6 +748,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
if (error)
goto fail_gunlock2;
+ gfs2_cancel_delete_work(ip->i_iopen_gh.gh_gl);
glock_set_object(ip->i_iopen_gh.gh_gl, ip);
gfs2_set_iop(inode);
insert_inode_hash(inode);
@@ -780,7 +805,8 @@ fail_gunlock2:
fail_free_inode:
if (ip->i_gl) {
glock_clear_object(ip->i_gl, ip);
- gfs2_glock_put(ip->i_gl);
+ if (free_vfs_inode) /* else evict will do the put for us */
+ gfs2_glock_put(ip->i_gl);
}
gfs2_rs_delete(ip, NULL);
gfs2_qa_put(ip);
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
index 580adbf0b5e1..b52ecf4ffe63 100644
--- a/fs/gfs2/inode.h
+++ b/fs/gfs2/inode.h
@@ -92,7 +92,7 @@ extern struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type,
u64 no_addr, u64 no_formal_ino,
unsigned int blktype);
extern struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
- u64 *no_formal_ino,
+ u64 no_formal_ino,
unsigned int blktype);
extern int gfs2_inode_refresh(struct gfs2_inode *ip);
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 0644e58c6191..3e4734431783 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -30,6 +30,7 @@
#include "util.h"
#include "dir.h"
#include "trace_gfs2.h"
+#include "trans.h"
static void gfs2_log_shutdown(struct gfs2_sbd *sdp);
@@ -145,9 +146,6 @@ static void dump_ail_list(struct gfs2_sbd *sdp)
struct gfs2_bufdata *bd;
struct buffer_head *bh;
- fs_err(sdp, "Error: In gfs2_ail1_flush for ten minutes! t=%d\n",
- current->journal_info ? 1 : 0);
-
list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
list_for_each_entry_reverse(bd, &tr->tr_ail1_list,
bd_ail_st_list) {
@@ -197,6 +195,8 @@ void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
restart:
ret = 0;
if (time_after(jiffies, flush_start + (HZ * 600))) {
+ fs_err(sdp, "Error: In %s for ten minutes! t=%d\n",
+ __func__, current->journal_info ? 1 : 0);
dump_ail_list(sdp);
goto out;
}
@@ -379,7 +379,7 @@ static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
list_del(&tr->tr_list);
gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list));
gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list));
- kfree(tr);
+ gfs2_trans_free(sdp, tr);
}
spin_unlock(&sdp->sd_ail_lock);
@@ -864,19 +864,41 @@ static void ail_drain(struct gfs2_sbd *sdp)
gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail1_list);
gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail2_list);
list_del(&tr->tr_list);
- kfree(tr);
+ gfs2_trans_free(sdp, tr);
}
while (!list_empty(&sdp->sd_ail2_list)) {
tr = list_first_entry(&sdp->sd_ail2_list, struct gfs2_trans,
tr_list);
gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail2_list);
list_del(&tr->tr_list);
- kfree(tr);
+ gfs2_trans_free(sdp, tr);
}
spin_unlock(&sdp->sd_ail_lock);
}
/**
+ * empty_ail1_list - try to start IO and empty the ail1 list
+ * @sdp: Pointer to GFS2 superblock
+ */
+static void empty_ail1_list(struct gfs2_sbd *sdp)
+{
+ unsigned long start = jiffies;
+
+ for (;;) {
+ if (time_after(jiffies, start + (HZ * 600))) {
+ fs_err(sdp, "Error: In %s for 10 minutes! t=%d\n",
+ __func__, current->journal_info ? 1 : 0);
+ dump_ail_list(sdp);
+ return;
+ }
+ gfs2_ail1_start(sdp);
+ gfs2_ail1_wait(sdp);
+ if (gfs2_ail1_empty(sdp, 0))
+ return;
+ }
+}
+
+/**
* gfs2_log_flush - flush incore transaction(s)
* @sdp: the filesystem
* @gl: The glock structure to flush. If NULL, flush the whole incore log
@@ -912,8 +934,6 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
tr = sdp->sd_log_tr;
if (tr) {
sdp->sd_log_tr = NULL;
- INIT_LIST_HEAD(&tr->tr_ail1_list);
- INIT_LIST_HEAD(&tr->tr_ail2_list);
tr->tr_first = sdp->sd_log_flush_head;
if (unlikely (state == SFS_FROZEN))
if (gfs2_assert_withdraw_delayed(sdp,
@@ -965,12 +985,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
if (!(flags & GFS2_LOG_HEAD_FLUSH_NORMAL)) {
if (!sdp->sd_log_idle) {
- for (;;) {
- gfs2_ail1_start(sdp);
- gfs2_ail1_wait(sdp);
- if (gfs2_ail1_empty(sdp, 0))
- break;
- }
+ empty_ail1_list(sdp);
if (gfs2_withdrawn(sdp))
goto out;
atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
@@ -994,7 +1009,7 @@ out:
trace_gfs2_log_flush(sdp, 0, flags);
up_write(&sdp->sd_log_flush_lock);
- kfree(tr);
+ gfs2_trans_free(sdp, tr);
}
/**
@@ -1003,8 +1018,10 @@ out:
* @new: New transaction to be merged
*/
-static void gfs2_merge_trans(struct gfs2_trans *old, struct gfs2_trans *new)
+static void gfs2_merge_trans(struct gfs2_sbd *sdp, struct gfs2_trans *new)
{
+ struct gfs2_trans *old = sdp->sd_log_tr;
+
WARN_ON_ONCE(!test_bit(TR_ATTACHED, &old->tr_flags));
old->tr_num_buf_new += new->tr_num_buf_new;
@@ -1016,6 +1033,11 @@ static void gfs2_merge_trans(struct gfs2_trans *old, struct gfs2_trans *new)
list_splice_tail_init(&new->tr_databuf, &old->tr_databuf);
list_splice_tail_init(&new->tr_buf, &old->tr_buf);
+
+ spin_lock(&sdp->sd_ail_lock);
+ list_splice_tail_init(&new->tr_ail1_list, &old->tr_ail1_list);
+ list_splice_tail_init(&new->tr_ail2_list, &old->tr_ail2_list);
+ spin_unlock(&sdp->sd_ail_lock);
}
static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
@@ -1027,7 +1049,7 @@ static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
gfs2_log_lock(sdp);
if (sdp->sd_log_tr) {
- gfs2_merge_trans(sdp->sd_log_tr, tr);
+ gfs2_merge_trans(sdp, tr);
} else if (tr->tr_num_buf_new || tr->tr_num_databuf_new) {
gfs2_assert_withdraw(sdp, test_bit(TR_ALLOCED, &tr->tr_flags));
sdp->sd_log_tr = tr;
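
empty_ail1_list() bounds the previously endless flush loop with the standard jiffies deadline idiom. A fragment of that idiom, with make_progress() as a hypothetical placeholder for one flush/wait/check round:

	unsigned long start = jiffies;

	for (;;) {
		if (time_after(jiffies, start + 600 * HZ)) {
			/* ten minutes without progress: report and bail */
			pr_err("no progress in %s for ten minutes\n", __func__);
			break;
		}
		if (make_progress())	/* hypothetical step function */
			break;
	}
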
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index a1a295b739fb..733470ca6be9 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -143,6 +143,12 @@ static int __init init_gfs2_fs(void)
if (!gfs2_qadata_cachep)
goto fail_cachep7;
+ gfs2_trans_cachep = kmem_cache_create("gfs2_trans",
+ sizeof(struct gfs2_trans),
+ 0, 0, NULL);
+ if (!gfs2_trans_cachep)
+ goto fail_cachep8;
+
error = register_shrinker(&gfs2_qd_shrinker);
if (error)
goto fail_shrinker;
@@ -194,6 +200,8 @@ fail_fs2:
fail_fs1:
unregister_shrinker(&gfs2_qd_shrinker);
fail_shrinker:
+ kmem_cache_destroy(gfs2_trans_cachep);
+fail_cachep8:
kmem_cache_destroy(gfs2_qadata_cachep);
fail_cachep7:
kmem_cache_destroy(gfs2_quotad_cachep);
@@ -236,6 +244,7 @@ static void __exit exit_gfs2_fs(void)
rcu_barrier();
mempool_destroy(gfs2_page_pool);
+ kmem_cache_destroy(gfs2_trans_cachep);
kmem_cache_destroy(gfs2_qadata_cachep);
kmem_cache_destroy(gfs2_quotad_cachep);
kmem_cache_destroy(gfs2_rgrpd_cachep);
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index e2b69ffcc6a8..094f5fe7c009 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -880,7 +880,7 @@ fail:
}
static const match_table_t nolock_tokens = {
- { Opt_jid, "jid=%d\n", },
+ { Opt_jid, "jid=%d", },
{ Opt_err, NULL },
};
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index a321c34e3d6e..074f228ea839 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -1835,7 +1835,7 @@ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip
*/
ip = gl->gl_object;
- if (ip || queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
+ if (ip || !gfs2_queue_delete_work(gl, 0))
gfs2_glock_put(gl);
else
found++;
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 956fced0a8ec..32d8d26126a1 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -626,7 +626,7 @@ int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
}
}
- flush_workqueue(gfs2_delete_workqueue);
+ gfs2_flush_delete_work(sdp);
if (!log_write_allowed && current == sdp->sd_quotad_process)
fs_warn(sdp, "The quotad daemon is withdrawing.\n");
else if (sdp->sd_quotad_process)
@@ -1054,7 +1054,7 @@ static int gfs2_drop_inode(struct inode *inode)
struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
gfs2_glock_hold(gl);
- if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
+ if (!gfs2_queue_delete_work(gl, 0))
gfs2_glock_queue_put(gl);
return false;
}
@@ -1258,6 +1258,55 @@ static void gfs2_glock_put_eventually(struct gfs2_glock *gl)
gfs2_glock_put(gl);
}
+static bool gfs2_upgrade_iopen_glock(struct inode *inode)
+{
+ struct gfs2_inode *ip = GFS2_I(inode);
+ struct gfs2_sbd *sdp = GFS2_SB(inode);
+ struct gfs2_holder *gh = &ip->i_iopen_gh;
+ long timeout = 5 * HZ;
+ int error;
+
+ gh->gh_flags |= GL_NOCACHE;
+ gfs2_glock_dq_wait(gh);
+
+ /*
+ * If there are no other lock holders, we'll get the lock immediately.
+ * Otherwise, the other nodes holding the lock will be notified about
+ * our locking request. If they don't have the inode open, they'll
+ * evict the cached inode and release the lock. Otherwise, if they
+ * poke the inode glock, we'll take this as an indication that they
+ * still need the iopen glock and that they'll take care of deleting
+ * the inode when they're done. As a last resort, if another node
+ * keeps holding the iopen glock without showing any activity on the
+ * inode glock, we'll eventually time out.
+ *
+ * Note that we're passing the LM_FLAG_TRY_1CB flag to the first
+ * locking request as an optimization to notify lock holders as soon as
+ * possible. Without that flag, they'd be notified implicitly by the
+ * second locking request.
+ */
+
+ gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, gh);
+ error = gfs2_glock_nq(gh);
+ if (error != GLR_TRYFAILED)
+ return !error;
+
+ gfs2_holder_reinit(LM_ST_EXCLUSIVE, GL_ASYNC | GL_NOCACHE, gh);
+ error = gfs2_glock_nq(gh);
+ if (error)
+ return false;
+
+ timeout = wait_event_interruptible_timeout(sdp->sd_async_glock_wait,
+ !test_bit(HIF_WAIT, &gh->gh_iflags) ||
+ test_bit(GLF_DEMOTE, &ip->i_gl->gl_flags),
+ timeout);
+ if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) {
+ gfs2_glock_dq(gh);
+ return false;
+ }
+ return true;
+}
+
/**
* gfs2_evict_inode - Remove an inode from cache
* @inode: The inode to evict
@@ -1299,9 +1348,12 @@ static void gfs2_evict_inode(struct inode *inode)
if (test_bit(GIF_ALLOC_FAILED, &ip->i_flags)) {
BUG_ON(!gfs2_glock_is_locked_by_me(ip->i_gl));
gfs2_holder_mark_uninitialized(&gh);
- goto alloc_failed;
+ goto out_delete;
}
+ if (test_bit(GIF_DEFERRED_DELETE, &ip->i_flags))
+ goto out;
+
/* Deletes should never happen under memory pressure anymore. */
if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
goto out;
@@ -1315,6 +1367,8 @@ static void gfs2_evict_inode(struct inode *inode)
goto out;
}
+ if (gfs2_inode_already_deleted(ip->i_gl, ip->i_no_formal_ino))
+ goto out_truncate;
error = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED);
if (error)
goto out_truncate;
@@ -1331,16 +1385,13 @@ static void gfs2_evict_inode(struct inode *inode)
if (inode->i_nlink)
goto out_truncate;
-alloc_failed:
+out_delete:
if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
- ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
- gfs2_glock_dq_wait(&ip->i_iopen_gh);
- gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE,
- &ip->i_iopen_gh);
- error = gfs2_glock_nq(&ip->i_iopen_gh);
- if (error)
+ if (!gfs2_upgrade_iopen_glock(inode)) {
+ gfs2_holder_uninit(&ip->i_iopen_gh);
goto out_truncate;
+ }
}
if (S_ISDIR(inode->i_mode) &&
@@ -1368,6 +1419,7 @@ alloc_failed:
that subsequent inode creates don't see an old gl_object. */
glock_clear_object(ip->i_gl, ip);
error = gfs2_dinode_dealloc(ip);
+ gfs2_inode_remember_delete(ip->i_gl, ip->i_no_formal_ino);
goto out_unlock;
out_truncate:
diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
index ffe840505082..a3dfa3aa87ad 100644
--- a/fs/gfs2/trans.c
+++ b/fs/gfs2/trans.c
@@ -37,7 +37,7 @@ int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
return -EROFS;
- tr = kzalloc(sizeof(struct gfs2_trans), GFP_NOFS);
+ tr = kmem_cache_zalloc(gfs2_trans_cachep, GFP_NOFS);
if (!tr)
return -ENOMEM;
@@ -52,6 +52,8 @@ int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
tr->tr_reserved += gfs2_struct2blk(sdp, revokes);
INIT_LIST_HEAD(&tr->tr_databuf);
INIT_LIST_HEAD(&tr->tr_buf);
+ INIT_LIST_HEAD(&tr->tr_ail1_list);
+ INIT_LIST_HEAD(&tr->tr_ail2_list);
sb_start_intwrite(sdp->sd_vfs);
@@ -65,7 +67,7 @@ int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
fail:
sb_end_intwrite(sdp->sd_vfs);
- kfree(tr);
+ kmem_cache_free(gfs2_trans_cachep, tr);
return error;
}
@@ -93,7 +95,7 @@ void gfs2_trans_end(struct gfs2_sbd *sdp)
if (!test_bit(TR_TOUCHED, &tr->tr_flags)) {
gfs2_log_release(sdp, tr->tr_reserved);
if (alloced) {
- kfree(tr);
+ gfs2_trans_free(sdp, tr);
sb_end_intwrite(sdp->sd_vfs);
}
return;
@@ -109,7 +111,7 @@ void gfs2_trans_end(struct gfs2_sbd *sdp)
gfs2_log_commit(sdp, tr);
if (alloced && !test_bit(TR_ATTACHED, &tr->tr_flags))
- kfree(tr);
+ gfs2_trans_free(sdp, tr);
up_read(&sdp->sd_log_flush_lock);
if (sdp->sd_vfs->s_flags & SB_SYNCHRONOUS)
@@ -276,3 +278,14 @@ void gfs2_trans_remove_revoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len)
gfs2_log_unlock(sdp);
}
+void gfs2_trans_free(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
+{
+ if (tr == NULL)
+ return;
+
+ gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list));
+ gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list));
+ gfs2_assert_warn(sdp, list_empty(&tr->tr_databuf));
+ gfs2_assert_warn(sdp, list_empty(&tr->tr_buf));
+ kmem_cache_free(gfs2_trans_cachep, tr);
+}
diff --git a/fs/gfs2/trans.h b/fs/gfs2/trans.h
index 6071334de035..83199ce5a5c5 100644
--- a/fs/gfs2/trans.h
+++ b/fs/gfs2/trans.h
@@ -42,5 +42,6 @@ extern void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh);
extern void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh);
extern void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
extern void gfs2_trans_remove_revoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len);
+extern void gfs2_trans_free(struct gfs2_sbd *sdp, struct gfs2_trans *tr);
#endif /* __TRANS_DOT_H__ */
diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
index aa087a5675af..1cd0328cae20 100644
--- a/fs/gfs2/util.c
+++ b/fs/gfs2/util.c
@@ -32,6 +32,7 @@ struct kmem_cache *gfs2_bufdata_cachep __read_mostly;
struct kmem_cache *gfs2_rgrpd_cachep __read_mostly;
struct kmem_cache *gfs2_quotad_cachep __read_mostly;
struct kmem_cache *gfs2_qadata_cachep __read_mostly;
+struct kmem_cache *gfs2_trans_cachep __read_mostly;
mempool_t *gfs2_page_pool __read_mostly;
void gfs2_assert_i(struct gfs2_sbd *sdp)
diff --git a/fs/gfs2/util.h b/fs/gfs2/util.h
index a3542560da6f..6d9157efe16c 100644
--- a/fs/gfs2/util.h
+++ b/fs/gfs2/util.h
@@ -172,6 +172,7 @@ extern struct kmem_cache *gfs2_bufdata_cachep;
extern struct kmem_cache *gfs2_rgrpd_cachep;
extern struct kmem_cache *gfs2_quotad_cachep;
extern struct kmem_cache *gfs2_qadata_cachep;
+extern struct kmem_cache *gfs2_trans_cachep;
extern mempool_t *gfs2_page_pool;
extern struct workqueue_struct *gfs2_control_wq;
diff --git a/fs/hpfs/buffer.c b/fs/hpfs/buffer.c
index e285d6b3bba4..d39246865c51 100644
--- a/fs/hpfs/buffer.c
+++ b/fs/hpfs/buffer.c
@@ -53,7 +53,7 @@ void hpfs_prefetch_sectors(struct super_block *s, unsigned secno, int n)
return;
}
brelse(bh);
- };
+ }
blk_start_plug(&plug);
while (n > 0) {
diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c
index 2de0d3492d15..077c25128eb7 100644
--- a/fs/hpfs/file.c
+++ b/fs/hpfs/file.c
@@ -9,6 +9,7 @@
#include "hpfs_fn.h"
#include <linux/mpage.h>
+#include <linux/fiemap.h>
#define BLOCKS(size) (((size) + 511) >> 9)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 991c60c7ffe0..ef5313f9c78f 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -38,6 +38,7 @@
#include <linux/uio.h>
#include <linux/uaccess.h>
+#include <linux/sched/mm.h>
static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
@@ -186,18 +187,65 @@ out:
}
/*
- * Called under down_write(mmap_sem).
+ * Called under mmap_write_lock(mm).
*/
#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
+hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+ struct hstate *h = hstate_file(file);
+ struct vm_unmapped_area_info info;
+
+ info.flags = 0;
+ info.length = len;
+ info.low_limit = current->mm->mmap_base;
+ info.high_limit = TASK_SIZE;
+ info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+ info.align_offset = 0;
+ return vm_unmapped_area(&info);
+}
+
+static unsigned long
+hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+ struct hstate *h = hstate_file(file);
+ struct vm_unmapped_area_info info;
+
+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+ info.length = len;
+ info.low_limit = max(PAGE_SIZE, mmap_min_addr);
+ info.high_limit = current->mm->mmap_base;
+ info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+ info.align_offset = 0;
+ addr = vm_unmapped_area(&info);
+
+ /*
+ * A failed mmap() very likely causes application failure,
+ * so fall back to the bottom-up function here. This scenario
+ * can happen with large stack limits and large mmap()
+ * allocations.
+ */
+ if (unlikely(offset_in_page(addr))) {
+ VM_BUG_ON(addr != -ENOMEM);
+ info.flags = 0;
+ info.low_limit = current->mm->mmap_base;
+ info.high_limit = TASK_SIZE;
+ addr = vm_unmapped_area(&info);
+ }
+
+ return addr;
+}
+
+static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
struct hstate *h = hstate_file(file);
- struct vm_unmapped_area_info info;
if (len & ~huge_page_mask(h))
return -EINVAL;
@@ -218,13 +266,16 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
return addr;
}
- info.flags = 0;
- info.length = len;
- info.low_limit = TASK_UNMAPPED_BASE;
- info.high_limit = TASK_SIZE;
- info.align_mask = PAGE_MASK & ~huge_page_mask(h);
- info.align_offset = 0;
- return vm_unmapped_area(&info);
+ /*
+ * Use mm->get_unmapped_area value as a hint to use topdown routine.
+ * If architectures have special needs, they should define their own
+ * version of hugetlb_get_unmapped_area.
+ */
+ if (mm->get_unmapped_area == arch_get_unmapped_area_topdown)
+ return hugetlb_get_unmapped_area_topdown(file, addr, len,
+ pgoff, flags);
+ return hugetlb_get_unmapped_area_bottomup(file, addr, len,
+ pgoff, flags);
}
#endif
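In both new helpers the align_mask handed to vm_unmapped_area() is PAGE_MASK & ~huge_page_mask(h): only the bits between the base page size and the huge page size survive, which makes the allocator return huge-page-aligned addresses. A standalone illustration, assuming 4 KiB base pages and 2 MiB huge pages:

#include <stdio.h>

int main(void)
{
	unsigned long page_size  = 4096UL;	/* assumed */
	unsigned long hpage_size = 2UL << 20;	/* assumed 2 MiB */
	unsigned long page_mask  = ~(page_size - 1);
	unsigned long hpage_mask = ~(hpage_size - 1);

	/* the expression both helpers pass as info.align_mask */
	printf("align_mask = %#lx\n", page_mask & ~hpage_mask);
	/* prints 0x1ff000: results are rounded to 2 MiB boundaries */
	return 0;
}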
diff --git a/fs/inode.c b/fs/inode.c
index 37226a9cfa4f..72c4c347afb7 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -108,7 +108,7 @@ long get_nr_dirty_inodes(void)
*/
#ifdef CONFIG_SYSCTL
int proc_nr_inodes(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
inodes_stat.nr_inodes = get_nr_inodes();
inodes_stat.nr_unused = get_nr_inodes_unused();
@@ -497,7 +497,7 @@ void __insert_inode_hash(struct inode *inode, unsigned long hashval)
spin_lock(&inode_hash_lock);
spin_lock(&inode->i_lock);
- hlist_add_head(&inode->i_hash, b);
+ hlist_add_head_rcu(&inode->i_hash, b);
spin_unlock(&inode->i_lock);
spin_unlock(&inode_hash_lock);
}
@@ -513,7 +513,7 @@ void __remove_inode_hash(struct inode *inode)
{
spin_lock(&inode_hash_lock);
spin_lock(&inode->i_lock);
- hlist_del_init(&inode->i_hash);
+ hlist_del_init_rcu(&inode->i_hash);
spin_unlock(&inode->i_lock);
spin_unlock(&inode_hash_lock);
}
@@ -1107,7 +1107,7 @@ again:
*/
spin_lock(&inode->i_lock);
inode->i_state |= I_NEW;
- hlist_add_head(&inode->i_hash, head);
+ hlist_add_head_rcu(&inode->i_hash, head);
spin_unlock(&inode->i_lock);
if (!creating)
inode_sb_list_add(inode);
@@ -1201,7 +1201,7 @@ again:
inode->i_ino = ino;
spin_lock(&inode->i_lock);
inode->i_state = I_NEW;
- hlist_add_head(&inode->i_hash, head);
+ hlist_add_head_rcu(&inode->i_hash, head);
spin_unlock(&inode->i_lock);
inode_sb_list_add(inode);
spin_unlock(&inode_hash_lock);
@@ -1244,15 +1244,10 @@ static int test_inode_iunique(struct super_block *sb, unsigned long ino)
struct hlist_head *b = inode_hashtable + hash(sb, ino);
struct inode *inode;
- spin_lock(&inode_hash_lock);
- hlist_for_each_entry(inode, b, i_hash) {
- if (inode->i_ino == ino && inode->i_sb == sb) {
- spin_unlock(&inode_hash_lock);
+ hlist_for_each_entry_rcu(inode, b, i_hash) {
+ if (inode->i_ino == ino && inode->i_sb == sb)
return 0;
- }
}
- spin_unlock(&inode_hash_lock);
-
return 1;
}
@@ -1281,6 +1276,7 @@ ino_t iunique(struct super_block *sb, ino_t max_reserved)
static unsigned int counter;
ino_t res;
+ rcu_read_lock();
spin_lock(&iunique_lock);
do {
if (counter <= max_reserved)
@@ -1288,6 +1284,7 @@ ino_t iunique(struct super_block *sb, ino_t max_reserved)
res = counter++;
} while (!test_inode_iunique(sb, res));
spin_unlock(&iunique_lock);
+ rcu_read_unlock();
return res;
}
@@ -1456,6 +1453,84 @@ out:
}
EXPORT_SYMBOL(find_inode_nowait);
+/**
+ * find_inode_rcu - find an inode in the inode cache
+ * @sb: Super block of file system to search
+ * @hashval: Key to hash
+ * @test: Function to test match on an inode
+ * @data: Data for test function
+ *
+ * Search for the inode specified by @hashval and @data in the inode cache,
+ * where the helper function @test will return 0 if the inode does not match
+ * and 1 if it does. The @test function must be responsible for taking the
+ * i_lock spin_lock and checking i_state for an inode being freed or being
+ * initialized.
+ *
+ * If successful, this will return the inode for which the @test function
+ * returned 1 and NULL otherwise.
+ *
+ * The @test function is not permitted to take a ref on any inode presented.
+ * It is also not permitted to sleep.
+ *
+ * The caller must hold the RCU read lock.
+ */
+struct inode *find_inode_rcu(struct super_block *sb, unsigned long hashval,
+ int (*test)(struct inode *, void *), void *data)
+{
+ struct hlist_head *head = inode_hashtable + hash(sb, hashval);
+ struct inode *inode;
+
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
+ "suspicious find_inode_rcu() usage");
+
+ hlist_for_each_entry_rcu(inode, head, i_hash) {
+ if (inode->i_sb == sb &&
+ !(READ_ONCE(inode->i_state) & (I_FREEING | I_WILL_FREE)) &&
+ test(inode, data))
+ return inode;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(find_inode_rcu);
+
+/**
+ * find_inode_by_ino_rcu - Find an inode in the inode cache
+ * @sb: Super block of file system to search
+ * @ino: The inode number to match
+ *
+ * Search for the inode specified by @ino in the inode cache of super block
+ * @sb. This is the inode-number analogue of find_inode_rcu(); no @test
+ * callback is needed, and inodes that are being freed are skipped
+ * internally.
+ *
+ * If successful, this will return the matching inode and NULL otherwise.
+ *
+ * The caller is not permitted to take a ref on any inode found, nor to
+ * sleep while using it.
+ *
+ * The caller must hold the RCU read lock.
+ */
+struct inode *find_inode_by_ino_rcu(struct super_block *sb,
+ unsigned long ino)
+{
+ struct hlist_head *head = inode_hashtable + hash(sb, ino);
+ struct inode *inode;
+
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
+ "suspicious find_inode_by_ino_rcu() usage");
+
+ hlist_for_each_entry_rcu(inode, head, i_hash) {
+ if (inode->i_ino == ino &&
+ inode->i_sb == sb &&
+ !(READ_ONCE(inode->i_state) & (I_FREEING | I_WILL_FREE)))
+ return inode;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(find_inode_by_ino_rcu);
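A minimal usage sketch for the new RCU lookups (hypothetical caller): the caller owns the read-side critical section, and the returned inode is only stable until rcu_read_unlock() unless a reference is taken first.

static bool example_inode_is_cached(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
	bool cached;

	rcu_read_lock();
	inode = find_inode_by_ino_rcu(sb, ino);
	cached = inode != NULL;
	/* *inode must not be touched after the unlock without a ref */
	rcu_read_unlock();
	return cached;
}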
+
int insert_inode_locked(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
@@ -1480,7 +1555,7 @@ int insert_inode_locked(struct inode *inode)
if (likely(!old)) {
spin_lock(&inode->i_lock);
inode->i_state |= I_NEW | I_CREATING;
- hlist_add_head(&inode->i_hash, head);
+ hlist_add_head_rcu(&inode->i_hash, head);
spin_unlock(&inode->i_lock);
spin_unlock(&inode_hash_lock);
return 0;
@@ -1540,6 +1615,7 @@ static void iput_final(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
const struct super_operations *op = inode->i_sb->s_op;
+ unsigned long state;
int drop;
WARN_ON(inode->i_state & I_NEW);
@@ -1555,16 +1631,20 @@ static void iput_final(struct inode *inode)
return;
}
+ state = inode->i_state;
if (!drop) {
- inode->i_state |= I_WILL_FREE;
+ WRITE_ONCE(inode->i_state, state | I_WILL_FREE);
spin_unlock(&inode->i_lock);
+
write_inode_now(inode, 1);
+
spin_lock(&inode->i_lock);
- WARN_ON(inode->i_state & I_NEW);
- inode->i_state &= ~I_WILL_FREE;
+ state = inode->i_state;
+ WARN_ON(state & I_NEW);
+ state &= ~I_WILL_FREE;
}
- inode->i_state |= I_FREEING;
+ WRITE_ONCE(inode->i_state, state | I_FREEING);
if (!list_empty(&inode->i_lru))
inode_lru_list_del(inode);
spin_unlock(&inode->i_lock);
diff --git a/fs/internal.h b/fs/internal.h
index b89d78f10396..9b863a7bd708 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -142,8 +142,6 @@ extern int dentry_needs_remove_privs(struct dentry *dentry);
/*
* fs-writeback.c
*/
-extern void inode_io_list_del(struct inode *inode);
-
extern long get_nr_dirty_inodes(void);
extern int invalidate_inodes(struct super_block *, bool);
diff --git a/fs/io-wq.c b/fs/io-wq.c
index d7dc638f4b8e..0b65a912b036 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -10,7 +10,6 @@
#include <linux/errno.h>
#include <linux/sched/signal.h>
#include <linux/mm.h>
-#include <linux/mmu_context.h>
#include <linux/sched/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
@@ -171,8 +170,7 @@ static bool __io_worker_unuse(struct io_wqe *wqe, struct io_worker *worker)
dropped_lock = true;
}
__set_current_state(TASK_RUNNING);
- set_fs(KERNEL_DS);
- unuse_mm(worker->mm);
+ kthread_unuse_mm(worker->mm);
mmput(worker->mm);
worker->mm = NULL;
}
@@ -419,18 +417,15 @@ static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
static void io_wq_switch_mm(struct io_worker *worker, struct io_wq_work *work)
{
if (worker->mm) {
- unuse_mm(worker->mm);
+ kthread_unuse_mm(worker->mm);
mmput(worker->mm);
worker->mm = NULL;
}
- if (!work->mm) {
- set_fs(KERNEL_DS);
+ if (!work->mm)
return;
- }
+
if (mmget_not_zero(work->mm)) {
- use_mm(work->mm);
- if (!worker->mm)
- set_fs(USER_DS);
+ kthread_use_mm(work->mm);
worker->mm = work->mm;
/* hang on to this mm */
work->mm = NULL;
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 61fca5afaac8..155f3d830ddb 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -55,7 +55,6 @@
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
-#include <linux/mmu_context.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kthread.h>
@@ -5827,7 +5826,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
if (io_op_defs[req->opcode].needs_mm && !current->mm) {
if (unlikely(!mmget_not_zero(ctx->sqo_mm)))
return -EFAULT;
- use_mm(ctx->sqo_mm);
+ kthread_use_mm(ctx->sqo_mm);
}
sqe_flags = READ_ONCE(sqe->flags);
@@ -5942,7 +5941,7 @@ static inline void io_sq_thread_drop_mm(struct io_ring_ctx *ctx)
struct mm_struct *mm = current->mm;
if (mm) {
- unuse_mm(mm);
+ kthread_unuse_mm(mm);
mmput(mm);
}
}
@@ -5951,15 +5950,12 @@ static int io_sq_thread(void *data)
{
struct io_ring_ctx *ctx = data;
const struct cred *old_cred;
- mm_segment_t old_fs;
DEFINE_WAIT(wait);
unsigned long timeout;
int ret = 0;
complete(&ctx->sq_thread_comp);
- old_fs = get_fs();
- set_fs(USER_DS);
old_cred = override_creds(ctx->creds);
timeout = jiffies + ctx->sq_thread_idle;
@@ -6064,7 +6060,6 @@ static int io_sq_thread(void *data)
if (current->task_works)
task_work_run();
- set_fs(old_fs);
io_sq_thread_drop_mm(ctx);
revert_creds(old_cred);
@@ -7148,7 +7143,7 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
}
ret = 0;
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
pret = pin_user_pages(ubuf, nr_pages,
FOLL_WRITE | FOLL_LONGTERM,
pages, vmas);
@@ -7166,7 +7161,7 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
} else {
ret = pret < 0 ? pret : -EFAULT;
}
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
if (ret) {
/*
* if we did partial map, or found file backed vmas,
diff --git a/fs/ioctl.c b/fs/ioctl.c
index 5e80b40bc1b5..d69786d1dd91 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -18,6 +18,7 @@
#include <linux/buffer_head.h>
#include <linux/falloc.h>
#include <linux/sched/signal.h>
+#include <linux/fiemap.h>
#include "internal.h"
@@ -148,61 +149,55 @@ int fiemap_fill_next_extent(struct fiemap_extent_info *fieinfo, u64 logical,
EXPORT_SYMBOL(fiemap_fill_next_extent);
/**
- * fiemap_check_flags - check validity of requested flags for fiemap
+ * fiemap_prep - check validity of requested flags for fiemap
+ * @inode: Inode to operate on
* @fieinfo: Fiemap context passed into ->fiemap
- * @fs_flags: Set of fiemap flags that the file system understands
+ * @start: Start of the mapped range
+ * @len: Length of the mapped range, which can be truncated by this function.
+ * @supported_flags: Set of fiemap flags that the file system understands
*
- * Called from file system ->fiemap callback. This will compute the
- * intersection of valid fiemap flags and those that the fs supports. That
- * value is then compared against the user supplied flags. In case of bad user
- * flags, the invalid values will be written into the fieinfo structure, and
- * -EBADR is returned, which tells ioctl_fiemap() to return those values to
- * userspace. For this reason, a return code of -EBADR should be preserved.
+ * This function must be called from each ->fiemap instance to validate the
+ * fiemap request against the file system parameters.
*
- * Returns 0 on success, -EBADR on bad flags.
+ * Returns 0 on success, or a negative error on failure.
*/
-int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags)
+int fiemap_prep(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ u64 start, u64 *len, u32 supported_flags)
{
+ u64 maxbytes = inode->i_sb->s_maxbytes;
u32 incompat_flags;
+ int ret = 0;
- incompat_flags = fieinfo->fi_flags & ~(FIEMAP_FLAGS_COMPAT & fs_flags);
- if (incompat_flags) {
- fieinfo->fi_flags = incompat_flags;
- return -EBADR;
- }
- return 0;
-}
-EXPORT_SYMBOL(fiemap_check_flags);
-
-static int fiemap_check_ranges(struct super_block *sb,
- u64 start, u64 len, u64 *new_len)
-{
- u64 maxbytes = (u64) sb->s_maxbytes;
-
- *new_len = len;
-
- if (len == 0)
+ if (*len == 0)
return -EINVAL;
-
if (start > maxbytes)
return -EFBIG;
/*
* Shrink request scope to what the fs can actually handle.
*/
- if (len > maxbytes || (maxbytes - len) < start)
- *new_len = maxbytes - start;
+ if (*len > maxbytes || (maxbytes - *len) < start)
+ *len = maxbytes - start;
+
+ supported_flags |= FIEMAP_FLAG_SYNC;
+ supported_flags &= FIEMAP_FLAGS_COMPAT;
+ incompat_flags = fieinfo->fi_flags & ~supported_flags;
+ if (incompat_flags) {
+ fieinfo->fi_flags = incompat_flags;
+ return -EBADR;
+ }
- return 0;
+ if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC)
+ ret = filemap_write_and_wait(inode->i_mapping);
+ return ret;
}
+EXPORT_SYMBOL(fiemap_prep);
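With fiemap_check_flags() and fiemap_check_ranges() folded together, every ->fiemap instance is expected to open with a single fiemap_prep() call. A sketch for a hypothetical filesystem (examplefs is made up; the extent walk is elided):

static int examplefs_fiemap(struct inode *inode,
			    struct fiemap_extent_info *fieinfo,
			    u64 start, u64 len)
{
	int ret;

	/* validates flags, clamps len to s_maxbytes, and honours
	 * FIEMAP_FLAG_SYNC by writing back dirty pagecache */
	ret = fiemap_prep(inode, fieinfo, start, &len, 0);
	if (ret)
		return ret;

	/* ... walk extents, calling fiemap_fill_next_extent() ... */
	return 0;
}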
static int ioctl_fiemap(struct file *filp, struct fiemap __user *ufiemap)
{
struct fiemap fiemap;
struct fiemap_extent_info fieinfo = { 0, };
struct inode *inode = file_inode(filp);
- struct super_block *sb = inode->i_sb;
- u64 len;
int error;
if (!inode->i_op->fiemap)
@@ -214,24 +209,13 @@ static int ioctl_fiemap(struct file *filp, struct fiemap __user *ufiemap)
if (fiemap.fm_extent_count > FIEMAP_MAX_EXTENTS)
return -EINVAL;
- error = fiemap_check_ranges(sb, fiemap.fm_start, fiemap.fm_length,
- &len);
- if (error)
- return error;
-
fieinfo.fi_flags = fiemap.fm_flags;
fieinfo.fi_extents_max = fiemap.fm_extent_count;
fieinfo.fi_extents_start = ufiemap->fm_extents;
- if (fiemap.fm_extent_count != 0 &&
- !access_ok(fieinfo.fi_extents_start,
- fieinfo.fi_extents_max * sizeof(struct fiemap_extent)))
- return -EFAULT;
-
- if (fieinfo.fi_flags & FIEMAP_FLAG_SYNC)
- filemap_write_and_wait(inode->i_mapping);
+ error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start,
+ fiemap.fm_length);
- error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
fiemap.fm_flags = fieinfo.fi_flags;
fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
if (copy_to_user(ufiemap, &fiemap, sizeof(fiemap)))
@@ -307,8 +291,7 @@ static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
* If you use this function directly, you need to do your own locking. Use
* generic_block_fiemap if you want the locking done for you.
*/
-
-int __generic_block_fiemap(struct inode *inode,
+static int __generic_block_fiemap(struct inode *inode,
struct fiemap_extent_info *fieinfo, loff_t start,
loff_t len, get_block_t *get_block)
{
@@ -320,7 +303,7 @@ int __generic_block_fiemap(struct inode *inode,
bool past_eof = false, whole_file = false;
int ret = 0;
- ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
+ ret = fiemap_prep(inode, fieinfo, start, &len, FIEMAP_FLAG_SYNC);
if (ret)
return ret;
@@ -453,7 +436,6 @@ int __generic_block_fiemap(struct inode *inode,
return ret;
}
-EXPORT_SYMBOL(__generic_block_fiemap);
/**
* generic_block_fiemap - FIEMAP for block based inodes
diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
index fd3bd06fabb6..ec7b78e6feca 100644
--- a/fs/iomap/direct-io.c
+++ b/fs/iomap/direct-io.c
@@ -59,7 +59,7 @@ int iomap_dio_iopoll(struct kiocb *kiocb, bool spin)
EXPORT_SYMBOL_GPL(iomap_dio_iopoll);
static void iomap_dio_submit_bio(struct iomap_dio *dio, struct iomap *iomap,
- struct bio *bio)
+ struct bio *bio, loff_t pos)
{
atomic_inc(&dio->ref);
@@ -67,7 +67,12 @@ static void iomap_dio_submit_bio(struct iomap_dio *dio, struct iomap *iomap,
bio_set_polled(bio, dio->iocb);
dio->submit.last_queue = bdev_get_queue(iomap->bdev);
- dio->submit.cookie = submit_bio(bio);
+ if (dio->dops && dio->dops->submit_io)
+ dio->submit.cookie = dio->dops->submit_io(
+ file_inode(dio->iocb->ki_filp),
+ iomap, bio, pos);
+ else
+ dio->submit.cookie = submit_bio(bio);
}
static ssize_t iomap_dio_complete(struct iomap_dio *dio)
@@ -191,7 +196,7 @@ iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
get_page(page);
__bio_add_page(bio, page, len, 0);
bio_set_op_attrs(bio, REQ_OP_WRITE, flags);
- iomap_dio_submit_bio(dio, iomap, bio);
+ iomap_dio_submit_bio(dio, iomap, bio, pos);
}
static loff_t
@@ -299,11 +304,11 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
}
dio->size += n;
- pos += n;
copied += n;
nr_pages = iov_iter_npages(dio->submit.iter, BIO_MAX_PAGES);
- iomap_dio_submit_bio(dio, iomap, bio);
+ iomap_dio_submit_bio(dio, iomap, bio, pos);
+ pos += n;
} while (nr_pages);
/*
@@ -411,8 +416,6 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
struct blk_plug plug;
struct iomap_dio *dio;
- lockdep_assert_held(&inode->i_rwsem);
-
if (!count)
return 0;
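The new branch in iomap_dio_submit_bio() lets a filesystem supply its own submission routine via iomap_dio_ops, and pos is now advanced only after submission so the hook sees the file offset the bio maps. A sketch of an opt-in, with the filesystem name made up and the hook signature inferred from the call site above:

static blk_qc_t examplefs_dio_submit_io(struct inode *inode,
					struct iomap *iomap,
					struct bio *bio, loff_t file_offset)
{
	/* e.g. record per-extent state at file_offset, then submit */
	return submit_bio(bio);
}

static const struct iomap_dio_ops examplefs_dio_ops = {
	.submit_io	= examplefs_dio_submit_io,
};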
diff --git a/fs/iomap/fiemap.c b/fs/iomap/fiemap.c
index d55e8f491a5e..aab070df4a21 100644
--- a/fs/iomap/fiemap.c
+++ b/fs/iomap/fiemap.c
@@ -6,6 +6,7 @@
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/iomap.h>
+#include <linux/fiemap.h>
struct fiemap_ctx {
struct fiemap_extent_info *fi;
@@ -65,7 +66,7 @@ iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
}
int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
- loff_t start, loff_t len, const struct iomap_ops *ops)
+ u64 start, u64 len, const struct iomap_ops *ops)
{
struct fiemap_ctx ctx;
loff_t ret;
@@ -74,16 +75,10 @@ int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
ctx.fi = fi;
ctx.prev.type = IOMAP_HOLE;
- ret = fiemap_check_flags(fi, FIEMAP_FLAG_SYNC);
+ ret = fiemap_prep(inode, fi, start, &len, 0);
if (ret)
return ret;
- if (fi->fi_flags & FIEMAP_FLAG_SYNC) {
- ret = filemap_write_and_wait(inode->i_mapping);
- if (ret)
- return ret;
- }
-
while (len > 0) {
ret = iomap_apply(inode, start, len, IOMAP_REPORT, ops, &ctx,
iomap_fiemap_actor);
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 3dccc23cf010..e91aad3637a2 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -541,17 +541,24 @@ handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
}
EXPORT_SYMBOL(jbd2_journal_start);
-static void __jbd2_journal_unreserve_handle(handle_t *handle)
+static void __jbd2_journal_unreserve_handle(handle_t *handle, transaction_t *t)
{
journal_t *journal = handle->h_journal;
WARN_ON(!handle->h_reserved);
sub_reserved_credits(journal, handle->h_total_credits);
+ if (t)
+ atomic_sub(handle->h_total_credits, &t->t_outstanding_credits);
}
void jbd2_journal_free_reserved(handle_t *handle)
{
- __jbd2_journal_unreserve_handle(handle);
+ journal_t *journal = handle->h_journal;
+
+ /* Get j_state_lock to pin running transaction if it exists */
+ read_lock(&journal->j_state_lock);
+ __jbd2_journal_unreserve_handle(handle, journal->j_running_transaction);
+ read_unlock(&journal->j_state_lock);
jbd2_free_handle(handle);
}
EXPORT_SYMBOL(jbd2_journal_free_reserved);
@@ -722,7 +729,8 @@ static void stop_this_handle(handle_t *handle)
atomic_sub(handle->h_total_credits,
&transaction->t_outstanding_credits);
if (handle->h_rsv_handle)
- __jbd2_journal_unreserve_handle(handle->h_rsv_handle);
+ __jbd2_journal_unreserve_handle(handle->h_rsv_handle,
+ transaction);
if (atomic_dec_and_test(&transaction->t_updates))
wake_up(&journal->j_wait_updates);
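Both callers now pass the transaction the reservation was charged against, so unreserving subtracts the credits from t_outstanding_credits as well as from the journal-wide reserved count; previously, freeing an unused reserved handle left its credits counted against the running transaction. A toy model of the accounting (not jbd2 code):

#include <stdio.h>

int main(void)
{
	int t_outstanding_credits = 0;
	int reserved = 32;			/* h_total_credits */

	t_outstanding_credits += reserved;	/* reservation charged */
	/* the old unreserve path stopped here, leaking 32 credits;
	 * the fixed path also gives them back: */
	t_outstanding_credits -= reserved;
	printf("outstanding after free: %d\n", t_outstanding_credits);
	return 0;
}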
diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c
index 3acc954f7c04..837d42f61464 100644
--- a/fs/jfs/jfs_dtree.c
+++ b/fs/jfs/jfs_dtree.c
@@ -2964,7 +2964,7 @@ struct jfs_dirent {
loff_t position;
int ino;
u16 name_len;
- char name[0];
+ char name[];
};
/*
diff --git a/fs/jfs/jfs_xattr.h b/fs/jfs/jfs_xattr.h
index f0558b3348da..c50167a7bc50 100644
--- a/fs/jfs/jfs_xattr.h
+++ b/fs/jfs/jfs_xattr.h
@@ -17,12 +17,12 @@ struct jfs_ea {
u8 flag; /* Unused? */
u8 namelen; /* Length of name */
__le16 valuelen; /* Length of value */
- char name[0]; /* Attribute name (includes null-terminator) */
+ char name[]; /* Attribute name (includes null-terminator) */
}; /* Value immediately follows name */
struct jfs_ea_list {
__le32 size; /* overall size */
- struct jfs_ea ea[0]; /* Variable length list */
+ struct jfs_ea ea[]; /* Variable length list */
};
/* Macros for defining maximum number of bytes supported for EAs */
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index 34366db3620d..06b342d8462b 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -652,9 +652,9 @@ static int kernfs_fop_open(struct inode *inode, struct file *file)
* The following is done to give a different lockdep key to
* @of->mutex for files which implement mmap. This is a rather
* crude way to avoid false positive lockdep warning around
- * mm->mmap_sem - mmap nests @of->mutex under mm->mmap_sem and
+ * mm->mmap_lock - mmap nests @of->mutex under mm->mmap_lock and
* reading /sys/block/sda/trace/act_mask grabs sr_mutex, under
- * which mm->mmap_sem nests, while holding @of->mutex. As each
+ * which mm->mmap_lock nests, while holding @of->mutex. As each
* open file has a separate mutex, it's okay as long as those don't
* happen on the same file. At this point, we can't easily give
* each file a separate locking class. Let's differentiate on
@@ -1010,7 +1010,7 @@ struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
#ifdef CONFIG_DEBUG_LOCK_ALLOC
if (key) {
- lockdep_init_map(&kn->dep_map, "kn->count", key, 0);
+ lockdep_init_map(&kn->dep_map, "kn->active", key, 0);
kn->flags |= KERNFS_LOCKDEP;
}
#endif
diff --git a/fs/locks.c b/fs/locks.c
index 1d4f4d5da704..7df0f9fa66f4 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1557,6 +1557,9 @@ static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
{
bool rc;
+ if (lease->fl_lmops->lm_breaker_owns_lease
+ && lease->fl_lmops->lm_breaker_owns_lease(lease))
+ return false;
if ((breaker->fl_flags & FL_LAYOUT) != (lease->fl_flags & FL_LAYOUT)) {
rc = false;
goto trace;
@@ -2823,7 +2826,7 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl,
{
struct inode *inode = NULL;
unsigned int fl_pid;
- struct pid_namespace *proc_pidns = file_inode(f->file)->i_sb->s_fs_info;
+ struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb);
fl_pid = locks_translate_pid(fl, proc_pidns);
/*
@@ -2901,7 +2904,7 @@ static int locks_show(struct seq_file *f, void *v)
{
struct locks_iterator *iter = f->private;
struct file_lock *fl, *bfl;
- struct pid_namespace *proc_pidns = file_inode(f->file)->i_sb->s_fs_info;
+ struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb);
fl = hlist_entry(v, struct file_lock, fl_link);
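lm_breaker_owns_lease is a new, optional lock_manager_operations hook: when it returns true, leases_conflict() reports no conflict, so a lease holder can break past its own lease. A sketch of a manager opting in (the manager and its ownership test are hypothetical; nfsd's real implementation appears in the nfs4state.c hunks below):

static bool example_breaker_owns_lease(struct file_lock *lease)
{
	/* true iff the task breaking the lease acts on behalf of
	 * the lease owner itself */
	return false;
}

static const struct lock_manager_operations example_lm_ops = {
	.lm_breaker_owns_lease	= example_breaker_owns_lease,
	/* .lm_break, .lm_change, ... as before */
};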
diff --git a/fs/namei.c b/fs/namei.c
index d81f73ff1a8b..72d4219c93ac 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -271,7 +271,7 @@ static int check_acl(struct inode *inode, int mask)
/* no ->get_acl() calls in RCU mode... */
if (is_uncached_acl(acl))
return -ECHILD;
- return posix_acl_permission(inode, acl, mask & ~MAY_NOT_BLOCK);
+ return posix_acl_permission(inode, acl, mask);
}
acl = get_acl(inode, ACL_TYPE_ACCESS);
@@ -288,37 +288,51 @@ static int check_acl(struct inode *inode, int mask)
}
/*
- * This does the basic permission checking
+ * This does the basic UNIX permission checking.
+ *
+ * Note that the POSIX ACL check cares about the MAY_NOT_BLOCK bit,
+ * for RCU walking.
*/
static int acl_permission_check(struct inode *inode, int mask)
{
unsigned int mode = inode->i_mode;
- if (likely(uid_eq(current_fsuid(), inode->i_uid)))
+ /* Are we the owner? If so, ACL's don't matter */
+	/* Are we the owner? If so, ACLs don't matter */
+ mask &= 7;
mode >>= 6;
- else {
- if (IS_POSIXACL(inode) && (mode & S_IRWXG)) {
- int error = check_acl(inode, mask);
- if (error != -EAGAIN)
- return error;
- }
+ return (mask & ~mode) ? -EACCES : 0;
+ }
- if (in_group_p(inode->i_gid))
- mode >>= 3;
+ /* Do we have ACL's? */
+ if (IS_POSIXACL(inode) && (mode & S_IRWXG)) {
+ int error = check_acl(inode, mask);
+ if (error != -EAGAIN)
+ return error;
}
+ /* Only RWX matters for group/other mode bits */
+ mask &= 7;
+
/*
- * If the DACs are ok we don't need any capability check.
+ * Are the group permissions different from
+ * the other permissions in the bits we care
+ * about? Need to check group ownership if so.
*/
- if ((mask & ~mode & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0)
- return 0;
- return -EACCES;
+ if (mask & (mode ^ (mode >> 3))) {
+ if (in_group_p(inode->i_gid))
+ mode >>= 3;
+ }
+
+ /* Bits in 'mode' clear that we require? */
+ return (mask & ~mode) ? -EACCES : 0;
}
/**
* generic_permission - check for access rights on a Posix-like filesystem
* @inode: inode to check access rights for
- * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC, ...)
+ * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC,
+ * %MAY_NOT_BLOCK ...)
*
* Used to check for read/write/execute permissions on a file.
* We use "fsuid" for this, letting us set arbitrary permissions
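The mask & (mode ^ (mode >> 3)) test skips the in_group_p() lookup whenever the group and other bits agree on every requested rwx bit, because the verdict is then identical either way. A standalone demonstration (not kernel code):

#include <stdio.h>

/* nonzero when group membership can change the outcome */
static unsigned int group_lookup_needed(unsigned int mode, unsigned int rwx)
{
	return (rwx & 7) & (mode ^ (mode >> 3));
}

int main(void)
{
	/* 0644: group and other both grant read, lookup skipped */
	printf("0644 MAY_READ: %u\n", group_lookup_needed(0644, 4));
	/* 0640: only group grants read, lookup required */
	printf("0640 MAY_READ: %u\n", group_lookup_needed(0640, 4));
	return 0;
}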
diff --git a/fs/namespace.c b/fs/namespace.c
index a6baee3c7904..f30ed401cc6d 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -684,9 +684,6 @@ bool __is_local_mountpoint(struct dentry *dentry)
struct mount *mnt;
bool is_covered = false;
- if (!d_mountpoint(dentry))
- goto out;
-
down_read(&namespace_sem);
lock_ns_list(ns);
list_for_each_entry(mnt, &ns->list, mnt_list) {
@@ -698,7 +695,7 @@ bool __is_local_mountpoint(struct dentry *dentry)
}
unlock_ns_list(ns);
up_read(&namespace_sem);
-out:
+
return is_covered;
}
@@ -1786,6 +1783,11 @@ static struct mnt_namespace *to_mnt_ns(struct ns_common *ns)
return container_of(ns, struct mnt_namespace, ns);
}
+struct ns_common *from_mnt_ns(struct mnt_namespace *mnt)
+{
+ return &mnt->ns;
+}
+
static bool mnt_ns_loop(struct dentry *dentry)
{
/* Could bind mounting the mount namespace inode cause a
@@ -1932,6 +1934,9 @@ struct vfsmount *clone_private_mount(const struct path *path)
if (IS_ERR(new_mnt))
return ERR_CAST(new_mnt);
+ /* Longterm mount to be removed by kern_unmount*() */
+ new_mnt->mnt_ns = MNT_NS_INTERNAL;
+
return &new_mnt->mnt;
}
EXPORT_SYMBOL_GPL(clone_private_mount);
@@ -3858,6 +3863,19 @@ void kern_unmount(struct vfsmount *mnt)
}
EXPORT_SYMBOL(kern_unmount);
+void kern_unmount_array(struct vfsmount *mnt[], unsigned int num)
+{
+ unsigned int i;
+
+ for (i = 0; i < num; i++)
+ if (mnt[i])
+ real_mount(mnt[i])->mnt_ns = NULL;
+ synchronize_rcu_expedited();
+ for (i = 0; i < num; i++)
+ mntput(mnt[i]);
+}
+EXPORT_SYMBOL(kern_unmount_array);
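kern_unmount_array() clears every mount's mnt_ns first and then pays for a single synchronize_rcu_expedited() before dropping the references, where repeated kern_unmount() calls would incur one grace period per mount. A hypothetical caller tearing down a pair of internal mounts:

static void example_put_internal_mounts(struct vfsmount *a, struct vfsmount *b)
{
	struct vfsmount *mnts[] = { a, b };

	/* one RCU grace period covers both unmounts */
	kern_unmount_array(mnts, ARRAY_SIZE(mnts));
}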
+
bool our_mnt(struct vfsmount *mnt)
{
return check_mnt(real_mount(mnt));
@@ -4013,16 +4031,18 @@ static void mntns_put(struct ns_common *ns)
put_mnt_ns(to_mnt_ns(ns));
}
-static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns)
+static int mntns_install(struct nsset *nsset, struct ns_common *ns)
{
- struct fs_struct *fs = current->fs;
+ struct nsproxy *nsproxy = nsset->nsproxy;
+ struct fs_struct *fs = nsset->fs;
struct mnt_namespace *mnt_ns = to_mnt_ns(ns), *old_mnt_ns;
+ struct user_namespace *user_ns = nsset->cred->user_ns;
struct path root;
int err;
if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) ||
- !ns_capable(current_user_ns(), CAP_SYS_CHROOT) ||
- !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
+ !ns_capable(user_ns, CAP_SYS_CHROOT) ||
+ !ns_capable(user_ns, CAP_SYS_ADMIN))
return -EPERM;
if (is_anon_ns(mnt_ns))
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index a57e7c72c7f4..1b79dd5cf661 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -446,7 +446,7 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
struct inode *inode = mapping->host;
struct nfs_direct_req *dreq;
struct nfs_lock_context *l_ctx;
- ssize_t result = -EINVAL, requested;
+ ssize_t result, requested;
size_t count = iov_iter_count(iter);
nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);
@@ -731,6 +731,8 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
nfs_list_remove_request(req);
if (request_commit) {
kref_get(&req->wb_kref);
+ memcpy(&req->wb_verf, &hdr->verf.verifier,
+ sizeof(req->wb_verf));
nfs_mark_request_commit(req, hdr->lseg, &cinfo,
hdr->ds_commit_idx);
}
diff --git a/fs/nfs/dns_resolve.c b/fs/nfs/dns_resolve.c
index 963800037609..e87d500ad95a 100644
--- a/fs/nfs/dns_resolve.c
+++ b/fs/nfs/dns_resolve.c
@@ -39,7 +39,6 @@ ssize_t nfs_dns_resolve_name(struct net *net, char *name, size_t namelen,
#include <linux/string.h>
#include <linux/kmod.h>
#include <linux/slab.h>
-#include <linux/module.h>
#include <linux/socket.h>
#include <linux/seq_file.h>
#include <linux/inet.h>
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index b9d0921cb4fe..0bf1f835de01 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -833,6 +833,8 @@ int nfs_getattr(const struct path *path, struct kstat *stat,
do_update |= cache_validity & NFS_INO_INVALID_ATIME;
if (request_mask & (STATX_CTIME|STATX_MTIME))
do_update |= cache_validity & NFS_INO_REVAL_PAGECACHE;
+ if (request_mask & STATX_BLOCKS)
+ do_update |= cache_validity & NFS_INO_INVALID_BLOCKS;
if (do_update) {
/* Update the attribute cache */
if (!(server->flags & NFS_MOUNT_NOAC))
@@ -1764,7 +1766,8 @@ out_noforce:
status = nfs_post_op_update_inode_locked(inode, fattr,
NFS_INO_INVALID_CHANGE
| NFS_INO_INVALID_CTIME
- | NFS_INO_INVALID_MTIME);
+ | NFS_INO_INVALID_MTIME
+ | NFS_INO_INVALID_BLOCKS);
return status;
}
@@ -1871,7 +1874,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
nfsi->cache_validity &= ~(NFS_INO_INVALID_ATTR
| NFS_INO_INVALID_ATIME
| NFS_INO_REVAL_FORCED
- | NFS_INO_REVAL_PAGECACHE);
+ | NFS_INO_REVAL_PAGECACHE
+ | NFS_INO_INVALID_BLOCKS);
/* Do atomic weak cache consistency updates */
nfs_wcc_update_inode(inode, fattr);
@@ -2033,8 +2037,12 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
inode->i_blocks = nfs_calc_block_size(fattr->du.nfs3.used);
} else if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED)
inode->i_blocks = fattr->du.nfs2.blocks;
- else
+ else {
+ nfsi->cache_validity |= save_cache_validity &
+ (NFS_INO_INVALID_BLOCKS
+ | NFS_INO_REVAL_FORCED);
cache_revalidated = false;
+ }
/* Update attrtimeo value if we're out of the unstable period */
if (attr_changed) {
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index a46d1d5d16d8..2397ceedba8a 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -179,11 +179,11 @@ nfs3_proc_lookup(struct inode *dir, struct dentry *dentry,
if (nfs_lookup_is_soft_revalidate(dentry))
task_flags |= RPC_TASK_TIMEOUT;
- dprintk("NFS call lookup %pd2\n", dentry);
res.dir_attr = nfs_alloc_fattr();
if (res.dir_attr == NULL)
return -ENOMEM;
+ dprintk("NFS call lookup %pd2\n", dentry);
nfs_fattr_init(fattr);
status = rpc_call_sync(NFS_CLIENT(dir), &msg, task_flags);
nfs_refresh_inode(dir, res.dir_attr);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 9056f3dd380e..e32717fd1169 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -7909,7 +7909,7 @@ nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata)
}
static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = {
- .rpc_call_done = &nfs4_bind_one_conn_to_session_done,
+ .rpc_call_done = nfs4_bind_one_conn_to_session_done,
};
/*
diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h
index 7e7a97ae21ed..547cec79899f 100644
--- a/fs/nfs/nfstrace.h
+++ b/fs/nfs/nfstrace.h
@@ -961,6 +961,97 @@ TRACE_EVENT(nfs_readpage_done,
)
);
+TRACE_EVENT(nfs_readpage_short,
+ TP_PROTO(
+ const struct rpc_task *task,
+ const struct nfs_pgio_header *hdr
+ ),
+
+ TP_ARGS(task, hdr),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(u32, fhandle)
+ __field(u64, fileid)
+ __field(loff_t, offset)
+ __field(u32, arg_count)
+ __field(u32, res_count)
+ __field(bool, eof)
+ __field(int, status)
+ ),
+
+ TP_fast_assign(
+ const struct inode *inode = hdr->inode;
+ const struct nfs_inode *nfsi = NFS_I(inode);
+ const struct nfs_fh *fh = hdr->args.fh ?
+ hdr->args.fh : &nfsi->fh;
+
+ __entry->status = task->tk_status;
+ __entry->offset = hdr->args.offset;
+ __entry->arg_count = hdr->args.count;
+ __entry->res_count = hdr->res.count;
+ __entry->eof = hdr->res.eof;
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->fileid = nfsi->fileid;
+ __entry->fhandle = nfs_fhandle_hash(fh);
+ ),
+
+ TP_printk(
+ "fileid=%02x:%02x:%llu fhandle=0x%08x "
+ "offset=%lld count=%u res=%u status=%d%s",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long long)__entry->fileid,
+ __entry->fhandle,
+ (long long)__entry->offset, __entry->arg_count,
+ __entry->res_count, __entry->status,
+ __entry->eof ? " eof" : ""
+ )
+);
+
+TRACE_EVENT(nfs_pgio_error,
+ TP_PROTO(
+ const struct nfs_pgio_header *hdr,
+ int error,
+ loff_t pos
+ ),
+
+ TP_ARGS(hdr, error, pos),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(u32, fhandle)
+ __field(u64, fileid)
+ __field(loff_t, offset)
+ __field(u32, arg_count)
+ __field(u32, res_count)
+ __field(loff_t, pos)
+ __field(int, status)
+ ),
+
+ TP_fast_assign(
+ const struct inode *inode = hdr->inode;
+ const struct nfs_inode *nfsi = NFS_I(inode);
+ const struct nfs_fh *fh = hdr->args.fh ?
+ hdr->args.fh : &nfsi->fh;
+
+ __entry->status = error;
+ __entry->offset = hdr->args.offset;
+ __entry->arg_count = hdr->args.count;
+	__entry->res_count = hdr->res.count;
+	__entry->pos = pos;
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->fileid = nfsi->fileid;
+ __entry->fhandle = nfs_fhandle_hash(fh);
+ ),
+
+ TP_printk("fileid=%02x:%02x:%llu fhandle=0x%08x "
+ "offset=%lld count=%u res=%u pos=%llu status=%d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long long)__entry->fileid, __entry->fhandle,
+ (long long)__entry->offset, __entry->arg_count, __entry->res_count,
+ __entry->pos, __entry->status
+ )
+);
+
TRACE_DEFINE_ENUM(NFS_UNSTABLE);
TRACE_DEFINE_ENUM(NFS_DATA_SYNC);
TRACE_DEFINE_ENUM(NFS_FILE_SYNC);
@@ -1312,7 +1403,12 @@ TRACE_EVENT(nfs_xdr_status,
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(u32, xid)
+ __field(int, version)
__field(unsigned long, error)
+ __string(program,
+ xdr->rqst->rq_task->tk_client->cl_program->name)
+ __string(procedure,
+ xdr->rqst->rq_task->tk_msg.rpc_proc->p_name)
),
TP_fast_assign(
@@ -1322,13 +1418,19 @@ TRACE_EVENT(nfs_xdr_status,
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
__entry->xid = be32_to_cpu(rqstp->rq_xid);
+ __entry->version = task->tk_client->cl_vers;
__entry->error = error;
+ __assign_str(program,
+ task->tk_client->cl_program->name)
+ __assign_str(procedure, task->tk_msg.rpc_proc->p_name)
),
TP_printk(
- "task:%u@%d xid=0x%08x error=%ld (%s)",
+ "task:%u@%d xid=0x%08x %sv%d %s error=%ld (%s)",
__entry->task_id, __entry->client_id, __entry->xid,
- -__entry->error, nfs_show_status(__entry->error)
+ __get_str(program), __entry->version,
+ __get_str(procedure), -__entry->error,
+ nfs_show_status(__entry->error)
)
);
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 6ca421cbe19c..6ea4cac41e46 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -24,6 +24,7 @@
#include "internal.h"
#include "pnfs.h"
+#include "nfstrace.h"
#define NFSDBG_FACILITY NFSDBG_PAGECACHE
@@ -64,6 +65,7 @@ void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
{
unsigned int new = pos - hdr->io_start;
+ trace_nfs_pgio_error(hdr, error, pos);
if (hdr->good_bytes > new) {
hdr->good_bytes = new;
clear_bit(NFS_IOHDR_EOF, &hdr->flags);
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 13b22e898116..eb854f1f86e2 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -264,6 +264,8 @@ static void nfs_readpage_retry(struct rpc_task *task,
/* This is a short read! */
nfs_inc_stats(hdr->inode, NFSIOS_SHORTREAD);
+ trace_nfs_readpage_short(task, hdr);
+
/* Has the server at least made some progress? */
if (resp->count == 0) {
nfs_set_pgio_error(hdr, -EIO, argp->offset);
diff --git a/fs/nfs/sysfs.h b/fs/nfs/sysfs.h
index f1b27411dcc0..ebcbdc40483b 100644
--- a/fs/nfs/sysfs.h
+++ b/fs/nfs/sysfs.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2019 Hammerspace Inc
*/
diff --git a/fs/nfsd/cache.h b/fs/nfsd/cache.h
index 10ec5ecdf117..65c331f75e9c 100644
--- a/fs/nfsd/cache.h
+++ b/fs/nfsd/cache.h
@@ -78,6 +78,8 @@ enum {
/* Checksum this amount of the request */
#define RC_CSUMLEN (256U)
+int nfsd_drc_slab_create(void);
+void nfsd_drc_slab_free(void);
int nfsd_reply_cache_init(struct nfsd_net *);
void nfsd_reply_cache_shutdown(struct nfsd_net *);
int nfsd_cache_lookup(struct svc_rqst *);
diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
index 09aa545825bd..9217cb64bf0e 100644
--- a/fs/nfsd/netns.h
+++ b/fs/nfsd/netns.h
@@ -139,7 +139,6 @@ struct nfsd_net {
* Duplicate reply cache
*/
struct nfsd_drc_bucket *drc_hashtbl;
- struct kmem_cache *drc_slab;
/* max number of entries allowed in the cache */
unsigned int max_drc_entries;
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 5cf91322de0f..7fbe9840a03e 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -38,6 +38,7 @@
#include "nfsd.h"
#include "state.h"
#include "netns.h"
+#include "trace.h"
#include "xdr4cb.h"
#include "xdr4.h"
@@ -904,16 +905,20 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
if (clp->cl_minorversion == 0) {
if (!clp->cl_cred.cr_principal &&
- (clp->cl_cred.cr_flavor >= RPC_AUTH_GSS_KRB5))
+ (clp->cl_cred.cr_flavor >= RPC_AUTH_GSS_KRB5)) {
+ trace_nfsd_cb_setup_err(clp, -EINVAL);
return -EINVAL;
+ }
args.client_name = clp->cl_cred.cr_principal;
args.prognumber = conn->cb_prog;
args.protocol = XPRT_TRANSPORT_TCP;
args.authflavor = clp->cl_cred.cr_flavor;
clp->cl_cb_ident = conn->cb_ident;
} else {
- if (!conn->cb_xprt)
+ if (!conn->cb_xprt) {
+ trace_nfsd_cb_setup_err(clp, -EINVAL);
return -EINVAL;
+ }
clp->cl_cb_conn.cb_xprt = conn->cb_xprt;
clp->cl_cb_session = ses;
args.bc_xprt = conn->cb_xprt;
@@ -925,32 +930,27 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
/* Create RPC client */
client = rpc_create(&args);
if (IS_ERR(client)) {
- dprintk("NFSD: couldn't create callback client: %ld\n",
- PTR_ERR(client));
+ trace_nfsd_cb_setup_err(clp, PTR_ERR(client));
return PTR_ERR(client);
}
cred = get_backchannel_cred(clp, client, ses);
if (!cred) {
+ trace_nfsd_cb_setup_err(clp, -ENOMEM);
rpc_shutdown_client(client);
return -ENOMEM;
}
clp->cl_cb_client = client;
clp->cl_cb_cred = cred;
+ trace_nfsd_cb_setup(clp);
return 0;
}
-static void warn_no_callback_path(struct nfs4_client *clp, int reason)
-{
- dprintk("NFSD: warning: no callback path to client %.*s: error %d\n",
- (int)clp->cl_name.len, clp->cl_name.data, reason);
-}
-
static void nfsd4_mark_cb_down(struct nfs4_client *clp, int reason)
{
if (test_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags))
return;
clp->cl_cb_state = NFSD4_CB_DOWN;
- warn_no_callback_path(clp, reason);
+ trace_nfsd_cb_state(clp);
}
static void nfsd4_mark_cb_fault(struct nfs4_client *clp, int reason)
@@ -958,17 +958,20 @@ static void nfsd4_mark_cb_fault(struct nfs4_client *clp, int reason)
if (test_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags))
return;
clp->cl_cb_state = NFSD4_CB_FAULT;
- warn_no_callback_path(clp, reason);
+ trace_nfsd_cb_state(clp);
}
static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata)
{
struct nfs4_client *clp = container_of(calldata, struct nfs4_client, cl_cb_null);
+ trace_nfsd_cb_done(clp, task->tk_status);
if (task->tk_status)
nfsd4_mark_cb_down(clp, task->tk_status);
- else
+ else {
clp->cl_cb_state = NFSD4_CB_UP;
+ trace_nfsd_cb_state(clp);
+ }
}
static void nfsd4_cb_probe_release(void *calldata)
@@ -993,6 +996,7 @@ static const struct rpc_call_ops nfsd4_cb_probe_ops = {
void nfsd4_probe_callback(struct nfs4_client *clp)
{
clp->cl_cb_state = NFSD4_CB_UNKNOWN;
+ trace_nfsd_cb_state(clp);
set_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags);
nfsd4_run_cb(&clp->cl_cb_null);
}
@@ -1009,6 +1013,7 @@ void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *conn)
spin_lock(&clp->cl_lock);
memcpy(&clp->cl_cb_conn, conn, sizeof(struct nfs4_cb_conn));
spin_unlock(&clp->cl_lock);
+ trace_nfsd_cb_state(clp);
}
/*
@@ -1165,8 +1170,7 @@ static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
struct nfsd4_callback *cb = calldata;
struct nfs4_client *clp = cb->cb_clp;
- dprintk("%s: minorversion=%d\n", __func__,
- clp->cl_minorversion);
+ trace_nfsd_cb_done(clp, task->tk_status);
if (!nfsd4_cb_sequence_done(task, cb))
return;
@@ -1271,6 +1275,7 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
* kill the old client:
*/
if (clp->cl_cb_client) {
+ trace_nfsd_cb_shutdown(clp);
rpc_shutdown_client(clp->cl_cb_client);
clp->cl_cb_client = NULL;
put_cred(clp->cl_cb_cred);
@@ -1301,6 +1306,8 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
err = setup_callback_client(clp, &conn, ses);
if (err) {
nfsd4_mark_cb_down(clp, err);
+ if (c)
+ svc_xprt_put(c->cn_xprt);
return;
}
}
@@ -1314,6 +1321,8 @@ nfsd4_run_cb_work(struct work_struct *work)
struct rpc_clnt *clnt;
int flags;
+ trace_nfsd_cb_work(clp, cb->cb_msg.rpc_proc->p_name);
+
if (cb->cb_need_restart) {
cb->cb_need_restart = false;
} else {
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 0e75f7fb5fec..a09c35f0f6f0 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1155,7 +1155,7 @@ extern void nfs_sb_deactive(struct super_block *sb);
#define NFSD42_INTERSSC_MOUNTOPS "vers=4.2,addr=%s,sec=sys"
-/**
+/*
* Support one copy source server for now.
*/
static __be32
@@ -1245,10 +1245,9 @@ nfsd4_interssc_disconnect(struct vfsmount *ss_mnt)
mntput(ss_mnt);
}
-/**
- * nfsd4_setup_inter_ssc
- *
+/*
* Verify COPY destination stateid.
+ *
* Connect to the source server with NFSv4.1.
* Create the source struct file for nfsd_copy_range.
* Called with COPY cstate:
@@ -2302,6 +2301,8 @@ nfsd4_proc_compound(struct svc_rqst *rqstp)
}
check_if_stalefh_allowed(args);
+ rqstp->rq_lease_breaker = (void **)&cstate->clp;
+
trace_nfsd_compound(rqstp, args->opcnt);
while (!status && resp->opcnt < args->opcnt) {
op = &args->ops[resp->opcnt++];
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index c107caa56525..bb3d2c32664a 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -51,6 +51,7 @@
#include "netns.h"
#include "pnfs.h"
#include "filecache.h"
+#include "trace.h"
#define NFSDDBG_FACILITY NFSDDBG_PROC
@@ -167,9 +168,6 @@ renew_client_locked(struct nfs4_client *clp)
return;
}
- dprintk("renewing client (clientid %08x/%08x)\n",
- clp->cl_clientid.cl_boot,
- clp->cl_clientid.cl_id);
list_move_tail(&clp->cl_lru, &nn->client_lru);
clp->cl_time = ktime_get_boottime_seconds();
}
@@ -1922,8 +1920,7 @@ STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
*/
if (clid->cl_boot == (u32)nn->boot_time)
return 0;
- dprintk("NFSD stale clientid (%08x/%08x) boot_time %08llx\n",
- clid->cl_boot, clid->cl_id, nn->boot_time);
+ trace_nfsd_clid_stale(clid);
return 1;
}
@@ -2406,6 +2403,11 @@ static void states_stop(struct seq_file *s, void *v)
spin_unlock(&clp->cl_lock);
}
+static void nfs4_show_fname(struct seq_file *s, struct nfsd_file *f)
+{
+ seq_printf(s, "filename: \"%pD2\"", f->nf_file);
+}
+
static void nfs4_show_superblock(struct seq_file *s, struct nfsd_file *f)
{
struct inode *inode = f->nf_inode;
@@ -2422,6 +2424,12 @@ static void nfs4_show_owner(struct seq_file *s, struct nfs4_stateowner *oo)
seq_quote_mem(s, oo->so_owner.data, oo->so_owner.len);
}
+static void nfs4_show_stateid(struct seq_file *s, stateid_t *stid)
+{
+ seq_printf(s, "0x%.8x", stid->si_generation);
+ seq_printf(s, "%12phN", &stid->si_opaque);
+}
+
static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
{
struct nfs4_ol_stateid *ols;
@@ -2437,7 +2445,9 @@ static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
nf = st->sc_file;
file = find_any_file(nf);
- seq_printf(s, "- 0x%16phN: { type: open, ", &st->sc_stateid);
+ seq_printf(s, "- ");
+ nfs4_show_stateid(s, &st->sc_stateid);
+ seq_printf(s, ": { type: open, ");
access = bmap_to_share_mode(ols->st_access_bmap);
deny = bmap_to_share_mode(ols->st_deny_bmap);
@@ -2451,6 +2461,8 @@ static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
nfs4_show_superblock(s, file);
seq_printf(s, ", ");
+ nfs4_show_fname(s, file);
+ seq_printf(s, ", ");
nfs4_show_owner(s, oo);
seq_printf(s, " }\n");
nfsd_file_put(file);
@@ -2470,7 +2482,9 @@ static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
nf = st->sc_file;
file = find_any_file(nf);
- seq_printf(s, "- 0x%16phN: { type: lock, ", &st->sc_stateid);
+ seq_printf(s, "- ");
+ nfs4_show_stateid(s, &st->sc_stateid);
+ seq_printf(s, ": { type: lock, ");
/*
* Note: a lock stateid isn't really the same thing as a lock,
@@ -2482,6 +2496,8 @@ static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
nfs4_show_superblock(s, file);
/* XXX: open stateid? */
seq_printf(s, ", ");
+ nfs4_show_fname(s, file);
+ seq_printf(s, ", ");
nfs4_show_owner(s, oo);
seq_printf(s, " }\n");
nfsd_file_put(file);
@@ -2499,7 +2515,9 @@ static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
nf = st->sc_file;
file = nf->fi_deleg_file;
- seq_printf(s, "- 0x%16phN: { type: deleg, ", &st->sc_stateid);
+ seq_printf(s, "- ");
+ nfs4_show_stateid(s, &st->sc_stateid);
+ seq_printf(s, ": { type: deleg, ");
/* Kinda dead code as long as we only support read delegs: */
seq_printf(s, "access: %s, ",
@@ -2508,6 +2526,8 @@ static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
/* XXX: lease time, whether it's being recalled. */
nfs4_show_superblock(s, file);
+ seq_printf(s, ", ");
+ nfs4_show_fname(s, file);
seq_printf(s, " }\n");
return 0;
@@ -2521,11 +2541,15 @@ static int nfs4_show_layout(struct seq_file *s, struct nfs4_stid *st)
ls = container_of(st, struct nfs4_layout_stateid, ls_stid);
file = ls->ls_file;
- seq_printf(s, "- 0x%16phN: { type: layout, ", &st->sc_stateid);
+ seq_printf(s, "- ");
+ nfs4_show_stateid(s, &st->sc_stateid);
+ seq_printf(s, ": { type: layout, ");
/* XXX: What else would be useful? */
nfs4_show_superblock(s, file);
+ seq_printf(s, ", ");
+ nfs4_show_fname(s, file);
seq_printf(s, " }\n");
return 0;
@@ -2845,14 +2869,12 @@ gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_r
conn->cb_prog = se->se_callback_prog;
conn->cb_ident = se->se_callback_ident;
memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
+ trace_nfsd_cb_args(clp, conn);
return;
out_err:
conn->cb_addr.ss_family = AF_UNSPEC;
conn->cb_addrlen = 0;
- dprintk("NFSD: this client (clientid %08x/%08x) "
- "will not receive delegations\n",
- clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);
-
+ trace_nfsd_cb_nodelegs(clp);
return;
}
@@ -3458,6 +3480,45 @@ __be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp,
return nfs_ok;
}
+static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
+{
+ struct nfsd4_conn *c;
+
+ list_for_each_entry(c, &s->se_conns, cn_persession) {
+ if (c->cn_xprt == xpt) {
+ return c;
+ }
+ }
+ return NULL;
+}
+
+static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst,
+ struct nfsd4_session *session, u32 req)
+{
+ struct nfs4_client *clp = session->se_client;
+ struct svc_xprt *xpt = rqst->rq_xprt;
+ struct nfsd4_conn *c;
+ __be32 status;
+
+ /* Following the last paragraph of RFC 5661 Section 18.34.3: */
+ spin_lock(&clp->cl_lock);
+ c = __nfsd4_find_conn(xpt, session);
+ if (!c)
+ status = nfserr_noent;
+ else if (req == c->cn_flags)
+ status = nfs_ok;
+ else if (req == NFS4_CDFC4_FORE_OR_BOTH &&
+ c->cn_flags != NFS4_CDFC4_BACK)
+ status = nfs_ok;
+ else if (req == NFS4_CDFC4_BACK_OR_BOTH &&
+ c->cn_flags != NFS4_CDFC4_FORE)
+ status = nfs_ok;
+ else
+ status = nfserr_inval;
+ spin_unlock(&clp->cl_lock);
+ return status;
+}
+
__be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
@@ -3479,6 +3540,9 @@ __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
status = nfserr_wrong_cred;
if (!nfsd4_mach_creds_match(session->se_client, rqstp))
goto out;
+ status = nfsd4_match_existing_connection(rqstp, session, bcts->dir);
+ if (status == nfs_ok || status == nfserr_inval)
+ goto out;
status = nfsd4_map_bcts_dir(&bcts->dir);
if (status)
goto out;
@@ -3544,18 +3608,6 @@ out:
return status;
}
-static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
-{
- struct nfsd4_conn *c;
-
- list_for_each_entry(c, &s->se_conns, cn_persession) {
- if (c->cn_xprt == xpt) {
- return c;
- }
- }
- return NULL;
-}
-
static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
{
struct nfs4_client *clp = ses->se_client;
@@ -3879,23 +3931,18 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (clp_used_exchangeid(conf))
goto out;
if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
- char addr_str[INET6_ADDRSTRLEN];
- rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
- sizeof(addr_str));
- dprintk("NFSD: setclientid: string in use by client "
- "at %s\n", addr_str);
+ trace_nfsd_clid_inuse_err(conf);
goto out;
}
}
unconf = find_unconfirmed_client_by_name(&clname, nn);
if (unconf)
unhash_client_locked(unconf);
+ /* We need to handle only case 1: probable callback update */
if (conf && same_verf(&conf->cl_verifier, &clverifier)) {
- /* case 1: probable callback update */
copy_clid(new, conf);
gen_confirm(new, nn);
- } else /* case 4 (new client) or cases 2, 3 (client reboot): */
- ;
+ }
new->cl_minorversion = 0;
gen_callback(new, setclid, rqstp);
add_to_unconfirmed(new);
@@ -4076,7 +4123,6 @@ out_free_openowner_slab:
out_free_client_slab:
kmem_cache_destroy(client_slab);
out:
- dprintk("nfsd4: out of memory while initializing nfsv4\n");
return -ENOMEM;
}
@@ -4508,6 +4554,8 @@ nfsd_break_deleg_cb(struct file_lock *fl)
struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
struct nfs4_file *fp = dp->dl_stid.sc_file;
+ trace_nfsd_deleg_break(&dp->dl_stid.sc_stateid);
+
/*
* We don't want the locks code to timeout the lease for us;
* we'll remove it ourself if a delegation isn't returned
@@ -4522,6 +4570,19 @@ nfsd_break_deleg_cb(struct file_lock *fl)
return ret;
}
+static bool nfsd_breaker_owns_lease(struct file_lock *fl)
+{
+ struct nfs4_delegation *dl = fl->fl_owner;
+ struct svc_rqst *rqst;
+ struct nfs4_client *clp;
+
+ if (!i_am_nfsd())
+		return false;
+ rqst = kthread_data(current);
+ clp = *(rqst->rq_lease_breaker);
+ return dl->dl_stid.sc_client == clp;
+}
+
static int
nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
struct list_head *dispose)
@@ -4533,6 +4594,7 @@ nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
}
static const struct lock_manager_operations nfsd_lease_mng_ops = {
+ .lm_breaker_owns_lease = nfsd_breaker_owns_lease,
.lm_break = nfsd_break_deleg_cb,
.lm_change = nfsd_change_deleg_cb,
};
@@ -5018,8 +5080,7 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
- dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
- STATEID_VAL(&dp->dl_stid.sc_stateid));
+ trace_nfsd_deleg_open(&dp->dl_stid.sc_stateid);
open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
nfs4_put_stid(&dp->dl_stid);
return;
@@ -5136,9 +5197,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
nfs4_open_delegation(current_fh, open, stp);
nodeleg:
status = nfs_ok;
-
- dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
- STATEID_VAL(&stp->st_stid.sc_stateid));
+ trace_nfsd_deleg_none(&stp->st_stid.sc_stateid);
out:
/* 4.1 client trying to upgrade/downgrade delegation? */
if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
@@ -5192,8 +5251,7 @@ nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
__be32 status;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
- dprintk("process_renew(%08x/%08x): starting\n",
- clid->cl_boot, clid->cl_id);
+ trace_nfsd_clid_renew(clid);
status = lookup_clientid(clid, cstate, nn, false);
if (status)
goto out;
@@ -5214,6 +5272,7 @@ nfsd4_end_grace(struct nfsd_net *nn)
if (nn->grace_ended)
return;
+ trace_nfsd_grace_complete(nn);
nn->grace_ended = true;
/*
* If the server goes down again right now, an NFSv4
@@ -5279,13 +5338,10 @@ nfs4_laundromat(struct nfsd_net *nn)
copy_stateid_t *cps_t;
int i;
- dprintk("NFSD: laundromat service - starting\n");
-
if (clients_still_reclaiming(nn)) {
new_timeo = 0;
goto out;
}
- dprintk("NFSD: end of grace period\n");
nfsd4_end_grace(nn);
INIT_LIST_HEAD(&reaplist);
@@ -5307,8 +5363,7 @@ nfs4_laundromat(struct nfsd_net *nn)
break;
}
if (mark_client_expired_locked(clp)) {
- dprintk("NFSD: client in use (clientid %08x)\n",
- clp->cl_clientid.cl_id);
+ trace_nfsd_clid_expired(&clp->cl_clientid);
continue;
}
list_add(&clp->cl_lru, &reaplist);
@@ -5316,8 +5371,7 @@ nfs4_laundromat(struct nfsd_net *nn)
spin_unlock(&nn->client_lock);
list_for_each_safe(pos, next, &reaplist) {
clp = list_entry(pos, struct nfs4_client, cl_lru);
- dprintk("NFSD: purging unused client (clientid %08x)\n",
- clp->cl_clientid.cl_id);
+ trace_nfsd_clid_purged(&clp->cl_clientid);
list_del_init(&clp->cl_lru);
expire_client(clp);
}
@@ -5407,7 +5461,6 @@ laundromat_main(struct work_struct *laundry)
laundromat_work);
t = nfs4_laundromat(nn);
- dprintk("NFSD: laundromat_main - sleeping for %lld seconds\n", t);
queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
}
@@ -5948,8 +6001,7 @@ nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
struct nfs4_stid *s;
struct nfs4_ol_stateid *stp = NULL;
- dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
- seqid, STATEID_VAL(stateid));
+ trace_nfsd_preprocess(seqid, stateid);
*stpp = NULL;
status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
@@ -6018,9 +6070,7 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
oo->oo_flags |= NFS4_OO_CONFIRMED;
nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
mutex_unlock(&stp->st_mutex);
- dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
- __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
-
+ trace_nfsd_open_confirm(oc->oc_seqid, &stp->st_stid.sc_stateid);
nfsd4_client_record_create(oo->oo_owner.so_client);
status = nfs_ok;
put_stateid:
@@ -7072,7 +7122,7 @@ nfs4_client_to_reclaim(struct xdr_netobj name, struct xdr_netobj princhash,
unsigned int strhashval;
struct nfs4_client_reclaim *crp;
- dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", name.len, name.data);
+ trace_nfsd_clid_reclaim(nn, name.len, name.data);
crp = alloc_reclaim();
if (crp) {
strhashval = clientstr_hashval(name);
@@ -7122,7 +7172,7 @@ nfsd4_find_reclaim_client(struct xdr_netobj name, struct nfsd_net *nn)
unsigned int strhashval;
struct nfs4_client_reclaim *crp = NULL;
- dprintk("NFSD: nfs4_find_reclaim_client for name %.*s\n", name.len, name.data);
+ trace_nfsd_clid_find(nn, name.len, name.data);
strhashval = clientstr_hashval(name);
list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
@@ -7686,6 +7736,9 @@ nfsd_recall_delegations(struct list_head *reaplist)
list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
list_del_init(&dp->dl_recall_lru);
clp = dp->dl_stid.sc_client;
+
+ trace_nfsd_deleg_recall(&dp->dl_stid.sc_stateid);
+
/*
* We skipped all entries that had a zero dl_time before,
* so we can now reset the dl_time back to 0. If a delegation
@@ -7868,6 +7921,7 @@ nfs4_state_start_net(struct net *net)
goto skip_grace;
printk(KERN_INFO "NFSD: starting %lld-second grace period (net %x)\n",
nn->nfsd4_grace, net->ns.inum);
+ trace_nfsd_grace_start(nn);
queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
return 0;
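nfsd4_match_existing_connection() encodes the RFC 5661 Section 18.34.3 rule that BIND_CONN_TO_SESSION may be satisfied by a connection already bound in a compatible direction. A standalone rendering of the comparison (the flag values mirror the NFS4_CDFC4_* constants as I read them; the no-connection case, nfserr_noent, is not modelled):

#include <stdio.h>

enum { FORE = 1, BACK = 2, FORE_OR_BOTH = 3, BACK_OR_BOTH = 7 };

static const char *match(int req, int existing)
{
	if (req == existing)
		return "nfs_ok";
	if (req == FORE_OR_BOTH && existing != BACK)
		return "nfs_ok";
	if (req == BACK_OR_BOTH && existing != FORE)
		return "nfs_ok";
	return "nfserr_inval";
}

int main(void)
{
	printf("FORE_OR_BOTH vs FORE: %s\n", match(FORE_OR_BOTH, FORE));
	printf("BACK_OR_BOTH vs FORE: %s\n", match(BACK_OR_BOTH, FORE));
	return 0;
}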
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index 96352ab7bd81..0a0cf1fd77d3 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -20,8 +20,7 @@
#include "nfsd.h"
#include "cache.h"
-
-#define NFSDDBG_FACILITY NFSDDBG_REPCACHE
+#include "trace.h"
/*
* We use this value to determine the number of hash buckets from the max
@@ -36,6 +35,8 @@ struct nfsd_drc_bucket {
spinlock_t cache_lock;
};
+static struct kmem_cache *drc_slab;
+
static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
struct shrink_control *sc);
@@ -95,7 +96,7 @@ nfsd_reply_cache_alloc(struct svc_rqst *rqstp, __wsum csum,
{
struct svc_cacherep *rp;
- rp = kmem_cache_alloc(nn->drc_slab, GFP_KERNEL);
+ rp = kmem_cache_alloc(drc_slab, GFP_KERNEL);
if (rp) {
rp->c_state = RC_UNUSED;
rp->c_type = RC_NOCACHE;
@@ -129,7 +130,7 @@ nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
atomic_dec(&nn->num_drc_entries);
nn->drc_mem_usage -= sizeof(*rp);
}
- kmem_cache_free(nn->drc_slab, rp);
+ kmem_cache_free(drc_slab, rp);
}
static void
@@ -141,6 +142,18 @@ nfsd_reply_cache_free(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
spin_unlock(&b->cache_lock);
}
+int nfsd_drc_slab_create(void)
+{
+ drc_slab = kmem_cache_create("nfsd_drc",
+ sizeof(struct svc_cacherep), 0, 0, NULL);
+ return drc_slab ? 0 : -ENOMEM;
+}
+
+void nfsd_drc_slab_free(void)
+{
+ kmem_cache_destroy(drc_slab);
+}
+
int nfsd_reply_cache_init(struct nfsd_net *nn)
{
unsigned int hashsize;
@@ -159,18 +172,13 @@ int nfsd_reply_cache_init(struct nfsd_net *nn)
if (status)
goto out_nomem;
- nn->drc_slab = kmem_cache_create("nfsd_drc",
- sizeof(struct svc_cacherep), 0, 0, NULL);
- if (!nn->drc_slab)
- goto out_shrinker;
-
nn->drc_hashtbl = kcalloc(hashsize,
sizeof(*nn->drc_hashtbl), GFP_KERNEL);
if (!nn->drc_hashtbl) {
nn->drc_hashtbl = vzalloc(array_size(hashsize,
sizeof(*nn->drc_hashtbl)));
if (!nn->drc_hashtbl)
- goto out_slab;
+ goto out_shrinker;
}
for (i = 0; i < hashsize; i++) {
@@ -180,8 +188,6 @@ int nfsd_reply_cache_init(struct nfsd_net *nn)
nn->drc_hashsize = hashsize;
return 0;
-out_slab:
- kmem_cache_destroy(nn->drc_slab);
out_shrinker:
unregister_shrinker(&nn->nfsd_reply_cache_shrinker);
out_nomem:
@@ -209,8 +215,6 @@ void nfsd_reply_cache_shutdown(struct nfsd_net *nn)
nn->drc_hashtbl = NULL;
nn->drc_hashsize = 0;
- kmem_cache_destroy(nn->drc_slab);
- nn->drc_slab = NULL;
}
/*
@@ -323,8 +327,10 @@ nfsd_cache_key_cmp(const struct svc_cacherep *key,
const struct svc_cacherep *rp, struct nfsd_net *nn)
{
if (key->c_key.k_xid == rp->c_key.k_xid &&
- key->c_key.k_csum != rp->c_key.k_csum)
+ key->c_key.k_csum != rp->c_key.k_csum) {
++nn->payload_misses;
+ trace_nfsd_drc_mismatch(nn, key, rp);
+ }
return memcmp(&key->c_key, &rp->c_key, sizeof(key->c_key));
}
@@ -377,15 +383,22 @@ out:
return ret;
}
-/*
+/**
+ * nfsd_cache_lookup - Find an entry in the duplicate reply cache
+ * @rqstp: Incoming Call to find
+ *
* Try to find an entry matching the current call in the cache. When none
* is found, we try to grab the oldest expired entry off the LRU list. If
* a suitable one isn't there, then drop the cache_lock and allocate a
* new one, then search again in case one got inserted while this thread
* didn't hold the lock.
+ *
+ * Return values:
+ * %RC_DOIT: Process the request normally
+ * %RC_REPLY: Reply from cache
+ * %RC_DROPIT: Do not process the request further
*/
-int
-nfsd_cache_lookup(struct svc_rqst *rqstp)
+int nfsd_cache_lookup(struct svc_rqst *rqstp)
{
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
struct svc_cacherep *rp, *found;
@@ -399,7 +412,7 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
rqstp->rq_cacherep = NULL;
if (type == RC_NOCACHE) {
nfsdstats.rcnocache++;
- return rtn;
+ goto out;
}
csum = nfsd_cache_csum(rqstp);
@@ -409,10 +422,8 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
* preallocate an entry.
*/
rp = nfsd_reply_cache_alloc(rqstp, csum, nn);
- if (!rp) {
- dprintk("nfsd: unable to allocate DRC entry!\n");
- return rtn;
- }
+ if (!rp)
+ goto out;
spin_lock(&b->cache_lock);
found = nfsd_cache_insert(b, rp, nn);
@@ -431,8 +442,10 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
/* go ahead and prune the cache */
prune_bucket(b, nn);
- out:
+
+out_unlock:
spin_unlock(&b->cache_lock);
+out:
return rtn;
found_entry:
@@ -442,13 +455,13 @@ found_entry:
/* Request being processed */
if (rp->c_state == RC_INPROG)
- goto out;
+ goto out_trace;
/* From the hall of fame of impractical attacks:
* Is this a user who tries to snoop on the cache? */
rtn = RC_DOIT;
if (!test_bit(RQ_SECURE, &rqstp->rq_flags) && rp->c_secure)
- goto out;
+ goto out_trace;
/* Compose RPC reply header */
switch (rp->c_type) {
@@ -460,21 +473,26 @@ found_entry:
break;
case RC_REPLBUFF:
if (!nfsd_cache_append(rqstp, &rp->c_replvec))
- goto out; /* should not happen */
+ goto out_unlock; /* should not happen */
rtn = RC_REPLY;
break;
default:
- printk(KERN_WARNING "nfsd: bad repcache type %d\n", rp->c_type);
- nfsd_reply_cache_free_locked(b, rp, nn);
+ WARN_ONCE(1, "nfsd: bad repcache type %d\n", rp->c_type);
}
- goto out;
+out_trace:
+ trace_nfsd_drc_found(nn, rqstp, rtn);
+ goto out_unlock;
}
-/*
- * Update a cache entry. This is called from nfsd_dispatch when
- * the procedure has been executed and the complete reply is in
- * rqstp->rq_res.
+/**
+ * nfsd_cache_update - Update an entry in the duplicate reply cache.
+ * @rqstp: svc_rqst with a finished Reply
+ * @cachetype: which cache to update
+ * @statp: Reply's status code
+ *
+ * This is called from nfsd_dispatch when the procedure has been
+ * executed and the complete reply is in rqstp->rq_res.
*
* We're copying around data here rather than swapping buffers because
* the toplevel loop requires max-sized buffers, which would be a waste
@@ -487,8 +505,7 @@ found_entry:
* nfsd failed to encode a reply that otherwise would have been cached.
* In this case, nfsd_cache_update is called with statp == NULL.
*/
-void
-nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
+void nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
{
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
struct svc_cacherep *rp = rqstp->rq_cacherep;
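The RC_* values documented in the new kernel-doc above are consumed by the dispatcher: RC_DOIT means execute the procedure, RC_REPLY means a cached reply has already been composed, and RC_DROPIT means a duplicate is still in flight. A hedged sketch of the caller side; dispatch_one is an illustrative name and the real nfsd_dispatch() differs in detail:

static int dispatch_one(struct svc_rqst *rqstp, __be32 *statp)
{
	switch (nfsd_cache_lookup(rqstp)) {
	case RC_DROPIT:		/* duplicate in progress: drop, client retries */
		return 0;
	case RC_REPLY:		/* duplicate: cached reply already in rq_res */
		return 1;
	case RC_DOIT:		/* miss: fall through and run the procedure */
		break;
	}

	/* ... decode args, execute, encode the reply ... */
	nfsd_cache_update(rqstp, rqstp->rq_cachetype, statp);
	return 1;
}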
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 3bb2db947d29..b68e96681522 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -238,7 +238,7 @@ static inline struct net *netns(struct file *file)
return file_inode(file)->i_sb->s_fs_info;
}
-/**
+/*
* write_unlock_ip - Release all locks used by a client
*
* Experimental.
@@ -277,7 +277,7 @@ static ssize_t write_unlock_ip(struct file *file, char *buf, size_t size)
return nlmsvc_unlock_all_by_ip(sap);
}
-/**
+/*
* write_unlock_fs - Release all locks on a local file system
*
* Experimental.
@@ -327,7 +327,7 @@ static ssize_t write_unlock_fs(struct file *file, char *buf, size_t size)
return error;
}
-/**
+/*
* write_filehandle - Get a variable-length NFS file handle by path
*
* On input, the buffer contains a '\n'-terminated C string comprised of
@@ -402,7 +402,7 @@ static ssize_t write_filehandle(struct file *file, char *buf, size_t size)
return mesg - buf;
}
-/**
+/*
* write_threads - Start NFSD, or report the current number of running threads
*
* Input:
@@ -452,7 +452,7 @@ static ssize_t write_threads(struct file *file, char *buf, size_t size)
return scnprintf(buf, SIMPLE_TRANSACTION_LIMIT, "%d\n", rv);
}
-/**
+/*
* write_pool_threads - Set or report the current number of threads per pool
*
* Input:
@@ -661,7 +661,7 @@ out:
return tlen + len;
}
-/**
+/*
* write_versions - Set or report the available NFS protocol versions
*
* Input:
@@ -811,7 +811,7 @@ static ssize_t __write_ports(struct file *file, char *buf, size_t size,
return -EINVAL;
}
-/**
+/*
* write_ports - Pass a socket file descriptor or transport name to listen on
*
* Input:
@@ -867,7 +867,7 @@ static ssize_t write_ports(struct file *file, char *buf, size_t size)
int nfsd_max_blksize;
-/**
+/*
* write_maxblksize - Set or report the current NFS blksize
*
* Input:
@@ -917,7 +917,7 @@ static ssize_t write_maxblksize(struct file *file, char *buf, size_t size)
nfsd_max_blksize);
}
-/**
+/*
* write_maxconn - Set or report the current max number of connections
*
* Input:
@@ -998,7 +998,7 @@ static ssize_t nfsd4_write_time(struct file *file, char *buf, size_t size,
return rv;
}
-/**
+/*
* write_leasetime - Set or report the current NFSv4 lease time
*
* Input:
@@ -1025,7 +1025,7 @@ static ssize_t write_leasetime(struct file *file, char *buf, size_t size)
return nfsd4_write_time(file, buf, size, &nn->nfsd4_lease, nn);
}
-/**
+/*
* write_gracetime - Set or report current NFSv4 grace period time
*
* As above, but sets the time of the NFSv4 grace period.
@@ -1069,7 +1069,7 @@ static ssize_t __write_recoverydir(struct file *file, char *buf, size_t size,
nfs4_recoverydir());
}
-/**
+/*
* write_recoverydir - Set or report the pathname of the recovery directory
*
* Input:
@@ -1101,7 +1101,7 @@ static ssize_t write_recoverydir(struct file *file, char *buf, size_t size)
return rv;
}
-/**
+/*
* write_v4_end_grace - release grace period for nfsd's v4.x lock manager
*
* Input:
@@ -1533,6 +1533,9 @@ static int __init init_nfsd(void)
goto out_free_slabs;
nfsd_fault_inject_init(); /* nfsd fault injection controls */
nfsd_stat_init(); /* Statistics */
+ retval = nfsd_drc_slab_create();
+ if (retval)
+ goto out_free_stat;
nfsd_lockd_init(); /* lockd->nfsd callbacks */
retval = create_proc_exports_entry();
if (retval)
@@ -1546,6 +1549,8 @@ out_free_all:
remove_proc_entry("fs/nfs", NULL);
out_free_lockd:
nfsd_lockd_shutdown();
+ nfsd_drc_slab_free();
+out_free_stat:
nfsd_stat_shutdown();
nfsd_fault_inject_cleanup();
nfsd4_exit_pnfs();
@@ -1560,6 +1565,7 @@ out_unregister_pernet:
static void __exit exit_nfsd(void)
{
+ nfsd_drc_slab_free();
remove_proc_entry("fs/nfs/exports", NULL);
remove_proc_entry("fs/nfs", NULL);
nfsd_stat_shutdown();
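The init_nfsd() hunk above slots nfsd_drc_slab_create() into the function's goto-based unwind: each setup step gets a label, and failure paths fall through the labels so teardown runs in exact reverse order of construction. The idiom in miniature, with illustrative setup_*/teardown_* names:

static int __init mymod_init(void)
{
	int err;

	err = setup_stats();
	if (err)
		return err;
	err = setup_slab();
	if (err)
		goto out_stats;
	err = setup_proc();
	if (err)
		goto out_slab;
	return 0;

out_slab:
	teardown_slab();	/* unwind in reverse order of setup */
out_stats:
	teardown_stats();
	return err;
}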
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index 2ab5569126b8..36cdd81b6688 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -88,6 +88,8 @@ int nfsd_pool_stats_release(struct inode *, struct file *);
void nfsd_destroy(struct net *net);
+bool i_am_nfsd(void);
+
struct nfsdfs_client {
struct kref cl_ref;
void (*cl_release)(struct kref *kref);
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index ca9fd348548b..b603dfcdd361 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -601,6 +601,11 @@ static const struct svc_serv_ops nfsd_thread_sv_ops = {
.svo_module = THIS_MODULE,
};
+bool i_am_nfsd(void)
+{
+ return kthread_func(current) == nfsd;
+}
+
int nfsd_create_serv(struct net *net)
{
int error;
@@ -1011,6 +1016,7 @@ nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
*statp = rpc_garbage_args;
return 1;
}
+ rqstp->rq_lease_breaker = NULL;
/*
* Give the xdr decoder a chance to change this if it wants
* (necessary in the NFSv4.0 compound case)
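i_am_nfsd() relies on kthread_func(), introduced in the same release, which reports the thread function a kthread was created with; that answers "is current an nfsd thread?" without a per-task flag. A minimal sketch of the pattern, assuming kernel context and an illustrative worker_fn:

#include <linux/kthread.h>
#include <linux/sched.h>

static int worker_fn(void *data)
{
	/* ... service loop ... */
	return 0;
}

/* True only in threads spawned via kthread_run(worker_fn, ...). */
static bool i_am_worker(void)
{
	return kthread_func(current) == worker_fn;
}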
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 68d3f30ee760..3b408532a5dc 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -64,13 +64,6 @@ typedef struct {
refcount_t sc_count;
} copy_stateid_t;
-#define STATEID_FMT "(%08x/%08x/%08x/%08x)"
-#define STATEID_VAL(s) \
- (s)->si_opaque.so_clid.cl_boot, \
- (s)->si_opaque.so_clid.cl_id, \
- (s)->si_opaque.so_id, \
- (s)->si_generation
-
struct nfsd4_callback {
struct nfs4_client *cb_clp;
struct rpc_message cb_msg;
diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
index 78c574251c60..1861db1bdc67 100644
--- a/fs/nfsd/trace.h
+++ b/fs/nfsd/trace.h
@@ -277,6 +277,7 @@ DECLARE_EVENT_CLASS(nfsd_stateid_class,
DEFINE_EVENT(nfsd_stateid_class, nfsd_##name, \
TP_PROTO(stateid_t *stp), \
TP_ARGS(stp))
+
DEFINE_STATEID_EVENT(layoutstate_alloc);
DEFINE_STATEID_EVENT(layoutstate_unhash);
DEFINE_STATEID_EVENT(layoutstate_free);
@@ -288,6 +289,138 @@ DEFINE_STATEID_EVENT(layout_recall_done);
DEFINE_STATEID_EVENT(layout_recall_fail);
DEFINE_STATEID_EVENT(layout_recall_release);
+DEFINE_STATEID_EVENT(deleg_open);
+DEFINE_STATEID_EVENT(deleg_none);
+DEFINE_STATEID_EVENT(deleg_break);
+DEFINE_STATEID_EVENT(deleg_recall);
+
+DECLARE_EVENT_CLASS(nfsd_stateseqid_class,
+ TP_PROTO(u32 seqid, const stateid_t *stp),
+ TP_ARGS(seqid, stp),
+ TP_STRUCT__entry(
+ __field(u32, seqid)
+ __field(u32, cl_boot)
+ __field(u32, cl_id)
+ __field(u32, si_id)
+ __field(u32, si_generation)
+ ),
+ TP_fast_assign(
+ __entry->seqid = seqid;
+ __entry->cl_boot = stp->si_opaque.so_clid.cl_boot;
+ __entry->cl_id = stp->si_opaque.so_clid.cl_id;
+ __entry->si_id = stp->si_opaque.so_id;
+ __entry->si_generation = stp->si_generation;
+ ),
+ TP_printk("seqid=%u client %08x:%08x stateid %08x:%08x",
+ __entry->seqid, __entry->cl_boot, __entry->cl_id,
+ __entry->si_id, __entry->si_generation)
+)
+
+#define DEFINE_STATESEQID_EVENT(name) \
+DEFINE_EVENT(nfsd_stateseqid_class, nfsd_##name, \
+ TP_PROTO(u32 seqid, const stateid_t *stp), \
+ TP_ARGS(seqid, stp))
+
+DEFINE_STATESEQID_EVENT(preprocess);
+DEFINE_STATESEQID_EVENT(open_confirm);
+
+DECLARE_EVENT_CLASS(nfsd_clientid_class,
+ TP_PROTO(const clientid_t *clid),
+ TP_ARGS(clid),
+ TP_STRUCT__entry(
+ __field(u32, cl_boot)
+ __field(u32, cl_id)
+ ),
+ TP_fast_assign(
+ __entry->cl_boot = clid->cl_boot;
+ __entry->cl_id = clid->cl_id;
+ ),
+ TP_printk("client %08x:%08x", __entry->cl_boot, __entry->cl_id)
+)
+
+#define DEFINE_CLIENTID_EVENT(name) \
+DEFINE_EVENT(nfsd_clientid_class, nfsd_clid_##name, \
+ TP_PROTO(const clientid_t *clid), \
+ TP_ARGS(clid))
+
+DEFINE_CLIENTID_EVENT(expired);
+DEFINE_CLIENTID_EVENT(purged);
+DEFINE_CLIENTID_EVENT(renew);
+DEFINE_CLIENTID_EVENT(stale);
+
+DECLARE_EVENT_CLASS(nfsd_net_class,
+ TP_PROTO(const struct nfsd_net *nn),
+ TP_ARGS(nn),
+ TP_STRUCT__entry(
+ __field(unsigned long long, boot_time)
+ ),
+ TP_fast_assign(
+ __entry->boot_time = nn->boot_time;
+ ),
+ TP_printk("boot_time=%16llx", __entry->boot_time)
+)
+
+#define DEFINE_NET_EVENT(name) \
+DEFINE_EVENT(nfsd_net_class, nfsd_##name, \
+ TP_PROTO(const struct nfsd_net *nn), \
+ TP_ARGS(nn))
+
+DEFINE_NET_EVENT(grace_start);
+DEFINE_NET_EVENT(grace_complete);
+
+DECLARE_EVENT_CLASS(nfsd_clid_class,
+ TP_PROTO(const struct nfsd_net *nn,
+ unsigned int namelen,
+ const unsigned char *namedata),
+ TP_ARGS(nn, namelen, namedata),
+ TP_STRUCT__entry(
+ __field(unsigned long long, boot_time)
+ __field(unsigned int, namelen)
+ __dynamic_array(unsigned char, name, namelen)
+ ),
+ TP_fast_assign(
+ __entry->boot_time = nn->boot_time;
+ __entry->namelen = namelen;
+ memcpy(__get_dynamic_array(name), namedata, namelen);
+ ),
+ TP_printk("boot_time=%16llx nfs4_clientid=%.*s",
+ __entry->boot_time, __entry->namelen, __get_str(name))
+)
+
+#define DEFINE_CLID_EVENT(name) \
+DEFINE_EVENT(nfsd_clid_class, nfsd_clid_##name, \
+ TP_PROTO(const struct nfsd_net *nn, \
+ unsigned int namelen, \
+ const unsigned char *namedata), \
+ TP_ARGS(nn, namelen, namedata))
+
+DEFINE_CLID_EVENT(find);
+DEFINE_CLID_EVENT(reclaim);
+
+TRACE_EVENT(nfsd_clid_inuse_err,
+ TP_PROTO(const struct nfs4_client *clp),
+ TP_ARGS(clp),
+ TP_STRUCT__entry(
+ __field(u32, cl_boot)
+ __field(u32, cl_id)
+ __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+ __field(unsigned int, namelen)
+ __dynamic_array(unsigned char, name, clp->cl_name.len)
+ ),
+ TP_fast_assign(
+ __entry->cl_boot = clp->cl_clientid.cl_boot;
+ __entry->cl_id = clp->cl_clientid.cl_id;
+ memcpy(__entry->addr, &clp->cl_addr,
+ sizeof(struct sockaddr_in6));
+ __entry->namelen = clp->cl_name.len;
+ memcpy(__get_dynamic_array(name), clp->cl_name.data,
+ clp->cl_name.len);
+ ),
+ TP_printk("nfs4_clientid %.*s already in use by %pISpc, client %08x:%08x",
+ __entry->namelen, __get_str(name), __entry->addr,
+ __entry->cl_boot, __entry->cl_id)
+)
+
TRACE_DEFINE_ENUM(NFSD_FILE_HASHED);
TRACE_DEFINE_ENUM(NFSD_FILE_PENDING);
TRACE_DEFINE_ENUM(NFSD_FILE_BREAK_READ);
@@ -432,6 +565,218 @@ TRACE_EVENT(nfsd_file_fsnotify_handle_event,
__entry->nlink, __entry->mode, __entry->mask)
);
+#include "cache.h"
+
+TRACE_DEFINE_ENUM(RC_DROPIT);
+TRACE_DEFINE_ENUM(RC_REPLY);
+TRACE_DEFINE_ENUM(RC_DOIT);
+
+#define show_drc_retval(x) \
+ __print_symbolic(x, \
+ { RC_DROPIT, "DROPIT" }, \
+ { RC_REPLY, "REPLY" }, \
+ { RC_DOIT, "DOIT" })
+
+TRACE_EVENT(nfsd_drc_found,
+ TP_PROTO(
+ const struct nfsd_net *nn,
+ const struct svc_rqst *rqstp,
+ int result
+ ),
+ TP_ARGS(nn, rqstp, result),
+ TP_STRUCT__entry(
+ __field(unsigned long long, boot_time)
+ __field(unsigned long, result)
+ __field(u32, xid)
+ ),
+ TP_fast_assign(
+ __entry->boot_time = nn->boot_time;
+ __entry->result = result;
+ __entry->xid = be32_to_cpu(rqstp->rq_xid);
+ ),
+ TP_printk("boot_time=%16llx xid=0x%08x result=%s",
+ __entry->boot_time, __entry->xid,
+ show_drc_retval(__entry->result))
+);
+
+TRACE_EVENT(nfsd_drc_mismatch,
+ TP_PROTO(
+ const struct nfsd_net *nn,
+ const struct svc_cacherep *key,
+ const struct svc_cacherep *rp
+ ),
+ TP_ARGS(nn, key, rp),
+ TP_STRUCT__entry(
+ __field(unsigned long long, boot_time)
+ __field(u32, xid)
+ __field(u32, cached)
+ __field(u32, ingress)
+ ),
+ TP_fast_assign(
+ __entry->boot_time = nn->boot_time;
+ __entry->xid = be32_to_cpu(key->c_key.k_xid);
+ __entry->cached = (__force u32)key->c_key.k_csum;
+ __entry->ingress = (__force u32)rp->c_key.k_csum;
+ ),
+ TP_printk("boot_time=%16llx xid=0x%08x cached-csum=0x%08x ingress-csum=0x%08x",
+ __entry->boot_time, __entry->xid, __entry->cached,
+ __entry->ingress)
+);
+
+TRACE_EVENT(nfsd_cb_args,
+ TP_PROTO(
+ const struct nfs4_client *clp,
+ const struct nfs4_cb_conn *conn
+ ),
+ TP_ARGS(clp, conn),
+ TP_STRUCT__entry(
+ __field(u32, cl_boot)
+ __field(u32, cl_id)
+ __field(u32, prog)
+ __field(u32, ident)
+ __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+ ),
+ TP_fast_assign(
+ __entry->cl_boot = clp->cl_clientid.cl_boot;
+ __entry->cl_id = clp->cl_clientid.cl_id;
+ __entry->prog = conn->cb_prog;
+ __entry->ident = conn->cb_ident;
+ memcpy(__entry->addr, &conn->cb_addr,
+ sizeof(struct sockaddr_in6));
+ ),
+ TP_printk("client %08x:%08x callback addr=%pISpc prog=%u ident=%u",
+ __entry->cl_boot, __entry->cl_id,
+ __entry->addr, __entry->prog, __entry->ident)
+);
+
+TRACE_EVENT(nfsd_cb_nodelegs,
+ TP_PROTO(const struct nfs4_client *clp),
+ TP_ARGS(clp),
+ TP_STRUCT__entry(
+ __field(u32, cl_boot)
+ __field(u32, cl_id)
+ ),
+ TP_fast_assign(
+ __entry->cl_boot = clp->cl_clientid.cl_boot;
+ __entry->cl_id = clp->cl_clientid.cl_id;
+ ),
+ TP_printk("client %08x:%08x", __entry->cl_boot, __entry->cl_id)
+)
+
+TRACE_DEFINE_ENUM(NFSD4_CB_UP);
+TRACE_DEFINE_ENUM(NFSD4_CB_UNKNOWN);
+TRACE_DEFINE_ENUM(NFSD4_CB_DOWN);
+TRACE_DEFINE_ENUM(NFSD4_CB_FAULT);
+
+#define show_cb_state(val) \
+ __print_symbolic(val, \
+ { NFSD4_CB_UP, "UP" }, \
+ { NFSD4_CB_UNKNOWN, "UNKNOWN" }, \
+ { NFSD4_CB_DOWN, "DOWN" }, \
+ { NFSD4_CB_FAULT, "FAULT"})
+
+DECLARE_EVENT_CLASS(nfsd_cb_class,
+ TP_PROTO(const struct nfs4_client *clp),
+ TP_ARGS(clp),
+ TP_STRUCT__entry(
+ __field(unsigned long, state)
+ __field(u32, cl_boot)
+ __field(u32, cl_id)
+ __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+ ),
+ TP_fast_assign(
+ __entry->state = clp->cl_cb_state;
+ __entry->cl_boot = clp->cl_clientid.cl_boot;
+ __entry->cl_id = clp->cl_clientid.cl_id;
+ memcpy(__entry->addr, &clp->cl_cb_conn.cb_addr,
+ sizeof(struct sockaddr_in6));
+ ),
+ TP_printk("addr=%pISpc client %08x:%08x state=%s",
+ __entry->addr, __entry->cl_boot, __entry->cl_id,
+ show_cb_state(__entry->state))
+);
+
+#define DEFINE_NFSD_CB_EVENT(name) \
+DEFINE_EVENT(nfsd_cb_class, nfsd_cb_##name, \
+ TP_PROTO(const struct nfs4_client *clp), \
+ TP_ARGS(clp))
+
+DEFINE_NFSD_CB_EVENT(setup);
+DEFINE_NFSD_CB_EVENT(state);
+DEFINE_NFSD_CB_EVENT(shutdown);
+
+TRACE_EVENT(nfsd_cb_setup_err,
+ TP_PROTO(
+ const struct nfs4_client *clp,
+ long error
+ ),
+ TP_ARGS(clp, error),
+ TP_STRUCT__entry(
+ __field(long, error)
+ __field(u32, cl_boot)
+ __field(u32, cl_id)
+ __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+ ),
+ TP_fast_assign(
+ __entry->error = error;
+ __entry->cl_boot = clp->cl_clientid.cl_boot;
+ __entry->cl_id = clp->cl_clientid.cl_id;
+ memcpy(__entry->addr, &clp->cl_cb_conn.cb_addr,
+ sizeof(struct sockaddr_in6));
+ ),
+ TP_printk("addr=%pISpc client %08x:%08x error=%ld",
+ __entry->addr, __entry->cl_boot, __entry->cl_id, __entry->error)
+);
+
+TRACE_EVENT(nfsd_cb_work,
+ TP_PROTO(
+ const struct nfs4_client *clp,
+ const char *procedure
+ ),
+ TP_ARGS(clp, procedure),
+ TP_STRUCT__entry(
+ __field(u32, cl_boot)
+ __field(u32, cl_id)
+ __string(procedure, procedure)
+ __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+ ),
+ TP_fast_assign(
+ __entry->cl_boot = clp->cl_clientid.cl_boot;
+ __entry->cl_id = clp->cl_clientid.cl_id;
+ __assign_str(procedure, procedure);
+ memcpy(__entry->addr, &clp->cl_cb_conn.cb_addr,
+ sizeof(struct sockaddr_in6));
+ ),
+ TP_printk("addr=%pISpc client %08x:%08x procedure=%s",
+ __entry->addr, __entry->cl_boot, __entry->cl_id,
+ __get_str(procedure))
+);
+
+TRACE_EVENT(nfsd_cb_done,
+ TP_PROTO(
+ const struct nfs4_client *clp,
+ int status
+ ),
+ TP_ARGS(clp, status),
+ TP_STRUCT__entry(
+ __field(u32, cl_boot)
+ __field(u32, cl_id)
+ __field(int, status)
+ __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+ ),
+ TP_fast_assign(
+ __entry->cl_boot = clp->cl_clientid.cl_boot;
+ __entry->cl_id = clp->cl_clientid.cl_id;
+ __entry->status = status;
+ memcpy(__entry->addr, &clp->cl_cb_conn.cb_addr,
+ sizeof(struct sockaddr_in6));
+ ),
+ TP_printk("addr=%pISpc client %08x:%08x status=%d",
+ __entry->addr, __entry->cl_boot, __entry->cl_id,
+ __entry->status)
+);
+
#endif /* _NFSD_TRACE_H */
#undef TRACE_INCLUDE_PATH
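Each DEFINE_EVENT above stamps out a trace_nfsd_<name>() inline with its class's prototype, which is what lets the dprintk call sites earlier in this patch shrink to one-liners. The call sites this series introduces look like:

/* laundromat: client still in use, cannot be expired */
trace_nfsd_clid_expired(&clp->cl_clientid);

/* seqid-bearing operation: record both seqid and stateid */
trace_nfsd_preprocess(seqid, stateid);

/* reply-cache hit: record which disposition was chosen */
trace_nfsd_drc_found(nn, rqstp, rtn);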
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index ceeb3b441844..28009ec54420 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -14,6 +14,7 @@
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/uio.h>
+#include <linux/fiemap.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
@@ -996,7 +997,7 @@ int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
unsigned int blkbits = inode->i_blkbits;
int ret, n;
- ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
+ ret = fiemap_prep(inode, fieinfo, start, &len, 0);
if (ret)
return ret;
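fiemap_prep(), which replaces fiemap_check_flags() throughout this series, validates the caller's flags against the supported set, clamps start/len against i_size, and performs the FIEMAP_FLAG_SYNC writeback itself (which is why ovl_fiemap() drops its manual filemap_write_and_wait() later in this patch). A hedged sketch of a filesystem ->fiemap using it; the myfs_* name is illustrative:

#include <linux/fiemap.h>

static int myfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		       u64 start, u64 len)
{
	int ret;

	/* 0: no extra flags supported beyond the common set */
	ret = fiemap_prep(inode, fieinfo, start, &len, 0);
	if (ret)
		return ret;

	/* ... walk extents, emit via fiemap_fill_next_extent() ... */
	return 0;
}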
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 445eef41bfaf..91b58c897f92 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -2780,6 +2780,8 @@ int nilfs_attach_log_writer(struct super_block *sb, struct nilfs_root *root)
if (!nilfs->ns_writer)
return -ENOMEM;
+ inode_attach_wb(nilfs->ns_bdev->bd_inode, NULL);
+
err = nilfs_segctor_start_thread(nilfs->ns_writer);
if (err) {
kfree(nilfs->ns_writer);
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
index c18459cea6f4..85eda539b35f 100644
--- a/fs/notify/fanotify/fanotify.c
+++ b/fs/notify/fanotify/fanotify.c
@@ -70,7 +70,7 @@ static bool fanotify_name_event_equal(struct fanotify_name_event *fne1,
return !memcmp(fne1->name, fne2->name, fne1->name_len);
}
-static bool should_merge(struct fsnotify_event *old_fsn,
+static bool fanotify_should_merge(struct fsnotify_event *old_fsn,
struct fsnotify_event *new_fsn)
{
struct fanotify_event *old, *new;
@@ -129,7 +129,7 @@ static int fanotify_merge(struct list_head *list, struct fsnotify_event *event)
return 0;
list_for_each_entry_reverse(test_event, list, list) {
- if (should_merge(test_event, event)) {
+ if (fanotify_should_merge(test_event, event)) {
FANOTIFY_E(test_event)->mask |= new->mask;
return 1;
}
@@ -232,6 +232,10 @@ static u32 fanotify_group_event_mask(struct fsnotify_group *group,
if (!fsnotify_iter_should_report_type(iter_info, type))
continue;
mark = iter_info->marks[type];
+
+ /* Apply ignore mask regardless of ISDIR and ON_CHILD flags */
+ marks_ignored_mask |= mark->ignored_mask;
+
/*
* If the event is on dir and this mark doesn't care about
* events on dir, don't send it!
@@ -249,7 +253,6 @@ static u32 fanotify_group_event_mask(struct fsnotify_group *group,
continue;
marks_mask |= mark->mask;
- marks_ignored_mask |= mark->ignored_mask;
}
test_mask = event_mask & marks_mask & ~marks_ignored_mask;
diff --git a/fs/notify/fanotify/fanotify.h b/fs/notify/fanotify/fanotify.h
index 35bfbf4a7aac..8ce7ccfc4b0d 100644
--- a/fs/notify/fanotify/fanotify.h
+++ b/fs/notify/fanotify/fanotify.h
@@ -89,7 +89,7 @@ struct fanotify_name_event {
__kernel_fsid_t fsid;
struct fanotify_fh dir_fh;
u8 name_len;
- char name[0];
+ char name[];
};
static inline struct fanotify_name_event *
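char name[0] above is the old GNU zero-length-array form; char name[] is the C99 flexible array member, which keeps sizeof honest and lets compilers bounds-check. A minimal allocation sketch, assuming kernel context; name_event is an illustrative type, not the fanotify one:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/string.h>

struct name_event {
	u8 name_len;
	char name[];	/* flexible array member: must be last */
};

static struct name_event *name_event_alloc(const char *name, u8 len)
{
	/* struct_size() computes sizeof(*e) + len with overflow checking. */
	struct name_event *e = kmalloc(struct_size(e, name, len), GFP_KERNEL);

	if (e) {
		e->name_len = len;
		memcpy(e->name, name, len);
	}
	return e;
}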
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 42cb794c62ac..63b5dffdca9e 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -328,7 +328,7 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
ret = -EFAULT;
/*
* Sanity check copy size in case get_one_event() and
- * fill_event_metadata() event_len sizes ever get out of sync.
+ * event_len sizes ever get out of sync.
*/
if (WARN_ON_ONCE(metadata.event_len > count))
goto out_close_fd;
@@ -487,8 +487,10 @@ static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t
group = file->private_data;
- if (count > sizeof(response))
- count = sizeof(response);
+ if (count < sizeof(response))
+ return -EINVAL;
+
+ count = sizeof(response);
pr_debug("%s: group=%p count=%zu\n", __func__, group, count);
diff --git a/fs/notify/fdinfo.c b/fs/notify/fdinfo.c
index ef83f4020554..f0d6b54be412 100644
--- a/fs/notify/fdinfo.c
+++ b/fs/notify/fdinfo.c
@@ -11,7 +11,6 @@
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/seq_file.h>
-#include <linux/proc_fs.h>
#include <linux/exportfs.h>
#include "inotify/inotify.h"
diff --git a/fs/notify/group.c b/fs/notify/group.c
index 133f723aca07..a4a4b1c64d32 100644
--- a/fs/notify/group.c
+++ b/fs/notify/group.c
@@ -25,6 +25,7 @@ static void fsnotify_final_destroy_group(struct fsnotify_group *group)
group->ops->free_group_priv(group);
mem_cgroup_put(group->memcg);
+ mutex_destroy(&group->mark_mutex);
kfree(group);
}
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 81ffc8629fc4..f88bbcc9efeb 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -764,20 +764,18 @@ SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
struct fsnotify_group *group;
struct inotify_inode_mark *i_mark;
struct fd f;
- int ret = 0;
+ int ret = -EINVAL;
f = fdget(fd);
if (unlikely(!f.file))
return -EBADF;
/* verify that this is indeed an inotify instance */
- ret = -EINVAL;
if (unlikely(f.file->f_op != &inotify_fops))
goto out;
group = f.file->private_data;
- ret = -EINVAL;
i_mark = inotify_idr_find(group, wd);
if (unlikely(!i_mark))
goto out;
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index 1d96216dffd1..8387937b9d01 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -325,13 +325,16 @@ static void fsnotify_put_mark_wake(struct fsnotify_mark *mark)
}
bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info)
+ __releases(&fsnotify_mark_srcu)
{
int type;
fsnotify_foreach_obj_type(type) {
/* This can fail if mark is being removed */
- if (!fsnotify_get_mark_safe(iter_info->marks[type]))
+ if (!fsnotify_get_mark_safe(iter_info->marks[type])) {
+ __release(&fsnotify_mark_srcu);
goto fail;
+ }
}
/*
@@ -350,6 +353,7 @@ fail:
}
void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info)
+ __acquires(&fsnotify_mark_srcu)
{
int type;
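__releases() and __acquires() are sparse annotations, not runtime code: they record that fsnotify_prepare_user_wait() exits with fsnotify_mark_srcu dropped and that fsnotify_finish_user_wait() retakes it, while the added __release() balances the annotation on the early-failure path. The pattern in miniature, assuming kernel context:

#include <linux/srcu.h>

DEFINE_STATIC_SRCU(my_srcu);

static void begin_blocking(int idx)
	__releases(&my_srcu)
{
	srcu_read_unlock(&my_srcu, idx);	/* really drops the lock */
}

static int end_blocking(void)
	__acquires(&my_srcu)
{
	return srcu_read_lock(&my_srcu);	/* retakes it */
}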
diff --git a/fs/nsfs.c b/fs/nsfs.c
index 4f1205725cfe..800c1d0eb0d0 100644
--- a/fs/nsfs.c
+++ b/fs/nsfs.c
@@ -229,6 +229,11 @@ int ns_get_name(char *buf, size_t size, struct task_struct *task,
return res;
}
+bool proc_ns_file(const struct file *file)
+{
+ return file->f_op == &ns_file_operations;
+}
+
struct file *proc_ns_fget(int fd)
{
struct file *file;
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 2c512b40a940..79a231719460 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -1441,22 +1441,6 @@ static void o2net_rx_until_empty(struct work_struct *work)
sc_put(sc);
}
-static int o2net_set_nodelay(struct socket *sock)
-{
- int val = 1;
-
- return kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
- (void *)&val, sizeof(val));
-}
-
-static int o2net_set_usertimeout(struct socket *sock)
-{
- int user_timeout = O2NET_TCP_USER_TIMEOUT;
-
- return kernel_setsockopt(sock, SOL_TCP, TCP_USER_TIMEOUT,
- (void *)&user_timeout, sizeof(user_timeout));
-}
-
static void o2net_initialize_handshake(void)
{
o2net_hand->o2hb_heartbeat_timeout_ms = cpu_to_be32(
@@ -1636,17 +1620,8 @@ static void o2net_start_connect(struct work_struct *work)
goto out;
}
- ret = o2net_set_nodelay(sc->sc_sock);
- if (ret) {
- mlog(ML_ERROR, "setting TCP_NODELAY failed with %d\n", ret);
- goto out;
- }
-
- ret = o2net_set_usertimeout(sock);
- if (ret) {
- mlog(ML_ERROR, "set TCP_USER_TIMEOUT failed with %d\n", ret);
- goto out;
- }
+ tcp_sock_set_nodelay(sc->sc_sock->sk);
+ tcp_sock_set_user_timeout(sock->sk, O2NET_TCP_USER_TIMEOUT);
o2net_register_callbacks(sc->sc_sock->sk, sc);
@@ -1832,17 +1807,8 @@ static int o2net_accept_one(struct socket *sock, int *more)
*more = 1;
new_sock->sk->sk_allocation = GFP_ATOMIC;
- ret = o2net_set_nodelay(new_sock);
- if (ret) {
- mlog(ML_ERROR, "setting TCP_NODELAY failed with %d\n", ret);
- goto out;
- }
-
- ret = o2net_set_usertimeout(new_sock);
- if (ret) {
- mlog(ML_ERROR, "set TCP_USER_TIMEOUT failed with %d\n", ret);
- goto out;
- }
+ tcp_sock_set_nodelay(new_sock->sk);
+ tcp_sock_set_user_timeout(new_sock->sk, O2NET_TCP_USER_TIMEOUT);
ret = new_sock->ops->getname(new_sock, (struct sockaddr *) &sin, 1);
if (ret < 0)
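kernel_setsockopt() was removed in this cycle; per-option helpers such as tcp_sock_set_nodelay() and tcp_sock_set_user_timeout() act directly on the struct sock and cannot fail, which is why both error branches above disappear. The conversion pattern, as a sketch with an illustrative timeout value:

#include <linux/net.h>
#include <linux/tcp.h>

static void tune_tcp(struct socket *sock)
{
	/* Formerly: kernel_setsockopt(sock, SOL_TCP, ...) plus error checks. */
	tcp_sock_set_nodelay(sock->sk);
	tcp_sock_set_user_timeout(sock->sk, 30000);	/* milliseconds */
}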
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index e3e2d1b2af51..a94852af5510 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -733,8 +733,6 @@ static int ocfs2_fiemap_inline(struct inode *inode, struct buffer_head *di_bh,
return 0;
}
-#define OCFS2_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC)
-
int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u64 map_start, u64 map_len)
{
@@ -746,7 +744,7 @@ int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
struct buffer_head *di_bh = NULL;
struct ocfs2_extent_rec rec;
- ret = fiemap_check_flags(fieinfo, OCFS2_FIEMAP_FLAGS);
+ ret = fiemap_prep(inode, fieinfo, map_start, &map_len, 0);
if (ret)
return ret;
diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c
index 3a44e461828a..25cabbfe87fc 100644
--- a/fs/ocfs2/mmap.c
+++ b/fs/ocfs2/mmap.c
@@ -62,7 +62,7 @@ static vm_fault_t __ocfs2_page_mkwrite(struct file *file,
last_index = (size - 1) >> PAGE_SHIFT;
/*
- * There are cases that lead to the page no longer bebongs to the
+ * There are cases that lead to the page no longer belonging to the
* mapping.
* 1) pagecache truncates locally due to memory pressure.
* 2) pagecache truncates when another is taking EX lock against
diff --git a/fs/orangefs/orangefs-bufmap.c b/fs/orangefs/orangefs-bufmap.c
index 2bb916d68576..538e839590ef 100644
--- a/fs/orangefs/orangefs-bufmap.c
+++ b/fs/orangefs/orangefs-bufmap.c
@@ -168,10 +168,7 @@ static DEFINE_SPINLOCK(orangefs_bufmap_lock);
static void
orangefs_bufmap_unmap(struct orangefs_bufmap *bufmap)
{
- int i;
-
- for (i = 0; i < bufmap->page_count; i++)
- put_page(bufmap->page_array[i]);
+ unpin_user_pages(bufmap->page_array, bufmap->page_count);
}
static void
@@ -268,7 +265,7 @@ orangefs_bufmap_map(struct orangefs_bufmap *bufmap,
int offset = 0, ret, i;
/* map the pages */
- ret = get_user_pages_fast((unsigned long)user_desc->ptr,
+ ret = pin_user_pages_fast((unsigned long)user_desc->ptr,
bufmap->page_count, FOLL_WRITE, bufmap->page_array);
if (ret < 0)
@@ -280,7 +277,7 @@ orangefs_bufmap_map(struct orangefs_bufmap *bufmap,
for (i = 0; i < ret; i++) {
SetPageError(bufmap->page_array[i]);
- put_page(bufmap->page_array[i]);
+ unpin_user_page(bufmap->page_array[i]);
}
return -ENOMEM;
}
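The orangefs conversion is part of the gup-to-pup migration: pages taken with pin_user_pages_fast() carry FOLL_PIN and must be released with unpin_user_page()/unpin_user_pages(), never put_page(), so the kernel can account DMA-style pins separately. A minimal sketch of the pairing, assuming kernel context:

#include <linux/mm.h>

static int map_user_buffer(unsigned long uaddr, int nr, struct page **pages)
{
	int got = pin_user_pages_fast(uaddr, nr, FOLL_WRITE, pages);

	if (got < 0)
		return got;
	if (got != nr) {
		/* Partial pin: release with the matching unpin API. */
		unpin_user_pages(pages, got);
		return -ENOMEM;
	}
	return 0;
}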
diff --git a/fs/orangefs/orangefs-mod.c b/fs/orangefs/orangefs-mod.c
index c010c1fddafc..289b648ae196 100644
--- a/fs/orangefs/orangefs-mod.c
+++ b/fs/orangefs/orangefs-mod.c
@@ -79,7 +79,7 @@ DECLARE_WAIT_QUEUE_HEAD(orangefs_request_list_waitq);
static int __init orangefs_init(void)
{
- int ret = -1;
+ int ret;
__u32 i = 0;
if (op_timeout_secs < 0)
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index 9709cf22cab3..79dd052c7dbf 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -47,7 +47,7 @@ int ovl_copy_xattr(struct dentry *old, struct dentry *new)
{
ssize_t list_size, size, value_size = 0;
char *buf, *name, *value = NULL;
- int uninitialized_var(error);
+ int error = 0;
size_t slen;
if (!(old->d_inode->i_opflags & IOP_XATTR) ||
@@ -584,9 +584,10 @@ static int ovl_copy_up_workdir(struct ovl_copy_up_ctx *c)
.link = c->link
};
- err = ovl_lock_rename_workdir(c->workdir, c->destdir);
- if (err)
- return err;
+ /* workdir and destdir could be the same when copying up to indexdir */
+ err = -EIO;
+ if (lock_rename(c->workdir, c->destdir) != NULL)
+ goto unlock;
err = ovl_prep_cu_creds(c->dentry, &cc);
if (err)
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index 279009dee366..1bba4813f9cb 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -62,35 +62,59 @@ struct dentry *ovl_lookup_temp(struct dentry *workdir)
}
/* caller holds i_mutex on workdir */
-static struct dentry *ovl_whiteout(struct dentry *workdir)
+static struct dentry *ovl_whiteout(struct ovl_fs *ofs)
{
int err;
struct dentry *whiteout;
+ struct dentry *workdir = ofs->workdir;
struct inode *wdir = workdir->d_inode;
- whiteout = ovl_lookup_temp(workdir);
- if (IS_ERR(whiteout))
- return whiteout;
+ if (!ofs->whiteout) {
+ whiteout = ovl_lookup_temp(workdir);
+ if (IS_ERR(whiteout))
+ goto out;
- err = ovl_do_whiteout(wdir, whiteout);
- if (err) {
- dput(whiteout);
- whiteout = ERR_PTR(err);
+ err = ovl_do_whiteout(wdir, whiteout);
+ if (err) {
+ dput(whiteout);
+ whiteout = ERR_PTR(err);
+ goto out;
+ }
+ ofs->whiteout = whiteout;
}
+ if (ofs->share_whiteout) {
+ whiteout = ovl_lookup_temp(workdir);
+ if (IS_ERR(whiteout))
+ goto out;
+
+ err = ovl_do_link(ofs->whiteout, wdir, whiteout);
+ if (!err)
+ goto out;
+
+ if (err != -EMLINK) {
+ pr_warn("Failed to link whiteout - disabling whiteout inode sharing(nlink=%u, err=%i)\n",
+ ofs->whiteout->d_inode->i_nlink, err);
+ ofs->share_whiteout = false;
+ }
+ dput(whiteout);
+ }
+ whiteout = ofs->whiteout;
+ ofs->whiteout = NULL;
+out:
return whiteout;
}
/* Caller must hold i_mutex on both workdir and dir */
-int ovl_cleanup_and_whiteout(struct dentry *workdir, struct inode *dir,
+int ovl_cleanup_and_whiteout(struct ovl_fs *ofs, struct inode *dir,
struct dentry *dentry)
{
- struct inode *wdir = workdir->d_inode;
+ struct inode *wdir = ofs->workdir->d_inode;
struct dentry *whiteout;
int err;
int flags = 0;
- whiteout = ovl_whiteout(workdir);
+ whiteout = ovl_whiteout(ofs);
err = PTR_ERR(whiteout);
if (IS_ERR(whiteout))
return err;
@@ -262,6 +286,8 @@ static int ovl_instantiate(struct dentry *dentry, struct inode *inode,
inode = ovl_get_inode(dentry->d_sb, &oip);
if (IS_ERR(inode))
return PTR_ERR(inode);
+ if (inode == oip.newinode)
+ ovl_set_flag(OVL_UPPERDATA, inode);
} else {
WARN_ON(ovl_inode_real(inode) != d_inode(newdentry));
dput(newdentry);
@@ -715,6 +741,7 @@ static bool ovl_matches_upper(struct dentry *dentry, struct dentry *upper)
static int ovl_remove_and_whiteout(struct dentry *dentry,
struct list_head *list)
{
+ struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
struct dentry *workdir = ovl_workdir(dentry);
struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent);
struct dentry *upper;
@@ -748,7 +775,7 @@ static int ovl_remove_and_whiteout(struct dentry *dentry,
goto out_dput_upper;
}
- err = ovl_cleanup_and_whiteout(workdir, d_inode(upperdir), upper);
+ err = ovl_cleanup_and_whiteout(ofs, d_inode(upperdir), upper);
if (err)
goto out_d_drop;
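The reworked ovl_whiteout() above keeps one whiteout cached in ofs->whiteout and hands out hard links to it; when linking fails it hands out (and so consumes) the cached whiteout itself, and any error other than -EMLINK turns sharing off. A hedged sketch of the core idea, with illustrative my_fs/my_lookup_temp names:

static struct dentry *get_whiteout(struct my_fs *fs, struct inode *wdir)
{
	struct dentry *tmp = my_lookup_temp(fs->workdir);	/* illustrative */
	int err;

	if (IS_ERR(tmp))
		return tmp;

	err = vfs_link(fs->whiteout, wdir, tmp, NULL);
	if (!err)
		return tmp;		/* shared: one inode, many names */

	dput(tmp);
	if (err != -EMLINK)
		fs->share_whiteout = false;	/* unexpected: stop sharing */
	/*
	 * -EMLINK just means nlink is exhausted: hand out the cached
	 * whiteout itself; the next call creates a fresh one.
	 */
	tmp = fs->whiteout;
	fs->whiteout = NULL;
	return tmp;
}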
diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c
index ed5c1078919c..8f4286450f92 100644
--- a/fs/overlayfs/export.c
+++ b/fs/overlayfs/export.c
@@ -204,7 +204,7 @@ static int ovl_check_encode_origin(struct dentry *dentry)
* ovl_connect_layer() will try to make origin's layer "connected" by
* copying up a "connectable" ancestor.
*/
- if (d_is_dir(dentry) && ofs->upper_mnt)
+ if (d_is_dir(dentry) && ovl_upper_mnt(ofs))
return ovl_connect_layer(dentry);
/* Lower file handle for indexed and non-upper dir/non-dir */
@@ -231,12 +231,9 @@ static int ovl_dentry_to_fid(struct dentry *dentry, u32 *fid, int buflen)
if (IS_ERR(fh))
return PTR_ERR(fh);
- err = -EOVERFLOW;
len = OVL_FH_LEN(fh);
- if (len > buflen)
- goto fail;
-
- memcpy(fid, fh, len);
+ if (len <= buflen)
+ memcpy(fid, fh, len);
err = len;
out:
@@ -244,9 +241,8 @@ out:
return err;
fail:
- pr_warn_ratelimited("failed to encode file handle (%pd2, err=%i, buflen=%d, len=%d, type=%d)\n",
- dentry, err, buflen, fh ? (int)fh->fb.len : 0,
- fh ? fh->fb.type : 0);
+ pr_warn_ratelimited("failed to encode file handle (%pd2, err=%i)\n",
+ dentry, err);
goto out;
}
@@ -254,7 +250,7 @@ static int ovl_encode_fh(struct inode *inode, u32 *fid, int *max_len,
struct inode *parent)
{
struct dentry *dentry;
- int bytes = *max_len << 2;
+ int bytes, buflen = *max_len << 2;
/* TODO: encode connectable file handles */
if (parent)
@@ -264,12 +260,14 @@ static int ovl_encode_fh(struct inode *inode, u32 *fid, int *max_len,
if (WARN_ON(!dentry))
return FILEID_INVALID;
- bytes = ovl_dentry_to_fid(dentry, fid, bytes);
+ bytes = ovl_dentry_to_fid(dentry, fid, buflen);
dput(dentry);
if (bytes <= 0)
return FILEID_INVALID;
*max_len = bytes >> 2;
+ if (bytes > buflen)
+ return FILEID_INVALID;
return OVL_FILEID_V1;
}
@@ -679,10 +677,10 @@ static struct dentry *ovl_upper_fh_to_d(struct super_block *sb,
struct dentry *dentry;
struct dentry *upper;
- if (!ofs->upper_mnt)
+ if (!ovl_upper_mnt(ofs))
return ERR_PTR(-EACCES);
- upper = ovl_decode_real_fh(fh, ofs->upper_mnt, true);
+ upper = ovl_decode_real_fh(fh, ovl_upper_mnt(ofs), true);
if (IS_ERR_OR_NULL(upper))
return upper;
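The ovl_encode_fh() rework follows the exportfs contract: always report the needed size back through *max_len (counted in 32-bit words), copy only when the caller's buffer is big enough, and return FILEID_INVALID on overflow so the caller can retry with a larger buffer. The contract as a sketch; the myfs_* helpers and MYFS_FILEID_TYPE are illustrative:

static int myfs_encode_fh(struct inode *inode, u32 *fid, int *max_len,
			  struct inode *parent)
{
	int buflen = *max_len << 2;		/* max_len counts u32 words */
	int bytes = myfs_fh_bytes(inode);	/* illustrative helper */

	*max_len = (bytes + 3) >> 2;		/* always report what is needed */
	if (bytes > buflen)
		return FILEID_INVALID;		/* caller may retry with *max_len */

	myfs_fill_fh(inode, fid, bytes);	/* illustrative helper */
	return MYFS_FILEID_TYPE;
}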
diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
index 87c362f65448..01820e654a21 100644
--- a/fs/overlayfs/file.c
+++ b/fs/overlayfs/file.c
@@ -10,6 +10,7 @@
#include <linux/uio.h>
#include <linux/uaccess.h>
#include <linux/splice.h>
+#include <linux/security.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include "overlayfs.h"
@@ -39,10 +40,22 @@ static struct file *ovl_open_realfile(const struct file *file,
struct file *realfile;
const struct cred *old_cred;
int flags = file->f_flags | O_NOATIME | FMODE_NONOTIFY;
+ int acc_mode = ACC_MODE(flags);
+ int err;
+
+ if (flags & O_APPEND)
+ acc_mode |= MAY_APPEND;
old_cred = ovl_override_creds(inode->i_sb);
- realfile = open_with_fake_path(&file->f_path, flags, realinode,
- current_cred());
+ err = inode_permission(realinode, MAY_OPEN | acc_mode);
+ if (err) {
+ realfile = ERR_PTR(err);
+ } else if (!inode_owner_or_capable(realinode)) {
+ realfile = ERR_PTR(-EPERM);
+ } else {
+ realfile = open_with_fake_path(&file->f_path, flags, realinode,
+ current_cred());
+ }
revert_creds(old_cred);
pr_debug("open(%p[%pD2/%c], 0%o) -> (%p, 0%o)\n",
@@ -219,9 +232,8 @@ static void ovl_file_accessed(struct file *file)
touch_atime(&file->f_path);
}
-static rwf_t ovl_iocb_to_rwf(struct kiocb *iocb)
+static rwf_t ovl_iocb_to_rwf(int ifl)
{
- int ifl = iocb->ki_flags;
rwf_t flags = 0;
if (ifl & IOCB_NOWAIT)
@@ -283,7 +295,7 @@ static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter)
old_cred = ovl_override_creds(file_inode(file)->i_sb);
if (is_sync_kiocb(iocb)) {
ret = vfs_iter_read(real.file, iter, &iocb->ki_pos,
- ovl_iocb_to_rwf(iocb));
+ ovl_iocb_to_rwf(iocb->ki_flags));
} else {
struct ovl_aio_req *aio_req;
@@ -336,7 +348,7 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
if (is_sync_kiocb(iocb)) {
file_start_write(real.file);
ret = vfs_iter_write(real.file, iter, &iocb->ki_pos,
- ovl_iocb_to_rwf(iocb));
+ ovl_iocb_to_rwf(iocb->ki_flags));
file_end_write(real.file);
/* Update size */
ovl_copyattr(ovl_inode_real(inode), inode);
@@ -520,7 +532,9 @@ static long ovl_real_ioctl(struct file *file, unsigned int cmd,
return ret;
old_cred = ovl_override_creds(file_inode(file)->i_sb);
- ret = vfs_ioctl(real.file, cmd, arg);
+ ret = security_file_ioctl(real.file, cmd, arg);
+ if (!ret)
+ ret = vfs_ioctl(real.file, cmd, arg);
revert_creds(old_cred);
fdput(real);
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index 981f11ec51bc..8be6cd264f66 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -10,6 +10,7 @@
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/ratelimit.h>
+#include <linux/fiemap.h>
#include "overlayfs.h"
@@ -456,7 +457,7 @@ int ovl_update_time(struct inode *inode, struct timespec64 *ts, int flags)
if (flags & S_ATIME) {
struct ovl_fs *ofs = inode->i_sb->s_fs_info;
struct path upperpath = {
- .mnt = ofs->upper_mnt,
+ .mnt = ovl_upper_mnt(ofs),
.dentry = ovl_upperdentry_dereference(OVL_I(inode)),
};
@@ -479,10 +480,6 @@ static int ovl_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
return -EOPNOTSUPP;
old_cred = ovl_override_creds(inode->i_sb);
-
- if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC)
- filemap_write_and_wait(realinode->i_mapping);
-
err = realinode->i_op->fiemap(realinode, fieinfo, start, len);
revert_creds(old_cred);
@@ -908,7 +905,7 @@ struct inode *ovl_get_trap_inode(struct super_block *sb, struct dentry *dir)
* Does overlay inode need to be hashed by lower inode?
*/
static bool ovl_hash_bylower(struct super_block *sb, struct dentry *upper,
- struct dentry *lower, struct dentry *index)
+ struct dentry *lower, bool index)
{
struct ovl_fs *ofs = sb->s_fs_info;
@@ -921,7 +918,7 @@ static bool ovl_hash_bylower(struct super_block *sb, struct dentry *upper,
return true;
/* Yes, if won't be copied up */
- if (!ofs->upper_mnt)
+ if (!ovl_upper_mnt(ofs))
return true;
/* No, if lower hardlink is or will be broken on copy up */
@@ -957,7 +954,7 @@ struct inode *ovl_get_inode(struct super_block *sb,
bool bylower = ovl_hash_bylower(sb, upperdentry, lowerdentry,
oip->index);
int fsid = bylower ? lowerpath->layer->fsid : 0;
- bool is_dir, metacopy = false;
+ bool is_dir;
unsigned long ino = 0;
int err = oip->newinode ? -EEXIST : -ENOMEM;
@@ -1018,15 +1015,6 @@ struct inode *ovl_get_inode(struct super_block *sb,
if (oip->index)
ovl_set_flag(OVL_INDEX, inode);
- if (upperdentry) {
- err = ovl_check_metacopy_xattr(upperdentry);
- if (err < 0)
- goto out_err;
- metacopy = err;
- if (!metacopy)
- ovl_set_flag(OVL_UPPERDATA, inode);
- }
-
OVL_I(inode)->redirect = oip->redirect;
if (bylower)
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
index 0db23baf98e7..3566282a9199 100644
--- a/fs/overlayfs/namei.c
+++ b/fs/overlayfs/namei.c
@@ -191,16 +191,36 @@ static bool ovl_is_opaquedir(struct dentry *dentry)
return ovl_check_dir_xattr(dentry, OVL_XATTR_OPAQUE);
}
+static struct dentry *ovl_lookup_positive_unlocked(const char *name,
+ struct dentry *base, int len,
+ bool drop_negative)
+{
+ struct dentry *ret = lookup_one_len_unlocked(name, base, len);
+
+ if (!IS_ERR(ret) && d_flags_negative(smp_load_acquire(&ret->d_flags))) {
+ if (drop_negative && ret->d_lockref.count == 1) {
+ spin_lock(&ret->d_lock);
+ /* Recheck condition under lock */
+ if (d_is_negative(ret) && ret->d_lockref.count == 1)
+ __d_drop(ret);
+ spin_unlock(&ret->d_lock);
+ }
+ dput(ret);
+ ret = ERR_PTR(-ENOENT);
+ }
+ return ret;
+}
+
static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
const char *name, unsigned int namelen,
size_t prelen, const char *post,
- struct dentry **ret)
+ struct dentry **ret, bool drop_negative)
{
struct dentry *this;
int err;
bool last_element = !post[0];
- this = lookup_positive_unlocked(name, base, namelen);
+ this = ovl_lookup_positive_unlocked(name, base, namelen, drop_negative);
if (IS_ERR(this)) {
err = PTR_ERR(this);
this = NULL;
@@ -276,7 +296,7 @@ out_err:
}
static int ovl_lookup_layer(struct dentry *base, struct ovl_lookup_data *d,
- struct dentry **ret)
+ struct dentry **ret, bool drop_negative)
{
/* Counting down from the end, since the prefix can change */
size_t rem = d->name.len - 1;
@@ -285,7 +305,7 @@ static int ovl_lookup_layer(struct dentry *base, struct ovl_lookup_data *d,
if (d->name.name[0] != '/')
return ovl_lookup_single(base, d, d->name.name, d->name.len,
- 0, "", ret);
+ 0, "", ret, drop_negative);
while (!IS_ERR_OR_NULL(base) && d_can_lookup(base)) {
const char *s = d->name.name + d->name.len - rem;
@@ -298,7 +318,8 @@ static int ovl_lookup_layer(struct dentry *base, struct ovl_lookup_data *d,
return -EIO;
err = ovl_lookup_single(base, d, s, thislen,
- d->name.len - rem, next, &base);
+ d->name.len - rem, next, &base,
+ drop_negative);
dput(dentry);
if (err)
return err;
@@ -468,7 +489,7 @@ struct dentry *ovl_index_upper(struct ovl_fs *ofs, struct dentry *index)
if (IS_ERR_OR_NULL(fh))
return ERR_CAST(fh);
- upper = ovl_decode_real_fh(fh, ofs->upper_mnt, true);
+ upper = ovl_decode_real_fh(fh, ovl_upper_mnt(ofs), true);
kfree(fh);
if (IS_ERR_OR_NULL(upper))
@@ -484,12 +505,6 @@ struct dentry *ovl_index_upper(struct ovl_fs *ofs, struct dentry *index)
return upper;
}
-/* Is this a leftover from create/whiteout of directory index entry? */
-static bool ovl_is_temp_index(struct dentry *index)
-{
- return index->d_name.name[0] == '#';
-}
-
/*
* Verify that an index entry name matches the origin file handle stored in
* OVL_XATTR_ORIGIN and that origin file handle can be decoded to lower path.
@@ -507,11 +522,6 @@ int ovl_verify_index(struct ovl_fs *ofs, struct dentry *index)
if (!d_inode(index))
return 0;
- /* Cleanup leftover from index create/cleanup attempt */
- err = -ESTALE;
- if (ovl_is_temp_index(index))
- goto fail;
-
err = -EINVAL;
if (index->d_name.len < sizeof(struct ovl_fb)*2)
goto fail;
@@ -823,7 +833,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
struct dentry *this;
unsigned int i;
int err;
- bool metacopy = false;
+ bool uppermetacopy = false;
struct ovl_lookup_data d = {
.sb = dentry->d_sb,
.name = dentry->d_name,
@@ -841,7 +851,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
old_cred = ovl_override_creds(dentry->d_sb);
upperdir = ovl_dentry_upper(dentry->d_parent);
if (upperdir) {
- err = ovl_lookup_layer(upperdir, &d, &upperdentry);
+ err = ovl_lookup_layer(upperdir, &d, &upperdentry, true);
if (err)
goto out;
@@ -869,7 +879,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
goto out_put_upper;
if (d.metacopy)
- metacopy = true;
+ uppermetacopy = true;
}
if (d.redirect) {
@@ -899,13 +909,19 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
else
d.last = lower.layer->idx == roe->numlower;
- err = ovl_lookup_layer(lower.dentry, &d, &this);
+ err = ovl_lookup_layer(lower.dentry, &d, &this, false);
if (err)
goto out_put;
if (!this)
continue;
+ if ((uppermetacopy || d.metacopy) && !ofs->config.metacopy) {
+ err = -EPERM;
+ pr_warn_ratelimited("refusing to follow metacopy origin for (%pd2)\n", dentry);
+ goto out_put;
+ }
+
/*
* If no origin fh is stored in upper of a merge dir, store fh
* of lower dir and set upper parent "impure".
@@ -940,21 +956,21 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
origin = this;
}
- if (d.metacopy)
- metacopy = true;
- /*
- * Do not store intermediate metacopy dentries in chain,
- * except top most lower metacopy dentry
- */
if (d.metacopy && ctr) {
+ /*
+ * Do not store intermediate metacopy dentries in
+ * lower chain, except the topmost lower metacopy dentry.
+ * Continue the loop so that if there is an absolute
+ * redirect on this dentry, poe can be reset to roe.
+ */
dput(this);
- continue;
+ this = NULL;
+ } else {
+ stack[ctr].dentry = this;
+ stack[ctr].layer = lower.layer;
+ ctr++;
}
- stack[ctr].dentry = this;
- stack[ctr].layer = lower.layer;
- ctr++;
-
/*
* Following redirects can have security consequences: it's like
* a symlink into the lower layer without the permission checks.
@@ -982,22 +998,17 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
}
}
- if (metacopy) {
- /*
- * Found a metacopy dentry but did not find corresponding
- * data dentry
- */
- if (d.metacopy) {
- err = -EIO;
- goto out_put;
- }
-
- err = -EPERM;
- if (!ofs->config.metacopy) {
- pr_warn_ratelimited("refusing to follow metacopy origin for (%pd2)\n",
- dentry);
- goto out_put;
- }
+ /*
+ * For regular non-metacopy upper dentries, there is no lower
+ * path based lookup, hence ctr will be zero. If a dentry is found
+ * using ORIGIN xattr on upper, install it in stack.
+ *
+ * For metacopy dentry, path based lookup will find lower dentries.
+ * Just make sure a corresponding data dentry has been found.
+ */
+ if (d.metacopy || (uppermetacopy && !ctr)) {
+ err = -EIO;
+ goto out_put;
} else if (!d.is_dir && upperdentry && !ctr && origin_path) {
if (WARN_ON(stack != NULL)) {
err = -EIO;
@@ -1005,25 +1016,30 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
}
stack = origin_path;
ctr = 1;
+ origin = origin_path->dentry;
origin_path = NULL;
}
/*
- * Lookup index by lower inode and verify it matches upper inode.
- * We only trust dir index if we verified that lower dir matches
- * origin, otherwise dir index entries may be inconsistent and we
- * ignore them.
+ * Always lookup index if there is no upperdentry.
+ *
+ * For the case of upperdentry, we have set origin by now if it
+ * needed to be set. There are basically three cases.
+ *
+ * For directories, lookup index by lower inode and verify it matches
+ * upper inode. We only trust dir index if we verified that lower dir
+ * matches origin, otherwise dir index entries may be inconsistent
+ * and we ignore them.
+ *
+ * For regular upper, we already set origin if upper had ORIGIN
+ * xattr. There is no verification though as there is no path
+ * based dentry lookup in lower in this case.
*
- * For non-dir upper metacopy dentry, we already set "origin" if we
- * verified that lower matched upper origin. If upper origin was
- * not present (because lower layer did not support fh encode/decode),
- * or indexing is not enabled, do not set "origin" and skip looking up
- * index. This case should be handled in same way as a non-dir upper
- * without ORIGIN is handled.
+ * For metacopy upper, we set a verified origin already if index
+ * is enabled and if upper had an ORIGIN xattr.
*
- * Always lookup index of non-dir non-metacopy and non-upper.
*/
- if (ctr && (!upperdentry || (!d.is_dir && !metacopy)))
+ if (!upperdentry && ctr)
origin = stack[0].dentry;
if (origin && ovl_indexdir(dentry->d_sb) &&
@@ -1074,6 +1090,8 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
err = PTR_ERR(inode);
if (IS_ERR(inode))
goto out_free_oe;
+ if (upperdentry && !uppermetacopy)
+ ovl_set_flag(OVL_UPPERDATA, inode);
}
ovl_dentry_update_reval(dentry, upperdentry,
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index e6f3670146ed..b725c7f15ff4 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -355,6 +355,9 @@ int ovl_check_fb_len(struct ovl_fb *fb, int fb_len);
static inline int ovl_check_fh_len(struct ovl_fh *fh, int fh_len)
{
+ if (fh_len < sizeof(struct ovl_fh))
+ return -EINVAL;
+
return ovl_check_fb_len(&fh->fb, fh_len - OVL_FH_WIRE_OFFSET);
}
@@ -394,8 +397,8 @@ void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list);
void ovl_cache_free(struct list_head *list);
void ovl_dir_cache_free(struct inode *inode);
int ovl_check_d_type_supported(struct path *realpath);
-void ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt,
- struct dentry *dentry, int level);
+int ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt,
+ struct dentry *dentry, int level);
int ovl_indexdir_cleanup(struct ovl_fs *ofs);
/* inode.c */
@@ -421,7 +424,7 @@ struct ovl_inode_params {
struct inode *newinode;
struct dentry *upperdentry;
struct ovl_path *lowerpath;
- struct dentry *index;
+ bool index;
unsigned int numlower;
char *redirect;
struct dentry *lowerdata;
@@ -455,7 +458,7 @@ static inline void ovl_copyflags(struct inode *from, struct inode *to)
/* dir.c */
extern const struct inode_operations ovl_dir_inode_operations;
-int ovl_cleanup_and_whiteout(struct dentry *workdir, struct inode *dir,
+int ovl_cleanup_and_whiteout(struct ovl_fs *ofs, struct inode *dir,
struct dentry *dentry);
struct ovl_cattr {
dev_t rdev;
diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
index 5762d802fe01..b429c80879ee 100644
--- a/fs/overlayfs/ovl_entry.h
+++ b/fs/overlayfs/ovl_entry.h
@@ -46,7 +46,6 @@ struct ovl_path {
/* private information held for overlayfs's superblock */
struct ovl_fs {
- struct vfsmount *upper_mnt;
unsigned int numlayer;
/* Number of unique fs among layers including upper fs */
unsigned int numfs;
@@ -68,8 +67,8 @@ struct ovl_fs {
/* Did we take the inuse lock? */
bool upperdir_locked;
bool workdir_locked;
+ bool share_whiteout;
/* Traps in ovl inode cache */
- struct inode *upperdir_trap;
struct inode *workbasedir_trap;
struct inode *workdir_trap;
struct inode *indexdir_trap;
@@ -77,8 +76,15 @@ struct ovl_fs {
int xino_mode;
/* For allocation of non-persistent inode numbers */
atomic_long_t last_ino;
+ /* Whiteout dentry cache */
+ struct dentry *whiteout;
};
+static inline struct vfsmount *ovl_upper_mnt(struct ovl_fs *ofs)
+{
+ return ofs->layers[0].mnt;
+}
+
static inline struct ovl_fs *OVL_FS(struct super_block *sb)
{
return (struct ovl_fs *)sb->s_fs_info;
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
index e452ff7d583d..6918b98faeb6 100644
--- a/fs/overlayfs/readdir.c
+++ b/fs/overlayfs/readdir.c
@@ -297,7 +297,7 @@ static inline int ovl_dir_read(struct path *realpath,
struct file *realfile;
int err;
- realfile = ovl_path_open(realpath, O_RDONLY | O_DIRECTORY);
+ realfile = ovl_path_open(realpath, O_RDONLY | O_LARGEFILE);
if (IS_ERR(realfile))
return PTR_ERR(realfile);
@@ -743,8 +743,10 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx)
struct ovl_dir_file *od = file->private_data;
struct dentry *dentry = file->f_path.dentry;
struct ovl_cache_entry *p;
+ const struct cred *old_cred;
int err;
+ old_cred = ovl_override_creds(dentry->d_sb);
if (!ctx->pos)
ovl_dir_reset(file);
@@ -758,17 +760,20 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx)
(ovl_same_fs(dentry->d_sb) &&
(ovl_is_impure_dir(file) ||
OVL_TYPE_MERGE(ovl_path_type(dentry->d_parent))))) {
- return ovl_iterate_real(file, ctx);
+ err = ovl_iterate_real(file, ctx);
+ } else {
+ err = iterate_dir(od->realfile, ctx);
}
- return iterate_dir(od->realfile, ctx);
+ goto out;
}
if (!od->cache) {
struct ovl_dir_cache *cache;
cache = ovl_cache_get(dentry);
+ err = PTR_ERR(cache);
if (IS_ERR(cache))
- return PTR_ERR(cache);
+ goto out;
od->cache = cache;
ovl_seek_cursor(od, ctx->pos);
@@ -780,7 +785,7 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx)
if (!p->ino) {
err = ovl_cache_update_ino(&file->f_path, p);
if (err)
- return err;
+ goto out;
}
if (!dir_emit(ctx, p->name, p->len, p->ino, p->type))
break;
@@ -788,7 +793,10 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx)
od->cursor = p->l_node.next;
ctx->pos++;
}
- return 0;
+ err = 0;
+out:
+ revert_creds(old_cred);
+ return err;
}
static loff_t ovl_dir_llseek(struct file *file, loff_t offset, int origin)
@@ -831,6 +839,19 @@ out_unlock:
return res;
}
+static struct file *ovl_dir_open_realfile(struct file *file,
+ struct path *realpath)
+{
+ struct file *res;
+ const struct cred *old_cred;
+
+ old_cred = ovl_override_creds(file_inode(file)->i_sb);
+ res = ovl_path_open(realpath, O_RDONLY | (file->f_flags & O_LARGEFILE));
+ revert_creds(old_cred);
+
+ return res;
+}
+
static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
int datasync)
{
@@ -853,7 +874,7 @@ static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
struct path upperpath;
ovl_path_upper(dentry, &upperpath);
- realfile = ovl_path_open(&upperpath, O_RDONLY);
+ realfile = ovl_dir_open_realfile(file, &upperpath);
inode_lock(inode);
if (!od->upperfile) {
@@ -904,7 +925,7 @@ static int ovl_dir_open(struct inode *inode, struct file *file)
return -ENOMEM;
type = ovl_path_real(file->f_path.dentry, &realpath);
- realfile = ovl_path_open(&realpath, file->f_flags);
+ realfile = ovl_dir_open_realfile(file, &realpath);
if (IS_ERR(realfile)) {
kfree(od);
return PTR_ERR(realfile);
@@ -1071,14 +1092,13 @@ out:
ovl_cache_free(&list);
}
-void ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt,
+int ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt,
struct dentry *dentry, int level)
{
int err;
if (!d_is_dir(dentry) || level > 1) {
- ovl_cleanup(dir, dentry);
- return;
+ return ovl_cleanup(dir, dentry);
}
err = ovl_do_rmdir(dir, dentry);
@@ -1088,8 +1108,10 @@ void ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt,
inode_unlock(dir);
ovl_workdir_cleanup_recurse(&path, level + 1);
inode_lock_nested(dir, I_MUTEX_PARENT);
- ovl_cleanup(dir, dentry);
+ err = ovl_cleanup(dir, dentry);
}
+
+ return err;
}
int ovl_indexdir_cleanup(struct ovl_fs *ofs)
@@ -1098,7 +1120,7 @@ int ovl_indexdir_cleanup(struct ovl_fs *ofs)
struct dentry *indexdir = ofs->indexdir;
struct dentry *index = NULL;
struct inode *dir = indexdir->d_inode;
- struct path path = { .mnt = ofs->upper_mnt, .dentry = indexdir };
+ struct path path = { .mnt = ovl_upper_mnt(ofs), .dentry = indexdir };
LIST_HEAD(list);
struct rb_root root = RB_ROOT;
struct ovl_cache_entry *p;
@@ -1128,6 +1150,13 @@ int ovl_indexdir_cleanup(struct ovl_fs *ofs)
index = NULL;
break;
}
+ /* Cleanup leftover from index create/cleanup attempt */
+ if (index->d_name.name[0] == '#') {
+ err = ovl_workdir_cleanup(dir, path.mnt, index, 1);
+ if (err)
+ break;
+ goto next;
+ }
err = ovl_verify_index(ofs, index);
if (!err) {
goto next;
@@ -1146,7 +1175,7 @@ int ovl_indexdir_cleanup(struct ovl_fs *ofs)
* Whiteout orphan index to block future open by
* handle after overlay nlink dropped to zero.
*/
- err = ovl_cleanup_and_whiteout(indexdir, dir, index);
+ err = ovl_cleanup_and_whiteout(ofs, dir, index);
} else {
/* Cleanup orphan index entries */
err = ovl_cleanup(dir, index);
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 732ad5495c92..91476bc422f9 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -211,24 +211,28 @@ static void ovl_destroy_inode(struct inode *inode)
static void ovl_free_fs(struct ovl_fs *ofs)
{
+ struct vfsmount **mounts;
unsigned i;
iput(ofs->workbasedir_trap);
iput(ofs->indexdir_trap);
iput(ofs->workdir_trap);
- iput(ofs->upperdir_trap);
+ dput(ofs->whiteout);
dput(ofs->indexdir);
dput(ofs->workdir);
if (ofs->workdir_locked)
ovl_inuse_unlock(ofs->workbasedir);
dput(ofs->workbasedir);
if (ofs->upperdir_locked)
- ovl_inuse_unlock(ofs->upper_mnt->mnt_root);
- mntput(ofs->upper_mnt);
- for (i = 1; i < ofs->numlayer; i++) {
+ ovl_inuse_unlock(ovl_upper_mnt(ofs)->mnt_root);
+
+ /* Hack! Reuse ofs->layers as a vfsmount array before freeing it */
+ mounts = (struct vfsmount **) ofs->layers;
+ for (i = 0; i < ofs->numlayer; i++) {
iput(ofs->layers[i].trap);
- mntput(ofs->layers[i].mnt);
+ mounts[i] = ofs->layers[i].mnt;
}
+ kern_unmount_array(mounts, ofs->numlayer);
kfree(ofs->layers);
for (i = 0; i < ofs->numfs; i++)
free_anon_bdev(ofs->fs[i].pseudo_dev);
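
ovl_free_fs() now tears down all layer mounts with a single kern_unmount_array() call rather than one mntput() per layer, which lets the umount path synchronize RCU once for the whole set. Because struct ovl_layer is at least pointer-sized, the layers array can be reused in place as the vfsmount array, as the "Hack!" comment notes. Condensed shape of the trick (a sketch, not the full teardown):

	/* Aliasing is safe: mounts[i] is written only after layers[i].mnt is
	 * read, and sizeof(struct ovl_layer) >= sizeof(struct vfsmount *).
	 */
	struct vfsmount **mounts = (struct vfsmount **)ofs->layers;
	unsigned i;

	for (i = 0; i < ofs->numlayer; i++)
		mounts[i] = ofs->layers[i].mnt;
	kern_unmount_array(mounts, ofs->numlayer);	/* one RCU sync for all */
	kfree(ofs->layers);
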
@@ -257,12 +261,12 @@ static int ovl_sync_fs(struct super_block *sb, int wait)
struct super_block *upper_sb;
int ret;
- if (!ofs->upper_mnt)
+ if (!ovl_upper_mnt(ofs))
return 0;
/*
- * If this is a sync(2) call or an emergency sync, all the super blocks
- * will be iterated, including upper_sb, so no need to do anything.
+ * Not called for sync(2) call or an emergency sync (SB_I_SKIP_SYNC).
+ * All the super blocks will be iterated, including upper_sb.
*
* If this is a syncfs(2) call, then we do need to call
* sync_filesystem() on upper_sb, but enough if we do it when being
@@ -271,7 +275,7 @@ static int ovl_sync_fs(struct super_block *sb, int wait)
if (!wait)
return 0;
- upper_sb = ofs->upper_mnt->mnt_sb;
+ upper_sb = ovl_upper_mnt(ofs)->mnt_sb;
down_read(&upper_sb->s_umount);
ret = sync_filesystem(upper_sb);
@@ -309,7 +313,7 @@ static int ovl_statfs(struct dentry *dentry, struct kstatfs *buf)
/* Will this overlay be forced to mount/remount ro? */
static bool ovl_force_readonly(struct ovl_fs *ofs)
{
- return (!ofs->upper_mnt || !ofs->workdir);
+ return (!ovl_upper_mnt(ofs) || !ofs->workdir);
}
static const char *ovl_redirect_mode_def(void)
@@ -364,11 +368,20 @@ static int ovl_show_options(struct seq_file *m, struct dentry *dentry)
static int ovl_remount(struct super_block *sb, int *flags, char *data)
{
struct ovl_fs *ofs = sb->s_fs_info;
+ struct super_block *upper_sb;
+ int ret = 0;
if (!(*flags & SB_RDONLY) && ovl_force_readonly(ofs))
return -EROFS;
- return 0;
+ if (*flags & SB_RDONLY && !sb_rdonly(sb)) {
+ upper_sb = ovl_upper_mnt(ofs)->mnt_sb;
+ down_read(&upper_sb->s_umount);
+ ret = sync_filesystem(upper_sb);
+ up_read(&upper_sb->s_umount);
+ }
+
+ return ret;
}
static const struct super_operations ovl_super_operations = {
@@ -470,6 +483,7 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
char *p;
int err;
bool metacopy_opt = false, redirect_opt = false;
+ bool nfs_export_opt = false, index_opt = false;
config->redirect_mode = kstrdup(ovl_redirect_mode_def(), GFP_KERNEL);
if (!config->redirect_mode)
@@ -519,18 +533,22 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
case OPT_INDEX_ON:
config->index = true;
+ index_opt = true;
break;
case OPT_INDEX_OFF:
config->index = false;
+ index_opt = true;
break;
case OPT_NFS_EXPORT_ON:
config->nfs_export = true;
+ nfs_export_opt = true;
break;
case OPT_NFS_EXPORT_OFF:
config->nfs_export = false;
+ nfs_export_opt = true;
break;
case OPT_XINO_ON:
@@ -552,6 +570,7 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
case OPT_METACOPY_OFF:
config->metacopy = false;
+ metacopy_opt = true;
break;
default:
@@ -601,6 +620,48 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
}
}
+ /* Resolve nfs_export -> index dependency */
+ if (config->nfs_export && !config->index) {
+ if (nfs_export_opt && index_opt) {
+ pr_err("conflicting options: nfs_export=on,index=off\n");
+ return -EINVAL;
+ }
+ if (index_opt) {
+ /*
+ * There was an explicit index=off that resulted
+ * in this conflict.
+ */
+ pr_info("disabling nfs_export due to index=off\n");
+ config->nfs_export = false;
+ } else {
+ /* Automatically enable index otherwise. */
+ config->index = true;
+ }
+ }
+
+ /* Resolve nfs_export -> !metacopy dependency */
+ if (config->nfs_export && config->metacopy) {
+ if (nfs_export_opt && metacopy_opt) {
+ pr_err("conflicting options: nfs_export=on,metacopy=on\n");
+ return -EINVAL;
+ }
+ if (metacopy_opt) {
+ /*
+ * There was an explicit metacopy=on that resulted
+ * in this conflict.
+ */
+ pr_info("disabling nfs_export due to metacopy=on\n");
+ config->nfs_export = false;
+ } else {
+ /*
+ * There was an explicit nfs_export=on that resulted
+ * in this conflict.
+ */
+ pr_info("disabling metacopy due to nfs_export=on\n");
+ config->metacopy = false;
+ }
+ }
+
return 0;
}
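
With these dependency rules, two explicitly conflicting options fail the mount, while a conflict against a built-in default is resolved in favour of the explicit option with a pr_info notice. Illustrative invocations (hypothetical paths):

	# both options explicit: mount fails with EINVAL
	mount -t overlay overlay -o lowerdir=/l,upperdir=/u,workdir=/w,nfs_export=on,index=off /mnt

	# only nfs_export explicit: index is enabled automatically
	mount -t overlay overlay -o lowerdir=/l,upperdir=/u,workdir=/w,nfs_export=on /mnt
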
@@ -611,15 +672,12 @@ static struct dentry *ovl_workdir_create(struct ovl_fs *ofs,
const char *name, bool persist)
{
struct inode *dir = ofs->workbasedir->d_inode;
- struct vfsmount *mnt = ofs->upper_mnt;
+ struct vfsmount *mnt = ovl_upper_mnt(ofs);
struct dentry *work;
int err;
bool retried = false;
- bool locked = false;
inode_lock_nested(dir, I_MUTEX_PARENT);
- locked = true;
-
retry:
work = lookup_one_len(name, ofs->workbasedir, strlen(name));
@@ -680,9 +738,7 @@ retry:
goto out_err;
}
out_unlock:
- if (locked)
- inode_unlock(dir);
-
+ inode_unlock(dir);
return work;
out_dput:
@@ -779,11 +835,11 @@ static int ovl_lower_dir(const char *name, struct path *path,
err = ovl_mount_dir_noesc(name, path);
if (err)
- goto out;
+ return err;
err = ovl_check_namelen(path, ofs, name);
if (err)
- goto out_put;
+ return err;
*stack_depth = max(*stack_depth, path->mnt->mnt_sb->s_stack_depth);
@@ -805,11 +861,6 @@ static int ovl_lower_dir(const char *name, struct path *path,
ofs->xino_mode = -1;
return 0;
-
-out_put:
- path_put_init(path);
-out:
- return err;
}
/* Workdir should not be subdir of upperdir and vice versa */
@@ -1016,7 +1067,7 @@ static int ovl_report_in_use(struct ovl_fs *ofs, const char *name)
}
static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs,
- struct path *upperpath)
+ struct ovl_layer *upper_layer, struct path *upperpath)
{
struct vfsmount *upper_mnt;
int err;
@@ -1036,7 +1087,7 @@ static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs,
if (err)
goto out;
- err = ovl_setup_trap(sb, upperpath->dentry, &ofs->upperdir_trap,
+ err = ovl_setup_trap(sb, upperpath->dentry, &upper_layer->trap,
"upperdir");
if (err)
goto out;
@@ -1050,9 +1101,23 @@ static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs,
/* Don't inherit atime flags */
upper_mnt->mnt_flags &= ~(MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME);
- ofs->upper_mnt = upper_mnt;
+ upper_layer->mnt = upper_mnt;
+ upper_layer->idx = 0;
+ upper_layer->fsid = 0;
- if (ovl_inuse_trylock(ofs->upper_mnt->mnt_root)) {
+ /*
+ * Inherit SB_NOSEC flag from upperdir.
+ *
+ * This optimization changes behavior when a security related attribute
+ * (suid/sgid/security.*) is changed on an underlying layer. This is
+ * okay because we don't yet have guarantees in that case, but it will
+ * need careful treatment once we want to honour changes to underlying
+ * filesystems.
+ */
+ if (upper_mnt->mnt_sb->s_flags & SB_NOSEC)
+ sb->s_flags |= SB_NOSEC;
+
+ if (ovl_inuse_trylock(ovl_upper_mnt(ofs)->mnt_root)) {
ofs->upperdir_locked = true;
} else {
err = ovl_report_in_use(ofs, "upperdir");
@@ -1128,7 +1193,7 @@ out_unlock:
static int ovl_make_workdir(struct super_block *sb, struct ovl_fs *ofs,
struct path *workpath)
{
- struct vfsmount *mnt = ofs->upper_mnt;
+ struct vfsmount *mnt = ovl_upper_mnt(ofs);
struct dentry *temp;
bool rename_whiteout;
bool d_type;
@@ -1272,7 +1337,7 @@ out:
static int ovl_get_indexdir(struct super_block *sb, struct ovl_fs *ofs,
struct ovl_entry *oe, struct path *upperpath)
{
- struct vfsmount *mnt = ofs->upper_mnt;
+ struct vfsmount *mnt = ovl_upper_mnt(ofs);
int err;
err = mnt_want_write(mnt);
@@ -1328,7 +1393,7 @@ static bool ovl_lower_uuid_ok(struct ovl_fs *ofs, const uuid_t *uuid)
{
unsigned int i;
- if (!ofs->config.nfs_export && !ofs->upper_mnt)
+ if (!ofs->config.nfs_export && !ovl_upper_mnt(ofs))
return true;
for (i = 0; i < ofs->numfs; i++) {
@@ -1388,18 +1453,13 @@ static int ovl_get_fsid(struct ovl_fs *ofs, const struct path *path)
}
static int ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs,
- struct path *stack, unsigned int numlower)
+ struct path *stack, unsigned int numlower,
+ struct ovl_layer *layers)
{
int err;
unsigned int i;
- struct ovl_layer *layers;
err = -ENOMEM;
- layers = kcalloc(numlower + 1, sizeof(struct ovl_layer), GFP_KERNEL);
- if (!layers)
- goto out;
- ofs->layers = layers;
-
ofs->fs = kcalloc(numlower + 1, sizeof(struct ovl_sb), GFP_KERNEL);
if (ofs->fs == NULL)
goto out;
@@ -1407,11 +1467,6 @@ static int ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs,
/* idx/fsid 0 are reserved for upper fs even with lower only overlay */
ofs->numfs++;
- layers[0].mnt = ofs->upper_mnt;
- layers[0].idx = 0;
- layers[0].fsid = 0;
- ofs->numlayer = 1;
-
/*
* All lower layers that share the same fs as upper layer, use the same
* pseudo_dev as upper layer. Allocate fs[0].pseudo_dev even for lower
@@ -1424,8 +1479,8 @@ static int ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs,
goto out;
}
- if (ofs->upper_mnt) {
- ofs->fs[0].sb = ofs->upper_mnt->mnt_sb;
+ if (ovl_upper_mnt(ofs)) {
+ ofs->fs[0].sb = ovl_upper_mnt(ofs)->mnt_sb;
ofs->fs[0].is_lower = false;
}
@@ -1480,7 +1535,7 @@ static int ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs,
* inode number or a non persistent inode number allocated from a
* dedicated range.
*/
- if (ofs->numfs - !ofs->upper_mnt == 1) {
+ if (ofs->numfs - !ovl_upper_mnt(ofs) == 1) {
if (ofs->config.xino == OVL_XINO_ON)
pr_info("\"xino=on\" is useless with all layers on same fs, ignore.\n");
ofs->xino_mode = 0;
@@ -1509,44 +1564,30 @@ out:
}
static struct ovl_entry *ovl_get_lowerstack(struct super_block *sb,
- struct ovl_fs *ofs)
+ const char *lower, unsigned int numlower,
+ struct ovl_fs *ofs, struct ovl_layer *layers)
{
int err;
- char *lowertmp, *lower;
struct path *stack = NULL;
- unsigned int stacklen, numlower = 0, i;
+ unsigned int i;
struct ovl_entry *oe;
- err = -ENOMEM;
- lowertmp = kstrdup(ofs->config.lowerdir, GFP_KERNEL);
- if (!lowertmp)
- goto out_err;
-
- err = -EINVAL;
- stacklen = ovl_split_lowerdirs(lowertmp);
- if (stacklen > OVL_MAX_STACK) {
- pr_err("too many lower directories, limit is %d\n",
- OVL_MAX_STACK);
- goto out_err;
- } else if (!ofs->config.upperdir && stacklen == 1) {
+ if (!ofs->config.upperdir && numlower == 1) {
pr_err("at least 2 lowerdir are needed while upperdir nonexistent\n");
- goto out_err;
+ return ERR_PTR(-EINVAL);
} else if (!ofs->config.upperdir && ofs->config.nfs_export &&
ofs->config.redirect_follow) {
pr_warn("NFS export requires \"redirect_dir=nofollow\" on non-upper mount, falling back to nfs_export=off.\n");
ofs->config.nfs_export = false;
}
- err = -ENOMEM;
- stack = kcalloc(stacklen, sizeof(struct path), GFP_KERNEL);
+ stack = kcalloc(numlower, sizeof(struct path), GFP_KERNEL);
if (!stack)
- goto out_err;
+ return ERR_PTR(-ENOMEM);
err = -EINVAL;
- lower = lowertmp;
- for (numlower = 0; numlower < stacklen; numlower++) {
- err = ovl_lower_dir(lower, &stack[numlower], ofs,
- &sb->s_stack_depth);
+ for (i = 0; i < numlower; i++) {
+ err = ovl_lower_dir(lower, &stack[i], ofs, &sb->s_stack_depth);
if (err)
goto out_err;
@@ -1560,7 +1601,7 @@ static struct ovl_entry *ovl_get_lowerstack(struct super_block *sb,
goto out_err;
}
- err = ovl_get_layers(sb, ofs, stack, numlower);
+ err = ovl_get_layers(sb, ofs, stack, numlower, layers);
if (err)
goto out_err;
@@ -1578,7 +1619,6 @@ out:
for (i = 0; i < numlower; i++)
path_put(&stack[i]);
kfree(stack);
- kfree(lowertmp);
return oe;
@@ -1629,8 +1669,8 @@ static int ovl_check_overlapping_layers(struct super_block *sb,
{
int i, err;
- if (ofs->upper_mnt) {
- err = ovl_check_layer(sb, ofs, ofs->upper_mnt->mnt_root,
+ if (ovl_upper_mnt(ofs)) {
+ err = ovl_check_layer(sb, ofs, ovl_upper_mnt(ofs)->mnt_root,
"upperdir");
if (err)
return err;
@@ -1702,7 +1742,10 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
struct dentry *root_dentry;
struct ovl_entry *oe;
struct ovl_fs *ofs;
+ struct ovl_layer *layers;
struct cred *cred;
+ char *splitlower = NULL;
+ unsigned int numlower;
int err;
sb->s_d_op = &ovl_dentry_operations;
@@ -1716,6 +1759,9 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
if (!cred)
goto out_err;
+ /* Is there a reason anyone would want not to share whiteouts? */
+ ofs->share_whiteout = true;
+
ofs->config.index = ovl_index_def;
ofs->config.nfs_export = ovl_nfs_export_def;
ofs->config.xino = ovl_xino_def();
@@ -1731,6 +1777,26 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
goto out_err;
}
+ err = -ENOMEM;
+ splitlower = kstrdup(ofs->config.lowerdir, GFP_KERNEL);
+ if (!splitlower)
+ goto out_err;
+
+ numlower = ovl_split_lowerdirs(splitlower);
+ if (numlower > OVL_MAX_STACK) {
+ pr_err("too many lower directories, limit is %d\n",
+ OVL_MAX_STACK);
+ goto out_err;
+ }
+
+ layers = kcalloc(numlower + 1, sizeof(struct ovl_layer), GFP_KERNEL);
+ if (!layers)
+ goto out_err;
+
+ ofs->layers = layers;
+ /* Layer 0 is reserved for upper even if there's no upper */
+ ofs->numlayer = 1;
+
sb->s_stack_depth = 0;
sb->s_maxbytes = MAX_LFS_FILESIZE;
atomic_long_set(&ofs->last_ino, 1);
@@ -1752,7 +1818,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
goto out_err;
}
- err = ovl_get_upper(sb, ofs, &upperpath);
+ err = ovl_get_upper(sb, ofs, &layers[0], &upperpath);
if (err)
goto out_err;
@@ -1763,31 +1829,35 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
if (!ofs->workdir)
sb->s_flags |= SB_RDONLY;
- sb->s_stack_depth = ofs->upper_mnt->mnt_sb->s_stack_depth;
- sb->s_time_gran = ofs->upper_mnt->mnt_sb->s_time_gran;
+ sb->s_stack_depth = ovl_upper_mnt(ofs)->mnt_sb->s_stack_depth;
+ sb->s_time_gran = ovl_upper_mnt(ofs)->mnt_sb->s_time_gran;
}
- oe = ovl_get_lowerstack(sb, ofs);
+ oe = ovl_get_lowerstack(sb, splitlower, numlower, ofs, layers);
err = PTR_ERR(oe);
if (IS_ERR(oe))
goto out_err;
/* If the upper fs is nonexistent, we mark overlayfs r/o too */
- if (!ofs->upper_mnt)
+ if (!ovl_upper_mnt(ofs))
sb->s_flags |= SB_RDONLY;
if (!(ovl_force_readonly(ofs)) && ofs->config.index) {
+ /* index dir will act also as workdir */
+ dput(ofs->workdir);
+ ofs->workdir = NULL;
+ iput(ofs->workdir_trap);
+ ofs->workdir_trap = NULL;
+
err = ovl_get_indexdir(sb, ofs, oe, &upperpath);
if (err)
goto out_free_oe;
/* Force r/o mount with no index dir */
- if (!ofs->indexdir) {
- dput(ofs->workdir);
- ofs->workdir = NULL;
+ if (ofs->indexdir)
+ ofs->workdir = dget(ofs->indexdir);
+ else
sb->s_flags |= SB_RDONLY;
- }
-
}
err = ovl_check_overlapping_layers(sb, ofs);
@@ -1797,7 +1867,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
/* Show index=off in /proc/mounts for forced r/o mount */
if (!ofs->indexdir) {
ofs->config.index = false;
- if (ofs->upper_mnt && ofs->config.nfs_export) {
+ if (ovl_upper_mnt(ofs) && ofs->config.nfs_export) {
pr_warn("NFS export requires an index dir, falling back to nfs_export=off.\n");
ofs->config.nfs_export = false;
}
@@ -1818,6 +1888,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
sb->s_xattr = ovl_xattr_handlers;
sb->s_fs_info = ofs;
sb->s_flags |= SB_POSIXACL;
+ sb->s_iflags |= SB_I_SKIP_SYNC;
err = -ENOMEM;
root_dentry = ovl_get_root(sb, upperpath.dentry, oe);
@@ -1825,6 +1896,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
goto out_free_oe;
mntput(upperpath.mnt);
+ kfree(splitlower);
sb->s_root = root_dentry;
@@ -1834,6 +1906,7 @@ out_free_oe:
ovl_entry_stack_free(oe);
kfree(oe);
out_err:
+ kfree(splitlower);
path_put(&upperpath);
ovl_free_fs(ofs);
out:
diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
index 36b60788ee47..56c1f89f20c9 100644
--- a/fs/overlayfs/util.c
+++ b/fs/overlayfs/util.c
@@ -18,13 +18,13 @@
int ovl_want_write(struct dentry *dentry)
{
struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
- return mnt_want_write(ofs->upper_mnt);
+ return mnt_want_write(ovl_upper_mnt(ofs));
}
void ovl_drop_write(struct dentry *dentry)
{
struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
- mnt_drop_write(ofs->upper_mnt);
+ mnt_drop_write(ovl_upper_mnt(ofs));
}
struct dentry *ovl_workdir(struct dentry *dentry)
@@ -150,7 +150,7 @@ void ovl_path_upper(struct dentry *dentry, struct path *path)
{
struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
- path->mnt = ofs->upper_mnt;
+ path->mnt = ovl_upper_mnt(ofs);
path->dentry = ovl_dentry_upper(dentry);
}
@@ -459,7 +459,32 @@ bool ovl_is_whiteout(struct dentry *dentry)
struct file *ovl_path_open(struct path *path, int flags)
{
- return dentry_open(path, flags | O_NOATIME, current_cred());
+ struct inode *inode = d_inode(path->dentry);
+ int err, acc_mode;
+
+ if (flags & ~(O_ACCMODE | O_LARGEFILE))
+ BUG();
+
+ switch (flags & O_ACCMODE) {
+ case O_RDONLY:
+ acc_mode = MAY_READ;
+ break;
+ case O_WRONLY:
+ acc_mode = MAY_WRITE;
+ break;
+ default:
+ BUG();
+ }
+
+ err = inode_permission(inode, acc_mode | MAY_OPEN);
+ if (err)
+ return ERR_PTR(err);
+
+ /* O_NOATIME is an optimization, don't fail if not permitted */
+ if (inode_owner_or_capable(inode))
+ flags |= O_NOATIME;
+
+ return dentry_open(path, flags, current_cred());
}
/* Caller should hold ovl_inode->lock */
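
ovl_path_open() now whitelists its flags (read or write plus O_LARGEFILE), performs the MAY_OPEN permission check itself on the real inode, and requests O_NOATIME only when the caller owns the inode or is capable, since the VFS would otherwise refuse the open outright. Usage is unchanged for callers; a sketch (hypothetical caller, error handling condensed):

	struct file *realfile;

	realfile = ovl_path_open(&realpath, O_RDONLY | O_LARGEFILE);
	if (IS_ERR(realfile))
		return PTR_ERR(realfile);
	/* ... read from the underlying layer ... */
	fput(realfile);
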
@@ -707,7 +732,8 @@ static void ovl_cleanup_index(struct dentry *dentry)
index = NULL;
} else if (ovl_index_all(dentry->d_sb)) {
/* Whiteout orphan index to block future open by handle */
- err = ovl_cleanup_and_whiteout(indexdir, dir, index);
+ err = ovl_cleanup_and_whiteout(OVL_FS(dentry->d_sb),
+ dir, index);
} else {
/* Cleanup orphan index entries */
err = ovl_cleanup(dir, index);
diff --git a/fs/pipe.c b/fs/pipe.c
index 16fb72e9abf7..c7c4fb5f345f 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -140,21 +140,20 @@ static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
put_page(page);
}
-static int anon_pipe_buf_steal(struct pipe_inode_info *pipe,
- struct pipe_buffer *buf)
+static bool anon_pipe_buf_try_steal(struct pipe_inode_info *pipe,
+ struct pipe_buffer *buf)
{
struct page *page = buf->page;
- if (page_count(page) == 1) {
- memcg_kmem_uncharge_page(page, 0);
- __SetPageLocked(page);
- return 0;
- }
- return 1;
+ if (page_count(page) != 1)
+ return false;
+ memcg_kmem_uncharge_page(page, 0);
+ __SetPageLocked(page);
+ return true;
}
/**
- * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
+ * generic_pipe_buf_try_steal - attempt to take ownership of a &pipe_buffer
* @pipe: the pipe that the buffer belongs to
* @buf: the buffer to attempt to steal
*
@@ -165,8 +164,8 @@ static int anon_pipe_buf_steal(struct pipe_inode_info *pipe,
* he wishes; the typical use is insertion into a different file
* page cache.
*/
-int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
- struct pipe_buffer *buf)
+bool generic_pipe_buf_try_steal(struct pipe_inode_info *pipe,
+ struct pipe_buffer *buf)
{
struct page *page = buf->page;
@@ -177,12 +176,11 @@ int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
*/
if (page_count(page) == 1) {
lock_page(page);
- return 0;
+ return true;
}
-
- return 1;
+ return false;
}
-EXPORT_SYMBOL(generic_pipe_buf_steal);
+EXPORT_SYMBOL(generic_pipe_buf_try_steal);
/**
* generic_pipe_buf_get - get a reference to a &struct pipe_buffer
@@ -201,22 +199,6 @@ bool generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
EXPORT_SYMBOL(generic_pipe_buf_get);
/**
- * generic_pipe_buf_confirm - verify contents of the pipe buffer
- * @info: the pipe that the buffer belongs to
- * @buf: the buffer to confirm
- *
- * Description:
- * This function does nothing, because the generic pipe code uses
- * pages that are always good when inserted into the pipe.
- */
-int generic_pipe_buf_confirm(struct pipe_inode_info *info,
- struct pipe_buffer *buf)
-{
- return 0;
-}
-EXPORT_SYMBOL(generic_pipe_buf_confirm);
-
-/**
* generic_pipe_buf_release - put a reference to a &struct pipe_buffer
* @pipe: the pipe that the buffer belongs to
* @buf: the buffer to put a reference to
@@ -231,48 +213,12 @@ void generic_pipe_buf_release(struct pipe_inode_info *pipe,
}
EXPORT_SYMBOL(generic_pipe_buf_release);
-/* New data written to a pipe may be appended to a buffer with this type. */
static const struct pipe_buf_operations anon_pipe_buf_ops = {
- .confirm = generic_pipe_buf_confirm,
- .release = anon_pipe_buf_release,
- .steal = anon_pipe_buf_steal,
- .get = generic_pipe_buf_get,
-};
-
-static const struct pipe_buf_operations anon_pipe_buf_nomerge_ops = {
- .confirm = generic_pipe_buf_confirm,
- .release = anon_pipe_buf_release,
- .steal = anon_pipe_buf_steal,
- .get = generic_pipe_buf_get,
-};
-
-static const struct pipe_buf_operations packet_pipe_buf_ops = {
- .confirm = generic_pipe_buf_confirm,
- .release = anon_pipe_buf_release,
- .steal = anon_pipe_buf_steal,
- .get = generic_pipe_buf_get,
+ .release = anon_pipe_buf_release,
+ .try_steal = anon_pipe_buf_try_steal,
+ .get = generic_pipe_buf_get,
};
-/**
- * pipe_buf_mark_unmergeable - mark a &struct pipe_buffer as unmergeable
- * @buf: the buffer to mark
- *
- * Description:
- * This function ensures that no future writes will be merged into the
- * given &struct pipe_buffer. This is necessary when multiple pipe buffers
- * share the same backing page.
- */
-void pipe_buf_mark_unmergeable(struct pipe_buffer *buf)
-{
- if (buf->ops == &anon_pipe_buf_ops)
- buf->ops = &anon_pipe_buf_nomerge_ops;
-}
-
-static bool pipe_buf_can_merge(struct pipe_buffer *buf)
-{
- return buf->ops == &anon_pipe_buf_ops;
-}
-
/* Done while waiting without holding the pipe lock - thus the READ_ONCE() */
static inline bool pipe_readable(const struct pipe_inode_info *pipe)
{
@@ -478,7 +424,8 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
struct pipe_buffer *buf = &pipe->bufs[(head - 1) & mask];
int offset = buf->offset + buf->len;
- if (pipe_buf_can_merge(buf) && offset + chars <= PAGE_SIZE) {
+ if ((buf->flags & PIPE_BUF_FLAG_CAN_MERGE) &&
+ offset + chars <= PAGE_SIZE) {
ret = pipe_buf_confirm(pipe, buf);
if (ret)
goto out;
@@ -541,11 +488,10 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
buf->ops = &anon_pipe_buf_ops;
buf->offset = 0;
buf->len = 0;
- buf->flags = 0;
- if (is_packetized(filp)) {
- buf->ops = &packet_pipe_buf_ops;
+ if (is_packetized(filp))
buf->flags = PIPE_BUF_FLAG_PACKET;
- }
+ else
+ buf->flags = PIPE_BUF_FLAG_CAN_MERGE;
pipe->tmp_page = NULL;
copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
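
Whether new data may be appended to the last pipe buffer is now a per-buffer flag rather than a property encoded in which ops table the buffer carries; that is why the three byte-for-byte identical ops structs above collapse into a single anon_pipe_buf_ops. A condensed sketch of the two sides of the flag:

	/* producer: decide merge-ability when the buffer is filled */
	buf->flags = is_packetized(filp) ? PIPE_BUF_FLAG_PACKET
					 : PIPE_BUF_FLAG_CAN_MERGE;

	/* writer fast path: append only if the flag allows it */
	if ((buf->flags & PIPE_BUF_FLAG_CAN_MERGE) &&
	    buf->offset + buf->len + chars <= PAGE_SIZE)
		/* copy the new bytes into the tail of buf->page */;
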
diff --git a/fs/posix_acl.c b/fs/posix_acl.c
index 249672bf54fe..95882b3f5f62 100644
--- a/fs/posix_acl.c
+++ b/fs/posix_acl.c
@@ -350,7 +350,7 @@ posix_acl_permission(struct inode *inode, const struct posix_acl *acl, int want)
const struct posix_acl_entry *pa, *pe, *mask_obj;
int found = 0;
- want &= MAY_READ | MAY_WRITE | MAY_EXEC | MAY_NOT_BLOCK;
+ want &= MAY_READ | MAY_WRITE | MAY_EXEC;
FOREACH_ACL_ENTRY(pa, acl, pe) {
switch(pa->e_tag) {
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 8e16f14bb05a..55ecbeb3a721 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -92,7 +92,6 @@
#include <linux/user_namespace.h>
#include <linux/fs_struct.h>
-#include <asm/pgtable.h>
#include <asm/processor.h>
#include "internal.h"
@@ -248,8 +247,8 @@ void render_sigset_t(struct seq_file *m, const char *header,
seq_putc(m, '\n');
}
-static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *ign,
- sigset_t *catch)
+static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *sigign,
+ sigset_t *sigcatch)
{
struct k_sigaction *k;
int i;
@@ -257,9 +256,9 @@ static void collect_sigign_sigcatch(struct task_struct *p, sigset_t *ign,
k = p->sighand->action;
for (i = 1; i <= _NSIG; ++i, ++k) {
if (k->sa.sa_handler == SIG_IGN)
- sigaddset(ign, i);
+ sigaddset(sigign, i);
else if (k->sa.sa_handler != SIG_DFL)
- sigaddset(catch, i);
+ sigaddset(sigcatch, i);
}
}
@@ -728,7 +727,7 @@ static int children_seq_show(struct seq_file *seq, void *v)
{
struct inode *inode = file_inode(seq->file);
- seq_printf(seq, "%d ", pid_nr_ns(v, proc_pid_ns(inode)));
+ seq_printf(seq, "%d ", pid_nr_ns(v, proc_pid_ns(inode->i_sb)));
return 0;
}
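
From here on, proc_pid_ns() takes the super_block rather than an inode, since per-mount state (including the pid namespace pointer) now lives in proc_fs_info rather than in the pid namespace itself. The reworked helper is not part of this hunk; presumably it reduces to:

	/* Sketch, assuming proc_fs_info holds the mount's pid namespace. */
	static inline struct pid_namespace *proc_pid_ns(const struct super_block *sb)
	{
		return proc_sb_info(sb)->pid_ns;
	}
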
diff --git a/fs/proc/base.c b/fs/proc/base.c
index eb2255e95f62..d86c0afc8a85 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -697,13 +697,21 @@ int proc_setattr(struct dentry *dentry, struct iattr *attr)
* May current process learn task's sched/cmdline info (for hide_pid_min=1)
* or euid/egid (for hide_pid_min=2)?
*/
-static bool has_pid_permissions(struct pid_namespace *pid,
+static bool has_pid_permissions(struct proc_fs_info *fs_info,
struct task_struct *task,
- int hide_pid_min)
+ enum proc_hidepid hide_pid_min)
{
- if (pid->hide_pid < hide_pid_min)
+ /*
+	 * If the 'hidepid' mount option is set, force a ptrace check;
+	 * we indicate that we are using a filesystem syscall
+	 * by passing PTRACE_MODE_READ_FSCREDS.
+ */
+ if (fs_info->hide_pid == HIDEPID_NOT_PTRACEABLE)
+ return ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS);
+
+ if (fs_info->hide_pid < hide_pid_min)
return true;
- if (in_group_p(pid->pid_gid))
+ if (in_group_p(fs_info->pid_gid))
return true;
return ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS);
}
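
has_pid_permissions() now reads the per-mount proc_fs_info and short-circuits the new "ptraceable" mode with an explicit ptrace check before the usual level comparison. For reference, a sketch of the hidepid levels involved (the enum itself is defined outside this diff; the values are an assumption based on the string/numeric parser later in the patch):

	enum proc_hidepid {
		HIDEPID_OFF            = 0,
		HIDEPID_NO_ACCESS      = 1,
		HIDEPID_INVISIBLE      = 2,
		HIDEPID_NOT_PTRACEABLE = 4,	/* only ptraceable tasks are visible */
	};
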
@@ -711,18 +719,18 @@ static bool has_pid_permissions(struct pid_namespace *pid,
static int proc_pid_permission(struct inode *inode, int mask)
{
- struct pid_namespace *pid = proc_pid_ns(inode);
+ struct proc_fs_info *fs_info = proc_sb_info(inode->i_sb);
struct task_struct *task;
bool has_perms;
task = get_proc_task(inode);
if (!task)
return -ESRCH;
- has_perms = has_pid_permissions(pid, task, HIDEPID_NO_ACCESS);
+ has_perms = has_pid_permissions(fs_info, task, HIDEPID_NO_ACCESS);
put_task_struct(task);
if (!has_perms) {
- if (pid->hide_pid == HIDEPID_INVISIBLE) {
+ if (fs_info->hide_pid == HIDEPID_INVISIBLE) {
/*
* Let's make getdents(), stat(), and open()
* consistent with each other. If a process
@@ -746,7 +754,7 @@ static const struct inode_operations proc_def_inode_operations = {
static int proc_single_show(struct seq_file *m, void *v)
{
struct inode *inode = m->private;
- struct pid_namespace *ns = proc_pid_ns(inode);
+ struct pid_namespace *ns = proc_pid_ns(inode->i_sb);
struct pid *pid = proc_pid(inode);
struct task_struct *task;
int ret;
@@ -1415,7 +1423,7 @@ static const struct file_operations proc_fail_nth_operations = {
static int sched_show(struct seq_file *m, void *v)
{
struct inode *inode = m->private;
- struct pid_namespace *ns = proc_pid_ns(inode);
+ struct pid_namespace *ns = proc_pid_ns(inode->i_sb);
struct task_struct *p;
p = get_proc_task(inode);
@@ -1909,7 +1917,7 @@ int pid_getattr(const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int query_flags)
{
struct inode *inode = d_inode(path->dentry);
- struct pid_namespace *pid = proc_pid_ns(inode);
+ struct proc_fs_info *fs_info = proc_sb_info(inode->i_sb);
struct task_struct *task;
generic_fillattr(inode, stat);
@@ -1919,7 +1927,7 @@ int pid_getattr(const struct path *path, struct kstat *stat,
rcu_read_lock();
task = pid_task(proc_pid(inode), PIDTYPE_PID);
if (task) {
- if (!has_pid_permissions(pid, task, HIDEPID_INVISIBLE)) {
+ if (!has_pid_permissions(fs_info, task, HIDEPID_INVISIBLE)) {
rcu_read_unlock();
/*
* This doesn't prevent learning whether PID exists,
@@ -2104,11 +2112,11 @@ static int map_files_d_revalidate(struct dentry *dentry, unsigned int flags)
goto out;
if (!dname_to_vma_addr(dentry, &vm_start, &vm_end)) {
- status = down_read_killable(&mm->mmap_sem);
+ status = mmap_read_lock_killable(mm);
if (!status) {
exact_vma_exists = !!find_exact_vma(mm, vm_start,
vm_end);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
}
}
@@ -2155,7 +2163,7 @@ static int map_files_get_link(struct dentry *dentry, struct path *path)
if (rc)
goto out_mmput;
- rc = down_read_killable(&mm->mmap_sem);
+ rc = mmap_read_lock_killable(mm);
if (rc)
goto out_mmput;
@@ -2166,7 +2174,7 @@ static int map_files_get_link(struct dentry *dentry, struct path *path)
path_get(path);
rc = 0;
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
out_mmput:
mmput(mm);
@@ -2256,7 +2264,7 @@ static struct dentry *proc_map_files_lookup(struct inode *dir,
goto out_put_task;
result = ERR_PTR(-EINTR);
- if (down_read_killable(&mm->mmap_sem))
+ if (mmap_read_lock_killable(mm))
goto out_put_mm;
result = ERR_PTR(-ENOENT);
@@ -2269,7 +2277,7 @@ static struct dentry *proc_map_files_lookup(struct inode *dir,
(void *)(unsigned long)vma->vm_file->f_mode);
out_no_vma:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
out_put_mm:
mmput(mm);
out_put_task:
@@ -2314,7 +2322,7 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
if (!mm)
goto out_put_task;
- ret = down_read_killable(&mm->mmap_sem);
+ ret = mmap_read_lock_killable(mm);
if (ret) {
mmput(mm);
goto out_put_task;
@@ -2325,11 +2333,11 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
/*
* We need two passes here:
*
- * 1) Collect vmas of mapped files with mmap_sem taken
- * 2) Release mmap_sem and instantiate entries
+ * 1) Collect vmas of mapped files with mmap_lock taken
+ * 2) Release mmap_lock and instantiate entries
*
* otherwise lockdep complains, since the filldir()
- * routine might require mmap_sem taken in might_fault().
+ * routine might require mmap_lock taken in might_fault().
*/
for (vma = mm->mmap, pos = 2; vma; vma = vma->vm_next) {
@@ -2341,7 +2349,7 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
p = genradix_ptr_alloc(&fa, nr_files++, GFP_KERNEL);
if (!p) {
ret = -ENOMEM;
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
goto out_put_task;
}
@@ -2350,7 +2358,7 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
p->end = vma->vm_end;
p->mode = vma->vm_file->f_mode;
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
for (i = 0; i < nr_files; i++) {
@@ -2470,7 +2478,7 @@ static int proc_timers_open(struct inode *inode, struct file *file)
return -ENOMEM;
tp->pid = proc_pid(inode);
- tp->ns = proc_pid_ns(inode);
+ tp->ns = proc_pid_ns(inode->i_sb);
return 0;
}
@@ -2770,6 +2778,15 @@ static const struct pid_entry smack_attr_dir_stuff[] = {
LSM_DIR_OPS(smack);
#endif
+#ifdef CONFIG_SECURITY_APPARMOR
+static const struct pid_entry apparmor_attr_dir_stuff[] = {
+ ATTR("apparmor", "current", 0666),
+ ATTR("apparmor", "prev", 0444),
+ ATTR("apparmor", "exec", 0666),
+};
+LSM_DIR_OPS(apparmor);
+#endif
+
static const struct pid_entry attr_dir_stuff[] = {
ATTR(NULL, "current", 0666),
ATTR(NULL, "prev", 0444),
@@ -2781,6 +2798,10 @@ static const struct pid_entry attr_dir_stuff[] = {
DIR("smack", 0555,
proc_smack_attr_dir_inode_ops, proc_smack_attr_dir_ops),
#endif
+#ifdef CONFIG_SECURITY_APPARMOR
+ DIR("apparmor", 0555,
+ proc_apparmor_attr_dir_inode_ops, proc_apparmor_attr_dir_ops),
+#endif
};
static int proc_attr_dir_readdir(struct file *file, struct dir_context *ctx)
@@ -3312,6 +3333,7 @@ struct dentry *proc_pid_lookup(struct dentry *dentry, unsigned int flags)
{
struct task_struct *task;
unsigned tgid;
+ struct proc_fs_info *fs_info;
struct pid_namespace *ns;
struct dentry *result = ERR_PTR(-ENOENT);
@@ -3319,7 +3341,8 @@ struct dentry *proc_pid_lookup(struct dentry *dentry, unsigned int flags)
if (tgid == ~0U)
goto out;
- ns = dentry->d_sb->s_fs_info;
+ fs_info = proc_sb_info(dentry->d_sb);
+ ns = fs_info->pid_ns;
rcu_read_lock();
task = find_task_by_pid_ns(tgid, ns);
if (task)
@@ -3328,7 +3351,14 @@ struct dentry *proc_pid_lookup(struct dentry *dentry, unsigned int flags)
if (!task)
goto out;
+ /* Limit procfs to only ptraceable tasks */
+ if (fs_info->hide_pid == HIDEPID_NOT_PTRACEABLE) {
+ if (!has_pid_permissions(fs_info, task, HIDEPID_NO_ACCESS))
+ goto out_put_task;
+ }
+
result = proc_pid_instantiate(dentry, task, NULL);
+out_put_task:
put_task_struct(task);
out:
return result;
@@ -3354,20 +3384,8 @@ retry:
pid = find_ge_pid(iter.tgid, ns);
if (pid) {
iter.tgid = pid_nr_ns(pid, ns);
- iter.task = pid_task(pid, PIDTYPE_PID);
- /* What we to know is if the pid we have find is the
- * pid of a thread_group_leader. Testing for task
- * being a thread_group_leader is the obvious thing
- * todo but there is a window when it fails, due to
- * the pid transfer logic in de_thread.
- *
- * So we perform the straight forward test of seeing
- * if the pid we have found is the pid of a thread
- * group leader, and don't worry if the task we have
- * found doesn't happen to be a thread group leader.
- * As we don't care in the case of readdir.
- */
- if (!iter.task || !has_group_leader_pid(iter.task)) {
+ iter.task = pid_task(pid, PIDTYPE_TGID);
+ if (!iter.task) {
iter.tgid += 1;
goto retry;
}
@@ -3383,20 +3401,21 @@ retry:
int proc_pid_readdir(struct file *file, struct dir_context *ctx)
{
struct tgid_iter iter;
- struct pid_namespace *ns = proc_pid_ns(file_inode(file));
+ struct proc_fs_info *fs_info = proc_sb_info(file_inode(file)->i_sb);
+ struct pid_namespace *ns = proc_pid_ns(file_inode(file)->i_sb);
loff_t pos = ctx->pos;
if (pos >= PID_MAX_LIMIT + TGID_OFFSET)
return 0;
if (pos == TGID_OFFSET - 2) {
- struct inode *inode = d_inode(ns->proc_self);
+ struct inode *inode = d_inode(fs_info->proc_self);
if (!dir_emit(ctx, "self", 4, inode->i_ino, DT_LNK))
return 0;
ctx->pos = pos = pos + 1;
}
if (pos == TGID_OFFSET - 1) {
- struct inode *inode = d_inode(ns->proc_thread_self);
+ struct inode *inode = d_inode(fs_info->proc_thread_self);
if (!dir_emit(ctx, "thread-self", 11, inode->i_ino, DT_LNK))
return 0;
ctx->pos = pos = pos + 1;
@@ -3410,7 +3429,7 @@ int proc_pid_readdir(struct file *file, struct dir_context *ctx)
unsigned int len;
cond_resched();
- if (!has_pid_permissions(ns, iter.task, HIDEPID_INVISIBLE))
+ if (!has_pid_permissions(fs_info, iter.task, HIDEPID_INVISIBLE))
continue;
len = snprintf(name, sizeof(name), "%u", iter.tgid);
@@ -3610,6 +3629,7 @@ static struct dentry *proc_task_lookup(struct inode *dir, struct dentry * dentry
struct task_struct *task;
struct task_struct *leader = get_proc_task(dir);
unsigned tid;
+ struct proc_fs_info *fs_info;
struct pid_namespace *ns;
struct dentry *result = ERR_PTR(-ENOENT);
@@ -3620,7 +3640,8 @@ static struct dentry *proc_task_lookup(struct inode *dir, struct dentry * dentry
if (tid == ~0U)
goto out;
- ns = dentry->d_sb->s_fs_info;
+ fs_info = proc_sb_info(dentry->d_sb);
+ ns = fs_info->pid_ns;
rcu_read_lock();
task = find_task_by_pid_ns(tid, ns);
if (task)
@@ -3734,7 +3755,7 @@ static int proc_task_readdir(struct file *file, struct dir_context *ctx)
/* f_version caches the tgid value that the last readdir call couldn't
* return. lseek aka telldir automagically resets f_version to 0.
*/
- ns = proc_pid_ns(inode);
+ ns = proc_pid_ns(inode->i_sb);
tid = (int)file->f_version;
file->f_version = 0;
for (task = first_tid(proc_pid(inode), tid, ctx->pos - 2, ns);
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 4ed6dabdf6ff..2f9fa179194d 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -269,6 +269,11 @@ struct dentry *proc_lookup_de(struct inode *dir, struct dentry *dentry,
struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags)
{
+ struct proc_fs_info *fs_info = proc_sb_info(dir->i_sb);
+
+ if (fs_info->pidonly == PROC_PIDONLY_ON)
+ return ERR_PTR(-ENOENT);
+
return proc_lookup_de(dir, dentry, PDE(dir));
}
@@ -325,6 +330,10 @@ int proc_readdir_de(struct file *file, struct dir_context *ctx,
int proc_readdir(struct file *file, struct dir_context *ctx)
{
struct inode *inode = file_inode(file);
+ struct proc_fs_info *fs_info = proc_sb_info(inode->i_sb);
+
+ if (fs_info->pidonly == PROC_PIDONLY_ON)
+ return 1;
return proc_readdir_de(file, ctx, PDE(inode));
}
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index fb4cace9ea41..f40c2532c057 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -24,6 +24,7 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/mount.h>
+#include <linux/bug.h>
#include <linux/uaccess.h>
@@ -165,15 +166,28 @@ void proc_invalidate_siblings_dcache(struct hlist_head *inodes, spinlock_t *lock
deactivate_super(old_sb);
}
+static inline const char *hidepid2str(enum proc_hidepid v)
+{
+ switch (v) {
+ case HIDEPID_OFF: return "off";
+ case HIDEPID_NO_ACCESS: return "noaccess";
+ case HIDEPID_INVISIBLE: return "invisible";
+ case HIDEPID_NOT_PTRACEABLE: return "ptraceable";
+ }
+ WARN_ONCE(1, "bad hide_pid value: %d\n", v);
+ return "unknown";
+}
+
static int proc_show_options(struct seq_file *seq, struct dentry *root)
{
- struct super_block *sb = root->d_sb;
- struct pid_namespace *pid = sb->s_fs_info;
+ struct proc_fs_info *fs_info = proc_sb_info(root->d_sb);
- if (!gid_eq(pid->pid_gid, GLOBAL_ROOT_GID))
- seq_printf(seq, ",gid=%u", from_kgid_munged(&init_user_ns, pid->pid_gid));
- if (pid->hide_pid != HIDEPID_OFF)
- seq_printf(seq, ",hidepid=%u", pid->hide_pid);
+ if (!gid_eq(fs_info->pid_gid, GLOBAL_ROOT_GID))
+ seq_printf(seq, ",gid=%u", from_kgid_munged(&init_user_ns, fs_info->pid_gid));
+ if (fs_info->hide_pid != HIDEPID_OFF)
+ seq_printf(seq, ",hidepid=%s", hidepid2str(fs_info->hide_pid));
+ if (fs_info->pidonly != PROC_PIDONLY_OFF)
+ seq_printf(seq, ",subset=pid");
return 0;
}
@@ -464,6 +478,7 @@ proc_reg_get_unmapped_area(struct file *file, unsigned long orig_addr,
static int proc_reg_open(struct inode *inode, struct file *file)
{
+ struct proc_fs_info *fs_info = proc_sb_info(inode->i_sb);
struct proc_dir_entry *pde = PDE(inode);
int rv = 0;
typeof_member(struct proc_ops, proc_open) open;
@@ -477,6 +492,9 @@ static int proc_reg_open(struct inode *inode, struct file *file)
return rv;
}
+ if (fs_info->pidonly == PROC_PIDONLY_ON)
+ return -ENOENT;
+
/*
* Ensure that
* 1) PDE's ->release hook will be called no matter what
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index ecc63ce01be7..e9a6841fc25b 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -17,7 +17,6 @@
#include <linux/cma.h>
#endif
#include <asm/page.h>
-#include <asm/pgtable.h>
#include "internal.h"
void __attribute__((weak)) arch_report_meminfo(struct seq_file *m)
diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
index 14c2badb8fd9..13452b32e2bd 100644
--- a/fs/proc/nommu.c
+++ b/fs/proc/nommu.c
@@ -22,7 +22,6 @@
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
-#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/div64.h>
#include "internal.h"
diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
index 4888c5224442..dba63b2429f0 100644
--- a/fs/proc/proc_net.c
+++ b/fs/proc/proc_net.c
@@ -98,6 +98,25 @@ static const struct proc_ops proc_net_seq_ops = {
.proc_release = seq_release_net,
};
+int bpf_iter_init_seq_net(void *priv_data)
+{
+#ifdef CONFIG_NET_NS
+ struct seq_net_private *p = priv_data;
+
+ p->net = get_net(current->nsproxy->net_ns);
+#endif
+ return 0;
+}
+
+void bpf_iter_fini_seq_net(void *priv_data)
+{
+#ifdef CONFIG_NET_NS
+ struct seq_net_private *p = priv_data;
+
+ put_net(p->net);
+#endif
+}
+
struct proc_dir_entry *proc_create_net_data(const char *name, umode_t mode,
struct proc_dir_entry *parent, const struct seq_operations *ops,
unsigned int state_size, void *data)
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index b6f5d459b087..42c5128c7d1c 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -14,6 +14,7 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bpf-cgroup.h>
+#include <linux/mount.h>
#include "internal.h"
static const struct dentry_operations proc_sys_dentry_operations;
@@ -539,13 +540,13 @@ out:
return err;
}
-static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
+static ssize_t proc_sys_call_handler(struct file *filp, void __user *ubuf,
size_t count, loff_t *ppos, int write)
{
struct inode *inode = file_inode(filp);
struct ctl_table_header *head = grab_header(inode);
struct ctl_table *table = PROC_I(inode)->sysctl_entry;
- void *new_buf = NULL;
+ void *kbuf;
ssize_t error;
if (IS_ERR(head))
@@ -564,27 +565,42 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
if (!table->proc_handler)
goto out;
- error = BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, &count,
- ppos, &new_buf);
+ /* don't even try if the size is too large */
+ if (count > KMALLOC_MAX_SIZE)
+ return -ENOMEM;
+
+ if (write) {
+ kbuf = memdup_user_nul(ubuf, count);
+ if (IS_ERR(kbuf)) {
+ error = PTR_ERR(kbuf);
+ goto out;
+ }
+ } else {
+ error = -ENOMEM;
+ kbuf = kzalloc(count, GFP_KERNEL);
+ if (!kbuf)
+ goto out;
+ }
+
+ error = BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, &kbuf, &count,
+ ppos);
if (error)
- goto out;
+ goto out_free_buf;
/* careful: calling conventions are nasty here */
- if (new_buf) {
- mm_segment_t old_fs;
-
- old_fs = get_fs();
- set_fs(KERNEL_DS);
- error = table->proc_handler(table, write, (void __user *)new_buf,
- &count, ppos);
- set_fs(old_fs);
- kfree(new_buf);
- } else {
- error = table->proc_handler(table, write, buf, &count, ppos);
+ error = table->proc_handler(table, write, kbuf, &count, ppos);
+ if (error)
+ goto out_free_buf;
+
+ if (!write) {
+ error = -EFAULT;
+ if (copy_to_user(ubuf, kbuf, count))
+ goto out_free_buf;
}
- if (!error)
- error = count;
+ error = count;
+out_free_buf:
+ kfree(kbuf);
out:
sysctl_head_finish(head);
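
proc_sys_call_handler() now bounces all data through a kernel buffer: writes are copied in up front with memdup_user_nul(), and reads are served from a zeroed kernel buffer that is copied out only on success. That removes the old set_fs(KERNEL_DS) workaround for BPF-modified buffers and implies that ->proc_handler implementations receive plain kernel pointers; after the tree-wide conversion this belongs to, the handler prototype is presumably:

	/* Assumed post-conversion prototype: buffer is a kernel pointer. */
	int (*proc_handler)(struct ctl_table *ctl, int write,
			    void *buffer, size_t *lenp, loff_t *ppos);
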
@@ -1692,3 +1708,147 @@ int __init proc_sys_init(void)
return sysctl_init();
}
+
+struct sysctl_alias {
+ const char *kernel_param;
+ const char *sysctl_param;
+};
+
+/*
+ * Historically some settings had both sysctl and a command line parameter.
+ * With the generic sysctl. parameter support, we can handle them at a single
+ * place and only keep the historical name for compatibility. This is not meant
+ * to add brand new aliases. When adding existing aliases, consider whether
+ * the possibly different moment of changing the value (e.g. from early_param
+ * to the moment do_sysctl_args() is called) is an issue for the specific
+ * parameter.
+ */
+static const struct sysctl_alias sysctl_aliases[] = {
+ {"hardlockup_all_cpu_backtrace", "kernel.hardlockup_all_cpu_backtrace" },
+ {"hung_task_panic", "kernel.hung_task_panic" },
+ {"numa_zonelist_order", "vm.numa_zonelist_order" },
+ {"softlockup_all_cpu_backtrace", "kernel.softlockup_all_cpu_backtrace" },
+ {"softlockup_panic", "kernel.softlockup_panic" },
+ { }
+};
+
+static const char *sysctl_find_alias(char *param)
+{
+ const struct sysctl_alias *alias;
+
+ for (alias = &sysctl_aliases[0]; alias->kernel_param != NULL; alias++) {
+ if (strcmp(alias->kernel_param, param) == 0)
+ return alias->sysctl_param;
+ }
+
+ return NULL;
+}
+
+/* Set sysctl value passed on kernel command line. */
+static int process_sysctl_arg(char *param, char *val,
+ const char *unused, void *arg)
+{
+ char *path;
+ struct vfsmount **proc_mnt = arg;
+ struct file_system_type *proc_fs_type;
+ struct file *file;
+ int len;
+ int err;
+ loff_t pos = 0;
+ ssize_t wret;
+
+ if (strncmp(param, "sysctl", sizeof("sysctl") - 1) == 0) {
+ param += sizeof("sysctl") - 1;
+
+ if (param[0] != '/' && param[0] != '.')
+ return 0;
+
+ param++;
+ } else {
+ param = (char *) sysctl_find_alias(param);
+ if (!param)
+ return 0;
+ }
+
+ /*
+ * To set sysctl options, we use a temporary mount of proc, look up the
+ * respective sys/ file and write to it. To avoid mounting it when no
+ * options were given, we mount it only when the first sysctl option is
+ * found. Why not a persistent mount? There are problems with a
+ * persistent mount of proc in that it forces userspace not to use any
+ * proc mount options.
+ */
+ if (!*proc_mnt) {
+ proc_fs_type = get_fs_type("proc");
+ if (!proc_fs_type) {
+ pr_err("Failed to find procfs to set sysctl from command line\n");
+ return 0;
+ }
+ *proc_mnt = kern_mount(proc_fs_type);
+ put_filesystem(proc_fs_type);
+ if (IS_ERR(*proc_mnt)) {
+ pr_err("Failed to mount procfs to set sysctl from command line\n");
+ return 0;
+ }
+ }
+
+ path = kasprintf(GFP_KERNEL, "sys/%s", param);
+ if (!path)
+ panic("%s: Failed to allocate path for %s\n", __func__, param);
+ strreplace(path, '.', '/');
+
+ file = file_open_root((*proc_mnt)->mnt_root, *proc_mnt, path, O_WRONLY, 0);
+ if (IS_ERR(file)) {
+ err = PTR_ERR(file);
+ if (err == -ENOENT)
+ pr_err("Failed to set sysctl parameter '%s=%s': parameter not found\n",
+ param, val);
+ else if (err == -EACCES)
+ pr_err("Failed to set sysctl parameter '%s=%s': permission denied (read-only?)\n",
+ param, val);
+ else
+ pr_err("Error %pe opening proc file to set sysctl parameter '%s=%s'\n",
+ file, param, val);
+ goto out;
+ }
+ len = strlen(val);
+ wret = kernel_write(file, val, len, &pos);
+ if (wret < 0) {
+ err = wret;
+ if (err == -EINVAL)
+ pr_err("Failed to set sysctl parameter '%s=%s': invalid value\n",
+ param, val);
+ else
+ pr_err("Error %pe writing to proc file to set sysctl parameter '%s=%s'\n",
+ ERR_PTR(err), param, val);
+ } else if (wret != len) {
+ pr_err("Wrote only %zd bytes of %d writing to proc file %s to set sysctl parameter '%s=%s\n",
+ wret, len, path, param, val);
+ }
+
+ err = filp_close(file, NULL);
+ if (err)
+ pr_err("Error %pe closing proc file to set sysctl parameter '%s=%s\n",
+ ERR_PTR(err), param, val);
+out:
+ kfree(path);
+ return 0;
+}
+
+void do_sysctl_args(void)
+{
+ char *command_line;
+ struct vfsmount *proc_mnt = NULL;
+
+ command_line = kstrdup(saved_command_line, GFP_KERNEL);
+ if (!command_line)
+ panic("%s: Failed to allocate copy of command line\n", __func__);
+
+ parse_args("Setting sysctl args", command_line,
+ NULL, 0, -1, -1, &proc_mnt, process_sysctl_arg);
+
+ if (proc_mnt)
+ kern_unmount(proc_mnt);
+
+ kfree(command_line);
+}
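
do_sysctl_args() re-parses the saved command line once a proc instance can be mounted, so parameters of the form sysctl.<path>=<value> (or one of the aliased legacy names in sysctl_aliases[]) are applied by writing to the matching /proc/sys file. Illustrative boot parameters (hypothetical values; '.' separators in the path are rewritten to '/' by strreplace()):

	sysctl.vm.swappiness=20 sysctl.kernel.printk_ratelimit=10 hung_task_panic=1

Here the last entry is resolved through the alias table to kernel.hung_task_panic.
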
diff --git a/fs/proc/root.c b/fs/proc/root.c
index cdbe9293ea55..5e444d4f9717 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -32,21 +32,86 @@
struct proc_fs_context {
struct pid_namespace *pid_ns;
unsigned int mask;
- int hidepid;
+ enum proc_hidepid hidepid;
int gid;
+ enum proc_pidonly pidonly;
};
enum proc_param {
Opt_gid,
Opt_hidepid,
+ Opt_subset,
};
static const struct fs_parameter_spec proc_fs_parameters[] = {
fsparam_u32("gid", Opt_gid),
- fsparam_u32("hidepid", Opt_hidepid),
+ fsparam_string("hidepid", Opt_hidepid),
+ fsparam_string("subset", Opt_subset),
{}
};
+static inline int valid_hidepid(unsigned int value)
+{
+ return (value == HIDEPID_OFF ||
+ value == HIDEPID_NO_ACCESS ||
+ value == HIDEPID_INVISIBLE ||
+ value == HIDEPID_NOT_PTRACEABLE);
+}
+
+static int proc_parse_hidepid_param(struct fs_context *fc, struct fs_parameter *param)
+{
+ struct proc_fs_context *ctx = fc->fs_private;
+ struct fs_parameter_spec hidepid_u32_spec = fsparam_u32("hidepid", Opt_hidepid);
+ struct fs_parse_result result;
+ int base = (unsigned long)hidepid_u32_spec.data;
+
+ if (param->type != fs_value_is_string)
+ return invalf(fc, "proc: unexpected type of hidepid value\n");
+
+ if (!kstrtouint(param->string, base, &result.uint_32)) {
+ if (!valid_hidepid(result.uint_32))
+ return invalf(fc, "proc: unknown value of hidepid - %s\n", param->string);
+ ctx->hidepid = result.uint_32;
+ return 0;
+ }
+
+ if (!strcmp(param->string, "off"))
+ ctx->hidepid = HIDEPID_OFF;
+ else if (!strcmp(param->string, "noaccess"))
+ ctx->hidepid = HIDEPID_NO_ACCESS;
+ else if (!strcmp(param->string, "invisible"))
+ ctx->hidepid = HIDEPID_INVISIBLE;
+ else if (!strcmp(param->string, "ptraceable"))
+ ctx->hidepid = HIDEPID_NOT_PTRACEABLE;
+ else
+ return invalf(fc, "proc: unknown value of hidepid - %s\n", param->string);
+
+ return 0;
+}
+
+static int proc_parse_subset_param(struct fs_context *fc, char *value)
+{
+ struct proc_fs_context *ctx = fc->fs_private;
+
+ while (value) {
+ char *ptr = strchr(value, ',');
+
+ if (ptr != NULL)
+ *ptr++ = '\0';
+
+ if (*value != '\0') {
+ if (!strcmp(value, "pid")) {
+ ctx->pidonly = PROC_PIDONLY_ON;
+ } else {
+ return invalf(fc, "proc: unsupported subset option - %s\n", value);
+ }
+ }
+ value = ptr;
+ }
+
+ return 0;
+}
+
static int proc_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
struct proc_fs_context *ctx = fc->fs_private;
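
proc_parse_subset_param() above accepts a comma-separated list even though "pid" is the only recognized token so far, leaving room for future subsets. With subset=pid, the fs/proc/generic.c hunks earlier make non-PID lookups return -ENOENT and non-PID readdir emit nothing. Illustrative usage (hypothetical mountpoint):

	mount -t proc -o subset=pid,hidepid=ptraceable proc /srv/container/proc
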
@@ -63,10 +128,13 @@ static int proc_parse_param(struct fs_context *fc, struct fs_parameter *param)
break;
case Opt_hidepid:
- ctx->hidepid = result.uint_32;
- if (ctx->hidepid < HIDEPID_OFF ||
- ctx->hidepid > HIDEPID_INVISIBLE)
- return invalfc(fc, "hidepid value must be between 0 and 2.\n");
+ if (proc_parse_hidepid_param(fc, param))
+ return -EINVAL;
+ break;
+
+ case Opt_subset:
+ if (proc_parse_subset_param(fc, param->string) < 0)
+ return -EINVAL;
break;
default:
@@ -77,26 +145,33 @@ static int proc_parse_param(struct fs_context *fc, struct fs_parameter *param)
return 0;
}
-static void proc_apply_options(struct super_block *s,
+static void proc_apply_options(struct proc_fs_info *fs_info,
struct fs_context *fc,
- struct pid_namespace *pid_ns,
struct user_namespace *user_ns)
{
struct proc_fs_context *ctx = fc->fs_private;
if (ctx->mask & (1 << Opt_gid))
- pid_ns->pid_gid = make_kgid(user_ns, ctx->gid);
+ fs_info->pid_gid = make_kgid(user_ns, ctx->gid);
if (ctx->mask & (1 << Opt_hidepid))
- pid_ns->hide_pid = ctx->hidepid;
+ fs_info->hide_pid = ctx->hidepid;
+ if (ctx->mask & (1 << Opt_subset))
+ fs_info->pidonly = ctx->pidonly;
}
static int proc_fill_super(struct super_block *s, struct fs_context *fc)
{
- struct pid_namespace *pid_ns = get_pid_ns(s->s_fs_info);
+ struct proc_fs_context *ctx = fc->fs_private;
struct inode *root_inode;
+ struct proc_fs_info *fs_info;
int ret;
- proc_apply_options(s, fc, pid_ns, current_user_ns());
+ fs_info = kzalloc(sizeof(*fs_info), GFP_KERNEL);
+ if (!fs_info)
+ return -ENOMEM;
+
+ fs_info->pid_ns = get_pid_ns(ctx->pid_ns);
+ proc_apply_options(fs_info, fc, current_user_ns());
/* User space would break if executables or devices appear on proc */
s->s_iflags |= SB_I_USERNS_VISIBLE | SB_I_NOEXEC | SB_I_NODEV;
@@ -106,6 +181,7 @@ static int proc_fill_super(struct super_block *s, struct fs_context *fc)
s->s_magic = PROC_SUPER_MAGIC;
s->s_op = &proc_sops;
s->s_time_gran = 1;
+ s->s_fs_info = fs_info;
/*
* procfs isn't actually a stacking filesystem; however, there is
@@ -113,7 +189,7 @@ static int proc_fill_super(struct super_block *s, struct fs_context *fc)
* top of it
*/
s->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
-
+
/* procfs dentries and inodes don't require IO to create */
s->s_shrink.seeks = 0;
@@ -140,19 +216,17 @@ static int proc_fill_super(struct super_block *s, struct fs_context *fc)
static int proc_reconfigure(struct fs_context *fc)
{
struct super_block *sb = fc->root->d_sb;
- struct pid_namespace *pid = sb->s_fs_info;
+ struct proc_fs_info *fs_info = proc_sb_info(sb);
sync_filesystem(sb);
- proc_apply_options(sb, fc, pid, current_user_ns());
+ proc_apply_options(fs_info, fc, current_user_ns());
return 0;
}
static int proc_get_tree(struct fs_context *fc)
{
- struct proc_fs_context *ctx = fc->fs_private;
-
- return get_tree_keyed(fc, proc_fill_super, ctx->pid_ns);
+ return get_tree_nodev(fc, proc_fill_super);
}
static void proc_fs_context_free(struct fs_context *fc)
@@ -188,22 +262,19 @@ static int proc_init_fs_context(struct fs_context *fc)
static void proc_kill_sb(struct super_block *sb)
{
- struct pid_namespace *ns;
+ struct proc_fs_info *fs_info = proc_sb_info(sb);
- ns = (struct pid_namespace *)sb->s_fs_info;
- if (ns->proc_self)
- dput(ns->proc_self);
- if (ns->proc_thread_self)
- dput(ns->proc_thread_self);
- kill_anon_super(sb);
+ if (!fs_info) {
+ kill_anon_super(sb);
+ return;
+ }
- /* Make the pid namespace safe for the next mount of proc */
- ns->proc_self = NULL;
- ns->proc_thread_self = NULL;
- ns->pid_gid = GLOBAL_ROOT_GID;
- ns->hide_pid = 0;
+ dput(fs_info->proc_self);
+ dput(fs_info->proc_thread_self);
- put_pid_ns(ns);
+ kill_anon_super(sb);
+ put_pid_ns(fs_info->pid_ns);
+ kfree(fs_info);
}
static struct file_system_type proc_fs_type = {
diff --git a/fs/proc/self.c b/fs/proc/self.c
index 57c0a1047250..ca5158fa561c 100644
--- a/fs/proc/self.c
+++ b/fs/proc/self.c
@@ -12,7 +12,7 @@ static const char *proc_self_get_link(struct dentry *dentry,
struct inode *inode,
struct delayed_call *done)
{
- struct pid_namespace *ns = proc_pid_ns(inode);
+ struct pid_namespace *ns = proc_pid_ns(inode->i_sb);
pid_t tgid = task_tgid_nr_ns(current, ns);
char *name;
@@ -36,10 +36,10 @@ static unsigned self_inum __ro_after_init;
int proc_setup_self(struct super_block *s)
{
struct inode *root_inode = d_inode(s->s_root);
- struct pid_namespace *ns = proc_pid_ns(root_inode);
+ struct proc_fs_info *fs_info = proc_sb_info(s);
struct dentry *self;
int ret = -ENOMEM;
-
+
inode_lock(root_inode);
self = d_alloc_name(s->s_root, "self");
if (self) {
@@ -62,7 +62,7 @@ int proc_setup_self(struct super_block *s)
if (ret)
pr_err("proc_fill_super: can't allocate /proc/self\n");
else
- ns->proc_self = self;
+ fs_info->proc_self = self;
return ret;
}
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 6ad407d5efe2..dbda4499a859 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -145,7 +145,7 @@ static void *m_start(struct seq_file *m, loff_t *ppos)
return NULL;
}
- if (down_read_killable(&mm->mmap_sem)) {
+ if (mmap_read_lock_killable(mm)) {
mmput(mm);
put_task_struct(priv->task);
priv->task = NULL;
@@ -188,7 +188,7 @@ static void m_stop(struct seq_file *m, void *v)
return;
release_task_mempolicy(priv);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
put_task_struct(priv->task);
priv->task = NULL;
@@ -593,7 +593,7 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
if (pmd_trans_unstable(pmd))
goto out;
/*
- * The mmap_sem held all the way back in m_start() is what
+ * The mmap_lock held all the way back in m_start() is what
* keeps khugepaged out of here and from collapsing things
* in here.
*/
@@ -752,7 +752,7 @@ static void smap_gather_stats(struct vm_area_struct *vma,
}
}
#endif
- /* mmap_sem is held in m_start */
+ /* mmap_lock is held in m_start */
walk_page_vma(vma, &smaps_walk_ops, mss);
}
@@ -847,7 +847,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
memset(&mss, 0, sizeof(mss));
- ret = down_read_killable(&mm->mmap_sem);
+ ret = mmap_read_lock_killable(mm);
if (ret)
goto out_put_mm;
@@ -866,7 +866,7 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
__show_smap(m, &mss, true);
release_task_mempolicy(priv);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
out_put_mm:
mmput(mm);
@@ -1140,7 +1140,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
};
if (type == CLEAR_REFS_MM_HIWATER_RSS) {
- if (down_write_killable(&mm->mmap_sem)) {
+ if (mmap_write_lock_killable(mm)) {
count = -EINTR;
goto out_mm;
}
@@ -1150,11 +1150,11 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
* resident set size to this mm's current rss value.
*/
reset_mm_hiwater_rss(mm);
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
goto out_mm;
}
- if (down_read_killable(&mm->mmap_sem)) {
+ if (mmap_read_lock_killable(mm)) {
count = -EINTR;
goto out_mm;
}
@@ -1163,8 +1163,8 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
for (vma = mm->mmap; vma; vma = vma->vm_next) {
if (!(vma->vm_flags & VM_SOFTDIRTY))
continue;
- up_read(&mm->mmap_sem);
- if (down_write_killable(&mm->mmap_sem)) {
+ mmap_read_unlock(mm);
+ if (mmap_write_lock_killable(mm)) {
count = -EINTR;
goto out_mm;
}
@@ -1183,14 +1183,14 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
* failed like if
* get_proc_task() fails?
*/
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
goto out_mm;
}
for (vma = mm->mmap; vma; vma = vma->vm_next) {
vma->vm_flags &= ~VM_SOFTDIRTY;
vma_set_page_prot(vma);
}
- downgrade_write(&mm->mmap_sem);
+ mmap_write_downgrade(mm);
break;
}
@@ -1203,7 +1203,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
if (type == CLEAR_REFS_SOFT_DIRTY)
mmu_notifier_invalidate_range_end(&range);
tlb_finish_mmu(&tlb, 0, -1);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
out_mm:
mmput(mm);
}
@@ -1564,11 +1564,11 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
/* overflow ? */
if (end < start_vaddr || end > end_vaddr)
end = end_vaddr;
- ret = down_read_killable(&mm->mmap_sem);
+ ret = mmap_read_lock_killable(mm);
if (ret)
goto out_free;
ret = walk_page_range(mm, start_vaddr, end, &pagemap_ops, &pm);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
start_vaddr = end;
len = min(count, PM_ENTRY_BYTES * pm.pos);
@@ -1827,7 +1827,7 @@ static int show_numa_map(struct seq_file *m, void *v)
if (is_vm_hugetlb_page(vma))
seq_puts(m, " huge");
- /* mmap_sem is held by m_start */
+ /* mmap_lock is held by m_start */
walk_page_vma(vma, &show_numa_ops, md);
if (!md->pages)
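
Note: the task_mmu.c hunks are a mechanical conversion from open-coded rwsem operations on mm->mmap_sem to the mmap_lock wrapper API; behaviour is unchanged. A minimal sketch of the read-side wrappers, assuming the rwsem member is renamed to mmap_lock elsewhere in the series:

static inline void mmap_read_lock(struct mm_struct *mm)
{
	down_read(&mm->mmap_lock);
}

static inline int mmap_read_lock_killable(struct mm_struct *mm)
{
	return down_read_killable(&mm->mmap_lock);
}

static inline void mmap_read_unlock(struct mm_struct *mm)
{
	up_read(&mm->mmap_lock);
}

The write, downgrade, and assert variants follow the same pattern, which is what makes this conversion safe to apply file by file.
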
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index 7907e6419e57..a6d21fc0033c 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -25,7 +25,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
struct rb_node *p;
unsigned long bytes = 0, sbytes = 0, slack = 0, size;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
vma = rb_entry(p, struct vm_area_struct, vm_rb);
@@ -77,7 +77,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
"Shared:\t%8lu bytes\n",
bytes, slack, sbytes);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
}
unsigned long task_vsize(struct mm_struct *mm)
@@ -86,12 +86,12 @@ unsigned long task_vsize(struct mm_struct *mm)
struct rb_node *p;
unsigned long vsize = 0;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
vma = rb_entry(p, struct vm_area_struct, vm_rb);
vsize += vma->vm_end - vma->vm_start;
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return vsize;
}
@@ -104,7 +104,7 @@ unsigned long task_statm(struct mm_struct *mm,
struct rb_node *p;
unsigned long size = kobjsize(mm);
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
vma = rb_entry(p, struct vm_area_struct, vm_rb);
size += kobjsize(vma);
@@ -119,7 +119,7 @@ unsigned long task_statm(struct mm_struct *mm,
>> PAGE_SHIFT;
*data = (PAGE_ALIGN(mm->start_stack) - (mm->start_data & PAGE_MASK))
>> PAGE_SHIFT;
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
size >>= PAGE_SHIFT;
size += *text + *data;
*resident = size;
@@ -211,7 +211,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
if (!mm || !mmget_not_zero(mm))
return NULL;
- if (down_read_killable(&mm->mmap_sem)) {
+ if (mmap_read_lock_killable(mm)) {
mmput(mm);
return ERR_PTR(-EINTR);
}
@@ -221,7 +221,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
if (n-- == 0)
return p;
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
return NULL;
}
@@ -231,7 +231,7 @@ static void m_stop(struct seq_file *m, void *_vml)
struct proc_maps_private *priv = m->private;
if (!IS_ERR_OR_NULL(_vml)) {
- up_read(&priv->mm->mmap_sem);
+ mmap_read_unlock(priv->mm);
mmput(priv->mm);
}
if (priv->task) {
diff --git a/fs/proc/thread_self.c b/fs/proc/thread_self.c
index f61ae53533f5..ac284f409568 100644
--- a/fs/proc/thread_self.c
+++ b/fs/proc/thread_self.c
@@ -12,7 +12,7 @@ static const char *proc_thread_self_get_link(struct dentry *dentry,
struct inode *inode,
struct delayed_call *done)
{
- struct pid_namespace *ns = proc_pid_ns(inode);
+ struct pid_namespace *ns = proc_pid_ns(inode->i_sb);
pid_t tgid = task_tgid_nr_ns(current, ns);
pid_t pid = task_pid_nr_ns(current, ns);
char *name;
@@ -36,7 +36,7 @@ static unsigned thread_self_inum __ro_after_init;
int proc_setup_thread_self(struct super_block *s)
{
struct inode *root_inode = d_inode(s->s_root);
- struct pid_namespace *ns = proc_pid_ns(root_inode);
+ struct proc_fs_info *fs_info = proc_sb_info(s);
struct dentry *thread_self;
int ret = -ENOMEM;
@@ -60,9 +60,9 @@ int proc_setup_thread_self(struct super_block *s)
inode_unlock(root_inode);
if (ret)
- pr_err("proc_fill_super: can't allocate /proc/thread_self\n");
+ pr_err("proc_fill_super: can't allocate /proc/thread-self\n");
else
- ns->proc_thread_self = thread_self;
+ fs_info->proc_thread_self = thread_self;
return ret;
}
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index c663202da8de..c3a345c28a93 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -27,7 +27,6 @@
#include <linux/pagemap.h>
#include <linux/uaccess.h>
#include <linux/mem_encrypt.h>
-#include <asm/pgtable.h>
#include <asm/io.h>
#include "internal.h"
diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c
index e4d70c0dffe9..3059a9394c2d 100644
--- a/fs/proc_namespace.c
+++ b/fs/proc_namespace.c
@@ -37,23 +37,23 @@ static __poll_t mounts_poll(struct file *file, poll_table *wait)
return res;
}
-struct proc_fs_info {
+struct proc_fs_opts {
int flag;
const char *str;
};
static int show_sb_opts(struct seq_file *m, struct super_block *sb)
{
- static const struct proc_fs_info fs_info[] = {
+ static const struct proc_fs_opts fs_opts[] = {
{ SB_SYNCHRONOUS, ",sync" },
{ SB_DIRSYNC, ",dirsync" },
{ SB_MANDLOCK, ",mand" },
{ SB_LAZYTIME, ",lazytime" },
{ 0, NULL }
};
- const struct proc_fs_info *fs_infop;
+ const struct proc_fs_opts *fs_infop;
- for (fs_infop = fs_info; fs_infop->flag; fs_infop++) {
+ for (fs_infop = fs_opts; fs_infop->flag; fs_infop++) {
if (sb->s_flags & fs_infop->flag)
seq_puts(m, fs_infop->str);
}
@@ -63,7 +63,7 @@ static int show_sb_opts(struct seq_file *m, struct super_block *sb)
static void show_mnt_opts(struct seq_file *m, struct vfsmount *mnt)
{
- static const struct proc_fs_info mnt_info[] = {
+ static const struct proc_fs_opts mnt_opts[] = {
{ MNT_NOSUID, ",nosuid" },
{ MNT_NODEV, ",nodev" },
{ MNT_NOEXEC, ",noexec" },
@@ -72,9 +72,9 @@ static void show_mnt_opts(struct seq_file *m, struct vfsmount *mnt)
{ MNT_RELATIME, ",relatime" },
{ 0, NULL }
};
- const struct proc_fs_info *fs_infop;
+ const struct proc_fs_opts *fs_infop;
- for (fs_infop = mnt_info; fs_infop->flag; fs_infop++) {
+ for (fs_infop = mnt_opts; fs_infop->flag; fs_infop++) {
if (mnt->mnt_flags & fs_infop->flag)
seq_puts(m, fs_infop->str);
}
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index b6a4f692d345..7b4bac91146b 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -2841,7 +2841,7 @@ const struct quotactl_ops dquot_quotactl_sysfile_ops = {
EXPORT_SYMBOL(dquot_quotactl_sysfile_ops);
static int do_proc_dqstats(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
+ void *buffer, size_t *lenp, loff_t *ppos)
{
unsigned int type = (unsigned long *)table->data - dqstats.stat;
s64 value = percpu_counter_sum(&dqstats.counter[type]);
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 0031070b3692..1509775da040 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -1066,7 +1066,7 @@ research:
} else {
/* paste hole to the indirect item */
/*
- * If kmalloc failed, max_to_insert becomes
+ * If kcalloc failed, max_to_insert becomes
* zero and it means we only have space for
* one block
*/
diff --git a/fs/select.c b/fs/select.c
index 11d0285d46b7..7aef49552d4c 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -766,22 +766,38 @@ static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
* which has a pointer to the sigset_t itself followed by a size_t containing
* the sigset size.
*/
+struct sigset_argpack {
+ sigset_t __user *p;
+ size_t size;
+};
+
+static inline int get_sigset_argpack(struct sigset_argpack *to,
+ struct sigset_argpack __user *from)
+{
+ // the path is hot enough for overhead of copy_from_user() to matter
+ if (from) {
+ if (!user_read_access_begin(from, sizeof(*from)))
+ return -EFAULT;
+ unsafe_get_user(to->p, &from->p, Efault);
+ unsafe_get_user(to->size, &from->size, Efault);
+ user_read_access_end();
+ }
+ return 0;
+Efault:
+ user_access_end();
+ return -EFAULT;
+}
+
SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
fd_set __user *, exp, struct __kernel_timespec __user *, tsp,
void __user *, sig)
{
- size_t sigsetsize = 0;
- sigset_t __user *up = NULL;
-
- if (sig) {
- if (!access_ok(sig, sizeof(void *)+sizeof(size_t))
- || __get_user(up, (sigset_t __user * __user *)sig)
- || __get_user(sigsetsize,
- (size_t __user *)(sig+sizeof(void *))))
- return -EFAULT;
- }
+ struct sigset_argpack x = {NULL, 0};
+
+ if (get_sigset_argpack(&x, sig))
+ return -EFAULT;
- return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize, PT_TIMESPEC);
+ return do_pselect(n, inp, outp, exp, tsp, x.p, x.size, PT_TIMESPEC);
}
#if defined(CONFIG_COMPAT_32BIT_TIME) && !defined(CONFIG_64BIT)
@@ -790,18 +806,12 @@ SYSCALL_DEFINE6(pselect6_time32, int, n, fd_set __user *, inp, fd_set __user *,
fd_set __user *, exp, struct old_timespec32 __user *, tsp,
void __user *, sig)
{
- size_t sigsetsize = 0;
- sigset_t __user *up = NULL;
-
- if (sig) {
- if (!access_ok(sig, sizeof(void *)+sizeof(size_t))
- || __get_user(up, (sigset_t __user * __user *)sig)
- || __get_user(sigsetsize,
- (size_t __user *)(sig+sizeof(void *))))
- return -EFAULT;
- }
+ struct sigset_argpack x = {NULL, 0};
+
+ if (get_sigset_argpack(&x, sig))
+ return -EFAULT;
- return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize, PT_OLD_TIMESPEC);
+ return do_pselect(n, inp, outp, exp, tsp, x.p, x.size, PT_OLD_TIMESPEC);
}
#endif
@@ -1325,24 +1335,37 @@ static long do_compat_pselect(int n, compat_ulong_t __user *inp,
return poll_select_finish(&end_time, tsp, type, ret);
}
+struct compat_sigset_argpack {
+ compat_uptr_t p;
+ compat_size_t size;
+};
+static inline int get_compat_sigset_argpack(struct compat_sigset_argpack *to,
+ struct compat_sigset_argpack __user *from)
+{
+ if (from) {
+ if (!user_read_access_begin(from, sizeof(*from)))
+ return -EFAULT;
+ unsafe_get_user(to->p, &from->p, Efault);
+ unsafe_get_user(to->size, &from->size, Efault);
+ user_read_access_end();
+ }
+ return 0;
+Efault:
+ user_access_end();
+ return -EFAULT;
+}
+
COMPAT_SYSCALL_DEFINE6(pselect6_time64, int, n, compat_ulong_t __user *, inp,
compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
struct __kernel_timespec __user *, tsp, void __user *, sig)
{
- compat_size_t sigsetsize = 0;
- compat_uptr_t up = 0;
-
- if (sig) {
- if (!access_ok(sig,
- sizeof(compat_uptr_t)+sizeof(compat_size_t)) ||
- __get_user(up, (compat_uptr_t __user *)sig) ||
- __get_user(sigsetsize,
- (compat_size_t __user *)(sig+sizeof(up))))
- return -EFAULT;
- }
+ struct compat_sigset_argpack x = {0, 0};
+
+ if (get_compat_sigset_argpack(&x, sig))
+ return -EFAULT;
- return do_compat_pselect(n, inp, outp, exp, tsp, compat_ptr(up),
- sigsetsize, PT_TIMESPEC);
+ return do_compat_pselect(n, inp, outp, exp, tsp, compat_ptr(x.p),
+ x.size, PT_TIMESPEC);
}
#if defined(CONFIG_COMPAT_32BIT_TIME)
@@ -1351,20 +1374,13 @@ COMPAT_SYSCALL_DEFINE6(pselect6_time32, int, n, compat_ulong_t __user *, inp,
compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
struct old_timespec32 __user *, tsp, void __user *, sig)
{
- compat_size_t sigsetsize = 0;
- compat_uptr_t up = 0;
-
- if (sig) {
- if (!access_ok(sig,
- sizeof(compat_uptr_t)+sizeof(compat_size_t)) ||
- __get_user(up, (compat_uptr_t __user *)sig) ||
- __get_user(sigsetsize,
- (compat_size_t __user *)(sig+sizeof(up))))
- return -EFAULT;
- }
+ struct compat_sigset_argpack x = {0, 0};
+
+ if (get_compat_sigset_argpack(&x, sig))
+ return -EFAULT;
- return do_compat_pselect(n, inp, outp, exp, tsp, compat_ptr(up),
- sigsetsize, PT_OLD_TIMESPEC);
+ return do_compat_pselect(n, inp, outp, exp, tsp, compat_ptr(x.p),
+ x.size, PT_OLD_TIMESPEC);
}
#endif
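
Note: the sixth pselect6() argument has always been a two-word block in user memory — a sigset pointer followed by its size — and the new helper reads both words inside a single user_read_access_begin() section instead of two __get_user() calls. A hedged userspace sketch of what the kernel is parsing (glibc normally builds this internally; struct and function names here are illustrative):

#include <signal.h>
#include <stddef.h>
#include <sys/select.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

struct ksigset_arg {
	const sigset_t *ss;	/* signal set for the duration of the call */
	size_t ss_len;		/* must match the kernel's sigset size */
};

static long raw_pselect6(int nfds, fd_set *r, fd_set *w, fd_set *e,
			 const struct timespec *ts, const sigset_t *ss)
{
	/* 8 bytes is the kernel sigset size on x86-64 (_NSIG / 8). */
	struct ksigset_arg arg = { ss, 8 };

	return syscall(SYS_pselect6, nfds, r, w, e, ts, ss ? &arg : NULL);
}
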
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 70f5fdf99bf6..4e6239f33c06 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -6,6 +6,8 @@
* initial implementation -- AV, Oct 2001.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/cache.h>
#include <linux/fs.h>
#include <linux/export.h>
@@ -233,9 +235,8 @@ Fill:
p = m->op->next(m, p, &m->index);
if (pos == m->index) {
- pr_info_ratelimited("buggy seq_file .next function %ps "
- "did not updated position index\n",
- m->op->next);
+ pr_info_ratelimited("buggy .next function %ps did not update position index\n",
+ m->op->next);
m->index++;
}
if (!p || IS_ERR(p)) {
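
Note: defining pr_fmt before the includes makes every pr_*() call in seq_file.c carry a "seq_file: " prefix automatically, which is why the message text can drop the subsystem name. Illustrative effect:

/* with the pr_fmt above, this... */
pr_info_ratelimited("buggy .next function %ps did not update position index\n",
		    m->op->next);
/* ...logs as: seq_file: buggy .next function <fn> did not update position index */
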
diff --git a/fs/splice.c b/fs/splice.c
index 5013565eb756..6b3c9a018a8e 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -44,8 +44,8 @@
* addition of remove_mapping(). If success is returned, the caller may
* attempt to reuse this page for another destination.
*/
-static int page_cache_pipe_buf_steal(struct pipe_inode_info *pipe,
- struct pipe_buffer *buf)
+static bool page_cache_pipe_buf_try_steal(struct pipe_inode_info *pipe,
+ struct pipe_buffer *buf)
{
struct page *page = buf->page;
struct address_space *mapping;
@@ -76,7 +76,7 @@ static int page_cache_pipe_buf_steal(struct pipe_inode_info *pipe,
*/
if (remove_mapping(mapping, page)) {
buf->flags |= PIPE_BUF_FLAG_LRU;
- return 0;
+ return true;
}
}
@@ -86,7 +86,7 @@ static int page_cache_pipe_buf_steal(struct pipe_inode_info *pipe,
*/
out_unlock:
unlock_page(page);
- return 1;
+ return false;
}
static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe,
@@ -139,27 +139,26 @@ error:
}
const struct pipe_buf_operations page_cache_pipe_buf_ops = {
- .confirm = page_cache_pipe_buf_confirm,
- .release = page_cache_pipe_buf_release,
- .steal = page_cache_pipe_buf_steal,
- .get = generic_pipe_buf_get,
+ .confirm = page_cache_pipe_buf_confirm,
+ .release = page_cache_pipe_buf_release,
+ .try_steal = page_cache_pipe_buf_try_steal,
+ .get = generic_pipe_buf_get,
};
-static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
- struct pipe_buffer *buf)
+static bool user_page_pipe_buf_try_steal(struct pipe_inode_info *pipe,
+ struct pipe_buffer *buf)
{
if (!(buf->flags & PIPE_BUF_FLAG_GIFT))
- return 1;
+ return false;
buf->flags |= PIPE_BUF_FLAG_LRU;
- return generic_pipe_buf_steal(pipe, buf);
+ return generic_pipe_buf_try_steal(pipe, buf);
}
static const struct pipe_buf_operations user_page_pipe_buf_ops = {
- .confirm = generic_pipe_buf_confirm,
- .release = page_cache_pipe_buf_release,
- .steal = user_page_pipe_buf_steal,
- .get = generic_pipe_buf_get,
+ .release = page_cache_pipe_buf_release,
+ .try_steal = user_page_pipe_buf_try_steal,
+ .get = generic_pipe_buf_get,
};
static void wakeup_pipe_readers(struct pipe_inode_info *pipe)
@@ -331,24 +330,15 @@ ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
EXPORT_SYMBOL(generic_file_splice_read);
const struct pipe_buf_operations default_pipe_buf_ops = {
- .confirm = generic_pipe_buf_confirm,
- .release = generic_pipe_buf_release,
- .steal = generic_pipe_buf_steal,
- .get = generic_pipe_buf_get,
+ .release = generic_pipe_buf_release,
+ .try_steal = generic_pipe_buf_try_steal,
+ .get = generic_pipe_buf_get,
};
-int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
- struct pipe_buffer *buf)
-{
- return 1;
-}
-
/* Pipe buffer operations for a socket and similar. */
const struct pipe_buf_operations nosteal_pipe_buf_ops = {
- .confirm = generic_pipe_buf_confirm,
- .release = generic_pipe_buf_release,
- .steal = generic_pipe_buf_nosteal,
- .get = generic_pipe_buf_get,
+ .release = generic_pipe_buf_release,
+ .get = generic_pipe_buf_get,
};
EXPORT_SYMBOL(nosteal_pipe_buf_ops);
@@ -852,15 +842,9 @@ EXPORT_SYMBOL(generic_splice_sendpage);
static long do_splice_from(struct pipe_inode_info *pipe, struct file *out,
loff_t *ppos, size_t len, unsigned int flags)
{
- ssize_t (*splice_write)(struct pipe_inode_info *, struct file *,
- loff_t *, size_t, unsigned int);
-
if (out->f_op->splice_write)
- splice_write = out->f_op->splice_write;
- else
- splice_write = default_file_splice_write;
-
- return splice_write(pipe, out, ppos, len, flags);
+ return out->f_op->splice_write(pipe, out, ppos, len, flags);
+ return default_file_splice_write(pipe, out, ppos, len, flags);
}
/*
@@ -870,8 +854,6 @@ static long do_splice_to(struct file *in, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags)
{
- ssize_t (*splice_read)(struct file *, loff_t *,
- struct pipe_inode_info *, size_t, unsigned int);
int ret;
if (unlikely(!(in->f_mode & FMODE_READ)))
@@ -885,11 +867,8 @@ static long do_splice_to(struct file *in, loff_t *ppos,
len = MAX_RW_COUNT;
if (in->f_op->splice_read)
- splice_read = in->f_op->splice_read;
- else
- splice_read = default_file_splice_read;
-
- return splice_read(in, ppos, pipe, len, flags);
+ return in->f_op->splice_read(in, ppos, pipe, len, flags);
+ return default_file_splice_read(in, ppos, pipe, len, flags);
}
/**
@@ -1626,12 +1605,11 @@ retry:
*obuf = *ibuf;
/*
- * Don't inherit the gift flag, we need to
+ * Don't inherit the gift and merge flags, we need to
* prevent multiple steals of this page.
*/
obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
-
- pipe_buf_mark_unmergeable(obuf);
+ obuf->flags &= ~PIPE_BUF_FLAG_CAN_MERGE;
obuf->len = len;
ibuf->offset += len;
@@ -1719,12 +1697,11 @@ static int link_pipe(struct pipe_inode_info *ipipe,
*obuf = *ibuf;
/*
- * Don't inherit the gift flag, we need to
- * prevent multiple steals of this page.
+ * Don't inherit the gift and merge flag, we need to prevent
+ * multiple steals of this page.
*/
obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
-
- pipe_buf_mark_unmergeable(obuf);
+ obuf->flags &= ~PIPE_BUF_FLAG_CAN_MERGE;
if (obuf->len > len)
obuf->len = len;
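
Note: the splice.c hunks track the pipe_buf_operations rework — ->steal returning 0 on success becomes ->try_steal returning true on success, and a NULL ->confirm or ->try_steal now means "always confirmed" / "never stealable", which is why generic_pipe_buf_confirm and generic_pipe_buf_nosteal vanish from the tables. A sketch of the reworked callback shape, assuming the 5.8-era pipe_buffer types:

struct pipe_buf_operations {
	int  (*confirm)(struct pipe_inode_info *, struct pipe_buffer *);
	void (*release)(struct pipe_inode_info *, struct pipe_buffer *);
	bool (*try_steal)(struct pipe_inode_info *, struct pipe_buffer *);
	bool (*get)(struct pipe_inode_info *, struct pipe_buffer *);
};
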
diff --git a/fs/stat.c b/fs/stat.c
index b86a5c338133..44f8ad346db4 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -80,6 +80,9 @@ int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
if (IS_AUTOMOUNT(inode))
stat->attributes |= STATX_ATTR_AUTOMOUNT;
+ if (IS_DAX(inode))
+ stat->attributes |= STATX_ATTR_DAX;
+
if (inode->i_op->getattr)
return inode->i_op->getattr(path, stat, request_mask,
query_flags);
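
Note: with STATX_ATTR_DAX reported from vfs_getattr_nosec(), userspace can query DAX state generically instead of per-filesystem. A hedged userspace probe (the fallback #define matches the uapi value introduced alongside this change; requires a glibc with the statx() wrapper):

#include <fcntl.h>
#include <sys/stat.h>

#ifndef STATX_ATTR_DAX
#define STATX_ATTR_DAX	0x00200000	/* per uapi/linux/stat.h */
#endif

/* Returns 1 if path is DAX-backed, 0 if not, -1 on error. */
int file_is_dax(const char *path)
{
	struct statx stx;

	if (statx(AT_FDCWD, path, 0, STATX_BASIC_STATS, &stx) != 0)
		return -1;
	return !!(stx.stx_attributes & STATX_ATTR_DAX);
}
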
diff --git a/fs/super.c b/fs/super.c
index bf3b7685b52a..904459b35119 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -361,7 +361,7 @@ EXPORT_SYMBOL(deactivate_locked_super);
*/
void deactivate_super(struct super_block *s)
{
- if (!atomic_add_unless(&s->s_active, -1, 1)) {
+ if (!atomic_add_unless(&s->s_active, -1, 1)) {
down_write(&s->s_umount);
deactivate_locked_super(s);
}
diff --git a/fs/sync.c b/fs/sync.c
index c6f6f5be5682..1373a610dc78 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -76,7 +76,8 @@ static void sync_inodes_one_sb(struct super_block *sb, void *arg)
static void sync_fs_one_sb(struct super_block *sb, void *arg)
{
- if (!sb_rdonly(sb) && sb->s_op->sync_fs)
+ if (!sb_rdonly(sb) && !(sb->s_iflags & SB_I_SKIP_SYNC) &&
+ sb->s_op->sync_fs)
sb->s_op->sync_fs(sb, *(int *)arg);
}
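
Note: SB_I_SKIP_SYNC lets a superblock opt out of the global sync(2)/periodic writeback traversal even when mounted read-write. A hypothetical sketch of a pseudo filesystem setting it (examplefs and its fill_super are illustrative only, not part of this patch):

static int examplefs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	sb->s_iflags |= SB_I_SKIP_SYNC;	/* nothing useful to write back */
	return 0;	/* a real fill_super would also set up s_root etc. */
}
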
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index f275fcda62fb..eb6897ab78e7 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -492,6 +492,7 @@ bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr)
kernfs_put(kn);
return ret;
}
+EXPORT_SYMBOL_GPL(sysfs_remove_file_self);
void sysfs_remove_files(struct kobject *kobj, const struct attribute * const *ptr)
{
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index e39fdec8a0b0..52de29000c7e 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -234,7 +234,7 @@ static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
pte_t *ptep, pte;
bool ret = true;
- VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
+ mmap_assert_locked(mm);
ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));
@@ -286,7 +286,7 @@ static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
pte_t *pte;
bool ret = true;
- VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
+ mmap_assert_locked(mm);
pgd = pgd_offset(mm, address);
if (!pgd_present(*pgd))
@@ -369,13 +369,13 @@ static inline bool userfaultfd_signal_pending(unsigned int flags)
* FAULT_FLAG_KILLABLE are not straightforward. The "Caution"
* recommendation in __lock_page_or_retry is not an understatement.
*
- * If FAULT_FLAG_ALLOW_RETRY is set, the mmap_sem must be released
+ * If FAULT_FLAG_ALLOW_RETRY is set, the mmap_lock must be released
* before returning VM_FAULT_RETRY only if FAULT_FLAG_RETRY_NOWAIT is
* not set.
*
* If FAULT_FLAG_ALLOW_RETRY is set but FAULT_FLAG_KILLABLE is not
* set, VM_FAULT_RETRY can still be returned if and only if there are
- * fatal_signal_pending()s, and the mmap_sem must be released before
+ * fatal_signal_pending()s, and the mmap_lock must be released before
* returning it.
*/
vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
@@ -396,16 +396,16 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
* FOLL_DUMP case, anon memory also checks for FOLL_DUMP with
* the no_page_table() helper in follow_page_mask(), but the
* shmem_vm_ops->fault method is invoked even during
- * coredumping without mmap_sem and it ends up here.
+ * coredumping without mmap_lock and it ends up here.
*/
if (current->flags & (PF_EXITING|PF_DUMPCORE))
goto out;
/*
- * Coredumping runs without mmap_sem so we can only check that
- * the mmap_sem is held, if PF_DUMPCORE was not set.
+ * Coredumping runs without mmap_lock so we can only check that
+ * the mmap_lock is held, if PF_DUMPCORE was not set.
*/
- WARN_ON_ONCE(!rwsem_is_locked(&mm->mmap_sem));
+ mmap_assert_locked(mm);
ctx = vmf->vma->vm_userfaultfd_ctx.ctx;
if (!ctx)
@@ -422,7 +422,7 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
/*
* If it's already released don't get it. This avoids to loop
* in __get_user_pages if userfaultfd_release waits on the
- * caller of handle_userfault to release the mmap_sem.
+ * caller of handle_userfault to release the mmap_lock.
*/
if (unlikely(READ_ONCE(ctx->released))) {
/*
@@ -481,7 +481,7 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
goto out;
- /* take the reference before dropping the mmap_sem */
+ /* take the reference before dropping the mmap_lock */
userfaultfd_ctx_get(ctx);
init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);
@@ -514,7 +514,7 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
must_wait = userfaultfd_huge_must_wait(ctx, vmf->vma,
vmf->address,
vmf->flags, reason);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (likely(must_wait && !READ_ONCE(ctx->released) &&
!userfaultfd_signal_pending(vmf->flags))) {
@@ -637,7 +637,7 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
struct mm_struct *mm = release_new_ctx->mm;
/* the various vma->vm_userfaultfd_ctx still points to it */
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
/* no task can run (and in turn coredump) yet */
VM_WARN_ON(!mmget_still_valid(mm));
for (vma = mm->mmap; vma; vma = vma->vm_next)
@@ -645,7 +645,7 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING);
}
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
userfaultfd_ctx_put(release_new_ctx);
}
@@ -799,7 +799,7 @@ bool userfaultfd_remove(struct vm_area_struct *vma,
userfaultfd_ctx_get(ctx);
WRITE_ONCE(ctx->mmap_changing, true);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
msg_init(&ewq.msg);
@@ -890,11 +890,11 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
* Flush page faults out of all CPUs. NOTE: all page faults
* must be retried without returning VM_FAULT_SIGBUS if
* userfaultfd_ctx_get() succeeds but vma->vma_userfault_ctx
- * changes while handle_userfault released the mmap_sem. So
+ * changes while handle_userfault released the mmap_lock. So
* it's critical that released is set to true (above), before
- * taking the mmap_sem for writing.
+ * taking the mmap_lock for writing.
*/
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
still_valid = mmget_still_valid(mm);
prev = NULL;
for (vma = mm->mmap; vma; vma = vma->vm_next) {
@@ -920,7 +920,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
vma->vm_flags = new_flags;
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
}
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
mmput(mm);
wakeup:
/*
@@ -1248,7 +1248,7 @@ static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
/*
* To be sure waitqueue_active() is not reordered by the CPU
* before the pagetable update, use an explicit SMP memory
- * barrier here. PT lock release or up_read(mmap_sem) still
+ * barrier here. PT lock release or mmap_read_unlock(mm) still
* have release semantics that can allow the
* waitqueue_active() to be reordered before the pte update.
*/
@@ -1345,7 +1345,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
if (!mmget_not_zero(mm))
goto out;
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
if (!mmget_still_valid(mm))
goto out_unlock;
vma = find_vma_prev(mm, start, &prev);
@@ -1492,7 +1492,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
vma = vma->vm_next;
} while (vma && vma->vm_start < end);
out_unlock:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
mmput(mm);
if (!ret) {
__u64 ioctls_out;
@@ -1547,7 +1547,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
if (!mmget_not_zero(mm))
goto out;
- down_write(&mm->mmap_sem);
+ mmap_write_lock(mm);
if (!mmget_still_valid(mm))
goto out_unlock;
vma = find_vma_prev(mm, start, &prev);
@@ -1664,7 +1664,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
vma = vma->vm_next;
} while (vma && vma->vm_start < end);
out_unlock:
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
mmput(mm);
out:
return ret;
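
Note: replacing VM_BUG_ON(!rwsem_is_locked(...)) with mmap_assert_locked() keeps the runtime check and adds lockdep coverage — with lockdep enabled it verifies the current context actually holds the lock, not merely that someone does. A minimal sketch of the helper, per the mmap_lock API this series introduces:

static inline void mmap_assert_locked(struct mm_struct *mm)
{
	lockdep_assert_held(&mm->mmap_lock);
	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
}
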
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index 4f95df476181..04611a1068b4 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -7,8 +7,6 @@
ccflags-y += -I $(srctree)/$(src) # needed for trace events
ccflags-y += -I $(srctree)/$(src)/libxfs
-ccflags-$(CONFIG_XFS_DEBUG) += -g
-
obj-$(CONFIG_XFS_FS) += xfs.o
# this one should be compiled first, as the tracing macros can easily blow up
@@ -101,9 +99,12 @@ xfs-y += xfs_log.o \
xfs_log_cil.o \
xfs_bmap_item.o \
xfs_buf_item.o \
+ xfs_buf_item_recover.o \
+ xfs_dquot_item_recover.o \
xfs_extfree_item.o \
xfs_icreate_item.o \
xfs_inode_item.o \
+ xfs_inode_item_recover.o \
xfs_refcount_item.o \
xfs_rmap_item.o \
xfs_log_recover.o \
diff --git a/fs/xfs/kmem.h b/fs/xfs/kmem.h
index 6143117770e9..34cbcfde9228 100644
--- a/fs/xfs/kmem.h
+++ b/fs/xfs/kmem.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2000-2005 Silicon Graphics, Inc.
* All Rights Reserved.
@@ -19,6 +19,7 @@ typedef unsigned __bitwise xfs_km_flags_t;
#define KM_NOFS ((__force xfs_km_flags_t)0x0004u)
#define KM_MAYFAIL ((__force xfs_km_flags_t)0x0008u)
#define KM_ZERO ((__force xfs_km_flags_t)0x0010u)
+#define KM_NOLOCKDEP ((__force xfs_km_flags_t)0x0020u)
/*
* We use a special process flag to avoid recursive callbacks into
@@ -30,7 +31,7 @@ kmem_flags_convert(xfs_km_flags_t flags)
{
gfp_t lflags;
- BUG_ON(flags & ~(KM_NOFS|KM_MAYFAIL|KM_ZERO));
+ BUG_ON(flags & ~(KM_NOFS | KM_MAYFAIL | KM_ZERO | KM_NOLOCKDEP));
lflags = GFP_KERNEL | __GFP_NOWARN;
if (flags & KM_NOFS)
@@ -49,6 +50,9 @@ kmem_flags_convert(xfs_km_flags_t flags)
if (flags & KM_ZERO)
lflags |= __GFP_ZERO;
+ if (flags & KM_NOLOCKDEP)
+ lflags |= __GFP_NOLOCKDEP;
+
return lflags;
}
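
Note: KM_NOLOCKDEP maps directly onto __GFP_NOLOCKDEP, giving XFS callers a per-allocation way to silence known-false-positive reclaim lockdep reports. Illustrative, derived from the conversion shown above (the helper name is hypothetical):

static gfp_t example_xattr_gfp(void)
{
	/* == GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO | __GFP_NOLOCKDEP */
	return kmem_flags_convert(KM_ZERO | KM_NOLOCKDEP);
}
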
diff --git a/fs/xfs/libxfs/xfs_ag_resv.h b/fs/xfs/libxfs/xfs_ag_resv.h
index c0352edc8e41..f3fd0ee9a7f7 100644
--- a/fs/xfs/libxfs/xfs_ag_resv.h
+++ b/fs/xfs/libxfs/xfs_ag_resv.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2016 Oracle. All Rights Reserved.
* Author: Darrick J. Wong <darrick.wong@oracle.com>
diff --git a/fs/xfs/libxfs/xfs_alloc.h b/fs/xfs/libxfs/xfs_alloc.h
index a851bf77f17b..6c22b12176b8 100644
--- a/fs/xfs/libxfs/xfs_alloc.h
+++ b/fs/xfs/libxfs/xfs_alloc.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
* All Rights Reserved.
diff --git a/fs/xfs/libxfs/xfs_alloc_btree.h b/fs/xfs/libxfs/xfs_alloc_btree.h
index 047f09f0be3c..a5b998e950fe 100644
--- a/fs/xfs/libxfs/xfs_alloc_btree.h
+++ b/fs/xfs/libxfs/xfs_alloc_btree.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2000,2005 Silicon Graphics, Inc.
* All Rights Reserved.
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index e4fe3dca9883..3b1bd6e112f8 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -61,8 +61,8 @@ xfs_inode_hasattr(
struct xfs_inode *ip)
{
if (!XFS_IFORK_Q(ip) ||
- (ip->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
- ip->i_d.di_anextents == 0))
+ (ip->i_afp->if_format == XFS_DINODE_FMT_EXTENTS &&
+ ip->i_afp->if_nextents == 0))
return 0;
return 1;
}
@@ -84,7 +84,7 @@ xfs_attr_get_ilocked(
if (!xfs_inode_hasattr(args->dp))
return -ENOATTR;
- if (args->dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL)
+ if (args->dp->i_afp->if_format == XFS_DINODE_FMT_LOCAL)
return xfs_attr_shortform_getvalue(args);
if (xfs_bmap_one_block(args->dp, XFS_ATTR_FORK))
return xfs_attr_leaf_get(args);
@@ -212,14 +212,14 @@ xfs_attr_set_args(
* If the attribute list is non-existent or a shortform list,
* upgrade it to a single-leaf-block attribute list.
*/
- if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL ||
- (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
- dp->i_d.di_anextents == 0)) {
+ if (dp->i_afp->if_format == XFS_DINODE_FMT_LOCAL ||
+ (dp->i_afp->if_format == XFS_DINODE_FMT_EXTENTS &&
+ dp->i_afp->if_nextents == 0)) {
/*
* Build initial attribute list (if required).
*/
- if (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS)
+ if (dp->i_afp->if_format == XFS_DINODE_FMT_EXTENTS)
xfs_attr_shortform_create(args);
/*
@@ -272,7 +272,7 @@ xfs_attr_remove_args(
if (!xfs_inode_hasattr(dp)) {
error = -ENOATTR;
- } else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
+ } else if (dp->i_afp->if_format == XFS_DINODE_FMT_LOCAL) {
ASSERT(dp->i_afp->if_flags & XFS_IFINLINE);
error = xfs_attr_shortform_remove(args);
} else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
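
Note: the pattern running through these XFS hunks is that fork format and extent counts move out of the inode core (i_d.di_format / di_aformat / di_nextents / di_anextents) and into the in-core fork itself (if_format / if_nextents). An illustrative helper showing the new home of that state (the helper name is hypothetical, not part of the patch):

static inline bool
xfs_attr_fork_is_shortform(struct xfs_inode *ip)
{
	return ip->i_afp && ip->i_afp->if_format == XFS_DINODE_FMT_LOCAL;
}
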
diff --git a/fs/xfs/libxfs/xfs_attr.h b/fs/xfs/libxfs/xfs_attr.h
index 0d2d05908537..db4717657ca1 100644
--- a/fs/xfs/libxfs/xfs_attr.h
+++ b/fs/xfs/libxfs/xfs_attr.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2000,2002-2003,2005 Silicon Graphics, Inc.
* All Rights Reserved.
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index 863444e2dda7..2f7e89e4be3e 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -309,14 +309,6 @@ xfs_attr3_leaf_verify(
return fa;
/*
- * In recovery there is a transient state where count == 0 is valid
- * because we may have transitioned an empty shortform attr to a leaf
- * if the attr didn't fit in shortform.
- */
- if (!xfs_log_in_recovery(mp) && ichdr.count == 0)
- return __this_address;
-
- /*
* firstused is the block offset of the first name info structure.
* Make sure it doesn't go off the block or crash into the header.
*/
@@ -331,6 +323,13 @@ xfs_attr3_leaf_verify(
(char *)bp->b_addr + ichdr.firstused)
return __this_address;
+ /*
+ * NOTE: This verifier historically failed empty leaf buffers because
+ * we expect the fork to be in another format. Empty attr fork format
+ * conversions are possible during xattr set, however, and format
+ * conversion is not atomic with the xattr set that triggers it. We
+ * cannot assume leaf blocks are non-empty until that is addressed.
+ */
buf_end = (char *)bp->b_addr + mp->m_attr_geo->blksize;
for (i = 0, ent = entries; i < ichdr.count; ent++, i++) {
fa = xfs_attr3_leaf_verify_entry(mp, buf_end, leaf, &ichdr,
@@ -489,7 +488,7 @@ xfs_attr_copy_value(
}
if (!args->value) {
- args->value = kmem_alloc_large(valuelen, 0);
+ args->value = kmem_alloc_large(valuelen, KM_NOLOCKDEP);
if (!args->value)
return -ENOMEM;
}
@@ -539,7 +538,7 @@ xfs_attr_shortform_bytesfit(
/* rounded down */
offset = (XFS_LITINO(mp) - bytes) >> 3;
- if (dp->i_d.di_format == XFS_DINODE_FMT_DEV) {
+ if (dp->i_df.if_format == XFS_DINODE_FMT_DEV) {
minforkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
return (offset >= minforkoff) ? minforkoff : 0;
}
@@ -567,7 +566,7 @@ xfs_attr_shortform_bytesfit(
dsize = dp->i_df.if_bytes;
- switch (dp->i_d.di_format) {
+ switch (dp->i_df.if_format) {
case XFS_DINODE_FMT_EXTENTS:
/*
* If there is no attr fork and the data fork is extents,
@@ -636,22 +635,19 @@ xfs_sbversion_add_attr2(xfs_mount_t *mp, xfs_trans_t *tp)
* Create the initial contents of a shortform attribute list.
*/
void
-xfs_attr_shortform_create(xfs_da_args_t *args)
+xfs_attr_shortform_create(
+ struct xfs_da_args *args)
{
- xfs_attr_sf_hdr_t *hdr;
- xfs_inode_t *dp;
- struct xfs_ifork *ifp;
+ struct xfs_inode *dp = args->dp;
+ struct xfs_ifork *ifp = dp->i_afp;
+ struct xfs_attr_sf_hdr *hdr;
trace_xfs_attr_sf_create(args);
- dp = args->dp;
- ASSERT(dp != NULL);
- ifp = dp->i_afp;
- ASSERT(ifp != NULL);
ASSERT(ifp->if_bytes == 0);
- if (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS) {
+ if (ifp->if_format == XFS_DINODE_FMT_EXTENTS) {
ifp->if_flags &= ~XFS_IFEXTENTS; /* just in case */
- dp->i_d.di_aformat = XFS_DINODE_FMT_LOCAL;
+ ifp->if_format = XFS_DINODE_FMT_LOCAL;
ifp->if_flags |= XFS_IFINLINE;
} else {
ASSERT(ifp->if_flags & XFS_IFINLINE);
@@ -719,13 +715,12 @@ xfs_attr_fork_remove(
struct xfs_inode *ip,
struct xfs_trans *tp)
{
- xfs_idestroy_fork(ip, XFS_ATTR_FORK);
- ip->i_d.di_forkoff = 0;
- ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
-
- ASSERT(ip->i_d.di_anextents == 0);
- ASSERT(ip->i_afp == NULL);
+ ASSERT(ip->i_afp->if_nextents == 0);
+ xfs_idestroy_fork(ip->i_afp);
+ kmem_cache_free(xfs_ifork_zone, ip->i_afp);
+ ip->i_afp = NULL;
+ ip->i_d.di_forkoff = 0;
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}
@@ -775,7 +770,7 @@ xfs_attr_shortform_remove(xfs_da_args_t *args)
totsize -= size;
if (totsize == sizeof(xfs_attr_sf_hdr_t) &&
(mp->m_flags & XFS_MOUNT_ATTR2) &&
- (dp->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
+ (dp->i_df.if_format != XFS_DINODE_FMT_BTREE) &&
!(args->op_flags & XFS_DA_OP_ADDNAME)) {
xfs_attr_fork_remove(dp, args->trans);
} else {
@@ -785,7 +780,7 @@ xfs_attr_shortform_remove(xfs_da_args_t *args)
ASSERT(totsize > sizeof(xfs_attr_sf_hdr_t) ||
(args->op_flags & XFS_DA_OP_ADDNAME) ||
!(mp->m_flags & XFS_MOUNT_ATTR2) ||
- dp->i_d.di_format == XFS_DINODE_FMT_BTREE);
+ dp->i_df.if_format == XFS_DINODE_FMT_BTREE);
xfs_trans_log_inode(args->trans, dp,
XFS_ILOG_CORE | XFS_ILOG_ADATA);
}
@@ -962,7 +957,7 @@ xfs_attr_shortform_allfit(
+ be16_to_cpu(name_loc->valuelen);
}
if ((dp->i_mount->m_flags & XFS_MOUNT_ATTR2) &&
- (dp->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
+ (dp->i_df.if_format != XFS_DINODE_FMT_BTREE) &&
(bytes == sizeof(struct xfs_attr_sf_hdr)))
return -1;
return xfs_attr_shortform_bytesfit(dp, bytes);
@@ -981,7 +976,7 @@ xfs_attr_shortform_verify(
int i;
int64_t size;
- ASSERT(ip->i_d.di_aformat == XFS_DINODE_FMT_LOCAL);
+ ASSERT(ip->i_afp->if_format == XFS_DINODE_FMT_LOCAL);
ifp = XFS_IFORK_PTR(ip, XFS_ATTR_FORK);
sfp = (struct xfs_attr_shortform *)ifp->if_u1.if_data;
size = ifp->if_bytes;
@@ -1085,7 +1080,7 @@ xfs_attr3_leaf_to_shortform(
if (forkoff == -1) {
ASSERT(dp->i_mount->m_flags & XFS_MOUNT_ATTR2);
- ASSERT(dp->i_d.di_format != XFS_DINODE_FMT_BTREE);
+ ASSERT(dp->i_df.if_format != XFS_DINODE_FMT_BTREE);
xfs_attr_fork_remove(dp, args->trans);
goto out;
}
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.h b/fs/xfs/libxfs/xfs_attr_leaf.h
index 6dd2d937a42a..5be6be309302 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.h
+++ b/fs/xfs/libxfs/xfs_attr_leaf.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2000,2002-2003,2005 Silicon Graphics, Inc.
* Copyright (c) 2013 Red Hat, Inc.
diff --git a/fs/xfs/libxfs/xfs_attr_remote.h b/fs/xfs/libxfs/xfs_attr_remote.h
index 6fb4572845ce..e1144f22b005 100644
--- a/fs/xfs/libxfs/xfs_attr_remote.h
+++ b/fs/xfs/libxfs/xfs_attr_remote.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2013 Red Hat, Inc.
* All Rights Reserved.
diff --git a/fs/xfs/libxfs/xfs_attr_sf.h b/fs/xfs/libxfs/xfs_attr_sf.h
index aafa4fe70624..bb004fb7944a 100644
--- a/fs/xfs/libxfs/xfs_attr_sf.h
+++ b/fs/xfs/libxfs/xfs_attr_sf.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2000,2002,2005 Silicon Graphics, Inc.
* All Rights Reserved.
diff --git a/fs/xfs/libxfs/xfs_bit.h b/fs/xfs/libxfs/xfs_bit.h
index 99017b8df292..a04f266ae644 100644
--- a/fs/xfs/libxfs/xfs_bit.h
+++ b/fs/xfs/libxfs/xfs_bit.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2000,2002,2005 Silicon Graphics, Inc.
* All Rights Reserved.
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index fda13cd7add0..667cdd0dfdf4 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -61,10 +61,10 @@ xfs_bmap_compute_maxlevels(
int sz; /* root block size */
/*
- * The maximum number of extents in a file, hence the maximum
- * number of leaf entries, is controlled by the type of di_nextents
- * (a signed 32-bit number, xfs_extnum_t), or by di_anextents
- * (a signed 16-bit number, xfs_aextnum_t).
+ * The maximum number of extents in a file, hence the maximum number of
+ * leaf entries, is controlled by the size of the on-disk extent count,
+ * either a signed 32-bit number for the data fork, or a signed 16-bit
+ * number for the attr fork.
*
* Note that we can no longer assume that if we are in ATTR1 that
* the fork offset of all the inodes will be
@@ -120,10 +120,11 @@ xfs_bmbt_lookup_first(
*/
static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
{
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+
return whichfork != XFS_COW_FORK &&
- XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
- XFS_IFORK_NEXTENTS(ip, whichfork) >
- XFS_IFORK_MAXEXT(ip, whichfork);
+ ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
+ ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork);
}
/*
@@ -131,10 +132,11 @@ static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
*/
static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
{
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+
return whichfork != XFS_COW_FORK &&
- XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
- XFS_IFORK_NEXTENTS(ip, whichfork) <=
- XFS_IFORK_MAXEXT(ip, whichfork);
+ ifp->if_format == XFS_DINODE_FMT_BTREE &&
+ ifp->if_nextents <= XFS_IFORK_MAXEXT(ip, whichfork);
}
/*
@@ -213,8 +215,8 @@ xfs_bmap_forkoff_reset(
int whichfork)
{
if (whichfork == XFS_ATTR_FORK &&
- ip->i_d.di_format != XFS_DINODE_FMT_DEV &&
- ip->i_d.di_format != XFS_DINODE_FMT_BTREE) {
+ ip->i_df.if_format != XFS_DINODE_FMT_DEV &&
+ ip->i_df.if_format != XFS_DINODE_FMT_BTREE) {
uint dfl_forkoff = xfs_default_attroffset(ip) >> 3;
if (dfl_forkoff > ip->i_d.di_forkoff)
@@ -315,31 +317,28 @@ xfs_bmap_check_leaf_extents(
xfs_inode_t *ip, /* incore inode pointer */
int whichfork) /* data or attr fork */
{
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
struct xfs_btree_block *block; /* current btree block */
xfs_fsblock_t bno; /* block # of "block" */
xfs_buf_t *bp; /* buffer for "block" */
int error; /* error return value */
xfs_extnum_t i=0, j; /* index into the extents list */
- struct xfs_ifork *ifp; /* fork structure */
int level; /* btree level, for checking */
- xfs_mount_t *mp; /* file system mount structure */
__be64 *pp; /* pointer to block address */
xfs_bmbt_rec_t *ep; /* pointer to current extent */
xfs_bmbt_rec_t last = {0, 0}; /* last extent in prev block */
xfs_bmbt_rec_t *nextp; /* pointer to next extent */
int bp_release = 0;
- if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) {
+ if (ifp->if_format != XFS_DINODE_FMT_BTREE)
return;
- }
/* skip large extent count inodes */
- if (ip->i_d.di_nextents > 10000)
+ if (ip->i_df.if_nextents > 10000)
return;
bno = NULLFSBLOCK;
- mp = ip->i_mount;
- ifp = XFS_IFORK_PTR(ip, whichfork);
block = ifp->if_broot;
/*
* Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
@@ -604,7 +603,7 @@ xfs_bmap_btree_to_extents(
ASSERT(cur);
ASSERT(whichfork != XFS_COW_FORK);
ASSERT(ifp->if_flags & XFS_IFEXTENTS);
- ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
+ ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE);
ASSERT(be16_to_cpu(rblock->bb_level) == 1);
ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);
@@ -632,7 +631,7 @@ xfs_bmap_btree_to_extents(
xfs_iroot_realloc(ip, -1, whichfork);
ASSERT(ifp->if_broot == NULL);
ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
- XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
+ ifp->if_format = XFS_DINODE_FMT_EXTENTS;
*logflagsp |= XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
return 0;
}
@@ -668,7 +667,7 @@ xfs_bmap_extents_to_btree(
mp = ip->i_mount;
ASSERT(whichfork != XFS_COW_FORK);
ifp = XFS_IFORK_PTR(ip, whichfork);
- ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);
+ ASSERT(ifp->if_format == XFS_DINODE_FMT_EXTENTS);
/*
* Make space in the inode incore. This needs to be undone if we fail
@@ -692,7 +691,7 @@ xfs_bmap_extents_to_btree(
/*
* Convert to a btree with two levels, one record in root.
*/
- XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE);
+ ifp->if_format = XFS_DINODE_FMT_BTREE;
memset(&args, 0, sizeof(args));
args.tp = tp;
args.mp = mp;
@@ -750,7 +749,7 @@ xfs_bmap_extents_to_btree(
xfs_bmbt_disk_set_all(arp, &rec);
cnt++;
}
- ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork));
+ ASSERT(cnt == ifp->if_nextents);
xfs_btree_set_numrecs(ablock, cnt);
/*
@@ -778,7 +777,7 @@ out_unreserve_dquot:
xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
out_root_realloc:
xfs_iroot_realloc(ip, -1, whichfork);
- XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
+ ifp->if_format = XFS_DINODE_FMT_EXTENTS;
ASSERT(ifp->if_broot == NULL);
xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
@@ -800,16 +799,16 @@ xfs_bmap_local_to_extents_empty(
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
ASSERT(whichfork != XFS_COW_FORK);
- ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
+ ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);
ASSERT(ifp->if_bytes == 0);
- ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);
+ ASSERT(ifp->if_nextents == 0);
xfs_bmap_forkoff_reset(ip, whichfork);
ifp->if_flags &= ~XFS_IFINLINE;
ifp->if_flags |= XFS_IFEXTENTS;
ifp->if_u1.if_root = NULL;
ifp->if_height = 0;
- XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
+ ifp->if_format = XFS_DINODE_FMT_EXTENTS;
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}
@@ -840,7 +839,7 @@ xfs_bmap_local_to_extents(
*/
ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
ifp = XFS_IFORK_PTR(ip, whichfork);
- ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
+ ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);
if (!ifp->if_bytes) {
xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
@@ -907,7 +906,7 @@ xfs_bmap_local_to_extents(
xfs_iext_first(ifp, &icur);
xfs_iext_insert(ip, &icur, &rec, 0);
- XFS_IFORK_NEXT_SET(ip, whichfork, 1);
+ ifp->if_nextents = 1;
ip->i_d.di_nblocks = 1;
xfs_trans_mod_dquot_byino(tp, ip,
XFS_TRANS_DQ_BCOUNT, 1L);
@@ -972,7 +971,8 @@ xfs_bmap_add_attrfork_extents(
xfs_btree_cur_t *cur; /* bmap btree cursor */
int error; /* error return value */
- if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
+ if (ip->i_df.if_nextents * sizeof(struct xfs_bmbt_rec) <=
+ XFS_IFORK_DSIZE(ip))
return 0;
cur = NULL;
error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, flags,
@@ -1033,7 +1033,7 @@ xfs_bmap_set_attrforkoff(
int size,
int *version)
{
- switch (ip->i_d.di_format) {
+ switch (ip->i_df.if_format) {
case XFS_DINODE_FMT_DEV:
ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
break;
@@ -1091,17 +1091,6 @@ xfs_bmap_add_attrfork(
goto trans_cancel;
if (XFS_IFORK_Q(ip))
goto trans_cancel;
- if (XFS_IS_CORRUPT(mp, ip->i_d.di_anextents != 0)) {
- error = -EFSCORRUPTED;
- goto trans_cancel;
- }
- if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
- /*
- * For inodes coming from pre-6.2 filesystems.
- */
- ASSERT(ip->i_d.di_aformat == 0);
- ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
- }
xfs_trans_ijoin(tp, ip, 0);
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
@@ -1110,9 +1099,10 @@ xfs_bmap_add_attrfork(
goto trans_cancel;
ASSERT(ip->i_afp == NULL);
ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, 0);
+ ip->i_afp->if_format = XFS_DINODE_FMT_EXTENTS;
ip->i_afp->if_flags = XFS_IFEXTENTS;
logflags = 0;
- switch (ip->i_d.di_format) {
+ switch (ip->i_df.if_format) {
case XFS_DINODE_FMT_LOCAL:
error = xfs_bmap_add_attrfork_local(tp, ip, &logflags);
break;
@@ -1183,13 +1173,13 @@ xfs_iread_bmbt_block(
xfs_extnum_t num_recs;
xfs_extnum_t j;
int whichfork = cur->bc_ino.whichfork;
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
block = xfs_btree_get_block(cur, level, &bp);
/* Abort if we find more records than nextents. */
num_recs = xfs_btree_get_numrecs(block);
- if (unlikely(ir->loaded + num_recs >
- XFS_IFORK_NEXTENTS(ip, whichfork))) {
+ if (unlikely(ir->loaded + num_recs > ifp->if_nextents)) {
xfs_warn(ip->i_mount, "corrupt dinode %llu, (btree extents).",
(unsigned long long)ip->i_ino);
xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, block,
@@ -1215,7 +1205,7 @@ xfs_iread_bmbt_block(
xfs_bmap_fork_to_state(whichfork));
trace_xfs_read_extent(ip, &ir->icur,
xfs_bmap_fork_to_state(whichfork), _THIS_IP_);
- xfs_iext_next(XFS_IFORK_PTR(ip, whichfork), &ir->icur);
+ xfs_iext_next(ifp, &ir->icur);
}
return 0;
@@ -1238,9 +1228,7 @@ xfs_iread_extents(
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
- if (XFS_IS_CORRUPT(mp,
- XFS_IFORK_FORMAT(ip, whichfork) !=
- XFS_DINODE_FMT_BTREE)) {
+ if (XFS_IS_CORRUPT(mp, ifp->if_format != XFS_DINODE_FMT_BTREE)) {
error = -EFSCORRUPTED;
goto out;
}
@@ -1254,8 +1242,7 @@ xfs_iread_extents(
if (error)
goto out;
- if (XFS_IS_CORRUPT(mp,
- ir.loaded != XFS_IFORK_NEXTENTS(ip, whichfork))) {
+ if (XFS_IS_CORRUPT(mp, ir.loaded != ifp->if_nextents)) {
error = -EFSCORRUPTED;
goto out;
}
@@ -1289,14 +1276,13 @@ xfs_bmap_first_unused(
xfs_fileoff_t lowest, max;
int error;
- ASSERT(xfs_ifork_has_extents(ip, whichfork) ||
- XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
-
- if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
+ if (ifp->if_format == XFS_DINODE_FMT_LOCAL) {
*first_unused = 0;
return 0;
}
+ ASSERT(xfs_ifork_has_extents(ifp));
+
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
error = xfs_iread_extents(tp, ip, whichfork);
if (error)
@@ -1337,7 +1323,7 @@ xfs_bmap_last_before(
struct xfs_iext_cursor icur;
int error;
- switch (XFS_IFORK_FORMAT(ip, whichfork)) {
+ switch (ifp->if_format) {
case XFS_DINODE_FMT_LOCAL:
*last_block = 0;
return 0;
@@ -1436,16 +1422,17 @@ xfs_bmap_last_offset(
xfs_fileoff_t *last_block,
int whichfork)
{
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
struct xfs_bmbt_irec rec;
int is_empty;
int error;
*last_block = 0;
- if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL)
+ if (ifp->if_format == XFS_DINODE_FMT_LOCAL)
return 0;
- if (XFS_IS_CORRUPT(ip->i_mount, !xfs_ifork_has_extents(ip, whichfork)))
+ if (XFS_IS_CORRUPT(ip->i_mount, !xfs_ifork_has_extents(ifp)))
return -EFSCORRUPTED;
error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
@@ -1463,23 +1450,22 @@ xfs_bmap_last_offset(
*/
int /* 1=>1 block, 0=>otherwise */
xfs_bmap_one_block(
- xfs_inode_t *ip, /* incore inode */
- int whichfork) /* data or attr fork */
+ struct xfs_inode *ip, /* incore inode */
+ int whichfork) /* data or attr fork */
{
- struct xfs_ifork *ifp; /* inode fork pointer */
- int rval; /* return value */
- xfs_bmbt_irec_t s; /* internal version of extent */
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
+ int rval; /* return value */
+ struct xfs_bmbt_irec s; /* internal version of extent */
struct xfs_iext_cursor icur;
#ifndef DEBUG
if (whichfork == XFS_DATA_FORK)
return XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize;
#endif /* !DEBUG */
- if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1)
+ if (ifp->if_nextents != 1)
return 0;
- if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
+ if (ifp->if_format != XFS_DINODE_FMT_EXTENTS)
return 0;
- ifp = XFS_IFORK_PTR(ip, whichfork);
ASSERT(ifp->if_flags & XFS_IFEXTENTS);
xfs_iext_first(ifp, &icur);
xfs_iext_get_extent(ifp, &icur, &s);
@@ -1501,10 +1487,11 @@ xfs_bmap_add_extent_delay_real(
struct xfs_bmalloca *bma,
int whichfork)
{
+ struct xfs_mount *mp = bma->ip->i_mount;
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
struct xfs_bmbt_irec *new = &bma->got;
int error; /* error return value */
int i; /* temp state */
- struct xfs_ifork *ifp; /* inode fork pointer */
xfs_fileoff_t new_endoff; /* end offset of new entry */
xfs_bmbt_irec_t r[3]; /* neighbor extent entries */
/* left is 0, right is 1, prev is 2 */
@@ -1514,16 +1501,9 @@ xfs_bmap_add_extent_delay_real(
xfs_filblks_t da_old; /* old count del alloc blocks used */
xfs_filblks_t temp=0; /* value for da_new calculations */
int tmp_rval; /* partial logging flags */
- struct xfs_mount *mp;
- xfs_extnum_t *nextents;
struct xfs_bmbt_irec old;
- mp = bma->ip->i_mount;
- ifp = XFS_IFORK_PTR(bma->ip, whichfork);
ASSERT(whichfork != XFS_ATTR_FORK);
- nextents = (whichfork == XFS_COW_FORK ? &bma->ip->i_cnextents :
- &bma->ip->i_d.di_nextents);
-
ASSERT(!isnullstartblock(new->br_startblock));
ASSERT(!bma->cur ||
(bma->cur->bc_ino.flags & XFS_BTCUR_BMBT_WASDEL));
@@ -1614,7 +1594,7 @@ xfs_bmap_add_extent_delay_real(
xfs_iext_remove(bma->ip, &bma->icur, state);
xfs_iext_prev(ifp, &bma->icur);
xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
- (*nextents)--;
+ ifp->if_nextents--;
if (bma->cur == NULL)
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
@@ -1718,8 +1698,8 @@ xfs_bmap_add_extent_delay_real(
PREV.br_startblock = new->br_startblock;
PREV.br_state = new->br_state;
xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
+ ifp->if_nextents++;
- (*nextents)++;
if (bma->cur == NULL)
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
else {
@@ -1784,7 +1764,8 @@ xfs_bmap_add_extent_delay_real(
* The left neighbor is not contiguous.
*/
xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
- (*nextents)++;
+ ifp->if_nextents++;
+
if (bma->cur == NULL)
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
else {
@@ -1870,7 +1851,8 @@ xfs_bmap_add_extent_delay_real(
* The right neighbor is not contiguous.
*/
xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
- (*nextents)++;
+ ifp->if_nextents++;
+
if (bma->cur == NULL)
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
else {
@@ -1955,7 +1937,7 @@ xfs_bmap_add_extent_delay_real(
xfs_iext_next(ifp, &bma->icur);
xfs_iext_insert(bma->ip, &bma->icur, &RIGHT, state);
xfs_iext_insert(bma->ip, &bma->icur, &LEFT, state);
- (*nextents)++;
+ ifp->if_nextents++;
if (bma->cur == NULL)
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
@@ -2159,8 +2141,7 @@ xfs_bmap_add_extent_unwritten_real(
xfs_iext_remove(ip, icur, state);
xfs_iext_prev(ifp, icur);
xfs_iext_update_extent(ip, state, icur, &LEFT);
- XFS_IFORK_NEXT_SET(ip, whichfork,
- XFS_IFORK_NEXTENTS(ip, whichfork) - 2);
+ ifp->if_nextents -= 2;
if (cur == NULL)
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
else {
@@ -2212,8 +2193,7 @@ xfs_bmap_add_extent_unwritten_real(
xfs_iext_remove(ip, icur, state);
xfs_iext_prev(ifp, icur);
xfs_iext_update_extent(ip, state, icur, &LEFT);
- XFS_IFORK_NEXT_SET(ip, whichfork,
- XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
+ ifp->if_nextents--;
if (cur == NULL)
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
else {
@@ -2255,9 +2235,8 @@ xfs_bmap_add_extent_unwritten_real(
xfs_iext_remove(ip, icur, state);
xfs_iext_prev(ifp, icur);
xfs_iext_update_extent(ip, state, icur, &PREV);
+ ifp->if_nextents--;
- XFS_IFORK_NEXT_SET(ip, whichfork,
- XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
if (cur == NULL)
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
else {
@@ -2364,8 +2343,8 @@ xfs_bmap_add_extent_unwritten_real(
xfs_iext_update_extent(ip, state, icur, &PREV);
xfs_iext_insert(ip, icur, new, state);
- XFS_IFORK_NEXT_SET(ip, whichfork,
- XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
+ ifp->if_nextents++;
+
if (cur == NULL)
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
else {
@@ -2440,9 +2419,8 @@ xfs_bmap_add_extent_unwritten_real(
xfs_iext_update_extent(ip, state, icur, &PREV);
xfs_iext_next(ifp, icur);
xfs_iext_insert(ip, icur, new, state);
+ ifp->if_nextents++;
- XFS_IFORK_NEXT_SET(ip, whichfork,
- XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
if (cur == NULL)
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
else {
@@ -2493,9 +2471,8 @@ xfs_bmap_add_extent_unwritten_real(
xfs_iext_next(ifp, icur);
xfs_iext_insert(ip, icur, &r[1], state);
xfs_iext_insert(ip, icur, &r[0], state);
+ ifp->if_nextents += 2;
- XFS_IFORK_NEXT_SET(ip, whichfork,
- XFS_IFORK_NEXTENTS(ip, whichfork) + 2);
if (cur == NULL)
rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
else {
@@ -2810,9 +2787,8 @@ xfs_bmap_add_extent_hole_real(
xfs_iext_remove(ip, icur, state);
xfs_iext_prev(ifp, icur);
xfs_iext_update_extent(ip, state, icur, &left);
+ ifp->if_nextents--;
- XFS_IFORK_NEXT_SET(ip, whichfork,
- XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
if (cur == NULL) {
rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
} else {
@@ -2910,8 +2886,8 @@ xfs_bmap_add_extent_hole_real(
* Insert a new entry.
*/
xfs_iext_insert(ip, icur, new, state);
- XFS_IFORK_NEXT_SET(ip, whichfork,
- XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
+ ifp->if_nextents++;
+
if (cur == NULL) {
rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
} else {
@@ -3891,7 +3867,8 @@ xfs_bmapi_read(
int flags)
{
struct xfs_mount *mp = ip->i_mount;
- struct xfs_ifork *ifp;
+ int whichfork = xfs_bmapi_whichfork(flags);
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
struct xfs_bmbt_irec got;
xfs_fileoff_t obno;
xfs_fileoff_t end;
@@ -3899,48 +3876,23 @@ xfs_bmapi_read(
int error;
bool eof = false;
int n = 0;
- int whichfork = xfs_bmapi_whichfork(flags);
ASSERT(*nmap >= 1);
- ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE|
- XFS_BMAPI_COWFORK)));
+ ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_ENTIRE)));
ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL));
- if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ip, whichfork)) ||
- XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
+ if (WARN_ON_ONCE(!ifp))
+ return -EFSCORRUPTED;
+
+ if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
+ XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT))
return -EFSCORRUPTED;
- }
if (XFS_FORCED_SHUTDOWN(mp))
return -EIO;
XFS_STATS_INC(mp, xs_blk_mapr);
- ifp = XFS_IFORK_PTR(ip, whichfork);
- if (!ifp) {
- /* No CoW fork? Return a hole. */
- if (whichfork == XFS_COW_FORK) {
- mval->br_startoff = bno;
- mval->br_startblock = HOLESTARTBLOCK;
- mval->br_blockcount = len;
- mval->br_state = XFS_EXT_NORM;
- *nmap = 1;
- return 0;
- }
-
- /*
- * A missing attr ifork implies that the inode says we're in
- * extents or btree format but failed to pass the inode fork
- * verifier while trying to load it. Treat that as a file
- * corruption too.
- */
-#ifdef DEBUG
- xfs_alert(mp, "%s: inode %llu missing fork %d",
- __func__, ip->i_ino, whichfork);
-#endif /* DEBUG */
- return -EFSCORRUPTED;
- }
-
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
error = xfs_iread_extents(NULL, ip, whichfork);
if (error)
@@ -4193,17 +4145,7 @@ xfs_bmapi_allocate(
bma->got.br_blockcount = bma->length;
bma->got.br_state = XFS_EXT_NORM;
- /*
- * In the data fork, a wasdelay extent has been initialized, so
- * shouldn't be flagged as unwritten.
- *
- * For the cow fork, however, we convert delalloc reservations
- * (extents allocated for speculative preallocation) to
- * allocated unwritten extents, and only convert the unwritten
- * extents to real extents when we're about to write the data.
- */
- if ((!bma->wasdel || (bma->flags & XFS_BMAPI_COWFORK)) &&
- (bma->flags & XFS_BMAPI_PREALLOC))
+ if (bma->flags & XFS_BMAPI_PREALLOC)
bma->got.br_state = XFS_EXT_UNWRITTEN;
if (bma->wasdel)
@@ -4317,11 +4259,13 @@ xfs_bmapi_minleft(
struct xfs_inode *ip,
int fork)
{
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, fork);
+
if (tp && tp->t_firstblock != NULLFSBLOCK)
return 0;
- if (XFS_IFORK_FORMAT(ip, fork) != XFS_DINODE_FMT_BTREE)
+ if (ifp->if_format != XFS_DINODE_FMT_BTREE)
return 1;
- return be16_to_cpu(XFS_IFORK_PTR(ip, fork)->if_broot->bb_level) + 1;
+ return be16_to_cpu(ifp->if_broot->bb_level) + 1;
}
/*
@@ -4336,11 +4280,13 @@ xfs_bmapi_finish(
int whichfork,
int error)
{
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
+
if ((bma->logflags & xfs_ilog_fext(whichfork)) &&
- XFS_IFORK_FORMAT(bma->ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
+ ifp->if_format != XFS_DINODE_FMT_EXTENTS)
bma->logflags &= ~xfs_ilog_fext(whichfork);
else if ((bma->logflags & xfs_ilog_fbroot(whichfork)) &&
- XFS_IFORK_FORMAT(bma->ip, whichfork) != XFS_DINODE_FMT_BTREE)
+ ifp->if_format != XFS_DINODE_FMT_BTREE)
bma->logflags &= ~xfs_ilog_fbroot(whichfork);
if (bma->logflags)
@@ -4372,13 +4318,13 @@ xfs_bmapi_write(
.total = total,
};
struct xfs_mount *mp = ip->i_mount;
- struct xfs_ifork *ifp;
+ int whichfork = xfs_bmapi_whichfork(flags);
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
xfs_fileoff_t end; /* end of mapped file region */
bool eof = false; /* after the end of extents */
int error; /* error return */
int n; /* current extent index */
xfs_fileoff_t obno; /* old block number (offset) */
- int whichfork; /* data or attr fork */
#ifdef DEBUG
xfs_fileoff_t orig_bno; /* original block number value */
@@ -4393,13 +4339,12 @@ xfs_bmapi_write(
orig_mval = mval;
orig_nmap = *nmap;
#endif
- whichfork = xfs_bmapi_whichfork(flags);
ASSERT(*nmap >= 1);
ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
ASSERT(tp != NULL);
ASSERT(len > 0);
- ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL);
+ ASSERT(ifp->if_format != XFS_DINODE_FMT_LOCAL);
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
ASSERT(!(flags & XFS_BMAPI_REMAP));
@@ -4415,7 +4360,7 @@ xfs_bmapi_write(
ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
(XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));
- if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ip, whichfork)) ||
+ if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
return -EFSCORRUPTED;
}
@@ -4423,8 +4368,6 @@ xfs_bmapi_write(
if (XFS_FORCED_SHUTDOWN(mp))
return -EIO;
- ifp = XFS_IFORK_PTR(ip, whichfork);
-
XFS_STATS_INC(mp, xs_blk_mapw);
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
@@ -4534,9 +4477,8 @@ xfs_bmapi_write(
if (error)
goto error0;
- ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE ||
- XFS_IFORK_NEXTENTS(ip, whichfork) >
- XFS_IFORK_MAXEXT(ip, whichfork));
+ ASSERT(ifp->if_format != XFS_DINODE_FMT_BTREE ||
+ ifp->if_nextents > XFS_IFORK_MAXEXT(ip, whichfork));
xfs_bmapi_finish(&bma, whichfork, 0);
xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
orig_nmap, *nmap);
@@ -4611,8 +4553,23 @@ xfs_bmapi_convert_delalloc(
bma.offset = bma.got.br_startoff;
bma.length = max_t(xfs_filblks_t, bma.got.br_blockcount, MAXEXTLEN);
bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
+
+ /*
+ * When we're converting the delalloc reservations backing dirty pages
+ * in the page cache, we must be careful about how we create the new
+ * extents:
+ *
+ * New CoW fork extents are created unwritten, turned into real extents
+ * when we're about to write the data to disk, and mapped into the data
+ * fork after the write finishes. End of story.
+ *
+ * New data fork extents must be mapped in as unwritten and converted
+ * to real extents after the write succeeds to avoid exposing stale
+ * disk contents if we crash.
+ */
+ bma.flags = XFS_BMAPI_PREALLOC;
if (whichfork == XFS_COW_FORK)
- bma.flags = XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC;
+ bma.flags |= XFS_BMAPI_COWFORK;
if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
bma.prev.br_startoff = NULLFILEOFF;
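The block comment above records why both flavors of delalloc conversion now create unwritten extents. As a rough standalone sketch of just that flag selection (constants and names below are simplified stand-ins, not the kernel headers):

    #include <stdio.h>

    /* Simplified stand-ins for the bmapi flags; values are illustrative. */
    #define XFS_BMAPI_PREALLOC (1u << 0)    /* allocate as unwritten */
    #define XFS_BMAPI_COWFORK  (1u << 1)    /* operate on the CoW fork */
    #define XFS_DATA_FORK 0
    #define XFS_COW_FORK  2

    /* Both forks convert delalloc to unwritten; only the fork flag differs. */
    static unsigned int convert_flags(int whichfork)
    {
        unsigned int flags = XFS_BMAPI_PREALLOC;

        if (whichfork == XFS_COW_FORK)
            flags |= XFS_BMAPI_COWFORK;
        return flags;
    }

    int main(void)
    {
        printf("data fork: %#x\n", convert_flags(XFS_DATA_FORK));
        printf("cow fork:  %#x\n", convert_flags(XFS_COW_FORK));
        return 0;
    }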
@@ -4682,7 +4639,7 @@ xfs_bmapi_remap(
ASSERT((flags & (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)) !=
(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC));
- if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ip, whichfork)) ||
+ if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
return -EFSCORRUPTED;
}
@@ -4726,9 +4683,9 @@ xfs_bmapi_remap(
error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags, whichfork);
error0:
- if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS)
+ if (ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS)
logflags &= ~XFS_ILOG_DEXT;
- else if (ip->i_d.di_format != XFS_DINODE_FMT_BTREE)
+ else if (ip->i_df.if_format != XFS_DINODE_FMT_BTREE)
logflags &= ~XFS_ILOG_DBROOT;
if (logflags)
@@ -5078,9 +5035,8 @@ xfs_bmap_del_extent_real(
* conversion to btree format, since the transaction will be dirty then.
*/
if (tp->t_blk_res == 0 &&
- XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
- XFS_IFORK_NEXTENTS(ip, whichfork) >=
- XFS_IFORK_MAXEXT(ip, whichfork) &&
+ ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
+ ifp->if_nextents >= XFS_IFORK_MAXEXT(ip, whichfork) &&
del->br_startoff > got.br_startoff && del_endoff < got_endoff)
return -ENOSPC;
@@ -5132,8 +5088,8 @@ xfs_bmap_del_extent_real(
*/
xfs_iext_remove(ip, icur, state);
xfs_iext_prev(ifp, icur);
- XFS_IFORK_NEXT_SET(ip, whichfork,
- XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
+ ifp->if_nextents--;
+
flags |= XFS_ILOG_CORE;
if (!cur) {
flags |= xfs_ilog_fext(whichfork);
@@ -5241,8 +5197,8 @@ xfs_bmap_del_extent_real(
}
} else
flags |= xfs_ilog_fext(whichfork);
- XFS_IFORK_NEXT_SET(ip, whichfork,
- XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
+
+ ifp->if_nextents++;
xfs_iext_next(ifp, icur);
xfs_iext_insert(ip, icur, &new, state);
break;
@@ -5322,7 +5278,7 @@ __xfs_bunmapi(
whichfork = xfs_bmapi_whichfork(flags);
ASSERT(whichfork != XFS_COW_FORK);
ifp = XFS_IFORK_PTR(ip, whichfork);
- if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ip, whichfork)))
+ if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)))
return -EFSCORRUPTED;
if (XFS_FORCED_SHUTDOWN(mp))
return -EIO;
@@ -5360,7 +5316,7 @@ __xfs_bunmapi(
logflags = 0;
if (ifp->if_flags & XFS_IFBROOT) {
- ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
+ ASSERT(ifp->if_format == XFS_DINODE_FMT_BTREE);
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
cur->bc_ino.flags = 0;
} else
@@ -5605,10 +5561,10 @@ error0:
* logging the extent records if we've converted to btree format.
*/
if ((logflags & xfs_ilog_fext(whichfork)) &&
- XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
+ ifp->if_format != XFS_DINODE_FMT_EXTENTS)
logflags &= ~xfs_ilog_fext(whichfork);
else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
- XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
+ ifp->if_format != XFS_DINODE_FMT_BTREE)
logflags &= ~xfs_ilog_fbroot(whichfork);
/*
* Log inode even in the error case, if the transaction
@@ -5690,6 +5646,7 @@ xfs_bmse_merge(
struct xfs_btree_cur *cur,
int *logflags) /* output */
{
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
struct xfs_bmbt_irec new;
xfs_filblks_t blockcount;
int error, i;
@@ -5708,8 +5665,7 @@ xfs_bmse_merge(
* Update the on-disk extent count, the btree if necessary and log the
* inode.
*/
- XFS_IFORK_NEXT_SET(ip, whichfork,
- XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
+ ifp->if_nextents--;
*logflags |= XFS_ILOG_CORE;
if (!cur) {
*logflags |= XFS_ILOG_DEXT;
@@ -5747,7 +5703,7 @@ xfs_bmse_merge(
done:
xfs_iext_remove(ip, icur, 0);
- xfs_iext_prev(XFS_IFORK_PTR(ip, whichfork), icur);
+ xfs_iext_prev(ifp, icur);
xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
&new);
@@ -5819,7 +5775,7 @@ xfs_bmap_collapse_extents(
int error = 0;
int logflags = 0;
- if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ip, whichfork)) ||
+ if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
return -EFSCORRUPTED;
}
@@ -5936,7 +5892,7 @@ xfs_bmap_insert_extents(
int error = 0;
int logflags = 0;
- if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ip, whichfork)) ||
+ if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
return -EFSCORRUPTED;
}
@@ -6030,18 +5986,18 @@ xfs_bmap_split_extent(
xfs_fileoff_t split_fsb)
{
int whichfork = XFS_DATA_FORK;
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
struct xfs_btree_cur *cur = NULL;
struct xfs_bmbt_irec got;
struct xfs_bmbt_irec new; /* split extent */
struct xfs_mount *mp = ip->i_mount;
- struct xfs_ifork *ifp;
xfs_fsblock_t gotblkcnt; /* new block count for got */
struct xfs_iext_cursor icur;
int error = 0;
int logflags = 0;
int i = 0;
- if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ip, whichfork)) ||
+ if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ifp)) ||
XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
return -EFSCORRUPTED;
}
@@ -6049,7 +6005,6 @@ xfs_bmap_split_extent(
if (XFS_FORCED_SHUTDOWN(mp))
return -EIO;
- ifp = XFS_IFORK_PTR(ip, whichfork);
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
/* Read in all the extents */
error = xfs_iread_extents(tp, ip, whichfork);
@@ -6097,8 +6052,7 @@ xfs_bmap_split_extent(
/* Add new extent */
xfs_iext_next(ifp, &icur);
xfs_iext_insert(ip, &icur, &new, 0);
- XFS_IFORK_NEXT_SET(ip, whichfork,
- XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
+ ifp->if_nextents++;
if (cur) {
error = xfs_bmbt_lookup_eq(cur, &new, &i);
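A recurring change in this file is that the per-fork extent count moves out of the inode core and into struct xfs_ifork, so the XFS_IFORK_NEXTENTS/XFS_IFORK_NEXT_SET macro pairs collapse to plain field accesses. A minimal model of the before/after pattern, with cut-down types (illustration only):

    #include <stdio.h>
    #include <stdint.h>

    typedef int32_t xfs_extnum_t;

    /* Cut-down fork structure; the counter now lives alongside the format. */
    struct xfs_ifork {
        int8_t       if_format;
        xfs_extnum_t if_nextents;
    };

    int main(void)
    {
        struct xfs_ifork df = { .if_format = 2, .if_nextents = 3 };

        /*
         * Old:  XFS_IFORK_NEXT_SET(ip, whichfork,
         *           XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
         * New:  bump the counter on the fork itself.
         */
        df.if_nextents++;
        printf("nextents = %d\n", (int)df.if_nextents);
        return 0;
    }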
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index f3259ad5c22c..6028a3c825ba 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2000-2006 Silicon Graphics, Inc.
* All Rights Reserved.
diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c
index 295a59cf8840..d9c63f17d2de 100644
--- a/fs/xfs/libxfs/xfs_bmap_btree.c
+++ b/fs/xfs/libxfs/xfs_bmap_btree.c
@@ -636,10 +636,7 @@ xfs_bmbt_change_owner(
ASSERT(tp || buffer_list);
ASSERT(!(tp && buffer_list));
- if (whichfork == XFS_DATA_FORK)
- ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_BTREE);
- else
- ASSERT(ip->i_d.di_aformat == XFS_DINODE_FMT_BTREE);
+ ASSERT(XFS_IFORK_PTR(ip, whichfork)->if_format == XFS_DINODE_FMT_BTREE);
cur = xfs_bmbt_init_cursor(ip->i_mount, tp, ip, whichfork);
if (!cur)
diff --git a/fs/xfs/libxfs/xfs_bmap_btree.h b/fs/xfs/libxfs/xfs_bmap_btree.h
index 29b407d053b4..72bf74c79fb9 100644
--- a/fs/xfs/libxfs/xfs_bmap_btree.h
+++ b/fs/xfs/libxfs/xfs_bmap_btree.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2000,2002-2005 Silicon Graphics, Inc.
* All Rights Reserved.
diff --git a/fs/xfs/libxfs/xfs_btree.h b/fs/xfs/libxfs/xfs_btree.h
index 8626c5a81aad..10e50cbacacf 100644
--- a/fs/xfs/libxfs/xfs_btree.h
+++ b/fs/xfs/libxfs/xfs_btree.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
* All Rights Reserved.
diff --git a/fs/xfs/libxfs/xfs_da_btree.h b/fs/xfs/libxfs/xfs_da_btree.h
index 53e503b6f186..6e25de6621e4 100644
--- a/fs/xfs/libxfs/xfs_da_btree.h
+++ b/fs/xfs/libxfs/xfs_da_btree.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2000,2002,2005 Silicon Graphics, Inc.
* Copyright (c) 2013 Red Hat, Inc.
diff --git a/fs/xfs/libxfs/xfs_da_format.h b/fs/xfs/libxfs/xfs_da_format.h
index 08c0a4d98b89..059ac108b1b3 100644
--- a/fs/xfs/libxfs/xfs_da_format.h
+++ b/fs/xfs/libxfs/xfs_da_format.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
* Copyright (c) 2013 Red Hat, Inc.
diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c
index 22557527cfdb..d8f586256add 100644
--- a/fs/xfs/libxfs/xfs_defer.c
+++ b/fs/xfs/libxfs/xfs_defer.c
@@ -178,6 +178,18 @@ static const struct xfs_defer_op_type *defer_op_types[] = {
[XFS_DEFER_OPS_TYPE_AGFL_FREE] = &xfs_agfl_free_defer_type,
};
+static void
+xfs_defer_create_intent(
+ struct xfs_trans *tp,
+ struct xfs_defer_pending *dfp,
+ bool sort)
+{
+ const struct xfs_defer_op_type *ops = defer_op_types[dfp->dfp_type];
+
+ dfp->dfp_intent = ops->create_intent(tp, &dfp->dfp_work,
+ dfp->dfp_count, sort);
+}
+
/*
* For each pending item in the intake list, log its intent item and the
* associated extents, then add the entire intake list to the end of
@@ -187,17 +199,11 @@ STATIC void
xfs_defer_create_intents(
struct xfs_trans *tp)
{
- struct list_head *li;
struct xfs_defer_pending *dfp;
- const struct xfs_defer_op_type *ops;
list_for_each_entry(dfp, &tp->t_dfops, dfp_list) {
- ops = defer_op_types[dfp->dfp_type];
- dfp->dfp_intent = ops->create_intent(tp, dfp->dfp_count);
trace_xfs_defer_create_intent(tp->t_mountp, dfp);
- list_sort(tp->t_mountp, &dfp->dfp_work, ops->diff_items);
- list_for_each(li, &dfp->dfp_work)
- ops->log_item(tp, dfp->dfp_intent, li);
+ xfs_defer_create_intent(tp, dfp, true);
}
}
@@ -234,10 +240,13 @@ xfs_defer_trans_roll(
struct xfs_log_item *lip;
struct xfs_buf *bplist[XFS_DEFER_OPS_NR_BUFS];
struct xfs_inode *iplist[XFS_DEFER_OPS_NR_INODES];
+ unsigned int ordered = 0; /* bitmap */
int bpcount = 0, ipcount = 0;
int i;
int error;
+ BUILD_BUG_ON(NBBY * sizeof(ordered) < XFS_DEFER_OPS_NR_BUFS);
+
list_for_each_entry(lip, &tp->t_items, li_trans) {
switch (lip->li_type) {
case XFS_LI_BUF:
@@ -248,7 +257,10 @@ xfs_defer_trans_roll(
ASSERT(0);
return -EFSCORRUPTED;
}
- xfs_trans_dirty_buf(tp, bli->bli_buf);
+ if (bli->bli_flags & XFS_BLI_ORDERED)
+ ordered |= (1U << bpcount);
+ else
+ xfs_trans_dirty_buf(tp, bli->bli_buf);
bplist[bpcount++] = bli->bli_buf;
}
break;
@@ -289,6 +301,8 @@ xfs_defer_trans_roll(
/* Rejoin the buffers and dirty them so the log moves forward. */
for (i = 0; i < bpcount; i++) {
xfs_trans_bjoin(tp, bplist[i]);
+ if (ordered & (1U << i))
+ xfs_trans_ordered_buf(tp, bplist[i]);
xfs_trans_bhold(tp, bplist[i]);
}
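The roll path above keeps ordered buffers from being redundantly dirtied, remembering which bplist slots held ordered buffers in a per-slot bitmap; the BUILD_BUG_ON guards that the bitmap has at least XFS_DEFER_OPS_NR_BUFS bits. A userspace sketch of the same bookkeeping (names and sizes are stand-ins):

    #include <assert.h>
    #include <stdio.h>

    #define NR_BUFS 2    /* stand-in for XFS_DEFER_OPS_NR_BUFS */
    #define NBBY    8    /* bits per byte */

    int main(void)
    {
        unsigned int ordered = 0;    /* one bit per bplist slot */
        int bpcount = 0;
        int i;

        /* BUILD_BUG_ON in the patch; a runtime assert suffices here. */
        assert(NBBY * sizeof(ordered) >= NR_BUFS);

        /* First buffer is ordered, second is dirtied normally. */
        ordered |= 1U << bpcount;
        bpcount++;
        bpcount++;

        for (i = 0; i < bpcount; i++)
            printf("buf %d: %s\n", i,
                   (ordered & (1U << i)) ? "re-ordered" : "re-dirtied");
        return 0;
    }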
@@ -346,6 +360,53 @@ xfs_defer_cancel_list(
}
/*
+ * Log an intent-done item for the first pending intent, and finish the work
+ * items.
+ */
+static int
+xfs_defer_finish_one(
+ struct xfs_trans *tp,
+ struct xfs_defer_pending *dfp)
+{
+ const struct xfs_defer_op_type *ops = defer_op_types[dfp->dfp_type];
+ struct xfs_btree_cur *state = NULL;
+ struct list_head *li, *n;
+ int error;
+
+ trace_xfs_defer_pending_finish(tp->t_mountp, dfp);
+
+ dfp->dfp_done = ops->create_done(tp, dfp->dfp_intent, dfp->dfp_count);
+ list_for_each_safe(li, n, &dfp->dfp_work) {
+ list_del(li);
+ dfp->dfp_count--;
+ error = ops->finish_item(tp, dfp->dfp_done, li, &state);
+ if (error == -EAGAIN) {
+ /*
+ * Caller wants a fresh transaction; put the work item
+ * back on the list and log a new log intent item to
+ * replace the old one. See "Requesting a Fresh
+ * Transaction while Finishing Deferred Work" above.
+ */
+ list_add(li, &dfp->dfp_work);
+ dfp->dfp_count++;
+ dfp->dfp_done = NULL;
+ xfs_defer_create_intent(tp, dfp, false);
+ }
+
+ if (error)
+ goto out;
+ }
+
+ /* Done with the dfp, free it. */
+ list_del(&dfp->dfp_list);
+ kmem_free(dfp);
+out:
+ if (ops->finish_cleanup)
+ ops->finish_cleanup(tp, state, error);
+ return error;
+}
+
+/*
* Finish all the pending work. This involves logging intent items for
* any work items that wandered in since the last transaction roll (if
* one has even happened), rolling the transaction, and finishing the
@@ -358,11 +419,7 @@ xfs_defer_finish_noroll(
struct xfs_trans **tp)
{
struct xfs_defer_pending *dfp;
- struct list_head *li;
- struct list_head *n;
- void *state;
int error = 0;
- const struct xfs_defer_op_type *ops;
LIST_HEAD(dop_pending);
ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
@@ -371,87 +428,30 @@ xfs_defer_finish_noroll(
/* Until we run out of pending work to finish... */
while (!list_empty(&dop_pending) || !list_empty(&(*tp)->t_dfops)) {
- /* log intents and pull in intake items */
xfs_defer_create_intents(*tp);
list_splice_tail_init(&(*tp)->t_dfops, &dop_pending);
- /*
- * Roll the transaction.
- */
error = xfs_defer_trans_roll(tp);
if (error)
- goto out;
+ goto out_shutdown;
- /* Log an intent-done item for the first pending item. */
dfp = list_first_entry(&dop_pending, struct xfs_defer_pending,
dfp_list);
- ops = defer_op_types[dfp->dfp_type];
- trace_xfs_defer_pending_finish((*tp)->t_mountp, dfp);
- dfp->dfp_done = ops->create_done(*tp, dfp->dfp_intent,
- dfp->dfp_count);
-
- /* Finish the work items. */
- state = NULL;
- list_for_each_safe(li, n, &dfp->dfp_work) {
- list_del(li);
- dfp->dfp_count--;
- error = ops->finish_item(*tp, li, dfp->dfp_done,
- &state);
- if (error == -EAGAIN) {
- /*
- * Caller wants a fresh transaction;
- * put the work item back on the list
- * and jump out.
- */
- list_add(li, &dfp->dfp_work);
- dfp->dfp_count++;
- break;
- } else if (error) {
- /*
- * Clean up after ourselves and jump out.
- * xfs_defer_cancel will take care of freeing
- * all these lists and stuff.
- */
- if (ops->finish_cleanup)
- ops->finish_cleanup(*tp, state, error);
- goto out;
- }
- }
- if (error == -EAGAIN) {
- /*
- * Caller wants a fresh transaction, so log a
- * new log intent item to replace the old one
- * and roll the transaction. See "Requesting
- * a Fresh Transaction while Finishing
- * Deferred Work" above.
- */
- dfp->dfp_intent = ops->create_intent(*tp,
- dfp->dfp_count);
- dfp->dfp_done = NULL;
- list_for_each(li, &dfp->dfp_work)
- ops->log_item(*tp, dfp->dfp_intent, li);
- } else {
- /* Done with the dfp, free it. */
- list_del(&dfp->dfp_list);
- kmem_free(dfp);
- }
-
- if (ops->finish_cleanup)
- ops->finish_cleanup(*tp, state, error);
- }
-
-out:
- if (error) {
- xfs_defer_trans_abort(*tp, &dop_pending);
- xfs_force_shutdown((*tp)->t_mountp, SHUTDOWN_CORRUPT_INCORE);
- trace_xfs_defer_finish_error(*tp, error);
- xfs_defer_cancel_list((*tp)->t_mountp, &dop_pending);
- xfs_defer_cancel(*tp);
- return error;
+ error = xfs_defer_finish_one(*tp, dfp);
+ if (error && error != -EAGAIN)
+ goto out_shutdown;
}
trace_xfs_defer_finish_done(*tp, _RET_IP_);
return 0;
+
+out_shutdown:
+ xfs_defer_trans_abort(*tp, &dop_pending);
+ xfs_force_shutdown((*tp)->t_mountp, SHUTDOWN_CORRUPT_INCORE);
+ trace_xfs_defer_finish_error(*tp, error);
+ xfs_defer_cancel_list((*tp)->t_mountp, &dop_pending);
+ xfs_defer_cancel(*tp);
+ return error;
}
int
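With xfs_defer_finish_one factored out, the main loop reduces to: create intents, splice the intake list, roll the transaction, finish one pending item, and retry on -EAGAIN. A toy model of that control flow, with the work reduced to a single integer item (all names here are illustrative):

    #include <stdio.h>

    #define EAGAIN 11

    /* Pretend work item; asks for a fresh transaction exactly once. */
    static int finish_one(int item)
    {
        static int fresh_tp_requested;

        if (!fresh_tp_requested++)
            return -EAGAIN;    /* requeue item, log a new intent */
        printf("finished item %d\n", item);
        return 0;
    }

    int main(void)
    {
        int error;

        do {
            /* the real loop rolls the transaction here */
            error = finish_one(42);
            if (error && error != -EAGAIN)
                return 1;    /* out_shutdown path */
        } while (error == -EAGAIN);
        return 0;
    }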
diff --git a/fs/xfs/libxfs/xfs_defer.h b/fs/xfs/libxfs/xfs_defer.h
index 7c28d7608ac6..6b2ca580f2b0 100644
--- a/fs/xfs/libxfs/xfs_defer.h
+++ b/fs/xfs/libxfs/xfs_defer.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2016 Oracle. All Rights Reserved.
* Author: Darrick J. Wong <darrick.wong@oracle.com>
@@ -6,6 +6,7 @@
#ifndef __XFS_DEFER_H__
#define __XFS_DEFER_H__
+struct xfs_btree_cur;
struct xfs_defer_op_type;
/*
@@ -28,8 +29,8 @@ enum xfs_defer_ops_type {
struct xfs_defer_pending {
struct list_head dfp_list; /* pending items */
struct list_head dfp_work; /* work items */
- void *dfp_intent; /* log intent item */
- void *dfp_done; /* log done item */
+ struct xfs_log_item *dfp_intent; /* log intent item */
+ struct xfs_log_item *dfp_done; /* log done item */
unsigned int dfp_count; /* # extent items */
enum xfs_defer_ops_type dfp_type;
};
@@ -43,15 +44,16 @@ void xfs_defer_move(struct xfs_trans *dtp, struct xfs_trans *stp);
/* Description of a deferred type. */
struct xfs_defer_op_type {
- void (*abort_intent)(void *);
- void *(*create_done)(struct xfs_trans *, void *, unsigned int);
- int (*finish_item)(struct xfs_trans *, struct list_head *, void *,
- void **);
- void (*finish_cleanup)(struct xfs_trans *, void *, int);
- void (*cancel_item)(struct list_head *);
- int (*diff_items)(void *, struct list_head *, struct list_head *);
- void *(*create_intent)(struct xfs_trans *, uint);
- void (*log_item)(struct xfs_trans *, void *, struct list_head *);
+ struct xfs_log_item *(*create_intent)(struct xfs_trans *tp,
+ struct list_head *items, unsigned int count, bool sort);
+ void (*abort_intent)(struct xfs_log_item *intent);
+ struct xfs_log_item *(*create_done)(struct xfs_trans *tp,
+ struct xfs_log_item *intent, unsigned int count);
+ int (*finish_item)(struct xfs_trans *tp, struct xfs_log_item *done,
+ struct list_head *item, struct xfs_btree_cur **state);
+ void (*finish_cleanup)(struct xfs_trans *tp,
+ struct xfs_btree_cur *state, int error);
+ void (*cancel_item)(struct list_head *item);
unsigned int max_items;
};
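The reworked op table replaces void pointers with struct xfs_log_item and struct xfs_btree_cur, so every callback is type-checked at the call site. A compilable sketch of registering and driving one such table (structures below are simplified stand-ins):

    #include <stdio.h>

    struct log_item { int id; };

    /* Cut-down version of the strongly typed op table. */
    struct defer_op_type {
        struct log_item *(*create_intent)(unsigned int count, int sort);
        int (*finish_item)(struct log_item *done);
    };

    static struct log_item the_intent = { .id = 7 };

    static struct log_item *bmap_create_intent(unsigned int count, int sort)
    {
        printf("intent covers %u items (sort=%d)\n", count, sort);
        return &the_intent;
    }

    static int bmap_finish_item(struct log_item *done)
    {
        printf("finished against done item %d\n", done->id);
        return 0;
    }

    static const struct defer_op_type bmap_defer_type = {
        .create_intent = bmap_create_intent,
        .finish_item   = bmap_finish_item,
    };

    int main(void)
    {
        struct log_item *li = bmap_defer_type.create_intent(1, 1);

        return bmap_defer_type.finish_item(li);
    }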
diff --git a/fs/xfs/libxfs/xfs_dir2.c b/fs/xfs/libxfs/xfs_dir2.c
index dd6fcaaea318..612a9c5e41b1 100644
--- a/fs/xfs/libxfs/xfs_dir2.c
+++ b/fs/xfs/libxfs/xfs_dir2.c
@@ -278,7 +278,7 @@ xfs_dir_createname(
if (!inum)
args->op_flags |= XFS_DA_OP_JUSTCHECK;
- if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) {
+ if (dp->i_df.if_format == XFS_DINODE_FMT_LOCAL) {
rval = xfs_dir2_sf_addname(args);
goto out_free;
}
@@ -373,7 +373,7 @@ xfs_dir_lookup(
args->op_flags |= XFS_DA_OP_CILOOKUP;
lock_mode = xfs_ilock_data_map_shared(dp);
- if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) {
+ if (dp->i_df.if_format == XFS_DINODE_FMT_LOCAL) {
rval = xfs_dir2_sf_lookup(args);
goto out_check_rval;
}
@@ -443,7 +443,7 @@ xfs_dir_removename(
args->whichfork = XFS_DATA_FORK;
args->trans = tp;
- if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) {
+ if (dp->i_df.if_format == XFS_DINODE_FMT_LOCAL) {
rval = xfs_dir2_sf_removename(args);
goto out_free;
}
@@ -504,7 +504,7 @@ xfs_dir_replace(
args->whichfork = XFS_DATA_FORK;
args->trans = tp;
- if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL) {
+ if (dp->i_df.if_format == XFS_DINODE_FMT_LOCAL) {
rval = xfs_dir2_sf_replace(args);
goto out_free;
}
diff --git a/fs/xfs/libxfs/xfs_dir2.h b/fs/xfs/libxfs/xfs_dir2.h
index 033777e282f2..e55378640b05 100644
--- a/fs/xfs/libxfs/xfs_dir2.h
+++ b/fs/xfs/libxfs/xfs_dir2.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
* All Rights Reserved.
diff --git a/fs/xfs/libxfs/xfs_dir2_block.c b/fs/xfs/libxfs/xfs_dir2_block.c
index 1dbf2f980a26..5b59d3f7746b 100644
--- a/fs/xfs/libxfs/xfs_dir2_block.c
+++ b/fs/xfs/libxfs/xfs_dir2_block.c
@@ -1104,7 +1104,7 @@ xfs_dir2_sf_to_block(
ASSERT(ifp->if_bytes == dp->i_d.di_size);
ASSERT(ifp->if_u1.if_data != NULL);
ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(oldsfp->i8count));
- ASSERT(dp->i_d.di_nextents == 0);
+ ASSERT(dp->i_df.if_nextents == 0);
/*
* Copy the directory into a temporary buffer.
diff --git a/fs/xfs/libxfs/xfs_dir2_priv.h b/fs/xfs/libxfs/xfs_dir2_priv.h
index 01ee0b926572..44c6a77cba05 100644
--- a/fs/xfs/libxfs/xfs_dir2_priv.h
+++ b/fs/xfs/libxfs/xfs_dir2_priv.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
* All Rights Reserved.
diff --git a/fs/xfs/libxfs/xfs_dir2_sf.c b/fs/xfs/libxfs/xfs_dir2_sf.c
index 7b7f6fb2ea3b..2463b5d73447 100644
--- a/fs/xfs/libxfs/xfs_dir2_sf.c
+++ b/fs/xfs/libxfs/xfs_dir2_sf.c
@@ -343,7 +343,7 @@ xfs_dir2_block_to_sf(
*/
ASSERT(dp->i_df.if_bytes == 0);
xfs_init_local_fork(dp, XFS_DATA_FORK, sfp, size);
- dp->i_d.di_format = XFS_DINODE_FMT_LOCAL;
+ dp->i_df.if_format = XFS_DINODE_FMT_LOCAL;
dp->i_d.di_size = size;
logflags |= XFS_ILOG_DDATA;
@@ -710,11 +710,11 @@ xfs_dir2_sf_verify(
struct xfs_inode *ip)
{
struct xfs_mount *mp = ip->i_mount;
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
struct xfs_dir2_sf_hdr *sfp;
struct xfs_dir2_sf_entry *sfep;
struct xfs_dir2_sf_entry *next_sfep;
char *endp;
- struct xfs_ifork *ifp;
xfs_ino_t ino;
int i;
int i8count;
@@ -723,9 +723,8 @@ xfs_dir2_sf_verify(
int error;
uint8_t filetype;
- ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_LOCAL);
+ ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);
- ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
sfp = (struct xfs_dir2_sf_hdr *)ifp->if_u1.if_data;
size = ifp->if_bytes;
@@ -827,9 +826,9 @@ xfs_dir2_sf_create(
* If it's currently a zero-length extent file,
* convert it to local format.
*/
- if (dp->i_d.di_format == XFS_DINODE_FMT_EXTENTS) {
+ if (dp->i_df.if_format == XFS_DINODE_FMT_EXTENTS) {
dp->i_df.if_flags &= ~XFS_IFEXTENTS; /* just in case */
- dp->i_d.di_format = XFS_DINODE_FMT_LOCAL;
+ dp->i_df.if_format = XFS_DINODE_FMT_LOCAL;
xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE);
dp->i_df.if_flags |= XFS_IFINLINE;
}
@@ -1027,7 +1026,7 @@ xfs_dir2_sf_replace_needblock(
int newsize;
struct xfs_dir2_sf_hdr *sfp;
- if (dp->i_d.di_format != XFS_DINODE_FMT_LOCAL)
+ if (dp->i_df.if_format != XFS_DINODE_FMT_LOCAL)
return false;
sfp = (struct xfs_dir2_sf_hdr *)dp->i_df.if_u1.if_data;
diff --git a/fs/xfs/libxfs/xfs_errortag.h b/fs/xfs/libxfs/xfs_errortag.h
index 79e6c4fb1d8a..53b305dea381 100644
--- a/fs/xfs/libxfs/xfs_errortag.h
+++ b/fs/xfs/libxfs/xfs_errortag.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
* Copyright (C) 2017 Oracle.
@@ -55,7 +55,8 @@
#define XFS_ERRTAG_FORCE_SCRUB_REPAIR 32
#define XFS_ERRTAG_FORCE_SUMMARY_RECALC 33
#define XFS_ERRTAG_IUNLINK_FALLBACK 34
-#define XFS_ERRTAG_MAX 35
+#define XFS_ERRTAG_BUF_IOERROR 35
+#define XFS_ERRTAG_MAX 36
/*
* Random factors for above tags, 1 means always, 2 means 1/2 time, etc.
@@ -95,5 +96,6 @@
#define XFS_RANDOM_FORCE_SCRUB_REPAIR 1
#define XFS_RANDOM_FORCE_SUMMARY_RECALC 1
#define XFS_RANDOM_IUNLINK_FALLBACK (XFS_RANDOM_DEFAULT/10)
+#define XFS_RANDOM_BUF_IOERROR XFS_RANDOM_DEFAULT
#endif /* __XFS_ERRORTAG_H_ */
diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
index 045556e78ee2..b42a52bfa1e9 100644
--- a/fs/xfs/libxfs/xfs_format.h
+++ b/fs/xfs/libxfs/xfs_format.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2000-2005 Silicon Graphics, Inc.
* All Rights Reserved.
@@ -964,13 +964,12 @@ enum xfs_dinode_fmt {
/*
* Inode data & attribute fork sizes, per inode.
*/
-#define XFS_DFORK_Q(dip) ((dip)->di_forkoff != 0)
#define XFS_DFORK_BOFF(dip) ((int)((dip)->di_forkoff << 3))
#define XFS_DFORK_DSIZE(dip,mp) \
- (XFS_DFORK_Q(dip) ? XFS_DFORK_BOFF(dip) : XFS_LITINO(mp))
+ ((dip)->di_forkoff ? XFS_DFORK_BOFF(dip) : XFS_LITINO(mp))
#define XFS_DFORK_ASIZE(dip,mp) \
- (XFS_DFORK_Q(dip) ? XFS_LITINO(mp) - XFS_DFORK_BOFF(dip) : 0)
+ ((dip)->di_forkoff ? XFS_LITINO(mp) - XFS_DFORK_BOFF(dip) : 0)
#define XFS_DFORK_SIZE(dip,mp,w) \
((w) == XFS_DATA_FORK ? \
XFS_DFORK_DSIZE(dip, mp) : \
@@ -1681,7 +1680,7 @@ struct xfs_acl_entry {
struct xfs_acl {
__be32 acl_cnt;
- struct xfs_acl_entry acl_entry[0];
+ struct xfs_acl_entry acl_entry[];
};
/*
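The acl_entry[0] declaration becomes a C99 flexible array member; the allocation is then sized as the header plus however many entries are needed. A minimal sketch of that allocation pattern:

    #include <stdio.h>
    #include <stdlib.h>

    struct acl_entry { unsigned int ae_tag; };

    struct acl {
        int acl_cnt;
        struct acl_entry acl_entry[];    /* flexible array member */
    };

    int main(void)
    {
        int n = 3;
        struct acl *a = malloc(sizeof(*a) + n * sizeof(a->acl_entry[0]));

        if (!a)
            return 1;
        a->acl_cnt = n;
        printf("room for %d entries\n", a->acl_cnt);
        free(a);
        return 0;
    }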
diff --git a/fs/xfs/libxfs/xfs_fs.h b/fs/xfs/libxfs/xfs_fs.h
index 245188e4f6d3..84bcffa87753 100644
--- a/fs/xfs/libxfs/xfs_fs.h
+++ b/fs/xfs/libxfs/xfs_fs.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: LGPL-2.1
+/* SPDX-License-Identifier: LGPL-2.1 */
/*
* Copyright (c) 1995-2005 Silicon Graphics, Inc.
* All Rights Reserved.
diff --git a/fs/xfs/libxfs/xfs_health.h b/fs/xfs/libxfs/xfs_health.h
index 272005ac8c88..99e796256c5d 100644
--- a/fs/xfs/libxfs/xfs_health.h
+++ b/fs/xfs/libxfs/xfs_health.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2019 Oracle. All Rights Reserved.
* Author: Darrick J. Wong <darrick.wong@oracle.com>
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index 39c5a6e24915..6f84ea85fdd8 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -161,8 +161,7 @@ xfs_imap_to_bp(
struct xfs_imap *imap,
struct xfs_dinode **dipp,
struct xfs_buf **bpp,
- uint buf_flags,
- uint iget_flags)
+ uint buf_flags)
{
struct xfs_buf *bp;
int error;
@@ -172,12 +171,7 @@ xfs_imap_to_bp(
(int)imap->im_len, buf_flags, &bp,
&xfs_inode_buf_ops);
if (error) {
- if (error == -EAGAIN) {
- ASSERT(buf_flags & XBF_TRYLOCK);
- return error;
- }
- xfs_warn(mp, "%s: xfs_trans_read_buf() returned error %d.",
- __func__, error);
+ ASSERT(error != -EAGAIN || (buf_flags & XBF_TRYLOCK));
return error;
}
@@ -186,13 +180,36 @@ xfs_imap_to_bp(
return 0;
}
-void
+int
xfs_inode_from_disk(
struct xfs_inode *ip,
struct xfs_dinode *from)
{
struct xfs_icdinode *to = &ip->i_d;
struct inode *inode = VFS_I(ip);
+ int error;
+ xfs_failaddr_t fa;
+
+ ASSERT(ip->i_cowfp == NULL);
+ ASSERT(ip->i_afp == NULL);
+
+ fa = xfs_dinode_verify(ip->i_mount, ip->i_ino, from);
+ if (fa) {
+ xfs_inode_verifier_error(ip, -EFSCORRUPTED, "dinode", from,
+ sizeof(*from), fa);
+ return -EFSCORRUPTED;
+ }
+
+ /*
+ * First get the permanent information that is needed to allocate an
+ * inode. If the inode is unused, mode is zero and we shouldn't mess
+ * with the unitialized part of it.
+ */
+ to->di_flushiter = be16_to_cpu(from->di_flushiter);
+ inode->i_generation = be32_to_cpu(from->di_gen);
+ inode->i_mode = be16_to_cpu(from->di_mode);
+ if (!inode->i_mode)
+ return 0;
/*
* Convert v1 inodes immediately to v2 inode format as this is the
@@ -208,10 +225,8 @@ xfs_inode_from_disk(
be16_to_cpu(from->di_projid_lo);
}
- to->di_format = from->di_format;
i_uid_write(inode, be32_to_cpu(from->di_uid));
i_gid_write(inode, be32_to_cpu(from->di_gid));
- to->di_flushiter = be16_to_cpu(from->di_flushiter);
/*
* Time is signed, so need to convert to signed 32 bit before
@@ -225,16 +240,11 @@ xfs_inode_from_disk(
inode->i_mtime.tv_nsec = (int)be32_to_cpu(from->di_mtime.t_nsec);
inode->i_ctime.tv_sec = (int)be32_to_cpu(from->di_ctime.t_sec);
inode->i_ctime.tv_nsec = (int)be32_to_cpu(from->di_ctime.t_nsec);
- inode->i_generation = be32_to_cpu(from->di_gen);
- inode->i_mode = be16_to_cpu(from->di_mode);
to->di_size = be64_to_cpu(from->di_size);
to->di_nblocks = be64_to_cpu(from->di_nblocks);
to->di_extsize = be32_to_cpu(from->di_extsize);
- to->di_nextents = be32_to_cpu(from->di_nextents);
- to->di_anextents = be16_to_cpu(from->di_anextents);
to->di_forkoff = from->di_forkoff;
- to->di_aformat = from->di_aformat;
to->di_dmevmask = be32_to_cpu(from->di_dmevmask);
to->di_dmstate = be16_to_cpu(from->di_dmstate);
to->di_flags = be16_to_cpu(from->di_flags);
@@ -247,6 +257,22 @@ xfs_inode_from_disk(
to->di_flags2 = be64_to_cpu(from->di_flags2);
to->di_cowextsize = be32_to_cpu(from->di_cowextsize);
}
+
+ error = xfs_iformat_data_fork(ip, from);
+ if (error)
+ return error;
+ if (from->di_forkoff) {
+ error = xfs_iformat_attr_fork(ip, from);
+ if (error)
+ goto out_destroy_data_fork;
+ }
+ if (xfs_is_reflink_inode(ip))
+ xfs_ifork_init_cow(ip);
+ return 0;
+
+out_destroy_data_fork:
+ xfs_idestroy_fork(&ip->i_df);
+ return error;
}
void
@@ -261,7 +287,7 @@ xfs_inode_to_disk(
to->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
to->di_onlink = 0;
- to->di_format = from->di_format;
+ to->di_format = xfs_ifork_format(&ip->i_df);
to->di_uid = cpu_to_be32(i_uid_read(inode));
to->di_gid = cpu_to_be32(i_gid_read(inode));
to->di_projid_lo = cpu_to_be16(from->di_projid & 0xffff);
@@ -281,10 +307,10 @@ xfs_inode_to_disk(
to->di_size = cpu_to_be64(from->di_size);
to->di_nblocks = cpu_to_be64(from->di_nblocks);
to->di_extsize = cpu_to_be32(from->di_extsize);
- to->di_nextents = cpu_to_be32(from->di_nextents);
- to->di_anextents = cpu_to_be16(from->di_anextents);
+ to->di_nextents = cpu_to_be32(xfs_ifork_nextents(&ip->i_df));
+ to->di_anextents = cpu_to_be16(xfs_ifork_nextents(ip->i_afp));
to->di_forkoff = from->di_forkoff;
- to->di_aformat = from->di_aformat;
+ to->di_aformat = xfs_ifork_format(ip->i_afp);
to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
to->di_dmstate = cpu_to_be16(from->di_dmstate);
to->di_flags = cpu_to_be16(from->di_flags);
@@ -405,7 +431,7 @@ xfs_dinode_verify_forkoff(
struct xfs_dinode *dip,
struct xfs_mount *mp)
{
- if (!XFS_DFORK_Q(dip))
+ if (!dip->di_forkoff)
return NULL;
switch (dip->di_format) {
@@ -508,7 +534,7 @@ xfs_dinode_verify(
return __this_address;
}
- if (XFS_DFORK_Q(dip)) {
+ if (dip->di_forkoff) {
fa = xfs_dinode_verify_fork(dip, mp, XFS_ATTR_FORK);
if (fa)
return fa;
@@ -585,122 +611,6 @@ xfs_dinode_calc_crc(
}
/*
- * Read the disk inode attributes into the in-core inode structure.
- *
- * For version 5 superblocks, if we are initialising a new inode and we are not
- * utilising the XFS_MOUNT_IKEEP inode cluster mode, we can simply build the new
- * inode core with a random generation number. If we are keeping inodes around,
- * we need to read the inode cluster to get the existing generation number off
- * disk. Further, if we are using version 4 superblocks (i.e. v1/v2 inode
- * format) then log recovery is dependent on the di_flushiter field being
- * initialised from the current on-disk value and hence we must also read the
- * inode off disk.
- */
-int
-xfs_iread(
- xfs_mount_t *mp,
- xfs_trans_t *tp,
- xfs_inode_t *ip,
- uint iget_flags)
-{
- xfs_buf_t *bp;
- xfs_dinode_t *dip;
- xfs_failaddr_t fa;
- int error;
-
- /*
- * Fill in the location information in the in-core inode.
- */
- error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags);
- if (error)
- return error;
-
- /* shortcut IO on inode allocation if possible */
- if ((iget_flags & XFS_IGET_CREATE) &&
- xfs_sb_version_has_v3inode(&mp->m_sb) &&
- !(mp->m_flags & XFS_MOUNT_IKEEP)) {
- VFS_I(ip)->i_generation = prandom_u32();
- return 0;
- }
-
- /*
- * Get pointers to the on-disk inode and the buffer containing it.
- */
- error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0, iget_flags);
- if (error)
- return error;
-
- /* even unallocated inodes are verified */
- fa = xfs_dinode_verify(mp, ip->i_ino, dip);
- if (fa) {
- xfs_inode_verifier_error(ip, -EFSCORRUPTED, "dinode", dip,
- sizeof(*dip), fa);
- error = -EFSCORRUPTED;
- goto out_brelse;
- }
-
- /*
- * If the on-disk inode is already linked to a directory
- * entry, copy all of the inode into the in-core inode.
- * xfs_iformat_fork() handles copying in the inode format
- * specific information.
- * Otherwise, just get the truly permanent information.
- */
- if (dip->di_mode) {
- xfs_inode_from_disk(ip, dip);
- error = xfs_iformat_fork(ip, dip);
- if (error) {
-#ifdef DEBUG
- xfs_alert(mp, "%s: xfs_iformat() returned error %d",
- __func__, error);
-#endif /* DEBUG */
- goto out_brelse;
- }
- } else {
- /*
- * Partial initialisation of the in-core inode. Just the bits
- * that xfs_ialloc won't overwrite or relies on being correct.
- */
- VFS_I(ip)->i_generation = be32_to_cpu(dip->di_gen);
- ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter);
-
- /*
- * Make sure to pull in the mode here as well in
- * case the inode is released without being used.
- * This ensures that xfs_inactive() will see that
- * the inode is already free and not try to mess
- * with the uninitialized part of it.
- */
- VFS_I(ip)->i_mode = 0;
- }
-
- ip->i_delayed_blks = 0;
-
- /*
- * Mark the buffer containing the inode as something to keep
- * around for a while. This helps to keep recently accessed
- * meta-data in-core longer.
- */
- xfs_buf_set_ref(bp, XFS_INO_REF);
-
- /*
- * Use xfs_trans_brelse() to release the buffer containing the on-disk
- * inode, because it was acquired with xfs_trans_read_buf() in
- * xfs_imap_to_bp() above. If tp is NULL, this is just a normal
- * brelse(). If we're within a transaction, then xfs_trans_brelse()
- * will only release the buffer if it is not dirty within the
- * transaction. It will be OK to release the buffer in this case,
- * because inodes on disk are never destroyed and we will be locking the
- * new in-core inode before putting it in the cache where other
- * processes can find it. Thus we don't have to worry about the inode
- * being changed just because we released the buffer.
- */
- out_brelse:
- xfs_trans_brelse(tp, bp);
- return error;
-}
-
-/*
* Validate di_extsize hint.
*
* The rules are documented at xfs_ioctl_setattr_check_extsize().
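xfs_inode_from_disk now absorbs the old xfs_iread logic: verify the raw dinode, copy in the permanent fields, return early for unused (mode == 0) inodes, then format the data fork and the optional attr fork with unwind on failure. A schematic of that ordering with stub helpers (not the kernel functions):

    #include <stdio.h>

    #define EFSCORRUPTED 117

    static int dinode_verify(int ok)      { return ok ? 0 : -EFSCORRUPTED; }
    static int iformat_data_fork(void)    { return 0; }
    static int iformat_attr_fork(void)    { return 0; }
    static void idestroy_data_fork(void)  { puts("unwound data fork"); }

    static int inode_from_disk(int verifier_ok, int mode, int forkoff)
    {
        int error = dinode_verify(verifier_ok);

        if (error)
            return error;
        if (!mode)
            return 0;    /* unused inode: leave the rest untouched */
        error = iformat_data_fork();
        if (error)
            return error;
        if (forkoff) {
            error = iformat_attr_fork();
            if (error) {
                idestroy_data_fork();
                return error;
            }
        }
        return 0;
    }

    int main(void)
    {
        printf("allocated inode -> %d\n", inode_from_disk(1, 0100644, 1));
        printf("unused inode    -> %d\n", inode_from_disk(1, 0, 0));
        return 0;
    }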
diff --git a/fs/xfs/libxfs/xfs_inode_buf.h b/fs/xfs/libxfs/xfs_inode_buf.h
index 9b373dcf9e34..865ac493c72a 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.h
+++ b/fs/xfs/libxfs/xfs_inode_buf.h
@@ -16,16 +16,12 @@ struct xfs_dinode;
* format specific structures at the appropriate time.
*/
struct xfs_icdinode {
- int8_t di_format; /* format of di_c data */
uint16_t di_flushiter; /* incremented on flush */
uint32_t di_projid; /* owner's project id */
xfs_fsize_t di_size; /* number of bytes in file */
xfs_rfsblock_t di_nblocks; /* # of direct & btree blocks used */
xfs_extlen_t di_extsize; /* basic/minimum extent size for file */
- xfs_extnum_t di_nextents; /* number of extents in data fork */
- xfs_aextnum_t di_anextents; /* number of extents in attribute fork*/
uint8_t di_forkoff; /* attr fork offs, <<3 for 64b align */
- int8_t di_aformat; /* format of attr fork's data */
uint32_t di_dmevmask; /* DMIG event mask */
uint16_t di_dmstate; /* DMIG state info */
uint16_t di_flags; /* random flags, XFS_DIFLAG_... */
@@ -48,13 +44,11 @@ struct xfs_imap {
int xfs_imap_to_bp(struct xfs_mount *, struct xfs_trans *,
struct xfs_imap *, struct xfs_dinode **,
- struct xfs_buf **, uint, uint);
-int xfs_iread(struct xfs_mount *, struct xfs_trans *,
- struct xfs_inode *, uint);
+ struct xfs_buf **, uint);
void xfs_dinode_calc_crc(struct xfs_mount *, struct xfs_dinode *);
void xfs_inode_to_disk(struct xfs_inode *ip, struct xfs_dinode *to,
xfs_lsn_t lsn);
-void xfs_inode_from_disk(struct xfs_inode *ip, struct xfs_dinode *from);
+int xfs_inode_from_disk(struct xfs_inode *ip, struct xfs_dinode *from);
void xfs_log_dinode_to_disk(struct xfs_log_dinode *from,
struct xfs_dinode *to);
diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c
index 518c6f0ec3a6..28b366275ae0 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.c
+++ b/fs/xfs/libxfs/xfs_inode_fork.c
@@ -26,110 +26,6 @@
kmem_zone_t *xfs_ifork_zone;
-STATIC int xfs_iformat_local(xfs_inode_t *, xfs_dinode_t *, int, int);
-STATIC int xfs_iformat_extents(xfs_inode_t *, xfs_dinode_t *, int);
-STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int);
-
-/*
- * Copy inode type and data and attr format specific information from the
- * on-disk inode to the in-core inode and fork structures. For fifos, devices,
- * and sockets this means set i_rdev to the proper value. For files,
- * directories, and symlinks this means to bring in the in-line data or extent
- * pointers as well as the attribute fork. For a fork in B-tree format, only
- * the root is immediately brought in-core. The rest will be read in later when
- * first referenced (see xfs_iread_extents()).
- */
-int
-xfs_iformat_fork(
- struct xfs_inode *ip,
- struct xfs_dinode *dip)
-{
- struct inode *inode = VFS_I(ip);
- struct xfs_attr_shortform *atp;
- int size;
- int error = 0;
- xfs_fsize_t di_size;
-
- switch (inode->i_mode & S_IFMT) {
- case S_IFIFO:
- case S_IFCHR:
- case S_IFBLK:
- case S_IFSOCK:
- ip->i_d.di_size = 0;
- inode->i_rdev = xfs_to_linux_dev_t(xfs_dinode_get_rdev(dip));
- break;
-
- case S_IFREG:
- case S_IFLNK:
- case S_IFDIR:
- switch (dip->di_format) {
- case XFS_DINODE_FMT_LOCAL:
- di_size = be64_to_cpu(dip->di_size);
- size = (int)di_size;
- error = xfs_iformat_local(ip, dip, XFS_DATA_FORK, size);
- break;
- case XFS_DINODE_FMT_EXTENTS:
- error = xfs_iformat_extents(ip, dip, XFS_DATA_FORK);
- break;
- case XFS_DINODE_FMT_BTREE:
- error = xfs_iformat_btree(ip, dip, XFS_DATA_FORK);
- break;
- default:
- xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__,
- dip, sizeof(*dip), __this_address);
- return -EFSCORRUPTED;
- }
- break;
-
- default:
- xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip,
- sizeof(*dip), __this_address);
- return -EFSCORRUPTED;
- }
- if (error)
- return error;
-
- if (xfs_is_reflink_inode(ip)) {
- ASSERT(ip->i_cowfp == NULL);
- xfs_ifork_init_cow(ip);
- }
-
- if (!XFS_DFORK_Q(dip))
- return 0;
-
- ASSERT(ip->i_afp == NULL);
- ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_NOFS);
-
- switch (dip->di_aformat) {
- case XFS_DINODE_FMT_LOCAL:
- atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip);
- size = be16_to_cpu(atp->hdr.totsize);
-
- error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size);
- break;
- case XFS_DINODE_FMT_EXTENTS:
- error = xfs_iformat_extents(ip, dip, XFS_ATTR_FORK);
- break;
- case XFS_DINODE_FMT_BTREE:
- error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK);
- break;
- default:
- xfs_inode_verifier_error(ip, error, __func__, dip,
- sizeof(*dip), __this_address);
- error = -EFSCORRUPTED;
- break;
- }
- if (error) {
- kmem_cache_free(xfs_ifork_zone, ip->i_afp);
- ip->i_afp = NULL;
- if (ip->i_cowfp)
- kmem_cache_free(xfs_ifork_zone, ip->i_cowfp);
- ip->i_cowfp = NULL;
- xfs_idestroy_fork(ip, XFS_DATA_FORK);
- }
- return error;
-}
-
void
xfs_init_local_fork(
struct xfs_inode *ip,
@@ -292,12 +188,11 @@ xfs_iformat_btree(
* or the number of extents is greater than the number of
* blocks.
*/
- if (unlikely(XFS_IFORK_NEXTENTS(ip, whichfork) <=
- XFS_IFORK_MAXEXT(ip, whichfork) ||
+ if (unlikely(ifp->if_nextents <= XFS_IFORK_MAXEXT(ip, whichfork) ||
nrecs == 0 ||
XFS_BMDR_SPACE_CALC(nrecs) >
XFS_DFORK_SIZE(dip, mp, whichfork) ||
- XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks) ||
+ ifp->if_nextents > ip->i_d.di_nblocks) ||
level == 0 || level > XFS_BTREE_MAXLEVELS) {
xfs_warn(mp, "corrupt inode %Lu (btree).",
(unsigned long long) ip->i_ino);
@@ -325,6 +220,110 @@ xfs_iformat_btree(
return 0;
}
+int
+xfs_iformat_data_fork(
+ struct xfs_inode *ip,
+ struct xfs_dinode *dip)
+{
+ struct inode *inode = VFS_I(ip);
+ int error;
+
+ /*
+ * Initialize the extent count early, as the per-format routines may
+ * depend on it.
+ */
+ ip->i_df.if_format = dip->di_format;
+ ip->i_df.if_nextents = be32_to_cpu(dip->di_nextents);
+
+ switch (inode->i_mode & S_IFMT) {
+ case S_IFIFO:
+ case S_IFCHR:
+ case S_IFBLK:
+ case S_IFSOCK:
+ ip->i_d.di_size = 0;
+ inode->i_rdev = xfs_to_linux_dev_t(xfs_dinode_get_rdev(dip));
+ return 0;
+ case S_IFREG:
+ case S_IFLNK:
+ case S_IFDIR:
+ switch (ip->i_df.if_format) {
+ case XFS_DINODE_FMT_LOCAL:
+ error = xfs_iformat_local(ip, dip, XFS_DATA_FORK,
+ be64_to_cpu(dip->di_size));
+ if (!error)
+ error = xfs_ifork_verify_local_data(ip);
+ return error;
+ case XFS_DINODE_FMT_EXTENTS:
+ return xfs_iformat_extents(ip, dip, XFS_DATA_FORK);
+ case XFS_DINODE_FMT_BTREE:
+ return xfs_iformat_btree(ip, dip, XFS_DATA_FORK);
+ default:
+ xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__,
+ dip, sizeof(*dip), __this_address);
+ return -EFSCORRUPTED;
+ }
+ break;
+ default:
+ xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip,
+ sizeof(*dip), __this_address);
+ return -EFSCORRUPTED;
+ }
+}
+
+static uint16_t
+xfs_dfork_attr_shortform_size(
+ struct xfs_dinode *dip)
+{
+ struct xfs_attr_shortform *atp =
+ (struct xfs_attr_shortform *)XFS_DFORK_APTR(dip);
+
+ return be16_to_cpu(atp->hdr.totsize);
+}
+
+int
+xfs_iformat_attr_fork(
+ struct xfs_inode *ip,
+ struct xfs_dinode *dip)
+{
+ int error = 0;
+
+ /*
+ * Initialize the extent count early, as the per-format routines may
+ * depend on it.
+ */
+ ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_NOFS);
+ ip->i_afp->if_format = dip->di_aformat;
+ if (unlikely(ip->i_afp->if_format == 0)) /* pre IRIX 6.2 file system */
+ ip->i_afp->if_format = XFS_DINODE_FMT_EXTENTS;
+ ip->i_afp->if_nextents = be16_to_cpu(dip->di_anextents);
+
+ switch (ip->i_afp->if_format) {
+ case XFS_DINODE_FMT_LOCAL:
+ error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK,
+ xfs_dfork_attr_shortform_size(dip));
+ if (!error)
+ error = xfs_ifork_verify_local_attr(ip);
+ break;
+ case XFS_DINODE_FMT_EXTENTS:
+ error = xfs_iformat_extents(ip, dip, XFS_ATTR_FORK);
+ break;
+ case XFS_DINODE_FMT_BTREE:
+ error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK);
+ break;
+ default:
+ xfs_inode_verifier_error(ip, error, __func__, dip,
+ sizeof(*dip), __this_address);
+ error = -EFSCORRUPTED;
+ break;
+ }
+
+ if (error) {
+ kmem_cache_free(xfs_ifork_zone, ip->i_afp);
+ ip->i_afp = NULL;
+ }
+ return error;
+}
+
/*
* Reallocate the space for if_broot based on the number of records
* being added or deleted as indicated in rec_diff. Move the records
@@ -504,38 +503,24 @@ xfs_idata_realloc(
void
xfs_idestroy_fork(
- xfs_inode_t *ip,
- int whichfork)
+ struct xfs_ifork *ifp)
{
- struct xfs_ifork *ifp;
-
- ifp = XFS_IFORK_PTR(ip, whichfork);
if (ifp->if_broot != NULL) {
kmem_free(ifp->if_broot);
ifp->if_broot = NULL;
}
/*
- * If the format is local, then we can't have an extents
- * array so just look for an inline data array. If we're
- * not local then we may or may not have an extents list,
- * so check and free it up if we do.
+ * If the format is local, then we can't have an extents array so just
+ * look for an inline data array. If we're not local then we may or may
+ * not have an extents list, so check and free it up if we do.
*/
- if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
- if (ifp->if_u1.if_data != NULL) {
- kmem_free(ifp->if_u1.if_data);
- ifp->if_u1.if_data = NULL;
- }
- } else if ((ifp->if_flags & XFS_IFEXTENTS) && ifp->if_height) {
- xfs_iext_destroy(ifp);
- }
-
- if (whichfork == XFS_ATTR_FORK) {
- kmem_cache_free(xfs_ifork_zone, ip->i_afp);
- ip->i_afp = NULL;
- } else if (whichfork == XFS_COW_FORK) {
- kmem_cache_free(xfs_ifork_zone, ip->i_cowfp);
- ip->i_cowfp = NULL;
+ if (ifp->if_format == XFS_DINODE_FMT_LOCAL) {
+ kmem_free(ifp->if_u1.if_data);
+ ifp->if_u1.if_data = NULL;
+ } else if (ifp->if_flags & XFS_IFEXTENTS) {
+ if (ifp->if_height)
+ xfs_iext_destroy(ifp);
}
}
@@ -592,7 +577,7 @@ void
xfs_iflush_fork(
xfs_inode_t *ip,
xfs_dinode_t *dip,
- xfs_inode_log_item_t *iip,
+ struct xfs_inode_log_item *iip,
int whichfork)
{
char *cp;
@@ -618,7 +603,7 @@ xfs_iflush_fork(
}
cp = XFS_DFORK_PTR(dip, whichfork);
mp = ip->i_mount;
- switch (XFS_IFORK_FORMAT(ip, whichfork)) {
+ switch (ifp->if_format) {
case XFS_DINODE_FMT_LOCAL:
if ((iip->ili_fields & dataflag[whichfork]) &&
(ifp->if_bytes > 0)) {
@@ -633,7 +618,7 @@ xfs_iflush_fork(
!(iip->ili_fields & extflag[whichfork]));
if ((iip->ili_fields & extflag[whichfork]) &&
(ifp->if_bytes > 0)) {
- ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0);
+ ASSERT(ifp->if_nextents > 0);
(void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp,
whichfork);
}
@@ -691,48 +676,55 @@ xfs_ifork_init_cow(
ip->i_cowfp = kmem_zone_zalloc(xfs_ifork_zone,
KM_NOFS);
ip->i_cowfp->if_flags = XFS_IFEXTENTS;
- ip->i_cformat = XFS_DINODE_FMT_EXTENTS;
- ip->i_cnextents = 0;
+ ip->i_cowfp->if_format = XFS_DINODE_FMT_EXTENTS;
}
-/* Default fork content verifiers. */
-struct xfs_ifork_ops xfs_default_ifork_ops = {
- .verify_attr = xfs_attr_shortform_verify,
- .verify_dir = xfs_dir2_sf_verify,
- .verify_symlink = xfs_symlink_shortform_verify,
-};
-
/* Verify the inline contents of the data fork of an inode. */
-xfs_failaddr_t
-xfs_ifork_verify_data(
- struct xfs_inode *ip,
- struct xfs_ifork_ops *ops)
+int
+xfs_ifork_verify_local_data(
+ struct xfs_inode *ip)
{
- /* Non-local data fork, we're done. */
- if (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
- return NULL;
+ xfs_failaddr_t fa = NULL;
- /* Check the inline data fork if there is one. */
switch (VFS_I(ip)->i_mode & S_IFMT) {
case S_IFDIR:
- return ops->verify_dir(ip);
+ fa = xfs_dir2_sf_verify(ip);
+ break;
case S_IFLNK:
- return ops->verify_symlink(ip);
+ fa = xfs_symlink_shortform_verify(ip);
+ break;
default:
- return NULL;
+ break;
}
+
+ if (fa) {
+ xfs_inode_verifier_error(ip, -EFSCORRUPTED, "data fork",
+ ip->i_df.if_u1.if_data, ip->i_df.if_bytes, fa);
+ return -EFSCORRUPTED;
+ }
+
+ return 0;
}
/* Verify the inline contents of the attr fork of an inode. */
-xfs_failaddr_t
-xfs_ifork_verify_attr(
- struct xfs_inode *ip,
- struct xfs_ifork_ops *ops)
+int
+xfs_ifork_verify_local_attr(
+ struct xfs_inode *ip)
{
- /* There has to be an attr fork allocated if aformat is local. */
- if (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
- return NULL;
- if (!XFS_IFORK_PTR(ip, XFS_ATTR_FORK))
- return __this_address;
- return ops->verify_attr(ip);
+ struct xfs_ifork *ifp = ip->i_afp;
+ xfs_failaddr_t fa;
+
+ if (!ifp)
+ fa = __this_address;
+ else
+ fa = xfs_attr_shortform_verify(ip);
+
+ if (fa) {
+ xfs_inode_verifier_error(ip, -EFSCORRUPTED, "attr fork",
+ ifp ? ifp->if_u1.if_data : NULL,
+ ifp ? ifp->if_bytes : 0, fa);
+ return -EFSCORRUPTED;
+ }
+
+ return 0;
}
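The local-fork verifiers now log the corruption themselves and hand back an errno, instead of returning a failure address for the caller to interpret. A sketch of the new calling convention (types and messages are placeholders):

    #include <stdio.h>
    #include <stddef.h>

    #define EFSCORRUPTED 117
    typedef const char *xfs_failaddr_t;

    static xfs_failaddr_t shortform_verify(int ok)
    {
        return ok ? NULL : "bad shortform header";
    }

    /* New convention: verify, report, and return an errno in one place. */
    static int verify_local_data(int ok)
    {
        xfs_failaddr_t fa = shortform_verify(ok);

        if (fa) {
            fprintf(stderr, "corruption: %s\n", fa);
            return -EFSCORRUPTED;
        }
        return 0;
    }

    int main(void)
    {
        printf("good fork -> %d\n", verify_local_data(1));
        printf("bad fork  -> %d\n", verify_local_data(0));
        return 0;
    }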
diff --git a/fs/xfs/libxfs/xfs_inode_fork.h b/fs/xfs/libxfs/xfs_inode_fork.h
index 668ee942be22..a4953e95c4f3 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.h
+++ b/fs/xfs/libxfs/xfs_inode_fork.h
@@ -23,6 +23,8 @@ struct xfs_ifork {
} if_u1;
short if_broot_bytes; /* bytes allocated for root */
unsigned char if_flags; /* per-fork flags */
+ int8_t if_format; /* format of this fork */
+ xfs_extnum_t if_nextents; /* # of extents in this fork */
};
/*
@@ -55,43 +57,36 @@ struct xfs_ifork {
((w) == XFS_ATTR_FORK ? \
XFS_IFORK_ASIZE(ip) : \
0))
-#define XFS_IFORK_FORMAT(ip,w) \
- ((w) == XFS_DATA_FORK ? \
- (ip)->i_d.di_format : \
- ((w) == XFS_ATTR_FORK ? \
- (ip)->i_d.di_aformat : \
- (ip)->i_cformat))
-#define XFS_IFORK_FMT_SET(ip,w,n) \
- ((w) == XFS_DATA_FORK ? \
- ((ip)->i_d.di_format = (n)) : \
- ((w) == XFS_ATTR_FORK ? \
- ((ip)->i_d.di_aformat = (n)) : \
- ((ip)->i_cformat = (n))))
-#define XFS_IFORK_NEXTENTS(ip,w) \
- ((w) == XFS_DATA_FORK ? \
- (ip)->i_d.di_nextents : \
- ((w) == XFS_ATTR_FORK ? \
- (ip)->i_d.di_anextents : \
- (ip)->i_cnextents))
-#define XFS_IFORK_NEXT_SET(ip,w,n) \
- ((w) == XFS_DATA_FORK ? \
- ((ip)->i_d.di_nextents = (n)) : \
- ((w) == XFS_ATTR_FORK ? \
- ((ip)->i_d.di_anextents = (n)) : \
- ((ip)->i_cnextents = (n))))
#define XFS_IFORK_MAXEXT(ip, w) \
(XFS_IFORK_SIZE(ip, w) / sizeof(xfs_bmbt_rec_t))
-#define xfs_ifork_has_extents(ip, w) \
- (XFS_IFORK_FORMAT((ip), (w)) == XFS_DINODE_FMT_EXTENTS || \
- XFS_IFORK_FORMAT((ip), (w)) == XFS_DINODE_FMT_BTREE)
+static inline bool xfs_ifork_has_extents(struct xfs_ifork *ifp)
+{
+ return ifp->if_format == XFS_DINODE_FMT_EXTENTS ||
+ ifp->if_format == XFS_DINODE_FMT_BTREE;
+}
+
+static inline xfs_extnum_t xfs_ifork_nextents(struct xfs_ifork *ifp)
+{
+ if (!ifp)
+ return 0;
+ return ifp->if_nextents;
+}
+
+static inline int8_t xfs_ifork_format(struct xfs_ifork *ifp)
+{
+ if (!ifp)
+ return XFS_DINODE_FMT_EXTENTS;
+ return ifp->if_format;
+}
struct xfs_ifork *xfs_iext_state_to_fork(struct xfs_inode *ip, int state);
-int xfs_iformat_fork(struct xfs_inode *, struct xfs_dinode *);
+int xfs_iformat_data_fork(struct xfs_inode *, struct xfs_dinode *);
+int xfs_iformat_attr_fork(struct xfs_inode *, struct xfs_dinode *);
void xfs_iflush_fork(struct xfs_inode *, struct xfs_dinode *,
struct xfs_inode_log_item *, int);
-void xfs_idestroy_fork(struct xfs_inode *, int);
+void xfs_idestroy_fork(struct xfs_ifork *ifp);
void xfs_idata_realloc(struct xfs_inode *ip, int64_t byte_diff,
int whichfork);
void xfs_iroot_realloc(struct xfs_inode *, int, int);
@@ -175,18 +170,7 @@ extern struct kmem_zone *xfs_ifork_zone;
extern void xfs_ifork_init_cow(struct xfs_inode *ip);
-typedef xfs_failaddr_t (*xfs_ifork_verifier_t)(struct xfs_inode *);
-
-struct xfs_ifork_ops {
- xfs_ifork_verifier_t verify_symlink;
- xfs_ifork_verifier_t verify_dir;
- xfs_ifork_verifier_t verify_attr;
-};
-extern struct xfs_ifork_ops xfs_default_ifork_ops;
-
-xfs_failaddr_t xfs_ifork_verify_data(struct xfs_inode *ip,
- struct xfs_ifork_ops *ops);
-xfs_failaddr_t xfs_ifork_verify_attr(struct xfs_inode *ip,
- struct xfs_ifork_ops *ops);
+int xfs_ifork_verify_local_data(struct xfs_inode *ip);
+int xfs_ifork_verify_local_attr(struct xfs_inode *ip);
#endif /* __XFS_INODE_FORK_H__ */
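The new NULL-safe helpers let callers query a fork that may not exist, which is what allows xfs_inode_to_disk above to pass ip->i_afp unconditionally. A standalone rendering of the same idea:

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    #define XFS_DINODE_FMT_EXTENTS 2

    struct xfs_ifork {
        int8_t  if_format;
        int32_t if_nextents;
    };

    /* NULL-safe accessors, mirroring the new inline helpers. */
    static int32_t ifork_nextents(const struct xfs_ifork *ifp)
    {
        return ifp ? ifp->if_nextents : 0;
    }

    static int8_t ifork_format(const struct xfs_ifork *ifp)
    {
        return ifp ? ifp->if_format : XFS_DINODE_FMT_EXTENTS;
    }

    int main(void)
    {
        struct xfs_ifork af = { .if_format = 1, .if_nextents = 5 };

        printf("attr fork: fmt=%d nextents=%d\n",
               (int)ifork_format(&af), (int)ifork_nextents(&af));
        printf("no fork:   fmt=%d nextents=%d\n",
               (int)ifork_format(NULL), (int)ifork_nextents(NULL));
        return 0;
    }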
diff --git a/fs/xfs/libxfs/xfs_log_recover.h b/fs/xfs/libxfs/xfs_log_recover.h
index 3bf671637a91..641132d0e39d 100644
--- a/fs/xfs/libxfs/xfs_log_recover.h
+++ b/fs/xfs/libxfs/xfs_log_recover.h
@@ -7,6 +7,73 @@
#define __XFS_LOG_RECOVER_H__
/*
+ * Each log item type (XFS_LI_*) gets its own xlog_recover_item_ops to
+ * define how recovery should work for that type of log item.
+ */
+struct xlog_recover_item;
+
+/* Sorting hat for log items as they're read in. */
+enum xlog_recover_reorder {
+ XLOG_REORDER_BUFFER_LIST,
+ XLOG_REORDER_ITEM_LIST,
+ XLOG_REORDER_INODE_BUFFER_LIST,
+ XLOG_REORDER_CANCEL_LIST,
+};
+
+struct xlog_recover_item_ops {
+ uint16_t item_type; /* XFS_LI_* type code. */
+
+ /*
+ * Help sort recovered log items into the order required to replay them
+ * correctly. Log item types that always use XLOG_REORDER_ITEM_LIST do
+ * not have to supply a function here. See the comment preceding
+ * xlog_recover_reorder_trans for more details about what the return
+ * values mean.
+ */
+ enum xlog_recover_reorder (*reorder)(struct xlog_recover_item *item);
+
+ /* Start readahead for pass2, if provided. */
+ void (*ra_pass2)(struct xlog *log, struct xlog_recover_item *item);
+
+ /* Do whatever work we need to do for pass1, if provided. */
+ int (*commit_pass1)(struct xlog *log, struct xlog_recover_item *item);
+
+ /*
+ * This function should do whatever work is needed for pass2 of log
+ * recovery, if provided.
+ *
+ * If the recovered item is an intent item, this function should parse
+ * the recovered item to construct an in-core log intent item and
+ * insert it into the AIL. The in-core log intent item should have 1
+ * refcount so that the item is freed either (a) when we commit the
+ * recovered log item for the intent-done item; (b) replay the work and
+ * log a new intent-done item; or (c) recovery fails and we have to
+ * abort.
+ *
+ * If the recovered item is an intent-done item, this function should
+ * parse the recovered item to find the id of the corresponding intent
+ * log item. Next, it should find the in-core log intent item in the
+ * AIL and release it.
+ */
+ int (*commit_pass2)(struct xlog *log, struct list_head *buffer_list,
+ struct xlog_recover_item *item, xfs_lsn_t lsn);
+};
+
+extern const struct xlog_recover_item_ops xlog_icreate_item_ops;
+extern const struct xlog_recover_item_ops xlog_buf_item_ops;
+extern const struct xlog_recover_item_ops xlog_inode_item_ops;
+extern const struct xlog_recover_item_ops xlog_dquot_item_ops;
+extern const struct xlog_recover_item_ops xlog_quotaoff_item_ops;
+extern const struct xlog_recover_item_ops xlog_bui_item_ops;
+extern const struct xlog_recover_item_ops xlog_bud_item_ops;
+extern const struct xlog_recover_item_ops xlog_efi_item_ops;
+extern const struct xlog_recover_item_ops xlog_efd_item_ops;
+extern const struct xlog_recover_item_ops xlog_rui_item_ops;
+extern const struct xlog_recover_item_ops xlog_rud_item_ops;
+extern const struct xlog_recover_item_ops xlog_cui_item_ops;
+extern const struct xlog_recover_item_ops xlog_cud_item_ops;
+
+/*
* Macros, structures, prototypes for internal log manager use.
*/
@@ -22,13 +89,13 @@
/*
* item headers are in ri_buf[0]. Additional buffers follow.
*/
-typedef struct xlog_recover_item {
+struct xlog_recover_item {
struct list_head ri_list;
- int ri_type;
int ri_cnt; /* count of regions found */
int ri_total; /* total regions */
- xfs_log_iovec_t *ri_buf; /* ptr to regions buffer */
-} xlog_recover_item_t;
+ struct xfs_log_iovec *ri_buf; /* ptr to regions buffer */
+ const struct xlog_recover_item_ops *ri_ops;
+};
struct xlog_recover {
struct hlist_node r_list;
@@ -51,4 +118,12 @@ struct xlog_recover {
#define XLOG_RECOVER_PASS1 1
#define XLOG_RECOVER_PASS2 2
+void xlog_buf_readahead(struct xlog *log, xfs_daddr_t blkno, uint len,
+ const struct xfs_buf_ops *ops);
+bool xlog_is_buffer_cancelled(struct xlog *log, xfs_daddr_t blkno, uint len);
+void xlog_recover_iodone(struct xfs_buf *bp);
+
+void xlog_recover_release_intent(struct xlog *log, unsigned short intent_type,
+ uint64_t intent_id);
+
#endif /* __XFS_LOG_RECOVER_H__ */
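Recovery dispatch moves from a switch on ri_type to a per-item-type ops table, with ri_ops stored on each recovered item. A small model of registering one item type and dispatching through it (names and the type code are illustrative):

    #include <stdio.h>
    #include <stddef.h>

    struct recover_item;

    /* Cut-down vtable; most hooks in the real one are optional. */
    struct recover_item_ops {
        unsigned short item_type;
        int (*commit_pass2)(struct recover_item *item);
    };

    struct recover_item {
        const struct recover_item_ops *ri_ops;
        int payload;
    };

    static int buf_commit_pass2(struct recover_item *item)
    {
        printf("pass 2: replayed buffer item %d\n", item->payload);
        return 0;
    }

    static const struct recover_item_ops buf_item_ops = {
        .item_type    = 0x1234,    /* illustrative type code */
        .commit_pass2 = buf_commit_pass2,
    };

    int main(void)
    {
        struct recover_item item = { .ri_ops = &buf_item_ops, .payload = 9 };

        /* Dispatch through the table; skip hooks the type didn't supply. */
        if (item.ri_ops->commit_pass2)
            return item.ri_ops->commit_pass2(&item);
        return 0;
    }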
diff --git a/fs/xfs/libxfs/xfs_quota_defs.h b/fs/xfs/libxfs/xfs_quota_defs.h
index b2113b17e53c..56d9dd787e7b 100644
--- a/fs/xfs/libxfs/xfs_quota_defs.h
+++ b/fs/xfs/libxfs/xfs_quota_defs.h
@@ -100,7 +100,6 @@ typedef uint16_t xfs_qwarncnt_t;
#define XFS_QMOPT_FORCE_RES 0x0000010 /* ignore quota limits */
#define XFS_QMOPT_SBVERSION 0x0000040 /* change superblock version num */
#define XFS_QMOPT_GQUOTA 0x0002000 /* group dquot requested */
-#define XFS_QMOPT_ENOSPC 0x0004000 /* enospc instead of edquot (prj) */
/*
* flags to xfs_trans_mod_dquot to indicate which field needs to be
diff --git a/fs/xfs/libxfs/xfs_rtbitmap.c b/fs/xfs/libxfs/xfs_rtbitmap.c
index f42c74cb8be5..9498ced947be 100644
--- a/fs/xfs/libxfs/xfs_rtbitmap.c
+++ b/fs/xfs/libxfs/xfs_rtbitmap.c
@@ -66,7 +66,7 @@ xfs_rtbuf_get(
ip = issum ? mp->m_rsumip : mp->m_rbmip;
- error = xfs_bmapi_read(ip, block, 1, &map, &nmap, XFS_DATA_FORK);
+ error = xfs_bmapi_read(ip, block, 1, &map, &nmap, 0);
if (error)
return error;
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index c526c5e5ab76..4df87546bd40 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -243,7 +243,7 @@ xfs_validate_sb_common(
} else if (sbp->sb_qflags & (XFS_PQUOTA_ENFD | XFS_GQUOTA_ENFD |
XFS_PQUOTA_CHKD | XFS_GQUOTA_CHKD)) {
xfs_notice(mp,
-"Superblock earlier than Version 5 has XFS_[PQ]UOTA_{ENFD|CHKD} bits.");
+"Superblock earlier than Version 5 has XFS_{P|G}QUOTA_{ENFD|CHKD} bits.");
return -EFSCORRUPTED;
}
diff --git a/fs/xfs/libxfs/xfs_symlink_remote.c b/fs/xfs/libxfs/xfs_symlink_remote.c
index 3b8260ca7d1b..594bc447a7dd 100644
--- a/fs/xfs/libxfs/xfs_symlink_remote.c
+++ b/fs/xfs/libxfs/xfs_symlink_remote.c
@@ -204,16 +204,12 @@ xfs_failaddr_t
xfs_symlink_shortform_verify(
struct xfs_inode *ip)
{
- char *sfp;
- char *endp;
- struct xfs_ifork *ifp;
- int size;
-
- ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_LOCAL);
- ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
- sfp = (char *)ifp->if_u1.if_data;
- size = ifp->if_bytes;
- endp = sfp + size;
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+ char *sfp = (char *)ifp->if_u1.if_data;
+ int size = ifp->if_bytes;
+ char *endp = sfp + size;
+
+ ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);
/*
* Zero length symlinks should never occur in memory as they are
diff --git a/fs/xfs/libxfs/xfs_trans_inode.c b/fs/xfs/libxfs/xfs_trans_inode.c
index 2b8ccb5b975d..b5dfb6654842 100644
--- a/fs/xfs/libxfs/xfs_trans_inode.c
+++ b/fs/xfs/libxfs/xfs_trans_inode.c
@@ -27,7 +27,7 @@ xfs_trans_ijoin(
struct xfs_inode *ip,
uint lock_flags)
{
- xfs_inode_log_item_t *iip;
+ struct xfs_inode_log_item *iip;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
if (ip->i_itemp == NULL)
diff --git a/fs/xfs/scrub/bmap.c b/fs/xfs/scrub/bmap.c
index add8598eacd5..7badd6dfe544 100644
--- a/fs/xfs/scrub/bmap.c
+++ b/fs/xfs/scrub/bmap.c
@@ -566,8 +566,9 @@ xchk_bmap_check_rmaps(
struct xfs_scrub *sc,
int whichfork)
{
- loff_t size;
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(sc->ip, whichfork);
xfs_agnumber_t agno;
+ bool zero_size;
int error;
if (!xfs_sb_version_hasrmapbt(&sc->mp->m_sb) ||
@@ -579,6 +580,8 @@ xchk_bmap_check_rmaps(
if (XFS_IS_REALTIME_INODE(sc->ip) && whichfork == XFS_DATA_FORK)
return 0;
+ ASSERT(XFS_IFORK_PTR(sc->ip, whichfork) != NULL);
+
/*
* Only do this for complex maps that are in btree format, or for
* situations where we would seem to have a size but zero extents.
@@ -586,19 +589,14 @@ xchk_bmap_check_rmaps(
* to flag this bmap as corrupt if there are rmaps that need to be
* reattached.
*/
- switch (whichfork) {
- case XFS_DATA_FORK:
- size = i_size_read(VFS_I(sc->ip));
- break;
- case XFS_ATTR_FORK:
- size = XFS_IFORK_Q(sc->ip);
- break;
- default:
- size = 0;
- break;
- }
- if (XFS_IFORK_FORMAT(sc->ip, whichfork) != XFS_DINODE_FMT_BTREE &&
- (size == 0 || XFS_IFORK_NEXTENTS(sc->ip, whichfork) > 0))
+
+ if (whichfork == XFS_DATA_FORK)
+ zero_size = i_size_read(VFS_I(sc->ip)) == 0;
+ else
+ zero_size = false;
+
+ if (ifp->if_format != XFS_DINODE_FMT_BTREE &&
+ (zero_size || ifp->if_nextents > 0))
return 0;
for (agno = 0; agno < sc->mp->m_sb.sb_agcount; agno++) {
@@ -627,12 +625,14 @@ xchk_bmap(
struct xchk_bmap_info info = { NULL };
struct xfs_mount *mp = sc->mp;
struct xfs_inode *ip = sc->ip;
- struct xfs_ifork *ifp;
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
xfs_fileoff_t endoff;
struct xfs_iext_cursor icur;
int error = 0;
- ifp = XFS_IFORK_PTR(ip, whichfork);
+ /* Non-existent forks can be ignored. */
+ if (!ifp)
+ goto out;
info.is_rt = whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip);
info.whichfork = whichfork;
@@ -641,9 +641,6 @@ xchk_bmap(
switch (whichfork) {
case XFS_COW_FORK:
- /* Non-existent CoW forks are ignorable. */
- if (!ifp)
- goto out;
/* No CoW forks on non-reflink inodes/filesystems. */
if (!xfs_is_reflink_inode(ip)) {
xchk_ino_set_corrupt(sc, sc->ip->i_ino);
@@ -651,8 +648,6 @@ xchk_bmap(
}
break;
case XFS_ATTR_FORK:
- if (!ifp)
- goto out_check_rmap;
if (!xfs_sb_version_hasattr(&mp->m_sb) &&
!xfs_sb_version_hasattr2(&mp->m_sb))
xchk_ino_set_corrupt(sc, sc->ip->i_ino);
@@ -663,7 +658,7 @@ xchk_bmap(
}
/* Check the fork values */
- switch (XFS_IFORK_FORMAT(ip, whichfork)) {
+ switch (ifp->if_format) {
case XFS_DINODE_FMT_UUID:
case XFS_DINODE_FMT_DEV:
case XFS_DINODE_FMT_LOCAL:
@@ -717,7 +712,6 @@ xchk_bmap(
goto out;
}
-out_check_rmap:
error = xchk_bmap_check_rmaps(sc, whichfork);
if (!xchk_fblock_xref_process_error(sc, whichfork, 0, &error))
goto out;
diff --git a/fs/xfs/scrub/dabtree.c b/fs/xfs/scrub/dabtree.c
index 9a2e27ac1300..44b15015021f 100644
--- a/fs/xfs/scrub/dabtree.c
+++ b/fs/xfs/scrub/dabtree.c
@@ -468,7 +468,7 @@ xchk_da_btree(
int error;
/* Skip short format data structures; no btree to scan. */
- if (!xfs_ifork_has_extents(sc->ip, whichfork))
+ if (!xfs_ifork_has_extents(XFS_IFORK_PTR(sc->ip, whichfork)))
return 0;
/* Set up initial da state. */
diff --git a/fs/xfs/scrub/dir.c b/fs/xfs/scrub/dir.c
index fe2a6e030c8a..7c432997edad 100644
--- a/fs/xfs/scrub/dir.c
+++ b/fs/xfs/scrub/dir.c
@@ -635,7 +635,7 @@ xchk_directory_blocks(
{
struct xfs_bmbt_irec got;
struct xfs_da_args args;
- struct xfs_ifork *ifp;
+ struct xfs_ifork *ifp = XFS_IFORK_PTR(sc->ip, XFS_DATA_FORK);
struct xfs_mount *mp = sc->mp;
xfs_fileoff_t leaf_lblk;
xfs_fileoff_t free_lblk;
@@ -647,11 +647,10 @@ xchk_directory_blocks(
int error;
/* Ignore local format directories. */
- if (sc->ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
- sc->ip->i_d.di_format != XFS_DINODE_FMT_BTREE)
+ if (ifp->if_format != XFS_DINODE_FMT_EXTENTS &&
+ ifp->if_format != XFS_DINODE_FMT_BTREE)
return 0;
- ifp = XFS_IFORK_PTR(sc->ip, XFS_DATA_FORK);
lblk = XFS_B_TO_FSB(mp, XFS_DIR2_DATA_OFFSET);
leaf_lblk = XFS_B_TO_FSB(mp, XFS_DIR2_LEAF_OFFSET);
free_lblk = XFS_B_TO_FSB(mp, XFS_DIR2_FREE_OFFSET);
diff --git a/fs/xfs/scrub/ialloc.c b/fs/xfs/scrub/ialloc.c
index 64c217eb06a7..6517d67e8d51 100644
--- a/fs/xfs/scrub/ialloc.c
+++ b/fs/xfs/scrub/ialloc.c
@@ -278,8 +278,7 @@ xchk_iallocbt_check_cluster(
&XFS_RMAP_OINFO_INODES);
/* Grab the inode cluster buffer. */
- error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap, &dip, &cluster_bp,
- 0, 0);
+ error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap, &dip, &cluster_bp, 0);
if (!xchk_btree_xref_process_error(bs->sc, bs->cur, 0, &error))
return error;
diff --git a/fs/xfs/scrub/parent.c b/fs/xfs/scrub/parent.c
index 5705adc43a75..855aa8bcab64 100644
--- a/fs/xfs/scrub/parent.c
+++ b/fs/xfs/scrub/parent.c
@@ -90,7 +90,7 @@ xchk_parent_count_parent_dentries(
* if there is one.
*/
lock_mode = xfs_ilock_data_map_shared(parent);
- if (parent->i_d.di_nextents > 0)
+ if (parent->i_df.if_nextents > 0)
error = xfs_dir3_data_readahead(parent, 0, 0);
xfs_iunlock(parent, lock_mode);
if (error)
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 1fd4fb7a607c..b35611882ff9 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -382,7 +382,7 @@ xfs_map_blocks(
*/
retry:
xfs_ilock(ip, XFS_ILOCK_SHARED);
- ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
+ ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE ||
(ip->i_df.if_flags & XFS_IFEXTENTS));
/*
diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c
index c42f90e16b4f..bfad669e6b2f 100644
--- a/fs/xfs/xfs_attr_inactive.c
+++ b/fs/xfs/xfs_attr_inactive.c
@@ -367,7 +367,7 @@ xfs_attr_inactive(
* removal below.
*/
if (xfs_inode_hasattr(dp) &&
- dp->i_d.di_aformat != XFS_DINODE_FMT_LOCAL) {
+ dp->i_afp->if_format != XFS_DINODE_FMT_LOCAL) {
error = xfs_attr3_root_inactive(&trans, dp);
if (error)
goto out_cancel;
@@ -388,8 +388,11 @@ out_cancel:
xfs_trans_cancel(trans);
out_destroy_fork:
/* kill the in-core attr fork before we drop the inode lock */
- if (dp->i_afp)
- xfs_idestroy_fork(dp, XFS_ATTR_FORK);
+ if (dp->i_afp) {
+ xfs_idestroy_fork(dp->i_afp);
+ kmem_cache_free(xfs_ifork_zone, dp->i_afp);
+ dp->i_afp = NULL;
+ }
if (lock_mode)
xfs_iunlock(dp, lock_mode);
return error;
diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c
index 5ff1d929d3b5..e380bd1a9bfc 100644
--- a/fs/xfs/xfs_attr_list.c
+++ b/fs/xfs/xfs_attr_list.c
@@ -512,9 +512,9 @@ xfs_attr_list_ilocked(
*/
if (!xfs_inode_hasattr(dp))
return 0;
- else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL)
+ if (dp->i_afp->if_format == XFS_DINODE_FMT_LOCAL)
return xfs_attr_shortform_list(context);
- else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK))
+ if (xfs_bmap_one_block(dp, XFS_ATTR_FORK))
return xfs_attr_leaf_list(context);
return xfs_attr_node_list(context);
}
diff --git a/fs/xfs/xfs_bmap_item.c b/fs/xfs/xfs_bmap_item.c
index ee6f4229cebc..6736c5ab188f 100644
--- a/fs/xfs/xfs_bmap_item.c
+++ b/fs/xfs/xfs_bmap_item.c
@@ -22,16 +22,20 @@
#include "xfs_bmap_btree.h"
#include "xfs_trans_space.h"
#include "xfs_error.h"
+#include "xfs_log_priv.h"
+#include "xfs_log_recover.h"
kmem_zone_t *xfs_bui_zone;
kmem_zone_t *xfs_bud_zone;
+static const struct xfs_item_ops xfs_bui_item_ops;
+
static inline struct xfs_bui_log_item *BUI_ITEM(struct xfs_log_item *lip)
{
return container_of(lip, struct xfs_bui_log_item, bui_item);
}
-void
+STATIC void
xfs_bui_item_free(
struct xfs_bui_log_item *buip)
{
@@ -45,13 +49,13 @@ xfs_bui_item_free(
* committed vs unpin operations in bulk insert operations. Hence the reference
* count to ensure only the last caller frees the BUI.
*/
-void
+STATIC void
xfs_bui_release(
struct xfs_bui_log_item *buip)
{
ASSERT(atomic_read(&buip->bui_refcount) > 0);
if (atomic_dec_and_test(&buip->bui_refcount)) {
- xfs_trans_ail_remove(&buip->bui_item, SHUTDOWN_LOG_IO_ERROR);
+ xfs_trans_ail_delete(&buip->bui_item, SHUTDOWN_LOG_IO_ERROR);
xfs_bui_item_free(buip);
}
}
@@ -124,17 +128,10 @@ xfs_bui_item_release(
xfs_bui_release(BUI_ITEM(lip));
}
-static const struct xfs_item_ops xfs_bui_item_ops = {
- .iop_size = xfs_bui_item_size,
- .iop_format = xfs_bui_item_format,
- .iop_unpin = xfs_bui_item_unpin,
- .iop_release = xfs_bui_item_release,
-};
-
/*
* Allocate and initialize a bui item with the given number of extents.
*/
-struct xfs_bui_log_item *
+STATIC struct xfs_bui_log_item *
xfs_bui_init(
struct xfs_mount *mp)
@@ -278,27 +275,6 @@ xfs_bmap_update_diff_items(
return ba->bi_owner->i_ino - bb->bi_owner->i_ino;
}
-/* Get an BUI. */
-STATIC void *
-xfs_bmap_update_create_intent(
- struct xfs_trans *tp,
- unsigned int count)
-{
- struct xfs_bui_log_item *buip;
-
- ASSERT(count == XFS_BUI_MAX_FAST_EXTENTS);
- ASSERT(tp != NULL);
-
- buip = xfs_bui_init(tp->t_mountp);
- ASSERT(buip != NULL);
-
- /*
- * Get a log_item_desc to point at the new item.
- */
- xfs_trans_add_item(tp, &buip->bui_item);
- return buip;
-}
-
/* Set the map extent flags for this mapping. */
static void
xfs_trans_set_bmap_flags(
@@ -326,16 +302,12 @@ xfs_trans_set_bmap_flags(
STATIC void
xfs_bmap_update_log_item(
struct xfs_trans *tp,
- void *intent,
- struct list_head *item)
+ struct xfs_bui_log_item *buip,
+ struct xfs_bmap_intent *bmap)
{
- struct xfs_bui_log_item *buip = intent;
- struct xfs_bmap_intent *bmap;
uint next_extent;
struct xfs_map_extent *map;
- bmap = container_of(item, struct xfs_bmap_intent, bi_list);
-
tp->t_flags |= XFS_TRANS_DIRTY;
set_bit(XFS_LI_DIRTY, &buip->bui_item.li_flags);
@@ -355,23 +327,44 @@ xfs_bmap_update_log_item(
bmap->bi_bmap.br_state);
}
+static struct xfs_log_item *
+xfs_bmap_update_create_intent(
+ struct xfs_trans *tp,
+ struct list_head *items,
+ unsigned int count,
+ bool sort)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_bui_log_item *buip = xfs_bui_init(mp);
+ struct xfs_bmap_intent *bmap;
+
+ ASSERT(count == XFS_BUI_MAX_FAST_EXTENTS);
+
+ xfs_trans_add_item(tp, &buip->bui_item);
+ if (sort)
+ list_sort(mp, items, xfs_bmap_update_diff_items);
+ list_for_each_entry(bmap, items, bi_list)
+ xfs_bmap_update_log_item(tp, buip, bmap);
+ return &buip->bui_item;
+}
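With sorting and per-item logging folded into ->create_intent, the deferred-op core shrinks to a single call per pending work list. A rough sketch of that caller, assuming the struct xfs_defer_pending field names and the defer_op_types table used in fs/xfs/libxfs/xfs_defer.c (simplified, not the verbatim kernel code):

static void
xfs_defer_create_intent_sketch(
	struct xfs_trans		*tp,
	struct xfs_defer_pending	*dfp,
	bool				sort)
{
	const struct xfs_defer_op_type	*ops = defer_op_types[dfp->dfp_type];

	/* one hook now sorts the work list and logs each item into the intent */
	dfp->dfp_intent = ops->create_intent(tp, &dfp->dfp_work,
			dfp->dfp_count, sort);
}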
+
/* Get a BUD so we can process all the deferred bmap updates. */
-STATIC void *
+static struct xfs_log_item *
xfs_bmap_update_create_done(
struct xfs_trans *tp,
- void *intent,
+ struct xfs_log_item *intent,
unsigned int count)
{
- return xfs_trans_get_bud(tp, intent);
+ return &xfs_trans_get_bud(tp, BUI_ITEM(intent))->bud_item;
}
/* Process a deferred bmap update. */
STATIC int
xfs_bmap_update_finish_item(
struct xfs_trans *tp,
+ struct xfs_log_item *done,
struct list_head *item,
- void *done_item,
- void **state)
+ struct xfs_btree_cur **state)
{
struct xfs_bmap_intent *bmap;
xfs_filblks_t count;
@@ -379,7 +372,7 @@ xfs_bmap_update_finish_item(
bmap = container_of(item, struct xfs_bmap_intent, bi_list);
count = bmap->bi_bmap.br_blockcount;
- error = xfs_trans_log_finish_bmap_update(tp, done_item,
+ error = xfs_trans_log_finish_bmap_update(tp, BUD_ITEM(done),
bmap->bi_type,
bmap->bi_owner, bmap->bi_whichfork,
bmap->bi_bmap.br_startoff,
@@ -398,9 +391,9 @@ xfs_bmap_update_finish_item(
/* Abort all pending BUIs. */
STATIC void
xfs_bmap_update_abort_intent(
- void *intent)
+ struct xfs_log_item *intent)
{
- xfs_bui_release(intent);
+ xfs_bui_release(BUI_ITEM(intent));
}
/* Cancel a deferred bmap update. */
@@ -416,10 +409,8 @@ xfs_bmap_update_cancel_item(
const struct xfs_defer_op_type xfs_bmap_update_defer_type = {
.max_items = XFS_BUI_MAX_FAST_EXTENTS,
- .diff_items = xfs_bmap_update_diff_items,
.create_intent = xfs_bmap_update_create_intent,
.abort_intent = xfs_bmap_update_abort_intent,
- .log_item = xfs_bmap_update_log_item,
.create_done = xfs_bmap_update_create_done,
.finish_item = xfs_bmap_update_finish_item,
.cancel_item = xfs_bmap_update_cancel_item,
@@ -429,32 +420,30 @@ const struct xfs_defer_op_type xfs_bmap_update_defer_type = {
* Process a bmap update intent item that was recovered from the log.
* We need to update some inode's bmbt.
*/
-int
-xfs_bui_recover(
- struct xfs_trans *parent_tp,
- struct xfs_bui_log_item *buip)
+STATIC int
+xfs_bui_item_recover(
+ struct xfs_log_item *lip,
+ struct xfs_trans *parent_tp)
{
- int error = 0;
- unsigned int bui_type;
+ struct xfs_bmbt_irec irec;
+ struct xfs_bui_log_item *buip = BUI_ITEM(lip);
+ struct xfs_trans *tp;
+ struct xfs_inode *ip = NULL;
+ struct xfs_mount *mp = parent_tp->t_mountp;
struct xfs_map_extent *bmap;
+ struct xfs_bud_log_item *budp;
xfs_fsblock_t startblock_fsb;
xfs_fsblock_t inode_fsb;
xfs_filblks_t count;
- bool op_ok;
- struct xfs_bud_log_item *budp;
+ xfs_exntst_t state;
enum xfs_bmap_intent_type type;
+ bool op_ok;
+ unsigned int bui_type;
int whichfork;
- xfs_exntst_t state;
- struct xfs_trans *tp;
- struct xfs_inode *ip = NULL;
- struct xfs_bmbt_irec irec;
- struct xfs_mount *mp = parent_tp->t_mountp;
-
- ASSERT(!test_bit(XFS_BUI_RECOVERED, &buip->bui_flags));
+ int error = 0;
/* Only one mapping operation per BUI... */
if (buip->bui_format.bui_nextents != XFS_BUI_MAX_FAST_EXTENTS) {
- set_bit(XFS_BUI_RECOVERED, &buip->bui_flags);
xfs_bui_release(buip);
return -EFSCORRUPTED;
}
@@ -488,7 +477,6 @@ xfs_bui_recover(
* This will pull the BUI from the AIL and
* free the memory associated with it.
*/
- set_bit(XFS_BUI_RECOVERED, &buip->bui_flags);
xfs_bui_release(buip);
return -EFSCORRUPTED;
}
@@ -546,7 +534,6 @@ xfs_bui_recover(
xfs_bmap_unmap_extent(tp, ip, &irec);
}
- set_bit(XFS_BUI_RECOVERED, &buip->bui_flags);
xfs_defer_move(parent_tp, tp);
error = xfs_trans_commit(tp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
@@ -563,3 +550,121 @@ err_inode:
}
return error;
}
+
+STATIC bool
+xfs_bui_item_match(
+ struct xfs_log_item *lip,
+ uint64_t intent_id)
+{
+ return BUI_ITEM(lip)->bui_format.bui_id == intent_id;
+}
+
+static const struct xfs_item_ops xfs_bui_item_ops = {
+ .iop_size = xfs_bui_item_size,
+ .iop_format = xfs_bui_item_format,
+ .iop_unpin = xfs_bui_item_unpin,
+ .iop_release = xfs_bui_item_release,
+ .iop_recover = xfs_bui_item_recover,
+ .iop_match = xfs_bui_item_match,
+};
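Together, ->iop_recover and ->iop_match let generic code process intents without knowing their type. A hedged sketch of the AIL walk that ->iop_recover enables (LSN cutoffs and error unwinding are elided; the real walker is in fs/xfs/xfs_log_recover.c):

static int
xlog_recover_process_intents_sketch(
	struct xlog		*log,
	struct xfs_trans	*parent_tp)
{
	struct xfs_ail_cursor	cur;
	struct xfs_ail		*ailp = log->l_ailp;
	struct xfs_log_item	*lip;
	int			error = 0;

	spin_lock(&ailp->ail_lock);
	for (lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
	     lip != NULL;
	     lip = xfs_trans_ail_cursor_next(ailp, &cur)) {
		if (!lip->li_ops->iop_recover)
			continue;

		/* the cursor keeps our place while we drop the AIL lock */
		spin_unlock(&ailp->ail_lock);
		error = lip->li_ops->iop_recover(lip, parent_tp);
		spin_lock(&ailp->ail_lock);
		if (error)
			break;
	}
	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->ail_lock);
	return error;
}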
+
+/*
+ * Copy a BUI format buffer from the given buf into the destination
+ * BUI format structure. The BUI/BUD items were designed not to need any
+ * special alignment handling.
+ */
+static int
+xfs_bui_copy_format(
+ struct xfs_log_iovec *buf,
+ struct xfs_bui_log_format *dst_bui_fmt)
+{
+ struct xfs_bui_log_format *src_bui_fmt;
+ uint len;
+
+ src_bui_fmt = buf->i_addr;
+ len = xfs_bui_log_format_sizeof(src_bui_fmt->bui_nextents);
+
+ if (buf->i_len == len) {
+ memcpy(dst_bui_fmt, src_bui_fmt, len);
+ return 0;
+ }
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
+ return -EFSCORRUPTED;
+}
+
+/*
+ * This routine is called to create an in-core extent bmap update
+ * item from the bui format structure which was logged on disk.
+ * It allocates an in-core bui, copies the extents from the format
+ * structure into it, and adds the bui to the AIL with the given
+ * LSN.
+ */
+STATIC int
+xlog_recover_bui_commit_pass2(
+ struct xlog *log,
+ struct list_head *buffer_list,
+ struct xlog_recover_item *item,
+ xfs_lsn_t lsn)
+{
+ int error;
+ struct xfs_mount *mp = log->l_mp;
+ struct xfs_bui_log_item *buip;
+ struct xfs_bui_log_format *bui_formatp;
+
+ bui_formatp = item->ri_buf[0].i_addr;
+
+ if (bui_formatp->bui_nextents != XFS_BUI_MAX_FAST_EXTENTS) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
+ return -EFSCORRUPTED;
+ }
+ buip = xfs_bui_init(mp);
+ error = xfs_bui_copy_format(&item->ri_buf[0], &buip->bui_format);
+ if (error) {
+ xfs_bui_item_free(buip);
+ return error;
+ }
+ atomic_set(&buip->bui_next_extent, bui_formatp->bui_nextents);
+ /*
+ * Insert the intent into the AIL directly and drop one reference so
+ * that finishing or canceling the work will drop the other.
+ */
+ xfs_trans_ail_insert(log->l_ailp, &buip->bui_item, lsn);
+ xfs_bui_release(buip);
+ return 0;
+}
+
+const struct xlog_recover_item_ops xlog_bui_item_ops = {
+ .item_type = XFS_LI_BUI,
+ .commit_pass2 = xlog_recover_bui_commit_pass2,
+};
+
+/*
+ * This routine is called when a BUD format structure is found in a committed
+ * transaction in the log. Its purpose is to cancel the corresponding BUI if it
+ * was still in the log. To do this it searches the AIL for the BUI with an id
+ * equal to that in the BUD format structure. If we find it we drop the BUD
+ * reference, which removes the BUI from the AIL and frees it.
+ */
+STATIC int
+xlog_recover_bud_commit_pass2(
+ struct xlog *log,
+ struct list_head *buffer_list,
+ struct xlog_recover_item *item,
+ xfs_lsn_t lsn)
+{
+ struct xfs_bud_log_format *bud_formatp;
+
+ bud_formatp = item->ri_buf[0].i_addr;
+ if (item->ri_buf[0].i_len != sizeof(struct xfs_bud_log_format)) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
+ return -EFSCORRUPTED;
+ }
+
+ xlog_recover_release_intent(log, XFS_LI_BUI, bud_formatp->bud_bui_id);
+ return 0;
+}
+
+const struct xlog_recover_item_ops xlog_bud_item_ops = {
+ .item_type = XFS_LI_BUD,
+ .commit_pass2 = xlog_recover_bud_commit_pass2,
+};
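The BUD handler above resolves its BUI through xlog_recover_release_intent(), declared in the xfs_log_recover.h hunk earlier in this patch. A plausible shape for it, built on the new ->iop_match hook (a sketch; the actual body lands in fs/xfs/xfs_log_recover.c):

void
xlog_recover_release_intent(
	struct xlog		*log,
	unsigned short		intent_type,
	uint64_t		intent_id)
{
	struct xfs_ail_cursor	cur;
	struct xfs_ail		*ailp = log->l_ailp;
	struct xfs_log_item	*lip;

	spin_lock(&ailp->ail_lock);
	for (lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
	     lip != NULL;
	     lip = xfs_trans_ail_cursor_next(ailp, &cur)) {
		if (lip->li_type != intent_type)
			continue;
		if (!lip->li_ops->iop_match(lip, intent_id))
			continue;

		/* drop the AIL lock before releasing the intent */
		spin_unlock(&ailp->ail_lock);
		lip->li_ops->iop_release(lip);
		spin_lock(&ailp->ail_lock);
		break;
	}
	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->ail_lock);
}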
diff --git a/fs/xfs/xfs_bmap_item.h b/fs/xfs/xfs_bmap_item.h
index ad479cc73de8..b9be62f8bd52 100644
--- a/fs/xfs/xfs_bmap_item.h
+++ b/fs/xfs/xfs_bmap_item.h
@@ -33,11 +33,6 @@ struct kmem_zone;
#define XFS_BUI_MAX_FAST_EXTENTS 1
/*
- * Define BUI flag bits. Manipulated by set/clear/test_bit operators.
- */
-#define XFS_BUI_RECOVERED 1
-
-/*
* This is the "bmap update intent" log item. It is used to log the fact that
* some file block mappings need to change. It is used in conjunction with the
* "bmap update done" log item described below.
@@ -49,7 +44,6 @@ struct xfs_bui_log_item {
struct xfs_log_item bui_item;
atomic_t bui_refcount;
atomic_t bui_next_extent;
- unsigned long bui_flags; /* misc flags */
struct xfs_bui_log_format bui_format;
};
@@ -74,9 +68,4 @@ struct xfs_bud_log_item {
extern struct kmem_zone *xfs_bui_zone;
extern struct kmem_zone *xfs_bud_zone;
-struct xfs_bui_log_item *xfs_bui_init(struct xfs_mount *);
-void xfs_bui_item_free(struct xfs_bui_log_item *);
-void xfs_bui_release(struct xfs_bui_log_item *);
-int xfs_bui_recover(struct xfs_trans *parent_tp, struct xfs_bui_log_item *buip);
-
#endif /* __XFS_BMAP_ITEM_H__ */
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 4f800f7fe888..f37f5cc4b19f 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -223,7 +223,7 @@ xfs_bmap_count_blocks(
if (!ifp)
return 0;
- switch (XFS_IFORK_FORMAT(ip, whichfork)) {
+ switch (ifp->if_format) {
case XFS_DINODE_FMT_BTREE:
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
error = xfs_iread_extents(tp, ip, whichfork);
@@ -449,7 +449,7 @@ xfs_getbmap(
break;
}
- switch (XFS_IFORK_FORMAT(ip, whichfork)) {
+ switch (ifp->if_format) {
case XFS_DINODE_FMT_EXTENTS:
case XFS_DINODE_FMT_BTREE:
break;
@@ -1210,17 +1210,26 @@ xfs_swap_extents_check_format(
struct xfs_inode *ip, /* target inode */
struct xfs_inode *tip) /* tmp inode */
{
+ struct xfs_ifork *ifp = &ip->i_df;
+ struct xfs_ifork *tifp = &tip->i_df;
+
+ /* User/group/project quota ids must match if quotas are enforced. */
+ if (XFS_IS_QUOTA_ON(ip->i_mount) &&
+ (!uid_eq(VFS_I(ip)->i_uid, VFS_I(tip)->i_uid) ||
+ !gid_eq(VFS_I(ip)->i_gid, VFS_I(tip)->i_gid) ||
+ ip->i_d.di_projid != tip->i_d.di_projid))
+ return -EINVAL;
/* Should never get a local format */
- if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
- tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
+ if (ifp->if_format == XFS_DINODE_FMT_LOCAL ||
+ tifp->if_format == XFS_DINODE_FMT_LOCAL)
return -EINVAL;
/*
* if the target inode has fewer extents than the temporary inode, then
* why did userspace call us?
*/
- if (ip->i_d.di_nextents < tip->i_d.di_nextents)
+ if (ifp->if_nextents < tifp->if_nextents)
return -EINVAL;
/*
@@ -1235,20 +1244,18 @@ xfs_swap_extents_check_format(
* form then we will end up with the target inode in the wrong format
* as we already know there are less extents in the temp inode.
*/
- if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
- tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
+ if (ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
+ tifp->if_format == XFS_DINODE_FMT_BTREE)
return -EINVAL;
/* Check temp in extent form to max in target */
- if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
- XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
- XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
+ if (tifp->if_format == XFS_DINODE_FMT_EXTENTS &&
+ tifp->if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
return -EINVAL;
/* Check target in extent form to max in temp */
- if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
- XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
- XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
+ if (ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
+ ifp->if_nextents > XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
return -EINVAL;
/*
@@ -1260,22 +1267,20 @@ xfs_swap_extents_check_format(
* (a common defrag case) which will occur when the temp inode is in
* extent format...
*/
- if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
+ if (tifp->if_format == XFS_DINODE_FMT_BTREE) {
if (XFS_IFORK_Q(ip) &&
- XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
+ XFS_BMAP_BMDR_SPACE(tifp->if_broot) > XFS_IFORK_BOFF(ip))
return -EINVAL;
- if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
- XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
+ if (tifp->if_nextents <= XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
return -EINVAL;
}
/* Reciprocal target->temp btree format checks */
- if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
+ if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
if (XFS_IFORK_Q(tip) &&
XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
return -EINVAL;
- if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
- XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
+ if (ifp->if_nextents <= XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
return -EINVAL;
}
@@ -1427,15 +1432,15 @@ xfs_swap_extent_forks(
/*
* Count the number of extended attribute blocks
*/
- if ( ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) &&
- (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
+ if (XFS_IFORK_Q(ip) && ip->i_afp->if_nextents > 0 &&
+ ip->i_afp->if_format != XFS_DINODE_FMT_LOCAL) {
error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &junk,
&aforkblks);
if (error)
return error;
}
- if ( ((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) &&
- (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
+ if (XFS_IFORK_Q(tip) && tip->i_afp->if_nextents > 0 &&
+ tip->i_afp->if_format != XFS_DINODE_FMT_LOCAL) {
error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK, &junk,
&taforkblks);
if (error)
@@ -1450,9 +1455,9 @@ xfs_swap_extent_forks(
* bmbt scan as the last step.
*/
if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) {
- if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE)
+ if (ip->i_df.if_format == XFS_DINODE_FMT_BTREE)
(*target_log_flags) |= XFS_ILOG_DOWNER;
- if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
+ if (tip->i_df.if_format == XFS_DINODE_FMT_BTREE)
(*src_log_flags) |= XFS_ILOG_DOWNER;
}
@@ -1468,9 +1473,6 @@ xfs_swap_extent_forks(
ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;
- swap(ip->i_d.di_nextents, tip->i_d.di_nextents);
- swap(ip->i_d.di_format, tip->i_d.di_format);
-
/*
* The extents in the source inode could still contain speculative
* preallocation beyond EOF (e.g. the file is open but not modified
@@ -1484,7 +1486,7 @@ xfs_swap_extent_forks(
tip->i_delayed_blks = ip->i_delayed_blks;
ip->i_delayed_blks = 0;
- switch (ip->i_d.di_format) {
+ switch (ip->i_df.if_format) {
case XFS_DINODE_FMT_EXTENTS:
(*src_log_flags) |= XFS_ILOG_DEXT;
break;
@@ -1495,7 +1497,7 @@ xfs_swap_extent_forks(
break;
}
- switch (tip->i_d.di_format) {
+ switch (tip->i_df.if_format) {
case XFS_DINODE_FMT_EXTENTS:
(*target_log_flags) |= XFS_ILOG_DEXT;
break;
@@ -1606,7 +1608,7 @@ xfs_swap_extents(
if (xfs_inode_has_cow_data(tip)) {
error = xfs_reflink_cancel_cow_range(tip, 0, NULLFILEOFF, true);
if (error)
- return error;
+ goto out_unlock;
}
/*
@@ -1615,9 +1617,9 @@ xfs_swap_extents(
* performed with log redo items!
*/
if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
- int w = XFS_DATA_FORK;
- uint32_t ipnext = XFS_IFORK_NEXTENTS(ip, w);
- uint32_t tipnext = XFS_IFORK_NEXTENTS(tip, w);
+ int w = XFS_DATA_FORK;
+ uint32_t ipnext = ip->i_df.if_nextents;
+ uint32_t tipnext = tip->i_df.if_nextents;
/*
* Conceptually this shouldn't affect the shape of either bmbt,
@@ -1717,10 +1719,11 @@ xfs_swap_extents(
/* Swap the cow forks. */
if (xfs_sb_version_hasreflink(&mp->m_sb)) {
- ASSERT(ip->i_cformat == XFS_DINODE_FMT_EXTENTS);
- ASSERT(tip->i_cformat == XFS_DINODE_FMT_EXTENTS);
+ ASSERT(!ip->i_cowfp ||
+ ip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS);
+ ASSERT(!tip->i_cowfp ||
+ tip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS);
- swap(ip->i_cnextents, tip->i_cnextents);
swap(ip->i_cowfp, tip->i_cowfp);
if (ip->i_cowfp && ip->i_cowfp->if_bytes)
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 65538d18e64f..20b748f7e186 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1197,8 +1197,10 @@ xfs_buf_ioend(
bp->b_ops->verify_read(bp);
}
- if (!bp->b_error)
+ if (!bp->b_error) {
+ bp->b_flags &= ~XBF_WRITE_FAIL;
bp->b_flags |= XBF_DONE;
+ }
if (bp->b_iodone)
(*(bp->b_iodone))(bp);
@@ -1242,10 +1244,26 @@ xfs_buf_ioerror_alert(
struct xfs_buf *bp,
xfs_failaddr_t func)
{
- xfs_alert_ratelimited(bp->b_mount,
-"metadata I/O error in \"%pS\" at daddr 0x%llx len %d error %d",
- func, (uint64_t)XFS_BUF_ADDR(bp), bp->b_length,
- -bp->b_error);
+ xfs_buf_alert_ratelimited(bp, "XFS: metadata IO error",
+ "metadata I/O error in \"%pS\" at daddr 0x%llx len %d error %d",
+ func, (uint64_t)XFS_BUF_ADDR(bp),
+ bp->b_length, -bp->b_error);
+}
+
+/*
+ * To simulate an I/O failure, the buffer must be locked and held with at least
+ * three references. The LRU reference is dropped by the stale call. The buf
+ * item reference is dropped via ioend processing. The third reference is owned
+ * by the caller and is dropped on I/O completion if the buffer is XBF_ASYNC.
+ */
+void
+xfs_buf_ioend_fail(
+ struct xfs_buf *bp)
+{
+ bp->b_flags &= ~XBF_DONE;
+ xfs_buf_stale(bp);
+ xfs_buf_ioerror(bp, -EIO);
+ xfs_buf_ioend(bp);
}
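As the comment above notes, callers must already hold the lock and an extra reference before simulating the failure; a minimal usage sketch (the same pattern appears verbatim in xfs_buf_item_unpin() later in this patch):

static void
xfs_buf_fail_io_sketch(
	struct xfs_buf	*bp)
{
	xfs_buf_lock(bp);
	xfs_buf_hold(bp);		/* third reference, dropped by ioend */
	bp->b_flags |= XBF_ASYNC;	/* let completion drop it for us */
	xfs_buf_ioend_fail(bp);
}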
int
@@ -1258,7 +1276,7 @@ xfs_bwrite(
bp->b_flags |= XBF_WRITE;
bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q |
- XBF_WRITE_FAIL | XBF_DONE);
+ XBF_DONE);
error = xfs_buf_submit(bp);
if (error)
@@ -1272,6 +1290,11 @@ xfs_buf_bio_end_io(
{
struct xfs_buf *bp = (struct xfs_buf *)bio->bi_private;
+ if (!bio->bi_status &&
+ (bp->b_flags & XBF_WRITE) && (bp->b_flags & XBF_ASYNC) &&
+ XFS_TEST_ERROR(false, bp->b_mount, XFS_ERRTAG_BUF_IOERROR))
+ bio->bi_status = BLK_STS_IOERR;
+
/*
* don't overwrite existing errors - otherwise we can lose errors on
* buffers that require multiple bios to complete.
@@ -1480,10 +1503,7 @@ __xfs_buf_submit(
/* on shutdown we stale and complete the buffer immediately */
if (XFS_FORCED_SHUTDOWN(bp->b_mount)) {
- xfs_buf_ioerror(bp, -EIO);
- bp->b_flags &= ~XBF_DONE;
- xfs_buf_stale(bp);
- xfs_buf_ioend(bp);
+ xfs_buf_ioend_fail(bp);
return -EIO;
}
@@ -1642,7 +1662,8 @@ xfs_wait_buftarg(
struct xfs_buftarg *btp)
{
LIST_HEAD(dispose);
- int loop = 0;
+ int loop = 0;
+ bool write_fail = false;
/*
* First wait on the buftarg I/O count for all in-flight buffers to be
@@ -1670,17 +1691,29 @@ xfs_wait_buftarg(
bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
list_del_init(&bp->b_lru);
if (bp->b_flags & XBF_WRITE_FAIL) {
- xfs_alert(btp->bt_mount,
+ write_fail = true;
+ xfs_buf_alert_ratelimited(bp,
+ "XFS: Corruption Alert",
"Corruption Alert: Buffer at daddr 0x%llx had permanent write failures!",
(long long)bp->b_bn);
- xfs_alert(btp->bt_mount,
-"Please run xfs_repair to determine the extent of the problem.");
}
xfs_buf_rele(bp);
}
if (loop++ != 0)
delay(100);
}
+
+ /*
+ * If one or more failed buffers were freed, that means dirty metadata
+ * was thrown away. This should only ever happen after I/O completion
+ * handling has elevated I/O error(s) to permanent failures and shut
+ * down the fs.
+ */
+ if (write_fail) {
+ ASSERT(XFS_FORCED_SHUTDOWN(btp->bt_mount));
+ xfs_alert(btp->bt_mount,
+ "Please run xfs_repair to determine the extent of the problem.");
+ }
}
static enum lru_status
@@ -1813,6 +1846,13 @@ xfs_alloc_buftarg(
btp->bt_bdev = bdev;
btp->bt_daxdev = dax_dev;
+ /*
+ * Buffer IO error rate limiting. Limit it to no more than 10 messages
+ * per 30 seconds so as to not spam logs too much on repeated errors.
+ */
+ ratelimit_state_init(&btp->bt_ioerror_rl, 30 * HZ,
+ DEFAULT_RATELIMIT_BURST);
+
if (xfs_setsize_buftarg_early(btp, bdev))
goto error_free;
@@ -1983,7 +2023,7 @@ xfs_buf_delwri_submit_buffers(
* synchronously. Otherwise, drop the buffer from the delwri
* queue and submit async.
*/
- bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_WRITE_FAIL);
+ bp->b_flags &= ~_XBF_DELWRI_Q;
bp->b_flags |= XBF_WRITE;
if (wait_list) {
bp->b_flags &= ~XBF_ASYNC;
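The new bt_ioerror_rl state is consumed by xfs_buf_alert_ratelimited(), which the hunks above call in place of the old global ratelimit. A hedged sketch of its likely shape (the real helper lives in fs/xfs/xfs_message.c):

void
xfs_buf_alert_ratelimited(
	struct xfs_buf		*bp,
	const char		*rlmsg,
	const char		*fmt,
	...)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct va_format	vaf;
	va_list			args;

	/* per-target rate limit so one bad device can't spam the log */
	if (!___ratelimit(&bp->b_target->bt_ioerror_rl, rlmsg))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	__xfs_printk(KERN_ALERT, mp, &vaf);
	va_end(args);
}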
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 9a04c53c2488..050c53b739e2 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -91,6 +91,7 @@ typedef struct xfs_buftarg {
struct list_lru bt_lru;
struct percpu_counter bt_io_count;
+ struct ratelimit_state bt_ioerror_rl;
} xfs_buftarg_t;
struct xfs_buf;
@@ -263,6 +264,7 @@ extern void __xfs_buf_ioerror(struct xfs_buf *bp, int error,
xfs_failaddr_t failaddr);
#define xfs_buf_ioerror(bp, err) __xfs_buf_ioerror((bp), (err), __this_address)
extern void xfs_buf_ioerror_alert(struct xfs_buf *bp, xfs_failaddr_t fa);
+void xfs_buf_ioend_fail(struct xfs_buf *);
extern int __xfs_buf_submit(struct xfs_buf *bp, bool);
static inline int xfs_buf_submit(struct xfs_buf *bp)
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 1545657c3ca0..9e75e8d6042e 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -410,7 +410,6 @@ xfs_buf_item_unpin(
{
struct xfs_buf_log_item *bip = BUF_ITEM(lip);
xfs_buf_t *bp = bip->bli_buf;
- struct xfs_ail *ailp = lip->li_ailp;
int stale = bip->bli_flags & XFS_BLI_STALE;
int freed;
@@ -452,10 +451,10 @@ xfs_buf_item_unpin(
}
/*
- * If we get called here because of an IO error, we may
- * or may not have the item on the AIL. xfs_trans_ail_delete()
- * will take care of that situation.
- * xfs_trans_ail_delete() drops the AIL lock.
+ * If we get called here because of an IO error, we may or may
+ * not have the item on the AIL. xfs_trans_ail_delete() will
+ * take care of that situation. xfs_trans_ail_delete() drops
+ * the AIL lock.
*/
if (bip->bli_flags & XFS_BLI_STALE_INODE) {
xfs_buf_do_callbacks(bp);
@@ -463,47 +462,23 @@ xfs_buf_item_unpin(
list_del_init(&bp->b_li_list);
bp->b_iodone = NULL;
} else {
- spin_lock(&ailp->ail_lock);
- xfs_trans_ail_delete(ailp, lip, SHUTDOWN_LOG_IO_ERROR);
+ xfs_trans_ail_delete(lip, SHUTDOWN_LOG_IO_ERROR);
xfs_buf_item_relse(bp);
ASSERT(bp->b_log_item == NULL);
}
xfs_buf_relse(bp);
} else if (freed && remove) {
/*
- * There are currently two references to the buffer - the active
- * LRU reference and the buf log item. What we are about to do
- * here - simulate a failed IO completion - requires 3
- * references.
- *
- * The LRU reference is removed by the xfs_buf_stale() call. The
- * buf item reference is removed by the xfs_buf_iodone()
- * callback that is run by xfs_buf_do_callbacks() during ioend
- * processing (via the bp->b_iodone callback), and then finally
- * the ioend processing will drop the IO reference if the buffer
- * is marked XBF_ASYNC.
- *
- * Hence we need to take an additional reference here so that IO
- * completion processing doesn't free the buffer prematurely.
+ * The buffer must be locked and held by the caller to simulate
+ * an async I/O failure.
*/
xfs_buf_lock(bp);
xfs_buf_hold(bp);
bp->b_flags |= XBF_ASYNC;
- xfs_buf_ioerror(bp, -EIO);
- bp->b_flags &= ~XBF_DONE;
- xfs_buf_stale(bp);
- xfs_buf_ioend(bp);
+ xfs_buf_ioend_fail(bp);
}
}
-/*
- * Buffer IO error rate limiting. Limit it to no more than 10 messages per 30
- * seconds so as to not spam logs too much on repeated detection of the same
- * buffer being bad..
- */
-
-static DEFINE_RATELIMIT_STATE(xfs_buf_write_fail_rl_state, 30 * HZ, 10);
-
STATIC uint
xfs_buf_item_push(
struct xfs_log_item *lip,
@@ -533,11 +508,10 @@ xfs_buf_item_push(
trace_xfs_buf_item_push(bip);
/* has a previous flush failed due to IO errors? */
- if ((bp->b_flags & XBF_WRITE_FAIL) &&
- ___ratelimit(&xfs_buf_write_fail_rl_state, "XFS: Failing async write")) {
- xfs_warn(bp->b_mount,
-"Failing async write on buffer block 0x%llx. Retrying async write.",
- (long long)bp->b_bn);
+ if (bp->b_flags & XBF_WRITE_FAIL) {
+ xfs_buf_alert_ratelimited(bp, "XFS: Failing async write",
+ "Failing async write on buffer block 0x%llx. Retrying async write.",
+ (long long)bp->b_bn);
}
if (!xfs_buf_delwri_queue(bp, buffer_list))
@@ -584,7 +558,7 @@ xfs_buf_item_put(
* state.
*/
if (aborted)
- xfs_trans_ail_remove(lip, SHUTDOWN_LOG_IO_ERROR);
+ xfs_trans_ail_delete(lip, 0);
xfs_buf_item_relse(bip->bli_buf);
return true;
}
@@ -1229,61 +1203,19 @@ xfs_buf_iodone(
struct xfs_buf *bp,
struct xfs_log_item *lip)
{
- struct xfs_ail *ailp = lip->li_ailp;
-
ASSERT(BUF_ITEM(lip)->bli_buf == bp);
xfs_buf_rele(bp);
/*
- * If we are forcibly shutting down, this may well be
- * off the AIL already. That's because we simulate the
- * log-committed callbacks to unpin these buffers. Or we may never
- * have put this item on AIL because of the transaction was
- * aborted forcibly. xfs_trans_ail_delete() takes care of these.
+ * If we are forcibly shutting down, this may well be off the AIL
+ * already. That's because we simulate the log-committed callbacks to
+ * unpin these buffers. Or we may never have put this item on AIL
+ * because of the transaction was aborted forcibly.
+ * xfs_trans_ail_delete() takes care of these.
*
* Either way, AIL is useless if we're forcing a shutdown.
*/
- spin_lock(&ailp->ail_lock);
- xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
+ xfs_trans_ail_delete(lip, SHUTDOWN_CORRUPT_INCORE);
xfs_buf_item_free(BUF_ITEM(lip));
}
-
-/*
- * Requeue a failed buffer for writeback.
- *
- * We clear the log item failed state here as well, but we have to be careful
- * about reference counts because the only active reference counts on the buffer
- * may be the failed log items. Hence if we clear the log item failed state
- * before queuing the buffer for IO we can release all active references to
- * the buffer and free it, leading to use after free problems in
- * xfs_buf_delwri_queue. It makes no difference to the buffer or log items which
- * order we process them in - the buffer is locked, and we own the buffer list
- * so nothing on them is going to change while we are performing this action.
- *
- * Hence we can safely queue the buffer for IO before we clear the failed log
- * item state, therefore always having an active reference to the buffer and
- * avoiding the transient zero-reference state that leads to use-after-free.
- *
- * Return true if the buffer was added to the buffer list, false if it was
- * already on the buffer list.
- */
-bool
-xfs_buf_resubmit_failed_buffers(
- struct xfs_buf *bp,
- struct list_head *buffer_list)
-{
- struct xfs_log_item *lip;
- bool ret;
-
- ret = xfs_buf_delwri_queue(bp, buffer_list);
-
- /*
- * XFS_LI_FAILED set/clear is protected by ail_lock, caller of this
- * function already have it acquired
- */
- list_for_each_entry(lip, &bp->b_li_list, li_bio_list)
- xfs_clear_li_failed(lip);
-
- return ret;
-}
diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h
index 30114b510332..c9c57e2da932 100644
--- a/fs/xfs/xfs_buf_item.h
+++ b/fs/xfs/xfs_buf_item.h
@@ -59,8 +59,6 @@ void xfs_buf_attach_iodone(struct xfs_buf *,
struct xfs_log_item *);
void xfs_buf_iodone_callbacks(struct xfs_buf *);
void xfs_buf_iodone(struct xfs_buf *, struct xfs_log_item *);
-bool xfs_buf_resubmit_failed_buffers(struct xfs_buf *,
- struct list_head *);
bool xfs_buf_log_check_iovec(struct xfs_log_iovec *iovec);
extern kmem_zone_t *xfs_buf_item_zone;
diff --git a/fs/xfs/xfs_buf_item_recover.c b/fs/xfs/xfs_buf_item_recover.c
new file mode 100644
index 000000000000..04faa7310c4f
--- /dev/null
+++ b/fs/xfs/xfs_buf_item_recover.c
@@ -0,0 +1,984 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc.
+ * All Rights Reserved.
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_bit.h"
+#include "xfs_mount.h"
+#include "xfs_trans.h"
+#include "xfs_buf_item.h"
+#include "xfs_trans_priv.h"
+#include "xfs_trace.h"
+#include "xfs_log.h"
+#include "xfs_log_priv.h"
+#include "xfs_log_recover.h"
+#include "xfs_error.h"
+#include "xfs_inode.h"
+#include "xfs_dir2.h"
+#include "xfs_quota.h"
+
+/*
+ * This structure is used during recovery to record the buf log items which
+ * have been canceled and should not be replayed.
+ */
+struct xfs_buf_cancel {
+ xfs_daddr_t bc_blkno;
+ uint bc_len;
+ int bc_refcount;
+ struct list_head bc_list;
+};
+
+static struct xfs_buf_cancel *
+xlog_find_buffer_cancelled(
+ struct xlog *log,
+ xfs_daddr_t blkno,
+ uint len)
+{
+ struct list_head *bucket;
+ struct xfs_buf_cancel *bcp;
+
+ if (!log->l_buf_cancel_table)
+ return NULL;
+
+ bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
+ list_for_each_entry(bcp, bucket, bc_list) {
+ if (bcp->bc_blkno == blkno && bcp->bc_len == len)
+ return bcp;
+ }
+
+ return NULL;
+}
+
+static bool
+xlog_add_buffer_cancelled(
+ struct xlog *log,
+ xfs_daddr_t blkno,
+ uint len)
+{
+ struct xfs_buf_cancel *bcp;
+
+ /*
+ * If we find an existing cancel record, this indicates that the buffer
+ * was cancelled multiple times. To ensure that during pass 2 we keep
+ * the record in the table until we reach its last occurrence in the
+ * log, a reference count is kept to tell how many times we expect to
+ * see this record during the second pass.
+ */
+ bcp = xlog_find_buffer_cancelled(log, blkno, len);
+ if (bcp) {
+ bcp->bc_refcount++;
+ return false;
+ }
+
+ bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), 0);
+ bcp->bc_blkno = blkno;
+ bcp->bc_len = len;
+ bcp->bc_refcount = 1;
+ list_add_tail(&bcp->bc_list, XLOG_BUF_CANCEL_BUCKET(log, blkno));
+ return true;
+}
+
+/*
+ * Check if there is an entry for blkno, len in the buffer cancel record table.
+ */
+bool
+xlog_is_buffer_cancelled(
+ struct xlog *log,
+ xfs_daddr_t blkno,
+ uint len)
+{
+ return xlog_find_buffer_cancelled(log, blkno, len) != NULL;
+}
+
+/*
+ * Check if there is an entry for blkno, len in the buffer cancel record table,
+ * and decrement the reference count on it if there is one.
+ *
+ * Remove the cancel record once the refcount hits zero, so that if the same
+ * buffer is re-used again after its last cancellation we actually replay the
+ * changes made at that point.
+ */
+static bool
+xlog_put_buffer_cancelled(
+ struct xlog *log,
+ xfs_daddr_t blkno,
+ uint len)
+{
+ struct xfs_buf_cancel *bcp;
+
+ bcp = xlog_find_buffer_cancelled(log, blkno, len);
+ if (!bcp) {
+ ASSERT(0);
+ return false;
+ }
+
+ if (--bcp->bc_refcount == 0) {
+ list_del(&bcp->bc_list);
+ kmem_free(bcp);
+ }
+ return true;
+}
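Pass 1 adds references via xlog_add_buffer_cancelled(); pass 2 consumes them. A hedged sketch of how the pass-2 buffer handler might decide whether a buffer still needs replay (the helper name is illustrative):

static bool
xlog_buf_replay_needed(
	struct xlog			*log,
	struct xfs_buf_log_format	*buf_f)
{
	if (buf_f->blf_flags & XFS_BLF_CANCEL) {
		/* the cancel record itself: drop one pass-1 reference */
		return !xlog_put_buffer_cancelled(log, buf_f->blf_blkno,
				buf_f->blf_len);
	}

	/* ordinary buffer: skip replay while a cancel record is live */
	return !xlog_is_buffer_cancelled(log, buf_f->blf_blkno,
			buf_f->blf_len);
}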
+
+/* log buffer item recovery */
+
+/*
+ * Sort buffer items for log recovery. Most buffer items should end up on the
+ * buffer list and are recovered first, with the following exceptions:
+ *
+ * 1. XFS_BLF_CANCEL buffers must be processed last because some log items
+ * might depend on the incore cancellation record, and replaying a cancelled
+ * buffer item can remove the incore record.
+ *
+ * 2. XFS_BLF_INODE_BUF buffers are handled after most regular items so that
+ * we replay di_next_unlinked only after flushing the inode 'free' state
+ * to the inode buffer.
+ *
+ * See xlog_recover_reorder_trans for more details.
+ */
+STATIC enum xlog_recover_reorder
+xlog_recover_buf_reorder(
+ struct xlog_recover_item *item)
+{
+ struct xfs_buf_log_format *buf_f = item->ri_buf[0].i_addr;
+
+ if (buf_f->blf_flags & XFS_BLF_CANCEL)
+ return XLOG_REORDER_CANCEL_LIST;
+ if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
+ return XLOG_REORDER_INODE_BUFFER_LIST;
+ return XLOG_REORDER_BUFFER_LIST;
+}
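A hedged sketch of how a transaction reorder pass might consume this enum, splicing items back in replay order (simplified driver; non-buffer items and tracing are omitted):

static void
xlog_reorder_sketch(
	struct list_head	*items)
{
	LIST_HEAD(cancel_list);
	LIST_HEAD(inode_buffer_list);
	LIST_HEAD(buffer_list);
	struct xlog_recover_item *item, *n;

	list_for_each_entry_safe(item, n, items, ri_list) {
		switch (xlog_recover_buf_reorder(item)) {
		case XLOG_REORDER_CANCEL_LIST:
			list_move_tail(&item->ri_list, &cancel_list);
			break;
		case XLOG_REORDER_INODE_BUFFER_LIST:
			list_move_tail(&item->ri_list, &inode_buffer_list);
			break;
		default:
			list_move_tail(&item->ri_list, &buffer_list);
			break;
		}
	}

	/* replay buffers first, then inode buffers, cancels last */
	list_splice_init(&buffer_list, items);
	list_splice_tail_init(&inode_buffer_list, items);
	list_splice_tail_init(&cancel_list, items);
}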
+
+STATIC void
+xlog_recover_buf_ra_pass2(
+ struct xlog *log,
+ struct xlog_recover_item *item)
+{
+ struct xfs_buf_log_format *buf_f = item->ri_buf[0].i_addr;
+
+ xlog_buf_readahead(log, buf_f->blf_blkno, buf_f->blf_len, NULL);
+}
+
+/*
+ * Build up the table of buf cancel records so that we don't replay cancelled
+ * data in the second pass.
+ */
+static int
+xlog_recover_buf_commit_pass1(
+ struct xlog *log,
+ struct xlog_recover_item *item)
+{
+ struct xfs_buf_log_format *bf = item->ri_buf[0].i_addr;
+
+ if (!xfs_buf_log_check_iovec(&item->ri_buf[0])) {
+ xfs_err(log->l_mp, "bad buffer log item size (%d)",
+ item->ri_buf[0].i_len);
+ return -EFSCORRUPTED;
+ }
+
+ if (!(bf->blf_flags & XFS_BLF_CANCEL))
+ trace_xfs_log_recover_buf_not_cancel(log, bf);
+ else if (xlog_add_buffer_cancelled(log, bf->blf_blkno, bf->blf_len))
+ trace_xfs_log_recover_buf_cancel_add(log, bf);
+ else
+ trace_xfs_log_recover_buf_cancel_ref_inc(log, bf);
+ return 0;
+}
+
+/*
+ * Validate the recovered buffer is of the correct type and attach the
+ * appropriate buffer operations to it for writeback. Magic numbers are in a
+ * few places:
+ * the first 16 bits of the buffer (inode buffer, dquot buffer),
+ * the first 32 bits of the buffer (most blocks),
+ * inside a struct xfs_da_blkinfo at the start of the buffer.
+ */
+static void
+xlog_recover_validate_buf_type(
+ struct xfs_mount *mp,
+ struct xfs_buf *bp,
+ struct xfs_buf_log_format *buf_f,
+ xfs_lsn_t current_lsn)
+{
+ struct xfs_da_blkinfo *info = bp->b_addr;
+ uint32_t magic32;
+ uint16_t magic16;
+ uint16_t magicda;
+ char *warnmsg = NULL;
+
+ /*
+ * We can only do post recovery validation on items on CRC enabled
+ * filesystems as we need to know when the buffer was written to be able
+ * to determine if we should have replayed the item. If we replay old
+ * metadata over a newer buffer, then it will enter a temporarily
+ * inconsistent state resulting in verification failures. Hence for now
+ * just avoid the verification stage for non-crc filesystems.
+ */
+ if (!xfs_sb_version_hascrc(&mp->m_sb))
+ return;
+
+ magic32 = be32_to_cpu(*(__be32 *)bp->b_addr);
+ magic16 = be16_to_cpu(*(__be16*)bp->b_addr);
+ magicda = be16_to_cpu(info->magic);
+ switch (xfs_blft_from_flags(buf_f)) {
+ case XFS_BLFT_BTREE_BUF:
+ switch (magic32) {
+ case XFS_ABTB_CRC_MAGIC:
+ case XFS_ABTB_MAGIC:
+ bp->b_ops = &xfs_bnobt_buf_ops;
+ break;
+ case XFS_ABTC_CRC_MAGIC:
+ case XFS_ABTC_MAGIC:
+ bp->b_ops = &xfs_cntbt_buf_ops;
+ break;
+ case XFS_IBT_CRC_MAGIC:
+ case XFS_IBT_MAGIC:
+ bp->b_ops = &xfs_inobt_buf_ops;
+ break;
+ case XFS_FIBT_CRC_MAGIC:
+ case XFS_FIBT_MAGIC:
+ bp->b_ops = &xfs_finobt_buf_ops;
+ break;
+ case XFS_BMAP_CRC_MAGIC:
+ case XFS_BMAP_MAGIC:
+ bp->b_ops = &xfs_bmbt_buf_ops;
+ break;
+ case XFS_RMAP_CRC_MAGIC:
+ bp->b_ops = &xfs_rmapbt_buf_ops;
+ break;
+ case XFS_REFC_CRC_MAGIC:
+ bp->b_ops = &xfs_refcountbt_buf_ops;
+ break;
+ default:
+ warnmsg = "Bad btree block magic!";
+ break;
+ }
+ break;
+ case XFS_BLFT_AGF_BUF:
+ if (magic32 != XFS_AGF_MAGIC) {
+ warnmsg = "Bad AGF block magic!";
+ break;
+ }
+ bp->b_ops = &xfs_agf_buf_ops;
+ break;
+ case XFS_BLFT_AGFL_BUF:
+ if (magic32 != XFS_AGFL_MAGIC) {
+ warnmsg = "Bad AGFL block magic!";
+ break;
+ }
+ bp->b_ops = &xfs_agfl_buf_ops;
+ break;
+ case XFS_BLFT_AGI_BUF:
+ if (magic32 != XFS_AGI_MAGIC) {
+ warnmsg = "Bad AGI block magic!";
+ break;
+ }
+ bp->b_ops = &xfs_agi_buf_ops;
+ break;
+ case XFS_BLFT_UDQUOT_BUF:
+ case XFS_BLFT_PDQUOT_BUF:
+ case XFS_BLFT_GDQUOT_BUF:
+#ifdef CONFIG_XFS_QUOTA
+ if (magic16 != XFS_DQUOT_MAGIC) {
+ warnmsg = "Bad DQUOT block magic!";
+ break;
+ }
+ bp->b_ops = &xfs_dquot_buf_ops;
+#else
+ xfs_alert(mp,
+ "Trying to recover dquots without QUOTA support built in!");
+ ASSERT(0);
+#endif
+ break;
+ case XFS_BLFT_DINO_BUF:
+ if (magic16 != XFS_DINODE_MAGIC) {
+ warnmsg = "Bad INODE block magic!";
+ break;
+ }
+ bp->b_ops = &xfs_inode_buf_ops;
+ break;
+ case XFS_BLFT_SYMLINK_BUF:
+ if (magic32 != XFS_SYMLINK_MAGIC) {
+ warnmsg = "Bad symlink block magic!";
+ break;
+ }
+ bp->b_ops = &xfs_symlink_buf_ops;
+ break;
+ case XFS_BLFT_DIR_BLOCK_BUF:
+ if (magic32 != XFS_DIR2_BLOCK_MAGIC &&
+ magic32 != XFS_DIR3_BLOCK_MAGIC) {
+ warnmsg = "Bad dir block magic!";
+ break;
+ }
+ bp->b_ops = &xfs_dir3_block_buf_ops;
+ break;
+ case XFS_BLFT_DIR_DATA_BUF:
+ if (magic32 != XFS_DIR2_DATA_MAGIC &&
+ magic32 != XFS_DIR3_DATA_MAGIC) {
+ warnmsg = "Bad dir data magic!";
+ break;
+ }
+ bp->b_ops = &xfs_dir3_data_buf_ops;
+ break;
+ case XFS_BLFT_DIR_FREE_BUF:
+ if (magic32 != XFS_DIR2_FREE_MAGIC &&
+ magic32 != XFS_DIR3_FREE_MAGIC) {
+ warnmsg = "Bad dir3 free magic!";
+ break;
+ }
+ bp->b_ops = &xfs_dir3_free_buf_ops;
+ break;
+ case XFS_BLFT_DIR_LEAF1_BUF:
+ if (magicda != XFS_DIR2_LEAF1_MAGIC &&
+ magicda != XFS_DIR3_LEAF1_MAGIC) {
+ warnmsg = "Bad dir leaf1 magic!";
+ break;
+ }
+ bp->b_ops = &xfs_dir3_leaf1_buf_ops;
+ break;
+ case XFS_BLFT_DIR_LEAFN_BUF:
+ if (magicda != XFS_DIR2_LEAFN_MAGIC &&
+ magicda != XFS_DIR3_LEAFN_MAGIC) {
+ warnmsg = "Bad dir leafn magic!";
+ break;
+ }
+ bp->b_ops = &xfs_dir3_leafn_buf_ops;
+ break;
+ case XFS_BLFT_DA_NODE_BUF:
+ if (magicda != XFS_DA_NODE_MAGIC &&
+ magicda != XFS_DA3_NODE_MAGIC) {
+ warnmsg = "Bad da node magic!";
+ break;
+ }
+ bp->b_ops = &xfs_da3_node_buf_ops;
+ break;
+ case XFS_BLFT_ATTR_LEAF_BUF:
+ if (magicda != XFS_ATTR_LEAF_MAGIC &&
+ magicda != XFS_ATTR3_LEAF_MAGIC) {
+ warnmsg = "Bad attr leaf magic!";
+ break;
+ }
+ bp->b_ops = &xfs_attr3_leaf_buf_ops;
+ break;
+ case XFS_BLFT_ATTR_RMT_BUF:
+ if (magic32 != XFS_ATTR3_RMT_MAGIC) {
+ warnmsg = "Bad attr remote magic!";
+ break;
+ }
+ bp->b_ops = &xfs_attr3_rmt_buf_ops;
+ break;
+ case XFS_BLFT_SB_BUF:
+ if (magic32 != XFS_SB_MAGIC) {
+ warnmsg = "Bad SB block magic!";
+ break;
+ }
+ bp->b_ops = &xfs_sb_buf_ops;
+ break;
+#ifdef CONFIG_XFS_RT
+ case XFS_BLFT_RTBITMAP_BUF:
+ case XFS_BLFT_RTSUMMARY_BUF:
+ /* no magic numbers for verification of RT buffers */
+ bp->b_ops = &xfs_rtbuf_ops;
+ break;
+#endif /* CONFIG_XFS_RT */
+ default:
+ xfs_warn(mp, "Unknown buffer type %d!",
+ xfs_blft_from_flags(buf_f));
+ break;
+ }
+
+ /*
+ * Nothing else to do in the case of a NULL current LSN as this means
+ * the buffer is more recent than the change in the log and will be
+ * skipped.
+ */
+ if (current_lsn == NULLCOMMITLSN)
+ return;
+
+ if (warnmsg) {
+ xfs_warn(mp, warnmsg);
+ ASSERT(0);
+ }
+
+ /*
+ * We must update the metadata LSN of the buffer as it is written out to
+ * ensure that older transactions never replay over this one and corrupt
+ * the buffer. This can occur if log recovery is interrupted at some
+ * point after the current transaction completes, at which point a
+ * subsequent mount starts recovery from the beginning.
+ *
+ * Write verifiers update the metadata LSN from log items attached to
+ * the buffer. Therefore, initialize a bli purely to carry the LSN to
+ * the verifier. We'll clean it up in our ->iodone() callback.
+ */
+ if (bp->b_ops) {
+ struct xfs_buf_log_item *bip;
+
+ ASSERT(!bp->b_iodone || bp->b_iodone == xlog_recover_iodone);
+ bp->b_iodone = xlog_recover_iodone;
+ xfs_buf_item_init(bp, mp);
+ bip = bp->b_log_item;
+ bip->bli_item.li_lsn = current_lsn;
+ }
+}
+
+/*
+ * Perform a 'normal' buffer recovery. Each logged region of the
+ * buffer should be copied over the corresponding region in the
+ * given buffer. The bitmap in the buf log format structure indicates
+ * where to place the logged data.
+ */
+STATIC void
+xlog_recover_do_reg_buffer(
+ struct xfs_mount *mp,
+ struct xlog_recover_item *item,
+ struct xfs_buf *bp,
+ struct xfs_buf_log_format *buf_f,
+ xfs_lsn_t current_lsn)
+{
+ int i;
+ int bit;
+ int nbits;
+ xfs_failaddr_t fa;
+ const size_t size_disk_dquot = sizeof(struct xfs_disk_dquot);
+
+ trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
+
+ bit = 0;
+ i = 1; /* 0 is the buf format structure */
+ while (1) {
+ bit = xfs_next_bit(buf_f->blf_data_map,
+ buf_f->blf_map_size, bit);
+ if (bit == -1)
+ break;
+ nbits = xfs_contig_bits(buf_f->blf_data_map,
+ buf_f->blf_map_size, bit);
+ ASSERT(nbits > 0);
+ ASSERT(item->ri_buf[i].i_addr != NULL);
+ ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
+ ASSERT(BBTOB(bp->b_length) >=
+ ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT));
+
+ /*
+ * The dirty regions logged in the buffer, even though
+ * contiguous, may span multiple chunks. This is because the
+ * dirty region may span a physical page boundary in a buffer
+ * and hence be split into two separate vectors for writing into
+ * the log. Hence we need to trim nbits back to the length of
+ * the current region being copied out of the log.
+ */
+ if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT))
+ nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT;
+
+ /*
+ * Do a sanity check if this is a dquot buffer. Just checking
+ * the first dquot in the buffer should do. XXX This is
+ * probably a good thing to do for other buf types also.
+ */
+ fa = NULL;
+ if (buf_f->blf_flags &
+ (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
+ if (item->ri_buf[i].i_addr == NULL) {
+ xfs_alert(mp,
+ "XFS: NULL dquot in %s.", __func__);
+ goto next;
+ }
+ if (item->ri_buf[i].i_len < size_disk_dquot) {
+ xfs_alert(mp,
+ "XFS: dquot too small (%d) in %s.",
+ item->ri_buf[i].i_len, __func__);
+ goto next;
+ }
+ fa = xfs_dquot_verify(mp, item->ri_buf[i].i_addr,
+ -1, 0);
+ if (fa) {
+ xfs_alert(mp,
+ "dquot corrupt at %pS trying to replay into block 0x%llx",
+ fa, bp->b_bn);
+ goto next;
+ }
+ }
+
+ memcpy(xfs_buf_offset(bp,
+ (uint)bit << XFS_BLF_SHIFT), /* dest */
+ item->ri_buf[i].i_addr, /* source */
+ nbits<<XFS_BLF_SHIFT); /* length */
+ next:
+ i++;
+ bit += nbits;
+ }
+
+ /* Shouldn't be any more regions */
+ ASSERT(i == item->ri_total);
+
+ xlog_recover_validate_buf_type(mp, bp, buf_f, current_lsn);
+}
+
+/*
+ * Perform a dquot buffer recovery.
+ * Simple algorithm: if we have found a QUOTAOFF log item of the same type
+ * (i.e. USR or GRP), then just toss this buffer away; don't recover it.
+ * Else, treat it as a regular buffer and do recovery.
+ *
+ * Return false if the buffer was tossed, or true if it was recovered, so the
+ * caller knows whether the buffer needs to be written back.
+ */
+STATIC bool
+xlog_recover_do_dquot_buffer(
+ struct xfs_mount *mp,
+ struct xlog *log,
+ struct xlog_recover_item *item,
+ struct xfs_buf *bp,
+ struct xfs_buf_log_format *buf_f)
+{
+ uint type;
+
+ trace_xfs_log_recover_buf_dquot_buf(log, buf_f);
+
+ /*
+ * Filesystems are required to send in quota flags at mount time.
+ */
+ if (!mp->m_qflags)
+ return false;
+
+ type = 0;
+ if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
+ type |= XFS_DQ_USER;
+ if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
+ type |= XFS_DQ_PROJ;
+ if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
+ type |= XFS_DQ_GROUP;
+ /*
+ * This type of quotas was turned off, so ignore this buffer
+ */
+ if (log->l_quotaoffs_flag & type)
+ return false;
+
+ xlog_recover_do_reg_buffer(mp, item, bp, buf_f, NULLCOMMITLSN);
+ return true;
+}
+
+/*
+ * Perform recovery for a buffer full of inodes. In these buffers, the only
+ * data which should be recovered is that which corresponds to the
+ * di_next_unlinked pointers in the on disk inode structures. The rest of the
+ * data for the inodes is always logged through the inodes themselves rather
+ * than the inode buffer and is recovered in xlog_recover_inode_pass2().
+ *
+ * The only time when buffers full of inodes are fully recovered is when the
+ * buffer is full of newly allocated inodes. In this case the buffer will
+ * not be marked as an inode buffer and so will be sent to
+ * xlog_recover_do_reg_buffer() below during recovery.
+ */
+STATIC int
+xlog_recover_do_inode_buffer(
+ struct xfs_mount *mp,
+ struct xlog_recover_item *item,
+ struct xfs_buf *bp,
+ struct xfs_buf_log_format *buf_f)
+{
+ int i;
+ int item_index = 0;
+ int bit = 0;
+ int nbits = 0;
+ int reg_buf_offset = 0;
+ int reg_buf_bytes = 0;
+ int next_unlinked_offset;
+ int inodes_per_buf;
+ xfs_agino_t *logged_nextp;
+ xfs_agino_t *buffer_nextp;
+
+ trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
+
+ /*
+ * Post recovery validation only works properly on CRC enabled
+ * filesystems.
+ */
+ if (xfs_sb_version_hascrc(&mp->m_sb))
+ bp->b_ops = &xfs_inode_buf_ops;
+
+ inodes_per_buf = BBTOB(bp->b_length) >> mp->m_sb.sb_inodelog;
+ for (i = 0; i < inodes_per_buf; i++) {
+ next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
+ offsetof(xfs_dinode_t, di_next_unlinked);
+
+ while (next_unlinked_offset >=
+ (reg_buf_offset + reg_buf_bytes)) {
+ /*
+ * The next di_next_unlinked field is beyond
+ * the current logged region. Find the next
+ * logged region that contains or is beyond
+ * the current di_next_unlinked field.
+ */
+ bit += nbits;
+ bit = xfs_next_bit(buf_f->blf_data_map,
+ buf_f->blf_map_size, bit);
+
+ /*
+ * If there are no more logged regions in the
+ * buffer, then we're done.
+ */
+ if (bit == -1)
+ return 0;
+
+ nbits = xfs_contig_bits(buf_f->blf_data_map,
+ buf_f->blf_map_size, bit);
+ ASSERT(nbits > 0);
+ reg_buf_offset = bit << XFS_BLF_SHIFT;
+ reg_buf_bytes = nbits << XFS_BLF_SHIFT;
+ item_index++;
+ }
+
+ /*
+ * If the current logged region starts after the current
+ * di_next_unlinked field, then move on to the next
+ * di_next_unlinked field.
+ */
+ if (next_unlinked_offset < reg_buf_offset)
+ continue;
+
+ ASSERT(item->ri_buf[item_index].i_addr != NULL);
+ ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
+ ASSERT((reg_buf_offset + reg_buf_bytes) <= BBTOB(bp->b_length));
+
+ /*
+ * The current logged region contains a copy of the
+ * current di_next_unlinked field. Extract its value
+ * and copy it to the buffer copy.
+ */
+ logged_nextp = item->ri_buf[item_index].i_addr +
+ next_unlinked_offset - reg_buf_offset;
+ if (XFS_IS_CORRUPT(mp, *logged_nextp == 0)) {
+ xfs_alert(mp,
+ "Bad inode buffer log record (ptr = "PTR_FMT", bp = "PTR_FMT"). "
+ "Trying to replay bad (0) inode di_next_unlinked field.",
+ item, bp);
+ return -EFSCORRUPTED;
+ }
+
+ buffer_nextp = xfs_buf_offset(bp, next_unlinked_offset);
+ *buffer_nextp = *logged_nextp;
+
+ /*
+ * If necessary, recalculate the CRC in the on-disk inode. We
+ * have to leave the inode in a consistent state for whoever
+ * reads it next....
+ */
+ xfs_dinode_calc_crc(mp,
+ xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize));
+
+ }
+
+ return 0;
+}
+
+/*
+ * V5 filesystems know the age of the buffer on disk being recovered. We can
+ * have newer objects on disk than we are replaying, and so for these cases we
+ * don't want to replay the current change as that will make the buffer contents
+ * temporarily invalid on disk.
+ *
+ * The magic number might not match the buffer type we are going to recover
+ * (e.g. reallocated blocks), so we ignore the xfs_buf_log_format flags. Hence
+ * extract the LSN of the existing object in the buffer based on its current
+ * magic number. If we don't recognise the magic number in the buffer, then
+ * return an LSN of -1 so that the caller knows it was an unrecognised block and
+ * so can recover the buffer.
+ *
+ * Note: we cannot rely solely on magic number matches to determine that the
+ * buffer has a valid LSN - we also need to verify that it belongs to this
+ * filesystem, so we also extract the object's UUID and compare it to the
+ * one we read from the superblock. If the UUIDs don't match, then we've got a
+ * stale metadata block from an old filesystem instance that we need to recover
+ * over the top of.
+ */
+static xfs_lsn_t
+xlog_recover_get_buf_lsn(
+ struct xfs_mount *mp,
+ struct xfs_buf *bp)
+{
+ uint32_t magic32;
+ uint16_t magic16;
+ uint16_t magicda;
+ void *blk = bp->b_addr;
+ uuid_t *uuid;
+ xfs_lsn_t lsn = -1;
+
+ /* v4 filesystems always recover immediately */
+ if (!xfs_sb_version_hascrc(&mp->m_sb))
+ goto recover_immediately;
+
+ magic32 = be32_to_cpu(*(__be32 *)blk);
+ switch (magic32) {
+ case XFS_ABTB_CRC_MAGIC:
+ case XFS_ABTC_CRC_MAGIC:
+ case XFS_ABTB_MAGIC:
+ case XFS_ABTC_MAGIC:
+ case XFS_RMAP_CRC_MAGIC:
+ case XFS_REFC_CRC_MAGIC:
+ case XFS_IBT_CRC_MAGIC:
+ case XFS_IBT_MAGIC: {
+ struct xfs_btree_block *btb = blk;
+
+ lsn = be64_to_cpu(btb->bb_u.s.bb_lsn);
+ uuid = &btb->bb_u.s.bb_uuid;
+ break;
+ }
+ case XFS_BMAP_CRC_MAGIC:
+ case XFS_BMAP_MAGIC: {
+ struct xfs_btree_block *btb = blk;
+
+ lsn = be64_to_cpu(btb->bb_u.l.bb_lsn);
+ uuid = &btb->bb_u.l.bb_uuid;
+ break;
+ }
+ case XFS_AGF_MAGIC:
+ lsn = be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn);
+ uuid = &((struct xfs_agf *)blk)->agf_uuid;
+ break;
+ case XFS_AGFL_MAGIC:
+ lsn = be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn);
+ uuid = &((struct xfs_agfl *)blk)->agfl_uuid;
+ break;
+ case XFS_AGI_MAGIC:
+ lsn = be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn);
+ uuid = &((struct xfs_agi *)blk)->agi_uuid;
+ break;
+ case XFS_SYMLINK_MAGIC:
+ lsn = be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn);
+ uuid = &((struct xfs_dsymlink_hdr *)blk)->sl_uuid;
+ break;
+ case XFS_DIR3_BLOCK_MAGIC:
+ case XFS_DIR3_DATA_MAGIC:
+ case XFS_DIR3_FREE_MAGIC:
+ lsn = be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn);
+ uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
+ break;
+ case XFS_ATTR3_RMT_MAGIC:
+ /*
+ * Remote attr blocks are written synchronously, rather than
+ * being logged. That means they do not contain a valid LSN
+ * (i.e. transactionally ordered) in them, and hence any time we
+ * see a buffer to replay over the top of a remote attribute
+ * block we should simply do so.
+ */
+ goto recover_immediately;
+ case XFS_SB_MAGIC:
+ /*
+ * superblock uuids are magic. We may or may not have a
+ * sb_meta_uuid on disk, but it will be set in the in-core
+ * superblock. We set the uuid pointer for verification
+ * according to the superblock feature mask to ensure we check
+ * the relevant UUID in the superblock.
+ */
+ lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
+ if (xfs_sb_version_hasmetauuid(&mp->m_sb))
+ uuid = &((struct xfs_dsb *)blk)->sb_meta_uuid;
+ else
+ uuid = &((struct xfs_dsb *)blk)->sb_uuid;
+ break;
+ default:
+ break;
+ }
+
+ if (lsn != (xfs_lsn_t)-1) {
+ if (!uuid_equal(&mp->m_sb.sb_meta_uuid, uuid))
+ goto recover_immediately;
+ return lsn;
+ }
+
+ magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic);
+ switch (magicda) {
+ case XFS_DIR3_LEAF1_MAGIC:
+ case XFS_DIR3_LEAFN_MAGIC:
+ case XFS_DA3_NODE_MAGIC:
+ lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
+ uuid = &((struct xfs_da3_blkinfo *)blk)->uuid;
+ break;
+ default:
+ break;
+ }
+
+ if (lsn != (xfs_lsn_t)-1) {
+ if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
+ goto recover_immediately;
+ return lsn;
+ }
+
+ /*
+ * We do individual object checks on dquot and inode buffers as they
+ * have their own individual LSN records. Also, we could have a stale
+ * buffer here, so we have to at least recognise these buffer types.
+ *
+ * A noted complexity here is inode unlinked list processing - it logs
+ * the inode directly in the buffer, but we don't know which inodes have
+ * been modified, and there is no global buffer LSN. Hence we need to
+ * recover all inode buffer types immediately. This problem will be
+ * fixed by logical logging of the unlinked list modifications.
+ */
+ magic16 = be16_to_cpu(*(__be16 *)blk);
+ switch (magic16) {
+ case XFS_DQUOT_MAGIC:
+ case XFS_DINODE_MAGIC:
+ goto recover_immediately;
+ default:
+ break;
+ }
+
+ /* unknown buffer contents, recover immediately */
+
+recover_immediately:
+ return (xfs_lsn_t)-1;
+
+}
+
+/*
+ * This routine replays a modification made to a buffer at runtime.
+ * There are two types of buffer, regular and inode, which are handled
+ * differently. For inode buffers we only recover a specific set of
+ * data, namely the inode di_next_unlinked fields, because all other
+ * inode data is logged via inode records and any data we replay here
+ * which overlaps that may be stale.
+ *
+ * When meta-data buffers are freed at run time we log a buffer item
+ * with the XFS_BLF_CANCEL bit set to indicate that previous copies
+ * of the buffer in the log should not be replayed at recovery time.
+ * This is so that if the blocks covered by the buffer are reused for
+ * file data before we crash we don't end up replaying old, freed
+ * meta-data into a user's file.
+ *
+ * To handle the cancellation of buffer log items, we make two passes
+ * over the log during recovery. During the first we build a table of
+ * those buffers which have been cancelled, and during the second we
+ * only replay those buffers which do not have corresponding cancel
+ * records in the table. See xlog_recover_buf_pass[1,2] above
+ * for more details on the implementation of the table of cancel records.
+ */
+STATIC int
+xlog_recover_buf_commit_pass2(
+ struct xlog *log,
+ struct list_head *buffer_list,
+ struct xlog_recover_item *item,
+ xfs_lsn_t current_lsn)
+{
+ struct xfs_buf_log_format *buf_f = item->ri_buf[0].i_addr;
+ struct xfs_mount *mp = log->l_mp;
+ struct xfs_buf *bp;
+ int error;
+ uint buf_flags;
+ xfs_lsn_t lsn;
+
+ /*
+ * In this pass we want to recover only those buffers which have not
+ * been cancelled and are not cancellation buffers themselves.
+ */
+ if (buf_f->blf_flags & XFS_BLF_CANCEL) {
+ if (xlog_put_buffer_cancelled(log, buf_f->blf_blkno,
+ buf_f->blf_len))
+ goto cancelled;
+ } else {
+
+ if (xlog_is_buffer_cancelled(log, buf_f->blf_blkno,
+ buf_f->blf_len))
+ goto cancelled;
+ }
+
+ trace_xfs_log_recover_buf_recover(log, buf_f);
+
+ buf_flags = 0;
+ if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
+ buf_flags |= XBF_UNMAPPED;
+
+ error = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
+ buf_flags, &bp, NULL);
+ if (error)
+ return error;
+
+ /*
+ * Recover the buffer only if we get an LSN from it and it's less than
+ * the lsn of the transaction we are replaying.
+ *
+ * Note that we have to be extremely careful of readahead here.
+ * Readahead does not attach verifiers to the buffers, so if we skip
+ * the replay because the LSN found in the buffer is more recent than
+ * the current transaction, then we need to attach the verifier
+ * directly. Failing to do so lets future recovery actions (e.g. EFI
+ * and unlinked list recovery) operate on buffers that never had a
+ * verifier attached, which can leave blocks on disk with the correct
+ * content but a stale CRC.
+ *
+ * It is safe to assume these clean buffers are currently up to date.
+ * If the buffer is dirtied by a later transaction being replayed, then
+ * the verifier will be reset to match whatever recovery turns that
+ * buffer into.
+ */
+ lsn = xlog_recover_get_buf_lsn(mp, bp);
+ if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
+ trace_xfs_log_recover_buf_skip(log, buf_f);
+ xlog_recover_validate_buf_type(mp, bp, buf_f, NULLCOMMITLSN);
+ goto out_release;
+ }
+
+ if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
+ error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
+ if (error)
+ goto out_release;
+ } else if (buf_f->blf_flags &
+ (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
+ bool dirty;
+
+ dirty = xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
+ if (!dirty)
+ goto out_release;
+ } else {
+ xlog_recover_do_reg_buffer(mp, item, bp, buf_f, current_lsn);
+ }
+
+ /*
+ * Perform delayed write on the buffer. Asynchronous writes will be
+ * slower when taking into account all the buffers to be flushed.
+ *
+ * Also make sure that only inode buffers with good sizes stay in
+ * the buffer cache. The kernel moves inodes in buffers of 1 block
+ * or inode_cluster_size bytes, whichever is bigger. The inode
+ * buffers in the log can be a different size if the log was generated
+ * by an older kernel using unclustered inode buffers or a newer kernel
+ * running with a different inode cluster size. Regardless, if
+ * the inode buffer size isn't max(blocksize, inode_cluster_size)
+ * for *our* value of inode_cluster_size, then we need to keep
+ * the buffer out of the buffer cache so that the buffer won't
+ * overlap with future reads of those inodes.
+ */
+ if (XFS_DINODE_MAGIC ==
+ be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
+ (BBTOB(bp->b_length) != M_IGEO(log->l_mp)->inode_cluster_size)) {
+ xfs_buf_stale(bp);
+ error = xfs_bwrite(bp);
+ } else {
+ ASSERT(bp->b_mount == mp);
+ bp->b_iodone = xlog_recover_iodone;
+ xfs_buf_delwri_queue(bp, buffer_list);
+ }
+
+out_release:
+ xfs_buf_relse(bp);
+ return error;
+cancelled:
+ trace_xfs_log_recover_buf_cancel(log, buf_f);
+ return 0;
+}
+
+const struct xlog_recover_item_ops xlog_buf_item_ops = {
+ .item_type = XFS_LI_BUF,
+ .reorder = xlog_recover_buf_reorder,
+ .ra_pass2 = xlog_recover_buf_ra_pass2,
+ .commit_pass1 = xlog_recover_buf_commit_pass1,
+ .commit_pass2 = xlog_recover_buf_commit_pass2,
+};
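The LSN gate in xlog_recover_buf_commit_pass2() above is the heart of v5 buffer recovery: a change is replayed only when the object already on disk predates it. A minimal standalone sketch of that decision, assuming LSNs compare as plain signed 64-bit values (the kernel's XFS_LSN_CMP() additionally splits the LSN into cycle and block fields); should_replay() is an illustrative name, not a kernel function:

#include <stdbool.h>
#include <stdint.h>

typedef int64_t xfs_lsn_t;

/*
 * Sketch only: should a logged change committed at current_lsn be
 * replayed over a block whose stamped LSN is disk_lsn? A disk_lsn of
 * -1 means the block was not recognised (or the filesystem is v4) and
 * must be recovered immediately.
 */
static bool should_replay(xfs_lsn_t disk_lsn, xfs_lsn_t current_lsn)
{
	if (disk_lsn == 0 || disk_lsn == (xfs_lsn_t)-1)
		return true;		/* no valid LSN: recover */
	return disk_lsn < current_lsn;	/* replay only older contents */
}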
diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
index 871ec22c9aee..66deddd5e296 100644
--- a/fs/xfs/xfs_dir2_readdir.c
+++ b/fs/xfs/xfs_dir2_readdir.c
@@ -524,7 +524,7 @@ xfs_readdir(
args.geo = dp->i_mount->m_dir_geo;
args.trans = tp;
- if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL)
+ if (dp->i_df.if_format == XFS_DINODE_FMT_LOCAL)
rval = xfs_dir2_sf_getdents(&args, ctx);
else if ((rval = xfs_dir2_isblock(&args, &v)))
;
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index af2c8e5ceea0..d5b7f03e93c8 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -75,7 +75,7 @@ xfs_qm_adjust_dqlimits(
int prealloc = 0;
ASSERT(d->d_id);
- defq = xfs_get_defquota(dq, q);
+ defq = xfs_get_defquota(q, xfs_dquot_type(dq));
if (defq->bsoftlimit && !d->d_blk_softlimit) {
d->d_blk_softlimit = cpu_to_be64(defq->bsoftlimit);
@@ -114,9 +114,14 @@ xfs_qm_adjust_dqlimits(
void
xfs_qm_adjust_dqtimers(
struct xfs_mount *mp,
- struct xfs_disk_dquot *d)
+ struct xfs_dquot *dq)
{
+ struct xfs_quotainfo *qi = mp->m_quotainfo;
+ struct xfs_disk_dquot *d = &dq->q_core;
+ struct xfs_def_quota *defq;
+
ASSERT(d->d_id);
+ defq = xfs_get_defquota(qi, xfs_dquot_type(dq));
#ifdef DEBUG
if (d->d_blk_hardlimit)
@@ -138,7 +143,7 @@ xfs_qm_adjust_dqtimers(
(be64_to_cpu(d->d_bcount) >
be64_to_cpu(d->d_blk_hardlimit)))) {
d->d_btimer = cpu_to_be32(ktime_get_real_seconds() +
- mp->m_quotainfo->qi_btimelimit);
+ defq->btimelimit);
} else {
d->d_bwarns = 0;
}
@@ -161,7 +166,7 @@ xfs_qm_adjust_dqtimers(
(be64_to_cpu(d->d_icount) >
be64_to_cpu(d->d_ino_hardlimit)))) {
d->d_itimer = cpu_to_be32(ktime_get_real_seconds() +
- mp->m_quotainfo->qi_itimelimit);
+ defq->itimelimit);
} else {
d->d_iwarns = 0;
}
@@ -184,7 +189,7 @@ xfs_qm_adjust_dqtimers(
(be64_to_cpu(d->d_rtbcount) >
be64_to_cpu(d->d_rtb_hardlimit)))) {
d->d_rtbtimer = cpu_to_be32(ktime_get_real_seconds() +
- mp->m_quotainfo->qi_rtbtimelimit);
+ defq->rtbtimelimit);
} else {
d->d_rtbwarns = 0;
}
@@ -205,16 +210,18 @@ xfs_qm_adjust_dqtimers(
*/
STATIC void
xfs_qm_init_dquot_blk(
- xfs_trans_t *tp,
- xfs_mount_t *mp,
- xfs_dqid_t id,
- uint type,
- xfs_buf_t *bp)
+ struct xfs_trans *tp,
+ struct xfs_mount *mp,
+ xfs_dqid_t id,
+ uint type,
+ struct xfs_buf *bp)
{
struct xfs_quotainfo *q = mp->m_quotainfo;
- xfs_dqblk_t *d;
- xfs_dqid_t curid;
- int i;
+ struct xfs_dqblk *d;
+ xfs_dqid_t curid;
+ unsigned int qflag;
+ unsigned int blftype;
+ int i;
ASSERT(tp);
ASSERT(xfs_buf_islocked(bp));
@@ -238,11 +245,39 @@ xfs_qm_init_dquot_blk(
}
}
- xfs_trans_dquot_buf(tp, bp,
- (type & XFS_DQ_USER ? XFS_BLF_UDQUOT_BUF :
- ((type & XFS_DQ_PROJ) ? XFS_BLF_PDQUOT_BUF :
- XFS_BLF_GDQUOT_BUF)));
- xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
+ if (type & XFS_DQ_USER) {
+ qflag = XFS_UQUOTA_CHKD;
+ blftype = XFS_BLF_UDQUOT_BUF;
+ } else if (type & XFS_DQ_PROJ) {
+ qflag = XFS_PQUOTA_CHKD;
+ blftype = XFS_BLF_PDQUOT_BUF;
+ } else {
+ qflag = XFS_GQUOTA_CHKD;
+ blftype = XFS_BLF_GDQUOT_BUF;
+ }
+
+ xfs_trans_dquot_buf(tp, bp, blftype);
+
+ /*
+ * quotacheck uses delayed writes to update all the dquots on disk in an
+ * efficient manner instead of logging the individual dquot changes as
+ * they are made. However if we log the buffer allocated here and crash
+ * after quotacheck while the logged initialisation is still in the
+ * active region of the log, log recovery can replay the dquot buffer
+ * initialisation over the top of the checked dquots and corrupt quota
+ * accounting.
+ *
+ * To avoid this problem, quotacheck cannot log the initialised buffer.
+ * We must still dirty the buffer and write it back before the
+ * allocation transaction clears the log. Therefore, mark the buffer as
+ * ordered instead of logging it directly. This is safe for quotacheck
+ * because it detects and repairs allocated but uninitialized dquot blocks
+ * in the quota inodes.
+ */
+ if (!(mp->m_qflags & qflag))
+ xfs_trans_ordered_buf(tp, bp);
+ else
+ xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
}
/*
@@ -1021,6 +1056,7 @@ xfs_qm_dqflush_done(
struct xfs_dq_logitem *qip = (struct xfs_dq_logitem *)lip;
struct xfs_dquot *dqp = qip->qli_dquot;
struct xfs_ail *ailp = lip->li_ailp;
+ xfs_lsn_t tail_lsn;
/*
* We only want to pull the item from the AIL if its
@@ -1034,10 +1070,11 @@ xfs_qm_dqflush_done(
((lip->li_lsn == qip->qli_flush_lsn) ||
test_bit(XFS_LI_FAILED, &lip->li_flags))) {
- /* xfs_trans_ail_delete() drops the AIL lock. */
spin_lock(&ailp->ail_lock);
if (lip->li_lsn == qip->qli_flush_lsn) {
- xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
+ /* xfs_ail_update_finish() drops the AIL lock */
+ tail_lsn = xfs_ail_delete_one(ailp, lip);
+ xfs_ail_update_finish(ailp, tail_lsn);
} else {
/*
* Clear the failed state since we are about to drop the
@@ -1068,6 +1105,7 @@ xfs_qm_dqflush(
struct xfs_buf **bpp)
{
struct xfs_mount *mp = dqp->q_mount;
+ struct xfs_log_item *lip = &dqp->q_logitem.qli_item;
struct xfs_buf *bp;
struct xfs_dqblk *dqb;
struct xfs_disk_dquot *ddqp;
@@ -1084,31 +1122,15 @@ xfs_qm_dqflush(
xfs_qm_dqunpin_wait(dqp);
/*
- * This may have been unpinned because the filesystem is shutting
- * down forcibly. If that's the case we must not write this dquot
- * to disk, because the log record didn't make it to disk.
- *
- * We also have to remove the log item from the AIL in this case,
- * as we wait for an emptry AIL as part of the unmount process.
- */
- if (XFS_FORCED_SHUTDOWN(mp)) {
- struct xfs_log_item *lip = &dqp->q_logitem.qli_item;
- dqp->dq_flags &= ~XFS_DQ_DIRTY;
-
- xfs_trans_ail_remove(lip, SHUTDOWN_CORRUPT_INCORE);
-
- error = -EIO;
- goto out_unlock;
- }
-
- /*
* Get the buffer containing the on-disk dquot
*/
error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
mp->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK,
&bp, &xfs_dquot_buf_ops);
- if (error)
+ if (error == -EAGAIN)
goto out_unlock;
+ if (error)
+ goto out_abort;
/*
* Calculate the location of the dquot inside the buffer.
@@ -1116,17 +1138,15 @@ xfs_qm_dqflush(
dqb = bp->b_addr + dqp->q_bufoffset;
ddqp = &dqb->dd_diskdq;
- /*
- * A simple sanity check in case we got a corrupted dquot.
- */
- fa = xfs_dqblk_verify(mp, dqb, be32_to_cpu(ddqp->d_id), 0);
+ /* sanity check the in-core structure before we flush */
+ fa = xfs_dquot_verify(mp, &dqp->q_core, be32_to_cpu(dqp->q_core.d_id),
+ 0);
if (fa) {
xfs_alert(mp, "corrupt dquot ID 0x%x in memory at %pS",
- be32_to_cpu(ddqp->d_id), fa);
+ be32_to_cpu(dqp->q_core.d_id), fa);
xfs_buf_relse(bp);
- xfs_dqfunlock(dqp);
- xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
- return -EFSCORRUPTED;
+ error = -EFSCORRUPTED;
+ goto out_abort;
}
/* This is the only portion of data that needs to persist */
@@ -1175,6 +1195,10 @@ xfs_qm_dqflush(
*bpp = bp;
return 0;
+out_abort:
+ dqp->dq_flags &= ~XFS_DQ_DIRTY;
+ xfs_trans_ail_delete(lip, 0);
+ xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
out_unlock:
xfs_dqfunlock(dqp);
return error;
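The ordered-buffer logic in xfs_qm_init_dquot_blk() above generalises to a small pattern: a buffer that must reach disk but whose contents must never be replayed from the log is attached to the transaction as ordered. A reduced sketch using the kernel calls shown in the hunk; the helper name and parameters are illustrative:

/*
 * Sketch: dirty a freshly initialised dquot buffer. While quotacheck
 * for this quota type is still pending (its *_CHKD bit in
 * mount_qflags is clear), order the buffer so it is written back but
 * never replayed over checked dquots by log recovery.
 */
static void dirty_dquot_buf(struct xfs_trans *tp, struct xfs_buf *bp,
			    unsigned int mount_qflags, unsigned int qflag,
			    unsigned int blftype, unsigned int buflen)
{
	xfs_trans_dquot_buf(tp, bp, blftype);
	if (!(mount_qflags & qflag))
		xfs_trans_ordered_buf(tp, bp);
	else
		xfs_trans_log_buf(tp, bp, 0, buflen - 1);
}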
diff --git a/fs/xfs/xfs_dquot.h b/fs/xfs/xfs_dquot.h
index fe3e46df604b..71e36c85e20b 100644
--- a/fs/xfs/xfs_dquot.h
+++ b/fs/xfs/xfs_dquot.h
@@ -154,7 +154,7 @@ void xfs_qm_dqdestroy(struct xfs_dquot *dqp);
int xfs_qm_dqflush(struct xfs_dquot *dqp, struct xfs_buf **bpp);
void xfs_qm_dqunpin_wait(struct xfs_dquot *dqp);
void xfs_qm_adjust_dqtimers(struct xfs_mount *mp,
- struct xfs_disk_dquot *d);
+ struct xfs_dquot *d);
void xfs_qm_adjust_dqlimits(struct xfs_mount *mp,
struct xfs_dquot *d);
xfs_dqid_t xfs_qm_id_for_quotatype(struct xfs_inode *ip, uint type);
diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c
index baad1748d0d1..349c92d26570 100644
--- a/fs/xfs/xfs_dquot_item.c
+++ b/fs/xfs/xfs_dquot_item.c
@@ -145,21 +145,6 @@ xfs_qm_dquot_logitem_push(
if (atomic_read(&dqp->q_pincount) > 0)
return XFS_ITEM_PINNED;
- /*
- * The buffer containing this item failed to be written back
- * previously. Resubmit the buffer for IO
- */
- if (test_bit(XFS_LI_FAILED, &lip->li_flags)) {
- if (!xfs_buf_trylock(bp))
- return XFS_ITEM_LOCKED;
-
- if (!xfs_buf_resubmit_failed_buffers(bp, buffer_list))
- rval = XFS_ITEM_FLUSHING;
-
- xfs_buf_unlock(bp);
- return rval;
- }
-
if (!xfs_dqlock_nowait(dqp))
return XFS_ITEM_LOCKED;
@@ -358,7 +343,7 @@ xfs_qm_qoff_logitem_relse(
ASSERT(test_bit(XFS_LI_IN_AIL, &lip->li_flags) ||
test_bit(XFS_LI_ABORTED, &lip->li_flags) ||
XFS_FORCED_SHUTDOWN(lip->li_mountp));
- xfs_trans_ail_remove(lip, SHUTDOWN_LOG_IO_ERROR);
+ xfs_trans_ail_delete(lip, 0);
kmem_free(lip->li_lv_shadow);
kmem_free(qoff);
}
diff --git a/fs/xfs/xfs_dquot_item_recover.c b/fs/xfs/xfs_dquot_item_recover.c
new file mode 100644
index 000000000000..3400be4c88f0
--- /dev/null
+++ b/fs/xfs/xfs_dquot_item_recover.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc.
+ * All Rights Reserved.
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_mount.h"
+#include "xfs_inode.h"
+#include "xfs_quota.h"
+#include "xfs_trans.h"
+#include "xfs_buf_item.h"
+#include "xfs_trans_priv.h"
+#include "xfs_qm.h"
+#include "xfs_log.h"
+#include "xfs_log_priv.h"
+#include "xfs_log_recover.h"
+
+STATIC void
+xlog_recover_dquot_ra_pass2(
+ struct xlog *log,
+ struct xlog_recover_item *item)
+{
+ struct xfs_mount *mp = log->l_mp;
+ struct xfs_disk_dquot *recddq;
+ struct xfs_dq_logformat *dq_f;
+ uint type;
+
+ if (mp->m_qflags == 0)
+ return;
+
+ recddq = item->ri_buf[1].i_addr;
+ if (recddq == NULL)
+ return;
+ if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot))
+ return;
+
+ type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
+ ASSERT(type);
+ if (log->l_quotaoffs_flag & type)
+ return;
+
+ dq_f = item->ri_buf[0].i_addr;
+ ASSERT(dq_f);
+ ASSERT(dq_f->qlf_len == 1);
+
+ xlog_buf_readahead(log, dq_f->qlf_blkno,
+ XFS_FSB_TO_BB(mp, dq_f->qlf_len),
+ &xfs_dquot_buf_ra_ops);
+}
+
+/*
+ * Recover a dquot record
+ */
+STATIC int
+xlog_recover_dquot_commit_pass2(
+ struct xlog *log,
+ struct list_head *buffer_list,
+ struct xlog_recover_item *item,
+ xfs_lsn_t current_lsn)
+{
+ struct xfs_mount *mp = log->l_mp;
+ struct xfs_buf *bp;
+ struct xfs_disk_dquot *ddq, *recddq;
+ struct xfs_dq_logformat *dq_f;
+ xfs_failaddr_t fa;
+ int error;
+ uint type;
+
+ /*
+ * Filesystems are required to send in quota flags at mount time.
+ */
+ if (mp->m_qflags == 0)
+ return 0;
+
+ recddq = item->ri_buf[1].i_addr;
+ if (recddq == NULL) {
+ xfs_alert(log->l_mp, "NULL dquot in %s.", __func__);
+ return -EFSCORRUPTED;
+ }
+ if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot)) {
+ xfs_alert(log->l_mp, "dquot too small (%d) in %s.",
+ item->ri_buf[1].i_len, __func__);
+ return -EFSCORRUPTED;
+ }
+
+ /*
+ * This type of quota was turned off, so ignore this record.
+ */
+ type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
+ ASSERT(type);
+ if (log->l_quotaoffs_flag & type)
+ return 0;
+
+ /*
+ * At this point we know that quota was _not_ turned off.
+ * Since the mount flags do not indicate otherwise, quota must be
+ * on, and the dquot needs to be replayed.
+ * Remember that we may not have fully recovered the superblock yet,
+ * so we can't do the usual trick of looking at the SB quota bits.
+ *
+ * The other possibility, of course, is that the quota subsystem was
+ * removed since the last mount - ENOSYS.
+ */
+ dq_f = item->ri_buf[0].i_addr;
+ ASSERT(dq_f);
+ fa = xfs_dquot_verify(mp, recddq, dq_f->qlf_id, 0);
+ if (fa) {
+ xfs_alert(mp, "corrupt dquot ID 0x%x in log at %pS",
+ dq_f->qlf_id, fa);
+ return -EFSCORRUPTED;
+ }
+ ASSERT(dq_f->qlf_len == 1);
+
+ /*
+ * At this point we are assuming that the dquots have been allocated
+ * and hence the buffer has valid dquots stamped in it. It should,
+ * therefore, pass verifier validation. If the dquot is bad, the read
+ * will return an error here, so we don't need to specifically check
+ * the dquot in the buffer after the verifier has run.
+ */
+ error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dq_f->qlf_blkno,
+ XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp,
+ &xfs_dquot_buf_ops);
+ if (error)
+ return error;
+
+ ASSERT(bp);
+ ddq = xfs_buf_offset(bp, dq_f->qlf_boffset);
+
+ /*
+ * If the dquot has an LSN in it, recover the dquot only if it's less
+ * than the lsn of the transaction we are replaying.
+ */
+ if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddq;
+ xfs_lsn_t lsn = be64_to_cpu(dqb->dd_lsn);
+
+ if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
+ goto out_release;
+ }
+ }
+
+ memcpy(ddq, recddq, item->ri_buf[1].i_len);
+ if (xfs_sb_version_hascrc(&mp->m_sb)) {
+ xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk),
+ XFS_DQUOT_CRC_OFF);
+ }
+
+ ASSERT(dq_f->qlf_size == 2);
+ ASSERT(bp->b_mount == mp);
+ bp->b_iodone = xlog_recover_iodone;
+ xfs_buf_delwri_queue(bp, buffer_list);
+
+out_release:
+ xfs_buf_relse(bp);
+ return 0;
+}
+
+const struct xlog_recover_item_ops xlog_dquot_item_ops = {
+ .item_type = XFS_LI_DQUOT,
+ .ra_pass2 = xlog_recover_dquot_ra_pass2,
+ .commit_pass2 = xlog_recover_dquot_commit_pass2,
+};
+
+/*
+ * Recover QUOTAOFF records. We simply make a note of it in the xlog
+ * structure, so that we know not to do any dquot item or dquot buffer
+ * recovery of that type.
+ */
+STATIC int
+xlog_recover_quotaoff_commit_pass1(
+ struct xlog *log,
+ struct xlog_recover_item *item)
+{
+ struct xfs_qoff_logformat *qoff_f = item->ri_buf[0].i_addr;
+ ASSERT(qoff_f);
+
+ /*
+ * The logitem format's flag tells us if this was user quotaoff,
+ * group/project quotaoff or both.
+ */
+ if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
+ log->l_quotaoffs_flag |= XFS_DQ_USER;
+ if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
+ log->l_quotaoffs_flag |= XFS_DQ_PROJ;
+ if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
+ log->l_quotaoffs_flag |= XFS_DQ_GROUP;
+
+ return 0;
+}
+
+const struct xlog_recover_item_ops xlog_quotaoff_item_ops = {
+ .item_type = XFS_LI_QUOTAOFF,
+ .commit_pass1 = xlog_recover_quotaoff_commit_pass1,
+ /* nothing to commit in pass2 */
+};
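Taken together, the two ops structures above form a small two-pass protocol: pass 1 records which quota types were turned off, and pass 2 consults that record before replaying any dquot item or dquot buffer. A condensed sketch of the gate, built from the fields used above (the helper name is illustrative):

/*
 * Sketch: pass 2 skips a dquot record when pass 1 saw a QUOTAOFF for
 * its type. d_flags comes from the on-disk dquot being replayed.
 */
static bool dquot_replay_suppressed(struct xlog *log, unsigned int d_flags)
{
	unsigned int type = d_flags &
			(XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);

	return (log->l_quotaoffs_flag & type) != 0;
}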
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
index a21e9cc6516a..7f6e20899473 100644
--- a/fs/xfs/xfs_error.c
+++ b/fs/xfs/xfs_error.c
@@ -53,6 +53,7 @@ static unsigned int xfs_errortag_random_default[] = {
XFS_RANDOM_FORCE_SCRUB_REPAIR,
XFS_RANDOM_FORCE_SUMMARY_RECALC,
XFS_RANDOM_IUNLINK_FALLBACK,
+ XFS_RANDOM_BUF_IOERROR,
};
struct xfs_errortag_attr {
@@ -162,6 +163,7 @@ XFS_ERRORTAG_ATTR_RW(buf_lru_ref, XFS_ERRTAG_BUF_LRU_REF);
XFS_ERRORTAG_ATTR_RW(force_repair, XFS_ERRTAG_FORCE_SCRUB_REPAIR);
XFS_ERRORTAG_ATTR_RW(bad_summary, XFS_ERRTAG_FORCE_SUMMARY_RECALC);
XFS_ERRORTAG_ATTR_RW(iunlink_fallback, XFS_ERRTAG_IUNLINK_FALLBACK);
+XFS_ERRORTAG_ATTR_RW(buf_ioerror, XFS_ERRTAG_BUF_IOERROR);
static struct attribute *xfs_errortag_attrs[] = {
XFS_ERRORTAG_ATTR_LIST(noerror),
@@ -199,6 +201,7 @@ static struct attribute *xfs_errortag_attrs[] = {
XFS_ERRORTAG_ATTR_LIST(force_repair),
XFS_ERRORTAG_ATTR_LIST(bad_summary),
XFS_ERRORTAG_ATTR_LIST(iunlink_fallback),
+ XFS_ERRORTAG_ATTR_LIST(buf_ioerror),
NULL,
};
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index 6ea847f6e298..b9c333bae0a1 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -22,16 +22,20 @@
#include "xfs_bmap.h"
#include "xfs_trace.h"
#include "xfs_error.h"
+#include "xfs_log_priv.h"
+#include "xfs_log_recover.h"
kmem_zone_t *xfs_efi_zone;
kmem_zone_t *xfs_efd_zone;
+static const struct xfs_item_ops xfs_efi_item_ops;
+
static inline struct xfs_efi_log_item *EFI_ITEM(struct xfs_log_item *lip)
{
return container_of(lip, struct xfs_efi_log_item, efi_item);
}
-void
+STATIC void
xfs_efi_item_free(
struct xfs_efi_log_item *efip)
{
@@ -49,13 +53,13 @@ xfs_efi_item_free(
* committed vs unpin operations in bulk insert operations. Hence the reference
* count to ensure only the last caller frees the EFI.
*/
-void
+STATIC void
xfs_efi_release(
struct xfs_efi_log_item *efip)
{
ASSERT(atomic_read(&efip->efi_refcount) > 0);
if (atomic_dec_and_test(&efip->efi_refcount)) {
- xfs_trans_ail_remove(&efip->efi_item, SHUTDOWN_LOG_IO_ERROR);
+ xfs_trans_ail_delete(&efip->efi_item, SHUTDOWN_LOG_IO_ERROR);
xfs_efi_item_free(efip);
}
}
@@ -139,18 +143,10 @@ xfs_efi_item_release(
xfs_efi_release(EFI_ITEM(lip));
}
-static const struct xfs_item_ops xfs_efi_item_ops = {
- .iop_size = xfs_efi_item_size,
- .iop_format = xfs_efi_item_format,
- .iop_unpin = xfs_efi_item_unpin,
- .iop_release = xfs_efi_item_release,
-};
-
-
/*
* Allocate and initialize an efi item with the given number of extents.
*/
-struct xfs_efi_log_item *
+STATIC struct xfs_efi_log_item *
xfs_efi_init(
struct xfs_mount *mp,
uint nextents)
@@ -161,7 +157,7 @@ xfs_efi_init(
ASSERT(nextents > 0);
if (nextents > XFS_EFI_MAX_FAST_EXTENTS) {
- size = (uint)(sizeof(xfs_efi_log_item_t) +
+ size = (uint)(sizeof(struct xfs_efi_log_item) +
((nextents - 1) * sizeof(xfs_extent_t)));
efip = kmem_zalloc(size, 0);
} else {
@@ -184,7 +180,7 @@ xfs_efi_init(
* one of which will be the native format for this kernel.
* It will handle the conversion of formats if necessary.
*/
-int
+STATIC int
xfs_efi_copy_format(xfs_log_iovec_t *buf, xfs_efi_log_format_t *dst_efi_fmt)
{
xfs_efi_log_format_t *src_efi_fmt = buf->i_addr;
@@ -412,41 +408,16 @@ xfs_extent_free_diff_items(
XFS_FSB_TO_AGNO(mp, rb->xefi_startblock);
}
-/* Get an EFI. */
-STATIC void *
-xfs_extent_free_create_intent(
- struct xfs_trans *tp,
- unsigned int count)
-{
- struct xfs_efi_log_item *efip;
-
- ASSERT(tp != NULL);
- ASSERT(count > 0);
-
- efip = xfs_efi_init(tp->t_mountp, count);
- ASSERT(efip != NULL);
-
- /*
- * Get a log_item_desc to point at the new item.
- */
- xfs_trans_add_item(tp, &efip->efi_item);
- return efip;
-}
-
/* Log a free extent to the intent item. */
STATIC void
xfs_extent_free_log_item(
struct xfs_trans *tp,
- void *intent,
- struct list_head *item)
+ struct xfs_efi_log_item *efip,
+ struct xfs_extent_free_item *free)
{
- struct xfs_efi_log_item *efip = intent;
- struct xfs_extent_free_item *free;
uint next_extent;
struct xfs_extent *extp;
- free = container_of(item, struct xfs_extent_free_item, xefi_list);
-
tp->t_flags |= XFS_TRANS_DIRTY;
set_bit(XFS_LI_DIRTY, &efip->efi_item.li_flags);
@@ -462,29 +433,50 @@ xfs_extent_free_log_item(
extp->ext_len = free->xefi_blockcount;
}
+static struct xfs_log_item *
+xfs_extent_free_create_intent(
+ struct xfs_trans *tp,
+ struct list_head *items,
+ unsigned int count,
+ bool sort)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_efi_log_item *efip = xfs_efi_init(mp, count);
+ struct xfs_extent_free_item *free;
+
+ ASSERT(count > 0);
+
+ xfs_trans_add_item(tp, &efip->efi_item);
+ if (sort)
+ list_sort(mp, items, xfs_extent_free_diff_items);
+ list_for_each_entry(free, items, xefi_list)
+ xfs_extent_free_log_item(tp, efip, free);
+ return &efip->efi_item;
+}
+
/* Get an EFD so we can process all the free extents. */
-STATIC void *
+static struct xfs_log_item *
xfs_extent_free_create_done(
struct xfs_trans *tp,
- void *intent,
+ struct xfs_log_item *intent,
unsigned int count)
{
- return xfs_trans_get_efd(tp, intent, count);
+ return &xfs_trans_get_efd(tp, EFI_ITEM(intent), count)->efd_item;
}
/* Process a free extent. */
STATIC int
xfs_extent_free_finish_item(
struct xfs_trans *tp,
+ struct xfs_log_item *done,
struct list_head *item,
- void *done_item,
- void **state)
+ struct xfs_btree_cur **state)
{
struct xfs_extent_free_item *free;
int error;
free = container_of(item, struct xfs_extent_free_item, xefi_list);
- error = xfs_trans_free_extent(tp, done_item,
+ error = xfs_trans_free_extent(tp, EFD_ITEM(done),
free->xefi_startblock,
free->xefi_blockcount,
&free->xefi_oinfo, free->xefi_skip_discard);
@@ -495,9 +487,9 @@ xfs_extent_free_finish_item(
/* Abort all pending EFIs. */
STATIC void
xfs_extent_free_abort_intent(
- void *intent)
+ struct xfs_log_item *intent)
{
- xfs_efi_release(intent);
+ xfs_efi_release(EFI_ITEM(intent));
}
/* Cancel a free extent. */
@@ -513,10 +505,8 @@ xfs_extent_free_cancel_item(
const struct xfs_defer_op_type xfs_extent_free_defer_type = {
.max_items = XFS_EFI_MAX_FAST_EXTENTS,
- .diff_items = xfs_extent_free_diff_items,
.create_intent = xfs_extent_free_create_intent,
.abort_intent = xfs_extent_free_abort_intent,
- .log_item = xfs_extent_free_log_item,
.create_done = xfs_extent_free_create_done,
.finish_item = xfs_extent_free_finish_item,
.cancel_item = xfs_extent_free_cancel_item,
@@ -529,12 +519,12 @@ const struct xfs_defer_op_type xfs_extent_free_defer_type = {
STATIC int
xfs_agfl_free_finish_item(
struct xfs_trans *tp,
+ struct xfs_log_item *done,
struct list_head *item,
- void *done_item,
- void **state)
+ struct xfs_btree_cur **state)
{
struct xfs_mount *mp = tp->t_mountp;
- struct xfs_efd_log_item *efdp = done_item;
+ struct xfs_efd_log_item *efdp = EFD_ITEM(done);
struct xfs_extent_free_item *free;
struct xfs_extent *extp;
struct xfs_buf *agbp;
@@ -579,10 +569,8 @@ xfs_agfl_free_finish_item(
/* sub-type with special handling for AGFL deferred frees */
const struct xfs_defer_op_type xfs_agfl_free_defer_type = {
.max_items = XFS_EFI_MAX_FAST_EXTENTS,
- .diff_items = xfs_extent_free_diff_items,
.create_intent = xfs_extent_free_create_intent,
.abort_intent = xfs_extent_free_abort_intent,
- .log_item = xfs_extent_free_log_item,
.create_done = xfs_extent_free_create_done,
.finish_item = xfs_agfl_free_finish_item,
.cancel_item = xfs_extent_free_cancel_item,
@@ -592,19 +580,19 @@ const struct xfs_defer_op_type xfs_agfl_free_defer_type = {
* Process an extent free intent item that was recovered from
* the log. We need to free the extents that it describes.
*/
-int
-xfs_efi_recover(
- struct xfs_mount *mp,
- struct xfs_efi_log_item *efip)
+STATIC int
+xfs_efi_item_recover(
+ struct xfs_log_item *lip,
+ struct xfs_trans *parent_tp)
{
- struct xfs_efd_log_item *efdp;
- struct xfs_trans *tp;
- int i;
- int error = 0;
- xfs_extent_t *extp;
- xfs_fsblock_t startblock_fsb;
-
- ASSERT(!test_bit(XFS_EFI_RECOVERED, &efip->efi_flags));
+ struct xfs_efi_log_item *efip = EFI_ITEM(lip);
+ struct xfs_mount *mp = parent_tp->t_mountp;
+ struct xfs_efd_log_item *efdp;
+ struct xfs_trans *tp;
+ struct xfs_extent *extp;
+ xfs_fsblock_t startblock_fsb;
+ int i;
+ int error = 0;
/*
* First check the validity of the extents described by the
@@ -623,7 +611,6 @@ xfs_efi_recover(
* This will pull the EFI from the AIL and
* free the memory associated with it.
*/
- set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
xfs_efi_release(efip);
return -EFSCORRUPTED;
}
@@ -644,7 +631,6 @@ xfs_efi_recover(
}
- set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
error = xfs_trans_commit(tp);
return error;
@@ -652,3 +638,93 @@ abort_error:
xfs_trans_cancel(tp);
return error;
}
+
+STATIC bool
+xfs_efi_item_match(
+ struct xfs_log_item *lip,
+ uint64_t intent_id)
+{
+ return EFI_ITEM(lip)->efi_format.efi_id == intent_id;
+}
+
+static const struct xfs_item_ops xfs_efi_item_ops = {
+ .iop_size = xfs_efi_item_size,
+ .iop_format = xfs_efi_item_format,
+ .iop_unpin = xfs_efi_item_unpin,
+ .iop_release = xfs_efi_item_release,
+ .iop_recover = xfs_efi_item_recover,
+ .iop_match = xfs_efi_item_match,
+};
+
+/*
+ * This routine is called to create an in-core extent free intent
+ * item from the efi format structure which was logged on disk.
+ * It allocates an in-core efi, copies the extents from the format
+ * structure into it, and adds the efi to the AIL with the given
+ * LSN.
+ */
+STATIC int
+xlog_recover_efi_commit_pass2(
+ struct xlog *log,
+ struct list_head *buffer_list,
+ struct xlog_recover_item *item,
+ xfs_lsn_t lsn)
+{
+ struct xfs_mount *mp = log->l_mp;
+ struct xfs_efi_log_item *efip;
+ struct xfs_efi_log_format *efi_formatp;
+ int error;
+
+ efi_formatp = item->ri_buf[0].i_addr;
+
+ efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
+ error = xfs_efi_copy_format(&item->ri_buf[0], &efip->efi_format);
+ if (error) {
+ xfs_efi_item_free(efip);
+ return error;
+ }
+ atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents);
+ /*
+ * Insert the intent into the AIL directly and drop one reference so
+ * that finishing or canceling the work will drop the other.
+ */
+ xfs_trans_ail_insert(log->l_ailp, &efip->efi_item, lsn);
+ xfs_efi_release(efip);
+ return 0;
+}
+
+const struct xlog_recover_item_ops xlog_efi_item_ops = {
+ .item_type = XFS_LI_EFI,
+ .commit_pass2 = xlog_recover_efi_commit_pass2,
+};
+
+/*
+ * This routine is called when an EFD format structure is found in a committed
+ * transaction in the log. Its purpose is to cancel the corresponding EFI if it
+ * was still in the log. To do this it searches the AIL for the EFI with an id
+ * equal to that in the EFD format structure. If we find it we drop the EFD
+ * reference, which removes the EFI from the AIL and frees it.
+ */
+STATIC int
+xlog_recover_efd_commit_pass2(
+ struct xlog *log,
+ struct list_head *buffer_list,
+ struct xlog_recover_item *item,
+ xfs_lsn_t lsn)
+{
+ struct xfs_efd_log_format *efd_formatp;
+
+ efd_formatp = item->ri_buf[0].i_addr;
+ ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
+ ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
+ (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
+ ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
+
+ xlog_recover_release_intent(log, XFS_LI_EFI, efd_formatp->efd_efi_id);
+ return 0;
+}
+
+const struct xlog_recover_item_ops xlog_efd_item_ops = {
+ .item_type = XFS_LI_EFD,
+ .commit_pass2 = xlog_recover_efd_commit_pass2,
+};
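The EFI/EFD pair above is the template for all intent recovery: the recovered intent is inserted into the AIL at its commit LSN holding one reference, and the matching done item later finds it by id through ->iop_match() and drops that reference. A hedged sketch of the lookup xlog_recover_release_intent() is expected to perform (the real helper lives elsewhere in the log recovery code; this loop is illustrative and omits AIL locking and cursors):

/*
 * Sketch: find the AIL-resident intent of the given type whose id
 * matches, so the caller can release it.
 */
static struct xfs_log_item *
find_intent(struct xfs_ail *ailp, uint16_t item_type, uint64_t intent_id)
{
	struct xfs_log_item *lip;

	list_for_each_entry(lip, &ailp->ail_head, li_ail)
		if (lip->li_type == item_type &&
		    lip->li_ops->iop_match(lip, intent_id))
			return lip;
	return NULL;
}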
diff --git a/fs/xfs/xfs_extfree_item.h b/fs/xfs/xfs_extfree_item.h
index 16aaab06d4ec..cd2860c875bf 100644
--- a/fs/xfs/xfs_extfree_item.h
+++ b/fs/xfs/xfs_extfree_item.h
@@ -17,11 +17,6 @@ struct kmem_zone;
#define XFS_EFI_MAX_FAST_EXTENTS 16
/*
- * Define EFI flag bits. Manipulated by set/clear/test_bit operators.
- */
-#define XFS_EFI_RECOVERED 1
-
-/*
* This is the "extent free intention" log item. It is used to log the fact
* that some extents need to be free. It is used in conjunction with the
* "extent free done" log item described below.
@@ -50,25 +45,24 @@ struct kmem_zone;
* of commit failure or log I/O errors. Note that the EFD is not inserted in the
* AIL, so at this point both the EFI and EFD are freed.
*/
-typedef struct xfs_efi_log_item {
+struct xfs_efi_log_item {
struct xfs_log_item efi_item;
atomic_t efi_refcount;
atomic_t efi_next_extent;
- unsigned long efi_flags; /* misc flags */
xfs_efi_log_format_t efi_format;
-} xfs_efi_log_item_t;
+};
/*
* This is the "extent free done" log item. It is used to log
* the fact that some extents earlier mentioned in an efi item
* have been freed.
*/
-typedef struct xfs_efd_log_item {
+struct xfs_efd_log_item {
struct xfs_log_item efd_item;
- xfs_efi_log_item_t *efd_efip;
+ struct xfs_efi_log_item *efd_efip;
uint efd_next_extent;
xfs_efd_log_format_t efd_format;
-} xfs_efd_log_item_t;
+};
/*
* Max number of extents in fast allocation path.
@@ -78,13 +72,4 @@ typedef struct xfs_efd_log_item {
extern struct kmem_zone *xfs_efi_zone;
extern struct kmem_zone *xfs_efd_zone;
-xfs_efi_log_item_t *xfs_efi_init(struct xfs_mount *, uint);
-int xfs_efi_copy_format(xfs_log_iovec_t *buf,
- xfs_efi_log_format_t *dst_efi_fmt);
-void xfs_efi_item_free(xfs_efi_log_item_t *);
-void xfs_efi_release(struct xfs_efi_log_item *);
-
-int xfs_efi_recover(struct xfs_mount *mp,
- struct xfs_efi_log_item *efip);
-
#endif /* __XFS_EXTFREE_ITEM_H__ */
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 4b8bdecc3863..00db81eac80d 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -1102,7 +1102,7 @@ xfs_dir_open(
* certain to have the next operation be a read there.
*/
mode = xfs_ilock_data_map_shared(ip);
- if (ip->i_d.di_nextents > 0)
+ if (ip->i_df.if_nextents > 0)
error = xfs_dir3_data_readahead(ip, 0, 0);
xfs_iunlock(ip, mode);
return error;
@@ -1173,7 +1173,7 @@ xfs_file_llseek(
* Locking for serialisation of IO during page faults. This results in a lock
* ordering of:
*
- * mmap_sem (MM)
+ * mmap_lock (MM)
* sb_start_pagefault(vfs, freeze)
* i_mmaplock (XFS - truncate serialisation)
* page_lock (MM)
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 3e61d0cc23f8..ef1d5bb88b93 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -504,10 +504,7 @@ xfs_do_force_shutdown(
} else if (logerror) {
xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_LOGERROR,
"Log I/O Error Detected. Shutting down filesystem");
- } else if (flags & SHUTDOWN_DEVICE_REQ) {
- xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR,
- "All device paths lost. Shutting down filesystem");
- } else if (!(flags & SHUTDOWN_REMOTE_REQ)) {
+ } else {
xfs_alert_tag(mp, XFS_PTAG_SHUTDOWN_IOERROR,
"I/O Error Detected. Shutting down filesystem");
}
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 8bf1d15be3f6..5daef654956c 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -22,6 +22,7 @@
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"
+#include "xfs_ialloc.h"
#include <linux/iversion.h>
@@ -62,8 +63,6 @@ xfs_inode_alloc(
memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
ip->i_afp = NULL;
ip->i_cowfp = NULL;
- ip->i_cnextents = 0;
- ip->i_cformat = XFS_DINODE_FMT_EXTENTS;
memset(&ip->i_df, 0, sizeof(ip->i_df));
ip->i_flags = 0;
ip->i_delayed_blks = 0;
@@ -88,15 +87,18 @@ xfs_inode_free_callback(
case S_IFREG:
case S_IFDIR:
case S_IFLNK:
- xfs_idestroy_fork(ip, XFS_DATA_FORK);
+ xfs_idestroy_fork(&ip->i_df);
break;
}
- if (ip->i_afp)
- xfs_idestroy_fork(ip, XFS_ATTR_FORK);
- if (ip->i_cowfp)
- xfs_idestroy_fork(ip, XFS_COW_FORK);
-
+ if (ip->i_afp) {
+ xfs_idestroy_fork(ip->i_afp);
+ kmem_cache_free(xfs_ifork_zone, ip->i_afp);
+ }
+ if (ip->i_cowfp) {
+ xfs_idestroy_fork(ip->i_cowfp);
+ kmem_cache_free(xfs_ifork_zone, ip->i_cowfp);
+ }
if (ip->i_itemp) {
ASSERT(!test_bit(XFS_LI_IN_AIL,
&ip->i_itemp->ili_item.li_flags));
@@ -423,6 +425,7 @@ xfs_iget_cache_hit(
spin_unlock(&ip->i_flags_lock);
rcu_read_unlock();
+ ASSERT(!rwsem_is_locked(&inode->i_rwsem));
error = xfs_reinit_inode(mp, inode);
if (error) {
bool wake;
@@ -456,9 +459,6 @@ xfs_iget_cache_hit(
ip->i_sick = 0;
ip->i_checked = 0;
- ASSERT(!rwsem_is_locked(&inode->i_rwsem));
- init_rwsem(&inode->i_rwsem);
-
spin_unlock(&ip->i_flags_lock);
spin_unlock(&pag->pag_ici_lock);
} else {
@@ -479,7 +479,7 @@ xfs_iget_cache_hit(
xfs_ilock(ip, lock_flags);
if (!(flags & XFS_IGET_INCORE))
- xfs_iflags_clear(ip, XFS_ISTALE | XFS_IDONTCACHE);
+ xfs_iflags_clear(ip, XFS_ISTALE);
XFS_STATS_INC(mp, xs_ig_found);
return 0;
@@ -510,18 +510,42 @@ xfs_iget_cache_miss(
if (!ip)
return -ENOMEM;
- error = xfs_iread(mp, tp, ip, flags);
+ error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, flags);
if (error)
goto out_destroy;
- if (!xfs_inode_verify_forks(ip)) {
- error = -EFSCORRUPTED;
- goto out_destroy;
+ /*
+ * For version 5 superblocks, if we are initialising a new inode and we
+ * are not utilising the XFS_MOUNT_IKEEP inode cluster mode, we can
+ * simply build the new inode core with a random generation number.
+ *
+ * For version 4 (and older) superblocks, log recovery is dependent on
+ * the di_flushiter field being initialised from the current on-disk
+ * value and hence we must also read the inode off disk even when
+ * initializing new inodes.
+ */
+ if (xfs_sb_version_has_v3inode(&mp->m_sb) &&
+ (flags & XFS_IGET_CREATE) && !(mp->m_flags & XFS_MOUNT_IKEEP)) {
+ VFS_I(ip)->i_generation = prandom_u32();
+ } else {
+ struct xfs_dinode *dip;
+ struct xfs_buf *bp;
+
+ error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0);
+ if (error)
+ goto out_destroy;
+
+ error = xfs_inode_from_disk(ip, dip);
+ if (!error)
+ xfs_buf_set_ref(bp, XFS_INO_REF);
+ xfs_trans_brelse(tp, bp);
+
+ if (error)
+ goto out_destroy;
}
trace_xfs_iget_miss(ip);
-
/*
* Check the inode free state is valid. This also detects lookup
* racing with unlinks.
@@ -561,7 +585,7 @@ xfs_iget_cache_miss(
*/
iflags = XFS_INEW;
if (flags & XFS_IGET_DONTCACHE)
- iflags |= XFS_IDONTCACHE;
+ d_mark_dontcache(VFS_I(ip));
ip->i_udquot = NULL;
ip->i_gdquot = NULL;
ip->i_pdquot = NULL;
@@ -737,13 +761,18 @@ xfs_icache_inode_is_allocated(
*/
#define XFS_LOOKUP_BATCH 32
-STATIC int
-xfs_inode_ag_walk_grab(
+/*
+ * Decide if the given @ip is eligible to be a part of the inode walk, and
+ * grab it if so. Returns true if it's ready to go or false if we should just
+ * ignore it.
+ */
+STATIC bool
+xfs_inode_walk_ag_grab(
struct xfs_inode *ip,
int flags)
{
struct inode *inode = VFS_I(ip);
- bool newinos = !!(flags & XFS_AGITER_INEW_WAIT);
+ bool newinos = !!(flags & XFS_INODE_WALK_INEW_WAIT);
ASSERT(rcu_read_lock_held());
@@ -768,39 +797,41 @@ xfs_inode_ag_walk_grab(
/* nothing to sync during shutdown */
if (XFS_FORCED_SHUTDOWN(ip->i_mount))
- return -EFSCORRUPTED;
+ return false;
 /* If we can't grab the inode, it must be on its way to reclaim. */
if (!igrab(inode))
- return -ENOENT;
+ return false;
/* inode is valid */
- return 0;
+ return true;
out_unlock_noent:
spin_unlock(&ip->i_flags_lock);
- return -ENOENT;
+ return false;
}
+/*
+ * For a given per-AG structure @pag, grab, @execute, and rele all incore
+ * inodes with the given radix tree @tag.
+ */
STATIC int
-xfs_inode_ag_walk(
- struct xfs_mount *mp,
+xfs_inode_walk_ag(
struct xfs_perag *pag,
- int (*execute)(struct xfs_inode *ip, int flags,
- void *args),
- int flags,
+ int iter_flags,
+ int (*execute)(struct xfs_inode *ip, void *args),
void *args,
- int tag,
- int iter_flags)
+ int tag)
{
+ struct xfs_mount *mp = pag->pag_mount;
uint32_t first_index;
int last_error = 0;
int skipped;
- int done;
+ bool done;
int nr_found;
restart:
- done = 0;
+ done = false;
skipped = 0;
first_index = 0;
nr_found = 0;
@@ -811,7 +842,7 @@ restart:
rcu_read_lock();
- if (tag == -1)
+ if (tag == XFS_ICI_NO_TAG)
nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
(void **)batch, first_index,
XFS_LOOKUP_BATCH);
@@ -833,7 +864,7 @@ restart:
for (i = 0; i < nr_found; i++) {
struct xfs_inode *ip = batch[i];
- if (done || xfs_inode_ag_walk_grab(ip, iter_flags))
+ if (done || !xfs_inode_walk_ag_grab(ip, iter_flags))
batch[i] = NULL;
/*
@@ -852,7 +883,7 @@ restart:
continue;
first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
- done = 1;
+ done = true;
}
/* unlock now we've grabbed the inodes. */
@@ -861,10 +892,10 @@ restart:
for (i = 0; i < nr_found; i++) {
if (!batch[i])
continue;
- if ((iter_flags & XFS_AGITER_INEW_WAIT) &&
+ if ((iter_flags & XFS_INODE_WALK_INEW_WAIT) &&
xfs_iflags_test(batch[i], XFS_INEW))
xfs_inew_wait(batch[i]);
- error = execute(batch[i], flags, args);
+ error = execute(batch[i], args);
xfs_irele(batch[i]);
if (error == -EAGAIN) {
skipped++;
@@ -889,6 +920,49 @@ restart:
return last_error;
}
+/* Fetch the next (possibly tagged) per-AG structure. */
+static inline struct xfs_perag *
+xfs_inode_walk_get_perag(
+ struct xfs_mount *mp,
+ xfs_agnumber_t agno,
+ int tag)
+{
+ if (tag == XFS_ICI_NO_TAG)
+ return xfs_perag_get(mp, agno);
+ return xfs_perag_get_tag(mp, agno, tag);
+}
+
+/*
+ * Call the @execute function on all incore inodes matching the radix tree
+ * @tag.
+ */
+int
+xfs_inode_walk(
+ struct xfs_mount *mp,
+ int iter_flags,
+ int (*execute)(struct xfs_inode *ip, void *args),
+ void *args,
+ int tag)
+{
+ struct xfs_perag *pag;
+ int error = 0;
+ int last_error = 0;
+ xfs_agnumber_t ag;
+
+ ag = 0;
+ while ((pag = xfs_inode_walk_get_perag(mp, ag, tag))) {
+ ag = pag->pag_agno + 1;
+ error = xfs_inode_walk_ag(pag, iter_flags, execute, args, tag);
+ xfs_perag_put(pag);
+ if (error) {
+ last_error = error;
+ if (error == -EFSCORRUPTED)
+ break;
+ }
+ }
+ return last_error;
+}
+
/*
* Background scanning to trim post-EOF preallocated space. This is queued
* based on the 'speculative_prealloc_lifetime' tunable (5m by default).
@@ -952,75 +1026,6 @@ xfs_cowblocks_worker(
xfs_queue_cowblocks(mp);
}
-int
-xfs_inode_ag_iterator_flags(
- struct xfs_mount *mp,
- int (*execute)(struct xfs_inode *ip, int flags,
- void *args),
- int flags,
- void *args,
- int iter_flags)
-{
- struct xfs_perag *pag;
- int error = 0;
- int last_error = 0;
- xfs_agnumber_t ag;
-
- ag = 0;
- while ((pag = xfs_perag_get(mp, ag))) {
- ag = pag->pag_agno + 1;
- error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1,
- iter_flags);
- xfs_perag_put(pag);
- if (error) {
- last_error = error;
- if (error == -EFSCORRUPTED)
- break;
- }
- }
- return last_error;
-}
-
-int
-xfs_inode_ag_iterator(
- struct xfs_mount *mp,
- int (*execute)(struct xfs_inode *ip, int flags,
- void *args),
- int flags,
- void *args)
-{
- return xfs_inode_ag_iterator_flags(mp, execute, flags, args, 0);
-}
-
-int
-xfs_inode_ag_iterator_tag(
- struct xfs_mount *mp,
- int (*execute)(struct xfs_inode *ip, int flags,
- void *args),
- int flags,
- void *args,
- int tag)
-{
- struct xfs_perag *pag;
- int error = 0;
- int last_error = 0;
- xfs_agnumber_t ag;
-
- ag = 0;
- while ((pag = xfs_perag_get_tag(mp, ag, tag))) {
- ag = pag->pag_agno + 1;
- error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag,
- 0);
- xfs_perag_put(pag);
- if (error) {
- last_error = error;
- if (error == -EFSCORRUPTED)
- break;
- }
- }
- return last_error;
-}
-
/*
* Grab the inode for reclaim exclusively.
* Return 0 if we grabbed it, non-zero otherwise.
@@ -1128,7 +1133,7 @@ restart:
if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
xfs_iunpin_wait(ip);
/* xfs_iflush_abort() drops the flush lock */
- xfs_iflush_abort(ip, false);
+ xfs_iflush_abort(ip);
goto reclaim;
}
if (xfs_ipincount(ip)) {
@@ -1419,59 +1424,90 @@ xfs_reclaim_inodes_count(
return reclaimable;
}
-STATIC int
+STATIC bool
xfs_inode_match_id(
struct xfs_inode *ip,
struct xfs_eofblocks *eofb)
{
if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
!uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
- return 0;
+ return false;
if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
!gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
- return 0;
+ return false;
if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
ip->i_d.di_projid != eofb->eof_prid)
- return 0;
+ return false;
- return 1;
+ return true;
}
/*
* A union-based inode filtering algorithm. Process the inode if any of the
* criteria match. This is for global/internal scans only.
*/
-STATIC int
+STATIC bool
xfs_inode_match_id_union(
struct xfs_inode *ip,
struct xfs_eofblocks *eofb)
{
if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
- return 1;
+ return true;
if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
- return 1;
+ return true;
if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
ip->i_d.di_projid == eofb->eof_prid)
- return 1;
+ return true;
- return 0;
+ return false;
+}
+
+/*
+ * Is this inode @ip eligible for eof/cow block reclamation, given some
+ * filtering parameters @eofb? The inode is eligible if @eofb is null or
+ * if the predicate functions match.
+ */
+static bool
+xfs_inode_matches_eofb(
+ struct xfs_inode *ip,
+ struct xfs_eofblocks *eofb)
+{
+ bool match;
+
+ if (!eofb)
+ return true;
+
+ if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
+ match = xfs_inode_match_id_union(ip, eofb);
+ else
+ match = xfs_inode_match_id(ip, eofb);
+ if (!match)
+ return false;
+
+ /* skip the inode if the file size is too small */
+ if ((eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE) &&
+ XFS_ISIZE(ip) < eofb->eof_min_file_size)
+ return false;
+
+ return true;
}
STATIC int
xfs_inode_free_eofblocks(
struct xfs_inode *ip,
- int flags,
void *args)
{
- int ret = 0;
- struct xfs_eofblocks *eofb = args;
- int match;
+ struct xfs_eofblocks *eofb = args;
+ bool wait;
+ int ret;
+
+ wait = eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC);
if (!xfs_can_free_eofblocks(ip, false)) {
/* inode could be preallocated or append-only */
@@ -1484,62 +1520,34 @@ xfs_inode_free_eofblocks(
* If the mapping is dirty the operation can block and wait for some
* time. Unless we are waiting, skip it.
*/
- if (!(flags & SYNC_WAIT) &&
- mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
+ if (!wait && mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
return 0;
- if (eofb) {
- if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
- match = xfs_inode_match_id_union(ip, eofb);
- else
- match = xfs_inode_match_id(ip, eofb);
- if (!match)
- return 0;
-
- /* skip the inode if the file size is too small */
- if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
- XFS_ISIZE(ip) < eofb->eof_min_file_size)
- return 0;
- }
+ if (!xfs_inode_matches_eofb(ip, eofb))
+ return 0;
/*
* If the caller is waiting, return -EAGAIN to keep the background
* scanner moving and revisit the inode in a subsequent pass.
*/
if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
- if (flags & SYNC_WAIT)
- ret = -EAGAIN;
- return ret;
+ if (wait)
+ return -EAGAIN;
+ return 0;
}
+
ret = xfs_free_eofblocks(ip);
xfs_iunlock(ip, XFS_IOLOCK_EXCL);
return ret;
}
-static int
-__xfs_icache_free_eofblocks(
- struct xfs_mount *mp,
- struct xfs_eofblocks *eofb,
- int (*execute)(struct xfs_inode *ip, int flags,
- void *args),
- int tag)
-{
- int flags = SYNC_TRYLOCK;
-
- if (eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC))
- flags = SYNC_WAIT;
-
- return xfs_inode_ag_iterator_tag(mp, execute, flags,
- eofb, tag);
-}
-
int
xfs_icache_free_eofblocks(
struct xfs_mount *mp,
struct xfs_eofblocks *eofb)
{
- return __xfs_icache_free_eofblocks(mp, eofb, xfs_inode_free_eofblocks,
+ return xfs_inode_walk(mp, 0, xfs_inode_free_eofblocks, eofb,
XFS_ICI_EOFBLOCKS_TAG);
}
@@ -1756,29 +1764,16 @@ xfs_prep_free_cowblocks(
STATIC int
xfs_inode_free_cowblocks(
struct xfs_inode *ip,
- int flags,
void *args)
{
struct xfs_eofblocks *eofb = args;
- int match;
int ret = 0;
if (!xfs_prep_free_cowblocks(ip))
return 0;
- if (eofb) {
- if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
- match = xfs_inode_match_id_union(ip, eofb);
- else
- match = xfs_inode_match_id(ip, eofb);
- if (!match)
- return 0;
-
- /* skip the inode if the file size is too small */
- if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
- XFS_ISIZE(ip) < eofb->eof_min_file_size)
- return 0;
- }
+ if (!xfs_inode_matches_eofb(ip, eofb))
+ return 0;
/* Free the CoW blocks */
xfs_ilock(ip, XFS_IOLOCK_EXCL);
@@ -1802,7 +1797,7 @@ xfs_icache_free_cowblocks(
struct xfs_mount *mp,
struct xfs_eofblocks *eofb)
{
- return __xfs_icache_free_eofblocks(mp, eofb, xfs_inode_free_cowblocks,
+ return xfs_inode_walk(mp, 0, xfs_inode_free_cowblocks, eofb,
XFS_ICI_COWBLOCKS_TAG);
}
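With the refactor above, every incore-inode scan reduces to xfs_inode_walk() plus a callback: the eofblocks and cowblocks scanners now differ only in their callback and radix-tree tag. A usage sketch with a hypothetical callback (count_inode() and count_incore_inodes() are not kernel functions):

/* Sketch: count all incore inodes using the new walk API. */
static int count_inode(struct xfs_inode *ip, void *args)
{
	(*(unsigned long *)args)++;
	return 0;
}

static unsigned long count_incore_inodes(struct xfs_mount *mp)
{
	unsigned long n = 0;

	xfs_inode_walk(mp, 0, count_inode, &n, XFS_ICI_NO_TAG);
	return n;
}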
diff --git a/fs/xfs/xfs_icache.h b/fs/xfs/xfs_icache.h
index 48f1fd2bb6ad..93b54e7d55f0 100644
--- a/fs/xfs/xfs_icache.h
+++ b/fs/xfs/xfs_icache.h
@@ -24,7 +24,7 @@ struct xfs_eofblocks {
* tags for inode radix tree
*/
#define XFS_ICI_NO_TAG (-1) /* special flag for an untagged lookup
- in xfs_inode_ag_iterator */
+ in xfs_inode_walk */
#define XFS_ICI_RECLAIM_TAG 0 /* inode is to be reclaimed */
#define XFS_ICI_EOFBLOCKS_TAG 1 /* inode has blocks beyond EOF */
#define XFS_ICI_COWBLOCKS_TAG 2 /* inode can have cow blocks to gc */
@@ -40,7 +40,7 @@ struct xfs_eofblocks {
/*
* flags for AG inode iterator
*/
-#define XFS_AGITER_INEW_WAIT 0x1 /* wait on new inodes */
+#define XFS_INODE_WALK_INEW_WAIT 0x1 /* wait on new inodes */
int xfs_iget(struct xfs_mount *mp, struct xfs_trans *tp, xfs_ino_t ino,
uint flags, uint lock_flags, xfs_inode_t **ipp);
@@ -71,50 +71,9 @@ int xfs_inode_free_quota_cowblocks(struct xfs_inode *ip);
void xfs_cowblocks_worker(struct work_struct *);
void xfs_queue_cowblocks(struct xfs_mount *);
-int xfs_inode_ag_iterator(struct xfs_mount *mp,
- int (*execute)(struct xfs_inode *ip, int flags, void *args),
- int flags, void *args);
-int xfs_inode_ag_iterator_flags(struct xfs_mount *mp,
- int (*execute)(struct xfs_inode *ip, int flags, void *args),
- int flags, void *args, int iter_flags);
-int xfs_inode_ag_iterator_tag(struct xfs_mount *mp,
- int (*execute)(struct xfs_inode *ip, int flags, void *args),
- int flags, void *args, int tag);
-
-static inline int
-xfs_fs_eofblocks_from_user(
- struct xfs_fs_eofblocks *src,
- struct xfs_eofblocks *dst)
-{
- if (src->eof_version != XFS_EOFBLOCKS_VERSION)
- return -EINVAL;
-
- if (src->eof_flags & ~XFS_EOF_FLAGS_VALID)
- return -EINVAL;
-
- if (memchr_inv(&src->pad32, 0, sizeof(src->pad32)) ||
- memchr_inv(src->pad64, 0, sizeof(src->pad64)))
- return -EINVAL;
-
- dst->eof_flags = src->eof_flags;
- dst->eof_prid = src->eof_prid;
- dst->eof_min_file_size = src->eof_min_file_size;
-
- dst->eof_uid = INVALID_UID;
- if (src->eof_flags & XFS_EOF_FLAGS_UID) {
- dst->eof_uid = make_kuid(current_user_ns(), src->eof_uid);
- if (!uid_valid(dst->eof_uid))
- return -EINVAL;
- }
-
- dst->eof_gid = INVALID_GID;
- if (src->eof_flags & XFS_EOF_FLAGS_GID) {
- dst->eof_gid = make_kgid(current_user_ns(), src->eof_gid);
- if (!gid_valid(dst->eof_gid))
- return -EINVAL;
- }
- return 0;
-}
+int xfs_inode_walk(struct xfs_mount *mp, int iter_flags,
+ int (*execute)(struct xfs_inode *ip, void *args),
+ void *args, int tag);
int xfs_icache_inode_is_allocated(struct xfs_mount *mp, struct xfs_trans *tp,
xfs_ino_t ino, bool *inuse);
diff --git a/fs/xfs/xfs_icreate_item.c b/fs/xfs/xfs_icreate_item.c
index 490fee22b878..287a9e5c7d75 100644
--- a/fs/xfs/xfs_icreate_item.c
+++ b/fs/xfs/xfs_icreate_item.c
@@ -6,11 +6,19 @@
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
+#include "xfs_format.h"
#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_mount.h"
+#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_icreate_item.h"
#include "xfs_log.h"
+#include "xfs_log_priv.h"
+#include "xfs_log_recover.h"
+#include "xfs_ialloc.h"
+#include "xfs_trace.h"
kmem_zone_t *xfs_icreate_zone; /* inode create item zone */
@@ -107,3 +115,147 @@ xfs_icreate_log(
tp->t_flags |= XFS_TRANS_DIRTY;
set_bit(XFS_LI_DIRTY, &icp->ic_item.li_flags);
}
+
+static enum xlog_recover_reorder
+xlog_recover_icreate_reorder(
+ struct xlog_recover_item *item)
+{
+ /*
+ * Inode allocation buffers must be replayed before subsequent inode
+ * items try to modify those buffers. ICREATE items are the logical
+ * equivalent of logging a newly initialized inode buffer, so recover
+ * these at the same time that we recover logged buffers.
+ */
+ return XLOG_REORDER_BUFFER_LIST;
+}
+
+/*
+ * This routine is called when an inode create format structure is found in a
+ * committed transaction in the log. Its purpose is to initialise the inodes
+ * being allocated on disk. This requires us to get inode cluster buffers that
+ * match the range to be initialised, stamped with inode templates and written
+ * by delayed write so that subsequent modifications will hit the cached buffer
+ * and only need writing out at the end of recovery.
+ */
+STATIC int
+xlog_recover_icreate_commit_pass2(
+ struct xlog *log,
+ struct list_head *buffer_list,
+ struct xlog_recover_item *item,
+ xfs_lsn_t lsn)
+{
+ struct xfs_mount *mp = log->l_mp;
+ struct xfs_icreate_log *icl;
+ struct xfs_ino_geometry *igeo = M_IGEO(mp);
+ xfs_agnumber_t agno;
+ xfs_agblock_t agbno;
+ unsigned int count;
+ unsigned int isize;
+ xfs_agblock_t length;
+ int bb_per_cluster;
+ int cancel_count;
+ int nbufs;
+ int i;
+
+ icl = (struct xfs_icreate_log *)item->ri_buf[0].i_addr;
+ if (icl->icl_type != XFS_LI_ICREATE) {
+ xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad type");
+ return -EINVAL;
+ }
+
+ if (icl->icl_size != 1) {
+ xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad icl size");
+ return -EINVAL;
+ }
+
+ agno = be32_to_cpu(icl->icl_ag);
+ if (agno >= mp->m_sb.sb_agcount) {
+ xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agno");
+ return -EINVAL;
+ }
+ agbno = be32_to_cpu(icl->icl_agbno);
+ if (!agbno || agbno == NULLAGBLOCK || agbno >= mp->m_sb.sb_agblocks) {
+ xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agbno");
+ return -EINVAL;
+ }
+ isize = be32_to_cpu(icl->icl_isize);
+ if (isize != mp->m_sb.sb_inodesize) {
+ xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad isize");
+ return -EINVAL;
+ }
+ count = be32_to_cpu(icl->icl_count);
+ if (!count) {
+ xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count");
+ return -EINVAL;
+ }
+ length = be32_to_cpu(icl->icl_length);
+ if (!length || length >= mp->m_sb.sb_agblocks) {
+ xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad length");
+ return -EINVAL;
+ }
+
+ /*
+ * The inode chunk is either full or sparse and we only support
+ * m_ino_geo.ialloc_min_blks sized sparse allocations at this time.
+ */
+ if (length != igeo->ialloc_blks &&
+ length != igeo->ialloc_min_blks) {
+ xfs_warn(log->l_mp,
+ "%s: unsupported chunk length", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ /* verify inode count is consistent with extent length */
+ if ((count >> mp->m_sb.sb_inopblog) != length) {
+ xfs_warn(log->l_mp,
+ "%s: inconsistent inode count and chunk length",
+ __FUNCTION__);
+ return -EINVAL;
+ }
+
+ /*
+ * The icreate transaction can cover multiple cluster buffers and these
+ * buffers could have been freed and reused. Check the individual
+ * buffers for cancellation so we don't overwrite anything written after
+ * a cancellation.
+ */
+ bb_per_cluster = XFS_FSB_TO_BB(mp, igeo->blocks_per_cluster);
+ nbufs = length / igeo->blocks_per_cluster;
+ for (i = 0, cancel_count = 0; i < nbufs; i++) {
+ xfs_daddr_t daddr;
+
+ daddr = XFS_AGB_TO_DADDR(mp, agno,
+ agbno + i * igeo->blocks_per_cluster);
+ if (xlog_is_buffer_cancelled(log, daddr, bb_per_cluster))
+ cancel_count++;
+ }
+
+ /*
+ * We currently only use icreate for a single allocation at a time. This
+ * means we should expect either all or none of the buffers to be
+ * cancelled. Be conservative and skip replay if at least one buffer is
+ * cancelled, but warn the user that something is awry if the buffers
+ * are not consistent.
+ *
+ * XXX: This must be refined to only skip cancelled clusters once we use
+ * icreate for multiple chunk allocations.
+ */
+ ASSERT(!cancel_count || cancel_count == nbufs);
+ if (cancel_count) {
+ if (cancel_count != nbufs)
+ xfs_warn(mp,
+ "WARNING: partial inode chunk cancellation, skipped icreate.");
+ trace_xfs_log_recover_icreate_cancel(log, icl);
+ return 0;
+ }
+
+ trace_xfs_log_recover_icreate_recover(log, icl);
+ return xfs_ialloc_inode_init(mp, NULL, buffer_list, count, agno, agbno,
+ length, be32_to_cpu(icl->icl_gen));
+}
+
+const struct xlog_recover_item_ops xlog_icreate_item_ops = {
+ .item_type = XFS_LI_ICREATE,
+ .reorder = xlog_recover_icreate_reorder,
+ .commit_pass2 = xlog_recover_icreate_commit_pass2,
+};
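
The ops table above is how the icreate item plugs into the generic pass-2
dispatcher. A minimal sketch of that dispatch pattern, assuming each
xlog_recover_item carries an ri_ops pointer resolved from its item type (the
dispatcher name and shape here are illustrative, not taken from this patch):

    static int
    xlog_recover_dispatch_pass2(
    	struct xlog			*log,
    	struct list_head		*buffer_list,
    	struct xlog_recover_item	*item,
    	xfs_lsn_t			lsn)
    {
    	/* e.g. xlog_icreate_item_ops for XFS_LI_ICREATE items */
    	if (item->ri_ops && item->ri_ops->commit_pass2)
    		return item->ri_ops->commit_pass2(log, buffer_list,
    				item, lsn);
    	return 0;	/* item type has no pass-2 work */
    }
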
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index d1772786af29..4c91fb25ec66 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -112,7 +112,7 @@ xfs_ilock_data_map_shared(
{
uint lock_mode = XFS_ILOCK_SHARED;
- if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE &&
+ if (ip->i_df.if_format == XFS_DINODE_FMT_BTREE &&
(ip->i_df.if_flags & XFS_IFEXTENTS) == 0)
lock_mode = XFS_ILOCK_EXCL;
xfs_ilock(ip, lock_mode);
@@ -125,7 +125,8 @@ xfs_ilock_attr_map_shared(
{
uint lock_mode = XFS_ILOCK_SHARED;
- if (ip->i_d.di_aformat == XFS_DINODE_FMT_BTREE &&
+ if (ip->i_afp &&
+ ip->i_afp->if_format == XFS_DINODE_FMT_BTREE &&
(ip->i_afp->if_flags & XFS_IFEXTENTS) == 0)
lock_mode = XFS_ILOCK_EXCL;
xfs_ilock(ip, lock_mode);
@@ -144,17 +145,17 @@ xfs_ilock_attr_map_shared(
*
* i_rwsem -> i_mmap_lock -> page_lock -> i_ilock
*
- * mmap_sem locking order:
+ * mmap_lock locking order:
*
- * i_rwsem -> page lock -> mmap_sem
- * mmap_sem -> i_mmap_lock -> page_lock
+ * i_rwsem -> page lock -> mmap_lock
+ * mmap_lock -> i_mmap_lock -> page_lock
*
- * The difference in mmap_sem locking order mean that we cannot hold the
+ * The difference in mmap_lock locking order means that we cannot hold the
* i_mmap_lock over syscall based read(2)/write(2) based IO. These IO paths can
- * fault in pages during copy in/out (for buffered IO) or require the mmap_sem
+ * fault in pages during copy in/out (for buffered IO) or require the mmap_lock
* in get_user_pages() to map the user pages into the kernel address space for
* direct IO. Similarly the i_rwsem cannot be taken inside a page fault because
- * page faults already hold the mmap_sem.
+ * page faults already hold the mmap_lock.
*
* Hence to serialise fully against both syscall and mmap based IO, we need to
* take both the i_rwsem and the i_mmap_lock. These locks should *only* be both
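
In code, the comment above means that callers which must serialise against
both syscall and mmap based IO take both locks up front, in order, before the
ilock. A minimal sketch (the lock flags are the real XFS ones named above; the
surrounding context is assumed):

    /* truncate-style path: lock out both IO and page faults first */
    xfs_ilock(ip, XFS_IOLOCK_EXCL);		/* i_rwsem */
    xfs_ilock(ip, XFS_MMAPLOCK_EXCL);	/* i_mmap_lock */
    /* ... flush/invalidate the mapping, take the ILOCK, modify ... */
    xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
    xfs_iunlock(ip, XFS_IOLOCK_EXCL);
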
@@ -825,7 +826,7 @@ xfs_ialloc(
inode->i_mode &= ~S_ISGID;
ip->i_d.di_size = 0;
- ip->i_d.di_nextents = 0;
+ ip->i_df.if_nextents = 0;
ASSERT(ip->i_d.di_nblocks == 0);
tv = current_time(inode);
@@ -851,7 +852,7 @@ xfs_ialloc(
case S_IFCHR:
case S_IFBLK:
case S_IFSOCK:
- ip->i_d.di_format = XFS_DINODE_FMT_DEV;
+ ip->i_df.if_format = XFS_DINODE_FMT_DEV;
ip->i_df.if_flags = 0;
flags |= XFS_ILOG_DEV;
break;
@@ -907,7 +908,7 @@ xfs_ialloc(
}
/* FALLTHROUGH */
case S_IFLNK:
- ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
+ ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
ip->i_df.if_flags = XFS_IFEXTENTS;
ip->i_df.if_bytes = 0;
ip->i_df.if_u1.if_root = NULL;
@@ -915,11 +916,6 @@ xfs_ialloc(
default:
ASSERT(0);
}
- /*
- * Attribute fork settings for new inode.
- */
- ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
- ip->i_d.di_anextents = 0;
/*
* Log the new values stuffed into the inode.
@@ -1634,7 +1630,7 @@ xfs_release(
return 0;
/*
* If we can't get the iolock just skip truncating the blocks
- * past EOF because we could deadlock with the mmap_sem
+ * past EOF because we could deadlock with the mmap_lock
* otherwise. We'll get another chance to drop them once the
* last reference to the inode is dropped, so we'll never leak
* blocks permanently.
@@ -1686,7 +1682,7 @@ xfs_inactive_truncate(
if (error)
goto error_trans_cancel;
- ASSERT(ip->i_d.di_nextents == 0);
+ ASSERT(ip->i_df.if_nextents == 0);
error = xfs_trans_commit(tp);
if (error)
@@ -1836,7 +1832,7 @@ xfs_inactive(
if (S_ISREG(VFS_I(ip)->i_mode) &&
(ip->i_d.di_size != 0 || XFS_ISIZE(ip) != 0 ||
- ip->i_d.di_nextents > 0 || ip->i_delayed_blks > 0))
+ ip->i_df.if_nextents > 0 || ip->i_delayed_blks > 0))
truncate = 1;
error = xfs_qm_dqattach(ip);
@@ -1862,7 +1858,6 @@ xfs_inactive(
}
ASSERT(!ip->i_afp);
- ASSERT(ip->i_d.di_anextents == 0);
ASSERT(ip->i_d.di_forkoff == 0);
/*
@@ -2172,7 +2167,7 @@ xfs_iunlink_update_inode(
ASSERT(xfs_verify_agino_or_null(mp, agno, next_agino));
- error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp, 0, 0);
+ error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp, 0);
if (error)
return error;
@@ -2302,7 +2297,7 @@ xfs_iunlink_map_ino(
return error;
}
- error = xfs_imap_to_bp(mp, tp, imap, dipp, bpp, 0, 0);
+ error = xfs_imap_to_bp(mp, tp, imap, dipp, bpp, 0);
if (error) {
xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
__func__, error);
@@ -2602,7 +2597,7 @@ xfs_ifree_cluster(
xfs_daddr_t blkno;
xfs_buf_t *bp;
xfs_inode_t *ip;
- xfs_inode_log_item_t *iip;
+ struct xfs_inode_log_item *iip;
struct xfs_log_item *lip;
struct xfs_perag *pag;
struct xfs_ino_geometry *igeo = M_IGEO(mp);
@@ -2662,7 +2657,7 @@ xfs_ifree_cluster(
*/
list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
if (lip->li_type == XFS_LI_INODE) {
- iip = (xfs_inode_log_item_t *)lip;
+ iip = (struct xfs_inode_log_item *)lip;
ASSERT(iip->ili_logged == 1);
lip->li_cb = xfs_istale_done;
xfs_trans_ail_copy_lsn(mp->m_ail,
@@ -2712,24 +2707,6 @@ xfs_ifree_cluster(
}
/*
- * Free any local-format buffers sitting around before we reset to
- * extents format.
- */
-static inline void
-xfs_ifree_local_data(
- struct xfs_inode *ip,
- int whichfork)
-{
- struct xfs_ifork *ifp;
-
- if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL)
- return;
-
- ifp = XFS_IFORK_PTR(ip, whichfork);
- xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
-}
-
-/*
* This is called to return an inode to the inode free list.
* The inode should already be truncated to 0 length and have
* no pages associated with it. This routine also assumes that
@@ -2749,8 +2726,7 @@ xfs_ifree(
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
ASSERT(VFS_I(ip)->i_nlink == 0);
- ASSERT(ip->i_d.di_nextents == 0);
- ASSERT(ip->i_d.di_anextents == 0);
+ ASSERT(ip->i_df.if_nextents == 0);
ASSERT(ip->i_d.di_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
ASSERT(ip->i_d.di_nblocks == 0);
@@ -2765,16 +2741,23 @@ xfs_ifree(
if (error)
return error;
- xfs_ifree_local_data(ip, XFS_DATA_FORK);
- xfs_ifree_local_data(ip, XFS_ATTR_FORK);
+ /*
+ * Free any local-format data sitting around before we reset the
+ * data fork to extents format. Note that the attr fork data has
+ * already been freed by xfs_attr_inactive.
+ */
+ if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL) {
+ kmem_free(ip->i_df.if_u1.if_data);
+ ip->i_df.if_u1.if_data = NULL;
+ ip->i_df.if_bytes = 0;
+ }
VFS_I(ip)->i_mode = 0; /* mark incore inode as free */
ip->i_d.di_flags = 0;
ip->i_d.di_flags2 = 0;
ip->i_d.di_dmevmask = 0;
ip->i_d.di_forkoff = 0; /* mark the attr fork not in use */
- ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
- ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
+ ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
/* Don't attempt to replay owner changes for a deleted inode */
ip->i_itemp->ili_fields &= ~(XFS_ILOG_AOWNER|XFS_ILOG_DOWNER);
@@ -3496,6 +3479,7 @@ xfs_iflush_cluster(
struct xfs_inode **cilist;
struct xfs_inode *cip;
struct xfs_ino_geometry *igeo = M_IGEO(mp);
+ int error = 0;
int nr_found;
int clcount = 0;
int i;
@@ -3588,11 +3572,10 @@ xfs_iflush_cluster(
* re-check that it's dirty before flushing.
*/
if (!xfs_inode_clean(cip)) {
- int error;
error = xfs_iflush_int(cip, bp);
if (error) {
xfs_iunlock(cip, XFS_ILOCK_SHARED);
- goto cluster_corrupt_out;
+ goto out_free;
}
clcount++;
} else {
@@ -3611,37 +3594,7 @@ out_free:
kmem_free(cilist);
out_put:
xfs_perag_put(pag);
- return 0;
-
-
-cluster_corrupt_out:
- /*
- * Corruption detected in the clustering loop. Invalidate the
- * inode buffer and shut down the filesystem.
- */
- rcu_read_unlock();
-
- /*
- * We'll always have an inode attached to the buffer for completion
- * process by the time we are called from xfs_iflush(). Hence we have
- * always need to do IO completion processing to abort the inodes
- * attached to the buffer. handle them just like the shutdown case in
- * xfs_buf_submit().
- */
- ASSERT(bp->b_iodone);
- bp->b_flags |= XBF_ASYNC;
- bp->b_flags &= ~XBF_DONE;
- xfs_buf_stale(bp);
- xfs_buf_ioerror(bp, -EIO);
- xfs_buf_ioend(bp);
-
- xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
-
- /* abort the corrupt inode, as it was not attached to the buffer */
- xfs_iflush_abort(cip, false);
- kmem_free(cilist);
- xfs_perag_put(pag);
- return -EFSCORRUPTED;
+ return error;
}
/*
@@ -3667,8 +3620,8 @@ xfs_iflush(
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
ASSERT(xfs_isiflocked(ip));
- ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
- ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
+ ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE ||
+ ip->i_df.if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
*bpp = NULL;
@@ -3688,42 +3641,20 @@ xfs_iflush(
}
/*
- * This may have been unpinned because the filesystem is shutting
- * down forcibly. If that's the case we must not write this inode
- * to disk, because the log record didn't make it to disk.
- *
- * We also have to remove the log item from the AIL in this case,
- * as we wait for an empty AIL as part of the unmount process.
- */
- if (XFS_FORCED_SHUTDOWN(mp)) {
- error = -EIO;
- goto abort_out;
- }
-
- /*
* Get the buffer containing the on-disk inode. We are doing a try-lock
- * operation here, so we may get an EAGAIN error. In that case, we
- * simply want to return with the inode still dirty.
+ * operation here, so we may get an EAGAIN error. In that case, return and
+ * leave the inode dirty.
*
* If we get any other error, we effectively have a corruption situation
- * and we cannot flush the inode, so we treat it the same as failing
- * xfs_iflush_int().
+ * and we cannot flush the inode. Abort the flush and shut down.
*/
- error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK,
- 0);
+ error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK);
if (error == -EAGAIN) {
xfs_ifunlock(ip);
return error;
}
if (error)
- goto corrupt_out;
-
- /*
- * First flush out the inode that xfs_iflush was called with.
- */
- error = xfs_iflush_int(ip, bp);
- if (error)
- goto corrupt_out;
+ goto abort;
/*
* If the buffer is pinned then push on the log now so we won't
@@ -3733,61 +3664,32 @@ xfs_iflush(
xfs_log_force(mp, 0);
/*
- * inode clustering: try to gather other inodes into this write
+ * Flush the provided inode then attempt to gather others from the
+ * cluster into the write.
*
- * Note: Any error during clustering will result in the filesystem
- * being shut down and completion callbacks run on the cluster buffer.
- * As we have already flushed and attached this inode to the buffer,
- * it has already been aborted and released by xfs_iflush_cluster() and
- * so we have no further error handling to do here.
+ * Note: Once we attempt to flush an inode, we must run buffer
+ * completion callbacks on any failure. If this fails, simulate an I/O
+ * failure on the buffer and shut down.
*/
- error = xfs_iflush_cluster(ip, bp);
- if (error)
- return error;
+ error = xfs_iflush_int(ip, bp);
+ if (!error)
+ error = xfs_iflush_cluster(ip, bp);
+ if (error) {
+ bp->b_flags |= XBF_ASYNC;
+ xfs_buf_ioend_fail(bp);
+ goto shutdown;
+ }
*bpp = bp;
return 0;
-corrupt_out:
- if (bp)
- xfs_buf_relse(bp);
+abort:
+ xfs_iflush_abort(ip);
+shutdown:
xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
-abort_out:
- /* abort the corrupt inode, as it was not attached to the buffer */
- xfs_iflush_abort(ip, false);
return error;
}
-/*
- * If there are inline format data / attr forks attached to this inode,
- * make sure they're not corrupt.
- */
-bool
-xfs_inode_verify_forks(
- struct xfs_inode *ip)
-{
- struct xfs_ifork *ifp;
- xfs_failaddr_t fa;
-
- fa = xfs_ifork_verify_data(ip, &xfs_default_ifork_ops);
- if (fa) {
- ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
- xfs_inode_verifier_error(ip, -EFSCORRUPTED, "data fork",
- ifp->if_u1.if_data, ifp->if_bytes, fa);
- return false;
- }
-
- fa = xfs_ifork_verify_attr(ip, &xfs_default_ifork_ops);
- if (fa) {
- ifp = XFS_IFORK_PTR(ip, XFS_ATTR_FORK);
- xfs_inode_verifier_error(ip, -EFSCORRUPTED, "attr fork",
- ifp ? ifp->if_u1.if_data : NULL,
- ifp ? ifp->if_bytes : 0, fa);
- return false;
- }
- return true;
-}
-
STATIC int
xfs_iflush_int(
struct xfs_inode *ip,
@@ -3796,61 +3698,68 @@ xfs_iflush_int(
struct xfs_inode_log_item *iip = ip->i_itemp;
struct xfs_dinode *dip;
struct xfs_mount *mp = ip->i_mount;
+ int error;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
ASSERT(xfs_isiflocked(ip));
- ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
- ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
+ ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE ||
+ ip->i_df.if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
ASSERT(iip != NULL && iip->ili_fields != 0);
- /* set *dip = inode's place in the buffer */
dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);
+ /*
+ * We don't flush the inode if any of the following checks fail, but we
+ * do still update the log item and attach to the backing buffer as if
+ * the flush happened. This is a formality to facilitate predictable
+ * error handling as the caller will shut down and fail the buffer.
+ */
+ error = -EFSCORRUPTED;
if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
mp, XFS_ERRTAG_IFLUSH_1)) {
xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
"%s: Bad inode %Lu magic number 0x%x, ptr "PTR_FMT,
__func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
- goto corrupt_out;
+ goto flush_out;
}
if (S_ISREG(VFS_I(ip)->i_mode)) {
if (XFS_TEST_ERROR(
- (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
- (ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
+ ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
+ ip->i_df.if_format != XFS_DINODE_FMT_BTREE,
mp, XFS_ERRTAG_IFLUSH_3)) {
xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
"%s: Bad regular inode %Lu, ptr "PTR_FMT,
__func__, ip->i_ino, ip);
- goto corrupt_out;
+ goto flush_out;
}
} else if (S_ISDIR(VFS_I(ip)->i_mode)) {
if (XFS_TEST_ERROR(
- (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
- (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
- (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL),
+ ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
+ ip->i_df.if_format != XFS_DINODE_FMT_BTREE &&
+ ip->i_df.if_format != XFS_DINODE_FMT_LOCAL,
mp, XFS_ERRTAG_IFLUSH_4)) {
xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
"%s: Bad directory inode %Lu, ptr "PTR_FMT,
__func__, ip->i_ino, ip);
- goto corrupt_out;
+ goto flush_out;
}
}
- if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents >
+ if (XFS_TEST_ERROR(ip->i_df.if_nextents + xfs_ifork_nextents(ip->i_afp) >
ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) {
xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
"%s: detected corrupt incore inode %Lu, "
"total extents = %d, nblocks = %Ld, ptr "PTR_FMT,
__func__, ip->i_ino,
- ip->i_d.di_nextents + ip->i_d.di_anextents,
+ ip->i_df.if_nextents + xfs_ifork_nextents(ip->i_afp),
ip->i_d.di_nblocks, ip);
- goto corrupt_out;
+ goto flush_out;
}
if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
mp, XFS_ERRTAG_IFLUSH_6)) {
xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
"%s: bad inode %Lu, forkoff 0x%x, ptr "PTR_FMT,
__func__, ip->i_ino, ip->i_d.di_forkoff, ip);
- goto corrupt_out;
+ goto flush_out;
}
/*
@@ -3865,9 +3774,16 @@ xfs_iflush_int(
if (!xfs_sb_version_has_v3inode(&mp->m_sb))
ip->i_d.di_flushiter++;
- /* Check the inline fork data before we write out. */
- if (!xfs_inode_verify_forks(ip))
- goto corrupt_out;
+ /*
+ * If there are inline format data / attr forks attached to this inode,
+ * make sure they are not corrupt.
+ */
+ if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL &&
+ xfs_ifork_verify_local_data(ip))
+ goto flush_out;
+ if (ip->i_afp && ip->i_afp->if_format == XFS_DINODE_FMT_LOCAL &&
+ xfs_ifork_verify_local_attr(ip))
+ goto flush_out;
/*
* Copy the dirty parts of the inode into the on-disk inode. We always
@@ -3910,6 +3826,8 @@ xfs_iflush_int(
* need the AIL lock, because it is a 64 bit value that cannot be read
* atomically.
*/
+ error = 0;
+flush_out:
iip->ili_last_fields = iip->ili_fields;
iip->ili_fields = 0;
iip->ili_fsync_fields = 0;
@@ -3919,10 +3837,10 @@ xfs_iflush_int(
&iip->ili_item.li_lsn);
/*
- * Attach the function xfs_iflush_done to the inode's
- * buffer. This will remove the inode from the AIL
- * and unlock the inode's flush lock when the inode is
- * completely written to disk.
+ * Attach the inode item callback to the buffer whether the flush
+ * succeeded or not. If not, the caller will shut down and fail I/O
+ * completion on the buffer to remove the inode from the AIL and release
+ * the flush lock.
*/
xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item);
@@ -3931,10 +3849,7 @@ xfs_iflush_int(
ASSERT(!list_empty(&bp->b_li_list));
ASSERT(bp->b_iodone != NULL);
- return 0;
-
-corrupt_out:
- return -EFSCORRUPTED;
+ return error;
}
/* Release an inode. */
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index c6a63f6764a6..47d3b391030d 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -57,9 +57,6 @@ typedef struct xfs_inode {
struct xfs_icdinode i_d; /* most of ondisk inode */
- xfs_extnum_t i_cnextents; /* # of extents in cow fork */
- unsigned int i_cformat; /* format of cow fork */
-
/* VFS inode */
struct inode i_vnode; /* embedded VFS inode */
@@ -218,8 +215,7 @@ static inline bool xfs_inode_has_cow_data(struct xfs_inode *ip)
#define XFS_IFLOCK (1 << __XFS_IFLOCK_BIT)
#define __XFS_IPINNED_BIT 8 /* wakeup key for zero pin count */
#define XFS_IPINNED (1 << __XFS_IPINNED_BIT)
-#define XFS_IDONTCACHE (1 << 9) /* don't cache the inode long term */
-#define XFS_IEOFBLOCKS (1 << 10)/* has the preallocblocks tag set */
+#define XFS_IEOFBLOCKS (1 << 9) /* has the preallocblocks tag set */
/*
* If this unlinked inode is in the middle of recovery, don't let drop_inode
* truncate and free the inode. This can happen if we iget the inode during
@@ -467,6 +463,7 @@ int xfs_break_layouts(struct inode *inode, uint *iolock,
/* from xfs_iops.c */
extern void xfs_setup_inode(struct xfs_inode *ip);
extern void xfs_setup_iops(struct xfs_inode *ip);
+extern void xfs_diflags_to_iflags(struct xfs_inode *ip, bool init);
/*
* When setting up a newly allocated inode, we need to call
@@ -497,8 +494,6 @@ extern struct kmem_zone *xfs_inode_zone;
/* The default CoW extent size hint. */
#define XFS_DEFAULT_COWEXTSZ_HINT 32
-bool xfs_inode_verify_forks(struct xfs_inode *ip);
-
int xfs_iunlink_init(struct xfs_perag *pag);
void xfs_iunlink_destroy(struct xfs_perag *pag);
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index f779cca2346f..ba47bf65b772 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -36,10 +36,10 @@ xfs_inode_item_data_fork_size(
{
struct xfs_inode *ip = iip->ili_inode;
- switch (ip->i_d.di_format) {
+ switch (ip->i_df.if_format) {
case XFS_DINODE_FMT_EXTENTS:
if ((iip->ili_fields & XFS_ILOG_DEXT) &&
- ip->i_d.di_nextents > 0 &&
+ ip->i_df.if_nextents > 0 &&
ip->i_df.if_bytes > 0) {
/* worst case, doesn't subtract delalloc extents */
*nbytes += XFS_IFORK_DSIZE(ip);
@@ -77,10 +77,10 @@ xfs_inode_item_attr_fork_size(
{
struct xfs_inode *ip = iip->ili_inode;
- switch (ip->i_d.di_aformat) {
+ switch (ip->i_afp->if_format) {
case XFS_DINODE_FMT_EXTENTS:
if ((iip->ili_fields & XFS_ILOG_AEXT) &&
- ip->i_d.di_anextents > 0 &&
+ ip->i_afp->if_nextents > 0 &&
ip->i_afp->if_bytes > 0) {
/* worst case, doesn't subtract unused space */
*nbytes += XFS_IFORK_ASIZE(ip);
@@ -142,13 +142,13 @@ xfs_inode_item_format_data_fork(
struct xfs_inode *ip = iip->ili_inode;
size_t data_bytes;
- switch (ip->i_d.di_format) {
+ switch (ip->i_df.if_format) {
case XFS_DINODE_FMT_EXTENTS:
iip->ili_fields &=
~(XFS_ILOG_DDATA | XFS_ILOG_DBROOT | XFS_ILOG_DEV);
if ((iip->ili_fields & XFS_ILOG_DEXT) &&
- ip->i_d.di_nextents > 0 &&
+ ip->i_df.if_nextents > 0 &&
ip->i_df.if_bytes > 0) {
struct xfs_bmbt_rec *p;
@@ -227,18 +227,18 @@ xfs_inode_item_format_attr_fork(
struct xfs_inode *ip = iip->ili_inode;
size_t data_bytes;
- switch (ip->i_d.di_aformat) {
+ switch (ip->i_afp->if_format) {
case XFS_DINODE_FMT_EXTENTS:
iip->ili_fields &=
~(XFS_ILOG_ADATA | XFS_ILOG_ABROOT);
if ((iip->ili_fields & XFS_ILOG_AEXT) &&
- ip->i_d.di_anextents > 0 &&
+ ip->i_afp->if_nextents > 0 &&
ip->i_afp->if_bytes > 0) {
struct xfs_bmbt_rec *p;
ASSERT(xfs_iext_count(ip->i_afp) ==
- ip->i_d.di_anextents);
+ ip->i_afp->if_nextents);
p = xlog_prepare_iovec(lv, vecp, XLOG_REG_TYPE_IATTR_EXT);
data_bytes = xfs_iextents_copy(ip, p, XFS_ATTR_FORK);
@@ -305,7 +305,7 @@ xfs_inode_to_log_dinode(
struct inode *inode = VFS_I(ip);
to->di_magic = XFS_DINODE_MAGIC;
- to->di_format = from->di_format;
+ to->di_format = xfs_ifork_format(&ip->i_df);
to->di_uid = i_uid_read(inode);
to->di_gid = i_gid_read(inode);
to->di_projid_lo = from->di_projid & 0xffff;
@@ -326,10 +326,10 @@ xfs_inode_to_log_dinode(
to->di_size = from->di_size;
to->di_nblocks = from->di_nblocks;
to->di_extsize = from->di_extsize;
- to->di_nextents = from->di_nextents;
- to->di_anextents = from->di_anextents;
+ to->di_nextents = xfs_ifork_nextents(&ip->i_df);
+ to->di_anextents = xfs_ifork_nextents(ip->i_afp);
to->di_forkoff = from->di_forkoff;
- to->di_aformat = from->di_aformat;
+ to->di_aformat = xfs_ifork_format(ip->i_afp);
to->di_dmevmask = from->di_dmevmask;
to->di_dmstate = from->di_dmstate;
to->di_flags = from->di_flags;
@@ -497,21 +497,6 @@ xfs_inode_item_push(
if (xfs_ipincount(ip) > 0)
return XFS_ITEM_PINNED;
- /*
- * The buffer containing this item failed to be written back
- * previously. Resubmit the buffer for IO.
- */
- if (test_bit(XFS_LI_FAILED, &lip->li_flags)) {
- if (!xfs_buf_trylock(bp))
- return XFS_ITEM_LOCKED;
-
- if (!xfs_buf_resubmit_failed_buffers(bp, buffer_list))
- rval = XFS_ITEM_FLUSHING;
-
- xfs_buf_unlock(bp);
- return rval;
- }
-
if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
return XFS_ITEM_LOCKED;
@@ -777,17 +762,12 @@ xfs_iflush_done(
*/
void
xfs_iflush_abort(
- xfs_inode_t *ip,
- bool stale)
+ struct xfs_inode *ip)
{
- xfs_inode_log_item_t *iip = ip->i_itemp;
+ struct xfs_inode_log_item *iip = ip->i_itemp;
if (iip) {
- if (test_bit(XFS_LI_IN_AIL, &iip->ili_item.li_flags)) {
- xfs_trans_ail_remove(&iip->ili_item,
- stale ? SHUTDOWN_LOG_IO_ERROR :
- SHUTDOWN_CORRUPT_INCORE);
- }
+ xfs_trans_ail_delete(&iip->ili_item, 0);
iip->ili_logged = 0;
/*
* Clear the ili_last_fields bits now that we know that the
@@ -812,7 +792,7 @@ xfs_istale_done(
struct xfs_buf *bp,
struct xfs_log_item *lip)
{
- xfs_iflush_abort(INODE_ITEM(lip)->ili_inode, true);
+ xfs_iflush_abort(INODE_ITEM(lip)->ili_inode);
}
/*
diff --git a/fs/xfs/xfs_inode_item.h b/fs/xfs/xfs_inode_item.h
index 07a60e74c39c..60b34bb66e8e 100644
--- a/fs/xfs/xfs_inode_item.h
+++ b/fs/xfs/xfs_inode_item.h
@@ -13,7 +13,7 @@ struct xfs_bmbt_rec;
struct xfs_inode;
struct xfs_mount;
-typedef struct xfs_inode_log_item {
+struct xfs_inode_log_item {
struct xfs_log_item ili_item; /* common portion */
struct xfs_inode *ili_inode; /* inode ptr */
xfs_lsn_t ili_flush_lsn; /* lsn at last flush */
@@ -23,7 +23,7 @@ typedef struct xfs_inode_log_item {
unsigned int ili_last_fields; /* fields when flushed */
unsigned int ili_fields; /* fields to be logged */
unsigned int ili_fsync_fields; /* logged since last fsync */
-} xfs_inode_log_item_t;
+};
static inline int xfs_inode_clean(xfs_inode_t *ip)
{
@@ -34,7 +34,7 @@ extern void xfs_inode_item_init(struct xfs_inode *, struct xfs_mount *);
extern void xfs_inode_item_destroy(struct xfs_inode *);
extern void xfs_iflush_done(struct xfs_buf *, struct xfs_log_item *);
extern void xfs_istale_done(struct xfs_buf *, struct xfs_log_item *);
-extern void xfs_iflush_abort(struct xfs_inode *, bool);
+extern void xfs_iflush_abort(struct xfs_inode *);
extern int xfs_inode_item_format_convert(xfs_log_iovec_t *,
struct xfs_inode_log_format *);
diff --git a/fs/xfs/xfs_inode_item_recover.c b/fs/xfs/xfs_inode_item_recover.c
new file mode 100644
index 000000000000..dc3e26ff16c9
--- /dev/null
+++ b/fs/xfs/xfs_inode_item_recover.c
@@ -0,0 +1,394 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc.
+ * All Rights Reserved.
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_mount.h"
+#include "xfs_inode.h"
+#include "xfs_trans.h"
+#include "xfs_inode_item.h"
+#include "xfs_trace.h"
+#include "xfs_trans_priv.h"
+#include "xfs_buf_item.h"
+#include "xfs_log.h"
+#include "xfs_error.h"
+#include "xfs_log_priv.h"
+#include "xfs_log_recover.h"
+#include "xfs_icache.h"
+#include "xfs_bmap_btree.h"
+
+STATIC void
+xlog_recover_inode_ra_pass2(
+ struct xlog *log,
+ struct xlog_recover_item *item)
+{
+ if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
+ struct xfs_inode_log_format *ilfp = item->ri_buf[0].i_addr;
+
+ xlog_buf_readahead(log, ilfp->ilf_blkno, ilfp->ilf_len,
+ &xfs_inode_buf_ra_ops);
+ } else {
+ struct xfs_inode_log_format_32 *ilfp = item->ri_buf[0].i_addr;
+
+ xlog_buf_readahead(log, ilfp->ilf_blkno, ilfp->ilf_len,
+ &xfs_inode_buf_ra_ops);
+ }
+}
+
+/*
+ * Inode fork owner changes
+ *
+ * If we have been told that we have to reparent the inode fork, it's because an
+ * extent swap operation on a CRC enabled filesystem has been done and we are
+ * replaying it. We need to walk the BMBT of the appropriate fork and change the
+ * owners of it.
+ *
+ * The complexity here is that we don't have an inode context to work with, so
+ * after we've replayed the inode we need to instantiate one. This is where the
+ * fun begins.
+ *
+ * We are in the middle of log recovery, so we can't run transactions. That
+ * means we cannot use cache coherent inode instantiation via xfs_iget(), as
+ * that will result in the corresponding iput() running the inode through
+ * xfs_inactive(). If we've just replayed an inode core that changes the link
+ * count to zero (i.e. it's been unlinked), then xfs_inactive() will run
+ * transactions (bad!).
+ *
+ * So, to avoid this, we instantiate an inode directly from the inode core we've
+ * just recovered. We have the buffer still locked, and all we really need to
+ * instantiate is the inode core and the forks being modified. We can do this
+ * manually, then run the inode btree owner change, and then tear down the
+ * xfs_inode without having to run any transactions at all.
+ *
+ * Also, because we don't have a transaction context available here but need
+ * to gather all the buffers we modify for writeback, we pass the buffer_list
+ * for the operation to use instead.
+ */
+
+STATIC int
+xfs_recover_inode_owner_change(
+ struct xfs_mount *mp,
+ struct xfs_dinode *dip,
+ struct xfs_inode_log_format *in_f,
+ struct list_head *buffer_list)
+{
+ struct xfs_inode *ip;
+ int error;
+
+ ASSERT(in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER));
+
+ ip = xfs_inode_alloc(mp, in_f->ilf_ino);
+ if (!ip)
+ return -ENOMEM;
+
+ /* instantiate the inode */
+ ASSERT(dip->di_version >= 3);
+
+ error = xfs_inode_from_disk(ip, dip);
+ if (error)
+ goto out_free_ip;
+
+ if (in_f->ilf_fields & XFS_ILOG_DOWNER) {
+ ASSERT(in_f->ilf_fields & XFS_ILOG_DBROOT);
+ error = xfs_bmbt_change_owner(NULL, ip, XFS_DATA_FORK,
+ ip->i_ino, buffer_list);
+ if (error)
+ goto out_free_ip;
+ }
+
+ if (in_f->ilf_fields & XFS_ILOG_AOWNER) {
+ ASSERT(in_f->ilf_fields & XFS_ILOG_ABROOT);
+ error = xfs_bmbt_change_owner(NULL, ip, XFS_ATTR_FORK,
+ ip->i_ino, buffer_list);
+ if (error)
+ goto out_free_ip;
+ }
+
+out_free_ip:
+ xfs_inode_free(ip);
+ return error;
+}
+
+STATIC int
+xlog_recover_inode_commit_pass2(
+ struct xlog *log,
+ struct list_head *buffer_list,
+ struct xlog_recover_item *item,
+ xfs_lsn_t current_lsn)
+{
+ struct xfs_inode_log_format *in_f;
+ struct xfs_mount *mp = log->l_mp;
+ struct xfs_buf *bp;
+ struct xfs_dinode *dip;
+ int len;
+ char *src;
+ char *dest;
+ int error;
+ int attr_index;
+ uint fields;
+ struct xfs_log_dinode *ldip;
+ uint isize;
+ int need_free = 0;
+
+ if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
+ in_f = item->ri_buf[0].i_addr;
+ } else {
+ in_f = kmem_alloc(sizeof(struct xfs_inode_log_format), 0);
+ need_free = 1;
+ error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
+ if (error)
+ goto error;
+ }
+
+ /*
+ * Inode buffers can be freed; look out for that case and
+ * do not replay the inode.
+ */
+ if (xlog_is_buffer_cancelled(log, in_f->ilf_blkno, in_f->ilf_len)) {
+ error = 0;
+ trace_xfs_log_recover_inode_cancel(log, in_f);
+ goto error;
+ }
+ trace_xfs_log_recover_inode_recover(log, in_f);
+
+ error = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len,
+ 0, &bp, &xfs_inode_buf_ops);
+ if (error)
+ goto error;
+ ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
+ dip = xfs_buf_offset(bp, in_f->ilf_boffset);
+
+ /*
+ * Make sure the place we're flushing out to really looks
+ * like an inode!
+ */
+ if (XFS_IS_CORRUPT(mp, !xfs_verify_magic16(bp, dip->di_magic))) {
+ xfs_alert(mp,
+ "%s: Bad inode magic number, dip = "PTR_FMT", dino bp = "PTR_FMT", ino = %Ld",
+ __func__, dip, bp, in_f->ilf_ino);
+ error = -EFSCORRUPTED;
+ goto out_release;
+ }
+ ldip = item->ri_buf[1].i_addr;
+ if (XFS_IS_CORRUPT(mp, ldip->di_magic != XFS_DINODE_MAGIC)) {
+ xfs_alert(mp,
+ "%s: Bad inode log record, rec ptr "PTR_FMT", ino %Ld",
+ __func__, item, in_f->ilf_ino);
+ error = -EFSCORRUPTED;
+ goto out_release;
+ }
+
+ /*
+ * If the inode has an LSN in it, recover the inode only if it's less
+ * than the lsn of the transaction we are replaying. Note: we still
+ * need to replay an owner change even though the inode is more recent
+ * than the transaction as there is no guarantee that all the btree
+ * blocks are more recent than this transaction, too.
+ */
+ if (dip->di_version >= 3) {
+ xfs_lsn_t lsn = be64_to_cpu(dip->di_lsn);
+
+ if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
+ trace_xfs_log_recover_inode_skip(log, in_f);
+ error = 0;
+ goto out_owner_change;
+ }
+ }
+
+ /*
+ * di_flushiter is only valid for v1/2 inodes. All changes for v3 inodes
+ * are transactional, and if ordering is necessary we can determine that
+ * more accurately from the LSN field in the v3 inode core. Don't trust
+ * the inode versions, as we might be changing them here - use the
+ * superblock flag to determine whether we need to look at di_flushiter
+ * to skip replay when the on-disk inode is newer than the log one.
+ */
+ if (!xfs_sb_version_has_v3inode(&mp->m_sb) &&
+ ldip->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
+ /*
+ * Deal with the wrap case: a counter at DI_MAX_FLUSH has
+ * wrapped, so it compares as older than small values
+ */
+ if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
+ ldip->di_flushiter < (DI_MAX_FLUSH >> 1)) {
+ /* do nothing */
+ } else {
+ trace_xfs_log_recover_inode_skip(log, in_f);
+ error = 0;
+ goto out_release;
+ }
+ }
+
+ /* Take the opportunity to reset the flush iteration count */
+ ldip->di_flushiter = 0;
+
+ if (unlikely(S_ISREG(ldip->di_mode))) {
+ if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) &&
+ (ldip->di_format != XFS_DINODE_FMT_BTREE)) {
+ XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
+ XFS_ERRLEVEL_LOW, mp, ldip,
+ sizeof(*ldip));
+ xfs_alert(mp,
+ "%s: Bad regular inode log record, rec ptr "PTR_FMT", "
+ "ino ptr = "PTR_FMT", ino bp = "PTR_FMT", ino %Ld",
+ __func__, item, dip, bp, in_f->ilf_ino);
+ error = -EFSCORRUPTED;
+ goto out_release;
+ }
+ } else if (unlikely(S_ISDIR(ldip->di_mode))) {
+ if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) &&
+ (ldip->di_format != XFS_DINODE_FMT_BTREE) &&
+ (ldip->di_format != XFS_DINODE_FMT_LOCAL)) {
+ XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
+ XFS_ERRLEVEL_LOW, mp, ldip,
+ sizeof(*ldip));
+ xfs_alert(mp,
+ "%s: Bad dir inode log record, rec ptr "PTR_FMT", "
+ "ino ptr = "PTR_FMT", ino bp = "PTR_FMT", ino %Ld",
+ __func__, item, dip, bp, in_f->ilf_ino);
+ error = -EFSCORRUPTED;
+ goto out_release;
+ }
+ }
+ if (unlikely(ldip->di_nextents + ldip->di_anextents > ldip->di_nblocks)){
+ XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
+ XFS_ERRLEVEL_LOW, mp, ldip,
+ sizeof(*ldip));
+ xfs_alert(mp,
+ "%s: Bad inode log record, rec ptr "PTR_FMT", dino ptr "PTR_FMT", "
+ "dino bp "PTR_FMT", ino %Ld, total extents = %d, nblocks = %Ld",
+ __func__, item, dip, bp, in_f->ilf_ino,
+ ldip->di_nextents + ldip->di_anextents,
+ ldip->di_nblocks);
+ error = -EFSCORRUPTED;
+ goto out_release;
+ }
+ if (unlikely(ldip->di_forkoff > mp->m_sb.sb_inodesize)) {
+ XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
+ XFS_ERRLEVEL_LOW, mp, ldip,
+ sizeof(*ldip));
+ xfs_alert(mp,
+ "%s: Bad inode log record, rec ptr "PTR_FMT", dino ptr "PTR_FMT", "
+ "dino bp "PTR_FMT", ino %Ld, forkoff 0x%x", __func__,
+ item, dip, bp, in_f->ilf_ino, ldip->di_forkoff);
+ error = -EFSCORRUPTED;
+ goto out_release;
+ }
+ isize = xfs_log_dinode_size(mp);
+ if (unlikely(item->ri_buf[1].i_len > isize)) {
+ XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
+ XFS_ERRLEVEL_LOW, mp, ldip,
+ sizeof(*ldip));
+ xfs_alert(mp,
+ "%s: Bad inode log record length %d, rec ptr "PTR_FMT,
+ __func__, item->ri_buf[1].i_len, item);
+ error = -EFSCORRUPTED;
+ goto out_release;
+ }
+
+ /* recover the log dinode into the on-disk inode */
+ xfs_log_dinode_to_disk(ldip, dip);
+
+ fields = in_f->ilf_fields;
+ if (fields & XFS_ILOG_DEV)
+ xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev);
+
+ if (in_f->ilf_size == 2)
+ goto out_owner_change;
+ len = item->ri_buf[2].i_len;
+ src = item->ri_buf[2].i_addr;
+ ASSERT(in_f->ilf_size <= 4);
+ ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
+ ASSERT(!(fields & XFS_ILOG_DFORK) ||
+ (len == in_f->ilf_dsize));
+
+ switch (fields & XFS_ILOG_DFORK) {
+ case XFS_ILOG_DDATA:
+ case XFS_ILOG_DEXT:
+ memcpy(XFS_DFORK_DPTR(dip), src, len);
+ break;
+
+ case XFS_ILOG_DBROOT:
+ xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len,
+ (struct xfs_bmdr_block *)XFS_DFORK_DPTR(dip),
+ XFS_DFORK_DSIZE(dip, mp));
+ break;
+
+ default:
+ /*
+ * There are no data fork flags set.
+ */
+ ASSERT((fields & XFS_ILOG_DFORK) == 0);
+ break;
+ }
+
+ /*
+ * If we logged any attribute data, recover it. There may or
+ * may not have been any other non-core data logged in this
+ * transaction.
+ */
+ if (in_f->ilf_fields & XFS_ILOG_AFORK) {
+ if (in_f->ilf_fields & XFS_ILOG_DFORK) {
+ attr_index = 3;
+ } else {
+ attr_index = 2;
+ }
+ len = item->ri_buf[attr_index].i_len;
+ src = item->ri_buf[attr_index].i_addr;
+ ASSERT(len == in_f->ilf_asize);
+
+ switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
+ case XFS_ILOG_ADATA:
+ case XFS_ILOG_AEXT:
+ dest = XFS_DFORK_APTR(dip);
+ ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
+ memcpy(dest, src, len);
+ break;
+
+ case XFS_ILOG_ABROOT:
+ dest = XFS_DFORK_APTR(dip);
+ xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src,
+ len, (struct xfs_bmdr_block *)dest,
+ XFS_DFORK_ASIZE(dip, mp));
+ break;
+
+ default:
+ xfs_warn(log->l_mp, "%s: Invalid flag", __func__);
+ ASSERT(0);
+ error = -EFSCORRUPTED;
+ goto out_release;
+ }
+ }
+
+out_owner_change:
+ /* Recover the swapext owner change unless inode has been deleted */
+ if ((in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER)) &&
+ (dip->di_mode != 0))
+ error = xfs_recover_inode_owner_change(mp, dip, in_f,
+ buffer_list);
+ /* re-generate the checksum. */
+ xfs_dinode_calc_crc(log->l_mp, dip);
+
+ ASSERT(bp->b_mount == mp);
+ bp->b_iodone = xlog_recover_iodone;
+ xfs_buf_delwri_queue(bp, buffer_list);
+
+out_release:
+ xfs_buf_relse(bp);
+error:
+ if (need_free)
+ kmem_free(in_f);
+ return error;
+}
+
+const struct xlog_recover_item_ops xlog_inode_item_ops = {
+ .item_type = XFS_LI_INODE,
+ .ra_pass2 = xlog_recover_inode_ra_pass2,
+ .commit_pass2 = xlog_recover_inode_commit_pass2,
+};
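
The di_flushiter wrap handling in xlog_recover_inode_commit_pass2() above is
easy to misread. The same decision, pulled out as a standalone helper (a
sketch; this helper does not exist in the patch):

    /* True if the on-disk v1/v2 inode is newer than the logged copy. */
    static bool
    xlog_dinode_is_newer(uint16_t disk_flushiter, uint16_t log_flushiter)
    {
    	/* Wrap case: DI_MAX_FLUSH on disk, freshly wrapped value in the log. */
    	if (disk_flushiter == DI_MAX_FLUSH &&
    	    log_flushiter < (DI_MAX_FLUSH >> 1))
    		return false;	/* the log entry is newer - replay it */
    	return log_flushiter < disk_flushiter;
    }
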
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 309958186d33..a190212ca85d 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -1104,26 +1104,17 @@ xfs_fill_fsxattr(
bool attr,
struct fsxattr *fa)
{
+ struct xfs_ifork *ifp = attr ? ip->i_afp : &ip->i_df;
+
simple_fill_fsxattr(fa, xfs_ip2xflags(ip));
fa->fsx_extsize = ip->i_d.di_extsize << ip->i_mount->m_sb.sb_blocklog;
fa->fsx_cowextsize = ip->i_d.di_cowextsize <<
ip->i_mount->m_sb.sb_blocklog;
fa->fsx_projid = ip->i_d.di_projid;
-
- if (attr) {
- if (ip->i_afp) {
- if (ip->i_afp->if_flags & XFS_IFEXTENTS)
- fa->fsx_nextents = xfs_iext_count(ip->i_afp);
- else
- fa->fsx_nextents = ip->i_d.di_anextents;
- } else
- fa->fsx_nextents = 0;
- } else {
- if (ip->i_df.if_flags & XFS_IFEXTENTS)
- fa->fsx_nextents = xfs_iext_count(&ip->i_df);
- else
- fa->fsx_nextents = ip->i_d.di_nextents;
- }
+ if (ifp && (ifp->if_flags & XFS_IFEXTENTS))
+ fa->fsx_nextents = xfs_iext_count(ifp);
+ else
+ fa->fsx_nextents = xfs_ifork_nextents(ifp);
}
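
This rewrite leans on xfs_ifork_nextents() being NULL-safe so that the
attr-less case folds into the same expression; presumably the helper is little
more than the following (a sketch, not this patch's definition):

    static inline xfs_extnum_t
    xfs_ifork_nextents(struct xfs_ifork *ifp)
    {
    	if (!ifp)		/* inode has no attr fork allocated */
    		return 0;
    	return ifp->if_nextents;
    }
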
STATIC int
@@ -1201,37 +1192,6 @@ xfs_flags2diflags2(
return di_flags2;
}
-STATIC void
-xfs_diflags_to_linux(
- struct xfs_inode *ip)
-{
- struct inode *inode = VFS_I(ip);
- unsigned int xflags = xfs_ip2xflags(ip);
-
- if (xflags & FS_XFLAG_IMMUTABLE)
- inode->i_flags |= S_IMMUTABLE;
- else
- inode->i_flags &= ~S_IMMUTABLE;
- if (xflags & FS_XFLAG_APPEND)
- inode->i_flags |= S_APPEND;
- else
- inode->i_flags &= ~S_APPEND;
- if (xflags & FS_XFLAG_SYNC)
- inode->i_flags |= S_SYNC;
- else
- inode->i_flags &= ~S_SYNC;
- if (xflags & FS_XFLAG_NOATIME)
- inode->i_flags |= S_NOATIME;
- else
- inode->i_flags &= ~S_NOATIME;
-#if 0 /* disabled until the flag switching races are sorted out */
- if (xflags & FS_XFLAG_DAX)
- inode->i_flags |= S_DAX;
- else
- inode->i_flags &= ~S_DAX;
-#endif
-}
-
static int
xfs_ioctl_setattr_xflags(
struct xfs_trans *tp,
@@ -1242,7 +1202,7 @@ xfs_ioctl_setattr_xflags(
uint64_t di_flags2;
/* Can't change realtime flag if any extents are allocated. */
- if ((ip->i_d.di_nextents || ip->i_delayed_blks) &&
+ if ((ip->i_df.if_nextents || ip->i_delayed_blks) &&
XFS_IS_REALTIME_INODE(ip) != (fa->fsx_xflags & FS_XFLAG_REALTIME))
return -EINVAL;
@@ -1269,71 +1229,33 @@ xfs_ioctl_setattr_xflags(
ip->i_d.di_flags = xfs_flags2diflags(ip, fa->fsx_xflags);
ip->i_d.di_flags2 = di_flags2;
- xfs_diflags_to_linux(ip);
+ xfs_diflags_to_iflags(ip, false);
xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
XFS_STATS_INC(mp, xs_ig_attrchg);
return 0;
}
-/*
- * If we are changing DAX flags, we have to ensure the file is clean and any
- * cached objects in the address space are invalidated and removed. This
- * requires us to lock out other IO and page faults similar to a truncate
- * operation. The locks need to be held until the transaction has been committed
- * so that the cache invalidation is atomic with respect to the DAX flag
- * manipulation.
- */
-static int
-xfs_ioctl_setattr_dax_invalidate(
+static void
+xfs_ioctl_setattr_prepare_dax(
struct xfs_inode *ip,
- struct fsxattr *fa,
- int *join_flags)
+ struct fsxattr *fa)
{
- struct inode *inode = VFS_I(ip);
- struct super_block *sb = inode->i_sb;
- int error;
-
- *join_flags = 0;
-
- /*
- * It is only valid to set the DAX flag on regular files and
- * directories on filesystems where the block size is equal to the page
- * size. On directories it serves as an inherited hint so we don't
- * have to check the device for dax support or flush pagecache.
- */
- if (fa->fsx_xflags & FS_XFLAG_DAX) {
- struct xfs_buftarg *target = xfs_inode_buftarg(ip);
-
- if (!bdev_dax_supported(target->bt_bdev, sb->s_blocksize))
- return -EINVAL;
- }
-
- /* If the DAX state is not changing, we have nothing to do here. */
- if ((fa->fsx_xflags & FS_XFLAG_DAX) && IS_DAX(inode))
- return 0;
- if (!(fa->fsx_xflags & FS_XFLAG_DAX) && !IS_DAX(inode))
- return 0;
+ struct xfs_mount *mp = ip->i_mount;
+ struct inode *inode = VFS_I(ip);
if (S_ISDIR(inode->i_mode))
- return 0;
-
- /* lock, flush and invalidate mapping in preparation for flag change */
- xfs_ilock(ip, XFS_MMAPLOCK_EXCL | XFS_IOLOCK_EXCL);
- error = filemap_write_and_wait(inode->i_mapping);
- if (error)
- goto out_unlock;
- error = invalidate_inode_pages2(inode->i_mapping);
- if (error)
- goto out_unlock;
-
- *join_flags = XFS_MMAPLOCK_EXCL | XFS_IOLOCK_EXCL;
- return 0;
+ return;
-out_unlock:
- xfs_iunlock(ip, XFS_MMAPLOCK_EXCL | XFS_IOLOCK_EXCL);
- return error;
+ if ((mp->m_flags & XFS_MOUNT_DAX_ALWAYS) ||
+ (mp->m_flags & XFS_MOUNT_DAX_NEVER))
+ return;
+ if (((fa->fsx_xflags & FS_XFLAG_DAX) &&
+ !(ip->i_d.di_flags2 & XFS_DIFLAG2_DAX)) ||
+ (!(fa->fsx_xflags & FS_XFLAG_DAX) &&
+ (ip->i_d.di_flags2 & XFS_DIFLAG2_DAX)))
+ d_mark_dontcache(inode);
}
/*
@@ -1341,17 +1263,10 @@ out_unlock:
* have permission to do so. On success, return a clean transaction and the
* inode locked exclusively ready for further operation specific checks. On
* failure, return an error without modifying or locking the inode.
- *
- * The inode might already be IO locked on call. If this is the case, it is
- * indicated in @join_flags and we take full responsibility for ensuring they
- * are unlocked from now on. Hence if we have an error here, we still have to
- * unlock them. Otherwise, once they are joined to the transaction, they will
- * be unlocked on commit/cancel.
*/
static struct xfs_trans *
xfs_ioctl_setattr_get_trans(
- struct xfs_inode *ip,
- int join_flags)
+ struct xfs_inode *ip)
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_trans *tp;
@@ -1368,8 +1283,7 @@ xfs_ioctl_setattr_get_trans(
goto out_unlock;
xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | join_flags);
- join_flags = 0;
+ xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
/*
* CAP_FOWNER overrides the following restrictions:
@@ -1390,8 +1304,6 @@ xfs_ioctl_setattr_get_trans(
out_cancel:
xfs_trans_cancel(tp);
out_unlock:
- if (join_flags)
- xfs_iunlock(ip, join_flags);
return ERR_PTR(error);
}
@@ -1420,7 +1332,7 @@ xfs_ioctl_setattr_check_extsize(
xfs_extlen_t size;
xfs_fsblock_t extsize_fsb;
- if (S_ISREG(VFS_I(ip)->i_mode) && ip->i_d.di_nextents &&
+ if (S_ISREG(VFS_I(ip)->i_mode) && ip->i_df.if_nextents &&
((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) != fa->fsx_extsize))
return -EINVAL;
@@ -1513,11 +1425,9 @@ xfs_ioctl_setattr(
struct fsxattr old_fa;
struct xfs_mount *mp = ip->i_mount;
struct xfs_trans *tp;
- struct xfs_dquot *udqp = NULL;
struct xfs_dquot *pdqp = NULL;
struct xfs_dquot *olddquot = NULL;
int code;
- int join_flags = 0;
trace_xfs_ioctl_setattr(ip);
@@ -1536,23 +1446,14 @@ xfs_ioctl_setattr(
if (XFS_IS_QUOTA_ON(mp)) {
code = xfs_qm_vop_dqalloc(ip, VFS_I(ip)->i_uid,
VFS_I(ip)->i_gid, fa->fsx_projid,
- XFS_QMOPT_PQUOTA, &udqp, NULL, &pdqp);
+ XFS_QMOPT_PQUOTA, NULL, NULL, &pdqp);
if (code)
return code;
}
- /*
- * Changing DAX config may require inode locking for mapping
- * invalidation. These need to be held all the way to transaction commit
- * or cancel time, so need to be passed through to
- * xfs_ioctl_setattr_get_trans() so it can apply them to the join call
- * appropriately.
- */
- code = xfs_ioctl_setattr_dax_invalidate(ip, fa, &join_flags);
- if (code)
- goto error_free_dquots;
+ xfs_ioctl_setattr_prepare_dax(ip, fa);
- tp = xfs_ioctl_setattr_get_trans(ip, join_flags);
+ tp = xfs_ioctl_setattr_get_trans(ip);
if (IS_ERR(tp)) {
code = PTR_ERR(tp);
goto error_free_dquots;
@@ -1560,7 +1461,7 @@ xfs_ioctl_setattr(
if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp) &&
ip->i_d.di_projid != fa->fsx_projid) {
- code = xfs_qm_vop_chown_reserve(tp, ip, udqp, NULL, pdqp,
+ code = xfs_qm_vop_chown_reserve(tp, ip, NULL, NULL, pdqp,
capable(CAP_FOWNER) ? XFS_QMOPT_FORCE_RES : 0);
if (code) /* out of quota */
goto error_trans_cancel;
@@ -1626,7 +1527,6 @@ xfs_ioctl_setattr(
* Release any dquot(s) the inode had kept before chown.
*/
xfs_qm_dqrele(olddquot);
- xfs_qm_dqrele(udqp);
xfs_qm_dqrele(pdqp);
return code;
@@ -1634,7 +1534,6 @@ xfs_ioctl_setattr(
error_trans_cancel:
xfs_trans_cancel(tp);
error_free_dquots:
- xfs_qm_dqrele(udqp);
xfs_qm_dqrele(pdqp);
return code;
}
@@ -1682,7 +1581,6 @@ xfs_ioc_setxflags(
struct fsxattr fa;
struct fsxattr old_fa;
unsigned int flags;
- int join_flags = 0;
int error;
if (copy_from_user(&flags, arg, sizeof(flags)))
@@ -1699,18 +1597,9 @@ xfs_ioc_setxflags(
if (error)
return error;
- /*
- * Changing DAX config may require inode locking for mapping
- * invalidation. These need to be held all the way to transaction commit
- * or cancel time, so need to be passed through to
- * xfs_ioctl_setattr_get_trans() so it can apply them to the join call
- * appropriately.
- */
- error = xfs_ioctl_setattr_dax_invalidate(ip, &fa, &join_flags);
- if (error)
- goto out_drop_write;
+ xfs_ioctl_setattr_prepare_dax(ip, &fa);
- tp = xfs_ioctl_setattr_get_trans(ip, join_flags);
+ tp = xfs_ioctl_setattr_get_trans(ip);
if (IS_ERR(tp)) {
error = PTR_ERR(tp);
goto out_drop_write;
@@ -2082,6 +1971,41 @@ out:
return error;
}
+static inline int
+xfs_fs_eofblocks_from_user(
+ struct xfs_fs_eofblocks *src,
+ struct xfs_eofblocks *dst)
+{
+ if (src->eof_version != XFS_EOFBLOCKS_VERSION)
+ return -EINVAL;
+
+ if (src->eof_flags & ~XFS_EOF_FLAGS_VALID)
+ return -EINVAL;
+
+ if (memchr_inv(&src->pad32, 0, sizeof(src->pad32)) ||
+ memchr_inv(src->pad64, 0, sizeof(src->pad64)))
+ return -EINVAL;
+
+ dst->eof_flags = src->eof_flags;
+ dst->eof_prid = src->eof_prid;
+ dst->eof_min_file_size = src->eof_min_file_size;
+
+ dst->eof_uid = INVALID_UID;
+ if (src->eof_flags & XFS_EOF_FLAGS_UID) {
+ dst->eof_uid = make_kuid(current_user_ns(), src->eof_uid);
+ if (!uid_valid(dst->eof_uid))
+ return -EINVAL;
+ }
+
+ dst->eof_gid = INVALID_GID;
+ if (src->eof_flags & XFS_EOF_FLAGS_GID) {
+ dst->eof_gid = make_kgid(current_user_ns(), src->eof_gid);
+ if (!gid_valid(dst->eof_gid))
+ return -EINVAL;
+ }
+ return 0;
+}
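
A sketch of how an ioctl handler might feed this helper; only
xfs_fs_eofblocks_from_user() itself comes from the patch, the surrounding
handler shape is assumed:

    struct xfs_fs_eofblocks	eofb;	/* user ABI struct */
    struct xfs_eofblocks	keofb;	/* validated kernel form */
    int			error;

    if (copy_from_user(&eofb, arg, sizeof(eofb)))
    	return -EFAULT;
    error = xfs_fs_eofblocks_from_user(&eofb, &keofb);
    if (error)
    	return error;
    /* keofb now holds checked flags plus kuid/kgid-mapped owner IDs */
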
+
/*
 * Note: some of the ioctls return positive numbers as a
* byte count indicating success, such as readlink_by_handle.
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index bb590a267a7f..b9a8c3798e08 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -352,22 +352,10 @@ xfs_quota_calc_throttle(
}
/*
- * If we are doing a write at the end of the file and there are no allocations
- * past this one, then extend the allocation out to the file system's write
- * iosize.
- *
* If we don't have a user specified preallocation size, dynamically increase
* the preallocation size as the size of the file grows. Cap the maximum size
* at a single extent or less if the filesystem is near full. The closer the
- * filesystem is to full, the smaller the maximum prealocation.
- *
- * As an exception we don't do any preallocation at all if the file is smaller
- * than the minimum preallocation and we are using the default dynamic
- * preallocation scheme, as it is likely this is the only write to the file that
- * is going to be done.
- *
- * We clean up any extra space left over when the file is closed in
- * xfs_inactive().
+ * filesystem is to being full, the smaller the maximum preallocation.
*/
STATIC xfs_fsblock_t
xfs_iomap_prealloc_size(
@@ -377,63 +365,70 @@ xfs_iomap_prealloc_size(
loff_t count,
struct xfs_iext_cursor *icur)
{
+ struct xfs_iext_cursor ncur = *icur;
+ struct xfs_bmbt_irec prev, got;
struct xfs_mount *mp = ip->i_mount;
struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset);
- struct xfs_bmbt_irec prev;
- int shift = 0;
int64_t freesp;
xfs_fsblock_t qblocks;
- int qshift = 0;
xfs_fsblock_t alloc_blocks = 0;
+ xfs_extlen_t plen;
+ int shift = 0;
+ int qshift = 0;
- if (offset + count <= XFS_ISIZE(ip))
- return 0;
-
- if (!(mp->m_flags & XFS_MOUNT_ALLOCSIZE) &&
- (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_allocsize_blocks)))
+ /*
+ * As an exception we don't do any preallocation at all if the file is
+ * smaller than the minimum preallocation and we are using the default
+ * dynamic preallocation scheme, as it is likely this is the only write
+ * to the file that is going to be done.
+ */
+ if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_allocsize_blocks))
return 0;
/*
- * If an explicit allocsize is set, the file is small, or we
- * are writing behind a hole, then use the minimum prealloc:
+ * Use the minimum preallocation size for small files or if we are
+ * writing right after a hole.
*/
- if ((mp->m_flags & XFS_MOUNT_ALLOCSIZE) ||
- XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) ||
- !xfs_iext_peek_prev_extent(ifp, icur, &prev) ||
+ if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) ||
+ !xfs_iext_prev_extent(ifp, &ncur, &prev) ||
prev.br_startoff + prev.br_blockcount < offset_fsb)
return mp->m_allocsize_blocks;
/*
- * Determine the initial size of the preallocation. We are beyond the
- * current EOF here, but we need to take into account whether this is
- * a sparse write or an extending write when determining the
- * preallocation size. Hence we need to look up the extent that ends
- * at the current write offset and use the result to determine the
- * preallocation size.
- *
- * If the extent is a hole, then preallocation is essentially disabled.
- * Otherwise we take the size of the preceding data extent as the basis
- * for the preallocation size. If the size of the extent is greater than
- * half the maximum extent length, then use the current offset as the
- * basis. This ensures that for large files the preallocation size
- * always extends to MAXEXTLEN rather than falling short due to things
- * like stripe unit/width alignment of real extents.
+ * Take the size of the preceding data extents as the basis for the
+ * preallocation size. Note that we don't care if the previous extents
+ * are written or not.
*/
- if (prev.br_blockcount <= (MAXEXTLEN >> 1))
- alloc_blocks = prev.br_blockcount << 1;
- else
+ plen = prev.br_blockcount;
+ while (xfs_iext_prev_extent(ifp, &ncur, &got)) {
+ if (plen > MAXEXTLEN / 2 ||
+ isnullstartblock(got.br_startblock) ||
+ got.br_startoff + got.br_blockcount != prev.br_startoff ||
+ got.br_startblock + got.br_blockcount != prev.br_startblock)
+ break;
+ plen += got.br_blockcount;
+ prev = got;
+ }
+
+ /*
+ * If the size of the extents is greater than half the maximum extent
+ * length, then use the current offset as the basis. This ensures that
+ * for large files the preallocation size always extends to MAXEXTLEN
+ * rather than falling short due to things like stripe unit/width
+ * alignment of real extents.
+ */
+ alloc_blocks = plen * 2;
+ if (alloc_blocks > MAXEXTLEN)
alloc_blocks = XFS_B_TO_FSB(mp, offset);
- if (!alloc_blocks)
- goto check_writeio;
qblocks = alloc_blocks;
/*
* MAXEXTLEN is not a power of two value but we round the prealloc down
* to the nearest power of two value after throttling. To prevent the
- * round down from unconditionally reducing the maximum supported prealloc
- * size, we round up first, apply appropriate throttling, round down and
- * cap the value to MAXEXTLEN.
+ * round down from unconditionally reducing the maximum supported
+ * prealloc size, we round up first, apply appropriate throttling,
+ * round down and cap the value to MAXEXTLEN.
*/
alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(MAXEXTLEN),
alloc_blocks);
@@ -494,7 +489,6 @@ xfs_iomap_prealloc_size(
*/
while (alloc_blocks && alloc_blocks >= freesp)
alloc_blocks >>= 4;
-check_writeio:
if (alloc_blocks < mp->m_allocsize_blocks)
alloc_blocks = mp->m_allocsize_blocks;
trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift,
@@ -563,7 +557,7 @@ xfs_iomap_write_unwritten(
xfs_trans_ijoin(tp, ip, 0);
error = xfs_trans_reserve_quota_nblks(tp, ip, resblks, 0,
- XFS_QMOPT_RES_REGBLKS);
+ XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES);
if (error)
goto error_on_bmapi_transaction;
@@ -856,7 +850,7 @@ xfs_buffered_write_iomap_begin(
xfs_ilock(ip, XFS_ILOCK_EXCL);
- if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(ip, XFS_DATA_FORK)) ||
+ if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(&ip->i_df)) ||
XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
error = -EFSCORRUPTED;
goto out_unlock;
@@ -961,9 +955,16 @@ xfs_buffered_write_iomap_begin(
if (error)
goto out_unlock;
- if (eof) {
- prealloc_blocks = xfs_iomap_prealloc_size(ip, allocfork, offset,
- count, &icur);
+ if (eof && offset + count > XFS_ISIZE(ip)) {
+ /*
+ * Determine the initial size of the preallocation.
+ * We clean up any extra preallocation when the file is closed.
+ */
+ if (mp->m_flags & XFS_MOUNT_ALLOCSIZE)
+ prealloc_blocks = mp->m_allocsize_blocks;
+ else
+ prealloc_blocks = xfs_iomap_prealloc_size(ip, allocfork,
+ offset, count, &icur);
if (prealloc_blocks) {
xfs_extlen_t align;
xfs_off_t end_offset;
@@ -1258,12 +1259,12 @@ xfs_xattr_iomap_begin(
lockmode = xfs_ilock_attr_map_shared(ip);
/* if there are no attribute fork or extents, return ENOENT */
- if (!XFS_IFORK_Q(ip) || !ip->i_d.di_anextents) {
+ if (!XFS_IFORK_Q(ip) || !ip->i_afp->if_nextents) {
error = -ENOENT;
goto out_unlock;
}
- ASSERT(ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL);
+ ASSERT(ip->i_afp->if_format != XFS_DINODE_FMT_LOCAL);
error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
&nimaps, XFS_BMAPI_ATTRFORK);
out_unlock:
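
Numerically, the new preallocation sizing doubles the contiguous written run
preceding the write, falls back to the write offset once that run exceeds
MAXEXTLEN/2, and then rounds and throttles. A sketch of the rounding tail
under an assumed free-space shift (mirrors the logic above, not a verbatim
excerpt from the patch):

    /* Round up first so throttling can't undershoot the MAXEXTLEN cap. */
    alloc_blocks = min_t(xfs_fsblock_t,
    		     roundup_pow_of_two(MAXEXTLEN), alloc_blocks);
    alloc_blocks >>= shift;			/* free-space throttle */
    if (alloc_blocks)
    	alloc_blocks = rounddown_pow_of_two(alloc_blocks);
    if (alloc_blocks > MAXEXTLEN)
    	alloc_blocks = MAXEXTLEN;
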
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index f7a99b3bbcf7..80a13c8561d8 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -25,13 +25,14 @@
#include <linux/posix_acl.h>
#include <linux/security.h>
#include <linux/iversion.h>
+#include <linux/fiemap.h>
/*
- * Directories have different lock order w.r.t. mmap_sem compared to regular
+ * Directories have different lock order w.r.t. mmap_lock compared to regular
* files. This is due to readdir potentially triggering page faults on a user
* buffer inside filldir(), and this happens with the ilock on the directory
* held. For regular files, the lock order is the other way around - the
- * mmap_sem is taken during the page fault, and then we lock the ilock to do
+ * mmap_lock is taken during the page fault, and then we lock the ilock to do
* block mapping. Hence we need a different class for the directory ilock so
* that lockdep can tell them apart.
*/
@@ -738,12 +739,7 @@ xfs_setattr_nonsize(
if (error) /* out of quota */
goto out_cancel;
}
- }
- /*
- * Change file ownership. Must be the owner or privileged.
- */
- if (mask & (ATTR_UID|ATTR_GID)) {
/*
* CAP_FSETID overrides the following restrictions:
*
@@ -877,7 +873,7 @@ xfs_setattr_size(
/*
* Short circuit the truncate case for zero length files.
*/
- if (newsize == 0 && oldsize == 0 && ip->i_d.di_nextents == 0) {
+ if (newsize == 0 && oldsize == 0 && ip->i_df.if_nextents == 0) {
if (!(iattr->ia_valid & (ATTR_CTIME|ATTR_MTIME)))
return 0;
@@ -1243,13 +1239,12 @@ xfs_inode_supports_dax(
{
struct xfs_mount *mp = ip->i_mount;
- /* Only supported on non-reflinked files. */
- if (!S_ISREG(VFS_I(ip)->i_mode) || xfs_is_reflink_inode(ip))
+ /* Only supported on regular files. */
+ if (!S_ISREG(VFS_I(ip)->i_mode))
return false;
- /* DAX mount option or DAX iflag must be set. */
- if (!(mp->m_flags & XFS_MOUNT_DAX) &&
- !(ip->i_d.di_flags2 & XFS_DIFLAG2_DAX))
+ /* Only supported on non-reflinked files. */
+ if (xfs_is_reflink_inode(ip))
return false;
/* Block size must match page size */
@@ -1260,26 +1255,51 @@ xfs_inode_supports_dax(
return xfs_inode_buftarg(ip)->bt_daxdev != NULL;
}
-STATIC void
+static bool
+xfs_inode_should_enable_dax(
+ struct xfs_inode *ip)
+{
+ if (!IS_ENABLED(CONFIG_FS_DAX))
+ return false;
+ if (ip->i_mount->m_flags & XFS_MOUNT_DAX_NEVER)
+ return false;
+ if (!xfs_inode_supports_dax(ip))
+ return false;
+ if (ip->i_mount->m_flags & XFS_MOUNT_DAX_ALWAYS)
+ return true;
+ if (ip->i_d.di_flags2 & XFS_DIFLAG2_DAX)
+ return true;
+ return false;
+}
+
+void
xfs_diflags_to_iflags(
- struct inode *inode,
- struct xfs_inode *ip)
+ struct xfs_inode *ip,
+ bool init)
{
- uint16_t flags = ip->i_d.di_flags;
-
- inode->i_flags &= ~(S_IMMUTABLE | S_APPEND | S_SYNC |
- S_NOATIME | S_DAX);
-
- if (flags & XFS_DIFLAG_IMMUTABLE)
- inode->i_flags |= S_IMMUTABLE;
- if (flags & XFS_DIFLAG_APPEND)
- inode->i_flags |= S_APPEND;
- if (flags & XFS_DIFLAG_SYNC)
- inode->i_flags |= S_SYNC;
- if (flags & XFS_DIFLAG_NOATIME)
- inode->i_flags |= S_NOATIME;
- if (xfs_inode_supports_dax(ip))
- inode->i_flags |= S_DAX;
+ struct inode *inode = VFS_I(ip);
+ unsigned int xflags = xfs_ip2xflags(ip);
+ unsigned int flags = 0;
+
+ ASSERT(!(IS_DAX(inode) && init));
+
+ if (xflags & FS_XFLAG_IMMUTABLE)
+ flags |= S_IMMUTABLE;
+ if (xflags & FS_XFLAG_APPEND)
+ flags |= S_APPEND;
+ if (xflags & FS_XFLAG_SYNC)
+ flags |= S_SYNC;
+ if (xflags & FS_XFLAG_NOATIME)
+ flags |= S_NOATIME;
+ if (init && xfs_inode_should_enable_dax(ip))
+ flags |= S_DAX;
+
+ /*
+ * S_DAX can only be set during inode initialization and is never set by
+ * the VFS, so we cannot mask off S_DAX in i_flags.
+ */
+ inode->i_flags &= ~(S_IMMUTABLE | S_APPEND | S_SYNC | S_NOATIME);
+ inode->i_flags |= flags;
}
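For illustration, a minimal sketch of the two kinds of call site the new signature implies; the init == false caller is an assumption about the flag-update paths rather than code from this patch:

	/* inode instantiation: the only point at which S_DAX may be set */
	xfs_diflags_to_iflags(ip, true);

	/* later di_flags updates: S_DAX is deliberately left untouched */
	xfs_diflags_to_iflags(ip, false);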
/*
@@ -1305,7 +1325,7 @@ xfs_setup_inode(
inode_fake_hash(inode);
i_size_write(inode, ip->i_d.di_size);
- xfs_diflags_to_iflags(inode, ip);
+ xfs_diflags_to_iflags(ip, true);
if (S_ISDIR(inode->i_mode)) {
/*
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index ff2da28fed90..16ca97a7ff00 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -104,9 +104,9 @@ xfs_bulkstat_one_int(
buf->bs_xflags = xfs_ip2xflags(ip);
buf->bs_extsize_blks = dic->di_extsize;
- buf->bs_extents = dic->di_nextents;
+ buf->bs_extents = xfs_ifork_nextents(&ip->i_df);
xfs_bulkstat_health(ip, buf);
- buf->bs_aextents = dic->di_anextents;
+ buf->bs_aextents = xfs_ifork_nextents(ip->i_afp);
buf->bs_forkoff = XFS_IFORK_BOFF(ip);
buf->bs_version = XFS_BULKSTAT_VERSION_V5;
@@ -115,7 +115,7 @@ xfs_bulkstat_one_int(
buf->bs_cowextsize_blks = dic->di_cowextsize;
}
- switch (dic->di_format) {
+ switch (ip->i_df.if_format) {
case XFS_DINODE_FMT_DEV:
buf->bs_rdev = sysv_encode_dev(inode->i_rdev);
buf->bs_blksize = BLKDEV_IOSIZE;
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 11c3502b07b1..ec015df55b77 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -18,21 +18,13 @@
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
-#include "xfs_inode_item.h"
-#include "xfs_extfree_item.h"
#include "xfs_trans_priv.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
-#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
-#include "xfs_bmap_btree.h"
#include "xfs_error.h"
-#include "xfs_dir2.h"
-#include "xfs_rmap_item.h"
#include "xfs_buf_item.h"
-#include "xfs_refcount_item.h"
-#include "xfs_bmap_item.h"
#define BLK_AVG(blk1, blk2) ((blk1+blk2) >> 1)
@@ -56,17 +48,6 @@ xlog_do_recovery_pass(
struct xlog *, xfs_daddr_t, xfs_daddr_t, int, xfs_daddr_t *);
/*
- * This structure is used during recovery to record the buf log items which
- * have been canceled and should not be replayed.
- */
-struct xfs_buf_cancel {
- xfs_daddr_t bc_blkno;
- uint bc_len;
- int bc_refcount;
- struct list_head bc_list;
-};
-
-/*
* Sector aligned buffer routines for buffer create/read/write/access
*/
@@ -284,7 +265,7 @@ xlog_header_check_mount(
return 0;
}
-STATIC void
+void
xlog_recover_iodone(
struct xfs_buf *bp)
{
@@ -1779,12 +1760,72 @@ xlog_clear_stale_blocks(
return 0;
}
+/*
+ * Release the recovered intent item in the AIL that matches the given intent
+ * type and intent id.
+ */
+void
+xlog_recover_release_intent(
+ struct xlog *log,
+ unsigned short intent_type,
+ uint64_t intent_id)
+{
+ struct xfs_ail_cursor cur;
+ struct xfs_log_item *lip;
+ struct xfs_ail *ailp = log->l_ailp;
+
+ spin_lock(&ailp->ail_lock);
+ for (lip = xfs_trans_ail_cursor_first(ailp, &cur, 0); lip != NULL;
+ lip = xfs_trans_ail_cursor_next(ailp, &cur)) {
+ if (lip->li_type != intent_type)
+ continue;
+ if (!lip->li_ops->iop_match(lip, intent_id))
+ continue;
+
+ spin_unlock(&ailp->ail_lock);
+ lip->li_ops->iop_release(lip);
+ spin_lock(&ailp->ail_lock);
+ break;
+ }
+
+ xfs_trans_ail_cursor_done(&cur);
+ spin_unlock(&ailp->ail_lock);
+}
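As a sketch of the contract this helper depends on, an intent item's ->iop_match callback can simply compare the logged intent id. This hypothetical EFI version uses only the efi_item and efi_format.efi_id fields visible in the EFD code removed further down:

static bool
xfs_efi_item_match(
	struct xfs_log_item	*lip,
	uint64_t		intent_id)
{
	struct xfs_efi_log_item	*efip;

	/* map the generic log item back to its containing EFI */
	efip = container_of(lip, struct xfs_efi_log_item, efi_item);
	return efip->efi_format.efi_id == intent_id;
}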
+
/******************************************************************************
*
* Log recover routines
*
******************************************************************************
*/
+static const struct xlog_recover_item_ops *xlog_recover_item_ops[] = {
+ &xlog_buf_item_ops,
+ &xlog_inode_item_ops,
+ &xlog_dquot_item_ops,
+ &xlog_quotaoff_item_ops,
+ &xlog_icreate_item_ops,
+ &xlog_efi_item_ops,
+ &xlog_efd_item_ops,
+ &xlog_rui_item_ops,
+ &xlog_rud_item_ops,
+ &xlog_cui_item_ops,
+ &xlog_cud_item_ops,
+ &xlog_bui_item_ops,
+ &xlog_bud_item_ops,
+};
+
+static const struct xlog_recover_item_ops *
+xlog_find_item_ops(
+ struct xlog_recover_item *item)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(xlog_recover_item_ops); i++)
+ if (ITEM_TYPE(item) == xlog_recover_item_ops[i]->item_type)
+ return xlog_recover_item_ops[i];
+
+ return NULL;
+}
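A sketch of what one table entry might look like, based only on the two fields this file consults (->item_type here and ->reorder below); the real definitions live with the individual log item code:

const struct xlog_recover_item_ops xlog_efd_item_ops = {
	.item_type	= XFS_LI_EFD,
	/* no ->reorder method: EFDs take the XLOG_REORDER_ITEM_LIST default */
};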
/*
* Sort the log items in the transaction.
@@ -1841,54 +1882,23 @@ xlog_recover_reorder_trans(
struct xlog_recover *trans,
int pass)
{
- xlog_recover_item_t *item, *n;
+ struct xlog_recover_item *item, *n;
int error = 0;
LIST_HEAD(sort_list);
LIST_HEAD(cancel_list);
LIST_HEAD(buffer_list);
LIST_HEAD(inode_buffer_list);
- LIST_HEAD(inode_list);
+ LIST_HEAD(item_list);
list_splice_init(&trans->r_itemq, &sort_list);
list_for_each_entry_safe(item, n, &sort_list, ri_list) {
- xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
+ enum xlog_recover_reorder fate = XLOG_REORDER_ITEM_LIST;
- switch (ITEM_TYPE(item)) {
- case XFS_LI_ICREATE:
- list_move_tail(&item->ri_list, &buffer_list);
- break;
- case XFS_LI_BUF:
- if (buf_f->blf_flags & XFS_BLF_CANCEL) {
- trace_xfs_log_recover_item_reorder_head(log,
- trans, item, pass);
- list_move(&item->ri_list, &cancel_list);
- break;
- }
- if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
- list_move(&item->ri_list, &inode_buffer_list);
- break;
- }
- list_move_tail(&item->ri_list, &buffer_list);
- break;
- case XFS_LI_INODE:
- case XFS_LI_DQUOT:
- case XFS_LI_QUOTAOFF:
- case XFS_LI_EFD:
- case XFS_LI_EFI:
- case XFS_LI_RUI:
- case XFS_LI_RUD:
- case XFS_LI_CUI:
- case XFS_LI_CUD:
- case XFS_LI_BUI:
- case XFS_LI_BUD:
- trace_xfs_log_recover_item_reorder_tail(log,
- trans, item, pass);
- list_move_tail(&item->ri_list, &inode_list);
- break;
- default:
+ item->ri_ops = xlog_find_item_ops(item);
+ if (!item->ri_ops) {
xfs_warn(log->l_mp,
- "%s: unrecognized type of log operation",
- __func__);
+ "%s: unrecognized type of log operation (%d)",
+ __func__, ITEM_TYPE(item));
ASSERT(0);
/*
* return the remaining items back to the transaction
@@ -1896,16 +1906,38 @@ xlog_recover_reorder_trans(
*/
if (!list_empty(&sort_list))
list_splice_init(&sort_list, &trans->r_itemq);
- error = -EIO;
- goto out;
+ error = -EFSCORRUPTED;
+ break;
+ }
+
+ if (item->ri_ops->reorder)
+ fate = item->ri_ops->reorder(item);
+
+ switch (fate) {
+ case XLOG_REORDER_BUFFER_LIST:
+ list_move_tail(&item->ri_list, &buffer_list);
+ break;
+ case XLOG_REORDER_CANCEL_LIST:
+ trace_xfs_log_recover_item_reorder_head(log,
+ trans, item, pass);
+ list_move(&item->ri_list, &cancel_list);
+ break;
+ case XLOG_REORDER_INODE_BUFFER_LIST:
+ list_move(&item->ri_list, &inode_buffer_list);
+ break;
+ case XLOG_REORDER_ITEM_LIST:
+ trace_xfs_log_recover_item_reorder_tail(log,
+ trans, item, pass);
+ list_move_tail(&item->ri_list, &item_list);
+ break;
}
}
-out:
+
ASSERT(list_empty(&sort_list));
if (!list_empty(&buffer_list))
list_splice(&buffer_list, &trans->r_itemq);
- if (!list_empty(&inode_list))
- list_splice_tail(&inode_list, &trans->r_itemq);
+ if (!list_empty(&item_list))
+ list_splice_tail(&item_list, &trans->r_itemq);
if (!list_empty(&inode_buffer_list))
list_splice_tail(&inode_buffer_list, &trans->r_itemq);
if (!list_empty(&cancel_list))
@@ -1913,2152 +1945,15 @@ out:
return error;
}
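The buffer item's ->reorder method presumably encodes the same policy as the switch statement deleted above; a minimal sketch under that assumption, with a hypothetical function name:

static enum xlog_recover_reorder
xlog_buf_reorder_fn(
	struct xlog_recover_item	*item)
{
	struct xfs_buf_log_format	*buf_f = item->ri_buf[0].i_addr;

	if (buf_f->blf_flags & XFS_BLF_CANCEL)
		return XLOG_REORDER_CANCEL_LIST;
	if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
		return XLOG_REORDER_INODE_BUFFER_LIST;
	return XLOG_REORDER_BUFFER_LIST;
}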
-/*
- * Build up the table of buf cancel records so that we don't replay
- * cancelled data in the second pass. For buffer records that are
- * not cancel records, there is nothing to do here so we just return.
- *
- * If we get a cancel record which is already in the table, this indicates
- * that the buffer was cancelled multiple times. In order to ensure
- * that during pass 2 we keep the record in the table until we reach its
- * last occurrence in the log, we keep a reference count in the cancel
- * record in the table to tell us how many times we expect to see this
- * record during the second pass.
- */
-STATIC int
-xlog_recover_buffer_pass1(
- struct xlog *log,
- struct xlog_recover_item *item)
-{
- xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
- struct list_head *bucket;
- struct xfs_buf_cancel *bcp;
-
- if (!xfs_buf_log_check_iovec(&item->ri_buf[0])) {
- xfs_err(log->l_mp, "bad buffer log item size (%d)",
- item->ri_buf[0].i_len);
- return -EFSCORRUPTED;
- }
-
- /*
- * If this isn't a cancel buffer item, then just return.
- */
- if (!(buf_f->blf_flags & XFS_BLF_CANCEL)) {
- trace_xfs_log_recover_buf_not_cancel(log, buf_f);
- return 0;
- }
-
- /*
- * Insert an xfs_buf_cancel record into the hash table of them.
- * If there is already an identical record, bump its reference count.
- */
- bucket = XLOG_BUF_CANCEL_BUCKET(log, buf_f->blf_blkno);
- list_for_each_entry(bcp, bucket, bc_list) {
- if (bcp->bc_blkno == buf_f->blf_blkno &&
- bcp->bc_len == buf_f->blf_len) {
- bcp->bc_refcount++;
- trace_xfs_log_recover_buf_cancel_ref_inc(log, buf_f);
- return 0;
- }
- }
-
- bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), 0);
- bcp->bc_blkno = buf_f->blf_blkno;
- bcp->bc_len = buf_f->blf_len;
- bcp->bc_refcount = 1;
- list_add_tail(&bcp->bc_list, bucket);
-
- trace_xfs_log_recover_buf_cancel_add(log, buf_f);
- return 0;
-}
-
-/*
- * Check to see whether the buffer being recovered has a corresponding
- * entry in the buffer cancel record table. If it is, return the cancel
- * buffer structure to the caller.
- */
-STATIC struct xfs_buf_cancel *
-xlog_peek_buffer_cancelled(
- struct xlog *log,
- xfs_daddr_t blkno,
- uint len,
- unsigned short flags)
-{
- struct list_head *bucket;
- struct xfs_buf_cancel *bcp;
-
- if (!log->l_buf_cancel_table) {
- /* empty table means no cancelled buffers in the log */
- ASSERT(!(flags & XFS_BLF_CANCEL));
- return NULL;
- }
-
- bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
- list_for_each_entry(bcp, bucket, bc_list) {
- if (bcp->bc_blkno == blkno && bcp->bc_len == len)
- return bcp;
- }
-
- /*
-	 * We didn't find a corresponding entry in the table, so return NULL so
- * that the buffer is NOT cancelled.
- */
- ASSERT(!(flags & XFS_BLF_CANCEL));
- return NULL;
-}
-
-/*
- * If the buffer is being cancelled then return 1 so that it will be cancelled,
- * otherwise return 0. If the buffer is actually a buffer cancel item
- * (XFS_BLF_CANCEL is set), then decrement the refcount on the entry in the
- * table and remove it from the table if this is the last reference.
- *
- * We remove the cancel record from the table when we encounter its last
- * occurrence in the log so that if the same buffer is re-used again after its
- * last cancellation we actually replay the changes made at that point.
- */
-STATIC int
-xlog_check_buffer_cancelled(
+void
+xlog_buf_readahead(
struct xlog *log,
xfs_daddr_t blkno,
uint len,
- unsigned short flags)
-{
- struct xfs_buf_cancel *bcp;
-
- bcp = xlog_peek_buffer_cancelled(log, blkno, len, flags);
- if (!bcp)
- return 0;
-
- /*
-	 * We've got a match, so return 1 so that the recovery of this buffer
- * is cancelled. If this buffer is actually a buffer cancel log
- * item, then decrement the refcount on the one in the table and
- * remove it if this is the last reference.
- */
- if (flags & XFS_BLF_CANCEL) {
- if (--bcp->bc_refcount == 0) {
- list_del(&bcp->bc_list);
- kmem_free(bcp);
- }
- }
- return 1;
-}
-
-/*
- * Perform recovery for a buffer full of inodes. In these buffers, the only
- * data which should be recovered is that which corresponds to the
- * di_next_unlinked pointers in the on disk inode structures. The rest of the
- * data for the inodes is always logged through the inodes themselves rather
- * than the inode buffer and is recovered in xlog_recover_inode_pass2().
- *
- * The only time when buffers full of inodes are fully recovered is when the
- * buffer is full of newly allocated inodes. In this case the buffer will
- * not be marked as an inode buffer and so will be sent to
- * xlog_recover_do_reg_buffer() below during recovery.
- */
-STATIC int
-xlog_recover_do_inode_buffer(
- struct xfs_mount *mp,
- xlog_recover_item_t *item,
- struct xfs_buf *bp,
- xfs_buf_log_format_t *buf_f)
-{
- int i;
- int item_index = 0;
- int bit = 0;
- int nbits = 0;
- int reg_buf_offset = 0;
- int reg_buf_bytes = 0;
- int next_unlinked_offset;
- int inodes_per_buf;
- xfs_agino_t *logged_nextp;
- xfs_agino_t *buffer_nextp;
-
- trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
-
- /*
- * Post recovery validation only works properly on CRC enabled
- * filesystems.
- */
- if (xfs_sb_version_hascrc(&mp->m_sb))
- bp->b_ops = &xfs_inode_buf_ops;
-
- inodes_per_buf = BBTOB(bp->b_length) >> mp->m_sb.sb_inodelog;
- for (i = 0; i < inodes_per_buf; i++) {
- next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
- offsetof(xfs_dinode_t, di_next_unlinked);
-
- while (next_unlinked_offset >=
- (reg_buf_offset + reg_buf_bytes)) {
- /*
- * The next di_next_unlinked field is beyond
- * the current logged region. Find the next
- * logged region that contains or is beyond
- * the current di_next_unlinked field.
- */
- bit += nbits;
- bit = xfs_next_bit(buf_f->blf_data_map,
- buf_f->blf_map_size, bit);
-
- /*
- * If there are no more logged regions in the
- * buffer, then we're done.
- */
- if (bit == -1)
- return 0;
-
- nbits = xfs_contig_bits(buf_f->blf_data_map,
- buf_f->blf_map_size, bit);
- ASSERT(nbits > 0);
- reg_buf_offset = bit << XFS_BLF_SHIFT;
- reg_buf_bytes = nbits << XFS_BLF_SHIFT;
- item_index++;
- }
-
- /*
- * If the current logged region starts after the current
- * di_next_unlinked field, then move on to the next
- * di_next_unlinked field.
- */
- if (next_unlinked_offset < reg_buf_offset)
- continue;
-
- ASSERT(item->ri_buf[item_index].i_addr != NULL);
- ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
- ASSERT((reg_buf_offset + reg_buf_bytes) <= BBTOB(bp->b_length));
-
- /*
- * The current logged region contains a copy of the
- * current di_next_unlinked field. Extract its value
- * and copy it to the buffer copy.
- */
- logged_nextp = item->ri_buf[item_index].i_addr +
- next_unlinked_offset - reg_buf_offset;
- if (XFS_IS_CORRUPT(mp, *logged_nextp == 0)) {
- xfs_alert(mp,
- "Bad inode buffer log record (ptr = "PTR_FMT", bp = "PTR_FMT"). "
- "Trying to replay bad (0) inode di_next_unlinked field.",
- item, bp);
- return -EFSCORRUPTED;
- }
-
- buffer_nextp = xfs_buf_offset(bp, next_unlinked_offset);
- *buffer_nextp = *logged_nextp;
-
- /*
- * If necessary, recalculate the CRC in the on-disk inode. We
- * have to leave the inode in a consistent state for whoever
- * reads it next....
- */
- xfs_dinode_calc_crc(mp,
- xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize));
-
- }
-
- return 0;
-}
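The offset arithmetic in the loop above, restated as a hypothetical helper for clarity; it uses only the fields the loop already touches:

static inline int
xfs_inobuf_next_unlinked_offset(
	struct xfs_mount	*mp,
	int			ino_index)
{
	/* byte offset of this inode's di_next_unlinked within the buffer */
	return ino_index * mp->m_sb.sb_inodesize +
			offsetof(struct xfs_dinode, di_next_unlinked);
}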
-
-/*
- * V5 filesystems know the age of the buffer on disk being recovered. We can
- * have newer objects on disk than we are replaying, and so for these cases we
- * don't want to replay the current change as that will make the buffer contents
- * temporarily invalid on disk.
- *
- * The magic number might not match the buffer type we are going to recover
- * (e.g. reallocated blocks), so we ignore the xfs_buf_log_format flags. Hence
- * extract the LSN of the existing object in the buffer based on its
- * current magic number. If we don't recognise the magic number in the
- * buffer, then return an LSN of -1 so that the caller knows it was an
- * unrecognised block and so can recover the buffer.
- *
- * Note: we cannot rely solely on magic number matches to determine that the
- * buffer has a valid LSN - we also need to verify that it belongs to this
- * filesystem, so we need to extract the object's LSN and compare it to that
- * which we read from the superblock. If the UUIDs don't match, then we've got a
- * stale metadata block from an old filesystem instance that we need to recover
- * over the top of.
- */
-static xfs_lsn_t
-xlog_recover_get_buf_lsn(
- struct xfs_mount *mp,
- struct xfs_buf *bp)
-{
- uint32_t magic32;
- uint16_t magic16;
- uint16_t magicda;
- void *blk = bp->b_addr;
- uuid_t *uuid;
- xfs_lsn_t lsn = -1;
-
- /* v4 filesystems always recover immediately */
- if (!xfs_sb_version_hascrc(&mp->m_sb))
- goto recover_immediately;
-
- magic32 = be32_to_cpu(*(__be32 *)blk);
- switch (magic32) {
- case XFS_ABTB_CRC_MAGIC:
- case XFS_ABTC_CRC_MAGIC:
- case XFS_ABTB_MAGIC:
- case XFS_ABTC_MAGIC:
- case XFS_RMAP_CRC_MAGIC:
- case XFS_REFC_CRC_MAGIC:
- case XFS_IBT_CRC_MAGIC:
- case XFS_IBT_MAGIC: {
- struct xfs_btree_block *btb = blk;
-
- lsn = be64_to_cpu(btb->bb_u.s.bb_lsn);
- uuid = &btb->bb_u.s.bb_uuid;
- break;
- }
- case XFS_BMAP_CRC_MAGIC:
- case XFS_BMAP_MAGIC: {
- struct xfs_btree_block *btb = blk;
-
- lsn = be64_to_cpu(btb->bb_u.l.bb_lsn);
- uuid = &btb->bb_u.l.bb_uuid;
- break;
- }
- case XFS_AGF_MAGIC:
- lsn = be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn);
- uuid = &((struct xfs_agf *)blk)->agf_uuid;
- break;
- case XFS_AGFL_MAGIC:
- lsn = be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn);
- uuid = &((struct xfs_agfl *)blk)->agfl_uuid;
- break;
- case XFS_AGI_MAGIC:
- lsn = be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn);
- uuid = &((struct xfs_agi *)blk)->agi_uuid;
- break;
- case XFS_SYMLINK_MAGIC:
- lsn = be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn);
- uuid = &((struct xfs_dsymlink_hdr *)blk)->sl_uuid;
- break;
- case XFS_DIR3_BLOCK_MAGIC:
- case XFS_DIR3_DATA_MAGIC:
- case XFS_DIR3_FREE_MAGIC:
- lsn = be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn);
- uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
- break;
- case XFS_ATTR3_RMT_MAGIC:
- /*
- * Remote attr blocks are written synchronously, rather than
- * being logged. That means they do not contain a valid LSN
- * (i.e. transactionally ordered) in them, and hence any time we
- * see a buffer to replay over the top of a remote attribute
- * block we should simply do so.
- */
- goto recover_immediately;
- case XFS_SB_MAGIC:
- /*
- * superblock uuids are magic. We may or may not have a
- * sb_meta_uuid on disk, but it will be set in the in-core
- * superblock. We set the uuid pointer for verification
- * according to the superblock feature mask to ensure we check
- * the relevant UUID in the superblock.
- */
- lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
- if (xfs_sb_version_hasmetauuid(&mp->m_sb))
- uuid = &((struct xfs_dsb *)blk)->sb_meta_uuid;
- else
- uuid = &((struct xfs_dsb *)blk)->sb_uuid;
- break;
- default:
- break;
- }
-
- if (lsn != (xfs_lsn_t)-1) {
- if (!uuid_equal(&mp->m_sb.sb_meta_uuid, uuid))
- goto recover_immediately;
- return lsn;
- }
-
- magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic);
- switch (magicda) {
- case XFS_DIR3_LEAF1_MAGIC:
- case XFS_DIR3_LEAFN_MAGIC:
- case XFS_DA3_NODE_MAGIC:
- lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
- uuid = &((struct xfs_da3_blkinfo *)blk)->uuid;
- break;
- default:
- break;
- }
-
- if (lsn != (xfs_lsn_t)-1) {
- if (!uuid_equal(&mp->m_sb.sb_uuid, uuid))
- goto recover_immediately;
- return lsn;
- }
-
- /*
- * We do individual object checks on dquot and inode buffers as they
- * have their own individual LSN records. Also, we could have a stale
- * buffer here, so we have to at least recognise these buffer types.
- *
- * A noted complexity here is inode unlinked list processing - it logs
- * the inode directly in the buffer, but we don't know which inodes have
- * been modified, and there is no global buffer LSN. Hence we need to
- * recover all inode buffer types immediately. This problem will be
- * fixed by logical logging of the unlinked list modifications.
- */
- magic16 = be16_to_cpu(*(__be16 *)blk);
- switch (magic16) {
- case XFS_DQUOT_MAGIC:
- case XFS_DINODE_MAGIC:
- goto recover_immediately;
- default:
- break;
- }
-
- /* unknown buffer contents, recover immediately */
-
-recover_immediately:
- return (xfs_lsn_t)-1;
-
-}
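For reference, the replay-or-skip rule that consumers apply to the LSN returned here, restated as a helper with a hypothetical name; the logic is lifted from xlog_recover_buffer_pass2() below:

static bool
xlog_buf_lsn_is_newer(
	xfs_lsn_t	buf_lsn,
	xfs_lsn_t	current_lsn)
{
	/* 0 and -1 both mean "no trustworthy LSN": recover immediately */
	if (buf_lsn == 0 || buf_lsn == (xfs_lsn_t)-1)
		return false;
	return XFS_LSN_CMP(buf_lsn, current_lsn) >= 0;
}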
-
-/*
- * Validate the recovered buffer is of the correct type and attach the
- * appropriate buffer operations to them for writeback. Magic numbers are in a
- * few places:
- * the first 16 bits of the buffer (inode buffer, dquot buffer),
- * the first 32 bits of the buffer (most blocks),
- * inside a struct xfs_da_blkinfo at the start of the buffer.
- */
-static void
-xlog_recover_validate_buf_type(
- struct xfs_mount *mp,
- struct xfs_buf *bp,
- xfs_buf_log_format_t *buf_f,
- xfs_lsn_t current_lsn)
-{
- struct xfs_da_blkinfo *info = bp->b_addr;
- uint32_t magic32;
- uint16_t magic16;
- uint16_t magicda;
- char *warnmsg = NULL;
-
- /*
-	 * We can only do post recovery validation on items on CRC enabled
-	 * filesystems as we need to know when the buffer was written to be able
-	 * to determine if we should have replayed the item. If we replay old
-	 * metadata over a newer buffer, then it will enter a temporarily
-	 * inconsistent state resulting in verification failures. Hence for now
-	 * just avoid the verification stage for non-crc filesystems.
- if (!xfs_sb_version_hascrc(&mp->m_sb))
- return;
-
- magic32 = be32_to_cpu(*(__be32 *)bp->b_addr);
- magic16 = be16_to_cpu(*(__be16*)bp->b_addr);
- magicda = be16_to_cpu(info->magic);
- switch (xfs_blft_from_flags(buf_f)) {
- case XFS_BLFT_BTREE_BUF:
- switch (magic32) {
- case XFS_ABTB_CRC_MAGIC:
- case XFS_ABTB_MAGIC:
- bp->b_ops = &xfs_bnobt_buf_ops;
- break;
- case XFS_ABTC_CRC_MAGIC:
- case XFS_ABTC_MAGIC:
- bp->b_ops = &xfs_cntbt_buf_ops;
- break;
- case XFS_IBT_CRC_MAGIC:
- case XFS_IBT_MAGIC:
- bp->b_ops = &xfs_inobt_buf_ops;
- break;
- case XFS_FIBT_CRC_MAGIC:
- case XFS_FIBT_MAGIC:
- bp->b_ops = &xfs_finobt_buf_ops;
- break;
- case XFS_BMAP_CRC_MAGIC:
- case XFS_BMAP_MAGIC:
- bp->b_ops = &xfs_bmbt_buf_ops;
- break;
- case XFS_RMAP_CRC_MAGIC:
- bp->b_ops = &xfs_rmapbt_buf_ops;
- break;
- case XFS_REFC_CRC_MAGIC:
- bp->b_ops = &xfs_refcountbt_buf_ops;
- break;
- default:
- warnmsg = "Bad btree block magic!";
- break;
- }
- break;
- case XFS_BLFT_AGF_BUF:
- if (magic32 != XFS_AGF_MAGIC) {
- warnmsg = "Bad AGF block magic!";
- break;
- }
- bp->b_ops = &xfs_agf_buf_ops;
- break;
- case XFS_BLFT_AGFL_BUF:
- if (magic32 != XFS_AGFL_MAGIC) {
- warnmsg = "Bad AGFL block magic!";
- break;
- }
- bp->b_ops = &xfs_agfl_buf_ops;
- break;
- case XFS_BLFT_AGI_BUF:
- if (magic32 != XFS_AGI_MAGIC) {
- warnmsg = "Bad AGI block magic!";
- break;
- }
- bp->b_ops = &xfs_agi_buf_ops;
- break;
- case XFS_BLFT_UDQUOT_BUF:
- case XFS_BLFT_PDQUOT_BUF:
- case XFS_BLFT_GDQUOT_BUF:
-#ifdef CONFIG_XFS_QUOTA
- if (magic16 != XFS_DQUOT_MAGIC) {
- warnmsg = "Bad DQUOT block magic!";
- break;
- }
- bp->b_ops = &xfs_dquot_buf_ops;
-#else
- xfs_alert(mp,
- "Trying to recover dquots without QUOTA support built in!");
- ASSERT(0);
-#endif
- break;
- case XFS_BLFT_DINO_BUF:
- if (magic16 != XFS_DINODE_MAGIC) {
- warnmsg = "Bad INODE block magic!";
- break;
- }
- bp->b_ops = &xfs_inode_buf_ops;
- break;
- case XFS_BLFT_SYMLINK_BUF:
- if (magic32 != XFS_SYMLINK_MAGIC) {
- warnmsg = "Bad symlink block magic!";
- break;
- }
- bp->b_ops = &xfs_symlink_buf_ops;
- break;
- case XFS_BLFT_DIR_BLOCK_BUF:
- if (magic32 != XFS_DIR2_BLOCK_MAGIC &&
- magic32 != XFS_DIR3_BLOCK_MAGIC) {
- warnmsg = "Bad dir block magic!";
- break;
- }
- bp->b_ops = &xfs_dir3_block_buf_ops;
- break;
- case XFS_BLFT_DIR_DATA_BUF:
- if (magic32 != XFS_DIR2_DATA_MAGIC &&
- magic32 != XFS_DIR3_DATA_MAGIC) {
- warnmsg = "Bad dir data magic!";
- break;
- }
- bp->b_ops = &xfs_dir3_data_buf_ops;
- break;
- case XFS_BLFT_DIR_FREE_BUF:
- if (magic32 != XFS_DIR2_FREE_MAGIC &&
- magic32 != XFS_DIR3_FREE_MAGIC) {
- warnmsg = "Bad dir3 free magic!";
- break;
- }
- bp->b_ops = &xfs_dir3_free_buf_ops;
- break;
- case XFS_BLFT_DIR_LEAF1_BUF:
- if (magicda != XFS_DIR2_LEAF1_MAGIC &&
- magicda != XFS_DIR3_LEAF1_MAGIC) {
- warnmsg = "Bad dir leaf1 magic!";
- break;
- }
- bp->b_ops = &xfs_dir3_leaf1_buf_ops;
- break;
- case XFS_BLFT_DIR_LEAFN_BUF:
- if (magicda != XFS_DIR2_LEAFN_MAGIC &&
- magicda != XFS_DIR3_LEAFN_MAGIC) {
- warnmsg = "Bad dir leafn magic!";
- break;
- }
- bp->b_ops = &xfs_dir3_leafn_buf_ops;
- break;
- case XFS_BLFT_DA_NODE_BUF:
- if (magicda != XFS_DA_NODE_MAGIC &&
- magicda != XFS_DA3_NODE_MAGIC) {
- warnmsg = "Bad da node magic!";
- break;
- }
- bp->b_ops = &xfs_da3_node_buf_ops;
- break;
- case XFS_BLFT_ATTR_LEAF_BUF:
- if (magicda != XFS_ATTR_LEAF_MAGIC &&
- magicda != XFS_ATTR3_LEAF_MAGIC) {
- warnmsg = "Bad attr leaf magic!";
- break;
- }
- bp->b_ops = &xfs_attr3_leaf_buf_ops;
- break;
- case XFS_BLFT_ATTR_RMT_BUF:
- if (magic32 != XFS_ATTR3_RMT_MAGIC) {
- warnmsg = "Bad attr remote magic!";
- break;
- }
- bp->b_ops = &xfs_attr3_rmt_buf_ops;
- break;
- case XFS_BLFT_SB_BUF:
- if (magic32 != XFS_SB_MAGIC) {
- warnmsg = "Bad SB block magic!";
- break;
- }
- bp->b_ops = &xfs_sb_buf_ops;
- break;
-#ifdef CONFIG_XFS_RT
- case XFS_BLFT_RTBITMAP_BUF:
- case XFS_BLFT_RTSUMMARY_BUF:
- /* no magic numbers for verification of RT buffers */
- bp->b_ops = &xfs_rtbuf_ops;
- break;
-#endif /* CONFIG_XFS_RT */
- default:
- xfs_warn(mp, "Unknown buffer type %d!",
- xfs_blft_from_flags(buf_f));
- break;
- }
-
- /*
- * Nothing else to do in the case of a NULL current LSN as this means
- * the buffer is more recent than the change in the log and will be
- * skipped.
- */
- if (current_lsn == NULLCOMMITLSN)
- return;
-
- if (warnmsg) {
- xfs_warn(mp, warnmsg);
- ASSERT(0);
- }
-
- /*
- * We must update the metadata LSN of the buffer as it is written out to
- * ensure that older transactions never replay over this one and corrupt
- * the buffer. This can occur if log recovery is interrupted at some
- * point after the current transaction completes, at which point a
- * subsequent mount starts recovery from the beginning.
- *
- * Write verifiers update the metadata LSN from log items attached to
- * the buffer. Therefore, initialize a bli purely to carry the LSN to
- * the verifier. We'll clean it up in our ->iodone() callback.
- */
- if (bp->b_ops) {
- struct xfs_buf_log_item *bip;
-
- ASSERT(!bp->b_iodone || bp->b_iodone == xlog_recover_iodone);
- bp->b_iodone = xlog_recover_iodone;
- xfs_buf_item_init(bp, mp);
- bip = bp->b_log_item;
- bip->bli_item.li_lsn = current_lsn;
- }
-}
-
-/*
- * Perform a 'normal' buffer recovery. Each logged region of the
- * buffer should be copied over the corresponding region in the
- * given buffer. The bitmap in the buf log format structure indicates
- * where to place the logged data.
- */
-STATIC void
-xlog_recover_do_reg_buffer(
- struct xfs_mount *mp,
- xlog_recover_item_t *item,
- struct xfs_buf *bp,
- xfs_buf_log_format_t *buf_f,
- xfs_lsn_t current_lsn)
-{
- int i;
- int bit;
- int nbits;
- xfs_failaddr_t fa;
- const size_t size_disk_dquot = sizeof(struct xfs_disk_dquot);
-
- trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);
-
- bit = 0;
- i = 1; /* 0 is the buf format structure */
- while (1) {
- bit = xfs_next_bit(buf_f->blf_data_map,
- buf_f->blf_map_size, bit);
- if (bit == -1)
- break;
- nbits = xfs_contig_bits(buf_f->blf_data_map,
- buf_f->blf_map_size, bit);
- ASSERT(nbits > 0);
- ASSERT(item->ri_buf[i].i_addr != NULL);
- ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
- ASSERT(BBTOB(bp->b_length) >=
- ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT));
-
- /*
- * The dirty regions logged in the buffer, even though
- * contiguous, may span multiple chunks. This is because the
- * dirty region may span a physical page boundary in a buffer
- * and hence be split into two separate vectors for writing into
- * the log. Hence we need to trim nbits back to the length of
- * the current region being copied out of the log.
- */
- if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT))
- nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT;
-
- /*
- * Do a sanity check if this is a dquot buffer. Just checking
-		 * the first dquot in the buffer should do. XXX This is
- * probably a good thing to do for other buf types also.
- */
- fa = NULL;
- if (buf_f->blf_flags &
- (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
- if (item->ri_buf[i].i_addr == NULL) {
- xfs_alert(mp,
- "XFS: NULL dquot in %s.", __func__);
- goto next;
- }
- if (item->ri_buf[i].i_len < size_disk_dquot) {
- xfs_alert(mp,
- "XFS: dquot too small (%d) in %s.",
- item->ri_buf[i].i_len, __func__);
- goto next;
- }
- fa = xfs_dquot_verify(mp, item->ri_buf[i].i_addr,
- -1, 0);
- if (fa) {
- xfs_alert(mp,
- "dquot corrupt at %pS trying to replay into block 0x%llx",
- fa, bp->b_bn);
- goto next;
- }
- }
-
- memcpy(xfs_buf_offset(bp,
- (uint)bit << XFS_BLF_SHIFT), /* dest */
- item->ri_buf[i].i_addr, /* source */
- nbits<<XFS_BLF_SHIFT); /* length */
- next:
- i++;
- bit += nbits;
- }
-
- /* Shouldn't be any more regions */
- ASSERT(i == item->ri_total);
-
- xlog_recover_validate_buf_type(mp, bp, buf_f, current_lsn);
-}
-
-/*
- * Perform a dquot buffer recovery.
- * Simple algorithm: if we have found a QUOTAOFF log item of the same type
- * (ie. USR or GRP), then just toss this buffer away; don't recover it.
- * Else, treat it as a regular buffer and do recovery.
- *
- * Return false if the buffer was tossed and true if we recovered the buffer to
- * indicate to the caller if the buffer needs writing.
- */
-STATIC bool
-xlog_recover_do_dquot_buffer(
- struct xfs_mount *mp,
- struct xlog *log,
- struct xlog_recover_item *item,
- struct xfs_buf *bp,
- struct xfs_buf_log_format *buf_f)
-{
- uint type;
-
- trace_xfs_log_recover_buf_dquot_buf(log, buf_f);
-
- /*
- * Filesystems are required to send in quota flags at mount time.
- */
- if (!mp->m_qflags)
- return false;
-
- type = 0;
- if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
- type |= XFS_DQ_USER;
- if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
- type |= XFS_DQ_PROJ;
- if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
- type |= XFS_DQ_GROUP;
- /*
- * This type of quotas was turned off, so ignore this buffer
- */
- if (log->l_quotaoffs_flag & type)
- return false;
-
- xlog_recover_do_reg_buffer(mp, item, bp, buf_f, NULLCOMMITLSN);
- return true;
-}
-
-/*
- * This routine replays a modification made to a buffer at runtime.
- * There are actually two types of buffer, regular and inode, which
- * are handled differently. Inode buffers are handled differently
- * in that we only recover a specific set of data from them, namely
- * the inode di_next_unlinked fields. This is because all other inode
- * data is actually logged via inode records and any data we replay
- * here which overlaps that may be stale.
- *
- * When meta-data buffers are freed at run time we log a buffer item
- * with the XFS_BLF_CANCEL bit set to indicate that previous copies
- * of the buffer in the log should not be replayed at recovery time.
- * This is so that if the blocks covered by the buffer are reused for
- * file data before we crash we don't end up replaying old, freed
- * meta-data into a user's file.
- *
- * To handle the cancellation of buffer log items, we make two passes
- * over the log during recovery. During the first we build a table of
- * those buffers which have been cancelled, and during the second we
- * only replay those buffers which do not have corresponding cancel
- * records in the table. See xlog_recover_buffer_pass[1,2] above
- * for more details on the implementation of the table of cancel records.
- */
-STATIC int
-xlog_recover_buffer_pass2(
- struct xlog *log,
- struct list_head *buffer_list,
- struct xlog_recover_item *item,
- xfs_lsn_t current_lsn)
-{
- xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
- xfs_mount_t *mp = log->l_mp;
- xfs_buf_t *bp;
- int error;
- uint buf_flags;
- xfs_lsn_t lsn;
-
- /*
- * In this pass we only want to recover all the buffers which have
- * not been cancelled and are not cancellation buffers themselves.
- */
- if (xlog_check_buffer_cancelled(log, buf_f->blf_blkno,
- buf_f->blf_len, buf_f->blf_flags)) {
- trace_xfs_log_recover_buf_cancel(log, buf_f);
- return 0;
- }
-
- trace_xfs_log_recover_buf_recover(log, buf_f);
-
- buf_flags = 0;
- if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
- buf_flags |= XBF_UNMAPPED;
-
- error = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
- buf_flags, &bp, NULL);
- if (error)
- return error;
-
- /*
- * Recover the buffer only if we get an LSN from it and it's less than
- * the lsn of the transaction we are replaying.
- *
- * Note that we have to be extremely careful of readahead here.
-	 * Readahead does not attach verifiers to the buffers, so if we don't
-	 * actually do any replay after readahead because the LSN we found in
-	 * the buffer is more recent than the current transaction, then we need
-	 * to attach the verifier directly. Failure to do so means that future
-	 * recovery actions (e.g. EFI and unlinked list recovery) can operate
-	 * on the buffers without the verifier attached, which can leave blocks
-	 * on disk with the correct content but a stale CRC.
- *
- * It is safe to assume these clean buffers are currently up to date.
- * If the buffer is dirtied by a later transaction being replayed, then
- * the verifier will be reset to match whatever recover turns that
- * buffer into.
- */
- lsn = xlog_recover_get_buf_lsn(mp, bp);
- if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
- trace_xfs_log_recover_buf_skip(log, buf_f);
- xlog_recover_validate_buf_type(mp, bp, buf_f, NULLCOMMITLSN);
- goto out_release;
- }
-
- if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
- error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
- if (error)
- goto out_release;
- } else if (buf_f->blf_flags &
- (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
- bool dirty;
-
- dirty = xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
- if (!dirty)
- goto out_release;
- } else {
- xlog_recover_do_reg_buffer(mp, item, bp, buf_f, current_lsn);
- }
-
- /*
- * Perform delayed write on the buffer. Asynchronous writes will be
- * slower when taking into account all the buffers to be flushed.
- *
- * Also make sure that only inode buffers with good sizes stay in
- * the buffer cache. The kernel moves inodes in buffers of 1 block
- * or inode_cluster_size bytes, whichever is bigger. The inode
- * buffers in the log can be a different size if the log was generated
- * by an older kernel using unclustered inode buffers or a newer kernel
-	 * running with a different inode cluster size. Regardless, if
- * the inode buffer size isn't max(blocksize, inode_cluster_size)
- * for *our* value of inode_cluster_size, then we need to keep
- * the buffer out of the buffer cache so that the buffer won't
- * overlap with future reads of those inodes.
- */
- if (XFS_DINODE_MAGIC ==
- be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
- (BBTOB(bp->b_length) != M_IGEO(log->l_mp)->inode_cluster_size)) {
- xfs_buf_stale(bp);
- error = xfs_bwrite(bp);
- } else {
- ASSERT(bp->b_mount == mp);
- bp->b_iodone = xlog_recover_iodone;
- xfs_buf_delwri_queue(bp, buffer_list);
- }
-
-out_release:
- xfs_buf_relse(bp);
- return error;
-}
-
-/*
- * Inode fork owner changes
- *
- * If we have been told that we have to reparent the inode fork, it's because an
- * extent swap operation on a CRC enabled filesystem has been done and we are
- * replaying it. We need to walk the BMBT of the appropriate fork and change the
- * owners of it.
- *
- * The complexity here is that we don't have an inode context to work with, so
- * after we've replayed the inode we need to instantiate one. This is where the
- * fun begins.
- *
- * We are in the middle of log recovery, so we can't run transactions. That
- * means we cannot use cache coherent inode instantiation via xfs_iget(), as
- * that will result in the corresponding iput() running the inode through
- * xfs_inactive(). If we've just replayed an inode core that changes the link
- * count to zero (i.e. it's been unlinked), then xfs_inactive() will run
- * transactions (bad!).
- *
- * So, to avoid this, we instantiate an inode directly from the inode core we've
- * just recovered. We have the buffer still locked, and all we really need to
- * instantiate is the inode core and the forks being modified. We can do this
- * manually, then run the inode btree owner change, and then tear down the
- * xfs_inode without having to run any transactions at all.
- *
- * Also, because we don't have a transaction context available here, we
- * pass the buffer_list to the operation instead so that it can gather
- * all the buffers we modify for writeback.
- */
-
-STATIC int
-xfs_recover_inode_owner_change(
- struct xfs_mount *mp,
- struct xfs_dinode *dip,
- struct xfs_inode_log_format *in_f,
- struct list_head *buffer_list)
+ const struct xfs_buf_ops *ops)
{
- struct xfs_inode *ip;
- int error;
-
- ASSERT(in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER));
-
- ip = xfs_inode_alloc(mp, in_f->ilf_ino);
- if (!ip)
- return -ENOMEM;
-
- /* instantiate the inode */
- ASSERT(dip->di_version >= 3);
- xfs_inode_from_disk(ip, dip);
-
- error = xfs_iformat_fork(ip, dip);
- if (error)
- goto out_free_ip;
-
- if (!xfs_inode_verify_forks(ip)) {
- error = -EFSCORRUPTED;
- goto out_free_ip;
- }
-
- if (in_f->ilf_fields & XFS_ILOG_DOWNER) {
- ASSERT(in_f->ilf_fields & XFS_ILOG_DBROOT);
- error = xfs_bmbt_change_owner(NULL, ip, XFS_DATA_FORK,
- ip->i_ino, buffer_list);
- if (error)
- goto out_free_ip;
- }
-
- if (in_f->ilf_fields & XFS_ILOG_AOWNER) {
- ASSERT(in_f->ilf_fields & XFS_ILOG_ABROOT);
- error = xfs_bmbt_change_owner(NULL, ip, XFS_ATTR_FORK,
- ip->i_ino, buffer_list);
- if (error)
- goto out_free_ip;
- }
-
-out_free_ip:
- xfs_inode_free(ip);
- return error;
-}
-
-STATIC int
-xlog_recover_inode_pass2(
- struct xlog *log,
- struct list_head *buffer_list,
- struct xlog_recover_item *item,
- xfs_lsn_t current_lsn)
-{
- struct xfs_inode_log_format *in_f;
- xfs_mount_t *mp = log->l_mp;
- xfs_buf_t *bp;
- xfs_dinode_t *dip;
- int len;
- char *src;
- char *dest;
- int error;
- int attr_index;
- uint fields;
- struct xfs_log_dinode *ldip;
- uint isize;
- int need_free = 0;
-
- if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
- in_f = item->ri_buf[0].i_addr;
- } else {
- in_f = kmem_alloc(sizeof(struct xfs_inode_log_format), 0);
- need_free = 1;
- error = xfs_inode_item_format_convert(&item->ri_buf[0], in_f);
- if (error)
- goto error;
- }
-
- /*
-	 * Inode buffers can be freed; look out for that,
- * and do not replay the inode.
- */
- if (xlog_check_buffer_cancelled(log, in_f->ilf_blkno,
- in_f->ilf_len, 0)) {
- error = 0;
- trace_xfs_log_recover_inode_cancel(log, in_f);
- goto error;
- }
- trace_xfs_log_recover_inode_recover(log, in_f);
-
- error = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len,
- 0, &bp, &xfs_inode_buf_ops);
- if (error)
- goto error;
- ASSERT(in_f->ilf_fields & XFS_ILOG_CORE);
- dip = xfs_buf_offset(bp, in_f->ilf_boffset);
-
- /*
- * Make sure the place we're flushing out to really looks
- * like an inode!
- */
- if (XFS_IS_CORRUPT(mp, !xfs_verify_magic16(bp, dip->di_magic))) {
- xfs_alert(mp,
- "%s: Bad inode magic number, dip = "PTR_FMT", dino bp = "PTR_FMT", ino = %Ld",
- __func__, dip, bp, in_f->ilf_ino);
- error = -EFSCORRUPTED;
- goto out_release;
- }
- ldip = item->ri_buf[1].i_addr;
- if (XFS_IS_CORRUPT(mp, ldip->di_magic != XFS_DINODE_MAGIC)) {
- xfs_alert(mp,
- "%s: Bad inode log record, rec ptr "PTR_FMT", ino %Ld",
- __func__, item, in_f->ilf_ino);
- error = -EFSCORRUPTED;
- goto out_release;
- }
-
- /*
- * If the inode has an LSN in it, recover the inode only if it's less
- * than the lsn of the transaction we are replaying. Note: we still
- * need to replay an owner change even though the inode is more recent
- * than the transaction as there is no guarantee that all the btree
- * blocks are more recent than this transaction, too.
- */
- if (dip->di_version >= 3) {
- xfs_lsn_t lsn = be64_to_cpu(dip->di_lsn);
-
- if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
- trace_xfs_log_recover_inode_skip(log, in_f);
- error = 0;
- goto out_owner_change;
- }
- }
-
- /*
- * di_flushiter is only valid for v1/2 inodes. All changes for v3 inodes
- * are transactional and if ordering is necessary we can determine that
- * more accurately by the LSN field in the V3 inode core. Don't trust
-	 * the inode versions as we might be changing them here - use the
-	 * superblock flag to determine whether we need to look at di_flushiter
-	 * to skip replay when the on disk inode is newer than the log one.
- */
- if (!xfs_sb_version_has_v3inode(&mp->m_sb) &&
- ldip->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
- /*
- * Deal with the wrap case, DI_MAX_FLUSH is less
- * than smaller numbers
- */
- if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
- ldip->di_flushiter < (DI_MAX_FLUSH >> 1)) {
- /* do nothing */
- } else {
- trace_xfs_log_recover_inode_skip(log, in_f);
- error = 0;
- goto out_release;
- }
- }
-
- /* Take the opportunity to reset the flush iteration count */
- ldip->di_flushiter = 0;
-
- if (unlikely(S_ISREG(ldip->di_mode))) {
- if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) &&
- (ldip->di_format != XFS_DINODE_FMT_BTREE)) {
- XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
- XFS_ERRLEVEL_LOW, mp, ldip,
- sizeof(*ldip));
- xfs_alert(mp,
- "%s: Bad regular inode log record, rec ptr "PTR_FMT", "
- "ino ptr = "PTR_FMT", ino bp = "PTR_FMT", ino %Ld",
- __func__, item, dip, bp, in_f->ilf_ino);
- error = -EFSCORRUPTED;
- goto out_release;
- }
- } else if (unlikely(S_ISDIR(ldip->di_mode))) {
- if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) &&
- (ldip->di_format != XFS_DINODE_FMT_BTREE) &&
- (ldip->di_format != XFS_DINODE_FMT_LOCAL)) {
- XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
- XFS_ERRLEVEL_LOW, mp, ldip,
- sizeof(*ldip));
- xfs_alert(mp,
- "%s: Bad dir inode log record, rec ptr "PTR_FMT", "
- "ino ptr = "PTR_FMT", ino bp = "PTR_FMT", ino %Ld",
- __func__, item, dip, bp, in_f->ilf_ino);
- error = -EFSCORRUPTED;
- goto out_release;
- }
- }
- if (unlikely(ldip->di_nextents + ldip->di_anextents > ldip->di_nblocks)){
- XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
- XFS_ERRLEVEL_LOW, mp, ldip,
- sizeof(*ldip));
- xfs_alert(mp,
- "%s: Bad inode log record, rec ptr "PTR_FMT", dino ptr "PTR_FMT", "
- "dino bp "PTR_FMT", ino %Ld, total extents = %d, nblocks = %Ld",
- __func__, item, dip, bp, in_f->ilf_ino,
- ldip->di_nextents + ldip->di_anextents,
- ldip->di_nblocks);
- error = -EFSCORRUPTED;
- goto out_release;
- }
- if (unlikely(ldip->di_forkoff > mp->m_sb.sb_inodesize)) {
- XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
- XFS_ERRLEVEL_LOW, mp, ldip,
- sizeof(*ldip));
- xfs_alert(mp,
- "%s: Bad inode log record, rec ptr "PTR_FMT", dino ptr "PTR_FMT", "
- "dino bp "PTR_FMT", ino %Ld, forkoff 0x%x", __func__,
- item, dip, bp, in_f->ilf_ino, ldip->di_forkoff);
- error = -EFSCORRUPTED;
- goto out_release;
- }
- isize = xfs_log_dinode_size(mp);
- if (unlikely(item->ri_buf[1].i_len > isize)) {
- XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
- XFS_ERRLEVEL_LOW, mp, ldip,
- sizeof(*ldip));
- xfs_alert(mp,
- "%s: Bad inode log record length %d, rec ptr "PTR_FMT,
- __func__, item->ri_buf[1].i_len, item);
- error = -EFSCORRUPTED;
- goto out_release;
- }
-
-	/* recover the log dinode into the on-disk inode */
- xfs_log_dinode_to_disk(ldip, dip);
-
- fields = in_f->ilf_fields;
- if (fields & XFS_ILOG_DEV)
- xfs_dinode_put_rdev(dip, in_f->ilf_u.ilfu_rdev);
-
- if (in_f->ilf_size == 2)
- goto out_owner_change;
- len = item->ri_buf[2].i_len;
- src = item->ri_buf[2].i_addr;
- ASSERT(in_f->ilf_size <= 4);
- ASSERT((in_f->ilf_size == 3) || (fields & XFS_ILOG_AFORK));
- ASSERT(!(fields & XFS_ILOG_DFORK) ||
- (len == in_f->ilf_dsize));
-
- switch (fields & XFS_ILOG_DFORK) {
- case XFS_ILOG_DDATA:
- case XFS_ILOG_DEXT:
- memcpy(XFS_DFORK_DPTR(dip), src, len);
- break;
-
- case XFS_ILOG_DBROOT:
- xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src, len,
- (xfs_bmdr_block_t *)XFS_DFORK_DPTR(dip),
- XFS_DFORK_DSIZE(dip, mp));
- break;
-
- default:
- /*
- * There are no data fork flags set.
- */
- ASSERT((fields & XFS_ILOG_DFORK) == 0);
- break;
- }
-
- /*
- * If we logged any attribute data, recover it. There may or
- * may not have been any other non-core data logged in this
- * transaction.
- */
- if (in_f->ilf_fields & XFS_ILOG_AFORK) {
- if (in_f->ilf_fields & XFS_ILOG_DFORK) {
- attr_index = 3;
- } else {
- attr_index = 2;
- }
- len = item->ri_buf[attr_index].i_len;
- src = item->ri_buf[attr_index].i_addr;
- ASSERT(len == in_f->ilf_asize);
-
- switch (in_f->ilf_fields & XFS_ILOG_AFORK) {
- case XFS_ILOG_ADATA:
- case XFS_ILOG_AEXT:
- dest = XFS_DFORK_APTR(dip);
- ASSERT(len <= XFS_DFORK_ASIZE(dip, mp));
- memcpy(dest, src, len);
- break;
-
- case XFS_ILOG_ABROOT:
- dest = XFS_DFORK_APTR(dip);
- xfs_bmbt_to_bmdr(mp, (struct xfs_btree_block *)src,
- len, (xfs_bmdr_block_t*)dest,
- XFS_DFORK_ASIZE(dip, mp));
- break;
-
- default:
- xfs_warn(log->l_mp, "%s: Invalid flag", __func__);
- ASSERT(0);
- error = -EFSCORRUPTED;
- goto out_release;
- }
- }
-
-out_owner_change:
- /* Recover the swapext owner change unless inode has been deleted */
- if ((in_f->ilf_fields & (XFS_ILOG_DOWNER|XFS_ILOG_AOWNER)) &&
- (dip->di_mode != 0))
- error = xfs_recover_inode_owner_change(mp, dip, in_f,
- buffer_list);
- /* re-generate the checksum. */
- xfs_dinode_calc_crc(log->l_mp, dip);
-
- ASSERT(bp->b_mount == mp);
- bp->b_iodone = xlog_recover_iodone;
- xfs_buf_delwri_queue(bp, buffer_list);
-
-out_release:
- xfs_buf_relse(bp);
-error:
- if (need_free)
- kmem_free(in_f);
- return error;
-}
-
-/*
- * Recover QUOTAOFF records. We simply make a note of it in the xlog
- * structure, so that we know not to do any dquot item or dquot buffer
- * recovery of that type.
- */
-STATIC int
-xlog_recover_quotaoff_pass1(
- struct xlog *log,
- struct xlog_recover_item *item)
-{
- xfs_qoff_logformat_t *qoff_f = item->ri_buf[0].i_addr;
- ASSERT(qoff_f);
-
- /*
- * The logitem format's flag tells us if this was user quotaoff,
- * group/project quotaoff or both.
- */
- if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
- log->l_quotaoffs_flag |= XFS_DQ_USER;
- if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
- log->l_quotaoffs_flag |= XFS_DQ_PROJ;
- if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
- log->l_quotaoffs_flag |= XFS_DQ_GROUP;
-
- return 0;
-}
-
-/*
- * Recover a dquot record
- */
-STATIC int
-xlog_recover_dquot_pass2(
- struct xlog *log,
- struct list_head *buffer_list,
- struct xlog_recover_item *item,
- xfs_lsn_t current_lsn)
-{
- xfs_mount_t *mp = log->l_mp;
- xfs_buf_t *bp;
- struct xfs_disk_dquot *ddq, *recddq;
- xfs_failaddr_t fa;
- int error;
- xfs_dq_logformat_t *dq_f;
- uint type;
-
-
- /*
- * Filesystems are required to send in quota flags at mount time.
- */
- if (mp->m_qflags == 0)
- return 0;
-
- recddq = item->ri_buf[1].i_addr;
- if (recddq == NULL) {
- xfs_alert(log->l_mp, "NULL dquot in %s.", __func__);
- return -EFSCORRUPTED;
- }
- if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot)) {
- xfs_alert(log->l_mp, "dquot too small (%d) in %s.",
- item->ri_buf[1].i_len, __func__);
- return -EFSCORRUPTED;
- }
-
- /*
- * This type of quotas was turned off, so ignore this record.
- */
- type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
- ASSERT(type);
- if (log->l_quotaoffs_flag & type)
- return 0;
-
- /*
- * At this point we know that quota was _not_ turned off.
- * Since the mount flags are not indicating to us otherwise, this
- * must mean that quota is on, and the dquot needs to be replayed.
- * Remember that we may not have fully recovered the superblock yet,
- * so we can't do the usual trick of looking at the SB quota bits.
- *
- * The other possibility, of course, is that the quota subsystem was
- * removed since the last mount - ENOSYS.
- */
- dq_f = item->ri_buf[0].i_addr;
- ASSERT(dq_f);
- fa = xfs_dquot_verify(mp, recddq, dq_f->qlf_id, 0);
- if (fa) {
- xfs_alert(mp, "corrupt dquot ID 0x%x in log at %pS",
- dq_f->qlf_id, fa);
- return -EFSCORRUPTED;
- }
- ASSERT(dq_f->qlf_len == 1);
-
- /*
- * At this point we are assuming that the dquots have been allocated
- * and hence the buffer has valid dquots stamped in it. It should,
-	 * therefore, pass verifier validation. If the dquot is bad, then
- * we'll return an error here, so we don't need to specifically check
- * the dquot in the buffer after the verifier has run.
- */
- error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dq_f->qlf_blkno,
- XFS_FSB_TO_BB(mp, dq_f->qlf_len), 0, &bp,
- &xfs_dquot_buf_ops);
- if (error)
- return error;
-
- ASSERT(bp);
- ddq = xfs_buf_offset(bp, dq_f->qlf_boffset);
-
- /*
- * If the dquot has an LSN in it, recover the dquot only if it's less
- * than the lsn of the transaction we are replaying.
- */
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
- struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddq;
- xfs_lsn_t lsn = be64_to_cpu(dqb->dd_lsn);
-
- if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
- goto out_release;
- }
- }
-
- memcpy(ddq, recddq, item->ri_buf[1].i_len);
- if (xfs_sb_version_hascrc(&mp->m_sb)) {
- xfs_update_cksum((char *)ddq, sizeof(struct xfs_dqblk),
- XFS_DQUOT_CRC_OFF);
- }
-
- ASSERT(dq_f->qlf_size == 2);
- ASSERT(bp->b_mount == mp);
- bp->b_iodone = xlog_recover_iodone;
- xfs_buf_delwri_queue(bp, buffer_list);
-
-out_release:
- xfs_buf_relse(bp);
- return 0;
-}
-
-/*
- * This routine is called to create an in-core extent free intent
- * item from the efi format structure which was logged on disk.
- * It allocates an in-core efi, copies the extents from the format
- * structure into it, and adds the efi to the AIL with the given
- * LSN.
- */
-STATIC int
-xlog_recover_efi_pass2(
- struct xlog *log,
- struct xlog_recover_item *item,
- xfs_lsn_t lsn)
-{
- int error;
- struct xfs_mount *mp = log->l_mp;
- struct xfs_efi_log_item *efip;
- struct xfs_efi_log_format *efi_formatp;
-
- efi_formatp = item->ri_buf[0].i_addr;
-
- efip = xfs_efi_init(mp, efi_formatp->efi_nextents);
- error = xfs_efi_copy_format(&item->ri_buf[0], &efip->efi_format);
- if (error) {
- xfs_efi_item_free(efip);
- return error;
- }
- atomic_set(&efip->efi_next_extent, efi_formatp->efi_nextents);
-
- spin_lock(&log->l_ailp->ail_lock);
- /*
- * The EFI has two references. One for the EFD and one for EFI to ensure
- * it makes it into the AIL. Insert the EFI into the AIL directly and
- * drop the EFI reference. Note that xfs_trans_ail_update() drops the
- * AIL lock.
- */
- xfs_trans_ail_update(log->l_ailp, &efip->efi_item, lsn);
- xfs_efi_release(efip);
- return 0;
-}
-
-
-/*
- * This routine is called when an EFD format structure is found in a committed
- * transaction in the log. Its purpose is to cancel the corresponding EFI if it
- * was still in the log. To do this it searches the AIL for the EFI with an id
- * equal to that in the EFD format structure. If we find it we drop the EFD
- * reference, which removes the EFI from the AIL and frees it.
- */
-STATIC int
-xlog_recover_efd_pass2(
- struct xlog *log,
- struct xlog_recover_item *item)
-{
- xfs_efd_log_format_t *efd_formatp;
- xfs_efi_log_item_t *efip = NULL;
- struct xfs_log_item *lip;
- uint64_t efi_id;
- struct xfs_ail_cursor cur;
- struct xfs_ail *ailp = log->l_ailp;
-
- efd_formatp = item->ri_buf[0].i_addr;
- ASSERT((item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_32_t) +
- ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_32_t)))) ||
- (item->ri_buf[0].i_len == (sizeof(xfs_efd_log_format_64_t) +
- ((efd_formatp->efd_nextents - 1) * sizeof(xfs_extent_64_t)))));
- efi_id = efd_formatp->efd_efi_id;
-
- /*
- * Search for the EFI with the id in the EFD format structure in the
- * AIL.
- */
- spin_lock(&ailp->ail_lock);
- lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
- while (lip != NULL) {
- if (lip->li_type == XFS_LI_EFI) {
- efip = (xfs_efi_log_item_t *)lip;
- if (efip->efi_format.efi_id == efi_id) {
- /*
- * Drop the EFD reference to the EFI. This
- * removes the EFI from the AIL and frees it.
- */
- spin_unlock(&ailp->ail_lock);
- xfs_efi_release(efip);
- spin_lock(&ailp->ail_lock);
- break;
- }
- }
- lip = xfs_trans_ail_cursor_next(ailp, &cur);
- }
-
- xfs_trans_ail_cursor_done(&cur);
- spin_unlock(&ailp->ail_lock);
-
- return 0;
-}
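This AIL walk is exactly the pattern that the new xlog_recover_release_intent() helper added earlier centralises. A sketch of the EFD side rewritten in those terms, assuming the surrounding format handling stays as-is:

STATIC int
xlog_recover_efd_pass2(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	struct xfs_efd_log_format	*efd_formatp = item->ri_buf[0].i_addr;

	/* drop the matching EFI from the AIL, if it is still present */
	xlog_recover_release_intent(log, XFS_LI_EFI,
			efd_formatp->efd_efi_id);
	return 0;
}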
-
-/*
- * This routine is called to create an in-core extent rmap update
- * item from the rui format structure which was logged on disk.
- * It allocates an in-core rui, copies the extents from the format
- * structure into it, and adds the rui to the AIL with the given
- * LSN.
- */
-STATIC int
-xlog_recover_rui_pass2(
- struct xlog *log,
- struct xlog_recover_item *item,
- xfs_lsn_t lsn)
-{
- int error;
- struct xfs_mount *mp = log->l_mp;
- struct xfs_rui_log_item *ruip;
- struct xfs_rui_log_format *rui_formatp;
-
- rui_formatp = item->ri_buf[0].i_addr;
-
- ruip = xfs_rui_init(mp, rui_formatp->rui_nextents);
- error = xfs_rui_copy_format(&item->ri_buf[0], &ruip->rui_format);
- if (error) {
- xfs_rui_item_free(ruip);
- return error;
- }
- atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents);
-
- spin_lock(&log->l_ailp->ail_lock);
- /*
- * The RUI has two references. One for the RUD and one for RUI to ensure
- * it makes it into the AIL. Insert the RUI into the AIL directly and
- * drop the RUI reference. Note that xfs_trans_ail_update() drops the
- * AIL lock.
- */
- xfs_trans_ail_update(log->l_ailp, &ruip->rui_item, lsn);
- xfs_rui_release(ruip);
- return 0;
-}
-
-
-/*
- * This routine is called when an RUD format structure is found in a committed
- * transaction in the log. Its purpose is to cancel the corresponding RUI if it
- * was still in the log. To do this it searches the AIL for the RUI with an id
- * equal to that in the RUD format structure. If we find it we drop the RUD
- * reference, which removes the RUI from the AIL and frees it.
- */
-STATIC int
-xlog_recover_rud_pass2(
- struct xlog *log,
- struct xlog_recover_item *item)
-{
- struct xfs_rud_log_format *rud_formatp;
- struct xfs_rui_log_item *ruip = NULL;
- struct xfs_log_item *lip;
- uint64_t rui_id;
- struct xfs_ail_cursor cur;
- struct xfs_ail *ailp = log->l_ailp;
-
- rud_formatp = item->ri_buf[0].i_addr;
- ASSERT(item->ri_buf[0].i_len == sizeof(struct xfs_rud_log_format));
- rui_id = rud_formatp->rud_rui_id;
-
- /*
- * Search for the RUI with the id in the RUD format structure in the
- * AIL.
- */
- spin_lock(&ailp->ail_lock);
- lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
- while (lip != NULL) {
- if (lip->li_type == XFS_LI_RUI) {
- ruip = (struct xfs_rui_log_item *)lip;
- if (ruip->rui_format.rui_id == rui_id) {
- /*
- * Drop the RUD reference to the RUI. This
- * removes the RUI from the AIL and frees it.
- */
- spin_unlock(&ailp->ail_lock);
- xfs_rui_release(ruip);
- spin_lock(&ailp->ail_lock);
- break;
- }
- }
- lip = xfs_trans_ail_cursor_next(ailp, &cur);
- }
-
- xfs_trans_ail_cursor_done(&cur);
- spin_unlock(&ailp->ail_lock);
-
- return 0;
-}
-
-/*
- * Copy a CUI format buffer from the given buf into the destination
- * CUI format structure. The CUI/CUD items were designed not to need any
- * special alignment handling.
- */
-static int
-xfs_cui_copy_format(
- struct xfs_log_iovec *buf,
- struct xfs_cui_log_format *dst_cui_fmt)
-{
- struct xfs_cui_log_format *src_cui_fmt;
- uint len;
-
- src_cui_fmt = buf->i_addr;
- len = xfs_cui_log_format_sizeof(src_cui_fmt->cui_nextents);
-
- if (buf->i_len == len) {
- memcpy(dst_cui_fmt, src_cui_fmt, len);
- return 0;
- }
- XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
- return -EFSCORRUPTED;
-}
-
-/*
- * This routine is called to create an in-core extent refcount update
- * item from the cui format structure which was logged on disk.
- * It allocates an in-core cui, copies the extents from the format
- * structure into it, and adds the cui to the AIL with the given
- * LSN.
- */
-STATIC int
-xlog_recover_cui_pass2(
- struct xlog *log,
- struct xlog_recover_item *item,
- xfs_lsn_t lsn)
-{
- int error;
- struct xfs_mount *mp = log->l_mp;
- struct xfs_cui_log_item *cuip;
- struct xfs_cui_log_format *cui_formatp;
-
- cui_formatp = item->ri_buf[0].i_addr;
-
- cuip = xfs_cui_init(mp, cui_formatp->cui_nextents);
- error = xfs_cui_copy_format(&item->ri_buf[0], &cuip->cui_format);
- if (error) {
- xfs_cui_item_free(cuip);
- return error;
- }
- atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents);
-
- spin_lock(&log->l_ailp->ail_lock);
- /*
- * The CUI has two references. One for the CUD and one for the CUI to ensure
- * it makes it into the AIL. Insert the CUI into the AIL directly and
- * drop the CUI reference. Note that xfs_trans_ail_update() drops the
- * AIL lock.
- */
- xfs_trans_ail_update(log->l_ailp, &cuip->cui_item, lsn);
- xfs_cui_release(cuip);
- return 0;
-}
-
-
-/*
- * This routine is called when a CUD format structure is found in a committed
- * transaction in the log. Its purpose is to cancel the corresponding CUI if it
- * was still in the log. To do this it searches the AIL for the CUI with an id
- * equal to that in the CUD format structure. If we find it we drop the CUD
- * reference, which removes the CUI from the AIL and frees it.
- */
-STATIC int
-xlog_recover_cud_pass2(
- struct xlog *log,
- struct xlog_recover_item *item)
-{
- struct xfs_cud_log_format *cud_formatp;
- struct xfs_cui_log_item *cuip = NULL;
- struct xfs_log_item *lip;
- uint64_t cui_id;
- struct xfs_ail_cursor cur;
- struct xfs_ail *ailp = log->l_ailp;
-
- cud_formatp = item->ri_buf[0].i_addr;
- if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format)) {
- XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
- return -EFSCORRUPTED;
- }
- cui_id = cud_formatp->cud_cui_id;
-
- /*
- * Search for the CUI with the id in the CUD format structure in the
- * AIL.
- */
- spin_lock(&ailp->ail_lock);
- lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
- while (lip != NULL) {
- if (lip->li_type == XFS_LI_CUI) {
- cuip = (struct xfs_cui_log_item *)lip;
- if (cuip->cui_format.cui_id == cui_id) {
- /*
- * Drop the CUD reference to the CUI. This
- * removes the CUI from the AIL and frees it.
- */
- spin_unlock(&ailp->ail_lock);
- xfs_cui_release(cuip);
- spin_lock(&ailp->ail_lock);
- break;
- }
- }
- lip = xfs_trans_ail_cursor_next(ailp, &cur);
- }
-
- xfs_trans_ail_cursor_done(&cur);
- spin_unlock(&ailp->ail_lock);
-
- return 0;
-}
-
-/*
- * Copy a BUI format buffer from the given buf into the destination
- * BUI format structure. The BUI/BUD items were designed not to need any
- * special alignment handling.
- */
-static int
-xfs_bui_copy_format(
- struct xfs_log_iovec *buf,
- struct xfs_bui_log_format *dst_bui_fmt)
-{
- struct xfs_bui_log_format *src_bui_fmt;
- uint len;
-
- src_bui_fmt = buf->i_addr;
- len = xfs_bui_log_format_sizeof(src_bui_fmt->bui_nextents);
-
- if (buf->i_len == len) {
- memcpy(dst_bui_fmt, src_bui_fmt, len);
- return 0;
- }
- XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
- return -EFSCORRUPTED;
-}
-
-/*
- * This routine is called to create an in-core extent bmap update
- * item from the bui format structure which was logged on disk.
- * It allocates an in-core bui, copies the extents from the format
- * structure into it, and adds the bui to the AIL with the given
- * LSN.
- */
-STATIC int
-xlog_recover_bui_pass2(
- struct xlog *log,
- struct xlog_recover_item *item,
- xfs_lsn_t lsn)
-{
- int error;
- struct xfs_mount *mp = log->l_mp;
- struct xfs_bui_log_item *buip;
- struct xfs_bui_log_format *bui_formatp;
-
- bui_formatp = item->ri_buf[0].i_addr;
-
- if (bui_formatp->bui_nextents != XFS_BUI_MAX_FAST_EXTENTS) {
- XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
- return -EFSCORRUPTED;
- }
- buip = xfs_bui_init(mp);
- error = xfs_bui_copy_format(&item->ri_buf[0], &buip->bui_format);
- if (error) {
- xfs_bui_item_free(buip);
- return error;
- }
- atomic_set(&buip->bui_next_extent, bui_formatp->bui_nextents);
-
- spin_lock(&log->l_ailp->ail_lock);
- /*
- * The BUI has two references. One for the BUD and one for the BUI to
- * ensure it makes it into the AIL. Insert the BUI into the AIL directly
- * and drop the BUI reference. Note that xfs_trans_ail_update() drops the
- * AIL lock.
- */
- xfs_trans_ail_update(log->l_ailp, &buip->bui_item, lsn);
- xfs_bui_release(buip);
- return 0;
-}
-
-
-/*
- * This routine is called when a BUD format structure is found in a committed
- * transaction in the log. Its purpose is to cancel the corresponding BUI if it
- * was still in the log. To do this it searches the AIL for the BUI with an id
- * equal to that in the BUD format structure. If we find it we drop the BUD
- * reference, which removes the BUI from the AIL and frees it.
- */
-STATIC int
-xlog_recover_bud_pass2(
- struct xlog *log,
- struct xlog_recover_item *item)
-{
- struct xfs_bud_log_format *bud_formatp;
- struct xfs_bui_log_item *buip = NULL;
- struct xfs_log_item *lip;
- uint64_t bui_id;
- struct xfs_ail_cursor cur;
- struct xfs_ail *ailp = log->l_ailp;
-
- bud_formatp = item->ri_buf[0].i_addr;
- if (item->ri_buf[0].i_len != sizeof(struct xfs_bud_log_format)) {
- XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
- return -EFSCORRUPTED;
- }
- bui_id = bud_formatp->bud_bui_id;
-
- /*
- * Search for the BUI with the id in the BUD format structure in the
- * AIL.
- */
- spin_lock(&ailp->ail_lock);
- lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
- while (lip != NULL) {
- if (lip->li_type == XFS_LI_BUI) {
- buip = (struct xfs_bui_log_item *)lip;
- if (buip->bui_format.bui_id == bui_id) {
- /*
- * Drop the BUD reference to the BUI. This
- * removes the BUI from the AIL and frees it.
- */
- spin_unlock(&ailp->ail_lock);
- xfs_bui_release(buip);
- spin_lock(&ailp->ail_lock);
- break;
- }
- }
- lip = xfs_trans_ail_cursor_next(ailp, &cur);
- }
-
- xfs_trans_ail_cursor_done(&cur);
- spin_unlock(&ailp->ail_lock);
-
- return 0;
-}
-
-/*
- * This routine is called when an inode create format structure is found in a
- * committed transaction in the log. Its purpose is to initialise the inodes
- * being allocated on disk. This requires us to get inode cluster buffers that
- * match the range to be initialised, stamped with inode templates and written
- * by delayed write so that subsequent modifications will hit the cached buffer
- * and only need writing out at the end of recovery.
- */
-STATIC int
-xlog_recover_do_icreate_pass2(
- struct xlog *log,
- struct list_head *buffer_list,
- xlog_recover_item_t *item)
-{
- struct xfs_mount *mp = log->l_mp;
- struct xfs_icreate_log *icl;
- struct xfs_ino_geometry *igeo = M_IGEO(mp);
- xfs_agnumber_t agno;
- xfs_agblock_t agbno;
- unsigned int count;
- unsigned int isize;
- xfs_agblock_t length;
- int bb_per_cluster;
- int cancel_count;
- int nbufs;
- int i;
-
- icl = (struct xfs_icreate_log *)item->ri_buf[0].i_addr;
- if (icl->icl_type != XFS_LI_ICREATE) {
- xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad type");
- return -EINVAL;
- }
-
- if (icl->icl_size != 1) {
- xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad icl size");
- return -EINVAL;
- }
-
- agno = be32_to_cpu(icl->icl_ag);
- if (agno >= mp->m_sb.sb_agcount) {
- xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agno");
- return -EINVAL;
- }
- agbno = be32_to_cpu(icl->icl_agbno);
- if (!agbno || agbno == NULLAGBLOCK || agbno >= mp->m_sb.sb_agblocks) {
- xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad agbno");
- return -EINVAL;
- }
- isize = be32_to_cpu(icl->icl_isize);
- if (isize != mp->m_sb.sb_inodesize) {
- xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad isize");
- return -EINVAL;
- }
- count = be32_to_cpu(icl->icl_count);
- if (!count) {
- xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad count");
- return -EINVAL;
- }
- length = be32_to_cpu(icl->icl_length);
- if (!length || length >= mp->m_sb.sb_agblocks) {
- xfs_warn(log->l_mp, "xlog_recover_do_icreate_trans: bad length");
- return -EINVAL;
- }
-
- /*
- * The inode chunk is either full or sparse and we only support
- * m_ino_geo.ialloc_min_blks sized sparse allocations at this time.
- */
- if (length != igeo->ialloc_blks &&
- length != igeo->ialloc_min_blks) {
- xfs_warn(log->l_mp,
- "%s: unsupported chunk length", __FUNCTION__);
- return -EINVAL;
- }
-
- /* verify inode count is consistent with extent length */
- if ((count >> mp->m_sb.sb_inopblog) != length) {
- xfs_warn(log->l_mp,
- "%s: inconsistent inode count and chunk length",
- __func__);
- return -EINVAL;
- }
-
- /*
- * The icreate transaction can cover multiple cluster buffers and these
- * buffers could have been freed and reused. Check the individual
- * buffers for cancellation so we don't overwrite anything written after
- * a cancellation.
- */
- bb_per_cluster = XFS_FSB_TO_BB(mp, igeo->blocks_per_cluster);
- nbufs = length / igeo->blocks_per_cluster;
- for (i = 0, cancel_count = 0; i < nbufs; i++) {
- xfs_daddr_t daddr;
-
- daddr = XFS_AGB_TO_DADDR(mp, agno,
- agbno + i * igeo->blocks_per_cluster);
- if (xlog_check_buffer_cancelled(log, daddr, bb_per_cluster, 0))
- cancel_count++;
- }
-
- /*
- * We currently only use icreate for a single allocation at a time. This
- * means we should expect either all or none of the buffers to be
- * cancelled. Be conservative and skip replay if at least one buffer is
- * cancelled, but warn the user that something is awry if the buffers
- * are not consistent.
- *
- * XXX: This must be refined to only skip cancelled clusters once we use
- * icreate for multiple chunk allocations.
- */
- ASSERT(!cancel_count || cancel_count == nbufs);
- if (cancel_count) {
- if (cancel_count != nbufs)
- xfs_warn(mp,
- "WARNING: partial inode chunk cancellation, skipped icreate.");
- trace_xfs_log_recover_icreate_cancel(log, icl);
- return 0;
- }
-
- trace_xfs_log_recover_icreate_recover(log, icl);
- return xfs_ialloc_inode_init(mp, NULL, buffer_list, count, agno, agbno,
- length, be32_to_cpu(icl->icl_gen));
-}
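
The count/length consistency check above is simple shift arithmetic; a worked instance (geometry values assumed for illustration):

/*
 * With 4096-byte blocks and 512-byte inodes, sb_inopblog =
 * log2(4096 / 512) = 3. A full 64-inode chunk must then satisfy
 * 64 >> 3 == 8 == icl_length; any other combination is rejected
 * as inconsistent.
 */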
-
-STATIC void
-xlog_recover_buffer_ra_pass2(
- struct xlog *log,
- struct xlog_recover_item *item)
-{
- struct xfs_buf_log_format *buf_f = item->ri_buf[0].i_addr;
- struct xfs_mount *mp = log->l_mp;
-
- if (xlog_peek_buffer_cancelled(log, buf_f->blf_blkno,
- buf_f->blf_len, buf_f->blf_flags)) {
- return;
- }
-
- xfs_buf_readahead(mp->m_ddev_targp, buf_f->blf_blkno,
- buf_f->blf_len, NULL);
-}
-
-STATIC void
-xlog_recover_inode_ra_pass2(
- struct xlog *log,
- struct xlog_recover_item *item)
-{
- struct xfs_inode_log_format ilf_buf;
- struct xfs_inode_log_format *ilfp;
- struct xfs_mount *mp = log->l_mp;
- int error;
-
- if (item->ri_buf[0].i_len == sizeof(struct xfs_inode_log_format)) {
- ilfp = item->ri_buf[0].i_addr;
- } else {
- ilfp = &ilf_buf;
- memset(ilfp, 0, sizeof(*ilfp));
- error = xfs_inode_item_format_convert(&item->ri_buf[0], ilfp);
- if (error)
- return;
- }
-
- if (xlog_peek_buffer_cancelled(log, ilfp->ilf_blkno, ilfp->ilf_len, 0))
- return;
-
- xfs_buf_readahead(mp->m_ddev_targp, ilfp->ilf_blkno,
- ilfp->ilf_len, &xfs_inode_buf_ra_ops);
-}
-
-STATIC void
-xlog_recover_dquot_ra_pass2(
- struct xlog *log,
- struct xlog_recover_item *item)
-{
- struct xfs_mount *mp = log->l_mp;
- struct xfs_disk_dquot *recddq;
- struct xfs_dq_logformat *dq_f;
- uint type;
- int len;
-
-
- if (mp->m_qflags == 0)
- return;
-
- recddq = item->ri_buf[1].i_addr;
- if (recddq == NULL)
- return;
- if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot))
- return;
-
- type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP);
- ASSERT(type);
- if (log->l_quotaoffs_flag & type)
- return;
-
- dq_f = item->ri_buf[0].i_addr;
- ASSERT(dq_f);
- ASSERT(dq_f->qlf_len == 1);
-
- len = XFS_FSB_TO_BB(mp, dq_f->qlf_len);
- if (xlog_peek_buffer_cancelled(log, dq_f->qlf_blkno, len, 0))
- return;
-
- xfs_buf_readahead(mp->m_ddev_targp, dq_f->qlf_blkno, len,
- &xfs_dquot_buf_ra_ops);
-}
-
-STATIC void
-xlog_recover_ra_pass2(
- struct xlog *log,
- struct xlog_recover_item *item)
-{
- switch (ITEM_TYPE(item)) {
- case XFS_LI_BUF:
- xlog_recover_buffer_ra_pass2(log, item);
- break;
- case XFS_LI_INODE:
- xlog_recover_inode_ra_pass2(log, item);
- break;
- case XFS_LI_DQUOT:
- xlog_recover_dquot_ra_pass2(log, item);
- break;
- case XFS_LI_EFI:
- case XFS_LI_EFD:
- case XFS_LI_QUOTAOFF:
- case XFS_LI_RUI:
- case XFS_LI_RUD:
- case XFS_LI_CUI:
- case XFS_LI_CUD:
- case XFS_LI_BUI:
- case XFS_LI_BUD:
- default:
- break;
- }
-}
-
-STATIC int
-xlog_recover_commit_pass1(
- struct xlog *log,
- struct xlog_recover *trans,
- struct xlog_recover_item *item)
-{
- trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);
-
- switch (ITEM_TYPE(item)) {
- case XFS_LI_BUF:
- return xlog_recover_buffer_pass1(log, item);
- case XFS_LI_QUOTAOFF:
- return xlog_recover_quotaoff_pass1(log, item);
- case XFS_LI_INODE:
- case XFS_LI_EFI:
- case XFS_LI_EFD:
- case XFS_LI_DQUOT:
- case XFS_LI_ICREATE:
- case XFS_LI_RUI:
- case XFS_LI_RUD:
- case XFS_LI_CUI:
- case XFS_LI_CUD:
- case XFS_LI_BUI:
- case XFS_LI_BUD:
- /* nothing to do in pass 1 */
- return 0;
- default:
- xfs_warn(log->l_mp, "%s: invalid item type (%d)",
- __func__, ITEM_TYPE(item));
- ASSERT(0);
- return -EFSCORRUPTED;
- }
-}
-
-STATIC int
-xlog_recover_commit_pass2(
- struct xlog *log,
- struct xlog_recover *trans,
- struct list_head *buffer_list,
- struct xlog_recover_item *item)
-{
- trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);
-
- switch (ITEM_TYPE(item)) {
- case XFS_LI_BUF:
- return xlog_recover_buffer_pass2(log, buffer_list, item,
- trans->r_lsn);
- case XFS_LI_INODE:
- return xlog_recover_inode_pass2(log, buffer_list, item,
- trans->r_lsn);
- case XFS_LI_EFI:
- return xlog_recover_efi_pass2(log, item, trans->r_lsn);
- case XFS_LI_EFD:
- return xlog_recover_efd_pass2(log, item);
- case XFS_LI_RUI:
- return xlog_recover_rui_pass2(log, item, trans->r_lsn);
- case XFS_LI_RUD:
- return xlog_recover_rud_pass2(log, item);
- case XFS_LI_CUI:
- return xlog_recover_cui_pass2(log, item, trans->r_lsn);
- case XFS_LI_CUD:
- return xlog_recover_cud_pass2(log, item);
- case XFS_LI_BUI:
- return xlog_recover_bui_pass2(log, item, trans->r_lsn);
- case XFS_LI_BUD:
- return xlog_recover_bud_pass2(log, item);
- case XFS_LI_DQUOT:
- return xlog_recover_dquot_pass2(log, buffer_list, item,
- trans->r_lsn);
- case XFS_LI_ICREATE:
- return xlog_recover_do_icreate_pass2(log, buffer_list, item);
- case XFS_LI_QUOTAOFF:
- /* nothing to do in pass2 */
- return 0;
- default:
- xfs_warn(log->l_mp, "%s: invalid item type (%d)",
- __func__, ITEM_TYPE(item));
- ASSERT(0);
- return -EFSCORRUPTED;
- }
+ if (!xlog_is_buffer_cancelled(log, blkno, len))
+ xfs_buf_readahead(log->l_mp->m_ddev_targp, blkno, len, ops);
}
STATIC int
@@ -4072,8 +1967,12 @@ xlog_recover_items_pass2(
int error = 0;
list_for_each_entry(item, item_list, ri_list) {
- error = xlog_recover_commit_pass2(log, trans,
- buffer_list, item);
+ trace_xfs_log_recover_item_recover(log, trans, item,
+ XLOG_RECOVER_PASS2);
+
+ if (item->ri_ops->commit_pass2)
+ error = item->ri_ops->commit_pass2(log, buffer_list,
+ item, trans->r_lsn);
if (error)
return error;
}
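
The open-coded switches above give way to a per-item-type operations vector; the diff only shows the call sites (item->ri_ops->...), so the vector's shape below is inferred from them (field names and the exact member set are an assumption):

/* Assumed shape, inferred from the ri_ops call sites in this hunk. */
struct xlog_recover_item_ops {
	uint16_t	item_type;	/* XFS_LI_* type this handles */

	/* pass 1: e.g. track cancelled buffers and quotaoff state */
	int	(*commit_pass1)(struct xlog *log,
				struct xlog_recover_item *item);

	/* pass 2 readahead: warm the buffer cache ahead of replay */
	void	(*ra_pass2)(struct xlog *log,
			    struct xlog_recover_item *item);

	/* pass 2: replay the item into the filesystem */
	int	(*commit_pass2)(struct xlog *log,
				struct list_head *buffer_list,
				struct xlog_recover_item *item,
				xfs_lsn_t lsn);
};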
@@ -4110,12 +2009,16 @@ xlog_recover_commit_trans(
return error;
list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) {
+ trace_xfs_log_recover_item_recover(log, trans, item, pass);
+
switch (pass) {
case XLOG_RECOVER_PASS1:
- error = xlog_recover_commit_pass1(log, trans, item);
+ if (item->ri_ops->commit_pass1)
+ error = item->ri_ops->commit_pass1(log, item);
break;
case XLOG_RECOVER_PASS2:
- xlog_recover_ra_pass2(log, item);
+ if (item->ri_ops->ra_pass2)
+ item->ri_ops->ra_pass2(log, item);
list_move_tail(&item->ri_list, &ra_list);
items_queued++;
if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {
@@ -4152,9 +2055,9 @@ STATIC void
xlog_recover_add_item(
struct list_head *head)
{
- xlog_recover_item_t *item;
+ struct xlog_recover_item *item;
- item = kmem_zalloc(sizeof(xlog_recover_item_t), 0);
+ item = kmem_zalloc(sizeof(struct xlog_recover_item), 0);
INIT_LIST_HEAD(&item->ri_list);
list_add_tail(&item->ri_list, head);
}
@@ -4166,7 +2069,7 @@ xlog_recover_add_to_cont_trans(
char *dp,
int len)
{
- xlog_recover_item_t *item;
+ struct xlog_recover_item *item;
char *ptr, *old_ptr;
int old_len;
@@ -4189,7 +2092,8 @@ xlog_recover_add_to_cont_trans(
}
/* take the tail entry */
- item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
+ item = list_entry(trans->r_itemq.prev, struct xlog_recover_item,
+ ri_list);
old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
old_len = item->ri_buf[item->ri_cnt-1].i_len;
@@ -4223,7 +2127,7 @@ xlog_recover_add_to_trans(
int len)
{
struct xfs_inode_log_format *in_f; /* any will do */
- xlog_recover_item_t *item;
+ struct xlog_recover_item *item;
char *ptr;
if (!len)
@@ -4259,13 +2163,14 @@ xlog_recover_add_to_trans(
in_f = (struct xfs_inode_log_format *)ptr;
/* take the tail entry */
- item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
+ item = list_entry(trans->r_itemq.prev, struct xlog_recover_item,
+ ri_list);
if (item->ri_total != 0 &&
item->ri_total == item->ri_cnt) {
/* tail item is in use, get a new one */
xlog_recover_add_item(&trans->r_itemq);
item = list_entry(trans->r_itemq.prev,
- xlog_recover_item_t, ri_list);
+ struct xlog_recover_item, ri_list);
}
if (item->ri_total == 0) { /* first region to be added */
@@ -4311,7 +2216,7 @@ STATIC void
xlog_recover_free_trans(
struct xlog_recover *trans)
{
- xlog_recover_item_t *item, *n;
+ struct xlog_recover_item *item, *n;
int i;
hlist_del_init(&trans->r_list);
@@ -4563,180 +2468,6 @@ xlog_recover_process_data(
return 0;
}
-/* Recover the EFI if necessary. */
-STATIC int
-xlog_recover_process_efi(
- struct xfs_mount *mp,
- struct xfs_ail *ailp,
- struct xfs_log_item *lip)
-{
- struct xfs_efi_log_item *efip;
- int error;
-
- /*
- * Skip EFIs that we've already processed.
- */
- efip = container_of(lip, struct xfs_efi_log_item, efi_item);
- if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags))
- return 0;
-
- spin_unlock(&ailp->ail_lock);
- error = xfs_efi_recover(mp, efip);
- spin_lock(&ailp->ail_lock);
-
- return error;
-}
-
-/* Release the EFI since we're cancelling everything. */
-STATIC void
-xlog_recover_cancel_efi(
- struct xfs_mount *mp,
- struct xfs_ail *ailp,
- struct xfs_log_item *lip)
-{
- struct xfs_efi_log_item *efip;
-
- efip = container_of(lip, struct xfs_efi_log_item, efi_item);
-
- spin_unlock(&ailp->ail_lock);
- xfs_efi_release(efip);
- spin_lock(&ailp->ail_lock);
-}
-
-/* Recover the RUI if necessary. */
-STATIC int
-xlog_recover_process_rui(
- struct xfs_mount *mp,
- struct xfs_ail *ailp,
- struct xfs_log_item *lip)
-{
- struct xfs_rui_log_item *ruip;
- int error;
-
- /*
- * Skip RUIs that we've already processed.
- */
- ruip = container_of(lip, struct xfs_rui_log_item, rui_item);
- if (test_bit(XFS_RUI_RECOVERED, &ruip->rui_flags))
- return 0;
-
- spin_unlock(&ailp->ail_lock);
- error = xfs_rui_recover(mp, ruip);
- spin_lock(&ailp->ail_lock);
-
- return error;
-}
-
-/* Release the RUI since we're cancelling everything. */
-STATIC void
-xlog_recover_cancel_rui(
- struct xfs_mount *mp,
- struct xfs_ail *ailp,
- struct xfs_log_item *lip)
-{
- struct xfs_rui_log_item *ruip;
-
- ruip = container_of(lip, struct xfs_rui_log_item, rui_item);
-
- spin_unlock(&ailp->ail_lock);
- xfs_rui_release(ruip);
- spin_lock(&ailp->ail_lock);
-}
-
-/* Recover the CUI if necessary. */
-STATIC int
-xlog_recover_process_cui(
- struct xfs_trans *parent_tp,
- struct xfs_ail *ailp,
- struct xfs_log_item *lip)
-{
- struct xfs_cui_log_item *cuip;
- int error;
-
- /*
- * Skip CUIs that we've already processed.
- */
- cuip = container_of(lip, struct xfs_cui_log_item, cui_item);
- if (test_bit(XFS_CUI_RECOVERED, &cuip->cui_flags))
- return 0;
-
- spin_unlock(&ailp->ail_lock);
- error = xfs_cui_recover(parent_tp, cuip);
- spin_lock(&ailp->ail_lock);
-
- return error;
-}
-
-/* Release the CUI since we're cancelling everything. */
-STATIC void
-xlog_recover_cancel_cui(
- struct xfs_mount *mp,
- struct xfs_ail *ailp,
- struct xfs_log_item *lip)
-{
- struct xfs_cui_log_item *cuip;
-
- cuip = container_of(lip, struct xfs_cui_log_item, cui_item);
-
- spin_unlock(&ailp->ail_lock);
- xfs_cui_release(cuip);
- spin_lock(&ailp->ail_lock);
-}
-
-/* Recover the BUI if necessary. */
-STATIC int
-xlog_recover_process_bui(
- struct xfs_trans *parent_tp,
- struct xfs_ail *ailp,
- struct xfs_log_item *lip)
-{
- struct xfs_bui_log_item *buip;
- int error;
-
- /*
- * Skip BUIs that we've already processed.
- */
- buip = container_of(lip, struct xfs_bui_log_item, bui_item);
- if (test_bit(XFS_BUI_RECOVERED, &buip->bui_flags))
- return 0;
-
- spin_unlock(&ailp->ail_lock);
- error = xfs_bui_recover(parent_tp, buip);
- spin_lock(&ailp->ail_lock);
-
- return error;
-}
-
-/* Release the BUI since we're cancelling everything. */
-STATIC void
-xlog_recover_cancel_bui(
- struct xfs_mount *mp,
- struct xfs_ail *ailp,
- struct xfs_log_item *lip)
-{
- struct xfs_bui_log_item *buip;
-
- buip = container_of(lip, struct xfs_bui_log_item, bui_item);
-
- spin_unlock(&ailp->ail_lock);
- xfs_bui_release(buip);
- spin_lock(&ailp->ail_lock);
-}
-
-/* Is this log item a deferred action intent? */
-static inline bool xlog_item_is_intent(struct xfs_log_item *lip)
-{
- switch (lip->li_type) {
- case XFS_LI_EFI:
- case XFS_LI_RUI:
- case XFS_LI_CUI:
- case XFS_LI_BUI:
- return true;
- default:
- return false;
- }
-}
-
/* Take all the collected deferred ops and finish them in order. */
static int
xlog_finish_defer_ops(
@@ -4771,6 +2502,13 @@ xlog_finish_defer_ops(
return xfs_trans_commit(tp);
}
+/* Is this log item a deferred action intent? */
+static inline bool xlog_item_is_intent(struct xfs_log_item *lip)
+{
+ return lip->li_ops->iop_recover != NULL &&
+ lip->li_ops->iop_match != NULL;
+}
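
In other words, an item type opts in to intent recovery simply by providing both methods in its li_ops. A sketch of what the CUI's ops vector plausibly becomes (the .iop_match name is an assumption; the other members appear elsewhere in this patch):

static const struct xfs_item_ops xfs_cui_item_ops = {
	.iop_size	= xfs_cui_item_size,
	.iop_format	= xfs_cui_item_format,
	.iop_unpin	= xfs_cui_item_unpin,
	.iop_release	= xfs_cui_item_release,
	.iop_recover	= xfs_cui_item_recover,	/* replay after a crash */
	.iop_match	= xfs_cui_item_match,	/* match a CUD's cui_id */
};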
+
/*
* When this is called, all of the log intent items which did not have
* corresponding log done items should be in the AIL. What we do now
@@ -4841,23 +2579,14 @@ xlog_recover_process_intents(
/*
* NOTE: If your intent processing routine can create more
- * deferred ops, you /must/ attach them to the dfops in this
- * routine or else those subsequent intents will get
+ * deferred ops, you /must/ attach them to the transaction in
+ * this routine or else those subsequent intents will get
* replayed in the wrong order!
*/
- switch (lip->li_type) {
- case XFS_LI_EFI:
- error = xlog_recover_process_efi(log->l_mp, ailp, lip);
- break;
- case XFS_LI_RUI:
- error = xlog_recover_process_rui(log->l_mp, ailp, lip);
- break;
- case XFS_LI_CUI:
- error = xlog_recover_process_cui(parent_tp, ailp, lip);
- break;
- case XFS_LI_BUI:
- error = xlog_recover_process_bui(parent_tp, ailp, lip);
- break;
+ if (!test_and_set_bit(XFS_LI_RECOVERED, &lip->li_flags)) {
+ spin_unlock(&ailp->ail_lock);
+ error = lip->li_ops->iop_recover(lip, parent_tp);
+ spin_lock(&ailp->ail_lock);
}
if (error)
goto out;
@@ -4901,21 +2630,9 @@ xlog_recover_cancel_intents(
break;
}
- switch (lip->li_type) {
- case XFS_LI_EFI:
- xlog_recover_cancel_efi(log->l_mp, ailp, lip);
- break;
- case XFS_LI_RUI:
- xlog_recover_cancel_rui(log->l_mp, ailp, lip);
- break;
- case XFS_LI_CUI:
- xlog_recover_cancel_cui(log->l_mp, ailp, lip);
- break;
- case XFS_LI_BUI:
- xlog_recover_cancel_bui(log->l_mp, ailp, lip);
- break;
- }
-
+ spin_unlock(&ailp->ail_lock);
+ lip->li_ops->iop_release(lip);
+ spin_lock(&ailp->ail_lock);
lip = xfs_trans_ail_cursor_next(ailp, &cur);
}
@@ -4987,7 +2704,7 @@ xlog_recover_process_one_iunlink(
/*
* Get the on disk inode to find the next inode in the bucket.
*/
- error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0, 0);
+ error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0);
if (error)
goto fail_iput;
diff --git a/fs/xfs/xfs_message.c b/fs/xfs/xfs_message.c
index e0f9d3b6abe9..bc66d95c8d4c 100644
--- a/fs/xfs/xfs_message.c
+++ b/fs/xfs/xfs_message.c
@@ -117,3 +117,25 @@ xfs_hex_dump(const void *p, int length)
{
print_hex_dump(KERN_ALERT, "", DUMP_PREFIX_OFFSET, 16, 1, p, length, 1);
}
+
+void
+xfs_buf_alert_ratelimited(
+ struct xfs_buf *bp,
+ const char *rlmsg,
+ const char *fmt,
+ ...)
+{
+ struct xfs_mount *mp = bp->b_mount;
+ struct va_format vaf;
+ va_list args;
+
+ /* use the more aggressive per-target rate limit for buffers */
+ if (!___ratelimit(&bp->b_target->bt_ioerror_rl, rlmsg))
+ return;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ __xfs_printk(KERN_ALERT, mp, &vaf);
+ va_end(args);
+}
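
A hypothetical call site, to show the intended division of labour: rlmsg is the fixed key fed to the per-target rate limiter, while fmt carries the per-event details:

/* Illustration only; not a call site from this patch. */
xfs_buf_alert_ratelimited(bp, "XFS: metadata IO error",
	"metadata I/O error at daddr 0x%llx len %d",
	(long long)bp->b_bn, bp->b_length);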
diff --git a/fs/xfs/xfs_message.h b/fs/xfs/xfs_message.h
index 0b05e10995a0..4d9bd6bb63ca 100644
--- a/fs/xfs/xfs_message.h
+++ b/fs/xfs/xfs_message.h
@@ -31,15 +31,27 @@ void xfs_debug(const struct xfs_mount *mp, const char *fmt, ...)
}
#endif
-#define xfs_printk_ratelimited(func, dev, fmt, ...) \
+#define xfs_printk_ratelimited(func, dev, fmt, ...) \
do { \
static DEFINE_RATELIMIT_STATE(_rs, \
DEFAULT_RATELIMIT_INTERVAL, \
DEFAULT_RATELIMIT_BURST); \
if (__ratelimit(&_rs)) \
- func(dev, fmt, ##__VA_ARGS__); \
+ func(dev, fmt, ##__VA_ARGS__); \
} while (0)
+#define xfs_printk_once(func, dev, fmt, ...) \
+({ \
+ static bool __section(.data.once) __print_once; \
+ bool __ret_print_once = !__print_once; \
+ \
+ if (!__print_once) { \
+ __print_once = true; \
+ func(dev, fmt, ##__VA_ARGS__); \
+ } \
+ unlikely(__ret_print_once); \
+})
+
#define xfs_emerg_ratelimited(dev, fmt, ...) \
xfs_printk_ratelimited(xfs_emerg, dev, fmt, ##__VA_ARGS__)
#define xfs_alert_ratelimited(dev, fmt, ...) \
@@ -57,9 +69,17 @@ do { \
#define xfs_debug_ratelimited(dev, fmt, ...) \
xfs_printk_ratelimited(xfs_debug, dev, fmt, ##__VA_ARGS__)
+#define xfs_warn_once(dev, fmt, ...) \
+ xfs_printk_once(xfs_warn, dev, fmt, ##__VA_ARGS__)
+#define xfs_notice_once(dev, fmt, ...) \
+ xfs_printk_once(xfs_notice, dev, fmt, ##__VA_ARGS__)
+
void assfail(struct xfs_mount *mp, char *expr, char *f, int l);
void asswarn(struct xfs_mount *mp, char *expr, char *f, int l);
extern void xfs_hex_dump(const void *p, int length);
+void xfs_buf_alert_ratelimited(struct xfs_buf *bp, const char *rlmsg,
+ const char *fmt, ...);
+
#endif /* __XFS_MESSAGE_H */
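
Note that xfs_printk_once() is a statement expression whose value is true only for the invocation that actually printed, mirroring printk_once(); that lets a caller pair one-time logging with one-time side effects. A contrived usage sketch:

/* Illustration only: do extra one-time work alongside the warning. */
if (xfs_warn_once(mp, "feature X is deprecated"))
	xfs_debug(mp, "deprecation warning issued for feature X");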
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index c5513e5a226a..d5dcf9869860 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -1190,39 +1190,6 @@ xfs_log_sbcount(xfs_mount_t *mp)
}
/*
- * Deltas for the inode count are +/-64, hence we use a large batch size
- * of 128 so we don't need to take the counter lock on every update.
- */
-#define XFS_ICOUNT_BATCH 128
-int
-xfs_mod_icount(
- struct xfs_mount *mp,
- int64_t delta)
-{
- percpu_counter_add_batch(&mp->m_icount, delta, XFS_ICOUNT_BATCH);
- if (__percpu_counter_compare(&mp->m_icount, 0, XFS_ICOUNT_BATCH) < 0) {
- ASSERT(0);
- percpu_counter_add(&mp->m_icount, -delta);
- return -EINVAL;
- }
- return 0;
-}
-
-int
-xfs_mod_ifree(
- struct xfs_mount *mp,
- int64_t delta)
-{
- percpu_counter_add(&mp->m_ifree, delta);
- if (percpu_counter_compare(&mp->m_ifree, 0) < 0) {
- ASSERT(0);
- percpu_counter_add(&mp->m_ifree, -delta);
- return -EINVAL;
- }
- return 0;
-}
-
-/*
* Deltas for the block count can vary from 1 to very large, but lock contention
* only occurs on frequent small block count updates such as in the delayed
* allocation path for buffered writes (page a time updates). Hence we set
@@ -1300,10 +1267,9 @@ xfs_mod_fdblocks(
spin_unlock(&mp->m_sb_lock);
return 0;
}
- printk_once(KERN_WARNING
- "Filesystem \"%s\": reserve blocks depleted! "
- "Consider increasing reserve pool size.",
- mp->m_super->s_id);
+ xfs_warn_once(mp,
+"Reserve blocks depleted! Consider increasing reserve pool size.");
+
fdblocks_enospc:
spin_unlock(&mp->m_sb_lock);
return -ENOSPC;
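
The removed counter helpers also illustrate the batching rule of thumb: percpu_counter_add_batch() only folds a CPU's local count into the global sum once it exceeds the batch, so the batch is chosen comfortably larger than the biggest single delta (inode counts move in +/-64 steps, hence 128). A minimal sketch of the add-then-verify pattern, under those assumptions:

#include <linux/percpu_counter.h>

#define MY_BATCH	128	/* > largest single delta (+/-64) */

static int mod_counter_checked(struct percpu_counter *c, int64_t delta)
{
	percpu_counter_add_batch(c, delta, MY_BATCH);
	/* a precise comparison must use the same batch size */
	if (__percpu_counter_compare(c, 0, MY_BATCH) < 0) {
		percpu_counter_add(c, -delta);	/* undo: went negative */
		return -EINVAL;
	}
	return 0;
}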
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index b2e4598fdf7d..3725d25ad97e 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -55,61 +55,25 @@ struct xfs_error_cfg {
long retry_timeout; /* in jiffies, -1 = infinite */
};
+/*
+ * The struct xfs_mount layout is optimised to separate read-mostly variables
+ * from variables that are frequently modified. We put the read-mostly variables
+ * first, then place all the other variables at the end.
+ *
+ * Typically, read-mostly variables are those that are set at mount time and
+ * never changed again, or only change rarely as a result of things like sysfs
+ * knobs being tweaked.
+ */
typedef struct xfs_mount {
+ struct xfs_sb m_sb; /* copy of fs superblock */
struct super_block *m_super;
-
- /*
- * Bitsets of per-fs metadata that have been checked and/or are sick.
- * Callers must hold m_sb_lock to access these two fields.
- */
- uint8_t m_fs_checked;
- uint8_t m_fs_sick;
- /*
- * Bitsets of rt metadata that have been checked and/or are sick.
- * Callers must hold m_sb_lock to access this field.
- */
- uint8_t m_rt_checked;
- uint8_t m_rt_sick;
-
struct xfs_ail *m_ail; /* fs active log item list */
-
- struct xfs_sb m_sb; /* copy of fs superblock */
- spinlock_t m_sb_lock; /* sb counter lock */
- struct percpu_counter m_icount; /* allocated inodes counter */
- struct percpu_counter m_ifree; /* free inodes counter */
- struct percpu_counter m_fdblocks; /* free block counter */
- /*
- * Count of data device blocks reserved for delayed allocations,
- * including indlen blocks. Does not include allocated CoW staging
- * extents or anything related to the rt device.
- */
- struct percpu_counter m_delalloc_blks;
-
struct xfs_buf *m_sb_bp; /* buffer for superblock */
char *m_rtname; /* realtime device name */
char *m_logname; /* external log device name */
- int m_bsize; /* fs logical block size */
- xfs_agnumber_t m_agfrotor; /* last ag where space found */
- xfs_agnumber_t m_agirotor; /* last ag dir inode alloced */
- spinlock_t m_agirotor_lock;/* .. and lock protecting it */
- xfs_agnumber_t m_maxagi; /* highest inode alloc group */
- uint m_allocsize_log;/* min write size log bytes */
- uint m_allocsize_blocks; /* min write size blocks */
struct xfs_da_geometry *m_dir_geo; /* directory block geometry */
struct xfs_da_geometry *m_attr_geo; /* attribute block geometry */
struct xlog *m_log; /* log specific stuff */
- struct xfs_ino_geometry m_ino_geo; /* inode geometry */
- int m_logbufs; /* number of log buffers */
- int m_logbsize; /* size of each log buffer */
- uint m_rsumlevels; /* rt summary levels */
- uint m_rsumsize; /* size of rt summary, bytes */
- /*
- * Optional cache of rt summary level per bitmap block with the
- * invariant that m_rsum_cache[bbno] <= the minimum i for which
- * rsum[i][bbno] != 0. Reads and writes are serialized by the rsumip
- * inode lock.
- */
- uint8_t *m_rsum_cache;
struct xfs_inode *m_rbmip; /* pointer to bitmap inode */
struct xfs_inode *m_rsumip; /* pointer to summary inode */
struct xfs_inode *m_rootip; /* pointer to root directory */
@@ -117,9 +81,26 @@ typedef struct xfs_mount {
xfs_buftarg_t *m_ddev_targp; /* saves taking the address */
xfs_buftarg_t *m_logdev_targp;/* ptr to log device */
xfs_buftarg_t *m_rtdev_targp; /* ptr to rt device */
+ /*
+ * Optional cache of rt summary level per bitmap block with the
+ * invariant that m_rsum_cache[bbno] <= the minimum i for which
+ * rsum[i][bbno] != 0. Reads and writes are serialized by the rsumip
+ * inode lock.
+ */
+ uint8_t *m_rsum_cache;
+ struct xfs_mru_cache *m_filestream; /* per-mount filestream data */
+ struct workqueue_struct *m_buf_workqueue;
+ struct workqueue_struct *m_unwritten_workqueue;
+ struct workqueue_struct *m_cil_workqueue;
+ struct workqueue_struct *m_reclaim_workqueue;
+ struct workqueue_struct *m_eofblocks_workqueue;
+ struct workqueue_struct *m_sync_workqueue;
+
+ int m_bsize; /* fs logical block size */
uint8_t m_blkbit_log; /* blocklog + NBBY */
uint8_t m_blkbb_log; /* blocklog - BBSHIFT */
uint8_t m_agno_log; /* log #ag's */
+ uint8_t m_sectbb_log; /* sectlog - BBSHIFT */
uint m_blockmask; /* sb_blocksize-1 */
uint m_blockwsize; /* sb_blocksize in words */
uint m_blockwmask; /* blockwsize-1 */
@@ -138,47 +119,82 @@ typedef struct xfs_mount {
xfs_extlen_t m_ag_prealloc_blocks; /* reserved ag blocks */
uint m_alloc_set_aside; /* space we can't use */
uint m_ag_max_usable; /* max space per AG */
- struct radix_tree_root m_perag_tree; /* per-ag accounting info */
- spinlock_t m_perag_lock; /* lock for m_perag_tree */
- struct mutex m_growlock; /* growfs mutex */
+ int m_dalign; /* stripe unit */
+ int m_swidth; /* stripe width */
+ xfs_agnumber_t m_maxagi; /* highest inode alloc group */
+ uint m_allocsize_log;/* min write size log bytes */
+ uint m_allocsize_blocks; /* min write size blocks */
+ int m_logbufs; /* number of log buffers */
+ int m_logbsize; /* size of each log buffer */
+ uint m_rsumlevels; /* rt summary levels */
+ uint m_rsumsize; /* size of rt summary, bytes */
int m_fixedfsid[2]; /* unchanged for life of FS */
- uint64_t m_flags; /* global mount flags */
- bool m_finobt_nores; /* no per-AG finobt resv. */
uint m_qflags; /* quota status flags */
+ uint64_t m_flags; /* global mount flags */
+ int64_t m_low_space[XFS_LOWSP_MAX];
+ /* low free space thresholds */
+ struct xfs_ino_geometry m_ino_geo; /* inode geometry */
struct xfs_trans_resv m_resv; /* precomputed res values */
+ bool m_always_cow;
+ bool m_fail_unmount;
+ bool m_finobt_nores; /* no per-AG finobt resv. */
+ bool m_update_sb; /* sb needs update in mount */
+
+ /*
+ * Bitsets of per-fs metadata that have been checked and/or are sick.
+ * Callers must hold m_sb_lock to access these two fields.
+ */
+ uint8_t m_fs_checked;
+ uint8_t m_fs_sick;
+ /*
+ * Bitsets of rt metadata that have been checked and/or are sick.
+ * Callers must hold m_sb_lock to access this field.
+ */
+ uint8_t m_rt_checked;
+ uint8_t m_rt_sick;
+
+ /*
+ * End of read-mostly variables. Frequently written variables and locks
+ * should be placed below this comment from now on. The first variable
+ * here is marked as cacheline aligned so it is separated from
+ * the read-mostly variables.
+ */
+
+ spinlock_t ____cacheline_aligned m_sb_lock; /* sb counter lock */
+ struct percpu_counter m_icount; /* allocated inodes counter */
+ struct percpu_counter m_ifree; /* free inodes counter */
+ struct percpu_counter m_fdblocks; /* free block counter */
+ /*
+ * Count of data device blocks reserved for delayed allocations,
+ * including indlen blocks. Does not include allocated CoW staging
+ * extents or anything related to the rt device.
+ */
+ struct percpu_counter m_delalloc_blks;
+
+ struct radix_tree_root m_perag_tree; /* per-ag accounting info */
+ spinlock_t m_perag_lock; /* lock for m_perag_tree */
uint64_t m_resblks; /* total reserved blocks */
uint64_t m_resblks_avail;/* available reserved blocks */
uint64_t m_resblks_save; /* reserved blks @ remount,ro */
- int m_dalign; /* stripe unit */
- int m_swidth; /* stripe width */
- uint8_t m_sectbb_log; /* sectlog - BBSHIFT */
- atomic_t m_active_trans; /* number trans frozen */
- struct xfs_mru_cache *m_filestream; /* per-mount filestream data */
struct delayed_work m_reclaim_work; /* background inode reclaim */
struct delayed_work m_eofblocks_work; /* background eof blocks
trimming */
struct delayed_work m_cowblocks_work; /* background cow blocks
trimming */
- bool m_update_sb; /* sb needs update in mount */
- int64_t m_low_space[XFS_LOWSP_MAX];
- /* low free space thresholds */
struct xfs_kobj m_kobj;
struct xfs_kobj m_error_kobj;
struct xfs_kobj m_error_meta_kobj;
struct xfs_error_cfg m_error_cfg[XFS_ERR_CLASS_MAX][XFS_ERR_ERRNO_MAX];
struct xstats m_stats; /* per-fs stats */
+ xfs_agnumber_t m_agfrotor; /* last ag where space found */
+ xfs_agnumber_t m_agirotor; /* last ag dir inode alloced */
+ spinlock_t m_agirotor_lock;/* .. and lock protecting it */
/*
* Workqueue item so that we can coalesce multiple inode flush attempts
* into a single flush.
*/
struct work_struct m_flush_inodes_work;
- struct workqueue_struct *m_buf_workqueue;
- struct workqueue_struct *m_unwritten_workqueue;
- struct workqueue_struct *m_cil_workqueue;
- struct workqueue_struct *m_reclaim_workqueue;
- struct workqueue_struct *m_eofblocks_workqueue;
- struct workqueue_struct *m_sync_workqueue;
/*
* Generation of the filesystem layout. This is incremented by each
@@ -190,9 +206,8 @@ typedef struct xfs_mount {
* to various other kinds of pain inflicted on the pNFS server.
*/
uint32_t m_generation;
+ struct mutex m_growlock; /* growfs mutex */
- bool m_always_cow;
- bool m_fail_unmount;
#ifdef DEBUG
/*
* Frequency with which errors are injected. Replaces xfs_etest; the
@@ -237,8 +252,8 @@ typedef struct xfs_mount {
#define XFS_MOUNT_FILESTREAMS (1ULL << 24) /* enable the filestreams
allocator */
#define XFS_MOUNT_NOATTR2 (1ULL << 25) /* disable use of attr2 format */
-
-#define XFS_MOUNT_DAX (1ULL << 62) /* TEST ONLY! */
+#define XFS_MOUNT_DAX_ALWAYS (1ULL << 26)
+#define XFS_MOUNT_DAX_NEVER (1ULL << 27)
/*
* Max and min values for mount-option defined I/O
@@ -259,8 +274,6 @@ void xfs_do_force_shutdown(struct xfs_mount *mp, int flags, char *fname,
#define SHUTDOWN_LOG_IO_ERROR 0x0002 /* write attempt to the log failed */
#define SHUTDOWN_FORCE_UMOUNT 0x0004 /* shutdown from a forced unmount */
#define SHUTDOWN_CORRUPT_INCORE 0x0008 /* corrupt in-memory data structures */
-#define SHUTDOWN_REMOTE_REQ 0x0010 /* shutdown came from remote cell */
-#define SHUTDOWN_DEVICE_REQ 0x0020 /* failed all paths to the device */
/*
* Flags for xfs_mountfs
@@ -394,8 +407,6 @@ extern int xfs_initialize_perag(xfs_mount_t *mp, xfs_agnumber_t agcount,
xfs_agnumber_t *maxagi);
extern void xfs_unmountfs(xfs_mount_t *);
-extern int xfs_mod_icount(struct xfs_mount *mp, int64_t delta);
-extern int xfs_mod_ifree(struct xfs_mount *mp, int64_t delta);
extern int xfs_mod_fdblocks(struct xfs_mount *mp, int64_t delta,
bool reserved);
extern int xfs_mod_frextents(struct xfs_mount *mp, int64_t delta);
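
The reorganisation above hinges on ____cacheline_aligned starting a fresh cache line at m_sb_lock; in miniature, the technique looks like this (struct and field names invented for illustration):

#include <linux/cache.h>
#include <linux/spinlock.h>

struct example_stats {
	/* read-mostly: set once at init, read on every operation */
	unsigned int	batch_size;
	unsigned long	flags;

	/*
	 * Hot, frequently written state starts on its own cache line so
	 * writers do not keep invalidating the read-mostly line above.
	 */
	spinlock_t ____cacheline_aligned lock;
	unsigned long	nr_ops;
};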
diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
index bb3008d390aa..b101feb2aab4 100644
--- a/fs/xfs/xfs_pnfs.c
+++ b/fs/xfs/xfs_pnfs.c
@@ -58,9 +58,8 @@ xfs_fs_get_uuid(
{
struct xfs_mount *mp = XFS_M(sb);
- printk_once(KERN_NOTICE
-"XFS (%s): using experimental pNFS feature, use at your own risk!\n",
- mp->m_super->s_id);
+ xfs_notice_once(mp,
+"Using experimental pNFS feature, use at your own risk!");
if (*len < sizeof(uuid_t))
return -EINVAL;
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index c225691fad15..d6cd83317344 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -558,7 +558,7 @@ xfs_qm_set_defquota(
return;
ddqp = &dqp->q_core;
- defq = xfs_get_defquota(dqp, qinf);
+ defq = xfs_get_defquota(qinf, xfs_dquot_type(dqp));
/*
* Timers and warnings have been already set, let's just set the
@@ -577,19 +577,22 @@ xfs_qm_set_defquota(
static void
xfs_qm_init_timelimits(
struct xfs_mount *mp,
- struct xfs_quotainfo *qinf)
+ uint type)
{
+ struct xfs_quotainfo *qinf = mp->m_quotainfo;
+ struct xfs_def_quota *defq;
struct xfs_disk_dquot *ddqp;
struct xfs_dquot *dqp;
- uint type;
int error;
- qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
- qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
- qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
- qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
- qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
- qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
+ defq = xfs_get_defquota(qinf, type);
+
+ defq->btimelimit = XFS_QM_BTIMELIMIT;
+ defq->itimelimit = XFS_QM_ITIMELIMIT;
+ defq->rtbtimelimit = XFS_QM_RTBTIMELIMIT;
+ defq->bwarnlimit = XFS_QM_BWARNLIMIT;
+ defq->iwarnlimit = XFS_QM_IWARNLIMIT;
+ defq->rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
/*
* We try to get the limits from the superuser's limits fields.
@@ -597,39 +600,30 @@ xfs_qm_init_timelimits(
*
* Since we may not have done a quotacheck by this point, just read
* the dquot without attaching it to any hashtables or lists.
- *
- * Timers and warnings are globally set by the first timer found in
- * user/group/proj quota types, otherwise a default value is used.
- * This should be split into different fields per quota type.
*/
- if (XFS_IS_UQUOTA_RUNNING(mp))
- type = XFS_DQ_USER;
- else if (XFS_IS_GQUOTA_RUNNING(mp))
- type = XFS_DQ_GROUP;
- else
- type = XFS_DQ_PROJ;
error = xfs_qm_dqget_uncached(mp, 0, type, &dqp);
if (error)
return;
ddqp = &dqp->q_core;
+
/*
* The warnings and timers set the grace period given to
* a user or group before they can no longer perform any
* more writing. If it is zero, a default is used.
*/
if (ddqp->d_btimer)
- qinf->qi_btimelimit = be32_to_cpu(ddqp->d_btimer);
+ defq->btimelimit = be32_to_cpu(ddqp->d_btimer);
if (ddqp->d_itimer)
- qinf->qi_itimelimit = be32_to_cpu(ddqp->d_itimer);
+ defq->itimelimit = be32_to_cpu(ddqp->d_itimer);
if (ddqp->d_rtbtimer)
- qinf->qi_rtbtimelimit = be32_to_cpu(ddqp->d_rtbtimer);
+ defq->rtbtimelimit = be32_to_cpu(ddqp->d_rtbtimer);
if (ddqp->d_bwarns)
- qinf->qi_bwarnlimit = be16_to_cpu(ddqp->d_bwarns);
+ defq->bwarnlimit = be16_to_cpu(ddqp->d_bwarns);
if (ddqp->d_iwarns)
- qinf->qi_iwarnlimit = be16_to_cpu(ddqp->d_iwarns);
+ defq->iwarnlimit = be16_to_cpu(ddqp->d_iwarns);
if (ddqp->d_rtbwarns)
- qinf->qi_rtbwarnlimit = be16_to_cpu(ddqp->d_rtbwarns);
+ defq->rtbwarnlimit = be16_to_cpu(ddqp->d_rtbwarns);
xfs_qm_dqdestroy(dqp);
}
@@ -675,7 +669,9 @@ xfs_qm_init_quotainfo(
mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
- xfs_qm_init_timelimits(mp, qinf);
+ xfs_qm_init_timelimits(mp, XFS_DQ_USER);
+ xfs_qm_init_timelimits(mp, XFS_DQ_GROUP);
+ xfs_qm_init_timelimits(mp, XFS_DQ_PROJ);
if (XFS_IS_UQUOTA_RUNNING(mp))
xfs_qm_set_defquota(mp, XFS_DQ_USER, qinf);
@@ -780,7 +776,8 @@ xfs_qm_qino_alloc(
}
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
- XFS_QM_QINOCREATE_SPACE_RES(mp), 0, 0, &tp);
+ need_alloc ? XFS_QM_QINOCREATE_SPACE_RES(mp) : 0,
+ 0, 0, &tp);
if (error)
return error;
@@ -1116,7 +1113,7 @@ xfs_qm_quotacheck_dqadjust(
*/
if (dqp->q_core.d_id) {
xfs_qm_adjust_dqlimits(mp, dqp);
- xfs_qm_adjust_dqtimers(mp, &dqp->q_core);
+ xfs_qm_adjust_dqtimers(mp, dqp);
}
dqp->dq_flags |= XFS_DQ_DIRTY;
@@ -1730,8 +1727,7 @@ xfs_qm_vop_dqalloc(
pq = xfs_qm_dqhold(ip->i_pdquot);
}
}
- if (uq)
- trace_xfs_dquot_dqalloc(ip);
+ trace_xfs_dquot_dqalloc(ip);
xfs_iunlock(ip, lockflags);
if (O_udqpp)
@@ -1808,7 +1804,7 @@ xfs_qm_vop_chown_reserve(
{
struct xfs_mount *mp = ip->i_mount;
uint64_t delblks;
- unsigned int blkflags, prjflags = 0;
+ unsigned int blkflags;
struct xfs_dquot *udq_unres = NULL;
struct xfs_dquot *gdq_unres = NULL;
struct xfs_dquot *pdq_unres = NULL;
@@ -1849,7 +1845,6 @@ xfs_qm_vop_chown_reserve(
if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp &&
ip->i_d.di_projid != be32_to_cpu(pdqp->q_core.d_id)) {
- prjflags = XFS_QMOPT_ENOSPC;
pdq_delblks = pdqp;
if (delblks) {
ASSERT(ip->i_pdquot);
@@ -1859,8 +1854,7 @@ xfs_qm_vop_chown_reserve(
error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
udq_delblks, gdq_delblks, pdq_delblks,
- ip->i_d.di_nblocks, 1,
- flags | blkflags | prjflags);
+ ip->i_d.di_nblocks, 1, flags | blkflags);
if (error)
return error;
@@ -1878,8 +1872,7 @@ xfs_qm_vop_chown_reserve(
ASSERT(udq_unres || gdq_unres || pdq_unres);
error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
udq_delblks, gdq_delblks, pdq_delblks,
- (xfs_qcnt_t)delblks, 0,
- flags | blkflags | prjflags);
+ (xfs_qcnt_t)delblks, 0, flags | blkflags);
if (error)
return error;
xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
@@ -1932,7 +1925,6 @@ xfs_qm_vop_create_dqattach(
return;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
- ASSERT(XFS_IS_QUOTA_RUNNING(mp));
if (udqp && XFS_IS_UQUOTA_ON(mp)) {
ASSERT(ip->i_udquot == NULL);
diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h
index 4e57edca8bce..7b0e771fcbce 100644
--- a/fs/xfs/xfs_qm.h
+++ b/fs/xfs/xfs_qm.h
@@ -41,13 +41,20 @@ extern struct kmem_zone *xfs_qm_dqtrxzone;
*/
#define XFS_DQUOT_CLUSTER_SIZE_FSB (xfs_filblks_t)1
+/* Defaults for each quota type: time limits, warn limits, usage limits */
struct xfs_def_quota {
- xfs_qcnt_t bhardlimit; /* default data blk hard limit */
- xfs_qcnt_t bsoftlimit; /* default data blk soft limit */
- xfs_qcnt_t ihardlimit; /* default inode count hard limit */
- xfs_qcnt_t isoftlimit; /* default inode count soft limit */
- xfs_qcnt_t rtbhardlimit; /* default realtime blk hard limit */
- xfs_qcnt_t rtbsoftlimit; /* default realtime blk soft limit */
+ time64_t btimelimit; /* limit for blks timer */
+ time64_t itimelimit; /* limit for inodes timer */
+ time64_t rtbtimelimit; /* limit for rt blks timer */
+ xfs_qwarncnt_t bwarnlimit; /* limit for blks warnings */
+ xfs_qwarncnt_t iwarnlimit; /* limit for inodes warnings */
+ xfs_qwarncnt_t rtbwarnlimit; /* limit for rt blks warnings */
+ xfs_qcnt_t bhardlimit; /* default data blk hard limit */
+ xfs_qcnt_t bsoftlimit; /* default data blk soft limit */
+ xfs_qcnt_t ihardlimit; /* default inode count hard limit */
+ xfs_qcnt_t isoftlimit; /* default inode count soft limit */
+ xfs_qcnt_t rtbhardlimit; /* default realtime blk hard limit */
+ xfs_qcnt_t rtbsoftlimit; /* default realtime blk soft limit */
};
/*
@@ -55,28 +62,22 @@ struct xfs_def_quota {
* The mount structure keeps a pointer to this.
*/
struct xfs_quotainfo {
- struct radix_tree_root qi_uquota_tree;
- struct radix_tree_root qi_gquota_tree;
- struct radix_tree_root qi_pquota_tree;
- struct mutex qi_tree_lock;
+ struct radix_tree_root qi_uquota_tree;
+ struct radix_tree_root qi_gquota_tree;
+ struct radix_tree_root qi_pquota_tree;
+ struct mutex qi_tree_lock;
struct xfs_inode *qi_uquotaip; /* user quota inode */
struct xfs_inode *qi_gquotaip; /* group quota inode */
struct xfs_inode *qi_pquotaip; /* project quota inode */
- struct list_lru qi_lru;
- int qi_dquots;
- time64_t qi_btimelimit; /* limit for blks timer */
- time64_t qi_itimelimit; /* limit for inodes timer */
- time64_t qi_rtbtimelimit;/* limit for rt blks timer */
- xfs_qwarncnt_t qi_bwarnlimit; /* limit for blks warnings */
- xfs_qwarncnt_t qi_iwarnlimit; /* limit for inodes warnings */
- xfs_qwarncnt_t qi_rtbwarnlimit;/* limit for rt blks warnings */
- struct mutex qi_quotaofflock;/* to serialize quotaoff */
- xfs_filblks_t qi_dqchunklen; /* # BBs in a chunk of dqs */
- uint qi_dqperchunk; /* # ondisk dqs in above chunk */
+ struct list_lru qi_lru;
+ int qi_dquots;
+ struct mutex qi_quotaofflock;/* to serialize quotaoff */
+ xfs_filblks_t qi_dqchunklen; /* # BBs in a chunk of dqs */
+ uint qi_dqperchunk; /* # ondisk dqs in above chunk */
struct xfs_def_quota qi_usr_default;
struct xfs_def_quota qi_grp_default;
struct xfs_def_quota qi_prj_default;
- struct shrinker qi_shrinker;
+ struct shrinker qi_shrinker;
};
static inline struct radix_tree_root *
@@ -113,6 +114,17 @@ xfs_quota_inode(xfs_mount_t *mp, uint dq_flags)
return NULL;
}
+static inline int
+xfs_dquot_type(struct xfs_dquot *dqp)
+{
+ if (XFS_QM_ISUDQ(dqp))
+ return XFS_DQ_USER;
+ if (XFS_QM_ISGDQ(dqp))
+ return XFS_DQ_GROUP;
+ ASSERT(XFS_QM_ISPDQ(dqp));
+ return XFS_DQ_PROJ;
+}
+
extern void xfs_trans_mod_dquot(struct xfs_trans *tp, struct xfs_dquot *dqp,
uint field, int64_t delta);
extern void xfs_trans_dqjoin(struct xfs_trans *, struct xfs_dquot *);
@@ -164,19 +176,19 @@ extern int xfs_qm_scall_quotaon(struct xfs_mount *, uint);
extern int xfs_qm_scall_quotaoff(struct xfs_mount *, uint);
static inline struct xfs_def_quota *
-xfs_get_defquota(struct xfs_dquot *dqp, struct xfs_quotainfo *qi)
+xfs_get_defquota(struct xfs_quotainfo *qi, int type)
{
- struct xfs_def_quota *defq;
-
- if (XFS_QM_ISUDQ(dqp))
- defq = &qi->qi_usr_default;
- else if (XFS_QM_ISGDQ(dqp))
- defq = &qi->qi_grp_default;
- else {
- ASSERT(XFS_QM_ISPDQ(dqp));
- defq = &qi->qi_prj_default;
+ switch (type) {
+ case XFS_DQ_USER:
+ return &qi->qi_usr_default;
+ case XFS_DQ_GROUP:
+ return &qi->qi_grp_default;
+ case XFS_DQ_PROJ:
+ return &qi->qi_prj_default;
+ default:
+ ASSERT(0);
+ return NULL;
}
- return defq;
}
#endif /* __XFS_QM_H__ */
diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c
index 5d5ac65aa1cc..7effd7a28136 100644
--- a/fs/xfs/xfs_qm_syscalls.c
+++ b/fs/xfs/xfs_qm_syscalls.c
@@ -302,7 +302,7 @@ xfs_qm_scall_trunc_qfile(
goto out_unlock;
}
- ASSERT(ip->i_d.di_nextents == 0);
+ ASSERT(ip->i_df.if_nextents == 0);
xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
error = xfs_trans_commit(tp);
@@ -357,11 +357,11 @@ xfs_qm_scall_quotaon(
int error;
uint qf;
- flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
/*
- * Switching on quota accounting must be done at mount time.
+ * Switching on quota accounting must be done at mount time;
+ * only quota enforcement flags are considered here.
*/
- flags &= ~(XFS_ALL_QUOTA_ACCT);
+ flags &= XFS_ALL_QUOTA_ENFD;
if (flags == 0) {
xfs_debug(mp, "%s: zero flags, m_qflags=%x",
@@ -479,7 +479,7 @@ xfs_qm_scall_setqlim(
goto out_unlock;
}
- defq = xfs_get_defquota(dqp, q);
+ defq = xfs_get_defquota(q, xfs_dquot_type(dqp));
xfs_dqunlock(dqp);
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_setqlim, 0, 0, 0, &tp);
@@ -555,32 +555,40 @@ xfs_qm_scall_setqlim(
ddq->d_rtbwarns = cpu_to_be16(newlim->d_rt_spc_warns);
if (id == 0) {
- /*
- * Timelimits for the super user set the relative time
- * the other users can be over quota for this file system.
- * If it is zero a default is used. Ditto for the default
- * soft and hard limit values (already done, above), and
- * for warnings.
- */
- if (newlim->d_fieldmask & QC_SPC_TIMER) {
- q->qi_btimelimit = newlim->d_spc_timer;
- ddq->d_btimer = cpu_to_be32(newlim->d_spc_timer);
- }
- if (newlim->d_fieldmask & QC_INO_TIMER) {
- q->qi_itimelimit = newlim->d_ino_timer;
- ddq->d_itimer = cpu_to_be32(newlim->d_ino_timer);
- }
- if (newlim->d_fieldmask & QC_RT_SPC_TIMER) {
- q->qi_rtbtimelimit = newlim->d_rt_spc_timer;
- ddq->d_rtbtimer = cpu_to_be32(newlim->d_rt_spc_timer);
- }
if (newlim->d_fieldmask & QC_SPC_WARNS)
- q->qi_bwarnlimit = newlim->d_spc_warns;
+ defq->bwarnlimit = newlim->d_spc_warns;
if (newlim->d_fieldmask & QC_INO_WARNS)
- q->qi_iwarnlimit = newlim->d_ino_warns;
+ defq->iwarnlimit = newlim->d_ino_warns;
if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
- q->qi_rtbwarnlimit = newlim->d_rt_spc_warns;
- } else {
+ defq->rtbwarnlimit = newlim->d_rt_spc_warns;
+ }
+
+ /*
+ * Timelimits for the super user set the relative time the other users
+ * can be over quota for this file system. If it is zero a default is
+ * used. Ditto for the default soft and hard limit values (already
+ * done, above), and for warnings.
+ *
+ * For other IDs, userspace can bump out the grace period if over
+ * the soft limit.
+ */
+ if (newlim->d_fieldmask & QC_SPC_TIMER)
+ ddq->d_btimer = cpu_to_be32(newlim->d_spc_timer);
+ if (newlim->d_fieldmask & QC_INO_TIMER)
+ ddq->d_itimer = cpu_to_be32(newlim->d_ino_timer);
+ if (newlim->d_fieldmask & QC_RT_SPC_TIMER)
+ ddq->d_rtbtimer = cpu_to_be32(newlim->d_rt_spc_timer);
+
+ if (id == 0) {
+ if (newlim->d_fieldmask & QC_SPC_TIMER)
+ defq->btimelimit = newlim->d_spc_timer;
+ if (newlim->d_fieldmask & QC_INO_TIMER)
+ defq->itimelimit = newlim->d_ino_timer;
+ if (newlim->d_fieldmask & QC_RT_SPC_TIMER)
+ defq->rtbtimelimit = newlim->d_rt_spc_timer;
+ }
+
+ if (id != 0) {
/*
* If the user is now over quota, start the timelimit.
* The user will not be 'warned'.
@@ -588,7 +596,7 @@ xfs_qm_scall_setqlim(
* is on or off. We don't really want to bother with iterating
* over all ondisk dquots and turning the timers on/off.
*/
- xfs_qm_adjust_dqtimers(mp, ddq);
+ xfs_qm_adjust_dqtimers(mp, dqp);
}
dqp->dq_flags |= XFS_DQ_DIRTY;
xfs_trans_log_dquot(tp, dqp);
@@ -729,9 +737,10 @@ xfs_qm_scall_getquota_next(
STATIC int
xfs_dqrele_inode(
struct xfs_inode *ip,
- int flags,
void *args)
{
+ uint *flags = args;
+
/* skip quota inodes */
if (ip == ip->i_mount->m_quotainfo->qi_uquotaip ||
ip == ip->i_mount->m_quotainfo->qi_gquotaip ||
@@ -743,15 +752,15 @@ xfs_dqrele_inode(
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
- if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) {
+ if ((*flags & XFS_UQUOTA_ACCT) && ip->i_udquot) {
xfs_qm_dqrele(ip->i_udquot);
ip->i_udquot = NULL;
}
- if ((flags & XFS_GQUOTA_ACCT) && ip->i_gdquot) {
+ if ((*flags & XFS_GQUOTA_ACCT) && ip->i_gdquot) {
xfs_qm_dqrele(ip->i_gdquot);
ip->i_gdquot = NULL;
}
- if ((flags & XFS_PQUOTA_ACCT) && ip->i_pdquot) {
+ if ((*flags & XFS_PQUOTA_ACCT) && ip->i_pdquot) {
xfs_qm_dqrele(ip->i_pdquot);
ip->i_pdquot = NULL;
}
@@ -768,10 +777,10 @@ xfs_dqrele_inode(
*/
void
xfs_qm_dqrele_all_inodes(
- struct xfs_mount *mp,
- uint flags)
+ struct xfs_mount *mp,
+ uint flags)
{
ASSERT(mp->m_quotainfo);
- xfs_inode_ag_iterator_flags(mp, xfs_dqrele_inode, flags, NULL,
- XFS_AGITER_INEW_WAIT);
+ xfs_inode_walk(mp, XFS_INODE_WALK_INEW_WAIT, xfs_dqrele_inode,
+ &flags, XFS_ICI_NO_TAG);
}
diff --git a/fs/xfs/xfs_quotaops.c b/fs/xfs/xfs_quotaops.c
index 38669e827206..bf809b77a316 100644
--- a/fs/xfs/xfs_quotaops.c
+++ b/fs/xfs/xfs_quotaops.c
@@ -21,10 +21,10 @@ xfs_qm_fill_state(
struct qc_type_state *tstate,
struct xfs_mount *mp,
struct xfs_inode *ip,
- xfs_ino_t ino)
+ xfs_ino_t ino,
+ struct xfs_def_quota *defq)
{
- struct xfs_quotainfo *q = mp->m_quotainfo;
- bool tempqip = false;
+ bool tempqip = false;
tstate->ino = ino;
if (!ip && ino == NULLFSINO)
@@ -36,13 +36,13 @@ xfs_qm_fill_state(
}
tstate->flags |= QCI_SYSFILE;
tstate->blocks = ip->i_d.di_nblocks;
- tstate->nextents = ip->i_d.di_nextents;
- tstate->spc_timelimit = (u32)q->qi_btimelimit;
- tstate->ino_timelimit = (u32)q->qi_itimelimit;
- tstate->rt_spc_timelimit = (u32)q->qi_rtbtimelimit;
- tstate->spc_warnlimit = q->qi_bwarnlimit;
- tstate->ino_warnlimit = q->qi_iwarnlimit;
- tstate->rt_spc_warnlimit = q->qi_rtbwarnlimit;
+ tstate->nextents = ip->i_df.if_nextents;
+ tstate->spc_timelimit = (u32)defq->btimelimit;
+ tstate->ino_timelimit = (u32)defq->itimelimit;
+ tstate->rt_spc_timelimit = (u32)defq->rtbtimelimit;
+ tstate->spc_warnlimit = defq->bwarnlimit;
+ tstate->ino_warnlimit = defq->iwarnlimit;
+ tstate->rt_spc_warnlimit = defq->rtbwarnlimit;
if (tempqip)
xfs_irele(ip);
}
@@ -77,11 +77,11 @@ xfs_fs_get_quota_state(
state->s_state[PRJQUOTA].flags |= QCI_LIMITS_ENFORCED;
xfs_qm_fill_state(&state->s_state[USRQUOTA], mp, q->qi_uquotaip,
- mp->m_sb.sb_uquotino);
+ mp->m_sb.sb_uquotino, &q->qi_usr_default);
xfs_qm_fill_state(&state->s_state[GRPQUOTA], mp, q->qi_gquotaip,
- mp->m_sb.sb_gquotino);
+ mp->m_sb.sb_gquotino, &q->qi_grp_default);
xfs_qm_fill_state(&state->s_state[PRJQUOTA], mp, q->qi_pquotaip,
- mp->m_sb.sb_pquotino);
+ mp->m_sb.sb_pquotino, &q->qi_prj_default);
return 0;
}
@@ -109,8 +109,8 @@ xfs_fs_set_info(
int type,
struct qc_info *info)
{
- struct xfs_mount *mp = XFS_M(sb);
- struct qc_dqblk newlim;
+ struct xfs_mount *mp = XFS_M(sb);
+ struct qc_dqblk newlim;
if (sb_rdonly(sb))
return -EROFS;
diff --git a/fs/xfs/xfs_refcount_item.c b/fs/xfs/xfs_refcount_item.c
index 8eeed73928cd..c81639891e29 100644
--- a/fs/xfs/xfs_refcount_item.c
+++ b/fs/xfs/xfs_refcount_item.c
@@ -18,16 +18,20 @@
#include "xfs_log.h"
#include "xfs_refcount.h"
#include "xfs_error.h"
+#include "xfs_log_priv.h"
+#include "xfs_log_recover.h"
kmem_zone_t *xfs_cui_zone;
kmem_zone_t *xfs_cud_zone;
+static const struct xfs_item_ops xfs_cui_item_ops;
+
static inline struct xfs_cui_log_item *CUI_ITEM(struct xfs_log_item *lip)
{
return container_of(lip, struct xfs_cui_log_item, cui_item);
}
-void
+STATIC void
xfs_cui_item_free(
struct xfs_cui_log_item *cuip)
{
@@ -44,13 +48,13 @@ xfs_cui_item_free(
* committed vs unpin operations in bulk insert operations. Hence the reference
* count to ensure only the last caller frees the CUI.
*/
-void
+STATIC void
xfs_cui_release(
struct xfs_cui_log_item *cuip)
{
ASSERT(atomic_read(&cuip->cui_refcount) > 0);
if (atomic_dec_and_test(&cuip->cui_refcount)) {
- xfs_trans_ail_remove(&cuip->cui_item, SHUTDOWN_LOG_IO_ERROR);
+ xfs_trans_ail_delete(&cuip->cui_item, SHUTDOWN_LOG_IO_ERROR);
xfs_cui_item_free(cuip);
}
}
@@ -123,17 +127,10 @@ xfs_cui_item_release(
xfs_cui_release(CUI_ITEM(lip));
}
-static const struct xfs_item_ops xfs_cui_item_ops = {
- .iop_size = xfs_cui_item_size,
- .iop_format = xfs_cui_item_format,
- .iop_unpin = xfs_cui_item_unpin,
- .iop_release = xfs_cui_item_release,
-};
-
/*
* Allocate and initialize a CUI item with the given number of extents.
*/
-struct xfs_cui_log_item *
+STATIC struct xfs_cui_log_item *
xfs_cui_init(
struct xfs_mount *mp,
uint nextents)
@@ -284,27 +281,6 @@ xfs_refcount_update_diff_items(
XFS_FSB_TO_AGNO(mp, rb->ri_startblock);
}
-/* Get a CUI. */
-STATIC void *
-xfs_refcount_update_create_intent(
- struct xfs_trans *tp,
- unsigned int count)
-{
- struct xfs_cui_log_item *cuip;
-
- ASSERT(tp != NULL);
- ASSERT(count > 0);
-
- cuip = xfs_cui_init(tp->t_mountp, count);
- ASSERT(cuip != NULL);
-
- /*
- * Get a log_item_desc to point at the new item.
- */
- xfs_trans_add_item(tp, &cuip->cui_item);
- return cuip;
-}
-
/* Set the phys extent flags for this reverse mapping. */
static void
xfs_trans_set_refcount_flags(
@@ -328,16 +304,12 @@ xfs_trans_set_refcount_flags(
STATIC void
xfs_refcount_update_log_item(
struct xfs_trans *tp,
- void *intent,
- struct list_head *item)
+ struct xfs_cui_log_item *cuip,
+ struct xfs_refcount_intent *refc)
{
- struct xfs_cui_log_item *cuip = intent;
- struct xfs_refcount_intent *refc;
uint next_extent;
struct xfs_phys_extent *ext;
- refc = container_of(item, struct xfs_refcount_intent, ri_list);
-
tp->t_flags |= XFS_TRANS_DIRTY;
set_bit(XFS_LI_DIRTY, &cuip->cui_item.li_flags);
@@ -354,23 +326,44 @@ xfs_refcount_update_log_item(
xfs_trans_set_refcount_flags(ext, refc->ri_type);
}
+static struct xfs_log_item *
+xfs_refcount_update_create_intent(
+ struct xfs_trans *tp,
+ struct list_head *items,
+ unsigned int count,
+ bool sort)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_cui_log_item *cuip = xfs_cui_init(mp, count);
+ struct xfs_refcount_intent *refc;
+
+ ASSERT(count > 0);
+
+ xfs_trans_add_item(tp, &cuip->cui_item);
+ if (sort)
+ list_sort(mp, items, xfs_refcount_update_diff_items);
+ list_for_each_entry(refc, items, ri_list)
+ xfs_refcount_update_log_item(tp, cuip, refc);
+ return &cuip->cui_item;
+}
+
 /* Get a CUD so we can process all the deferred refcount updates. */
-STATIC void *
+static struct xfs_log_item *
xfs_refcount_update_create_done(
struct xfs_trans *tp,
- void *intent,
+ struct xfs_log_item *intent,
unsigned int count)
{
- return xfs_trans_get_cud(tp, intent);
+ return &xfs_trans_get_cud(tp, CUI_ITEM(intent))->cud_item;
}
/* Process a deferred refcount update. */
STATIC int
xfs_refcount_update_finish_item(
struct xfs_trans *tp,
+ struct xfs_log_item *done,
struct list_head *item,
- void *done_item,
- void **state)
+ struct xfs_btree_cur **state)
{
struct xfs_refcount_intent *refc;
xfs_fsblock_t new_fsb;
@@ -378,12 +371,10 @@ xfs_refcount_update_finish_item(
int error;
refc = container_of(item, struct xfs_refcount_intent, ri_list);
- error = xfs_trans_log_finish_refcount_update(tp, done_item,
- refc->ri_type,
- refc->ri_startblock,
- refc->ri_blockcount,
- &new_fsb, &new_aglen,
- (struct xfs_btree_cur **)state);
+ error = xfs_trans_log_finish_refcount_update(tp, CUD_ITEM(done),
+ refc->ri_type, refc->ri_startblock, refc->ri_blockcount,
+ &new_fsb, &new_aglen, state);
+
/* Did we run out of reservation? Requeue what we didn't finish. */
if (!error && new_aglen > 0) {
ASSERT(refc->ri_type == XFS_REFCOUNT_INCREASE ||
@@ -396,24 +387,12 @@ xfs_refcount_update_finish_item(
return error;
}
-/* Clean up after processing deferred refcounts. */
-STATIC void
-xfs_refcount_update_finish_cleanup(
- struct xfs_trans *tp,
- void *state,
- int error)
-{
- struct xfs_btree_cur *rcur = state;
-
- xfs_refcount_finish_one_cleanup(tp, rcur, error);
-}
-
/* Abort all pending CUIs. */
STATIC void
xfs_refcount_update_abort_intent(
- void *intent)
+ struct xfs_log_item *intent)
{
- xfs_cui_release(intent);
+ xfs_cui_release(CUI_ITEM(intent));
}
/* Cancel a deferred refcount update. */
@@ -429,13 +408,11 @@ xfs_refcount_update_cancel_item(
const struct xfs_defer_op_type xfs_refcount_update_defer_type = {
.max_items = XFS_CUI_MAX_FAST_EXTENTS,
- .diff_items = xfs_refcount_update_diff_items,
.create_intent = xfs_refcount_update_create_intent,
.abort_intent = xfs_refcount_update_abort_intent,
- .log_item = xfs_refcount_update_log_item,
.create_done = xfs_refcount_update_create_done,
.finish_item = xfs_refcount_update_finish_item,
- .finish_cleanup = xfs_refcount_update_finish_cleanup,
+ .finish_cleanup = xfs_refcount_finish_one_cleanup,
.cancel_item = xfs_refcount_update_cancel_item,
};
@@ -443,28 +420,27 @@ const struct xfs_defer_op_type xfs_refcount_update_defer_type = {
* Process a refcount update intent item that was recovered from the log.
* We need to update the refcountbt.
*/
-int
-xfs_cui_recover(
- struct xfs_trans *parent_tp,
- struct xfs_cui_log_item *cuip)
+STATIC int
+xfs_cui_item_recover(
+ struct xfs_log_item *lip,
+ struct xfs_trans *parent_tp)
{
- int i;
- int error = 0;
- unsigned int refc_type;
+ struct xfs_bmbt_irec irec;
+ struct xfs_cui_log_item *cuip = CUI_ITEM(lip);
struct xfs_phys_extent *refc;
- xfs_fsblock_t startblock_fsb;
- bool op_ok;
struct xfs_cud_log_item *cudp;
struct xfs_trans *tp;
struct xfs_btree_cur *rcur = NULL;
- enum xfs_refcount_intent_type type;
+ struct xfs_mount *mp = parent_tp->t_mountp;
+ xfs_fsblock_t startblock_fsb;
xfs_fsblock_t new_fsb;
xfs_extlen_t new_len;
- struct xfs_bmbt_irec irec;
+ unsigned int refc_type;
+ bool op_ok;
bool requeue_only = false;
- struct xfs_mount *mp = parent_tp->t_mountp;
-
- ASSERT(!test_bit(XFS_CUI_RECOVERED, &cuip->cui_flags));
+ enum xfs_refcount_intent_type type;
+ int i;
+ int error = 0;
/*
* First check the validity of the extents described by the
@@ -495,7 +471,6 @@ xfs_cui_recover(
* This will pull the CUI from the AIL and
* free the memory associated with it.
*/
- set_bit(XFS_CUI_RECOVERED, &cuip->cui_flags);
xfs_cui_release(cuip);
return -EFSCORRUPTED;
}
@@ -579,7 +554,6 @@ xfs_cui_recover(
}
xfs_refcount_finish_one_cleanup(tp, rcur, error);
- set_bit(XFS_CUI_RECOVERED, &cuip->cui_flags);
xfs_defer_move(parent_tp, tp);
error = xfs_trans_commit(tp);
return error;
@@ -590,3 +564,117 @@ abort_error:
xfs_trans_cancel(tp);
return error;
}
+
+STATIC bool
+xfs_cui_item_match(
+ struct xfs_log_item *lip,
+ uint64_t intent_id)
+{
+ return CUI_ITEM(lip)->cui_format.cui_id == intent_id;
+}
+
+static const struct xfs_item_ops xfs_cui_item_ops = {
+ .iop_size = xfs_cui_item_size,
+ .iop_format = xfs_cui_item_format,
+ .iop_unpin = xfs_cui_item_unpin,
+ .iop_release = xfs_cui_item_release,
+ .iop_recover = xfs_cui_item_recover,
+ .iop_match = xfs_cui_item_match,
+};
+
+/*
+ * Copy a CUI format buffer from the given buf into the destination
+ * CUI format structure. The CUI/CUD items were designed not to need any
+ * special alignment handling.
+ */
+static int
+xfs_cui_copy_format(
+ struct xfs_log_iovec *buf,
+ struct xfs_cui_log_format *dst_cui_fmt)
+{
+ struct xfs_cui_log_format *src_cui_fmt;
+ uint len;
+
+ src_cui_fmt = buf->i_addr;
+ len = xfs_cui_log_format_sizeof(src_cui_fmt->cui_nextents);
+
+ if (buf->i_len == len) {
+ memcpy(dst_cui_fmt, src_cui_fmt, len);
+ return 0;
+ }
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
+ return -EFSCORRUPTED;
+}
+
+/*
+ * This routine is called to create an in-core extent refcount update
+ * item from the cui format structure which was logged on disk.
+ * It allocates an in-core cui, copies the extents from the format
+ * structure into it, and adds the cui to the AIL with the given
+ * LSN.
+ */
+STATIC int
+xlog_recover_cui_commit_pass2(
+ struct xlog *log,
+ struct list_head *buffer_list,
+ struct xlog_recover_item *item,
+ xfs_lsn_t lsn)
+{
+ int error;
+ struct xfs_mount *mp = log->l_mp;
+ struct xfs_cui_log_item *cuip;
+ struct xfs_cui_log_format *cui_formatp;
+
+ cui_formatp = item->ri_buf[0].i_addr;
+
+ cuip = xfs_cui_init(mp, cui_formatp->cui_nextents);
+ error = xfs_cui_copy_format(&item->ri_buf[0], &cuip->cui_format);
+ if (error) {
+ xfs_cui_item_free(cuip);
+ return error;
+ }
+ atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents);
+ /*
+ * Insert the intent into the AIL directly and drop one reference so
+ * that finishing or canceling the work will drop the other.
+ */
+ xfs_trans_ail_insert(log->l_ailp, &cuip->cui_item, lsn);
+ xfs_cui_release(cuip);
+ return 0;
+}
+
+const struct xlog_recover_item_ops xlog_cui_item_ops = {
+ .item_type = XFS_LI_CUI,
+ .commit_pass2 = xlog_recover_cui_commit_pass2,
+};
+
+/*
+ * This routine is called when a CUD format structure is found in a committed
+ * transaction in the log. Its purpose is to cancel the corresponding CUI if it
+ * was still in the log. To do this it searches the AIL for the CUI with an id
+ * equal to that in the CUD format structure. If we find it we drop the CUD
+ * reference, which removes the CUI from the AIL and frees it.
+ */
+STATIC int
+xlog_recover_cud_commit_pass2(
+ struct xlog *log,
+ struct list_head *buffer_list,
+ struct xlog_recover_item *item,
+ xfs_lsn_t lsn)
+{
+ struct xfs_cud_log_format *cud_formatp;
+
+ cud_formatp = item->ri_buf[0].i_addr;
+ if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format)) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
+ return -EFSCORRUPTED;
+ }
+
+ xlog_recover_release_intent(log, XFS_LI_CUI, cud_formatp->cud_cui_id);
+ return 0;
+}
+
+const struct xlog_recover_item_ops xlog_cud_item_ops = {
+ .item_type = XFS_LI_CUD,
+ .commit_pass2 = xlog_recover_cud_commit_pass2,
+};
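
The pass2 CUD handler above depends on xlog_recover_release_intent() finding the matching CUI through the new iop_match hook. A minimal sketch of that lookup, assuming the standard AIL cursor helpers; the real helper in xfs_log_recover.c may differ in detail:

	void
	xlog_recover_release_intent(
		struct xlog		*log,
		unsigned short		intent_type,
		uint64_t		intent_id)
	{
		struct xfs_ail_cursor	cur;
		struct xfs_ail		*ailp = log->l_ailp;
		struct xfs_log_item	*lip;

		spin_lock(&ailp->ail_lock);
		for (lip = xfs_trans_ail_cursor_first(ailp, &cur, 0); lip;
		     lip = xfs_trans_ail_cursor_next(ailp, &cur)) {
			if (lip->li_type != intent_type)
				continue;
			if (!lip->li_ops->iop_match(lip, intent_id))
				continue;

			/* releasing pulls the intent off the AIL and frees it */
			spin_unlock(&ailp->ail_lock);
			lip->li_ops->iop_release(lip);
			spin_lock(&ailp->ail_lock);
			break;
		}

		xfs_trans_ail_cursor_done(&cur);
		spin_unlock(&ailp->ail_lock);
	}
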
diff --git a/fs/xfs/xfs_refcount_item.h b/fs/xfs/xfs_refcount_item.h
index e47530f30489..f4f2e836540b 100644
--- a/fs/xfs/xfs_refcount_item.h
+++ b/fs/xfs/xfs_refcount_item.h
@@ -33,11 +33,6 @@ struct kmem_zone;
#define XFS_CUI_MAX_FAST_EXTENTS 16
/*
- * Define CUI flag bits. Manipulated by set/clear/test_bit operators.
- */
-#define XFS_CUI_RECOVERED 1
-
-/*
* This is the "refcount update intent" log item. It is used to log
 * the fact that some refcounts need to change. It is used in
* conjunction with the "refcount update done" log item described
@@ -51,7 +46,6 @@ struct xfs_cui_log_item {
struct xfs_log_item cui_item;
atomic_t cui_refcount;
atomic_t cui_next_extent;
- unsigned long cui_flags; /* misc flags */
struct xfs_cui_log_format cui_format;
};
@@ -77,9 +71,4 @@ struct xfs_cud_log_item {
extern struct kmem_zone *xfs_cui_zone;
extern struct kmem_zone *xfs_cud_zone;
-struct xfs_cui_log_item *xfs_cui_init(struct xfs_mount *, uint);
-void xfs_cui_item_free(struct xfs_cui_log_item *);
-void xfs_cui_release(struct xfs_cui_log_item *);
-int xfs_cui_recover(struct xfs_trans *parent_tp, struct xfs_cui_log_item *cuip);
-
#endif /* __XFS_REFCOUNT_ITEM_H__ */
diff --git a/fs/xfs/xfs_rmap_item.c b/fs/xfs/xfs_rmap_item.c
index 4911b68f95dd..a86599db20a6 100644
--- a/fs/xfs/xfs_rmap_item.c
+++ b/fs/xfs/xfs_rmap_item.c
@@ -18,16 +18,20 @@
#include "xfs_log.h"
#include "xfs_rmap.h"
#include "xfs_error.h"
+#include "xfs_log_priv.h"
+#include "xfs_log_recover.h"
kmem_zone_t *xfs_rui_zone;
kmem_zone_t *xfs_rud_zone;
+static const struct xfs_item_ops xfs_rui_item_ops;
+
static inline struct xfs_rui_log_item *RUI_ITEM(struct xfs_log_item *lip)
{
return container_of(lip, struct xfs_rui_log_item, rui_item);
}
-void
+STATIC void
xfs_rui_item_free(
struct xfs_rui_log_item *ruip)
{
@@ -44,13 +48,13 @@ xfs_rui_item_free(
* committed vs unpin operations in bulk insert operations. Hence the reference
* count to ensure only the last caller frees the RUI.
*/
-void
+STATIC void
xfs_rui_release(
struct xfs_rui_log_item *ruip)
{
ASSERT(atomic_read(&ruip->rui_refcount) > 0);
if (atomic_dec_and_test(&ruip->rui_refcount)) {
- xfs_trans_ail_remove(&ruip->rui_item, SHUTDOWN_LOG_IO_ERROR);
+ xfs_trans_ail_delete(&ruip->rui_item, SHUTDOWN_LOG_IO_ERROR);
xfs_rui_item_free(ruip);
}
}
@@ -122,17 +126,10 @@ xfs_rui_item_release(
xfs_rui_release(RUI_ITEM(lip));
}
-static const struct xfs_item_ops xfs_rui_item_ops = {
- .iop_size = xfs_rui_item_size,
- .iop_format = xfs_rui_item_format,
- .iop_unpin = xfs_rui_item_unpin,
- .iop_release = xfs_rui_item_release,
-};
-
/*
* Allocate and initialize an rui item with the given number of extents.
*/
-struct xfs_rui_log_item *
+STATIC struct xfs_rui_log_item *
xfs_rui_init(
struct xfs_mount *mp,
uint nextents)
@@ -160,7 +157,7 @@ xfs_rui_init(
* RUI format structure. The RUI/RUD items were designed not to need any
* special alignment handling.
*/
-int
+STATIC int
xfs_rui_copy_format(
struct xfs_log_iovec *buf,
struct xfs_rui_log_format *dst_rui_fmt)
@@ -352,41 +349,16 @@ xfs_rmap_update_diff_items(
XFS_FSB_TO_AGNO(mp, rb->ri_bmap.br_startblock);
}
-/* Get an RUI. */
-STATIC void *
-xfs_rmap_update_create_intent(
- struct xfs_trans *tp,
- unsigned int count)
-{
- struct xfs_rui_log_item *ruip;
-
- ASSERT(tp != NULL);
- ASSERT(count > 0);
-
- ruip = xfs_rui_init(tp->t_mountp, count);
- ASSERT(ruip != NULL);
-
- /*
- * Get a log_item_desc to point at the new item.
- */
- xfs_trans_add_item(tp, &ruip->rui_item);
- return ruip;
-}
-
/* Log rmap updates in the intent item. */
STATIC void
xfs_rmap_update_log_item(
struct xfs_trans *tp,
- void *intent,
- struct list_head *item)
+ struct xfs_rui_log_item *ruip,
+ struct xfs_rmap_intent *rmap)
{
- struct xfs_rui_log_item *ruip = intent;
- struct xfs_rmap_intent *rmap;
uint next_extent;
struct xfs_map_extent *map;
- rmap = container_of(item, struct xfs_rmap_intent, ri_list);
-
tp->t_flags |= XFS_TRANS_DIRTY;
set_bit(XFS_LI_DIRTY, &ruip->rui_item.li_flags);
@@ -406,58 +378,64 @@ xfs_rmap_update_log_item(
rmap->ri_bmap.br_state);
}
+static struct xfs_log_item *
+xfs_rmap_update_create_intent(
+ struct xfs_trans *tp,
+ struct list_head *items,
+ unsigned int count,
+ bool sort)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_rui_log_item *ruip = xfs_rui_init(mp, count);
+ struct xfs_rmap_intent *rmap;
+
+ ASSERT(count > 0);
+
+ xfs_trans_add_item(tp, &ruip->rui_item);
+ if (sort)
+ list_sort(mp, items, xfs_rmap_update_diff_items);
+ list_for_each_entry(rmap, items, ri_list)
+ xfs_rmap_update_log_item(tp, ruip, rmap);
+ return &ruip->rui_item;
+}
+
/* Get an RUD so we can process all the deferred rmap updates. */
-STATIC void *
+static struct xfs_log_item *
xfs_rmap_update_create_done(
struct xfs_trans *tp,
- void *intent,
+ struct xfs_log_item *intent,
unsigned int count)
{
- return xfs_trans_get_rud(tp, intent);
+ return &xfs_trans_get_rud(tp, RUI_ITEM(intent))->rud_item;
}
/* Process a deferred rmap update. */
STATIC int
xfs_rmap_update_finish_item(
struct xfs_trans *tp,
+ struct xfs_log_item *done,
struct list_head *item,
- void *done_item,
- void **state)
+ struct xfs_btree_cur **state)
{
struct xfs_rmap_intent *rmap;
int error;
rmap = container_of(item, struct xfs_rmap_intent, ri_list);
- error = xfs_trans_log_finish_rmap_update(tp, done_item,
- rmap->ri_type,
- rmap->ri_owner, rmap->ri_whichfork,
- rmap->ri_bmap.br_startoff,
- rmap->ri_bmap.br_startblock,
- rmap->ri_bmap.br_blockcount,
- rmap->ri_bmap.br_state,
- (struct xfs_btree_cur **)state);
+ error = xfs_trans_log_finish_rmap_update(tp, RUD_ITEM(done),
+ rmap->ri_type, rmap->ri_owner, rmap->ri_whichfork,
+ rmap->ri_bmap.br_startoff, rmap->ri_bmap.br_startblock,
+ rmap->ri_bmap.br_blockcount, rmap->ri_bmap.br_state,
+ state);
kmem_free(rmap);
return error;
}
-/* Clean up after processing deferred rmaps. */
-STATIC void
-xfs_rmap_update_finish_cleanup(
- struct xfs_trans *tp,
- void *state,
- int error)
-{
- struct xfs_btree_cur *rcur = state;
-
- xfs_rmap_finish_one_cleanup(tp, rcur, error);
-}
-
/* Abort all pending RUIs. */
STATIC void
xfs_rmap_update_abort_intent(
- void *intent)
+ struct xfs_log_item *intent)
{
- xfs_rui_release(intent);
+ xfs_rui_release(RUI_ITEM(intent));
}
/* Cancel a deferred rmap update. */
@@ -473,13 +451,11 @@ xfs_rmap_update_cancel_item(
const struct xfs_defer_op_type xfs_rmap_update_defer_type = {
.max_items = XFS_RUI_MAX_FAST_EXTENTS,
- .diff_items = xfs_rmap_update_diff_items,
.create_intent = xfs_rmap_update_create_intent,
.abort_intent = xfs_rmap_update_abort_intent,
- .log_item = xfs_rmap_update_log_item,
.create_done = xfs_rmap_update_create_done,
.finish_item = xfs_rmap_update_finish_item,
- .finish_cleanup = xfs_rmap_update_finish_cleanup,
+ .finish_cleanup = xfs_rmap_finish_one_cleanup,
.cancel_item = xfs_rmap_update_cancel_item,
};
@@ -487,24 +463,24 @@ const struct xfs_defer_op_type xfs_rmap_update_defer_type = {
* Process an rmap update intent item that was recovered from the log.
* We need to update the rmapbt.
*/
-int
-xfs_rui_recover(
- struct xfs_mount *mp,
- struct xfs_rui_log_item *ruip)
+STATIC int
+xfs_rui_item_recover(
+ struct xfs_log_item *lip,
+ struct xfs_trans *parent_tp)
{
- int i;
- int error = 0;
+ struct xfs_rui_log_item *ruip = RUI_ITEM(lip);
struct xfs_map_extent *rmap;
- xfs_fsblock_t startblock_fsb;
- bool op_ok;
struct xfs_rud_log_item *rudp;
- enum xfs_rmap_intent_type type;
- int whichfork;
- xfs_exntst_t state;
struct xfs_trans *tp;
struct xfs_btree_cur *rcur = NULL;
-
- ASSERT(!test_bit(XFS_RUI_RECOVERED, &ruip->rui_flags));
+ struct xfs_mount *mp = parent_tp->t_mountp;
+ xfs_fsblock_t startblock_fsb;
+ enum xfs_rmap_intent_type type;
+ xfs_exntst_t state;
+ bool op_ok;
+ int i;
+ int whichfork;
+ int error = 0;
/*
* First check the validity of the extents described by the
@@ -539,7 +515,6 @@ xfs_rui_recover(
* This will pull the RUI from the AIL and
* free the memory associated with it.
*/
- set_bit(XFS_RUI_RECOVERED, &ruip->rui_flags);
xfs_rui_release(ruip);
return -EFSCORRUPTED;
}
@@ -597,7 +572,6 @@ xfs_rui_recover(
}
xfs_rmap_finish_one_cleanup(tp, rcur, error);
- set_bit(XFS_RUI_RECOVERED, &ruip->rui_flags);
error = xfs_trans_commit(tp);
return error;
@@ -606,3 +580,90 @@ abort_error:
xfs_trans_cancel(tp);
return error;
}
+
+STATIC bool
+xfs_rui_item_match(
+ struct xfs_log_item *lip,
+ uint64_t intent_id)
+{
+ return RUI_ITEM(lip)->rui_format.rui_id == intent_id;
+}
+
+static const struct xfs_item_ops xfs_rui_item_ops = {
+ .iop_size = xfs_rui_item_size,
+ .iop_format = xfs_rui_item_format,
+ .iop_unpin = xfs_rui_item_unpin,
+ .iop_release = xfs_rui_item_release,
+ .iop_recover = xfs_rui_item_recover,
+ .iop_match = xfs_rui_item_match,
+};
+
+/*
+ * This routine is called to create an in-core extent rmap update
+ * item from the rui format structure which was logged on disk.
+ * It allocates an in-core rui, copies the extents from the format
+ * structure into it, and adds the rui to the AIL with the given
+ * LSN.
+ */
+STATIC int
+xlog_recover_rui_commit_pass2(
+ struct xlog *log,
+ struct list_head *buffer_list,
+ struct xlog_recover_item *item,
+ xfs_lsn_t lsn)
+{
+ int error;
+ struct xfs_mount *mp = log->l_mp;
+ struct xfs_rui_log_item *ruip;
+ struct xfs_rui_log_format *rui_formatp;
+
+ rui_formatp = item->ri_buf[0].i_addr;
+
+ ruip = xfs_rui_init(mp, rui_formatp->rui_nextents);
+ error = xfs_rui_copy_format(&item->ri_buf[0], &ruip->rui_format);
+ if (error) {
+ xfs_rui_item_free(ruip);
+ return error;
+ }
+ atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents);
+ /*
+ * Insert the intent into the AIL directly and drop one reference so
+ * that finishing or canceling the work will drop the other.
+ */
+ xfs_trans_ail_insert(log->l_ailp, &ruip->rui_item, lsn);
+ xfs_rui_release(ruip);
+ return 0;
+}
+
+const struct xlog_recover_item_ops xlog_rui_item_ops = {
+ .item_type = XFS_LI_RUI,
+ .commit_pass2 = xlog_recover_rui_commit_pass2,
+};
+
+/*
+ * This routine is called when an RUD format structure is found in a committed
+ * transaction in the log. Its purpose is to cancel the corresponding RUI if it
+ * was still in the log. To do this it searches the AIL for the RUI with an id
+ * equal to that in the RUD format structure. If we find it we drop the RUD
+ * reference, which removes the RUI from the AIL and frees it.
+ */
+STATIC int
+xlog_recover_rud_commit_pass2(
+ struct xlog *log,
+ struct list_head *buffer_list,
+ struct xlog_recover_item *item,
+ xfs_lsn_t lsn)
+{
+ struct xfs_rud_log_format *rud_formatp;
+
+ rud_formatp = item->ri_buf[0].i_addr;
+ ASSERT(item->ri_buf[0].i_len == sizeof(struct xfs_rud_log_format));
+
+ xlog_recover_release_intent(log, XFS_LI_RUI, rud_formatp->rud_rui_id);
+ return 0;
+}
+
+const struct xlog_recover_item_ops xlog_rud_item_ops = {
+ .item_type = XFS_LI_RUD,
+ .commit_pass2 = xlog_recover_rud_commit_pass2,
+};
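
The RUI conversion mirrors the CUI one: ->create_intent now sorts and logs the work items itself, and ->finish_item receives the done log item plus a typed btree-cursor state. A rough sketch of the caller side in the defer machinery; the xfs_defer_pending field names used here (dfp_work, dfp_count, dfp_intent, dfp_done) are assumptions for illustration:

	static int
	example_defer_finish_one(
		struct xfs_trans		*tp,
		struct xfs_defer_pending	*dfp,
		const struct xfs_defer_op_type	*ops)
	{
		struct xfs_btree_cur	*state = NULL;
		struct list_head	*li, *n;
		int			error = 0;

		/*
		 * One intent covers every queued item, sorted by AG so
		 * the locking order stays consistent.
		 */
		dfp->dfp_intent = ops->create_intent(tp, &dfp->dfp_work,
						     dfp->dfp_count, true);
		dfp->dfp_done = ops->create_done(tp, dfp->dfp_intent,
						 dfp->dfp_count);

		list_for_each_safe(li, n, &dfp->dfp_work) {
			error = ops->finish_item(tp, dfp->dfp_done, li, &state);
			if (error)
				break;
		}

		/* the btree cursor goes straight to the plain cleanup hook */
		ops->finish_cleanup(tp, state, error);
		return error;
	}
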
diff --git a/fs/xfs/xfs_rmap_item.h b/fs/xfs/xfs_rmap_item.h
index 8708e4a5aa5c..31e6cdfff71f 100644
--- a/fs/xfs/xfs_rmap_item.h
+++ b/fs/xfs/xfs_rmap_item.h
@@ -36,11 +36,6 @@ struct kmem_zone;
#define XFS_RUI_MAX_FAST_EXTENTS 16
/*
- * Define RUI flag bits. Manipulated by set/clear/test_bit operators.
- */
-#define XFS_RUI_RECOVERED 1
-
-/*
* This is the "rmap update intent" log item. It is used to log the fact that
* some reverse mappings need to change. It is used in conjunction with the
* "rmap update done" log item described below.
@@ -52,7 +47,6 @@ struct xfs_rui_log_item {
struct xfs_log_item rui_item;
atomic_t rui_refcount;
atomic_t rui_next_extent;
- unsigned long rui_flags; /* misc flags */
struct xfs_rui_log_format rui_format;
};
@@ -77,11 +71,4 @@ struct xfs_rud_log_item {
extern struct kmem_zone *xfs_rui_zone;
extern struct kmem_zone *xfs_rud_zone;
-struct xfs_rui_log_item *xfs_rui_init(struct xfs_mount *, uint);
-int xfs_rui_copy_format(struct xfs_log_iovec *buf,
- struct xfs_rui_log_format *dst_rui_fmt);
-void xfs_rui_item_free(struct xfs_rui_log_item *);
-void xfs_rui_release(struct xfs_rui_log_item *);
-int xfs_rui_recover(struct xfs_mount *mp, struct xfs_rui_log_item *ruip);
-
#endif /* __XFS_RMAP_ITEM_H__ */
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index a123cd8267d9..379cbff438bc 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -47,6 +47,39 @@ static struct kset *xfs_kset; /* top-level xfs sysfs dir */
static struct xfs_kobj xfs_dbg_kobj; /* global debug sysfs attrs */
#endif
+enum xfs_dax_mode {
+ XFS_DAX_INODE = 0,
+ XFS_DAX_ALWAYS = 1,
+ XFS_DAX_NEVER = 2,
+};
+
+static void
+xfs_mount_set_dax_mode(
+ struct xfs_mount *mp,
+ enum xfs_dax_mode mode)
+{
+ switch (mode) {
+ case XFS_DAX_INODE:
+ mp->m_flags &= ~(XFS_MOUNT_DAX_ALWAYS | XFS_MOUNT_DAX_NEVER);
+ break;
+ case XFS_DAX_ALWAYS:
+ mp->m_flags |= XFS_MOUNT_DAX_ALWAYS;
+ mp->m_flags &= ~XFS_MOUNT_DAX_NEVER;
+ break;
+ case XFS_DAX_NEVER:
+ mp->m_flags |= XFS_MOUNT_DAX_NEVER;
+ mp->m_flags &= ~XFS_MOUNT_DAX_ALWAYS;
+ break;
+ }
+}
+
+static const struct constant_table dax_param_enums[] = {
+ {"inode", XFS_DAX_INODE },
+ {"always", XFS_DAX_ALWAYS },
+ {"never", XFS_DAX_NEVER },
+ {}
+};
+
/*
* Table driven mount option parser.
*/
@@ -59,7 +92,7 @@ enum {
Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
- Opt_discard, Opt_nodiscard, Opt_dax,
+ Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum,
};
static const struct fs_parameter_spec xfs_fs_parameters[] = {
@@ -103,6 +136,7 @@ static const struct fs_parameter_spec xfs_fs_parameters[] = {
fsparam_flag("discard", Opt_discard),
fsparam_flag("nodiscard", Opt_nodiscard),
fsparam_flag("dax", Opt_dax),
+ fsparam_enum("dax", Opt_dax_enum, dax_param_enums),
{}
};
@@ -129,7 +163,8 @@ xfs_fs_show_options(
{ XFS_MOUNT_GRPID, ",grpid" },
{ XFS_MOUNT_DISCARD, ",discard" },
{ XFS_MOUNT_LARGEIO, ",largeio" },
- { XFS_MOUNT_DAX, ",dax" },
+ { XFS_MOUNT_DAX_ALWAYS, ",dax=always" },
+ { XFS_MOUNT_DAX_NEVER, ",dax=never" },
{ 0, NULL }
};
struct xfs_mount *mp = XFS_M(root->d_sb);
@@ -702,7 +737,7 @@ xfs_fs_drop_inode(
return 0;
}
- return generic_drop_inode(inode) || (ip->i_flags & XFS_IDONTCACHE);
+ return generic_drop_inode(inode);
}
static void
@@ -772,7 +807,8 @@ xfs_fs_statfs(
statp->f_blocks = sbp->sb_dblocks - lsize;
spin_unlock(&mp->m_sb_lock);
- statp->f_bfree = fdblocks - mp->m_alloc_set_aside;
+ /* make sure statp->f_bfree does not underflow */
+ statp->f_bfree = max_t(int64_t, fdblocks - mp->m_alloc_set_aside, 0);
statp->f_bavail = statp->f_bfree;
fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
@@ -838,8 +874,10 @@ xfs_restore_resvblks(struct xfs_mount *mp)
* there is no log replay required to write the inodes to disk - this is the
* primary difference between a sync and a quiesce.
*
- * Note: xfs_log_quiesce() stops background log work - the callers must ensure
- * it is started again when appropriate.
+ * We cancel log work early here to ensure all transactions the log worker may
+ * run have finished before we clean up and log the superblock and write an
+ * unmount record. The unfreeze process is responsible for restarting the log
+ * worker correctly.
*/
void
xfs_quiesce_attr(
@@ -847,9 +885,7 @@ xfs_quiesce_attr(
{
int error = 0;
- /* wait for all modifications to complete */
- while (atomic_read(&mp->m_active_trans) > 0)
- delay(100);
+ cancel_delayed_work_sync(&mp->m_log->l_work);
/* force the log to unpin objects from the now complete transactions */
xfs_log_force(mp, XFS_LOG_SYNC);
@@ -863,12 +899,6 @@ xfs_quiesce_attr(
if (error)
xfs_warn(mp, "xfs_attr_quiesce: failed to log sb changes. "
"Frozen image may not be consistent.");
- /*
- * Just warn here till VFS can correctly support
- * read-only remount without racing.
- */
- WARN_ON(atomic_read(&mp->m_active_trans) != 0);
-
xfs_log_quiesce(mp);
}
@@ -1261,7 +1291,10 @@ xfs_fc_parse_param(
return 0;
#ifdef CONFIG_FS_DAX
case Opt_dax:
- mp->m_flags |= XFS_MOUNT_DAX;
+ xfs_mount_set_dax_mode(mp, XFS_DAX_ALWAYS);
+ return 0;
+ case Opt_dax_enum:
+ xfs_mount_set_dax_mode(mp, result.uint_32);
return 0;
#endif
default:
@@ -1454,7 +1487,7 @@ xfs_fc_fill_super(
if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
sb->s_flags |= SB_I_VERSION;
- if (mp->m_flags & XFS_MOUNT_DAX) {
+ if (mp->m_flags & XFS_MOUNT_DAX_ALWAYS) {
bool rtdev_is_dax = false, datadev_is_dax;
xfs_warn(mp,
@@ -1468,7 +1501,7 @@ xfs_fc_fill_super(
if (!rtdev_is_dax && !datadev_is_dax) {
xfs_alert(mp,
"DAX unsupported by block device. Turning off DAX.");
- mp->m_flags &= ~XFS_MOUNT_DAX;
+ xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER);
}
if (xfs_sb_version_hasreflink(&mp->m_sb)) {
xfs_alert(mp,
@@ -1754,7 +1787,6 @@ static int xfs_init_fs_context(
INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
spin_lock_init(&mp->m_perag_lock);
mutex_init(&mp->m_growlock);
- atomic_set(&mp->m_active_trans, 0);
INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
INIT_DELAYED_WORK(&mp->m_eofblocks_work, xfs_eofblocks_worker);
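
Registering both fsparam_flag("dax") and fsparam_enum("dax", dax_param_enums) lets fs_parse() keep the legacy bare flag while adding the tri-state. What the parser accepts after this change:

	/*
	 *   -o dax         -> Opt_dax        (legacy spelling, same as dax=always)
	 *   -o dax=always  -> Opt_dax_enum,  result.uint_32 == XFS_DAX_ALWAYS
	 *   -o dax=inode   -> Opt_dax_enum,  result.uint_32 == XFS_DAX_INODE
	 *   -o dax=never   -> Opt_dax_enum,  result.uint_32 == XFS_DAX_NEVER
	 *
	 * XFS_DAX_INODE clears both mount flags, so xfs_fs_show_options()
	 * prints nothing for it, matching the default per-inode behaviour.
	 */
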
diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c
index 13fb4b919648..8e88a7ca387e 100644
--- a/fs/xfs/xfs_symlink.c
+++ b/fs/xfs/xfs_symlink.c
@@ -243,8 +243,7 @@ xfs_symlink(
*/
xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
- if (resblks)
- resblks -= XFS_IALLOC_SPACE_RES(mp);
+ resblks -= XFS_IALLOC_SPACE_RES(mp);
/*
* If the symlink will fit into the inode, write it inline.
*/
@@ -252,7 +251,7 @@ xfs_symlink(
xfs_init_local_fork(ip, XFS_DATA_FORK, target_path, pathlen);
ip->i_d.di_size = pathlen;
- ip->i_d.di_format = XFS_DINODE_FMT_LOCAL;
+ ip->i_df.if_format = XFS_DINODE_FMT_LOCAL;
xfs_trans_log_inode(tp, ip, XFS_ILOG_DDATA | XFS_ILOG_CORE);
} else {
int offset;
@@ -265,8 +264,7 @@ xfs_symlink(
if (error)
goto out_trans_cancel;
- if (resblks)
- resblks -= fs_blocks;
+ resblks -= fs_blocks;
ip->i_d.di_size = pathlen;
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
@@ -386,7 +384,7 @@ xfs_inactive_symlink_rmt(
* either 1 or 2 extents and that we can
* free them all in one bunmapi call.
*/
- ASSERT(ip->i_d.di_nextents > 0 && ip->i_d.di_nextents <= 2);
+ ASSERT(ip->i_df.if_nextents > 0 && ip->i_df.if_nextents <= 2);
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
if (error)
diff --git a/fs/xfs/xfs_sysctl.c b/fs/xfs/xfs_sysctl.c
index 31b3bdbd2eba..021ef96d0542 100644
--- a/fs/xfs/xfs_sysctl.c
+++ b/fs/xfs/xfs_sysctl.c
@@ -13,7 +13,7 @@ STATIC int
xfs_stats_clear_proc_handler(
struct ctl_table *ctl,
int write,
- void __user *buffer,
+ void *buffer,
size_t *lenp,
loff_t *ppos)
{
@@ -33,7 +33,7 @@ STATIC int
xfs_panic_mask_proc_handler(
struct ctl_table *ctl,
int write,
- void __user *buffer,
+ void *buffer,
size_t *lenp,
loff_t *ppos)
{
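
The __user annotation goes away because the sysctl core now hands handlers a kernel buffer. A minimal sketch of the new convention, assuming proc_dointvec_minmax() keeps its usual role; the handler name is hypothetical:

	STATIC int
	xfs_example_proc_handler(
		struct ctl_table	*ctl,
		int			write,
		void			*buffer,	/* kernel memory now */
		size_t			*lenp,
		loff_t			*ppos)
	{
		/* forward straight to the generic helper; no set_fs() games */
		return proc_dointvec_minmax(ctl, write, buffer, lenp, ppos);
	}
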
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index a4323a63438d..460136628a79 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -1897,8 +1897,8 @@ DECLARE_EVENT_CLASS(xfs_swap_extent_class,
__entry->dev = VFS_I(ip)->i_sb->s_dev;
__entry->which = which;
__entry->ino = ip->i_ino;
- __entry->format = ip->i_d.di_format;
- __entry->nex = ip->i_d.di_nextents;
+ __entry->format = ip->i_df.if_format;
+ __entry->nex = ip->i_df.if_nextents;
__entry->broot_size = ip->i_df.if_broot_bytes;
__entry->fork_off = XFS_IFORK_BOFF(ip);
),
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 28b983ff8b11..3c94e5ff4316 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -68,7 +68,6 @@ xfs_trans_free(
xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);
trace_xfs_trans_free(tp, _RET_IP_);
- atomic_dec(&tp->t_mountp->m_active_trans);
if (!(tp->t_flags & XFS_TRANS_NO_WRITECOUNT))
sb_end_intwrite(tp->t_mountp->m_super);
xfs_trans_free_dqinfo(tp);
@@ -125,8 +124,6 @@ xfs_trans_dup(
xfs_defer_move(ntp, tp);
xfs_trans_dup_dqinfo(tp, ntp);
-
- atomic_inc(&tp->t_mountp->m_active_trans);
return ntp;
}
@@ -275,7 +272,6 @@ xfs_trans_alloc(
*/
WARN_ON(resp->tr_logres > 0 &&
mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
- atomic_inc(&mp->m_active_trans);
tp->t_magic = XFS_TRANS_HEADER_MAGIC;
tp->t_flags = flags;
@@ -299,20 +295,19 @@ xfs_trans_alloc(
/*
* Create an empty transaction with no reservation. This is a defensive
- * mechanism for routines that query metadata without actually modifying
- * them -- if the metadata being queried is somehow cross-linked (think a
- * btree block pointer that points higher in the tree), we risk deadlock.
- * However, blocks grabbed as part of a transaction can be re-grabbed.
- * The verifiers will notice the corrupt block and the operation will fail
- * back to userspace without deadlocking.
+ * mechanism for routines that query metadata without actually modifying them --
+ * if the metadata being queried is somehow cross-linked (think a btree block
+ * pointer that points higher in the tree), we risk deadlock. However, blocks
+ * grabbed as part of a transaction can be re-grabbed. The verifiers will
+ * notice the corrupt block and the operation will fail back to userspace
+ * without deadlocking.
*
- * Note the zero-length reservation; this transaction MUST be cancelled
- * without any dirty data.
+ * Note the zero-length reservation; this transaction MUST be cancelled without
+ * any dirty data.
*
- * Callers should obtain freeze protection to avoid two conflicts with fs
- * freezing: (1) having active transactions trip the m_active_trans ASSERTs;
- * and (2) grabbing buffers at the same time that freeze is trying to drain
- * the buffer LRU list.
+ * Callers should obtain freeze protection to avoid a conflict with fs freezing
+ * where we can be grabbing buffers at the same time that freeze is trying to
+ * drain the buffer LRU list.
*/
int
xfs_trans_alloc_empty(
@@ -534,57 +529,9 @@ xfs_trans_apply_sb_deltas(
sizeof(sbp->sb_frextents) - 1);
}
-STATIC int
-xfs_sb_mod8(
- uint8_t *field,
- int8_t delta)
-{
- int8_t counter = *field;
-
- counter += delta;
- if (counter < 0) {
- ASSERT(0);
- return -EINVAL;
- }
- *field = counter;
- return 0;
-}
-
-STATIC int
-xfs_sb_mod32(
- uint32_t *field,
- int32_t delta)
-{
- int32_t counter = *field;
-
- counter += delta;
- if (counter < 0) {
- ASSERT(0);
- return -EINVAL;
- }
- *field = counter;
- return 0;
-}
-
-STATIC int
-xfs_sb_mod64(
- uint64_t *field,
- int64_t delta)
-{
- int64_t counter = *field;
-
- counter += delta;
- if (counter < 0) {
- ASSERT(0);
- return -EINVAL;
- }
- *field = counter;
- return 0;
-}
-
/*
- * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations
- * and apply superblock counter changes to the in-core superblock. The
+ * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations and
+ * apply superblock counter changes to the in-core superblock. The
* t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
* applied to the in-core superblock. The idea is that that has already been
* done.
@@ -593,7 +540,12 @@ xfs_sb_mod64(
* used block counts are not updated in the on disk superblock. In this case,
* XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
* still need to update the incore superblock with the changes.
+ *
+ * Deltas for the inode count are +/-64, hence we use a large batch size of 128
+ * so we don't need to take the counter lock on every update.
*/
+#define XFS_ICOUNT_BATCH 128
+
void
xfs_trans_unreserve_and_mod_sb(
struct xfs_trans *tp)
@@ -629,20 +581,21 @@ xfs_trans_unreserve_and_mod_sb(
/* apply the per-cpu counters */
if (blkdelta) {
error = xfs_mod_fdblocks(mp, blkdelta, rsvd);
- if (error)
- goto out;
+ ASSERT(!error);
}
if (idelta) {
- error = xfs_mod_icount(mp, idelta);
- if (error)
- goto out_undo_fdblocks;
+ percpu_counter_add_batch(&mp->m_icount, idelta,
+ XFS_ICOUNT_BATCH);
+ if (idelta < 0)
+ ASSERT(__percpu_counter_compare(&mp->m_icount, 0,
+ XFS_ICOUNT_BATCH) >= 0);
}
if (ifreedelta) {
- error = xfs_mod_ifree(mp, ifreedelta);
- if (error)
- goto out_undo_icount;
+ percpu_counter_add(&mp->m_ifree, ifreedelta);
+ if (ifreedelta < 0)
+ ASSERT(percpu_counter_compare(&mp->m_ifree, 0) >= 0);
}
if (rtxdelta == 0 && !(tp->t_flags & XFS_TRANS_SB_DIRTY))
@@ -650,95 +603,23 @@ xfs_trans_unreserve_and_mod_sb(
/* apply remaining deltas */
spin_lock(&mp->m_sb_lock);
- if (rtxdelta) {
- error = xfs_sb_mod64(&mp->m_sb.sb_frextents, rtxdelta);
- if (error)
- goto out_undo_ifree;
- }
-
- if (tp->t_dblocks_delta != 0) {
- error = xfs_sb_mod64(&mp->m_sb.sb_dblocks, tp->t_dblocks_delta);
- if (error)
- goto out_undo_frextents;
- }
- if (tp->t_agcount_delta != 0) {
- error = xfs_sb_mod32(&mp->m_sb.sb_agcount, tp->t_agcount_delta);
- if (error)
- goto out_undo_dblocks;
- }
- if (tp->t_imaxpct_delta != 0) {
- error = xfs_sb_mod8(&mp->m_sb.sb_imax_pct, tp->t_imaxpct_delta);
- if (error)
- goto out_undo_agcount;
- }
- if (tp->t_rextsize_delta != 0) {
- error = xfs_sb_mod32(&mp->m_sb.sb_rextsize,
- tp->t_rextsize_delta);
- if (error)
- goto out_undo_imaxpct;
- }
- if (tp->t_rbmblocks_delta != 0) {
- error = xfs_sb_mod32(&mp->m_sb.sb_rbmblocks,
- tp->t_rbmblocks_delta);
- if (error)
- goto out_undo_rextsize;
- }
- if (tp->t_rblocks_delta != 0) {
- error = xfs_sb_mod64(&mp->m_sb.sb_rblocks, tp->t_rblocks_delta);
- if (error)
- goto out_undo_rbmblocks;
- }
- if (tp->t_rextents_delta != 0) {
- error = xfs_sb_mod64(&mp->m_sb.sb_rextents,
- tp->t_rextents_delta);
- if (error)
- goto out_undo_rblocks;
- }
- if (tp->t_rextslog_delta != 0) {
- error = xfs_sb_mod8(&mp->m_sb.sb_rextslog,
- tp->t_rextslog_delta);
- if (error)
- goto out_undo_rextents;
- }
+ mp->m_sb.sb_frextents += rtxdelta;
+ mp->m_sb.sb_dblocks += tp->t_dblocks_delta;
+ mp->m_sb.sb_agcount += tp->t_agcount_delta;
+ mp->m_sb.sb_imax_pct += tp->t_imaxpct_delta;
+ mp->m_sb.sb_rextsize += tp->t_rextsize_delta;
+ mp->m_sb.sb_rbmblocks += tp->t_rbmblocks_delta;
+ mp->m_sb.sb_rblocks += tp->t_rblocks_delta;
+ mp->m_sb.sb_rextents += tp->t_rextents_delta;
+ mp->m_sb.sb_rextslog += tp->t_rextslog_delta;
spin_unlock(&mp->m_sb_lock);
- return;
-out_undo_rextents:
- if (tp->t_rextents_delta)
- xfs_sb_mod64(&mp->m_sb.sb_rextents, -tp->t_rextents_delta);
-out_undo_rblocks:
- if (tp->t_rblocks_delta)
- xfs_sb_mod64(&mp->m_sb.sb_rblocks, -tp->t_rblocks_delta);
-out_undo_rbmblocks:
- if (tp->t_rbmblocks_delta)
- xfs_sb_mod32(&mp->m_sb.sb_rbmblocks, -tp->t_rbmblocks_delta);
-out_undo_rextsize:
- if (tp->t_rextsize_delta)
- xfs_sb_mod32(&mp->m_sb.sb_rextsize, -tp->t_rextsize_delta);
-out_undo_imaxpct:
- if (tp->t_rextsize_delta)
- xfs_sb_mod8(&mp->m_sb.sb_imax_pct, -tp->t_imaxpct_delta);
-out_undo_agcount:
- if (tp->t_agcount_delta)
- xfs_sb_mod32(&mp->m_sb.sb_agcount, -tp->t_agcount_delta);
-out_undo_dblocks:
- if (tp->t_dblocks_delta)
- xfs_sb_mod64(&mp->m_sb.sb_dblocks, -tp->t_dblocks_delta);
-out_undo_frextents:
- if (rtxdelta)
- xfs_sb_mod64(&mp->m_sb.sb_frextents, -rtxdelta);
-out_undo_ifree:
- spin_unlock(&mp->m_sb_lock);
- if (ifreedelta)
- xfs_mod_ifree(mp, -ifreedelta);
-out_undo_icount:
- if (idelta)
- xfs_mod_icount(mp, -idelta);
-out_undo_fdblocks:
- if (blkdelta)
- xfs_mod_fdblocks(mp, -blkdelta, rsvd);
-out:
- ASSERT(error == 0);
+ /*
+ * Debug checks outside of the spinlock so they don't lock up the
+ * machine if they fail.
+ */
+ ASSERT(mp->m_sb.sb_imax_pct >= 0);
+ ASSERT(mp->m_sb.sb_rextslog >= 0);
return;
}
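
The per-cpu counter conversion trades exactness for scalability. A short sketch of the semantics being relied on, assuming the generic percpu_counter behaviour:

	/*
	 * With batch b, each CPU folds into the shared count only every b
	 * local updates, so a plain read can be off by up to
	 * b * num_online_cpus().  __percpu_counter_compare() re-sums
	 * exactly when the fuzzy value is too close to the bound to call.
	 */
	percpu_counter_add_batch(&mp->m_icount, idelta, XFS_ICOUNT_BATCH);

	s64 quick = percpu_counter_read_positive(&mp->m_icount); /* fuzzy, lockless */
	s64 exact = percpu_counter_sum(&mp->m_icount);           /* exact, takes lock */
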
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 752c7fef9de7..8308bf6d7e40 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -59,12 +59,14 @@ struct xfs_log_item {
#define XFS_LI_ABORTED 1
#define XFS_LI_FAILED 2
#define XFS_LI_DIRTY 3 /* log item dirty in transaction */
+#define XFS_LI_RECOVERED 4 /* log intent item has been recovered */
#define XFS_LI_FLAGS \
{ (1 << XFS_LI_IN_AIL), "IN_AIL" }, \
{ (1 << XFS_LI_ABORTED), "ABORTED" }, \
{ (1 << XFS_LI_FAILED), "FAILED" }, \
- { (1 << XFS_LI_DIRTY), "DIRTY" }
+ { (1 << XFS_LI_DIRTY), "DIRTY" }, \
+ { (1 << XFS_LI_RECOVERED), "RECOVERED" }
struct xfs_item_ops {
unsigned flags;
@@ -77,6 +79,8 @@ struct xfs_item_ops {
void (*iop_release)(struct xfs_log_item *);
xfs_lsn_t (*iop_committed)(struct xfs_log_item *, xfs_lsn_t);
void (*iop_error)(struct xfs_log_item *, xfs_buf_t *);
+ int (*iop_recover)(struct xfs_log_item *lip, struct xfs_trans *tp);
+ bool (*iop_match)(struct xfs_log_item *item, uint64_t id);
};
/*
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index 564253550b75..ac5019361a13 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -345,6 +345,45 @@ xfs_ail_delete(
xfs_trans_ail_cursor_clear(ailp, lip);
}
+/*
+ * Requeue a failed buffer for writeback.
+ *
+ * We clear the log item failed state here as well, but we have to be careful
+ * about reference counts because the only active reference counts on the buffer
+ * may be the failed log items. Hence if we clear the log item failed state
+ * before queuing the buffer for IO we can release all active references to
+ * the buffer and free it, leading to use after free problems in
+ * xfs_buf_delwri_queue. It makes no difference to the buffer or log items which
+ * order we process them in - the buffer is locked, and we own the buffer list
+ * so nothing on them is going to change while we are performing this action.
+ *
+ * Hence we can safely queue the buffer for IO before we clear the failed log
+ * item state, therefore always having an active reference to the buffer and
+ * avoiding the transient zero-reference state that leads to use-after-free.
+ */
+static inline int
+xfsaild_resubmit_item(
+ struct xfs_log_item *lip,
+ struct list_head *buffer_list)
+{
+ struct xfs_buf *bp = lip->li_buf;
+
+ if (!xfs_buf_trylock(bp))
+ return XFS_ITEM_LOCKED;
+
+ if (!xfs_buf_delwri_queue(bp, buffer_list)) {
+ xfs_buf_unlock(bp);
+ return XFS_ITEM_FLUSHING;
+ }
+
+ /* protected by ail_lock */
+ list_for_each_entry(lip, &bp->b_li_list, li_bio_list)
+ xfs_clear_li_failed(lip);
+
+ xfs_buf_unlock(bp);
+ return XFS_ITEM_SUCCESS;
+}
+
static inline uint
xfsaild_push_item(
struct xfs_ail *ailp,
@@ -365,6 +404,8 @@ xfsaild_push_item(
*/
if (!lip->li_ops->iop_push)
return XFS_ITEM_PINNED;
+ if (test_bit(XFS_LI_FAILED, &lip->li_flags))
+ return xfsaild_resubmit_item(lip, &ailp->ail_buf_list);
return lip->li_ops->iop_push(lip, &ailp->ail_buf_list);
}
@@ -774,6 +815,17 @@ xfs_trans_ail_update_bulk(
xfs_ail_update_finish(ailp, tail_lsn);
}
+/* Insert a log item into the AIL. */
+void
+xfs_trans_ail_insert(
+ struct xfs_ail *ailp,
+ struct xfs_log_item *lip,
+ xfs_lsn_t lsn)
+{
+ spin_lock(&ailp->ail_lock);
+ xfs_trans_ail_update_bulk(ailp, NULL, &lip, 1, lsn);
+}
+
/*
* Delete one log item from the AIL.
*
@@ -800,39 +852,19 @@ xfs_ail_delete_one(
return 0;
}
-/**
- * Remove a log items from the AIL
- *
- * @xfs_trans_ail_delete_bulk takes an array of log items that all need to
- * removed from the AIL. The caller is already holding the AIL lock, and done
- * all the checks necessary to ensure the items passed in via @log_items are
- * ready for deletion. This includes checking that the items are in the AIL.
- *
- * For each log item to be removed, unlink it from the AIL, clear the IN_AIL
- * flag from the item and reset the item's lsn to 0. If we remove the first
- * item in the AIL, update the log tail to match the new minimum LSN in the
- * AIL.
- *
- * This function will not drop the AIL lock until all items are removed from
- * the AIL to minimise the amount of lock traffic on the AIL. This does not
- * greatly increase the AIL hold time, but does significantly reduce the amount
- * of traffic on the lock, especially during IO completion.
- *
- * This function must be called with the AIL lock held. The lock is dropped
- * before returning.
- */
void
xfs_trans_ail_delete(
- struct xfs_ail *ailp,
struct xfs_log_item *lip,
int shutdown_type)
{
+ struct xfs_ail *ailp = lip->li_ailp;
struct xfs_mount *mp = ailp->ail_mount;
xfs_lsn_t tail_lsn;
+ spin_lock(&ailp->ail_lock);
if (!test_bit(XFS_LI_IN_AIL, &lip->li_flags)) {
spin_unlock(&ailp->ail_lock);
- if (!XFS_FORCED_SHUTDOWN(mp)) {
+ if (shutdown_type && !XFS_FORCED_SHUTDOWN(mp)) {
xfs_alert_tag(mp, XFS_PTAG_AILDELETE,
"%s: attempting to delete a log item that is not in the AIL",
__func__);
@@ -841,6 +873,7 @@ xfs_trans_ail_delete(
return;
}
+ /* xfs_ail_update_finish() drops the AIL lock */
tail_lsn = xfs_ail_delete_one(ailp, lip);
xfs_ail_update_finish(ailp, tail_lsn);
}
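
Callers of the AIL removal path no longer take the lock themselves: the old xfs_trans_ail_remove() wrapper (deleted from xfs_trans_priv.h below) is folded into xfs_trans_ail_delete(), which now derives the AIL from the item and locks internally. The two calling styles side by side:

	/* before: wrapper took the lock, then called the locked variant */
	xfs_trans_ail_remove(lip, SHUTDOWN_LOG_IO_ERROR);

	/*
	 * after: one helper does the locking; pass 0 as shutdown_type when
	 * "item not in the AIL" is expected and should not warn or shut down
	 */
	xfs_trans_ail_delete(lip, SHUTDOWN_LOG_IO_ERROR);
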
diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c
index d1b9869bc5fa..c0f73b82c055 100644
--- a/fs/xfs/xfs_trans_dquot.c
+++ b/fs/xfs/xfs_trans_dquot.c
@@ -388,7 +388,7 @@ xfs_trans_apply_dquot_deltas(
*/
if (d->d_id) {
xfs_qm_adjust_dqlimits(tp->t_mountp, dqp);
- xfs_qm_adjust_dqtimers(tp->t_mountp, d);
+ xfs_qm_adjust_dqtimers(tp->t_mountp, dqp);
}
dqp->dq_flags |= XFS_DQ_DIRTY;
@@ -591,7 +591,7 @@ xfs_trans_dqresv(
xfs_dqlock(dqp);
- defq = xfs_get_defquota(dqp, q);
+ defq = xfs_get_defquota(q, xfs_dquot_type(dqp));
if (flags & XFS_TRANS_DQ_RES_BLKS) {
hardlimit = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
@@ -602,7 +602,7 @@ xfs_trans_dqresv(
softlimit = defq->bsoftlimit;
timer = be32_to_cpu(dqp->q_core.d_btimer);
warns = be16_to_cpu(dqp->q_core.d_bwarns);
- warnlimit = dqp->q_mount->m_quotainfo->qi_bwarnlimit;
+ warnlimit = defq->bwarnlimit;
resbcountp = &dqp->q_res_bcount;
} else {
ASSERT(flags & XFS_TRANS_DQ_RES_RTBLKS);
@@ -614,7 +614,7 @@ xfs_trans_dqresv(
softlimit = defq->rtbsoftlimit;
timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
warns = be16_to_cpu(dqp->q_core.d_rtbwarns);
- warnlimit = dqp->q_mount->m_quotainfo->qi_rtbwarnlimit;
+ warnlimit = defq->rtbwarnlimit;
resbcountp = &dqp->q_res_rtbcount;
}
@@ -650,7 +650,7 @@ xfs_trans_dqresv(
total_count = be64_to_cpu(dqp->q_core.d_icount) + ninos;
timer = be32_to_cpu(dqp->q_core.d_itimer);
warns = be16_to_cpu(dqp->q_core.d_iwarns);
- warnlimit = dqp->q_mount->m_quotainfo->qi_iwarnlimit;
+ warnlimit = defq->iwarnlimit;
hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
if (!hardlimit)
hardlimit = defq->ihardlimit;
@@ -711,7 +711,7 @@ xfs_trans_dqresv(
error_return:
xfs_dqunlock(dqp);
- if (flags & XFS_QMOPT_ENOSPC)
+ if (XFS_QM_ISPDQ(dqp))
return -ENOSPC;
return -EDQUOT;
}
@@ -751,8 +751,7 @@ xfs_trans_reserve_quota_bydquots(
ASSERT(flags & XFS_QMOPT_RESBLK_MASK);
if (udqp) {
- error = xfs_trans_dqresv(tp, mp, udqp, nblks, ninos,
- (flags & ~XFS_QMOPT_ENOSPC));
+ error = xfs_trans_dqresv(tp, mp, udqp, nblks, ninos, flags);
if (error)
return error;
}
@@ -803,16 +802,12 @@ xfs_trans_reserve_quota_nblks(
if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
return 0;
- if (XFS_IS_PQUOTA_ON(mp))
- flags |= XFS_QMOPT_ENOSPC;
ASSERT(!xfs_is_quota_inode(&mp->m_sb, ip->i_ino));
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
- ASSERT((flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) ==
- XFS_TRANS_DQ_RES_RTBLKS ||
- (flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) ==
- XFS_TRANS_DQ_RES_BLKS);
+ ASSERT((flags & ~(XFS_QMOPT_FORCE_RES)) == XFS_TRANS_DQ_RES_RTBLKS ||
+ (flags & ~(XFS_QMOPT_FORCE_RES)) == XFS_TRANS_DQ_RES_BLKS);
/*
* Reserve nblks against these dquots, with trans as the mediator.
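
With the warn limits moving into the per-type defaults, xfs_get_defquota() becomes a lookup by quota type rather than by dquot. A sketch of that accessor, assuming the qi_*_default members shown in the quotaops hunk at the top of this range and the XFS_DQ_* type constants of this era:

	struct xfs_def_quota *
	xfs_get_defquota(
		struct xfs_quotainfo	*qi,
		int			type)
	{
		switch (type) {
		case XFS_DQ_USER:
			return &qi->qi_usr_default;
		case XFS_DQ_GROUP:
			return &qi->qi_grp_default;
		case XFS_DQ_PROJ:
			return &qi->qi_prj_default;
		default:
			ASSERT(0);
			return NULL;
		}
	}
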
diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h
index 35655eac01a6..3004aeac9110 100644
--- a/fs/xfs/xfs_trans_priv.h
+++ b/fs/xfs/xfs_trans_priv.h
@@ -91,26 +91,13 @@ xfs_trans_ail_update(
xfs_trans_ail_update_bulk(ailp, NULL, &lip, 1, lsn);
}
+void xfs_trans_ail_insert(struct xfs_ail *ailp, struct xfs_log_item *lip,
+ xfs_lsn_t lsn);
+
xfs_lsn_t xfs_ail_delete_one(struct xfs_ail *ailp, struct xfs_log_item *lip);
void xfs_ail_update_finish(struct xfs_ail *ailp, xfs_lsn_t old_lsn)
__releases(ailp->ail_lock);
-void xfs_trans_ail_delete(struct xfs_ail *ailp, struct xfs_log_item *lip,
- int shutdown_type);
-
-static inline void
-xfs_trans_ail_remove(
- struct xfs_log_item *lip,
- int shutdown_type)
-{
- struct xfs_ail *ailp = lip->li_ailp;
-
- spin_lock(&ailp->ail_lock);
- /* xfs_trans_ail_delete() drops the AIL lock */
- if (test_bit(XFS_LI_IN_AIL, &lip->li_flags))
- xfs_trans_ail_delete(ailp, lip, shutdown_type);
- else
- spin_unlock(&ailp->ail_lock);
-}
+void xfs_trans_ail_delete(struct xfs_log_item *lip, int shutdown_type);
void xfs_ail_push(struct xfs_ail *, xfs_lsn_t);
void xfs_ail_push_all(struct xfs_ail *);
diff --git a/fs/xfs/xfs_xattr.c b/fs/xfs/xfs_xattr.c
index fc5d7276026e..bca48b308c02 100644
--- a/fs/xfs/xfs_xattr.c
+++ b/fs/xfs/xfs_xattr.c
@@ -12,7 +12,6 @@
#include "xfs_inode.h"
#include "xfs_attr.h"
#include "xfs_acl.h"
-#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include <linux/posix_acl_xattr.h>
diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
index d79b821ed1c7..07bc42d62673 100644
--- a/fs/zonefs/super.c
+++ b/fs/zonefs/super.c
@@ -1330,7 +1330,7 @@ static int zonefs_read_super(struct super_block *sb)
goto unmap;
}
- uuid_copy(&sbi->s_uuid, (uuid_t *)super->s_uuid);
+ import_uuid(&sbi->s_uuid, super->s_uuid);
ret = 0;
unmap:
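
import_uuid() copies a raw on-disk byte array into a uuid_t, which fits the u8 s_uuid[16] superblock field without the cast that the uuid_copy() call needed. For reference, the helper pair from linux/uuid.h:

	#include <linux/uuid.h>

	u8	raw[UUID_SIZE];		/* e.g. the on-disk superblock field */
	uuid_t	uu;

	import_uuid(&uu, raw);		/* raw bytes -> uuid_t, no cast */
	export_uuid(raw, &uu);		/* uuid_t -> raw bytes */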