author     Ross Zwisler <ross.zwisler@linux.intel.com>    2018-02-03 00:26:26 -0700
committer  Ross Zwisler <ross.zwisler@linux.intel.com>    2018-02-03 00:26:26 -0700
commit     ee95f4059a833839bf52972191b2d4c3d3cec552 (patch)
tree       a1c8587d9b82e64a75dde376a90a3d69b0f4847a /fs
parent     d121f07691415df824e6b60520f782f6d13b3c81 (diff)
parent     f81e1d35a6e36d30888c46283b8dd1022e847124 (diff)
Merge branch 'for-4.16/nfit' into libnvdimm-for-next
Diffstat (limited to 'fs')
-rw-r--r--  fs/afs/dir.c              37
-rw-r--r--  fs/afs/inode.c             4
-rw-r--r--  fs/afs/rxrpc.c             2
-rw-r--r--  fs/afs/write.c             8
-rw-r--r--  fs/btrfs/delayed-inode.c  45
-rw-r--r--  fs/btrfs/volumes.c         1
-rw-r--r--  fs/exec.c                  9
-rw-r--r--  fs/super.c                 6
-rw-r--r--  fs/userfaultfd.c          20
-rw-r--r--  fs/xfs/xfs_aops.c          4
-rw-r--r--  fs/xfs/xfs_iomap.c         2
-rw-r--r--  fs/xfs/xfs_qm.c           46
12 files changed, 136 insertions, 48 deletions
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index ff8d5bf4354f..23c7f395d718 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -895,20 +895,38 @@ error:
* However, if we didn't have a callback promise outstanding, or it was
* outstanding on a different server, then it won't break it either...
*/
-static int afs_dir_remove_link(struct dentry *dentry, struct key *key)
+static int afs_dir_remove_link(struct dentry *dentry, struct key *key,
+ unsigned long d_version_before,
+ unsigned long d_version_after)
{
+ bool dir_valid;
int ret = 0;
+ /* There were no intervening changes on the server if the version
+ * number we got back was incremented by exactly 1.
+ */
+ dir_valid = (d_version_after == d_version_before + 1);
+
if (d_really_is_positive(dentry)) {
struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry));
- if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
- kdebug("AFS_VNODE_DELETED");
- clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
-
- ret = afs_validate(vnode, key);
- if (ret == -ESTALE)
+ if (dir_valid) {
+ drop_nlink(&vnode->vfs_inode);
+ if (vnode->vfs_inode.i_nlink == 0) {
+ set_bit(AFS_VNODE_DELETED, &vnode->flags);
+ clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
+ }
ret = 0;
+ } else {
+ clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
+
+ if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
+ kdebug("AFS_VNODE_DELETED");
+
+ ret = afs_validate(vnode, key);
+ if (ret == -ESTALE)
+ ret = 0;
+ }
_debug("nlink %d [val %d]", vnode->vfs_inode.i_nlink, ret);
}
@@ -923,6 +941,7 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
struct afs_fs_cursor fc;
struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode;
struct key *key;
+ unsigned long d_version = (unsigned long)dentry->d_fsdata;
int ret;
_enter("{%x:%u},{%pd}",
@@ -955,7 +974,9 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
afs_vnode_commit_status(&fc, dvnode, fc.cb_break);
ret = afs_end_vnode_operation(&fc);
if (ret == 0)
- ret = afs_dir_remove_link(dentry, key);
+ ret = afs_dir_remove_link(
+ dentry, key, d_version,
+ (unsigned long)dvnode->status.data_version);
}
error_key:
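The afs/dir.c change above turns on one comparison: if the directory data version returned by the server is exactly one more than the version cached before the RPC, nothing else modified the directory, so the target's link count can be dropped locally instead of forcing a revalidation. A standalone sketch of that check (the function name is illustrative, not an AFS interface):

#include <stdbool.h>

/* Illustrative only: the unlink path trusts its cached state when the
 * server's data version moved forward by exactly one step, i.e. nothing
 * but our own remove touched the directory in the meantime. */
static bool dir_unchanged_by_others(unsigned long version_before,
                                    unsigned long version_after)
{
        return version_after == version_before + 1;
}
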
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 3415eb7484f6..1e81864ef0b2 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -377,6 +377,10 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
}
read_sequnlock_excl(&vnode->cb_lock);
+
+ if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
+ clear_nlink(&vnode->vfs_inode);
+
if (valid)
goto valid;
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index ea1460b9b71a..e1126659f043 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -885,7 +885,7 @@ int afs_extract_data(struct afs_call *call, void *buf, size_t count,
{
struct afs_net *net = call->net;
enum afs_call_state state;
- u32 remote_abort;
+ u32 remote_abort = 0;
int ret;
_enter("{%s,%zu},,%zu,%d",
diff --git a/fs/afs/write.c b/fs/afs/write.c
index cb5f8a3df577..9370e2feb999 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -198,7 +198,7 @@ int afs_write_end(struct file *file, struct address_space *mapping,
ret = afs_fill_page(vnode, key, pos + copied,
len - copied, page);
if (ret < 0)
- return ret;
+ goto out;
}
SetPageUptodate(page);
}
@@ -206,10 +206,12 @@ int afs_write_end(struct file *file, struct address_space *mapping,
set_page_dirty(page);
if (PageDirty(page))
_debug("dirtied");
+ ret = copied;
+
+out:
unlock_page(page);
put_page(page);
-
- return copied;
+ return ret;
}
/*
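The afs/write.c hunks fix a leak: the early return on an afs_fill_page() error used to skip the unlock_page()/put_page() pair, leaving the page locked and pinned. A toy sketch of the single-exit shape the function now takes (all toy_* names are stand-ins, not kernel APIs):

/* Toy stand-ins for the locked, pinned page the real code manages. */
struct toy_page { int locked; int refs; };

static void toy_unlock(struct toy_page *p) { p->locked = 0; }
static void toy_put(struct toy_page *p)    { p->refs--; }

/* Every exit, including the error path, releases the page exactly once. */
static int toy_write_end(struct toy_page *page, int copied, int fill_err)
{
        int ret;

        if (fill_err < 0) {
                ret = fill_err;         /* error: still unlock and put */
                goto out;
        }
        ret = copied;
out:
        toy_unlock(page);
        toy_put(page);
        return ret;
}
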
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 5d73f79ded8b..056276101c63 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -87,6 +87,7 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
spin_lock(&root->inode_lock);
node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
+
if (node) {
if (btrfs_inode->delayed_node) {
refcount_inc(&node->refs); /* can be accessed */
@@ -94,9 +95,30 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
spin_unlock(&root->inode_lock);
return node;
}
- btrfs_inode->delayed_node = node;
- /* can be accessed and cached in the inode */
- refcount_add(2, &node->refs);
+
+ /*
+ * It's possible that we're racing into the middle of removing
+ * this node from the radix tree. In this case, the refcount
+ * was zero and it should never go back to one. Just return
+ * NULL like it was never in the radix at all; our release
+ * function is in the process of removing it.
+ *
+ * Some implementations of refcount_inc refuse to bump the
+ * refcount once it has hit zero. If we don't do this dance
+ * here, refcount_inc() may decide to just WARN_ONCE() instead
+ * of actually bumping the refcount.
+ *
+ * If this node is properly in the radix, we want to bump the
+ * refcount twice, once for the inode and once for this get
+ * operation.
+ */
+ if (refcount_inc_not_zero(&node->refs)) {
+ refcount_inc(&node->refs);
+ btrfs_inode->delayed_node = node;
+ } else {
+ node = NULL;
+ }
+
spin_unlock(&root->inode_lock);
return node;
}
@@ -254,17 +276,18 @@ static void __btrfs_release_delayed_node(
mutex_unlock(&delayed_node->mutex);
if (refcount_dec_and_test(&delayed_node->refs)) {
- bool free = false;
struct btrfs_root *root = delayed_node->root;
+
spin_lock(&root->inode_lock);
- if (refcount_read(&delayed_node->refs) == 0) {
- radix_tree_delete(&root->delayed_nodes_tree,
- delayed_node->inode_id);
- free = true;
- }
+ /*
+ * Once our refcount goes to zero, nobody is allowed to bump it
+ * back up. We can delete it now.
+ */
+ ASSERT(refcount_read(&delayed_node->refs) == 0);
+ radix_tree_delete(&root->delayed_nodes_tree,
+ delayed_node->inode_id);
spin_unlock(&root->inode_lock);
- if (free)
- kmem_cache_free(delayed_node_cache, delayed_node);
+ kmem_cache_free(delayed_node_cache, delayed_node);
}
}
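The btrfs_get_delayed_node() hunk above replaces a plain refcount bump with refcount_inc_not_zero(), so a node whose refcount has already reached zero, and is therefore mid-removal from the radix tree, is reported as not found rather than resurrected. A compilable userspace analogue of that lookup rule, using a C11 compare-and-swap loop in place of the kernel primitive (all toy_* names are stand-ins):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct toy_node { atomic_uint refs; };

/* Userspace analogue of refcount_inc_not_zero(): take a reference only if
 * the count has not already dropped to zero. */
static bool toy_get_not_zero(struct toy_node *node)
{
        unsigned int old = atomic_load(&node->refs);

        while (old != 0) {
                if (atomic_compare_exchange_weak(&node->refs, &old, old + 1))
                        return true;    /* reference taken */
        }
        return false;                   /* object is being torn down */
}

/* A node found in the index but already at refcount zero is mid-removal;
 * treat it as absent instead of bumping it back to one. */
static struct toy_node *toy_lookup(struct toy_node *candidate)
{
        if (candidate && toy_get_not_zero(candidate))
                return candidate;
        return NULL;
}
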
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 49810b70afd3..a25684287501 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -237,7 +237,6 @@ static struct btrfs_device *__alloc_device(void)
kfree(dev);
return ERR_PTR(-ENOMEM);
}
- bio_get(dev->flush_bio);
INIT_LIST_HEAD(&dev->dev_list);
INIT_LIST_HEAD(&dev->dev_alloc_list);
diff --git a/fs/exec.c b/fs/exec.c
index 5688b5e1b937..7eb8d21bcab9 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1349,9 +1349,14 @@ void setup_new_exec(struct linux_binprm * bprm)
current->sas_ss_sp = current->sas_ss_size = 0;
- /* Figure out dumpability. */
+ /*
+ * Figure out dumpability. Note that this checking only of current
+ * is wrong, but userspace depends on it. This should be testing
+ * bprm->secureexec instead.
+ */
if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP ||
- bprm->secureexec)
+ !(uid_eq(current_euid(), current_uid()) &&
+ gid_eq(current_egid(), current_gid())))
set_dumpable(current->mm, suid_dumpable);
else
set_dumpable(current->mm, SUID_DUMP_USER);
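The fs/exec.c hunk switches the dumpability test back to comparing current's effective and real IDs; the added comment notes this is known-imperfect but what userspace depends on. A hedged restatement with plain integers standing in for kuid_t/kgid_t (the function name is illustrative):

#include <stdbool.h>

/* Plain integers stand in for kuid_t/kgid_t; the real check uses
 * uid_eq()/gid_eq() on current's credentials. */
static bool allow_user_dump(bool enforce_nondump,
                            unsigned int uid, unsigned int euid,
                            unsigned int gid, unsigned int egid)
{
        if (enforce_nondump || euid != uid || egid != gid)
                return false;   /* caller falls back to suid_dumpable */
        return true;            /* caller sets SUID_DUMP_USER */
}
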
diff --git a/fs/super.c b/fs/super.c
index 7ff1349609e4..06bd25d90ba5 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -517,7 +517,11 @@ retry:
hlist_add_head(&s->s_instances, &type->fs_supers);
spin_unlock(&sb_lock);
get_filesystem(type);
- register_shrinker(&s->s_shrink);
+ err = register_shrinker(&s->s_shrink);
+ if (err) {
+ deactivate_locked_super(s);
+ s = ERR_PTR(err);
+ }
return s;
}
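The fs/super.c hunk (and the xfs_qm.c one further down) stop ignoring the return value of register_shrinker(), which can fail. A toy sketch of the caller's new obligation, unwinding the half-built object and propagating the error instead of dropping it (toy_* names are stand-ins; the kernel returns ERR_PTR(err) rather than NULL):

#include <stddef.h>

struct toy_sb { int registered; };

/* Stand-in for register_shrinker(); in the kernel it can fail, e.g. -ENOMEM. */
static int toy_register(struct toy_sb *sb)  { sb->registered = 1; return 0; }
static void toy_teardown(struct toy_sb *sb) { sb->registered = 0; }

/* A failed registration unwinds the half-constructed superblock and is
 * reported to the caller instead of being silently ignored. */
static struct toy_sb *toy_sget(struct toy_sb *sb, int *errp)
{
        int err = toy_register(sb);

        if (err) {
                toy_teardown(sb);   /* analogue of deactivate_locked_super() */
                *errp = err;
                return NULL;        /* the kernel returns ERR_PTR(err) */
        }
        *errp = 0;
        return sb;
}
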
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index ac9a4e65ca49..41a75f9f23fd 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -570,11 +570,14 @@ out:
static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
struct userfaultfd_wait_queue *ewq)
{
+ struct userfaultfd_ctx *release_new_ctx;
+
if (WARN_ON_ONCE(current->flags & PF_EXITING))
goto out;
ewq->ctx = ctx;
init_waitqueue_entry(&ewq->wq, current);
+ release_new_ctx = NULL;
spin_lock(&ctx->event_wqh.lock);
/*
@@ -601,8 +604,7 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
new = (struct userfaultfd_ctx *)
(unsigned long)
ewq->msg.arg.reserved.reserved1;
-
- userfaultfd_ctx_put(new);
+ release_new_ctx = new;
}
break;
}
@@ -617,6 +619,20 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
__set_current_state(TASK_RUNNING);
spin_unlock(&ctx->event_wqh.lock);
+ if (release_new_ctx) {
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = release_new_ctx->mm;
+
+ /* the various vma->vm_userfaultfd_ctx still points to it */
+ down_write(&mm->mmap_sem);
+ for (vma = mm->mmap; vma; vma = vma->vm_next)
+ if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx)
+ vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
+ up_write(&mm->mmap_sem);
+
+ userfaultfd_ctx_put(release_new_ctx);
+ }
+
/*
* ctx may go away after this if the userfault pseudo fd is
* already released.
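The userfaultfd hunks defer the context release: the pointer is only recorded while ctx->event_wqh.lock is held, and the actual teardown, which takes mmap_sem and clears each VMA's vm_userfaultfd_ctx, happens after that lock is dropped. A toy pthread model of that defer-then-release shape (toy_* names are stand-ins, not the kernel API):

#include <pthread.h>
#include <stddef.h>

struct toy_ctx { int refs; };

static pthread_mutex_t wq_lock = PTHREAD_MUTEX_INITIALIZER;

static void toy_ctx_put(struct toy_ctx *ctx) { ctx->refs--; }

/* Remember what must be released while the waitqueue lock is held, but do
 * the release, which itself needs other locks, only after dropping it. */
static void toy_wait_completion(struct toy_ctx *fork_child_ctx)
{
        struct toy_ctx *release_new_ctx = NULL;

        pthread_mutex_lock(&wq_lock);
        if (fork_child_ctx)
                release_new_ctx = fork_child_ctx;   /* defer, don't put here */
        pthread_mutex_unlock(&wq_lock);

        if (release_new_ctx) {
                /* The real code takes mmap_sem here and clears each VMA's
                 * vm_userfaultfd_ctx before dropping the reference. */
                toy_ctx_put(release_new_ctx);
        }
}
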
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 21e2d70884e1..4fc526a27a94 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -399,7 +399,7 @@ xfs_map_blocks(
(ip->i_df.if_flags & XFS_IFEXTENTS));
ASSERT(offset <= mp->m_super->s_maxbytes);
- if ((xfs_ufsize_t)offset + count > mp->m_super->s_maxbytes)
+ if (offset > mp->m_super->s_maxbytes - count)
count = mp->m_super->s_maxbytes - offset;
end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
offset_fsb = XFS_B_TO_FSBT(mp, offset);
@@ -1312,7 +1312,7 @@ xfs_get_blocks(
lockmode = xfs_ilock_data_map_shared(ip);
ASSERT(offset <= mp->m_super->s_maxbytes);
- if ((xfs_ufsize_t)offset + size > mp->m_super->s_maxbytes)
+ if (offset > mp->m_super->s_maxbytes - size)
size = mp->m_super->s_maxbytes - offset;
end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
offset_fsb = XFS_B_TO_FSBT(mp, offset);
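The xfs_aops.c hunks above, like the xfs_iomap.c one below, rewrite the s_maxbytes clamp by subtraction so the overflow-prone offset + count sum is never formed; each call site asserts offset <= s_maxbytes just beforehand. A standalone sketch of the clamp (the function name is illustrative):

#include <stdint.h>

/* Compare by subtraction so the overflow-prone offset + count is never
 * formed; callers check offset <= maxbytes first, so maxbytes - offset
 * is safe when the clamp fires. */
static uint64_t clamp_to_maxbytes(uint64_t offset, uint64_t count,
                                  uint64_t maxbytes)
{
        if (offset > maxbytes - count)
                count = maxbytes - offset;
        return count;
}
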
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 7ab52a8bc0a9..66e1edbfb2b2 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -1006,7 +1006,7 @@ xfs_file_iomap_begin(
}
ASSERT(offset <= mp->m_super->s_maxbytes);
- if ((xfs_fsize_t)offset + length > mp->m_super->s_maxbytes)
+ if (offset > mp->m_super->s_maxbytes - length)
length = mp->m_super->s_maxbytes - offset;
offset_fsb = XFS_B_TO_FSBT(mp, offset);
end_fsb = XFS_B_TO_FSB(mp, offset + length);
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index ec952dfad359..b897b11afb2c 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -48,7 +48,7 @@
STATIC int xfs_qm_init_quotainos(xfs_mount_t *);
STATIC int xfs_qm_init_quotainfo(xfs_mount_t *);
-
+STATIC void xfs_qm_destroy_quotainos(xfs_quotainfo_t *qi);
STATIC void xfs_qm_dqfree_one(struct xfs_dquot *dqp);
/*
* We use the batch lookup interface to iterate over the dquots as it
@@ -695,9 +695,17 @@ xfs_qm_init_quotainfo(
qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
- register_shrinker(&qinf->qi_shrinker);
+
+ error = register_shrinker(&qinf->qi_shrinker);
+ if (error)
+ goto out_free_inos;
+
return 0;
+out_free_inos:
+ mutex_destroy(&qinf->qi_quotaofflock);
+ mutex_destroy(&qinf->qi_tree_lock);
+ xfs_qm_destroy_quotainos(qinf);
out_free_lru:
list_lru_destroy(&qinf->qi_lru);
out_free_qinf:
@@ -706,7 +714,6 @@ out_free_qinf:
return error;
}
-
/*
* Gets called when unmounting a filesystem or when all quotas get
* turned off.
@@ -723,19 +730,8 @@ xfs_qm_destroy_quotainfo(
unregister_shrinker(&qi->qi_shrinker);
list_lru_destroy(&qi->qi_lru);
-
- if (qi->qi_uquotaip) {
- IRELE(qi->qi_uquotaip);
- qi->qi_uquotaip = NULL; /* paranoia */
- }
- if (qi->qi_gquotaip) {
- IRELE(qi->qi_gquotaip);
- qi->qi_gquotaip = NULL;
- }
- if (qi->qi_pquotaip) {
- IRELE(qi->qi_pquotaip);
- qi->qi_pquotaip = NULL;
- }
+ xfs_qm_destroy_quotainos(qi);
+ mutex_destroy(&qi->qi_tree_lock);
mutex_destroy(&qi->qi_quotaofflock);
kmem_free(qi);
mp->m_quotainfo = NULL;
@@ -1600,6 +1596,24 @@ error_rele:
}
STATIC void
+xfs_qm_destroy_quotainos(
+ xfs_quotainfo_t *qi)
+{
+ if (qi->qi_uquotaip) {
+ IRELE(qi->qi_uquotaip);
+ qi->qi_uquotaip = NULL; /* paranoia */
+ }
+ if (qi->qi_gquotaip) {
+ IRELE(qi->qi_gquotaip);
+ qi->qi_gquotaip = NULL;
+ }
+ if (qi->qi_pquotaip) {
+ IRELE(qi->qi_pquotaip);
+ qi->qi_pquotaip = NULL;
+ }
+}
+
+STATIC void
xfs_qm_dqfree_one(
struct xfs_dquot *dqp)
{
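The xfs_qm_init_quotainfo() hunks add an out_free_inos label so a register_shrinker() failure unwinds the quota inodes and mutexes that were already set up. A toy sketch of that reverse-order unwind pattern (all toy_* names are stand-ins, not the XFS functions):

struct toy_qinfo { int tree, inos, shrinker; };

static int  toy_setup_tree(struct toy_qinfo *q)        { q->tree = 1; return 0; }
static int  toy_setup_inos(struct toy_qinfo *q)        { q->inos = 1; return 0; }
static int  toy_register_shrinker(struct toy_qinfo *q) { q->shrinker = 1; return 0; }
static void toy_destroy_inos(struct toy_qinfo *q)      { q->inos = 0; }
static void toy_destroy_tree(struct toy_qinfo *q)      { q->tree = 0; }

/* Each label undoes exactly what was built before the failing step, in
 * reverse order of construction. */
static int toy_init_quotainfo(struct toy_qinfo *q)
{
        int error;

        error = toy_setup_tree(q);
        if (error)
                goto out;
        error = toy_setup_inos(q);
        if (error)
                goto out_free_tree;
        error = toy_register_shrinker(q);
        if (error)
                goto out_free_inos;
        return 0;

out_free_inos:
        toy_destroy_inos(q);
out_free_tree:
        toy_destroy_tree(q);
out:
        return error;
}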