| field | value | timestamp |
|---|---|---|
| author | Rohith Surabattula <rohiths@microsoft.com> | 2021-04-13 00:26:42 -0500 |
| committer | Steve French <stfrench@microsoft.com> | 2021-05-03 11:20:35 -0500 |
| commit | c3f207ab29f793b8c942ce8067ed123f18d5b81b (patch) | |
| tree | ecfa1fdb80e4aa3281c34008d6962d61eb471573 /fs/cifs/file.c | |
| parent | fee742b502894c8ed02506fff61d7605934f93cb (diff) | |
cifs: Deferred close for files
When a file is closed, the SMB2 close request is not sent to the server
immediately; it is deferred for the interval defined by acregmax. When the
file is reopened by the same process for read or write, the cached file
handle is reused if an oplock is still held.
When the client receives an oplock/lease break, the file is closed
immediately if its reference count is zero; otherwise the oplock is
downgraded.
Signed-off-by: Rohith Surabattula <rohiths@microsoft.com>
Reviewed-by: Shyam Prasad N <sprasad@microsoft.com>
Signed-off-by: Steve French <stfrench@microsoft.com>
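
As a rough orientation before reading the patch itself, the lifecycle described in the commit message can be sketched in ordinary userspace C. Everything in this sketch (fake_handle, acregmax_secs, the three handler functions) is an illustrative stand-in, not a kernel structure or SMB client API; the real patch keys the decision on the cached oplock level (CIFS_CACHE_RHW_FLG), takes the per-inode deferred_lock, and schedules a delayed work item instead of comparing timestamps.

```c
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Illustrative stand-ins only; not kernel structures. */
struct fake_handle {
	int refcount;           /* open file references */
	bool has_rhw_oplock;    /* cached read/handle/write oplock or lease */
	bool close_deferred;    /* SMB2 close scheduled but not yet sent */
	time_t close_deadline;  /* when the deferred close would fire */
};

static const time_t acregmax_secs = 1;  /* stand-in for the acregmax mount option */

/* close(2) path: defer the SMB2 close only while an oplock is cached */
static void handle_close(struct fake_handle *h)
{
	h->refcount--;
	if (h->has_rhw_oplock) {
		h->close_deferred = true;
		h->close_deadline = time(NULL) + acregmax_secs;
		printf("SMB2 close deferred for %ld s\n", (long)acregmax_secs);
	} else {
		printf("SMB2 close sent immediately\n");
	}
}

/* open(2) path: reuse the cached handle if a deferred close is pending */
static bool handle_reopen(struct fake_handle *h)
{
	if (h->close_deferred && time(NULL) < h->close_deadline) {
		h->close_deferred = false;  /* cancel the pending close */
		h->refcount++;
		printf("cached handle reused, no open sent on the wire\n");
		return true;
	}
	return false;
}

/* oplock/lease break: close now if unreferenced, otherwise downgrade */
static void handle_oplock_break(struct fake_handle *h)
{
	if (h->refcount == 0 && h->close_deferred) {
		h->close_deferred = false;
		printf("SMB2 close sent immediately on break\n");
	} else {
		h->has_rhw_oplock = false;
		printf("oplock downgraded, handle stays open\n");
	}
}

int main(void)
{
	struct fake_handle h = { .refcount = 1, .has_rhw_oplock = true };

	handle_close(&h);        /* refcount 0, close deferred       */
	handle_reopen(&h);       /* refcount 1, cached handle reused */
	handle_oplock_break(&h); /* still referenced: downgrade      */
	return 0;
}
```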
Diffstat (limited to 'fs/cifs/file.c')
| mode | file | lines |
|---|---|---|
| -rw-r--r-- | fs/cifs/file.c | 84 |

1 file changed, 82 insertions, 2 deletions
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 3d4e6e7dac1d..7e97aeabd616 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -322,9 +322,12 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
 	cfile->dentry = dget(dentry);
 	cfile->f_flags = file->f_flags;
 	cfile->invalidHandle = false;
+	cfile->oplock_break_received = false;
+	cfile->deferred_scheduled = false;
 	cfile->tlink = cifs_get_tlink(tlink);
 	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
 	INIT_WORK(&cfile->put, cifsFileInfo_put_work);
+	INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
 	mutex_init(&cfile->fh_mutex);
 	spin_lock_init(&cfile->file_info_lock);
 
@@ -565,6 +568,23 @@ int cifs_open(struct inode *inode, struct file *file)
 		file->f_op = &cifs_file_direct_ops;
 	}
 
+	spin_lock(&CIFS_I(inode)->deferred_lock);
+	/* Get the cached handle as SMB2 close is deferred */
+	rc = cifs_get_readable_path(tcon, full_path, &cfile);
+	if (rc == 0) {
+		if (file->f_flags == cfile->f_flags) {
+			file->private_data = cfile;
+			cifs_del_deferred_close(cfile);
+			spin_unlock(&CIFS_I(inode)->deferred_lock);
+			goto out;
+		} else {
+			spin_unlock(&CIFS_I(inode)->deferred_lock);
+			_cifsFileInfo_put(cfile, true, false);
+		}
+	} else {
+		spin_unlock(&CIFS_I(inode)->deferred_lock);
+	}
+
 	if (server->oplocks)
 		oplock = REQ_OPLOCK;
 	else
@@ -846,11 +866,52 @@ reopen_error_exit:
 	return rc;
 }
 
+void smb2_deferred_work_close(struct work_struct *work)
+{
+	struct cifsFileInfo *cfile = container_of(work,
+			struct cifsFileInfo, deferred.work);
+
+	spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
+	cifs_del_deferred_close(cfile);
+	cfile->deferred_scheduled = false;
+	spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
+	_cifsFileInfo_put(cfile, true, false);
+}
+
 int cifs_close(struct inode *inode, struct file *file)
 {
+	struct cifsFileInfo *cfile;
+	struct cifsInodeInfo *cinode = CIFS_I(inode);
+	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+	struct cifs_deferred_close *dclose;
+
 	if (file->private_data != NULL) {
-		_cifsFileInfo_put(file->private_data, true, false);
+		cfile = file->private_data;
 		file->private_data = NULL;
+		dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
+		if ((cinode->oplock == CIFS_CACHE_RHW_FLG) &&
+		    dclose) {
+			if (test_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags))
+				inode->i_ctime = inode->i_mtime = current_time(inode);
+			spin_lock(&cinode->deferred_lock);
+			cifs_add_deferred_close(cfile, dclose);
+			if (cfile->deferred_scheduled) {
+				mod_delayed_work(deferredclose_wq,
+						&cfile->deferred, cifs_sb->ctx->acregmax);
+			} else {
+				/* Deferred close for files */
+				queue_delayed_work(deferredclose_wq,
+					&cfile->deferred, cifs_sb->ctx->acregmax);
+				cfile->deferred_scheduled = true;
+				spin_unlock(&cinode->deferred_lock);
+				return 0;
+			}
+			spin_unlock(&cinode->deferred_lock);
+			_cifsFileInfo_put(cfile, true, false);
+		} else {
+			_cifsFileInfo_put(cfile, true, false);
+			kfree(dclose);
+		}
 	}
 
 	/* return code from the ->release op is always ignored */
@@ -1947,7 +2008,8 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
 		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
 			continue;
 		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
-			if (!open_file->invalidHandle) {
+			if ((!open_file->invalidHandle) &&
+				(!open_file->oplock_break_received)) {
 				/* found a good file */
 				/* lock it so it will not be closed on us */
 				cifsFileInfo_get(open_file);
@@ -2476,6 +2538,8 @@ retry:
 
 	if (cfile)
 		cifsFileInfo_put(cfile);
 	free_xid(xid);
+	/* Indication to update ctime and mtime as close is deferred */
+	set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags);
 	return rc;
 }
@@ -2584,6 +2648,8 @@ static int cifs_write_end(struct file *file, struct address_space *mapping,
 
 	unlock_page(page);
 	put_page(page);
+	/* Indication to update ctime and mtime as close is deferred */
+	set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags);
 
 	return rc;
 }
@@ -4744,6 +4810,8 @@ void cifs_oplock_break(struct work_struct *work)
 	struct TCP_Server_Info *server = tcon->ses->server;
 	int rc = 0;
 	bool purge_cache = false;
+	bool is_deferred = false;
+	struct cifs_deferred_close *dclose;
 
 	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
 			TASK_UNINTERRUPTIBLE);
@@ -4791,6 +4859,18 @@ oplock_break_ack:
 		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
 	}
 	_cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
+	/*
+	 * When oplock break is received and there are no active
+	 * file handles but cached, then set the flag oplock_break_received.
+	 * So, new open will not use cached handle.
+	 */
+	spin_lock(&CIFS_I(inode)->deferred_lock);
+	is_deferred = cifs_is_deferred_close(cfile, &dclose);
+	if (is_deferred) {
+		cfile->oplock_break_received = true;
+		mod_delayed_work(deferredclose_wq, &cfile->deferred, 0);
+	}
+	spin_unlock(&CIFS_I(inode)->deferred_lock);
 	cifs_done_oplock_break(cinode);
 }
 
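
The hunks above rely on names that do not appear in this diff because the view is limited to fs/cifs/file.c: cifs_add_deferred_close(), cifs_del_deferred_close(), cifs_is_deferred_close(), and the deferredclose_wq workqueue are defined outside this file. Purely as an illustration of the bookkeeping those names imply (a per-inode list of pending deferred closes keyed by file id), here is a hedged userspace sketch; none of the types or function bodies below are the kernel definitions.

```c
#include <stdbool.h>
#include <stdlib.h>

/* Stand-in for struct cifs_deferred_close: one pending server close. */
struct deferred_close {
	unsigned long long persistent_fid;
	unsigned long long volatile_fid;
	struct deferred_close *next;
};

/* Stand-in for the per-inode state (the kernel keeps a real list_head). */
struct inode_info {
	struct deferred_close *deferred_closes;  /* pending deferred closes */
};

/* Is a deferred close already recorded for this persistent file id? */
static bool is_deferred_close(struct inode_info *ci,
			      unsigned long long pfid,
			      struct deferred_close **found)
{
	for (struct deferred_close *d = ci->deferred_closes; d; d = d->next) {
		if (d->persistent_fid == pfid) {
			*found = d;
			return true;
		}
	}
	return false;
}

/* Record a pending close; if one already exists, the new entry is dropped. */
static void add_deferred_close(struct inode_info *ci,
			       struct deferred_close *dclose,
			       unsigned long long pfid,
			       unsigned long long vfid)
{
	struct deferred_close *existing;

	if (is_deferred_close(ci, pfid, &existing)) {
		free(dclose);
		return;
	}
	dclose->persistent_fid = pfid;
	dclose->volatile_fid = vfid;
	dclose->next = ci->deferred_closes;
	ci->deferred_closes = dclose;
}

/* Forget a pending close, e.g. once the handle is reused or finally closed. */
static void del_deferred_close(struct inode_info *ci, unsigned long long pfid)
{
	struct deferred_close **p = &ci->deferred_closes;

	for (; *p; p = &(*p)->next) {
		if ((*p)->persistent_fid == pfid) {
			struct deferred_close *victim = *p;

			*p = victim->next;
			free(victim);
			return;
		}
	}
}

int main(void)
{
	struct inode_info ci = { 0 };
	struct deferred_close *d = calloc(1, sizeof(*d));

	if (!d)
		return 1;
	add_deferred_close(&ci, d, 1 /* pfid */, 2 /* vfid */);
	del_deferred_close(&ci, 1);  /* e.g. the handle was reused before the timer fired */
	return 0;
}
```

Tracking the pending closes on the inode is what lets both the reopen path in cifs_open() and the oplock-break path find and cancel a deferred close under the same deferred_lock.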