author     Christian Brauner <brauner@kernel.org>    2024-02-02 12:09:26 +0100
committer  Christian Brauner <brauner@kernel.org>    2024-02-05 13:11:45 +0100
commit     c4b3ffb508a0376643578365a4caacf681c5dc9e
tree       7f85094d5f6bb45755ae613e9251afe4a66a4e8f /fs/locks.c
parent     6613476e225e090cc9aad49be7fa504e290dd33d
parent     c69ff4071935f946f1cddc59e1d36a03442ed015
Merge series 'filelock: split file leases out of struct file_lock' of https://lore.kernel.org/r/20240131-flsplit-v3-0-c6129007ee8d@kernel.org
Pull file locking series from Jeff Layton:
Long ago, file locks used to hang off of a singly-linked list in struct
inode. Because of this, when leases were added, they were added to the
same list and so they had to be tracked using the same sort of
structure.
Several years ago, we added struct file_lock_context, which allowed us
to use separate lists to track different types of file locks. Given
that, leases no longer need to be tracked using struct file_lock.
That said, a lot of the underlying infrastructure _is_ the same between
file leases and locks, so we can't completely separate everything.
This patchset first splits a group of fields used by both file locks and
leases into a new struct file_lock_core, which is then embedded in struct
file_lock. Coccinelle was then used to convert most of the callers to
deal with the move, with the remaining 25% or so converted by hand.
It then converts several internal functions in fs/locks.c to work with
struct file_lock_core. Lastly, struct file_lock is split into struct
file_lock and struct file_lease, and the lease-related APIs are converted
to take struct file_lease.
I also added a few small helpers and converted several users over to
them, which reduces the size of the per-fs conversion patches later in
the series. I experimented with other helpers as well, but they were too
awkward or too rarely used to be worthwhile.
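
For orientation, here is a simplified sketch of the layout the series
arrives at. Field lists are abridged and ordering is illustrative; the
authoritative definitions live in include/linux/filelock.h.

struct file_lock_core {
	struct file_lock_core *flc_blocker;	/* lock this request blocks on */
	struct list_head flc_list;		/* link into file_lock_context */
	struct hlist_node flc_link;		/* node in global lists */
	struct list_head flc_blocked_requests;	/* requests blocked on this lock */
	struct list_head flc_blocked_member;	/* node in a blocker's list */
	fl_owner_t flc_owner;
	unsigned int flc_flags;
	unsigned char flc_type;
	pid_t flc_pid;
	int flc_link_cpu;			/* which cpu's global list? */
	wait_queue_head_t flc_wait;
	struct file *flc_file;
};

struct file_lock {
	struct file_lock_core c;
	loff_t fl_start;
	loff_t fl_end;
	const struct file_lock_operations *fl_ops;
	const struct lock_manager_operations *fl_lmops;
	/* ... byte-range and fs-private state ... */
};

struct file_lease {
	struct file_lock_core c;
	struct fasync_struct *fl_fasync;	/* for lease break notifications */
	unsigned long fl_break_time;
	unsigned long fl_downgrade_time;
	const struct lease_manager_operations *fl_lmops;
};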
* series 'filelock: split file leases out of struct file_lock' of https://lore.kernel.org/r/20240131-flsplit-v3-0-c6129007ee8d@kernel.org: (47 commits)
filelock: split leases out of struct file_lock
filelock: remove temporary compatibility macros
smb/server: adapt to breakup of struct file_lock
smb/client: adapt to breakup of struct file_lock
ocfs2: adapt to breakup of struct file_lock
nfsd: adapt to breakup of struct file_lock
nfs: adapt to breakup of struct file_lock
lockd: adapt to breakup of struct file_lock
fuse: adapt to breakup of struct file_lock
gfs2: adapt to breakup of struct file_lock
dlm: adapt to breakup of struct file_lock
ceph: adapt to breakup of struct file_lock
afs: adapt to breakup of struct file_lock
9p: adapt to breakup of struct file_lock
filelock: convert seqfile handling to use file_lock_core
filelock: convert locks_translate_pid to take file_lock_core
filelock: convert locks_insert_lock_ctx and locks_delete_lock_ctx
filelock: convert locks_wake_up_blocks to take a file_lock_core pointer
filelock: make assign_type helper take a file_lock_core pointer
filelock: reorganize locks_delete_block and __locks_insert_block
...
Signed-off-by: Christian Brauner <brauner@kernel.org>
Diffstat (limited to 'fs/locks.c')
-rw-r--r--  fs/locks.c  851
1 file changed, 459 insertions(+), 392 deletions(-)
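
The diff opens by introducing container_of()-based accessors that recover
the containing lock or lease from an embedded struct file_lock_core. A
minimal, self-contained illustration of that pattern in plain C (the names
here are hypothetical, not the kernel's):

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's container_of() helper. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* A shared "core" embedded in two outer types, mirroring how
 * struct file_lock and struct file_lease both embed file_lock_core. */
struct core  { unsigned int flags; };	/* bit 0x20: "is a lease" (arbitrary) */
struct lock  { struct core c; long start, end; };
struct lease { struct core c; unsigned long break_time; };

static struct lock *to_lock(struct core *c)
{
	return container_of(c, struct lock, c);
}

static struct lease *to_lease(struct core *c)
{
	return container_of(c, struct lease, c);
}

int main(void)
{
	struct lease ls = { .c = { .flags = 0x20 }, .break_time = 45 };
	struct core *c = &ls.c;		/* code that only sees the core */

	/* Flags on the core say which outer type to recover. */
	if (c->flags & 0x20)
		printf("lease, break_time=%lu\n", to_lease(c)->break_time);
	else
		printf("lock, range=%ld..%ld\n", to_lock(c)->start, to_lock(c)->end);
	return 0;
}

locks_dispose_list() below relies on the same idea: it tests flc_flags
for FL_LEASE|FL_DELEG|FL_LAYOUT to decide whether to hand a given
file_lock_core to locks_free_lease() or locks_free_lock().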
diff --git a/fs/locks.c b/fs/locks.c index cc7c117ee192..33c7f4a8c729 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -48,7 +48,6 @@ * children. * */ - #include <linux/capability.h> #include <linux/file.h> #include <linux/fdtable.h> @@ -70,24 +69,28 @@ #include <linux/uaccess.h> -#define IS_POSIX(fl) (fl->fl_flags & FL_POSIX) -#define IS_FLOCK(fl) (fl->fl_flags & FL_FLOCK) -#define IS_LEASE(fl) (fl->fl_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT)) -#define IS_OFDLCK(fl) (fl->fl_flags & FL_OFDLCK) -#define IS_REMOTELCK(fl) (fl->fl_pid <= 0) +static struct file_lock *file_lock(struct file_lock_core *flc) +{ + return container_of(flc, struct file_lock, c); +} + +static struct file_lease *file_lease(struct file_lock_core *flc) +{ + return container_of(flc, struct file_lease, c); +} -static bool lease_breaking(struct file_lock *fl) +static bool lease_breaking(struct file_lease *fl) { - return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING); + return fl->c.flc_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING); } -static int target_leasetype(struct file_lock *fl) +static int target_leasetype(struct file_lease *fl) { - if (fl->fl_flags & FL_UNLOCK_PENDING) + if (fl->c.flc_flags & FL_UNLOCK_PENDING) return F_UNLCK; - if (fl->fl_flags & FL_DOWNGRADE_PENDING) + if (fl->c.flc_flags & FL_DOWNGRADE_PENDING) return F_RDLCK; - return fl->fl_type; + return fl->c.flc_type; } static int leases_enable = 1; @@ -168,6 +171,7 @@ static DEFINE_SPINLOCK(blocked_lock_lock); static struct kmem_cache *flctx_cache __ro_after_init; static struct kmem_cache *filelock_cache __ro_after_init; +static struct kmem_cache *filelease_cache __ro_after_init; static struct file_lock_context * locks_get_lock_context(struct inode *inode, int type) @@ -204,11 +208,12 @@ out: static void locks_dump_ctx_list(struct list_head *list, char *list_type) { - struct file_lock *fl; + struct file_lock_core *flc; - list_for_each_entry(fl, list, fl_list) { - pr_warn("%s: fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n", list_type, fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid); - } + list_for_each_entry(flc, list, flc_list) + pr_warn("%s: fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n", + list_type, flc->flc_owner, flc->flc_flags, + flc->flc_type, flc->flc_pid); } static void @@ -229,19 +234,19 @@ locks_check_ctx_lists(struct inode *inode) } static void -locks_check_ctx_file_list(struct file *filp, struct list_head *list, - char *list_type) +locks_check_ctx_file_list(struct file *filp, struct list_head *list, char *list_type) { - struct file_lock *fl; + struct file_lock_core *flc; struct inode *inode = file_inode(filp); - list_for_each_entry(fl, list, fl_list) - if (fl->fl_file == filp) + list_for_each_entry(flc, list, flc_list) + if (flc->flc_file == filp) pr_warn("Leaked %s lock on dev=0x%x:0x%x ino=0x%lx " " fl_owner=%p fl_flags=0x%x fl_type=0x%x fl_pid=%u\n", list_type, MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev), inode->i_ino, - fl->fl_owner, fl->fl_flags, fl->fl_type, fl->fl_pid); + flc->flc_owner, flc->flc_flags, + flc->flc_type, flc->flc_pid); } void @@ -255,13 +260,13 @@ locks_free_lock_context(struct inode *inode) } } -static void locks_init_lock_heads(struct file_lock *fl) +static void locks_init_lock_heads(struct file_lock_core *flc) { - INIT_HLIST_NODE(&fl->fl_link); - INIT_LIST_HEAD(&fl->fl_list); - INIT_LIST_HEAD(&fl->fl_blocked_requests); - INIT_LIST_HEAD(&fl->fl_blocked_member); - init_waitqueue_head(&fl->fl_wait); + INIT_HLIST_NODE(&flc->flc_link); + INIT_LIST_HEAD(&flc->flc_list); + 
INIT_LIST_HEAD(&flc->flc_blocked_requests); + INIT_LIST_HEAD(&flc->flc_blocked_member); + init_waitqueue_head(&flc->flc_wait); } /* Allocate an empty lock structure. */ @@ -270,19 +275,33 @@ struct file_lock *locks_alloc_lock(void) struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL); if (fl) - locks_init_lock_heads(fl); + locks_init_lock_heads(&fl->c); return fl; } EXPORT_SYMBOL_GPL(locks_alloc_lock); +/* Allocate an empty lock structure. */ +struct file_lease *locks_alloc_lease(void) +{ + struct file_lease *fl = kmem_cache_zalloc(filelease_cache, GFP_KERNEL); + + if (fl) + locks_init_lock_heads(&fl->c); + + return fl; +} +EXPORT_SYMBOL_GPL(locks_alloc_lease); + void locks_release_private(struct file_lock *fl) { - BUG_ON(waitqueue_active(&fl->fl_wait)); - BUG_ON(!list_empty(&fl->fl_list)); - BUG_ON(!list_empty(&fl->fl_blocked_requests)); - BUG_ON(!list_empty(&fl->fl_blocked_member)); - BUG_ON(!hlist_unhashed(&fl->fl_link)); + struct file_lock_core *flc = &fl->c; + + BUG_ON(waitqueue_active(&flc->flc_wait)); + BUG_ON(!list_empty(&flc->flc_list)); + BUG_ON(!list_empty(&flc->flc_blocked_requests)); + BUG_ON(!list_empty(&flc->flc_blocked_member)); + BUG_ON(!hlist_unhashed(&flc->flc_link)); if (fl->fl_ops) { if (fl->fl_ops->fl_release_private) @@ -292,8 +311,8 @@ void locks_release_private(struct file_lock *fl) if (fl->fl_lmops) { if (fl->fl_lmops->lm_put_owner) { - fl->fl_lmops->lm_put_owner(fl->fl_owner); - fl->fl_owner = NULL; + fl->fl_lmops->lm_put_owner(flc->flc_owner); + flc->flc_owner = NULL; } fl->fl_lmops = NULL; } @@ -309,16 +328,15 @@ EXPORT_SYMBOL_GPL(locks_release_private); * %true: @owner has at least one blocker * %false: @owner has no blockers */ -bool locks_owner_has_blockers(struct file_lock_context *flctx, - fl_owner_t owner) +bool locks_owner_has_blockers(struct file_lock_context *flctx, fl_owner_t owner) { - struct file_lock *fl; + struct file_lock_core *flc; spin_lock(&flctx->flc_lock); - list_for_each_entry(fl, &flctx->flc_posix, fl_list) { - if (fl->fl_owner != owner) + list_for_each_entry(flc, &flctx->flc_posix, flc_list) { + if (flc->flc_owner != owner) continue; - if (!list_empty(&fl->fl_blocked_requests)) { + if (!list_empty(&flc->flc_blocked_requests)) { spin_unlock(&flctx->flc_lock); return true; } @@ -336,35 +354,52 @@ void locks_free_lock(struct file_lock *fl) } EXPORT_SYMBOL(locks_free_lock); +/* Free a lease which is not in use. */ +void locks_free_lease(struct file_lease *fl) +{ + kmem_cache_free(filelease_cache, fl); +} +EXPORT_SYMBOL(locks_free_lease); + static void locks_dispose_list(struct list_head *dispose) { - struct file_lock *fl; + struct file_lock_core *flc; while (!list_empty(dispose)) { - fl = list_first_entry(dispose, struct file_lock, fl_list); - list_del_init(&fl->fl_list); - locks_free_lock(fl); + flc = list_first_entry(dispose, struct file_lock_core, flc_list); + list_del_init(&flc->flc_list); + if (flc->flc_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT)) + locks_free_lease(file_lease(flc)); + else + locks_free_lock(file_lock(flc)); } } void locks_init_lock(struct file_lock *fl) { memset(fl, 0, sizeof(struct file_lock)); - locks_init_lock_heads(fl); + locks_init_lock_heads(&fl->c); } EXPORT_SYMBOL(locks_init_lock); +void locks_init_lease(struct file_lease *fl) +{ + memset(fl, 0, sizeof(*fl)); + locks_init_lock_heads(&fl->c); +} +EXPORT_SYMBOL(locks_init_lease); + /* * Initialize a new lock from an existing file_lock structure. 
*/ void locks_copy_conflock(struct file_lock *new, struct file_lock *fl) { - new->fl_owner = fl->fl_owner; - new->fl_pid = fl->fl_pid; - new->fl_file = NULL; - new->fl_flags = fl->fl_flags; - new->fl_type = fl->fl_type; + new->c.flc_owner = fl->c.flc_owner; + new->c.flc_pid = fl->c.flc_pid; + new->c.flc_file = NULL; + new->c.flc_flags = fl->c.flc_flags; + new->c.flc_type = fl->c.flc_type; new->fl_start = fl->fl_start; new->fl_end = fl->fl_end; new->fl_lmops = fl->fl_lmops; @@ -372,7 +407,7 @@ void locks_copy_conflock(struct file_lock *new, struct file_lock *fl) if (fl->fl_lmops) { if (fl->fl_lmops->lm_get_owner) - fl->fl_lmops->lm_get_owner(fl->fl_owner); + fl->fl_lmops->lm_get_owner(fl->c.flc_owner); } } EXPORT_SYMBOL(locks_copy_conflock); @@ -384,7 +419,7 @@ void locks_copy_lock(struct file_lock *new, struct file_lock *fl) locks_copy_conflock(new, fl); - new->fl_file = fl->fl_file; + new->c.flc_file = fl->c.flc_file; new->fl_ops = fl->fl_ops; if (fl->fl_ops) { @@ -400,15 +435,17 @@ static void locks_move_blocks(struct file_lock *new, struct file_lock *fl) /* * As ctx->flc_lock is held, new requests cannot be added to - * ->fl_blocked_requests, so we don't need a lock to check if it + * ->flc_blocked_requests, so we don't need a lock to check if it * is empty. */ - if (list_empty(&fl->fl_blocked_requests)) + if (list_empty(&fl->c.flc_blocked_requests)) return; spin_lock(&blocked_lock_lock); - list_splice_init(&fl->fl_blocked_requests, &new->fl_blocked_requests); - list_for_each_entry(f, &new->fl_blocked_requests, fl_blocked_member) - f->fl_blocker = new; + list_splice_init(&fl->c.flc_blocked_requests, + &new->c.flc_blocked_requests); + list_for_each_entry(f, &new->c.flc_blocked_requests, + c.flc_blocked_member) + f->c.flc_blocker = &new->c; spin_unlock(&blocked_lock_lock); } @@ -429,21 +466,21 @@ static void flock_make_lock(struct file *filp, struct file_lock *fl, int type) { locks_init_lock(fl); - fl->fl_file = filp; - fl->fl_owner = filp; - fl->fl_pid = current->tgid; - fl->fl_flags = FL_FLOCK; - fl->fl_type = type; + fl->c.flc_file = filp; + fl->c.flc_owner = filp; + fl->c.flc_pid = current->tgid; + fl->c.flc_flags = FL_FLOCK; + fl->c.flc_type = type; fl->fl_end = OFFSET_MAX; } -static int assign_type(struct file_lock *fl, int type) +static int assign_type(struct file_lock_core *flc, int type) { switch (type) { case F_RDLCK: case F_WRLCK: case F_UNLCK: - fl->fl_type = type; + flc->flc_type = type; break; default: return -EINVAL; @@ -488,14 +525,14 @@ static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl, } else fl->fl_end = OFFSET_MAX; - fl->fl_owner = current->files; - fl->fl_pid = current->tgid; - fl->fl_file = filp; - fl->fl_flags = FL_POSIX; + fl->c.flc_owner = current->files; + fl->c.flc_pid = current->tgid; + fl->c.flc_file = filp; + fl->c.flc_flags = FL_POSIX; fl->fl_ops = NULL; fl->fl_lmops = NULL; - return assign_type(fl, l->l_type); + return assign_type(&fl->c, l->l_type); } /* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX @@ -516,16 +553,16 @@ static int flock_to_posix_lock(struct file *filp, struct file_lock *fl, /* default lease lock manager operations */ static bool -lease_break_callback(struct file_lock *fl) +lease_break_callback(struct file_lease *fl) { kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG); return false; } static void -lease_setup(struct file_lock *fl, void **priv) +lease_setup(struct file_lease *fl, void **priv) { - struct file *filp = fl->fl_file; + struct file *filp = fl->c.flc_file; struct fasync_struct *fa = 
*priv; /* @@ -539,7 +576,7 @@ lease_setup(struct file_lock *fl, void **priv) __f_setown(filp, task_pid(current), PIDTYPE_TGID, 0); } -static const struct lock_manager_operations lease_manager_ops = { +static const struct lease_manager_operations lease_manager_ops = { .lm_break = lease_break_callback, .lm_change = lease_modify, .lm_setup = lease_setup, @@ -548,27 +585,24 @@ static const struct lock_manager_operations lease_manager_ops = { /* * Initialize a lease, use the default lock manager operations */ -static int lease_init(struct file *filp, int type, struct file_lock *fl) +static int lease_init(struct file *filp, int type, struct file_lease *fl) { - if (assign_type(fl, type) != 0) + if (assign_type(&fl->c, type) != 0) return -EINVAL; - fl->fl_owner = filp; - fl->fl_pid = current->tgid; + fl->c.flc_owner = filp; + fl->c.flc_pid = current->tgid; - fl->fl_file = filp; - fl->fl_flags = FL_LEASE; - fl->fl_start = 0; - fl->fl_end = OFFSET_MAX; - fl->fl_ops = NULL; + fl->c.flc_file = filp; + fl->c.flc_flags = FL_LEASE; fl->fl_lmops = &lease_manager_ops; return 0; } /* Allocate a file_lock initialised to this type of lease */ -static struct file_lock *lease_alloc(struct file *filp, int type) +static struct file_lease *lease_alloc(struct file *filp, int type) { - struct file_lock *fl = locks_alloc_lock(); + struct file_lease *fl = locks_alloc_lease(); int error = -ENOMEM; if (fl == NULL) @@ -576,7 +610,7 @@ static struct file_lock *lease_alloc(struct file *filp, int type) error = lease_init(filp, type, fl); if (error) { - locks_free_lock(fl); + locks_free_lease(fl); return ERR_PTR(error); } return fl; @@ -593,26 +627,26 @@ static inline int locks_overlap(struct file_lock *fl1, struct file_lock *fl2) /* * Check whether two locks have the same owner. */ -static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2) +static int posix_same_owner(struct file_lock_core *fl1, struct file_lock_core *fl2) { - return fl1->fl_owner == fl2->fl_owner; + return fl1->flc_owner == fl2->flc_owner; } /* Must be called with the flc_lock held! */ -static void locks_insert_global_locks(struct file_lock *fl) +static void locks_insert_global_locks(struct file_lock_core *flc) { struct file_lock_list_struct *fll = this_cpu_ptr(&file_lock_list); percpu_rwsem_assert_held(&file_rwsem); spin_lock(&fll->lock); - fl->fl_link_cpu = smp_processor_id(); - hlist_add_head(&fl->fl_link, &fll->hlist); + flc->flc_link_cpu = smp_processor_id(); + hlist_add_head(&flc->flc_link, &fll->hlist); spin_unlock(&fll->lock); } /* Must be called with the flc_lock held! */ -static void locks_delete_global_locks(struct file_lock *fl) +static void locks_delete_global_locks(struct file_lock_core *flc) { struct file_lock_list_struct *fll; @@ -623,33 +657,33 @@ static void locks_delete_global_locks(struct file_lock *fl) * is done while holding the flc_lock, and new insertions into the list * also require that it be held. 
*/ - if (hlist_unhashed(&fl->fl_link)) + if (hlist_unhashed(&flc->flc_link)) return; - fll = per_cpu_ptr(&file_lock_list, fl->fl_link_cpu); + fll = per_cpu_ptr(&file_lock_list, flc->flc_link_cpu); spin_lock(&fll->lock); - hlist_del_init(&fl->fl_link); + hlist_del_init(&flc->flc_link); spin_unlock(&fll->lock); } static unsigned long -posix_owner_key(struct file_lock *fl) +posix_owner_key(struct file_lock_core *flc) { - return (unsigned long)fl->fl_owner; + return (unsigned long) flc->flc_owner; } -static void locks_insert_global_blocked(struct file_lock *waiter) +static void locks_insert_global_blocked(struct file_lock_core *waiter) { lockdep_assert_held(&blocked_lock_lock); - hash_add(blocked_hash, &waiter->fl_link, posix_owner_key(waiter)); + hash_add(blocked_hash, &waiter->flc_link, posix_owner_key(waiter)); } -static void locks_delete_global_blocked(struct file_lock *waiter) +static void locks_delete_global_blocked(struct file_lock_core *waiter) { lockdep_assert_held(&blocked_lock_lock); - hash_del(&waiter->fl_link); + hash_del(&waiter->flc_link); } /* Remove waiter from blocker's block list. @@ -657,41 +691,39 @@ static void locks_delete_global_blocked(struct file_lock *waiter) * * Must be called with blocked_lock_lock held. */ -static void __locks_delete_block(struct file_lock *waiter) +static void __locks_unlink_block(struct file_lock_core *waiter) { locks_delete_global_blocked(waiter); - list_del_init(&waiter->fl_blocked_member); + list_del_init(&waiter->flc_blocked_member); } -static void __locks_wake_up_blocks(struct file_lock *blocker) +static void __locks_wake_up_blocks(struct file_lock_core *blocker) { - while (!list_empty(&blocker->fl_blocked_requests)) { - struct file_lock *waiter; + while (!list_empty(&blocker->flc_blocked_requests)) { + struct file_lock_core *waiter; + struct file_lock *fl; + + waiter = list_first_entry(&blocker->flc_blocked_requests, + struct file_lock_core, flc_blocked_member); - waiter = list_first_entry(&blocker->fl_blocked_requests, - struct file_lock, fl_blocked_member); - __locks_delete_block(waiter); - if (waiter->fl_lmops && waiter->fl_lmops->lm_notify) - waiter->fl_lmops->lm_notify(waiter); + fl = file_lock(waiter); + __locks_unlink_block(waiter); + if ((waiter->flc_flags & (FL_POSIX | FL_FLOCK)) && + fl->fl_lmops && fl->fl_lmops->lm_notify) + fl->fl_lmops->lm_notify(fl); else - wake_up(&waiter->fl_wait); + locks_wake_up(fl); /* - * The setting of fl_blocker to NULL marks the "done" + * The setting of flc_blocker to NULL marks the "done" * point in deleting a block. Paired with acquire at the top * of locks_delete_block(). */ - smp_store_release(&waiter->fl_blocker, NULL); + smp_store_release(&waiter->flc_blocker, NULL); } } -/** - * locks_delete_block - stop waiting for a file lock - * @waiter: the lock which was waiting - * - * lockd/nfsd need to disconnect the lock while working on it. - */ -int locks_delete_block(struct file_lock *waiter) +static int __locks_delete_block(struct file_lock_core *waiter) { int status = -ENOENT; @@ -716,24 +748,35 @@ int locks_delete_block(struct file_lock *waiter) * no new locks can be inserted into its fl_blocked_requests list, and * can avoid doing anything further if the list is empty. 
*/ - if (!smp_load_acquire(&waiter->fl_blocker) && - list_empty(&waiter->fl_blocked_requests)) + if (!smp_load_acquire(&waiter->flc_blocker) && + list_empty(&waiter->flc_blocked_requests)) return status; spin_lock(&blocked_lock_lock); - if (waiter->fl_blocker) + if (waiter->flc_blocker) status = 0; __locks_wake_up_blocks(waiter); - __locks_delete_block(waiter); + __locks_unlink_block(waiter); /* * The setting of fl_blocker to NULL marks the "done" point in deleting * a block. Paired with acquire at the top of this function. */ - smp_store_release(&waiter->fl_blocker, NULL); + smp_store_release(&waiter->flc_blocker, NULL); spin_unlock(&blocked_lock_lock); return status; } + +/** + * locks_delete_block - stop waiting for a file lock + * @waiter: the lock which was waiting + * + * lockd/nfsd need to disconnect the lock while working on it. + */ +int locks_delete_block(struct file_lock *waiter) +{ + return __locks_delete_block(&waiter->c); +} EXPORT_SYMBOL(locks_delete_block); /* Insert waiter into blocker's block list. @@ -751,26 +794,28 @@ EXPORT_SYMBOL(locks_delete_block); * waiters, and add beneath any waiter that blocks the new waiter. * Thus wakeups don't happen until needed. */ -static void __locks_insert_block(struct file_lock *blocker, - struct file_lock *waiter, - bool conflict(struct file_lock *, - struct file_lock *)) +static void __locks_insert_block(struct file_lock_core *blocker, + struct file_lock_core *waiter, + bool conflict(struct file_lock_core *, + struct file_lock_core *)) { - struct file_lock *fl; - BUG_ON(!list_empty(&waiter->fl_blocked_member)); + struct file_lock_core *flc; + BUG_ON(!list_empty(&waiter->flc_blocked_member)); new_blocker: - list_for_each_entry(fl, &blocker->fl_blocked_requests, fl_blocked_member) - if (conflict(fl, waiter)) { - blocker = fl; + list_for_each_entry(flc, &blocker->flc_blocked_requests, flc_blocked_member) + if (conflict(flc, waiter)) { + blocker = flc; goto new_blocker; } - waiter->fl_blocker = blocker; - list_add_tail(&waiter->fl_blocked_member, &blocker->fl_blocked_requests); - if (IS_POSIX(blocker) && !IS_OFDLCK(blocker)) + waiter->flc_blocker = blocker; + list_add_tail(&waiter->flc_blocked_member, + &blocker->flc_blocked_requests); + + if ((blocker->flc_flags & (FL_POSIX|FL_OFDLCK)) == (FL_POSIX|FL_OFDLCK)) locks_insert_global_blocked(waiter); - /* The requests in waiter->fl_blocked are known to conflict with + /* The requests in waiter->flc_blocked are known to conflict with * waiter, but might not conflict with blocker, or the requests * and lock which block it. So they all need to be woken. */ @@ -778,10 +823,10 @@ new_blocker: } /* Must be called with flc_lock held. */ -static void locks_insert_block(struct file_lock *blocker, - struct file_lock *waiter, - bool conflict(struct file_lock *, - struct file_lock *)) +static void locks_insert_block(struct file_lock_core *blocker, + struct file_lock_core *waiter, + bool conflict(struct file_lock_core *, + struct file_lock_core *)) { spin_lock(&blocked_lock_lock); __locks_insert_block(blocker, waiter, conflict); @@ -793,7 +838,7 @@ static void locks_insert_block(struct file_lock *blocker, * * Must be called with the inode->flc_lock held! */ -static void locks_wake_up_blocks(struct file_lock *blocker) +static void locks_wake_up_blocks(struct file_lock_core *blocker) { /* * Avoid taking global lock if list is empty. 
This is safe since new @@ -802,7 +847,7 @@ static void locks_wake_up_blocks(struct file_lock *blocker) * fl_blocked_requests list does not require the flc_lock, so we must * recheck list_empty() after acquiring the blocked_lock_lock. */ - if (list_empty(&blocker->fl_blocked_requests)) + if (list_empty(&blocker->flc_blocked_requests)) return; spin_lock(&blocked_lock_lock); @@ -811,39 +856,39 @@ static void locks_wake_up_blocks(struct file_lock *blocker) } static void -locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before) +locks_insert_lock_ctx(struct file_lock_core *fl, struct list_head *before) { - list_add_tail(&fl->fl_list, before); + list_add_tail(&fl->flc_list, before); locks_insert_global_locks(fl); } static void -locks_unlink_lock_ctx(struct file_lock *fl) +locks_unlink_lock_ctx(struct file_lock_core *fl) { locks_delete_global_locks(fl); - list_del_init(&fl->fl_list); + list_del_init(&fl->flc_list); locks_wake_up_blocks(fl); } static void -locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose) +locks_delete_lock_ctx(struct file_lock_core *fl, struct list_head *dispose) { locks_unlink_lock_ctx(fl); if (dispose) - list_add(&fl->fl_list, dispose); + list_add(&fl->flc_list, dispose); else - locks_free_lock(fl); + locks_free_lock(file_lock(fl)); } /* Determine if lock sys_fl blocks lock caller_fl. Common functionality * checks for shared/exclusive status of overlapping locks. */ -static bool locks_conflict(struct file_lock *caller_fl, - struct file_lock *sys_fl) +static bool locks_conflict(struct file_lock_core *caller_flc, + struct file_lock_core *sys_flc) { - if (sys_fl->fl_type == F_WRLCK) + if (sys_flc->flc_type == F_WRLCK) return true; - if (caller_fl->fl_type == F_WRLCK) + if (caller_flc->flc_type == F_WRLCK) return true; return false; } @@ -851,20 +896,23 @@ static bool locks_conflict(struct file_lock *caller_fl, /* Determine if lock sys_fl blocks lock caller_fl. POSIX specific * checking before calling the locks_conflict(). */ -static bool posix_locks_conflict(struct file_lock *caller_fl, - struct file_lock *sys_fl) +static bool posix_locks_conflict(struct file_lock_core *caller_flc, + struct file_lock_core *sys_flc) { + struct file_lock *caller_fl = file_lock(caller_flc); + struct file_lock *sys_fl = file_lock(sys_flc); + /* POSIX locks owned by the same process do not conflict with * each other. */ - if (posix_same_owner(caller_fl, sys_fl)) + if (posix_same_owner(caller_flc, sys_flc)) return false; /* Check whether they overlap */ if (!locks_overlap(caller_fl, sys_fl)) return false; - return locks_conflict(caller_fl, sys_fl); + return locks_conflict(caller_flc, sys_flc); } /* Determine if lock sys_fl blocks lock caller_fl. Used on xx_GETLK @@ -873,28 +921,31 @@ static bool posix_locks_conflict(struct file_lock *caller_fl, static bool posix_test_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl) { + struct file_lock_core *caller = &caller_fl->c; + struct file_lock_core *sys = &sys_fl->c; + /* F_UNLCK checks any locks on the same fd. */ - if (caller_fl->fl_type == F_UNLCK) { - if (!posix_same_owner(caller_fl, sys_fl)) + if (lock_is_unlock(caller_fl)) { + if (!posix_same_owner(caller, sys)) return false; return locks_overlap(caller_fl, sys_fl); } - return posix_locks_conflict(caller_fl, sys_fl); + return posix_locks_conflict(caller, sys); } /* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific * checking before calling the locks_conflict(). 
*/ -static bool flock_locks_conflict(struct file_lock *caller_fl, - struct file_lock *sys_fl) +static bool flock_locks_conflict(struct file_lock_core *caller_flc, + struct file_lock_core *sys_flc) { /* FLOCK locks referring to the same filp do not conflict with * each other. */ - if (caller_fl->fl_file == sys_fl->fl_file) + if (caller_flc->flc_file == sys_flc->flc_file) return false; - return locks_conflict(caller_fl, sys_fl); + return locks_conflict(caller_flc, sys_flc); } void @@ -908,13 +959,13 @@ posix_test_lock(struct file *filp, struct file_lock *fl) ctx = locks_inode_context(inode); if (!ctx || list_empty_careful(&ctx->flc_posix)) { - fl->fl_type = F_UNLCK; + fl->c.flc_type = F_UNLCK; return; } retry: spin_lock(&ctx->flc_lock); - list_for_each_entry(cfl, &ctx->flc_posix, fl_list) { + list_for_each_entry(cfl, &ctx->flc_posix, c.flc_list) { if (!posix_test_locks_conflict(fl, cfl)) continue; if (cfl->fl_lmops && cfl->fl_lmops->lm_lock_expirable @@ -930,7 +981,7 @@ retry: locks_copy_conflock(fl, cfl); goto out; } - fl->fl_type = F_UNLCK; + fl->c.flc_type = F_UNLCK; out: spin_unlock(&ctx->flc_lock); return; @@ -972,25 +1023,27 @@ EXPORT_SYMBOL(posix_test_lock); #define MAX_DEADLK_ITERATIONS 10 -/* Find a lock that the owner of the given block_fl is blocking on. */ -static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl) +/* Find a lock that the owner of the given @blocker is blocking on. */ +static struct file_lock_core *what_owner_is_waiting_for(struct file_lock_core *blocker) { - struct file_lock *fl; + struct file_lock_core *flc; - hash_for_each_possible(blocked_hash, fl, fl_link, posix_owner_key(block_fl)) { - if (posix_same_owner(fl, block_fl)) { - while (fl->fl_blocker) - fl = fl->fl_blocker; - return fl; + hash_for_each_possible(blocked_hash, flc, flc_link, posix_owner_key(blocker)) { + if (posix_same_owner(flc, blocker)) { + while (flc->flc_blocker) + flc = flc->flc_blocker; + return flc; } } return NULL; } /* Must be called with the blocked_lock_lock held! */ -static int posix_locks_deadlock(struct file_lock *caller_fl, - struct file_lock *block_fl) +static bool posix_locks_deadlock(struct file_lock *caller_fl, + struct file_lock *block_fl) { + struct file_lock_core *caller = &caller_fl->c; + struct file_lock_core *blocker = &block_fl->c; int i = 0; lockdep_assert_held(&blocked_lock_lock); @@ -999,16 +1052,16 @@ static int posix_locks_deadlock(struct file_lock *caller_fl, * This deadlock detector can't reasonably detect deadlocks with * FL_OFDLCK locks, since they aren't owned by a process, per-se. */ - if (IS_OFDLCK(caller_fl)) - return 0; + if (caller->flc_flags & FL_OFDLCK) + return false; - while ((block_fl = what_owner_is_waiting_for(block_fl))) { + while ((blocker = what_owner_is_waiting_for(blocker))) { if (i++ > MAX_DEADLK_ITERATIONS) - return 0; - if (posix_same_owner(caller_fl, block_fl)) - return 1; + return false; + if (posix_same_owner(caller, blocker)) + return true; } - return 0; + return false; } /* Try to create a FLOCK lock on filp. We always insert new FLOCK locks @@ -1027,14 +1080,14 @@ static int flock_lock_inode(struct inode *inode, struct file_lock *request) bool found = false; LIST_HEAD(dispose); - ctx = locks_get_lock_context(inode, request->fl_type); + ctx = locks_get_lock_context(inode, request->c.flc_type); if (!ctx) { - if (request->fl_type != F_UNLCK) + if (request->c.flc_type != F_UNLCK) return -ENOMEM; - return (request->fl_flags & FL_EXISTS) ? -ENOENT : 0; + return (request->c.flc_flags & FL_EXISTS) ? 
-ENOENT : 0; } - if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) { + if (!(request->c.flc_flags & FL_ACCESS) && (request->c.flc_type != F_UNLCK)) { new_fl = locks_alloc_lock(); if (!new_fl) return -ENOMEM; @@ -1042,41 +1095,41 @@ static int flock_lock_inode(struct inode *inode, struct file_lock *request) percpu_down_read(&file_rwsem); spin_lock(&ctx->flc_lock); - if (request->fl_flags & FL_ACCESS) + if (request->c.flc_flags & FL_ACCESS) goto find_conflict; - list_for_each_entry(fl, &ctx->flc_flock, fl_list) { - if (request->fl_file != fl->fl_file) + list_for_each_entry(fl, &ctx->flc_flock, c.flc_list) { + if (request->c.flc_file != fl->c.flc_file) continue; - if (request->fl_type == fl->fl_type) + if (request->c.flc_type == fl->c.flc_type) goto out; found = true; - locks_delete_lock_ctx(fl, &dispose); + locks_delete_lock_ctx(&fl->c, &dispose); break; } - if (request->fl_type == F_UNLCK) { - if ((request->fl_flags & FL_EXISTS) && !found) + if (lock_is_unlock(request)) { + if ((request->c.flc_flags & FL_EXISTS) && !found) error = -ENOENT; goto out; } find_conflict: - list_for_each_entry(fl, &ctx->flc_flock, fl_list) { - if (!flock_locks_conflict(request, fl)) + list_for_each_entry(fl, &ctx->flc_flock, c.flc_list) { + if (!flock_locks_conflict(&request->c, &fl->c)) continue; error = -EAGAIN; - if (!(request->fl_flags & FL_SLEEP)) + if (!(request->c.flc_flags & FL_SLEEP)) goto out; error = FILE_LOCK_DEFERRED; - locks_insert_block(fl, request, flock_locks_conflict); + locks_insert_block(&fl->c, &request->c, flock_locks_conflict); goto out; } - if (request->fl_flags & FL_ACCESS) + if (request->c.flc_flags & FL_ACCESS) goto out; locks_copy_lock(new_fl, request); locks_move_blocks(new_fl, request); - locks_insert_lock_ctx(new_fl, &ctx->flc_flock); + locks_insert_lock_ctx(&new_fl->c, &ctx->flc_flock); new_fl = NULL; error = 0; @@ -1105,9 +1158,9 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request, void *owner; void (*func)(void); - ctx = locks_get_lock_context(inode, request->fl_type); + ctx = locks_get_lock_context(inode, request->c.flc_type); if (!ctx) - return (request->fl_type == F_UNLCK) ? 0 : -ENOMEM; + return lock_is_unlock(request) ? 0 : -ENOMEM; /* * We may need two file_lock structures for this operation, @@ -1115,8 +1168,8 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request, * * In some cases we can be sure, that no new locks will be needed */ - if (!(request->fl_flags & FL_ACCESS) && - (request->fl_type != F_UNLCK || + if (!(request->c.flc_flags & FL_ACCESS) && + (request->c.flc_type != F_UNLCK || request->fl_start != 0 || request->fl_end != OFFSET_MAX)) { new_fl = locks_alloc_lock(); new_fl2 = locks_alloc_lock(); @@ -1130,9 +1183,9 @@ retry: * there are any, either return error or put the request on the * blocker's list of waiters and the global blocked_hash. 
*/ - if (request->fl_type != F_UNLCK) { - list_for_each_entry(fl, &ctx->flc_posix, fl_list) { - if (!posix_locks_conflict(request, fl)) + if (request->c.flc_type != F_UNLCK) { + list_for_each_entry(fl, &ctx->flc_posix, c.flc_list) { + if (!posix_locks_conflict(&request->c, &fl->c)) continue; if (fl->fl_lmops && fl->fl_lmops->lm_lock_expirable && (*fl->fl_lmops->lm_lock_expirable)(fl)) { @@ -1148,7 +1201,7 @@ retry: if (conflock) locks_copy_conflock(conflock, fl); error = -EAGAIN; - if (!(request->fl_flags & FL_SLEEP)) + if (!(request->c.flc_flags & FL_SLEEP)) goto out; /* * Deadlock detection and insertion into the blocked @@ -1160,10 +1213,10 @@ retry: * Ensure that we don't find any locks blocked on this * request during deadlock detection. */ - __locks_wake_up_blocks(request); + __locks_wake_up_blocks(&request->c); if (likely(!posix_locks_deadlock(request, fl))) { error = FILE_LOCK_DEFERRED; - __locks_insert_block(fl, request, + __locks_insert_block(&fl->c, &request->c, posix_locks_conflict); } spin_unlock(&blocked_lock_lock); @@ -1173,22 +1226,22 @@ retry: /* If we're just looking for a conflict, we're done. */ error = 0; - if (request->fl_flags & FL_ACCESS) + if (request->c.flc_flags & FL_ACCESS) goto out; /* Find the first old lock with the same owner as the new lock */ - list_for_each_entry(fl, &ctx->flc_posix, fl_list) { - if (posix_same_owner(request, fl)) + list_for_each_entry(fl, &ctx->flc_posix, c.flc_list) { + if (posix_same_owner(&request->c, &fl->c)) break; } /* Process locks with this owner. */ - list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, fl_list) { - if (!posix_same_owner(request, fl)) + list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, c.flc_list) { + if (!posix_same_owner(&request->c, &fl->c)) break; /* Detect adjacent or overlapping regions (if same lock type) */ - if (request->fl_type == fl->fl_type) { + if (request->c.flc_type == fl->c.flc_type) { /* In all comparisons of start vs end, use * "start - 1" rather than "end + 1". If end * is OFFSET_MAX, end + 1 will become negative. @@ -1215,7 +1268,7 @@ retry: else request->fl_end = fl->fl_end; if (added) { - locks_delete_lock_ctx(fl, &dispose); + locks_delete_lock_ctx(&fl->c, &dispose); continue; } request = fl; @@ -1228,7 +1281,7 @@ retry: continue; if (fl->fl_start > request->fl_end) break; - if (request->fl_type == F_UNLCK) + if (lock_is_unlock(request)) added = true; if (fl->fl_start < request->fl_start) left = fl; @@ -1244,7 +1297,7 @@ retry: * one (This may happen several times). 
*/ if (added) { - locks_delete_lock_ctx(fl, &dispose); + locks_delete_lock_ctx(&fl->c, &dispose); continue; } /* @@ -1261,8 +1314,9 @@ retry: locks_move_blocks(new_fl, request); request = new_fl; new_fl = NULL; - locks_insert_lock_ctx(request, &fl->fl_list); - locks_delete_lock_ctx(fl, &dispose); + locks_insert_lock_ctx(&request->c, + &fl->c.flc_list); + locks_delete_lock_ctx(&fl->c, &dispose); added = true; } } @@ -1279,8 +1333,8 @@ retry: error = 0; if (!added) { - if (request->fl_type == F_UNLCK) { - if (request->fl_flags & FL_EXISTS) + if (lock_is_unlock(request)) { + if (request->c.flc_flags & FL_EXISTS) error = -ENOENT; goto out; } @@ -1291,7 +1345,7 @@ retry: } locks_copy_lock(new_fl, request); locks_move_blocks(new_fl, request); - locks_insert_lock_ctx(new_fl, &fl->fl_list); + locks_insert_lock_ctx(&new_fl->c, &fl->c.flc_list); fl = new_fl; new_fl = NULL; } @@ -1303,14 +1357,14 @@ retry: left = new_fl2; new_fl2 = NULL; locks_copy_lock(left, right); - locks_insert_lock_ctx(left, &fl->fl_list); + locks_insert_lock_ctx(&left->c, &fl->c.flc_list); } right->fl_start = request->fl_end + 1; - locks_wake_up_blocks(right); + locks_wake_up_blocks(&right->c); } if (left) { left->fl_end = request->fl_start - 1; - locks_wake_up_blocks(left); + locks_wake_up_blocks(&left->c); } out: spin_unlock(&ctx->flc_lock); @@ -1364,8 +1418,8 @@ static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl) error = posix_lock_inode(inode, fl, NULL); if (error != FILE_LOCK_DEFERRED) break; - error = wait_event_interruptible(fl->fl_wait, - list_empty(&fl->fl_blocked_member)); + error = wait_event_interruptible(fl->c.flc_wait, + list_empty(&fl->c.flc_blocked_member)); if (error) break; } @@ -1373,37 +1427,37 @@ static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl) return error; } -static void lease_clear_pending(struct file_lock *fl, int arg) +static void lease_clear_pending(struct file_lease *fl, int arg) { switch (arg) { case F_UNLCK: - fl->fl_flags &= ~FL_UNLOCK_PENDING; + fl->c.flc_flags &= ~FL_UNLOCK_PENDING; fallthrough; case F_RDLCK: - fl->fl_flags &= ~FL_DOWNGRADE_PENDING; + fl->c.flc_flags &= ~FL_DOWNGRADE_PENDING; } } /* We already had a lease on this file; just change its type */ -int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose) +int lease_modify(struct file_lease *fl, int arg, struct list_head *dispose) { - int error = assign_type(fl, arg); + int error = assign_type(&fl->c, arg); if (error) return error; lease_clear_pending(fl, arg); - locks_wake_up_blocks(fl); + locks_wake_up_blocks(&fl->c); if (arg == F_UNLCK) { - struct file *filp = fl->fl_file; + struct file *filp = fl->c.flc_file; f_delown(filp); filp->f_owner.signum = 0; - fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync); + fasync_helper(0, fl->c.flc_file, 0, &fl->fl_fasync); if (fl->fl_fasync != NULL) { printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync); fl->fl_fasync = NULL; } - locks_delete_lock_ctx(fl, dispose); + locks_delete_lock_ctx(&fl->c, dispose); } return 0; } @@ -1420,11 +1474,11 @@ static bool past_time(unsigned long then) static void time_out_leases(struct inode *inode, struct list_head *dispose) { struct file_lock_context *ctx = inode->i_flctx; - struct file_lock *fl, *tmp; + struct file_lease *fl, *tmp; lockdep_assert_held(&ctx->flc_lock); - list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) { + list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, c.flc_list) { trace_time_out_leases(inode, fl); if (past_time(fl->fl_downgrade_time)) 
lease_modify(fl, F_RDLCK, dispose); @@ -1433,38 +1487,40 @@ static void time_out_leases(struct inode *inode, struct list_head *dispose) } } -static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker) +static bool leases_conflict(struct file_lock_core *lc, struct file_lock_core *bc) { bool rc; + struct file_lease *lease = file_lease(lc); + struct file_lease *breaker = file_lease(bc); if (lease->fl_lmops->lm_breaker_owns_lease && lease->fl_lmops->lm_breaker_owns_lease(lease)) return false; - if ((breaker->fl_flags & FL_LAYOUT) != (lease->fl_flags & FL_LAYOUT)) { + if ((bc->flc_flags & FL_LAYOUT) != (lc->flc_flags & FL_LAYOUT)) { rc = false; goto trace; } - if ((breaker->fl_flags & FL_DELEG) && (lease->fl_flags & FL_LEASE)) { + if ((bc->flc_flags & FL_DELEG) && (lc->flc_flags & FL_LEASE)) { rc = false; goto trace; } - rc = locks_conflict(breaker, lease); + rc = locks_conflict(bc, lc); trace: trace_leases_conflict(rc, lease, breaker); return rc; } static bool -any_leases_conflict(struct inode *inode, struct file_lock *breaker) +any_leases_conflict(struct inode *inode, struct file_lease *breaker) { struct file_lock_context *ctx = inode->i_flctx; - struct file_lock *fl; + struct file_lock_core *flc; lockdep_assert_held(&ctx->flc_lock); - list_for_each_entry(fl, &ctx->flc_lease, fl_list) { - if (leases_conflict(fl, breaker)) + list_for_each_entry(flc, &ctx->flc_lease, flc_list) { + if (leases_conflict(flc, &breaker->c)) return true; } return false; @@ -1487,7 +1543,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type) { int error = 0; struct file_lock_context *ctx; - struct file_lock *new_fl, *fl, *tmp; + struct file_lease *new_fl, *fl, *tmp; unsigned long break_time; int want_write = (mode & O_ACCMODE) != O_RDONLY; LIST_HEAD(dispose); @@ -1495,7 +1551,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type) new_fl = lease_alloc(NULL, want_write ? 
F_WRLCK : F_RDLCK); if (IS_ERR(new_fl)) return PTR_ERR(new_fl); - new_fl->fl_flags = type; + new_fl->c.flc_flags = type; /* typically we will check that ctx is non-NULL before calling */ ctx = locks_inode_context(inode); @@ -1519,22 +1575,22 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type) break_time++; /* so that 0 means no break time */ } - list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) { - if (!leases_conflict(fl, new_fl)) + list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, c.flc_list) { + if (!leases_conflict(&fl->c, &new_fl->c)) continue; if (want_write) { - if (fl->fl_flags & FL_UNLOCK_PENDING) + if (fl->c.flc_flags & FL_UNLOCK_PENDING) continue; - fl->fl_flags |= FL_UNLOCK_PENDING; + fl->c.flc_flags |= FL_UNLOCK_PENDING; fl->fl_break_time = break_time; } else { if (lease_breaking(fl)) continue; - fl->fl_flags |= FL_DOWNGRADE_PENDING; + fl->c.flc_flags |= FL_DOWNGRADE_PENDING; fl->fl_downgrade_time = break_time; } if (fl->fl_lmops->lm_break(fl)) - locks_delete_lock_ctx(fl, &dispose); + locks_delete_lock_ctx(&fl->c, &dispose); } if (list_empty(&ctx->flc_lease)) @@ -1547,26 +1603,26 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type) } restart: - fl = list_first_entry(&ctx->flc_lease, struct file_lock, fl_list); + fl = list_first_entry(&ctx->flc_lease, struct file_lease, c.flc_list); break_time = fl->fl_break_time; if (break_time != 0) break_time -= jiffies; if (break_time == 0) break_time++; - locks_insert_block(fl, new_fl, leases_conflict); + locks_insert_block(&fl->c, &new_fl->c, leases_conflict); trace_break_lease_block(inode, new_fl); spin_unlock(&ctx->flc_lock); percpu_up_read(&file_rwsem); locks_dispose_list(&dispose); - error = wait_event_interruptible_timeout(new_fl->fl_wait, - list_empty(&new_fl->fl_blocked_member), - break_time); + error = wait_event_interruptible_timeout(new_fl->c.flc_wait, + list_empty(&new_fl->c.flc_blocked_member), + break_time); percpu_down_read(&file_rwsem); spin_lock(&ctx->flc_lock); trace_break_lease_unblock(inode, new_fl); - locks_delete_block(new_fl); + __locks_delete_block(&new_fl->c); if (error >= 0) { /* * Wait for the next conflicting lease that has not been @@ -1583,7 +1639,7 @@ out: percpu_up_read(&file_rwsem); locks_dispose_list(&dispose); free_lock: - locks_free_lock(new_fl); + locks_free_lease(new_fl); return error; } EXPORT_SYMBOL(__break_lease); @@ -1601,14 +1657,14 @@ void lease_get_mtime(struct inode *inode, struct timespec64 *time) { bool has_lease = false; struct file_lock_context *ctx; - struct file_lock *fl; + struct file_lock_core *flc; ctx = locks_inode_context(inode); if (ctx && !list_empty_careful(&ctx->flc_lease)) { spin_lock(&ctx->flc_lock); - fl = list_first_entry_or_null(&ctx->flc_lease, - struct file_lock, fl_list); - if (fl && (fl->fl_type == F_WRLCK)) + flc = list_first_entry_or_null(&ctx->flc_lease, + struct file_lock_core, flc_list); + if (flc && flc->flc_type == F_WRLCK) has_lease = true; spin_unlock(&ctx->flc_lock); } @@ -1643,7 +1699,7 @@ EXPORT_SYMBOL(lease_get_mtime); */ int fcntl_getlease(struct file *filp) { - struct file_lock *fl; + struct file_lease *fl; struct inode *inode = file_inode(filp); struct file_lock_context *ctx; int type = F_UNLCK; @@ -1654,8 +1710,8 @@ int fcntl_getlease(struct file *filp) percpu_down_read(&file_rwsem); spin_lock(&ctx->flc_lock); time_out_leases(inode, &dispose); - list_for_each_entry(fl, &ctx->flc_lease, fl_list) { - if (fl->fl_file != filp) + list_for_each_entry(fl, &ctx->flc_lease, c.flc_list) { + if 
(fl->c.flc_file != filp) continue; type = target_leasetype(fl); break; @@ -1715,12 +1771,12 @@ check_conflicting_open(struct file *filp, const int arg, int flags) } static int -generic_add_lease(struct file *filp, int arg, struct file_lock **flp, void **priv) +generic_add_lease(struct file *filp, int arg, struct file_lease **flp, void **priv) { - struct file_lock *fl, *my_fl = NULL, *lease; + struct file_lease *fl, *my_fl = NULL, *lease; struct inode *inode = file_inode(filp); struct file_lock_context *ctx; - bool is_deleg = (*flp)->fl_flags & FL_DELEG; + bool is_deleg = (*flp)->c.flc_flags & FL_DELEG; int error; LIST_HEAD(dispose); @@ -1746,7 +1802,7 @@ generic_add_lease(struct file *filp, int arg, struct file_lock **flp, void **pri percpu_down_read(&file_rwsem); spin_lock(&ctx->flc_lock); time_out_leases(inode, &dispose); - error = check_conflicting_open(filp, arg, lease->fl_flags); + error = check_conflicting_open(filp, arg, lease->c.flc_flags); if (error) goto out; @@ -1759,9 +1815,9 @@ generic_add_lease(struct file *filp, int arg, struct file_lock **flp, void **pri * except for this filp. */ error = -EAGAIN; - list_for_each_entry(fl, &ctx->flc_lease, fl_list) { - if (fl->fl_file == filp && - fl->fl_owner == lease->fl_owner) { + list_for_each_entry(fl, &ctx->flc_lease, c.flc_list) { + if (fl->c.flc_file == filp && + fl->c.flc_owner == lease->c.flc_owner) { my_fl = fl; continue; } @@ -1776,7 +1832,7 @@ generic_add_lease(struct file *filp, int arg, struct file_lock **flp, void **pri * Modifying our existing lease is OK, but no getting a * new lease if someone else is opening for write: */ - if (fl->fl_flags & FL_UNLOCK_PENDING) + if (fl->c.flc_flags & FL_UNLOCK_PENDING) goto out; } @@ -1792,7 +1848,7 @@ generic_add_lease(struct file *filp, int arg, struct file_lock **flp, void **pri if (!leases_enable) goto out; - locks_insert_lock_ctx(lease, &ctx->flc_lease); + locks_insert_lock_ctx(&lease->c, &ctx->flc_lease); /* * The check in break_lease() is lockless. It's possible for another * open to race in after we did the earlier check for a conflicting @@ -1803,9 +1859,9 @@ generic_add_lease(struct file *filp, int arg, struct file_lock **flp, void **pri * precedes these checks. */ smp_mb(); - error = check_conflicting_open(filp, arg, lease->fl_flags); + error = check_conflicting_open(filp, arg, lease->c.flc_flags); if (error) { - locks_unlink_lock_ctx(lease); + locks_unlink_lock_ctx(&lease->c); goto out; } @@ -1826,7 +1882,7 @@ out: static int generic_delete_lease(struct file *filp, void *owner) { int error = -EAGAIN; - struct file_lock *fl, *victim = NULL; + struct file_lease *fl, *victim = NULL; struct inode *inode = file_inode(filp); struct file_lock_context *ctx; LIST_HEAD(dispose); @@ -1839,9 +1895,9 @@ static int generic_delete_lease(struct file *filp, void *owner) percpu_down_read(&file_rwsem); spin_lock(&ctx->flc_lock); - list_for_each_entry(fl, &ctx->flc_lease, fl_list) { - if (fl->fl_file == filp && - fl->fl_owner == owner) { + list_for_each_entry(fl, &ctx->flc_lease, c.flc_list) { + if (fl->c.flc_file == filp && + fl->c.flc_owner == owner) { victim = fl; break; } @@ -1866,7 +1922,7 @@ static int generic_delete_lease(struct file *filp, void *owner) * The (input) flp->fl_lmops->lm_break function is required * by break_lease(). 
*/ -int generic_setlease(struct file *filp, int arg, struct file_lock **flp, +int generic_setlease(struct file *filp, int arg, struct file_lease **flp, void **priv) { struct inode *inode = file_inode(filp); @@ -1913,7 +1969,7 @@ lease_notifier_chain_init(void) } static inline void -setlease_notifier(int arg, struct file_lock *lease) +setlease_notifier(int arg, struct file_lease *lease) { if (arg != F_UNLCK) srcu_notifier_call_chain(&lease_notifier_chain, arg, lease); @@ -1949,7 +2005,7 @@ EXPORT_SYMBOL_GPL(lease_unregister_notifier); * may be NULL if the lm_setup operation doesn't require it. */ int -vfs_setlease(struct file *filp, int arg, struct file_lock **lease, void **priv) +vfs_setlease(struct file *filp, int arg, struct file_lease **lease, void **priv) { if (lease) setlease_notifier(arg, *lease); @@ -1962,7 +2018,7 @@ EXPORT_SYMBOL_GPL(vfs_setlease); static int do_fcntl_add_lease(unsigned int fd, struct file *filp, int arg) { - struct file_lock *fl; + struct file_lease *fl; struct fasync_struct *new; int error; @@ -1972,14 +2028,14 @@ static int do_fcntl_add_lease(unsigned int fd, struct file *filp, int arg) new = fasync_alloc(); if (!new) { - locks_free_lock(fl); + locks_free_lease(fl); return -ENOMEM; } new->fa_fd = fd; error = vfs_setlease(filp, arg, &fl, (void **)&new); if (fl) - locks_free_lock(fl); + locks_free_lease(fl); if (new) fasync_free(new); return error; @@ -2017,8 +2073,8 @@ static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl) error = flock_lock_inode(inode, fl); if (error != FILE_LOCK_DEFERRED) break; - error = wait_event_interruptible(fl->fl_wait, - list_empty(&fl->fl_blocked_member)); + error = wait_event_interruptible(fl->c.flc_wait, + list_empty(&fl->c.flc_blocked_member)); if (error) break; } @@ -2036,7 +2092,7 @@ static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl) int locks_lock_inode_wait(struct inode *inode, struct file_lock *fl) { int res = 0; - switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) { + switch (fl->c.flc_flags & (FL_POSIX|FL_FLOCK)) { case FL_POSIX: res = posix_lock_inode_wait(inode, fl); break; @@ -2098,13 +2154,13 @@ SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd) flock_make_lock(f.file, &fl, type); - error = security_file_lock(f.file, fl.fl_type); + error = security_file_lock(f.file, fl.c.flc_type); if (error) goto out_putf; can_sleep = !(cmd & LOCK_NB); if (can_sleep) - fl.fl_flags |= FL_SLEEP; + fl.c.flc_flags |= FL_SLEEP; if (f.file->f_op->flock) error = f.file->f_op->flock(f.file, @@ -2130,7 +2186,7 @@ SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd) */ int vfs_test_lock(struct file *filp, struct file_lock *fl) { - WARN_ON_ONCE(filp != fl->fl_file); + WARN_ON_ONCE(filp != fl->c.flc_file); if (filp->f_op->lock) return filp->f_op->lock(filp, F_GETLK, fl); posix_test_lock(filp, fl); @@ -2145,25 +2201,28 @@ EXPORT_SYMBOL_GPL(vfs_test_lock); * * Used to translate a fl_pid into a namespace virtual pid number */ -static pid_t locks_translate_pid(struct file_lock *fl, struct pid_namespace *ns) +static pid_t locks_translate_pid(struct file_lock_core *fl, struct pid_namespace *ns) { pid_t vnr; struct pid *pid; - if (IS_OFDLCK(fl)) + if (fl->flc_flags & FL_OFDLCK) return -1; - if (IS_REMOTELCK(fl)) - return fl->fl_pid; + + /* Remote locks report a negative pid value */ + if (fl->flc_pid <= 0) + return fl->flc_pid; + /* * If the flock owner process is dead and its pid has been already * freed, the translation below won't work, but we still want to show * flock owner pid number in init 
 	pidns. */
 	if (ns == &init_pid_ns)
-		return (pid_t)fl->fl_pid;
+		return (pid_t) fl->flc_pid;
 
 	rcu_read_lock();
-	pid = find_pid_ns(fl->fl_pid, &init_pid_ns);
+	pid = find_pid_ns(fl->flc_pid, &init_pid_ns);
 	vnr = pid_nr_ns(pid, ns);
 	rcu_read_unlock();
 	return vnr;
@@ -2171,7 +2230,7 @@ static pid_t locks_translate_pid(struct file_lock *fl, struct pid_namespace *ns)
 
 static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
 {
-	flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
+	flock->l_pid = locks_translate_pid(&fl->c, task_active_pid_ns(current));
 #if BITS_PER_LONG == 32
 	/*
 	 * Make sure we can represent the posix lock via
@@ -2186,19 +2245,19 @@ static int posix_lock_to_flock(struct flock *flock, struct file_lock *fl)
 	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
 		fl->fl_end - fl->fl_start + 1;
 	flock->l_whence = 0;
-	flock->l_type = fl->fl_type;
+	flock->l_type = fl->c.flc_type;
 	return 0;
 }
 
 #if BITS_PER_LONG == 32
 static void posix_lock_to_flock64(struct flock64 *flock, struct file_lock *fl)
 {
-	flock->l_pid = locks_translate_pid(fl, task_active_pid_ns(current));
+	flock->l_pid = locks_translate_pid(&fl->c, task_active_pid_ns(current));
 	flock->l_start = fl->fl_start;
 	flock->l_len = fl->fl_end == OFFSET_MAX ? 0 :
 		fl->fl_end - fl->fl_start + 1;
 	flock->l_whence = 0;
-	flock->l_type = fl->fl_type;
+	flock->l_type = fl->c.flc_type;
 }
 #endif
 
@@ -2227,16 +2286,16 @@ int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock *flock)
 		if (flock->l_pid != 0)
 			goto out;
 
-		fl->fl_flags |= FL_OFDLCK;
-		fl->fl_owner = filp;
+		fl->c.flc_flags |= FL_OFDLCK;
+		fl->c.flc_owner = filp;
 	}
 
 	error = vfs_test_lock(filp, fl);
 	if (error)
 		goto out;
 
-	flock->l_type = fl->fl_type;
-	if (fl->fl_type != F_UNLCK) {
+	flock->l_type = fl->c.flc_type;
+	if (fl->c.flc_type != F_UNLCK) {
 		error = posix_lock_to_flock(flock, fl);
 		if (error)
 			goto out;
@@ -2283,7 +2342,7 @@ out:
  */
 int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf)
 {
-	WARN_ON_ONCE(filp != fl->fl_file);
+	WARN_ON_ONCE(filp != fl->c.flc_file);
 	if (filp->f_op->lock)
 		return filp->f_op->lock(filp, cmd, fl);
 	else
@@ -2296,7 +2355,7 @@ static int do_lock_file_wait(struct file *filp, unsigned int cmd,
 {
 	int error;
 
-	error = security_file_lock(filp, fl->fl_type);
+	error = security_file_lock(filp, fl->c.flc_type);
 	if (error)
 		return error;
 
@@ -2304,8 +2363,8 @@ static int do_lock_file_wait(struct file *filp, unsigned int cmd,
 		error = vfs_lock_file(filp, cmd, fl, NULL);
 		if (error != FILE_LOCK_DEFERRED)
 			break;
-		error = wait_event_interruptible(fl->fl_wait,
-					list_empty(&fl->fl_blocked_member));
+		error = wait_event_interruptible(fl->c.flc_wait,
+					list_empty(&fl->c.flc_blocked_member));
 		if (error)
 			break;
 	}
@@ -2318,13 +2377,13 @@ static int do_lock_file_wait(struct file *filp, unsigned int cmd,
 
 static int check_fmode_for_setlk(struct file_lock *fl)
 {
-	switch (fl->fl_type) {
+	switch (fl->c.flc_type) {
 	case F_RDLCK:
-		if (!(fl->fl_file->f_mode & FMODE_READ))
+		if (!(fl->c.flc_file->f_mode & FMODE_READ))
 			return -EBADF;
 		break;
 	case F_WRLCK:
-		if (!(fl->fl_file->f_mode & FMODE_WRITE))
+		if (!(fl->c.flc_file->f_mode & FMODE_WRITE))
 			return -EBADF;
 	}
 	return 0;
@@ -2363,8 +2422,8 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
 			goto out;
 
 		cmd = F_SETLK;
-		file_lock->fl_flags |= FL_OFDLCK;
-		file_lock->fl_owner = filp;
+		file_lock->c.flc_flags |= FL_OFDLCK;
+		file_lock->c.flc_owner = filp;
 		break;
 	case F_OFD_SETLKW:
 		error = -EINVAL;
@@ -2372,11 +2431,11 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
 			goto out;
 
 		cmd = F_SETLKW;
-		file_lock->fl_flags |= FL_OFDLCK;
-		file_lock->fl_owner = filp;
+		file_lock->c.flc_flags |= FL_OFDLCK;
+		file_lock->c.flc_owner = filp;
 		fallthrough;
 	case F_SETLKW:
-		file_lock->fl_flags |= FL_SLEEP;
+		file_lock->c.flc_flags |= FL_SLEEP;
 	}
 
 	error = do_lock_file_wait(filp, cmd, file_lock);
@@ -2386,8 +2445,8 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
 	 * lock that was just acquired. There is no need to do that when we're
 	 * unlocking though, or for OFD locks.
 	 */
-	if (!error && file_lock->fl_type != F_UNLCK &&
-	    !(file_lock->fl_flags & FL_OFDLCK)) {
+	if (!error && file_lock->c.flc_type != F_UNLCK &&
+	    !(file_lock->c.flc_flags & FL_OFDLCK)) {
 		struct files_struct *files = current->files;
 		/*
 		 * We need that spin_lock here - it prevents reordering between
@@ -2398,7 +2457,7 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
 		f = files_lookup_fd_locked(files, fd);
 		spin_unlock(&files->file_lock);
 		if (f != filp) {
-			file_lock->fl_type = F_UNLCK;
+			file_lock->c.flc_type = F_UNLCK;
 			error = do_lock_file_wait(filp, cmd, file_lock);
 			WARN_ON_ONCE(error);
 			error = -EBADF;
@@ -2437,16 +2496,16 @@ int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 *flock)
 		if (flock->l_pid != 0)
 			goto out;
 
-		fl->fl_flags |= FL_OFDLCK;
-		fl->fl_owner = filp;
+		fl->c.flc_flags |= FL_OFDLCK;
+		fl->c.flc_owner = filp;
 	}
 
 	error = vfs_test_lock(filp, fl);
 	if (error)
 		goto out;
 
-	flock->l_type = fl->fl_type;
-	if (fl->fl_type != F_UNLCK)
+	flock->l_type = fl->c.flc_type;
+	if (fl->c.flc_type != F_UNLCK)
 		posix_lock_to_flock64(flock, fl);
 
 out:
@@ -2486,8 +2545,8 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
 			goto out;
 
 		cmd = F_SETLK64;
-		file_lock->fl_flags |= FL_OFDLCK;
-		file_lock->fl_owner = filp;
+		file_lock->c.flc_flags |= FL_OFDLCK;
+		file_lock->c.flc_owner = filp;
 		break;
 	case F_OFD_SETLKW:
 		error = -EINVAL;
@@ -2495,11 +2554,11 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
 			goto out;
 
 		cmd = F_SETLKW64;
-		file_lock->fl_flags |= FL_OFDLCK;
-		file_lock->fl_owner = filp;
+		file_lock->c.flc_flags |= FL_OFDLCK;
+		file_lock->c.flc_owner = filp;
 		fallthrough;
 	case F_SETLKW64:
-		file_lock->fl_flags |= FL_SLEEP;
+		file_lock->c.flc_flags |= FL_SLEEP;
 	}
 
 	error = do_lock_file_wait(filp, cmd, file_lock);
@@ -2509,8 +2568,8 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
 	 * lock that was just acquired. There is no need to do that when we're
 	 * unlocking though, or for OFD locks.
 	 */
-	if (!error && file_lock->fl_type != F_UNLCK &&
-	    !(file_lock->fl_flags & FL_OFDLCK)) {
+	if (!error && file_lock->c.flc_type != F_UNLCK &&
+	    !(file_lock->c.flc_flags & FL_OFDLCK)) {
 		struct files_struct *files = current->files;
 		/*
 		 * We need that spin_lock here - it prevents reordering between
@@ -2521,7 +2580,7 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
 		f = files_lookup_fd_locked(files, fd);
 		spin_unlock(&files->file_lock);
 		if (f != filp) {
-			file_lock->fl_type = F_UNLCK;
+			file_lock->c.flc_type = F_UNLCK;
 			error = do_lock_file_wait(filp, cmd, file_lock);
 			WARN_ON_ONCE(error);
 			error = -EBADF;
@@ -2555,13 +2614,13 @@ void locks_remove_posix(struct file *filp, fl_owner_t owner)
 		return;
 
 	locks_init_lock(&lock);
-	lock.fl_type = F_UNLCK;
-	lock.fl_flags = FL_POSIX | FL_CLOSE;
+	lock.c.flc_type = F_UNLCK;
+	lock.c.flc_flags = FL_POSIX | FL_CLOSE;
 	lock.fl_start = 0;
 	lock.fl_end = OFFSET_MAX;
-	lock.fl_owner = owner;
-	lock.fl_pid = current->tgid;
-	lock.fl_file = filp;
+	lock.c.flc_owner = owner;
+	lock.c.flc_pid = current->tgid;
+	lock.c.flc_file = filp;
 	lock.fl_ops = NULL;
 	lock.fl_lmops = NULL;
 
@@ -2584,7 +2643,7 @@ locks_remove_flock(struct file *filp, struct file_lock_context *flctx)
 		return;
 
 	flock_make_lock(filp, &fl, F_UNLCK);
-	fl.fl_flags |= FL_CLOSE;
+	fl.c.flc_flags |= FL_CLOSE;
 
 	if (filp->f_op->flock)
 		filp->f_op->flock(filp, F_SETLKW, &fl);
@@ -2599,7 +2658,7 @@ locks_remove_flock(struct file *filp, struct file_lock_context *flctx)
 static void
 locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
 {
-	struct file_lock *fl, *tmp;
+	struct file_lease *fl, *tmp;
 	LIST_HEAD(dispose);
 
 	if (list_empty(&ctx->flc_lease))
@@ -2607,8 +2666,8 @@ locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
 
 	percpu_down_read(&file_rwsem);
 	spin_lock(&ctx->flc_lock);
-	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
-		if (filp == fl->fl_file)
+	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, c.flc_list)
+		if (filp == fl->c.flc_file)
 			lease_modify(fl, F_UNLCK, &dispose);
 	spin_unlock(&ctx->flc_lock);
 	percpu_up_read(&file_rwsem);
@@ -2652,7 +2711,7 @@ void locks_remove_file(struct file *filp)
  */
 int vfs_cancel_lock(struct file *filp, struct file_lock *fl)
 {
-	WARN_ON_ONCE(filp != fl->fl_file);
+	WARN_ON_ONCE(filp != fl->c.flc_file);
 	if (filp->f_op->lock)
 		return filp->f_op->lock(filp, F_CANCELLK, fl);
 	return 0;
@@ -2691,69 +2750,73 @@ struct locks_iterator {
 	loff_t	li_pos;
 };
 
-static void lock_get_status(struct seq_file *f, struct file_lock *fl,
+static void lock_get_status(struct seq_file *f, struct file_lock_core *flc,
 			    loff_t id, char *pfx, int repeat)
 {
 	struct inode *inode = NULL;
-	unsigned int fl_pid;
+	unsigned int pid;
 	struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb);
-	int type;
+	int type = flc->flc_type;
+	struct file_lock *fl = file_lock(flc);
+
+	pid = locks_translate_pid(flc, proc_pidns);
 
-	fl_pid = locks_translate_pid(fl, proc_pidns);
 	/*
	 * If lock owner is dead (and pid is freed) or not visible in current
	 * pidns, zero is shown as a pid value. Check lock info from
	 * init_pid_ns to get saved lock pid value.
	 */
-
-	if (fl->fl_file != NULL)
-		inode = file_inode(fl->fl_file);
+	if (flc->flc_file != NULL)
+		inode = file_inode(flc->flc_file);
 
 	seq_printf(f, "%lld: ", id);
 
 	if (repeat)
 		seq_printf(f, "%*s", repeat - 1 + (int)strlen(pfx), pfx);
 
-	if (IS_POSIX(fl)) {
-		if (fl->fl_flags & FL_ACCESS)
+	if (flc->flc_flags & FL_POSIX) {
+		if (flc->flc_flags & FL_ACCESS)
 			seq_puts(f, "ACCESS");
-		else if (IS_OFDLCK(fl))
+		else if (flc->flc_flags & FL_OFDLCK)
 			seq_puts(f, "OFDLCK");
 		else
 			seq_puts(f, "POSIX ");
 
 		seq_printf(f, " %s ",
			     (inode == NULL) ? "*NOINODE*" : "ADVISORY ");
-	} else if (IS_FLOCK(fl)) {
+	} else if (flc->flc_flags & FL_FLOCK) {
 		seq_puts(f, "FLOCK  ADVISORY  ");
-	} else if (IS_LEASE(fl)) {
-		if (fl->fl_flags & FL_DELEG)
+	} else if (flc->flc_flags & (FL_LEASE|FL_DELEG|FL_LAYOUT)) {
+		struct file_lease *lease = file_lease(flc);
+
+		type = target_leasetype(lease);
+
+		if (flc->flc_flags & FL_DELEG)
 			seq_puts(f, "DELEG  ");
 		else
 			seq_puts(f, "LEASE  ");
 
-		if (lease_breaking(fl))
+		if (lease_breaking(lease))
 			seq_puts(f, "BREAKING  ");
-		else if (fl->fl_file)
+		else if (flc->flc_file)
 			seq_puts(f, "ACTIVE    ");
 		else
 			seq_puts(f, "BREAKER   ");
 	} else {
 		seq_puts(f, "UNKNOWN UNKNOWN  ");
 	}
-	type = IS_LEASE(fl) ? target_leasetype(fl) : fl->fl_type;
 
 	seq_printf(f, "%s ", (type == F_WRLCK) ? "WRITE" :
			     (type == F_RDLCK) ? "READ" : "UNLCK");
 	if (inode) {
 		/* userspace relies on this representation of dev_t */
-		seq_printf(f, "%d %02x:%02x:%lu ", fl_pid,
+		seq_printf(f, "%d %02x:%02x:%lu ", pid,
				MAJOR(inode->i_sb->s_dev),
				MINOR(inode->i_sb->s_dev), inode->i_ino);
 	} else {
-		seq_printf(f, "%d <none>:0 ", fl_pid);
+		seq_printf(f, "%d <none>:0 ", pid);
 	}
-	if (IS_POSIX(fl)) {
+	if (flc->flc_flags & FL_POSIX) {
 		if (fl->fl_end == OFFSET_MAX)
 			seq_printf(f, "%Ld EOF\n", fl->fl_start);
 		else
@@ -2763,17 +2826,18 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl,
 	}
 }
 
-static struct file_lock *get_next_blocked_member(struct file_lock *node)
+static struct file_lock_core *get_next_blocked_member(struct file_lock_core *node)
 {
-	struct file_lock *tmp;
+	struct file_lock_core *tmp;
 
 	/* NULL node or root node */
-	if (node == NULL || node->fl_blocker == NULL)
+	if (node == NULL || node->flc_blocker == NULL)
 		return NULL;
 
 	/* Next member in the linked list could be itself */
-	tmp = list_next_entry(node, fl_blocked_member);
-	if (list_entry_is_head(tmp, &node->fl_blocker->fl_blocked_requests, fl_blocked_member)
+	tmp = list_next_entry(node, flc_blocked_member);
+	if (list_entry_is_head(tmp, &node->flc_blocker->flc_blocked_requests,
+			       flc_blocked_member)
 		|| tmp == node) {
 		return NULL;
 	}
@@ -2784,18 +2848,18 @@ static struct file_lock *get_next_blocked_member(struct file_lock *node)
 static int locks_show(struct seq_file *f, void *v)
 {
 	struct locks_iterator *iter = f->private;
-	struct file_lock *cur, *tmp;
+	struct file_lock_core *cur, *tmp;
 	struct pid_namespace *proc_pidns = proc_pid_ns(file_inode(f->file)->i_sb);
 	int level = 0;
 
-	cur = hlist_entry(v, struct file_lock, fl_link);
+	cur = hlist_entry(v, struct file_lock_core, flc_link);
 
 	if (locks_translate_pid(cur, proc_pidns) == 0)
 		return 0;
 
-	/* View this crossed linked list as a binary tree, the first member of fl_blocked_requests
-	 * is the left child of current node, the next silibing in fl_blocked_member is the
-	 * right child, we can alse get the parent of current node from fl_blocker, so this
+	/* View this crossed linked list as a binary tree, the first member of flc_blocked_requests
+	 * is the left child of current node, the next silibing in flc_blocked_member is the
+	 * right child, we can alse get the parent of current node from flc_blocker, so this
	 * question becomes traversal of a binary tree
	 */
 	while (cur != NULL) {
@@ -2804,17 +2868,18 @@ static int locks_show(struct seq_file *f, void *v)
 		else
 			lock_get_status(f, cur, iter->li_pos, "", level);
 
-		if (!list_empty(&cur->fl_blocked_requests)) {
+		if (!list_empty(&cur->flc_blocked_requests)) {
 			/* Turn left */
-			cur = list_first_entry_or_null(&cur->fl_blocked_requests,
-					struct file_lock, fl_blocked_member);
+			cur = list_first_entry_or_null(&cur->flc_blocked_requests,
+						       struct file_lock_core,
+						       flc_blocked_member);
 			level++;
 		} else {
 			/* Turn right */
 			tmp = get_next_blocked_member(cur);
 			/* Fall back to parent node */
-			while (tmp == NULL && cur->fl_blocker != NULL) {
-				cur = cur->fl_blocker;
+			while (tmp == NULL && cur->flc_blocker != NULL) {
+				cur = cur->flc_blocker;
 				level--;
 				tmp = get_next_blocked_member(cur);
 			}
@@ -2829,14 +2894,13 @@ static void __show_fd_locks(struct seq_file *f,
			    struct list_head *head, int *id,
			    struct file *filp, struct files_struct *files)
 {
-	struct file_lock *fl;
+	struct file_lock_core *fl;
 
-	list_for_each_entry(fl, head, fl_list) {
+	list_for_each_entry(fl, head, flc_list) {
 
-		if (filp != fl->fl_file)
+		if (filp != fl->flc_file)
 			continue;
-		if (fl->fl_owner != files &&
-		    fl->fl_owner != filp)
+		if (fl->flc_owner != files && fl->flc_owner != filp)
 			continue;
 
 		(*id)++;
@@ -2915,6 +2979,9 @@ static int __init filelock_init(void)
 	filelock_cache = kmem_cache_create("file_lock_cache",
			sizeof(struct file_lock), 0, SLAB_PANIC, NULL);
 
+	filelease_cache = kmem_cache_create("file_lock_cache",
+			sizeof(struct file_lease), 0, SLAB_PANIC, NULL);
+
 	for_each_possible_cpu(i) {
 		struct file_lock_list_struct *fll = per_cpu_ptr(&file_lock_list, i);
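For readers less familiar with the flock conventions touched in the posix_lock_to_flock()/posix_lock_to_flock64() hunks: the kernel tracks an inclusive [fl_start, fl_end] byte range, while struct flock expresses the same range as (l_start, l_len) with l_len == 0 meaning "through EOF". A minimal standalone sketch of that mapping follows; struct range and the OFFSET_MAX definition below are illustrative stand-ins, not the kernel's own types.

/*
 * Sketch of the range convention in posix_lock_to_flock() above.
 * The kernel keeps an inclusive [fl_start, fl_end] range; struct flock
 * uses (l_start, l_len), where l_len == 0 means "through EOF".
 * OFFSET_MAX below is a stand-in for the kernel's "no end" sentinel.
 */
#include <limits.h>
#include <stdio.h>

#define OFFSET_MAX LLONG_MAX

struct range {
	long long start, end;	/* inclusive; end == OFFSET_MAX => EOF */
};

static long long range_to_len(struct range r)
{
	/* mirrors: l_len = fl_end == OFFSET_MAX ? 0 : fl_end - fl_start + 1 */
	return r.end == OFFSET_MAX ? 0 : r.end - r.start + 1;
}

int main(void)
{
	struct range whole = { 0, OFFSET_MAX };
	struct range first4k = { 0, 4095 };

	printf("whole file: l_start=%lld l_len=%lld\n",
	       whole.start, range_to_len(whole));
	printf("first 4k:   l_start=%lld l_len=%lld\n",
	       first4k.start, range_to_len(first4k));
	return 0;
}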
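The fcntl_getlk()/fcntl_setlk() hunks preserve the two user-visible rules for open file description (OFD) locks: the caller must pass l_pid == 0 (otherwise -EINVAL), and the owner recorded in flc_owner is the struct file itself, so the lock lives and dies with the open file description rather than with the process. A minimal userspace sketch of that usage follows; the path is illustrative and error handling is abbreviated.

/*
 * Userspace sketch of the OFD-lock rules enforced in fcntl_setlk()
 * above: l_pid must be zero, and the lock belongs to the open file
 * description, so it is released on the last close of that description.
 * F_OFD_SETLK requires _GNU_SOURCE with glibc.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct flock fl;
	int fd = open("/tmp/ofd-demo", O_RDWR | O_CREAT, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&fl, 0, sizeof(fl));	/* leaves l_pid == 0, as required */
	fl.l_type = F_WRLCK;
	fl.l_whence = SEEK_SET;
	fl.l_start = 0;
	fl.l_len = 0;			/* 0 == lock through EOF */

	if (fcntl(fd, F_OFD_SETLK, &fl) == -1)
		perror("F_OFD_SETLK");

	close(fd);	/* last close of the description drops the lock */
	return 0;
}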
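The locks_show() hunks keep the pre-existing /proc/locks traversal: the blocked-lock graph is walked as a binary tree in which the first entry on flc_blocked_requests is the left child, the next entry on flc_blocked_member is the right child, and flc_blocker is the parent pointer. Below is a userspace model of that walk; "struct node" is hypothetical, standing in for file_lock_core, and the sibling list is NULL-terminated here for brevity where the kernel's is circular.

/*
 * Userspace model of the locks_show() walk above: first_blocked plays
 * the role of flc_blocked_requests (left child), next_sibling the role
 * of flc_blocked_member (right child), blocker the parent pointer.
 */
#include <stdio.h>

struct node {
	const char *name;
	struct node *blocker;		/* parent */
	struct node *first_blocked;	/* left child */
	struct node *next_sibling;	/* right child */
};

static struct node *next_blocked_member(struct node *n)
{
	/* NULL node or root node: no right child */
	if (n == NULL || n->blocker == NULL)
		return NULL;
	return n->next_sibling;
}

static void show(struct node *cur)
{
	int level = 0;

	while (cur != NULL) {
		printf("%*s%s\n", level * 4, level ? "-> " : "", cur->name);

		if (cur->first_blocked) {	/* turn left */
			cur = cur->first_blocked;
			level++;
		} else {			/* turn right */
			struct node *tmp = next_blocked_member(cur);

			/* fall back to the parent until a sibling exists */
			while (tmp == NULL && cur->blocker != NULL) {
				cur = cur->blocker;
				level--;
				tmp = next_blocked_member(cur);
			}
			cur = tmp;
		}
	}
}

int main(void)
{
	struct node root = { .name = "lock holder" };
	struct node a = { .name = "waiter a", .blocker = &root };
	struct node b = { .name = "waiter b", .blocker = &root };

	root.first_blocked = &a;
	a.next_sibling = &b;
	show(&root);	/* prints the holder, then its two waiters indented */
	return 0;
}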
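One detail worth flagging in the final filelock_init() hunk: the new filelease_cache is created under the name "file_lock_cache", the same string the existing filelock_cache registers two lines earlier. That reads like a copy-and-paste slip, since slab caches conventionally carry distinct names (they are reported by name in /proc/slabinfo). A hypothetical fixup, assuming the duplicated name is unintended and noting it is not something this series itself does, would be:

	/* hypothetical rename, assuming the duplicated cache name is unintended */
	filelease_cache = kmem_cache_create("file_lease_cache",
			sizeof(struct file_lease), 0, SLAB_PANIC, NULL);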