From 1f1aaf82825865a50cef0b4722607abb12aeee52 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Tue, 16 Nov 2010 11:52:57 +0000 Subject: SELinux: return -ECONNREFUSED from ip_postroute to signal fatal error The SELinux netfilter hooks just return NF_DROP if they drop a packet. We want to signal that a drop in this hook is a permanent fatal error and is not transient. If we do this, the error will be passed back up the stack in some places and applications will learn more quickly that something went wrong. Signed-off-by: Eric Paris Signed-off-by: David S. Miller --- security/selinux/hooks.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'security') diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index d9154cf90ae1..2c145f12d991 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -4585,11 +4585,11 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex, secmark_perm = PACKET__SEND; break; default: - return NF_DROP; + return NF_DROP_ERR(-ECONNREFUSED); } if (secmark_perm == PACKET__FORWARD_OUT) { if (selinux_skb_peerlbl_sid(skb, family, &peer_sid)) - return NF_DROP; + return NF_DROP_ERR(-ECONNREFUSED); } else peer_sid = SECINITSID_KERNEL; } else { @@ -4602,28 +4602,28 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex, ad.u.net.netif = ifindex; ad.u.net.family = family; if (selinux_parse_skb(skb, &ad, &addrp, 0, NULL)) - return NF_DROP; + return NF_DROP_ERR(-ECONNREFUSED); if (secmark_active) if (avc_has_perm(peer_sid, skb->secmark, SECCLASS_PACKET, secmark_perm, &ad)) - return NF_DROP; + return NF_DROP_ERR(-ECONNREFUSED); if (peerlbl_active) { u32 if_sid; u32 node_sid; if (sel_netif_sid(ifindex, &if_sid)) - return NF_DROP; + return NF_DROP_ERR(-ECONNREFUSED); if (avc_has_perm(peer_sid, if_sid, SECCLASS_NETIF, NETIF__EGRESS, &ad)) - return NF_DROP; + return NF_DROP_ERR(-ECONNREFUSED); if (sel_netnode_sid(addrp, family, &node_sid)) - return NF_DROP; + return NF_DROP_ERR(-ECONNREFUSED); if (avc_has_perm(peer_sid, node_sid, SECCLASS_NODE, NODE__SENDTO, &ad)) - return NF_DROP; + return NF_DROP_ERR(-ECONNREFUSED); } return NF_ACCEPT; -- cgit v1.2.3-58-ga151 From 04f6d70f6e64900a5d70a5fc199dd9d5fa787738 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Tue, 23 Nov 2010 06:28:02 +0000 Subject: SELinux: Only return netlink error when we know the return is fatal Some of the SELinux netlink code returns a fatal error when the error might actually be transient. This patch just silently drops packets on potentially transient errors but continues to return a permanent error indicator when the denial was because of policy. Based-on-comments-by: Paul Moore Signed-off-by: Eric Paris Reviewed-by: Paul Moore Signed-off-by: David S. 
Miller --- security/selinux/hooks.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'security') diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 2c145f12d991..f590fb8e9143 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -4589,7 +4589,7 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex, } if (secmark_perm == PACKET__FORWARD_OUT) { if (selinux_skb_peerlbl_sid(skb, family, &peer_sid)) - return NF_DROP_ERR(-ECONNREFUSED); + return NF_DROP; } else peer_sid = SECINITSID_KERNEL; } else { @@ -4602,7 +4602,7 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex, ad.u.net.netif = ifindex; ad.u.net.family = family; if (selinux_parse_skb(skb, &ad, &addrp, 0, NULL)) - return NF_DROP_ERR(-ECONNREFUSED); + return NF_DROP; if (secmark_active) if (avc_has_perm(peer_sid, skb->secmark, @@ -4614,13 +4614,13 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex, u32 node_sid; if (sel_netif_sid(ifindex, &if_sid)) - return NF_DROP_ERR(-ECONNREFUSED); + return NF_DROP; if (avc_has_perm(peer_sid, if_sid, SECCLASS_NETIF, NETIF__EGRESS, &ad)) return NF_DROP_ERR(-ECONNREFUSED); if (sel_netnode_sid(addrp, family, &node_sid)) - return NF_DROP_ERR(-ECONNREFUSED); + return NF_DROP; if (avc_has_perm(peer_sid, node_sid, SECCLASS_NODE, NODE__SENDTO, &ad)) return NF_DROP_ERR(-ECONNREFUSED); -- cgit v1.2.3-58-ga151 From 2fe66ec242d3f76e3b0101f36419e7e5405bcff3 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Tue, 23 Nov 2010 06:28:08 +0000 Subject: SELinux: indicate fatal error in compat netfilter code The SELinux ip postroute code indicates when policy rejected a packet and passes the error back up the stack. The compat code does not. This patch sends the same kind of error back up the stack in the compat code. Based-on-patch-by: Paul Moore Signed-off-by: Eric Paris Reviewed-by: Paul Moore Signed-off-by: David S. Miller --- security/selinux/hooks.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'security') diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index f590fb8e9143..156ef93d6f7d 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -4524,11 +4524,11 @@ static unsigned int selinux_ip_postroute_compat(struct sk_buff *skb, if (selinux_secmark_enabled()) if (avc_has_perm(sksec->sid, skb->secmark, SECCLASS_PACKET, PACKET__SEND, &ad)) - return NF_DROP; + return NF_DROP_ERR(-ECONNREFUSED); if (selinux_policycap_netpeer) if (selinux_xfrm_postroute_last(sksec->sid, skb, &ad, proto)) - return NF_DROP; + return NF_DROP_ERR(-ECONNREFUSED); return NF_ACCEPT; } -- cgit v1.2.3-58-ga151 From 3fc5e98d8cf85e0d77fc597b49e9268dff67400e Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 22 Dec 2010 16:24:13 +0000 Subject: KEYS: Don't call up_write() if __key_link_begin() returns an error In construct_alloc_key(), up_write() is called in the error path if __key_link_begin() fails, but this is incorrect as __key_link_begin() only returns with the nominated keyring locked if it returns successfully. Without this patch, you might see the following in dmesg: ===================================== [ BUG: bad unlock balance detected! ] ------------------------------------- mount.cifs/5769 is trying to release lock (&key->sem) at: [] request_key_and_link+0x263/0x3fc but there are no more locks to release! 
other info that might help us debug this: 3 locks held by mount.cifs/5769: #0: (&type->s_umount_key#41/1){+.+.+.}, at: [] sget+0x278/0x3e7 #1: (&ret_buf->session_mutex){+.+.+.}, at: [] cifs_get_smb_ses+0x35a/0x443 [cifs] #2: (root_key_user.cons_lock){+.+.+.}, at: [] request_key_and_link+0x10a/0x3fc stack backtrace: Pid: 5769, comm: mount.cifs Not tainted 2.6.37-rc6+ #1 Call Trace: [] ? request_key_and_link+0x263/0x3fc [] print_unlock_inbalance_bug+0xca/0xd5 [] lock_release_non_nested+0xc1/0x263 [] ? request_key_and_link+0x263/0x3fc [] ? request_key_and_link+0x263/0x3fc [] lock_release+0x17d/0x1a4 [] up_write+0x23/0x3b [] request_key_and_link+0x263/0x3fc [] ? cifs_get_spnego_key+0x61/0x21f [cifs] [] request_key+0x41/0x74 [] cifs_get_spnego_key+0x200/0x21f [cifs] [] CIFS_SessSetup+0x55d/0x1273 [cifs] [] cifs_setup_session+0x90/0x1ae [cifs] [] cifs_get_smb_ses+0x37f/0x443 [cifs] [] cifs_mount+0x1aa1/0x23f3 [cifs] [] ? alloc_debug_processing+0xdb/0x120 [] ? cifs_get_spnego_key+0x1ef/0x21f [cifs] [] cifs_do_mount+0x165/0x2b3 [cifs] [] vfs_kern_mount+0xaf/0x1dc [] do_kern_mount+0x4d/0xef [] do_mount+0x6f4/0x733 [] sys_mount+0x88/0xc2 [] system_call_fastpath+0x16/0x1b Reported-by: Jeff Layton Signed-off-by: David Howells Reviewed-and-Tested-by: Jeff Layton Signed-off-by: Linus Torvalds --- security/keys/request_key.c | 1 - 1 file changed, 1 deletion(-) (limited to 'security') diff --git a/security/keys/request_key.c b/security/keys/request_key.c index 0088dd8bf68a..0ea52d25a6bd 100644 --- a/security/keys/request_key.c +++ b/security/keys/request_key.c @@ -403,7 +403,6 @@ link_check_failed: return ret; link_prealloc_failed: - up_write(&dest_keyring->sem); mutex_unlock(&user->cons_lock); kleave(" = %d [prelink]", ret); return ret; -- cgit v1.2.3-58-ga151 From 867c20265459d30a01b021a9c1e81fb4c5832aa9 Mon Sep 17 00:00:00 2001 From: Mimi Zohar Date: Mon, 3 Jan 2011 14:59:10 -0800 Subject: ima: fix add LSM rule bug If security_filter_rule_init() doesn't return a rule, then not everything is as fine as the return code implies. This bug only occurs when the LSM (eg. SELinux) is disabled at runtime. Adding an empty LSM rule causes ima_match_rules() to always succeed, ignoring any remaining rules. default IMA TCB policy: # PROC_SUPER_MAGIC dont_measure fsmagic=0x9fa0 # SYSFS_MAGIC dont_measure fsmagic=0x62656572 # DEBUGFS_MAGIC dont_measure fsmagic=0x64626720 # TMPFS_MAGIC dont_measure fsmagic=0x01021994 # SECURITYFS_MAGIC dont_measure fsmagic=0x73636673 < LSM specific rule > dont_measure obj_type=var_log_t measure func=BPRM_CHECK measure func=FILE_MMAP mask=MAY_EXEC measure func=FILE_CHECK mask=MAY_READ uid=0 Thus without the patch, with the boot parameters 'tcb selinux=0', adding the above 'dont_measure obj_type=var_log_t' rule to the default IMA TCB measurement policy, would result in nothing being measured. The patch prevents the default TCB policy from being replaced. 
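The underlying pitfall is a helper that reports success while leaving its output pointer NULL. Below is a minimal, self-contained userspace sketch of that pattern and of the defensive check this patch adds; the names are hypothetical stand-ins, not the kernel API.

#include <errno.h>
#include <stdio.h>

struct lsm_rule { int type; };

/*
 * Hypothetical stand-in for security_filter_rule_init(): with the LSM
 * disabled it still returns 0 ("success") but never produces a rule.
 */
static int filter_rule_init(int lsm_enabled, struct lsm_rule **out)
{
	static struct lsm_rule rule;

	*out = NULL;
	if (!lsm_enabled)
		return 0;		/* success code, but *out stays NULL */
	*out = &rule;
	return 0;
}

/* The defensive check: no rule object means the policy line must be rejected. */
static int add_lsm_rule(int lsm_enabled, struct lsm_rule **slot)
{
	int result = filter_rule_init(lsm_enabled, slot);

	if (!*slot)
		return -EINVAL;
	return result;
}

int main(void)
{
	struct lsm_rule *r;

	printf("LSM enabled:  %d\n", add_lsm_rule(1, &r));	/* 0 */
	printf("LSM disabled: %d\n", add_lsm_rule(0, &r));	/* -EINVAL */
	return 0;
}

With such a check in place, loading the rule fails loudly with -EINVAL instead of installing an empty rule that matches everything.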
Signed-off-by: Mimi Zohar Cc: James Morris Acked-by: Serge Hallyn Cc: David Safford Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- security/integrity/ima/ima_policy.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'security') diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c index aef8c0a923ab..d661afbe474c 100644 --- a/security/integrity/ima/ima_policy.c +++ b/security/integrity/ima/ima_policy.c @@ -253,6 +253,8 @@ static int ima_lsm_rule_init(struct ima_measure_rule_entry *entry, result = security_filter_rule_init(entry->lsm[lsm_rule].type, Audit_equal, args, &entry->lsm[lsm_rule].rule); + if (!entry->lsm[lsm_rule].rule) + return -EINVAL; return result; } -- cgit v1.2.3-58-ga151 From 3610cda53f247e176bcbb7a7cca64bc53b12acdb Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 5 Jan 2011 15:38:53 -0800 Subject: af_unix: Avoid socket->sk NULL OOPS in stream connect security hooks. unix_release() can asynchronously set socket->sk to NULL, and it does so without holding the unix_state_lock() on "other" during stream connects. However, the reverse mapping, sk->sk_socket, is only transitioned to NULL under the unix_state_lock(). Therefore make the security hooks follow the reverse mapping instead of the forward mapping. Reported-by: Jeremy Fitzhardinge Reported-by: Linus Torvalds Signed-off-by: David S. Miller --- include/linux/security.h | 15 +++++++-------- net/unix/af_unix.c | 2 +- security/capability.c | 2 +- security/security.c | 3 +-- security/selinux/hooks.c | 10 +++++----- security/smack/smack_lsm.c | 14 +++++++------- 6 files changed, 22 insertions(+), 24 deletions(-) (limited to 'security') diff --git a/include/linux/security.h b/include/linux/security.h index fd4d55fb8845..d47a4c24b3e4 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -796,8 +796,9 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @unix_stream_connect: * Check permissions before establishing a Unix domain stream connection * between @sock and @other. - * @sock contains the socket structure. - * @other contains the peer socket structure. + * @sock contains the sock structure. + * @other contains the peer sock structure. + * @newsk contains the new sock structure. * Return 0 if permission is granted. 
* @unix_may_send: * Check permissions before connecting or sending datagrams from @sock to @@ -1568,8 +1569,7 @@ struct security_operations { int (*inode_getsecctx)(struct inode *inode, void **ctx, u32 *ctxlen); #ifdef CONFIG_SECURITY_NETWORK - int (*unix_stream_connect) (struct socket *sock, - struct socket *other, struct sock *newsk); + int (*unix_stream_connect) (struct sock *sock, struct sock *other, struct sock *newsk); int (*unix_may_send) (struct socket *sock, struct socket *other); int (*socket_create) (int family, int type, int protocol, int kern); @@ -2525,8 +2525,7 @@ static inline int security_inode_getsecctx(struct inode *inode, void **ctx, u32 #ifdef CONFIG_SECURITY_NETWORK -int security_unix_stream_connect(struct socket *sock, struct socket *other, - struct sock *newsk); +int security_unix_stream_connect(struct sock *sock, struct sock *other, struct sock *newsk); int security_unix_may_send(struct socket *sock, struct socket *other); int security_socket_create(int family, int type, int protocol, int kern); int security_socket_post_create(struct socket *sock, int family, @@ -2567,8 +2566,8 @@ void security_tun_dev_post_create(struct sock *sk); int security_tun_dev_attach(struct sock *sk); #else /* CONFIG_SECURITY_NETWORK */ -static inline int security_unix_stream_connect(struct socket *sock, - struct socket *other, +static inline int security_unix_stream_connect(struct sock *sock, + struct sock *other, struct sock *newsk) { return 0; diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 417d7a6c36cf..dd419d286204 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -1157,7 +1157,7 @@ restart: goto restart; } - err = security_unix_stream_connect(sock, other->sk_socket, newsk); + err = security_unix_stream_connect(sk, other, newsk); if (err) { unix_state_unlock(sk); goto out_unlock; diff --git a/security/capability.c b/security/capability.c index c773635ca3a0..2a5df2b7da83 100644 --- a/security/capability.c +++ b/security/capability.c @@ -548,7 +548,7 @@ static int cap_sem_semop(struct sem_array *sma, struct sembuf *sops, } #ifdef CONFIG_SECURITY_NETWORK -static int cap_unix_stream_connect(struct socket *sock, struct socket *other, +static int cap_unix_stream_connect(struct sock *sock, struct sock *other, struct sock *newsk) { return 0; diff --git a/security/security.c b/security/security.c index 1b798d3df710..e5fb07a3052d 100644 --- a/security/security.c +++ b/security/security.c @@ -977,8 +977,7 @@ EXPORT_SYMBOL(security_inode_getsecctx); #ifdef CONFIG_SECURITY_NETWORK -int security_unix_stream_connect(struct socket *sock, struct socket *other, - struct sock *newsk) +int security_unix_stream_connect(struct sock *sock, struct sock *other, struct sock *newsk) { return security_ops->unix_stream_connect(sock, other, newsk); } diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index c82538a4b1a4..6f637d2678ac 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -3921,18 +3921,18 @@ static int selinux_socket_shutdown(struct socket *sock, int how) return sock_has_perm(current, sock->sk, SOCKET__SHUTDOWN); } -static int selinux_socket_unix_stream_connect(struct socket *sock, - struct socket *other, +static int selinux_socket_unix_stream_connect(struct sock *sock, + struct sock *other, struct sock *newsk) { - struct sk_security_struct *sksec_sock = sock->sk->sk_security; - struct sk_security_struct *sksec_other = other->sk->sk_security; + struct sk_security_struct *sksec_sock = sock->sk_security; + struct sk_security_struct *sksec_other = 
other->sk_security; struct sk_security_struct *sksec_new = newsk->sk_security; struct common_audit_data ad; int err; COMMON_AUDIT_DATA_INIT(&ad, NET); - ad.u.net.sk = other->sk; + ad.u.net.sk = other; err = avc_has_perm(sksec_sock->sid, sksec_other->sid, sksec_other->sclass, diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index 489a85afa477..ccb71a044a1a 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c @@ -2408,22 +2408,22 @@ static int smack_setprocattr(struct task_struct *p, char *name, /** * smack_unix_stream_connect - Smack access on UDS - * @sock: one socket - * @other: the other socket + * @sock: one sock + * @other: the other sock * @newsk: unused * * Return 0 if a subject with the smack of sock could access * an object with the smack of other, otherwise an error code */ -static int smack_unix_stream_connect(struct socket *sock, - struct socket *other, struct sock *newsk) +static int smack_unix_stream_connect(struct sock *sock, + struct sock *other, struct sock *newsk) { - struct inode *sp = SOCK_INODE(sock); - struct inode *op = SOCK_INODE(other); + struct inode *sp = SOCK_INODE(sock->sk_socket); + struct inode *op = SOCK_INODE(other->sk_socket); struct smk_audit_info ad; smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_NET); - smk_ad_setfield_u_net_sk(&ad, other->sk); + smk_ad_setfield_u_net_sk(&ad, other); return smk_access(smk_of_inode(sp), smk_of_inode(op), MAY_READWRITE, &ad); } -- cgit v1.2.3-58-ga151 From da5029563a0a026c64821b09e8e7b4fd81d3fe1b Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Fri, 7 Jan 2011 17:49:33 +1100 Subject: fs: dcache scale d_unhashed Protect d_unhashed(dentry) condition with d_lock. This means keeping DCACHE_UNHASHED bit in synch with hash manipulations. Signed-off-by: Nick Piggin --- arch/powerpc/platforms/cell/spufs/inode.c | 3 ++ drivers/usb/core/inode.c | 3 ++ fs/autofs4/autofs_i.h | 13 ------ fs/autofs4/expire.c | 21 ++++++--- fs/ceph/dir.c | 5 ++- fs/configfs/configfs_internal.h | 2 + fs/dcache.c | 74 +++++++++++++++++++++---------- fs/libfs.c | 29 ++++++++---- fs/ocfs2/dcache.c | 5 ++- security/tomoyo/realpath.c | 1 + 10 files changed, 102 insertions(+), 54 deletions(-) (limited to 'security') diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index 29a406a77547..5aef1a7f5e4b 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c @@ -166,6 +166,9 @@ static void spufs_prune_dir(struct dentry *dir) __d_drop(dentry); spin_unlock(&dentry->d_lock); simple_unlink(dir->d_inode, dentry); + /* XXX: what is dcache_lock protecting here? 
Other + * filesystems (IB, configfs) release dcache_lock + * before unlink */ spin_unlock(&dcache_lock); dput(dentry); } else { diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c index b690aa35df9a..e3ab4437ea96 100644 --- a/drivers/usb/core/inode.c +++ b/drivers/usb/core/inode.c @@ -347,10 +347,13 @@ static int usbfs_empty (struct dentry *dentry) list_for_each(list, &dentry->d_subdirs) { struct dentry *de = list_entry(list, struct dentry, d_u.d_child); + spin_lock(&de->d_lock); if (usbfs_positive(de)) { + spin_unlock(&de->d_lock); spin_unlock(&dcache_lock); return 0; } + spin_unlock(&de->d_lock); } spin_unlock(&dcache_lock); diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h index 3d283abf67d7..3912dcf047e5 100644 --- a/fs/autofs4/autofs_i.h +++ b/fs/autofs4/autofs_i.h @@ -254,19 +254,6 @@ static inline int simple_positive(struct dentry *dentry) return dentry->d_inode && !d_unhashed(dentry); } -static inline int __simple_empty(struct dentry *dentry) -{ - struct dentry *child; - int ret = 0; - - list_for_each_entry(child, &dentry->d_subdirs, d_u.d_child) - if (simple_positive(child)) - goto out; - ret = 1; -out: - return ret; -} - static inline void autofs4_add_expiring(struct dentry *dentry) { struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c index 413b5642e6cf..ee6402050f13 100644 --- a/fs/autofs4/expire.c +++ b/fs/autofs4/expire.c @@ -160,14 +160,18 @@ static int autofs4_tree_busy(struct vfsmount *mnt, spin_lock(&dcache_lock); for (p = top; p; p = next_dentry(p, top)) { + spin_lock(&p->d_lock); /* Negative dentry - give up */ - if (!simple_positive(p)) + if (!simple_positive(p)) { + spin_unlock(&p->d_lock); continue; + } DPRINTK("dentry %p %.*s", p, (int) p->d_name.len, p->d_name.name); - p = dget(p); + p = dget_dlock(p); + spin_unlock(&p->d_lock); spin_unlock(&dcache_lock); /* @@ -228,14 +232,18 @@ static struct dentry *autofs4_check_leaves(struct vfsmount *mnt, spin_lock(&dcache_lock); for (p = parent; p; p = next_dentry(p, parent)) { + spin_lock(&p->d_lock); /* Negative dentry - give up */ - if (!simple_positive(p)) + if (!simple_positive(p)) { + spin_unlock(&p->d_lock); continue; + } DPRINTK("dentry %p %.*s", p, (int) p->d_name.len, p->d_name.name); - p = dget(p); + p = dget_dlock(p); + spin_unlock(&p->d_lock); spin_unlock(&dcache_lock); if (d_mountpoint(p)) { @@ -324,12 +332,15 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb, struct dentry *dentry = list_entry(next, struct dentry, d_u.d_child); /* Negative dentry - give up */ + spin_lock(&dentry->d_lock); if (!simple_positive(dentry)) { next = next->next; + spin_unlock(&dentry->d_lock); continue; } - dentry = dget(dentry); + dentry = dget_dlock(dentry); + spin_unlock(&dentry->d_lock); spin_unlock(&dcache_lock); spin_lock(&sbi->fs_lock); diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 3ecf915a4550..571f270dca0f 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -136,6 +136,7 @@ more: fi->at_end = 1; goto out_unlock; } + spin_lock(&dentry->d_lock); if (!d_unhashed(dentry) && dentry->d_inode && ceph_snap(dentry->d_inode) != CEPH_SNAPDIR && ceph_ino(dentry->d_inode) != CEPH_INO_CEPH && @@ -145,13 +146,13 @@ more: dentry->d_name.len, dentry->d_name.name, di->offset, filp->f_pos, d_unhashed(dentry) ? " unhashed" : "", !dentry->d_inode ? 
" null" : ""); + spin_unlock(&dentry->d_lock); p = p->prev; dentry = list_entry(p, struct dentry, d_u.d_child); di = ceph_dentry(dentry); } - spin_lock(&dentry->d_lock); - dentry->d_count++; + dget_dlock(dentry); spin_unlock(&dentry->d_lock); spin_unlock(&dcache_lock); diff --git a/fs/configfs/configfs_internal.h b/fs/configfs/configfs_internal.h index da6061a6df40..e58b4c30e216 100644 --- a/fs/configfs/configfs_internal.h +++ b/fs/configfs/configfs_internal.h @@ -121,6 +121,7 @@ static inline struct config_item *configfs_get_config_item(struct dentry *dentry struct config_item * item = NULL; spin_lock(&dcache_lock); + spin_lock(&dentry->d_lock); if (!d_unhashed(dentry)) { struct configfs_dirent * sd = dentry->d_fsdata; if (sd->s_type & CONFIGFS_ITEM_LINK) { @@ -129,6 +130,7 @@ static inline struct config_item *configfs_get_config_item(struct dentry *dentry } else item = config_item_get(sd->s_element); } + spin_unlock(&dentry->d_lock); spin_unlock(&dcache_lock); return item; diff --git a/fs/dcache.c b/fs/dcache.c index 81e91502b294..ee127f9ab274 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -46,6 +46,7 @@ * - d_name * - d_lru * - d_count + * - d_unhashed() * * Ordering: * dcache_lock @@ -53,6 +54,13 @@ * dcache_lru_lock * dcache_hash_lock * + * If there is an ancestor relationship: + * dentry->d_parent->...->d_parent->d_lock + * ... + * dentry->d_parent->d_lock + * dentry->d_lock + * + * If no ancestor relationship: * if (dentry1 < dentry2) * dentry1->d_lock * dentry2->d_lock @@ -379,7 +387,9 @@ int d_invalidate(struct dentry * dentry) * If it's already been dropped, return OK. */ spin_lock(&dcache_lock); + spin_lock(&dentry->d_lock); if (d_unhashed(dentry)) { + spin_unlock(&dentry->d_lock); spin_unlock(&dcache_lock); return 0; } @@ -388,9 +398,11 @@ int d_invalidate(struct dentry * dentry) * to get rid of unused child entries. */ if (!list_empty(&dentry->d_subdirs)) { + spin_unlock(&dentry->d_lock); spin_unlock(&dcache_lock); shrink_dcache_parent(dentry); spin_lock(&dcache_lock); + spin_lock(&dentry->d_lock); } /* @@ -403,7 +415,6 @@ int d_invalidate(struct dentry * dentry) * we might still populate it if it was a * working directory or similar). */ - spin_lock(&dentry->d_lock); if (dentry->d_count > 1) { if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) { spin_unlock(&dentry->d_lock); @@ -490,35 +501,44 @@ EXPORT_SYMBOL(dget_parent); * any other hashed alias over that one unless @want_discon is set, * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias. 
*/ - -static struct dentry * __d_find_alias(struct inode *inode, int want_discon) +static struct dentry *__d_find_alias(struct inode *inode, int want_discon) { - struct list_head *head, *next, *tmp; - struct dentry *alias, *discon_alias=NULL; + struct dentry *alias, *discon_alias; - head = &inode->i_dentry; - next = inode->i_dentry.next; - while (next != head) { - tmp = next; - next = tmp->next; - prefetch(next); - alias = list_entry(tmp, struct dentry, d_alias); +again: + discon_alias = NULL; + list_for_each_entry(alias, &inode->i_dentry, d_alias) { + spin_lock(&alias->d_lock); if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) { if (IS_ROOT(alias) && - (alias->d_flags & DCACHE_DISCONNECTED)) + (alias->d_flags & DCACHE_DISCONNECTED)) { discon_alias = alias; - else if (!want_discon) { - __dget_locked(alias); + } else if (!want_discon) { + __dget_locked_dlock(alias); + spin_unlock(&alias->d_lock); + return alias; + } + } + spin_unlock(&alias->d_lock); + } + if (discon_alias) { + alias = discon_alias; + spin_lock(&alias->d_lock); + if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) { + if (IS_ROOT(alias) && + (alias->d_flags & DCACHE_DISCONNECTED)) { + __dget_locked_dlock(alias); + spin_unlock(&alias->d_lock); return alias; } } + spin_unlock(&alias->d_lock); + goto again; } - if (discon_alias) - __dget_locked(discon_alias); - return discon_alias; + return NULL; } -struct dentry * d_find_alias(struct inode *inode) +struct dentry *d_find_alias(struct inode *inode) { struct dentry *de = NULL; @@ -801,8 +821,8 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry) spin_lock(&dcache_lock); spin_lock(&dentry->d_lock); dentry_lru_del(dentry); - spin_unlock(&dentry->d_lock); __d_drop(dentry); + spin_unlock(&dentry->d_lock); spin_unlock(&dcache_lock); for (;;) { @@ -817,8 +837,8 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry) d_u.d_child) { spin_lock(&loop->d_lock); dentry_lru_del(loop); - spin_unlock(&loop->d_lock); __d_drop(loop); + spin_unlock(&loop->d_lock); cond_resched_lock(&dcache_lock); } spin_unlock(&dcache_lock); @@ -1863,7 +1883,10 @@ static void d_move_locked(struct dentry * dentry, struct dentry * target) /* * XXXX: do we really need to take target->d_lock? 
*/ - if (target < dentry) { + if (d_ancestor(dentry, target)) { + spin_lock(&dentry->d_lock); + spin_lock_nested(&target->d_lock, DENTRY_D_LOCK_NESTED); + } else if (d_ancestor(target, dentry) || target < dentry) { spin_lock(&target->d_lock); spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); } else { @@ -2542,13 +2565,16 @@ resume: struct list_head *tmp = next; struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child); next = tmp->next; - if (d_unhashed(dentry)||!dentry->d_inode) + spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); + if (d_unhashed(dentry) || !dentry->d_inode) { + spin_unlock(&dentry->d_lock); continue; + } if (!list_empty(&dentry->d_subdirs)) { + spin_unlock(&dentry->d_lock); this_parent = dentry; goto repeat; } - spin_lock(&dentry->d_lock); dentry->d_count--; spin_unlock(&dentry->d_lock); } diff --git a/fs/libfs.c b/fs/libfs.c index b9d25d83e228..433e7139c23a 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -16,6 +16,11 @@ #include +static inline int simple_positive(struct dentry *dentry) +{ + return dentry->d_inode && !d_unhashed(dentry); +} + int simple_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { @@ -100,8 +105,10 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int origin) while (n && p != &file->f_path.dentry->d_subdirs) { struct dentry *next; next = list_entry(p, struct dentry, d_u.d_child); - if (!d_unhashed(next) && next->d_inode) + spin_lock(&next->d_lock); + if (simple_positive(next)) n--; + spin_unlock(&next->d_lock); p = p->next; } list_add_tail(&cursor->d_u.d_child, p); @@ -155,9 +162,13 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir) for (p=q->next; p != &dentry->d_subdirs; p=p->next) { struct dentry *next; next = list_entry(p, struct dentry, d_u.d_child); - if (d_unhashed(next) || !next->d_inode) + spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED); + if (!simple_positive(next)) { + spin_unlock(&next->d_lock); continue; + } + spin_unlock(&next->d_lock); spin_unlock(&dcache_lock); if (filldir(dirent, next->d_name.name, next->d_name.len, filp->f_pos, @@ -259,20 +270,20 @@ int simple_link(struct dentry *old_dentry, struct inode *dir, struct dentry *den return 0; } -static inline int simple_positive(struct dentry *dentry) -{ - return dentry->d_inode && !d_unhashed(dentry); -} - int simple_empty(struct dentry *dentry) { struct dentry *child; int ret = 0; spin_lock(&dcache_lock); - list_for_each_entry(child, &dentry->d_subdirs, d_u.d_child) - if (simple_positive(child)) + list_for_each_entry(child, &dentry->d_subdirs, d_u.d_child) { + spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED); + if (simple_positive(child)) { + spin_unlock(&child->d_lock); goto out; + } + spin_unlock(&child->d_lock); + } ret = 1; out: spin_unlock(&dcache_lock); diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c index 895532ac4d98..35e5f5a9ef59 100644 --- a/fs/ocfs2/dcache.c +++ b/fs/ocfs2/dcache.c @@ -174,13 +174,16 @@ struct dentry *ocfs2_find_local_alias(struct inode *inode, list_for_each(p, &inode->i_dentry) { dentry = list_entry(p, struct dentry, d_alias); + spin_lock(&dentry->d_lock); if (ocfs2_match_dentry(dentry, parent_blkno, skip_unhashed)) { mlog(0, "dentry found: %.*s\n", dentry->d_name.len, dentry->d_name.name); - dget_locked(dentry); + dget_locked_dlock(dentry); + spin_unlock(&dentry->d_lock); break; } + spin_unlock(&dentry->d_lock); dentry = NULL; } diff --git a/security/tomoyo/realpath.c b/security/tomoyo/realpath.c index 1d0bf8fa1922..d1e05b047715 100644 --- 
a/security/tomoyo/realpath.c +++ b/security/tomoyo/realpath.c @@ -14,6 +14,7 @@ #include #include #include "common.h" +#include "../../fs/internal.h" /** * tomoyo_encode: Convert binary string to ascii string. -- cgit v1.2.3-58-ga151 From 2fd6b7f50797f2e993eea59e0a0b8c6399c811dc Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Fri, 7 Jan 2011 17:49:34 +1100 Subject: fs: dcache scale subdirs Protect d_subdirs and d_child with d_lock, except in filesystems that aren't using dcache_lock for these anyway (eg. using i_mutex). Note: if we change the locking rule in future so that ->d_child protection is provided only with ->d_parent->d_lock, it may allow us to reduce some locking. But it would be an exception to an otherwise regular locking scheme, so we'd have to see some good results. Probably not worthwhile. Signed-off-by: Nick Piggin --- drivers/staging/smbfs/cache.c | 4 + drivers/usb/core/inode.c | 8 +- fs/autofs4/autofs_i.h | 11 ++ fs/autofs4/expire.c | 127 ++++++++++----------- fs/autofs4/root.c | 18 ++- fs/ceph/dir.c | 6 +- fs/ceph/inode.c | 8 +- fs/coda/cache.c | 2 + fs/dcache.c | 248 +++++++++++++++++++++++++++++------------- fs/libfs.c | 24 +++- fs/ncpfs/dir.c | 3 + fs/ncpfs/ncplib_kernel.h | 4 + fs/notify/fsnotify.c | 4 +- include/linux/dcache.h | 1 + kernel/cgroup.c | 19 +++- security/selinux/selinuxfs.c | 12 +- 16 files changed, 339 insertions(+), 160 deletions(-) (limited to 'security') diff --git a/drivers/staging/smbfs/cache.c b/drivers/staging/smbfs/cache.c index 0beded260b00..920434b6c071 100644 --- a/drivers/staging/smbfs/cache.c +++ b/drivers/staging/smbfs/cache.c @@ -63,6 +63,7 @@ smb_invalidate_dircache_entries(struct dentry *parent) struct dentry *dentry; spin_lock(&dcache_lock); + spin_lock(&parent->d_lock); next = parent->d_subdirs.next; while (next != &parent->d_subdirs) { dentry = list_entry(next, struct dentry, d_u.d_child); @@ -70,6 +71,7 @@ smb_invalidate_dircache_entries(struct dentry *parent) smb_age_dentry(server, dentry); next = next->next; } + spin_unlock(&parent->d_lock); spin_unlock(&dcache_lock); } @@ -97,6 +99,7 @@ smb_dget_fpos(struct dentry *dentry, struct dentry *parent, unsigned long fpos) /* If a pointer is invalid, we search the dentry. 
*/ spin_lock(&dcache_lock); + spin_lock(&parent->d_lock); next = parent->d_subdirs.next; while (next != &parent->d_subdirs) { dent = list_entry(next, struct dentry, d_u.d_child); @@ -111,6 +114,7 @@ smb_dget_fpos(struct dentry *dentry, struct dentry *parent, unsigned long fpos) } dent = NULL; out_unlock: + spin_unlock(&parent->d_lock); spin_unlock(&dcache_lock); return dent; } diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c index e3ab4437ea96..89a0e8366585 100644 --- a/drivers/usb/core/inode.c +++ b/drivers/usb/core/inode.c @@ -344,18 +344,20 @@ static int usbfs_empty (struct dentry *dentry) struct list_head *list; spin_lock(&dcache_lock); - + spin_lock(&dentry->d_lock); list_for_each(list, &dentry->d_subdirs) { struct dentry *de = list_entry(list, struct dentry, d_u.d_child); - spin_lock(&de->d_lock); + + spin_lock_nested(&de->d_lock, DENTRY_D_LOCK_NESTED); if (usbfs_positive(de)) { spin_unlock(&de->d_lock); + spin_unlock(&dentry->d_lock); spin_unlock(&dcache_lock); return 0; } spin_unlock(&de->d_lock); } - + spin_unlock(&dentry->d_lock); spin_unlock(&dcache_lock); return 1; } diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h index 3912dcf047e5..9d2ae9b30d9f 100644 --- a/fs/autofs4/autofs_i.h +++ b/fs/autofs4/autofs_i.h @@ -254,6 +254,17 @@ static inline int simple_positive(struct dentry *dentry) return dentry->d_inode && !d_unhashed(dentry); } +static inline void __autofs4_add_expiring(struct dentry *dentry) +{ + struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); + struct autofs_info *ino = autofs4_dentry_ino(dentry); + if (ino) { + if (list_empty(&ino->expiring)) + list_add(&ino->expiring, &sbi->expiring_list); + } + return; +} + static inline void autofs4_add_expiring(struct dentry *dentry) { struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c index ee6402050f13..968c1434af62 100644 --- a/fs/autofs4/expire.c +++ b/fs/autofs4/expire.c @@ -91,24 +91,64 @@ done: } /* - * Calculate next entry in top down tree traversal. - * From next_mnt in namespace.c - elegant. + * Calculate and dget next entry in top down tree traversal. 
*/ -static struct dentry *next_dentry(struct dentry *p, struct dentry *root) +static struct dentry *get_next_positive_dentry(struct dentry *prev, + struct dentry *root) { - struct list_head *next = p->d_subdirs.next; + struct list_head *next; + struct dentry *p, *ret; + + if (prev == NULL) + return dget(prev); + spin_lock(&dcache_lock); +relock: + p = prev; + spin_lock(&p->d_lock); +again: + next = p->d_subdirs.next; if (next == &p->d_subdirs) { while (1) { - if (p == root) + struct dentry *parent; + + if (p == root) { + spin_unlock(&p->d_lock); + spin_unlock(&dcache_lock); + dput(prev); return NULL; + } + + parent = p->d_parent; + if (!spin_trylock(&parent->d_lock)) { + spin_unlock(&p->d_lock); + cpu_relax(); + goto relock; + } + spin_unlock(&p->d_lock); next = p->d_u.d_child.next; - if (next != &p->d_parent->d_subdirs) + p = parent; + if (next != &parent->d_subdirs) break; - p = p->d_parent; } } - return list_entry(next, struct dentry, d_u.d_child); + ret = list_entry(next, struct dentry, d_u.d_child); + + spin_lock_nested(&ret->d_lock, DENTRY_D_LOCK_NESTED); + /* Negative dentry - try next */ + if (!simple_positive(ret)) { + spin_unlock(&ret->d_lock); + p = ret; + goto again; + } + dget_dlock(ret); + spin_unlock(&ret->d_lock); + spin_unlock(&p->d_lock); + spin_unlock(&dcache_lock); + + dput(prev); + + return ret; } /* @@ -158,22 +198,11 @@ static int autofs4_tree_busy(struct vfsmount *mnt, if (!simple_positive(top)) return 1; - spin_lock(&dcache_lock); - for (p = top; p; p = next_dentry(p, top)) { - spin_lock(&p->d_lock); - /* Negative dentry - give up */ - if (!simple_positive(p)) { - spin_unlock(&p->d_lock); - continue; - } - + p = NULL; + while ((p = get_next_positive_dentry(p, top))) { DPRINTK("dentry %p %.*s", p, (int) p->d_name.len, p->d_name.name); - p = dget_dlock(p); - spin_unlock(&p->d_lock); - spin_unlock(&dcache_lock); - /* * Is someone visiting anywhere in the subtree ? 
* If there's no mount we need to check the usage @@ -208,10 +237,7 @@ static int autofs4_tree_busy(struct vfsmount *mnt, return 1; } } - dput(p); - spin_lock(&dcache_lock); } - spin_unlock(&dcache_lock); /* Timeout of a tree mount is ultimately determined by its top dentry */ if (!autofs4_can_expire(top, timeout, do_now)) @@ -230,36 +256,21 @@ static struct dentry *autofs4_check_leaves(struct vfsmount *mnt, DPRINTK("parent %p %.*s", parent, (int)parent->d_name.len, parent->d_name.name); - spin_lock(&dcache_lock); - for (p = parent; p; p = next_dentry(p, parent)) { - spin_lock(&p->d_lock); - /* Negative dentry - give up */ - if (!simple_positive(p)) { - spin_unlock(&p->d_lock); - continue; - } - + p = NULL; + while ((p = get_next_positive_dentry(p, parent))) { DPRINTK("dentry %p %.*s", p, (int) p->d_name.len, p->d_name.name); - p = dget_dlock(p); - spin_unlock(&p->d_lock); - spin_unlock(&dcache_lock); - if (d_mountpoint(p)) { /* Can we umount this guy */ if (autofs4_mount_busy(mnt, p)) - goto cont; + continue; /* Can we expire this guy */ if (autofs4_can_expire(p, timeout, do_now)) return p; } -cont: - dput(p); - spin_lock(&dcache_lock); } - spin_unlock(&dcache_lock); return NULL; } @@ -310,8 +321,8 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb, { unsigned long timeout; struct dentry *root = sb->s_root; + struct dentry *dentry; struct dentry *expired = NULL; - struct list_head *next; int do_now = how & AUTOFS_EXP_IMMEDIATE; int exp_leaves = how & AUTOFS_EXP_LEAVES; struct autofs_info *ino; @@ -323,26 +334,8 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb, now = jiffies; timeout = sbi->exp_timeout; - spin_lock(&dcache_lock); - next = root->d_subdirs.next; - - /* On exit from the loop expire is set to a dgot dentry - * to expire or it's NULL */ - while ( next != &root->d_subdirs ) { - struct dentry *dentry = list_entry(next, struct dentry, d_u.d_child); - - /* Negative dentry - give up */ - spin_lock(&dentry->d_lock); - if (!simple_positive(dentry)) { - next = next->next; - spin_unlock(&dentry->d_lock); - continue; - } - - dentry = dget_dlock(dentry); - spin_unlock(&dentry->d_lock); - spin_unlock(&dcache_lock); - + dentry = NULL; + while ((dentry = get_next_positive_dentry(dentry, root))) { spin_lock(&sbi->fs_lock); ino = autofs4_dentry_ino(dentry); @@ -405,11 +398,7 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb, } next: spin_unlock(&sbi->fs_lock); - dput(dentry); - spin_lock(&dcache_lock); - next = next->next; } - spin_unlock(&dcache_lock); return NULL; found: @@ -420,7 +409,11 @@ found: init_completion(&ino->expire_complete); spin_unlock(&sbi->fs_lock); spin_lock(&dcache_lock); + spin_lock(&expired->d_parent->d_lock); + spin_lock_nested(&expired->d_lock, DENTRY_D_LOCK_NESTED); list_move(&expired->d_parent->d_subdirs, &expired->d_u.d_child); + spin_unlock(&expired->d_lock); + spin_unlock(&expired->d_parent->d_lock); spin_unlock(&dcache_lock); return expired; } diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index 7922509b5b2b..7a9ed6b88291 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c @@ -143,10 +143,13 @@ static int autofs4_dir_open(struct inode *inode, struct file *file) * it. 
*/ spin_lock(&dcache_lock); + spin_lock(&dentry->d_lock); if (!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs)) { + spin_unlock(&dentry->d_lock); spin_unlock(&dcache_lock); return -ENOENT; } + spin_unlock(&dentry->d_lock); spin_unlock(&dcache_lock); out: @@ -253,7 +256,9 @@ static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd) lookup_type = autofs4_need_mount(nd->flags); spin_lock(&sbi->fs_lock); spin_lock(&dcache_lock); + spin_lock(&dentry->d_lock); if (!(lookup_type || ino->flags & AUTOFS_INF_PENDING)) { + spin_unlock(&dentry->d_lock); spin_unlock(&dcache_lock); spin_unlock(&sbi->fs_lock); goto follow; @@ -266,6 +271,7 @@ static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd) */ if (ino->flags & AUTOFS_INF_PENDING || (!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs))) { + spin_unlock(&dentry->d_lock); spin_unlock(&dcache_lock); spin_unlock(&sbi->fs_lock); @@ -275,6 +281,7 @@ static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd) goto follow; } + spin_unlock(&dentry->d_lock); spin_unlock(&dcache_lock); spin_unlock(&sbi->fs_lock); follow: @@ -347,10 +354,12 @@ static int autofs4_revalidate(struct dentry *dentry, struct nameidata *nd) /* Check for a non-mountpoint directory with no contents */ spin_lock(&dcache_lock); + spin_lock(&dentry->d_lock); if (S_ISDIR(dentry->d_inode->i_mode) && !d_mountpoint(dentry) && list_empty(&dentry->d_subdirs)) { DPRINTK("dentry=%p %.*s, emptydir", dentry, dentry->d_name.len, dentry->d_name.name); + spin_unlock(&dentry->d_lock); spin_unlock(&dcache_lock); /* The daemon never causes a mount to trigger */ @@ -367,6 +376,7 @@ static int autofs4_revalidate(struct dentry *dentry, struct nameidata *nd) return status; } + spin_unlock(&dentry->d_lock); spin_unlock(&dcache_lock); return 1; @@ -776,12 +786,16 @@ static int autofs4_dir_rmdir(struct inode *dir, struct dentry *dentry) return -EACCES; spin_lock(&dcache_lock); + spin_lock(&sbi->lookup_lock); + spin_lock(&dentry->d_lock); if (!list_empty(&dentry->d_subdirs)) { + spin_unlock(&dentry->d_lock); + spin_unlock(&sbi->lookup_lock); spin_unlock(&dcache_lock); return -ENOTEMPTY; } - autofs4_add_expiring(dentry); - spin_lock(&dentry->d_lock); + __autofs4_add_expiring(dentry); + spin_unlock(&sbi->lookup_lock); __d_drop(dentry); spin_unlock(&dentry->d_lock); spin_unlock(&dcache_lock); diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 571f270dca0f..2c924e8d85fe 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -113,6 +113,7 @@ static int __dcache_readdir(struct file *filp, last); spin_lock(&dcache_lock); + spin_lock(&parent->d_lock); /* start at beginning? 
*/ if (filp->f_pos == 2 || last == NULL || @@ -136,7 +137,7 @@ more: fi->at_end = 1; goto out_unlock; } - spin_lock(&dentry->d_lock); + spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); if (!d_unhashed(dentry) && dentry->d_inode && ceph_snap(dentry->d_inode) != CEPH_SNAPDIR && ceph_ino(dentry->d_inode) != CEPH_INO_CEPH && @@ -154,6 +155,7 @@ more: dget_dlock(dentry); spin_unlock(&dentry->d_lock); + spin_unlock(&parent->d_lock); spin_unlock(&dcache_lock); dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos, @@ -188,10 +190,12 @@ more: } spin_lock(&dcache_lock); + spin_lock(&parent->d_lock); p = p->prev; /* advance to next dentry */ goto more; out_unlock: + spin_unlock(&parent->d_lock); spin_unlock(&dcache_lock); out: if (last) diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index bb68c799074d..2c6944473366 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -842,11 +842,13 @@ static void ceph_set_dentry_offset(struct dentry *dn) spin_unlock(&inode->i_lock); spin_lock(&dcache_lock); - spin_lock(&dn->d_lock); + spin_lock(&dir->d_lock); + spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED); list_move(&dn->d_u.d_child, &dir->d_subdirs); dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset, dn->d_u.d_child.prev, dn->d_u.d_child.next); spin_unlock(&dn->d_lock); + spin_unlock(&dir->d_lock); spin_unlock(&dcache_lock); } @@ -1232,9 +1234,11 @@ retry_lookup: } else { /* reorder parent's d_subdirs */ spin_lock(&dcache_lock); - spin_lock(&dn->d_lock); + spin_lock(&parent->d_lock); + spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED); list_move(&dn->d_u.d_child, &parent->d_subdirs); spin_unlock(&dn->d_lock); + spin_unlock(&parent->d_lock); spin_unlock(&dcache_lock); } diff --git a/fs/coda/cache.c b/fs/coda/cache.c index 9060f08e70cf..859393fca2b7 100644 --- a/fs/coda/cache.c +++ b/fs/coda/cache.c @@ -94,6 +94,7 @@ static void coda_flag_children(struct dentry *parent, int flag) struct dentry *de; spin_lock(&dcache_lock); + spin_lock(&parent->d_lock); list_for_each(child, &parent->d_subdirs) { de = list_entry(child, struct dentry, d_u.d_child); @@ -102,6 +103,7 @@ static void coda_flag_children(struct dentry *parent, int flag) continue; coda_flag_inode(de->d_inode, flag); } + spin_unlock(&parent->d_lock); spin_unlock(&dcache_lock); return; } diff --git a/fs/dcache.c b/fs/dcache.c index ee127f9ab274..a661247a20d5 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -47,6 +47,8 @@ * - d_lru * - d_count * - d_unhashed() + * - d_parent and d_subdirs + * - childrens' d_child and d_parent * * Ordering: * dcache_lock @@ -223,24 +225,22 @@ static void dentry_lru_move_tail(struct dentry *dentry) * * If this is the root of the dentry tree, return NULL. * - * dcache_lock and d_lock must be held by caller, are dropped by d_kill. + * dcache_lock and d_lock and d_parent->d_lock must be held by caller, and + * are dropped by d_kill. */ -static struct dentry *d_kill(struct dentry *dentry) +static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent) __releases(dentry->d_lock) + __releases(parent->d_lock) __releases(dcache_lock) { - struct dentry *parent; - list_del(&dentry->d_u.d_child); + if (parent) + spin_unlock(&parent->d_lock); dentry_iput(dentry); /* * dentry_iput drops the locks, at which point nobody (except * transient RCU lookups) can reach this dentry. 
*/ - if (IS_ROOT(dentry)) - parent = NULL; - else - parent = dentry->d_parent; d_free(dentry); return parent; } @@ -312,6 +312,7 @@ EXPORT_SYMBOL(d_drop); void dput(struct dentry *dentry) { + struct dentry *parent; if (!dentry) return; @@ -319,6 +320,10 @@ repeat: if (dentry->d_count == 1) might_sleep(); spin_lock(&dentry->d_lock); + if (IS_ROOT(dentry)) + parent = NULL; + else + parent = dentry->d_parent; if (dentry->d_count == 1) { if (!spin_trylock(&dcache_lock)) { /* @@ -330,10 +335,17 @@ repeat: spin_unlock(&dentry->d_lock); goto repeat; } + if (parent && !spin_trylock(&parent->d_lock)) { + spin_unlock(&dentry->d_lock); + spin_unlock(&dcache_lock); + goto repeat; + } } dentry->d_count--; if (dentry->d_count) { spin_unlock(&dentry->d_lock); + if (parent) + spin_unlock(&parent->d_lock); spin_unlock(&dcache_lock); return; } @@ -355,6 +367,8 @@ repeat: dentry_lru_add(dentry); spin_unlock(&dentry->d_lock); + if (parent) + spin_unlock(&parent->d_lock); spin_unlock(&dcache_lock); return; @@ -363,7 +377,7 @@ unhash_it: kill_it: /* if dentry was on the d_lru list delete it from there */ dentry_lru_del(dentry); - dentry = d_kill(dentry); + dentry = d_kill(dentry, parent); if (dentry) goto repeat; } @@ -584,12 +598,13 @@ EXPORT_SYMBOL(d_prune_aliases); * quadratic behavior of shrink_dcache_parent(), but is also expected * to be beneficial in reducing dentry cache fragmentation. */ -static void prune_one_dentry(struct dentry * dentry) +static void prune_one_dentry(struct dentry *dentry, struct dentry *parent) __releases(dentry->d_lock) + __releases(parent->d_lock) __releases(dcache_lock) { __d_drop(dentry); - dentry = d_kill(dentry); + dentry = d_kill(dentry, parent); /* * Prune ancestors. Locking is simpler than in dput(), @@ -597,9 +612,20 @@ static void prune_one_dentry(struct dentry * dentry) */ while (dentry) { spin_lock(&dcache_lock); +again: spin_lock(&dentry->d_lock); + if (IS_ROOT(dentry)) + parent = NULL; + else + parent = dentry->d_parent; + if (parent && !spin_trylock(&parent->d_lock)) { + spin_unlock(&dentry->d_lock); + goto again; + } dentry->d_count--; if (dentry->d_count) { + if (parent) + spin_unlock(&parent->d_lock); spin_unlock(&dentry->d_lock); spin_unlock(&dcache_lock); return; @@ -607,7 +633,7 @@ static void prune_one_dentry(struct dentry * dentry) dentry_lru_del(dentry); __d_drop(dentry); - dentry = d_kill(dentry); + dentry = d_kill(dentry, parent); } } @@ -616,29 +642,40 @@ static void shrink_dentry_list(struct list_head *list) struct dentry *dentry; while (!list_empty(list)) { + struct dentry *parent; + dentry = list_entry(list->prev, struct dentry, d_lru); if (!spin_trylock(&dentry->d_lock)) { +relock: spin_unlock(&dcache_lru_lock); cpu_relax(); spin_lock(&dcache_lru_lock); continue; } - __dentry_lru_del(dentry); - /* * We found an inuse dentry which was not removed from * the LRU because of laziness during lookup. Do not free * it - just keep it off the LRU list. 
*/ if (dentry->d_count) { + __dentry_lru_del(dentry); spin_unlock(&dentry->d_lock); continue; } + if (IS_ROOT(dentry)) + parent = NULL; + else + parent = dentry->d_parent; + if (parent && !spin_trylock(&parent->d_lock)) { + spin_unlock(&dentry->d_lock); + goto relock; + } + __dentry_lru_del(dentry); spin_unlock(&dcache_lru_lock); - prune_one_dentry(dentry); + prune_one_dentry(dentry, parent); /* dcache_lock and dentry->d_lock dropped */ spin_lock(&dcache_lock); spin_lock(&dcache_lru_lock); @@ -833,14 +870,16 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry) /* this is a branch with children - detach all of them * from the system in one go */ spin_lock(&dcache_lock); + spin_lock(&dentry->d_lock); list_for_each_entry(loop, &dentry->d_subdirs, d_u.d_child) { - spin_lock(&loop->d_lock); + spin_lock_nested(&loop->d_lock, + DENTRY_D_LOCK_NESTED); dentry_lru_del(loop); __d_drop(loop); spin_unlock(&loop->d_lock); - cond_resched_lock(&dcache_lock); } + spin_unlock(&dentry->d_lock); spin_unlock(&dcache_lock); /* move to the first child */ @@ -868,16 +907,17 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry) BUG(); } - if (IS_ROOT(dentry)) + if (IS_ROOT(dentry)) { parent = NULL; - else { + list_del(&dentry->d_u.d_child); + } else { parent = dentry->d_parent; spin_lock(&parent->d_lock); parent->d_count--; + list_del(&dentry->d_u.d_child); spin_unlock(&parent->d_lock); } - list_del(&dentry->d_u.d_child); detached++; inode = dentry->d_inode; @@ -958,6 +998,7 @@ int have_submounts(struct dentry *parent) spin_lock(&dcache_lock); if (d_mountpoint(parent)) goto positive; + spin_lock(&this_parent->d_lock); repeat: next = this_parent->d_subdirs.next; resume: @@ -965,22 +1006,34 @@ resume: struct list_head *tmp = next; struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child); next = tmp->next; + + spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); /* Have we found a mount point ? */ - if (d_mountpoint(dentry)) + if (d_mountpoint(dentry)) { + spin_unlock(&dentry->d_lock); + spin_unlock(&this_parent->d_lock); goto positive; + } if (!list_empty(&dentry->d_subdirs)) { + spin_unlock(&this_parent->d_lock); + spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_); this_parent = dentry; + spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_); goto repeat; } + spin_unlock(&dentry->d_lock); } /* * All done at this level ... ascend and resume the search. */ if (this_parent != parent) { next = this_parent->d_u.d_child.next; + spin_unlock(&this_parent->d_lock); this_parent = this_parent->d_parent; + spin_lock(&this_parent->d_lock); goto resume; } + spin_unlock(&this_parent->d_lock); spin_unlock(&dcache_lock); return 0; /* No mount points found in tree */ positive: @@ -1010,6 +1063,7 @@ static int select_parent(struct dentry * parent) int found = 0; spin_lock(&dcache_lock); + spin_lock(&this_parent->d_lock); repeat: next = this_parent->d_subdirs.next; resume: @@ -1017,8 +1071,9 @@ resume: struct list_head *tmp = next; struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child); next = tmp->next; + BUG_ON(this_parent == dentry); - spin_lock(&dentry->d_lock); + spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); /* * move only zero ref count dentries to the end @@ -1031,33 +1086,44 @@ resume: dentry_lru_del(dentry); } - spin_unlock(&dentry->d_lock); - /* * We can return to the caller if we have found some (this * ensures forward progress). We'll be coming back to find * the rest. 
*/ - if (found && need_resched()) + if (found && need_resched()) { + spin_unlock(&dentry->d_lock); goto out; + } /* * Descend a level if the d_subdirs list is non-empty. */ if (!list_empty(&dentry->d_subdirs)) { + spin_unlock(&this_parent->d_lock); + spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_); this_parent = dentry; + spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_); goto repeat; } + + spin_unlock(&dentry->d_lock); } /* * All done at this level ... ascend and resume the search. */ if (this_parent != parent) { + struct dentry *tmp; next = this_parent->d_u.d_child.next; - this_parent = this_parent->d_parent; + tmp = this_parent->d_parent; + spin_unlock(&this_parent->d_lock); + BUG_ON(tmp == this_parent); + this_parent = tmp; + spin_lock(&this_parent->d_lock); goto resume; } out: + spin_unlock(&this_parent->d_lock); spin_unlock(&dcache_lock); return found; } @@ -1155,18 +1221,19 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name) INIT_LIST_HEAD(&dentry->d_lru); INIT_LIST_HEAD(&dentry->d_subdirs); INIT_LIST_HEAD(&dentry->d_alias); + INIT_LIST_HEAD(&dentry->d_u.d_child); if (parent) { - dentry->d_parent = dget(parent); + spin_lock(&dcache_lock); + spin_lock(&parent->d_lock); + spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); + dentry->d_parent = dget_dlock(parent); dentry->d_sb = parent->d_sb; - } else { - INIT_LIST_HEAD(&dentry->d_u.d_child); - } - - spin_lock(&dcache_lock); - if (parent) list_add(&dentry->d_u.d_child, &parent->d_subdirs); - spin_unlock(&dcache_lock); + spin_unlock(&dentry->d_lock); + spin_unlock(&parent->d_lock); + spin_unlock(&dcache_lock); + } this_cpu_inc(nr_dentry); @@ -1684,13 +1751,18 @@ int d_validate(struct dentry *dentry, struct dentry *dparent) struct dentry *child; spin_lock(&dcache_lock); + spin_lock(&dparent->d_lock); list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) { if (dentry == child) { - __dget_locked(dentry); + spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); + __dget_locked_dlock(dentry); + spin_unlock(&dentry->d_lock); + spin_unlock(&dparent->d_lock); spin_unlock(&dcache_lock); return 1; } } + spin_unlock(&dparent->d_lock); spin_unlock(&dcache_lock); return 0; @@ -1802,17 +1874,6 @@ void dentry_update_name_case(struct dentry *dentry, struct qstr *name) } EXPORT_SYMBOL(dentry_update_name_case); -/* - * When switching names, the actual string doesn't strictly have to - * be preserved in the target - because we're dropping the target - * anyway. As such, we can just do a simple memcpy() to copy over - * the new name before we switch. - * - * Note that we have to be a lot more careful about getting the hash - * switched - we have to switch the hash value properly even if it - * then no longer matches the actual (corrupted) string of the target. - * The hash value has to match the hash queue that the dentry is on.. - */ static void switch_names(struct dentry *dentry, struct dentry *target) { if (dname_external(target)) { @@ -1854,18 +1915,53 @@ static void switch_names(struct dentry *dentry, struct dentry *target) swap(dentry->d_name.len, target->d_name.len); } +static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target) +{ + /* + * XXXX: do we really need to take target->d_lock? 
+ */ + if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent) + spin_lock(&target->d_parent->d_lock); + else { + if (d_ancestor(dentry->d_parent, target->d_parent)) { + spin_lock(&dentry->d_parent->d_lock); + spin_lock_nested(&target->d_parent->d_lock, + DENTRY_D_LOCK_NESTED); + } else { + spin_lock(&target->d_parent->d_lock); + spin_lock_nested(&dentry->d_parent->d_lock, + DENTRY_D_LOCK_NESTED); + } + } + if (target < dentry) { + spin_lock_nested(&target->d_lock, 2); + spin_lock_nested(&dentry->d_lock, 3); + } else { + spin_lock_nested(&dentry->d_lock, 2); + spin_lock_nested(&target->d_lock, 3); + } +} + +static void dentry_unlock_parents_for_move(struct dentry *dentry, + struct dentry *target) +{ + if (target->d_parent != dentry->d_parent) + spin_unlock(&dentry->d_parent->d_lock); + if (target->d_parent != target) + spin_unlock(&target->d_parent->d_lock); +} + /* - * We cannibalize "target" when moving dentry on top of it, - * because it's going to be thrown away anyway. We could be more - * polite about it, though. - * - * This forceful removal will result in ugly /proc output if - * somebody holds a file open that got deleted due to a rename. - * We could be nicer about the deleted file, and let it show - * up under the name it had before it was deleted rather than - * under the original name of the file that was moved on top of it. + * When switching names, the actual string doesn't strictly have to + * be preserved in the target - because we're dropping the target + * anyway. As such, we can just do a simple memcpy() to copy over + * the new name before we switch. + * + * Note that we have to be a lot more careful about getting the hash + * switched - we have to switch the hash value properly even if it + * then no longer matches the actual (corrupted) string of the target. + * The hash value has to match the hash queue that the dentry is on.. */ - /* * d_move_locked - move a dentry * @dentry: entry to move @@ -1879,20 +1975,12 @@ static void d_move_locked(struct dentry * dentry, struct dentry * target) if (!dentry->d_inode) printk(KERN_WARNING "VFS: moving negative dcache entry\n"); + BUG_ON(d_ancestor(dentry, target)); + BUG_ON(d_ancestor(target, dentry)); + write_seqlock(&rename_lock); - /* - * XXXX: do we really need to take target->d_lock? - */ - if (d_ancestor(dentry, target)) { - spin_lock(&dentry->d_lock); - spin_lock_nested(&target->d_lock, DENTRY_D_LOCK_NESTED); - } else if (d_ancestor(target, dentry) || target < dentry) { - spin_lock(&target->d_lock); - spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); - } else { - spin_lock(&dentry->d_lock); - spin_lock_nested(&target->d_lock, DENTRY_D_LOCK_NESTED); - } + + dentry_lock_for_move(dentry, target); /* Move the dentry to the target hash queue, if on different bucket */ spin_lock(&dcache_hash_lock); @@ -1924,6 +2012,8 @@ static void d_move_locked(struct dentry * dentry, struct dentry * target) } list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs); + + dentry_unlock_parents_for_move(dentry, target); spin_unlock(&target->d_lock); fsnotify_d_move(dentry); spin_unlock(&dentry->d_lock); @@ -2013,17 +2103,20 @@ out_err: /* * Prepare an anonymous dentry for life in the superblock's dentry tree as a * named dentry in place of the dentry to be replaced. + * returns with anon->d_lock held! 
*/ static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon) { struct dentry *dparent, *aparent; - switch_names(dentry, anon); - swap(dentry->d_name.hash, anon->d_name.hash); + dentry_lock_for_move(anon, dentry); dparent = dentry->d_parent; aparent = anon->d_parent; + switch_names(dentry, anon); + swap(dentry->d_name.hash, anon->d_name.hash); + dentry->d_parent = (aparent == anon) ? dentry : aparent; list_del(&dentry->d_u.d_child); if (!IS_ROOT(dentry)) @@ -2038,6 +2131,10 @@ static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon) else INIT_LIST_HEAD(&anon->d_u.d_child); + dentry_unlock_parents_for_move(anon, dentry); + spin_unlock(&dentry->d_lock); + + /* anon->d_lock still locked, returns locked */ anon->d_flags &= ~DCACHE_DISCONNECTED; } @@ -2073,7 +2170,6 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode) /* Is this an anonymous mountpoint that we could splice * into our tree? */ if (IS_ROOT(alias)) { - spin_lock(&alias->d_lock); __d_materialise_dentry(dentry, alias); __d_drop(alias); goto found; @@ -2558,6 +2654,7 @@ void d_genocide(struct dentry *root) struct list_head *next; spin_lock(&dcache_lock); + spin_lock(&this_parent->d_lock); repeat: next = this_parent->d_subdirs.next; resume: @@ -2571,8 +2668,10 @@ resume: continue; } if (!list_empty(&dentry->d_subdirs)) { - spin_unlock(&dentry->d_lock); + spin_unlock(&this_parent->d_lock); + spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_); this_parent = dentry; + spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_); goto repeat; } dentry->d_count--; @@ -2580,12 +2679,13 @@ resume: } if (this_parent != root) { next = this_parent->d_u.d_child.next; - spin_lock(&this_parent->d_lock); this_parent->d_count--; spin_unlock(&this_parent->d_lock); this_parent = this_parent->d_parent; + spin_lock(&this_parent->d_lock); goto resume; } + spin_unlock(&this_parent->d_lock); spin_unlock(&dcache_lock); } diff --git a/fs/libfs.c b/fs/libfs.c index 433e7139c23a..cc4794914b52 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -81,7 +81,8 @@ int dcache_dir_close(struct inode *inode, struct file *file) loff_t dcache_dir_lseek(struct file *file, loff_t offset, int origin) { - mutex_lock(&file->f_path.dentry->d_inode->i_mutex); + struct dentry *dentry = file->f_path.dentry; + mutex_lock(&dentry->d_inode->i_mutex); switch (origin) { case 1: offset += file->f_pos; @@ -89,7 +90,7 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int origin) if (offset >= 0) break; default: - mutex_unlock(&file->f_path.dentry->d_inode->i_mutex); + mutex_unlock(&dentry->d_inode->i_mutex); return -EINVAL; } if (offset != file->f_pos) { @@ -100,22 +101,25 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int origin) loff_t n = file->f_pos - 2; spin_lock(&dcache_lock); + spin_lock(&dentry->d_lock); + /* d_lock not required for cursor */ list_del(&cursor->d_u.d_child); - p = file->f_path.dentry->d_subdirs.next; - while (n && p != &file->f_path.dentry->d_subdirs) { + p = dentry->d_subdirs.next; + while (n && p != &dentry->d_subdirs) { struct dentry *next; next = list_entry(p, struct dentry, d_u.d_child); - spin_lock(&next->d_lock); + spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED); if (simple_positive(next)) n--; spin_unlock(&next->d_lock); p = p->next; } list_add_tail(&cursor->d_u.d_child, p); + spin_unlock(&dentry->d_lock); spin_unlock(&dcache_lock); } } - mutex_unlock(&file->f_path.dentry->d_inode->i_mutex); + mutex_unlock(&dentry->d_inode->i_mutex); return offset; } @@ -156,6 
+160,7 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir) /* fallthrough */ default: spin_lock(&dcache_lock); + spin_lock(&dentry->d_lock); if (filp->f_pos == 2) list_move(q, &dentry->d_subdirs); @@ -169,6 +174,7 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir) } spin_unlock(&next->d_lock); + spin_unlock(&dentry->d_lock); spin_unlock(&dcache_lock); if (filldir(dirent, next->d_name.name, next->d_name.len, filp->f_pos, @@ -176,11 +182,15 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir) dt_type(next->d_inode)) < 0) return 0; spin_lock(&dcache_lock); + spin_lock(&dentry->d_lock); + spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED); /* next is still alive */ list_move(q, p); + spin_unlock(&next->d_lock); p = q; filp->f_pos++; } + spin_unlock(&dentry->d_lock); spin_unlock(&dcache_lock); } return 0; @@ -276,6 +286,7 @@ int simple_empty(struct dentry *dentry) int ret = 0; spin_lock(&dcache_lock); + spin_lock(&dentry->d_lock); list_for_each_entry(child, &dentry->d_subdirs, d_u.d_child) { spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED); if (simple_positive(child)) { @@ -286,6 +297,7 @@ int simple_empty(struct dentry *dentry) } ret = 1; out: + spin_unlock(&dentry->d_lock); spin_unlock(&dcache_lock); return ret; } diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c index bbbf7922f422..102278ed38bd 100644 --- a/fs/ncpfs/dir.c +++ b/fs/ncpfs/dir.c @@ -392,6 +392,7 @@ ncp_dget_fpos(struct dentry *dentry, struct dentry *parent, unsigned long fpos) /* If a pointer is invalid, we search the dentry. */ spin_lock(&dcache_lock); + spin_lock(&parent->d_lock); next = parent->d_subdirs.next; while (next != &parent->d_subdirs) { dent = list_entry(next, struct dentry, d_u.d_child); @@ -400,11 +401,13 @@ ncp_dget_fpos(struct dentry *dentry, struct dentry *parent, unsigned long fpos) dget_locked(dent); else dent = NULL; + spin_unlock(&parent->d_lock); spin_unlock(&dcache_lock); goto out; } next = next->next; } + spin_unlock(&parent->d_lock); spin_unlock(&dcache_lock); return NULL; diff --git a/fs/ncpfs/ncplib_kernel.h b/fs/ncpfs/ncplib_kernel.h index 244d1b73fda7..c4b718ff9a6b 100644 --- a/fs/ncpfs/ncplib_kernel.h +++ b/fs/ncpfs/ncplib_kernel.h @@ -194,6 +194,7 @@ ncp_renew_dentries(struct dentry *parent) struct dentry *dentry; spin_lock(&dcache_lock); + spin_lock(&parent->d_lock); next = parent->d_subdirs.next; while (next != &parent->d_subdirs) { dentry = list_entry(next, struct dentry, d_u.d_child); @@ -205,6 +206,7 @@ ncp_renew_dentries(struct dentry *parent) next = next->next; } + spin_unlock(&parent->d_lock); spin_unlock(&dcache_lock); } @@ -216,6 +218,7 @@ ncp_invalidate_dircache_entries(struct dentry *parent) struct dentry *dentry; spin_lock(&dcache_lock); + spin_lock(&parent->d_lock); next = parent->d_subdirs.next; while (next != &parent->d_subdirs) { dentry = list_entry(next, struct dentry, d_u.d_child); @@ -223,6 +226,7 @@ ncp_invalidate_dircache_entries(struct dentry *parent) ncp_age_dentry(server, dentry); next = next->next; } + spin_unlock(&parent->d_lock); spin_unlock(&dcache_lock); } diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c index 20dc218707ca..aa4f25e803f6 100644 --- a/fs/notify/fsnotify.c +++ b/fs/notify/fsnotify.c @@ -68,17 +68,19 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode) /* run all of the children of the original inode and fix their * d_flags to indicate parental interest (their parent is the * original inode) */ + spin_lock(&alias->d_lock); list_for_each_entry(child, 
&alias->d_subdirs, d_u.d_child) { if (!child->d_inode) continue; - spin_lock(&child->d_lock); + spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED); if (watched) child->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED; else child->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED; spin_unlock(&child->d_lock); } + spin_unlock(&alias->d_lock); } spin_unlock(&dcache_lock); } diff --git a/include/linux/dcache.h b/include/linux/dcache.h index b0ade2d46805..ddf4f55624f7 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -305,6 +305,7 @@ static inline struct dentry *dget_dlock(struct dentry *dentry) } return dentry; } + static inline struct dentry *dget(struct dentry *dentry) { if (dentry) { diff --git a/kernel/cgroup.c b/kernel/cgroup.c index eb7af39350c6..7b4705b51d4a 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -877,23 +877,31 @@ static void cgroup_clear_directory(struct dentry *dentry) BUG_ON(!mutex_is_locked(&dentry->d_inode->i_mutex)); spin_lock(&dcache_lock); + spin_lock(&dentry->d_lock); node = dentry->d_subdirs.next; while (node != &dentry->d_subdirs) { struct dentry *d = list_entry(node, struct dentry, d_u.d_child); + + spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED); list_del_init(node); if (d->d_inode) { /* This should never be called on a cgroup * directory with child cgroups */ BUG_ON(d->d_inode->i_mode & S_IFDIR); - d = dget_locked(d); + dget_locked_dlock(d); + spin_unlock(&d->d_lock); + spin_unlock(&dentry->d_lock); spin_unlock(&dcache_lock); d_delete(d); simple_unlink(dentry->d_inode, d); dput(d); spin_lock(&dcache_lock); - } + spin_lock(&dentry->d_lock); + } else + spin_unlock(&d->d_lock); node = dentry->d_subdirs.next; } + spin_unlock(&dentry->d_lock); spin_unlock(&dcache_lock); } @@ -902,10 +910,17 @@ static void cgroup_clear_directory(struct dentry *dentry) */ static void cgroup_d_remove_dir(struct dentry *dentry) { + struct dentry *parent; + cgroup_clear_directory(dentry); spin_lock(&dcache_lock); + parent = dentry->d_parent; + spin_lock(&parent->d_lock); + spin_lock(&dentry->d_lock); list_del_init(&dentry->d_u.d_child); + spin_unlock(&dentry->d_lock); + spin_unlock(&parent->d_lock); spin_unlock(&dcache_lock); remove_dir(dentry); } diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c index 073fd5b0a53a..017ec096446e 100644 --- a/security/selinux/selinuxfs.c +++ b/security/selinux/selinuxfs.c @@ -1146,22 +1146,30 @@ static void sel_remove_entries(struct dentry *de) struct list_head *node; spin_lock(&dcache_lock); + spin_lock(&de->d_lock); node = de->d_subdirs.next; while (node != &de->d_subdirs) { struct dentry *d = list_entry(node, struct dentry, d_u.d_child); + + spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED); list_del_init(node); if (d->d_inode) { - d = dget_locked(d); + dget_locked_dlock(d); + spin_unlock(&de->d_lock); + spin_unlock(&d->d_lock); spin_unlock(&dcache_lock); d_delete(d); simple_unlink(de->d_inode, d); dput(d); spin_lock(&dcache_lock); - } + spin_lock(&de->d_lock); + } else + spin_unlock(&d->d_lock); node = de->d_subdirs.next; } + spin_unlock(&de->d_lock); spin_unlock(&dcache_lock); } -- cgit v1.2.3-58-ga151 From b5c84bf6f6fa3a7dfdcb556023a62953574b60ee Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Fri, 7 Jan 2011 17:49:38 +1100 Subject: fs: dcache remove dcache_lock dcache_lock no longer protects anything. remove it. 
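With dcache_lock gone, code that used to take the global lock around a walk of a parent's d_subdirs list now relies on per-dentry locks alone: the parent's d_lock protects its child list, and each child's d_lock nests inside it. A minimal sketch of that conversion pattern (an illustrative helper, not lifted from any one hunk below; assumes the usual <linux/dcache.h> and <linux/list.h> definitions):

static void walk_children(struct dentry *parent)
{
	struct dentry *child;

	/* parent->d_lock now protects parent->d_subdirs */
	spin_lock(&parent->d_lock);
	list_for_each_entry(child, &parent->d_subdirs, d_u.d_child) {
		/* child locks nest inside the parent's lock */
		spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
		/* ... per-child work, e.g. checking child->d_inode ... */
		spin_unlock(&child->d_lock);
	}
	spin_unlock(&parent->d_lock);
}

The same shape shows up in, for example, the libfs and usbfs hunks of this patch.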
Signed-off-by: Nick Piggin --- Documentation/filesystems/Locking | 16 +-- Documentation/filesystems/dentry-locking.txt | 40 ++++--- Documentation/filesystems/porting | 8 +- arch/powerpc/platforms/cell/spufs/inode.c | 5 +- drivers/infiniband/hw/ipath/ipath_fs.c | 6 +- drivers/infiniband/hw/qib/qib_fs.c | 3 - drivers/staging/pohmelfs/path_entry.c | 2 - drivers/staging/smbfs/cache.c | 4 - drivers/usb/core/inode.c | 3 - fs/9p/vfs_inode.c | 2 - fs/affs/amigaffs.c | 2 - fs/autofs4/autofs_i.h | 3 + fs/autofs4/expire.c | 10 +- fs/autofs4/root.c | 44 ++++---- fs/autofs4/waitq.c | 7 +- fs/ceph/dir.c | 6 +- fs/ceph/inode.c | 4 - fs/cifs/inode.c | 3 - fs/coda/cache.c | 2 - fs/configfs/configfs_internal.h | 2 - fs/configfs/inode.c | 6 +- fs/dcache.c | 160 ++++----------------------- fs/exportfs/expfs.c | 4 - fs/libfs.c | 8 -- fs/namei.c | 9 +- fs/ncpfs/dir.c | 3 - fs/ncpfs/ncplib_kernel.h | 4 - fs/nfs/dir.c | 3 - fs/nfs/getroot.c | 2 - fs/nfs/namespace.c | 3 - fs/notify/fsnotify.c | 2 - fs/ocfs2/dcache.c | 2 - include/linux/dcache.h | 5 +- include/linux/fs.h | 6 +- include/linux/fsnotify.h | 2 - include/linux/fsnotify_backend.h | 11 +- include/linux/namei.h | 1 - kernel/cgroup.c | 6 - mm/filemap.c | 3 - security/selinux/selinuxfs.c | 4 - 40 files changed, 109 insertions(+), 307 deletions(-) (limited to 'security') diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking index a15ee207b449..bdad6414dfa0 100644 --- a/Documentation/filesystems/Locking +++ b/Documentation/filesystems/Locking @@ -21,14 +21,14 @@ prototypes: char *(*d_dname)((struct dentry *dentry, char *buffer, int buflen); locking rules: - dcache_lock rename_lock ->d_lock may block -d_revalidate: no no no yes -d_hash no no no no -d_compare: no yes no no -d_delete: yes no yes no -d_release: no no no yes -d_iput: no no no yes -d_dname: no no no no + rename_lock ->d_lock may block +d_revalidate: no no yes +d_hash no no no +d_compare: yes no no +d_delete: no yes no +d_release: no no yes +d_iput: no no yes +d_dname: no no no --------------------------- inode_operations --------------------------- prototypes: diff --git a/Documentation/filesystems/dentry-locking.txt b/Documentation/filesystems/dentry-locking.txt index 79334ed5daa7..30b6a40f5650 100644 --- a/Documentation/filesystems/dentry-locking.txt +++ b/Documentation/filesystems/dentry-locking.txt @@ -31,6 +31,7 @@ significant change is the way d_lookup traverses the hash chain, it doesn't acquire the dcache_lock for this and rely on RCU to ensure that the dentry has not been *freed*. +dcache_lock no longer exists, dentry locking is explained in fs/dcache.c Dcache locking details ====================== @@ -50,14 +51,12 @@ Safe lock-free look-up of dcache hash table Dcache is a complex data structure with the hash table entries also linked together in other lists. In 2.4 kernel, dcache_lock protected -all the lists. We applied RCU only on hash chain walking. The rest of -the lists are still protected by dcache_lock. Some of the important -changes are : +all the lists. RCU dentry hash walking works like this: 1. The deletion from hash chain is done using hlist_del_rcu() macro which doesn't initialize next pointer of the deleted dentry and this allows us to walk safely lock-free while a deletion is - happening. + happening. This is a standard hlist_rcu iteration. 2. 
Insertion of a dentry into the hash table is done using hlist_add_head_rcu() which take care of ordering the writes - the @@ -66,19 +65,18 @@ changes are : which has since been replaced by hlist_for_each_entry_rcu(), while walking the hash chain. The only requirement is that all initialization to the dentry must be done before - hlist_add_head_rcu() since we don't have dcache_lock protection - while traversing the hash chain. This isn't different from the - existing code. - -3. The dentry looked up without holding dcache_lock by cannot be - returned for walking if it is unhashed. It then may have a NULL - d_inode or other bogosity since RCU doesn't protect the other - fields in the dentry. We therefore use a flag DCACHE_UNHASHED to - indicate unhashed dentries and use this in conjunction with a - per-dentry lock (d_lock). Once looked up without the dcache_lock, - we acquire the per-dentry lock (d_lock) and check if the dentry is - unhashed. If so, the look-up is failed. If not, the reference count - of the dentry is increased and the dentry is returned. + hlist_add_head_rcu() since we don't have lock protection + while traversing the hash chain. + +3. The dentry looked up without holding locks cannot be returned for + walking if it is unhashed. It then may have a NULL d_inode or other + bogosity since RCU doesn't protect the other fields in the dentry. We + therefore use a flag DCACHE_UNHASHED to indicate unhashed dentries + and use this in conjunction with a per-dentry lock (d_lock). Once + looked up without locks, we acquire the per-dentry lock (d_lock) and + check if the dentry is unhashed. If so, the look-up is failed. If not, + the reference count of the dentry is increased and the dentry is + returned. 4. Once a dentry is looked up, it must be ensured during the path walk for that component it doesn't go away. In pre-2.5.10 code, this was @@ -86,10 +84,10 @@ changes are : In some sense, dcache_rcu path walking looks like the pre-2.5.10 version. -5. All dentry hash chain updates must take the dcache_lock as well as - the per-dentry lock in that order. dput() does this to ensure that - a dentry that has just been looked up in another CPU doesn't get - deleted before dget() can be done on it. +5. All dentry hash chain updates must take the per-dentry lock (see + fs/dcache.c). This excludes dput() to ensure that a dentry that has + been looked up concurrently does not get deleted before dget() can + take a ref. 6. There are several ways to do reference counting of RCU protected objects. One such example is in ipv4 route cache where deferred diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting index 9fd31940a8ef..1eb76959d096 100644 --- a/Documentation/filesystems/porting +++ b/Documentation/filesystems/porting @@ -216,7 +216,6 @@ had ->revalidate()) add calls in ->follow_link()/->readlink(). ->d_parent changes are not protected by BKL anymore. Read access is safe if at least one of the following is true: * filesystem has no cross-directory rename() - * dcache_lock is held * we know that parent had been locked (e.g. we are looking at ->d_parent of ->lookup() argument). * we are called from ->rename(). @@ -340,3 +339,10 @@ look at examples of other filesystems) for guidance. .d_hash() calling convention and locking rules are significantly changed. Read updated documentation in Documentation/filesystems/vfs.txt (and look at examples of other filesystems) for guidance. + +--- +[mandatory] + dcache_lock is gone, replaced by fine grained locks. 
See fs/dcache.c +for details of what locks to replace dcache_lock with in order to protect +particular things. Most of the time, a filesystem only needs ->d_lock, which +protects *all* the dcache state of a given dentry. diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index 5aef1a7f5e4b..2662b50ea8d4 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c @@ -159,21 +159,18 @@ static void spufs_prune_dir(struct dentry *dir) mutex_lock(&dir->d_inode->i_mutex); list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) { - spin_lock(&dcache_lock); spin_lock(&dentry->d_lock); if (!(d_unhashed(dentry)) && dentry->d_inode) { dget_locked_dlock(dentry); __d_drop(dentry); spin_unlock(&dentry->d_lock); simple_unlink(dir->d_inode, dentry); - /* XXX: what is dcache_lock protecting here? Other + /* XXX: what was dcache_lock protecting here? Other * filesystems (IB, configfs) release dcache_lock * before unlink */ - spin_unlock(&dcache_lock); dput(dentry); } else { spin_unlock(&dentry->d_lock); - spin_unlock(&dcache_lock); } } shrink_dcache_parent(dir); diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c index 18aee04a8411..925e88227ded 100644 --- a/drivers/infiniband/hw/ipath/ipath_fs.c +++ b/drivers/infiniband/hw/ipath/ipath_fs.c @@ -277,18 +277,14 @@ static int remove_file(struct dentry *parent, char *name) goto bail; } - spin_lock(&dcache_lock); spin_lock(&tmp->d_lock); if (!(d_unhashed(tmp) && tmp->d_inode)) { dget_locked_dlock(tmp); __d_drop(tmp); spin_unlock(&tmp->d_lock); - spin_unlock(&dcache_lock); simple_unlink(parent->d_inode, tmp); - } else { + } else spin_unlock(&tmp->d_lock); - spin_unlock(&dcache_lock); - } ret = 0; bail: diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c index fe4b242f0094..49af4a6538ba 100644 --- a/drivers/infiniband/hw/qib/qib_fs.c +++ b/drivers/infiniband/hw/qib/qib_fs.c @@ -453,17 +453,14 @@ static int remove_file(struct dentry *parent, char *name) goto bail; } - spin_lock(&dcache_lock); spin_lock(&tmp->d_lock); if (!(d_unhashed(tmp) && tmp->d_inode)) { dget_locked_dlock(tmp); __d_drop(tmp); spin_unlock(&tmp->d_lock); - spin_unlock(&dcache_lock); simple_unlink(parent->d_inode, tmp); } else { spin_unlock(&tmp->d_lock); - spin_unlock(&dcache_lock); } ret = 0; diff --git a/drivers/staging/pohmelfs/path_entry.c b/drivers/staging/pohmelfs/path_entry.c index bbe42f42ca8f..400a9fc386ad 100644 --- a/drivers/staging/pohmelfs/path_entry.c +++ b/drivers/staging/pohmelfs/path_entry.c @@ -101,7 +101,6 @@ rename_retry: d = first; seq = read_seqbegin(&rename_lock); rcu_read_lock(); - spin_lock(&dcache_lock); if (!IS_ROOT(d) && d_unhashed(d)) len += UNHASHED_OBSCURE_STRING_SIZE; /* Obscure " (deleted)" string */ @@ -110,7 +109,6 @@ rename_retry: len += d->d_name.len + 1; /* Plus slash */ d = d->d_parent; } - spin_unlock(&dcache_lock); rcu_read_unlock(); if (read_seqretry(&rename_lock, seq)) goto rename_retry; diff --git a/drivers/staging/smbfs/cache.c b/drivers/staging/smbfs/cache.c index 920434b6c071..75dfd403fb90 100644 --- a/drivers/staging/smbfs/cache.c +++ b/drivers/staging/smbfs/cache.c @@ -62,7 +62,6 @@ smb_invalidate_dircache_entries(struct dentry *parent) struct list_head *next; struct dentry *dentry; - spin_lock(&dcache_lock); spin_lock(&parent->d_lock); next = parent->d_subdirs.next; while (next != &parent->d_subdirs) { @@ -72,7 +71,6 @@ smb_invalidate_dircache_entries(struct dentry *parent) 
next = next->next; } spin_unlock(&parent->d_lock); - spin_unlock(&dcache_lock); } /* @@ -98,7 +96,6 @@ smb_dget_fpos(struct dentry *dentry, struct dentry *parent, unsigned long fpos) } /* If a pointer is invalid, we search the dentry. */ - spin_lock(&dcache_lock); spin_lock(&parent->d_lock); next = parent->d_subdirs.next; while (next != &parent->d_subdirs) { @@ -115,7 +112,6 @@ smb_dget_fpos(struct dentry *dentry, struct dentry *parent, unsigned long fpos) dent = NULL; out_unlock: spin_unlock(&parent->d_lock); - spin_unlock(&dcache_lock); return dent; } diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c index 89a0e8366585..1b125c224dcf 100644 --- a/drivers/usb/core/inode.c +++ b/drivers/usb/core/inode.c @@ -343,7 +343,6 @@ static int usbfs_empty (struct dentry *dentry) { struct list_head *list; - spin_lock(&dcache_lock); spin_lock(&dentry->d_lock); list_for_each(list, &dentry->d_subdirs) { struct dentry *de = list_entry(list, struct dentry, d_u.d_child); @@ -352,13 +351,11 @@ static int usbfs_empty (struct dentry *dentry) if (usbfs_positive(de)) { spin_unlock(&de->d_lock); spin_unlock(&dentry->d_lock); - spin_unlock(&dcache_lock); return 0; } spin_unlock(&de->d_lock); } spin_unlock(&dentry->d_lock); - spin_unlock(&dcache_lock); return 1; } diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index 47dfd5d29a6b..1073bca8488c 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -270,13 +270,11 @@ static struct dentry *v9fs_dentry_from_dir_inode(struct inode *inode) { struct dentry *dentry; - spin_lock(&dcache_lock); spin_lock(&dcache_inode_lock); /* Directory should have only one entry. */ BUG_ON(S_ISDIR(inode->i_mode) && !list_is_singular(&inode->i_dentry)); dentry = list_entry(inode->i_dentry.next, struct dentry, d_alias); spin_unlock(&dcache_inode_lock); - spin_unlock(&dcache_lock); return dentry; } diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c index 2321cc92d44f..600101a21ba2 100644 --- a/fs/affs/amigaffs.c +++ b/fs/affs/amigaffs.c @@ -128,7 +128,6 @@ affs_fix_dcache(struct dentry *dentry, u32 entry_ino) void *data = dentry->d_fsdata; struct list_head *head, *next; - spin_lock(&dcache_lock); spin_lock(&dcache_inode_lock); head = &inode->i_dentry; next = head->next; @@ -141,7 +140,6 @@ affs_fix_dcache(struct dentry *dentry, u32 entry_ino) next = next->next; } spin_unlock(&dcache_inode_lock); - spin_unlock(&dcache_lock); } diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h index 9d2ae9b30d9f..0fffe1c24cec 100644 --- a/fs/autofs4/autofs_i.h +++ b/fs/autofs4/autofs_i.h @@ -16,6 +16,7 @@ #include #include #include +#include #include /* This is the range of ioctl() numbers we claim as ours */ @@ -60,6 +61,8 @@ do { \ current->pid, __func__, ##args); \ } while (0) +extern spinlock_t autofs4_lock; + /* Unified info structure. This is pointed to by both the dentry and inode structures. Each file in the filesystem has an instance of this structure. 
It holds a reference to the dentry, so dentries are never diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c index 968c1434af62..2f7951d67d16 100644 --- a/fs/autofs4/expire.c +++ b/fs/autofs4/expire.c @@ -102,7 +102,7 @@ static struct dentry *get_next_positive_dentry(struct dentry *prev, if (prev == NULL) return dget(prev); - spin_lock(&dcache_lock); + spin_lock(&autofs4_lock); relock: p = prev; spin_lock(&p->d_lock); @@ -114,7 +114,7 @@ again: if (p == root) { spin_unlock(&p->d_lock); - spin_unlock(&dcache_lock); + spin_unlock(&autofs4_lock); dput(prev); return NULL; } @@ -144,7 +144,7 @@ again: dget_dlock(ret); spin_unlock(&ret->d_lock); spin_unlock(&p->d_lock); - spin_unlock(&dcache_lock); + spin_unlock(&autofs4_lock); dput(prev); @@ -408,13 +408,13 @@ found: ino->flags |= AUTOFS_INF_EXPIRING; init_completion(&ino->expire_complete); spin_unlock(&sbi->fs_lock); - spin_lock(&dcache_lock); + spin_lock(&autofs4_lock); spin_lock(&expired->d_parent->d_lock); spin_lock_nested(&expired->d_lock, DENTRY_D_LOCK_NESTED); list_move(&expired->d_parent->d_subdirs, &expired->d_u.d_child); spin_unlock(&expired->d_lock); spin_unlock(&expired->d_parent->d_lock); - spin_unlock(&dcache_lock); + spin_unlock(&autofs4_lock); return expired; } diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index 7a9ed6b88291..10ca68a96dc7 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c @@ -23,6 +23,8 @@ #include "autofs_i.h" +DEFINE_SPINLOCK(autofs4_lock); + static int autofs4_dir_symlink(struct inode *,struct dentry *,const char *); static int autofs4_dir_unlink(struct inode *,struct dentry *); static int autofs4_dir_rmdir(struct inode *,struct dentry *); @@ -142,15 +144,15 @@ static int autofs4_dir_open(struct inode *inode, struct file *file) * autofs file system so just let the libfs routines handle * it. 
*/ - spin_lock(&dcache_lock); + spin_lock(&autofs4_lock); spin_lock(&dentry->d_lock); if (!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs)) { spin_unlock(&dentry->d_lock); - spin_unlock(&dcache_lock); + spin_unlock(&autofs4_lock); return -ENOENT; } spin_unlock(&dentry->d_lock); - spin_unlock(&dcache_lock); + spin_unlock(&autofs4_lock); out: return dcache_dir_open(inode, file); @@ -255,11 +257,11 @@ static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd) /* We trigger a mount for almost all flags */ lookup_type = autofs4_need_mount(nd->flags); spin_lock(&sbi->fs_lock); - spin_lock(&dcache_lock); + spin_lock(&autofs4_lock); spin_lock(&dentry->d_lock); if (!(lookup_type || ino->flags & AUTOFS_INF_PENDING)) { spin_unlock(&dentry->d_lock); - spin_unlock(&dcache_lock); + spin_unlock(&autofs4_lock); spin_unlock(&sbi->fs_lock); goto follow; } @@ -272,7 +274,7 @@ static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd) if (ino->flags & AUTOFS_INF_PENDING || (!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs))) { spin_unlock(&dentry->d_lock); - spin_unlock(&dcache_lock); + spin_unlock(&autofs4_lock); spin_unlock(&sbi->fs_lock); status = try_to_fill_dentry(dentry, nd->flags); @@ -282,7 +284,7 @@ static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd) goto follow; } spin_unlock(&dentry->d_lock); - spin_unlock(&dcache_lock); + spin_unlock(&autofs4_lock); spin_unlock(&sbi->fs_lock); follow: /* @@ -353,14 +355,14 @@ static int autofs4_revalidate(struct dentry *dentry, struct nameidata *nd) return 0; /* Check for a non-mountpoint directory with no contents */ - spin_lock(&dcache_lock); + spin_lock(&autofs4_lock); spin_lock(&dentry->d_lock); if (S_ISDIR(dentry->d_inode->i_mode) && !d_mountpoint(dentry) && list_empty(&dentry->d_subdirs)) { DPRINTK("dentry=%p %.*s, emptydir", dentry, dentry->d_name.len, dentry->d_name.name); spin_unlock(&dentry->d_lock); - spin_unlock(&dcache_lock); + spin_unlock(&autofs4_lock); /* The daemon never causes a mount to trigger */ if (oz_mode) @@ -377,7 +379,7 @@ static int autofs4_revalidate(struct dentry *dentry, struct nameidata *nd) return status; } spin_unlock(&dentry->d_lock); - spin_unlock(&dcache_lock); + spin_unlock(&autofs4_lock); return 1; } @@ -432,7 +434,7 @@ static struct dentry *autofs4_lookup_active(struct dentry *dentry) const unsigned char *str = name->name; struct list_head *p, *head; - spin_lock(&dcache_lock); + spin_lock(&autofs4_lock); spin_lock(&sbi->lookup_lock); head = &sbi->active_list; list_for_each(p, head) { @@ -465,14 +467,14 @@ static struct dentry *autofs4_lookup_active(struct dentry *dentry) dget_dlock(active); spin_unlock(&active->d_lock); spin_unlock(&sbi->lookup_lock); - spin_unlock(&dcache_lock); + spin_unlock(&autofs4_lock); return active; } next: spin_unlock(&active->d_lock); } spin_unlock(&sbi->lookup_lock); - spin_unlock(&dcache_lock); + spin_unlock(&autofs4_lock); return NULL; } @@ -487,7 +489,7 @@ static struct dentry *autofs4_lookup_expiring(struct dentry *dentry) const unsigned char *str = name->name; struct list_head *p, *head; - spin_lock(&dcache_lock); + spin_lock(&autofs4_lock); spin_lock(&sbi->lookup_lock); head = &sbi->expiring_list; list_for_each(p, head) { @@ -520,14 +522,14 @@ static struct dentry *autofs4_lookup_expiring(struct dentry *dentry) dget_dlock(expiring); spin_unlock(&expiring->d_lock); spin_unlock(&sbi->lookup_lock); - spin_unlock(&dcache_lock); + spin_unlock(&autofs4_lock); return expiring; } next: spin_unlock(&expiring->d_lock); } 
spin_unlock(&sbi->lookup_lock); - spin_unlock(&dcache_lock); + spin_unlock(&autofs4_lock); return NULL; } @@ -763,12 +765,12 @@ static int autofs4_dir_unlink(struct inode *dir, struct dentry *dentry) dir->i_mtime = CURRENT_TIME; - spin_lock(&dcache_lock); + spin_lock(&autofs4_lock); autofs4_add_expiring(dentry); spin_lock(&dentry->d_lock); __d_drop(dentry); spin_unlock(&dentry->d_lock); - spin_unlock(&dcache_lock); + spin_unlock(&autofs4_lock); return 0; } @@ -785,20 +787,20 @@ static int autofs4_dir_rmdir(struct inode *dir, struct dentry *dentry) if (!autofs4_oz_mode(sbi)) return -EACCES; - spin_lock(&dcache_lock); + spin_lock(&autofs4_lock); spin_lock(&sbi->lookup_lock); spin_lock(&dentry->d_lock); if (!list_empty(&dentry->d_subdirs)) { spin_unlock(&dentry->d_lock); spin_unlock(&sbi->lookup_lock); - spin_unlock(&dcache_lock); + spin_unlock(&autofs4_lock); return -ENOTEMPTY; } __autofs4_add_expiring(dentry); spin_unlock(&sbi->lookup_lock); __d_drop(dentry); spin_unlock(&dentry->d_lock); - spin_unlock(&dcache_lock); + spin_unlock(&autofs4_lock); if (atomic_dec_and_test(&ino->count)) { p_ino = autofs4_dentry_ino(dentry->d_parent); diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c index 4be8f778a418..c5f8459c905e 100644 --- a/fs/autofs4/waitq.c +++ b/fs/autofs4/waitq.c @@ -194,14 +194,15 @@ static int autofs4_getpath(struct autofs_sb_info *sbi, rename_retry: buf = *name; len = 0; + seq = read_seqbegin(&rename_lock); rcu_read_lock(); - spin_lock(&dcache_lock); + spin_lock(&autofs4_lock); for (tmp = dentry ; tmp != root ; tmp = tmp->d_parent) len += tmp->d_name.len + 1; if (!len || --len > NAME_MAX) { - spin_unlock(&dcache_lock); + spin_unlock(&autofs4_lock); rcu_read_unlock(); if (read_seqretry(&rename_lock, seq)) goto rename_retry; @@ -217,7 +218,7 @@ rename_retry: p -= tmp->d_name.len; strncpy(p, tmp->d_name.name, tmp->d_name.len); } - spin_unlock(&dcache_lock); + spin_unlock(&autofs4_lock); rcu_read_unlock(); if (read_seqretry(&rename_lock, seq)) goto rename_retry; diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 2c924e8d85fe..58abc3da6111 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -112,7 +112,6 @@ static int __dcache_readdir(struct file *filp, dout("__dcache_readdir %p at %llu (last %p)\n", dir, filp->f_pos, last); - spin_lock(&dcache_lock); spin_lock(&parent->d_lock); /* start at beginning? 
*/ @@ -156,7 +155,6 @@ more: dget_dlock(dentry); spin_unlock(&dentry->d_lock); spin_unlock(&parent->d_lock); - spin_unlock(&dcache_lock); dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos, dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode); @@ -182,21 +180,19 @@ more: filp->f_pos++; - /* make sure a dentry wasn't dropped while we didn't have dcache_lock */ + /* make sure a dentry wasn't dropped while we didn't have parent lock */ if (!ceph_i_test(dir, CEPH_I_COMPLETE)) { dout(" lost I_COMPLETE on %p; falling back to mds\n", dir); err = -EAGAIN; goto out; } - spin_lock(&dcache_lock); spin_lock(&parent->d_lock); p = p->prev; /* advance to next dentry */ goto more; out_unlock: spin_unlock(&parent->d_lock); - spin_unlock(&dcache_lock); out: if (last) dput(last); diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 2c6944473366..2a48cafc174b 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -841,7 +841,6 @@ static void ceph_set_dentry_offset(struct dentry *dn) di->offset = ceph_inode(inode)->i_max_offset++; spin_unlock(&inode->i_lock); - spin_lock(&dcache_lock); spin_lock(&dir->d_lock); spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED); list_move(&dn->d_u.d_child, &dir->d_subdirs); @@ -849,7 +848,6 @@ static void ceph_set_dentry_offset(struct dentry *dn) dn->d_u.d_child.prev, dn->d_u.d_child.next); spin_unlock(&dn->d_lock); spin_unlock(&dir->d_lock); - spin_unlock(&dcache_lock); } /* @@ -1233,13 +1231,11 @@ retry_lookup: goto retry_lookup; } else { /* reorder parent's d_subdirs */ - spin_lock(&dcache_lock); spin_lock(&parent->d_lock); spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED); list_move(&dn->d_u.d_child, &parent->d_subdirs); spin_unlock(&dn->d_lock); spin_unlock(&parent->d_lock); - spin_unlock(&dcache_lock); } di = dn->d_fsdata; diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 003698365ece..99b9a2cc14b7 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -809,17 +809,14 @@ inode_has_hashed_dentries(struct inode *inode) { struct dentry *dentry; - spin_lock(&dcache_lock); spin_lock(&dcache_inode_lock); list_for_each_entry(dentry, &inode->i_dentry, d_alias) { if (!d_unhashed(dentry) || IS_ROOT(dentry)) { spin_unlock(&dcache_inode_lock); - spin_unlock(&dcache_lock); return true; } } spin_unlock(&dcache_inode_lock); - spin_unlock(&dcache_lock); return false; } diff --git a/fs/coda/cache.c b/fs/coda/cache.c index 859393fca2b7..5525e1c660fd 100644 --- a/fs/coda/cache.c +++ b/fs/coda/cache.c @@ -93,7 +93,6 @@ static void coda_flag_children(struct dentry *parent, int flag) struct list_head *child; struct dentry *de; - spin_lock(&dcache_lock); spin_lock(&parent->d_lock); list_for_each(child, &parent->d_subdirs) { @@ -104,7 +103,6 @@ static void coda_flag_children(struct dentry *parent, int flag) coda_flag_inode(de->d_inode, flag); } spin_unlock(&parent->d_lock); - spin_unlock(&dcache_lock); return; } diff --git a/fs/configfs/configfs_internal.h b/fs/configfs/configfs_internal.h index e58b4c30e216..026cf68553a4 100644 --- a/fs/configfs/configfs_internal.h +++ b/fs/configfs/configfs_internal.h @@ -120,7 +120,6 @@ static inline struct config_item *configfs_get_config_item(struct dentry *dentry { struct config_item * item = NULL; - spin_lock(&dcache_lock); spin_lock(&dentry->d_lock); if (!d_unhashed(dentry)) { struct configfs_dirent * sd = dentry->d_fsdata; @@ -131,7 +130,6 @@ static inline struct config_item *configfs_get_config_item(struct dentry *dentry item = config_item_get(sd->s_element); } spin_unlock(&dentry->d_lock); - spin_unlock(&dcache_lock); 
return item; } diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c index 79b37765d8ff..fb3a55fff82a 100644 --- a/fs/configfs/inode.c +++ b/fs/configfs/inode.c @@ -250,18 +250,14 @@ void configfs_drop_dentry(struct configfs_dirent * sd, struct dentry * parent) struct dentry * dentry = sd->s_dentry; if (dentry) { - spin_lock(&dcache_lock); spin_lock(&dentry->d_lock); if (!(d_unhashed(dentry) && dentry->d_inode)) { dget_locked_dlock(dentry); __d_drop(dentry); spin_unlock(&dentry->d_lock); - spin_unlock(&dcache_lock); simple_unlink(parent->d_inode, dentry); - } else { + } else spin_unlock(&dentry->d_lock); - spin_unlock(&dcache_lock); - } } } diff --git a/fs/dcache.c b/fs/dcache.c index a9bc4ecc21e1..0dbae053b664 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -54,11 +54,10 @@ * - d_alias, d_inode * * Ordering: - * dcache_lock - * dcache_inode_lock - * dentry->d_lock - * dcache_lru_lock - * dcache_hash_lock + * dcache_inode_lock + * dentry->d_lock + * dcache_lru_lock + * dcache_hash_lock * * If there is an ancestor relationship: * dentry->d_parent->...->d_parent->d_lock @@ -77,12 +76,10 @@ EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure); __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_inode_lock); static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_hash_lock); static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lru_lock); -__cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock); __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock); EXPORT_SYMBOL(rename_lock); EXPORT_SYMBOL(dcache_inode_lock); -EXPORT_SYMBOL(dcache_lock); static struct kmem_cache *dentry_cache __read_mostly; @@ -139,7 +136,7 @@ static void __d_free(struct rcu_head *head) } /* - * no dcache_lock, please. + * no locks, please. */ static void d_free(struct dentry *dentry) { @@ -162,7 +159,6 @@ static void d_free(struct dentry *dentry) static void dentry_iput(struct dentry * dentry) __releases(dentry->d_lock) __releases(dcache_inode_lock) - __releases(dcache_lock) { struct inode *inode = dentry->d_inode; if (inode) { @@ -170,7 +166,6 @@ static void dentry_iput(struct dentry * dentry) list_del_init(&dentry->d_alias); spin_unlock(&dentry->d_lock); spin_unlock(&dcache_inode_lock); - spin_unlock(&dcache_lock); if (!inode->i_nlink) fsnotify_inoderemove(inode); if (dentry->d_op && dentry->d_op->d_iput) @@ -180,7 +175,6 @@ static void dentry_iput(struct dentry * dentry) } else { spin_unlock(&dentry->d_lock); spin_unlock(&dcache_inode_lock); - spin_unlock(&dcache_lock); } } @@ -235,14 +229,13 @@ static void dentry_lru_move_tail(struct dentry *dentry) * * If this is the root of the dentry tree, return NULL. * - * dcache_lock and d_lock and d_parent->d_lock must be held by caller, and - * are dropped by d_kill. + * dentry->d_lock and parent->d_lock must be held by caller, and are dropped by + * d_kill. 
*/ static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent) __releases(dentry->d_lock) __releases(parent->d_lock) __releases(dcache_inode_lock) - __releases(dcache_lock) { dentry->d_parent = NULL; list_del(&dentry->d_u.d_child); @@ -285,11 +278,9 @@ EXPORT_SYMBOL(__d_drop); void d_drop(struct dentry *dentry) { - spin_lock(&dcache_lock); spin_lock(&dentry->d_lock); __d_drop(dentry); spin_unlock(&dentry->d_lock); - spin_unlock(&dcache_lock); } EXPORT_SYMBOL(d_drop); @@ -337,21 +328,10 @@ repeat: else parent = dentry->d_parent; if (dentry->d_count == 1) { - if (!spin_trylock(&dcache_lock)) { - /* - * Something of a livelock possibility we could avoid - * by taking dcache_lock and trying again, but we - * want to reduce dcache_lock anyway so this will - * get improved. - */ -drop1: - spin_unlock(&dentry->d_lock); - goto repeat; - } if (!spin_trylock(&dcache_inode_lock)) { drop2: - spin_unlock(&dcache_lock); - goto drop1; + spin_unlock(&dentry->d_lock); + goto repeat; } if (parent && !spin_trylock(&parent->d_lock)) { spin_unlock(&dcache_inode_lock); @@ -363,7 +343,6 @@ drop2: spin_unlock(&dentry->d_lock); if (parent) spin_unlock(&parent->d_lock); - spin_unlock(&dcache_lock); return; } @@ -387,7 +366,6 @@ drop2: if (parent) spin_unlock(&parent->d_lock); spin_unlock(&dcache_inode_lock); - spin_unlock(&dcache_lock); return; unhash_it: @@ -418,11 +396,9 @@ int d_invalidate(struct dentry * dentry) /* * If it's already been dropped, return OK. */ - spin_lock(&dcache_lock); spin_lock(&dentry->d_lock); if (d_unhashed(dentry)) { spin_unlock(&dentry->d_lock); - spin_unlock(&dcache_lock); return 0; } /* @@ -431,9 +407,7 @@ int d_invalidate(struct dentry * dentry) */ if (!list_empty(&dentry->d_subdirs)) { spin_unlock(&dentry->d_lock); - spin_unlock(&dcache_lock); shrink_dcache_parent(dentry); - spin_lock(&dcache_lock); spin_lock(&dentry->d_lock); } @@ -450,19 +424,17 @@ int d_invalidate(struct dentry * dentry) if (dentry->d_count > 1) { if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) { spin_unlock(&dentry->d_lock); - spin_unlock(&dcache_lock); return -EBUSY; } } __d_drop(dentry); spin_unlock(&dentry->d_lock); - spin_unlock(&dcache_lock); return 0; } EXPORT_SYMBOL(d_invalidate); -/* This must be called with dcache_lock and d_lock held */ +/* This must be called with d_lock held */ static inline struct dentry * __dget_locked_dlock(struct dentry *dentry) { dentry->d_count++; @@ -470,7 +442,7 @@ static inline struct dentry * __dget_locked_dlock(struct dentry *dentry) return dentry; } -/* This should be called _only_ with dcache_lock held */ +/* This must be called with d_lock held */ static inline struct dentry * __dget_locked(struct dentry *dentry) { spin_lock(&dentry->d_lock); @@ -575,11 +547,9 @@ struct dentry *d_find_alias(struct inode *inode) struct dentry *de = NULL; if (!list_empty(&inode->i_dentry)) { - spin_lock(&dcache_lock); spin_lock(&dcache_inode_lock); de = __d_find_alias(inode, 0); spin_unlock(&dcache_inode_lock); - spin_unlock(&dcache_lock); } return de; } @@ -593,7 +563,6 @@ void d_prune_aliases(struct inode *inode) { struct dentry *dentry; restart: - spin_lock(&dcache_lock); spin_lock(&dcache_inode_lock); list_for_each_entry(dentry, &inode->i_dentry, d_alias) { spin_lock(&dentry->d_lock); @@ -602,14 +571,12 @@ restart: __d_drop(dentry); spin_unlock(&dentry->d_lock); spin_unlock(&dcache_inode_lock); - spin_unlock(&dcache_lock); dput(dentry); goto restart; } spin_unlock(&dentry->d_lock); } spin_unlock(&dcache_inode_lock); - spin_unlock(&dcache_lock); } 
EXPORT_SYMBOL(d_prune_aliases); @@ -625,17 +592,14 @@ static void prune_one_dentry(struct dentry *dentry, struct dentry *parent) __releases(dentry->d_lock) __releases(parent->d_lock) __releases(dcache_inode_lock) - __releases(dcache_lock) { __d_drop(dentry); dentry = d_kill(dentry, parent); /* - * Prune ancestors. Locking is simpler than in dput(), - * because dcache_lock needs to be taken anyway. + * Prune ancestors. */ while (dentry) { - spin_lock(&dcache_lock); spin_lock(&dcache_inode_lock); again: spin_lock(&dentry->d_lock); @@ -653,7 +617,6 @@ again: spin_unlock(&parent->d_lock); spin_unlock(&dentry->d_lock); spin_unlock(&dcache_inode_lock); - spin_unlock(&dcache_lock); return; } @@ -702,8 +665,7 @@ relock: spin_unlock(&dcache_lru_lock); prune_one_dentry(dentry, parent); - /* dcache_lock, dcache_inode_lock and dentry->d_lock dropped */ - spin_lock(&dcache_lock); + /* dcache_inode_lock and dentry->d_lock dropped */ spin_lock(&dcache_inode_lock); spin_lock(&dcache_lru_lock); } @@ -725,7 +687,6 @@ static void __shrink_dcache_sb(struct super_block *sb, int *count, int flags) LIST_HEAD(tmp); int cnt = *count; - spin_lock(&dcache_lock); spin_lock(&dcache_inode_lock); relock: spin_lock(&dcache_lru_lock); @@ -766,7 +727,6 @@ relock: list_splice(&referenced, &sb->s_dentry_lru); spin_unlock(&dcache_lru_lock); spin_unlock(&dcache_inode_lock); - spin_unlock(&dcache_lock); } /** @@ -788,7 +748,6 @@ static void prune_dcache(int count) if (unused == 0 || count == 0) return; - spin_lock(&dcache_lock); if (count >= unused) prune_ratio = 1; else @@ -825,11 +784,9 @@ static void prune_dcache(int count) if (down_read_trylock(&sb->s_umount)) { if ((sb->s_root != NULL) && (!list_empty(&sb->s_dentry_lru))) { - spin_unlock(&dcache_lock); __shrink_dcache_sb(sb, &w_count, DCACHE_REFERENCED); pruned -= w_count; - spin_lock(&dcache_lock); } up_read(&sb->s_umount); } @@ -845,7 +802,6 @@ static void prune_dcache(int count) if (p) __put_super(p); spin_unlock(&sb_lock); - spin_unlock(&dcache_lock); } /** @@ -859,7 +815,6 @@ void shrink_dcache_sb(struct super_block *sb) { LIST_HEAD(tmp); - spin_lock(&dcache_lock); spin_lock(&dcache_inode_lock); spin_lock(&dcache_lru_lock); while (!list_empty(&sb->s_dentry_lru)) { @@ -868,7 +823,6 @@ void shrink_dcache_sb(struct super_block *sb) } spin_unlock(&dcache_lru_lock); spin_unlock(&dcache_inode_lock); - spin_unlock(&dcache_lock); } EXPORT_SYMBOL(shrink_dcache_sb); @@ -885,12 +839,10 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry) BUG_ON(!IS_ROOT(dentry)); /* detach this root from the system */ - spin_lock(&dcache_lock); spin_lock(&dentry->d_lock); dentry_lru_del(dentry); __d_drop(dentry); spin_unlock(&dentry->d_lock); - spin_unlock(&dcache_lock); for (;;) { /* descend to the first leaf in the current subtree */ @@ -899,7 +851,6 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry) /* this is a branch with children - detach all of them * from the system in one go */ - spin_lock(&dcache_lock); spin_lock(&dentry->d_lock); list_for_each_entry(loop, &dentry->d_subdirs, d_u.d_child) { @@ -910,7 +861,6 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry) spin_unlock(&loop->d_lock); } spin_unlock(&dentry->d_lock); - spin_unlock(&dcache_lock); /* move to the first child */ dentry = list_entry(dentry->d_subdirs.next, @@ -977,8 +927,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry) /* * destroy the dentries attached to a superblock on unmounting - * - we don't need to use dentry->d_lock, and only need 
dcache_lock when - * removing the dentry from the system lists and hashes because: + * - we don't need to use dentry->d_lock because: * - the superblock is detached from all mountings and open files, so the * dentry trees will not be rearranged by the VFS * - s_umount is write-locked, so the memory pressure shrinker will ignore @@ -1029,7 +978,6 @@ rename_retry: this_parent = parent; seq = read_seqbegin(&rename_lock); - spin_lock(&dcache_lock); if (d_mountpoint(parent)) goto positive; spin_lock(&this_parent->d_lock); @@ -1075,7 +1023,6 @@ resume: if (this_parent != child->d_parent || read_seqretry(&rename_lock, seq)) { spin_unlock(&this_parent->d_lock); - spin_unlock(&dcache_lock); rcu_read_unlock(); goto rename_retry; } @@ -1084,12 +1031,10 @@ resume: goto resume; } spin_unlock(&this_parent->d_lock); - spin_unlock(&dcache_lock); if (read_seqretry(&rename_lock, seq)) goto rename_retry; return 0; /* No mount points found in tree */ positive: - spin_unlock(&dcache_lock); if (read_seqretry(&rename_lock, seq)) goto rename_retry; return 1; @@ -1121,7 +1066,6 @@ rename_retry: this_parent = parent; seq = read_seqbegin(&rename_lock); - spin_lock(&dcache_lock); spin_lock(&this_parent->d_lock); repeat: next = this_parent->d_subdirs.next; @@ -1185,7 +1129,6 @@ resume: if (this_parent != child->d_parent || read_seqretry(&rename_lock, seq)) { spin_unlock(&this_parent->d_lock); - spin_unlock(&dcache_lock); rcu_read_unlock(); goto rename_retry; } @@ -1195,7 +1138,6 @@ resume: } out: spin_unlock(&this_parent->d_lock); - spin_unlock(&dcache_lock); if (read_seqretry(&rename_lock, seq)) goto rename_retry; return found; @@ -1297,7 +1239,6 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name) INIT_LIST_HEAD(&dentry->d_u.d_child); if (parent) { - spin_lock(&dcache_lock); spin_lock(&parent->d_lock); spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); dentry->d_parent = dget_dlock(parent); @@ -1305,7 +1246,6 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name) list_add(&dentry->d_u.d_child, &parent->d_subdirs); spin_unlock(&dentry->d_lock); spin_unlock(&parent->d_lock); - spin_unlock(&dcache_lock); } this_cpu_inc(nr_dentry); @@ -1325,7 +1265,6 @@ struct dentry *d_alloc_name(struct dentry *parent, const char *name) } EXPORT_SYMBOL(d_alloc_name); -/* the caller must hold dcache_lock */ static void __d_instantiate(struct dentry *dentry, struct inode *inode) { spin_lock(&dentry->d_lock); @@ -1354,11 +1293,9 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode) void d_instantiate(struct dentry *entry, struct inode * inode) { BUG_ON(!list_empty(&entry->d_alias)); - spin_lock(&dcache_lock); spin_lock(&dcache_inode_lock); __d_instantiate(entry, inode); spin_unlock(&dcache_inode_lock); - spin_unlock(&dcache_lock); security_d_instantiate(entry, inode); } EXPORT_SYMBOL(d_instantiate); @@ -1422,11 +1359,9 @@ struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode) BUG_ON(!list_empty(&entry->d_alias)); - spin_lock(&dcache_lock); spin_lock(&dcache_inode_lock); result = __d_instantiate_unique(entry, inode); spin_unlock(&dcache_inode_lock); - spin_unlock(&dcache_lock); if (!result) { security_d_instantiate(entry, inode); @@ -1515,12 +1450,11 @@ struct dentry *d_obtain_alias(struct inode *inode) } tmp->d_parent = tmp; /* make sure dput doesn't croak */ - spin_lock(&dcache_lock); + spin_lock(&dcache_inode_lock); res = __d_find_alias(inode, 0); if (res) { spin_unlock(&dcache_inode_lock); - spin_unlock(&dcache_lock); dput(tmp); goto out_iput; 
} @@ -1538,7 +1472,6 @@ struct dentry *d_obtain_alias(struct inode *inode) spin_unlock(&tmp->d_lock); spin_unlock(&dcache_inode_lock); - spin_unlock(&dcache_lock); return tmp; out_iput: @@ -1568,21 +1501,18 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry) struct dentry *new = NULL; if (inode && S_ISDIR(inode->i_mode)) { - spin_lock(&dcache_lock); spin_lock(&dcache_inode_lock); new = __d_find_alias(inode, 1); if (new) { BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED)); spin_unlock(&dcache_inode_lock); - spin_unlock(&dcache_lock); security_d_instantiate(new, inode); d_move(new, dentry); iput(inode); } else { - /* already taking dcache_lock, so d_add() by hand */ + /* already taking dcache_inode_lock, so d_add() by hand */ __d_instantiate(dentry, inode); spin_unlock(&dcache_inode_lock); - spin_unlock(&dcache_lock); security_d_instantiate(dentry, inode); d_rehash(dentry); } @@ -1655,12 +1585,10 @@ struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode, * Negative dentry: instantiate it unless the inode is a directory and * already has a dentry. */ - spin_lock(&dcache_lock); spin_lock(&dcache_inode_lock); if (!S_ISDIR(inode->i_mode) || list_empty(&inode->i_dentry)) { __d_instantiate(found, inode); spin_unlock(&dcache_inode_lock); - spin_unlock(&dcache_lock); security_d_instantiate(found, inode); return found; } @@ -1672,7 +1600,6 @@ struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode, new = list_entry(inode->i_dentry.next, struct dentry, d_alias); dget_locked(new); spin_unlock(&dcache_inode_lock); - spin_unlock(&dcache_lock); security_d_instantiate(found, inode); d_move(new, found); iput(inode); @@ -1843,7 +1770,6 @@ int d_validate(struct dentry *dentry, struct dentry *dparent) { struct dentry *child; - spin_lock(&dcache_lock); spin_lock(&dparent->d_lock); list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) { if (dentry == child) { @@ -1851,12 +1777,10 @@ int d_validate(struct dentry *dentry, struct dentry *dparent) __dget_locked_dlock(dentry); spin_unlock(&dentry->d_lock); spin_unlock(&dparent->d_lock); - spin_unlock(&dcache_lock); return 1; } } spin_unlock(&dparent->d_lock); - spin_unlock(&dcache_lock); return 0; } @@ -1889,7 +1813,6 @@ void d_delete(struct dentry * dentry) /* * Are we the only user? */ - spin_lock(&dcache_lock); spin_lock(&dcache_inode_lock); spin_lock(&dentry->d_lock); isdir = S_ISDIR(dentry->d_inode->i_mode); @@ -1905,7 +1828,6 @@ void d_delete(struct dentry * dentry) spin_unlock(&dentry->d_lock); spin_unlock(&dcache_inode_lock); - spin_unlock(&dcache_lock); fsnotify_nameremove(dentry, isdir); } @@ -1932,13 +1854,11 @@ static void _d_rehash(struct dentry * entry) void d_rehash(struct dentry * entry) { - spin_lock(&dcache_lock); spin_lock(&entry->d_lock); spin_lock(&dcache_hash_lock); _d_rehash(entry); spin_unlock(&dcache_hash_lock); spin_unlock(&entry->d_lock); - spin_unlock(&dcache_lock); } EXPORT_SYMBOL(d_rehash); @@ -1961,11 +1881,9 @@ void dentry_update_name_case(struct dentry *dentry, struct qstr *name) BUG_ON(!mutex_is_locked(&dentry->d_inode->i_mutex)); BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */ - spin_lock(&dcache_lock); spin_lock(&dentry->d_lock); memcpy((unsigned char *)dentry->d_name.name, name->name, name->len); spin_unlock(&dentry->d_lock); - spin_unlock(&dcache_lock); } EXPORT_SYMBOL(dentry_update_name_case); @@ -2058,14 +1976,14 @@ static void dentry_unlock_parents_for_move(struct dentry *dentry, * The hash value has to match the hash queue that the dentry is on.. 
*/ /* - * d_move_locked - move a dentry + * d_move - move a dentry * @dentry: entry to move * @target: new dentry * * Update the dcache to reflect the move of a file name. Negative * dcache entries should not be moved in this way. */ -static void d_move_locked(struct dentry * dentry, struct dentry * target) +void d_move(struct dentry * dentry, struct dentry * target) { if (!dentry->d_inode) printk(KERN_WARNING "VFS: moving negative dcache entry\n"); @@ -2114,22 +2032,6 @@ static void d_move_locked(struct dentry * dentry, struct dentry * target) spin_unlock(&dentry->d_lock); write_sequnlock(&rename_lock); } - -/** - * d_move - move a dentry - * @dentry: entry to move - * @target: new dentry - * - * Update the dcache to reflect the move of a file name. Negative - * dcache entries should not be moved in this way. - */ - -void d_move(struct dentry * dentry, struct dentry * target) -{ - spin_lock(&dcache_lock); - d_move_locked(dentry, target); - spin_unlock(&dcache_lock); -} EXPORT_SYMBOL(d_move); /** @@ -2155,13 +2057,12 @@ struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2) * This helper attempts to cope with remotely renamed directories * * It assumes that the caller is already holding - * dentry->d_parent->d_inode->i_mutex and the dcache_lock + * dentry->d_parent->d_inode->i_mutex and the dcache_inode_lock * * Note: If ever the locking in lock_rename() changes, then please * remember to update this too... */ static struct dentry *__d_unalias(struct dentry *dentry, struct dentry *alias) - __releases(dcache_lock) __releases(dcache_inode_lock) { struct mutex *m1 = NULL, *m2 = NULL; @@ -2185,11 +2086,10 @@ static struct dentry *__d_unalias(struct dentry *dentry, struct dentry *alias) goto out_err; m2 = &alias->d_parent->d_inode->i_mutex; out_unalias: - d_move_locked(alias, dentry); + d_move(alias, dentry); ret = alias; out_err: spin_unlock(&dcache_inode_lock); - spin_unlock(&dcache_lock); if (m2) mutex_unlock(m2); if (m1) @@ -2249,7 +2149,6 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode) BUG_ON(!d_unhashed(dentry)); - spin_lock(&dcache_lock); spin_lock(&dcache_inode_lock); if (!inode) { @@ -2295,7 +2194,6 @@ found: spin_unlock(&dcache_hash_lock); spin_unlock(&actual->d_lock); spin_unlock(&dcache_inode_lock); - spin_unlock(&dcache_lock); out_nolock: if (actual == dentry) { security_d_instantiate(dentry, inode); @@ -2307,7 +2205,6 @@ out_nolock: shouldnt_be_hashed: spin_unlock(&dcache_inode_lock); - spin_unlock(&dcache_lock); BUG(); } EXPORT_SYMBOL_GPL(d_materialise_unique); @@ -2421,11 +2318,9 @@ char *__d_path(const struct path *path, struct path *root, int error; prepend(&res, &buflen, "\0", 1); - spin_lock(&dcache_lock); write_seqlock(&rename_lock); error = prepend_path(path, root, &res, &buflen); write_sequnlock(&rename_lock); - spin_unlock(&dcache_lock); if (error) return ERR_PTR(error); @@ -2487,14 +2382,12 @@ char *d_path(const struct path *path, char *buf, int buflen) return path->dentry->d_op->d_dname(path->dentry, buf, buflen); get_fs_root(current->fs, &root); - spin_lock(&dcache_lock); write_seqlock(&rename_lock); tmp = root; error = path_with_deleted(path, &tmp, &res, &buflen); if (error) res = ERR_PTR(error); write_sequnlock(&rename_lock); - spin_unlock(&dcache_lock); path_put(&root); return res; } @@ -2520,14 +2413,12 @@ char *d_path_with_unreachable(const struct path *path, char *buf, int buflen) return path->dentry->d_op->d_dname(path->dentry, buf, buflen); get_fs_root(current->fs, &root); - spin_lock(&dcache_lock); 
write_seqlock(&rename_lock); tmp = root; error = path_with_deleted(path, &tmp, &res, &buflen); if (!error && !path_equal(&tmp, &root)) error = prepend_unreachable(&res, &buflen); write_sequnlock(&rename_lock); - spin_unlock(&dcache_lock); path_put(&root); if (error) res = ERR_PTR(error); @@ -2594,11 +2485,9 @@ char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen) { char *retval; - spin_lock(&dcache_lock); write_seqlock(&rename_lock); retval = __dentry_path(dentry, buf, buflen); write_sequnlock(&rename_lock); - spin_unlock(&dcache_lock); return retval; } @@ -2609,7 +2498,6 @@ char *dentry_path(struct dentry *dentry, char *buf, int buflen) char *p = NULL; char *retval; - spin_lock(&dcache_lock); write_seqlock(&rename_lock); if (d_unlinked(dentry)) { p = buf + buflen; @@ -2619,12 +2507,10 @@ char *dentry_path(struct dentry *dentry, char *buf, int buflen) } retval = __dentry_path(dentry, buf, buflen); write_sequnlock(&rename_lock); - spin_unlock(&dcache_lock); if (!IS_ERR(retval) && p) *p = '/'; /* restore '/' overriden with '\0' */ return retval; Elong: - spin_unlock(&dcache_lock); return ERR_PTR(-ENAMETOOLONG); } @@ -2658,7 +2544,6 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size) get_fs_root_and_pwd(current->fs, &root, &pwd); error = -ENOENT; - spin_lock(&dcache_lock); write_seqlock(&rename_lock); if (!d_unlinked(pwd.dentry)) { unsigned long len; @@ -2669,7 +2554,6 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size) prepend(&cwd, &buflen, "\0", 1); error = prepend_path(&pwd, &tmp, &cwd, &buflen); write_sequnlock(&rename_lock); - spin_unlock(&dcache_lock); if (error) goto out; @@ -2690,7 +2574,6 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size) } } else { write_sequnlock(&rename_lock); - spin_unlock(&dcache_lock); } out: @@ -2776,7 +2659,6 @@ void d_genocide(struct dentry *root) rename_retry: this_parent = root; seq = read_seqbegin(&rename_lock); - spin_lock(&dcache_lock); spin_lock(&this_parent->d_lock); repeat: next = this_parent->d_subdirs.next; @@ -2823,7 +2705,6 @@ resume: if (this_parent != child->d_parent || read_seqretry(&rename_lock, seq)) { spin_unlock(&this_parent->d_lock); - spin_unlock(&dcache_lock); rcu_read_unlock(); goto rename_retry; } @@ -2832,7 +2713,6 @@ resume: goto resume; } spin_unlock(&this_parent->d_lock); - spin_unlock(&dcache_lock); if (read_seqretry(&rename_lock, seq)) goto rename_retry; } diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c index 84b8c460a781..53a5c08fb63c 100644 --- a/fs/exportfs/expfs.c +++ b/fs/exportfs/expfs.c @@ -47,24 +47,20 @@ find_acceptable_alias(struct dentry *result, if (acceptable(context, result)) return result; - spin_lock(&dcache_lock); spin_lock(&dcache_inode_lock); list_for_each_entry(dentry, &result->d_inode->i_dentry, d_alias) { dget_locked(dentry); spin_unlock(&dcache_inode_lock); - spin_unlock(&dcache_lock); if (toput) dput(toput); if (dentry != result && acceptable(context, dentry)) { dput(result); return dentry; } - spin_lock(&dcache_lock); spin_lock(&dcache_inode_lock); toput = dentry; } spin_unlock(&dcache_inode_lock); - spin_unlock(&dcache_lock); if (toput) dput(toput); diff --git a/fs/libfs.c b/fs/libfs.c index cc4794914b52..28b36663c44e 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -100,7 +100,6 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int origin) struct dentry *cursor = file->private_data; loff_t n = file->f_pos - 2; - spin_lock(&dcache_lock); spin_lock(&dentry->d_lock); /* d_lock not required for cursor */ 
list_del(&cursor->d_u.d_child); @@ -116,7 +115,6 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int origin) } list_add_tail(&cursor->d_u.d_child, p); spin_unlock(&dentry->d_lock); - spin_unlock(&dcache_lock); } } mutex_unlock(&dentry->d_inode->i_mutex); @@ -159,7 +157,6 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir) i++; /* fallthrough */ default: - spin_lock(&dcache_lock); spin_lock(&dentry->d_lock); if (filp->f_pos == 2) list_move(q, &dentry->d_subdirs); @@ -175,13 +172,11 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir) spin_unlock(&next->d_lock); spin_unlock(&dentry->d_lock); - spin_unlock(&dcache_lock); if (filldir(dirent, next->d_name.name, next->d_name.len, filp->f_pos, next->d_inode->i_ino, dt_type(next->d_inode)) < 0) return 0; - spin_lock(&dcache_lock); spin_lock(&dentry->d_lock); spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED); /* next is still alive */ @@ -191,7 +186,6 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir) filp->f_pos++; } spin_unlock(&dentry->d_lock); - spin_unlock(&dcache_lock); } return 0; } @@ -285,7 +279,6 @@ int simple_empty(struct dentry *dentry) struct dentry *child; int ret = 0; - spin_lock(&dcache_lock); spin_lock(&dentry->d_lock); list_for_each_entry(child, &dentry->d_subdirs, d_u.d_child) { spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED); @@ -298,7 +291,6 @@ int simple_empty(struct dentry *dentry) ret = 1; out: spin_unlock(&dentry->d_lock); - spin_unlock(&dcache_lock); return ret; } diff --git a/fs/namei.c b/fs/namei.c index cbfa5fb31072..5642bc2be418 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -612,8 +612,8 @@ int follow_up(struct path *path) return 1; } -/* no need for dcache_lock, as serialization is taken care in - * namespace.c +/* + * serialization is taken care of in namespace.c */ static int __follow_mount(struct path *path) { @@ -645,9 +645,6 @@ static void follow_mount(struct path *path) } } -/* no need for dcache_lock, as serialization is taken care in - * namespace.c - */ int follow_down(struct path *path) { struct vfsmount *mounted; @@ -2131,12 +2128,10 @@ void dentry_unhash(struct dentry *dentry) { dget(dentry); shrink_dcache_parent(dentry); - spin_lock(&dcache_lock); spin_lock(&dentry->d_lock); if (dentry->d_count == 2) __d_drop(dentry); spin_unlock(&dentry->d_lock); - spin_unlock(&dcache_lock); } int vfs_rmdir(struct inode *dir, struct dentry *dentry) diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c index 102278ed38bd..de15c533311c 100644 --- a/fs/ncpfs/dir.c +++ b/fs/ncpfs/dir.c @@ -391,7 +391,6 @@ ncp_dget_fpos(struct dentry *dentry, struct dentry *parent, unsigned long fpos) } /* If a pointer is invalid, we search the dentry. 
*/ - spin_lock(&dcache_lock); spin_lock(&parent->d_lock); next = parent->d_subdirs.next; while (next != &parent->d_subdirs) { @@ -402,13 +401,11 @@ ncp_dget_fpos(struct dentry *dentry, struct dentry *parent, unsigned long fpos) else dent = NULL; spin_unlock(&parent->d_lock); - spin_unlock(&dcache_lock); goto out; } next = next->next; } spin_unlock(&parent->d_lock); - spin_unlock(&dcache_lock); return NULL; out: diff --git a/fs/ncpfs/ncplib_kernel.h b/fs/ncpfs/ncplib_kernel.h index c4b718ff9a6b..1220df75ff22 100644 --- a/fs/ncpfs/ncplib_kernel.h +++ b/fs/ncpfs/ncplib_kernel.h @@ -193,7 +193,6 @@ ncp_renew_dentries(struct dentry *parent) struct list_head *next; struct dentry *dentry; - spin_lock(&dcache_lock); spin_lock(&parent->d_lock); next = parent->d_subdirs.next; while (next != &parent->d_subdirs) { @@ -207,7 +206,6 @@ ncp_renew_dentries(struct dentry *parent) next = next->next; } spin_unlock(&parent->d_lock); - spin_unlock(&dcache_lock); } static inline void @@ -217,7 +215,6 @@ ncp_invalidate_dircache_entries(struct dentry *parent) struct list_head *next; struct dentry *dentry; - spin_lock(&dcache_lock); spin_lock(&parent->d_lock); next = parent->d_subdirs.next; while (next != &parent->d_subdirs) { @@ -227,7 +224,6 @@ ncp_invalidate_dircache_entries(struct dentry *parent) next = next->next; } spin_unlock(&parent->d_lock); - spin_unlock(&dcache_lock); } struct ncp_cache_head { diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 12de824edb5c..eb77471b8823 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1718,11 +1718,9 @@ static int nfs_unlink(struct inode *dir, struct dentry *dentry) dfprintk(VFS, "NFS: unlink(%s/%ld, %s)\n", dir->i_sb->s_id, dir->i_ino, dentry->d_name.name); - spin_lock(&dcache_lock); spin_lock(&dentry->d_lock); if (dentry->d_count > 1) { spin_unlock(&dentry->d_lock); - spin_unlock(&dcache_lock); /* Start asynchronous writeout of the inode */ write_inode_now(dentry->d_inode, 0); error = nfs_sillyrename(dir, dentry); @@ -1733,7 +1731,6 @@ static int nfs_unlink(struct inode *dir, struct dentry *dentry) need_rehash = 1; } spin_unlock(&dentry->d_lock); - spin_unlock(&dcache_lock); error = nfs_safe_remove(dentry); if (!error || error == -ENOENT) { nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c index 850f67d5f0ac..b3e36c3430de 100644 --- a/fs/nfs/getroot.c +++ b/fs/nfs/getroot.c @@ -63,13 +63,11 @@ static int nfs_superblock_set_dummy_root(struct super_block *sb, struct inode *i * This again causes shrink_dcache_for_umount_subtree() to * Oops, since the test for IS_ROOT() will fail. 
*/ - spin_lock(&dcache_lock); spin_lock(&dcache_inode_lock); spin_lock(&sb->s_root->d_lock); list_del_init(&sb->s_root->d_alias); spin_unlock(&sb->s_root->d_lock); spin_unlock(&dcache_inode_lock); - spin_unlock(&dcache_lock); } return 0; } diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c index 78c0ebb0b07c..74aaf3963c10 100644 --- a/fs/nfs/namespace.c +++ b/fs/nfs/namespace.c @@ -60,7 +60,6 @@ rename_retry: seq = read_seqbegin(&rename_lock); rcu_read_lock(); - spin_lock(&dcache_lock); while (!IS_ROOT(dentry) && dentry != droot) { namelen = dentry->d_name.len; buflen -= namelen + 1; @@ -71,7 +70,6 @@ rename_retry: *--end = '/'; dentry = dentry->d_parent; } - spin_unlock(&dcache_lock); rcu_read_unlock(); if (read_seqretry(&rename_lock, seq)) goto rename_retry; @@ -91,7 +89,6 @@ rename_retry: memcpy(end, base, namelen); return end; Elong_unlock: - spin_unlock(&dcache_lock); rcu_read_unlock(); if (read_seqretry(&rename_lock, seq)) goto rename_retry; diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c index ae769fc9b66c..9be6ec1f36d8 100644 --- a/fs/notify/fsnotify.c +++ b/fs/notify/fsnotify.c @@ -59,7 +59,6 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode) /* determine if the children should tell inode about their events */ watched = fsnotify_inode_watches_children(inode); - spin_lock(&dcache_lock); spin_lock(&dcache_inode_lock); /* run all of the dentries associated with this inode. Since this is a * directory, there damn well better only be one item on this list */ @@ -84,7 +83,6 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode) spin_unlock(&alias->d_lock); } spin_unlock(&dcache_inode_lock); - spin_unlock(&dcache_lock); } /* Notify this dentry's parent about a child's events. */ diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c index c31b5c647ac7..b7de749bdd12 100644 --- a/fs/ocfs2/dcache.c +++ b/fs/ocfs2/dcache.c @@ -169,7 +169,6 @@ struct dentry *ocfs2_find_local_alias(struct inode *inode, struct list_head *p; struct dentry *dentry = NULL; - spin_lock(&dcache_lock); spin_lock(&dcache_inode_lock); list_for_each(p, &inode->i_dentry) { dentry = list_entry(p, struct dentry, d_alias); @@ -189,7 +188,6 @@ struct dentry *ocfs2_find_local_alias(struct inode *inode, } spin_unlock(&dcache_inode_lock); - spin_unlock(&dcache_lock); return dentry; } diff --git a/include/linux/dcache.h b/include/linux/dcache.h index c963ebada922..a2ceb94b0e38 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -183,7 +183,6 @@ struct dentry_operations { #define DCACHE_GENOCIDE 0x0200 extern spinlock_t dcache_inode_lock; -extern spinlock_t dcache_lock; extern seqlock_t rename_lock; static inline int dname_external(struct dentry *dentry) @@ -296,8 +295,8 @@ extern char *dentry_path(struct dentry *, char *, int); * destroyed when it has references. dget() should never be * called for dentries with zero reference counter. For these cases * (preferably none, functions in dcache.c are sufficient for normal - * needs and they take necessary precautions) you should hold dcache_lock - * and call dget_locked() instead of dget(). + * needs and they take necessary precautions) you should hold d_lock + * and call dget_dlock() instead of dget(). 
*/ static inline struct dentry *dget_dlock(struct dentry *dentry) { diff --git a/include/linux/fs.h b/include/linux/fs.h index 090f0eacde29..296cf2fde945 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1378,7 +1378,7 @@ struct super_block { #else struct list_head s_files; #endif - /* s_dentry_lru and s_nr_dentry_unused are protected by dcache_lock */ + /* s_dentry_lru, s_nr_dentry_unused protected by dcache.c lru locks */ struct list_head s_dentry_lru; /* unused dentry lru */ int s_nr_dentry_unused; /* # of dentry on lru */ @@ -2446,6 +2446,10 @@ static inline ino_t parent_ino(struct dentry *dentry) { ino_t res; + /* + * Don't strictly need d_lock here? If the parent ino could change + * then surely we'd have a deeper race in the caller? + */ spin_lock(&dentry->d_lock); res = dentry->d_parent->d_inode->i_ino; spin_unlock(&dentry->d_lock); diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index b10bcdeaef76..2a53f10712b3 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -17,7 +17,6 @@ /* * fsnotify_d_instantiate - instantiate a dentry for inode - * Called with dcache_lock held. */ static inline void fsnotify_d_instantiate(struct dentry *dentry, struct inode *inode) @@ -62,7 +61,6 @@ static inline int fsnotify_perm(struct file *file, int mask) /* * fsnotify_d_move - dentry has been moved - * Called with dcache_lock and dentry->d_lock held. */ static inline void fsnotify_d_move(struct dentry *dentry) { diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 7380763595d3..69ad89b50489 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -329,9 +329,15 @@ static inline void __fsnotify_update_dcache_flags(struct dentry *dentry) { struct dentry *parent; - assert_spin_locked(&dcache_lock); assert_spin_locked(&dentry->d_lock); + /* + * Serialisation of setting PARENT_WATCHED on the dentries is provided + * by d_lock. If inotify_inode_watched changes after we have taken + * d_lock, the following __fsnotify_update_child_dentry_flags call will + * find our entry, so it will spin until we complete here, and update + * us with the new state. + */ parent = dentry->d_parent; if (parent->d_inode && fsnotify_inode_watches_children(parent->d_inode)) dentry->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED; @@ -341,15 +347,12 @@ static inline void __fsnotify_update_dcache_flags(struct dentry *dentry) /* * fsnotify_d_instantiate - instantiate a dentry for inode - * Called with dcache_lock held. 
*/ static inline void __fsnotify_d_instantiate(struct dentry *dentry, struct inode *inode) { if (!inode) return; - assert_spin_locked(&dcache_lock); - spin_lock(&dentry->d_lock); __fsnotify_update_dcache_flags(dentry); spin_unlock(&dentry->d_lock); diff --git a/include/linux/namei.h b/include/linux/namei.h index 05b441d93642..aec730b53935 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -41,7 +41,6 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND}; * - require a directory * - ending slashes ok even for nonexistent files * - internal "there are more path components" flag - * - locked when lookup done with dcache_lock held * - dentry cache is untrusted; force a real lookup */ #define LOOKUP_FOLLOW 1 diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 7b4705b51d4a..1864cb6a6a59 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -876,7 +876,6 @@ static void cgroup_clear_directory(struct dentry *dentry) struct list_head *node; BUG_ON(!mutex_is_locked(&dentry->d_inode->i_mutex)); - spin_lock(&dcache_lock); spin_lock(&dentry->d_lock); node = dentry->d_subdirs.next; while (node != &dentry->d_subdirs) { @@ -891,18 +890,15 @@ static void cgroup_clear_directory(struct dentry *dentry) dget_locked_dlock(d); spin_unlock(&d->d_lock); spin_unlock(&dentry->d_lock); - spin_unlock(&dcache_lock); d_delete(d); simple_unlink(dentry->d_inode, d); dput(d); - spin_lock(&dcache_lock); spin_lock(&dentry->d_lock); } else spin_unlock(&d->d_lock); node = dentry->d_subdirs.next; } spin_unlock(&dentry->d_lock); - spin_unlock(&dcache_lock); } /* @@ -914,14 +910,12 @@ static void cgroup_d_remove_dir(struct dentry *dentry) cgroup_clear_directory(dentry); - spin_lock(&dcache_lock); parent = dentry->d_parent; spin_lock(&parent->d_lock); spin_lock(&dentry->d_lock); list_del_init(&dentry->d_u.d_child); spin_unlock(&dentry->d_lock); spin_unlock(&parent->d_lock); - spin_unlock(&dcache_lock); remove_dir(dentry); } diff --git a/mm/filemap.c b/mm/filemap.c index 6b9aee20f242..ca389394fa2a 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -102,9 +102,6 @@ * ->inode_lock (zap_pte_range->set_page_dirty) * ->private_lock (zap_pte_range->__set_page_dirty_buffers) * - * ->task->proc_lock - * ->dcache_lock (proc_pid_lookup) - * * (code doesn't rely on that order, so you could switch it around) * ->tasklist_lock (memory_failure, collect_procs_ao) * ->i_mmap_lock diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c index 017ec096446e..2285d693f296 100644 --- a/security/selinux/selinuxfs.c +++ b/security/selinux/selinuxfs.c @@ -1145,7 +1145,6 @@ static void sel_remove_entries(struct dentry *de) { struct list_head *node; - spin_lock(&dcache_lock); spin_lock(&de->d_lock); node = de->d_subdirs.next; while (node != &de->d_subdirs) { @@ -1158,11 +1157,9 @@ static void sel_remove_entries(struct dentry *de) dget_locked_dlock(d); spin_unlock(&de->d_lock); spin_unlock(&d->d_lock); - spin_unlock(&dcache_lock); d_delete(d); simple_unlink(de->d_inode, d); dput(d); - spin_lock(&dcache_lock); spin_lock(&de->d_lock); } else spin_unlock(&d->d_lock); @@ -1170,7 +1167,6 @@ static void sel_remove_entries(struct dentry *de) } spin_unlock(&de->d_lock); - spin_unlock(&dcache_lock); } #define BOOL_DIR_NAME "booleans" -- cgit v1.2.3-58-ga151 From dc0474be3e27463d4d4a2793f82366eed906f223 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Fri, 7 Jan 2011 17:49:43 +1100 Subject: fs: dcache rationalise dget variants dget_locked was a shortcut to avoid the lazy lru manipulation when we already held dcache_lock (lru 
manipulation was relatively cheap at that point). However, how that the lru lock is an innermost one, we never hold it at any caller, so the lock cost can now be avoided. We already have well working lazy dcache LRU, so it should be fine to defer LRU manipulations to scan time. Signed-off-by: Nick Piggin --- arch/powerpc/platforms/cell/spufs/inode.c | 2 +- drivers/infiniband/hw/ipath/ipath_fs.c | 2 +- drivers/infiniband/hw/qib/qib_fs.c | 2 +- drivers/staging/smbfs/cache.c | 2 +- fs/configfs/inode.c | 2 +- fs/dcache.c | 36 ++++++++++--------------------- fs/exportfs/expfs.c | 2 +- fs/ncpfs/dir.c | 2 +- fs/ocfs2/dcache.c | 2 +- include/linux/dcache.h | 15 +++---------- kernel/cgroup.c | 2 +- security/selinux/selinuxfs.c | 2 +- 12 files changed, 24 insertions(+), 47 deletions(-) (limited to 'security') diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index 2662b50ea8d4..03185de7cd4a 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c @@ -161,7 +161,7 @@ static void spufs_prune_dir(struct dentry *dir) list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) { spin_lock(&dentry->d_lock); if (!(d_unhashed(dentry)) && dentry->d_inode) { - dget_locked_dlock(dentry); + dget_dlock(dentry); __d_drop(dentry); spin_unlock(&dentry->d_lock); simple_unlink(dir->d_inode, dentry); diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c index 925e88227ded..31ae1b108aea 100644 --- a/drivers/infiniband/hw/ipath/ipath_fs.c +++ b/drivers/infiniband/hw/ipath/ipath_fs.c @@ -279,7 +279,7 @@ static int remove_file(struct dentry *parent, char *name) spin_lock(&tmp->d_lock); if (!(d_unhashed(tmp) && tmp->d_inode)) { - dget_locked_dlock(tmp); + dget_dlock(tmp); __d_drop(tmp); spin_unlock(&tmp->d_lock); simple_unlink(parent->d_inode, tmp); diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c index 49af4a6538ba..df7fa251dcdc 100644 --- a/drivers/infiniband/hw/qib/qib_fs.c +++ b/drivers/infiniband/hw/qib/qib_fs.c @@ -455,7 +455,7 @@ static int remove_file(struct dentry *parent, char *name) spin_lock(&tmp->d_lock); if (!(d_unhashed(tmp) && tmp->d_inode)) { - dget_locked_dlock(tmp); + dget_dlock(tmp); __d_drop(tmp); spin_unlock(&tmp->d_lock); simple_unlink(parent->d_inode, tmp); diff --git a/drivers/staging/smbfs/cache.c b/drivers/staging/smbfs/cache.c index 75dfd403fb90..f2a1323ca827 100644 --- a/drivers/staging/smbfs/cache.c +++ b/drivers/staging/smbfs/cache.c @@ -102,7 +102,7 @@ smb_dget_fpos(struct dentry *dentry, struct dentry *parent, unsigned long fpos) dent = list_entry(next, struct dentry, d_u.d_child); if ((unsigned long)dent->d_fsdata == fpos) { if (dent->d_inode) - dget_locked(dent); + dget(dent); else dent = NULL; goto out_unlock; diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c index fb3a55fff82a..c83f4768eeaa 100644 --- a/fs/configfs/inode.c +++ b/fs/configfs/inode.c @@ -252,7 +252,7 @@ void configfs_drop_dentry(struct configfs_dirent * sd, struct dentry * parent) if (dentry) { spin_lock(&dentry->d_lock); if (!(d_unhashed(dentry) && dentry->d_inode)) { - dget_locked_dlock(dentry); + dget_dlock(dentry); __d_drop(dentry); spin_unlock(&dentry->d_lock); simple_unlink(parent->d_inode, dentry); diff --git a/fs/dcache.c b/fs/dcache.c index 01f016799fd4..b4d2e28eef5b 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -429,32 +429,17 @@ int d_invalidate(struct dentry * dentry) EXPORT_SYMBOL(d_invalidate); /* This must be called with d_lock 
held */ -static inline struct dentry * __dget_locked_dlock(struct dentry *dentry) +static inline void __dget_dlock(struct dentry *dentry) { dentry->d_count++; - dentry_lru_del(dentry); - return dentry; } -/* This must be called with d_lock held */ -static inline struct dentry * __dget_locked(struct dentry *dentry) +static inline void __dget(struct dentry *dentry) { spin_lock(&dentry->d_lock); - __dget_locked_dlock(dentry); + __dget_dlock(dentry); spin_unlock(&dentry->d_lock); - return dentry; -} - -struct dentry * dget_locked_dlock(struct dentry *dentry) -{ - return __dget_locked_dlock(dentry); -} - -struct dentry * dget_locked(struct dentry *dentry) -{ - return __dget_locked(dentry); } -EXPORT_SYMBOL(dget_locked); struct dentry *dget_parent(struct dentry *dentry) { @@ -512,7 +497,7 @@ again: (alias->d_flags & DCACHE_DISCONNECTED)) { discon_alias = alias; } else if (!want_discon) { - __dget_locked_dlock(alias); + __dget_dlock(alias); spin_unlock(&alias->d_lock); return alias; } @@ -525,7 +510,7 @@ again: if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) { if (IS_ROOT(alias) && (alias->d_flags & DCACHE_DISCONNECTED)) { - __dget_locked_dlock(alias); + __dget_dlock(alias); spin_unlock(&alias->d_lock); return alias; } @@ -561,7 +546,7 @@ restart: list_for_each_entry(dentry, &inode->i_dentry, d_alias) { spin_lock(&dentry->d_lock); if (!dentry->d_count) { - __dget_locked_dlock(dentry); + __dget_dlock(dentry); __d_drop(dentry); spin_unlock(&dentry->d_lock); spin_unlock(&dcache_inode_lock); @@ -1257,7 +1242,8 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name) * don't need child lock because it is not subject * to concurrency here */ - dentry->d_parent = dget_dlock(parent); + __dget_dlock(parent); + dentry->d_parent = parent; dentry->d_sb = parent->d_sb; list_add(&dentry->d_u.d_child, &parent->d_subdirs); spin_unlock(&parent->d_lock); @@ -1360,7 +1346,7 @@ static struct dentry *__d_instantiate_unique(struct dentry *entry, continue; if (memcmp(qstr->name, name, len)) continue; - dget_locked(alias); + __dget(alias); return alias; } @@ -1613,7 +1599,7 @@ struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode, * reference to it, move it in place and use it. 
*/ new = list_entry(inode->i_dentry.next, struct dentry, d_alias); - dget_locked(new); + __dget(new); spin_unlock(&dcache_inode_lock); security_d_instantiate(found, inode); d_move(new, found); @@ -1789,7 +1775,7 @@ int d_validate(struct dentry *dentry, struct dentry *dparent) list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) { if (dentry == child) { spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); - __dget_locked_dlock(dentry); + __dget_dlock(dentry); spin_unlock(&dentry->d_lock); spin_unlock(&dparent->d_lock); return 1; diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c index 53a5c08fb63c..f06a940065f6 100644 --- a/fs/exportfs/expfs.c +++ b/fs/exportfs/expfs.c @@ -49,7 +49,7 @@ find_acceptable_alias(struct dentry *result, spin_lock(&dcache_inode_lock); list_for_each_entry(dentry, &result->d_inode->i_dentry, d_alias) { - dget_locked(dentry); + dget(dentry); spin_unlock(&dcache_inode_lock); if (toput) dput(toput); diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c index de15c533311c..0ba3cdc95a44 100644 --- a/fs/ncpfs/dir.c +++ b/fs/ncpfs/dir.c @@ -397,7 +397,7 @@ ncp_dget_fpos(struct dentry *dentry, struct dentry *parent, unsigned long fpos) dent = list_entry(next, struct dentry, d_u.d_child); if ((unsigned long)dent->d_fsdata == fpos) { if (dent->d_inode) - dget_locked(dent); + dget(dent); else dent = NULL; spin_unlock(&parent->d_lock); diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c index b7de749bdd12..4d54c60ceee4 100644 --- a/fs/ocfs2/dcache.c +++ b/fs/ocfs2/dcache.c @@ -178,7 +178,7 @@ struct dentry *ocfs2_find_local_alias(struct inode *inode, mlog(0, "dentry found: %.*s\n", dentry->d_name.len, dentry->d_name.name); - dget_locked_dlock(dentry); + dget_dlock(dentry); spin_unlock(&dentry->d_lock); break; } diff --git a/include/linux/dcache.h b/include/linux/dcache.h index a2ceb94b0e38..ca648685f0cc 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -287,23 +287,17 @@ extern char *dentry_path(struct dentry *, char *, int); /* Allocation counts.. */ /** - * dget, dget_locked - get a reference to a dentry + * dget, dget_dlock - get a reference to a dentry * @dentry: dentry to get a reference to * * Given a dentry or %NULL pointer increment the reference count * if appropriate and return the dentry. A dentry will not be - * destroyed when it has references. dget() should never be - * called for dentries with zero reference counter. For these cases - * (preferably none, functions in dcache.c are sufficient for normal - * needs and they take necessary precautions) you should hold d_lock - * and call dget_dlock() instead of dget(). + * destroyed when it has references. 
*/ static inline struct dentry *dget_dlock(struct dentry *dentry) { - if (dentry) { - BUG_ON(!dentry->d_count); + if (dentry) dentry->d_count++; - } return dentry; } @@ -317,9 +311,6 @@ static inline struct dentry *dget(struct dentry *dentry) return dentry; } -extern struct dentry * dget_locked(struct dentry *); -extern struct dentry * dget_locked_dlock(struct dentry *); - extern struct dentry *dget_parent(struct dentry *dentry); /** diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 1864cb6a6a59..9f41470c3949 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -887,7 +887,7 @@ static void cgroup_clear_directory(struct dentry *dentry) /* This should never be called on a cgroup * directory with child cgroups */ BUG_ON(d->d_inode->i_mode & S_IFDIR); - dget_locked_dlock(d); + dget_dlock(d); spin_unlock(&d->d_lock); spin_unlock(&dentry->d_lock); d_delete(d); diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c index 2285d693f296..43deac219491 100644 --- a/security/selinux/selinuxfs.c +++ b/security/selinux/selinuxfs.c @@ -1154,7 +1154,7 @@ static void sel_remove_entries(struct dentry *de) list_del_init(node); if (d->d_inode) { - dget_locked_dlock(d); + dget_dlock(d); spin_unlock(&de->d_lock); spin_unlock(&d->d_lock); d_delete(d); -- cgit v1.2.3-58-ga151 From 31e6b01f4183ff419a6d1f86177cbf4662347cec Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Fri, 7 Jan 2011 17:49:52 +1100 Subject: fs: rcu-walk for path lookup Perform common cases of path lookups without any stores or locking in the ancestor dentry elements. This is called rcu-walk, as opposed to the current algorithm which is a refcount based walk, or ref-walk. This results in far fewer atomic operations on every path element, significantly improving path lookup performance. It also avoids cacheline bouncing on common dentries, significantly improving scalability. The overall design is like this: * LOOKUP_RCU is set in nd->flags, which distinguishes rcu-walk from ref-walk. * Take the RCU lock for the entire path walk, starting with the acquiring of the starting path (eg. root/cwd/fd-path). So now dentry refcounts are not required for dentry persistence. * synchronize_rcu is called when unregistering a filesystem, so we can access d_ops and i_ops during rcu-walk. * Similarly take the vfsmount lock for the entire path walk. So now mnt refcounts are not required for persistence. Also we are free to perform mount lookups, and to assume dentry mount points and mount roots are stable up and down the path. * Have a per-dentry seqlock to protect the dentry name, parent, and inode, so we can load this tuple atomically, and also check whether any of its members have changed. * Dentry lookups (based on parent, candidate string tuple) recheck the parent sequence after the child is found in case anything changed in the parent during the path walk. * inode is also RCU protected so we can load d_inode and use the inode for limited things. * i_mode, i_uid, i_gid can be tested for exec permissions during path walk. * i_op can be loaded. When we reach the destination dentry, we lock it, recheck lookup sequence, and increment its refcount and mountpoint refcount. RCU and vfsmount locks are dropped. This is termed "dropping rcu-walk". If the dentry refcount does not match, we can not drop rcu-walk gracefully at the current point in the lokup, so instead return -ECHILD (for want of a better errno). This signals the path walking code to re-do the entire lookup with a ref-walk. 
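To illustrate that fallback (this is not part of the patch), here is a minimal userspace C sketch of the control flow: try the store-free walk first, and if it reports -ECHILD, redo the whole lookup in ref-walk mode. The walk_rcu() and walk_ref() helpers are hypothetical stand-ins, not kernel functions.

    #include <errno.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the two walking modes. */
    static int walk_rcu(const char *path)
    {
        (void)path;
        return -ECHILD;     /* pretend rcu-walk could not continue */
    }

    static int walk_ref(const char *path)
    {
        (void)path;
        return 0;           /* ref-walk may sleep, take locks, allocate */
    }

    static int lookup(const char *path)
    {
        int err = walk_rcu(path);

        if (err == -ECHILD) /* rcu-walk gave up: restart in ref-walk mode */
            err = walk_ref(path);
        return err;
    }

    int main(void)
    {
        printf("lookup returned %d\n", lookup("/home/npiggin/test.c"));
        return 0;
    }

The point is only the control flow: -ECHILD is an internal signal that selects the slow path, it is not returned to userspace from the fast path.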
Aside from the final dentry, there are other situations that may be encounted where we cannot continue rcu-walk. In that case, we drop rcu-walk (ie. take a reference on the last good dentry) and continue with a ref-walk. Again, if we can drop rcu-walk gracefully, we return -ECHILD and do the whole lookup using ref-walk. But it is very important that we can continue with ref-walk for most cases, particularly to avoid the overhead of double lookups, and to gain the scalability advantages on common path elements (like cwd and root). The cases where rcu-walk cannot continue are: * NULL dentry (ie. any uncached path element) * parent with d_inode->i_op->permission or ACLs * dentries with d_revalidate * Following links In future patches, permission checks and d_revalidate become rcu-walk aware. It may be possible eventually to make following links rcu-walk aware. Uncached path elements will always require dropping to ref-walk mode, at the very least because i_mutex needs to be grabbed, and objects allocated. Signed-off-by: Nick Piggin --- Documentation/filesystems/dentry-locking.txt | 172 ------- Documentation/filesystems/path-lookup.txt | 345 +++++++++++++ fs/dcache.c | 203 +++++++- fs/filesystems.c | 3 + fs/namei.c | 743 ++++++++++++++++++++++----- fs/proc/proc_sysctl.c | 4 + include/linux/dcache.h | 32 +- include/linux/namei.h | 15 +- include/linux/security.h | 8 +- security/security.c | 9 + 10 files changed, 1194 insertions(+), 340 deletions(-) delete mode 100644 Documentation/filesystems/dentry-locking.txt create mode 100644 Documentation/filesystems/path-lookup.txt (limited to 'security') diff --git a/Documentation/filesystems/dentry-locking.txt b/Documentation/filesystems/dentry-locking.txt deleted file mode 100644 index 30b6a40f5650..000000000000 --- a/Documentation/filesystems/dentry-locking.txt +++ /dev/null @@ -1,172 +0,0 @@ -RCU-based dcache locking model -============================== - -On many workloads, the most common operation on dcache is to look up a -dentry, given a parent dentry and the name of the child. Typically, -for every open(), stat() etc., the dentry corresponding to the -pathname will be looked up by walking the tree starting with the first -component of the pathname and using that dentry along with the next -component to look up the next level and so on. Since it is a frequent -operation for workloads like multiuser environments and web servers, -it is important to optimize this path. - -Prior to 2.5.10, dcache_lock was acquired in d_lookup and thus in -every component during path look-up. Since 2.5.10 onwards, fast-walk -algorithm changed this by holding the dcache_lock at the beginning and -walking as many cached path component dentries as possible. This -significantly decreases the number of acquisition of -dcache_lock. However it also increases the lock hold time -significantly and affects performance in large SMP machines. Since -2.5.62 kernel, dcache has been using a new locking model that uses RCU -to make dcache look-up lock-free. - -The current dcache locking model is not very different from the -existing dcache locking model. Prior to 2.5.62 kernel, dcache_lock -protected the hash chain, d_child, d_alias, d_lru lists as well as -d_inode and several other things like mount look-up. RCU-based changes -affect only the way the hash chain is protected. For everything else -the dcache_lock must be taken for both traversing as well as -updating. The hash chain updates too take the dcache_lock. 
The -significant change is the way d_lookup traverses the hash chain, it -doesn't acquire the dcache_lock for this and rely on RCU to ensure -that the dentry has not been *freed*. - -dcache_lock no longer exists, dentry locking is explained in fs/dcache.c - -Dcache locking details -====================== - -For many multi-user workloads, open() and stat() on files are very -frequently occurring operations. Both involve walking of path names to -find the dentry corresponding to the concerned file. In 2.4 kernel, -dcache_lock was held during look-up of each path component. Contention -and cache-line bouncing of this global lock caused significant -scalability problems. With the introduction of RCU in Linux kernel, -this was worked around by making the look-up of path components during -path walking lock-free. - - -Safe lock-free look-up of dcache hash table -=========================================== - -Dcache is a complex data structure with the hash table entries also -linked together in other lists. In 2.4 kernel, dcache_lock protected -all the lists. RCU dentry hash walking works like this: - -1. The deletion from hash chain is done using hlist_del_rcu() macro - which doesn't initialize next pointer of the deleted dentry and - this allows us to walk safely lock-free while a deletion is - happening. This is a standard hlist_rcu iteration. - -2. Insertion of a dentry into the hash table is done using - hlist_add_head_rcu() which take care of ordering the writes - the - writes to the dentry must be visible before the dentry is - inserted. This works in conjunction with hlist_for_each_rcu(), - which has since been replaced by hlist_for_each_entry_rcu(), while - walking the hash chain. The only requirement is that all - initialization to the dentry must be done before - hlist_add_head_rcu() since we don't have lock protection - while traversing the hash chain. - -3. The dentry looked up without holding locks cannot be returned for - walking if it is unhashed. It then may have a NULL d_inode or other - bogosity since RCU doesn't protect the other fields in the dentry. We - therefore use a flag DCACHE_UNHASHED to indicate unhashed dentries - and use this in conjunction with a per-dentry lock (d_lock). Once - looked up without locks, we acquire the per-dentry lock (d_lock) and - check if the dentry is unhashed. If so, the look-up is failed. If not, - the reference count of the dentry is increased and the dentry is - returned. - -4. Once a dentry is looked up, it must be ensured during the path walk - for that component it doesn't go away. In pre-2.5.10 code, this was - done holding a reference to the dentry. dcache_rcu does the same. - In some sense, dcache_rcu path walking looks like the pre-2.5.10 - version. - -5. All dentry hash chain updates must take the per-dentry lock (see - fs/dcache.c). This excludes dput() to ensure that a dentry that has - been looked up concurrently does not get deleted before dget() can - take a ref. - -6. There are several ways to do reference counting of RCU protected - objects. One such example is in ipv4 route cache where deferred - freeing (using call_rcu()) is done as soon as the reference count - goes to zero. This cannot be done in the case of dentries because - tearing down of dentries require blocking (dentry_iput()) which - isn't supported from RCU callbacks. Instead, tearing down of - dentries happen synchronously in dput(), but actual freeing happens - later when RCU grace period is over. 
This allows safe lock-free - walking of the hash chains, but a matched dentry may have been - partially torn down. The checking of DCACHE_UNHASHED flag with - d_lock held detects such dentries and prevents them from being - returned from look-up. - - -Maintaining POSIX rename semantics -================================== - -Since look-up of dentries is lock-free, it can race against a -concurrent rename operation. For example, during rename of file A to -B, look-up of either A or B must succeed. So, if look-up of B happens -after A has been removed from the hash chain but not added to the new -hash chain, it may fail. Also, a comparison while the name is being -written concurrently by a rename may result in false positive matches -violating rename semantics. Issues related to race with rename are -handled as described below : - -1. Look-up can be done in two ways - d_lookup() which is safe from - simultaneous renames and __d_lookup() which is not. If - __d_lookup() fails, it must be followed up by a d_lookup() to - correctly determine whether a dentry is in the hash table or - not. d_lookup() protects look-ups using a sequence lock - (rename_lock). - -2. The name associated with a dentry (d_name) may be changed if a - rename is allowed to happen simultaneously. To avoid memcmp() in - __d_lookup() go out of bounds due to a rename and false positive - comparison, the name comparison is done while holding the - per-dentry lock. This prevents concurrent renames during this - operation. - -3. Hash table walking during look-up may move to a different bucket as - the current dentry is moved to a different bucket due to rename. - But we use hlists in dcache hash table and they are - null-terminated. So, even if a dentry moves to a different bucket, - hash chain walk will terminate. [with a list_head list, it may not - since termination is when the list_head in the original bucket is - reached]. Since we redo the d_parent check and compare name while - holding d_lock, lock-free look-up will not race against d_move(). - -4. There can be a theoretical race when a dentry keeps coming back to - original bucket due to double moves. Due to this look-up may - consider that it has never moved and can end up in a infinite loop. - But this is not any worse that theoretical livelocks we already - have in the kernel. - - -Important guidelines for filesystem developers related to dcache_rcu -==================================================================== - -1. Existing dcache interfaces (pre-2.5.62) exported to filesystem - don't change. Only dcache internal implementation changes. However - filesystems *must not* delete from the dentry hash chains directly - using the list macros like allowed earlier. They must use dcache - APIs like d_drop() or __d_drop() depending on the situation. - -2. d_flags is now protected by a per-dentry lock (d_lock). All access - to d_flags must be protected by it. - -3. For a hashed dentry, checking of d_count needs to be protected by - d_lock. - - -Papers and other documentation on dcache locking -================================================ - -1. Scaling dcache with RCU (http://linuxjournal.com/article.php?sid=7124). - -2. 
http://lse.sourceforge.net/locking/dcache/dcache.html - - - diff --git a/Documentation/filesystems/path-lookup.txt b/Documentation/filesystems/path-lookup.txt new file mode 100644 index 000000000000..09b2878724a1 --- /dev/null +++ b/Documentation/filesystems/path-lookup.txt @@ -0,0 +1,345 @@ +Path walking and name lookup locking +==================================== + +Path resolution is the finding a dentry corresponding to a path name string, by +performing a path walk. Typically, for every open(), stat() etc., the path name +will be resolved. Paths are resolved by walking the namespace tree, starting +with the first component of the pathname (eg. root or cwd) with a known dentry, +then finding the child of that dentry, which is named the next component in the +path string. Then repeating the lookup from the child dentry and finding its +child with the next element, and so on. + +Since it is a frequent operation for workloads like multiuser environments and +web servers, it is important to optimize this code. + +Path walking synchronisation history: +Prior to 2.5.10, dcache_lock was acquired in d_lookup (dcache hash lookup) and +thus in every component during path look-up. Since 2.5.10 onwards, fast-walk +algorithm changed this by holding the dcache_lock at the beginning and walking +as many cached path component dentries as possible. This significantly +decreases the number of acquisition of dcache_lock. However it also increases +the lock hold time significantly and affects performance in large SMP machines. +Since 2.5.62 kernel, dcache has been using a new locking model that uses RCU to +make dcache look-up lock-free. + +All the above algorithms required taking a lock and reference count on the +dentry that was looked up, so that may be used as the basis for walking the +next path element. This is inefficient and unscalable. It is inefficient +because of the locks and atomic operations required for every dentry element +slows things down. It is not scalable because many parallel applications that +are path-walk intensive tend to do path lookups starting from a common dentry +(usually, the root "/" or current working directory). So contention on these +common path elements causes lock and cacheline queueing. + +Since 2.6.38, RCU is used to make a significant part of the entire path walk +(including dcache look-up) completely "store-free" (so, no locks, atomics, or +even stores into cachelines of common dentries). This is known as "rcu-walk" +path walking. + +Path walking overview +===================== + +A name string specifies a start (root directory, cwd, fd-relative) and a +sequence of elements (directory entry names), which together refer to a path in +the namespace. A path is represented as a (dentry, vfsmount) tuple. The name +elements are sub-strings, seperated by '/'. + +Name lookups will want to find a particular path that a name string refers to +(usually the final element, or parent of final element). This is done by taking +the path given by the name's starting point (which we know in advance -- eg. +current->fs->cwd or current->fs->root) as the first parent of the lookup. Then +iteratively for each subsequent name element, look up the child of the current +parent with the given name and if it is not the desired entry, make it the +parent for the next lookup. + +A parent, of course, must be a directory, and we must have appropriate +permissions on the parent inode to be able to walk into it. 
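As a toy illustration of this iterative parent/child stepping (plain string handling in userspace, no dcache, no permission checks; the path is made up):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char path[] = "/home/npiggin/test.c";
        const char *parent = "/";           /* known start point */

        /* Each element is looked up as a child of the current parent,
         * then becomes the parent for the next element. */
        for (char *comp = strtok(path, "/"); comp;
             comp = strtok(NULL, "/")) {
            printf("lookup \"%s\" in \"%s\"\n", comp, parent);
            parent = comp;
        }
        return 0;
    }
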
+ +Turning the child into a parent for the next lookup requires more checks and +procedures. Symlinks essentially substitute the symlink name for the target +name in the name string, and require some recursive path walking. Mount points +must be followed into (thus changing the vfsmount that subsequent path elements +refer to), switching from the mount point path to the root of the particular +mounted vfsmount. These behaviours are variously modified depending on the +exact path walking flags. + +Path walking then must, broadly, do several particular things: +- find the start point of the walk; +- perform permissions and validity checks on inodes; +- perform dcache hash name lookups on (parent, name element) tuples; +- traverse mount points; +- traverse symlinks; +- lookup and create missing parts of the path on demand. + +Safe store-free look-up of dcache hash table +============================================ + +Dcache name lookup +------------------ +In order to lookup a dcache (parent, name) tuple, we take a hash on the tuple +and use that to select a bucket in the dcache-hash table. The list of entries +in that bucket is then walked, and we do a full comparison of each entry +against our (parent, name) tuple. + +The hash lists are RCU protected, so list walking is not serialised with +concurrent updates (insertion, deletion from the hash). This is a standard RCU +list application with the exception of renames, which will be covered below. + +Parent and name members of a dentry, as well as its membership in the dcache +hash, and its inode are protected by the per-dentry d_lock spinlock. A +reference is taken on the dentry (while the fields are verified under d_lock), +and this stabilises its d_inode pointer and actual inode. This gives a stable +point to perform the next step of our path walk against. + +These members are also protected by d_seq seqlock, although this offers +read-only protection and no durability of results, so care must be taken when +using d_seq for synchronisation (see seqcount based lookups, below). + +Renames +------- +Back to the rename case. In usual RCU protected lists, the only operations that +will happen to an object is insertion, and then eventually removal from the +list. The object will not be reused until an RCU grace period is complete. +This ensures the RCU list traversal primitives can run over the object without +problems (see RCU documentation for how this works). + +However when a dentry is renamed, its hash value can change, requiring it to be +moved to a new hash list. Allocating and inserting a new alias would be +expensive and also problematic for directory dentries. Latency would be far to +high to wait for a grace period after removing the dentry and before inserting +it in the new hash bucket. So what is done is to insert the dentry into the +new list immediately. + +However, when the dentry's list pointers are updated to point to objects in the +new list before waiting for a grace period, this can result in a concurrent RCU +lookup of the old list veering off into the new (incorrect) list and missing +the remaining dentries on the list. + +There is no fundamental problem with walking down the wrong list, because the +dentry comparisons will never match. However it is fatal to miss a matching +dentry. So a seqlock is used to detect when a rename has occurred, and so the +lookup can be retried. 
+ + 1 2 3 + +---+ +---+ +---+ +hlist-->| N-+->| N-+->| N-+-> +head <--+-P |<-+-P |<-+-P | + +---+ +---+ +---+ + +Rename of dentry 2 may require it deleted from the above list, and inserted +into a new list. Deleting 2 gives the following list. + + 1 3 + +---+ +---+ (don't worry, the longer pointers do not +hlist-->| N-+-------->| N-+-> impose a measurable performance overhead +head <--+-P |<--------+-P | on modern CPUs) + +---+ +---+ + ^ 2 ^ + | +---+ | + | | N-+----+ + +----+-P | + +---+ + +This is a standard RCU-list deletion, which leaves the deleted object's +pointers intact, so a concurrent list walker that is currently looking at +object 2 will correctly continue to object 3 when it is time to traverse the +next object. + +However, when inserting object 2 onto a new list, we end up with this: + + 1 3 + +---+ +---+ +hlist-->| N-+-------->| N-+-> +head <--+-P |<--------+-P | + +---+ +---+ + 2 + +---+ + | N-+----> + <----+-P | + +---+ + +Because we didn't wait for a grace period, there may be a concurrent lookup +still at 2. Now when it follows 2's 'next' pointer, it will walk off into +another list without ever having checked object 3. + +A related, but distinctly different, issue is that of rename atomicity versus +lookup operations. If a file is renamed from 'A' to 'B', a lookup must only +find either 'A' or 'B'. So if a lookup of 'A' returns NULL, a subsequent lookup +of 'B' must succeed (note the reverse is not true). + +Between deleting the dentry from the old hash list, and inserting it on the new +hash list, a lookup may find neither 'A' nor 'B' matching the dentry. The same +rename seqlock is also used to cover this race in much the same way, by +retrying a negative lookup result if a rename was in progress. + +Seqcount based lookups +---------------------- +In refcount based dcache lookups, d_lock is used to serialise access to +the dentry, stabilising it while comparing its name and parent and then +taking a reference count (the reference count then gives a stable place to +start the next part of the path walk from). + +As explained above, we would like to do path walking without taking locks or +reference counts on intermediate dentries along the path. To do this, a per +dentry seqlock (d_seq) is used to take a "coherent snapshot" of what the dentry +looks like (its name, parent, and inode). That snapshot is then used to start +the next part of the path walk. When loading the coherent snapshot under d_seq, +care must be taken to load the members up-front, and use those pointers rather +than reloading from the dentry later on (otherwise we'd have interesting things +like d_inode going NULL underneath us, if the name was unlinked). + +Also important is to avoid performing any destructive operations (pretty much: +no non-atomic stores to shared data), and to recheck the seqcount when we are +"done" with the operation. Retry or abort if the seqcount does not match. +Avoiding destructive or changing operations means we can easily unwind from +failure. + +What this means is that a caller, provided they are holding RCU lock to +protect the dentry object from disappearing, can perform a seqcount based +lookup which does not increment the refcount on the dentry or write to +it in any way. This returned dentry can be used for subsequent operations, +provided that d_seq is rechecked after that operation is complete. + +Inodes are also rcu freed, so the seqcount lookup dentry's inode may also be +queried for permissions. 
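A minimal userspace model of such a snapshot may help (illustrative only; the kernel's d_seq is a seqcount_t with its own helpers, and the struct below is invented): note the sequence, load the members up-front, then recheck the sequence and retry if it moved.

    #include <stdatomic.h>
    #include <stdio.h>

    struct dentry_model {
        atomic_uint seq;        /* even: stable, odd: writer in progress */
        const char *name;
        const char *parent;
    };

    static void snapshot(struct dentry_model *d,
                         const char **name, const char **parent)
    {
        unsigned seq;

        do {
            while ((seq = atomic_load(&d->seq)) & 1)
                ;               /* a rename is in flight, wait it out */
            *name = d->name;    /* load the members up-front... */
            *parent = d->parent;
        } while (atomic_load(&d->seq) != seq);  /* ...retry if they changed */
    }

    int main(void)
    {
        struct dentry_model d = {
            .seq = 0, .name = "test.c", .parent = "npiggin",
        };
        const char *name, *parent;

        snapshot(&d, &name, &parent);
        printf("%s/%s\n", parent, name);
        return 0;
    }

Note that nothing is written to the object and no reference is taken; once the final sequence check passes, the loaded values form a coherent snapshot that the next step of the walk can use.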
+ +With this two parts of the puzzle, we can do path lookups without taking +locks or refcounts on dentry elements. + +RCU-walk path walking design +============================ + +Path walking code now has two distinct modes, ref-walk and rcu-walk. ref-walk +is the traditional[*] way of performing dcache lookups using d_lock to +serialise concurrent modifications to the dentry and take a reference count on +it. ref-walk is simple and obvious, and may sleep, take locks, etc while path +walking is operating on each dentry. rcu-walk uses seqcount based dentry +lookups, and can perform lookup of intermediate elements without any stores to +shared data in the dentry or inode. rcu-walk can not be applied to all cases, +eg. if the filesystem must sleep or perform non trivial operations, rcu-walk +must be switched to ref-walk mode. + +[*] RCU is still used for the dentry hash lookup in ref-walk, but not the full + path walk. + +Where ref-walk uses a stable, refcounted ``parent'' to walk the remaining +path string, rcu-walk uses a d_seq protected snapshot. When looking up a +child of this parent snapshot, we open d_seq critical section on the child +before closing d_seq critical section on the parent. This gives an interlocking +ladder of snapshots to walk down. + + + proc 101 + /----------------\ + / comm: "vi" \ + / fs.root: dentry0 \ + \ fs.cwd: dentry2 / + \ / + \----------------/ + +So when vi wants to open("/home/npiggin/test.c", O_RDWR), then it will +start from current->fs->root, which is a pinned dentry. Alternatively, +"./test.c" would start from cwd; both names refer to the same path in +the context of proc101. + + dentry 0 + +---------------------+ rcu-walk begins here, we note d_seq, check the + | name: "/" | inode's permission, and then look up the next + | inode: 10 | path element which is "home"... + | children:"home", ...| + +---------------------+ + | + dentry 1 V + +---------------------+ ... which brings us here. We find dentry1 via + | name: "home" | hash lookup, then note d_seq and compare name + | inode: 678 | string and parent pointer. When we have a match, + | children:"npiggin" | we now recheck the d_seq of dentry0. Then we + +---------------------+ check inode and look up the next element. + | + dentry2 V + +---------------------+ Note: if dentry0 is now modified, lookup is + | name: "npiggin" | not necessarily invalid, so we need only keep a + | inode: 543 | parent for d_seq verification, and grandparents + | children:"a.c", ... | can be forgotten. + +---------------------+ + | + dentry3 V + +---------------------+ At this point we have our destination dentry. + | name: "a.c" | We now take its d_lock, verify d_seq of this + | inode: 14221 | dentry. If that checks out, we can increment + | children:NULL | its refcount because we're holding d_lock. + +---------------------+ + +Taking a refcount on a dentry from rcu-walk mode, by taking its d_lock, +re-checking its d_seq, and then incrementing its refcount is called +"dropping rcu" or dropping from rcu-walk into ref-walk mode. + +It is, in some sense, a bit of a house of cards. If the seqcount check of the +parent snapshot fails, the house comes down, because we had closed the d_seq +section on the grandparent, so we have nothing left to stand on. In that case, +the path walk must be fully restarted (which we do in ref-walk mode, to avoid +live locks). It is costly to have a full restart, but fortunately they are +quite rare. 
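For concreteness, the "dropping rcu" step described above can be modelled in a few lines (again a userspace sketch with invented field names, not the kernel structures): lock the candidate, verify the sequence observed during the walk, and only then take a reference.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct dentry_model {
        pthread_mutex_t lock;   /* stands in for d_lock */
        atomic_uint seq;        /* stands in for d_seq */
        int count;              /* stands in for d_count */
    };

    /* Convert an rcu-walk result into a real reference, or fail. */
    static bool rcu_to_ref(struct dentry_model *d, unsigned seen_seq)
    {
        bool ok;

        pthread_mutex_lock(&d->lock);
        ok = (atomic_load(&d->seq) == seen_seq);
        if (ok)
            d->count++;         /* safe: lock held and sequence unchanged */
        pthread_mutex_unlock(&d->lock);
        return ok;              /* false: caller restarts in ref-walk mode */
    }

    int main(void)
    {
        struct dentry_model d = {
            .lock = PTHREAD_MUTEX_INITIALIZER, .seq = 4, .count = 1,
        };

        printf("%s\n", rcu_to_ref(&d, 4) ? "got reference" : "restart");
        return 0;
    }

If the sequence no longer matches, the snapshot is stale and the caller falls back to a full ref-walk restart, as described above.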
+ +When we reach a point where sleeping is required, or a filesystem callout +requires ref-walk, then instead of restarting the walk, we attempt to drop rcu +at the last known good dentry we have. Avoiding a full restart in ref-walk in +these cases is fundamental for performance and scalability because blocking +operations such as creates and unlinks are not uncommon. + +The detailed design for rcu-walk is like this: +* LOOKUP_RCU is set in nd->flags, which distinguishes rcu-walk from ref-walk. +* Take the RCU lock for the entire path walk, starting with the acquiring + of the starting path (eg. root/cwd/fd-path). So now dentry refcounts are + not required for dentry persistence. +* synchronize_rcu is called when unregistering a filesystem, so we can + access d_ops and i_ops during rcu-walk. +* Similarly take the vfsmount lock for the entire path walk. So now mnt + refcounts are not required for persistence. Also we are free to perform mount + lookups, and to assume dentry mount points and mount roots are stable up and + down the path. +* Have a per-dentry seqlock to protect the dentry name, parent, and inode, + so we can load this tuple atomically, and also check whether any of its + members have changed. +* Dentry lookups (based on parent, candidate string tuple) recheck the parent + sequence after the child is found in case anything changed in the parent + during the path walk. +* inode is also RCU protected so we can load d_inode and use the inode for + limited things. +* i_mode, i_uid, i_gid can be tested for exec permissions during path walk. +* i_op can be loaded. +* When the destination dentry is reached, drop rcu there (ie. take d_lock, + verify d_seq, increment refcount). +* If seqlock verification fails anywhere along the path, do a full restart + of the path lookup in ref-walk mode. -ECHILD tends to be used (for want of + a better errno) to signal an rcu-walk failure. + +The cases where rcu-walk cannot continue are: +* NULL dentry (ie. any uncached path element) +* parent with d_inode->i_op->permission or ACLs +* dentries with d_revalidate +* Following links + +In future patches, permission checks and d_revalidate become rcu-walk aware. It +may be possible eventually to make following links rcu-walk aware. + +Uncached path elements will always require dropping to ref-walk mode, at the +very least because i_mutex needs to be grabbed, and objects allocated. + +Final note: +"store-free" path walking is not strictly store free. We take vfsmount lock +and refcounts (both of which can be made per-cpu), and we also store to the +stack (which is essentially CPU-local), and we also have to take locks and +refcount on final dentry. + +The point is that shared data, where practically possible, is not locked +or stored into. The result is massive improvements in performance and +scalability of path resolution. + + +Papers and other documentation on dcache locking +================================================ + +1. Scaling dcache with RCU (http://linuxjournal.com/article.php?sid=7124). + +2. http://lse.sourceforge.net/locking/dcache/dcache.html diff --git a/fs/dcache.c b/fs/dcache.c index dc0551c9755d..187fea040108 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -152,9 +152,23 @@ static void d_free(struct dentry *dentry) call_rcu(&dentry->d_u.d_rcu, __d_free); } +/** + * dentry_rcuwalk_barrier - invalidate in-progress rcu-walk lookups + * After this call, in-progress rcu-walk path lookup will fail. 
This + * should be called after unhashing, and after changing d_inode (if + * the dentry has not already been unhashed). + */ +static inline void dentry_rcuwalk_barrier(struct dentry *dentry) +{ + assert_spin_locked(&dentry->d_lock); + /* Go through a barrier */ + write_seqcount_barrier(&dentry->d_seq); +} + /* * Release the dentry's inode, using the filesystem - * d_iput() operation if defined. + * d_iput() operation if defined. Dentry has no refcount + * and is unhashed. */ static void dentry_iput(struct dentry * dentry) __releases(dentry->d_lock) @@ -178,6 +192,28 @@ static void dentry_iput(struct dentry * dentry) } } +/* + * Release the dentry's inode, using the filesystem + * d_iput() operation if defined. dentry remains in-use. + */ +static void dentry_unlink_inode(struct dentry * dentry) + __releases(dentry->d_lock) + __releases(dcache_inode_lock) +{ + struct inode *inode = dentry->d_inode; + dentry->d_inode = NULL; + list_del_init(&dentry->d_alias); + dentry_rcuwalk_barrier(dentry); + spin_unlock(&dentry->d_lock); + spin_unlock(&dcache_inode_lock); + if (!inode->i_nlink) + fsnotify_inoderemove(inode); + if (dentry->d_op && dentry->d_op->d_iput) + dentry->d_op->d_iput(dentry, inode); + else + iput(inode); +} + /* * dentry_lru_(add|del|move_tail) must be called with d_lock held. */ @@ -272,6 +308,7 @@ void __d_drop(struct dentry *dentry) spin_lock(&dcache_hash_lock); hlist_del_rcu(&dentry->d_hash); spin_unlock(&dcache_hash_lock); + dentry_rcuwalk_barrier(dentry); } } EXPORT_SYMBOL(__d_drop); @@ -309,6 +346,7 @@ relock: spin_unlock(&dcache_inode_lock); goto relock; } + if (ref) dentry->d_count--; /* if dentry was on the d_lru list delete it from there */ @@ -1221,6 +1259,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name) dentry->d_count = 1; dentry->d_flags = DCACHE_UNHASHED; spin_lock_init(&dentry->d_lock); + seqcount_init(&dentry->d_seq); dentry->d_inode = NULL; dentry->d_parent = NULL; dentry->d_sb = NULL; @@ -1269,6 +1308,7 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode) if (inode) list_add(&dentry->d_alias, &inode->i_dentry); dentry->d_inode = inode; + dentry_rcuwalk_barrier(dentry); spin_unlock(&dentry->d_lock); fsnotify_d_instantiate(dentry, inode); } @@ -1610,6 +1650,111 @@ err_out: } EXPORT_SYMBOL(d_add_ci); +/** + * __d_lookup_rcu - search for a dentry (racy, store-free) + * @parent: parent dentry + * @name: qstr of name we wish to find + * @seq: returns d_seq value at the point where the dentry was found + * @inode: returns dentry->d_inode when the inode was found valid. + * Returns: dentry, or NULL + * + * __d_lookup_rcu is the dcache lookup function for rcu-walk name + * resolution (store-free path walking) design described in + * Documentation/filesystems/path-lookup.txt. + * + * This is not to be used outside core vfs. + * + * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock + * held, and rcu_read_lock held. The returned dentry must not be stored into + * without taking d_lock and checking d_seq sequence count against @seq + * returned here. + * + * A refcount may be taken on the found dentry with the __d_rcu_to_refcount + * function. + * + * Alternatively, __d_lookup_rcu may be called again to look up the child of + * the returned dentry, so long as its parent's seqlock is checked after the + * child is looked up. Thus, an interlocking stepping of sequence lock checks + * is formed, giving integrity down the path walk. 
+ */ +struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name, + unsigned *seq, struct inode **inode) +{ + unsigned int len = name->len; + unsigned int hash = name->hash; + const unsigned char *str = name->name; + struct hlist_head *head = d_hash(parent, hash); + struct hlist_node *node; + struct dentry *dentry; + + /* + * Note: There is significant duplication with __d_lookup_rcu which is + * required to prevent single threaded performance regressions + * especially on architectures where smp_rmb (in seqcounts) are costly. + * Keep the two functions in sync. + */ + + /* + * The hash list is protected using RCU. + * + * Carefully use d_seq when comparing a candidate dentry, to avoid + * races with d_move(). + * + * It is possible that concurrent renames can mess up our list + * walk here and result in missing our dentry, resulting in the + * false-negative result. d_lookup() protects against concurrent + * renames using rename_lock seqlock. + * + * See Documentation/vfs/dcache-locking.txt for more details. + */ + hlist_for_each_entry_rcu(dentry, node, head, d_hash) { + struct inode *i; + const char *tname; + int tlen; + + if (dentry->d_name.hash != hash) + continue; + +seqretry: + *seq = read_seqcount_begin(&dentry->d_seq); + if (dentry->d_parent != parent) + continue; + if (d_unhashed(dentry)) + continue; + tlen = dentry->d_name.len; + tname = dentry->d_name.name; + i = dentry->d_inode; + /* + * This seqcount check is required to ensure name and + * len are loaded atomically, so as not to walk off the + * edge of memory when walking. If we could load this + * atomically some other way, we could drop this check. + */ + if (read_seqcount_retry(&dentry->d_seq, *seq)) + goto seqretry; + if (parent->d_op && parent->d_op->d_compare) { + if (parent->d_op->d_compare(parent, *inode, + dentry, i, + tlen, tname, name)) + continue; + } else { + if (tlen != len) + continue; + if (memcmp(tname, str, tlen)) + continue; + } + /* + * No extra seqcount check is required after the name + * compare. The caller must perform a seqcount check in + * order to do anything useful with the returned dentry + * anyway. + */ + *inode = i; + return dentry; + } + return NULL; +} + /** * d_lookup - search for a dentry * @parent: parent dentry @@ -1621,9 +1766,9 @@ EXPORT_SYMBOL(d_add_ci); * dentry is returned. The caller must use dput to free the entry when it has * finished using it. %NULL is returned if the dentry does not exist. */ -struct dentry * d_lookup(struct dentry * parent, struct qstr * name) +struct dentry *d_lookup(struct dentry *parent, struct qstr *name) { - struct dentry * dentry = NULL; + struct dentry *dentry; unsigned seq; do { @@ -1636,7 +1781,7 @@ struct dentry * d_lookup(struct dentry * parent, struct qstr * name) } EXPORT_SYMBOL(d_lookup); -/* +/** * __d_lookup - search for a dentry (racy) * @parent: parent dentry * @name: qstr of name we wish to find @@ -1651,16 +1796,23 @@ EXPORT_SYMBOL(d_lookup); * * __d_lookup callers must be commented. 
*/ -struct dentry * __d_lookup(struct dentry * parent, struct qstr * name) +struct dentry *__d_lookup(struct dentry *parent, struct qstr *name) { unsigned int len = name->len; unsigned int hash = name->hash; const unsigned char *str = name->name; struct hlist_head *head = d_hash(parent,hash); - struct dentry *found = NULL; struct hlist_node *node; + struct dentry *found = NULL; struct dentry *dentry; + /* + * Note: There is significant duplication with __d_lookup_rcu which is + * required to prevent single threaded performance regressions + * especially on architectures where smp_rmb (in seqcounts) are costly. + * Keep the two functions in sync. + */ + /* * The hash list is protected using RCU. * @@ -1677,24 +1829,15 @@ struct dentry * __d_lookup(struct dentry * parent, struct qstr * name) rcu_read_lock(); hlist_for_each_entry_rcu(dentry, node, head, d_hash) { - struct qstr *qstr; + const char *tname; + int tlen; if (dentry->d_name.hash != hash) continue; - if (dentry->d_parent != parent) - continue; spin_lock(&dentry->d_lock); - - /* - * Recheck the dentry after taking the lock - d_move may have - * changed things. Don't bother checking the hash because - * we're about to compare the whole name anyway. - */ if (dentry->d_parent != parent) goto next; - - /* non-existing due to RCU? */ if (d_unhashed(dentry)) goto next; @@ -1702,16 +1845,17 @@ struct dentry * __d_lookup(struct dentry * parent, struct qstr * name) * It is safe to compare names since d_move() cannot * change the qstr (protected by d_lock). */ - qstr = &dentry->d_name; + tlen = dentry->d_name.len; + tname = dentry->d_name.name; if (parent->d_op && parent->d_op->d_compare) { if (parent->d_op->d_compare(parent, parent->d_inode, dentry, dentry->d_inode, - qstr->len, qstr->name, name)) + tlen, tname, name)) goto next; } else { - if (qstr->len != len) + if (tlen != len) goto next; - if (memcmp(qstr->name, str, len)) + if (memcmp(tname, str, tlen)) goto next; } @@ -1821,7 +1965,7 @@ again: goto again; } dentry->d_flags &= ~DCACHE_CANT_MOUNT; - dentry_iput(dentry); + dentry_unlink_inode(dentry); fsnotify_nameremove(dentry, isdir); return; } @@ -1884,7 +2028,9 @@ void dentry_update_name_case(struct dentry *dentry, struct qstr *name) BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */ spin_lock(&dentry->d_lock); + write_seqcount_begin(&dentry->d_seq); memcpy((unsigned char *)dentry->d_name.name, name->name, name->len); + write_seqcount_end(&dentry->d_seq); spin_unlock(&dentry->d_lock); } EXPORT_SYMBOL(dentry_update_name_case); @@ -1997,6 +2143,9 @@ void d_move(struct dentry * dentry, struct dentry * target) dentry_lock_for_move(dentry, target); + write_seqcount_begin(&dentry->d_seq); + write_seqcount_begin(&target->d_seq); + /* Move the dentry to the target hash queue, if on different bucket */ spin_lock(&dcache_hash_lock); if (!d_unhashed(dentry)) @@ -2005,6 +2154,7 @@ void d_move(struct dentry * dentry, struct dentry * target) spin_unlock(&dcache_hash_lock); /* Unhash the target: dput() will then get rid of it */ + /* __d_drop does write_seqcount_barrier, but they're OK to nest. 
*/ __d_drop(target); list_del(&dentry->d_u.d_child); @@ -2028,6 +2178,9 @@ void d_move(struct dentry * dentry, struct dentry * target) list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs); + write_seqcount_end(&target->d_seq); + write_seqcount_end(&dentry->d_seq); + dentry_unlock_parents_for_move(dentry, target); spin_unlock(&target->d_lock); fsnotify_d_move(dentry); @@ -2110,6 +2263,9 @@ static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon) dentry_lock_for_move(anon, dentry); + write_seqcount_begin(&dentry->d_seq); + write_seqcount_begin(&anon->d_seq); + dparent = dentry->d_parent; aparent = anon->d_parent; @@ -2130,6 +2286,9 @@ static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon) else INIT_LIST_HEAD(&anon->d_u.d_child); + write_seqcount_end(&dentry->d_seq); + write_seqcount_end(&anon->d_seq); + dentry_unlock_parents_for_move(anon, dentry); spin_unlock(&dentry->d_lock); diff --git a/fs/filesystems.c b/fs/filesystems.c index 68ba492d8eef..751d6b255a12 100644 --- a/fs/filesystems.c +++ b/fs/filesystems.c @@ -115,6 +115,9 @@ int unregister_filesystem(struct file_system_type * fs) tmp = &(*tmp)->next; } write_unlock(&file_systems_lock); + + synchronize_rcu(); + return -EINVAL; } diff --git a/fs/namei.c b/fs/namei.c index 5642bc2be418..8d3f15b3a541 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -169,8 +169,8 @@ EXPORT_SYMBOL(putname); /* * This does basic POSIX ACL permission checking */ -static int acl_permission_check(struct inode *inode, int mask, - int (*check_acl)(struct inode *inode, int mask)) +static inline int __acl_permission_check(struct inode *inode, int mask, + int (*check_acl)(struct inode *inode, int mask), int rcu) { umode_t mode = inode->i_mode; @@ -180,9 +180,13 @@ static int acl_permission_check(struct inode *inode, int mask, mode >>= 6; else { if (IS_POSIXACL(inode) && (mode & S_IRWXG) && check_acl) { - int error = check_acl(inode, mask); - if (error != -EAGAIN) - return error; + if (rcu) { + return -ECHILD; + } else { + int error = check_acl(inode, mask); + if (error != -EAGAIN) + return error; + } } if (in_group_p(inode->i_gid)) @@ -197,6 +201,12 @@ static int acl_permission_check(struct inode *inode, int mask, return -EACCES; } +static inline int acl_permission_check(struct inode *inode, int mask, + int (*check_acl)(struct inode *inode, int mask)) +{ + return __acl_permission_check(inode, mask, check_acl, 0); +} + /** * generic_permission - check for access rights on a Posix-like filesystem * @inode: inode to check access rights for @@ -374,6 +384,173 @@ void path_put(struct path *path) } EXPORT_SYMBOL(path_put); +/** + * nameidata_drop_rcu - drop this nameidata out of rcu-walk + * @nd: nameidata pathwalk data to drop + * @Returns: 0 on success, -ECHLID on failure + * + * Path walking has 2 modes, rcu-walk and ref-walk (see + * Documentation/filesystems/path-lookup.txt). __drop_rcu* functions attempt + * to drop out of rcu-walk mode and take normal reference counts on dentries + * and vfsmounts to transition to rcu-walk mode. __drop_rcu* functions take + * refcounts at the last known good point before rcu-walk got stuck, so + * ref-walk may continue from there. If this is not successful (eg. a seqcount + * has changed), then failure is returned and path walk restarts from the + * beginning in ref-walk mode. + * + * nameidata_drop_rcu attempts to drop the current nd->path and nd->root into + * ref-walk. Must be called from rcu-walk context. 
+ */ +static int nameidata_drop_rcu(struct nameidata *nd) +{ + struct fs_struct *fs = current->fs; + struct dentry *dentry = nd->path.dentry; + + BUG_ON(!(nd->flags & LOOKUP_RCU)); + if (nd->root.mnt) { + spin_lock(&fs->lock); + if (nd->root.mnt != fs->root.mnt || + nd->root.dentry != fs->root.dentry) + goto err_root; + } + spin_lock(&dentry->d_lock); + if (!__d_rcu_to_refcount(dentry, nd->seq)) + goto err; + BUG_ON(nd->inode != dentry->d_inode); + spin_unlock(&dentry->d_lock); + if (nd->root.mnt) { + path_get(&nd->root); + spin_unlock(&fs->lock); + } + mntget(nd->path.mnt); + + rcu_read_unlock(); + br_read_unlock(vfsmount_lock); + nd->flags &= ~LOOKUP_RCU; + return 0; +err: + spin_unlock(&dentry->d_lock); +err_root: + if (nd->root.mnt) + spin_unlock(&fs->lock); + return -ECHILD; +} + +/* Try to drop out of rcu-walk mode if we were in it, otherwise do nothing. */ +static inline int nameidata_drop_rcu_maybe(struct nameidata *nd) +{ + if (nd->flags & LOOKUP_RCU) + return nameidata_drop_rcu(nd); + return 0; +} + +/** + * nameidata_dentry_drop_rcu - drop nameidata and dentry out of rcu-walk + * @nd: nameidata pathwalk data to drop + * @dentry: dentry to drop + * @Returns: 0 on success, -ECHLID on failure + * + * nameidata_dentry_drop_rcu attempts to drop the current nd->path and nd->root, + * and dentry into ref-walk. @dentry must be a path found by a do_lookup call on + * @nd. Must be called from rcu-walk context. + */ +static int nameidata_dentry_drop_rcu(struct nameidata *nd, struct dentry *dentry) +{ + struct fs_struct *fs = current->fs; + struct dentry *parent = nd->path.dentry; + + BUG_ON(!(nd->flags & LOOKUP_RCU)); + if (nd->root.mnt) { + spin_lock(&fs->lock); + if (nd->root.mnt != fs->root.mnt || + nd->root.dentry != fs->root.dentry) + goto err_root; + } + spin_lock(&parent->d_lock); + spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); + if (!__d_rcu_to_refcount(dentry, nd->seq)) + goto err; + /* + * If the sequence check on the child dentry passed, then the child has + * not been removed from its parent. This means the parent dentry must + * be valid and able to take a reference at this point. + */ + BUG_ON(!IS_ROOT(dentry) && dentry->d_parent != parent); + BUG_ON(!parent->d_count); + parent->d_count++; + spin_unlock(&dentry->d_lock); + spin_unlock(&parent->d_lock); + if (nd->root.mnt) { + path_get(&nd->root); + spin_unlock(&fs->lock); + } + mntget(nd->path.mnt); + + rcu_read_unlock(); + br_read_unlock(vfsmount_lock); + nd->flags &= ~LOOKUP_RCU; + return 0; +err: + spin_unlock(&dentry->d_lock); + spin_unlock(&parent->d_lock); +err_root: + if (nd->root.mnt) + spin_unlock(&fs->lock); + return -ECHILD; +} + +/* Try to drop out of rcu-walk mode if we were in it, otherwise do nothing. */ +static inline int nameidata_dentry_drop_rcu_maybe(struct nameidata *nd, struct dentry *dentry) +{ + if (nd->flags & LOOKUP_RCU) + return nameidata_dentry_drop_rcu(nd, dentry); + return 0; +} + +/** + * nameidata_drop_rcu_last - drop nameidata ending path walk out of rcu-walk + * @nd: nameidata pathwalk data to drop + * @Returns: 0 on success, -ECHLID on failure + * + * nameidata_drop_rcu_last attempts to drop the current nd->path into ref-walk. + * nd->path should be the final element of the lookup, so nd->root is discarded. + * Must be called from rcu-walk context. 
+ */ +static int nameidata_drop_rcu_last(struct nameidata *nd) +{ + struct dentry *dentry = nd->path.dentry; + + BUG_ON(!(nd->flags & LOOKUP_RCU)); + nd->flags &= ~LOOKUP_RCU; + nd->root.mnt = NULL; + spin_lock(&dentry->d_lock); + if (!__d_rcu_to_refcount(dentry, nd->seq)) + goto err_unlock; + BUG_ON(nd->inode != dentry->d_inode); + spin_unlock(&dentry->d_lock); + + mntget(nd->path.mnt); + + rcu_read_unlock(); + br_read_unlock(vfsmount_lock); + + return 0; + +err_unlock: + spin_unlock(&dentry->d_lock); + rcu_read_unlock(); + br_read_unlock(vfsmount_lock); + return -ECHILD; +} + +/* Try to drop out of rcu-walk mode if we were in it, otherwise do nothing. */ +static inline int nameidata_drop_rcu_last_maybe(struct nameidata *nd) +{ + if (likely(nd->flags & LOOKUP_RCU)) + return nameidata_drop_rcu_last(nd); + return 0; +} + /** * release_open_intent - free up open intent resources * @nd: pointer to nameidata @@ -459,26 +636,40 @@ force_reval_path(struct path *path, struct nameidata *nd) * short-cut DAC fails, then call ->permission() to do more * complete permission check. */ -static int exec_permission(struct inode *inode) +static inline int __exec_permission(struct inode *inode, int rcu) { int ret; if (inode->i_op->permission) { + if (rcu) + return -ECHILD; ret = inode->i_op->permission(inode, MAY_EXEC); if (!ret) goto ok; return ret; } - ret = acl_permission_check(inode, MAY_EXEC, inode->i_op->check_acl); + ret = __acl_permission_check(inode, MAY_EXEC, inode->i_op->check_acl, rcu); if (!ret) goto ok; + if (rcu && ret == -ECHILD) + return ret; if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH)) goto ok; return ret; ok: - return security_inode_permission(inode, MAY_EXEC); + return security_inode_exec_permission(inode, rcu); +} + +static int exec_permission(struct inode *inode) +{ + return __exec_permission(inode, 0); +} + +static int exec_permission_rcu(struct inode *inode) +{ + return __exec_permission(inode, 1); } static __always_inline void set_root(struct nameidata *nd) @@ -489,8 +680,20 @@ static __always_inline void set_root(struct nameidata *nd) static int link_path_walk(const char *, struct nameidata *); +static __always_inline void set_root_rcu(struct nameidata *nd) +{ + if (!nd->root.mnt) { + struct fs_struct *fs = current->fs; + spin_lock(&fs->lock); + nd->root = fs->root; + spin_unlock(&fs->lock); + } +} + static __always_inline int __vfs_follow_link(struct nameidata *nd, const char *link) { + int ret; + if (IS_ERR(link)) goto fail; @@ -500,8 +703,10 @@ static __always_inline int __vfs_follow_link(struct nameidata *nd, const char *l nd->path = nd->root; path_get(&nd->root); } + nd->inode = nd->path.dentry->d_inode; - return link_path_walk(link, nd); + ret = link_path_walk(link, nd); + return ret; fail: path_put(&nd->path); return PTR_ERR(link); @@ -516,11 +721,12 @@ static void path_put_conditional(struct path *path, struct nameidata *nd) static inline void path_to_nameidata(struct path *path, struct nameidata *nd) { - dput(nd->path.dentry); - if (nd->path.mnt != path->mnt) { - mntput(nd->path.mnt); - nd->path.mnt = path->mnt; + if (!(nd->flags & LOOKUP_RCU)) { + dput(nd->path.dentry); + if (nd->path.mnt != path->mnt) + mntput(nd->path.mnt); } + nd->path.mnt = path->mnt; nd->path.dentry = path->dentry; } @@ -535,9 +741,11 @@ __do_follow_link(struct path *path, struct nameidata *nd, void **p) if (path->mnt != nd->path.mnt) { path_to_nameidata(path, nd); + nd->inode = nd->path.dentry->d_inode; dget(dentry); } mntget(path->mnt); + nd->last_type = LAST_BIND; *p = 
dentry->d_inode->i_op->follow_link(dentry, nd); error = PTR_ERR(*p); @@ -591,6 +799,20 @@ loop: return err; } +static int follow_up_rcu(struct path *path) +{ + struct vfsmount *parent; + struct dentry *mountpoint; + + parent = path->mnt->mnt_parent; + if (parent == path->mnt) + return 0; + mountpoint = path->mnt->mnt_mountpoint; + path->dentry = mountpoint; + path->mnt = parent; + return 1; +} + int follow_up(struct path *path) { struct vfsmount *parent; @@ -615,6 +837,21 @@ int follow_up(struct path *path) /* * serialization is taken care of in namespace.c */ +static void __follow_mount_rcu(struct nameidata *nd, struct path *path, + struct inode **inode) +{ + while (d_mountpoint(path->dentry)) { + struct vfsmount *mounted; + mounted = __lookup_mnt(path->mnt, path->dentry, 1); + if (!mounted) + return; + path->mnt = mounted; + path->dentry = mounted->mnt_root; + nd->seq = read_seqcount_begin(&path->dentry->d_seq); + *inode = path->dentry->d_inode; + } +} + static int __follow_mount(struct path *path) { int res = 0; @@ -660,7 +897,42 @@ int follow_down(struct path *path) return 0; } -static __always_inline void follow_dotdot(struct nameidata *nd) +static int follow_dotdot_rcu(struct nameidata *nd) +{ + struct inode *inode = nd->inode; + + set_root_rcu(nd); + + while(1) { + if (nd->path.dentry == nd->root.dentry && + nd->path.mnt == nd->root.mnt) { + break; + } + if (nd->path.dentry != nd->path.mnt->mnt_root) { + struct dentry *old = nd->path.dentry; + struct dentry *parent = old->d_parent; + unsigned seq; + + seq = read_seqcount_begin(&parent->d_seq); + if (read_seqcount_retry(&old->d_seq, nd->seq)) + return -ECHILD; + inode = parent->d_inode; + nd->path.dentry = parent; + nd->seq = seq; + break; + } + if (!follow_up_rcu(&nd->path)) + break; + nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq); + inode = nd->path.dentry->d_inode; + } + __follow_mount_rcu(nd, &nd->path, &inode); + nd->inode = inode; + + return 0; +} + +static void follow_dotdot(struct nameidata *nd) { set_root(nd); @@ -681,6 +953,7 @@ static __always_inline void follow_dotdot(struct nameidata *nd) break; } follow_mount(&nd->path); + nd->inode = nd->path.dentry->d_inode; } /* @@ -718,18 +991,17 @@ static struct dentry *d_alloc_and_lookup(struct dentry *parent, * It _is_ time-critical. */ static int do_lookup(struct nameidata *nd, struct qstr *name, - struct path *path) + struct path *path, struct inode **inode) { struct vfsmount *mnt = nd->path.mnt; - struct dentry *dentry, *parent; + struct dentry *dentry, *parent = nd->path.dentry; struct inode *dir; /* * See if the low-level filesystem might want * to use its own hash.. */ - if (nd->path.dentry->d_op && nd->path.dentry->d_op->d_hash) { - int err = nd->path.dentry->d_op->d_hash(nd->path.dentry, - nd->path.dentry->d_inode, name); + if (parent->d_op && parent->d_op->d_hash) { + int err = parent->d_op->d_hash(parent, nd->inode, name); if (err < 0) return err; } @@ -739,21 +1011,48 @@ static int do_lookup(struct nameidata *nd, struct qstr *name, * of a false negative due to a concurrent rename, we're going to * do the non-racy lookup, below. 
*/ - dentry = __d_lookup(nd->path.dentry, name); - if (!dentry) - goto need_lookup; + if (nd->flags & LOOKUP_RCU) { + unsigned seq; + + *inode = nd->inode; + dentry = __d_lookup_rcu(parent, name, &seq, inode); + if (!dentry) { + if (nameidata_drop_rcu(nd)) + return -ECHILD; + goto need_lookup; + } + /* Memory barrier in read_seqcount_begin of child is enough */ + if (__read_seqcount_retry(&parent->d_seq, nd->seq)) + return -ECHILD; + + nd->seq = seq; + if (dentry->d_op && dentry->d_op->d_revalidate) { + /* We commonly drop rcu-walk here */ + if (nameidata_dentry_drop_rcu(nd, dentry)) + return -ECHILD; + goto need_revalidate; + } + path->mnt = mnt; + path->dentry = dentry; + __follow_mount_rcu(nd, path, inode); + } else { + dentry = __d_lookup(parent, name); + if (!dentry) + goto need_lookup; found: - if (dentry->d_op && dentry->d_op->d_revalidate) - goto need_revalidate; + if (dentry->d_op && dentry->d_op->d_revalidate) + goto need_revalidate; done: - path->mnt = mnt; - path->dentry = dentry; - __follow_mount(path); + path->mnt = mnt; + path->dentry = dentry; + __follow_mount(path); + *inode = path->dentry->d_inode; + } return 0; need_lookup: - parent = nd->path.dentry; dir = parent->d_inode; + BUG_ON(nd->inode != dir); mutex_lock(&dir->i_mutex); /* @@ -815,7 +1114,6 @@ static inline int follow_on_final(struct inode *inode, unsigned lookup_flags) static int link_path_walk(const char *name, struct nameidata *nd) { struct path next; - struct inode *inode; int err; unsigned int lookup_flags = nd->flags; @@ -824,18 +1122,28 @@ static int link_path_walk(const char *name, struct nameidata *nd) if (!*name) goto return_reval; - inode = nd->path.dentry->d_inode; if (nd->depth) lookup_flags = LOOKUP_FOLLOW | (nd->flags & LOOKUP_CONTINUE); /* At this point we know we have a real path component. */ for(;;) { + struct inode *inode; unsigned long hash; struct qstr this; unsigned int c; nd->flags |= LOOKUP_CONTINUE; - err = exec_permission(inode); + if (nd->flags & LOOKUP_RCU) { + err = exec_permission_rcu(nd->inode); + if (err == -ECHILD) { + if (nameidata_drop_rcu(nd)) + return -ECHILD; + goto exec_again; + } + } else { +exec_again: + err = exec_permission(nd->inode); + } if (err) break; @@ -866,37 +1174,44 @@ static int link_path_walk(const char *name, struct nameidata *nd) if (this.name[0] == '.') switch (this.len) { default: break; - case 2: + case 2: if (this.name[1] != '.') break; - follow_dotdot(nd); - inode = nd->path.dentry->d_inode; + if (nd->flags & LOOKUP_RCU) { + if (follow_dotdot_rcu(nd)) + return -ECHILD; + } else + follow_dotdot(nd); /* fallthrough */ case 1: continue; } /* This does the actual lookups.. 
*/ - err = do_lookup(nd, &this, &next); + err = do_lookup(nd, &this, &next, &inode); if (err) break; - err = -ENOENT; - inode = next.dentry->d_inode; if (!inode) goto out_dput; if (inode->i_op->follow_link) { + /* We commonly drop rcu-walk here */ + if (nameidata_dentry_drop_rcu_maybe(nd, next.dentry)) + return -ECHILD; + BUG_ON(inode != next.dentry->d_inode); err = do_follow_link(&next, nd); if (err) goto return_err; + nd->inode = nd->path.dentry->d_inode; err = -ENOENT; - inode = nd->path.dentry->d_inode; - if (!inode) + if (!nd->inode) break; - } else + } else { path_to_nameidata(&next, nd); + nd->inode = inode; + } err = -ENOTDIR; - if (!inode->i_op->lookup) + if (!nd->inode->i_op->lookup) break; continue; /* here ends the main loop */ @@ -911,32 +1226,39 @@ last_component: if (this.name[0] == '.') switch (this.len) { default: break; - case 2: + case 2: if (this.name[1] != '.') break; - follow_dotdot(nd); - inode = nd->path.dentry->d_inode; + if (nd->flags & LOOKUP_RCU) { + if (follow_dotdot_rcu(nd)) + return -ECHILD; + } else + follow_dotdot(nd); /* fallthrough */ case 1: goto return_reval; } - err = do_lookup(nd, &this, &next); + err = do_lookup(nd, &this, &next, &inode); if (err) break; - inode = next.dentry->d_inode; if (follow_on_final(inode, lookup_flags)) { + if (nameidata_dentry_drop_rcu_maybe(nd, next.dentry)) + return -ECHILD; + BUG_ON(inode != next.dentry->d_inode); err = do_follow_link(&next, nd); if (err) goto return_err; - inode = nd->path.dentry->d_inode; - } else + nd->inode = nd->path.dentry->d_inode; + } else { path_to_nameidata(&next, nd); + nd->inode = inode; + } err = -ENOENT; - if (!inode) + if (!nd->inode) break; if (lookup_flags & LOOKUP_DIRECTORY) { err = -ENOTDIR; - if (!inode->i_op->lookup) + if (!nd->inode->i_op->lookup) break; } goto return_base; @@ -958,6 +1280,8 @@ return_reval: */ if (nd->path.dentry && nd->path.dentry->d_sb && (nd->path.dentry->d_sb->s_type->fs_flags & FS_REVAL_DOT)) { + if (nameidata_drop_rcu_maybe(nd)) + return -ECHILD; err = -ESTALE; /* Note: we do not d_invalidate() */ if (!nd->path.dentry->d_op->d_revalidate( @@ -965,16 +1289,34 @@ return_reval: break; } return_base: + if (nameidata_drop_rcu_last_maybe(nd)) + return -ECHILD; return 0; out_dput: - path_put_conditional(&next, nd); + if (!(nd->flags & LOOKUP_RCU)) + path_put_conditional(&next, nd); break; } - path_put(&nd->path); + if (!(nd->flags & LOOKUP_RCU)) + path_put(&nd->path); return_err: return err; } +static inline int path_walk_rcu(const char *name, struct nameidata *nd) +{ + current->total_link_count = 0; + + return link_path_walk(name, nd); +} + +static inline int path_walk_simple(const char *name, struct nameidata *nd) +{ + current->total_link_count = 0; + + return link_path_walk(name, nd); +} + static int path_walk(const char *name, struct nameidata *nd) { struct path save = nd->path; @@ -1000,6 +1342,88 @@ static int path_walk(const char *name, struct nameidata *nd) return result; } +static void path_finish_rcu(struct nameidata *nd) +{ + if (nd->flags & LOOKUP_RCU) { + /* RCU dangling. Cancel it. */ + nd->flags &= ~LOOKUP_RCU; + nd->root.mnt = NULL; + rcu_read_unlock(); + br_read_unlock(vfsmount_lock); + } + if (nd->file) + fput(nd->file); +} + +static int path_init_rcu(int dfd, const char *name, unsigned int flags, struct nameidata *nd) +{ + int retval = 0; + int fput_needed; + struct file *file; + + nd->last_type = LAST_ROOT; /* if there are only slashes... 
*/ + nd->flags = flags | LOOKUP_RCU; + nd->depth = 0; + nd->root.mnt = NULL; + nd->file = NULL; + + if (*name=='/') { + struct fs_struct *fs = current->fs; + + br_read_lock(vfsmount_lock); + rcu_read_lock(); + + spin_lock(&fs->lock); + nd->root = fs->root; + nd->path = nd->root; + nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq); + spin_unlock(&fs->lock); + + } else if (dfd == AT_FDCWD) { + struct fs_struct *fs = current->fs; + + br_read_lock(vfsmount_lock); + rcu_read_lock(); + + spin_lock(&fs->lock); + nd->path = fs->pwd; + nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq); + spin_unlock(&fs->lock); + } else { + struct dentry *dentry; + + file = fget_light(dfd, &fput_needed); + retval = -EBADF; + if (!file) + goto out_fail; + + dentry = file->f_path.dentry; + + retval = -ENOTDIR; + if (!S_ISDIR(dentry->d_inode->i_mode)) + goto fput_fail; + + retval = file_permission(file, MAY_EXEC); + if (retval) + goto fput_fail; + + nd->path = file->f_path; + if (fput_needed) + nd->file = file; + + nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq); + br_read_lock(vfsmount_lock); + rcu_read_lock(); + } + nd->inode = nd->path.dentry->d_inode; + return 0; + +fput_fail: + fput_light(file, fput_needed); +out_fail: + return retval; +} + static int path_init(int dfd, const char *name, unsigned int flags, struct nameidata *nd) { int retval = 0; @@ -1040,6 +1464,7 @@ static int path_init(int dfd, const char *name, unsigned int flags, struct namei fput_light(file, fput_needed); } + nd->inode = nd->path.dentry->d_inode; return 0; fput_fail: @@ -1052,16 +1477,53 @@ out_fail: static int do_path_lookup(int dfd, const char *name, unsigned int flags, struct nameidata *nd) { - int retval = path_init(dfd, name, flags, nd); - if (!retval) - retval = path_walk(name, nd); - if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry && - nd->path.dentry->d_inode)) - audit_inode(name, nd->path.dentry); + int retval; + + /* + * Path walking is largely split up into 2 different synchronisation + * schemes, rcu-walk and ref-walk (explained in + * Documentation/filesystems/path-lookup.txt). These share much of the + * path walk code, but some things particularly setup, cleanup, and + * following mounts are sufficiently divergent that functions are + * duplicated. Typically there is a function foo(), and its RCU + * analogue, foo_rcu(). + * + * -ECHILD is the error number of choice (just to avoid clashes) that + * is returned if some aspect of an rcu-walk fails. Such an error must + * be handled by restarting a traditional ref-walk (which will always + * be able to complete). 
+ */ + retval = path_init_rcu(dfd, name, flags, nd); + if (unlikely(retval)) + return retval; + retval = path_walk_rcu(name, nd); + path_finish_rcu(nd); if (nd->root.mnt) { path_put(&nd->root); nd->root.mnt = NULL; } + + if (unlikely(retval == -ECHILD || retval == -ESTALE)) { + /* slower, locked walk */ + if (retval == -ESTALE) + flags |= LOOKUP_REVAL; + retval = path_init(dfd, name, flags, nd); + if (unlikely(retval)) + return retval; + retval = path_walk(name, nd); + if (nd->root.mnt) { + path_put(&nd->root); + nd->root.mnt = NULL; + } + } + + if (likely(!retval)) { + if (unlikely(!audit_dummy_context())) { + if (nd->path.dentry && nd->inode) + audit_inode(name, nd->path.dentry); + } + } + return retval; } @@ -1104,10 +1566,11 @@ int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt, path_get(&nd->path); nd->root = nd->path; path_get(&nd->root); + nd->inode = nd->path.dentry->d_inode; retval = path_walk(name, nd); if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry && - nd->path.dentry->d_inode)) + nd->inode)) audit_inode(name, nd->path.dentry); path_put(&nd->root); @@ -1488,6 +1951,7 @@ out_unlock: mutex_unlock(&dir->d_inode->i_mutex); dput(nd->path.dentry); nd->path.dentry = path->dentry; + if (error) return error; /* Don't check for write permission, don't truncate */ @@ -1582,6 +2046,9 @@ exit: return ERR_PTR(error); } +/* + * Handle O_CREAT case for do_filp_open + */ static struct file *do_last(struct nameidata *nd, struct path *path, int open_flag, int acc_mode, int mode, const char *pathname) @@ -1603,42 +2070,16 @@ static struct file *do_last(struct nameidata *nd, struct path *path, } /* fallthrough */ case LAST_ROOT: - if (open_flag & O_CREAT) - goto exit; - /* fallthrough */ + goto exit; case LAST_BIND: audit_inode(pathname, dir); goto ok; } /* trailing slashes? */ - if (nd->last.name[nd->last.len]) { - if (open_flag & O_CREAT) - goto exit; - nd->flags |= LOOKUP_DIRECTORY | LOOKUP_FOLLOW; - } - - /* just plain open? 
*/ - if (!(open_flag & O_CREAT)) { - error = do_lookup(nd, &nd->last, path); - if (error) - goto exit; - error = -ENOENT; - if (!path->dentry->d_inode) - goto exit_dput; - if (path->dentry->d_inode->i_op->follow_link) - return NULL; - error = -ENOTDIR; - if (nd->flags & LOOKUP_DIRECTORY) { - if (!path->dentry->d_inode->i_op->lookup) - goto exit_dput; - } - path_to_nameidata(path, nd); - audit_inode(pathname, nd->path.dentry); - goto ok; - } + if (nd->last.name[nd->last.len]) + goto exit; - /* OK, it's O_CREAT */ mutex_lock(&dir->d_inode->i_mutex); path->dentry = lookup_hash(nd); @@ -1709,8 +2150,9 @@ static struct file *do_last(struct nameidata *nd, struct path *path, return NULL; path_to_nameidata(path, nd); + nd->inode = path->dentry->d_inode; error = -EISDIR; - if (S_ISDIR(path->dentry->d_inode->i_mode)) + if (S_ISDIR(nd->inode->i_mode)) goto exit; ok: filp = finish_open(nd, open_flag, acc_mode); @@ -1741,7 +2183,7 @@ struct file *do_filp_open(int dfd, const char *pathname, struct path path; int count = 0; int flag = open_to_namei_flags(open_flag); - int force_reval = 0; + int flags; if (!(open_flag & O_CREAT)) mode = 0; @@ -1770,54 +2212,84 @@ struct file *do_filp_open(int dfd, const char *pathname, if (open_flag & O_APPEND) acc_mode |= MAY_APPEND; - /* find the parent */ -reval: - error = path_init(dfd, pathname, LOOKUP_PARENT, &nd); + flags = LOOKUP_OPEN; + if (open_flag & O_CREAT) { + flags |= LOOKUP_CREATE; + if (open_flag & O_EXCL) + flags |= LOOKUP_EXCL; + } + if (open_flag & O_DIRECTORY) + flags |= LOOKUP_DIRECTORY; + if (!(open_flag & O_NOFOLLOW)) + flags |= LOOKUP_FOLLOW; + + filp = get_empty_filp(); + if (!filp) + return ERR_PTR(-ENFILE); + + filp->f_flags = open_flag; + nd.intent.open.file = filp; + nd.intent.open.flags = flag; + nd.intent.open.create_mode = mode; + + if (open_flag & O_CREAT) + goto creat; + + /* !O_CREAT, simple open */ + error = do_path_lookup(dfd, pathname, flags, &nd); + if (unlikely(error)) + goto out_filp; + error = -ELOOP; + if (!(nd.flags & LOOKUP_FOLLOW)) { + if (nd.inode->i_op->follow_link) + goto out_path; + } + error = -ENOTDIR; + if (nd.flags & LOOKUP_DIRECTORY) { + if (!nd.inode->i_op->lookup) + goto out_path; + } + audit_inode(pathname, nd.path.dentry); + filp = finish_open(&nd, open_flag, acc_mode); + return filp; + +creat: + /* OK, have to create the file. Find the parent. */ + error = path_init_rcu(dfd, pathname, + LOOKUP_PARENT | (flags & LOOKUP_REVAL), &nd); if (error) - return ERR_PTR(error); - if (force_reval) - nd.flags |= LOOKUP_REVAL; + goto out_filp; + error = path_walk_rcu(pathname, &nd); + path_finish_rcu(&nd); + if (unlikely(error == -ECHILD || error == -ESTALE)) { + /* slower, locked walk */ + if (error == -ESTALE) { +reval: + flags |= LOOKUP_REVAL; + } + error = path_init(dfd, pathname, + LOOKUP_PARENT | (flags & LOOKUP_REVAL), &nd); + if (error) + goto out_filp; - current->total_link_count = 0; - error = link_path_walk(pathname, &nd); - if (error) { - filp = ERR_PTR(error); - goto out; + error = path_walk_simple(pathname, &nd); } - if (unlikely(!audit_dummy_context()) && (open_flag & O_CREAT)) + if (unlikely(error)) + goto out_filp; + if (unlikely(!audit_dummy_context())) audit_inode(pathname, nd.path.dentry); /* * We have the parent and last component. 
*/ - - error = -ENFILE; - filp = get_empty_filp(); - if (filp == NULL) - goto exit_parent; - nd.intent.open.file = filp; - filp->f_flags = open_flag; - nd.intent.open.flags = flag; - nd.intent.open.create_mode = mode; - nd.flags &= ~LOOKUP_PARENT; - nd.flags |= LOOKUP_OPEN; - if (open_flag & O_CREAT) { - nd.flags |= LOOKUP_CREATE; - if (open_flag & O_EXCL) - nd.flags |= LOOKUP_EXCL; - } - if (open_flag & O_DIRECTORY) - nd.flags |= LOOKUP_DIRECTORY; - if (!(open_flag & O_NOFOLLOW)) - nd.flags |= LOOKUP_FOLLOW; + nd.flags = flags; filp = do_last(&nd, &path, open_flag, acc_mode, mode, pathname); while (unlikely(!filp)) { /* trailing symlink */ struct path holder; - struct inode *inode = path.dentry->d_inode; void *cookie; error = -ELOOP; /* S_ISDIR part is a temporary automount kludge */ - if (!(nd.flags & LOOKUP_FOLLOW) && !S_ISDIR(inode->i_mode)) + if (!(nd.flags & LOOKUP_FOLLOW) && !S_ISDIR(nd.inode->i_mode)) goto exit_dput; if (count++ == 32) goto exit_dput; @@ -1838,36 +2310,33 @@ reval: goto exit_dput; error = __do_follow_link(&path, &nd, &cookie); if (unlikely(error)) { + if (!IS_ERR(cookie) && nd.inode->i_op->put_link) + nd.inode->i_op->put_link(path.dentry, &nd, cookie); /* nd.path had been dropped */ - if (!IS_ERR(cookie) && inode->i_op->put_link) - inode->i_op->put_link(path.dentry, &nd, cookie); - path_put(&path); - release_open_intent(&nd); - filp = ERR_PTR(error); - goto out; + nd.path = path; + goto out_path; } holder = path; nd.flags &= ~LOOKUP_PARENT; filp = do_last(&nd, &path, open_flag, acc_mode, mode, pathname); - if (inode->i_op->put_link) - inode->i_op->put_link(holder.dentry, &nd, cookie); + if (nd.inode->i_op->put_link) + nd.inode->i_op->put_link(holder.dentry, &nd, cookie); path_put(&holder); } out: if (nd.root.mnt) path_put(&nd.root); - if (filp == ERR_PTR(-ESTALE) && !force_reval) { - force_reval = 1; + if (filp == ERR_PTR(-ESTALE) && !(flags & LOOKUP_REVAL)) goto reval; - } return filp; exit_dput: path_put_conditional(&path, &nd); +out_path: + path_put(&nd.path); +out_filp: if (!IS_ERR(nd.intent.open.file)) release_open_intent(&nd); -exit_parent: - path_put(&nd.path); filp = ERR_PTR(error); goto out; } diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index ae4b0fd9033f..998e3a715bcc 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -402,6 +402,10 @@ static int proc_sys_compare(const struct dentry *parent, const struct dentry *dentry, const struct inode *inode, unsigned int len, const char *str, const struct qstr *name) { + /* Although proc doesn't have negative dentries, rcu-walk means + * that inode here can be NULL */ + if (!inode) + return 0; if (name->len != len) return 1; if (memcmp(name->name, str, len)) diff --git a/include/linux/dcache.h b/include/linux/dcache.h index ca648685f0cc..c2e7390289cc 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -5,6 +5,7 @@ #include #include #include +#include #include #include @@ -90,6 +91,7 @@ struct dentry { unsigned int d_count; /* protected by d_lock */ unsigned int d_flags; /* protected by d_lock */ spinlock_t d_lock; /* per dentry lock */ + seqcount_t d_seq; /* per dentry seqlock */ int d_mounted; struct inode *d_inode; /* Where the name belongs to - NULL is * negative */ @@ -266,9 +268,33 @@ extern void d_move(struct dentry *, struct dentry *); extern struct dentry *d_ancestor(struct dentry *, struct dentry *); /* appendix may either be NULL or be used for transname suffixes */ -extern struct dentry * d_lookup(struct dentry *, struct qstr *); -extern struct dentry * 
__d_lookup(struct dentry *, struct qstr *); -extern struct dentry * d_hash_and_lookup(struct dentry *, struct qstr *); +extern struct dentry *d_lookup(struct dentry *, struct qstr *); +extern struct dentry *d_hash_and_lookup(struct dentry *, struct qstr *); +extern struct dentry *__d_lookup(struct dentry *, struct qstr *); +extern struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name, + unsigned *seq, struct inode **inode); + +/** + * __d_rcu_to_refcount - take a refcount on dentry if sequence check is ok + * @dentry: dentry to take a ref on + * @seq: seqcount to verify against + * @Returns: 0 on failure, else 1. + * + * __d_rcu_to_refcount operates on a dentry,seq pair that was returned + * by __d_lookup_rcu, to get a reference on an rcu-walk dentry. + */ +static inline int __d_rcu_to_refcount(struct dentry *dentry, unsigned seq) +{ + int ret = 0; + + assert_spin_locked(&dentry->d_lock); + if (!read_seqcount_retry(&dentry->d_seq, seq)) { + ret = 1; + dentry->d_count++; + } + + return ret; +} /* validate "insecure" dentry pointer */ extern int d_validate(struct dentry *, struct dentry *); diff --git a/include/linux/namei.h b/include/linux/namei.h index aec730b53935..18d06add0a40 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -19,7 +19,10 @@ struct nameidata { struct path path; struct qstr last; struct path root; + struct file *file; + struct inode *inode; /* path.dentry.d_inode */ unsigned int flags; + unsigned seq; int last_type; unsigned depth; char *saved_names[MAX_NESTED_LINKS + 1]; @@ -43,11 +46,13 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND}; * - internal "there are more path components" flag * - dentry cache is untrusted; force a real lookup */ -#define LOOKUP_FOLLOW 1 -#define LOOKUP_DIRECTORY 2 -#define LOOKUP_CONTINUE 4 -#define LOOKUP_PARENT 16 -#define LOOKUP_REVAL 64 +#define LOOKUP_FOLLOW 0x0001 +#define LOOKUP_DIRECTORY 0x0002 +#define LOOKUP_CONTINUE 0x0004 + +#define LOOKUP_PARENT 0x0010 +#define LOOKUP_REVAL 0x0020 +#define LOOKUP_RCU 0x0040 /* * Intent data */ diff --git a/include/linux/security.h b/include/linux/security.h index fd4d55fb8845..ed95401970c7 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -457,7 +457,6 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * called when the actual read/write operations are performed. * @inode contains the inode structure to check. * @mask contains the permission mask. - * @nd contains the nameidata (may be NULL). * Return 0 if permission is granted. * @inode_setattr: * Check permission before setting file attributes. 
Note that the kernel @@ -1713,6 +1712,7 @@ int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry, int security_inode_readlink(struct dentry *dentry); int security_inode_follow_link(struct dentry *dentry, struct nameidata *nd); int security_inode_permission(struct inode *inode, int mask); +int security_inode_exec_permission(struct inode *inode, unsigned int flags); int security_inode_setattr(struct dentry *dentry, struct iattr *attr); int security_inode_getattr(struct vfsmount *mnt, struct dentry *dentry); int security_inode_setxattr(struct dentry *dentry, const char *name, @@ -2102,6 +2102,12 @@ static inline int security_inode_permission(struct inode *inode, int mask) return 0; } +static inline int security_inode_exec_permission(struct inode *inode, + unsigned int flags) +{ + return 0; +} + static inline int security_inode_setattr(struct dentry *dentry, struct iattr *attr) { diff --git a/security/security.c b/security/security.c index 1b798d3df710..c645e263ca8d 100644 --- a/security/security.c +++ b/security/security.c @@ -513,6 +513,15 @@ int security_inode_permission(struct inode *inode, int mask) return security_ops->inode_permission(inode, mask); } +int security_inode_exec_permission(struct inode *inode, unsigned int flags) +{ + if (unlikely(IS_PRIVATE(inode))) + return 0; + if (flags) + return -ECHILD; + return security_ops->inode_permission(inode, MAY_EXEC); +} + int security_inode_setattr(struct dentry *dentry, struct iattr *attr) { if (unlikely(IS_PRIVATE(dentry->d_inode))) -- cgit v1.2.3-58-ga151
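
Taken together, the path_init_rcu()/path_walk_rcu() calls and the -ECHILD/-ESTALE handling added to do_path_lookup() above implement the retry scheme described in the path-lookup.txt hunk: try the store-free walk first, and if anything forces a drop to ref-walk that cannot be completed at the last known good dentry, restart the entire lookup with reference counting (adding LOOKUP_REVAL when the failure was -ESTALE). The sketch below is a user-space illustration only and is not part of the patches; walk_rcu() and walk_ref() are placeholder stubs standing in for the rcu-walk and ref-walk entry points.

	/*
	 * Minimal user-space sketch of the rcu-walk -> ref-walk fallback
	 * used by do_path_lookup() in the patch above.  walk_rcu() and
	 * walk_ref() are stubs, not kernel functions: walk_rcu() stands in
	 * for path_init_rcu()+path_walk_rcu(), walk_ref() for
	 * path_init()+path_walk().
	 */
	#include <errno.h>
	#include <stdio.h>

	#define LOOKUP_REVAL	0x0020	/* same value as in the namei.h hunk */

	/* Pretend the store-free walk hit an uncached element: bail with -ECHILD. */
	static int walk_rcu(const char *name, unsigned int flags)
	{
		(void)name; (void)flags;
		return -ECHILD;
	}

	/* Pretend the reference-counted walk always completes. */
	static int walk_ref(const char *name, unsigned int flags)
	{
		(void)name; (void)flags;
		return 0;
	}

	static int lookup(const char *name, unsigned int flags)
	{
		int err;

		/* Fast path: try the store-free walk first. */
		err = walk_rcu(name, flags);
		if (err == -ECHILD || err == -ESTALE) {
			/* -ESTALE additionally forces revalidation on retry. */
			if (err == -ESTALE)
				flags |= LOOKUP_REVAL;
			/* Slow path: restart from scratch in ref-walk mode. */
			err = walk_ref(name, flags);
		}
		return err;
	}

	int main(void)
	{
		printf("lookup returned %d\n", lookup("/tmp", 0));
		return 0;
	}

Because rcu-walk is never allowed to block or take references mid-walk, any condition it cannot handle is signalled with -ECHILD and the whole walk is redone in ref-walk mode, which always completes; -ECHILD is therefore purely internal and must never be returned to user space.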