author     David Woodhouse <dwmw2@shinybook.infradead.org>   2005-08-09 16:51:35 +0100
committer  David Woodhouse <dwmw2@shinybook.infradead.org>   2005-08-09 16:51:35 +0100
commit     c973b112c76c9d8fd042991128f218a738cc8d0a (patch)
tree       e813b0da5d0a0e19e06de6462d145a29ad683026 /fs
parent     c5fbc3966f48279dbebfde10248c977014aa9988 (diff)
parent     00dd1e433967872f3997a45d5adf35056fdf2f56 (diff)
Merge with /shiny/git/linux-2.6/.git
Diffstat (limited to 'fs')
-rw-r--r--  fs/Kconfig               |  11
-rw-r--r--  fs/autofs4/autofs_i.h    |   1
-rw-r--r--  fs/autofs4/inode.c       |  73
-rw-r--r--  fs/bio.c                 |   9
-rw-r--r--  fs/dcache.c              |   7
-rw-r--r--  fs/ext2/ialloc.c         |   1
-rw-r--r--  fs/ext2/xattr.c          |   2
-rw-r--r--  fs/ext2/xip.c            |   2
-rw-r--r--  fs/ext3/ialloc.c         |   2
-rw-r--r--  fs/ext3/xattr.c          |   2
-rw-r--r--  fs/fcntl.c               |   5
-rw-r--r--  fs/hfs/bnode.c           |   2
-rw-r--r--  fs/hfs/extent.c          |   3
-rw-r--r--  fs/hfsplus/bnode.c       |   2
-rw-r--r--  fs/hfsplus/extents.c     |   4
-rw-r--r--  fs/hostfs/hostfs.h       |   1
-rw-r--r--  fs/hostfs/hostfs_kern.c  |   2
-rw-r--r--  fs/hostfs/hostfs_user.c  |  16
-rw-r--r--  fs/inotify.c             |   5
-rw-r--r--  fs/isofs/compress.c      |   6
-rw-r--r--  fs/jffs/intrep.c         |   3
-rw-r--r--  fs/jfs/jfs_dmap.c        |  46
-rw-r--r--  fs/jfs/jfs_dtree.c       |  13
-rw-r--r--  fs/jfs/jfs_logmgr.c      |   3
-rw-r--r--  fs/jfs/jfs_metapage.c    |  11
-rw-r--r--  fs/locks.c               |  81
-rw-r--r--  fs/mbcache.c             |   3
-rw-r--r--  fs/namei.c               |   4
-rw-r--r--  fs/namespace.c           |   2
-rw-r--r--  fs/ntfs/sysctl.h         |   2
-rw-r--r--  fs/reiserfs/inode.c      |  12
-rw-r--r--  fs/reiserfs/journal.c    |   4
-rw-r--r--  fs/reiserfs/xattr.c      |   1
-rw-r--r--  fs/sysfs/file.c          |  18
-rw-r--r--  fs/sysfs/inode.c         |   2
35 files changed, 257 insertions, 104 deletions
diff --git a/fs/Kconfig b/fs/Kconfig
index 5d0c4be43dba..e54be7058359 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -363,12 +363,15 @@ config INOTIFY
bool "Inotify file change notification support"
default y
---help---
- Say Y here to enable inotify support and the /dev/inotify character
- device. Inotify is a file change notification system and a
+ Say Y here to enable inotify support and the associated system
+ calls. Inotify is a file change notification system and a
replacement for dnotify. Inotify fixes numerous shortcomings in
dnotify and introduces several new features. It allows monitoring
- of both files and directories via a single open fd. Multiple file
- events are supported.
+ of both files and directories via a single open fd. Other features
+ include multiple file events, one-shot support, and unmount
+ notification.
+
+ For more information, see Documentation/filesystems/inotify.txt
If unsure, say Y.
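
(Not part of the patch: as a rough illustration of the system-call interface the help text above refers to, here is a minimal user-space sketch. It assumes the glibc <sys/inotify.h> wrappers, which are a later libc addition; 2.6.13-era programs invoked the syscalls directly via syscall(). The watched path /tmp and the chosen event mask are arbitrary examples.)

    /* Minimal inotify usage sketch (illustrative only): watch one directory
     * and print the events read from the single inotify file descriptor. */
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/inotify.h>

    int main(void)
    {
            /* Align the buffer so casting to struct inotify_event is safe. */
            char buf[4096] __attribute__ ((aligned(__alignof__(struct inotify_event))));
            ssize_t len, off;
            int fd, wd;

            fd = inotify_init();            /* one open fd queues all events */
            if (fd < 0) {
                    perror("inotify_init");
                    return 1;
            }
            wd = inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE | IN_MODIFY);
            if (wd < 0) {
                    perror("inotify_add_watch");
                    return 1;
            }

            len = read(fd, buf, sizeof(buf));   /* blocks until events arrive */
            for (off = 0; off + (ssize_t)sizeof(struct inotify_event) <= len; ) {
                    struct inotify_event *ev = (struct inotify_event *)(buf + off);

                    printf("wd=%d mask=0x%x name=%s\n",
                           ev->wd, ev->mask, ev->len ? ev->name : "");
                    off += sizeof(struct inotify_event) + ev->len;
            }

            inotify_rm_watch(fd, wd);
            close(fd);
            return 0;
    }
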
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index 9c09641ce907..fca83e28edcf 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -92,6 +92,7 @@ struct autofs_wait_queue {
struct autofs_sb_info {
u32 magic;
+ struct dentry *root;
struct file *pipe;
pid_t oz_pgrp;
int catatonic;
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index 4bb14cc68040..0a3c05d10167 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -16,6 +16,7 @@
#include <linux/pagemap.h>
#include <linux/parser.h>
#include <linux/bitops.h>
+#include <linux/smp_lock.h>
#include "autofs_i.h"
#include <linux/module.h>
@@ -76,6 +77,66 @@ void autofs4_free_ino(struct autofs_info *ino)
kfree(ino);
}
+/*
+ * Deal with the infamous "Busy inodes after umount ..." message.
+ *
+ * Clean up the dentry tree. This happens with autofs if the user
+ * space program goes away due to a SIGKILL, SIGSEGV etc.
+ */
+static void autofs4_force_release(struct autofs_sb_info *sbi)
+{
+ struct dentry *this_parent = sbi->root;
+ struct list_head *next;
+
+ spin_lock(&dcache_lock);
+repeat:
+ next = this_parent->d_subdirs.next;
+resume:
+ while (next != &this_parent->d_subdirs) {
+ struct dentry *dentry = list_entry(next, struct dentry, d_child);
+
+ /* Negative dentry - don`t care */
+ if (!simple_positive(dentry)) {
+ next = next->next;
+ continue;
+ }
+
+ if (!list_empty(&dentry->d_subdirs)) {
+ this_parent = dentry;
+ goto repeat;
+ }
+
+ next = next->next;
+ spin_unlock(&dcache_lock);
+
+ DPRINTK("dentry %p %.*s",
+ dentry, (int)dentry->d_name.len, dentry->d_name.name);
+
+ dput(dentry);
+ spin_lock(&dcache_lock);
+ }
+
+ if (this_parent != sbi->root) {
+ struct dentry *dentry = this_parent;
+
+ next = this_parent->d_child.next;
+ this_parent = this_parent->d_parent;
+ spin_unlock(&dcache_lock);
+ DPRINTK("parent dentry %p %.*s",
+ dentry, (int)dentry->d_name.len, dentry->d_name.name);
+ dput(dentry);
+ spin_lock(&dcache_lock);
+ goto resume;
+ }
+ spin_unlock(&dcache_lock);
+
+ dput(sbi->root);
+ sbi->root = NULL;
+ shrink_dcache_sb(sbi->sb);
+
+ return;
+}
+
static void autofs4_put_super(struct super_block *sb)
{
struct autofs_sb_info *sbi = autofs4_sbi(sb);
@@ -85,6 +146,10 @@ static void autofs4_put_super(struct super_block *sb)
if ( !sbi->catatonic )
autofs4_catatonic_mode(sbi); /* Free wait queues, close pipe */
+ /* Clean up and release dangling references */
+ if (sbi)
+ autofs4_force_release(sbi);
+
kfree(sbi);
DPRINTK("shutting down");
@@ -199,6 +264,7 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent)
s->s_fs_info = sbi;
sbi->magic = AUTOFS_SBI_MAGIC;
+ sbi->root = NULL;
sbi->catatonic = 0;
sbi->exp_timeout = 0;
sbi->oz_pgrp = process_group(current);
@@ -267,6 +333,13 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent)
sbi->pipe = pipe;
/*
+ * Take a reference to the root dentry so we get a chance to
+ * clean up the dentry tree on umount.
+ * See autofs4_force_release.
+ */
+ sbi->root = dget(root);
+
+ /*
* Success! Install the root dentry now to indicate completion.
*/
s->s_root = root;
diff --git a/fs/bio.c b/fs/bio.c
index ca8f7a850fe3..1f2d4649b188 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -248,19 +248,16 @@ inline void __bio_clone(struct bio *bio, struct bio *bio_src)
{
request_queue_t *q = bdev_get_queue(bio_src->bi_bdev);
- memcpy(bio->bi_io_vec, bio_src->bi_io_vec, bio_src->bi_max_vecs * sizeof(struct bio_vec));
+ memcpy(bio->bi_io_vec, bio_src->bi_io_vec,
+ bio_src->bi_max_vecs * sizeof(struct bio_vec));
bio->bi_sector = bio_src->bi_sector;
bio->bi_bdev = bio_src->bi_bdev;
bio->bi_flags |= 1 << BIO_CLONED;
bio->bi_rw = bio_src->bi_rw;
-
- /*
- * notes -- maybe just leave bi_idx alone. assume identical mapping
- * for the clone
- */
bio->bi_vcnt = bio_src->bi_vcnt;
bio->bi_size = bio_src->bi_size;
+ bio->bi_idx = bio_src->bi_idx;
bio_phys_segments(q, bio);
bio_hw_segments(q, bio);
}
diff --git a/fs/dcache.c b/fs/dcache.c
index 3aa8a7e980d8..a15a2e1f5520 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -19,6 +19,7 @@
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fs.h>
+#include <linux/fsnotify.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
@@ -101,6 +102,7 @@ static inline void dentry_iput(struct dentry * dentry)
list_del_init(&dentry->d_alias);
spin_unlock(&dentry->d_lock);
spin_unlock(&dcache_lock);
+ fsnotify_inoderemove(inode);
if (dentry->d_op && dentry->d_op->d_iput)
dentry->d_op->d_iput(dentry, inode);
else
@@ -1165,13 +1167,16 @@ out:
void d_delete(struct dentry * dentry)
{
+ int isdir = 0;
/*
* Are we the only user?
*/
spin_lock(&dcache_lock);
spin_lock(&dentry->d_lock);
+ isdir = S_ISDIR(dentry->d_inode->i_mode);
if (atomic_read(&dentry->d_count) == 1) {
dentry_iput(dentry);
+ fsnotify_nameremove(dentry, isdir);
return;
}
@@ -1180,6 +1185,8 @@ void d_delete(struct dentry * dentry)
spin_unlock(&dentry->d_lock);
spin_unlock(&dcache_lock);
+
+ fsnotify_nameremove(dentry, isdir);
}
static void __d_rehash(struct dentry * entry, struct hlist_head *list)
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index 77e059149212..161f156d98c8 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -612,6 +612,7 @@ got:
err = ext2_init_acl(inode, dir);
if (err) {
DQUOT_FREE_INODE(inode);
+ DQUOT_DROP(inode);
goto fail2;
}
mark_inode_dirty(inode);
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index 27982b500e84..0099462d4271 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -823,7 +823,7 @@ cleanup:
void
ext2_xattr_put_super(struct super_block *sb)
{
- mb_cache_shrink(ext2_xattr_cache, sb->s_bdev);
+ mb_cache_shrink(sb->s_bdev);
}
diff --git a/fs/ext2/xip.c b/fs/ext2/xip.c
index 0aa5ac159c09..ca7f00312388 100644
--- a/fs/ext2/xip.c
+++ b/fs/ext2/xip.c
@@ -36,7 +36,7 @@ __ext2_get_sector(struct inode *inode, sector_t offset, int create,
*result = tmp.b_blocknr;
/* did we get a sparse block (hole in the file)? */
- if (!(*result)) {
+ if (!tmp.b_blocknr && !rc) {
BUG_ON(create);
rc = -ENODATA;
}
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
index 1e6f3ea28713..6981bd014ede 100644
--- a/fs/ext3/ialloc.c
+++ b/fs/ext3/ialloc.c
@@ -604,12 +604,14 @@ got:
err = ext3_init_acl(handle, inode, dir);
if (err) {
DQUOT_FREE_INODE(inode);
+ DQUOT_DROP(inode);
goto fail2;
}
err = ext3_mark_inode_dirty(handle, inode);
if (err) {
ext3_std_error(sb, err);
DQUOT_FREE_INODE(inode);
+ DQUOT_DROP(inode);
goto fail2;
}
diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
index 3f9dfa643b19..269c7b92db9a 100644
--- a/fs/ext3/xattr.c
+++ b/fs/ext3/xattr.c
@@ -1106,7 +1106,7 @@ cleanup:
void
ext3_xattr_put_super(struct super_block *sb)
{
- mb_cache_shrink(ext3_xattr_cache, sb->s_bdev);
+ mb_cache_shrink(sb->s_bdev);
}
/*
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 286a9f8f3d49..6fbc9d8fcc36 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -288,7 +288,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
break;
case F_SETLK:
case F_SETLKW:
- err = fcntl_setlk(filp, cmd, (struct flock __user *) arg);
+ err = fcntl_setlk(fd, filp, cmd, (struct flock __user *) arg);
break;
case F_GETOWN:
/*
@@ -376,7 +376,8 @@ asmlinkage long sys_fcntl64(unsigned int fd, unsigned int cmd, unsigned long arg
break;
case F_SETLK64:
case F_SETLKW64:
- err = fcntl_setlk64(filp, cmd, (struct flock64 __user *) arg);
+ err = fcntl_setlk64(fd, filp, cmd,
+ (struct flock64 __user *) arg);
break;
default:
err = do_fcntl(fd, cmd, arg, filp);
diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
index 6ad1211f84ed..a096c5a56664 100644
--- a/fs/hfs/bnode.c
+++ b/fs/hfs/bnode.c
@@ -480,6 +480,8 @@ void hfs_bnode_put(struct hfs_bnode *node)
return;
}
for (i = 0; i < tree->pages_per_bnode; i++) {
+ if (!node->page[i])
+ continue;
mark_page_accessed(node->page[i]);
#if REF_PAGES
put_page(node->page[i]);
diff --git a/fs/hfs/extent.c b/fs/hfs/extent.c
index cbc8510ad222..5ea6b3d45eaa 100644
--- a/fs/hfs/extent.c
+++ b/fs/hfs/extent.c
@@ -482,7 +482,8 @@ void hfs_file_truncate(struct inode *inode)
page_cache_release(page);
mark_inode_dirty(inode);
return;
- }
+ } else if (inode->i_size == HFS_I(inode)->phys_size)
+ return;
size = inode->i_size + HFS_SB(sb)->alloc_blksz - 1;
blk_cnt = size / HFS_SB(sb)->alloc_blksz;
alloc_cnt = HFS_I(inode)->alloc_blocks;
diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
index 267872e84d71..8868d3b766fd 100644
--- a/fs/hfsplus/bnode.c
+++ b/fs/hfsplus/bnode.c
@@ -643,6 +643,8 @@ void hfs_bnode_put(struct hfs_bnode *node)
return;
}
for (i = 0; i < tree->pages_per_bnode; i++) {
+ if (!node->page[i])
+ continue;
mark_page_accessed(node->page[i]);
#if REF_PAGES
put_page(node->page[i]);
diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
index 376498cc64fd..e7235ca79a95 100644
--- a/fs/hfsplus/extents.c
+++ b/fs/hfsplus/extents.c
@@ -461,7 +461,9 @@ void hfsplus_file_truncate(struct inode *inode)
page_cache_release(page);
mark_inode_dirty(inode);
return;
- }
+ } else if (inode->i_size == HFSPLUS_I(inode).phys_size)
+ return;
+
blk_cnt = (inode->i_size + HFSPLUS_SB(sb).alloc_blksz - 1) >> HFSPLUS_SB(sb).alloc_blksz_shift;
alloc_cnt = HFSPLUS_I(inode).alloc_blocks;
if (blk_cnt == alloc_cnt)
diff --git a/fs/hostfs/hostfs.h b/fs/hostfs/hostfs.h
index c1516d013bf6..67bca0d4a33b 100644
--- a/fs/hostfs/hostfs.h
+++ b/fs/hostfs/hostfs.h
@@ -69,6 +69,7 @@ extern int read_file(int fd, unsigned long long *offset, char *buf, int len);
extern int write_file(int fd, unsigned long long *offset, const char *buf,
int len);
extern int lseek_file(int fd, long long offset, int whence);
+extern int fsync_file(int fd, int datasync);
extern int file_create(char *name, int ur, int uw, int ux, int gr,
int gw, int gx, int or, int ow, int ox);
extern int set_attr(const char *file, struct hostfs_iattr *attrs);
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 88e68caa3784..b2d18200a003 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -382,7 +382,7 @@ int hostfs_file_open(struct inode *ino, struct file *file)
int hostfs_fsync(struct file *file, struct dentry *dentry, int datasync)
{
- return(0);
+ return fsync_file(HOSTFS_I(dentry->d_inode)->fd, datasync);
}
static struct file_operations hostfs_file_fops = {
diff --git a/fs/hostfs/hostfs_user.c b/fs/hostfs/hostfs_user.c
index 4796e8490f7d..b97809deba66 100644
--- a/fs/hostfs/hostfs_user.c
+++ b/fs/hostfs/hostfs_user.c
@@ -153,10 +153,24 @@ int lseek_file(int fd, long long offset, int whence)
int ret;
ret = lseek64(fd, offset, whence);
- if(ret < 0) return(-errno);
+ if(ret < 0)
+ return(-errno);
return(0);
}
+int fsync_file(int fd, int datasync)
+{
+ int ret;
+ if (datasync)
+ ret = fdatasync(fd);
+ else
+ ret = fsync(fd);
+
+ if (ret < 0)
+ return -errno;
+ return 0;
+}
+
void close_file(void *stream)
{
close(*((int *) stream));
diff --git a/fs/inotify.c b/fs/inotify.c
index a8a714e48140..27ebcac5e07f 100644
--- a/fs/inotify.c
+++ b/fs/inotify.c
@@ -90,6 +90,7 @@ struct inotify_device {
unsigned int queue_size; /* size of the queue (bytes) */
unsigned int event_count; /* number of pending events */
unsigned int max_events; /* maximum number of events */
+ u32 last_wd; /* the last wd allocated */
};
/*
@@ -352,7 +353,7 @@ static int inotify_dev_get_wd(struct inotify_device *dev,
do {
if (unlikely(!idr_pre_get(&dev->idr, GFP_KERNEL)))
return -ENOSPC;
- ret = idr_get_new(&dev->idr, watch, &watch->wd);
+ ret = idr_get_new_above(&dev->idr, watch, dev->last_wd, &watch->wd);
} while (ret == -EAGAIN);
return ret;
@@ -401,6 +402,7 @@ static struct inotify_watch *create_watch(struct inotify_device *dev,
return ERR_PTR(ret);
}
+ dev->last_wd = ret;
watch->mask = mask;
atomic_set(&watch->count, 0);
INIT_LIST_HEAD(&watch->d_list);
@@ -899,6 +901,7 @@ asmlinkage long sys_inotify_init(void)
dev->queue_size = 0;
dev->max_events = inotify_max_queued_events;
dev->user = user;
+ dev->last_wd = 0;
atomic_set(&dev->count, 0);
get_inotify_dev(dev);
diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c
index 34a44e451689..4917315db732 100644
--- a/fs/isofs/compress.c
+++ b/fs/isofs/compress.c
@@ -129,8 +129,14 @@ static int zisofs_readpage(struct file *file, struct page *page)
cend = le32_to_cpu(*(__le32 *)(bh->b_data + (blockendptr & bufmask)));
brelse(bh);
+ if (cstart > cend)
+ goto eio;
+
csize = cend-cstart;
+ if (csize > deflateBound(1UL << zisofs_block_shift))
+ goto eio;
+
/* Now page[] contains an array of pages, any of which can be NULL,
and the locks on which we hold. We should now read the data and
release the pages. If the pages are NULL the decompressed data
diff --git a/fs/jffs/intrep.c b/fs/jffs/intrep.c
index fc589ddd0762..456d7e6e29c2 100644
--- a/fs/jffs/intrep.c
+++ b/fs/jffs/intrep.c
@@ -3397,6 +3397,9 @@ jffs_garbage_collect_thread(void *ptr)
siginfo_t info;
unsigned long signr = 0;
+ if (try_to_freeze())
+ continue;
+
spin_lock_irq(&current->sighand->siglock);
signr = dequeue_signal(current, &current->blocked, &info);
spin_unlock_irq(&current->sighand->siglock);
diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
index 0732f206ca60..c739626f5bf1 100644
--- a/fs/jfs/jfs_dmap.c
+++ b/fs/jfs/jfs_dmap.c
@@ -75,7 +75,7 @@ static void dbAllocBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks);
static void dbSplit(dmtree_t * tp, int leafno, int splitsz, int newval);
static void dbBackSplit(dmtree_t * tp, int leafno);
-static void dbJoin(dmtree_t * tp, int leafno, int newval);
+static int dbJoin(dmtree_t * tp, int leafno, int newval);
static void dbAdjTree(dmtree_t * tp, int leafno, int newval);
static int dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc,
int level);
@@ -98,8 +98,8 @@ static int dbExtend(struct inode *ip, s64 blkno, s64 nblocks, s64 addnblocks);
static int dbFindBits(u32 word, int l2nb);
static int dbFindCtl(struct bmap * bmp, int l2nb, int level, s64 * blkno);
static int dbFindLeaf(dmtree_t * tp, int l2nb, int *leafidx);
-static void dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
- int nblocks);
+static int dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
+ int nblocks);
static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks);
static int dbMaxBud(u8 * cp);
@@ -378,6 +378,7 @@ int dbFree(struct inode *ip, s64 blkno, s64 nblocks)
/* free the blocks. */
if ((rc = dbFreeDmap(bmp, dp, blkno, nb))) {
+ jfs_error(ip->i_sb, "dbFree: error in block map\n");
release_metapage(mp);
IREAD_UNLOCK(ipbmap);
return (rc);
@@ -2020,7 +2021,7 @@ static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks)
{
s8 oldroot;
- int rc, word;
+ int rc = 0, word;
/* save the current value of the root (i.e. maximum free string)
* of the dmap tree.
@@ -2028,11 +2029,11 @@ static int dbFreeDmap(struct bmap * bmp, struct dmap * dp, s64 blkno,
oldroot = dp->tree.stree[ROOT];
/* free the specified (blocks) bits */
- dbFreeBits(bmp, dp, blkno, nblocks);
+ rc = dbFreeBits(bmp, dp, blkno, nblocks);
- /* if the root has not changed, done. */
- if (dp->tree.stree[ROOT] == oldroot)
- return (0);
+ /* if error or the root has not changed, done. */
+ if (rc || (dp->tree.stree[ROOT] == oldroot))
+ return (rc);
/* root changed. bubble the change up to the dmap control pages.
* if the adjustment of the upper level control pages fails,
@@ -2221,15 +2222,16 @@ static void dbAllocBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
* blkno - starting block number of the bits to be freed.
* nblocks - number of bits to be freed.
*
- * RETURN VALUES: none
+ * RETURN VALUES: 0 for success
*
* serialization: IREAD_LOCK(ipbmap) or IWRITE_LOCK(ipbmap) held on entry/exit;
*/
-static void dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
+static int dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
int nblocks)
{
int dbitno, word, rembits, nb, nwords, wbitno, nw, agno;
dmtree_t *tp = (dmtree_t *) & dp->tree;
+ int rc = 0;
int size;
/* determine the bit number and word within the dmap of the
@@ -2278,8 +2280,10 @@ static void dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
/* update the leaf for this dmap word.
*/
- dbJoin(tp, word,
- dbMaxBud((u8 *) & dp->wmap[word]));
+ rc = dbJoin(tp, word,
+ dbMaxBud((u8 *) & dp->wmap[word]));
+ if (rc)
+ return rc;
word += 1;
} else {
@@ -2310,7 +2314,9 @@ static void dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
/* update the leaf.
*/
- dbJoin(tp, word, size);
+ rc = dbJoin(tp, word, size);
+ if (rc)
+ return rc;
/* get the number of dmap words handled.
*/
@@ -2357,6 +2363,8 @@ static void dbFreeBits(struct bmap * bmp, struct dmap * dp, s64 blkno,
}
BMAP_UNLOCK(bmp);
+
+ return 0;
}
@@ -2464,7 +2472,9 @@ dbAdjCtl(struct bmap * bmp, s64 blkno, int newval, int alloc, int level)
}
dbSplit((dmtree_t *) dcp, leafno, dcp->budmin, newval);
} else {
- dbJoin((dmtree_t *) dcp, leafno, newval);
+ rc = dbJoin((dmtree_t *) dcp, leafno, newval);
+ if (rc)
+ return rc;
}
/* check if the root of the current dmap control page changed due
@@ -2689,7 +2699,7 @@ static void dbBackSplit(dmtree_t * tp, int leafno)
*
* RETURN VALUES: none
*/
-static void dbJoin(dmtree_t * tp, int leafno, int newval)
+static int dbJoin(dmtree_t * tp, int leafno, int newval)
{
int budsz, buddy;
s8 *leaf;
@@ -2729,7 +2739,9 @@ static void dbJoin(dmtree_t * tp, int leafno, int newval)
if (newval > leaf[buddy])
break;
- assert(newval == leaf[buddy]);
+ /* It shouldn't be less */
+ if (newval < leaf[buddy])
+ return -EIO;
/* check which (leafno or buddy) is the left buddy.
* the left buddy gets to claim the blocks resulting
@@ -2761,6 +2773,8 @@ static void dbJoin(dmtree_t * tp, int leafno, int newval)
/* update the leaf value.
*/
dbAdjTree(tp, leafno, newval);
+
+ return 0;
}
diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c
index 73b5fc7eda80..404f33eae507 100644
--- a/fs/jfs/jfs_dtree.c
+++ b/fs/jfs/jfs_dtree.c
@@ -381,9 +381,12 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot)
* It's time to move the inline table to an external
* page and begin to build the xtree
*/
- if (DQUOT_ALLOC_BLOCK(ip, sbi->nbperpage) ||
- dbAlloc(ip, 0, sbi->nbperpage, &xaddr))
- goto clean_up; /* No space */
+ if (DQUOT_ALLOC_BLOCK(ip, sbi->nbperpage))
+ goto clean_up;
+ if (dbAlloc(ip, 0, sbi->nbperpage, &xaddr)) {
+ DQUOT_FREE_BLOCK(ip, sbi->nbperpage);
+ goto clean_up;
+ }
/*
* Save the table, we're going to overwrite it with the
@@ -397,13 +400,15 @@ static u32 add_index(tid_t tid, struct inode *ip, s64 bn, int slot)
xtInitRoot(tid, ip);
/*
- * Allocate the first block & add it to the xtree
+ * Add the first block to the xtree
*/
if (xtInsert(tid, ip, 0, 0, sbi->nbperpage, &xaddr, 0)) {
/* This really shouldn't fail */
jfs_warn("add_index: xtInsert failed!");
memcpy(&jfs_ip->i_dirtable, temp_table,
sizeof (temp_table));
+ dbFree(ip, xaddr, sbi->nbperpage);
+ DQUOT_FREE_BLOCK(ip, sbi->nbperpage);
goto clean_up;
}
ip->i_size = PSIZE;
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index 79d07624bfe1..22815e88e7cc 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -1030,7 +1030,8 @@ static int lmLogSync(struct jfs_log * log, int nosyncwait)
* starting until all current transactions are completed
* by setting syncbarrier flag.
*/
- if (written > LOGSYNC_BARRIER(logsize) && logsize > 32 * LOGPSIZE) {
+ if (!test_bit(log_SYNCBARRIER, &log->flag) &&
+ (written > LOGSYNC_BARRIER(logsize)) && log->active) {
set_bit(log_SYNCBARRIER, &log->flag);
jfs_info("log barrier on: lsn=0x%x syncpt=0x%x", lsn,
log->syncpt);
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index 6c5485d16c39..13d7e3f1feb4 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -561,7 +561,6 @@ static int metapage_releasepage(struct page *page, int gfp_mask)
dump_mem("page", page, sizeof(struct page));
dump_stack();
}
- WARN_ON(mp->lsn);
if (mp->lsn)
remove_from_logsync(mp);
remove_metapage(page, mp);
@@ -641,7 +640,7 @@ struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
} else {
page = read_cache_page(mapping, page_index,
(filler_t *)mapping->a_ops->readpage, NULL);
- if (IS_ERR(page)) {
+ if (IS_ERR(page) || !PageUptodate(page)) {
jfs_err("read_cache_page failed!");
return NULL;
}
@@ -783,14 +782,6 @@ void release_metapage(struct metapage * mp)
if (test_bit(META_discard, &mp->flag) && !mp->count) {
clear_page_dirty(page);
ClearPageUptodate(page);
-#ifdef _NOT_YET
- if (page->mapping) {
- /* Remove from page cache and page cache reference */
- remove_from_page_cache(page);
- page_cache_release(page);
- metapage_releasepage(page, 0);
- }
-#endif
}
#else
/* Try to keep metapages from using up too much memory */
diff --git a/fs/locks.c b/fs/locks.c
index 29fa5da6c117..11956b6179ff 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1591,7 +1591,8 @@ out:
/* Apply the lock described by l to an open file descriptor.
* This implements both the F_SETLK and F_SETLKW commands of fcntl().
*/
-int fcntl_setlk(struct file *filp, unsigned int cmd, struct flock __user *l)
+int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
+ struct flock __user *l)
{
struct file_lock *file_lock = locks_alloc_lock();
struct flock flock;
@@ -1620,6 +1621,7 @@ int fcntl_setlk(struct file *filp, unsigned int cmd, struct flock __user *l)
goto out;
}
+again:
error = flock_to_posix_lock(filp, file_lock, &flock);
if (error)
goto out;
@@ -1648,25 +1650,33 @@ int fcntl_setlk(struct file *filp, unsigned int cmd, struct flock __user *l)
if (error)
goto out;
- if (filp->f_op && filp->f_op->lock != NULL) {
+ if (filp->f_op && filp->f_op->lock != NULL)
error = filp->f_op->lock(filp, cmd, file_lock);
- goto out;
- }
+ else {
+ for (;;) {
+ error = __posix_lock_file(inode, file_lock);
+ if ((error != -EAGAIN) || (cmd == F_SETLK))
+ break;
+ error = wait_event_interruptible(file_lock->fl_wait,
+ !file_lock->fl_next);
+ if (!error)
+ continue;
- for (;;) {
- error = __posix_lock_file(inode, file_lock);
- if ((error != -EAGAIN) || (cmd == F_SETLK))
+ locks_delete_block(file_lock);
break;
- error = wait_event_interruptible(file_lock->fl_wait,
- !file_lock->fl_next);
- if (!error)
- continue;
+ }
+ }
- locks_delete_block(file_lock);
- break;
+ /*
+ * Attempt to detect a close/fcntl race and recover by
+ * releasing the lock that was just acquired.
+ */
+ if (!error && fcheck(fd) != filp && flock.l_type != F_UNLCK) {
+ flock.l_type = F_UNLCK;
+ goto again;
}
- out:
+out:
locks_free_lock(file_lock);
return error;
}
@@ -1724,7 +1734,8 @@ out:
/* Apply the lock described by l to an open file descriptor.
* This implements both the F_SETLK and F_SETLKW commands of fcntl().
*/
-int fcntl_setlk64(struct file *filp, unsigned int cmd, struct flock64 __user *l)
+int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
+ struct flock64 __user *l)
{
struct file_lock *file_lock = locks_alloc_lock();
struct flock64 flock;
@@ -1753,6 +1764,7 @@ int fcntl_setlk64(struct file *filp, unsigned int cmd, struct flock64 __user *l)
goto out;
}
+again:
error = flock64_to_posix_lock(filp, file_lock, &flock);
if (error)
goto out;
@@ -1781,22 +1793,30 @@ int fcntl_setlk64(struct file *filp, unsigned int cmd, struct flock64 __user *l)
if (error)
goto out;
- if (filp->f_op && filp->f_op->lock != NULL) {
+ if (filp->f_op && filp->f_op->lock != NULL)
error = filp->f_op->lock(filp, cmd, file_lock);
- goto out;
- }
+ else {
+ for (;;) {
+ error = __posix_lock_file(inode, file_lock);
+ if ((error != -EAGAIN) || (cmd == F_SETLK64))
+ break;
+ error = wait_event_interruptible(file_lock->fl_wait,
+ !file_lock->fl_next);
+ if (!error)
+ continue;
- for (;;) {
- error = __posix_lock_file(inode, file_lock);
- if ((error != -EAGAIN) || (cmd == F_SETLK64))
+ locks_delete_block(file_lock);
break;
- error = wait_event_interruptible(file_lock->fl_wait,
- !file_lock->fl_next);
- if (!error)
- continue;
+ }
+ }
- locks_delete_block(file_lock);
- break;
+ /*
+ * Attempt to detect a close/fcntl race and recover by
+ * releasing the lock that was just acquired.
+ */
+ if (!error && fcheck(fd) != filp && flock.l_type != F_UNLCK) {
+ flock.l_type = F_UNLCK;
+ goto again;
}
out:
@@ -1888,12 +1908,7 @@ void locks_remove_flock(struct file *filp)
while ((fl = *before) != NULL) {
if (fl->fl_file == filp) {
- /*
- * We might have a POSIX lock that was created at the same time
- * the filp was closed for the last time. Just remove that too,
- * regardless of ownership, since nobody can own it.
- */
- if (IS_FLOCK(fl) || IS_POSIX(fl)) {
+ if (IS_FLOCK(fl)) {
locks_delete_lock(before);
continue;
}
diff --git a/fs/mbcache.c b/fs/mbcache.c
index c7170b9221a3..b002a088857d 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -316,11 +316,10 @@ fail:
* currently in use cannot be freed, and thus remain in the cache. All others
* are freed.
*
- * @cache: which cache to shrink
* @bdev: which device's cache entries to shrink
*/
void
-mb_cache_shrink(struct mb_cache *cache, struct block_device *bdev)
+mb_cache_shrink(struct block_device *bdev)
{
LIST_HEAD(free_list);
struct list_head *l, *ltmp;
diff --git a/fs/namei.c b/fs/namei.c
index e252b12d39be..264e232addda 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1801,7 +1801,6 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry)
}
up(&dentry->d_inode->i_sem);
if (!error) {
- fsnotify_rmdir(dentry, dentry->d_inode, dir);
d_delete(dentry);
}
dput(dentry);
@@ -1874,7 +1873,6 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry)
/* We don't d_delete() NFS sillyrenamed files--they still exist. */
if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) {
- fsnotify_unlink(dentry, dir);
d_delete(dentry);
}
@@ -2218,7 +2216,7 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
error = vfs_rename_other(old_dir,old_dentry,new_dir,new_dentry);
if (!error) {
const char *new_name = old_dentry->d_name.name;
- fsnotify_move(old_dir, new_dir, old_name, new_name, is_dir);
+ fsnotify_move(old_dir, new_dir, old_name, new_name, is_dir, new_dentry->d_inode);
}
fsnotify_oldname_free(old_name);
diff --git a/fs/namespace.c b/fs/namespace.c
index 587eb0d707ee..79bd8a46e1e7 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -160,7 +160,7 @@ clone_mnt(struct vfsmount *old, struct dentry *root)
mnt->mnt_root = dget(root);
mnt->mnt_mountpoint = mnt->mnt_root;
mnt->mnt_parent = mnt;
- mnt->mnt_namespace = old->mnt_namespace;
+ mnt->mnt_namespace = current->namespace;
/* stick the duplicate mount on the same expiry list
* as the original if that was on one */
diff --git a/fs/ntfs/sysctl.h b/fs/ntfs/sysctl.h
index df749cc0aac8..c8064cae8f17 100644
--- a/fs/ntfs/sysctl.h
+++ b/fs/ntfs/sysctl.h
@@ -26,7 +26,7 @@
#include <linux/config.h>
-#if (DEBUG && CONFIG_SYSCTL)
+#if defined(DEBUG) && defined(CONFIG_SYSCTL)
extern int ntfs_sysctl(int add);
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 1aaf2c7d44e6..d9f614a57731 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -1980,7 +1980,17 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
out_inserted_sd:
inode->i_nlink = 0;
th->t_trans_id = 0; /* so the caller can't use this handle later */
- iput(inode);
+
+ /* If we were inheriting an ACL, we need to release the lock so that
+ * iput doesn't deadlock in reiserfs_delete_xattrs. The locking
+ * code really needs to be reworked, but this will take care of it
+ * for now. -jeffm */
+ if (REISERFS_I(dir)->i_acl_default) {
+ reiserfs_write_unlock_xattrs(dir->i_sb);
+ iput(inode);
+ reiserfs_write_lock_xattrs(dir->i_sb);
+ } else
+ iput(inode);
return err;
}
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index c66c27ec4100..ca7989b04be3 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -556,14 +556,14 @@ static inline void insert_journal_hash(struct reiserfs_journal_cnode **table,
}
/* lock the current transaction */
-inline static void lock_journal(struct super_block *p_s_sb)
+static inline void lock_journal(struct super_block *p_s_sb)
{
PROC_INFO_INC(p_s_sb, journal.lock_journal);
down(&SB_JOURNAL(p_s_sb)->j_lock);
}
/* unlock the current transaction */
-inline static void unlock_journal(struct super_block *p_s_sb)
+static inline void unlock_journal(struct super_block *p_s_sb)
{
up(&SB_JOURNAL(p_s_sb)->j_lock);
}
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index e386d3db3051..87ac9dc8b381 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -39,7 +39,6 @@
#include <linux/xattr.h>
#include <linux/reiserfs_xattr.h>
#include <linux/reiserfs_acl.h>
-#include <linux/mbcache.h>
#include <asm/uaccess.h>
#include <asm/checksum.h>
#include <linux/smp_lock.h>
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index 335288b9be0f..4013d7905e84 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -437,8 +437,8 @@ int sysfs_chmod_file(struct kobject *kobj, struct attribute *attr, mode_t mode)
{
struct dentry *dir = kobj->dentry;
struct dentry *victim;
- struct sysfs_dirent *sd;
- umode_t umode = (mode & S_IALLUGO) | S_IFREG;
+ struct inode * inode;
+ struct iattr newattrs;
int res = -ENOENT;
down(&dir->d_inode->i_sem);
@@ -446,13 +446,15 @@ int sysfs_chmod_file(struct kobject *kobj, struct attribute *attr, mode_t mode)
if (!IS_ERR(victim)) {
if (victim->d_inode &&
(victim->d_parent->d_inode == dir->d_inode)) {
- sd = victim->d_fsdata;
- attr->mode = mode;
- sd->s_mode = umode;
- victim->d_inode->i_mode = umode;
- dput(victim);
- res = 0;
+ inode = victim->d_inode;
+ down(&inode->i_sem);
+ newattrs.ia_mode = (mode & S_IALLUGO) |
+ (inode->i_mode & ~S_IALLUGO);
+ newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
+ res = notify_change(victim, &newattrs);
+ up(&inode->i_sem);
}
+ dput(victim);
}
up(&dir->d_inode->i_sem);
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
index 8de13bafaa76..d727dc960634 100644
--- a/fs/sysfs/inode.c
+++ b/fs/sysfs/inode.c
@@ -85,7 +85,7 @@ int sysfs_setattr(struct dentry * dentry, struct iattr * iattr)
if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
mode &= ~S_ISGID;
- sd_iattr->ia_mode = mode;
+ sd_iattr->ia_mode = sd->s_mode = mode;
}
return error;