Diffstat (limited to 'fs/buffer.c')
-rw-r--r--  fs/buffer.c   88
1 file changed, 38 insertions(+), 50 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 7e9e409feaa..6a25d7df89b 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -278,7 +278,7 @@ EXPORT_SYMBOL(thaw_bdev);
*/
static void do_sync(unsigned long wait)
{
- wakeup_bdflush(0);
+ wakeup_pdflush(0);
sync_inodes(0); /* All mappings, inodes and their blockdevs */
DQUOT_SYNC(NULL);
sync_supers(); /* Write the superblocks */
@@ -331,7 +331,7 @@ int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
return ret;
}
-asmlinkage long sys_fsync(unsigned int fd)
+static long do_fsync(unsigned int fd, int datasync)
{
struct file * file;
struct address_space *mapping;
@@ -342,14 +342,14 @@ asmlinkage long sys_fsync(unsigned int fd)
if (!file)
goto out;
- mapping = file->f_mapping;
-
ret = -EINVAL;
if (!file->f_op || !file->f_op->fsync) {
/* Why? We can still call filemap_fdatawrite */
goto out_putf;
}
+ mapping = file->f_mapping;
+
current->flags |= PF_SYNCWRITE;
ret = filemap_fdatawrite(mapping);
@@ -358,7 +358,7 @@ asmlinkage long sys_fsync(unsigned int fd)
* which could cause livelocks in fsync_buffers_list
*/
down(&mapping->host->i_sem);
- err = file->f_op->fsync(file, file->f_dentry, 0);
+ err = file->f_op->fsync(file, file->f_dentry, datasync);
if (!ret)
ret = err;
up(&mapping->host->i_sem);
@@ -373,39 +373,14 @@ out:
return ret;
}
-asmlinkage long sys_fdatasync(unsigned int fd)
+asmlinkage long sys_fsync(unsigned int fd)
{
- struct file * file;
- struct address_space *mapping;
- int ret, err;
-
- ret = -EBADF;
- file = fget(fd);
- if (!file)
- goto out;
-
- ret = -EINVAL;
- if (!file->f_op || !file->f_op->fsync)
- goto out_putf;
-
- mapping = file->f_mapping;
-
- current->flags |= PF_SYNCWRITE;
- ret = filemap_fdatawrite(mapping);
- down(&mapping->host->i_sem);
- err = file->f_op->fsync(file, file->f_dentry, 1);
- if (!ret)
- ret = err;
- up(&mapping->host->i_sem);
- err = filemap_fdatawait(mapping);
- if (!ret)
- ret = err;
- current->flags &= ~PF_SYNCWRITE;
+ return do_fsync(fd, 0);
+}
-out_putf:
- fput(file);
-out:
- return ret;
+asmlinkage long sys_fdatasync(unsigned int fd)
+{
+ return do_fsync(fd, 1);
}
/*
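Both system calls now funnel into do_fsync(), differing only in the datasync flag passed through to ->fsync(). As a reminder of what that flag means from user space, here is a minimal, hypothetical C sketch (the file name and error handling are illustrative and not part of the patch): fsync() flushes data plus all metadata, while fdatasync() may skip metadata that is not required to retrieve the data.

        #include <fcntl.h>
        #include <stdio.h>
        #include <unistd.h>

        int main(void)
        {
                /* hypothetical file; only the final sync calls matter here */
                int fd = open("data.log", O_WRONLY | O_CREAT | O_APPEND, 0644);
                if (fd < 0) {
                        perror("open");
                        return 1;
                }
                if (write(fd, "record\n", 7) != 7)
                        perror("write");
                if (fsync(fd))          /* kernel side: do_fsync(fd, 0) -> data + metadata */
                        perror("fsync");
                if (fdatasync(fd))      /* kernel side: do_fsync(fd, 1) -> data, minimal metadata */
                        perror("fdatasync");
                close(fd);
                return 0;
        }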
@@ -522,13 +497,13 @@ static void free_more_memory(void)
struct zone **zones;
pg_data_t *pgdat;
- wakeup_bdflush(1024);
+ wakeup_pdflush(1024);
yield();
for_each_pgdat(pgdat) {
zones = pgdat->node_zonelists[GFP_NOFS&GFP_ZONEMASK].zones;
if (*zones)
- try_to_free_pages(zones, GFP_NOFS, 0);
+ try_to_free_pages(zones, GFP_NOFS);
}
}
@@ -538,8 +513,8 @@ static void free_more_memory(void)
*/
static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
{
- static DEFINE_SPINLOCK(page_uptodate_lock);
unsigned long flags;
+ struct buffer_head *first;
struct buffer_head *tmp;
struct page *page;
int page_uptodate = 1;
@@ -561,7 +536,9 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
* two buffer heads end IO at almost the same time and both
* decide that the page is now completely done.
*/
- spin_lock_irqsave(&page_uptodate_lock, flags);
+ first = page_buffers(page);
+ local_irq_save(flags);
+ bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
clear_buffer_async_read(bh);
unlock_buffer(bh);
tmp = bh;
@@ -574,7 +551,8 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
}
tmp = tmp->b_this_page;
} while (tmp != bh);
- spin_unlock_irqrestore(&page_uptodate_lock, flags);
+ bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+ local_irq_restore(flags);
/*
* If none of the buffers had errors and they are all
@@ -586,7 +564,8 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
return;
still_busy:
- spin_unlock_irqrestore(&page_uptodate_lock, flags);
+ bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+ local_irq_restore(flags);
return;
}
@@ -597,8 +576,8 @@ still_busy:
void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
char b[BDEVNAME_SIZE];
- static DEFINE_SPINLOCK(page_uptodate_lock);
unsigned long flags;
+ struct buffer_head *first;
struct buffer_head *tmp;
struct page *page;
@@ -619,7 +598,10 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
SetPageError(page);
}
- spin_lock_irqsave(&page_uptodate_lock, flags);
+ first = page_buffers(page);
+ local_irq_save(flags);
+ bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
+
clear_buffer_async_write(bh);
unlock_buffer(bh);
tmp = bh->b_this_page;
@@ -630,12 +612,14 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
}
tmp = tmp->b_this_page;
}
- spin_unlock_irqrestore(&page_uptodate_lock, flags);
+ bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+ local_irq_restore(flags);
end_page_writeback(page);
return;
still_busy:
- spin_unlock_irqrestore(&page_uptodate_lock, flags);
+ bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
+ local_irq_restore(flags);
return;
}
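The locking rework above replaces one global, statically defined spinlock with a lock bit (BH_Uptodate_Lock) carried in the b_state word of the page's first buffer_head, taken with interrupts disabled because these completion handlers can run in interrupt context. The same bit-spinlock pattern can be sketched on a made-up structure as follows (illustrative only; struct my_desc and MY_STATE_LOCK are hypothetical, and the header that provides bit_spin_lock() has moved between kernel versions):

        #include <linux/bit_spinlock.h>
        #include <linux/irqflags.h>

        #define MY_STATE_LOCK   0       /* hypothetical lock bit inside ->state */

        struct my_desc {
                unsigned long state;    /* one bit of this word doubles as a per-object lock */
                int pending;
        };

        /* may be called from interrupt context, like end_buffer_async_read() */
        static void my_desc_complete(struct my_desc *d)
        {
                unsigned long flags;

                local_irq_save(flags);                   /* bit_spin_lock() alone does not disable IRQs */
                bit_spin_lock(MY_STATE_LOCK, &d->state); /* contention is per object, not global */

                if (d->pending)
                        d->pending--;

                bit_spin_unlock(MY_STATE_LOCK, &d->state);
                local_irq_restore(flags);
        }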
@@ -1951,7 +1935,6 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
if (err)
break;
if (buffer_new(bh)) {
- clear_buffer_new(bh);
unmap_underlying_metadata(bh->b_bdev,
bh->b_blocknr);
if (PageUptodate(page)) {
@@ -1993,9 +1976,14 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
if (!buffer_uptodate(*wait_bh))
err = -EIO;
}
- if (!err)
- return err;
-
+ if (!err) {
+ bh = head;
+ do {
+ if (buffer_new(bh))
+ clear_buffer_new(bh);
+ } while ((bh = bh->b_this_page) != head);
+ return 0;
+ }
/* Error case: */
/*
* Zero out any newly allocated blocks to avoid exposing stale