author     Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-05-24 09:17:12 -0700
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-05-24 09:17:12 -0700
commit     d333fc8d3006296f2893d17817bc0f67cf87f353 (patch)
tree       f759092ae962b0071e805e4526ffff57995d487b /fs
parent     1a93fa86bf6eb0d8bed84ef18c8b35be389edee5 (diff)
parent     d4a8f3677fe2c2fc86443254fe42825e244c194d (diff)
Merge branch 'fixes' of git://git.linux-nfs.org/pub/linux/nfs-2.6
* 'fixes' of git://git.linux-nfs.org/pub/linux/nfs-2.6:
  NFS: Fix nfs_direct_dirty_pages()
  NFS: Fix handful of compiler warnings in direct.c
  NFS: Avoid a deadlock situation on write
Diffstat (limited to 'fs')
-rw-r--r--  fs/nfs/direct.c    53
-rw-r--r--  fs/nfs/pagelist.c  20
-rw-r--r--  fs/nfs/write.c      6
3 files changed, 57 insertions(+), 22 deletions(-)
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 345aa5c0f38..0c542ec92d5 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -122,19 +122,25 @@ ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_
 	return -EINVAL;
 }
 
-static void nfs_direct_dirty_pages(struct page **pages, int npages)
+static void nfs_direct_dirty_pages(struct page **pages, unsigned int pgbase, size_t count)
 {
-	int i;
+	unsigned int npages;
+	unsigned int i;
+
+	if (count == 0)
+		return;
+	pages += (pgbase >> PAGE_SHIFT);
+	npages = (count + (pgbase & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	for (i = 0; i < npages; i++) {
 		struct page *page = pages[i];
 		if (!PageCompound(page))
-			set_page_dirty_lock(page);
+			set_page_dirty(page);
 	}
 }
 
-static void nfs_direct_release_pages(struct page **pages, int npages)
+static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
 {
-	int i;
+	unsigned int i;
 	for (i = 0; i < npages; i++)
 		page_cache_release(pages[i]);
 }
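
As an aside on the nfs_direct_dirty_pages() rewrite above: rather than dirtying a caller-supplied number of pages, the new code derives the page count from the byte count and the offset of the data within its first page, so only pages that actually received data are dirtied. The standalone user-space sketch below (not part of the patch; pages_spanned is an invented name and 4 KiB pages are assumed) reproduces just that rounding arithmetic:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* Offset of the data within its first page, plus the byte count, rounded
 * up to a whole number of pages.  The real nfs_direct_dirty_pages() also
 * advances the page vector by pgbase >> PAGE_SHIFT, which this sketch omits. */
static unsigned long pages_spanned(unsigned long pgbase, unsigned long count)
{
	if (count == 0)
		return 0;
	return (count + (pgbase & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

int main(void)
{
	/* 100 bytes starting 4000 bytes into a page spill into a second page */
	printf("%lu\n", pages_spanned(4000, 100));	/* prints 2 */
	/* exactly one page worth of data at offset 0 stays within one page */
	printf("%lu\n", pages_spanned(0, 4096));	/* prints 1 */
	return 0;
}
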
@@ -224,17 +230,18 @@ static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
 	if (nfs_readpage_result(task, data) != 0)
 		return;
 
-	nfs_direct_dirty_pages(data->pagevec, data->npages);
-	nfs_direct_release_pages(data->pagevec, data->npages);
-
 	spin_lock(&dreq->lock);
-
-	if (likely(task->tk_status >= 0))
-		dreq->count += data->res.count;
-	else
+	if (unlikely(task->tk_status < 0)) {
 		dreq->error = task->tk_status;
-
-	spin_unlock(&dreq->lock);
+		spin_unlock(&dreq->lock);
+	} else {
+		dreq->count += data->res.count;
+		spin_unlock(&dreq->lock);
+		nfs_direct_dirty_pages(data->pagevec,
+				data->args.pgbase,
+				data->res.count);
+	}
+	nfs_direct_release_pages(data->pagevec, data->npages);
 
 	if (put_dreq(dreq))
 		nfs_direct_complete(dreq);
@@ -279,9 +286,12 @@ static ssize_t nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned lo
 		result = get_user_pages(current, current->mm, user_addr,
 					data->npages, 1, 0, data->pagevec, NULL);
 		up_read(&current->mm->mmap_sem);
-		if (unlikely(result < data->npages)) {
-			if (result > 0)
-				nfs_direct_release_pages(data->pagevec, result);
+		if (result < 0) {
+			nfs_readdata_release(data);
+			break;
+		}
+		if ((unsigned)result < data->npages) {
+			nfs_direct_release_pages(data->pagevec, result);
 			nfs_readdata_release(data);
 			break;
 		}
@@ -610,9 +620,12 @@ static ssize_t nfs_direct_write_schedule(struct nfs_direct_req *dreq, unsigned l
 		result = get_user_pages(current, current->mm, user_addr,
 					data->npages, 0, 0, data->pagevec, NULL);
 		up_read(&current->mm->mmap_sem);
-		if (unlikely(result < data->npages)) {
-			if (result > 0)
-				nfs_direct_release_pages(data->pagevec, result);
+		if (result < 0) {
+			nfs_writedata_release(data);
+			break;
+		}
+		if ((unsigned)result < data->npages) {
+			nfs_direct_release_pages(data->pagevec, result);
 			nfs_writedata_release(data);
 			break;
 		}
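
One more note on the direct.c changes: both schedule routines used to fold the error and short-pin cases of get_user_pages() into a single 'result < data->npages' test, comparing a signed return value with an unsigned page count; the patch splits out an explicit 'result < 0' check and casts before the unsigned comparison, which is the kind of thing the "compiler warnings" item in the merge covers. The user-space sketch below is only an illustration of the general signed/unsigned pitfall; the types are chosen for the example and are not the kernel's:

#include <stdio.h>

int main(void)
{
	int result = -14;		/* e.g. a negative errno from a pinning call */
	unsigned int npages = 8;

	/* -Wsign-compare territory: 'result' is converted to unsigned int here,
	 * so -14 becomes a huge positive value and the test is false */
	if (result < npages)
		puts("looks like a short pin");
	else
		puts("negative return value slips past the combined check");

	/* an explicit error test, as added in the patch, catches it first */
	if (result < 0)
		puts("error handled before any unsigned comparison");
	return 0;
}
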
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index cbdd1c6aaa9..c5bb51a29e8 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -355,6 +355,26 @@ void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
 	nfs_pageio_doio(desc);
 }
 
+/**
+ * nfs_pageio_cond_complete - Conditional I/O completion
+ * @desc: pointer to io descriptor
+ * @index: page index
+ *
+ * It is important to ensure that processes don't try to take locks
+ * on non-contiguous ranges of pages as that might deadlock. This
+ * function should be called before attempting to wait on a locked
+ * nfs_page. It will complete the I/O if the page index 'index'
+ * is not contiguous with the existing list of pages in 'desc'.
+ */
+void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
+{
+	if (!list_empty(&desc->pg_list)) {
+		struct nfs_page *prev = nfs_list_entry(desc->pg_list.prev);
+		if (index != prev->wb_index + 1)
+			nfs_pageio_doio(desc);
+	}
+}
+
 #define NFS_SCAN_MAXENTRIES 16
 /**
  * nfs_scan_list - Scan a list for matching requests
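
To make the contiguity rule in nfs_pageio_cond_complete() concrete: the descriptor coalesces contiguous pages, and it must be flushed before a caller waits on a page that is not adjacent to what is already queued, since holding locks on a non-contiguous range of pages risks deadlock (as the comment above says). The toy user-space model below is only an illustration; struct pgio_model, its single last_index field and the helper names are invented stand-ins for the real nfs_pageio_descriptor and its list of nfs_page requests:

#include <stdbool.h>
#include <stdio.h>

struct pgio_model {
	bool have_pages;		/* stands in for !list_empty(&desc->pg_list) */
	unsigned long last_index;	/* wb_index of the last page queued */
};

/* stand-in for nfs_pageio_doio(): submit whatever has been coalesced */
static void flush(struct pgio_model *desc)
{
	printf("flush coalesced I/O ending at index %lu\n", desc->last_index);
	desc->have_pages = false;
}

/* mirrors the contiguity check in nfs_pageio_cond_complete() above */
static void cond_complete(struct pgio_model *desc, unsigned long index)
{
	if (desc->have_pages && index != desc->last_index + 1)
		flush(desc);
}

static void queue_page(struct pgio_model *desc, unsigned long index)
{
	cond_complete(desc, index);
	desc->last_index = index;
	desc->have_pages = true;
}

int main(void)
{
	struct pgio_model desc = { .have_pages = false, .last_index = 0 };

	queue_page(&desc, 10);
	queue_page(&desc, 11);	/* contiguous: keep coalescing */
	queue_page(&desc, 20);	/* gap: the pending 10..11 I/O is flushed first */
	flush(&desc);		/* final flush, like nfs_pageio_complete() */
	return 0;
}

The write.c hunks that follow call nfs_pageio_cond_complete() from nfs_writepage_locked() before each flush attempt, in place of the unconditional nfs_pageio_complete() that used to sit right before nfs_wait_on_request().
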
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index b084c03ce49..af344a158e0 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -273,8 +273,6 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
 		 * request as dirty (in which case we don't care).
 		 */
 		spin_unlock(req_lock);
-		/* Prevent deadlock! */
-		nfs_pageio_complete(pgio);
 		ret = nfs_wait_on_request(req);
 		nfs_release_request(req);
 		if (ret != 0)
@@ -321,6 +319,8 @@ static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc
 		pgio = &mypgio;
 	}
 
+	nfs_pageio_cond_complete(pgio, page->index);
+
 	err = nfs_page_async_flush(pgio, page);
 	if (err <= 0)
 		goto out;
@@ -329,6 +329,8 @@ static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc
 	if (!offset)
 		goto out;
 
+	nfs_pageio_cond_complete(pgio, page->index);
+
 	ctx = nfs_find_open_context(inode, NULL, FMODE_WRITE);
 	if (ctx == NULL) {
 		err = -EBADF;