NFS: Ensure that writepage respects the nonblock flag
author Trond Myklebust <Trond.Myklebust@netapp.com>
Fri, 30 Jul 2010 19:31:57 +0000 (15:31 -0400)
committer Trond Myklebust <Trond.Myklebust@netapp.com>
Fri, 30 Jul 2010 19:38:56 +0000 (15:38 -0400)
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
fs/nfs/write.c

index 0a6c65a1f9d786c6116c2698cde922ae887e8dd5..bb72ad34d51dd37fcf32a346b103c1db54a6a227 100644
@@ -222,7 +222,7 @@ static void nfs_end_page_writeback(struct page *page)
                clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
 }
 
-static struct nfs_page *nfs_find_and_lock_request(struct page *page)
+static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblock)
 {
        struct inode *inode = page->mapping->host;
        struct nfs_page *req;
@@ -241,7 +241,10 @@ static struct nfs_page *nfs_find_and_lock_request(struct page *page)
                 *       request as dirty (in which case we don't care).
                 */
                spin_unlock(&inode->i_lock);
-               ret = nfs_wait_on_request(req);
+               if (!nonblock)
+                       ret = nfs_wait_on_request(req);
+               else
+                       ret = -EAGAIN;
                nfs_release_request(req);
                if (ret != 0)
                        return ERR_PTR(ret);
@@ -256,12 +259,12 @@ static struct nfs_page *nfs_find_and_lock_request(struct page *page)
  * May return an error if the user signalled nfs_wait_on_request().
  */
 static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
-                               struct page *page)
+                               struct page *page, bool nonblock)
 {
        struct nfs_page *req;
        int ret = 0;
 
-       req = nfs_find_and_lock_request(page);
+       req = nfs_find_and_lock_request(page, nonblock);
        if (!req)
                goto out;
        ret = PTR_ERR(req);
@@ -283,12 +286,20 @@ out:
 static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
 {
        struct inode *inode = page->mapping->host;
+       int ret;
 
        nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
        nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
 
        nfs_pageio_cond_complete(pgio, page->index);
-       return nfs_page_async_flush(pgio, page);
+       ret = nfs_page_async_flush(pgio, page,
+                       wbc->sync_mode == WB_SYNC_NONE ||
+                       wbc->nonblocking != 0);
+       if (ret == -EAGAIN) {
+               redirty_page_for_writepage(wbc, page);
+               ret = 0;
+       }
+       return ret;
 }
 
 /*
@@ -1546,7 +1557,7 @@ int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
 
        nfs_fscache_release_page(page, GFP_KERNEL);
 
-       req = nfs_find_and_lock_request(page);
+       req = nfs_find_and_lock_request(page, false);
        ret = PTR_ERR(req);
        if (IS_ERR(req))
                goto out;
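
The control flow the patch introduces reduces to a common pattern: a lookup
helper takes a nonblock flag, returns -EAGAIN instead of sleeping when the
request is busy, and the writepage caller treats -EAGAIN as "redirty and try
again on a later pass" rather than as a hard error. The following is a
hypothetical, self-contained user-space C sketch of that pattern, not kernel
code: the names find_and_lock_request, do_writepage, wait_on_request and
struct request only echo the functions and structures touched by the hunks
above and are stand-ins, not NFS APIs.

    /*
     * Sketch of the nonblocking lookup pattern used by this patch.
     * All names are hypothetical stand-ins for the kernel symbols.
     */
    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct request {
        bool busy;      /* stands in for an nfs_page with writeback in flight */
    };

    /* Pretend to sleep until the in-flight request completes (blocking path). */
    static int wait_on_request(struct request *req)
    {
        req->busy = false;
        return 0;
    }

    /* Mirrors the shape of nfs_find_and_lock_request() after the patch. */
    static int find_and_lock_request(struct request *req, bool nonblock)
    {
        if (req->busy) {
            if (!nonblock)
                return wait_on_request(req);   /* blocking writeback: wait it out */
            return -EAGAIN;                    /* nonblocking writeback: bail out */
        }
        return 0;
    }

    /* Mirrors the shape of nfs_do_writepage(): -EAGAIN means "redirty, retry later". */
    static int do_writepage(struct request *req, bool nonblock)
    {
        int ret = find_and_lock_request(req, nonblock);

        if (ret == -EAGAIN) {
            printf("busy: redirtying page for a later writeback pass\n");
            return 0;                          /* not an error for the caller */
        }
        return ret;
    }

    int main(void)
    {
        struct request req = { .busy = true };

        do_writepage(&req, true);   /* background (WB_SYNC_NONE-like) pass: skips */
        do_writepage(&req, false);  /* data-integrity pass: waits for the request */
        return 0;
    }

In the patch itself the nonblock decision is derived from the
writeback_control (WB_SYNC_NONE or wbc->nonblocking), while the migration
path in nfs_migrate_page() passes false and keeps its blocking behaviour.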