Author: alc
Date: Thu Jun 18 05:56:24 2009
New Revision: 194425
URL: http://svn.freebsd.org/changeset/base/194425

Log:
  Fix some of the style errors in *getpages().

Modified:
  head/sys/fs/nfsclient/nfs_clbio.c
  head/sys/nfsclient/nfs_bio.c
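
  For reference: the changes below apply the style(9) rule that the value
  in a return statement be enclosed in parentheses, and they also flatten
  an unnecessary nested block scope.  A minimal, hypothetical sketch of the
  return-value convention (the function and variable names are invented for
  illustration and are not taken from the tree):

        #include <errno.h>

        static int
        example_nonneg(int x)
        {

                /* style(9): parenthesize the value in a return statement. */
                if (x < 0)
                        return (EINVAL);        /* rather than "return EINVAL;" */
                return (0);
        }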

Modified: head/sys/fs/nfsclient/nfs_clbio.c
==============================================================================
--- head/sys/fs/nfsclient/nfs_clbio.c   Thu Jun 18 05:56:12 2009        (r194424)
+++ head/sys/fs/nfsclient/nfs_clbio.c   Thu Jun 18 05:56:24 2009        (r194425)
@@ -198,7 +198,7 @@ ncl_getpages(struct vop_getpages_args *a
 
        if ((object = vp->v_object) == NULL) {
                ncl_printf("nfs_getpages: called with non-merged cache 
vnode??\n");
-               return VM_PAGER_ERROR;
+               return (VM_PAGER_ERROR);
        }
 
        if (newnfs_directio_enable && !newnfs_directio_allow_mmap) {
@@ -206,7 +206,7 @@ ncl_getpages(struct vop_getpages_args *a
                if ((np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
                        mtx_unlock(&np->n_mtx);
                        ncl_printf("nfs_getpages: called on non-cacheable 
vnode??\n");
-                       return VM_PAGER_ERROR;
+                       return (VM_PAGER_ERROR);
                } else
                        mtx_unlock(&np->n_mtx);
        }
@@ -227,23 +227,18 @@ ncl_getpages(struct vop_getpages_args *a
         * allow the pager to zero-out the blanks.  Partially valid pages
         * can only occur at the file EOF.
         */
-
-       {
-               vm_page_t m = pages[ap->a_reqpage];
-
-               VM_OBJECT_LOCK(object);
-               if (m->valid != 0) {
-                       vm_page_lock_queues();
-                       for (i = 0; i < npages; ++i) {
-                               if (i != ap->a_reqpage)
-                                       vm_page_free(pages[i]);
-                       }
-                       vm_page_unlock_queues();
-                       VM_OBJECT_UNLOCK(object);
-                       return(0);
+       VM_OBJECT_LOCK(object);
+       if (pages[ap->a_reqpage]->valid != 0) {
+               vm_page_lock_queues();
+               for (i = 0; i < npages; ++i) {
+                       if (i != ap->a_reqpage)
+                               vm_page_free(pages[i]);
                }
+               vm_page_unlock_queues();
                VM_OBJECT_UNLOCK(object);
+               return (0);
        }
+       VM_OBJECT_UNLOCK(object);
 
        /*
         * We use only the kva address for the buffer, but this is extremely
@@ -281,7 +276,7 @@ ncl_getpages(struct vop_getpages_args *a
                }
                vm_page_unlock_queues();
                VM_OBJECT_UNLOCK(object);
-               return VM_PAGER_ERROR;
+               return (VM_PAGER_ERROR);
        }
 
        /*
@@ -347,7 +342,7 @@ ncl_getpages(struct vop_getpages_args *a
        }
        vm_page_unlock_queues();
        VM_OBJECT_UNLOCK(object);
-       return 0;
+       return (0);
 }
 
 /*

Modified: head/sys/nfsclient/nfs_bio.c
==============================================================================
--- head/sys/nfsclient/nfs_bio.c        Thu Jun 18 05:56:12 2009        (r194424)
+++ head/sys/nfsclient/nfs_bio.c        Thu Jun 18 05:56:24 2009        (r194425)
@@ -101,7 +101,7 @@ nfs_getpages(struct vop_getpages_args *a
 
        if ((object = vp->v_object) == NULL) {
                nfs_printf("nfs_getpages: called with non-merged cache 
vnode??\n");
-               return VM_PAGER_ERROR;
+               return (VM_PAGER_ERROR);
        }
 
        if (nfs_directio_enable && !nfs_directio_allow_mmap) {
@@ -109,7 +109,7 @@ nfs_getpages(struct vop_getpages_args *a
                if ((np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
                        mtx_unlock(&np->n_mtx);
                        nfs_printf("nfs_getpages: called on non-cacheable 
vnode??\n");
-                       return VM_PAGER_ERROR;
+                       return (VM_PAGER_ERROR);
                } else
                        mtx_unlock(&np->n_mtx);
        }
@@ -130,23 +130,18 @@ nfs_getpages(struct vop_getpages_args *a
         * allow the pager to zero-out the blanks.  Partially valid pages
         * can only occur at the file EOF.
         */
-
-       {
-               vm_page_t m = pages[ap->a_reqpage];
-
-               VM_OBJECT_LOCK(object);
-               if (m->valid != 0) {
-                       vm_page_lock_queues();
-                       for (i = 0; i < npages; ++i) {
-                               if (i != ap->a_reqpage)
-                                       vm_page_free(pages[i]);
-                       }
-                       vm_page_unlock_queues();
-                       VM_OBJECT_UNLOCK(object);
-                       return(0);
+       VM_OBJECT_LOCK(object);
+       if (pages[ap->a_reqpage]->valid != 0) {
+               vm_page_lock_queues();
+               for (i = 0; i < npages; ++i) {
+                       if (i != ap->a_reqpage)
+                               vm_page_free(pages[i]);
                }
+               vm_page_unlock_queues();
                VM_OBJECT_UNLOCK(object);
+               return (0);
        }
+       VM_OBJECT_UNLOCK(object);
 
        /*
         * We use only the kva address for the buffer, but this is extremely
@@ -184,7 +179,7 @@ nfs_getpages(struct vop_getpages_args *a
                }
                vm_page_unlock_queues();
                VM_OBJECT_UNLOCK(object);
-               return VM_PAGER_ERROR;
+               return (VM_PAGER_ERROR);
        }
 
        /*
@@ -250,7 +245,7 @@ nfs_getpages(struct vop_getpages_args *a
        }
        vm_page_unlock_queues();
        VM_OBJECT_UNLOCK(object);
-       return 0;
+       return (0);
 }
 
 /*