Gitweb:     http://git.kernel.org/git/?p=linux/kernel/git/torvalds/linux-2.6.git;a=commit;h=4a9e5ef1f4f15205e477817a5cefc34bd3f65f55
Commit:     4a9e5ef1f4f15205e477817a5cefc34bd3f65f55
Parent:     eb2be189317d031895b5ca534fbf735eb546158b
Author:     Nick Piggin <[EMAIL PROTECTED]>
AuthorDate: Tue Oct 16 01:24:58 2007 -0700
Committer:  Linus Torvalds <[EMAIL PROTECTED]>
CommitDate: Tue Oct 16 09:42:54 2007 -0700

    mm: write iovec cleanup
    
    Hide some of the open-coded nr_segs tests into the iovec helpers.  This is all
    to simplify generic_file_buffered_write, because that gets more complex in the
    next patch.
    
    Signed-off-by: Nick Piggin <[EMAIL PROTECTED]>
    Signed-off-by: Andrew Morton <[EMAIL PROTECTED]>
    Signed-off-by: Linus Torvalds <[EMAIL PROTECTED]>
---
 mm/filemap.c     |   36 +++-------
 mm/filemap.h     |  206 +++++++++++++++++++++++++++---------------------------
 mm/filemap_xip.c |   17 ++---
 3 files changed, 120 insertions(+), 139 deletions(-)

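As background for the cleanup described above, here is a minimal userspace sketch (not kernel code; the function and variable names are illustrative only) of the calling convention the patch moves to: the caller always passes the full (iov, nr_segs, base) triple, and the helper hides the nr_segs == 1 fast path internally instead of every caller open-coding the test.

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/uio.h>

    /*
     * Copy 'bytes' bytes out of an iovec array into dst, starting 'base'
     * bytes into the first segment.  Mirrors the shape of the consolidated
     * helpers: the single-segment fast path lives inside the function, not
     * in the caller.  Assumes the iovec actually covers 'bytes' bytes.
     */
    static size_t copy_from_iovec(char *dst, const struct iovec *iov,
                                  unsigned long nr_segs, size_t base, size_t bytes)
    {
            size_t copied = 0;

            if (nr_segs == 1) {
                    /* fast path: one segment, plain copy from the base offset */
                    memcpy(dst, (char *)iov->iov_base + base, bytes);
                    return bytes;
            }

            while (copied < bytes) {
                    /* generic path: walk segments, crossing boundaries as needed */
                    size_t chunk = iov->iov_len - base;

                    if (chunk > bytes - copied)
                            chunk = bytes - copied;
                    memcpy(dst + copied, (char *)iov->iov_base + base, chunk);
                    copied += chunk;
                    base += chunk;
                    if (base == iov->iov_len) {
                            iov++;
                            base = 0;
                    }
            }
            return copied;
    }

    int main(void)
    {
            char a[] = "hello ", b[] = "world";
            struct iovec iov[2] = {
                    { .iov_base = a, .iov_len = 6 },
                    { .iov_base = b, .iov_len = 5 },
            };
            char out[16] = "";

            copy_from_iovec(out, iov, 2, 0, 11);
            printf("%s\n", out);            /* prints "hello world" */
            return 0;
    }

In the kernel patch the same idea shows up in both filemap_copy_from_user_atomic and filemap_copy_from_user: each takes nr_segs and picks the fast or generic copy internally, so generic_file_buffered_write no longer branches on the segment count.
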
diff --git a/mm/filemap.c b/mm/filemap.c
index fb4c1c0..c59d5b3 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1823,12 +1823,7 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
        /*
         * handle partial DIO write.  Adjust cur_iov if needed.
         */
-       if (likely(nr_segs == 1))
-               buf = iov->iov_base + written;
-       else {
-               filemap_set_next_iovec(&cur_iov, &iov_offset, written);
-               buf = cur_iov->iov_base + iov_offset;
-       }
+       filemap_set_next_iovec(&cur_iov, nr_segs, &iov_offset, written);
 
        do {
                struct page *page;
@@ -1838,6 +1833,7 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
                size_t bytes;           /* Bytes to write to page */
                size_t copied;          /* Bytes copied from user */
 
+               buf = cur_iov->iov_base + iov_offset;
                offset = (pos & (PAGE_CACHE_SIZE - 1));
                index = pos >> PAGE_CACHE_SHIFT;
                bytes = PAGE_CACHE_SIZE - offset;
@@ -1869,13 +1865,10 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
                if (unlikely(status))
                        goto fs_write_aop_error;
 
-               if (likely(nr_segs == 1))
-                       copied = filemap_copy_from_user(page, offset,
-                                                       buf, bytes);
-               else
-                       copied = filemap_copy_from_user_iovec(page, offset,
-                                               cur_iov, iov_offset, bytes);
+               copied = filemap_copy_from_user(page, offset,
+                                       cur_iov, nr_segs, iov_offset, bytes);
                flush_dcache_page(page);
+
                status = a_ops->commit_write(file, page, offset, offset+bytes);
                if (unlikely(status < 0 || status == AOP_TRUNCATED_PAGE))
                        goto fs_write_aop_error;
@@ -1886,20 +1879,11 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
                if (unlikely(status > 0)) /* filesystem did partial write */
                        copied = status;
 
-               if (likely(copied > 0)) {
-                       written += copied;
-                       count -= copied;
-                       pos += copied;
-                       buf += copied;
-                       if (unlikely(nr_segs > 1)) {
-                               filemap_set_next_iovec(&cur_iov,
-                                               &iov_offset, copied);
-                               if (count)
-                                       buf = cur_iov->iov_base + iov_offset;
-                       } else {
-                               iov_offset += copied;
-                       }
-               }
+               written += copied;
+               count -= copied;
+               pos += copied;
+               filemap_set_next_iovec(&cur_iov, nr_segs, &iov_offset, copied);
+
                unlock_page(page);
                mark_page_accessed(page);
                page_cache_release(page);
diff --git a/mm/filemap.h b/mm/filemap.h
dissimilarity index 67%
index a1e10a2..b500d93 100644
--- a/mm/filemap.h
+++ b/mm/filemap.h
@@ -1,103 +1,103 @@
-/*
- *     linux/mm/filemap.h
- *
- * Copyright (C) 1994-1999  Linus Torvalds
- */
-
-#ifndef __FILEMAP_H
-#define __FILEMAP_H
-
-#include <linux/types.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/highmem.h>
-#include <linux/uio.h>
-#include <linux/uaccess.h>
-
-size_t
-__filemap_copy_from_user_iovec_inatomic(char *vaddr,
-                                       const struct iovec *iov,
-                                       size_t base,
-                                       size_t bytes);
-
-/*
- * Copy as much as we can into the page and return the number of bytes which
- * were sucessfully copied.  If a fault is encountered then clear the page
- * out to (offset+bytes) and return the number of bytes which were copied.
- *
- * NOTE: For this to work reliably we really want copy_from_user_inatomic_nocache
- * to *NOT* zero any tail of the buffer that it failed to copy.  If it does,
- * and if the following non-atomic copy succeeds, then there is a small window
- * where the target page contains neither the data before the write, nor the
- * data after the write (it contains zero).  A read at this time will see
- * data that is inconsistent with any ordering of the read and the write.
- * (This has been detected in practice).
- */
-static inline size_t
-filemap_copy_from_user(struct page *page, unsigned long offset,
-                       const char __user *buf, unsigned bytes)
-{
-       char *kaddr;
-       int left;
-
-       kaddr = kmap_atomic(page, KM_USER0);
-       left = __copy_from_user_inatomic_nocache(kaddr + offset, buf, bytes);
-       kunmap_atomic(kaddr, KM_USER0);
-
-       if (left != 0) {
-               /* Do it the slow way */
-               kaddr = kmap(page);
-               left = __copy_from_user_nocache(kaddr + offset, buf, bytes);
-               kunmap(page);
-       }
-       return bytes - left;
-}
-
-/*
- * This has the same sideeffects and return value as filemap_copy_from_user().
- * The difference is that on a fault we need to memset the remainder of the
- * page (out to offset+bytes), to emulate filemap_copy_from_user()'s
- * single-segment behaviour.
- */
-static inline size_t
-filemap_copy_from_user_iovec(struct page *page, unsigned long offset,
-                       const struct iovec *iov, size_t base, size_t bytes)
-{
-       char *kaddr;
-       size_t copied;
-
-       kaddr = kmap_atomic(page, KM_USER0);
-       copied = __filemap_copy_from_user_iovec_inatomic(kaddr + offset, iov,
-                                                        base, bytes);
-       kunmap_atomic(kaddr, KM_USER0);
-       if (copied != bytes) {
-               kaddr = kmap(page);
-               copied = __filemap_copy_from_user_iovec_inatomic(kaddr + offset, iov,
-                                                                base, bytes);
-               if (bytes - copied)
-                       memset(kaddr + offset + copied, 0, bytes - copied);
-               kunmap(page);
-       }
-       return copied;
-}
-
-static inline void
-filemap_set_next_iovec(const struct iovec **iovp, size_t *basep, size_t bytes)
-{
-       const struct iovec *iov = *iovp;
-       size_t base = *basep;
-
-       while (bytes) {
-               int copy = min(bytes, iov->iov_len - base);
-
-               bytes -= copy;
-               base += copy;
-               if (iov->iov_len == base) {
-                       iov++;
-                       base = 0;
-               }
-       }
-       *iovp = iov;
-       *basep = base;
-}
-#endif
+/*
+ *     linux/mm/filemap.h
+ *
+ * Copyright (C) 1994-1999  Linus Torvalds
+ */
+
+#ifndef __FILEMAP_H
+#define __FILEMAP_H
+
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/uio.h>
+#include <linux/uaccess.h>
+
+size_t
+__filemap_copy_from_user_iovec_inatomic(char *vaddr,
+                                       const struct iovec *iov,
+                                       size_t base,
+                                       size_t bytes);
+
+/*
+ * Copy as much as we can into the page and return the number of bytes which
+ * were sucessfully copied.  If a fault is encountered then return the number of
+ * bytes which were copied.
+ */
+static inline size_t
+filemap_copy_from_user_atomic(struct page *page, unsigned long offset,
+                       const struct iovec *iov, unsigned long nr_segs,
+                       size_t base, size_t bytes)
+{
+       char *kaddr;
+       size_t copied;
+
+       kaddr = kmap_atomic(page, KM_USER0);
+       if (likely(nr_segs == 1)) {
+               int left;
+               char __user *buf = iov->iov_base + base;
+               left = __copy_from_user_inatomic_nocache(kaddr + offset,
+                                                       buf, bytes);
+               copied = bytes - left;
+       } else {
+               copied = __filemap_copy_from_user_iovec_inatomic(kaddr + offset,
+                                                       iov, base, bytes);
+       }
+       kunmap_atomic(kaddr, KM_USER0);
+
+       return copied;
+}
+
+/*
+ * This has the same sideeffects and return value as
+ * filemap_copy_from_user_atomic().
+ * The difference is that it attempts to resolve faults.
+ */
+static inline size_t
+filemap_copy_from_user(struct page *page, unsigned long offset,
+                       const struct iovec *iov, unsigned long nr_segs,
+                        size_t base, size_t bytes)
+{
+       char *kaddr;
+       size_t copied;
+
+       kaddr = kmap(page);
+       if (likely(nr_segs == 1)) {
+               int left;
+               char __user *buf = iov->iov_base + base;
+               left = __copy_from_user_nocache(kaddr + offset, buf, bytes);
+               copied = bytes - left;
+       } else {
+               copied = __filemap_copy_from_user_iovec_inatomic(kaddr + offset,
+                                                       iov, base, bytes);
+       }
+       kunmap(page);
+       return copied;
+}
+
+static inline void
+filemap_set_next_iovec(const struct iovec **iovp, unsigned long nr_segs,
+                                                size_t *basep, size_t bytes)
+{
+       if (likely(nr_segs == 1)) {
+               *basep += bytes;
+       } else {
+               const struct iovec *iov = *iovp;
+               size_t base = *basep;
+
+               while (bytes) {
+                       int copy = min(bytes, iov->iov_len - base);
+
+                       bytes -= copy;
+                       base += copy;
+                       if (iov->iov_len == base) {
+                               iov++;
+                               base = 0;
+                       }
+               }
+               *iovp = iov;
+               *basep = base;
+       }
+}
+#endif
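
To make the behaviour of the reworked filemap_set_next_iovec above concrete, here is a small userspace sketch of the same cursor-advance logic (again, the names are illustrative only, not taken from the kernel tree): with a single segment it simply bumps the offset, otherwise it walks the segments until the requested number of bytes has been consumed, crossing segment boundaries as it goes.

    #include <stddef.h>
    #include <stdio.h>
    #include <sys/uio.h>

    /* Advance the (iov, base) cursor by 'bytes', same shape as the helper above. */
    static void advance_iovec(const struct iovec **iovp, unsigned long nr_segs,
                              size_t *basep, size_t bytes)
    {
            if (nr_segs == 1) {
                    *basep += bytes;        /* single segment: just bump the offset */
                    return;
            }
            while (bytes) {
                    size_t step = (*iovp)->iov_len - *basep;

                    if (step > bytes)
                            step = bytes;
                    bytes -= step;
                    *basep += step;
                    if (*basep == (*iovp)->iov_len) {
                            (*iovp)++;      /* crossed a segment boundary */
                            *basep = 0;
                    }
            }
    }

    int main(void)
    {
            char s0[4], s1[8], s2[2];
            struct iovec vec[3] = {
                    { .iov_base = s0, .iov_len = sizeof(s0) },
                    { .iov_base = s1, .iov_len = sizeof(s1) },
                    { .iov_base = s2, .iov_len = sizeof(s2) },
            };
            const struct iovec *cur = vec;
            size_t base = 0;

            advance_iovec(&cur, 3, &base, 6);       /* 4 bytes from s0, 2 into s1 */
            printf("segment %td, offset %zu\n", cur - vec, base);   /* 1, 2 */
            advance_iovec(&cur, 3, &base, 6);       /* finishes s1, lands on s2 */
            printf("segment %td, offset %zu\n", cur - vec, base);   /* 2, 0 */
            return 0;
    }

generic_file_buffered_write uses exactly this pattern: after each copy it calls filemap_set_next_iovec(&cur_iov, nr_segs, &iov_offset, copied), then recomputes buf from cur_iov->iov_base + iov_offset at the top of the loop.
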
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 53ee6a2..32132f3 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -15,7 +15,6 @@
 #include <linux/rmap.h>
 #include <linux/sched.h>
 #include <asm/tlbflush.h>
-#include "filemap.h"
 
 /*
  * We do use our own empty page to avoid interference with other users
@@ -288,6 +287,7 @@ __xip_file_write(struct file *filp, const char __user *buf,
                unsigned long index;
                unsigned long offset;
                size_t copied;
+               char *kaddr;
 
                offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
                index = pos >> PAGE_CACHE_SHIFT;
@@ -295,14 +295,6 @@ __xip_file_write(struct file *filp, const char __user *buf,
                if (bytes > count)
                        bytes = count;
 
-               /*
-                * Bring in the user page that we will copy from _first_.
-                * Otherwise there's a nasty deadlock on copying from the
-                * same page as we're writing to, without it being marked
-                * up-to-date.
-                */
-               fault_in_pages_readable(buf, bytes);
-
                page = a_ops->get_xip_page(mapping,
                                           index*(PAGE_SIZE/512), 0);
                if (IS_ERR(page) && (PTR_ERR(page) == -ENODATA)) {
@@ -319,8 +311,13 @@ __xip_file_write(struct file *filp, const char __user *buf,
                        break;
                }
 
-               copied = filemap_copy_from_user(page, offset, buf, bytes);
+               fault_in_pages_readable(buf, bytes);
+               kaddr = kmap_atomic(page, KM_USER0);
+               copied = bytes -
+                       __copy_from_user_inatomic_nocache(kaddr, buf, bytes);
+               kunmap_atomic(kaddr, KM_USER0);
                flush_dcache_page(page);
+
                if (likely(copied > 0)) {
                        status = copied;
 