For now, try to grab a huge page cache page in shmem_write_begin() once the
minimum requirements are satisfied, i.e. the write starts at or beyond the
first huge page range of the file (pos >= HPAGE_PMD_SIZE), so most small
files avoid the huge page overhead. shmem_write_end() is taught to zero the
uncopied parts of a huge page before marking it uptodate.
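
As an illustration only (not part of the patch), the heuristic boils down to
a single offset check. A minimal sketch, using a hypothetical helper name:

	/*
	 * Illustration only: mirrors the check added to shmem_write_begin().
	 * want_transhuge_write() is a hypothetical helper, not in this patch.
	 * Writes landing in the first huge page range of the file stay on
	 * small pages; writes at or past HPAGE_PMD_SIZE may request a huge
	 * page via AOP_FLAG_TRANSHUGE.
	 */
	static inline bool want_transhuge_write(loff_t pos)
	{
	#ifdef CONFIG_TRANSPARENT_HUGEPAGE_PAGECACHE
		return pos >= HPAGE_PMD_SIZE;
	#else
		return false;
	#endif
	}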

Signed-off-by: Ning Qu <[email protected]>
---
 mm/shmem.c | 31 ++++++++++++++++++++++++++-----
 1 file changed, 26 insertions(+), 5 deletions(-)

diff --git a/mm/shmem.c b/mm/shmem.c
index 2fc450d..0a423a9 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1640,8 +1640,21 @@ shmem_write_begin(struct file *file, struct address_space *mapping,
        struct inode *inode = mapping->host;
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
        gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
+       int ret = 0;
+       int getpage_flags = 0;
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE_PAGECACHE
+       /*
+        * Do not allocate a huge page in the first huge page range of the
+        * page cache, so that most small files avoid the huge page overhead.
+        */
+       if (pos >= HPAGE_PMD_SIZE)
+               getpage_flags |= AOP_FLAG_TRANSHUGE;
+#endif
+       ret = shmem_getpage(inode, index, pagep, SGP_WRITE, gfp,
+                               getpage_flags, NULL);
 
-       return shmem_getpage(inode, index, pagep, SGP_WRITE, gfp, 0, NULL);
+       return ret;
 }
 
 static int
@@ -1655,10 +1668,18 @@ shmem_write_end(struct file *file, struct address_space *mapping,
                i_size_write(inode, pos + copied);
 
        if (!PageUptodate(page)) {
-               if (copied < PAGE_CACHE_SIZE) {
-                       unsigned from = pos & (PAGE_CACHE_SIZE - 1);
-                       zero_user_segments(page, 0, from,
-                                       from + copied, PAGE_CACHE_SIZE);
+               if (copied < len) {
+                       unsigned from;
+                       if (PageTransHugeCache(page)) {
+                               from = pos & ~HPAGE_PMD_MASK;
+                               zero_huge_user(page, 0, from);
+                               zero_huge_user(page, from + copied,
+                                              HPAGE_PMD_SIZE);
+                       } else {
+                               from = pos & ~PAGE_CACHE_MASK;
+                               zero_user_segments(page, 0, from,
+                                               from + copied, PAGE_CACHE_SIZE);
+                       }
                }
                SetPageUptodate(page);
        }
-- 
1.8.4

