stapio:stp_main_loop:430 probe_start() returned 0
 0xc016f24e : page_cache_sync_readahead+0x1/0x20 [kernel]
 0xc016f58f : ____pagevec_lru_add+0x8d/0xdb [kernel]
 0xc016f620 : __lru_cache_add+0x43/0x53 [kernel]
 0xc016a23b : find_get_pages+0x8f/0xaf [kernel]
 0xc018d007 : sys_sendfile+0x19/0x7d [kernel]
 0xc018d7c8 : sys_write+0x1d/0x65 [kernel]
 0xc018db3e : __fput+0xee/0x17a [kernel]
 0xc0103b3f : ia32_sysenter_target+0x7b/0x124 [kernel]
===>: page_cache_sync_readahead
 0xc016f24e : page_cache_sync_readahead+0x1/0x20 [kernel]
 0xc016f58f : ____pagevec_lru_add+0x8d/0xdb [kernel]
 0xc016f5fa : __lru_cache_add+0x1d/0x53 [kernel]
 0xc016a276 : find_get_pages_contig+0x1b/0xc2 [kernel]
 0xc018d007 : sys_sendfile+0x19/0x7d [kernel]
 0xc018d7c8 : sys_write+0x1d/0x65 [kernel]
 0xc018db3e : __fput+0xee/0x17a [kernel]
 0xc0103b3f : ia32_sysenter_target+0x7b/0x124 [kernel]
[... the same "===>: page_cache_sync_readahead" header and backtrace repeat five more times ...]
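
For reference, a backtrace dump of this shape can come from a one-line
SystemTap script along these lines (a minimal sketch, not necessarily the
exact script used here; the probe point is taken from the trace above, and
kernel debuginfo must be installed for the symbols to resolve):

probe kernel.function("page_cache_sync_readahead") {
        printf("===>: %s\n", probefunc())
        print_backtrace()
}

probefunc() and print_backtrace() are standard tapset functions; the "%s"
in the format is filled with the name of the probed function.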



/*
 * A minimal readahead algorithm for trivial sequential/random reads.
 */
static unsigned long
ondemand_readahead(struct address_space *mapping,
                   struct file_ra_state *ra, struct file *filp,
                   bool hit_readahead_marker, pgoff_t offset,
                   unsigned long req_size)
{
        int     max = ra->ra_pages;     /* max readahead pages */
        pgoff_t prev_offset;
        int     sequential;

        /*
         * It's the expected callback offset, assume sequential access.
         * Ramp up sizes, and push forward the readahead window.
         */
        if (offset && (offset == (ra->start + ra->size - ra->async_size) ||
                        offset == (ra->start + ra->size))) {
                ra->start += ra->size;
                ra->size = get_next_ra_size(ra, max);
                ra->async_size = ra->size;
                goto readit;
        }

        prev_offset = ra->prev_pos >> PAGE_CACHE_SHIFT;
        sequential = offset - prev_offset <= 1UL || req_size > max;

        /*
         * Standalone, small read.
         * Read as is, and do not pollute the readahead state.
         */
        if (!hit_readahead_marker && !sequential) {
                return __do_page_cache_readahead(mapping, filp,
                                                offset, req_size, 0);
        }

        /*
         * Hit a marked page without valid readahead state.
         * E.g. interleaved reads.
         * Query the pagecache for async_size, which normally equals to
         * readahead size. Ramp it up and use it as the new readahead size.
         */
        if (hit_readahead_marker) {
                pgoff_t start;

                rcu_read_lock();
                start = radix_tree_next_hole(&mapping->page_tree, offset, max+1);
                rcu_read_unlock();

                if (!start || start - offset > max)
                        return 0;

                ra->start = start;
                ra->size = start - offset;      /* old async_size */
                ra->size = get_next_ra_size(ra, max);
                ra->async_size = ra->size;
                goto readit;
        }

        /*
         * It may be one of
         *      - first read on start of file
         *      - sequential cache miss
         *      - oversize random read
         * Start readahead for it.
         */
        ra->start = offset;
        ra->size = get_init_ra_size(req_size, max);
        ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;

readit:
        return ra_submit(ra, mapping, filp);
}
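
To watch the sequential window ramp up in the first branch above, one can
print the readahead state on every invocation. A minimal sketch, assuming
kernel debuginfo and the parameter names from the listing
(ondemand_readahead() is static, so resolving it needs debuginfo):

probe kernel.function("ondemand_readahead") {
        printf("offset=%d req_size=%d start=%d size=%d async=%d\n",
               $offset, $req_size, $ra->start, $ra->size, $ra->async_size)
}

On a steady sequential read this should show ra->start advancing by
ra->size on each hit while ra->size grows via get_next_ra_size() until it
saturates at ra->ra_pages.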


static int
__do_page_cache_readahead(struct address_space *mapping, struct file *filp,
                        pgoff_t offset, unsigned long nr_to_read,
                        unsigned long lookahead_size)
{
        struct inode *inode = mapping->host;
        struct page *page;
        unsigned long end_index;        /* The last page we want to read */
        LIST_HEAD(page_pool);
        int page_idx;
        int ret = 0;
        loff_t isize = i_size_read(inode);

        if (isize == 0)
                goto out;

        end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);

        /*
         * Preallocate as many pages as we will need.
         */
        for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
                pgoff_t page_offset = offset + page_idx;

                if (page_offset > end_index)
                        break;

                rcu_read_lock();
                page = radix_tree_lookup(&mapping->page_tree, page_offset);
                rcu_read_unlock();
                if (page)
                        continue;

                page = page_cache_alloc_cold(mapping);
                if (!page)
                        break;
                page->index = page_offset;
                list_add(&page->lru, &page_pool);
                if (page_idx == nr_to_read - lookahead_size)
                        SetPageReadahead(page);
                ret++;
        }

        /*
         * Now start the IO.  We ignore I/O errors - if the page is not
         * uptodate then the caller will launch readpage again, and
         * will then handle the error.
         */
        if (ret)
                read_pages(mapping, filp, &page_pool, ret);
        BUG_ON(!list_empty(&page_pool));
out:
        return ret;
}
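
Note that the page at index offset + nr_to_read - lookahead_size gets
PG_readahead set; when a later read hits that marked page,
ondemand_readahead() takes the hit_readahead_marker path and ramps the
window up asynchronously. A quick way to see the window sizes actually
requested and how many pages were really queued is an entry/return probe
pair (again a sketch that assumes kernel debuginfo; the parameter names
are those of the listing above):

probe kernel.function("__do_page_cache_readahead") {
        printf("readahead: offset=%d nr=%d lookahead=%d\n",
               $offset, $nr_to_read, $lookahead_size)
}
probe kernel.function("__do_page_cache_readahead").return {
        printf("readahead: queued %d pages\n", $return)
}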



