--- 867f9dd82dcde9d5c11f49bde159c3c6a0276c4e
+++ e18a5c100486a3c0f648294ee44f3760aacfafbf
@@ -17,6 +17,7 @@
 #include <linux/task_io_accounting_ops.h>
 #include <linux/pagevec.h>
 #include <linux/pagemap.h>
+#include <linux/swap.h>
 
 /*
  * Initialise a struct file's readahead state. Assumes that the caller has
@@ -107,7 +108,7 @@ int read_cache_pages(struct address_spac
 EXPORT_SYMBOL(read_cache_pages);
 
 static int read_pages(struct address_space *mapping, struct file *filp,
-		struct list_head *pages, unsigned nr_pages)
+		struct list_head *pages, unsigned nr_pages, int tail)
 {
 	struct blk_plug plug;
 	unsigned page_idx;
@@ -125,8 +126,8 @@ static int read_pages(struct address_spa
 	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
 		struct page *page = list_to_page(pages);
 		list_del(&page->lru);
-		if (!add_to_page_cache_lru(page, mapping,
-					page->index, GFP_KERNEL)) {
+		if (!__add_to_page_cache_lru(page, mapping,
+					page->index, GFP_KERNEL, tail)) {
 			mapping->a_ops->readpage(filp, page);
 		}
 		page_cache_release(page);
@@ -139,6 +140,28 @@ out:
 	return ret;
 }
 
+static inline int nr_mapped(void)
+{
+	return global_page_state(NR_FILE_MAPPED) +
+		global_page_state(NR_ANON_PAGES);
+}
+
+/*
+ * This examines how large in pages a file size is and returns 1 if it is
+ * more than half the unmapped ram. Avoid doing read_page_state which is
+ * expensive unless we already know it is likely to be large enough.
+ */
+static int large_isize(unsigned long nr_pages)
+{
+	if (nr_pages * 6 > vm_total_pages) {
+		unsigned long unmapped_ram = vm_total_pages - nr_mapped();
+
+		if (nr_pages * 2 > unmapped_ram)
+			return 1;
+	}
+	return 0;
+}
+
 /*
  * __do_page_cache_readahead() actually reads a chunk of disk. It allocates all
  * the pages first, then submits them all for I/O. This avoids the very bad
@@ -196,7 +219,8 @@ __do_page_cache_readahead(struct address
 	 * will then handle the error.
 	 */
 	if (ret)
-		read_pages(mapping, filp, &page_pool, ret);
+		read_pages(mapping, filp, &page_pool, ret,
+				large_isize(end_index));
 	BUG_ON(!list_empty(&page_pool));
 out:
 	return ret;
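
For illustration only (not part of the patch): the large_isize() heuristic can be exercised in
isolation. The sketch below is plain userspace C that mirrors the arithmetic above;
vm_total_pages and mapped_pages here are stand-ins with made-up values for the kernel's
vm_total_pages and the NR_FILE_MAPPED + NR_ANON_PAGES counters. It returns 1 when a file would
cover more than half of unmapped RAM, and it only consults the (in the kernel, comparatively
expensive) mapped-page counters once the cheap comparison against total RAM has passed.

#include <stdio.h>

/* Stand-ins for kernel state; the values are illustrative only. */
static unsigned long vm_total_pages = 2097152;	/* 8 GB of 4 KB pages */
static unsigned long mapped_pages   = 1048576;	/* NR_FILE_MAPPED + NR_ANON_PAGES, 4 GB */

/*
 * Userspace mirror of large_isize() from the patch: return 1 when the file
 * spans more than half of the unmapped RAM.  The first test is the cheap
 * filter; the subtraction only happens for files that already cover more
 * than a sixth of total RAM.
 */
static int large_isize(unsigned long nr_pages)
{
	if (nr_pages * 6 > vm_total_pages) {
		unsigned long unmapped_ram = vm_total_pages - mapped_pages;

		if (nr_pages * 2 > unmapped_ram)
			return 1;
	}
	return 0;
}

int main(void)
{
	/* 1 GB file: 262144 pages, well under half of the 4 GB unmapped. */
	printf("1 GB file -> tail=%d\n", large_isize(262144));
	/* 3 GB file: 786432 pages, more than half of the 4 GB unmapped. */
	printf("3 GB file -> tail=%d\n", large_isize(786432));
	return 0;
}

The resulting tail flag is handed to __add_to_page_cache_lru(), which is added by a companion
change in this series; the apparent intent is that pages read ahead for such oversized files are
queued at the tail end of the LRU, so a single huge streaming read is reclaimed first instead of
pushing out the existing working set.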