FILESYSTEM: dynamic read-ahead
/mm/readahead.c
blob:286251a4d091da15b8f993b9214bacf4783738e5 -> blob:e18a5c100486a3c0f648294ee44f3760aacfafbf
--- mm/readahead.c
+++ mm/readahead.c
@@ -17,32 +17,7 @@
#include <linux/task_io_accounting_ops.h>
#include <linux/pagevec.h>
#include <linux/pagemap.h>
-
-unsigned long max_readahead_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE;
-
-static int __init readahead(char *str)
-{
-	unsigned long bytes;
-
-	if (!str)
-		return -EINVAL;
-	bytes = memparse(str, &str);
-	if (*str != '\0')
-		return -EINVAL;
-
-	if (bytes) {
-		if (bytes < PAGE_CACHE_SIZE)	/* missed 'k'/'m' suffixes? */
-			return -EINVAL;
-		if (bytes > 256 << 20)		/* limit to 256MB */
-			bytes = 256 << 20;
-	}
-
-	max_readahead_pages = bytes / PAGE_CACHE_SIZE;
-	default_backing_dev_info.ra_pages = max_readahead_pages;
-	return 0;
-}
-
-early_param("readahead", readahead);
+#include <linux/swap.h>
/*
* Initialise a struct file's readahead state. Assumes that the caller has
@@ -133,7 +108,7 @@ int read_cache_pages(struct address_spac
EXPORT_SYMBOL(read_cache_pages);
static int read_pages(struct address_space *mapping, struct file *filp,
-		struct list_head *pages, unsigned nr_pages)
+		struct list_head *pages, unsigned nr_pages, int tail)
{
	struct blk_plug plug;
	unsigned page_idx;
@@ -151,8 +126,8 @@ static int read_pages(struct address_spa
	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_to_page(pages);
		list_del(&page->lru);
-		if (!add_to_page_cache_lru(page, mapping,
-					page->index, GFP_KERNEL)) {
+		if (!__add_to_page_cache_lru(page, mapping,
+					page->index, GFP_KERNEL, tail)) {
			mapping->a_ops->readpage(filp, page);
		}
		page_cache_release(page);
@@ -165,6 +140,28 @@ out:
	return ret;
}
+static inline int nr_mapped(void)
+{
+	return global_page_state(NR_FILE_MAPPED) +
+		global_page_state(NR_ANON_PAGES);
+}
+
+/*
+ * Check whether a file, sized in pages, is more than half the unmapped ram
+ * and return 1 if so. Compare against total ram first so that the expensive
+ * global_page_state() counters are only read when it looks large enough.
+ */
+static int large_isize(unsigned long nr_pages)
+{
+	if (nr_pages * 6 > vm_total_pages) {
+		unsigned long unmapped_ram = vm_total_pages - nr_mapped();
+
+		if (nr_pages * 2 > unmapped_ram)
+			return 1;
+	}
+	return 0;
+}
+
/*
* __do_page_cache_readahead() actually reads a chunk of disk. It allocates all
* the pages first, then submits them all for I/O. This avoids the very bad
@@ -222,7 +219,8 @@ __do_page_cache_readahead(struct address
* will then handle the error.
*/
	if (ret)
-		read_pages(mapping, filp, &page_pool, ret);
+		read_pages(mapping, filp, &page_pool, ret,
+				large_isize(end_index));
	BUG_ON(!list_empty(&page_pool));
out:
	return ret;
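
The large_isize() heuristic can be exercised in isolation. Below is a minimal userspace sketch, not part of the patch: vm_total_pages and the NR_FILE_MAPPED/NR_ANON_PAGES counters are replaced by plain parameters (total_pages, mapped_pages), and the numbers in main() are made-up example values assuming 4 KiB pages. In the patch itself, a file that trips this check has its readahead pages handed to __add_to_page_cache_lru() with tail set, presumably so a huge streaming read is queued at the tail of the LRU instead of displacing the existing working set.

#include <stdio.h>

/*
 * Userspace model of large_isize() above: returns 1 if a file of
 * file_pages pages is more than half of the unmapped ram. The "* 6"
 * comparison mirrors the cheap pre-filter: only files bigger than a
 * sixth of total ram go on to compute the unmapped total at all.
 */
static int large_isize_sim(unsigned long file_pages,
			   unsigned long total_pages,
			   unsigned long mapped_pages)
{
	if (file_pages * 6 > total_pages) {
		unsigned long unmapped = total_pages - mapped_pages;

		if (file_pages * 2 > unmapped)
			return 1;
	}
	return 0;
}

int main(void)
{
	unsigned long total = 524288;	/* 2 GiB of ram in 4 KiB pages */
	unsigned long mapped = 131072;	/* 512 MiB of it mapped */

	/* 100 MiB file: under a sixth of ram, rejected by the cheap test. */
	printf("100 MiB -> %d\n", large_isize_sim(25600, total, mapped));
	/* 900 MiB file: more than half of the ~1.5 GiB unmapped ram -> 1. */
	printf("900 MiB -> %d\n", large_isize_sim(230400, total, mapped));
	return 0;
}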