Commit a0b8cab3 authored by Mel Gorman, committed by Linus Torvalds

mm: remove lru parameter from __pagevec_lru_add and remove parts of pagevec API

Now that the LRU to add a page to is decided at LRU-add time, remove the
misleading lru parameter from __pagevec_lru_add.  A consequence of this
is that the pagevec_lru_add_file, pagevec_lru_add_anon and similar
helpers are misleading as the caller no longer has direct control over
what LRU the page is added to.  Unused helpers are removed by this patch
and existing users of pagevec_lru_add_file() are converted to use
lru_cache_add_file() directly and use the per-cpu pagevecs instead of
creating their own pagevec.
Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: Rik van Riel <riel@redhat.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Alexey Lyahkov <alexey.lyashkov@gmail.com>
Cc: Andrew Perepechko <anserper@ya.ru>
Cc: Robin Dong <sanbai@taobao.com>
Cc: Theodore Tso <tytso@mit.edu>
Cc: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Bernd Schubert <bernd.schubert@fastmail.fm>
Cc: David Howells <dhowells@redhat.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 059285a2
...@@ -12,6 +12,7 @@ ...@@ -12,6 +12,7 @@
#include <linux/mount.h> #include <linux/mount.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/file.h> #include <linux/file.h>
#include <linux/swap.h>
#include "internal.h" #include "internal.h"
/* /*
...@@ -227,8 +228,7 @@ static void cachefiles_read_copier(struct fscache_operation *_op) ...@@ -227,8 +228,7 @@ static void cachefiles_read_copier(struct fscache_operation *_op)
*/ */
static int cachefiles_read_backing_file_one(struct cachefiles_object *object, static int cachefiles_read_backing_file_one(struct cachefiles_object *object,
struct fscache_retrieval *op, struct fscache_retrieval *op,
struct page *netpage, struct page *netpage)
struct pagevec *pagevec)
{ {
struct cachefiles_one_read *monitor; struct cachefiles_one_read *monitor;
struct address_space *bmapping; struct address_space *bmapping;
...@@ -237,8 +237,6 @@ static int cachefiles_read_backing_file_one(struct cachefiles_object *object, ...@@ -237,8 +237,6 @@ static int cachefiles_read_backing_file_one(struct cachefiles_object *object,
_enter(""); _enter("");
pagevec_reinit(pagevec);
_debug("read back %p{%lu,%d}", _debug("read back %p{%lu,%d}",
netpage, netpage->index, page_count(netpage)); netpage, netpage->index, page_count(netpage));
...@@ -283,9 +281,7 @@ installed_new_backing_page: ...@@ -283,9 +281,7 @@ installed_new_backing_page:
backpage = newpage; backpage = newpage;
newpage = NULL; newpage = NULL;
page_cache_get(backpage); lru_cache_add_file(backpage);
pagevec_add(pagevec, backpage);
__pagevec_lru_add_file(pagevec);
read_backing_page: read_backing_page:
ret = bmapping->a_ops->readpage(NULL, backpage); ret = bmapping->a_ops->readpage(NULL, backpage);
...@@ -452,8 +448,7 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op, ...@@ -452,8 +448,7 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
if (block) { if (block) {
/* submit the apparently valid page to the backing fs to be /* submit the apparently valid page to the backing fs to be
* read from disk */ * read from disk */
ret = cachefiles_read_backing_file_one(object, op, page, ret = cachefiles_read_backing_file_one(object, op, page);
&pagevec);
} else if (cachefiles_has_space(cache, 0, 1) == 0) { } else if (cachefiles_has_space(cache, 0, 1) == 0) {
/* there's space in the cache we can use */ /* there's space in the cache we can use */
fscache_mark_page_cached(op, page); fscache_mark_page_cached(op, page);
...@@ -482,14 +477,11 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object, ...@@ -482,14 +477,11 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
{ {
struct cachefiles_one_read *monitor = NULL; struct cachefiles_one_read *monitor = NULL;
struct address_space *bmapping = object->backer->d_inode->i_mapping; struct address_space *bmapping = object->backer->d_inode->i_mapping;
struct pagevec lru_pvec;
struct page *newpage = NULL, *netpage, *_n, *backpage = NULL; struct page *newpage = NULL, *netpage, *_n, *backpage = NULL;
int ret = 0; int ret = 0;
_enter(""); _enter("");
pagevec_init(&lru_pvec, 0);
list_for_each_entry_safe(netpage, _n, list, lru) { list_for_each_entry_safe(netpage, _n, list, lru) {
list_del(&netpage->lru); list_del(&netpage->lru);
...@@ -534,9 +526,7 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object, ...@@ -534,9 +526,7 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
backpage = newpage; backpage = newpage;
newpage = NULL; newpage = NULL;
page_cache_get(backpage); lru_cache_add_file(backpage);
if (!pagevec_add(&lru_pvec, backpage))
__pagevec_lru_add_file(&lru_pvec);
reread_backing_page: reread_backing_page:
ret = bmapping->a_ops->readpage(NULL, backpage); ret = bmapping->a_ops->readpage(NULL, backpage);
...@@ -559,9 +549,7 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object, ...@@ -559,9 +549,7 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
goto nomem; goto nomem;
} }
page_cache_get(netpage); lru_cache_add_file(netpage);
if (!pagevec_add(&lru_pvec, netpage))
__pagevec_lru_add_file(&lru_pvec);
/* install a monitor */ /* install a monitor */
page_cache_get(netpage); page_cache_get(netpage);
...@@ -643,9 +631,7 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object, ...@@ -643,9 +631,7 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
fscache_mark_page_cached(op, netpage); fscache_mark_page_cached(op, netpage);
page_cache_get(netpage); lru_cache_add_file(netpage);
if (!pagevec_add(&lru_pvec, netpage))
__pagevec_lru_add_file(&lru_pvec);
/* the netpage is unlocked and marked up to date here */ /* the netpage is unlocked and marked up to date here */
fscache_end_io(op, netpage, 0); fscache_end_io(op, netpage, 0);
...@@ -661,8 +647,6 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object, ...@@ -661,8 +647,6 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
out: out:
/* tidy up */ /* tidy up */
pagevec_lru_add_file(&lru_pvec);
if (newpage) if (newpage)
page_cache_release(newpage); page_cache_release(newpage);
if (netpage) if (netpage)
......
...@@ -33,6 +33,7 @@ ...@@ -33,6 +33,7 @@
#include <linux/pagevec.h> #include <linux/pagevec.h>
#include <linux/namei.h> #include <linux/namei.h>
#include <linux/mount.h> #include <linux/mount.h>
#include <linux/swap.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/kmemleak.h> #include <linux/kmemleak.h>
#include <linux/xattr.h> #include <linux/xattr.h>
...@@ -1758,7 +1759,6 @@ EXPORT_SYMBOL_GPL(nfs_unlink); ...@@ -1758,7 +1759,6 @@ EXPORT_SYMBOL_GPL(nfs_unlink);
*/ */
int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{ {
struct pagevec lru_pvec;
struct page *page; struct page *page;
char *kaddr; char *kaddr;
struct iattr attr; struct iattr attr;
...@@ -1798,11 +1798,8 @@ int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) ...@@ -1798,11 +1798,8 @@ int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
* No big deal if we can't add this page to the page cache here. * No big deal if we can't add this page to the page cache here.
* READLINK will get the missing page from the server if needed. * READLINK will get the missing page from the server if needed.
*/ */
pagevec_init(&lru_pvec, 0); if (!add_to_page_cache_lru(page, dentry->d_inode->i_mapping, 0,
if (!add_to_page_cache(page, dentry->d_inode->i_mapping, 0,
GFP_KERNEL)) { GFP_KERNEL)) {
pagevec_add(&lru_pvec, page);
pagevec_lru_add_file(&lru_pvec);
SetPageUptodate(page); SetPageUptodate(page);
unlock_page(page); unlock_page(page);
} else } else
......
...@@ -21,7 +21,7 @@ struct pagevec { ...@@ -21,7 +21,7 @@ struct pagevec {
}; };
void __pagevec_release(struct pagevec *pvec); void __pagevec_release(struct pagevec *pvec);
void __pagevec_lru_add(struct pagevec *pvec, enum lru_list lru); void __pagevec_lru_add(struct pagevec *pvec);
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping, unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
pgoff_t start, unsigned nr_pages); pgoff_t start, unsigned nr_pages);
unsigned pagevec_lookup_tag(struct pagevec *pvec, unsigned pagevec_lookup_tag(struct pagevec *pvec,
...@@ -64,36 +64,4 @@ static inline void pagevec_release(struct pagevec *pvec) ...@@ -64,36 +64,4 @@ static inline void pagevec_release(struct pagevec *pvec)
__pagevec_release(pvec); __pagevec_release(pvec);
} }
static inline void __pagevec_lru_add_anon(struct pagevec *pvec)
{
__pagevec_lru_add(pvec, LRU_INACTIVE_ANON);
}
static inline void __pagevec_lru_add_active_anon(struct pagevec *pvec)
{
__pagevec_lru_add(pvec, LRU_ACTIVE_ANON);
}
static inline void __pagevec_lru_add_file(struct pagevec *pvec)
{
__pagevec_lru_add(pvec, LRU_INACTIVE_FILE);
}
static inline void __pagevec_lru_add_active_file(struct pagevec *pvec)
{
__pagevec_lru_add(pvec, LRU_ACTIVE_FILE);
}
static inline void pagevec_lru_add_file(struct pagevec *pvec)
{
if (pagevec_count(pvec))
__pagevec_lru_add_file(pvec);
}
static inline void pagevec_lru_add_anon(struct pagevec *pvec)
{
if (pagevec_count(pvec))
__pagevec_lru_add_anon(pvec);
}
#endif /* _LINUX_PAGEVEC_H */ #endif /* _LINUX_PAGEVEC_H */
...@@ -505,7 +505,7 @@ void __lru_cache_add(struct page *page, enum lru_list lru) ...@@ -505,7 +505,7 @@ void __lru_cache_add(struct page *page, enum lru_list lru)
page_cache_get(page); page_cache_get(page);
if (!pagevec_space(pvec)) if (!pagevec_space(pvec))
__pagevec_lru_add(pvec, lru); __pagevec_lru_add(pvec);
pagevec_add(pvec, page); pagevec_add(pvec, page);
put_cpu_var(lru_add_pvec); put_cpu_var(lru_add_pvec);
} }
...@@ -628,7 +628,7 @@ void lru_add_drain_cpu(int cpu) ...@@ -628,7 +628,7 @@ void lru_add_drain_cpu(int cpu)
struct pagevec *pvec = &per_cpu(lru_add_pvec, cpu); struct pagevec *pvec = &per_cpu(lru_add_pvec, cpu);
if (pagevec_count(pvec)) if (pagevec_count(pvec))
__pagevec_lru_add(pvec, NR_LRU_LISTS); __pagevec_lru_add(pvec);
pvec = &per_cpu(lru_rotate_pvecs, cpu); pvec = &per_cpu(lru_rotate_pvecs, cpu);
if (pagevec_count(pvec)) { if (pagevec_count(pvec)) {
...@@ -832,12 +832,10 @@ void lru_add_page_tail(struct page *page, struct page *page_tail, ...@@ -832,12 +832,10 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec, static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
void *arg) void *arg)
{ {
enum lru_list requested_lru = (enum lru_list)arg;
int file = page_is_file_cache(page); int file = page_is_file_cache(page);
int active = PageActive(page); int active = PageActive(page);
enum lru_list lru = page_lru(page); enum lru_list lru = page_lru(page);
WARN_ON_ONCE(requested_lru < NR_LRU_LISTS && requested_lru != lru);
VM_BUG_ON(PageUnevictable(page)); VM_BUG_ON(PageUnevictable(page));
VM_BUG_ON(PageLRU(page)); VM_BUG_ON(PageLRU(page));
...@@ -851,11 +849,9 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec, ...@@ -851,11 +849,9 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
* Add the passed pages to the LRU, then drop the caller's refcount * Add the passed pages to the LRU, then drop the caller's refcount
* on them. Reinitialises the caller's pagevec. * on them. Reinitialises the caller's pagevec.
*/ */
void __pagevec_lru_add(struct pagevec *pvec, enum lru_list lru) void __pagevec_lru_add(struct pagevec *pvec)
{ {
VM_BUG_ON(is_unevictable_lru(lru)); pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, NULL);
pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, (void *)lru);
} }
EXPORT_SYMBOL(__pagevec_lru_add); EXPORT_SYMBOL(__pagevec_lru_add);
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment