Commit 1ab6c499 authored by Dave Chinner, committed by Al Viro

fs: convert fs shrinkers to new scan/count API

Convert the filesystem shrinkers to use the new API, and standardise some
of the behaviours of the shrinkers at the same time.  For example,
nr_to_scan means the number of objects to scan, not the number of objects
to free.
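
To make the new contract concrete, here is a rough sketch of the converted
shape for a hypothetical "foo" cache (foo_cache_nr_entries and
foo_cache_prune() are made-up helpers, not code from this patch):
->count_objects gives a cheap estimate of how many objects are freeable,
and ->scan_objects walks at most sc->nr_to_scan of them and returns how
many it actually freed, or SHRINK_STOP when reclaim must not recurse into
the filesystem:

    static unsigned long foo_cache_count(struct shrinker *shrink,
                                         struct shrink_control *sc)
    {
            /* Cheap, lock-free estimate of freeable objects. */
            return atomic_long_read(&foo_cache_nr_entries);
    }

    static unsigned long foo_cache_scan(struct shrinker *shrink,
                                        struct shrink_control *sc)
    {
            /* Back off rather than recursing into the fs from reclaim. */
            if (!(sc->gfp_mask & __GFP_FS))
                    return SHRINK_STOP;

            /* Scan at most sc->nr_to_scan objects; report how many were freed. */
            return foo_cache_prune(sc->nr_to_scan);
    }

    static struct shrinker foo_cache_shrinker = {
            .count_objects = foo_cache_count,
            .scan_objects = foo_cache_scan,
            .seeks = DEFAULT_SEEKS,
    };

Keeping the count side O(1) matters because reclaim polls it constantly.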

I refactored the CIFS idmap shrinker a little - it really needs to be
broken up into a shrinker per tree, with an item count kept at each tree
root, so that we don't have to walk the whole tree every time the shrinker
needs to count the objects in it (i.e. constantly, under memory pressure).
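
A minimal sketch of that direction, with a made-up idmap_tree type and
field names (nothing here is from this patch), would cache the count next
to the root so ->count_objects never walks the tree:

    struct idmap_tree {
            struct rb_root root;
            atomic_long_t nr_items;         /* maintained on insert/erase */
            struct shrinker shrinker;
    };

    static unsigned long idmap_tree_count(struct shrinker *shrink,
                                          struct shrink_control *sc)
    {
            struct idmap_tree *t = container_of(shrink,
                                            struct idmap_tree, shrinker);

            /* No tree walk: just read the cached item count. */
            return atomic_long_read(&t->nr_items);
    }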

[glommer@openvz.org: fixes for ext4, ubifs, nfs, cifs and glock. Fixes are needed mainly due to new code merged in the tree]
[assorted fixes folded in]
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Glauber Costa <glommer@openvz.org>
Acked-by: Mel Gorman <mgorman@suse.de>
Acked-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Acked-by: Jan Kara <jack@suse.cz>
Acked-by: Steven Whitehouse <swhiteho@redhat.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Carlos Maiolino <cmaiolino@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Rientjes <rientjes@google.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: J. Bruce Fields <bfields@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent 35163417
@@ -931,13 +931,15 @@ static int __ext4_es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
 	struct ext4_inode_info *ei;
 	struct list_head *cur, *tmp;
 	LIST_HEAD(skipped);
-	int ret, nr_shrunk = 0;
+	int nr_shrunk = 0;
 	int retried = 0, skip_precached = 1, nr_skipped = 0;
 
 	spin_lock(&sbi->s_es_lru_lock);
 
 retry:
 	list_for_each_safe(cur, tmp, &sbi->s_es_lru) {
+		int shrunk;
+
 		/*
 		 * If we have already reclaimed all extents from extent
 		 * status tree, just stop the loop immediately.
@@ -964,13 +966,13 @@ retry:
 			continue;
 
 		write_lock(&ei->i_es_lock);
-		ret = __es_try_to_reclaim_extents(ei, nr_to_scan);
+		shrunk = __es_try_to_reclaim_extents(ei, nr_to_scan);
 		if (ei->i_es_lru_nr == 0)
 			list_del_init(&ei->i_es_lru);
 		write_unlock(&ei->i_es_lock);
 
-		nr_shrunk += ret;
-		nr_to_scan -= ret;
+		nr_shrunk += shrunk;
+		nr_to_scan -= shrunk;
 		if (nr_to_scan == 0)
 			break;
 	}
@@ -1007,7 +1009,20 @@ retry:
 	return nr_shrunk;
 }
 
-static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc)
+static unsigned long ext4_es_count(struct shrinker *shrink,
+				   struct shrink_control *sc)
+{
+	unsigned long nr;
+	struct ext4_sb_info *sbi;
+
+	sbi = container_of(shrink, struct ext4_sb_info, s_es_shrinker);
+	nr = percpu_counter_read_positive(&sbi->s_extent_cache_cnt);
+	trace_ext4_es_shrink_enter(sbi->s_sb, sc->nr_to_scan, nr);
+	return nr;
+}
+
+static unsigned long ext4_es_scan(struct shrinker *shrink,
+				  struct shrink_control *sc)
 {
 	struct ext4_sb_info *sbi = container_of(shrink,
 					struct ext4_sb_info, s_es_shrinker);
@@ -1022,9 +1037,8 @@ static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc)
 
 	nr_shrunk = __ext4_es_shrink(sbi, nr_to_scan, NULL);
 
-	ret = percpu_counter_read_positive(&sbi->s_extent_cache_cnt);
 	trace_ext4_es_shrink_exit(sbi->s_sb, nr_shrunk, ret);
-	return ret;
+	return nr_shrunk;
 }
 
 void ext4_es_register_shrinker(struct ext4_sb_info *sbi)
@@ -1032,7 +1046,8 @@ void ext4_es_register_shrinker(struct ext4_sb_info *sbi)
 	INIT_LIST_HEAD(&sbi->s_es_lru);
 	spin_lock_init(&sbi->s_es_lru_lock);
 	sbi->s_es_last_sorted = 0;
-	sbi->s_es_shrinker.shrink = ext4_es_shrink;
+	sbi->s_es_shrinker.scan_objects = ext4_es_scan;
+	sbi->s_es_shrinker.count_objects = ext4_es_count;
 	sbi->s_es_shrinker.seeks = DEFAULT_SEEKS;
 	register_shrinker(&sbi->s_es_shrinker);
 }
@@ -1076,7 +1091,7 @@ static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei,
 	struct ext4_es_tree *tree = &ei->i_es_tree;
 	struct rb_node *node;
 	struct extent_status *es;
-	int nr_shrunk = 0;
+	unsigned long nr_shrunk = 0;
 	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
 				      DEFAULT_RATELIMIT_BURST);
...
@@ -1427,21 +1427,22 @@ __acquires(&lru_lock)
  * gfs2_dispose_glock_lru() above.
  */
 
-static void gfs2_scan_glock_lru(int nr)
+static long gfs2_scan_glock_lru(int nr)
 {
 	struct gfs2_glock *gl;
 	LIST_HEAD(skipped);
 	LIST_HEAD(dispose);
+	long freed = 0;
 
 	spin_lock(&lru_lock);
-	while(nr && !list_empty(&lru_list)) {
+	while ((nr-- >= 0) && !list_empty(&lru_list)) {
 		gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
 
 		/* Test for being demotable */
 		if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
 			list_move(&gl->gl_lru, &dispose);
 			atomic_dec(&lru_count);
-			nr--;
+			freed++;
 			continue;
 		}
 
@@ -1451,23 +1452,28 @@ static void gfs2_scan_glock_lru(int nr)
 	if (!list_empty(&dispose))
 		gfs2_dispose_glock_lru(&dispose);
 	spin_unlock(&lru_lock);
+
+	return freed;
 }
 
-static int gfs2_shrink_glock_memory(struct shrinker *shrink,
-				    struct shrink_control *sc)
+static unsigned long gfs2_glock_shrink_scan(struct shrinker *shrink,
+					    struct shrink_control *sc)
 {
-	if (sc->nr_to_scan) {
-		if (!(sc->gfp_mask & __GFP_FS))
-			return -1;
-		gfs2_scan_glock_lru(sc->nr_to_scan);
-	}
+	if (!(sc->gfp_mask & __GFP_FS))
+		return SHRINK_STOP;
+	return gfs2_scan_glock_lru(sc->nr_to_scan);
+}
 
+static unsigned long gfs2_glock_shrink_count(struct shrinker *shrink,
+					     struct shrink_control *sc)
+{
 	return vfs_pressure_ratio(atomic_read(&lru_count));
 }
 
 static struct shrinker glock_shrinker = {
-	.shrink = gfs2_shrink_glock_memory,
 	.seeks = DEFAULT_SEEKS,
+	.count_objects = gfs2_glock_shrink_count,
+	.scan_objects = gfs2_glock_shrink_scan,
 };
 
 /**
...
@@ -32,7 +32,8 @@
 struct workqueue_struct *gfs2_control_wq;
 
 static struct shrinker qd_shrinker = {
-	.shrink = gfs2_shrink_qd_memory,
+	.count_objects = gfs2_qd_shrink_count,
+	.scan_objects = gfs2_qd_shrink_scan,
 	.seeks = DEFAULT_SEEKS,
 };
...
@@ -75,17 +75,16 @@ static LIST_HEAD(qd_lru_list);
 static atomic_t qd_lru_count = ATOMIC_INIT(0);
 static DEFINE_SPINLOCK(qd_lru_lock);
 
-int gfs2_shrink_qd_memory(struct shrinker *shrink, struct shrink_control *sc)
+unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
+				  struct shrink_control *sc)
 {
 	struct gfs2_quota_data *qd;
 	struct gfs2_sbd *sdp;
 	int nr_to_scan = sc->nr_to_scan;
-
-	if (nr_to_scan == 0)
-		goto out;
+	long freed = 0;
 
 	if (!(sc->gfp_mask & __GFP_FS))
-		return -1;
+		return SHRINK_STOP;
 
 	spin_lock(&qd_lru_lock);
 	while (nr_to_scan && !list_empty(&qd_lru_list)) {
@@ -110,10 +109,15 @@ int gfs2_shrink_qd_memory(struct shrinker *shrink, struct shrink_control *sc)
 			kmem_cache_free(gfs2_quotad_cachep, qd);
 			spin_lock(&qd_lru_lock);
 			nr_to_scan--;
+			freed++;
 	}
 	spin_unlock(&qd_lru_lock);
+	return freed;
+}
 
-out:
+unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
+				   struct shrink_control *sc)
+{
 	return vfs_pressure_ratio(atomic_read(&qd_lru_count));
 }
...
@@ -53,8 +53,10 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip)
 	return ret;
 }
 
-extern int gfs2_shrink_qd_memory(struct shrinker *shrink,
-				 struct shrink_control *sc);
+extern unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
+					  struct shrink_control *sc);
+extern unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
+					 struct shrink_control *sc);
 extern const struct quotactl_ops gfs2_quotactl_ops;
 
 #endif /* __QUOTA_DOT_H__ */
...
@@ -86,18 +86,6 @@ static LIST_HEAD(mb_cache_list);
 static LIST_HEAD(mb_cache_lru_list);
 static DEFINE_SPINLOCK(mb_cache_spinlock);
 
-/*
- * What the mbcache registers as to get shrunk dynamically.
- */
-
-static int mb_cache_shrink_fn(struct shrinker *shrink,
-			      struct shrink_control *sc);
-
-static struct shrinker mb_cache_shrinker = {
-	.shrink = mb_cache_shrink_fn,
-	.seeks = DEFAULT_SEEKS,
-};
-
 static inline int
 __mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
 {
@@ -151,7 +139,7 @@ forget:
 
 /*
- * mb_cache_shrink_fn()  memory pressure callback
+ * mb_cache_shrink_scan()  memory pressure callback
  *
  * This function is called by the kernel memory management when memory
  * gets low.
@@ -159,17 +147,16 @@ forget:
  * @shrink: (ignored)
  * @sc: shrink_control passed from reclaim
  *
- * Returns the number of objects which are present in the cache.
+ * Returns the number of objects freed.
  */
-static int
-mb_cache_shrink_fn(struct shrinker *shrink, struct shrink_control *sc)
+static unsigned long
+mb_cache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
 	LIST_HEAD(free_list);
-	struct mb_cache *cache;
 	struct mb_cache_entry *entry, *tmp;
-	int count = 0;
 	int nr_to_scan = sc->nr_to_scan;
 	gfp_t gfp_mask = sc->gfp_mask;
+	unsigned long freed = 0;
 
 	mb_debug("trying to free %d entries", nr_to_scan);
 	spin_lock(&mb_cache_spinlock);
@@ -179,19 +166,37 @@ mb_cache_shrink_fn(struct shrinker *shrink, struct shrink_control *sc)
 				   struct mb_cache_entry, e_lru_list);
 		list_move_tail(&ce->e_lru_list, &free_list);
 		__mb_cache_entry_unhash(ce);
+		freed++;
+	}
+	spin_unlock(&mb_cache_spinlock);
+	list_for_each_entry_safe(entry, tmp, &free_list, e_lru_list) {
+		__mb_cache_entry_forget(entry, gfp_mask);
 	}
+	return freed;
+}
+
+static unsigned long
+mb_cache_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+	struct mb_cache *cache;
+	unsigned long count = 0;
+
+	spin_lock(&mb_cache_spinlock);
 	list_for_each_entry(cache, &mb_cache_list, c_cache_list) {
 		mb_debug("cache %s (%d)", cache->c_name,
 			  atomic_read(&cache->c_entry_count));
 		count += atomic_read(&cache->c_entry_count);
 	}
 	spin_unlock(&mb_cache_spinlock);
-	list_for_each_entry_safe(entry, tmp, &free_list, e_lru_list) {
-		__mb_cache_entry_forget(entry, gfp_mask);
-	}
+
 	return vfs_pressure_ratio(count);
 }
 
+static struct shrinker mb_cache_shrinker = {
+	.count_objects = mb_cache_shrink_count,
+	.scan_objects = mb_cache_shrink_scan,
+	.seeks = DEFAULT_SEEKS,
+};
 
 /*
  * mb_cache_create()  create a new cache
...
@@ -2006,17 +2006,18 @@ static void nfs_access_free_list(struct list_head *head)
 	}
 }
 
-int nfs_access_cache_shrinker(struct shrinker *shrink,
-			      struct shrink_control *sc)
+unsigned long
+nfs_access_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
 	LIST_HEAD(head);
 	struct nfs_inode *nfsi, *next;
 	struct nfs_access_entry *cache;
 	int nr_to_scan = sc->nr_to_scan;
 	gfp_t gfp_mask = sc->gfp_mask;
+	long freed = 0;
 
 	if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL)
-		return (nr_to_scan == 0) ? 0 : -1;
+		return SHRINK_STOP;
 
 	spin_lock(&nfs_access_lru_lock);
 	list_for_each_entry_safe(nfsi, next, &nfs_access_lru_list, access_cache_inode_lru) {
@@ -2032,6 +2033,7 @@ int nfs_access_cache_shrinker(struct shrinker *shrink,
 				struct nfs_access_entry, lru);
 			list_move(&cache->lru, &head);
 			rb_erase(&cache->rb_node, &nfsi->access_cache);
+			freed++;
 			if (!list_empty(&nfsi->access_cache_entry_lru))
 				list_move_tail(&nfsi->access_cache_inode_lru,
 						&nfs_access_lru_list);
@@ -2046,6 +2048,12 @@ remove_lru_entry:
 	}
 	spin_unlock(&nfs_access_lru_lock);
 	nfs_access_free_list(&head);
+	return freed;
+}
+
+unsigned long
+nfs_access_cache_count(struct shrinker *shrink, struct shrink_control *sc)
+{
 	return vfs_pressure_ratio(atomic_long_read(&nfs_access_nr_entries));
 }
...
@@ -273,8 +273,10 @@ extern struct nfs_client *nfs_init_client(struct nfs_client *clp,
 			   const char *ip_addr);
 
 /* dir.c */
-extern int nfs_access_cache_shrinker(struct shrinker *shrink,
-					struct shrink_control *sc);
+extern unsigned long nfs_access_cache_count(struct shrinker *shrink,
+					    struct shrink_control *sc);
+extern unsigned long nfs_access_cache_scan(struct shrinker *shrink,
+					   struct shrink_control *sc);
 struct dentry *nfs_lookup(struct inode *, struct dentry *, unsigned int);
 int nfs_create(struct inode *, struct dentry *, umode_t, bool);
 int nfs_mkdir(struct inode *, struct dentry *, umode_t);
...
@@ -360,7 +360,8 @@ static void unregister_nfs4_fs(void)
 #endif
 
 static struct shrinker acl_shrinker = {
-	.shrink = nfs_access_cache_shrinker,
+	.count_objects = nfs_access_cache_count,
+	.scan_objects = nfs_access_cache_scan,
 	.seeks = DEFAULT_SEEKS,
 };
...
@@ -59,11 +59,14 @@ static unsigned int longest_chain_cachesize;
 
 static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec);
 static void cache_cleaner_func(struct work_struct *unused);
-static int nfsd_reply_cache_shrink(struct shrinker *shrink,
-				   struct shrink_control *sc);
+static unsigned long nfsd_reply_cache_count(struct shrinker *shrink,
+					    struct shrink_control *sc);
+static unsigned long nfsd_reply_cache_scan(struct shrinker *shrink,
+					   struct shrink_control *sc);
 
 static struct shrinker nfsd_reply_cache_shrinker = {
-	.shrink = nfsd_reply_cache_shrink,
+	.scan_objects = nfsd_reply_cache_scan,
+	.count_objects = nfsd_reply_cache_count,
 	.seeks	= 1,
 };
 
@@ -232,16 +235,18 @@ nfsd_cache_entry_expired(struct svc_cacherep *rp)
  * Walk the LRU list and prune off entries that are older than RC_EXPIRE.
  * Also prune the oldest ones when the total exceeds the max number of entries.
  */
-static void
+static long
 prune_cache_entries(void)
 {
 	struct svc_cacherep *rp, *tmp;
+	long freed = 0;
 
 	list_for_each_entry_safe(rp, tmp, &lru_head, c_lru) {
 		if (!nfsd_cache_entry_expired(rp) &&
 		    num_drc_entries <= max_drc_entries)
 			break;
 		nfsd_reply_cache_free_locked(rp);
+		freed++;
 	}
 
 	/*
@@ -254,6 +259,7 @@ prune_cache_entries(void)
 		cancel_delayed_work(&cache_cleaner);
 	else
 		mod_delayed_work(system_wq, &cache_cleaner, RC_EXPIRE);
+	return freed;
 }
 
 static void
@@ -264,20 +270,28 @@ cache_cleaner_func(struct work_struct *unused)
 	spin_unlock(&cache_lock);
 }
 
-static int
-nfsd_reply_cache_shrink(struct shrinker *shrink, struct shrink_control *sc)
+static unsigned long
+nfsd_reply_cache_count(struct shrinker *shrink, struct shrink_control *sc)
 {
-	unsigned int num;
+	unsigned long num;
 
 	spin_lock(&cache_lock);
-	if (sc->nr_to_scan)
-		prune_cache_entries();
 	num = num_drc_entries;
 	spin_unlock(&cache_lock);
 
 	return num;
 }
 
+static unsigned long
+nfsd_reply_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
+{
+	unsigned long freed;