Commit 1c5e9c27 authored by Mel Gorman, committed by Linus Torvalds
Browse files

mm: numa: limit scope of lock for NUMA migrate rate limiting

NUMA migrate rate limiting protects a migration counter and window using
a lock but in some cases this can be a contended lock.  It is not
critical that the number of pages be perfect, lost updates are
acceptable.  Reduce the importance of this lock.
Signed-off-by: Mel Gorman <>
Reviewed-by: Rik van Riel <>
Cc: Alex Thorlton <>
Signed-off-by: Andrew Morton <>
Signed-off-by: Linus Torvalds <>
parent 1c30e017
@@ -764,10 +764,7 @@ typedef struct pglist_data {
int kswapd_max_order; int kswapd_max_order;
enum zone_type classzone_idx; enum zone_type classzone_idx;
/* /* Lock serializing the migrate rate limiting window */
* Lock serializing the per destination node AutoNUMA memory
* migration rate limiting data.
spinlock_t numabalancing_migrate_lock; spinlock_t numabalancing_migrate_lock;
/* Rate limiting time interval */ /* Rate limiting time interval */
@@ -1602,26 +1602,29 @@ bool migrate_ratelimited(int node)
/*
 * Returns true if further migration to @pgdat should be rate limited for
 * the current window, false otherwise.  On false, @nr_pages is charged
 * against the window's budget.  The counter update is deliberately
 * unlocked; the spinlock only serializes resets of the window itself.
 */
static bool numamigrate_update_ratelimit(pg_data_t *pgdat,
					unsigned long nr_pages)
{
	/*
	 * Rate-limit the amount of data that is being migrated to a node.
	 * Optimal placement is no good if the memory bus is saturated and
	 * all the time is being spent migrating!
	 */
	if (time_after(jiffies, pgdat->numabalancing_migrate_next_window)) {
		/* Only the window reset needs serialization */
		spin_lock(&pgdat->numabalancing_migrate_lock);
		pgdat->numabalancing_migrate_nr_pages = 0;
		pgdat->numabalancing_migrate_next_window = jiffies +
			msecs_to_jiffies(migrate_interval_millisecs);
		spin_unlock(&pgdat->numabalancing_migrate_lock);
	}
	if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages)
		return true;

	/*
	 * This is an unlocked non-atomic update so errors are possible.
	 * The consequences are failing to migrate when we potentially should
	 * have which is not severe enough to warrant locking. If it is ever
	 * a problem, it can be converted to a per-cpu counter.
	 */
	pgdat->numabalancing_migrate_nr_pages += nr_pages;
	return false;
}
static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page) static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment