zone->prev_priority = zone->temp_priority;
}
return ret;
}
/*
* For kswapd, balance_pgdat() will work across all this node's zones until
* they are all at pages_high.
*
* If `nr_pages' is non-zero then it is the number of pages which are to be
* reclaimed, regardless of the zone occupancies. This is a software suspend
* special.
*
* Returns the number of pages which were actually freed.
*
* There is special handling here for zones which are full of pinned pages.
* This can happen if the pages are all mlocked, or if they are all used by
* device drivers (say, ZONE_DMA). Or if they are all in use by hugetlb.
* What we do is to detect the case where all pages in the zone have been
* scanned twice and there has been zero successful reclaim. Mark the zone as
* dead and from now on, only perform a short scan. Basically we're polling
* the zone for when the problem goes away.
*
* kswapd scans the zones in the highmem->normal->dma direction. It skips
* zones which have free_pages > pages_high, but once a zone is found to have
* free_pages <= pages_high, we scan that zone and the lower zones regardless
* of the number of free pages in the lower zones. This interoperates with
* the page allocator fallback scheme to ensure that aging of pages is balanced
* across the zones.
*/
static int balance_pgdat(pg_data_t *pgdat, int nr_pages, int order)
{
int to_free = nr_pages;
int all_zones_ok;
int priority;
int i;
int total_scanned, total_reclaimed;
struct reclaim_state *reclaim_state = current->reclaim_state;
struct scan_control sc;
loop_again:
total_scanned = 0;
total_reclaimed = 0;
sc.gfp_mask = GFP_KERNEL;
sc.may_writepage = 0;
sc.nr_mapped = read_page_state(nr_mapped);
inc_page_state(pageoutrun);
for (i = 0; i < pgdat->nr_zones; i++) {
struct zone *zone = pgdat->node_zones + i;
zone->temp_priority = DEF_PRIORITY;
}
for (priority = DEF_PRIORITY; priority >= 0; priority--) {
int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
unsigned long lru_pages = 0;
/* The swap token gets in the way of swapout... */
if (!priority)
disable_swap_token();
all_zones_ok = 1;
if (nr_pages == 0) {
/*
* Scan in the highmem->dma direction for the highest
* zone which needs scanning
*/
for (i = pgdat->nr_zones - 1; i >= 0; i--) {
struct zone *zone = pgdat->node_zones + i;
			if (!populated_zone(zone))
				continue;
if (zone->all_unreclaimable &&
priority != DEF_PRIORITY)
continue;
			if (!zone_watermark_ok(zone, order,
					zone->pages_high, 0, 0)) {
end_zone = i;
goto scan;
}
}
goto out;
} else {
end_zone = pgdat->nr_zones - 1;
}
scan:
for (i = 0; i <= end_zone; i++) {
struct zone *zone = pgdat->node_zones + i;
lru_pages += zone->nr_active + zone->nr_inactive;
}
/*
* Now scan the zone in the dma->highmem direction, stopping
* at the last zone which needs scanning.
*
* We do this because the page allocator works in the opposite
* direction. This prevents the page allocator from allocating
* pages behind kswapd's direction of progress, which would
* cause too much scanning of the lower zones.
*/
for (i = 0; i <= end_zone; i++) {
struct zone *zone = pgdat->node_zones + i;
		int nr_slab;

		if (!populated_zone(zone))
			continue;
if (zone->all_unreclaimable && priority != DEF_PRIORITY)
continue;
if (nr_pages == 0) { /* Not software suspend */
			if (!zone_watermark_ok(zone, order,
					zone->pages_high, end_zone, 0))
				all_zones_ok = 0;
}
zone->temp_priority = priority;
if (zone->prev_priority > priority)
zone->prev_priority = priority;
sc.nr_scanned = 0;
sc.nr_reclaimed = 0;
sc.priority = priority;
sc.swap_cluster_max = nr_pages? nr_pages : SWAP_CLUSTER_MAX;
		atomic_inc(&zone->reclaim_in_progress);
		shrink_zone(zone, &sc);
		atomic_dec(&zone->reclaim_in_progress);
		reclaim_state->reclaimed_slab = 0;
nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
lru_pages);
sc.nr_reclaimed += reclaim_state->reclaimed_slab;
total_reclaimed += sc.nr_reclaimed;
total_scanned += sc.nr_scanned;
if (zone->all_unreclaimable)
continue;
if (nr_slab == 0 && zone->pages_scanned >=
(zone->nr_active + zone->nr_inactive) * 4)
zone->all_unreclaimable = 1;
/*
* If we've done a decent amount of scanning and
* the reclaim ratio is low, start doing writepage
* even in laptop mode
*/
if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
total_scanned > total_reclaimed+total_reclaimed/2)
sc.may_writepage = 1;
}
if (nr_pages && to_free > total_reclaimed)
continue; /* swsusp: need to do more work */
if (all_zones_ok)
break; /* kswapd: all done */
/*
* OK, kswapd is getting into trouble. Take a nap, then take
* another pass across the zones.
*/
if (total_scanned && priority < DEF_PRIORITY - 2)
blk_congestion_wait(WRITE, HZ/10);
/*
* We do this so kswapd doesn't build up large priorities for
* example when it is freeing in parallel with allocators. It
* matches the direct reclaim path behaviour in terms of impact
* on zone->*_priority.
*/
if ((total_reclaimed >= SWAP_CLUSTER_MAX) && (!nr_pages))
break;
}
out:
for (i = 0; i < pgdat->nr_zones; i++) {
struct zone *zone = pgdat->node_zones + i;
zone->prev_priority = zone->temp_priority;
}
if (!all_zones_ok) {
cond_resched();
goto loop_again;
}
return total_reclaimed;
}
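The block comment above balance_pgdat() describes its two scan directions: walk the zones highmem->dma to find the highest zone whose free pages are at or below pages_high, then reclaim dma->highmem up to and including that zone so page aging stays balanced. The standalone sketch below models only that selection logic; the toy_zone structure, the field names and the numbers are invented for illustration and are not kernel code.

#include <stdio.h>

/* Hypothetical, simplified stand-in for struct zone: just the two
 * numbers the zone-selection logic needs. */
struct toy_zone {
	const char *name;
	long free_pages;
	long pages_high;
};

int main(void)
{
	/* Lowest index is the "DMA-like" zone, highest the "highmem-like"
	 * one, mirroring the pgdat->node_zones ordering. */
	struct toy_zone zones[] = {
		{ "dma",     900,  500 },
		{ "normal",  300,  400 },	/* below its high watermark */
		{ "highmem", 9000, 1000 },
	};
	int nr_zones = sizeof(zones) / sizeof(zones[0]);
	int end_zone = -1;
	int i;

	/* Pass 1: highmem->dma, find the highest zone that needs work. */
	for (i = nr_zones - 1; i >= 0; i--) {
		if (zones[i].free_pages <= zones[i].pages_high) {
			end_zone = i;
			break;
		}
	}
	if (end_zone < 0) {
		printf("all zones above pages_high, nothing to do\n");
		return 0;
	}

	/* Pass 2: dma->highmem, reclaim every zone up to and including
	 * end_zone, regardless of how full the lower zones are, so that
	 * page aging stays balanced across zones. */
	for (i = 0; i <= end_zone; i++)
		printf("would reclaim in zone %s\n", zones[i].name);

	return 0;
}

The two loops mirror the goto scan / scan: structure above; in the example, "dma" is reclaimed even though it is above its own watermark, which is exactly the behaviour the comment calls out.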
/*
* The background pageout daemon, started as a kernel thread
* from the init process.
*
* This basically trickles out pages so that we have _some_
* free memory available even if there is no other activity
* that frees anything up. This is needed for things like routing
* etc, where we otherwise might have all activity going on in
* asynchronous contexts that cannot page things out.
*
* If there are applications that are active memory-allocators
* (most normal use), this basically shouldn't matter.
*/
static int kswapd(void *p)
{
unsigned long order;
pg_data_t *pgdat = (pg_data_t*)p;
struct task_struct *tsk = current;
DEFINE_WAIT(wait);
struct reclaim_state reclaim_state = {
.reclaimed_slab = 0,
};
cpumask_t cpumask;
daemonize("kswapd%d", pgdat->node_id);
cpumask = node_to_cpumask(pgdat->node_id);
if (!cpus_empty(cpumask))
set_cpus_allowed(tsk, cpumask);
current->reclaim_state = &reclaim_state;
/*
* Tell the memory management that we're a "memory allocator",
* and that if we need more memory we should get access to it
* regardless (see "__alloc_pages()"). "kswapd" should
* never get caught in the normal page freeing logic.
*
* (Kswapd normally doesn't need memory anyway, but sometimes
* you need a small amount of memory in order to be able to
* page out something else, and this flag essentially protects
* us from recursively trying to free more memory as we're
* trying to free the first piece of memory in the first place).
*/
tsk->flags |= PF_MEMALLOC|PF_KSWAPD;
order = 0;
for ( ; ; ) {
unsigned long new_order;
prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
new_order = pgdat->kswapd_max_order;
pgdat->kswapd_max_order = 0;
if (order < new_order) {
/*
* Don't sleep if someone wants a larger 'order'
* allocation
*/
order = new_order;
} else {
schedule();
order = pgdat->kswapd_max_order;
}
finish_wait(&pgdat->kswapd_wait, &wait);
balance_pgdat(pgdat, 0, order);
}
return 0;
}
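kswapd's main loop above coalesces wake requests through pgdat->kswapd_max_order: wakers record the largest order they need, and the daemon refuses to sleep if a larger order arrived while it was balancing. Below is a hedged userspace analogue of that handshake, with a pthread mutex and condition variable standing in for the kernel waitqueue; all names (toy_wakeup_kswapd, toy_kswapd) are invented, and spurious condition-variable wakeups are tolerated the same way kswapd tolerates an order-0 wakeup.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Toy analogue of the kswapd_max_order handshake: wakers only record the
 * largest order they need and signal; the daemon re-reads that value
 * before sleeping and skips the sleep when a bigger request arrived
 * while it was working. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t kswapd_wait = PTHREAD_COND_INITIALIZER;
static unsigned long kswapd_max_order;	/* largest pending wake request */

static void toy_wakeup_kswapd(unsigned long order)
{
	pthread_mutex_lock(&lock);
	if (kswapd_max_order < order)
		kswapd_max_order = order;	/* coalesce: keep only the largest */
	pthread_cond_signal(&kswapd_wait);
	pthread_mutex_unlock(&lock);
}

static void *toy_kswapd(void *unused)
{
	unsigned long order = 0;

	(void)unused;
	for ( ; ; ) {			/* like kswapd, never exits */
		unsigned long new_order;

		pthread_mutex_lock(&lock);
		new_order = kswapd_max_order;
		kswapd_max_order = 0;
		if (order < new_order) {
			/* Someone wants a larger 'order': don't sleep. */
			order = new_order;
		} else {
			pthread_cond_wait(&kswapd_wait, &lock);
			order = kswapd_max_order;
		}
		pthread_mutex_unlock(&lock);
		printf("toy kswapd: balance for order %lu\n", order);
	}
	return NULL;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, toy_kswapd, NULL);
	sleep(1);
	toy_wakeup_kswapd(0);	/* plain low-memory wakeup */
	sleep(1);
	toy_wakeup_kswapd(3);	/* someone needs an order-3 block */
	sleep(1);
	return 0;		/* process exit ends the daemon thread */
}

Build with -pthread; the sleeps in main() only sequence the demo.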
/*
* A zone is low on free memory, so wake its kswapd task to service it.
*/
void wakeup_kswapd(struct zone *zone, int order)
{
	pg_data_t *pgdat;

	if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0))
		return;
	pgdat = zone->zone_pgdat;
	if (pgdat->kswapd_max_order < order)
		pgdat->kswapd_max_order = order;
	if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
		return;
	if (!waitqueue_active(&pgdat->kswapd_wait))
		return;
	wake_up_interruptible(&pgdat->kswapd_wait);
}
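wakeup_kswapd() fires when a zone fails its pages_low watermark check, while balance_pgdat() (per its comment) keeps working until the zones are back above pages_high; the gap between the two thresholds provides hysteresis, so kswapd is not re-woken the moment it stops. The sketch below illustrates just that threshold logic with invented names and numbers; it is not the kernel's zone_watermark_ok(), which also checks that enough pages remain at each lower order.

#include <stdio.h>

/* Toy illustration of the pages_low / pages_high hysteresis: the
 * allocator wakes the reclaimer when a 2^order request would leave the
 * zone at or below pages_low, and the reclaimer keeps going until the
 * zone is back above pages_high. */
struct toy_zone {
	long free_pages;
	long pages_low;
	long pages_high;
};

static int toy_should_wake_kswapd(const struct toy_zone *z, int order)
{
	return z->free_pages - (1L << order) <= z->pages_low;
}

static int toy_kswapd_done(const struct toy_zone *z)
{
	return z->free_pages > z->pages_high;
}

int main(void)
{
	struct toy_zone z = { .free_pages = 520, .pages_low = 512, .pages_high = 640 };

	printf("wake kswapd for order 0? %d\n", toy_should_wake_kswapd(&z, 0));
	printf("wake kswapd for order 4? %d\n", toy_should_wake_kswapd(&z, 4));

	/* Pretend the reclaimer ran until pages_high was reached. */
	z.free_pages = 650;
	printf("kswapd done? %d\n", toy_kswapd_done(&z));
	return 0;
}

Note how the order argument matters: the same zone is fine for a single page but too low for an order-4 block, which is why both wakeup_kswapd() and kswapd track the allocation order.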
#ifdef CONFIG_PM
/*
* Try to free `nr_pages' of memory, system-wide. Returns the number of freed
* pages.
*/
int shrink_all_memory(int nr_pages)
{
pg_data_t *pgdat;
int nr_to_free = nr_pages;
int ret = 0;
struct reclaim_state reclaim_state = {
.reclaimed_slab = 0,
};
current->reclaim_state = &reclaim_state;
for_each_pgdat(pgdat) {
int freed;
freed = balance_pgdat(pgdat, nr_to_free, 0);
ret += freed;
nr_to_free -= freed;
if (nr_to_free <= 0)
break;
}
current->reclaim_state = NULL;
return ret;
}
#endif
#ifdef CONFIG_HOTPLUG_CPU
/* It's optimal to keep kswapds on the same CPUs as their memory, but
not required for correctness. So if the last cpu in a node goes
away, we get changed to run anywhere: as the first one comes back,
restore their cpu bindings. */
static int __devinit cpu_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
pg_data_t *pgdat;
cpumask_t mask;
if (action == CPU_ONLINE) {
for_each_pgdat(pgdat) {
mask = node_to_cpumask(pgdat->node_id);
if (any_online_cpu(mask) != NR_CPUS)
/* One of our CPUs online: restore mask */
set_cpus_allowed(pgdat->kswapd, mask);
}
}
return NOTIFY_OK;
}
#endif /* CONFIG_HOTPLUG_CPU */
static int __init kswapd_init(void)
{
pg_data_t *pgdat;
swap_setup();
for_each_pgdat(pgdat)
pgdat->kswapd
= find_task_by_pid(kernel_thread(kswapd, pgdat, CLONE_KERNEL));
total_memory = nr_free_pagecache_pages();
hotcpu_notifier(cpu_callback, 0);
return 0;
}
module_init(kswapd_init)