Commit 71baba4b authored by Mel Gorman, committed by Linus Torvalds

mm, page_alloc: rename __GFP_WAIT to __GFP_RECLAIM

Historically, a cleared __GFP_WAIT signalled that the caller was in atomic
context and could not sleep.  Now it is possible to distinguish between true
atomic context and callers that are merely unwilling to sleep: the latter
should clear __GFP_DIRECT_RECLAIM so that kswapd will still be woken.  As
clearing __GFP_WAIT behaves differently, there is a risk that people will
clear the wrong flags.  This patch renames __GFP_WAIT to __GFP_RECLAIM to
clearly indicate what it does -- setting it allows all reclaim activity,
clearing it prevents any reclaim.

[akpm@linux-foundation.org: fix build]
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Christoph Lameter <cl@linux.com>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Vitaly Wool <vitalywool@gmail.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 40113370
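
To make the new semantics concrete, a minimal sketch (illustrative only, not
part of the patch; the flag definitions are those from the include/linux/gfp.h
hunk further below):

	gfp_t flags;

	/* Old world: clearing __GFP_WAIT meant both "never enter direct
	 * reclaim" (never sleep) and "do not wake kswapd".
	 *
	 * New world: a caller that is merely unwilling to sleep clears
	 * only the direct-reclaim bit, so kswapd is still woken: */
	flags = GFP_KERNEL & ~__GFP_DIRECT_RECLAIM;

	/* Clearing __GFP_RECLAIM (both bits) suppresses all reclaim
	 * activity, matching what clearing the old __GFP_WAIT did: */
	flags = GFP_KERNEL & ~__GFP_RECLAIM;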
@@ -638,7 +638,7 @@ int blk_queue_enter(struct request_queue *q, gfp_t gfp)
 		if (percpu_ref_tryget_live(&q->q_usage_counter))
 			return 0;
-		if (!(gfp & __GFP_WAIT))
+		if (!gfpflags_allow_blocking(gfp))
 			return -EBUSY;
 		ret = wait_event_interruptible(q->mq_freeze_wq,
@@ -2038,7 +2038,7 @@ void generic_make_request(struct bio *bio)
 	do {
 		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-		if (likely(blk_queue_enter(q, __GFP_WAIT) == 0)) {
+		if (likely(blk_queue_enter(q, __GFP_DIRECT_RECLAIM) == 0)) {
			q->make_request_fn(q, bio);
...
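
The open-coded !(gfp & __GFP_WAIT) test in blk_queue_enter() above becomes a
call to gfpflags_allow_blocking(), a helper introduced by the parent series.
As of this series it reduces to a test of the direct-reclaim bit (reproduced
here from include/linux/gfp.h for context):

	static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
	{
		/* Only callers that may enter direct reclaim can block. */
		return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
	}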
@@ -1186,7 +1186,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
 	ctx = blk_mq_get_ctx(q);
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 	blk_mq_set_alloc_data(&alloc_data, q,
-			__GFP_WAIT|__GFP_HIGH, false, ctx, hctx);
+			__GFP_RECLAIM|__GFP_HIGH, false, ctx, hctx);
 	rq = __blk_mq_alloc_request(&alloc_data, rw);
 	ctx = alloc_data.ctx;
 	hctx = alloc_data.hctx;
...
@@ -444,7 +444,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
 	}
-	rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT);
+	rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_RECLAIM);
 	if (IS_ERR(rq)) {
 		err = PTR_ERR(rq);
 		goto error_free_buffer;
@@ -495,7 +495,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
 		break;
 	}
-	if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_WAIT)) {
+	if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_RECLAIM)) {
 		err = DRIVER_ERROR << 24;
 		goto error;
 	}
@@ -536,7 +536,7 @@ static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
 	struct request *rq;
 	int err;
-	rq = blk_get_request(q, WRITE, __GFP_WAIT);
+	rq = blk_get_request(q, WRITE, __GFP_RECLAIM);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 	blk_rq_set_block_pc(rq);
...
@@ -1007,7 +1007,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
 	bm_set_page_unchanged(b->bm_pages[page_nr]);
 	if (ctx->flags & BM_AIO_COPY_PAGES) {
-		page = mempool_alloc(drbd_md_io_page_pool, __GFP_HIGHMEM|__GFP_WAIT);
+		page = mempool_alloc(drbd_md_io_page_pool, __GFP_HIGHMEM|__GFP_RECLAIM);
 		copy_highpage(page, b->bm_pages[page_nr]);
 		bm_store_page_idx(page, page_nr);
 	} else
...
@@ -173,7 +173,7 @@ static struct mtip_cmd *mtip_get_int_command(struct driver_data *dd)
 {
 	struct request *rq;
-	rq = blk_mq_alloc_request(dd->queue, 0, __GFP_WAIT, true);
+	rq = blk_mq_alloc_request(dd->queue, 0, __GFP_RECLAIM, true);
 	return blk_mq_rq_to_pdu(rq);
 }
...
@@ -723,7 +723,7 @@ static int pd_special_command(struct pd_unit *disk,
 	struct request *rq;
 	int err = 0;
-	rq = blk_get_request(disk->gd->queue, READ, __GFP_WAIT);
+	rq = blk_get_request(disk->gd->queue, READ, __GFP_RECLAIM);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
...
@@ -704,14 +704,14 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
 	int ret = 0;
 	rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
-			     WRITE : READ, __GFP_WAIT);
+			     WRITE : READ, __GFP_RECLAIM);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 	blk_rq_set_block_pc(rq);
 	if (cgc->buflen) {
 		ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
-				      __GFP_WAIT);
+				      __GFP_RECLAIM);
 		if (ret)
 			goto out;
 	}
...
@@ -2216,7 +2216,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	mapping = file_inode(obj->base.filp)->i_mapping;
 	gfp = mapping_gfp_mask(mapping);
 	gfp |= __GFP_NORETRY | __GFP_NOWARN;
-	gfp &= ~(__GFP_IO | __GFP_WAIT);
+	gfp &= ~(__GFP_IO | __GFP_RECLAIM);
 	sg = st->sgl;
 	st->nents = 0;
 	for (i = 0; i < page_count; i++) {
...
@@ -92,7 +92,7 @@ int ide_queue_pc_tail(ide_drive_t *drive, struct gendisk *disk,
 	struct request *rq;
 	int error;
-	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+	rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
 	rq->cmd_type = REQ_TYPE_DRV_PRIV;
 	rq->special = (char *)pc;
...
@@ -441,7 +441,7 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd,
 	struct request *rq;
 	int error;
-	rq = blk_get_request(drive->queue, write, __GFP_WAIT);
+	rq = blk_get_request(drive->queue, write, __GFP_RECLAIM);
 	memcpy(rq->cmd, cmd, BLK_MAX_CDB);
 	rq->cmd_type = REQ_TYPE_ATA_PC;
...
@@ -303,7 +303,7 @@ int ide_cdrom_reset(struct cdrom_device_info *cdi)
 	struct request *rq;
 	int ret;
-	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+	rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
 	rq->cmd_type = REQ_TYPE_DRV_PRIV;
 	rq->cmd_flags = REQ_QUIET;
 	ret = blk_execute_rq(drive->queue, cd->disk, rq, 0);
...
@@ -165,7 +165,7 @@ int ide_devset_execute(ide_drive_t *drive, const struct ide_devset *setting,
 	if (!(setting->flags & DS_SYNC))
 		return setting->set(drive, arg);
-	rq = blk_get_request(q, READ, __GFP_WAIT);
+	rq = blk_get_request(q, READ, __GFP_RECLAIM);
 	rq->cmd_type = REQ_TYPE_DRV_PRIV;
 	rq->cmd_len = 5;
 	rq->cmd[0] = REQ_DEVSET_EXEC;
...
@@ -477,7 +477,7 @@ static int set_multcount(ide_drive_t *drive, int arg)
 	if (drive->special_flags & IDE_SFLAG_SET_MULTMODE)
 		return -EBUSY;
-	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+	rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
 	rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
 	drive->mult_req = arg;
...
@@ -125,7 +125,7 @@ static int ide_cmd_ioctl(ide_drive_t *drive, unsigned long arg)
 	if (NULL == (void *) arg) {
 		struct request *rq;
-		rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+		rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
 		rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
 		err = blk_execute_rq(drive->queue, NULL, rq, 0);
 		blk_put_request(rq);
@@ -221,7 +221,7 @@ static int generic_drive_reset(ide_drive_t *drive)
 	struct request *rq;
 	int ret = 0;
-	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+	rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
 	rq->cmd_type = REQ_TYPE_DRV_PRIV;
 	rq->cmd_len = 1;
 	rq->cmd[0] = REQ_DRIVE_RESET;
...
@@ -31,7 +31,7 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
 	}
 	spin_unlock_irq(&hwif->lock);
-	rq = blk_get_request(q, READ, __GFP_WAIT);
+	rq = blk_get_request(q, READ, __GFP_RECLAIM);
 	rq->cmd[0] = REQ_PARK_HEADS;
 	rq->cmd_len = 1;
 	rq->cmd_type = REQ_TYPE_DRV_PRIV;
...
@@ -18,7 +18,7 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg)
 	}
 	memset(&rqpm, 0, sizeof(rqpm));
-	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+	rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
 	rq->cmd_type = REQ_TYPE_ATA_PM_SUSPEND;
 	rq->special = &rqpm;
 	rqpm.pm_step = IDE_PM_START_SUSPEND;
@@ -88,7 +88,7 @@ int generic_ide_resume(struct device *dev)
 	}
 	memset(&rqpm, 0, sizeof(rqpm));
-	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+	rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
 	rq->cmd_type = REQ_TYPE_ATA_PM_RESUME;
 	rq->cmd_flags |= REQ_PREEMPT;
 	rq->special = &rqpm;
...
@@ -852,7 +852,7 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int size)
 	BUG_ON(cmd != REQ_IDETAPE_READ && cmd != REQ_IDETAPE_WRITE);
 	BUG_ON(size < 0 || size % tape->blk_size);
-	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+	rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM);
 	rq->cmd_type = REQ_TYPE_DRV_PRIV;
 	rq->cmd[13] = cmd;
 	rq->rq_disk = tape->disk;
@@ -860,7 +860,7 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int size)
 	if (size) {
 		ret = blk_rq_map_kern(drive->queue, rq, tape->buf, size,
-				      __GFP_WAIT);
+				      __GFP_RECLAIM);
 		if (ret)
 			goto out_put;
 	}
...
@@ -430,7 +430,7 @@ int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
 	int error;
 	int rw = !(cmd->tf_flags & IDE_TFLAG_WRITE) ? READ : WRITE;
-	rq = blk_get_request(drive->queue, rw, __GFP_WAIT);
+	rq = blk_get_request(drive->queue, rw, __GFP_RECLAIM);
 	rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
 	/*
@@ -441,7 +441,7 @@ int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
 	 */
 	if (nsect) {
 		error = blk_rq_map_kern(drive->queue, rq, buf,
-					nsect * SECTOR_SIZE, __GFP_WAIT);
+					nsect * SECTOR_SIZE, __GFP_RECLAIM);
 		if (error)
 			goto put_req;
 	}
...
@@ -1680,7 +1680,7 @@ int qib_setup_eagerbufs(struct qib_ctxtdata *rcd)
 	 * heavy filesystem activity makes these fail, and we can
 	 * use compound pages.
 	 */
-	gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP;
+	gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;
 	egrcnt = rcd->rcvegrcnt;
 	egroff = rcd->rcvegr_tid_base;
...
@@ -75,7 +75,7 @@ MODULE_LICENSE("GPL");
 /*
  * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We don't
- * allow wait (__GFP_WAIT) for NOSLEEP page allocations. Use
+ * allow wait (__GFP_RECLAIM) for NOSLEEP page allocations. Use
  * __GFP_NOWARN, to suppress page allocation failure warnings.
  */
 #define VMW_PAGE_ALLOC_NOSLEEP	(__GFP_HIGHMEM|__GFP_NOWARN)
...
@@ -1025,11 +1025,13 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 	req->special = (void *)0;
 	if (buffer && bufflen) {
-		ret = blk_rq_map_kern(q, req, buffer, bufflen, __GFP_WAIT);
+		ret = blk_rq_map_kern(q, req, buffer, bufflen,
+				      __GFP_DIRECT_RECLAIM);
 		if (ret)
 			goto out;
 	} else if (ubuffer && bufflen) {
-		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen, __GFP_WAIT);
+		ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
+				      __GFP_DIRECT_RECLAIM);
 		if (ret)
 			goto out;
 		bio = req->bio;
...
@@ -1970,7 +1970,7 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
 	struct request *req;
 	/*
-	 * blk_get_request with GFP_KERNEL (__GFP_WAIT) sleeps until a
+	 * blk_get_request with GFP_KERNEL (__GFP_RECLAIM) sleeps until a
 	 * request becomes available
 	 */
 	req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL);
...
@@ -222,13 +222,13 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
 	int write = (data_direction == DMA_TO_DEVICE);
 	int ret = DRIVER_ERROR << 24;
-	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
+	req = blk_get_request(sdev->request_queue, write, __GFP_RECLAIM);
 	if (IS_ERR(req))
 		return ret;
 	blk_rq_set_block_pc(req);
 	if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
-				       buffer, bufflen, __GFP_WAIT))
+				       buffer, bufflen, __GFP_RECLAIM))
 		goto out;
 	req->cmd_len = COMMAND_SIZE(cmd[0]);
...
@@ -1560,7 +1560,7 @@ int hfi1_setup_eagerbufs(struct hfi1_ctxtdata *rcd)
 	 * heavy filesystem activity makes these fail, and we can
 	 * use compound pages.
 	 */
-	gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP;
+	gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;
 	/*
 	 * The minimum size of the eager buffers is a groups of MTU-sized
...
@@ -905,7 +905,7 @@ static int ipath_create_user_egr(struct ipath_portdata *pd)
 	 * heavy filesystem activity makes these fail, and we can
 	 * use compound pages.
 	 */
-	gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP;
+	gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;
 	egrcnt = dd->ipath_rcvegrcnt;
 	/* TID number offset for this port */
...
@@ -30,7 +30,7 @@ extern unsigned cachefiles_debug;
 #define CACHEFILES_DEBUG_KLEAVE	2
 #define CACHEFILES_DEBUG_KDEBUG	4
-#define cachefiles_gfp (__GFP_WAIT | __GFP_NORETRY | __GFP_NOMEMALLOC)
+#define cachefiles_gfp (__GFP_RECLAIM | __GFP_NORETRY | __GFP_NOMEMALLOC)
 /*
  * node records
...
@@ -361,7 +361,7 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
 	/*
 	 * bio_alloc() is guaranteed to return a bio when called with
-	 * __GFP_WAIT and we request a valid number of vectors.
+	 * __GFP_RECLAIM and we request a valid number of vectors.
 	 */
 	bio = bio_alloc(GFP_KERNEL, nr_vecs);
...
@@ -72,7 +72,7 @@ static inline struct nilfs_mdt_info *NILFS_MDT(const struct inode *inode)
 }
 /* Default GFP flags using highmem */
-#define NILFS_MDT_GFP	(__GFP_WAIT | __GFP_IO | __GFP_HIGHMEM)
+#define NILFS_MDT_GFP	(__GFP_RECLAIM | __GFP_IO | __GFP_HIGHMEM)
 int nilfs_mdt_get_block(struct inode *, unsigned long, int,
 			void (*init_block)(struct inode *,
...
@@ -107,7 +107,7 @@ struct vm_area_struct;
  * can be cleared when the reclaiming of pages would cause unnecessary
  * disruption.
  */
-#define __GFP_WAIT ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
+#define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM|___GFP_KSWAPD_RECLAIM))
 #define __GFP_DIRECT_RECLAIM	((__force gfp_t)___GFP_DIRECT_RECLAIM) /* Caller can reclaim */
 #define __GFP_KSWAPD_RECLAIM	((__force gfp_t)___GFP_KSWAPD_RECLAIM) /* kswapd can wake */
@@ -126,12 +126,12 @@ struct vm_area_struct;
 */
 #define GFP_ATOMIC	(__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
 #define GFP_NOWAIT	(__GFP_KSWAPD_RECLAIM)
-#define GFP_NOIO	(__GFP_WAIT)
-#define GFP_NOFS	(__GFP_WAIT | __GFP_IO)
-#define GFP_KERNEL	(__GFP_WAIT | __GFP_IO | __GFP_FS)
-#define GFP_TEMPORARY	(__GFP_WAIT | __GFP_IO | __GFP_FS | \
+#define GFP_NOIO	(__GFP_RECLAIM)
+#define GFP_NOFS	(__GFP_RECLAIM | __GFP_IO)
+#define GFP_KERNEL	(__GFP_RECLAIM | __GFP_IO | __GFP_FS)
+#define GFP_TEMPORARY	(__GFP_RECLAIM | __GFP_IO | __GFP_FS | \
			 __GFP_RECLAIMABLE)
-#define GFP_USER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
+#define GFP_USER	(__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
 #define GFP_HIGHUSER	(GFP_USER | __GFP_HIGHMEM)
 #define GFP_HIGHUSER_MOVABLE	(GFP_HIGHUSER | __GFP_MOVABLE)
 #define GFP_TRANSHUGE	((GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
@@ -143,12 +143,12 @@ struct vm_area_struct;
 #define GFP_MOVABLE_SHIFT 3
 /* Control page allocator reclaim behavior */
-#define GFP_RECLAIM_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS|\
+#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
			__GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
			__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC)
 /* Control slab gfp mask during early boot */
-#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS))
+#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))
 /* Control allocation constraints */
 #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
...
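
With the gfp.h hunk above applied, the common reclaim flavours line up as
follows; a short illustrative sketch (hypothetical driver code, not part of
the patch), using only the macros defined above:

	struct page *p;

	/* May enter direct reclaim (sleep) and may wake kswapd. */
	p = alloc_pages(GFP_KERNEL, 0);
	if (p)
		__free_pages(p, 0);

	/* Never sleeps, but kswapd may still be woken:
	 * GFP_NOWAIT == __GFP_KSWAPD_RECLAIM. */
	p = alloc_pages(GFP_NOWAIT, 0);
	if (p)
		__free_pages(p, 0);

	/* True atomic context: like GFP_NOWAIT, plus __GFP_HIGH and
	 * __GFP_ATOMIC for access to memory reserves. */
	p = alloc_pages(GFP_ATOMIC, 0);
	if (p)
		__free_pages(p, 0);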
@@ -257,7 +257,7 @@ static int hib_submit_io(int rw, pgoff_t page_off, void *addr,
 	struct bio *bio;
 	int error = 0;
-	bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
+	bio = bio_alloc(__GFP_RECLAIM | __GFP_HIGH, 1);
 	bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
 	bio->bi_bdev = hib_resume_bdev;
@@ -356,7 +356,7 @@ static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
 		return -ENOSPC;
 	if (hb) {
-		src = (void *)__get_free_page(__GFP_WAIT | __GFP_NOWARN |
+		src = (void *)__get_free_page(__GFP_RECLAIM | __GFP_NOWARN |
		                              __GFP_NORETRY);
 		if (src) {
 			copy_page(src, buf);
@@ -364,7 +364,7 @@ static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
 			ret = hib_wait_io(hb); /* Free pages */
 			if (ret)
 				return ret;
-			src = (void *)__get_free_page(__GFP_WAIT |
+			src = (void *)__get_free_page(__GFP_RECLAIM |
			                              __GFP_NOWARN |
			                              __GFP_NORETRY);
 			if (src) {
@@ -672,7 +672,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
 	nr_threads = num_online_cpus() - 1;
 	nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
-	page = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
+	page = (void *)__get_free_page(__GFP_RECLAIM | __GFP_HIGH);
 	if (!page) {
 		printk(KERN_ERR "PM: Failed to allocate LZO page\n");
 		ret = -ENOMEM;
@@ -975,7 +975,7 @@ static int get_swap_reader(struct swap_map_handle *handle,
 		last = tmp;
 		tmp->map = (struct swap_map_page *)
-			   __get_free_page(__GFP_WAIT | __GFP_HIGH);
+			   __get_free_page(__GFP_RECLAIM | __GFP_HIGH);
 		if (!tmp->map) {
 			release_swap_reader(handle);
 			return -ENOMEM;
@@ -1242,9 +1242,9 @@ static int load_image_lzo(struct swap_map_handle *handle,
 	for (i = 0; i < read_pages; i++) {
 		page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
-						  __GFP_WAIT | __GFP_HIGH :
-						  __GFP_WAIT | __GFP_NOWARN |
+						  __GFP_RECLAIM | __GFP_HIGH :
+						  __GFP_RECLAIM | __GFP_NOWARN |
						  __GFP_NORETRY);
 		if (!page[i]) {
 			if (i < LZO_CMP_PAGES) {
...
@@ -135,7 +135,7 @@ static inline unsigned alloc_local_tag(struct percpu_ida_cpu *tags)
  * TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, of course).
  *
  * @gfp indicates whether or not to wait until a free id is available (it's not
- * used for internal memory allocations); thus if passed __GFP_WAIT we may sleep
+ * used for internal memory allocations); thus if passed __GFP_RECLAIM we may sleep
  * however long it takes until another thread frees an id (same semantics as a
  * mempool).
  *
...
@@ -3,11 +3,11 @@
 static struct {
 	struct fault_attr attr;
-	bool ignore_gfp_wait;
+	bool ignore_gfp_reclaim;
 	bool cache_filter;
 } failslab = {
 	.attr = FAULT_ATTR_INITIALIZER,
-	.ignore_gfp_wait = true,
+	.ignore_gfp_reclaim = true,
 	.cache_filter = false,
 };
@@ -16,7 +16,7 @@ bool should_failslab(size_t size, gfp_t gfpflags, unsigned long cache_flags)
 	if (gfpflags & __GFP_NOFAIL)
 		return false;
-	if (failslab.ignore_gfp_wait && (gfpflags & __GFP_WAIT))
+	if (failslab.ignore_gfp_reclaim && (gfpflags & __GFP_RECLAIM))
 		return false;