Commit 721a9602 authored by Jens Axboe's avatar Jens Axboe

block: kill off REQ_UNPLUG

With the plugging now being explicitly controlled by the
submitter, callers need not pass down unplugging hints
to the block layer. If they want to unplug, it's because they
manually plugged on their own - in which case, they should just
unplug at will.
Signed-off-by: default avatarJens Axboe <jaxboe@fusionio.com>
parent cf15900e
...@@ -1290,7 +1290,7 @@ get_rq: ...@@ -1290,7 +1290,7 @@ get_rq:
} }
plug = current->plug; plug = current->plug;
if (plug && !sync) { if (plug) {
if (!plug->should_sort && !list_empty(&plug->list)) { if (!plug->should_sort && !list_empty(&plug->list)) {
struct request *__rq; struct request *__rq;
......
...@@ -80,7 +80,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev, ...@@ -80,7 +80,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags)) if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
rw |= REQ_FUA; rw |= REQ_FUA;
rw |= REQ_UNPLUG | REQ_SYNC; rw |= REQ_SYNC;
bio = bio_alloc(GFP_NOIO, 1); bio = bio_alloc(GFP_NOIO, 1);
bio->bi_bdev = bdev->md_bdev; bio->bi_bdev = bdev->md_bdev;
......
...@@ -377,7 +377,7 @@ union p_header { ...@@ -377,7 +377,7 @@ union p_header {
#define DP_HARDBARRIER 1 /* depricated */ #define DP_HARDBARRIER 1 /* depricated */
#define DP_RW_SYNC 2 /* equals REQ_SYNC */ #define DP_RW_SYNC 2 /* equals REQ_SYNC */
#define DP_MAY_SET_IN_SYNC 4 #define DP_MAY_SET_IN_SYNC 4
#define DP_UNPLUG 8 /* equals REQ_UNPLUG */ #define DP_UNPLUG 8 /* not used anymore */
#define DP_FUA 16 /* equals REQ_FUA */ #define DP_FUA 16 /* equals REQ_FUA */
#define DP_FLUSH 32 /* equals REQ_FLUSH */ #define DP_FLUSH 32 /* equals REQ_FLUSH */
#define DP_DISCARD 64 /* equals REQ_DISCARD */ #define DP_DISCARD 64 /* equals REQ_DISCARD */
......
...@@ -2477,12 +2477,11 @@ static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw) ...@@ -2477,12 +2477,11 @@ static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
{ {
if (mdev->agreed_pro_version >= 95) if (mdev->agreed_pro_version >= 95)
return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) | return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
(bi_rw & REQ_UNPLUG ? DP_UNPLUG : 0) |
(bi_rw & REQ_FUA ? DP_FUA : 0) | (bi_rw & REQ_FUA ? DP_FUA : 0) |
(bi_rw & REQ_FLUSH ? DP_FLUSH : 0) | (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
(bi_rw & REQ_DISCARD ? DP_DISCARD : 0); (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
else else
return bi_rw & (REQ_SYNC | REQ_UNPLUG) ? DP_RW_SYNC : 0; return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
} }
/* Used to send write requests /* Used to send write requests
......
...@@ -1100,8 +1100,6 @@ next_bio: ...@@ -1100,8 +1100,6 @@ next_bio:
/* > e->sector, unless this is the first bio */ /* > e->sector, unless this is the first bio */
bio->bi_sector = sector; bio->bi_sector = sector;
bio->bi_bdev = mdev->ldev->backing_bdev; bio->bi_bdev = mdev->ldev->backing_bdev;
/* we special case some flags in the multi-bio case, see below
* (REQ_UNPLUG) */
bio->bi_rw = rw; bio->bi_rw = rw;
bio->bi_private = e; bio->bi_private = e;
bio->bi_end_io = drbd_endio_sec; bio->bi_end_io = drbd_endio_sec;
...@@ -1130,10 +1128,6 @@ next_bio: ...@@ -1130,10 +1128,6 @@ next_bio:
bios = bios->bi_next; bios = bios->bi_next;
bio->bi_next = NULL; bio->bi_next = NULL;
/* strip off REQ_UNPLUG unless it is the last bio */
if (bios)
bio->bi_rw &= ~REQ_UNPLUG;
drbd_generic_make_request(mdev, fault_type, bio); drbd_generic_make_request(mdev, fault_type, bio);
} while (bios); } while (bios);
return 0; return 0;
...@@ -1621,12 +1615,11 @@ static unsigned long write_flags_to_bio(struct drbd_conf *mdev, u32 dpf) ...@@ -1621,12 +1615,11 @@ static unsigned long write_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
{ {
if (mdev->agreed_pro_version >= 95) if (mdev->agreed_pro_version >= 95)
return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) | return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
(dpf & DP_UNPLUG ? REQ_UNPLUG : 0) |
(dpf & DP_FUA ? REQ_FUA : 0) | (dpf & DP_FUA ? REQ_FUA : 0) |
(dpf & DP_FLUSH ? REQ_FUA : 0) | (dpf & DP_FLUSH ? REQ_FUA : 0) |
(dpf & DP_DISCARD ? REQ_DISCARD : 0); (dpf & DP_DISCARD ? REQ_DISCARD : 0);
else else
return dpf & DP_RW_SYNC ? (REQ_SYNC | REQ_UNPLUG) : 0; return dpf & DP_RW_SYNC ? REQ_SYNC : 0;
} }
/* mirrored write */ /* mirrored write */
......
...@@ -347,7 +347,7 @@ static void write_page(struct bitmap *bitmap, struct page *page, int wait) ...@@ -347,7 +347,7 @@ static void write_page(struct bitmap *bitmap, struct page *page, int wait)
atomic_inc(&bitmap->pending_writes); atomic_inc(&bitmap->pending_writes);
set_buffer_locked(bh); set_buffer_locked(bh);
set_buffer_mapped(bh); set_buffer_mapped(bh);
submit_bh(WRITE | REQ_UNPLUG | REQ_SYNC, bh); submit_bh(WRITE | REQ_SYNC, bh);
bh = bh->b_this_page; bh = bh->b_this_page;
} }
......
...@@ -352,7 +352,7 @@ static void dispatch_io(int rw, unsigned int num_regions, ...@@ -352,7 +352,7 @@ static void dispatch_io(int rw, unsigned int num_regions,
BUG_ON(num_regions > DM_IO_MAX_REGIONS); BUG_ON(num_regions > DM_IO_MAX_REGIONS);
if (sync) if (sync)
rw |= REQ_SYNC | REQ_UNPLUG; rw |= REQ_SYNC;
/* /*
* For multiple regions we need to be careful to rewind * For multiple regions we need to be careful to rewind
......
...@@ -356,11 +356,8 @@ static int run_io_job(struct kcopyd_job *job) ...@@ -356,11 +356,8 @@ static int run_io_job(struct kcopyd_job *job)
if (job->rw == READ) if (job->rw == READ)
r = dm_io(&io_req, 1, &job->source, NULL); r = dm_io(&io_req, 1, &job->source, NULL);
else { else
if (job->num_dests > 1)
io_req.bi_rw |= REQ_UNPLUG;
r = dm_io(&io_req, job->num_dests, job->dests, NULL); r = dm_io(&io_req, job->num_dests, job->dests, NULL);
}
return r; return r;
} }
......
...@@ -777,8 +777,7 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, ...@@ -777,8 +777,7 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
bio->bi_end_io = super_written; bio->bi_end_io = super_written;
atomic_inc(&mddev->pending_writes); atomic_inc(&mddev->pending_writes);
submit_bio(REQ_WRITE | REQ_SYNC | REQ_UNPLUG | REQ_FLUSH | REQ_FUA, submit_bio(REQ_WRITE | REQ_SYNC | REQ_FLUSH | REQ_FUA, bio);
bio);
} }
void md_super_wait(mddev_t *mddev) void md_super_wait(mddev_t *mddev)
...@@ -806,7 +805,7 @@ int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size, ...@@ -806,7 +805,7 @@ int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size,
struct completion event; struct completion event;
int ret; int ret;
rw |= REQ_SYNC | REQ_UNPLUG; rw |= REQ_SYNC;
bio->bi_bdev = (metadata_op && rdev->meta_bdev) ? bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
rdev->meta_bdev : rdev->bdev; rdev->meta_bdev : rdev->bdev;
......
...@@ -2182,7 +2182,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, ...@@ -2182,7 +2182,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
unsigned long nr_written = 0; unsigned long nr_written = 0;
if (wbc->sync_mode == WB_SYNC_ALL) if (wbc->sync_mode == WB_SYNC_ALL)
write_flags = WRITE_SYNC_PLUG; write_flags = WRITE_SYNC;
else else
write_flags = WRITE; write_flags = WRITE;
......
...@@ -767,7 +767,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list) ...@@ -767,7 +767,7 @@ static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
* still in flight on potentially older * still in flight on potentially older
* contents. * contents.
*/ */
write_dirty_buffer(bh, WRITE_SYNC_PLUG); write_dirty_buffer(bh, WRITE_SYNC);
/* /*
* Kick off IO for the previous mapping. Note * Kick off IO for the previous mapping. Note
...@@ -1602,14 +1602,8 @@ EXPORT_SYMBOL(unmap_underlying_metadata); ...@@ -1602,14 +1602,8 @@ EXPORT_SYMBOL(unmap_underlying_metadata);
* prevents this contention from occurring. * prevents this contention from occurring.
* *
* If block_write_full_page() is called with wbc->sync_mode == * If block_write_full_page() is called with wbc->sync_mode ==
* WB_SYNC_ALL, the writes are posted using WRITE_SYNC_PLUG; this * WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this
* causes the writes to be flagged as synchronous writes, but the * causes the writes to be flagged as synchronous writes.
* block device queue will NOT be unplugged, since usually many pages
* will be pushed to the out before the higher-level caller actually
* waits for the writes to be completed. The various wait functions,
* such as wait_on_writeback_range() will ultimately call sync_page()
* which will ultimately call blk_run_backing_dev(), which will end up
* unplugging the device queue.
*/ */
static int __block_write_full_page(struct inode *inode, struct page *page, static int __block_write_full_page(struct inode *inode, struct page *page,
get_block_t *get_block, struct writeback_control *wbc, get_block_t *get_block, struct writeback_control *wbc,
...@@ -1622,7 +1616,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page, ...@@ -1622,7 +1616,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
const unsigned blocksize = 1 << inode->i_blkbits; const unsigned blocksize = 1 << inode->i_blkbits;
int nr_underway = 0; int nr_underway = 0;
int write_op = (wbc->sync_mode == WB_SYNC_ALL ? int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
WRITE_SYNC_PLUG : WRITE); WRITE_SYNC : WRITE);
BUG_ON(!PageLocked(page)); BUG_ON(!PageLocked(page));
......
...@@ -1173,7 +1173,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, ...@@ -1173,7 +1173,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
struct dio *dio; struct dio *dio;
if (rw & WRITE) if (rw & WRITE)
rw = WRITE_ODIRECT_PLUG; rw = WRITE_ODIRECT;
if (bdev) if (bdev)
bdev_blkbits = blksize_bits(bdev_logical_block_size(bdev)); bdev_blkbits = blksize_bits(bdev_logical_block_size(bdev));
......
...@@ -310,8 +310,7 @@ static int io_submit_init(struct ext4_io_submit *io, ...@@ -310,8 +310,7 @@ static int io_submit_init(struct ext4_io_submit *io,
io_end->offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh); io_end->offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh);
io->io_bio = bio; io->io_bio = bio;
io->io_op = (wbc->sync_mode == WB_SYNC_ALL ? io->io_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
WRITE_SYNC_PLUG : WRITE);
io->io_next_block = bh->b_blocknr; io->io_next_block = bh->b_blocknr;
return 0; return 0;
} }
......
...@@ -121,7 +121,7 @@ __acquires(&sdp->sd_log_lock) ...@@ -121,7 +121,7 @@ __acquires(&sdp->sd_log_lock)
lock_buffer(bh); lock_buffer(bh);
if (test_clear_buffer_dirty(bh)) { if (test_clear_buffer_dirty(bh)) {
bh->b_end_io = end_buffer_write_sync; bh->b_end_io = end_buffer_write_sync;
submit_bh(WRITE_SYNC_PLUG, bh); submit_bh(WRITE_SYNC, bh);
} else { } else {
unlock_buffer(bh); unlock_buffer(bh);
brelse(bh); brelse(bh);
...@@ -647,7 +647,7 @@ static void gfs2_ordered_write(struct gfs2_sbd *sdp) ...@@ -647,7 +647,7 @@ static void gfs2_ordered_write(struct gfs2_sbd *sdp)
lock_buffer(bh); lock_buffer(bh);
if (buffer_mapped(bh) && test_clear_buffer_dirty(bh)) { if (buffer_mapped(bh) && test_clear_buffer_dirty(bh)) {
bh->b_end_io = end_buffer_write_sync; bh->b_end_io = end_buffer_write_sync;
submit_bh(WRITE_SYNC_PLUG, bh); submit_bh(WRITE_SYNC, bh);
} else { } else {
unlock_buffer(bh); unlock_buffer(bh);
brelse(bh); brelse(bh);
......
...@@ -200,7 +200,7 @@ static void buf_lo_before_commit(struct gfs2_sbd *sdp) ...@@ -200,7 +200,7 @@ static void buf_lo_before_commit(struct gfs2_sbd *sdp)
} }
gfs2_log_unlock(sdp); gfs2_log_unlock(sdp);
submit_bh(WRITE_SYNC_PLUG, bh); submit_bh(WRITE_SYNC, bh);
gfs2_log_lock(sdp); gfs2_log_lock(sdp);
n = 0; n = 0;
...@@ -210,7 +210,7 @@ static void buf_lo_before_commit(struct gfs2_sbd *sdp) ...@@ -210,7 +210,7 @@ static void buf_lo_before_commit(struct gfs2_sbd *sdp)
gfs2_log_unlock(sdp); gfs2_log_unlock(sdp);
lock_buffer(bd2->bd_bh); lock_buffer(bd2->bd_bh);
bh = gfs2_log_fake_buf(sdp, bd2->bd_bh); bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
submit_bh(WRITE_SYNC_PLUG, bh); submit_bh(WRITE_SYNC, bh);
gfs2_log_lock(sdp); gfs2_log_lock(sdp);
if (++n >= num) if (++n >= num)
break; break;
...@@ -352,7 +352,7 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp) ...@@ -352,7 +352,7 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
sdp->sd_log_num_revoke--; sdp->sd_log_num_revoke--;
if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) { if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
submit_bh(WRITE_SYNC_PLUG, bh); submit_bh(WRITE_SYNC, bh);
bh = gfs2_log_get_buf(sdp); bh = gfs2_log_get_buf(sdp);
mh = (struct gfs2_meta_header *)bh->b_data; mh = (struct gfs2_meta_header *)bh->b_data;
...@@ -369,7 +369,7 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp) ...@@ -369,7 +369,7 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
} }
gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke); gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
submit_bh(WRITE_SYNC_PLUG, bh); submit_bh(WRITE_SYNC, bh);
} }
static void revoke_lo_before_scan(struct gfs2_jdesc *jd, static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
...@@ -571,7 +571,7 @@ static void gfs2_write_blocks(struct gfs2_sbd *sdp, struct buffer_head *bh, ...@@ -571,7 +571,7 @@ static void gfs2_write_blocks(struct gfs2_sbd *sdp, struct buffer_head *bh,
ptr = bh_log_ptr(bh); ptr = bh_log_ptr(bh);
get_bh(bh); get_bh(bh);
submit_bh(WRITE_SYNC_PLUG, bh); submit_bh(WRITE_SYNC, bh);
gfs2_log_lock(sdp); gfs2_log_lock(sdp);
while(!list_empty(list)) { while(!list_empty(list)) {
bd = list_entry(list->next, struct gfs2_bufdata, bd_le.le_list); bd = list_entry(list->next, struct gfs2_bufdata, bd_le.le_list);
...@@ -597,7 +597,7 @@ static void gfs2_write_blocks(struct gfs2_sbd *sdp, struct buffer_head *bh, ...@@ -597,7 +597,7 @@ static void gfs2_write_blocks(struct gfs2_sbd *sdp, struct buffer_head *bh,
} else { } else {
bh1 = gfs2_log_fake_buf(sdp, bd->bd_bh); bh1 = gfs2_log_fake_buf(sdp, bd->bd_bh);
} }
submit_bh(WRITE_SYNC_PLUG, bh1); submit_bh(WRITE_SYNC, bh1);
gfs2_log_lock(sdp); gfs2_log_lock(sdp);
ptr += 2; ptr += 2;
} }
......
...@@ -37,7 +37,7 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb ...@@ -37,7 +37,7 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
struct buffer_head *bh, *head; struct buffer_head *bh, *head;
int nr_underway = 0; int nr_underway = 0;
int write_op = REQ_META | int write_op = REQ_META |
(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC_PLUG : WRITE); (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
BUG_ON(!PageLocked(page)); BUG_ON(!PageLocked(page));
BUG_ON(!page_has_buffers(page)); BUG_ON(!page_has_buffers(page));
......
...@@ -333,7 +333,7 @@ void journal_commit_transaction(journal_t *journal) ...@@ -333,7 +333,7 @@ void journal_commit_transaction(journal_t *journal)
* instead we rely on sync_buffer() doing the unplug for us. * instead we rely on sync_buffer() doing the unplug for us.
*/ */
if (commit_transaction->t_synchronous_commit) if (commit_transaction->t_synchronous_commit)
write_op = WRITE_SYNC_PLUG; write_op = WRITE_SYNC;
spin_lock(&commit_transaction->t_handle_lock); spin_lock(&commit_transaction->t_handle_lock);
while (commit_transaction->t_updates) { while (commit_transaction->t_updates) {
DEFINE_WAIT(wait); DEFINE_WAIT(wait);
......
...@@ -137,9 +137,9 @@ static int journal_submit_commit_record(journal_t *journal, ...@@ -137,9 +137,9 @@ static int journal_submit_commit_record(journal_t *journal,
if (journal->j_flags & JBD2_BARRIER && if (journal->j_flags & JBD2_BARRIER &&
!JBD2_HAS_INCOMPAT_FEATURE(journal, !JBD2_HAS_INCOMPAT_FEATURE(journal,
JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT))
ret = submit_bh(WRITE_SYNC_PLUG | WRITE_FLUSH_FUA, bh); ret = submit_bh(WRITE_SYNC | WRITE_FLUSH_FUA, bh);
else else
ret = submit_bh(WRITE_SYNC_PLUG, bh); ret = submit_bh(WRITE_SYNC, bh);
*cbh = bh; *cbh = bh;
return ret; return ret;
...@@ -369,7 +369,7 @@ void jbd2_journal_commit_transaction(journal_t *journal) ...@@ -369,7 +369,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
* instead we rely on sync_buffer() doing the unplug for us. * instead we rely on sync_buffer() doing the unplug for us.
*/ */
if (commit_transaction->t_synchronous_commit) if (commit_transaction->t_synchronous_commit)
write_op = WRITE_SYNC_PLUG; write_op = WRITE_SYNC;
trace_jbd2_commit_locking(journal, commit_transaction); trace_jbd2_commit_locking(journal, commit_transaction);
stats.run.rs_wait = commit_transaction->t_max_wait; stats.run.rs_wait = commit_transaction->t_max_wait;
stats.run.rs_locked = jiffies; stats.run.rs_locked = jiffies;
......
...@@ -509,7 +509,7 @@ static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf, ...@@ -509,7 +509,7 @@ static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
* Last BIO is always sent through the following * Last BIO is always sent through the following
* submission. * submission.
*/ */
rw |= REQ_SYNC | REQ_UNPLUG; rw |= REQ_SYNC;
res = nilfs_segbuf_submit_bio(segbuf, &wi, rw); res = nilfs_segbuf_submit_bio(segbuf, &wi, rw);
} }
......
...@@ -413,8 +413,7 @@ xfs_submit_ioend_bio( ...@@ -413,8 +413,7 @@ xfs_submit_ioend_bio(
if (xfs_ioend_new_eof(ioend)) if (xfs_ioend_new_eof(ioend))
xfs_mark_inode_dirty(XFS_I(ioend->io_inode)); xfs_mark_inode_dirty(XFS_I(ioend->io_inode));
submit_bio(wbc->sync_mode == WB_SYNC_ALL ? submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, bio);
WRITE_SYNC_PLUG : WRITE, bio);
} }
STATIC struct bio * STATIC struct bio *
......
...@@ -128,7 +128,6 @@ enum rq_flag_bits { ...@@ -128,7 +128,6 @@ enum rq_flag_bits {
__REQ_NOIDLE, /* don't anticipate more IO after this one */ __REQ_NOIDLE, /* don't anticipate more IO after this one */
/* bio only flags */ /* bio only flags */
__REQ_UNPLUG, /* unplug the immediately after submission */
__REQ_RAHEAD, /* read ahead, can fail anytime */ __REQ_RAHEAD, /* read ahead, can fail anytime */
__REQ_THROTTLED, /* This bio has already been subjected to __REQ_THROTTLED, /* This bio has already been subjected to
* throttling rules. Don't do it again. */ * throttling rules. Don't do it again. */
...@@ -172,7 +171,6 @@ enum rq_flag_bits { ...@@ -172,7 +171,6 @@ enum rq_flag_bits {
REQ_NOIDLE | REQ_FLUSH | REQ_FUA) REQ_NOIDLE | REQ_FLUSH | REQ_FUA)
#define REQ_CLONE_MASK REQ_COMMON_MASK #define REQ_CLONE_MASK REQ_COMMON_MASK
#define REQ_UNPLUG (1 << __REQ_UNPLUG)
#define REQ_RAHEAD (1 << __REQ_RAHEAD) #define REQ_RAHEAD (1 << __REQ_RAHEAD)
#define REQ_THROTTLED (1 << __REQ_THROTTLED) #define REQ_THROTTLED (1 << __REQ_THROTTLED)
......
...@@ -135,16 +135,10 @@ struct inodes_stat_t { ...@@ -135,16 +135,10 @@ struct inodes_stat_t {
* block layer could (in theory) choose to ignore this * block layer could (in theory) choose to ignore this
* request if it runs into resource problems. * request if it runs into resource problems.
* WRITE A normal async write. Device will be plugged. * WRITE A normal async write. Device will be plugged.
* WRITE_SYNC_PLUG Synchronous write. Identical to WRITE, but passes down * WRITE_SYNC Synchronous write. Identical to WRITE, but passes down
* the hint that someone will be waiting on this IO * the hint that someone will be waiting on this IO
* shortly. The device must still be unplugged explicitly, * shortly. The write equivalent of READ_SYNC.
* WRITE_SYNC_PLUG does not do this as we could be * WRITE_ODIRECT Special case write for O_DIRECT only.
* submitting more writes before we actually wait on any
* of them.
* WRITE_SYNC Like WRITE_SYNC_PLUG, but also unplugs the device
* immediately after submission. The write equivalent
* of READ_SYNC.
* WRITE_ODIRECT_PLUG Special case write for O_DIRECT only.
* WRITE_FLUSH Like WRITE_SYNC but with preceding cache flush. * WRITE_FLUSH Like WRITE_SYNC but with preceding cache flush.
* WRITE_FUA Like WRITE_SYNC but data is guaranteed to be on * WRITE_FUA Like WRITE_SYNC but data is guaranteed to be on
* non-volatile media on completion. * non-volatile media on completion.
...@@ -160,18 +154,14 @@ struct inodes_stat_t { ...@@ -160,18 +154,14 @@ struct inodes_stat_t {
#define WRITE RW_MASK #define WRITE RW_MASK
#define READA RWA_MASK #define READA RWA_MASK
#define READ_SYNC (READ | REQ_SYNC | REQ_UNPLUG) #define READ_SYNC (READ | REQ_SYNC)
#define READ_META (READ | REQ_META) #define READ_META (READ | REQ_META)
#define WRITE_SYNC_PLUG (WRITE | REQ_SYNC | REQ_NOIDLE) #define WRITE_SYNC (WRITE | REQ_SYNC | REQ_NOIDLE)
#define WRITE_SYNC (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG) #define WRITE_ODIRECT (WRITE | REQ_SYNC)
#define WRITE_ODIRECT_PLUG (WRITE | REQ_SYNC)
#define WRITE_META (WRITE | REQ_META) #define WRITE_META (WRITE | REQ_META)
#define WRITE_FLUSH (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \ #define WRITE_FLUSH (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH)
REQ_FLUSH) #define WRITE_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FUA)
#define WRITE_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \ #define WRITE_FLUSH_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH | REQ_FUA)
REQ_FUA)
#define WRITE_FLUSH_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \
REQ_FLUSH | REQ_FUA)
#define SEL_IN 1 #define SEL_IN 1
#define SEL_OUT 2 #define SEL_OUT 2
......
...@@ -28,7 +28,7 @@ ...@@ -28,7 +28,7 @@
static int submit(int rw, struct block_device *bdev, sector_t sector, static int submit(int rw, struct block_device *bdev, sector_t sector,
struct page *page, struct bio **bio_chain) struct page *page, struct bio **bio_chain)
{ {
const int bio_rw = rw | REQ_SYNC | REQ_UNPLUG; const int bio_rw = rw | REQ_SYNC;
struct bio *bio; struct bio *bio;
bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1); bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
......
...@@ -106,7 +106,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc) ...@@ -106,7 +106,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
goto out; goto out;
} }
if (wbc->sync_mode == WB_SYNC_ALL) if (wbc->sync_mode == WB_SYNC_ALL)
rw |= REQ_SYNC | REQ_UNPLUG; rw |= REQ_SYNC;
count_vm_event(PSWPOUT); count_vm_event(PSWPOUT);
set_page_writeback(page); set_page_writeback(page);
unlock_page(page); unlock_page(page);
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment