Commit 4aff5e23 authored by Jens Axboe, committed by Jens Axboe

[PATCH] Split struct request ->flags into two parts

Right now ->flags is a bit of a mess: some are request types, and
others are just modifiers. Clean this up by splitting it into
->cmd_type and ->cmd_flags. This allows introduction of generic
Linux block message types, useful for sending generic Linux commands
to block devices.
Signed-off-by: Jens Axboe <axboe@suse.de>
parent 77ed74da
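The change is mechanical throughout the diff below: a request's type (filesystem, packet command, sense, special, ...) moves out of the old ->flags bitmask into the new ->cmd_type field, while the modifier bits stay bitwise in ->cmd_flags. A minimal before/after sketch of the driver-side pattern — the two handler functions are illustrative only; blk_fs_request() and the REQ_TYPE_* / REQ_* names are the ones this patch introduces or keeps:

	#include <linux/blkdev.h>

	/* Before: type bits and modifier bits shared one rq->flags bitmask. */
	static void handle_rq_old(struct request *rq)
	{
		if (!(rq->flags & REQ_CMD))	/* type test: filesystem request? */
			return;
		if (rq->flags & REQ_QUIET)	/* modifier test */
			return;
		/* ... */
	}

	/* After: the type is a single value in rq->cmd_type, tested via helpers
	 * such as blk_fs_request() (i.e. rq->cmd_type == REQ_TYPE_FS), and only
	 * modifiers remain as bits in rq->cmd_flags. */
	static void handle_rq_new(struct request *rq)
	{
		if (!blk_fs_request(rq))
			return;
		if (rq->cmd_flags & REQ_QUIET)
			return;
		/* ... */
	}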
@@ -1335,7 +1335,7 @@ static void as_add_request(request_queue_t *q, struct request *rq)
 	arq->state = AS_RQ_NEW;
 
 	if (rq_data_dir(arq->request) == READ
-			|| (arq->request->flags & REQ_RW_SYNC))
+			|| (arq->request->cmd_flags & REQ_RW_SYNC))
 		arq->is_sync = 1;
 	else
 		arq->is_sync = 0;
@@ -242,7 +242,7 @@ void elv_dispatch_sort(request_queue_t *q, struct request *rq)
 	list_for_each_prev(entry, &q->queue_head) {
 		struct request *pos = list_entry_rq(entry);
 
-		if (pos->flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
+		if (pos->cmd_flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED))
 			break;
 		if (rq->sector >= boundary) {
 			if (pos->sector < boundary)
@@ -313,7 +313,7 @@ void elv_requeue_request(request_queue_t *q, struct request *rq)
 			e->ops->elevator_deactivate_req_fn(q, rq);
 	}
 
-	rq->flags &= ~REQ_STARTED;
+	rq->cmd_flags &= ~REQ_STARTED;
 
 	elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
 }
@@ -344,13 +344,13 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
 
 	switch (where) {
 	case ELEVATOR_INSERT_FRONT:
-		rq->flags |= REQ_SOFTBARRIER;
+		rq->cmd_flags |= REQ_SOFTBARRIER;
 
 		list_add(&rq->queuelist, &q->queue_head);
 		break;
 
 	case ELEVATOR_INSERT_BACK:
-		rq->flags |= REQ_SOFTBARRIER;
+		rq->cmd_flags |= REQ_SOFTBARRIER;
 		elv_drain_elevator(q);
 		list_add_tail(&rq->queuelist, &q->queue_head);
 		/*
@@ -369,7 +369,7 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
 	case ELEVATOR_INSERT_SORT:
 		BUG_ON(!blk_fs_request(rq));
-		rq->flags |= REQ_SORTED;
+		rq->cmd_flags |= REQ_SORTED;
 		q->nr_sorted++;
 		if (q->last_merge == NULL && rq_mergeable(rq))
 			q->last_merge = rq;
@@ -387,7 +387,7 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
 		 * insertion; otherwise, requests should be requeued
 		 * in ordseq order.
 		 */
-		rq->flags |= REQ_SOFTBARRIER;
+		rq->cmd_flags |= REQ_SOFTBARRIER;
 
 		if (q->ordseq == 0) {
 			list_add(&rq->queuelist, &q->queue_head);
@@ -429,9 +429,9 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
 		       int plug)
 {
 	if (q->ordcolor)
-		rq->flags |= REQ_ORDERED_COLOR;
+		rq->cmd_flags |= REQ_ORDERED_COLOR;
 
-	if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
+	if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
 		/*
 		 * toggle ordered color
 		 */
@@ -452,7 +452,7 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
 			q->end_sector = rq_end_sector(rq);
 			q->boundary_rq = rq;
 		}
-	} else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
+	} else if (!(rq->cmd_flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
 		where = ELEVATOR_INSERT_BACK;
 
 	if (plug)
@@ -493,7 +493,7 @@ struct request *elv_next_request(request_queue_t *q)
 	int ret;
 
 	while ((rq = __elv_next_request(q)) != NULL) {
-		if (!(rq->flags & REQ_STARTED)) {
+		if (!(rq->cmd_flags & REQ_STARTED)) {
 			elevator_t *e = q->elevator;
 
 			/*
@@ -510,7 +510,7 @@ struct request *elv_next_request(request_queue_t *q)
 			 * it, a request that has been delayed should
 			 * not be passed by new incoming requests
 			 */
-			rq->flags |= REQ_STARTED;
+			rq->cmd_flags |= REQ_STARTED;
 			blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
 		}
@@ -519,7 +519,7 @@ struct request *elv_next_request(request_queue_t *q)
 			q->boundary_rq = NULL;
 		}
 
-		if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn)
+		if ((rq->cmd_flags & REQ_DONTPREP) || !q->prep_rq_fn)
 			break;
 
 		ret = q->prep_rq_fn(q, rq);
@@ -541,7 +541,7 @@ struct request *elv_next_request(request_queue_t *q)
 				nr_bytes = rq->data_len;
 
 			blkdev_dequeue_request(rq);
-			rq->flags |= REQ_QUIET;
+			rq->cmd_flags |= REQ_QUIET;
 			end_that_request_chunk(rq, 0, nr_bytes);
 			end_that_request_last(rq, 0);
 		} else {
@@ -382,8 +382,8 @@ unsigned blk_ordered_req_seq(struct request *rq)
 	if (rq == &q->post_flush_rq)
 		return QUEUE_ORDSEQ_POSTFLUSH;
 
-	if ((rq->flags & REQ_ORDERED_COLOR) ==
-	    (q->orig_bar_rq->flags & REQ_ORDERED_COLOR))
+	if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
+	    (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR))
 		return QUEUE_ORDSEQ_DRAIN;
 	else
 		return QUEUE_ORDSEQ_DONE;
@@ -446,8 +446,8 @@ static void queue_flush(request_queue_t *q, unsigned which)
 		end_io = post_flush_end_io;
 	}
 
+	rq->cmd_flags = REQ_HARDBARRIER;
 	rq_init(q, rq);
-	rq->flags = REQ_HARDBARRIER;
 	rq->elevator_private = NULL;
 	rq->rq_disk = q->bar_rq.rq_disk;
 	rq->rl = NULL;
@@ -471,9 +471,11 @@ static inline struct request *start_ordered(request_queue_t *q,
 	blkdev_dequeue_request(rq);
 	q->orig_bar_rq = rq;
 	rq = &q->bar_rq;
+	rq->cmd_flags = 0;
 	rq_init(q, rq);
-	rq->flags = bio_data_dir(q->orig_bar_rq->bio);
-	rq->flags |= q->ordered & QUEUE_ORDERED_FUA ? REQ_FUA : 0;
+	if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
+		rq->cmd_flags |= REQ_RW;
+	rq->cmd_flags |= q->ordered & QUEUE_ORDERED_FUA ? REQ_FUA : 0;
 	rq->elevator_private = NULL;
 	rq->rl = NULL;
 	init_request_from_bio(rq, q->orig_bar_rq->bio);
@@ -1124,7 +1126,7 @@ void blk_queue_end_tag(request_queue_t *q, struct request *rq)
 	}
 
 	list_del_init(&rq->queuelist);
-	rq->flags &= ~REQ_QUEUED;
+	rq->cmd_flags &= ~REQ_QUEUED;
 	rq->tag = -1;
 
 	if (unlikely(bqt->tag_index[tag] == NULL))
@@ -1160,7 +1162,7 @@ int blk_queue_start_tag(request_queue_t *q, struct request *rq)
 	struct blk_queue_tag *bqt = q->queue_tags;
 	int tag;
 
-	if (unlikely((rq->flags & REQ_QUEUED))) {
+	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
 		printk(KERN_ERR
 		       "%s: request %p for device [%s] already tagged %d",
 		       __FUNCTION__, rq,
@@ -1174,7 +1176,7 @@ int blk_queue_start_tag(request_queue_t *q, struct request *rq)
 	__set_bit(tag, bqt->tag_map);
 
-	rq->flags |= REQ_QUEUED;
+	rq->cmd_flags |= REQ_QUEUED;
 	rq->tag = tag;
 	bqt->tag_index[tag] = rq;
 	blkdev_dequeue_request(rq);
@@ -1210,65 +1212,31 @@ void blk_queue_invalidate_tags(request_queue_t *q)
 			printk(KERN_ERR
 			       "%s: bad tag found on list\n", __FUNCTION__);
 			list_del_init(&rq->queuelist);
-			rq->flags &= ~REQ_QUEUED;
+			rq->cmd_flags &= ~REQ_QUEUED;
 		} else
 			blk_queue_end_tag(q, rq);
 
-		rq->flags &= ~REQ_STARTED;
+		rq->cmd_flags &= ~REQ_STARTED;
 		__elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
 	}
 }
 
 EXPORT_SYMBOL(blk_queue_invalidate_tags);
 
-static const char * const rq_flags[] = {
-	"REQ_RW",
-	"REQ_FAILFAST",
-	"REQ_SORTED",
-	"REQ_SOFTBARRIER",
-	"REQ_HARDBARRIER",
-	"REQ_FUA",
-	"REQ_CMD",
-	"REQ_NOMERGE",
-	"REQ_STARTED",
-	"REQ_DONTPREP",
-	"REQ_QUEUED",
-	"REQ_ELVPRIV",
-	"REQ_PC",
-	"REQ_BLOCK_PC",
-	"REQ_SENSE",
-	"REQ_FAILED",
-	"REQ_QUIET",
-	"REQ_SPECIAL",
-	"REQ_DRIVE_CMD",
-	"REQ_DRIVE_TASK",
-	"REQ_DRIVE_TASKFILE",
-	"REQ_PREEMPT",
-	"REQ_PM_SUSPEND",
-	"REQ_PM_RESUME",
-	"REQ_PM_SHUTDOWN",
-	"REQ_ORDERED_COLOR",
-};
-
 void blk_dump_rq_flags(struct request *rq, char *msg)
 {
 	int bit;
 
-	printk("%s: dev %s: flags = ", msg,
-		rq->rq_disk ? rq->rq_disk->disk_name : "?");
-	bit = 0;
-	do {
-		if (rq->flags & (1 << bit))
-			printk("%s ", rq_flags[bit]);
-		bit++;
-	} while (bit < __REQ_NR_BITS);
+	printk("%s: dev %s: type=%x, flags=%x\n", msg,
+		rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
+		rq->cmd_flags);
 
 	printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector,
 						       rq->nr_sectors,
 						       rq->current_nr_sectors);
 	printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len);
 
-	if (rq->flags & (REQ_BLOCK_PC | REQ_PC)) {
+	if (blk_pc_request(rq)) {
 		printk("cdb: ");
 		for (bit = 0; bit < sizeof(rq->cmd); bit++)
 			printk("%02x ", rq->cmd[bit]);
@@ -1441,7 +1409,7 @@ static inline int ll_new_mergeable(request_queue_t *q,
 	int nr_phys_segs = bio_phys_segments(q, bio);
 
 	if (req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
-		req->flags |= REQ_NOMERGE;
+		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
 		return 0;
@@ -1464,7 +1432,7 @@ static inline int ll_new_hw_segment(request_queue_t *q,
 
 	if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments
 	    || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
-		req->flags |= REQ_NOMERGE;
+		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
 		return 0;
@@ -1491,7 +1459,7 @@ static int ll_back_merge_fn(request_queue_t *q, struct request *req,
 		max_sectors = q->max_sectors;
 
 	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
-		req->flags |= REQ_NOMERGE;
+		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
 		return 0;
@@ -1530,7 +1498,7 @@ static int ll_front_merge_fn(request_queue_t *q, struct request *req,
 
 	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
-		req->flags |= REQ_NOMERGE;
+		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
 		return 0;
@@ -2029,7 +1997,7 @@ EXPORT_SYMBOL(blk_get_queue);
 
 static inline void blk_free_request(request_queue_t *q, struct request *rq)
 {
-	if (rq->flags & REQ_ELVPRIV)
+	if (rq->cmd_flags & REQ_ELVPRIV)
 		elv_put_request(q, rq);
 	mempool_free(rq, q->rq.rq_pool);
 }
@@ -2044,17 +2012,17 @@ blk_alloc_request(request_queue_t *q, int rw, struct bio *bio,
 		return NULL;
 
 	/*
-	 * first three bits are identical in rq->flags and bio->bi_rw,
+	 * first three bits are identical in rq->cmd_flags and bio->bi_rw,
 	 * see bio.h and blkdev.h
 	 */
-	rq->flags = rw;
+	rq->cmd_flags = rw;
 
 	if (priv) {
 		if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) {
 			mempool_free(rq, q->rq.rq_pool);
 			return NULL;
 		}
-		rq->flags |= REQ_ELVPRIV;
+		rq->cmd_flags |= REQ_ELVPRIV;
 	}
 
 	return rq;
@@ -2351,7 +2319,8 @@ void blk_insert_request(request_queue_t *q, struct request *rq,
 	 * must not attempt merges on this) and that it acts as a soft
 	 * barrier
 	 */
-	rq->flags |= REQ_SPECIAL | REQ_SOFTBARRIER;
+	rq->cmd_type = REQ_TYPE_SPECIAL;
+	rq->cmd_flags |= REQ_SOFTBARRIER;
 
 	rq->special = data;
@@ -2558,7 +2527,7 @@ void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
 	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
 
 	rq->rq_disk = bd_disk;
-	rq->flags |= REQ_NOMERGE;
+	rq->cmd_flags |= REQ_NOMERGE;
 	rq->end_io = done;
 	WARN_ON(irqs_disabled());
 	spin_lock_irq(q->queue_lock);
@@ -2728,7 +2697,7 @@ void __blk_put_request(request_queue_t *q, struct request *req)
 	 */
 	if (rl) {
 		int rw = rq_data_dir(req);
-		int priv = req->flags & REQ_ELVPRIV;
+		int priv = req->cmd_flags & REQ_ELVPRIV;
 
 		BUG_ON(!list_empty(&req->queuelist));
@@ -2890,22 +2859,22 @@ static inline int attempt_front_merge(request_queue_t *q, struct request *rq)
 static void init_request_from_bio(struct request *req, struct bio *bio)
 {
-	req->flags |= REQ_CMD;
+	req->cmd_type = REQ_TYPE_FS;
 
 	/*
 	 * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
 	 */
 	if (bio_rw_ahead(bio) || bio_failfast(bio))
-		req->flags |= REQ_FAILFAST;
+		req->cmd_flags |= REQ_FAILFAST;
 
 	/*
 	 * REQ_BARRIER implies no merging, but lets make it explicit
 	 */
 	if (unlikely(bio_barrier(bio)))
-		req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
+		req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
 
 	if (bio_sync(bio))
-		req->flags |= REQ_RW_SYNC;
+		req->cmd_flags |= REQ_RW_SYNC;
 
 	req->errors = 0;
 	req->hard_sector = req->sector = bio->bi_sector;
@@ -3306,7 +3275,7 @@ static int __end_that_request_first(struct request *req, int uptodate,
 	req->errors = 0;
 
 	if (!uptodate) {
-		if (blk_fs_request(req) && !(req->flags & REQ_QUIET))
+		if (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))
 			printk("end_request: I/O error, dev %s, sector %llu\n",
 				req->rq_disk ? req->rq_disk->disk_name : "?",
 				(unsigned long long)req->sector);
@@ -3569,8 +3538,8 @@ EXPORT_SYMBOL(end_request);
 
 void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio)
 {
-	/* first two bits are identical in rq->flags and bio->bi_rw */
-	rq->flags |= (bio->bi_rw & 3);
+	/* first two bits are identical in rq->cmd_flags and bio->bi_rw */
+	rq->cmd_flags |= (bio->bi_rw & 3);
 
 	rq->nr_phys_segments = bio_phys_segments(q, bio);
 	rq->nr_hw_segments = bio_hw_segments(q, bio);
@@ -294,7 +294,7 @@ static int sg_io(struct file *file, request_queue_t *q,
 	rq->sense = sense;
 	rq->sense_len = 0;
 
-	rq->flags |= REQ_BLOCK_PC;
+	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 	bio = rq->bio;
 
 	/*
@@ -470,7 +470,7 @@ int sg_scsi_ioctl(struct file *file, struct request_queue *q,
 	memset(sense, 0, sizeof(sense));
 	rq->sense = sense;
 	rq->sense_len = 0;
-	rq->flags |= REQ_BLOCK_PC;
+	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 
 	blk_execute_rq(q, disk, rq, 0);
@@ -502,7 +502,7 @@ static int __blk_send_generic(request_queue_t *q, struct gendisk *bd_disk, int c
 	int err;
 
 	rq = blk_get_request(q, WRITE, __GFP_WAIT);
-	rq->flags |= REQ_BLOCK_PC;
+	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 	rq->data = NULL;
 	rq->data_len = 0;
 	rq->timeout = BLK_DEFAULT_TIMEOUT;
@@ -2991,8 +2991,8 @@ static void do_fd_request(request_queue_t * q)
 	if (usage_count == 0) {
 		printk("warning: usage count=0, current_req=%p exiting\n",
 		       current_req);
-		printk("sect=%ld flags=%lx\n", (long)current_req->sector,
-		       current_req->flags);
+		printk("sect=%ld type=%x flags=%x\n", (long)current_req->sector,
+		       current_req->cmd_type, current_req->cmd_flags);
 		return;
 	}
 	if (test_bit(0, &fdc_busy)) {
@@ -407,10 +407,10 @@ static void do_nbd_request(request_queue_t * q)
 		struct nbd_device *lo;
 
 		blkdev_dequeue_request(req);
-		dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%lx)\n",
-				req->rq_disk->disk_name, req, req->flags);
+		dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n",
+				req->rq_disk->disk_name, req, req->cmd_type);
 
-		if (!(req->flags & REQ_CMD))
+		if (!blk_fs_request(req))
 			goto error_out;
 
 		lo = req->rq_disk->private_data;
@@ -489,7 +489,7 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
 	switch (cmd) {
 	case NBD_DISCONNECT:
 		printk(KERN_INFO "%s: NBD_DISCONNECT\n", lo->disk->disk_name);
-		sreq.flags = REQ_SPECIAL;
+		sreq.cmd_type = REQ_TYPE_SPECIAL;
 		nbd_cmd(&sreq) = NBD_CMD_DISC;
 		/*
 		 * Set these to sane values in case server implementation
@@ -437,7 +437,7 @@ static char *pd_buf;	/* buffer for request in progress */
 
 static enum action do_pd_io_start(void)
 {
-	if (pd_req->flags & REQ_SPECIAL) {
+	if (blk_special_request(pd_req)) {
 		phase = pd_special;
 		return pd_special();
 	}
@@ -365,16 +365,16 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
 	rq->sense = sense;
 	memset(sense, 0, sizeof(sense));
 	rq->sense_len = 0;
-	rq->flags |= REQ_BLOCK_PC | REQ_HARDBARRIER;
+	rq->cmd_type = REQ_TYPE_BLOCK_PC;
+	rq->cmd_flags |= REQ_HARDBARRIER;
 	if (cgc->quiet)
-		rq->flags |= REQ_QUIET;
+		rq->cmd_flags |= REQ_QUIET;
 	memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE);
 	if (sizeof(rq->cmd) > CDROM_PACKET_SIZE)
 		memset(rq->cmd + CDROM_PACKET_SIZE, 0, sizeof(rq->cmd) - CDROM_PACKET_SIZE);
 	rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
 
 	rq->ref_count++;
-	rq->flags |= REQ_NOMERGE;
 	rq->waiting = &wait;
 	rq->end_io = blk_end_sync_rq;
 	elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1);
@@ -313,7 +313,7 @@ static void do_xd_request (request_queue_t * q)
 		int res = 0;
 		int retry;
 
-		if (!(req->flags & REQ_CMD)) {
+		if (!blk_fs_request(req)) {
 			end_request(req, 0);
 			continue;
 		}
@@ -2129,7 +2129,7 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
 		rq->cmd[9] = 0xf8;
 
 		rq->cmd_len = 12;
-		rq->flags |= REQ_BLOCK_PC;
+		rq->cmd_type = REQ_TYPE_BLOCK_PC;
 		rq->timeout = 60 * HZ;
 		bio = rq->bio;
@@ -1338,8 +1338,10 @@ static void do_cdu31a_request(request_queue_t * q)
 		}
 
 		/* WTF??? */
-		if (!(req->flags & REQ_CMD))
+		if (!blk_fs_request(req)) {
 			end_request(req, 0);
+			continue;
+		}
 		if (rq_data_dir(req) == WRITE) {
 			end_request(req, 0);
 			continue;
@@ -372,7 +372,7 @@ static int cdrom_log_sense(ide_drive_t *drive, struct request *rq,
 {
 	int log = 0;
 
-	if (!sense || !rq || (rq->flags & REQ_QUIET))
+	if (!sense || !rq || (rq->cmd_flags & REQ_QUIET))
 		return 0;
 
 	switch (sense->sense_key) {
@@ -597,7 +597,7 @@ static void cdrom_prepare_request(ide_drive_t *drive, struct request *rq)
 	struct cdrom_info *cd = drive->driver_data;
 
 	ide_init_drive_cmd(rq);
-	rq->flags = REQ_PC;
+	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 	rq->rq_disk = cd->disk;
 }
@@ -617,7 +617,7 @@ static void cdrom_queue_request_sense(ide_drive_t *drive, void *sense,
 	rq->cmd[0] = GPCMD_REQUEST_SENSE;
 	rq->cmd[4] = rq->data_len = 18;
 
-	rq->flags = REQ_SENSE;
+	rq->cmd_type = REQ_TYPE_SENSE;
 
 	/* NOTE! Save the failed command in "rq->buffer" */
 	rq->buffer = (void *) failed_command;
@@ -630,10 +630,10 @@ static void cdrom_end_request (ide_drive_t *drive, int uptodate)
 	struct request *rq = HWGROUP(drive)->rq;
 	int nsectors = rq->hard_cur_sectors;
 
-	if ((rq->flags & REQ_SENSE) && uptodate) {
+	if (blk_sense_request(rq) && uptodate) {
 		/*
-		 * For REQ_SENSE, "rq->buffer" points to the original failed
-		 * request
+		 * For REQ_TYPE_SENSE, "rq->buffer" points to the original
+		 * failed request
 		 */
 		struct request *failed = (struct request *) rq->buffer;
 		struct cdrom_info *info = drive->driver_data;
@@ -706,17 +706,17 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
 		return 1;
 	}
 
-	if (rq->flags & REQ_SENSE) {
+	if (blk_sense_request(rq)) {
 		/* We got an error trying to get sense info
 		   from the drive (probably while trying
 		   to recover from a former error). Just give up. */
 
-		rq->flags |= REQ_FAILED;
+		rq->cmd_flags |= REQ_FAILED;
 		cdrom_end_request(drive, 0);
 		ide_error(drive, "request sense failure", stat);
 		return 1;
 
-	} else if (rq->flags & (REQ_PC | REQ_BLOCK_PC)) {
+	} else if (blk_pc_request(rq)) {
 		/* All other functions, except for READ. */
 		unsigned long flags;
@@ -724,7 +724,7 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
 		 * if we have an error, pass back CHECK_CONDITION as the
 		 * scsi status byte
 		 */
-		if ((rq->flags & REQ_BLOCK_PC) && !rq->errors)
+		if (!rq->errors)
 			rq->errors = SAM_STAT_CHECK_CONDITION;
 
 		/* Check for tray open. */
@@ -735,12 +735,12 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
 			cdrom_saw_media_change (drive);
 			/*printk("%s: media changed\n",drive->name);*/
 			return 0;
-		} else if (!(rq->flags & REQ_QUIET)) {
+		} else if (!(rq->cmd_flags & REQ_QUIET)) {
 			/* Otherwise, print an error. */
 			ide_dump_status(drive, "packet command error", stat);
 		}
 
-		rq->flags |= REQ_FAILED;
+		rq->cmd_flags |= REQ_FAILED;
 
 		/*
 		 * instead of playing games with moving completions around,
@@ -881,7 +881,7 @@ static int cdrom_timer_expiry(ide_drive_t *drive)
 			wait = ATAPI_WAIT_PC;
 			break;
 		default:
-			if (!(rq->flags & REQ_QUIET))
+			if (!(rq->cmd_flags & REQ_QUIET))
 				printk(KERN_INFO "ide-cd: cmd 0x%x timed out\n", rq->cmd[0]);
 			wait = 0;
 			break;
@@ -1124,7 +1124,7 @@ static ide_startstop_t cdrom_read_intr (ide_drive_t *drive)
 		if (rq->current_nr_sectors > 0) {
 			printk (KERN_ERR "%s: cdrom_read_intr: data underrun (%d blocks)\n",
 				drive->name, rq->current_nr_sectors);
-			rq->flags |= REQ_FAILED;
+			rq->cmd_flags |= REQ_FAILED;
 			cdrom_end_request(drive, 0);
 		} else
 			cdrom_end_request(drive, 1);
@@ -1456,7 +1456,7 @@ static ide_startstop_t cdrom_pc_intr (ide_drive_t *drive)
 			printk ("%s: cdrom_pc_intr: data underrun %d\n",
 				drive->name, pc->buflen);
 			*/
-			rq->flags |= REQ_FAILED;