Commit 242f9dcb authored by Jens Axboe

block: unify request timeout handling

Right now SCSI and others do their own command timeout handling.
Move those bits to the block layer.

Instead of having a timer per command, we try to be a bit more clever
and simply have one per-queue. This avoids the overhead of having to
tear down and set up a timer for each command, so it will result in a lot
less timer fiddling.
Signed-off-by: Mike Anderson <andmike@linux.vnet.ibm.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent 608aeef1
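For a driver, the conversion boils down to registering two per-queue hooks instead of arming a timer per command. Below is a minimal sketch of a hypothetical LLD; "mydrv" and its stub helper are illustrative, while blk_queue_rq_timeout(), blk_queue_rq_timed_out(), and the BLK_EH_* return values are what this patch introduces:

#include <linux/blkdev.h>

static bool mydrv_rq_still_in_flight(struct request *rq)
{
	return false;	/* stub: a real driver would query its hardware */
}

static enum blk_eh_timer_return mydrv_timed_out(struct request *rq)
{
	if (mydrv_rq_still_in_flight(rq))
		return BLK_EH_RESET_TIMER;	/* just slow: re-arm the timer */

	return BLK_EH_NOT_HANDLED;		/* let the LLD's own EH run */
}

static void mydrv_setup_queue(struct request_queue *q)
{
	blk_queue_rq_timeout(q, 30 * HZ);		/* queue-wide default */
	blk_queue_rq_timed_out(q, mydrv_timed_out);	/* timeout callback */
}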
......@@ -4,8 +4,8 @@
obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \
blk-barrier.o blk-settings.o blk-ioc.o blk-map.o \
blk-exec.o blk-merge.o blk-softirq.o ioctl.o genhd.o \
scsi_ioctl.o cmd-filter.o
blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
ioctl.o genhd.o scsi_ioctl.o cmd-filter.o
obj-$(CONFIG_BLK_DEV_BSG) += bsg.o
obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
......
......@@ -110,6 +110,7 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
memset(rq, 0, sizeof(*rq));
INIT_LIST_HEAD(&rq->queuelist);
INIT_LIST_HEAD(&rq->timeout_list);
rq->cpu = -1;
rq->q = q;
rq->sector = rq->hard_sector = (sector_t) -1;
......@@ -490,6 +491,8 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
}
init_timer(&q->unplug_timer);
setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
INIT_LIST_HEAD(&q->timeout_list);
kobject_init(&q->kobj, &blk_queue_ktype);
......@@ -897,6 +900,8 @@ EXPORT_SYMBOL(blk_start_queueing);
*/
void blk_requeue_request(struct request_queue *q, struct request *rq)
{
blk_delete_timer(rq);
blk_clear_rq_complete(rq);
blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
if (blk_rq_tagged(rq))
......@@ -1650,6 +1655,8 @@ static void end_that_request_last(struct request *req, int error)
{
struct gendisk *disk = req->rq_disk;
blk_delete_timer(req);
if (blk_rq_tagged(req))
blk_queue_end_tag(req->q, req);
......
......@@ -77,6 +77,18 @@ void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
}
EXPORT_SYMBOL(blk_queue_softirq_done);
void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
{
q->rq_timed_out_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);
/**
* blk_queue_make_request - define an alternate make_request function for a device
* @q: the request queue for the device to be affected
......
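blk_queue_rq_timeout() only sets the queue-wide default; blk_add_timer() (in blk-timeout.c below) prefers a non-zero rq->timeout, so an individual request can still carry its own deadline. A hedged sketch, with the 60-second value and the helper name purely illustrative:

static struct request *mydrv_prep_slow_cmd(struct request_queue *q)
{
	struct request *rq = blk_get_request(q, READ, GFP_KERNEL);

	if (rq)
		rq->timeout = 60 * HZ;	/* overrides q->rq_timeout for this rq */
	return rq;
}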
......@@ -101,18 +101,7 @@ static struct notifier_block __cpuinitdata blk_cpu_notifier = {
.notifier_call = blk_cpu_notify,
};
/**
* blk_complete_request - end I/O on a request
* @req: the request being processed
*
* Description:
* Ends all I/O on a request. It does not handle partial completions,
* unless the driver actually implements this in its completion callback
* through requeueing. The actual completion happens out-of-order,
* through a softirq handler. The user must have registered a completion
* callback through blk_queue_softirq_done().
**/
void blk_complete_request(struct request *req)
void __blk_complete_request(struct request *req)
{
struct request_queue *q = req->q;
unsigned long flags;
......@@ -151,6 +140,23 @@ do_local:
local_irq_restore(flags);
}
/**
* blk_complete_request - end I/O on a request
* @req: the request being processed
*
* Description:
* Ends all I/O on a request. It does not handle partial completions,
* unless the driver actually implements this in its completion callback
* through requeueing. The actual completion happens out-of-order,
* through a softirq handler. The user must have registered a completion
* callback through blk_queue_softirq_done().
**/
void blk_complete_request(struct request *req)
{
if (!blk_mark_rq_complete(req))
__blk_complete_request(req);
}
EXPORT_SYMBOL(blk_complete_request);
__init int blk_softirq_init(void)
......
/*
* Functions related to generic timeout handling of requests.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include "blk.h"
/*
* blk_delete_timer - Delete/cancel timer for a given request.
* @req: request that we are canceling timer for
*
*/
void blk_delete_timer(struct request *req)
{
struct request_queue *q = req->q;
/*
* Nothing to detach
*/
if (!q->rq_timed_out_fn || !req->deadline)
return;
list_del_init(&req->timeout_list);
if (list_empty(&q->timeout_list))
del_timer(&q->timeout);
}
static void blk_rq_timed_out(struct request *req)
{
struct request_queue *q = req->q;
enum blk_eh_timer_return ret;
ret = q->rq_timed_out_fn(req);
switch (ret) {
case BLK_EH_HANDLED:
__blk_complete_request(req);
break;
case BLK_EH_RESET_TIMER:
blk_clear_rq_complete(req);
blk_add_timer(req);
break;
case BLK_EH_NOT_HANDLED:
/*
* LLD handles this for now but in the future
* we can send a request msg to abort the command
* and we can move more of the generic scsi eh code to
* the blk layer.
*/
break;
default:
printk(KERN_ERR "block: bad eh return: %d\n", ret);
break;
}
}
void blk_rq_timed_out_timer(unsigned long data)
{
struct request_queue *q = (struct request_queue *) data;
unsigned long flags, uninitialized_var(next), next_set = 0;
struct request *rq, *tmp;
spin_lock_irqsave(q->queue_lock, flags);
list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list) {
if (time_after_eq(jiffies, rq->deadline)) {
list_del_init(&rq->timeout_list);
/*
* Check if we raced with end io completion
*/
if (blk_mark_rq_complete(rq))
continue;
blk_rq_timed_out(rq);
}
if (!next_set) {
next = rq->deadline;
next_set = 1;
} else if (time_after(next, rq->deadline))
next = rq->deadline;
}
if (next_set && !list_empty(&q->timeout_list))
mod_timer(&q->timeout, round_jiffies(next));
spin_unlock_irqrestore(q->queue_lock, flags);
}
/**
* blk_abort_request -- Request recovery for the specified command
* @req: pointer to the request of interest
*
* This function requests that the block layer start recovery for the
* request by deleting the timer and calling the q's timeout function.
* LLDDs who implement their own error recovery MAY ignore the timeout
* event if they generated blk_abort_request. Must hold the queue lock.
*/
void blk_abort_request(struct request *req)
{
blk_delete_timer(req);
blk_rq_timed_out(req);
}
EXPORT_SYMBOL_GPL(blk_abort_request);
/**
* blk_add_timer - Start timeout timer for a single request
* @req: request that is about to start running.
*
* Notes:
* Each request gets a deadline; when the request is started we add it to
* the queue's timeout list and arm (or pull in) the single per-queue
* timer. When the request completes, we remove it again.
*/
void blk_add_timer(struct request *req)
{
struct request_queue *q = req->q;
unsigned long expiry;
if (!q->rq_timed_out_fn)
return;
BUG_ON(!list_empty(&req->timeout_list));
BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags));
if (req->timeout)
req->deadline = jiffies + req->timeout;
else {
req->deadline = jiffies + q->rq_timeout;
/*
* Some LLDs, like scsi, peek at the timeout to prevent
* a command from being retried forever.
*/
req->timeout = q->rq_timeout;
}
list_add_tail(&req->timeout_list, &q->timeout_list);
/*
* If the timer isn't already pending or this timeout is earlier
* than an existing one, modify the timer. Round to next nearest
* second.
*/
expiry = round_jiffies(req->deadline);
/*
* We use ->deadline == 0 to detect whether a timer was added or
* not, so just increase to next jiffy for that specific case
*/
if (unlikely(!req->deadline))
req->deadline = 1;
if (!timer_pending(&q->timeout) ||
time_before(expiry, q->timeout.expires))
mod_timer(&q->timeout, expiry);
}
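The policy blk_add_timer() implements — one timer armed for the earliest outstanding deadline, pulled in whenever a new deadline beats it — can be seen in isolation in this runnable userspace sketch (plain comparisons stand in for the kernel's wrap-safe time_before() and mod_timer()):

#include <stdio.h>
#include <stdbool.h>

static unsigned long timer_expires;
static bool timer_armed;

/* Mirror of the timer_pending()/mod_timer() dance at the end of
 * blk_add_timer(): only touch the timer if it isn't armed yet or the
 * new deadline is earlier than the current expiry. */
static void arm_for(unsigned long deadline)
{
	if (!timer_armed || deadline < timer_expires) {
		timer_expires = deadline;
		timer_armed = true;
	}
}

int main(void)
{
	arm_for(300);	/* first request: timer armed for 300 */
	arm_for(500);	/* later deadline: timer left alone */
	arm_for(200);	/* earlier deadline: timer pulled in */
	printf("timer fires at %lu\n", timer_expires);	/* prints 200 */
	return 0;
}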
......@@ -17,6 +17,30 @@ void __blk_queue_free_tags(struct request_queue *q);
void blk_unplug_work(struct work_struct *work);
void blk_unplug_timeout(unsigned long data);
void blk_rq_timed_out_timer(unsigned long data);
void blk_delete_timer(struct request *);
void blk_add_timer(struct request *);
/*
* Internal atomic flags for request handling
*/
enum rq_atomic_flags {
REQ_ATOM_COMPLETE = 0,
};
/*
* EH timer and IO completion will both attempt to 'grab' the request;
* make sure that only one of them succeeds
*/
static inline int blk_mark_rq_complete(struct request *rq)
{
return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}
static inline void blk_clear_rq_complete(struct request *rq)
{
clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}
struct io_context *current_io_context(gfp_t gfp_flags, int node);
......
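The 'grab' protocol above is just an atomic test-and-set: completion and the timeout handler both try to flip REQ_ATOM_COMPLETE, and whoever flips it first owns the request. A runnable userspace sketch of the same idea, using C11 atomics in place of the kernel's test_and_set_bit():

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag rq_complete = ATOMIC_FLAG_INIT;

/* Analogue of blk_mark_rq_complete(): returns nonzero if the request
 * was already grabbed by the other path. */
static int mark_rq_complete(void)
{
	return atomic_flag_test_and_set(&rq_complete);
}

int main(void)
{
	if (!mark_rq_complete())
		printf("completion path won, completes the request\n");
	if (mark_rq_complete())
		printf("timeout path lost the race, backs off\n");
	return 0;
}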
......@@ -36,6 +36,8 @@
#include <linux/hash.h>
#include <linux/uaccess.h>
#include "blk.h"
static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);
......@@ -771,6 +773,12 @@ struct request *elv_next_request(struct request_queue *q)
*/
rq->cmd_flags |= REQ_STARTED;
blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
/*
* We are now handing the request to the hardware,
* add the timeout handler
*/
blk_add_timer(rq);
}
if (!q->boundary_rq || q->boundary_rq == rq) {
......
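With the elevator hook in place, every point in a request's life that this patch touches is covered. A condensed outline (all names are functions from this patch; locking and error paths omitted):

/*
 * elv_next_request()       -> blk_add_timer()          issue to hardware
 * blk_complete_request()   -> blk_mark_rq_complete()   normal completion,
 *                             then __blk_complete_request()
 * blk_requeue_request()    -> blk_delete_timer() +     back to the queue
 *                             blk_clear_rq_complete()
 * end_that_request_last()  -> blk_delete_timer()       final completion
 * q->timeout fires         -> blk_rq_timed_out_timer() scans the list,
 *                             grabs expired requests, calls
 *                             q->rq_timed_out_fn()
 */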
......@@ -33,6 +33,7 @@
*/
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
......@@ -457,29 +458,29 @@ static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
* RETURNS:
* BLK_EH_HANDLED or BLK_EH_NOT_HANDLED
*/
enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
{
struct Scsi_Host *host = cmd->device->host;
struct ata_port *ap = ata_shost_to_port(host);
unsigned long flags;
struct ata_queued_cmd *qc;
enum scsi_eh_timer_return ret;
enum blk_eh_timer_return ret;
DPRINTK("ENTER\n");
if (ap->ops->error_handler) {
ret = EH_NOT_HANDLED;
ret = BLK_EH_NOT_HANDLED;
goto out;
}
ret = EH_HANDLED;
ret = BLK_EH_HANDLED;
spin_lock_irqsave(ap->lock, flags);
qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc) {
WARN_ON(qc->scsicmd != cmd);
qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
qc->err_mask |= AC_ERR_TIMEOUT;
ret = EH_NOT_HANDLED;
ret = BLK_EH_NOT_HANDLED;
}
spin_unlock_irqrestore(ap->lock, flags);
......@@ -831,7 +832,7 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
* Note that ATA_QCFLAG_FAILED is unconditionally set after
* this function completes.
*/
scsi_req_abort_cmd(qc->scsicmd);
blk_abort_request(qc->scsicmd->request);
}
/**
......
......@@ -152,7 +152,7 @@ extern int ata_bus_probe(struct ata_port *ap);
/* libata-eh.c */
extern unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd);
extern void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd);
extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
extern enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
extern void ata_scsi_error(struct Scsi_Host *host);
extern void ata_port_wait_eh(struct ata_port *ap);
extern void ata_eh_fastdrain_timerfn(unsigned long arg);
......
......@@ -1139,7 +1139,7 @@ static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd
srbcmd->id = cpu_to_le32(scmd_id(cmd));
srbcmd->lun = cpu_to_le32(cmd->device->lun);
srbcmd->flags = cpu_to_le32(flag);
timeout = cmd->timeout_per_command/HZ;
timeout = cmd->request->timeout/HZ;
if (timeout == 0)
timeout = 1;
srbcmd->timeout = cpu_to_le32(timeout); // timeout in seconds
......
......@@ -464,7 +464,6 @@ int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd,
/* use request field to save the ptr. to completion struct. */
scp->request = (struct request *)&wait;
scp->timeout_per_command = timeout*HZ;
scp->cmd_len = 12;
scp->cmnd = cmnd;
cmndinfo.priority = IOCTL_PRI;
......@@ -1995,23 +1994,12 @@ static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar priority)
register Scsi_Cmnd *pscp;
register Scsi_Cmnd *nscp;
ulong flags;
unchar b, t;
TRACE(("gdth_putq() priority %d\n",priority));
spin_lock_irqsave(&ha->smp_lock, flags);
if (!cmndinfo->internal_command) {
if (!cmndinfo->internal_command)
cmndinfo->priority = priority;
b = scp->device->channel;
t = scp->device->id;
if (priority >= DEFAULT_PRI) {
if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha,b)].lock) ||
(b==ha->virt_bus && t<MAX_HDRIVES && ha->hdr[t].lock)) {
TRACE2(("gdth_putq(): locked IO ->update_timeout()\n"));
cmndinfo->timeout = gdth_update_timeout(scp, 0);
}
}
}
if (ha->req_first==NULL) {
ha->req_first = scp; /* queue was empty */
......@@ -3899,6 +3887,39 @@ static const char *gdth_info(struct Scsi_Host *shp)
return ((const char *)ha->binfo.type_string);
}
static enum blk_eh_timer_return gdth_timed_out(struct scsi_cmnd *scp)
{
gdth_ha_str *ha = shost_priv(scp->device->host);
struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
unchar b, t;
ulong flags;
enum blk_eh_timer_return retval = BLK_EH_NOT_HANDLED;
TRACE(("%s() cmd 0x%x\n", __func__, scp->cmnd[0]));
b = scp->device->channel;
t = scp->device->id;
/*
* We don't really honor the command timeout, but we try to
* honor six times the actual command timeout! So reset the
* timer if this is less than the sixth timeout on this command!
*/
if (++cmndinfo->timeout_count < 6)
retval = BLK_EH_RESET_TIMER;
/* Reset the timeout if it is locked IO */
spin_lock_irqsave(&ha->smp_lock, flags);
if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha, b)].lock) ||
(b == ha->virt_bus && t < MAX_HDRIVES && ha->hdr[t].lock)) {
TRACE2(("%s(): locked IO, reset timeout\n", __func__));
retval = BLK_EH_RESET_TIMER;
}
spin_unlock_irqrestore(&ha->smp_lock, flags);
return retval;
}
static int gdth_eh_bus_reset(Scsi_Cmnd *scp)
{
gdth_ha_str *ha = shost_priv(scp->device->host);
......@@ -3992,7 +4013,7 @@ static int gdth_queuecommand(struct scsi_cmnd *scp,
BUG_ON(!cmndinfo);
scp->scsi_done = done;
gdth_update_timeout(scp, scp->timeout_per_command * 6);
cmndinfo->timeout_count = 0;
cmndinfo->priority = DEFAULT_PRI;
return __gdth_queuecommand(ha, scp, cmndinfo);
......@@ -4096,12 +4117,10 @@ static int ioc_lockdrv(void __user *arg)
ha->hdr[j].lock = 1;
spin_unlock_irqrestore(&ha->smp_lock, flags);
gdth_wait_completion(ha, ha->bus_cnt, j);
gdth_stop_timeout(ha, ha->bus_cnt, j);
} else {
spin_lock_irqsave(&ha->smp_lock, flags);
ha->hdr[j].lock = 0;
spin_unlock_irqrestore(&ha->smp_lock, flags);
gdth_start_timeout(ha, ha->bus_cnt, j);
gdth_next(ha);
}
}
......@@ -4539,18 +4558,14 @@ static int gdth_ioctl(struct inode *inode, struct file *filep,
spin_lock_irqsave(&ha->smp_lock, flags);
ha->raw[i].lock = 1;
spin_unlock_irqrestore(&ha->smp_lock, flags);
for (j = 0; j < ha->tid_cnt; ++j) {
for (j = 0; j < ha->tid_cnt; ++j)
gdth_wait_completion(ha, i, j);
gdth_stop_timeout(ha, i, j);
}
} else {
spin_lock_irqsave(&ha->smp_lock, flags);
ha->raw[i].lock = 0;
spin_unlock_irqrestore(&ha->smp_lock, flags);
for (j = 0; j < ha->tid_cnt; ++j) {
gdth_start_timeout(ha, i, j);
for (j = 0; j < ha->tid_cnt; ++j)
gdth_next(ha);
}
}
}
break;
......@@ -4644,6 +4659,7 @@ static struct scsi_host_template gdth_template = {
.slave_configure = gdth_slave_configure,
.bios_param = gdth_bios_param,
.proc_info = gdth_proc_info,
.eh_timed_out = gdth_timed_out,
.proc_name = "gdth",
.can_queue = GDTH_MAXCMDS,
.this_id = -1,
......
......@@ -916,7 +916,7 @@ typedef struct {
gdth_cmd_str *internal_cmd_str; /* carrier for internal messages */
dma_addr_t sense_paddr; /* sense dma-addr */
unchar priority;
int timeout;
int timeout_count; /* # of timeout calls */
volatile int wait_for_completion;
ushort status;
ulong32 info;
......
......@@ -748,69 +748,3 @@ static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id)
}
spin_unlock_irqrestore(&ha->smp_lock, flags);
}
static void gdth_stop_timeout(gdth_ha_str *ha, int busnum, int id)
{
ulong flags;
Scsi_Cmnd *scp;
unchar b, t;
spin_lock_irqsave(&ha->smp_lock, flags);
for (scp = ha->req_first; scp; scp = (Scsi_Cmnd *)scp->SCp.ptr) {
struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
if (!cmndinfo->internal_command) {
b = scp->device->channel;
t = scp->device->id;
if (t == (unchar)id && b == (unchar)busnum) {
TRACE2(("gdth_stop_timeout(): update_timeout()\n"));
cmndinfo->timeout = gdth_update_timeout(scp, 0);
}
}
}
spin_unlock_irqrestore(&ha->smp_lock, flags);
}
static void gdth_start_timeout(gdth_ha_str *ha, int busnum, int id)
{
ulong flags;
Scsi_Cmnd *scp;
unchar b, t;
spin_lock_irqsave(&ha->smp_lock, flags);
for (scp = ha->req_first; scp; scp = (Scsi_Cmnd *)scp->SCp.ptr) {
struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
if (!cmndinfo->internal_command) {
b = scp->device->channel;
t = scp->device->id;
if (t == (unchar)id && b == (unchar)busnum) {
TRACE2(("gdth_start_timeout(): update_timeout()\n"));
gdth_update_timeout(scp, cmndinfo->timeout);
}
}
}
spin_unlock_irqrestore(&ha->smp_lock, flags);
}
static int gdth_update_timeout(Scsi_Cmnd *scp, int timeout)
{
int oldto;
oldto = scp->timeout_per_command;
scp->timeout_per_command = timeout;
if (timeout == 0) {
del_timer(&scp->eh_timeout);
scp->eh_timeout.data = (unsigned long) NULL;
scp->eh_timeout.expires = 0;
} else {
if (scp->eh_timeout.data != (unsigned long) NULL)
del_timer(&scp->eh_timeout);
scp->eh_timeout.data = (unsigned long) scp;
scp->eh_timeout.expires = jiffies + timeout;
add_timer(&scp->eh_timeout);
}
return oldto;
}
......@@ -20,9 +20,6 @@ static char *gdth_ioctl_alloc(gdth_ha_str *ha, int size, int scratch,
ulong64 *paddr);
static void gdth_ioctl_free(gdth_ha_str *ha, int size, char *buf, ulong64 paddr);
static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id);
static void gdth_stop_timeout(gdth_ha_str *ha, int busnum, int id);
static void gdth_start_timeout(gdth_ha_str *ha, int busnum, int id);
static int gdth_update_timeout(Scsi_Cmnd *scp, int timeout);
#endif
......@@ -756,7 +756,7 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
init_event_struct(evt_struct,
handle_cmd_rsp,
VIOSRP_SRP_FORMAT,
cmnd->timeout_per_command/HZ);
cmnd->request->timeout/HZ);
evt_struct->cmnd = cmnd;
evt_struct->cmnd_done = done;
......
......@@ -612,7 +612,7 @@ static int idescsi_queue (struct scsi_cmnd *cmd,
pc->req_xfer = pc->buf_size = scsi_bufflen(cmd);
pc->scsi_cmd = cmd;
pc->done = done;
pc->timeout = jiffies + cmd->timeout_per_command;
pc->timeout = jiffies + cmd->request->timeout;
if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) {
printk ("ide-scsi: %s: que %lu, cmd = ", drive->name, cmd->serial_number);
......
......@@ -3670,7 +3670,8 @@ static int ipr_slave_configure(struct scsi_device *sdev)
sdev->no_uld_attach = 1;
}
if (ipr_is_vset_device(res)) {
sdev->timeout = IPR_VSET_RW_TIMEOUT;
blk_queue_rq_timeout(sdev->request_queue,
IPR_VSET_RW_TIMEOUT);
blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
}
if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
......
......@@ -3818,7 +3818,7 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb)
scb->cmd.dcdb.segment_4G = 0;
scb->cmd.dcdb.enhanced_sg = 0;
TimeOut = scb->scsi_cmd->timeout_per_command;
TimeOut = scb->scsi_cmd->request->timeout;
if (ha->subsys->param[4] & 0x00100000) { /* If NEW Tape DCDB is Supported */
if (!scb->sg_len) {
......
......@@ -1476,12 +1476,12 @@ static void iscsi_start_tx(struct iscsi_conn *conn)
scsi_queue_work(conn->session->host, &conn->xmitwork);
}
static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
{
struct iscsi_cls_session *cls_session;
struct iscsi_session *session;
struct iscsi_conn *conn;
enum scsi_eh_timer_return rc = EH_NOT_HANDLED;
enum blk_eh_timer_return rc = BLK_EH_NOT_HANDLED;
cls_session = starget_to_session(scsi_target(scmd->device));
session = cls_session->dd_data;
......@@ -1494,14 +1494,14 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
* We are probably in the middle of iscsi recovery so let
* that complete and handle the error.
*/
rc = EH_RESET_TIMER;
rc = BLK_EH_RESET_TIMER;
goto done;
}
conn = session->leadconn;
if (!conn) {
/* In the middle of shutting down */
rc = EH_RESET_TIMER;
rc = BLK_EH_RESET_TIMER;
goto done;
}
......@@ -1513,20 +1513,21 @@ static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
*/
if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
(conn->ping_timeout * HZ), jiffies))
rc = EH_RESET_TIMER;
rc = BLK_EH_RESET_TIMER;
/*
* if we are about to check the transport then give the command
* more time
*/
if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ),
jiffies))
rc = EH_RESET_TIMER;
rc = BLK_EH_RESET_TIMER;
/* if in the middle of checking the transport then give us more time */
if (conn->ping_task)
rc = EH_RESET_TIMER;
rc = BLK_EH_RESET_TIMER;
done:
spin_unlock(&session->lock);
debug_scsi("return %s\n", rc == EH_RESET_TIMER ? "timer reset" : "nh");
debug_scsi("return %s\n", rc == BLK_EH_RESET_TIMER ?
"timer reset" : "nh");
return rc;
}
......
......@@ -398,7 +398,7 @@ void sas_ata_task_abort(struct sas_task *task)
/* Bounce SCSI-initiated commands to the SCSI EH */
if (qc->scsicmd) {
scsi_req_abort_cmd(qc->scsicmd);
blk_abort_request(qc->scsicmd->request);
scsi_schedule_eh(qc->scsicmd->device->host);
return;
}
......
......@@ -55,7 +55,7 @@ void sas_unregister_phys(struct sas_ha_struct *sas_ha);
int sas_register_ports(struct sas_ha_struct *sas_ha);
void sas_unregister_ports(struct sas_ha_struct *sas_ha);
enum scsi_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *);
enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *);
int sas_init_queue(struct sas_ha_struct *sas_ha);
int sas_init_events(struct sas_ha_struct *sas_ha);
......