Commit 0ca9caae authored by Linus Torvalds

Merge branch 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6

* 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6:
  [S390] stacktrace bug.
  [S390] cio: remove casts from/to (void *).
  [S390] cio: Remove grace period for vary off chpid.
  [S390] cio: Use ccw_dev_id and subchannel_id in ccw_device_private
  [S390] monwriter kzalloc size.
  [S390] cio: add missing KERN_INFO printk header.
  [S390] irq change improvements.
parents 6bf1f75b 75e9de18
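
For orientation (this sketch is not part of the merge itself): the "[S390] irq change improvements." patch below finishes converting the s390 interrupt entry points to the get_irq_regs()/set_irq_regs() scheme, as the hunks show: the register set is published before irq_enter(), restored after irq_exit(), and the pt_regs argument is dropped from the individual handlers. A minimal sketch of that pattern, assuming the generic <asm/irq_regs.h> and <linux/hardirq.h> helpers; the entry-point name below is hypothetical:

#include <linux/hardirq.h>	/* irq_enter(), irq_exit() */
#include <asm/irq_regs.h>	/* set_irq_regs(), get_irq_regs() */

/* Hypothetical entry point, shaped like do_extint()/do_IRQ() after the patch. */
static void example_interrupt_entry(struct pt_regs *regs, unsigned short code)
{
	struct pt_regs *old_regs;

	old_regs = set_irq_regs(regs);	/* publish regs first ...            */
	irq_enter();			/* ... then mark interrupt context   */

	/*
	 * Dispatch to the registered handlers here, passing only 'code'.
	 * Handlers no longer take a struct pt_regs argument; one that still
	 * needs the register set calls get_irq_regs() instead.
	 */

	irq_exit();			/* leave interrupt context first ... */
	set_irq_regs(old_regs);		/* ... then restore the saved regs   */
}

The cio patches in the same pull move device identification in a similar direction: the separate devno/ssid/sch_no fields in struct ccw_device_private are replaced by a struct ccw_dev_id plus a struct subchannel_id, compared via ccw_dev_id_is_equal(), as the hunks below show.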
@@ -109,7 +109,7 @@ static LIST_HEAD(appldata_ops_list);
*
* schedule work and reschedule timer
*/
static void appldata_timer_function(unsigned long data, struct pt_regs *regs)
static void appldata_timer_function(unsigned long data)
{
P_DEBUG(" -= Timer =-\n");
P_DEBUG("CPU: %i, expire_count: %i\n", smp_processor_id(),
......
@@ -117,8 +117,8 @@ void do_extint(struct pt_regs *regs, unsigned short code)
int index;
struct pt_regs *old_regs;
irq_enter();
old_regs = set_irq_regs(regs);
irq_enter();
asm volatile ("mc 0,0");
if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer)
/**
@@ -134,8 +134,8 @@ void do_extint(struct pt_regs *regs, unsigned short code)
p->handler(code);
}
}
set_irq_regs(old_regs);
irq_exit();
set_irq_regs(old_regs);
}
EXPORT_SYMBOL(register_external_interrupt);
......
@@ -62,27 +62,26 @@ static inline unsigned long save_context_stack(struct stack_trace *trace,
void save_stack_trace(struct stack_trace *trace, struct task_struct *task)
{
register unsigned long sp asm ("15");
unsigned long orig_sp;
unsigned long orig_sp, new_sp;
sp &= PSW_ADDR_INSN;
orig_sp = sp;
orig_sp = sp & PSW_ADDR_INSN;
sp = save_context_stack(trace, &trace->skip, sp,
new_sp = save_context_stack(trace, &trace->skip, orig_sp,
S390_lowcore.panic_stack - PAGE_SIZE,
S390_lowcore.panic_stack);
if ((sp != orig_sp) && !trace->all_contexts)
if ((new_sp != orig_sp) && !trace->all_contexts)
return;
sp = save_context_stack(trace, &trace->skip, sp,
new_sp = save_context_stack(trace, &trace->skip, new_sp,
S390_lowcore.async_stack - ASYNC_SIZE,
S390_lowcore.async_stack);
if ((sp != orig_sp) && !trace->all_contexts)
if ((new_sp != orig_sp) && !trace->all_contexts)
return;
if (task)
save_context_stack(trace, &trace->skip, sp,
save_context_stack(trace, &trace->skip, new_sp,
(unsigned long) task_stack_page(task),
(unsigned long) task_stack_page(task) + THREAD_SIZE);
else
save_context_stack(trace, &trace->skip, sp,
save_context_stack(trace, &trace->skip, new_sp,
S390_lowcore.thread_info,
S390_lowcore.thread_info + THREAD_SIZE);
return;
......
@@ -209,11 +209,11 @@ static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
* Do the callback functions of expired vtimer events.
* Called from within the interrupt handler.
*/
static void do_callbacks(struct list_head *cb_list, struct pt_regs *regs)
static void do_callbacks(struct list_head *cb_list)
{
struct vtimer_queue *vt_list;
struct vtimer_list *event, *tmp;
void (*fn)(unsigned long, struct pt_regs*);
void (*fn)(unsigned long);
unsigned long data;
if (list_empty(cb_list))
@@ -224,7 +224,7 @@ static void do_callbacks(struct list_head *cb_list, struct pt_regs *regs)
list_for_each_entry_safe(event, tmp, cb_list, entry) {
fn = event->function;
data = event->data;
fn(data, regs);
fn(data);
if (!event->interval)
/* delete one shot timer */
@@ -275,7 +275,7 @@ static void do_cpu_timer_interrupt(__u16 error_code)
list_move_tail(&event->entry, &cb_list);
}
spin_unlock(&vt_list->lock);
do_callbacks(&cb_list, get_irq_regs());
do_callbacks(&cb_list);
/* next event is first in list */
spin_lock(&vt_list->lock);
......
@@ -110,7 +110,7 @@ static int monwrite_new_hdr(struct mon_private *monpriv)
monbuf = kzalloc(sizeof(struct mon_buf), GFP_KERNEL);
if (!monbuf)
return -ENOMEM;
monbuf->data = kzalloc(monbuf->hdr.datalen,
monbuf->data = kzalloc(monhdr->datalen,
GFP_KERNEL | GFP_DMA);
if (!monbuf->data) {
kfree(monbuf);
......
@@ -370,7 +370,7 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
struct res_acc_data *res_data;
struct subchannel *sch;
res_data = (struct res_acc_data *)data;
res_data = data;
sch = get_subchannel_by_schid(schid);
if (!sch)
/* Check if a subchannel is newly available. */
@@ -444,7 +444,7 @@ __get_chpid_from_lir(void *data)
u32 isinfo[28];
} *lir;
lir = (struct lir*) data;
lir = data;
if (!(lir->iq&0x80))
/* NULL link incident record */
return -EINVAL;
@@ -628,7 +628,7 @@ __chp_add(struct subchannel_id schid, void *data)
struct channel_path *chp;
struct subchannel *sch;
chp = (struct channel_path *)data;
chp = data;
sch = get_subchannel_by_schid(schid);
if (!sch)
/* Check if the subchannel is now available. */
@@ -707,8 +707,7 @@ chp_process_crw(int chpid, int on)
return chp_add(chpid);
}
static inline int
__check_for_io_and_kill(struct subchannel *sch, int index)
static inline int check_for_io_on_path(struct subchannel *sch, int index)
{
int cc;
@@ -718,10 +717,8 @@ __check_for_io_and_kill(struct subchannel *sch, int index)
cc = stsch(sch->schid, &sch->schib);
if (cc)
return 0;
if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index)) {
device_set_waiting(sch);
if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index))
return 1;
}
return 0;
}
@@ -750,12 +747,10 @@ __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
} else {
sch->opm &= ~(0x80 >> chp);
sch->lpm &= ~(0x80 >> chp);
/*
* Give running I/O a grace period in which it
* can successfully terminate, even using the
* just varied off path. Then kill it.
*/
if (!__check_for_io_and_kill(sch, chp) && !sch->lpm) {
if (check_for_io_on_path(sch, chp))
/* Path verification is done after killing. */
device_kill_io(sch);
else if (!sch->lpm) {
if (css_enqueue_subchannel_slow(sch->schid)) {
css_clear_subchannel_slow_list();
need_rescan = 1;
......
@@ -609,8 +609,8 @@ do_IRQ (struct pt_regs *regs)
struct irb *irb;
struct pt_regs *old_regs;
irq_enter ();
old_regs = set_irq_regs(regs);
irq_enter();
asm volatile ("mc 0,0");
if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer)
/**
@@ -655,8 +655,8 @@ do_IRQ (struct pt_regs *regs)
* out of the sie which costs more cycles than it saves.
*/
} while (!MACHINE_IS_VM && tpi (NULL) != 0);
irq_exit();
set_irq_regs(old_regs);
irq_exit ();
}
#ifdef CONFIG_CCW_CONSOLE
......
@@ -177,7 +177,7 @@ get_subchannel_by_schid(struct subchannel_id schid)
struct device *dev;
dev = bus_find_device(&css_bus_type, NULL,
(void *)&schid, check_subchannel);
&schid, check_subchannel);
return dev ? to_subchannel(dev) : NULL;
}
......
@@ -76,9 +76,8 @@ struct ccw_device_private {
int state; /* device state */
atomic_t onoff;
unsigned long registered;
__u16 devno; /* device number */
__u16 sch_no; /* subchannel number */
__u8 ssid; /* subchannel set id */
struct ccw_dev_id dev_id; /* device id */
struct subchannel_id schid; /* subchannel number */
__u8 imask; /* lpm mask for SNID/SID/SPGID */
int iretry; /* retry counter SNID/SID/SPGID */
struct {
@@ -171,7 +170,7 @@ void device_trigger_reprobe(struct subchannel *);
/* Helper functions for vary on/off. */
int device_is_online(struct subchannel *);
void device_set_waiting(struct subchannel *);
void device_kill_io(struct subchannel *);
/* Machine check helper function. */
void device_kill_pending_timer(struct subchannel *);
......
@@ -552,21 +552,19 @@ ccw_device_register(struct ccw_device *cdev)
}
struct match_data {
unsigned int devno;
unsigned int ssid;
struct ccw_dev_id dev_id;
struct ccw_device * sibling;
};
static int
match_devno(struct device * dev, void * data)
{
struct match_data * d = (struct match_data *)data;
struct match_data * d = data;
struct ccw_device * cdev;
cdev = to_ccwdev(dev);
if ((cdev->private->state == DEV_STATE_DISCONNECTED) &&
(cdev->private->devno == d->devno) &&
(cdev->private->ssid == d->ssid) &&
ccw_dev_id_is_equal(&cdev->private->dev_id, &d->dev_id) &&
(cdev != d->sibling)) {
cdev->private->state = DEV_STATE_NOT_OPER;
return 1;
@@ -574,15 +572,13 @@ match_devno(struct device * dev, void * data)
return 0;
}
static struct ccw_device *
get_disc_ccwdev_by_devno(unsigned int devno, unsigned int ssid,
struct ccw_device *sibling)
static struct ccw_device * get_disc_ccwdev_by_dev_id(struct ccw_dev_id *dev_id,
struct ccw_device *sibling)
{
struct device *dev;
struct match_data data;
data.devno = devno;
data.ssid = ssid;
data.dev_id = *dev_id;
data.sibling = sibling;
dev = bus_find_device(&ccw_bus_type, NULL, &data, match_devno);
@@ -595,7 +591,7 @@ ccw_device_add_changed(void *data)
struct ccw_device *cdev;
cdev = (struct ccw_device *)data;
cdev = data;
if (device_add(&cdev->dev)) {
put_device(&cdev->dev);
return;
@@ -616,9 +612,9 @@ ccw_device_do_unreg_rereg(void *data)
struct subchannel *sch;
int need_rename;
cdev = (struct ccw_device *)data;
cdev = data;
sch = to_subchannel(cdev->dev.parent);
if (cdev->private->devno != sch->schib.pmcw.dev) {
if (cdev->private->dev_id.devno != sch->schib.pmcw.dev) {
/*
* The device number has changed. This is usually only when
* a device has been detached under VM and then re-appeared
@@ -633,10 +629,12 @@ ccw_device_do_unreg_rereg(void *data)
* get possibly sick...
*/
struct ccw_device *other_cdev;
struct ccw_dev_id dev_id;
need_rename = 1;
other_cdev = get_disc_ccwdev_by_devno(sch->schib.pmcw.dev,
sch->schid.ssid, cdev);
dev_id.devno = sch->schib.pmcw.dev;
dev_id.ssid = sch->schid.ssid;
other_cdev = get_disc_ccwdev_by_dev_id(&dev_id, cdev);
if (other_cdev) {
struct subchannel *other_sch;
@@ -652,7 +650,7 @@ ccw_device_do_unreg_rereg(void *data)
}
/* Update ssd info here. */
css_get_ssd_info(sch);
cdev->private->devno = sch->schib.pmcw.dev;
cdev->private->dev_id.devno = sch->schib.pmcw.dev;
} else
need_rename = 0;
device_remove_files(&cdev->dev);
@@ -662,7 +660,7 @@ ccw_device_do_unreg_rereg(void *data)
snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x",
sch->schid.ssid, sch->schib.pmcw.dev);
PREPARE_WORK(&cdev->private->kick_work,
ccw_device_add_changed, (void *)cdev);
ccw_device_add_changed, cdev);
queue_work(ccw_device_work, &cdev->private->kick_work);
}
@@ -687,7 +685,7 @@ io_subchannel_register(void *data)
int ret;
unsigned long flags;
cdev = (struct ccw_device *) data;
cdev = data;
sch = to_subchannel(cdev->dev.parent);
if (klist_node_attached(&cdev->dev.knode_parent)) {
@@ -759,7 +757,7 @@ io_subchannel_recog_done(struct ccw_device *cdev)
break;
sch = to_subchannel(cdev->dev.parent);
PREPARE_WORK(&cdev->private->kick_work,
ccw_device_call_sch_unregister, (void *) cdev);
ccw_device_call_sch_unregister, cdev);
queue_work(slow_path_wq, &cdev->private->kick_work);
if (atomic_dec_and_test(&ccw_device_init_count))
wake_up(&ccw_device_init_wq);
@@ -774,7 +772,7 @@ io_subchannel_recog_done(struct ccw_device *cdev)
if (!get_device(&cdev->dev))
break;
PREPARE_WORK(&cdev->private->kick_work,
io_subchannel_register, (void *) cdev);
io_subchannel_register, cdev);
queue_work(slow_path_wq, &cdev->private->kick_work);
break;
}
@@ -792,9 +790,9 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
/* Init private data. */
priv = cdev->private;
priv->devno = sch->schib.pmcw.dev;
priv->ssid = sch->schid.ssid;
priv->sch_no = sch->schid.sch_no;
priv->dev_id.devno = sch->schib.pmcw.dev;
priv->dev_id.ssid = sch->schid.ssid;
priv->schid = sch->schid;
priv->state = DEV_STATE_NOT_OPER;
INIT_LIST_HEAD(&priv->cmb_list);
init_waitqueue_head(&priv->wait_q);
@@ -912,7 +910,7 @@ io_subchannel_remove (struct subchannel *sch)
*/
if (get_device(&cdev->dev)) {
PREPARE_WORK(&cdev->private->kick_work,
ccw_device_unregister, (void *) cdev);
ccw_device_unregister, cdev);
queue_work(ccw_device_work, &cdev->private->kick_work);
}
return 0;
@@ -1055,7 +1053,7 @@ __ccwdev_check_busid(struct device *dev, void *id)
{
char *bus_id;
bus_id = (char *)id;
bus_id = id;
return (strncmp(bus_id, dev->bus_id, BUS_ID_SIZE) == 0);
}
......
@@ -21,7 +21,6 @@ enum dev_state {
/* states to wait for i/o completion before doing something */
DEV_STATE_CLEAR_VERIFY,
DEV_STATE_TIMEOUT_KILL,
DEV_STATE_WAIT4IO,
DEV_STATE_QUIESCE,
/* special states for devices gone not operational */
DEV_STATE_DISCONNECTED,
......
@@ -59,18 +59,6 @@ device_set_disconnected(struct subchannel *sch)
cdev->private->state = DEV_STATE_DISCONNECTED;
}
void
device_set_waiting(struct subchannel *sch)
{
struct ccw_device *cdev;
if (!sch->dev.driver_data)
return;
cdev = sch->dev.driver_data;
ccw_device_set_timeout(cdev, 10*HZ);
cdev->private->state = DEV_STATE_WAIT4IO;
}
/*
* Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
*/
@@ -183,9 +171,9 @@ ccw_device_handle_oper(struct ccw_device *cdev)
cdev->id.cu_model != cdev->private->senseid.cu_model ||
cdev->id.dev_type != cdev->private->senseid.dev_type ||
cdev->id.dev_model != cdev->private->senseid.dev_model ||
cdev->private->devno != sch->schib.pmcw.dev) {
cdev->private->dev_id.devno != sch->schib.pmcw.dev) {
PREPARE_WORK(&cdev->private->kick_work,
ccw_device_do_unreg_rereg, (void *)cdev);
ccw_device_do_unreg_rereg, cdev);
queue_work(ccw_device_work, &cdev->private->kick_work);
return 0;
}
@@ -255,7 +243,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
case DEV_STATE_NOT_OPER:
CIO_DEBUG(KERN_WARNING, 2,
"SenseID : unknown device %04x on subchannel "
"0.%x.%04x\n", cdev->private->devno,
"0.%x.%04x\n", cdev->private->dev_id.devno,
sch->schid.ssid, sch->schid.sch_no);
break;
case DEV_STATE_OFFLINE:
@@ -282,14 +270,15 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
CIO_DEBUG(KERN_INFO, 2, "SenseID : device 0.%x.%04x reports: "
"CU Type/Mod = %04X/%02X, Dev Type/Mod = "
"%04X/%02X\n",
cdev->private->ssid, cdev->private->devno,
cdev->private->dev_id.ssid,
cdev->private->dev_id.devno,
cdev->id.cu_type, cdev->id.cu_model,
cdev->id.dev_type, cdev->id.dev_model);
break;
case DEV_STATE_BOXED:
CIO_DEBUG(KERN_WARNING, 2,
"SenseID : boxed device %04x on subchannel "
"0.%x.%04x\n", cdev->private->devno,
"0.%x.%04x\n", cdev->private->dev_id.devno,
sch->schid.ssid, sch->schid.sch_no);
break;
}
@@ -325,13 +314,13 @@ ccw_device_oper_notify(void *data)
struct subchannel *sch;
int ret;
cdev = (struct ccw_device *)data;
cdev = data;
sch = to_subchannel(cdev->dev.parent);
ret = (sch->driver && sch->driver->notify) ?
sch->driver->notify(&sch->dev, CIO_OPER) : 0;
if (!ret)
/* Driver doesn't want device back. */
ccw_device_do_unreg_rereg((void *)cdev);
ccw_device_do_unreg_rereg(cdev);
else {
/* Reenable channel measurements, if needed. */
cmf_reenable(cdev);
@@ -363,12 +352,12 @@ ccw_device_done(struct ccw_device *cdev, int state)
if (state == DEV_STATE_BOXED)
CIO_DEBUG(KERN_WARNING, 2,
"Boxed device %04x on subchannel %04x\n",
cdev->private->devno, sch->schid.sch_no);
cdev->private->dev_id.devno, sch->schid.sch_no);
if (cdev->private->flags.donotify) {
cdev->private->flags.donotify = 0;
PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify,
(void *)cdev);
cdev);
queue_work(ccw_device_notify_work, &cdev->private->kick_work);
}
wake_up(&cdev->private->wait_q);
@@ -412,7 +401,8 @@ static void __ccw_device_get_common_pgid(struct ccw_device *cdev)
/* PGID mismatch, can't pathgroup. */
CIO_MSG_EVENT(0, "SNID - pgid mismatch for device "
"0.%x.%04x, can't pathgroup\n",
cdev->private->ssid, cdev->private->devno);
cdev->private->dev_id.ssid,
cdev->private->dev_id.devno);
cdev->private->options.pgroup = 0;
return;
}
@@ -523,7 +513,7 @@ ccw_device_nopath_notify(void *data)
struct subchannel *sch;
int ret;
cdev = (struct ccw_device *)data;
cdev = data;
sch = to_subchannel(cdev->dev.parent);
/* Extra sanity. */
if (sch->lpm)
@@ -537,7 +527,7 @@ ccw_device_nopath_notify(void *data)
if (get_device(&cdev->dev)) {
PREPARE_WORK(&cdev->private->kick_work,
ccw_device_call_sch_unregister,
(void *)cdev);
cdev);
queue_work(ccw_device_work,
&cdev->private->kick_work);
} else
@@ -592,7 +582,7 @@ ccw_device_verify_done(struct ccw_device *cdev, int err)
break;
default:
PREPARE_WORK(&cdev->private->kick_work,
ccw_device_nopath_notify, (void *)cdev);
ccw_device_nopath_notify, cdev);
queue_work(ccw_device_notify_work, &cdev->private->kick_work);
ccw_device_done(cdev, DEV_STATE_NOT_OPER);
break;
@@ -723,7 +713,7 @@ ccw_device_offline_notoper(struct ccw_device *cdev, enum dev_event dev_event)
sch = to_subchannel(cdev->dev.parent);
if (get_device(&cdev->dev)) {
PREPARE_WORK(&cdev->private->kick_work,
ccw_device_call_sch_unregister, (void *)cdev);
ccw_device_call_sch_unregister, cdev);
queue_work(ccw_device_work, &cdev->private->kick_work);
}
wake_up(&cdev->private->wait_q);
@@ -754,7 +744,7 @@ ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event)
}
if (get_device(&cdev->dev)) {
PREPARE_WORK(&cdev->private->kick_work,
ccw_device_call_sch_unregister, (void *)cdev);
ccw_device_call_sch_unregister, cdev);
queue_work(ccw_device_work, &cdev->private->kick_work);
}
wake_up(&cdev->private->wait_q);
@@ -859,7 +849,7 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
sch = to_subchannel(cdev->dev.parent);
if (!sch->lpm) {
PREPARE_WORK(&cdev->private->kick_work,
ccw_device_nopath_notify, (void *)cdev);
ccw_device_nopath_notify, cdev);
queue_work(ccw_device_notify_work,
&cdev->private->kick_work);
} else
@@ -885,7 +875,8 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
/* Basic sense hasn't started. Try again. */
ccw_device_do_sense(cdev, irb);
else {
printk("Huh? %s(%s): unsolicited interrupt...\n",
printk(KERN_INFO "Huh? %s(%s): unsolicited "
"interrupt...\n",
__FUNCTION__, cdev->dev.bus_id);
if (cdev->handler)
cdev->handler (cdev, 0, irb);
@@ -944,10 +935,10 @@ ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
cdev->private->state = DEV_STATE_ONLINE;
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
ERR_PTR(-ETIMEDOUT));
ERR_PTR(-EIO));
if (!sch->lpm) {
PREPARE_WORK(&cdev->private->kick_work,
ccw_device_nopath_notify, (void *)cdev);
ccw_device_nopath_notify, cdev);
queue_work(ccw_device_notify_work, &cdev->private->kick_work);
} else if (cdev->private->flags.doverify)
/* Start delayed path verification. */
@@ -970,7 +961,7 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
sch = to_subchannel(cdev->dev.parent);
if (!sch->lpm) {
PREPARE_WORK(&cdev->private->kick_work,
ccw_device_nopath_notify, (void *)cdev);
ccw_device_nopath_notify, cdev);
queue_work(ccw_device_notify_work,
&cdev->private->kick_work);
} else
@@ -981,51 +972,15 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
cdev->private->state = DEV_STATE_ONLINE;
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
ERR_PTR(-ETIMEDOUT));
}
static void
ccw_device_wait4io_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
struct irb *irb;
struct subchannel *sch;
irb = (struct irb *) __LC_IRB;
/*
* Accumulate status and find out if a basic sense is needed.
* This is fine since we have already adapted the lpm.
*/
ccw_device_accumulate_irb(cdev, irb);
if (cdev->private->flags.dosense) {
if (ccw_device_do_sense(cdev, irb) == 0) {
cdev->private->state = DEV_STATE_W4SENSE;
}
return;
}
/* Iff device is idle, reset timeout. */
sch = to_subchannel(cdev->dev.parent);
if (!stsch(sch->schid, &sch->schib))
if (sch->schib.scsw.actl == 0)
ccw_device_set_timeout(cdev, 0);
/* Call the handler. */
ccw_device_call_handler(cdev);
if (!sch->lpm) {
PREPARE_WORK(&cdev->private->kick_work,
ccw_device_nopath_notify, (void *)cdev);
queue_work(ccw_device_notify_work, &cdev->private->kick_work);
} else if (cdev->private->flags.doverify)
ccw_device_online_verify(cdev, 0);
ERR_PTR(-EIO));
}
static void
ccw_device_wait4io_timeout(struct ccw_device *cdev, enum dev_event dev_event)
void device_kill_io(struct subchannel *sch)
{
int ret;
struct subchannel *sch;
struct ccw_device *cdev;