Commit 39ce941e authored by Linus Torvalds

Merge branch 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6

* 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6:
  [S390] dcss: Initialize workqueue before using it.
  [S390] Remove BUILD_BUG_ON() in vmem code.
  [S390] sclp_tty/sclp_vt220: Fix scheduling while atomic
  [S390] dasd: fix panic caused by alias device offline
  [S390] dasd: add ifcc handling
  [S390] latencytop s390 support.
  [S390] Implement ext2_find_next_bit.
  [S390] Cleanup & optimize bitops.
  [S390] Define GENERIC_LOCKBREAK.
  [S390] console: allow vt220 console to be the only console
  [S390] Fix couple of section mismatches.
  [S390] Fix smp_call_function_mask semantics.
  [S390] Fix linker script.
  [S390] DEBUG_PAGEALLOC support for s390.
  [S390] cio: Add shutdown callback for ccwgroup.
  [S390] cio: Update documentation.
  [S390] cio: Clean up chsc response code handling.
  [S390] cio: make sense id procedure work with partial hardware response
parents 3d412f60 c5411dba
......@@ -59,7 +59,7 @@
<title>Introduction</title>
<para>
This document describes the interfaces available for device drivers that
drive s390 based channel attached devices. This includes interfaces for
drive s390 based channel attached I/O devices. This includes interfaces for
interaction with the hardware and interfaces for interacting with the
common driver core. Those interfaces are provided by the s390 common I/O
layer.
......@@ -86,9 +86,10 @@
The ccw bus typically contains the majority of devices available to
a s390 system. Named after the channel command word (ccw), the basic
command structure used to address its devices, the ccw bus contains
so-called channel attached devices. They are addressed via subchannels,
visible on the css bus. A device driver, however, will never interact
with the subchannel directly, but only via the device on the ccw bus,
so-called channel attached devices. They are addressed via I/O
subchannels, visible on the css bus. A device driver for
channel-attached devices, however, will never interact with the
subchannel directly, but only via the I/O device on the ccw bus,
the ccw device.
</para>
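As a quick illustration of that split between subchannel and ccw device, a minimal driver skeleton is sketched below against the 2.6.25-era interfaces declared in include/asm-s390/ccwdev.h and asm/cio.h; the foo_* names and the control unit type used for matching are purely hypothetical:

	#include <linux/init.h>
	#include <linux/module.h>
	#include <asm/ccwdev.h>
	#include <asm/cio.h>

	/* Match table: the driver binds to ccw devices by control unit
	 * type/model; it never touches the underlying I/O subchannel. */
	static struct ccw_device_id foo_ids[] = {
		{ CCW_DEVICE(0x3990, 0) },	/* hypothetical CU type */
		{ /* end of list */ },
	};
	MODULE_DEVICE_TABLE(ccw, foo_ids);

	/* Called by the common I/O layer for interrupts on this device. */
	static void foo_irq_handler(struct ccw_device *cdev,
				    unsigned long intparm, struct irb *irb)
	{
		/* inspect the irb and finish or restart the request here */
	}

	static int foo_probe(struct ccw_device *cdev)
	{
		cdev->handler = foo_irq_handler;
		return 0;
	}

	static void foo_remove(struct ccw_device *cdev)
	{
		cdev->handler = NULL;
	}

	static struct ccw_driver foo_driver = {
		.owner	= THIS_MODULE,
		.name	= "foo",
		.ids	= foo_ids,
		.probe	= foo_probe,
		.remove	= foo_remove,
	};

	static int __init foo_init(void)
	{
		return ccw_driver_register(&foo_driver);
	}

	static void __exit foo_exit(void)
	{
		ccw_driver_unregister(&foo_driver);
	}

	module_init(foo_init);
	module_exit(foo_exit);
	MODULE_LICENSE("GPL");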
<sect1 id="channelIO">
......@@ -116,7 +117,6 @@
!Iinclude/asm-s390/ccwdev.h
!Edrivers/s390/cio/device.c
!Edrivers/s390/cio/device_ops.c
!Edrivers/s390/cio/airq.c
</sect1>
<sect1 id="cmf">
<title>The channel-measurement facility</title>
......@@ -147,4 +147,15 @@
</sect1>
</chapter>
<chapter id="genericinterfaces">
<title>Generic interfaces</title>
<para>
Some interfaces are available to other drivers that do not necessarily
have anything to do with the busses described above, but still are
indirectly using basic infrastructure in the common I/O layer.
One example is the support for adapter interrupts.
</para>
!Edrivers/s390/cio/airq.c
</chapter>
</book>
......@@ -16,6 +16,9 @@ config LOCKDEP_SUPPORT
config STACKTRACE_SUPPORT
def_bool y
config HAVE_LATENCYTOP_SUPPORT
def_bool y
config RWSEM_GENERIC_SPINLOCK
bool
......@@ -47,6 +50,11 @@ config NO_IOMEM
config NO_DMA
def_bool y
config GENERIC_LOCKBREAK
bool
default y
depends on SMP && PREEMPT
mainmenu "Linux Kernel Configuration"
config S390
......
......@@ -6,4 +6,12 @@ config TRACE_IRQFLAGS_SUPPORT
source "lib/Kconfig.debug"
config DEBUG_PAGEALLOC
bool "Debug page memory allocations"
depends on DEBUG_KERNEL
help
Unmap pages from the kernel linear mapping after free_pages().
This results in a slowdown, but helps to find certain types of
memory corruptions.
endmenu
......@@ -11,6 +11,7 @@
#include <linux/sys.h>
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/cache.h>
#include <asm/lowcore.h>
#include <asm/errno.h>
......@@ -830,9 +831,7 @@ mcck_return:
* Restart interruption handler, kick starter for additional CPUs
*/
#ifdef CONFIG_SMP
#ifndef CONFIG_HOTPLUG_CPU
.section .init.text,"ax"
#endif
__CPUINIT
.globl restart_int_handler
restart_int_handler:
l %r15,__LC_SAVE_AREA+60 # load ksp
......@@ -845,9 +844,7 @@ restart_int_handler:
br %r14 # branch to start_secondary
restart_addr:
.long start_secondary
#ifndef CONFIG_HOTPLUG_CPU
.previous
#endif
#else
/*
* If we do not run with SMP enabled, let the new CPU crash ...
......
......@@ -11,6 +11,7 @@
#include <linux/sys.h>
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/cache.h>
#include <asm/lowcore.h>
#include <asm/errno.h>
......@@ -801,9 +802,7 @@ mcck_return:
* Restart interruption handler, kick starter for additional CPUs
*/
#ifdef CONFIG_SMP
#ifndef CONFIG_HOTPLUG_CPU
.section .init.text,"ax"
#endif
__CPUINIT
.globl restart_int_handler
restart_int_handler:
lg %r15,__LC_SAVE_AREA+120 # load ksp
......@@ -814,9 +813,7 @@ restart_int_handler:
lmg %r6,%r15,__SF_GPRS(%r15) # load registers from clone
stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on
jg start_secondary
#ifndef CONFIG_HOTPLUG_CPU
.previous
#endif
#else
/*
* If we do not run with SMP enabled, let the new CPU crash ...
......
......@@ -439,7 +439,7 @@ static void ipl_run(struct shutdown_trigger *trigger)
reipl_ccw_dev(&ipl_info.data.ccw.dev_id);
}
static int ipl_init(void)
static int __init ipl_init(void)
{
int rc;
......@@ -471,8 +471,11 @@ out:
return 0;
}
static struct shutdown_action ipl_action = {SHUTDOWN_ACTION_IPL_STR, ipl_run,
ipl_init};
static struct shutdown_action __refdata ipl_action = {
.name = SHUTDOWN_ACTION_IPL_STR,
.fn = ipl_run,
.init = ipl_init,
};
/*
* reipl shutdown action: Reboot Linux on shutdown.
......@@ -792,7 +795,7 @@ static int __init reipl_fcp_init(void)
return 0;
}
static int reipl_init(void)
static int __init reipl_init(void)
{
int rc;
......@@ -819,8 +822,11 @@ static int reipl_init(void)
return 0;
}
static struct shutdown_action reipl_action = {SHUTDOWN_ACTION_REIPL_STR,
reipl_run, reipl_init};
static struct shutdown_action __refdata reipl_action = {
.name = SHUTDOWN_ACTION_REIPL_STR,
.fn = reipl_run,
.init = reipl_init,
};
/*
* dump shutdown action: Dump Linux on shutdown.
......@@ -998,7 +1004,7 @@ static int __init dump_fcp_init(void)
return 0;
}
static int dump_init(void)
static int __init dump_init(void)
{
int rc;
......@@ -1020,8 +1026,11 @@ static int dump_init(void)
return 0;
}
static struct shutdown_action dump_action = {SHUTDOWN_ACTION_DUMP_STR,
dump_run, dump_init};
static struct shutdown_action __refdata dump_action = {
.name = SHUTDOWN_ACTION_DUMP_STR,
.fn = dump_run,
.init = dump_init,
};
/*
* vmcmd shutdown action: Trigger vm command on shutdown.
......
......@@ -77,7 +77,7 @@ unsigned long machine_flags = 0;
unsigned long elf_hwcap = 0;
char elf_platform[ELF_PLATFORM_SIZE];
struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS];
struct mem_chunk __meminitdata memory_chunk[MEMORY_CHUNKS];
volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
static unsigned long __initdata memory_end;
......@@ -145,7 +145,7 @@ __setup("condev=", condev_setup);
static int __init conmode_setup(char *str)
{
#if defined(CONFIG_SCLP_CONSOLE)
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
if (strncmp(str, "hwc", 4) == 0 || strncmp(str, "sclp", 5) == 0)
SET_CONSOLE_SCLP;
#endif
......@@ -183,7 +183,7 @@ static void __init conmode_default(void)
*/
cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
if (ptr == NULL) {
#if defined(CONFIG_SCLP_CONSOLE)
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
SET_CONSOLE_SCLP;
#endif
return;
......@@ -193,7 +193,7 @@ static void __init conmode_default(void)
SET_CONSOLE_3270;
#elif defined(CONFIG_TN3215_CONSOLE)
SET_CONSOLE_3215;
#elif defined(CONFIG_SCLP_CONSOLE)
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
SET_CONSOLE_SCLP;
#endif
} else if (strncmp(ptr + 8, "3215", 4) == 0) {
......@@ -201,7 +201,7 @@ static void __init conmode_default(void)
SET_CONSOLE_3215;
#elif defined(CONFIG_TN3270_CONSOLE)
SET_CONSOLE_3270;
#elif defined(CONFIG_SCLP_CONSOLE)
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
SET_CONSOLE_SCLP;
#endif
}
......@@ -212,7 +212,7 @@ static void __init conmode_default(void)
SET_CONSOLE_3270;
#endif
} else {
#if defined(CONFIG_SCLP_CONSOLE)
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
SET_CONSOLE_SCLP;
#endif
}
......@@ -528,7 +528,7 @@ static void __init setup_memory_end(void)
memory_size = 0;
memory_end &= PAGE_MASK;
max_mem = memory_end ? min(VMALLOC_START, memory_end) : VMALLOC_START;
max_mem = memory_end ? min(VMEM_MAX_PHYS, memory_end) : VMEM_MAX_PHYS;
memory_end = min(max_mem, memory_end);
/*
......
......@@ -225,12 +225,11 @@ EXPORT_SYMBOL(smp_call_function_single);
* You must not call this function with disabled interrupts or from a
* hardware interrupt handler or from a bottom half handler.
*/
int
smp_call_function_mask(cpumask_t mask,
void (*func)(void *), void *info,
int wait)
int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
int wait)
{
preempt_disable();
cpu_clear(smp_processor_id(), mask);
__smp_call_function_map(func, info, 0, wait, mask);
preempt_enable();
return 0;
......@@ -1008,7 +1007,7 @@ static struct notifier_block __cpuinitdata smp_cpu_nb = {
.notifier_call = smp_cpu_notify,
};
static int smp_add_present_cpu(int cpu)
static int __devinit smp_add_present_cpu(int cpu)
{
struct cpu *c = &per_cpu(cpu_devices, cpu);
struct sys_device *s = &c->sysdev;
......@@ -1036,8 +1035,8 @@ out:
}
#ifdef CONFIG_HOTPLUG_CPU
static ssize_t rescan_store(struct sys_device *dev, const char *buf,
size_t count)
static ssize_t __ref rescan_store(struct sys_device *dev,
const char *buf, size_t count)
{
cpumask_t newcpus;
int cpu;
......
......@@ -14,7 +14,8 @@
static unsigned long save_context_stack(struct stack_trace *trace,
unsigned long sp,
unsigned long low,
unsigned long high)
unsigned long high,
int savesched)
{
struct stack_frame *sf;
struct pt_regs *regs;
......@@ -47,10 +48,12 @@ static unsigned long save_context_stack(struct stack_trace *trace,
return sp;
regs = (struct pt_regs *)sp;
addr = regs->psw.addr & PSW_ADDR_INSN;
if (!trace->skip)
trace->entries[trace->nr_entries++] = addr;
else
trace->skip--;
if (savesched || !in_sched_functions(addr)) {
if (!trace->skip)
trace->entries[trace->nr_entries++] = addr;
else
trace->skip--;
}
if (trace->nr_entries >= trace->max_entries)
return sp;
low = sp;
......@@ -66,15 +69,27 @@ void save_stack_trace(struct stack_trace *trace)
orig_sp = sp & PSW_ADDR_INSN;
new_sp = save_context_stack(trace, orig_sp,
S390_lowcore.panic_stack - PAGE_SIZE,
S390_lowcore.panic_stack);
S390_lowcore.panic_stack, 1);
if (new_sp != orig_sp)
return;
new_sp = save_context_stack(trace, new_sp,
S390_lowcore.async_stack - ASYNC_SIZE,
S390_lowcore.async_stack);
S390_lowcore.async_stack, 1);
if (new_sp != orig_sp)
return;
save_context_stack(trace, new_sp,
S390_lowcore.thread_info,
S390_lowcore.thread_info + THREAD_SIZE);
S390_lowcore.thread_info + THREAD_SIZE, 1);
}
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
unsigned long sp, low, high;
sp = tsk->thread.ksp & PSW_ADDR_INSN;
low = (unsigned long) task_stack_page(tsk);
high = (unsigned long) task_pt_regs(tsk);
save_context_stack(trace, sp, low, high, 0);
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
}
......@@ -271,7 +271,10 @@ void die(const char * str, struct pt_regs * regs, long err)
printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
printk("SMP");
printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
printk("DEBUG_PAGEALLOC");
#endif
printk("\n");
notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
......
......@@ -35,7 +35,7 @@ SECTIONS
KPROBES_TEXT
*(.fixup)
*(.gnu.warning)
} = 0x0700
} :text = 0x0700
_etext = .; /* End of text section */
......
......@@ -167,6 +167,33 @@ void __init mem_init(void)
PFN_ALIGN((unsigned long)&_eshared) - 1);
}
#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
unsigned long address;
int i;
for (i = 0; i < numpages; i++) {
address = page_to_phys(page + i);
pgd = pgd_offset_k(address);
pud = pud_offset(pgd, address);
pmd = pmd_offset(pud, address);
pte = pte_offset_kernel(pmd, address);
if (!enable) {
ptep_invalidate(address, pte);
continue;
}
*pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW));
/* Flush cpu write queue. */
mb();
}
}
#endif
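For context on how this hook is exercised: with CONFIG_DEBUG_PAGEALLOC the generic page allocator calls kernel_map_pages() with enable=0 whenever pages are freed and with enable=1 when they are handed out again, so a stale access through the kernel linear mapping faults immediately instead of silently corrupting memory. The snippet below is only an illustrative sketch; example_use_after_free() is a hypothetical helper:

	#include <linux/mm.h>
	#include <linux/gfp.h>

	/* Illustrative sketch of when the linear mapping is toggled
	 * around an allocation/free cycle. */
	static void example_use_after_free(void)
	{
		struct page *page = alloc_pages(GFP_KERNEL, 0);
		char *p;

		if (!page)
			return;
		/* prep_new_page() has run kernel_map_pages(page, 1, 1),
		 * so the linear mapping of this page is valid again. */
		p = page_address(page);
		p[0] = 0x55;
		__free_pages(page, 0);
		/* The free path runs kernel_map_pages(page, 1, 0); with
		 * DEBUG_PAGEALLOC enabled, uncommenting the line below
		 * would now trigger a kernel page fault. */
		/* p[0] = 0xaa; */
	}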
void free_initmem(void)
{
unsigned long addr;
......
......@@ -62,7 +62,7 @@ void __meminit memmap_init(unsigned long size, int nid, unsigned long zone,
}
}
static void __init_refok *vmem_alloc_pages(unsigned int order)
static void __ref *vmem_alloc_pages(unsigned int order)
{
if (slab_is_available())
return (void *)__get_free_pages(GFP_KERNEL, order);
......@@ -250,7 +250,7 @@ static int insert_memory_segment(struct memory_segment *seg)
{
struct memory_segment *tmp;
if (seg->start + seg->size >= VMALLOC_START ||
if (seg->start + seg->size >= VMEM_MAX_PHYS ||
seg->start + seg->size < seg->start)
return -ERANGE;
......@@ -360,7 +360,6 @@ void __init vmem_map_init(void)
{
int i;
BUILD_BUG_ON((unsigned long)VMEM_MAP + VMEM_MAP_SIZE > VMEM_MAP_MAX);
NODE_DATA(0)->node_mem_map = VMEM_MAP;
for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++)
vmem_add_mem(memory_chunk[i].addr, memory_chunk[i].size);
......
......@@ -1057,12 +1057,11 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
if (device->features & DASD_FEATURE_ERPLOG) {
dasd_log_sense(cqr, irb);
}
/* If we have no sense data, or we just don't want complex ERP
* for this request, but if we have retries left, then just
* reset this request and retry it in the fastpath
/*
* If we don't want complex ERP for this request, then just
* reset this and retry it in the fastpath
*/
if (!(cqr->irb.esw.esw0.erw.cons &&
test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) &&
if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
cqr->retries > 0) {
DEV_MESSAGE(KERN_DEBUG, device,
"default ERP in fastpath (%i retries left)",
......@@ -1707,7 +1706,7 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
req = (struct request *) cqr->callback_data;
dasd_profile_end(cqr->block, cqr, req);
status = cqr->memdev->discipline->free_cp(cqr, req);
status = cqr->block->base->discipline->free_cp(cqr, req);
if (status <= 0)
error = status ? status : -EIO;
dasd_end_request(req, error);
......@@ -1742,12 +1741,8 @@ restart:
/* Process requests that may be recovered */
if (cqr->status == DASD_CQR_NEED_ERP) {
if (cqr->irb.esw.esw0.erw.cons &&
test_bit(DASD_CQR_FLAGS_USE_ERP,
&cqr->flags)) {
erp_fn = base->discipline->erp_action(cqr);
erp_fn(cqr);
}
erp_fn = base->discipline->erp_action(cqr);
erp_fn(cqr);
goto restart;
}
......
......@@ -164,7 +164,7 @@ dasd_3990_erp_alternate_path(struct dasd_ccw_req * erp)
/* reset status to submit the request again... */
erp->status = DASD_CQR_FILLED;
erp->retries = 1;
erp->retries = 10;
} else {
DEV_MESSAGE(KERN_ERR, device,
"No alternate channel path left (lpum=%x / "
......@@ -301,8 +301,7 @@ dasd_3990_erp_action_4(struct dasd_ccw_req * erp, char *sense)
erp->function = dasd_3990_erp_action_4;
} else {
if (sense[25] == 0x1D) { /* state change pending */
if (sense && (sense[25] == 0x1D)) { /* state change pending */
DEV_MESSAGE(KERN_INFO, device,
"waiting for state change pending "
......@@ -311,7 +310,7 @@ dasd_3990_erp_action_4(struct dasd_ccw_req * erp, char *sense)
dasd_3990_erp_block_queue(erp, 30*HZ);
} else if (sense[25] == 0x1E) { /* busy */
} else if (sense && (sense[25] == 0x1E)) { /* busy */
DEV_MESSAGE(KERN_INFO, device,
"busy - redriving request later, "
"%d retries left",
......@@ -2119,6 +2118,34 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
*****************************************************************************
*/
/*
* DASD_3990_ERP_CONTROL_CHECK
*
* DESCRIPTION
Does a generic inspection if a control check occurred and sets up
* the related error recovery procedure
*
* PARAMETER
* erp pointer to the currently created default ERP
*
* RETURN VALUES
* erp_filled pointer to the erp
*/
static struct dasd_ccw_req *
dasd_3990_erp_control_check(struct dasd_ccw_req *erp)
{
struct dasd_device *device = erp->startdev;
if (erp->refers->irb.scsw.cstat & (SCHN_STAT_INTF_CTRL_CHK
| SCHN_STAT_CHN_CTRL_CHK)) {
DEV_MESSAGE(KERN_DEBUG, device, "%s",
"channel or interface control check");
erp = dasd_3990_erp_action_4(erp, NULL);
}
return erp;
}
/*
* DASD_3990_ERP_INSPECT
*
......@@ -2145,8 +2172,11 @@ dasd_3990_erp_inspect(struct dasd_ccw_req * erp)
if (erp_new)
return erp_new;
/* check if no concurrent sense is available */
if (!erp->refers->irb.esw.esw0.erw.cons)
erp_new = dasd_3990_erp_control_check(erp);
/* distinguish between 24 and 32 byte sense data */
if (sense[27] & DASD_SENSE_BIT_0) {
else if (sense[27] & DASD_SENSE_BIT_0) {
/* inspect the 24 byte sense data */
erp_new = dasd_3990_erp_inspect_24(erp, sense);
......@@ -2285,6 +2315,17 @@ dasd_3990_erp_error_match(struct dasd_ccw_req *cqr1, struct dasd_ccw_req *cqr2)
// return 0; /* CCW doesn't match */
}
if (cqr1->irb.esw.esw0.erw.cons != cqr2->irb.esw.esw0.erw.cons)
return 0;
if ((cqr1->irb.esw.esw0.erw.cons == 0) &&
(cqr2->irb.esw.esw0.erw.cons == 0)) {
if ((cqr1->irb.scsw.cstat & (SCHN_STAT_INTF_CTRL_CHK |
SCHN_STAT_CHN_CTRL_CHK)) ==
(cqr2->irb.scsw.cstat & (SCHN_STAT_INTF_CTRL_CHK |
SCHN_STAT_CHN_CTRL_CHK)))
return 1; /* match with ifcc*/
}
/* check sense data; byte 0-2,25,27 */
if (!((memcmp (cqr1->irb.ecw, cqr2->irb.ecw, 3) == 0) &&
(cqr1->irb.ecw[27] == cqr2->irb.ecw[27]) &&
......@@ -2560,17 +2601,6 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
return cqr;
}
/* check if sense data are available */
if (!cqr->irb.ecw) {
DEV_MESSAGE(KERN_DEBUG, device,
"ERP called witout sense data avail ..."
"request %p - NO ERP possible", cqr);
cqr->status = DASD_CQR_FAILED;
return cqr;
}
/* check if error happened before */
erp = dasd_3990_erp_in_erp(cqr);
......
......@@ -415,6 +415,8 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
dev_info->gd->queue = dev_info->dcssblk_queue;
dev_info->gd->private_data = dev_info;
dev_info->gd->driverfs_dev = &dev_info->dev;
blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request);
blk_queue_hardsect_size(dev_info->dcssblk_queue, 4096);
/*
* load the segment
*/
......@@ -472,9 +474,6 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
if (rc)
goto unregister_dev;
blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request);
blk_queue_hardsect_size(dev_info->dcssblk_queue, 4096);
add_disk(dev_info->gd);
switch (dev_info->segment_type) {
......
......@@ -332,7 +332,7 @@ sclp_tty_write_string(const unsigned char *str, int count)
if (sclp_ttybuf == NULL) {
while (list_empty(&sclp_tty_pages)) {
spin_unlock_irqrestore(&sclp_tty_lock, flags);
if (in_interrupt())
if (in_atomic())
sclp_sync_wait();