Commit 41c594ab
Authored Apr 05, 2006 by Ralf Baechle

[MIPS] MT: Improved multithreading support.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>

Parent 2600990e
Changes: 51
arch/mips/Kconfig
...
...
@@ -1447,6 +1447,10 @@ choice
 	prompt "MIPS MT options"
 	depends on MIPS_MT

+config MIPS_MT_SMTC
+	bool "SMTC: Use all TCs on all VPEs for SMP"
+	select SMP
+
 config MIPS_MT_SMP
 	bool "Use 1 TC on each available VPE for SMP"
 	select SMP
...
...
@@ -1613,7 +1617,7 @@ source "mm/Kconfig"

 config SMP
 	bool "Multi-Processing support"
-	depends on CPU_RM9000 || ((SIBYTE_BCM1x80 || SIBYTE_BCM1x55 || SIBYTE_SB1250 || QEMU) && !SIBYTE_STANDALONE) || SGI_IP27 || MIPS_MT_SMP
+	depends on CPU_RM9000 || ((SIBYTE_BCM1x80 || SIBYTE_BCM1x55 || SIBYTE_SB1250 || QEMU) && !SIBYTE_STANDALONE) || SGI_IP27 || MIPS_MT_SMP || MIPS_MT_SMTC
 	---help---
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, like most personal computers, say N. If
...
...
arch/mips/kernel/Makefile
...
...
@@ -34,7 +34,9 @@ obj-$(CONFIG_CPU_R6000)	+= r6000_fpu.o r4k_switch.o

 obj-$(CONFIG_SMP)		+= smp.o
-obj-$(CONFIG_MIPS_MT_SMP)	+= smp_mt.o
+obj-$(CONFIG_MIPS_MT)		+= mips-mt.o
+obj-$(CONFIG_MIPS_MT_SMTC)	+= smtc.o smtc-asm.o smtc-proc.o
+obj-$(CONFIG_MIPS_MT_SMP)	+= smp-mt.o

 obj-$(CONFIG_MIPS_APSP_KSPD)	+= kspd.o
 obj-$(CONFIG_MIPS_VPE_LOADER)	+= vpe.o
...
...
arch/mips/kernel/asm-offsets.c
...
...
@@ -69,6 +69,9 @@ void output_ptreg_defines(void)
 	offset("#define PT_BVADDR  ", struct pt_regs, cp0_badvaddr);
 	offset("#define PT_STATUS  ", struct pt_regs, cp0_status);
 	offset("#define PT_CAUSE   ", struct pt_regs, cp0_cause);
+#ifdef CONFIG_MIPS_MT_SMTC
+	offset("#define PT_TCSTATUS  ", struct pt_regs, cp0_tcstatus);
+#endif /* CONFIG_MIPS_MT_SMTC */
 	size("#define PT_SIZE   ", struct pt_regs);
 	linefeed;
 }
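Note: offset(), size() and linefeed above are not ordinary calls; they are
macros that make the compiler embed structure offsets and sizes as constants
in its assembly output, which the build then extracts into a header that
assembly sources (entry.S and friends) can include. A minimal sketch of the
pattern, with a toy structure and illustrative macro definitions (the tree's
real ones live in the asm-offsets machinery and may differ):

#include <stddef.h>

/* Toy stand-in for struct pt_regs. */
struct toy_regs {
	unsigned long cp0_status;
	unsigned long cp0_cause;
};

/* Emit "\n@@@#define NAME <constant>" lines into the asm output; a
 * build-time script strips the @@@ prefix into an offsets header. */
#define offset(string, type, member) \
	__asm__("\n@@@" string "%0" : : "i" (offsetof(type, member)))
#define size(string, type) \
	__asm__("\n@@@" string "%0" : : "i" (sizeof(type)))
#define linefeed __asm__("\n@@@")

void output_toy_defines(void)
{
	offset("#define PT_STATUS ", struct toy_regs, cp0_status);
	offset("#define PT_CAUSE  ", struct toy_regs, cp0_cause);
	size("#define PT_SIZE   ", struct toy_regs);
	linefeed;
}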
...
...
arch/mips/kernel/entry.S
...
...
@@ -17,6 +17,9 @@
 #include <asm/isadep.h>
 #include <asm/thread_info.h>
 #include <asm/war.h>
+#ifdef CONFIG_MIPS_MT_SMTC
+#include <asm/mipsmtregs.h>
+#endif

 #ifdef CONFIG_PREEMPT
 	.macro	preempt_stop
...
...
@@ -75,6 +78,37 @@ FEXPORT(syscall_exit)
 	bnez	t0, syscall_exit_work

 FEXPORT(restore_all)			# restore full frame
+#ifdef CONFIG_MIPS_MT_SMTC
+/* Detect and execute deferred IPI "interrupts" */
+	move	a0, sp
+	jal	deferred_smtc_ipi
+/* Re-arm any temporarily masked interrupts not explicitly "acked" */
+	mfc0	v0, CP0_TCSTATUS
+	ori	v1, v0, TCSTATUS_IXMT
+	mtc0	v1, CP0_TCSTATUS
+	andi	v0, TCSTATUS_IXMT
+	ehb
+	mfc0	t0, CP0_TCCONTEXT
+	DMT	9				# dmt	t1
+	jal	mips_ihb
+	mfc0	t2, CP0_STATUS
+	andi	t3, t0, 0xff00
+	or	t2, t2, t3
+	mtc0	t2, CP0_STATUS
+	ehb
+	andi	t1, t1, VPECONTROL_TE
+	beqz	t1, 1f
+	EMT
+1:
+	mfc0	v1, CP0_TCSTATUS
+/* We set IXMT above, XOR should clear it here */
+	xori	v1, v1, TCSTATUS_IXMT
+	or	v1, v0, v1
+	mtc0	v1, CP0_TCSTATUS
+	ehb
+	xor	t0, t0, t3
+	mtc0	t0, CP0_TCCONTEXT
+#endif /* CONFIG_MIPS_MT_SMTC */
 	.set	noat
 	RESTORE_TEMP
 	RESTORE_AT
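Note: in C terms, the SMTC block added to restore_all above does roughly the
following. Only deferred_smtc_ipi() and mips_ihb() are real symbols from this
commit; the helper names below are illustrative only:

/* Hedged restatement of the assembly sequence above, not kernel code. */
void smtc_restore_all_sketch(struct pt_regs *regs)
{
	unsigned long old_ixmt, deferred, im_bits;

	deferred_smtc_ipi(regs);	/* replay IPIs queued while masked */

	old_ixmt = set_ixmt();		/* TCStatus.IXMT: block IPIs to this TC */
	deferred = read_tccontext();	/* IM levels deferred at dispatch time */

	im_bits = deferred & 0xff00;	/* the Status.IM field */
	set_status(read_status() | im_bits);	/* re-arm those interrupt lines */
	write_tccontext(deferred ^ im_bits);	/* ...and clear the record */

	restore_ixmt(old_ixmt);		/* put IXMT back as we found it */
}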
...
...
arch/mips/kernel/gdb-low.S
...
...
@@ -283,11 +283,33 @@
 	 */
 3:
+#ifdef CONFIG_MIPS_MT_SMTC
+	/* Read-modify-write of Status must be atomic */
+	mfc0	t2, CP0_TCSTATUS
+	ori	t1, t2, TCSTATUS_IXMT
+	mtc0	t1, CP0_TCSTATUS
+	andi	t2, t2, TCSTATUS_IXMT
+	ehb
+	DMT	9				# dmt	t1
+	jal	mips_ihb
+	nop
+#endif /* CONFIG_MIPS_MT_SMTC */
 	mfc0	t0, CP0_STATUS
 	ori	t0, 0x1f
 	xori	t0, 0x1f
 	mtc0	t0, CP0_STATUS
+#ifdef CONFIG_MIPS_MT_SMTC
+	andi	t1, t1, VPECONTROL_TE
+	beqz	t1, 9f
+	nop
+	EMT					# emt
+9:
+	mfc0	t1, CP0_TCSTATUS
+	xori	t1, t1, TCSTATUS_IXMT
+	or	t1, t1, t2
+	mtc0	t1, CP0_TCSTATUS
+	ehb
+#endif /* CONFIG_MIPS_MT_SMTC */
 	LONG_L	v0, GDB_FR_STATUS(sp)
 	LONG_L	v1, GDB_FR_EPC(sp)
 	mtc0	v0, CP0_STATUS
...
...
arch/mips/kernel/gdb-stub.c
...
...
@@ -140,6 +140,7 @@
 #include <asm/system.h>
 #include <asm/gdb-stub.h>
 #include <asm/inst.h>
+#include <asm/smp.h>

 /*
  * external low-level support routines
...
...
@@ -669,6 +670,64 @@ static void kgdb_wait(void *arg)
 	local_irq_restore(flags);
 }

+/*
+ * GDB stub needs to call kgdb_wait on all processors with interrupts
+ * disabled, so it uses its own special variant.
+ */
+static int kgdb_smp_call_kgdb_wait(void)
+{
+#ifdef CONFIG_SMP
+	struct call_data_struct data;
+	int i, cpus = num_online_cpus() - 1;
+	int cpu = smp_processor_id();
+
+	/*
+	 * Can die spectacularly if this CPU isn't yet marked online
+	 */
+	BUG_ON(!cpu_online(cpu));
+
+	if (!cpus)
+		return 0;
+
+	if (spin_is_locked(&smp_call_lock)) {
+		/*
+		 * Some other processor is trying to make us do something
+		 * but we're not going to respond... give up
+		 */
+		return -1;
+	}
+
+	/*
+	 * We will continue here, accepting the fact that
+	 * the kernel may deadlock if another CPU attempts
+	 * to call smp_call_function now...
+	 */
+
+	data.func = kgdb_wait;
+	data.info = NULL;
+	atomic_set(&data.started, 0);
+	data.wait = 0;
+
+	spin_lock(&smp_call_lock);
+	call_data = &data;
+	mb();
+
+	/* Send a message to all other CPUs and wait for them to respond */
+	for (i = 0; i < NR_CPUS; i++)
+		if (cpu_online(i) && i != cpu)
+			core_send_ipi(i, SMP_CALL_FUNCTION);
+
+	/* Wait for response */
+	/* FIXME: lock-up detection, backtrace on lock-up */
+	while (atomic_read(&data.started) != cpus)
+		barrier();
+
+	call_data = NULL;
+	spin_unlock(&smp_call_lock);
+#endif
+
+	return 0;
+}
+
 /*
  * This function does all command processing for interfacing to gdb. It
...
...
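Note: the atomic_read() spin on data.started above is one half of the same
handshake the generic MIPS smp_call_function() path uses: each target CPU
takes the SMP_CALL_FUNCTION IPI, picks the request out of the shared
call_data pointer, and bumps the counter. A hedged sketch of that receiving
side (the real handler is smp_call_function_interrupt() in
arch/mips/kernel/smp.c; the body below is illustrative, not a verbatim copy):

void smp_call_function_interrupt(void)
{
	void (*func)(void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	/* Tell the initiating CPU we have grabbed the data and are about
	 * to execute the function - here, kgdb_wait(), parking this CPU. */
	mb();
	atomic_inc(&call_data->started);

	func(info);

	if (wait) {
		/* Initiator may also want to wait for completion. */
		mb();
		atomic_inc(&call_data->finished);
	}
}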
@@ -718,7 +777,7 @@ void handle_exception (struct gdb_regs *regs)
 	/*
 	 * force other cpus to enter kgdb
 	 */
-	smp_call_function(kgdb_wait, NULL, 0, 0);
+	kgdb_smp_call_kgdb_wait();

 	/*
 	 * If we're in breakpoint() increment the PC
...
arch/mips/kernel/genex.S
...
...
@@ -12,6 +12,7 @@
 #include <linux/init.h>
 #include <asm/asm.h>
+#include <asm/asmmacro.h>
 #include <asm/cacheops.h>
 #include <asm/regdef.h>
 #include <asm/fpregdef.h>
...
...
@@ -171,6 +172,15 @@ NESTED(except_vec_vi, 0, sp)
 	SAVE_AT
 	.set	push
 	.set	noreorder
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * To keep from blindly blocking *all* interrupts
+	 * during service by SMTC kernel, we also want to
+	 * pass the IM value to be cleared.
+	 */
+EXPORT(except_vec_vi_mori)
+	ori	a0, $0, 0
+#endif /* CONFIG_MIPS_MT_SMTC */
 EXPORT(except_vec_vi_lui)
 	lui	v0, 0		/* Patched */
 	j	except_vec_vi_handler
...
...
@@ -187,6 +197,25 @@ EXPORT(except_vec_vi_end)
 NESTED(except_vec_vi_handler, 0, sp)
 	SAVE_TEMP
 	SAVE_STATIC
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * SMTC has an interesting problem that interrupts are level-triggered,
+	 * and the CLI macro will clear EXL, potentially causing a duplicate
+	 * interrupt service invocation. So we need to clear the associated
+	 * IM bit of Status prior to doing CLI, and restore it after the
+	 * service routine has been invoked - we must assume that the
+	 * service routine will have cleared the state, and any active
+	 * level represents a new or otherwise unserviced event...
+	 */
+	mfc0	t1, CP0_STATUS
+	and	t0, a0, t1
+	mfc0	t2, CP0_TCCONTEXT
+	or	t0, t0, t2
+	mtc0	t0, CP0_TCCONTEXT
+	xor	t1, t1, t0
+	mtc0	t1, CP0_STATUS
+	ehb
+#endif /* CONFIG_MIPS_MT_SMTC */
 	CLI
 	move	a0, sp
 	jalr	v0
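Note: this is the deferral half of the mechanism unwound in restore_all
earlier. In C terms (helper names illustrative only): a0 carries the IM bits
patched in at except_vec_vi_mori, and the handler records in TCContext the
currently-armed subset it is about to mask:

	unsigned long status   = read_status();
	unsigned long to_clear = im_arg & status;  /* armed IM bits to defer */

	write_tccontext(read_tccontext() | to_clear); /* remember for restore_all */
	set_status(status ^ to_clear);                /* mask before CLI clears EXL */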
...
...
arch/mips/kernel/head.S
...
...
@@ -18,6 +18,7 @@
 #include <linux/threads.h>

 #include <asm/asm.h>
+#include <asm/asmmacro.h>
 #include <asm/regdef.h>
 #include <asm/page.h>
 #include <asm/mipsregs.h>
...
...
@@ -82,12 +83,33 @@
 	 */
 	.macro	setup_c0_status set clr
 	.set	push
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * For SMTC, we need to set privilege and disable interrupts only for
+	 * the current TC, using the TCStatus register.
+	 */
+	mfc0	t0, CP0_TCSTATUS
+	/* Fortunately CU0 is in the same place in both registers */
+	/* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
+	li	t1, ST0_CU0 | 0x08001c00
+	or	t0, t1
+	/* Clear TKSU, leave IXMT */
+	xori	t0, 0x00001800
+	mtc0	t0, CP0_TCSTATUS
+	ehb
+	/* We need to leave the global IE bit set, but clear EXL... */
+	mfc0	t0, CP0_STATUS
+	or	t0, ST0_CU0 | ST0_EXL | ST0_ERL | \set | \clr
+	xor	t0, ST0_EXL | ST0_ERL | \clr
+	mtc0	t0, CP0_STATUS
+#else
 	mfc0	t0, CP0_STATUS
 	or	t0, ST0_CU0|\set|0x1f|\clr
 	xor	t0, 0x1f|\clr
 	mtc0	t0, CP0_STATUS
 	.set	noreorder
 	sll	zero, 3				# ehb
+#endif
 	.set	pop
 	.endm
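Note on the magic constants above (hedged decode from the MIPS MT ASE
TCStatus layout; verify against asm/mipsmtregs.h):

#define TCSTATUS_TCU0	(1 << 28)	/* same bit position as ST0_CU0 in Status */
#define TCSTATUS_TMX	(1 << 27)	/* MDMX/DSP enable for this TC */
#define TCSTATUS_TKSU	(3 << 11)	/* per-TC privilege (KSU) field */
#define TCSTATUS_IXMT	(1 << 10)	/* inhibit interrupts for this TC */

/* so:  0x08001c00 == TCSTATUS_TMX | TCSTATUS_TKSU | TCSTATUS_IXMT
 * and: 0x00001800 == TCSTATUS_TKSU, xor'd back out to leave IXMT set. */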
...
...
@@ -134,6 +156,24 @@ NESTED(kernel_entry, 16, sp)	# kernel entry point
 	ARC64_TWIDDLE_PC

+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * In SMTC kernel, "CLI" is thread-specific, in TCStatus.
+	 * We still need to enable interrupts globally in Status,
+	 * and clear EXL/ERL.
+	 *
+	 * TCContext is used to track interrupt levels under
+	 * service in SMTC kernel. Clear for boot TC before
+	 * allowing any interrupts.
+	 */
+	mtc0	zero, CP0_TCCONTEXT
+
+	mfc0	t0, CP0_STATUS
+	ori	t0, t0, 0xff1f
+	xori	t0, t0, 0x001e
+	mtc0	t0, CP0_STATUS
+#endif /* CONFIG_MIPS_MT_SMTC */
+
 	PTR_LA	t0, __bss_start		# clear .bss
 	LONG_S	zero, (t0)
 	PTR_LA	t1, __bss_stop - LONGSIZE
...
...
@@ -166,8 +206,25 @@ NESTED(kernel_entry, 16, sp)	# kernel entry point
 	 * function after setting up the stack and gp registers.
 	 */
 NESTED(smp_bootstrap, 16, sp)
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * Read-modify-writes of Status must be atomic, and this
+	 * is one case where CLI is invoked without EXL being
+	 * necessarily set. The CLI and setup_c0_status will
+	 * in fact be redundant for all but the first TC of
+	 * each VPE being booted.
+	 */
+	DMT	10	# dmt t2 /* t0, t1 are used by CLI and setup_c0_status() */
+	jal	mips_ihb
+#endif /* CONFIG_MIPS_MT_SMTC */
 	setup_c0_status_sec
 	smp_slave_setup
+#ifdef CONFIG_MIPS_MT_SMTC
+	andi	t2, t2, VPECONTROL_TE
+	beqz	t2, 2f
+	EMT		# emt
+2:
+#endif /* CONFIG_MIPS_MT_SMTC */
 	j	start_secondary
 END(smp_bootstrap)
 #endif /* CONFIG_SMP */
...
...
arch/mips/kernel/i8259.c
...
...
@@ -187,6 +187,10 @@ handle_real_irq:
 		outb(cached_21, 0x21);
 		outb(0x60+irq, 0x20);	/* 'Specific EOI' to master */
 	}
+#ifdef CONFIG_MIPS_MT_SMTC
+	if (irq_hwmask[irq] & ST0_IM)
+		set_c0_status(irq_hwmask[irq] & ST0_IM);
+#endif /* CONFIG_MIPS_MT_SMTC */
 	spin_unlock_irqrestore(&i8259A_lock, flags);
 	return;
...
...
arch/mips/kernel/irq-msc01.c
...
...
@@ -76,6 +76,11 @@ static void level_mask_and_ack_msc_irq(unsigned int irq)
 	mask_msc_irq(irq);
 	if (!cpu_has_veic)
 		MSCIC_WRITE(MSC01_IC_EOI, 0);
+#ifdef CONFIG_MIPS_MT_SMTC
+	/* This actually needs to be a call into platform code */
+	if (irq_hwmask[irq] & ST0_IM)
+		set_c0_status(irq_hwmask[irq] & ST0_IM);
+#endif /* CONFIG_MIPS_MT_SMTC */
 }

 /*
...
...
@@ -92,6 +97,10 @@ static void edge_mask_and_ack_msc_irq(unsigned int irq)
 		MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT);
 		MSCIC_WRITE(MSC01_IC_SUP+irq*8, r);
 	}
+#ifdef CONFIG_MIPS_MT_SMTC
+	if (irq_hwmask[irq] & ST0_IM)
+		set_c0_status(irq_hwmask[irq] & ST0_IM);
+#endif /* CONFIG_MIPS_MT_SMTC */
 }

 /*
...
...
arch/mips/kernel/irq.c
...
...
@@ -38,6 +38,15 @@ void ack_bad_irq(unsigned int irq)

 atomic_t irq_err_count;

+#ifdef CONFIG_MIPS_MT_SMTC
+/*
+ * SMTC Kernel needs to manipulate low-level CPU interrupt mask
+ * in do_IRQ. These are passed in setup_irq_smtc() and stored
+ * in this table.
+ */
+unsigned long irq_hwmask[NR_IRQS];
+#endif /* CONFIG_MIPS_MT_SMTC */
+
 #undef do_IRQ

 /*
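Note: setup_irq_smtc() itself is added by this commit in
arch/mips/kernel/smtc.c, which is not shown in this excerpt. The comment
above implies a registration wrapper of roughly this shape (hedged sketch,
not the file's actual contents):

int setup_irq_smtc(unsigned int irq, struct irqaction *new,
                   unsigned long hwmask)
{
	/* Record which Status.IM bits belong to this irq, for the
	 * dispatch and EOI paths to defer and re-arm. */
	irq_hwmask[irq] = hwmask;

	return setup_irq(irq, new);
}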
...
...
@@ -49,6 +58,7 @@ asmlinkage unsigned int do_IRQ(unsigned int irq, struct pt_regs *regs)
 {
 	irq_enter();

+	__DO_IRQ_SMTC_HOOK();
 	__do_IRQ(irq, regs);

 	irq_exit();
...
...
@@ -129,6 +139,9 @@ void __init init_IRQ(void)
 		irq_desc[i].depth    = 1;
 		irq_desc[i].handler  = &no_irq_type;
 		spin_lock_init(&irq_desc[i].lock);
+#ifdef CONFIG_MIPS_MT_SMTC
+		irq_hwmask[i] = 0;
+#endif /* CONFIG_MIPS_MT_SMTC */
 	}

 	arch_init_irq();
...
...
arch/mips/kernel/mips-mt.c
new file mode 100644
/*
 * General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels
 * Copyright (C) 2005 Mips Technologies, Inc
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>

#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/mipsmtregs.h>
#include <asm/r4kcache.h>
#include <asm/cacheflush.h>
/*
 * CPU mask used to set process affinity for MT VPEs/TCs with FPUs
 */
cpumask_t mt_fpu_cpumask;

#ifdef CONFIG_MIPS_MT_FPAFF

#include <linux/cpu.h>
#include <linux/delay.h>
#include <asm/uaccess.h>

unsigned long mt_fpemul_threshold = 0;

/*
 * Replacement functions for the sys_sched_setaffinity() and
 * sys_sched_getaffinity() system calls, so that we can integrate
 * FPU affinity with the user's requested processor affinity.
 * This code is 98% identical with the sys_sched_setaffinity()
 * and sys_sched_getaffinity() system calls, and should be
 * updated when kernel/sched.c changes.
 */

/*
 * find_process_by_pid - find a process with a matching PID value.
 * used in sys_sched_set/getaffinity() in kernel/sched.c, so
 * cloned here.
 */
static inline task_t *find_process_by_pid(pid_t pid)
{
	return pid ? find_task_by_pid(pid) : current;
}
/*
 * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
 */
asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
				      unsigned long __user *user_mask_ptr)
{
	cpumask_t new_mask;
	cpumask_t effective_mask;
	int retval;
	task_t *p;

	if (len < sizeof(new_mask))
		return -EINVAL;

	if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
		return -EFAULT;

	lock_cpu_hotplug();
	read_lock(&tasklist_lock);

	p = find_process_by_pid(pid);
	if (!p) {
		read_unlock(&tasklist_lock);
		unlock_cpu_hotplug();
		return -ESRCH;
	}

	/*
	 * It is not safe to call set_cpus_allowed with the
	 * tasklist_lock held. We will bump the task_struct's
	 * usage count and drop tasklist_lock before invoking
	 * set_cpus_allowed.
	 */
	get_task_struct(p);

	retval = -EPERM;
	if ((current->euid != p->euid) && (current->euid != p->uid) &&
			!capable(CAP_SYS_NICE)) {
		read_unlock(&tasklist_lock);
		goto out_unlock;
	}

	/* Record new user-specified CPU set for future reference */
	p->thread.user_cpus_allowed = new_mask;

	/* Unlock the task list */
	read_unlock(&tasklist_lock);

	/* Compute new global allowed CPU set if necessary */
	if ((p->thread.mflags & MF_FPUBOUND)
	    && cpus_intersects(new_mask, mt_fpu_cpumask)) {
		cpus_and(effective_mask, new_mask, mt_fpu_cpumask);
		retval = set_cpus_allowed(p, effective_mask);
	} else {
		p->thread.mflags &= ~MF_FPUBOUND;
		retval = set_cpus_allowed(p, new_mask);
	}

out_unlock:
	put_task_struct(p);
	unlock_cpu_hotplug();
	return retval;
}
/*
 * mipsmt_sys_sched_getaffinity - get the cpu affinity of a process
 */
asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
				      unsigned long __user *user_mask_ptr)
{
	unsigned int real_len;
	cpumask_t mask;
	int retval;
	task_t *p;

	real_len = sizeof(mask);
	if (len < real_len)
		return -EINVAL;

	lock_cpu_hotplug();
	read_lock(&tasklist_lock);

	retval = -ESRCH;
	p = find_process_by_pid(pid);
	if (!p)
		goto out_unlock;

	retval = 0;
	cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map);

out_unlock:
	read_unlock(&tasklist_lock);
	unlock_cpu_hotplug();

	if (retval)
		return retval;
	if (copy_to_user(user_mask_ptr, &mask, real_len))
		return -EFAULT;
	return real_len;
}
#endif /* CONFIG_MIPS_MT_FPAFF */
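Note: with CONFIG_MIPS_MT_FPAFF, the functions above stand in for the normal
sched_{set,get}affinity() syscalls, so userspace needs nothing special: the
kernel records the full requested mask in thread.user_cpus_allowed and may
narrow the effective mask for FPU-bound (MF_FPUBOUND) tasks. Ordinary usage,
for illustration:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(0, &set);
	CPU_SET(1, &set);	/* request CPUs 0 and 1 */

	/* On an FPAFF kernel this lands in mipsmt_sys_sched_setaffinity();
	 * an FPU-bound task gets the mask intersected with mt_fpu_cpumask. */
	if (sched_setaffinity(0, sizeof(set), &set))
		perror("sched_setaffinity");

	return 0;
}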
/*
 * Dump new MIPS MT state for the core. Does not leave TCs halted.
 * Takes an argument which is taken to be a pre-call MVPControl value.
 */

void mips_mt_regdump(unsigned long mvpctl)
{
	unsigned long flags;
	unsigned long vpflags;
	unsigned long mvpconf0;
	int nvpe;
	int ntc;
	int i;
	int tc;
	unsigned long haltval;
	unsigned long tcstatval;
#ifdef CONFIG_MIPS_MT_SMTC
	void smtc_soft_dump(void);
#endif /* CONFIG_MIPS_MT_SMTC */

	local_irq_save(flags);
	vpflags = dvpe();
	printk("=== MIPS MT State Dump ===\n");
	printk("-- Global State --\n");
	printk("   MVPControl Passed: %08lx\n", mvpctl);
	printk("   MVPControl Read: %08lx\n", vpflags);
	printk("   MVPConf0 : %08lx\n", (mvpconf0 = read_c0_mvpconf0()));
	nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
	ntc = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
	printk("-- per-VPE State --\n");
	for (i = 0; i < nvpe; i++) {
		for (tc = 0; tc < ntc; tc++) {
			settc(tc);
			if ((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
				printk("  VPE %d\n", i);
				printk("   VPEControl : %08lx\n",
				       read_vpe_c0_vpecontrol());
				printk("   VPEConf0 : %08lx\n",
				       read_vpe_c0_vpeconf0());
				printk("   VPE%d.Status : %08lx\n",
				       i, read_vpe_c0_status());
				printk("   VPE%d.EPC : %08lx\n",
				       i, read_vpe_c0_epc());
				printk("   VPE%d.Cause : %08lx\n",
				       i, read_vpe_c0_cause());
				printk("   VPE%d.Config7 : %08lx\n",
				       i, read_vpe_c0_config7());
				break; /* Next VPE */
			}
		}