Commit 8f65873e authored by Graf Yang, committed by Bryan Wu

Blackfin arch: SMP supporting patchset: Blackfin kernel and memory management code

The dual-core Blackfin BF561 processor can support SMP-like features.
https://docs.blackfin.uclinux.org/doku.php?id=linux-kernel:smp-like

In this patch, we extend the Blackfin kernel and memory management code to support SMP.

Signed-off-by: Graf Yang <graf.yang@analog.com>
Signed-off-by: Mike Frysinger <vapier.adi@gmail.com>
Signed-off-by: Bryan Wu <cooloney@kernel.org>
parent b8a98989
......@@ -56,6 +56,9 @@ int main(void)
/* offsets into the thread struct */
DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
DEFINE(THREAD_USP, offsetof(struct thread_struct, usp));
DEFINE(THREAD_SR, offsetof(struct thread_struct, seqstat));
DEFINE(PT_SR, offsetof(struct thread_struct, seqstat));
DEFINE(THREAD_ESP0, offsetof(struct thread_struct, esp0));
DEFINE(THREAD_PC, offsetof(struct thread_struct, pc));
DEFINE(KERNEL_STACK_SIZE, THREAD_SIZE);
......@@ -128,5 +131,31 @@ int main(void)
DEFINE(SIGSEGV, SIGSEGV);
DEFINE(SIGTRAP, SIGTRAP);
/* PDA management (in L1 scratchpad) */
DEFINE(PDA_SYSCFG, offsetof(struct blackfin_pda, syscfg));
#ifdef CONFIG_SMP
DEFINE(PDA_IRQFLAGS, offsetof(struct blackfin_pda, imask));
#endif
DEFINE(PDA_IPDT, offsetof(struct blackfin_pda, ipdt));
DEFINE(PDA_IPDT_SWAPCOUNT, offsetof(struct blackfin_pda, ipdt_swapcount));
DEFINE(PDA_DPDT, offsetof(struct blackfin_pda, dpdt));
DEFINE(PDA_DPDT_SWAPCOUNT, offsetof(struct blackfin_pda, dpdt_swapcount));
DEFINE(PDA_EXIPTR, offsetof(struct blackfin_pda, ex_iptr));
DEFINE(PDA_EXOPTR, offsetof(struct blackfin_pda, ex_optr));
DEFINE(PDA_EXBUF, offsetof(struct blackfin_pda, ex_buf));
DEFINE(PDA_EXIMASK, offsetof(struct blackfin_pda, ex_imask));
DEFINE(PDA_EXSTACK, offsetof(struct blackfin_pda, ex_stack));
#ifdef ANOMALY_05000261
DEFINE(PDA_LFRETX, offsetof(struct blackfin_pda, last_cplb_fault_retx));
#endif
DEFINE(PDA_DCPLB, offsetof(struct blackfin_pda, dcplb_fault_addr));
DEFINE(PDA_ICPLB, offsetof(struct blackfin_pda, icplb_fault_addr));
DEFINE(PDA_RETX, offsetof(struct blackfin_pda, retx));
DEFINE(PDA_SEQSTAT, offsetof(struct blackfin_pda, seqstat));
#ifdef CONFIG_SMP
/* Inter-core lock (in L2 SRAM) */
DEFINE(SIZEOF_CORELOCK, sizeof(struct corelock_slot));
#endif
return 0;
}
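For context on how these constants are consumed: asm-offsets.c is compiled to assembly and a kbuild sed script scrapes the emitted markers into asm-offsets.h, so .S sources can address C struct fields (such as the per-core PDA) by numeric constant. A minimal sketch of the DEFINE() macro behind this, mirroring include/linux/kbuild.h (shown for illustration, not part of this patch):

/* Compiling this file with -S emits marker lines of the form
 *   ->PDA_SYSCFG <value> offsetof(struct blackfin_pda, syscfg)
 * which kbuild rewrites into "#define PDA_SYSCFG <value>", letting
 * exception/entry assembly address PDA fields without the C layout. */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))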
......@@ -68,3 +68,37 @@ EXPORT_SYMBOL(insw_8);
EXPORT_SYMBOL(outsl);
EXPORT_SYMBOL(insl);
EXPORT_SYMBOL(insl_16);
#ifdef CONFIG_SMP
EXPORT_SYMBOL(__raw_atomic_update_asm);
EXPORT_SYMBOL(__raw_atomic_clear_asm);
EXPORT_SYMBOL(__raw_atomic_set_asm);
EXPORT_SYMBOL(__raw_atomic_xor_asm);
EXPORT_SYMBOL(__raw_atomic_test_asm);
EXPORT_SYMBOL(__raw_xchg_1_asm);
EXPORT_SYMBOL(__raw_xchg_2_asm);
EXPORT_SYMBOL(__raw_xchg_4_asm);
EXPORT_SYMBOL(__raw_cmpxchg_1_asm);
EXPORT_SYMBOL(__raw_cmpxchg_2_asm);
EXPORT_SYMBOL(__raw_cmpxchg_4_asm);
EXPORT_SYMBOL(__raw_spin_is_locked_asm);
EXPORT_SYMBOL(__raw_spin_lock_asm);
EXPORT_SYMBOL(__raw_spin_trylock_asm);
EXPORT_SYMBOL(__raw_spin_unlock_asm);
EXPORT_SYMBOL(__raw_read_lock_asm);
EXPORT_SYMBOL(__raw_read_trylock_asm);
EXPORT_SYMBOL(__raw_read_unlock_asm);
EXPORT_SYMBOL(__raw_write_lock_asm);
EXPORT_SYMBOL(__raw_write_trylock_asm);
EXPORT_SYMBOL(__raw_write_unlock_asm);
EXPORT_SYMBOL(__raw_bit_set_asm);
EXPORT_SYMBOL(__raw_bit_clear_asm);
EXPORT_SYMBOL(__raw_bit_toggle_asm);
EXPORT_SYMBOL(__raw_bit_test_asm);
EXPORT_SYMBOL(__raw_bit_test_set_asm);
EXPORT_SYMBOL(__raw_bit_test_clear_asm);
EXPORT_SYMBOL(__raw_bit_test_toggle_asm);
EXPORT_SYMBOL(__raw_uncached_fetch_asm);
EXPORT_SYMBOL(__raw_smp_mark_barrier_asm);
EXPORT_SYMBOL(__raw_smp_check_barrier_asm);
#endif
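These exports back the SMP flavors of the generic atomic and bitop wrappers. A rough sketch of the layering, simplified from the asm/atomic.h changes in this series (a sketch, not verbatim):

/* Sketch: on SMP the C-level atomics become thin wrappers around the
 * exported assembly primitives, which serialize across cores (via the
 * L2 corelock when cache coherence is emulated). */
static inline void atomic_add(int i, atomic_t *v)
{
	__raw_atomic_update_asm(&v->counter, i);
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	return __raw_atomic_update_asm(&v->counter, i);
}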
......@@ -30,6 +30,7 @@
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/errno.h>
#include <asm/blackfin.h>
#include <asm/asm-offsets.h>
#include <asm/context.S>
......
......@@ -36,7 +36,7 @@
#include <linux/irq.h>
#include <asm/trace.h>
static unsigned long irq_err_count;
static atomic_t irq_err_count;
static spinlock_t irq_controller_lock;
/*
......@@ -48,7 +48,7 @@ void dummy_mask_unmask_irq(unsigned int irq)
void ack_bad_irq(unsigned int irq)
{
irq_err_count += 1;
atomic_inc(&irq_err_count);
printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
}
EXPORT_SYMBOL(ack_bad_irq);
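Switching the counter to atomic_t closes a lost-update race once both cores can take spurious interrupts concurrently; a sketch of the failure mode the old plain counter allowed (bump_irq_err is a hypothetical helper for illustration):

/* With "unsigned long irq_err_count; irq_err_count += 1;" the
 * read-modify-write can interleave across cores:
 *   core A: read 5          core B: read 5
 *   core A: write 6         core B: write 6   -> one increment lost
 * atomic_inc() goes through the __raw_atomic_*_asm helpers exported
 * above, so concurrent increments are never dropped. */
static atomic_t irq_err_count;

static void bump_irq_err(void)
{
	atomic_inc(&irq_err_count);
}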
......@@ -72,7 +72,7 @@ static struct irq_desc bad_irq_desc = {
int show_interrupts(struct seq_file *p, void *v)
{
int i = *(loff_t *) v;
int i = *(loff_t *) v, j;
struct irqaction *action;
unsigned long flags;
......@@ -80,19 +80,20 @@ int show_interrupts(struct seq_file *p, void *v)
spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto unlock;
seq_printf(p, "%3d: %10u ", i, kstat_irqs(i));
goto skip;
seq_printf(p, "%3d: ", i);
for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
seq_printf(p, " %8s", irq_desc[i].chip->name);
seq_printf(p, " %s", action->name);
for (action = action->next; action; action = action->next)
seq_printf(p, ", %s", action->name);
seq_printf(p, " %s", action->name);
seq_putc(p, '\n');
unlock:
skip:
spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS) {
seq_printf(p, "Err: %10lu\n", irq_err_count);
}
} else if (i == NR_IRQS)
seq_printf(p, "Err: %10u\n", atomic_read(&irq_err_count));
return 0;
}
......@@ -101,7 +102,6 @@ int show_interrupts(struct seq_file *p, void *v)
* come via this function. Instead, they should provide their
* own 'handler'
*/
#ifdef CONFIG_DO_IRQ_L1
__attribute__((l1_text))
#endif
......
......@@ -363,12 +363,12 @@ void kgdb_passive_cpu_callback(void *info)
void kgdb_roundup_cpus(unsigned long flags)
{
smp_call_function(kgdb_passive_cpu_callback, NULL, 0, 0);
smp_call_function(kgdb_passive_cpu_callback, NULL, 0);
}
void kgdb_roundup_cpu(int cpu, unsigned long flags)
{
smp_call_function_single(cpu, kgdb_passive_cpu_callback, NULL, 0, 0);
smp_call_function_single(cpu, kgdb_passive_cpu_callback, NULL, 0);
}
#endif
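The dropped trailing arguments track the generic IPI rework this series is rebased on: the retry/nonatomic parameters were removed, leaving wait as the only flag. The assumed post-rework prototypes (linux/smp.h of the same era):

/* wait is 0 in both kgdb calls above: kgdb runs in exception context
 * and must not block waiting for the other core to ack the IPI. */
int smp_call_function(void (*func)(void *info), void *info, int wait);
int smp_call_function_single(int cpu, void (*func)(void *info), void *info,
			     int wait);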
......
......@@ -343,7 +343,13 @@ apply_relocate_add(Elf_Shdr * sechdrs, const char *strtab,
pr_debug("location is %x, value is %x type is %d \n",
(unsigned int) location32, value,
ELF32_R_TYPE(rel[i].r_info));
#ifdef CONFIG_SMP
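	/* Addresses at or above COREB_L1_DATA_A_START fall in Core B's
	 * private L1; the module loader cannot safely place code or data
	 * there on an SMP kernel, so reject the relocation outright. */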
if ((unsigned long)location16 >= COREB_L1_DATA_A_START) {
printk(KERN_ERR "module %s: cannot relocate in L1: %u (SMP kernel)",
mod->name, ELF32_R_TYPE(rel[i].r_info));
return -ENOEXEC;
}
#endif
switch (ELF32_R_TYPE(rel[i].r_info)) {
case R_pcrel24:
......@@ -436,6 +442,7 @@ module_finalize(const Elf_Ehdr * hdr,
{
unsigned int i, strindex = 0, symindex = 0;
char *secstrings;
long err = 0;
secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
......@@ -460,8 +467,10 @@ module_finalize(const Elf_Ehdr * hdr,
(strcmp(".rela.l1.text", secstrings + sechdrs[i].sh_name) == 0) ||
((strcmp(".rela.text", secstrings + sechdrs[i].sh_name) == 0) &&
(hdr->e_flags & (EF_BFIN_CODE_IN_L1|EF_BFIN_CODE_IN_L2))))) {
apply_relocate_add((Elf_Shdr *) sechdrs, strtab,
err = apply_relocate_add((Elf_Shdr *) sechdrs, strtab,
symindex, i, mod);
if (err < 0)
return -ENOEXEC;
}
}
return 0;
......
......@@ -171,6 +171,13 @@ asmlinkage int bfin_clone(struct pt_regs *regs)
unsigned long clone_flags;
unsigned long newsp;
#ifdef __ARCH_SYNC_CORE_DCACHE
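	/* With no hardware D-cache coherency (__ARCH_SYNC_CORE_DCACHE),
	 * pin a still-migratable task to the current CPU before cloning
	 * so parent and child always observe a coherent view of memory. */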
if (current->rt.nr_cpus_allowed == num_possible_cpus()) {
current->cpus_allowed = cpumask_of_cpu(smp_processor_id());
current->rt.nr_cpus_allowed = 1;
}
#endif
/* syscall2 puts clone_flags in r0 and usp in r1 */
clone_flags = regs->r0;
newsp = regs->r1;
......@@ -338,22 +345,22 @@ int _access_ok(unsigned long addr, unsigned long size)
if (addr >= (unsigned long)__init_begin &&
addr + size <= (unsigned long)__init_end)
return 1;
if (addr >= L1_SCRATCH_START
&& addr + size <= L1_SCRATCH_START + L1_SCRATCH_LENGTH)
if (addr >= get_l1_scratch_start()
&& addr + size <= get_l1_scratch_start() + L1_SCRATCH_LENGTH)
return 1;
#if L1_CODE_LENGTH != 0
if (addr >= L1_CODE_START + (_etext_l1 - _stext_l1)
&& addr + size <= L1_CODE_START + L1_CODE_LENGTH)
if (addr >= get_l1_code_start() + (_etext_l1 - _stext_l1)
&& addr + size <= get_l1_code_start() + L1_CODE_LENGTH)
return 1;
#endif
#if L1_DATA_A_LENGTH != 0
if (addr >= L1_DATA_A_START + (_ebss_l1 - _sdata_l1)
&& addr + size <= L1_DATA_A_START + L1_DATA_A_LENGTH)
if (addr >= get_l1_data_a_start() + (_ebss_l1 - _sdata_l1)
&& addr + size <= get_l1_data_a_start() + L1_DATA_A_LENGTH)
return 1;
#endif
#if L1_DATA_B_LENGTH != 0
if (addr >= L1_DATA_B_START + (_ebss_b_l1 - _sdata_b_l1)
&& addr + size <= L1_DATA_B_START + L1_DATA_B_LENGTH)
if (addr >= get_l1_data_b_start() + (_ebss_b_l1 - _sdata_b_l1)
&& addr + size <= get_l1_data_b_start() + L1_DATA_B_LENGTH)
return 1;
#endif
#if L2_LENGTH != 0
......
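The compile-time L1 base constants give way to runtime helpers here because Core A and Core B expose their private L1 at different addresses. A plausible shape for one such helper, sketched after this series' mach/mem_map.h (blackfin_core_id() and the COREA_/COREB_ constants are assumptions drawn from the BF561 headers):

/* Sketch: resolve the L1 instruction-SRAM base for the executing core. */
static inline unsigned long get_l1_code_start(void)
{
#ifdef CONFIG_SMP
	return blackfin_core_id() ? COREB_L1_CODE_START : COREA_L1_CODE_START;
#else
	return L1_CODE_START;
#endif
}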
......@@ -220,8 +220,8 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
pr_debug("ptrace: user address is valid\n");
if (L1_CODE_LENGTH != 0 && addr >= L1_CODE_START
&& addr + sizeof(tmp) <= L1_CODE_START + L1_CODE_LENGTH) {
if (L1_CODE_LENGTH != 0 && addr >= get_l1_code_start()
&& addr + sizeof(tmp) <= get_l1_code_start() + L1_CODE_LENGTH) {
safe_dma_memcpy (&tmp, (const void *)(addr), sizeof(tmp));
copied = sizeof(tmp);
......@@ -300,8 +300,8 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
pr_debug("ptrace: user address is valid\n");
if (L1_CODE_LENGTH != 0 && addr >= L1_CODE_START
&& addr + sizeof(data) <= L1_CODE_START + L1_CODE_LENGTH) {
if (L1_CODE_LENGTH != 0 && addr >= get_l1_code_start()
&& addr + sizeof(data) <= get_l1_code_start() + L1_CODE_LENGTH) {
safe_dma_memcpy ((void *)(addr), &data, sizeof(data));
copied = sizeof(data);
......
......@@ -21,7 +21,7 @@
* the core reset.
*/
__attribute__((l1_text))
static void bfin_reset(void)
static void _bfin_reset(void)
{
/* Wait for completion of "system" events such as cache line
* fills so that we avoid infinite stalls later on as
......@@ -66,6 +66,18 @@ static void bfin_reset(void)
}
}
static void bfin_reset(void)
{
if (ANOMALY_05000353 || ANOMALY_05000386)
_bfin_reset();
else
/* the bootrom checks to see how it was reset and will
* automatically perform a software reset for us when
* it starts executing boot
*/
asm("raise 1;");
}
__attribute__((weak))
void native_machine_restart(char *cmd)
{
......@@ -75,14 +87,10 @@ void machine_restart(char *cmd)
{
native_machine_restart(cmd);
local_irq_disable();
if (ANOMALY_05000353 || ANOMALY_05000386)
bfin_reset();
if (smp_processor_id())
smp_call_function((void *)bfin_reset, 0, 1);
else
/* the bootrom checks to see how it was reset and will
* automatically perform a software reset for us when
* it starts executing boot
*/
asm("raise 1;");
bfin_reset();
}
__attribute__((weak))
......
......@@ -26,11 +26,10 @@
#include <asm/blackfin.h>
#include <asm/cplbinit.h>
#include <asm/div64.h>
#include <asm/cpu.h>
#include <asm/fixed_code.h>
#include <asm/early_printk.h>
static DEFINE_PER_CPU(struct cpu, cpu_devices);
u16 _bfin_swrst;
EXPORT_SYMBOL(_bfin_swrst);
......@@ -79,29 +78,76 @@ static struct change_member *change_point[2*BFIN_MEMMAP_MAX] __initdata;
static struct bfin_memmap_entry *overlap_list[BFIN_MEMMAP_MAX] __initdata;
static struct bfin_memmap_entry new_map[BFIN_MEMMAP_MAX] __initdata;
void __init bfin_cache_init(void)
{
DEFINE_PER_CPU(struct blackfin_cpudata, cpu_data);
#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
generate_cplb_tables();
void __init generate_cplb_tables(void)
{
unsigned int cpu;
/* Generate per-CPU I&D CPLB tables */
for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
generate_cplb_tables_cpu(cpu);
}
#endif
void __cpuinit bfin_setup_caches(unsigned int cpu)
{
#ifdef CONFIG_BFIN_ICACHE
bfin_icache_init();
printk(KERN_INFO "Instruction Cache Enabled\n");
#ifdef CONFIG_MPU
bfin_icache_init(icplb_tbl[cpu]);
#else
bfin_icache_init(icplb_tables[cpu]);
#endif
#endif
#ifdef CONFIG_BFIN_DCACHE
bfin_dcache_init();
printk(KERN_INFO "Data Cache Enabled"
#ifdef CONFIG_MPU
bfin_dcache_init(dcplb_tbl[cpu]);
#else
bfin_dcache_init(dcplb_tables[cpu]);
#endif
#endif
/*
* In cache coherence emulation mode, we need to have the
* D-cache enabled before running any atomic operation which
* might involve cache invalidation (i.e. spinlock, rwlock).
* So printk's are deferred until then.
*/
#ifdef CONFIG_BFIN_ICACHE
printk(KERN_INFO "Instruction Cache Enabled for CPU%u\n", cpu);
#endif
#ifdef CONFIG_BFIN_DCACHE
printk(KERN_INFO "Data Cache Enabled for CPU%u"
# if defined CONFIG_BFIN_WB
" (write-back)"
# elif defined CONFIG_BFIN_WT
" (write-through)"
# endif
"\n");
"\n", cpu);
#endif
}
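/* Sketch (an assumption, modeled on the MPU variant of asm/cplbinit.h
 * in this series): bfin_icache_init()/bfin_dcache_init() receive the
 * CPLB table of the core being brought up, one table per core, which
 * they program into that core's I/D CPLB MMRs. */
struct cplb_entry {
	unsigned long data, addr;
};
extern struct cplb_entry icplb_tbl[NR_CPUS][MAX_CPLBS];
extern struct cplb_entry dcplb_tbl[NR_CPUS][MAX_CPLBS];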
void __cpuinit bfin_setup_cpudata(unsigned int cpu)
{
struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu);
cpudata->idle = current;
cpudata->loops_per_jiffy = loops_per_jiffy;
cpudata->cclk = get_cclk();
cpudata->imemctl = bfin_read_IMEM_CONTROL();
cpudata->dmemctl = bfin_read_DMEM_CONTROL();
}
void __init bfin_cache_init(void)
{
#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
generate_cplb_tables();
#endif
bfin_setup_caches(0);
}
void __init bfin_relocate_l1_mem(void)
{
unsigned long l1_code_length;
......@@ -230,7 +276,7 @@ static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map)
/* record all known change-points (starting and ending addresses),
omitting those that are for empty memory regions */
chgidx = 0;
for (i = 0; i < old_nr; i++) {
if (map[i].size != 0) {
change_point[chgidx]->addr = map[i].addr;
change_point[chgidx++]->pentry = &map[i];
......@@ -238,13 +284,13 @@ static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map)
change_point[chgidx++]->pentry = &map[i];
}
}
chg_nr = chgidx; /* true number of change-points */
/* sort change-point list by memory addresses (low -> high) */
still_changing = 1;
while (still_changing) {
still_changing = 0;
for (i = 1; i < chg_nr; i++) {
/* if <current_addr> > <last_addr>, swap */
/* or, if current=<start_addr> & last=<end_addr>, swap */
if ((change_point[i]->addr < change_point[i-1]->addr) ||
......@@ -261,10 +307,10 @@ static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map)
}
/* create a new memmap, removing overlaps */
overlap_entries = 0; /* number of entries in the overlap table */
new_entry = 0; /* index for creating new memmap entries */
last_type = 0; /* start with undefined memory type */
last_addr = 0; /* start with 0 as last starting address */
/* loop through change-points, determining effect on the new memmap */
for (chgidx = 0; chgidx < chg_nr; chgidx++) {
/* keep track of all overlapping memmap entries */
......@@ -286,14 +332,14 @@ static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map)
if (overlap_list[i]->type > current_type)
current_type = overlap_list[i]->type;
/* continue building up new memmap based on this information */
if (current_type != last_type) {
if (last_type != 0) {
new_map[new_entry].size =
change_point[chgidx]->addr - last_addr;
/* move forward only if the new size was non-zero */
if (new_map[new_entry].size != 0)
if (++new_entry >= BFIN_MEMMAP_MAX)
break; /* no more space left for new entries */
}
if (current_type != 0) {
new_map[new_entry].addr = change_point[chgidx]->addr;
......@@ -303,9 +349,9 @@ static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map)
last_type = current_type;
}
}
new_nr = new_entry; /* retain count for new entries */
/* copy new mapping into original location */
memcpy(map, new_map, new_nr*sizeof(struct bfin_memmap_entry));
*pnr_map = new_nr;
......@@ -361,7 +407,6 @@ static __init int parse_memmap(char *arg)
* - "memmap=XXX[KkmM][@][$]XXX[KkmM]" defines a memory region
* @ from <start> to <start>+<mem>, type RAM
* $ from <start> to <start>+<mem>, type RESERVED
*
*/
static __init void parse_cmdline_early(char *cmdline_p)
{
......@@ -383,12 +428,10 @@ static __init void parse_cmdline_early(char *cmdline_p)
if (*to != ' ') {
if (*to == '$'
|| *(to + 1) == '$')
reserved_mem_dcache_on =
1;
reserved_mem_dcache_on = 1;
if (*to == '#'
|| *(to + 1) == '#')
reserved_mem_icache_on =
1;
reserved_mem_icache_on = 1;
}
}
} else if (!memcmp(to, "earlyprintk=", 12)) {
......@@ -417,9 +460,8 @@ static __init void parse_cmdline_early(char *cmdline_p)
* [_ramend - DMA_UNCACHED_REGION,
* _ramend]: uncached DMA region
* [_ramend, physical_mem_end]: memory not managed by kernel
*
*/
static __init void memory_setup(void)
{
#ifdef CONFIG_MTD_UCLINUX
unsigned long mtd_phys = 0;
......@@ -436,7 +478,7 @@ static __init void memory_setup(void)
memory_end = _ramend - DMA_UNCACHED_REGION;
#ifdef CONFIG_MPU
/* Round up to multiple of 4MB. */
/* Round up to multiple of 4MB */
memory_start = (_ramstart + 0x3fffff) & ~0x3fffff;
#else
memory_start = PAGE_ALIGN(_ramstart);
......@@ -616,7 +658,7 @@ static __init void setup_bootmem_allocator(void)
end_pfn = memory_end >> PAGE_SHIFT;
/*
* give all the memory to the bootmap allocator, tell it to put the
* boot mem_map at the start of memory.
*/
bootmap_size = init_bootmem_node(NODE_DATA(0),
......@@ -791,7 +833,11 @@ void __init setup_arch(char **cmdline_p)
bfin_write_SWRST(_bfin_swrst | DOUBLE_FAULT);
#endif
#ifdef CONFIG_SMP
if (_bfin_swrst & SWRST_DBL_FAULT_A) {
#else
if (_bfin_swrst & RESET_DOUBLE) {
#endif
printk(KERN_EMERG "Recovering from DOUBLE FAULT event\n");
#ifdef CONFIG_DEBUG_DOUBLEFAULT
/* We assume the crashing kernel, and the current symbol table match */
......@@ -835,7 +881,7 @@ void __init setup_arch(char **cmdline_p)
printk(KERN_INFO "Blackfin Linux support by http://blackfin.uclinux.org/\n");
printk(KERN_INFO "Processor Speed: %lu MHz core clock and %lu MHz System Clock\n",
cclk / 1000000, sclk / 1000000);
if (ANOMALY_05000273 && (cclk >> 1) <= sclk)
printk("\n\n\nANOMALY_05000273: CCLK must be >= 2*SCLK !!!\n\n\n");
......@@ -867,18 +913,21 @@ void __init setup_arch(char **cmdline_p)
BUG_ON((char *)&safe_user_instruction - (char *)&fixed_code_start
!= SAFE_USER_INSTRUCTION - FIXED_CODE_START);
#ifdef CONFIG_SMP
platform_init_cpus();
#endif
init_exception_vectors();
bfin_cache_init();
bfin_cache_init(); /* Initialize caches for the boot CPU */
}
static int __init topology_init(void)
{
int cpu;
unsigned int cpu;
/* Record CPU-private information for the boot processor. */
bfin_setup_cpudata(0);
for_each_possible_cpu(cpu) {
struct cpu *c = &per_cpu(cpu_devices, cpu);
register_cpu(c, cpu);
register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu);
}
return 0;
......@@ -983,15 +1032,15 @@ static int show_cpuinfo(struct seq_file *m, void *v)
char *cpu, *mmu, *fpu, *vendor, *cache;
uint32_t revid;
u_long cclk = 0, sclk = 0;
u_long sclk = 0;
u_int icache_size = BFIN_ICACHESIZE / 1024, dcache_size = 0, dsup_banks = 0;
struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, *(unsigned int *)v);
cpu = CPU;
mmu = "none";
fpu = "none";
revid = bfin_revid();
cclk = get_cclk();
sclk = get_sclk();
switch (bfin_read_CHIPID() & CHIPID_MANUFACTURE) {
......@@ -1003,10 +1052,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
break;
}
seq_printf(m, "processor\t: %d\n"
"vendor_id\t: %s\n",
*(unsigned int *)v,
vendor);
seq_printf(m, "processor\t: %d\n" "vendor_id\t: %s\n",
*(unsigned int *)v, vendor);
if (CPUID == bfin_cpuid())
seq_printf(m, "cpu family\t: 0x%04x\n", CPUID);
......@@ -1016,7 +1063,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "model name\t: ADSP-%s %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n"
"stepping\t: %d\n",
cpu, cclk/1000000, sclk/1000000,
cpu, cpudata->cclk/1000000, sclk/1000000,
#ifdef CONFIG_MPU
"mpu on",
#else
......@@ -1025,16 +1072,16 @@ static int show_cpuinfo(struct seq_file *m, void *v)
revid);
seq_printf(m, "cpu MHz\t\t: %lu.%03lu/%lu.%03lu\n",
cclk/1000000, cclk%1000000,
cpudata->cclk/1000000, cpudata->cclk%1000000,
sclk/1000000, sclk%1000000);
seq_printf(m, "bogomips\t: %lu.%02lu\n"
"Calibration\t: %lu loops\n",
(loops_per_jiffy * HZ) / 500000,
((loops_per_jiffy * HZ) / 5000) % 100,
(loops_per_jiffy * HZ));
(cpudata->loops_per_jiffy * HZ) / 500000,
((cpudata->loops_per_jiffy * HZ) / 5000) % 100,
(cpudata->loops_per_jiffy * HZ));
/* Check Cache configuration */
switch (bfin_read_DMEM_CONTROL() & (1 << DMC0_P | 1 << DMC1_P)) {
switch (cpudata->dmemctl & (1 << DMC0_P | 1 << DMC1_P)) {
case ACACHE_BSRAM:
cache = "dbank-A/B\t: cache/sram";
dcache_size = 16;
......@@ -1058,10 +1105,10 @@ static int show_cpuinfo(struct seq_file *m, void *v)
}
/* Is it turned on? */
if ((bfin_read_DMEM_CONTROL() & (ENDCPLB | DMC_ENABLE)) != (ENDCPLB | DMC_ENABLE))