/*
 * arch/sh/kernel/smp.c
 *
 * SMP support for the SuperH processors.
 *
 * Copyright (C) 2002 - 2010 Paul Mundt
 * Copyright (C) 2006 - 2007 Akio Idehara
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/err.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/clockchips.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>

int __cpu_number_map[NR_CPUS];		/* Map physical to logical */
int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */

struct plat_smp_ops *mp_ops = NULL;

/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

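/*
 * Record the platform's SMP operations. A later registration simply
 * overrides the previous one, with a warning.
 */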
void register_smp_ops(struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}

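/*
 * Seed this CPU's cpuinfo from the boot CPU's template, then record the
 * loops_per_jiffy value the CPU has just calibrated for itself.
 */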
static inline void smp_store_cpu_info(unsigned int cpu)
{
	struct sh_cpuinfo *c = cpu_data + cpu;

	memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo));

	c->loops_per_jiffy = loops_per_jiffy;
}

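/*
 * Prepare for SMP bring-up: initialize the boot CPU's context on
 * init_mm, record its CPU number, and hand the rest off to the
 * platform's prepare_cpus(). Without CPU hotplug, every possible
 * CPU is also marked present here.
 */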
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu = smp_processor_id();

	init_new_context(current, &init_mm);
	current_thread_info()->cpu = cpu;
	mp_ops->prepare_cpus(max_cpus);

#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
}

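/*
 * Set up the boot CPU: identity-map it in the physical<->logical maps
 * and mark it possible, online and running.
 */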
void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	__cpu_number_map[0] = cpu;
	__cpu_logical_map[0] = cpu;

	set_cpu_online(cpu, true);
	set_cpu_possible(cpu, true);

	per_cpu(cpu_state, cpu) = CPU_ONLINE;
}

#ifdef CONFIG_HOTPLUG_CPU
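/* Poll for up to a second for the dying CPU to reach CPU_DEAD. */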
void native_cpu_die(unsigned int cpu)
{
	unsigned int i;

	for (i = 0; i < 10; i++) {
		smp_rmb();
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			if (system_state == SYSTEM_RUNNING)
				pr_info("CPU %u is now offline\n", cpu);

			return;
		}

		msleep(100);
	}

	pr_err("CPU %u didn't die...\n", cpu);
}

int native_cpu_disable(unsigned int cpu)
{
	return cpu == 0 ? -EPERM : 0;
}

void play_dead_common(void)
{
	idle_task_exit();
	irq_ctx_exit(raw_smp_processor_id());
	mb();

	__this_cpu_write(cpu_state, CPU_DEAD);
	local_irq_disable();
}

void native_play_dead(void)
{
	play_dead_common();
}

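/*
 * Take the calling CPU out of service: give the platform a chance to
 * veto, then mark the CPU offline, migrate its IRQs away, and drop any
 * per-CPU cache, TLB and mm state it still holds.
 */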
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = mp_ops->cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	flush_cache_all();
#ifdef CONFIG_MMU
	local_flush_tlb_all();
#endif

	clear_tasks_mm_cpumask(cpu);

	return 0;
}
#else /* ... !CONFIG_HOTPLUG_CPU */
int native_cpu_disable(unsigned int cpu)
{
	return -ENOSYS;
}

void native_cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}

void native_play_dead(void)
{
	BUG();
}
#endif

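/*
 * C entry point for secondary CPUs; __cpu_up() plants it in stack_start
 * for the head.S startup code to jump to.
 */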
asmlinkage void start_secondary(void)
{
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = &init_mm;

	enable_mmu();
	atomic_inc(&mm->mm_count);
	atomic_inc(&mm->mm_users);
	current->active_mm = mm;
#ifdef CONFIG_MMU
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();
#endif

	per_cpu_trap_init();

	preempt_disable();

	notify_cpu_starting(cpu);

	local_irq_enable();

	calibrate_delay();

	smp_store_cpu_info(cpu);

	set_cpu_online(cpu, true);
	per_cpu(cpu_state, cpu) = CPU_ONLINE;

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

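/* Boot parameters consumed by the secondary startup code in head.S. */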
extern struct {
	unsigned long sp;
	unsigned long bss_start;
	unsigned long bss_end;
	void *start_kernel_fn;
	void *cpu_init_fn;
	void *thread_info;
} stack_start;

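/*
 * Bring up a secondary CPU: publish its stack and entry point through
 * stack_start, flush that range so the waking CPU is guaranteed to see
 * it, kick the CPU via the platform op, then poll for up to a second
 * for it to mark itself online.
 */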
int __cpu_up(unsigned int cpu, struct task_struct *tsk)
{
	unsigned long timeout;

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* Fill in data in head.S for secondary cpus */
	stack_start.sp = tsk->thread.sp;
	stack_start.thread_info = tsk->stack;
	stack_start.bss_start = 0; /* don't clear bss for secondary cpus */
	stack_start.start_kernel_fn = start_secondary;

	flush_icache_range((unsigned long)&stack_start,
			   (unsigned long)&stack_start + sizeof(stack_start));
	wmb();

	mp_ops->start_cpu(cpu, (unsigned long)_stext);

	timeout = jiffies + HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu_online(cpu))
			break;

		udelay(10);
		barrier();
	}

	if (cpu_online(cpu))
		return 0;

	return -ENOENT;
}

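/* All processors are up; report the combined BogoMIPS rating. */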
void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	int cpu;

	for_each_online_cpu(cpu)
		bogosum += cpu_data[cpu].loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n", num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}

void smp_send_reschedule(int cpu)
{
	mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE);
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, 0, 0);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION);
}

void arch_send_call_function_single_ipi(int cpu)
{
	mp_ops->send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
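/*
 * Relay a clockevents broadcast to each target CPU as an SMP_MSG_TIMER
 * IPI, handled by ipi_timer() below.
 */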
void tick_broadcast(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		mp_ops->send_ipi(cpu, SMP_MSG_TIMER);
}

static void ipi_timer(void)
{
	irq_enter();
	tick_receive_broadcast();
	irq_exit();
}
#endif

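/* Demultiplex an IPI message raised via mp_ops->send_ipi(). */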
void smp_message_recv(unsigned int msg)
{
	switch (msg) {
	case SMP_MSG_FUNCTION:
		generic_smp_call_function_interrupt();
		break;
	case SMP_MSG_RESCHEDULE:
		scheduler_ipi();
		break;
	case SMP_MSG_FUNCTION_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case SMP_MSG_TIMER:
		ipi_timer();
		break;
#endif
	default:
		printk(KERN_WARNING "SMP %d: %s(): unknown IPI %d\n",
		       smp_processor_id(), __func__, msg);
		break;
	}
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

#ifdef CONFIG_MMU

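/*
 * SMP TLB shootdown helpers: each flush_tlb_*() variant below either
 * broadcasts an IPI so every relevant CPU runs its local_flush_tlb_*()
 * counterpart, or, for address spaces not in use elsewhere, simply
 * invalidates the stale contexts on the other CPUs.
 */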
static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, 0, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, intercpu interrupts have to be sent.
 * Another case where intercpu interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debugees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1);
	} else {
		int i;
		for_each_online_cpu(i)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_mm(mm);

	preempt_enable();
}

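/* Argument block handed through smp_call_function() to the IPI handlers. */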
struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();
	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = start;
		fd.addr2 = end;
		smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1);
	} else {
		int i;
		for_each_online_cpu(i)
			if (smp_processor_id() != i)
				cpu_context(i, mm) = 0;
	}
	local_flush_tlb_range(vma, start, end);
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd;

	fd.addr1 = start;
	fd.addr2 = end;
	on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();
	if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
	    (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd;

		fd.vma = vma;
		fd.addr1 = page;
		smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1);
	} else {
		int i;
		for_each_online_cpu(i)
			if (smp_processor_id() != i)
				cpu_context(i, vma->vm_mm) = 0;
	}
	local_flush_tlb_page(vma, page);
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
	local_flush_tlb_one(fd->addr1, fd->addr2);
}

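/* Flush a single (ASID, vaddr) translation on all CPUs. */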
void flush_tlb_one(unsigned long asid, unsigned long vaddr)
{
	struct flush_tlb_data fd;

	fd.addr1 = asid;
	fd.addr2 = vaddr;

	smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1);
	local_flush_tlb_one(asid, vaddr);
}

#endif