prom_init.c 78.6 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
/*
 * Procedures for interfacing to Open Firmware.
 *
 * Paul Mackerras	August 1996.
 * Copyright (C) 1996-2005 Paul Mackerras.
 * 
 *  Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
 *    {engebret|bergner}@us.ibm.com 
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#undef DEBUG_PROM

#include <stdarg.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/threads.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/stringify.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/bitops.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/pci.h>
#include <asm/iommu.h>
#include <asm/btext.h>
#include <asm/sections.h>
#include <asm/machdep.h>
45
#include <asm/opal.h>
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68

#include <linux/linux_logo.h>

/*
 * Eventually bump that one up
 */
#define DEVTREE_CHUNK_SIZE	0x100000

/*
 * This is the size of the local memory reserve map that gets copied
 * into the boot params passed to the kernel. That size is totally
 * flexible as the kernel just reads the list until it encounters an
 * entry with size 0, so it can be changed without breaking binary
 * compatibility
 */
#define MEM_RESERVE_MAP_SIZE	8

/*
 * prom_init() is called very early on, before the kernel text
 * and data have been mapped to KERNELBASE.  At this point the code
 * is running at whatever address it has been loaded at.
 * On ppc32 we compile with -mrelocatable, which means that references
 * to extern and static variables get relocated automatically.
 * ppc64 objects are always relocatable, we just need to relocate the
 * TOC.
 *
 * Because OF may have mapped I/O devices into the area starting at
 * KERNELBASE, particularly on CHRP machines, we can't safely call
 * OF once the kernel has been mapped to KERNELBASE.  Therefore all
 * OF calls must be done within prom_init().
 *
 * ADDR is used in calls to call_prom.  The 4th and following
 * arguments to call_prom should be 32-bit values.
 * On ppc64, 64 bit values are truncated to 32 bits (and
 * fortunately don't get interpreted as two arguments).
 */
82
83
#define ADDR(x)		(u32)(unsigned long)(x)

84
#ifdef CONFIG_PPC64
85
#define OF_WORKAROUNDS	0
86
#else
87
88
#define OF_WORKAROUNDS	of_workarounds
int of_workarounds;
89
90
#endif

91
92
93
#define OF_WA_CLAIM	1	/* do phys/virt claim separately, then map */
#define OF_WA_LONGTRAIL	2	/* work around longtrail bugs */

94
95
#define PROM_BUG() do {						\
        prom_printf("kernel BUG at %s line 0x%x!\n",		\
96
		    __FILE__, __LINE__);			\
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
        __asm__ __volatile__(".long " BUG_ILLEGAL_INSTR);	\
} while (0)

#ifdef DEBUG_PROM
#define prom_debug(x...)	prom_printf(x)
#else
#define prom_debug(x...)
#endif


typedef u32 prom_arg_t;

struct prom_args {
        u32 service;
        u32 nargs;
        u32 nret;
        prom_arg_t args[10];
};

struct prom_t {
	ihandle root;
118
	phandle chosen;
119
120
	int cpu;
	ihandle stdout;
121
	ihandle mmumap;
122
	ihandle memory;
123
124
125
};

struct mem_map_entry {
126
127
	u64	base;
	u64	size;
128
129
130
131
};

typedef u32 cell_t;

132
133
134
extern void __start(unsigned long r3, unsigned long r4, unsigned long r5,
		    unsigned long r6, unsigned long r7, unsigned long r8,
		    unsigned long r9);
135
136

#ifdef CONFIG_PPC64
137
extern int enter_prom(struct prom_args *args, unsigned long entry);
138
#else
139
static inline int enter_prom(struct prom_args *args, unsigned long entry)
140
{
141
	return ((int (*)(struct prom_args *))entry)(args);
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
}
#endif

extern void copy_and_flush(unsigned long dest, unsigned long src,
			   unsigned long size, unsigned long offset);

/* prom structure */
static struct prom_t __initdata prom;

static unsigned long prom_entry __initdata;

#define PROM_SCRATCH_SIZE 256

static char __initdata of_stdout_device[256];
static char __initdata prom_scratch[PROM_SCRATCH_SIZE];

static unsigned long __initdata dt_header_start;
static unsigned long __initdata dt_struct_start, dt_struct_end;
static unsigned long __initdata dt_string_start, dt_string_end;

static unsigned long __initdata prom_initrd_start, prom_initrd_end;

#ifdef CONFIG_PPC64
Jeremy Kerr's avatar
Jeremy Kerr committed
165
166
static int __initdata prom_iommu_force_on;
static int __initdata prom_iommu_off;
167
168
169
170
static unsigned long __initdata prom_tce_alloc_start;
static unsigned long __initdata prom_tce_alloc_end;
#endif

171
172
173
174
175
176
177
178
179
/* Platforms codes are now obsolete in the kernel. Now only used within this
 * file and ultimately gone too. Feel free to change them if you need, they
 * are not shared with anything outside of this file anymore
 */
#define PLATFORM_PSERIES	0x0100
#define PLATFORM_PSERIES_LPAR	0x0101
#define PLATFORM_LPAR		0x0001
#define PLATFORM_POWERMAC	0x0400
#define PLATFORM_GENERIC	0x0500
180
#define PLATFORM_OPAL		0x0600
181

182
183
184
185
static int __initdata of_platform;

static char __initdata prom_cmd_line[COMMAND_LINE_SIZE];

186
187
static unsigned long __initdata prom_memory_limit;

188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
static unsigned long __initdata alloc_top;
static unsigned long __initdata alloc_top_high;
static unsigned long __initdata alloc_bottom;
static unsigned long __initdata rmo_top;
static unsigned long __initdata ram_top;

static struct mem_map_entry __initdata mem_reserve_map[MEM_RESERVE_MAP_SIZE];
static int __initdata mem_reserve_cnt;

static cell_t __initdata regbuf[1024];


/*
 * Error results ... some OF calls will return "-1" on error, some
 * will return 0, some will return either. To simplify, here are
 * macros to use with any ihandle or phandle return value to check if
 * it is valid
 */

#define PROM_ERROR		(-1u)
#define PHANDLE_VALID(p)	((p) != 0 && (p) != PROM_ERROR)
#define IHANDLE_VALID(i)	((i) != 0 && (i) != PROM_ERROR)


/* This is the one and *ONLY* place where we actually call open
 * firmware.
 */

/*
 * Marshal a variadic request into a prom_args block and hand it to the
 * Open Firmware client interface.  Returns the first OF result cell,
 * 0 when no result cells were requested, or PROM_ERROR if entering the
 * firmware failed.
 */
static int __init call_prom(const char *service, int nargs, int nret, ...)
{
	struct prom_args args;
	va_list ap;
	int idx;

	args.service = ADDR(service);
	args.nargs = nargs;
	args.nret = nret;

	/* Clear the result cells up front; OF fills them in. */
	for (idx = 0; idx < nret; idx++)
		args.args[nargs + idx] = 0;

	va_start(ap, nret);
	for (idx = 0; idx < nargs; idx++)
		args.args[idx] = va_arg(ap, prom_arg_t);
	va_end(ap);

	if (enter_prom(&args, prom_entry) < 0)
		return PROM_ERROR;

	return (nret > 0) ? args.args[nargs] : 0;
}

/*
 * Like call_prom(), but additionally copies the second and following
 * OF result cells into @rets (when non-NULL).  Returns the first
 * result cell, 0 when nret == 0, or PROM_ERROR on failure.
 */
static int __init call_prom_ret(const char *service, int nargs, int nret,
				prom_arg_t *rets, ...)
{
	struct prom_args args;
	va_list ap;
	int idx;

	args.service = ADDR(service);
	args.nargs = nargs;
	args.nret = nret;

	/* Zero the result cells before entering the firmware. */
	for (idx = 0; idx < nret; idx++)
		args.args[nargs + idx] = 0;

	va_start(ap, rets);
	for (idx = 0; idx < nargs; idx++)
		args.args[idx] = va_arg(ap, prom_arg_t);
	va_end(ap);

	if (enter_prom(&args, prom_entry) < 0)
		return PROM_ERROR;

	/* Hand back the secondary results, if the caller wants them. */
	if (rets != NULL)
		for (idx = 1; idx < nret; ++idx)
			rets[idx - 1] = args.args[nargs + idx];

	return (nret > 0) ? args.args[nargs] : 0;
}


/*
 * Write a NUL-terminated string to the OF console, translating each
 * '\n' into the "\r\n" pair the firmware terminal expects.  Does
 * nothing until prom.stdout has been set up.
 */
static void __init prom_print(const char *msg)
{
	const char *seg, *end;

	if (prom.stdout == 0)
		return;

	seg = msg;
	while (*seg != 0) {
		/* Find the end of the current newline-free segment. */
		end = seg;
		while (*end != 0 && *end != '\n')
			++end;
		if (end != seg)
			call_prom("write", 3, 1, prom.stdout, seg, end - seg);
		if (*end == 0)
			break;
		call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2);
		seg = end + 1;
	}
}


/*
 * Print @val on the OF console as fixed-width, zero-padded lower-case
 * hex (two digits per byte of unsigned long, no "0x" prefix).
 */
static void __init prom_print_hex(unsigned long val)
{
	const int width = sizeof(val) * 2;
	char buf[sizeof(val) * 2 + 1];
	int pos;

	buf[width] = '\0';
	for (pos = width - 1; pos >= 0; pos--) {
		unsigned int nib = val & 0xf;

		buf[pos] = (nib < 10) ? ('0' + nib) : ('a' + nib - 10);
		val >>= 4;
	}
	call_prom("write", 3, 1, prom.stdout, buf, width);
}

305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
/* max number of decimal digits in an unsigned long */
#define UL_DIGITS 21
/*
 * Print @val on the OF console in decimal, without padding.  At least
 * one digit is always emitted (val == 0 prints "0").
 */
static void __init prom_print_dec(unsigned long val)
{
	char buf[UL_DIGITS + 1];
	int pos = UL_DIGITS;

	/* Fill the buffer from the end towards the front. */
	do {
		buf[--pos] = '0' + (val % 10);
		val /= 10;
	} while (val != 0 && pos > 0);
	call_prom("write", 3, 1, prom.stdout, buf + pos, UL_DIGITS - pos);
}
322
323
324
325
326
327

/*
 * Minimal printf for the OF console.  Only the conversions actually
 * used in this file are supported: %s, %x, %d, %lx, %lu and %ld.
 * '\n' is expanded to "\r\n"; an unrecognized character after '%' is
 * silently ignored (the switch has no default case).
 */
static void __init prom_printf(const char *format, ...)
{
	const char *p, *q, *s;
	va_list args;
	unsigned long v;
	long vs;

	va_start(args, format);
	for (p = format; *p != 0; p = q) {
		/* Emit the literal run up to the next '\n' or '%'. */
		for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
			;
		if (q > p)
			call_prom("write", 3, 1, prom.stdout, p, q - p);
		if (*q == 0)
			break;
		if (*q == '\n') {
			++q;
			call_prom("write", 3, 1, prom.stdout,
				  ADDR("\r\n"), 2);
			continue;
		}
		/* *q == '%': look at the conversion character. */
		++q;
		if (*q == 0)
			break;
		switch (*q) {
		case 's':
			++q;
			s = va_arg(args, const char *);
			prom_print(s);
			break;
		case 'x':
			++q;
			v = va_arg(args, unsigned long);
			prom_print_hex(v);
			break;
		case 'd':
			++q;
			vs = va_arg(args, int);
			if (vs < 0) {
				prom_print("-");
				vs = -vs; /* NOTE(review): UB for INT_MIN */
			}
			prom_print_dec(vs);
			break;
		case 'l':
			++q;
			if (*q == 0)
				break;
			else if (*q == 'x') {
				++q;
				v = va_arg(args, unsigned long);
				prom_print_hex(v);
			} else if (*q == 'u') { /* '%lu' */
				++q;
				v = va_arg(args, unsigned long);
				prom_print_dec(v);
			} else if (*q == 'd') { /* %ld */
				++q;
				vs = va_arg(args, long);
				if (vs < 0) {
					prom_print("-");
					vs = -vs; /* NOTE(review): UB for LONG_MIN */
				}
				prom_print_dec(vs);
			}
			break;
		}
	}
	/* NOTE(review): args is never va_end()'d — nonstandard; confirm intended. */
}


394
395
396
397
/*
 * Claim @size bytes at address @virt from Open Firmware.
 *
 * On firmwares with the OF_WA_CLAIM quirk (and only for align == 0),
 * memory must be claimed physically and virtually through the memory
 * and MMU packages separately, then mapped explicitly; the physical
 * claim is released again if the virtual one fails.  Otherwise the
 * plain "claim" client service is used.
 *
 * Returns the claimed address, or (unsigned int)-1 on failure.
 */
static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
				unsigned long align)
{

	if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) {
		/*
		 * Old OF requires we claim physical and virtual separately
		 * and then map explicitly (assuming virtual mode)
		 */
		int ret;
		prom_arg_t result;

		ret = call_prom_ret("call-method", 5, 2, &result,
				    ADDR("claim"), prom.memory,
				    align, size, virt);
		if (ret != 0 || result == -1)
			return -1;
		ret = call_prom_ret("call-method", 5, 2, &result,
				    ADDR("claim"), prom.mmumap,
				    align, size, virt);
		if (ret != 0) {
			/* Undo the physical claim before reporting failure. */
			call_prom("call-method", 4, 1, ADDR("release"),
				  prom.memory, size, virt);
			return -1;
		}
		/* the 0x12 is M (coherence) + PP == read/write */
		call_prom("call-method", 6, 1,
			  ADDR("map"), prom.mmumap, 0x12, size, virt, virt);
		return virt;
	}
	return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size,
			 (prom_arg_t)align);
}

428
429
430
/*
 * Report a fatal early-boot error on the OF console and stop.
 * Never returns.
 */
static void __init __attribute__((noreturn)) prom_panic(const char *reason)
{
	prom_print(reason);

	/*
	 * Do not use the firmware "exit" service on PowerMacs: it clears
	 * the screen, and on some early models it even double-faults.
	 * Trap instead.
	 */
	if (of_platform == PLATFORM_POWERMAC)
		asm("trap\n");

	/* ToDo: should put up an SRC here on pSeries */
	call_prom("exit", 0, 0);

	while (1)
		;	/* should never get here */
}


/*
 * Depth-first walk of the OF device tree: advance *nodep to the next
 * node — child first, then sibling, then the nearest ancestor's
 * sibling.  Returns 1 while a next node exists, 0 once exhausted.
 */
static int __init prom_next_node(phandle *nodep)
{
	phandle cur = *nodep;

	if (cur != 0) {
		*nodep = call_prom("child", 1, 1, cur);
		if (*nodep != 0)
			return 1;
	}
	*nodep = call_prom("peer", 1, 1, cur);
	if (*nodep != 0)
		return 1;
	while (1) {
		cur = call_prom("parent", 1, 1, cur);
		if (cur == 0)
			return 0;
		*nodep = call_prom("peer", 1, 1, cur);
		if (*nodep != 0)
			return 1;
	}
}

461
/* Thin wrapper: fetch property @pname of @node into @value. */
static int inline prom_getprop(phandle node, const char *pname,
			       void *value, size_t valuelen)
{
	u32 vaddr = (u32)(unsigned long)value;
	u32 vlen = (u32)valuelen;

	return call_prom("getprop", 4, 1, node, ADDR(pname), vaddr, vlen);
}

468
/* Thin wrapper: ask OF for the length of property @pname of @node. */
static int inline prom_getproplen(phandle node, const char *pname)
{
	int len = call_prom("getproplen", 2, 1, node, ADDR(pname));

	return len;
}

473
/*
 * Append the string @q plus a single trailing space at *str, advancing
 * *str past what was written.  No NUL terminator is added and no bounds
 * are checked; the caller owns the buffer.
 */
static void add_string(char **str, const char *q)
{
	char *out = *str;
	const char *in = q;

	for (; *in != '\0'; in++)
		*out++ = *in;
	*out = ' ';
	*str = out + 1;
}

/*
 * Convert @x to lower-case hex in a static buffer (no "0x" prefix, no
 * leading zeroes; x == 0 yields "0").  Returns a pointer into that
 * buffer, so the result is only valid until the next call.
 */
static char *tohex(unsigned int x)
{
	static char digits[] = "0123456789abcdef";
	static char result[9];
	int pos = 8;

	result[pos] = 0;
	do {
		pos--;
		result[pos] = digits[x % 16];
		x /= 16;
	} while (x != 0 && pos > 0);
	return result + pos;
}

/*
 * Set property @pname on @node.  Normally uses the "setprop" client
 * service; on Longtrail firmware (OF_WA_LONGTRAIL), where setprop is
 * broken, build a Forth "dev <node> ... property" command in a local
 * buffer and run it through "interpret" instead.
 */
static int __init prom_setprop(phandle node, const char *nodename,
			       const char *pname, void *value, size_t valuelen)
{
	char cmd[256];
	char *cur;

	if (!(OF_WORKAROUNDS & OF_WA_LONGTRAIL))
		return call_prom("setprop", 4, 1, node, ADDR(pname),
				 (u32)(unsigned long) value, (u32) valuelen);

	/* gah... setprop doesn't work on longtrail, have to use interpret */
	cur = cmd;
	add_string(&cur, "dev");
	add_string(&cur, nodename);
	add_string(&cur, tohex((u32)(unsigned long) value));
	add_string(&cur, tohex(valuelen));
	add_string(&cur, tohex(ADDR(pname)));
	add_string(&cur, tohex(strlen(pname)));
	add_string(&cur, "property");
	*cur = 0;
	return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd);
}

521
/* We can't use the standard versions because of relocation headaches. */
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
#define isxdigit(c)	(('0' <= (c) && (c) <= '9') \
			 || ('a' <= (c) && (c) <= 'f') \
			 || ('A' <= (c) && (c) <= 'F'))

#define isdigit(c)	('0' <= (c) && (c) <= '9')
#define islower(c)	('a' <= (c) && (c) <= 'z')
#define toupper(c)	(islower(c) ? ((c) - 'a' + 'A') : (c))

/*
 * Minimal strtoul replacement usable before relocation.  Bases 8, 10
 * and 16 are selected by "0" / "0x" prefixes; digits may be upper or
 * lower case.  If @endp is non-NULL it receives a pointer to the first
 * character that was not consumed.
 */
unsigned long prom_strtoul(const char *cp, const char **endp)
{
	unsigned long acc = 0;
	unsigned long radix = 10;
	unsigned long digit;

	if (*cp == '0') {
		cp++;
		radix = 8;
		if (toupper(*cp) == 'X') {
			cp++;
			radix = 16;
		}
	}

	for (;;) {
		char c = *cp;

		if (!isxdigit(c))
			break;
		digit = isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
		if (digit >= radix)
			break;
		acc = acc * radix + digit;
		cp++;
	}

	if (endp)
		*endp = cp;

	return acc;
}

/*
 * Parse a size string such as "512M" or "1g": a number (as accepted by
 * prom_strtoul) followed by an optional K/M/G suffix.  *retptr is
 * advanced past everything consumed.
 *
 * An if/else ladder is used deliberately instead of a switch: GCC may
 * compile a switch into a jump table, which doesn't work while we are
 * running away from our link address.
 */
unsigned long prom_memparse(const char *ptr, const char **retptr)
{
	unsigned long val = prom_strtoul(ptr, retptr);
	int shift = 0;
	char suffix = **retptr;

	if (suffix == 'G' || suffix == 'g')
		shift = 30;

	if (suffix == 'M' || suffix == 'm')
		shift = 20;

	if (suffix == 'K' || suffix == 'k')
		shift = 10;

	if (shift) {
		val <<= shift;
		(*retptr)++;
	}

	return val;
}

582
583
584
585
586
587
/*
 * Early parsing of the command line passed to the kernel, used for
 * "mem=x" and the options that affect the iommu
 */
/*
 * Early parse of the kernel command line: fetch "bootargs" from
 * /chosen (falling back to CONFIG_CMDLINE), then pick out the options
 * this file cares about — "iommu=off"/"iommu=force" (ppc64 only) and
 * "mem=<size>".
 */
static void __init early_cmdline_parse(void)
{
	const char *opt;

	char *p;
	int l = 0;

	/* Pull the firmware-provided bootargs into prom_cmd_line. */
	prom_cmd_line[0] = 0;
	p = prom_cmd_line;
	if ((long)prom.chosen > 0)
		l = prom_getprop(prom.chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
#ifdef CONFIG_CMDLINE
	if (l <= 0 || p[0] == '\0') /* dbl check */
		strlcpy(prom_cmd_line,
			CONFIG_CMDLINE, sizeof(prom_cmd_line));
#endif /* CONFIG_CMDLINE */
	prom_printf("command line: %s\n", prom_cmd_line);

#ifdef CONFIG_PPC64
	/* Record iommu= policy for the later TCE setup. */
	opt = strstr(prom_cmd_line, "iommu=");
	if (opt) {
		prom_printf("iommu opt is: %s\n", opt);
		opt += 6;
		while (*opt && *opt == ' ')
			opt++;
		if (!strncmp(opt, "off", 3))
			prom_iommu_off = 1;
		else if (!strncmp(opt, "force", 5))
			prom_iommu_force_on = 1;
	}
#endif
	/* mem=<size> caps the memory the kernel will use. */
	opt = strstr(prom_cmd_line, "mem=");
	if (opt) {
		opt += 4;
		prom_memory_limit = prom_memparse(opt, (const char **)&opt);
#ifdef CONFIG_PPC64
		/* Align to 16 MB == size of ppc64 large page */
		prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000);
#endif
	}
}

628
#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
629
/*
630
631
632
633
634
 * The architecture vector has an array of PVR mask/value pairs,
 * followed by # option vectors - 1, followed by the option vectors.
 *
 * See prom.h for the definition of the bits specified in the
 * architecture vector.
635
636
637
638
639
640
641
642
 *
 * Because the description vector contains a mix of byte and word
 * values, we declare it as an unsigned char array, and use this
 * macro to put word values in.
 */
#define W(x)	((x) >> 24) & 0xff, ((x) >> 16) & 0xff, \
		((x) >> 8) & 0xff, (x) & 0xff

643
unsigned char ibm_architecture_vec[] = {
644
	W(0xfffe0000), W(0x003a0000),	/* POWER5/POWER5+ */
645
	W(0xffff0000), W(0x003e0000),	/* POWER6 */
646
	W(0xffff0000), W(0x003f0000),	/* POWER7 */
647
648
	W(0xffff0000), W(0x004b0000),	/* POWER8 */
	W(0xffffffff), W(0x0f000004),	/* all 2.07-compliant */
649
	W(0xffffffff), W(0x0f000003),	/* all 2.06-compliant */
650
	W(0xffffffff), W(0x0f000002),	/* all 2.05-compliant */
651
	W(0xfffffffe), W(0x0f000001),	/* all 2.04-compliant and earlier */
652
	6 - 1,				/* 6 option vectors */
653
654

	/* option vector 1: processor architectures supported */
655
	3 - 2,				/* length */
656
657
	0,				/* don't ignore, don't halt */
	OV1_PPC_2_00 | OV1_PPC_2_01 | OV1_PPC_2_02 | OV1_PPC_2_03 |
658
	OV1_PPC_2_04 | OV1_PPC_2_05 | OV1_PPC_2_06 | OV1_PPC_2_07,
659
660

	/* option vector 2: Open Firmware options supported */
661
	34 - 2,				/* length */
662
663
664
665
666
667
668
	OV2_REAL_MODE,
	0, 0,
	W(0xffffffff),			/* real_base */
	W(0xffffffff),			/* real_size */
	W(0xffffffff),			/* virt_base */
	W(0xffffffff),			/* virt_size */
	W(0xffffffff),			/* load_base */
669
	W(256),				/* 256MB min RMA */
670
671
672
673
674
	W(0xffffffff),			/* full client load */
	0,				/* min RMA percentage of total RAM */
	48,				/* max log_2(hash table size) */

	/* option vector 3: processor options supported */
675
	3 - 2,				/* length */
676
	0,				/* don't ignore, don't halt */
677
	OV3_FP | OV3_VMX | OV3_DFP,
678
679

	/* option vector 4: IBM PAPR implementation */
680
	3 - 2,				/* length */
681
	0,				/* don't halt */
682
	OV4_MIN_ENT_CAP,		/* minimum VP entitled capacity */
683
684

	/* option vector 5: PAPR/OF options */
685
	19 - 2,				/* length */
686
	0,				/* don't ignore, don't halt */
687
688
689
690
691
692
693
694
	OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
	OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
#ifdef CONFIG_PCI_MSI
	/* PCIe/MSI support.  Without MSI full PCIe is not supported */
	OV5_FEAT(OV5_MSI),
#else
	0,
#endif
695
	0,
696
697
698
699
700
#ifdef CONFIG_PPC_SMLPAR
	OV5_FEAT(OV5_CMO) | OV5_FEAT(OV5_XCMO),
#else
	0,
#endif
701
	OV5_FEAT(OV5_TYPE1_AFFINITY) | OV5_FEAT(OV5_PRRN),
702
703
704
	0,
	0,
	0,
705
706
707
708
	/* WARNING: The offset of the "number of cores" field below
	 * must match by the macro below. Update the definition if
	 * the structure layout changes.
	 */
709
#define IBM_ARCH_VEC_NRCORES_OFFSET	117
710
	W(NR_CPUS),			/* number of cores supported */
711
712
713
714
	0,
	0,
	0,
	0,
715
716
717
	OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) |
	OV5_FEAT(OV5_PFO_HW_842),
	OV5_FEAT(OV5_SUB_PROCESSORS),
718
719
720
721
722
723
	/* option vector 6: IBM PAPR hints */
	4 - 2,				/* length */
	0,
	0,
	OV6_LINUX,

724
725
726
};

/* Old method - ELF header with PT_NOTE sections */
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
static struct fake_elf {
	Elf32_Ehdr	elfhdr;
	Elf32_Phdr	phdr[2];
	struct chrpnote {
		u32	namesz;
		u32	descsz;
		u32	type;
		char	name[8];	/* "PowerPC" */
		struct chrpdesc {
			u32	real_mode;
			u32	real_base;
			u32	real_size;
			u32	virt_base;
			u32	virt_size;
			u32	load_base;
		} chrpdesc;
	} chrpnote;
	struct rpanote {
		u32	namesz;
		u32	descsz;
		u32	type;
		char	name[24];	/* "IBM,RPA-Client-Config" */
		struct rpadesc {
			u32	lpar_affinity;
			u32	min_rmo_size;
			u32	min_rmo_percent;
			u32	max_pft_size;
			u32	splpar;
			u32	min_load;
			u32	new_mem_def;
			u32	ignore_me;
		} rpadesc;
	} rpanote;
760
} fake_elf = {
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
	.elfhdr = {
		.e_ident = { 0x7f, 'E', 'L', 'F',
			     ELFCLASS32, ELFDATA2MSB, EV_CURRENT },
		.e_type = ET_EXEC,	/* yeah right */
		.e_machine = EM_PPC,
		.e_version = EV_CURRENT,
		.e_phoff = offsetof(struct fake_elf, phdr),
		.e_phentsize = sizeof(Elf32_Phdr),
		.e_phnum = 2
	},
	.phdr = {
		[0] = {
			.p_type = PT_NOTE,
			.p_offset = offsetof(struct fake_elf, chrpnote),
			.p_filesz = sizeof(struct chrpnote)
		}, [1] = {
			.p_type = PT_NOTE,
			.p_offset = offsetof(struct fake_elf, rpanote),
			.p_filesz = sizeof(struct rpanote)
		}
	},
	.chrpnote = {
		.namesz = sizeof("PowerPC"),
		.descsz = sizeof(struct chrpdesc),
		.type = 0x1275,
		.name = "PowerPC",
		.chrpdesc = {
			.real_mode = ~0U,	/* ~0 means "don't care" */
			.real_base = ~0U,
			.real_size = ~0U,
			.virt_base = ~0U,
			.virt_size = ~0U,
			.load_base = ~0U
		},
	},
	.rpanote = {
		.namesz = sizeof("IBM,RPA-Client-Config"),
		.descsz = sizeof(struct rpadesc),
		.type = 0x12759999,
		.name = "IBM,RPA-Client-Config",
		.rpadesc = {
802
803
			.lpar_affinity = 0,
			.min_rmo_size = 64,	/* in megabytes */
804
			.min_rmo_percent = 0,
805
			.max_pft_size = 48,	/* 2^48 bytes max PFT size */
806
807
			.splpar = 1,
			.min_load = ~0U,
808
			.new_mem_def = 0
809
810
811
812
		}
	}
};

813
814
815
816
817
818
819
820
821
822
823
/*
 * Determine the number of SMT threads per core from the device tree:
 * the first "cpu" node's "ibm,ppc-interrupt-server#s" property holds
 * one 4-byte entry per thread (all cpus are assumed identical).
 * Falls back to 1 on any problem or implausible value.
 */
static int __init prom_count_smt_threads(void)
{
	phandle node = 0;
	char devtype[64];
	unsigned int nthreads;

	/* Walk the tree until we hit the first CPU node. */
	while (prom_next_node(&node)) {
		devtype[0] = 0;
		prom_getprop(node, "device_type", devtype, sizeof(devtype));
		if (strcmp(devtype, "cpu") != 0)
			continue;

		/*
		 * One entry per smt thread, each entry being 4 bytes
		 * long; return after the first CPU node found.
		 */
		nthreads = prom_getproplen(node, "ibm,ppc-interrupt-server#s");
		if (nthreads == PROM_ERROR)
			break;
		nthreads >>= 2;
		prom_debug("Found %lu smt threads per core\n",
			   (unsigned long)nthreads);

		/* Sanity check */
		if (nthreads < 1 || nthreads > 64) {
			prom_printf("Threads per core %lu out of bounds, assuming 1\n",
				    (unsigned long)nthreads);
			return 1;
		}
		return nthreads;
	}
	prom_debug("No threads found, assuming 1 per core\n");

	return 1;
}


852
853
/*
 * Negotiate kernel capabilities with the firmware.  First tries the
 * PAPR "ibm,client-architecture-support" method with
 * ibm_architecture_vec (after patching in the real supported core
 * count); if that method doesn't exist, falls back to feeding the
 * fake ELF notes to /packages/elf-loader.
 */
static void __init prom_send_capabilities(void)
{
	ihandle elfloader, root;
	prom_arg_t ret;
	u32 *cores;

	root = call_prom("open", 1, 1, ADDR("/"));
	if (root != 0) {
		/* We need to tell the FW about the number of cores we support.
		 *
		 * To do that, we count the number of threads on the first core
		 * (we assume this is the same for all cores) and use it to
		 * divide NR_CPUS.
		 */
		cores = (u32 *)&ibm_architecture_vec[IBM_ARCH_VEC_NRCORES_OFFSET];
		if (*cores != NR_CPUS) {
			/* Offset drifted from the vector layout — leave it alone. */
			prom_printf("WARNING ! "
				    "ibm_architecture_vec structure inconsistent: %lu!\n",
				    *cores);
		} else {
			/* NOTE(review): *cores is u32 printed with %lu, which
			 * va_args an unsigned long in prom_printf — confirm
			 * this is benign on this ABI. */
			*cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads());
			prom_printf("Max number of cores passed to firmware: %lu (NR_CPUS = %lu)\n",
				    *cores, NR_CPUS);
		}

		/* try calling the ibm,client-architecture-support method */
		prom_printf("Calling ibm,client-architecture-support...");
		if (call_prom_ret("call-method", 3, 2, &ret,
				  ADDR("ibm,client-architecture-support"),
				  root,
				  ADDR(ibm_architecture_vec)) == 0) {
			/* the call exists... */
			if (ret)
				prom_printf("\nWARNING: ibm,client-architecture"
					    "-support call FAILED!\n");
			call_prom("close", 1, 0, root);
			prom_printf(" done\n");
			return;
		}
		call_prom("close", 1, 0, root);
		prom_printf(" not implemented\n");
	}

	/* no ibm,client-architecture-support call, try the old way */
	elfloader = call_prom("open", 1, 1, ADDR("/packages/elf-loader"));
	if (elfloader == 0) {
		prom_printf("couldn't open /packages/elf-loader\n");
		return;
	}
	call_prom("call-method", 3, 1, ADDR("process-elf-header"),
			elfloader, ADDR(&fake_elf));
	call_prom("close", 1, 0, elfloader);
}
#endif

/*
 * Memory allocation strategy... our layout is normally:
 *
 *  at 14Mb or more we have vmlinux, then a gap and initrd.  In some
 *  rare cases, initrd might end up being before the kernel though.
 *  We assume this won't override the final kernel at 0, we have no
 *  provision to handle that in this version, but it should hopefully
 *  never happen.
 *
 *  alloc_top is set to the top of RMO, eventually shrink down if the
 *  TCEs overlap
 *
 *  alloc_bottom is set to the top of kernel/initrd
 *
 *  from there, allocations are done this way : rtas is allocated
 *  topmost, and the device-tree is allocated from the bottom. We try
 *  to grow the device-tree allocation as we progress. If we can't,
 *  then we fail, we don't currently have a facility to restart
 *  elsewhere, but that shouldn't be necessary.
 *
 *  Note that calls to reserve_mem have to be done explicitly, memory
 *  allocated with either alloc_up or alloc_down isn't automatically
 *  reserved.
 */


/*
 * Allocates memory in the RMO upward from the kernel/initrd
 *
 * When align is 0, this is a special case, it means to allocate in place
 * at the current location of alloc_bottom or fail (that is basically
 * extending the previous allocation). Used for the device-tree flattening
 */
/*
 * Allocate @size bytes in the RMO, searching upward from alloc_bottom.
 *
 * @align == 0 is the "extend in place" special case: only the current
 * alloc_bottom is tried, so the allocation either continues the
 * previous one or fails.  Returns the claimed address (and bumps
 * alloc_bottom past it), or 0 on failure.
 *
 * Fix: the original computed "base = _ALIGN_UP(alloc_bottom, align)"
 * twice — the first computation (before the sanity checks) was a dead
 * store, unconditionally overwritten by the identical recomputation.
 * It is done exactly once now; behavior is unchanged.
 */
static unsigned long __init alloc_up(unsigned long size, unsigned long align)
{
	unsigned long base;
	unsigned long addr = 0;

	prom_debug("alloc_up(%x, %x)\n", size, align);
	if (ram_top == 0)
		prom_panic("alloc_up() called with mem not initialized\n");

	/* Start just above the previous allocation, rounded up if asked. */
	if (align)
		base = _ALIGN_UP(alloc_bottom, align);
	else
		base = alloc_bottom;

	/*
	 * Probe upward in 1MB steps until OF grants the claim or we run
	 * into alloc_top.  With align == 0, only one attempt is made.
	 */
	for (; (base + size) <= alloc_top;
	     base = _ALIGN_UP(base + 0x100000, align)) {
		prom_debug("    trying: 0x%x\n\r", base);
		addr = (unsigned long)prom_claim(base, size, 0);
		if (addr != PROM_ERROR && addr != 0)
			break;
		addr = 0;
		if (align == 0)
			break;
	}
	if (addr == 0)
		return 0;
	alloc_bottom = addr + size;

	prom_debug(" -> %x\n", addr);
	prom_debug("  alloc_bottom : %x\n", alloc_bottom);
	prom_debug("  alloc_top    : %x\n", alloc_top);
	prom_debug("  alloc_top_hi : %x\n", alloc_top_high);
	prom_debug("  rmo_top      : %x\n", rmo_top);
	prom_debug("  ram_top      : %x\n", ram_top);

	return addr;
}

/*
 * Allocates memory downward, either from top of RMO, or if highmem
 * is set, from the top of RAM.  Note that this one doesn't handle
 * failures.  It does claim memory if highmem is not set.
 */
static unsigned long __init alloc_down(unsigned long size, unsigned long align,
				       int highmem)
{
	unsigned long base, addr = 0;

	prom_debug("alloc_down(%x, %x, %s)\n", size, align,
991
992
		   highmem ? "(high)" : "(low)");
	if (ram_top == 0)
993
994
995
996
		prom_panic("alloc_down() called with mem not initialized\n");

	if (highmem) {
		/* Carve out storage for the TCE table. */
997
998
		addr = _ALIGN_DOWN(alloc_top_high - size, align);
		if (addr <= alloc_bottom)
999
1000
			return 0;
		/* Will we bump into the RMO ? If yes, check out that we
For faster browsing, not all history is shown. View entire blame