/* mdesc.c: Sun4V machine description handling.
 *
 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/lmb.h>
#include <linux/log2.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>

#include <asm/cpudata.h>
#include <asm/hypervisor.h>
#include <asm/mdesc.h>
#include <asm/prom.h>
#include <asm/oplib.h>
#include <asm/smp.h>

/* Unlike the OBP device tree, the machine description is a full-on
 * DAG.  An arbitrary number of ARCs are possible from one
 * node to other nodes and thus we can't use the OBP device_node
 * data structure to represent these nodes inside of the kernel.
 *
 * Actually, it isn't even a DAG, because there are back pointers
 * which create cycles in the graph.
 *
 * mdesc_hdr and mdesc_elem describe the layout of the data structure
 * we get from the Hypervisor.
 */
struct mdesc_hdr {
	u32	version; /* Transport version */
	u32	node_sz; /* node block size */
	u32	name_sz; /* name block size */
	u32	data_sz; /* data block size */
} __attribute__((aligned(16)));

struct mdesc_elem {
	u8	tag;
#define MD_LIST_END	0x00
#define MD_NODE		0x4e
#define MD_NODE_END	0x45
#define MD_NOOP		0x20
#define MD_PROP_ARC	0x61
#define MD_PROP_VAL	0x76
#define MD_PROP_STR	0x73
#define MD_PROP_DATA	0x64
	u8	name_len;
	u16	resv;
	u32	name_offset;
	union {
		struct {
			u32	data_len;
			u32	data_offset;
		} data;
		u64	val;
	} d;
};
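
/* A rough sketch of the MD blob as the Hypervisor hands it to us,
 * reconstructed from node_block()/name_block()/data_block() below:
 *
 *	+------------------------+ <-- &hp->mdesc (16-byte aligned)
 *	| struct mdesc_hdr       |
 *	+------------------------+
 *	| node block             | node_sz bytes, an array of 16-byte
 *	|                        | mdesc_elem entries indexed by node/arc
 *	+------------------------+
 *	| name block             | name_sz bytes of NUL-terminated strings,
 *	|                        | referenced via name_offset
 *	+------------------------+
 *	| data block             | data_sz bytes of property data,
 *	|                        | referenced via data_offset/data_len
 *	+------------------------+
 */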

struct mdesc_mem_ops {
	struct mdesc_handle *(*alloc)(unsigned int mdesc_size);
	void (*free)(struct mdesc_handle *handle);
};

struct mdesc_handle {
	struct list_head	list;
	struct mdesc_mem_ops	*mops;
	void			*self_base;
	atomic_t		refcnt;
	unsigned int		handle_size;
	struct mdesc_hdr	mdesc;
};

static void mdesc_handle_init(struct mdesc_handle *hp,
			      unsigned int handle_size,
			      void *base)
{
	BUG_ON(((unsigned long)&hp->mdesc) & (16UL - 1));

	memset(hp, 0, handle_size);
	INIT_LIST_HEAD(&hp->list);
	hp->self_base = base;
	atomic_set(&hp->refcnt, 1);
	hp->handle_size = handle_size;
}

static struct mdesc_handle * __init mdesc_lmb_alloc(unsigned int mdesc_size)
{
	unsigned int handle_size, alloc_size;
	struct mdesc_handle *hp;
	unsigned long paddr;

	handle_size = (sizeof(struct mdesc_handle) -
		       sizeof(struct mdesc_hdr) +
		       mdesc_size);
	alloc_size = PAGE_ALIGN(handle_size);

	paddr = lmb_alloc(alloc_size, PAGE_SIZE);

	hp = NULL;
	if (paddr) {
		hp = __va(paddr);
		mdesc_handle_init(hp, handle_size, hp);
	}
	return hp;
}

static void mdesc_lmb_free(struct mdesc_handle *hp)
{
	unsigned int alloc_size, handle_size = hp->handle_size;
	unsigned long start, end;

	BUG_ON(atomic_read(&hp->refcnt) != 0);
	BUG_ON(!list_empty(&hp->list));

	alloc_size = PAGE_ALIGN(handle_size);

	start = (unsigned long) hp;
	end = start + alloc_size;

	while (start < end) {
		struct page *p;

		p = virt_to_page(start);
		ClearPageReserved(p);
		__free_page(p);
		start += PAGE_SIZE;
	}
}

static struct mdesc_mem_ops lmb_mdesc_ops = {
	.alloc = mdesc_lmb_alloc,
	.free  = mdesc_lmb_free,
};

static struct mdesc_handle *mdesc_kmalloc(unsigned int mdesc_size)
{
	unsigned int handle_size;
	void *base;

	handle_size = (sizeof(struct mdesc_handle) -
		       sizeof(struct mdesc_hdr) +
		       mdesc_size);

	base = kmalloc(handle_size + 15, GFP_KERNEL | __GFP_NOFAIL);
	if (base) {
		struct mdesc_handle *hp;
		unsigned long addr;

		addr = (unsigned long)base;
		addr = (addr + 15UL) & ~15UL;
		hp = (struct mdesc_handle *) addr;

		mdesc_handle_init(hp, handle_size, base);
		return hp;
	}

	return NULL;
}

static void mdesc_kfree(struct mdesc_handle *hp)
{
	BUG_ON(atomic_read(&hp->refcnt) != 0);
	BUG_ON(!list_empty(&hp->list));

	kfree(hp->self_base);
}

static struct mdesc_mem_ops kmalloc_mdesc_memops = {
	.alloc = mdesc_kmalloc,
	.free  = mdesc_kfree,
};

static struct mdesc_handle *mdesc_alloc(unsigned int mdesc_size,
					struct mdesc_mem_ops *mops)
{
	struct mdesc_handle *hp = mops->alloc(mdesc_size);

	if (hp)
		hp->mops = mops;

	return hp;
}

static void mdesc_free(struct mdesc_handle *hp)
{
	hp->mops->free(hp);
}

static struct mdesc_handle *cur_mdesc;
static LIST_HEAD(mdesc_zombie_list);
static DEFINE_SPINLOCK(mdesc_lock);

struct mdesc_handle *mdesc_grab(void)
{
	struct mdesc_handle *hp;
	unsigned long flags;

	spin_lock_irqsave(&mdesc_lock, flags);
	hp = cur_mdesc;
	if (hp)
		atomic_inc(&hp->refcnt);
	spin_unlock_irqrestore(&mdesc_lock, flags);

	return hp;
}
EXPORT_SYMBOL(mdesc_grab);

void mdesc_release(struct mdesc_handle *hp)
{
	unsigned long flags;

	spin_lock_irqsave(&mdesc_lock, flags);
	if (atomic_dec_and_test(&hp->refcnt)) {
		list_del_init(&hp->list);
		hp->mops->free(hp);
	}
	spin_unlock_irqrestore(&mdesc_lock, flags);
}
EXPORT_SYMBOL(mdesc_release);
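
/* An illustrative consumer sketch (not called from this file): grab a
 * reference, walk the graph, then release it so a later mdesc_update()
 * can free the old MD once nobody is looking at it.
 *
 *	struct mdesc_handle *hp = mdesc_grab();
 *	u64 pn;
 *
 *	if (!hp)
 *		return;
 *	mdesc_for_each_node_by_name(hp, pn, "platform") {
 *		const u64 *v = mdesc_get_property(hp, pn, "hostid", NULL);
 *		if (v)
 *			printk(KERN_INFO "hostid %llx\n", *v);
 *	}
 *	mdesc_release(hp);
 */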

static DEFINE_MUTEX(mdesc_mutex);
static struct mdesc_notifier_client *client_list;

void mdesc_register_notifier(struct mdesc_notifier_client *client)
{
	u64 node;

	mutex_lock(&mdesc_mutex);
	client->next = client_list;
	client_list = client;

	mdesc_for_each_node_by_name(cur_mdesc, node, client->node_name)
		client->add(cur_mdesc, node);

	mutex_unlock(&mdesc_mutex);
}
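
/* A hypothetical client, sketched only to show the intended use of the
 * notifier interface (real clients live in the virtual device drivers;
 * the node name below is made up for illustration):
 *
 *	static void example_add(struct mdesc_handle *hp, u64 node)
 *	{
 *		... probe the newly added node ...
 *	}
 *
 *	static void example_remove(struct mdesc_handle *hp, u64 node)
 *	{
 *		... tear the departed node down ...
 *	}
 *
 *	static struct mdesc_notifier_client example_client = {
 *		.add		= example_add,
 *		.remove		= example_remove,
 *		.node_name	= "example-port",
 *	};
 *
 *	mdesc_register_notifier(&example_client);
 */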

static const u64 *parent_cfg_handle(struct mdesc_handle *hp, u64 node)
{
	const u64 *id;
	u64 a;

	id = NULL;
	mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) {
		u64 target;

		target = mdesc_arc_target(hp, a);
		id = mdesc_get_property(hp, target,
					"cfg-handle", NULL);
		if (id)
			break;
	}

	return id;
}

/* Run 'func' on nodes which are in A but not in B.  */
static void invoke_on_missing(const char *name,
			      struct mdesc_handle *a,
			      struct mdesc_handle *b,
			      void (*func)(struct mdesc_handle *, u64))
{
	u64 node;

	mdesc_for_each_node_by_name(a, node, name) {
		int found = 0, is_vdc_port = 0;
		const char *name_prop;
		const u64 *id;
		u64 fnode;

		name_prop = mdesc_get_property(a, node, "name", NULL);
		if (name_prop && !strcmp(name_prop, "vdc-port")) {
			is_vdc_port = 1;
			id = parent_cfg_handle(a, node);
		} else
			id = mdesc_get_property(a, node, "id", NULL);

		if (!id) {
			printk(KERN_ERR "MD: Cannot find ID for %s node.\n",
			       (name_prop ? name_prop : name));
			continue;
		}

		mdesc_for_each_node_by_name(b, fnode, name) {
			const u64 *fid;

			if (is_vdc_port) {
				name_prop = mdesc_get_property(b, fnode,
							       "name", NULL);
				if (!name_prop ||
				    strcmp(name_prop, "vdc-port"))
					continue;
				fid = parent_cfg_handle(b, fnode);
				if (!fid) {
					printk(KERN_ERR "MD: Cannot find ID "
					       "for vdc-port node.\n");
					continue;
				}
			} else
				fid = mdesc_get_property(b, fnode,
							 "id", NULL);

			if (*id == *fid) {
				found = 1;
				break;
			}
		}
		if (!found)
			func(a, node);
	}
}

static void notify_one(struct mdesc_notifier_client *p,
		       struct mdesc_handle *old_hp,
		       struct mdesc_handle *new_hp)
{
	invoke_on_missing(p->node_name, old_hp, new_hp, p->remove);
	invoke_on_missing(p->node_name, new_hp, old_hp, p->add);
}

static void mdesc_notify_clients(struct mdesc_handle *old_hp,
				 struct mdesc_handle *new_hp)
{
	struct mdesc_notifier_client *p = client_list;

	while (p) {
		notify_one(p, old_hp, new_hp);
		p = p->next;
	}
}
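
/* mdesc_update() below re-reads the machine description after the
 * hypervisor changes it (e.g. on a domain reconfiguration), swaps it in
 * as cur_mdesc, and diffs old vs. new on behalf of the registered
 * notifier clients.  The previous MD is freed immediately if it is
 * unreferenced, otherwise it is parked on mdesc_zombie_list until the
 * final mdesc_release() drops it.
 */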

void mdesc_update(void)
{
	unsigned long len, real_len, status;
	struct mdesc_handle *hp, *orig_hp;
	unsigned long flags;

	mutex_lock(&mdesc_mutex);

	(void) sun4v_mach_desc(0UL, 0UL, &len);

	hp = mdesc_alloc(len, &kmalloc_mdesc_memops);
	if (!hp) {
		printk(KERN_ERR "MD: mdesc alloc fails\n");
		goto out;
	}

	status = sun4v_mach_desc(__pa(&hp->mdesc), len, &real_len);
	if (status != HV_EOK || real_len > len) {
		printk(KERN_ERR "MD: mdesc reread fails with %lu\n",
		       status);
		atomic_dec(&hp->refcnt);
		mdesc_free(hp);
		goto out;
	}

	spin_lock_irqsave(&mdesc_lock, flags);
	orig_hp = cur_mdesc;
	cur_mdesc = hp;
	spin_unlock_irqrestore(&mdesc_lock, flags);

	mdesc_notify_clients(orig_hp, hp);

	spin_lock_irqsave(&mdesc_lock, flags);
	if (atomic_dec_and_test(&orig_hp->refcnt))
		mdesc_free(orig_hp);
	else
		list_add(&orig_hp->list, &mdesc_zombie_list);
	spin_unlock_irqrestore(&mdesc_lock, flags);

out:
	mutex_unlock(&mdesc_mutex);
}

static struct mdesc_elem *node_block(struct mdesc_hdr *mdesc)
{
	return (struct mdesc_elem *) (mdesc + 1);
}

static void *name_block(struct mdesc_hdr *mdesc)
{
	return ((void *) node_block(mdesc)) + mdesc->node_sz;
}

static void *data_block(struct mdesc_hdr *mdesc)
{
	return ((void *) name_block(mdesc)) + mdesc->name_sz;
}

u64 mdesc_node_by_name(struct mdesc_handle *hp,
		       u64 from_node, const char *name)
{
	struct mdesc_elem *ep = node_block(&hp->mdesc);
	const char *names = name_block(&hp->mdesc);
	u64 last_node = hp->mdesc.node_sz / 16;
	u64 ret;

	if (from_node == MDESC_NODE_NULL) {
		ret = from_node = 0;
	} else if (from_node >= last_node) {
		return MDESC_NODE_NULL;
	} else {
		ret = ep[from_node].d.val;
	}

	while (ret < last_node) {
		if (ep[ret].tag != MD_NODE)
			return MDESC_NODE_NULL;
		if (!strcmp(names + ep[ret].name_offset, name))
			break;
		ret = ep[ret].d.val;
	}
	if (ret >= last_node)
		ret = MDESC_NODE_NULL;
	return ret;
}
EXPORT_SYMBOL(mdesc_node_by_name);

const void *mdesc_get_property(struct mdesc_handle *hp, u64 node,
			       const char *name, int *lenp)
{
	const char *names = name_block(&hp->mdesc);
	u64 last_node = hp->mdesc.node_sz / 16;
	void *data = data_block(&hp->mdesc);
	struct mdesc_elem *ep;

	if (node == MDESC_NODE_NULL || node >= last_node)
		return NULL;

	ep = node_block(&hp->mdesc) + node;
	ep++;
	for (; ep->tag != MD_NODE_END; ep++) {
		void *val = NULL;
		int len = 0;

		switch (ep->tag) {
		case MD_PROP_VAL:
			val = &ep->d.val;
			len = 8;
			break;

		case MD_PROP_STR:
		case MD_PROP_DATA:
			val = data + ep->d.data.data_offset;
			len = ep->d.data.data_len;
			break;

		default:
			break;
		}
		if (!val)
			continue;

		if (!strcmp(names + ep->name_offset, name)) {
			if (lenp)
				*lenp = len;
			return val;
		}
	}

	return NULL;
}
EXPORT_SYMBOL(mdesc_get_property);

u64 mdesc_next_arc(struct mdesc_handle *hp, u64 from, const char *arc_type)
{
	struct mdesc_elem *ep, *base = node_block(&hp->mdesc);
	const char *names = name_block(&hp->mdesc);
	u64 last_node = hp->mdesc.node_sz / 16;

	if (from == MDESC_NODE_NULL || from >= last_node)
		return MDESC_NODE_NULL;

	ep = base + from;

	ep++;
	for (; ep->tag != MD_NODE_END; ep++) {
		if (ep->tag != MD_PROP_ARC)
			continue;

		if (strcmp(names + ep->name_offset, arc_type))
			continue;

		return ep - base;
	}

	return MDESC_NODE_NULL;
}
EXPORT_SYMBOL(mdesc_next_arc);

u64 mdesc_arc_target(struct mdesc_handle *hp, u64 arc)
{
	struct mdesc_elem *ep, *base = node_block(&hp->mdesc);

	ep = base + arc;

	return ep->d.val;
}
EXPORT_SYMBOL(mdesc_arc_target);

const char *mdesc_node_name(struct mdesc_handle *hp, u64 node)
{
	struct mdesc_elem *ep, *base = node_block(&hp->mdesc);
	const char *names = name_block(&hp->mdesc);
	u64 last_node = hp->mdesc.node_sz / 16;

	if (node == MDESC_NODE_NULL || node >= last_node)
		return NULL;

	ep = base + node;
	if (ep->tag != MD_NODE)
		return NULL;

	return names + ep->name_offset;
}
EXPORT_SYMBOL(mdesc_node_name);
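
/* Arcs are walked with mdesc_for_each_arc()/mdesc_arc_target().  An
 * illustrative sketch, mirroring what fill_in_one_cpu() does below to
 * find the caches a cpu node points at:
 *
 *	u64 arc;
 *
 *	mdesc_for_each_arc(arc, hp, cpu_node, MDESC_ARC_TYPE_FWD) {
 *		u64 target = mdesc_arc_target(hp, arc);
 *
 *		if (!strcmp(mdesc_node_name(hp, target), "cache"))
 *			... this cpu has a forward arc to a cache node ...
 *	}
 */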

static void __init report_platform_properties(void)
{
	struct mdesc_handle *hp = mdesc_grab();
	u64 pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "platform");
	const char *s;
	const u64 *v;

	if (pn == MDESC_NODE_NULL) {
		prom_printf("No platform node in machine-description.\n");
		prom_halt();
	}

	s = mdesc_get_property(hp, pn, "banner-name", NULL);
	printk("PLATFORM: banner-name [%s]\n", s);
	s = mdesc_get_property(hp, pn, "name", NULL);
	printk("PLATFORM: name [%s]\n", s);

	v = mdesc_get_property(hp, pn, "hostid", NULL);
	if (v)
		printk("PLATFORM: hostid [%08llx]\n", *v);
	v = mdesc_get_property(hp, pn, "serial#", NULL);
	if (v)
		printk("PLATFORM: serial# [%08llx]\n", *v);
	v = mdesc_get_property(hp, pn, "stick-frequency", NULL);
	printk("PLATFORM: stick-frequency [%08llx]\n", *v);
	v = mdesc_get_property(hp, pn, "mac-address", NULL);
	if (v)
		printk("PLATFORM: mac-address [%llx]\n", *v);
	v = mdesc_get_property(hp, pn, "watchdog-resolution", NULL);
	if (v)
		printk("PLATFORM: watchdog-resolution [%llu ms]\n", *v);
	v = mdesc_get_property(hp, pn, "watchdog-max-timeout", NULL);
	if (v)
		printk("PLATFORM: watchdog-max-timeout [%llu ms]\n", *v);
	v = mdesc_get_property(hp, pn, "max-cpus", NULL);
	if (v)
		printk("PLATFORM: max-cpus [%llu]\n", *v);

#ifdef CONFIG_SMP
	{
		int max_cpu, i;

		if (v) {
			max_cpu = *v;
			if (max_cpu > NR_CPUS)
				max_cpu = NR_CPUS;
		} else {
			max_cpu = NR_CPUS;
		}
		for (i = 0; i < max_cpu; i++)
			set_cpu_possible(i, true);
	}
#endif

	mdesc_release(hp);
}

static void __cpuinit fill_in_one_cache(cpuinfo_sparc *c,
					struct mdesc_handle *hp,
					u64 mp)
{
	const u64 *level = mdesc_get_property(hp, mp, "level", NULL);
	const u64 *size = mdesc_get_property(hp, mp, "size", NULL);
	const u64 *line_size = mdesc_get_property(hp, mp, "line-size", NULL);
	const char *type;
	int type_len;

	type = mdesc_get_property(hp, mp, "type", &type_len);

	switch (*level) {
	case 1:
		if (of_find_in_proplist(type, "instn", type_len)) {
			c->icache_size = *size;
			c->icache_line_size = *line_size;
		} else if (of_find_in_proplist(type, "data", type_len)) {
			c->dcache_size = *size;
			c->dcache_line_size = *line_size;
		}
		break;

	case 2:
		c->ecache_size = *size;
		c->ecache_line_size = *line_size;
		break;

	default:
		break;
	}

	if (*level == 1) {
		u64 a;

		mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
			u64 target = mdesc_arc_target(hp, a);
			const char *name = mdesc_node_name(hp, target);

			if (!strcmp(name, "cache"))
				fill_in_one_cache(c, hp, target);
		}
	}
}

static void __cpuinit mark_core_ids(struct mdesc_handle *hp, u64 mp, int core_id)
{
	u64 a;

	mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_BACK) {
		u64 t = mdesc_arc_target(hp, a);
		const char *name;
		const u64 *id;

		name = mdesc_node_name(hp, t);
		if (!strcmp(name, "cpu")) {
			id = mdesc_get_property(hp, t, "id", NULL);
			if (*id < NR_CPUS)
				cpu_data(*id).core_id = core_id;
		} else {
			u64 j;

			mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_BACK) {
				u64 n = mdesc_arc_target(hp, j);
				const char *n_name;

				n_name = mdesc_node_name(hp, n);
				if (strcmp(n_name, "cpu"))
					continue;

				id = mdesc_get_property(hp, n, "id", NULL);
				if (*id < NR_CPUS)
					cpu_data(*id).core_id = core_id;
			}
		}
	}
}
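
/* set_core_ids() below assigns a common core_id to all CPU nodes that
 * hang off the same level 1 instruction cache, i.e. CPUs sharing an
 * L1 i-cache are treated as threads of one physical core.
 */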

static void __cpuinit set_core_ids(struct mdesc_handle *hp)
{
	int idx;
	u64 mp;

	idx = 1;
	mdesc_for_each_node_by_name(hp, mp, "cache") {
		const u64 *level;
		const char *type;
		int len;

		level = mdesc_get_property(hp, mp, "level", NULL);
		if (*level != 1)
			continue;

		type = mdesc_get_property(hp, mp, "type", &len);
		if (!of_find_in_proplist(type, "instn", len))
			continue;

		mark_core_ids(hp, mp, idx);

		idx++;
	}
}

static void __cpuinit mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id)
{
	u64 a;

	mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_BACK) {
		u64 t = mdesc_arc_target(hp, a);
		const char *name;
		const u64 *id;

		name = mdesc_node_name(hp, t);
		if (strcmp(name, "cpu"))
			continue;

		id = mdesc_get_property(hp, t, "id", NULL);
		if (*id < NR_CPUS)
			cpu_data(*id).proc_id = proc_id;
	}
}
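
/* __set_proc_ids() below assigns a common proc_id to all CPU nodes that
 * feed the same integer execution unit, i.e. hardware strands sharing
 * one integer pipeline.
 */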

static void __cpuinit __set_proc_ids(struct mdesc_handle *hp, const char *exec_unit_name)
{
	int idx;
	u64 mp;

	idx = 0;
	mdesc_for_each_node_by_name(hp, mp, exec_unit_name) {
		const char *type;
		int len;

		type = mdesc_get_property(hp, mp, "type", &len);
		if (!of_find_in_proplist(type, "int", len) &&
		    !of_find_in_proplist(type, "integer", len))
			continue;

		mark_proc_ids(hp, mp, idx);

		idx++;
	}
}

static void __cpuinit set_proc_ids(struct mdesc_handle *hp)
{
	__set_proc_ids(hp, "exec_unit");
	__set_proc_ids(hp, "exec-unit");
}
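
/* get_one_mondo_bits() below turns the "#bits" properties into byte
 * masks for the per-cpu mondo and error queues: a queue with (1 << val)
 * 64-byte entries gets the mask ((1 << val) * 64) - 1.  For example,
 * the default of 7 bits for the cpu mondo queue gives a 128-entry,
 * 8192-byte queue and a mask of 0x1fff.
 */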

static void __cpuinit get_one_mondo_bits(const u64 *p, unsigned int *mask,
					 unsigned char def)
{
	u64 val;

	if (!p)
		goto use_default;
	val = *p;

	if (!val || val >= 64)
		goto use_default;

	*mask = ((1U << val) * 64U) - 1U;
	return;

use_default:
	*mask = ((1U << def) * 64U) - 1U;
}

static void __cpuinit get_mondo_data(struct mdesc_handle *hp, u64 mp,
				     struct trap_per_cpu *tb)
{
	const u64 *val;

	val = mdesc_get_property(hp, mp, "q-cpu-mondo-#bits", NULL);
	get_one_mondo_bits(val, &tb->cpu_mondo_qmask, 7);

	val = mdesc_get_property(hp, mp, "q-dev-mondo-#bits", NULL);
	get_one_mondo_bits(val, &tb->dev_mondo_qmask, 7);

	val = mdesc_get_property(hp, mp, "q-resumable-#bits", NULL);
	get_one_mondo_bits(val, &tb->resum_qmask, 6);

	val = mdesc_get_property(hp, mp, "q-nonresumable-#bits", NULL);
	get_one_mondo_bits(val, &tb->nonresum_qmask, 2);
}

static void * __cpuinit mdesc_iterate_over_cpus(void *(*func)(struct mdesc_handle *, u64, int, void *), void *arg, cpumask_t *mask)
{
	struct mdesc_handle *hp = mdesc_grab();
	void *ret = NULL;
	u64 mp;

	mdesc_for_each_node_by_name(hp, mp, "cpu") {
		const u64 *id = mdesc_get_property(hp, mp, "id", NULL);
		int cpuid = *id;

#ifdef CONFIG_SMP
		if (cpuid >= NR_CPUS) {
			printk(KERN_WARNING "Ignoring CPU %d which is "
			       ">= NR_CPUS (%d)\n",
			       cpuid, NR_CPUS);
			continue;
		}
		if (!cpu_isset(cpuid, *mask))
			continue;
#endif

		ret = func(hp, mp, cpuid, arg);
		if (ret)
			goto out;
	}
out:
	mdesc_release(hp);
	return ret;
}

static void * __cpuinit record_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid, void *arg)
{
	ncpus_probed++;
#ifdef CONFIG_SMP
	set_cpu_present(cpuid, true);
#endif
	return NULL;
}

void __cpuinit mdesc_populate_present_mask(cpumask_t *mask)
{
	if (tlb_type != hypervisor)
		return;

	ncpus_probed = 0;
	mdesc_iterate_over_cpus(record_one_cpu, NULL, mask);
}

static void * __cpuinit fill_in_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid, void *arg)
{
	const u64 *cfreq = mdesc_get_property(hp, mp, "clock-frequency", NULL);
	struct trap_per_cpu *tb;
	cpuinfo_sparc *c;
	u64 a;

#ifndef CONFIG_SMP
	/* On uniprocessor we only want the values for the
	 * real physical cpu the kernel booted onto, however
	 * cpu_data() only has one entry at index 0.
	 */
	if (cpuid != real_hard_smp_processor_id())
		return NULL;
	cpuid = 0;
#endif

	c = &cpu_data(cpuid);
	c->clock_tick = *cfreq;

	tb = &trap_block[cpuid];
	get_mondo_data(hp, mp, tb);

	mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
		u64 j, t = mdesc_arc_target(hp, a);
		const char *t_name;

		t_name = mdesc_node_name(hp, t);
		if (!strcmp(t_name, "cache")) {
			fill_in_one_cache(c, hp, t);
			continue;
		}

		mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_FWD) {
			u64 n = mdesc_arc_target(hp, j);
			const char *n_name;

			n_name = mdesc_node_name(hp, n);
			if (!strcmp(n_name, "cache"))
				fill_in_one_cache(c, hp, n);
		}
	}

	c->core_id = 0;
	c->proc_id = -1;

	return NULL;
}

void __cpuinit mdesc_fill_in_cpu_data(cpumask_t *mask)
{
	struct mdesc_handle *hp;

	mdesc_populate_present_mask(mask);
	mdesc_iterate_over_cpus(fill_in_one_cpu, NULL, mask);

#ifdef CONFIG_SMP
	sparc64_multi_core = 1;
#endif

	hp = mdesc_grab();

	set_core_ids(hp);
	set_proc_ids(hp);

	mdesc_release(hp);

	smp_fill_in_sib_core_maps();
}
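
/* The "mdesc" misc device below exposes the machine description to
 * userspace: a single read() with a buffer of at least hp->handle_size
 * bytes returns the data in one go, while shorter reads fail with
 * -EMSGSIZE.
 */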

static ssize_t mdesc_read(struct file *file, char __user *buf,
			  size_t len, loff_t *offp)
{
	struct mdesc_handle *hp = mdesc_grab();
	int err;

	if (!hp)
		return -ENODEV;

	err = hp->handle_size;
	if (len < hp->handle_size)
		err = -EMSGSIZE;
	else if (copy_to_user(buf, &hp->mdesc, hp->handle_size))
		err = -EFAULT;
	mdesc_release(hp);

	return err;
}

static const struct file_operations mdesc_fops = {
	.read	= mdesc_read,
	.owner	= THIS_MODULE,
};

static struct miscdevice mdesc_misc = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "mdesc",
	.fops	= &mdesc_fops,
};

static int __init mdesc_misc_init(void)
{
	return misc_register(&mdesc_misc);
}

__initcall(mdesc_misc_init);

void __init sun4v_mdesc_init(void)
{
	struct mdesc_handle *hp;
	unsigned long len, real_len, status;

	(void) sun4v_mach_desc(0UL, 0UL, &len);

	printk("MDESC: Size is %lu bytes.\n", len);

	hp = mdesc_alloc(len, &lmb_mdesc_ops);
	if (hp == NULL) {
		prom_printf("MDESC: alloc of %lu bytes failed.\n", len);
		prom_halt();
	}

	status = sun4v_mach_desc(__pa(&hp->mdesc), len, &real_len);
	if (status != HV_EOK || real_len > len) {
		prom_printf("sun4v_mach_desc fails, err(%lu), "
			    "len(%lu), real_len(%lu)\n",
			    status, len, real_len);
		mdesc_free(hp);
		prom_halt();
	}

	cur_mdesc = hp;

	report_platform_properties();
}