/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/console.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
	"RV620",
	"RV635",
	"RS780",
	"RS880",
	"RV770",
	"RV730",
	"RV710",
	"RV740",
	"CEDAR",
	"REDWOOD",
	"JUNIPER",
	"CYPRESS",
	"HEMLOCK",
	"PALM",
	"SUMO",
	"SUMO2",
	"BARTS",
	"TURKS",
	"CAICOS",
	"CAYMAN",
	"ARUBA",
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"LAST",
};

/*
 * Clear GPU surface registers.
 */
void radeon_surface_init(struct radeon_device *rdev)
{
	/* FIXME: check this out */
	if (rdev->family < CHIP_R600) {
		int i;

		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
			if (rdev->surface_regs[i].bo)
				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
			else
				radeon_clear_surface_reg(rdev, i);
		}
		/* enable surfaces */
		WREG32(RADEON_SURFACE_CNTL, 0);
	}
}

/*
 * GPU scratch registers helper functions.
 */
void radeon_scratch_init(struct radeon_device *rdev)
{
	int i;

	/* FIXME: check this out */
	if (rdev->family < CHIP_R300) {
		rdev->scratch.num_reg = 5;
	} else {
		rdev->scratch.num_reg = 7;
	}
	rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
	}
}

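/*
 * Illustrative caller sketch for the scratch allocator below (the
 * pattern the ring tests use; 0xCAFEDEAD is just a recognizable
 * poison value):
 *
 *	uint32_t scratch;
 *
 *	r = radeon_scratch_get(rdev, &scratch);
 *	if (r == 0) {
 *		WREG32(scratch, 0xCAFEDEAD);
 *		... emit a packet that writes scratch, then poll it ...
 *		radeon_scratch_free(rdev, scratch);
 *	}
 */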
int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.free[i]) {
			rdev->scratch.free[i] = false;
			*reg = rdev->scratch.reg[i];
			return 0;
		}
	}
	return -EINVAL;
}

void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.reg[i] == reg) {
			rdev->scratch.free[i] = true;
			return;
		}
	}
}

void radeon_wb_disable(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj) {
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0))
			return;
		radeon_bo_kunmap(rdev->wb.wb_obj);
		radeon_bo_unpin(rdev->wb.wb_obj);
		radeon_bo_unreserve(rdev->wb.wb_obj);
	}
	rdev->wb.enabled = false;
}

void radeon_wb_fini(struct radeon_device *rdev)
{
	radeon_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		radeon_bo_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}

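/*
 * With writeback enabled the GPU writes fence values and ring read
 * pointers into the wb buffer, so the driver can read them from normal
 * memory instead of doing an MMIO register read. Illustrative read of
 * one 32-bit slot, assuming "index" is a byte offset handed out during
 * ring setup:
 *
 *	u32 v = le32_to_cpu(rdev->wb.wb[index / 4]);
 */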
int radeon_wb_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, NULL, &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
	}
	r = radeon_bo_reserve(rdev->wb.wb_obj, false);
	if (unlikely(r != 0)) {
		radeon_wb_fini(rdev);
		return r;
	}
	r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
			  &rdev->wb.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->wb.wb_obj);
		dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
		radeon_wb_fini(rdev);
		return r;
	}
	r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
	radeon_bo_unreserve(rdev->wb.wb_obj);
	if (r) {
		dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
		radeon_wb_fini(rdev);
		return r;
	}

	/* clear wb memory */
	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
	/* disable event_write fences */
	rdev->wb.use_event = false;
	/* disabled via module param */
	if (radeon_no_wb == 1) {
		rdev->wb.enabled = false;
	} else {
		if (rdev->flags & RADEON_IS_AGP) {
			/* often unreliable on AGP */
			rdev->wb.enabled = false;
		} else if (rdev->family < CHIP_R300) {
			/* often unreliable on pre-r300 */
			rdev->wb.enabled = false;
		} else {
			rdev->wb.enabled = true;
			/* event_write fences are only available on r600+ */
			if (rdev->family >= CHIP_R600) {
				rdev->wb.use_event = true;
			}
		}
	}
	/* always use writeback/events on NI, APUs */
	if (rdev->family >= CHIP_PALM) {
		rdev->wb.enabled = true;
		rdev->wb.use_event = true;
	}

	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");

	return 0;
}

/**
 * radeon_vram_location - try to find VRAM location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided as a
 * parameter (which is so far either the PCI aperture address or, for
 * IGP, the TOM base address).
 *
 * If there is not enough space to fit the non-visible VRAM in the
 * 32-bit address space then we limit the VRAM size to the aperture.
 *
 * If we are using AGP and if the AGP aperture doesn't allow us to have
 * room for all the VRAM then we restrict the VRAM to the PCI aperture
 * size and print a warning.
 *
 * This function never fails; worst case is that VRAM gets limited.
 *
 * Note: GTT start, end, and size should be initialized before calling
 * this function on AGP platforms.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some boards we need to program the mc to
 * cover the whole aperture even if VRAM size is smaller than aperture size
 * (Novell bug 204882 along with lots of Ubuntu ones).
 *
 * Note: when limiting VRAM it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is smaller than mc_vram_size
 * (i.e. not affected by the bogus hw of Novell bug 204882 along with lots
 * of Ubuntu ones).
 *
 * Note: the IGP TOM addr should be the same as the aperture addr; we don't
 * explicitly check for that, though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	mc->vram_start = base;
	if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (radeon_vram_limit && radeon_vram_limit < mc->real_vram_size)
		mc->real_vram_size = radeon_vram_limit;
	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}
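/*
 * Worked example (illustrative): with base = 0xC0000000 and
 * mc_vram_size = 2GB, the VRAM range would run past 0xFFFFFFFF, so
 * both sizes are clamped to aper_size; with mc_vram_size = 512MB the
 * range 0xC0000000 - 0xDFFFFFFF fits and is used as-is.
 */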

/**
 * radeon_gtt_location - try to find GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than the space left then we adjust the GTT size.
 * Thus this function never fails.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	size_af = ((0xFFFFFFFF - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
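/*
 * Worked example (illustrative): with VRAM at 0x00000000 - 0x1FFFFFFF,
 * size_bf is 0 and size_af covers the rest of the 32-bit space, so a
 * 512MB GTT lands after VRAM at 0x20000000 - 0x3FFFFFFF (assuming
 * gtt_base_align permits that start address).
 */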

/*
 * GPU helper functions.
 */
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	if (efi_enabled && rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)
		return false;

	/* first check CRTCs */
	if (ASIC_IS_DCE41(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_DCE4(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
		      RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
		      RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;

}

void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 sclk = rdev->pm.current_sclk;
	u32 mclk = rdev->pm.current_mclk;

	/* sclk/mclk in MHz */
	a.full = dfixed_const(100);
	rdev->pm.sclk.full = dfixed_const(sclk);
	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
	rdev->pm.mclk.full = dfixed_const(mclk);
	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

	if (rdev->flags & RADEON_IS_IGP) {
		a.full = dfixed_const(16);
		/* core_bandwidth = sclk(MHz) * 16 */
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
	}
}

bool radeon_boot_test_post_card(struct radeon_device *rdev)
{
	if (radeon_card_posted(rdev))
		return true;

	if (rdev->bios) {
		DRM_INFO("GPU not posted. posting now...\n");
		if (rdev->is_atom_bios)
			atom_asic_init(rdev->mode_info.atom_context);
		else
			radeon_combios_asic_init(rdev->ddev);
		return true;
	} else {
		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
		return false;
	}
}

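/*
 * The dummy page backs unbound GART entries, so stray GPU accesses
 * through an unmapped GART slot hit a harmless page instead of random
 * system memory.
 */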
int radeon_dummy_page_init(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page)
		return 0;
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
					0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(rdev->dummy_page.page);
		rdev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}

void radeon_dummy_page_fini(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page == NULL)
		return;
	pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
			PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(rdev->dummy_page.page);
	rdev->dummy_page.page = NULL;
}


/* ATOM accessor methods */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->pll_rreg(rdev, reg);
	return r;
}

static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->pll_wreg(rdev, reg, val);
}

static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->mc_rreg(rdev, reg);
	return r;
}

static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->mc_wreg(rdev, reg, val);
}

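/* the ATOM interpreter passes dword register indices, hence reg*4 below */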
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32(reg*4, val);
}

static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32(reg*4);
	return r;
}

static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32_IO(reg*4, val);
}

static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32_IO(reg*4);
	return r;
}

int radeon_atombios_init(struct radeon_device *rdev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	rdev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = rdev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (rdev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
	mutex_init(&rdev->mode_info.atom_context->mutex);
	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
	return 0;
}

void radeon_atombios_fini(struct radeon_device *rdev)
{
	if (rdev->mode_info.atom_context) {
		kfree(rdev->mode_info.atom_context->scratch);
		kfree(rdev->mode_info.atom_context);
	}
	kfree(rdev->mode_info.atom_card_info);
}

int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}

void radeon_combios_fini(struct radeon_device *rdev)
{
}

/* if we get transitioned to only one device, take VGA back */
static unsigned int radeon_vga_set_decode(void *cookie, bool state)
{
	struct radeon_device *rdev = cookie;
	radeon_vga_set_state(rdev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

void radeon_check_arguments(struct radeon_device *rdev)
{
	/* vramlimit must be a power of two */
	switch (radeon_vram_limit) {
	case 0:
	case 4:
	case 8:
	case 16:
	case 32:
	case 64:
	case 128:
	case 256:
	case 512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
				radeon_vram_limit);
		radeon_vram_limit = 0;
		break;
	}
	radeon_vram_limit = radeon_vram_limit << 20;
	/* gtt size must be power of two and greater or equal to 32M */
	switch (radeon_gart_size) {
	case 4:
	case 8:
	case 16:
		dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
				radeon_gart_size);
		radeon_gart_size = 512;
		break;
	case 32:
	case 64:
	case 128:
	case 256:
	case 512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
				radeon_gart_size);
		radeon_gart_size = 512;
		break;
	}
	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
	/* AGP mode can only be -1, 1, 2, 4, 8 */
	switch (radeon_agpmode) {
	case -1:
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
		radeon_agpmode = 0;
		break;
	}
}
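/*
 * Example (illustrative): booting with radeon.vramlimit=256 passes the
 * power-of-two check and becomes 256 << 20 bytes, while
 * radeon.gartsize=100 hits the default case and is forced back to 512M.
 */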

static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
	if (state == VGA_SWITCHEROO_ON) {
		printk(KERN_INFO "radeon: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_resume_kms(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		printk(KERN_INFO "radeon: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_suspend_kms(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	bool can_switch;

	spin_lock(&dev->count_lock);
	can_switch = (dev->open_count == 0);
	spin_unlock(&dev->count_lock);
	return can_switch;
}

static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
	.set_gpu_state = radeon_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = radeon_switcheroo_can_switch,
};

int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	int dma_bits;

	rdev->shutdown = false;
	rdev->dev = &pdev->dev;
	rdev->ddev = ddev;
	rdev->pdev = pdev;
	rdev->flags = flags;
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
	rdev->accel_working = false;

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
		radeon_family_name[rdev->family], pdev->vendor, pdev->device,
		pdev->subsystem_vendor, pdev->subsystem_device);

	/* mutex initialization is all done here so we
	 * can call these functions again without locking issues */
	mutex_init(&rdev->ring_lock);
	mutex_init(&rdev->dc_hw_i2c_mutex);
	atomic_set(&rdev->ih.lock, 0);
	mutex_init(&rdev->gem.mutex);
	mutex_init(&rdev->pm.mutex);
	init_rwsem(&rdev->pm.mclk_lock);
	init_rwsem(&rdev->exclusive_lock);
	init_waitqueue_head(&rdev->irq.vblank_queue);
	init_waitqueue_head(&rdev->irq.idle_queue);
	r = radeon_gem_init(rdev);
	if (r)
		return r;
	/* initialize vm here */
	mutex_init(&rdev->vm_manager.lock);
	rdev->vm_manager.use_bitmap = 1;
	rdev->vm_manager.max_pfn = 1 << 20;
	INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);

	/* Set asic functions */
	r = radeon_asic_init(rdev);
	if (r)
		return r;
	radeon_check_arguments(rdev);

	/* all of the newer IGP chips have an internal gart
	 * However some rs4xx report as AGP, so remove that here.
	 */
	if ((rdev->family >= CHIP_RS400) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		rdev->flags &= ~RADEON_IS_AGP;
	}

	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
		radeon_agp_disable(rdev);
	}

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * AGP - generally dma32 is safest
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	rdev->need_dma32 = false;
	if (rdev->flags & RADEON_IS_AGP)
		rdev->need_dma32 = true;
	if ((rdev->flags & RADEON_IS_PCI) &&
	    (rdev->family < CHIP_RS400))
		rdev->need_dma32 = true;

	dma_bits = rdev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		rdev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "radeon: No coherent DMA available.\n");
	}

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
	rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
	if (rdev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);

	/* io port mapping */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
			break;
		}
	}
	if (rdev->rio_mem == NULL)
		DRM_ERROR("Unable to find PCI I/O BAR\n");

	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
	vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops);

	r = radeon_init(rdev);
	if (r)
		return r;

	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		/* Acceleration not working on AGP card try again
		 * with fallback to PCI or PCIE GART
		 */
		radeon_asic_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
			return r;
	}
	if ((radeon_testing & 1)) {
		radeon_test_moves(rdev);
	}
	if ((radeon_testing & 2)) {
		radeon_test_syncing(rdev);
	}
	if (radeon_benchmarking) {
		radeon_benchmark(rdev, radeon_benchmarking);
	}
	return 0;
}

static void radeon_debugfs_remove_files(struct radeon_device *rdev);

void radeon_device_fini(struct radeon_device *rdev)
{
	DRM_INFO("radeon: finishing device.\n");
	rdev->shutdown = true;
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	radeon_fini(rdev);
	vga_switcheroo_unregister_client(rdev->pdev);
	vga_client_register(rdev->pdev, NULL, NULL, NULL);
	if (rdev->rio_mem)
		pci_iounmap(rdev->pdev, rdev->rio_mem);
	rdev->rio_mem = NULL;
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
	radeon_debugfs_remove_files(rdev);
}


/*
 * Suspend & resume.
 */
int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
{
	struct radeon_device *rdev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int i, r;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}
	if (state.event == PM_EVENT_PRETHAW) {
		return 0;
	}
	rdev = dev->dev_private;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	drm_kms_helper_poll_disable(dev);

	/* turn off display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}

	/* unpin the front buffers */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
		struct radeon_bo *robj;

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = gem_to_radeon_bo(rfb->obj);
		/* don't unpin kernel fb objects */
		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);

	mutex_lock(&rdev->ring_lock);
	/* wait for gpu to finish processing current batch */
	for (i = 0; i < RADEON_NUM_RINGS; i++)
		radeon_fence_wait_empty_locked(rdev, i);
	mutex_unlock(&rdev->ring_lock);

	radeon_save_bios_scratch_regs(rdev);

	radeon_pm_suspend(rdev);
	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);
	/* evict remaining vram memory */
	radeon_bo_evict_vram(rdev);

	radeon_agp_suspend(rdev);

	pci_save_state(dev->pdev);
	if (state.event == PM_EVENT_SUSPEND) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}
	console_lock();
	radeon_fbdev_set_suspend(rdev, 1);
	console_unlock();
	return 0;
}

int radeon_resume_kms(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct radeon_device *rdev = dev->dev_private;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	console_lock();
	pci_set_power_state(dev->pdev, PCI_D0);
	pci_restore_state(dev->pdev);
	if (pci_enable_device(dev->pdev)) {
		console_unlock();
		return -1;
	}
	/* resume AGP if in use */
	radeon_agp_resume(rdev);
	radeon_resume(rdev);
	radeon_pm_resume(rdev);
	radeon_restore_bios_scratch_regs(rdev);

	radeon_fbdev_set_suspend(rdev, 0);
	console_unlock();

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
	}
	/* reset hpd state */
	radeon_hpd_init(rdev);
	/* blat the mode back in */
	drm_helper_resume_force_mode(dev);
	/* turn on display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
	}

	drm_kms_helper_poll_enable(dev);
	return 0;
}

int radeon_gpu_reset(struct radeon_device *rdev)
{
	int r;
	int resched;

	down_write(&rdev->exclusive_lock);
	radeon_save_bios_scratch_regs(rdev);
	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	radeon_suspend(rdev);

	r = radeon_asic_reset(rdev);
	if (!r) {
		dev_info(rdev->dev, "GPU reset succeed\n");
		radeon_resume(rdev);
		radeon_restore_bios_scratch_regs(rdev);
		drm_helper_resume_force_mode(rdev->ddev);
		ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
	}

	if (r) {
		/* bad news, how to tell it to userspace ? */
		dev_info(rdev->dev, "GPU reset failed\n");
	}

	up_write(&rdev->exclusive_lock);
	return r;
}


/*
 * Debugfs
 */
int radeon_debugfs_add_files(struct radeon_device *rdev,
			     struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < rdev->debugfs_count; i++) {
		if (rdev->debugfs[i].files == files) {
			/* Already registered */
			return 0;
		}
	}

	i = rdev->debugfs_count + 1;
	if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
		DRM_ERROR("Reached maximum number of debugfs components.\n");
		DRM_ERROR("Report so we increase "
		          "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
		return -EINVAL;
	}
	rdev->debugfs[rdev->debugfs_count].files = files;
	rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
	rdev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->control->debugfs_root,
				 rdev->ddev->control);
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->primary->debugfs_root,
				 rdev->ddev->primary);
#endif
	return 0;
}
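/*
 * Typical caller sketch (illustrative; the "foo" names are
 * placeholders): register a static table once and rely on the
 * duplicate check above to make repeat calls a no-op:
 *
 *	static struct drm_info_list radeon_foo_list[] = {
 *		{"radeon_foo_info", radeon_debugfs_foo_info, 0, NULL},
 *	};
 *
 *	radeon_debugfs_add_files(rdev, radeon_foo_list,
 *				 ARRAY_SIZE(radeon_foo_list));
 */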

static void radeon_debugfs_remove_files(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < rdev->debugfs_count; i++) {
		drm_debugfs_remove_files(rdev->debugfs[i].files,
					 rdev->debugfs[i].num_files,
					 rdev->ddev->control);
		drm_debugfs_remove_files(rdev->debugfs[i].files,
					 rdev->debugfs[i].num_files,
					 rdev->ddev->primary);
	}
#endif
}

#if defined(CONFIG_DEBUG_FS)
int radeon_debugfs_init(struct drm_minor *minor)
{
	return 0;
}

void radeon_debugfs_cleanup(struct drm_minor *minor)
{
}
#endif