/*
 *  IDE DMA support (including IDE PCI BM-DMA).
 *
 *  Copyright (C) 1995-1998   Mark Lord
 *  Copyright (C) 1999-2000   Andre Hedrick <andre@linux-ide.org>
 *  Copyright (C) 2004, 2007  Bartlomiej Zolnierkiewicz
 *
 *  May be copied or modified under the terms of the GNU General Public License
 *
 *  DMA is supported for all IDE devices (disk drives, cdroms, tapes, floppies).
 */

/*
 *  Special Thanks to Mark for his Six years of work.
 */

/*
 * Thanks to "Christopher J. Reimer" <reimer@doe.carleton.ca> for
 * fixing the problem with the BIOS on some Acer motherboards.
 *
 * Thanks to "Benoit Poulot-Cazajous" <poulot@chorus.fr> for testing
 * "TX" chipset compatibility and for providing patches for the "TX" chipset.
 *
 * Thanks to Christian Brunner <chb@muc.de> for taking a good first crack
 * at generic DMA -- his patches were referred to when preparing this code.
 *
 * Most importantly, thanks to Robert Bringman <rob@mars.trion.com>
 * for supplying a Promise UDMA board & WD UDMA drive for this work!
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/ide.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#include <asm/io.h>
#include <asm/irq.h>

static const struct drive_list_entry drive_whitelist [] = {

	{ "Micropolis 2112A"	,       NULL		},
	{ "CONNER CTMA 4000"	,       NULL		},
	{ "CONNER CTT8000-A"	,       NULL		},
	{ "ST34342A"		,	NULL		},
	{ NULL			,	NULL		}
};

static const struct drive_list_entry drive_blacklist [] = {

	{ "WDC AC11000H"	,	NULL 		},
	{ "WDC AC22100H"	,	NULL 		},
	{ "WDC AC32500H"	,	NULL 		},
	{ "WDC AC33100H"	,	NULL 		},
	{ "WDC AC31600H"	,	NULL 		},
	{ "WDC AC32100H"	,	"24.09P07"	},
	{ "WDC AC23200L"	,	"21.10N21"	},
	{ "Compaq CRD-8241B"	,	NULL 		},
	{ "CRD-8400B"		,	NULL 		},
	{ "CRD-8480B",			NULL 		},
	{ "CRD-8482B",			NULL 		},
	{ "CRD-84"		,	NULL 		},
	{ "SanDisk SDP3B"	,	NULL 		},
	{ "SanDisk SDP3B-64"	,	NULL 		},
	{ "SANYO CD-ROM CRD"	,	NULL 		},
	{ "HITACHI CDR-8"	,	NULL 		},
	{ "HITACHI CDR-8335"	,	NULL 		},
	{ "HITACHI CDR-8435"	,	NULL 		},
	{ "Toshiba CD-ROM XM-6202B"	,	NULL 		},
	{ "TOSHIBA CD-ROM XM-1702BC",	NULL 		},
	{ "CD-532E-A"		,	NULL 		},
	{ "E-IDE CD-ROM CR-840",	NULL 		},
	{ "CD-ROM Drive/F5A",	NULL 		},
	{ "WPI CDD-820",		NULL 		},
	{ "SAMSUNG CD-ROM SC-148C",	NULL 		},
	{ "SAMSUNG CD-ROM SC",	NULL 		},
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",	NULL 		},
	{ "_NEC DV5800A",               NULL            },
	{ "SAMSUNG CD-ROM SN-124",	"N001" },
	{ "Seagate STT20000A",		NULL  },
	{ "CD-ROM CDR_U200",		"1.09" },
	{ NULL			,	NULL		}

};

/**
 *	ide_dma_intr	-	IDE DMA interrupt handler
 *	@drive: the drive the interrupt is for
 *
 *	Handle an interrupt completing a read/write DMA transfer on an 
 *	IDE device
 */
 
ide_startstop_t ide_dma_intr (ide_drive_t *drive)
{
	u8 stat = 0, dma_stat = 0;

	dma_stat = HWIF(drive)->ide_dma_end(drive);
	stat = ide_read_status(drive);

	if (OK_STAT(stat,DRIVE_READY,drive->bad_wstat|DRQ_STAT)) {
		if (!dma_stat) {
			struct request *rq = HWGROUP(drive)->rq;

			task_end_request(drive, rq, stat);
			return ide_stopped;
		}
		printk(KERN_ERR "%s: dma_intr: bad DMA status (dma_stat=%x)\n", 
		       drive->name, dma_stat);
	}
	return ide_error(drive, "dma_intr", stat);
}

EXPORT_SYMBOL_GPL(ide_dma_intr);

static int ide_dma_good_drive(ide_drive_t *drive)
{
	return ide_in_drive_list(drive->id, drive_whitelist);
}

/**
 *	ide_build_sglist	-	map IDE scatter gather for DMA I/O
 *	@drive: the drive to build the DMA table for
 *	@rq: the request holding the sg list
 *
 *	Perform the DMA mapping magic necessary to access the source or
 *	target buffers of a request via DMA.  The lower layers of the
 *	kernel provide the necessary cache management so that we can
 *	operate in a portable fashion.
 */

int ide_build_sglist(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct scatterlist *sg = hwif->sg_table;

	ide_map_sg(drive, rq);

	if (rq_data_dir(rq) == READ)
		hwif->sg_dma_direction = DMA_FROM_DEVICE;
	else
		hwif->sg_dma_direction = DMA_TO_DEVICE;

	return dma_map_sg(hwif->dev, sg, hwif->sg_nents,
			  hwif->sg_dma_direction);
}

EXPORT_SYMBOL_GPL(ide_build_sglist);

#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
/**
 *	ide_build_dmatable	-	build IDE DMA table
 *
 *	ide_build_dmatable() prepares a dma request. We map the command
 *	to get the pci bus addresses of the buffers and then build up
 *	the PRD table that the IDE layer wants to be fed. The code
 *	knows about the 64K wrap bug in the CS5530.
 *
 *	Returns the number of built PRD entries if all went okay,
 *	returns 0 otherwise.
 *
 *	May also be invoked from trm290.c
 */
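/*
 * Each PRD entry built below is two little-endian 32-bit words: the
 * physical base address of a chunk followed by its byte count in the
 * low 16 bits (0x0000 meaning 64KB).  Bit 31 of the final count word
 * marks the end of the table (skipped for the TRM290, which also
 * encodes the count differently).
 */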
 
int ide_build_dmatable (ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif	= HWIF(drive);
	unsigned int *table	= hwif->dmatable_cpu;
	unsigned int is_trm290	= (hwif->chipset == ide_trm290) ? 1 : 0;
	unsigned int count = 0;
	int i;
	struct scatterlist *sg;

	hwif->sg_nents = i = ide_build_sglist(drive, rq);

	if (!i)
		return 0;

	sg = hwif->sg_table;
	while (i) {
		u32 cur_addr;
		u32 cur_len;

		cur_addr = sg_dma_address(sg);
		cur_len = sg_dma_len(sg);

		/*
		 * Fill in the dma table, without crossing any 64kB boundaries.
		 * Most hardware requires 16-bit alignment of all blocks,
		 * but the trm290 requires 32-bit alignment.
		 */

		while (cur_len) {
			if (count++ >= PRD_ENTRIES) {
				printk(KERN_ERR "%s: DMA table too small\n", drive->name);
				goto use_pio_instead;
			} else {
				u32 xcount, bcount = 0x10000 - (cur_addr & 0xffff);

				if (bcount > cur_len)
					bcount = cur_len;
				*table++ = cpu_to_le32(cur_addr);
				xcount = bcount & 0xffff;
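				/* the TRM290 takes the count as (32-bit words - 1) in the upper halfword */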
				if (is_trm290)
					xcount = ((xcount >> 2) - 1) << 16;
				if (xcount == 0x0000) {
	/* 
	 * Most chipsets correctly interpret a length of 0x0000 as 64KB,
	 * but at least one (e.g. CS5530) misinterprets it as zero (!).
	 * So here we break the 64KB entry into two 32KB entries instead.
	 */
					if (count++ >= PRD_ENTRIES) {
						printk(KERN_ERR "%s: DMA table too small\n", drive->name);
						goto use_pio_instead;
					}
					*table++ = cpu_to_le32(0x8000);
					*table++ = cpu_to_le32(cur_addr + 0x8000);
					xcount = 0x8000;
				}
				*table++ = cpu_to_le32(xcount);
				cur_addr += bcount;
				cur_len -= bcount;
			}
		}

		sg = sg_next(sg);
		i--;
	}

	if (count) {
		if (!is_trm290)
			*--table |= cpu_to_le32(0x80000000);
		return count;
	}

	printk(KERN_ERR "%s: empty DMA table?\n", drive->name);

use_pio_instead:
	ide_destroy_dmatable(drive);

	return 0; /* revert to PIO for this request */
}

EXPORT_SYMBOL_GPL(ide_build_dmatable);
#endif

/**
 *	ide_destroy_dmatable	-	clean up DMA mapping
 *	@drive: The drive to unmap
 *
 *	Teardown mappings after DMA has completed. This must be called
 *	after the completion of each use of ide_build_dmatable and before
 *	the next use of ide_build_dmatable. Failure to do so will cause
 *	an oops as only one mapping can be live for each target at a given
 *	time.
 */
 
void ide_destroy_dmatable (ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;

	dma_unmap_sg(hwif->dev, hwif->sg_table, hwif->sg_nents,
		     hwif->sg_dma_direction);
}

EXPORT_SYMBOL_GPL(ide_destroy_dmatable);

#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
/**
 *	config_drive_for_dma	-	attempt to activate IDE DMA
 *	@drive: the drive to place in DMA mode
 *
 *	If the drive supports at least mode 2 DMA or UDMA of any kind
 *	then attempt to place it into DMA mode. Drives that are known to
 *	support DMA but predate the DMA properties or that are known
 *	to have DMA handling bugs are also set up appropriately based
 *	on the good/bad drive lists.
 */
 
static int config_drive_for_dma (ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct hd_driveid *id = drive->id;

	if (drive->media != ide_disk) {
		if (hwif->host_flags & IDE_HFLAG_NO_ATAPI_DMA)
			return 0;
	}

	/*
	 * Enable DMA on any drive that has
	 * UltraDMA (mode 0/1/2/3/4/5/6) enabled
	 */
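	/* the high byte of id->dma_ultra holds the currently enabled UDMA mode bits */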
	if ((id->field_valid & 4) && ((id->dma_ultra >> 8) & 0x7f))
		return 1;

	/*
	 * Enable DMA on any drive that has mode2 DMA
	 * (multi or single) enabled
	 */
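	/* 0x404 = mode 2 supported (bit 2) and currently selected (bit 10) */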
	if (id->field_valid & 2)	/* regular DMA */
		if ((id->dma_mword & 0x404) == 0x404 ||
		    (id->dma_1word & 0x404) == 0x404)
			return 1;

	/* Consult the list of known "good" drives */
	if (ide_dma_good_drive(drive))
		return 1;

	return 0;
}

/**
 *	dma_timer_expiry	-	handle a DMA timeout
 *	@drive: Drive that timed out
 *
 *	An IDE DMA transfer timed out. In the event of an error we ask
 *	the driver to resolve the problem, if a DMA transfer is still
 *	in progress we continue to wait (arguably we need to add a 
 *	secondary 'I don't care what the drive thinks' timeout here)
 *	Finally if we have an interrupt we let it complete the I/O.
 *	But only one time - we clear expiry and if it's still not
 *	completed after WAIT_CMD, we error and retry in PIO.
 *	This can occur if an interrupt is lost or due to hang or bugs.
 */
 
static int dma_timer_expiry (ide_drive_t *drive)
{
	ide_hwif_t *hwif	= HWIF(drive);
	u8 dma_stat		= hwif->INB(hwif->dma_status);

	printk(KERN_WARNING "%s: dma_timer_expiry: dma status == 0x%02x\n",
		drive->name, dma_stat);

	if ((dma_stat & 0x18) == 0x18)	/* BUSY Stupid Early Timer !! */
		return WAIT_CMD;

	HWGROUP(drive)->expiry = NULL;	/* one free ride for now */

	/* 1 dmaing, 2 error, 4 intr */
	if (dma_stat & 2)	/* ERROR */
		return -1;

	if (dma_stat & 1)	/* DMAing */
		return WAIT_CMD;

	if (dma_stat & 4)	/* Got an Interrupt */
		return WAIT_CMD;

	return 0;	/* Status is unknown -- reset the bus */
}

/**
 *	ide_dma_host_set	-	Enable/disable DMA on a host
 *	@drive: drive to control
 *
 *	Enable/disable DMA on an IDE controller following generic
 *	bus-mastering IDE controller behaviour.
 */

void ide_dma_host_set(ide_drive_t *drive, int on)
{
	ide_hwif_t *hwif	= HWIF(drive);
	u8 unit			= (drive->select.b.unit & 0x01);
	u8 dma_stat		= hwif->INB(hwif->dma_status);
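	/*
	 * Bits 5 and 6 of the BM-DMA status register are the software-set
	 * "drive 0/1 DMA capable" flags; set or clear the one that
	 * corresponds to this unit.
	 */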

	if (on)
		dma_stat |= (1 << (5 + unit));
	else
		dma_stat &= ~(1 << (5 + unit));

	hwif->OUTB(dma_stat, hwif->dma_status);
}

EXPORT_SYMBOL_GPL(ide_dma_host_set);
#endif /* CONFIG_BLK_DEV_IDEDMA_SFF  */

/**
 *	ide_dma_off_quietly	-	Generic DMA kill
 *	@drive: drive to control
 *
 *	Turn off the current DMA on this IDE controller. 
 */

void ide_dma_off_quietly(ide_drive_t *drive)
{
	drive->using_dma = 0;
	ide_toggle_bounce(drive, 0);

	drive->hwif->dma_host_set(drive, 0);
}

EXPORT_SYMBOL(ide_dma_off_quietly);

/**
 *	ide_dma_off	-	disable DMA on a device
 *	@drive: drive to disable DMA on
 *
 *	Disable IDE DMA for a device on this IDE controller.
 *	Inform the user that DMA has been disabled.
 */

void ide_dma_off(ide_drive_t *drive)
{
	printk(KERN_INFO "%s: DMA disabled\n", drive->name);
	ide_dma_off_quietly(drive);
}

EXPORT_SYMBOL(ide_dma_off);

/**
 *	ide_dma_on		-	Enable DMA on a device
 *	@drive: drive to enable DMA on
 *
 *	Enable IDE DMA for a device on this IDE controller.
 */

void ide_dma_on(ide_drive_t *drive)
{
	drive->using_dma = 1;
	ide_toggle_bounce(drive, 1);

	drive->hwif->dma_host_set(drive, 1);
}

#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
/**
 *	ide_dma_setup	-	begin a DMA phase
 *	@drive: target device
 *
 *	Build an IDE DMA PRD (IDE speak for scatter gather table)
 *	and then set up the DMA transfer registers for a device
 *	that follows generic IDE PCI DMA behaviour. Controllers can
 *	override this function if they need to
 *
 *	Returns 0 on success. If a PIO fallback is required then 1
 *	is returned. 
 */

int ide_dma_setup(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = HWGROUP(drive)->rq;
	unsigned int reading;
	u8 dma_stat;
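	/*
	 * Bit 3 of the BM-DMA command register selects the direction:
	 * set for a device-to-memory transfer (a READ request), clear
	 * for memory-to-device.
	 */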

	if (rq_data_dir(rq))
		reading = 0;
	else
		reading = 1 << 3;

	/* fall back to pio! */
	if (!ide_build_dmatable(drive, rq)) {
		ide_map_sg(drive, rq);
		return 1;
	}

	/* PRD table */
	if (hwif->mmio)
		writel(hwif->dmatable_dma, (void __iomem *)hwif->dma_prdtable);
	else
		outl(hwif->dmatable_dma, hwif->dma_prdtable);

	/* specify r/w */
	hwif->OUTB(reading, hwif->dma_command);

	/* read dma_status for INTR & ERROR flags */
	dma_stat = hwif->INB(hwif->dma_status);

	/* clear INTR & ERROR flags */
	hwif->OUTB(dma_stat|6, hwif->dma_status);
	drive->waiting_for_dma = 1;
	return 0;
}

EXPORT_SYMBOL_GPL(ide_dma_setup);

static void ide_dma_exec_cmd(ide_drive_t *drive, u8 command)
{
	/* issue cmd to drive */
	ide_execute_command(drive, command, &ide_dma_intr, 2*WAIT_CMD, dma_timer_expiry);
}

void ide_dma_start(ide_drive_t *drive)
{
	ide_hwif_t *hwif	= HWIF(drive);
	u8 dma_cmd		= hwif->INB(hwif->dma_command);

	/* Note that this is done *after* the cmd has
	 * been issued to the drive, as per the BM-IDE spec.
	 * The Promise Ultra33 doesn't work correctly when
	 * we do this part before issuing the drive cmd.
	 */
	/* start DMA */
	hwif->OUTB(dma_cmd|1, hwif->dma_command);
	hwif->dma = 1;
	wmb();
}

EXPORT_SYMBOL_GPL(ide_dma_start);

/* returns 1 on error, 0 otherwise */
int __ide_dma_end (ide_drive_t *drive)
{
	ide_hwif_t *hwif	= HWIF(drive);
	u8 dma_stat = 0, dma_cmd = 0;

	drive->waiting_for_dma = 0;
	/* get dma_command mode */
	dma_cmd = hwif->INB(hwif->dma_command);
	/* stop DMA */
	hwif->OUTB(dma_cmd&~1, hwif->dma_command);
	/* get DMA status */
	dma_stat = hwif->INB(hwif->dma_status);
	/* clear the INTR & ERROR bits */
	hwif->OUTB(dma_stat|6, hwif->dma_status);
	/* purge DMA mappings */
	ide_destroy_dmatable(drive);
	/* verify good DMA status */
	hwif->dma = 0;
	wmb();
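	/*
	 * Good completion has only the interrupt bit (2) set in the low
	 * three status bits: active (0) and error (1) must be clear.
	 */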
	return (dma_stat & 7) != 4 ? (0x10 | dma_stat) : 0;
}

EXPORT_SYMBOL(__ide_dma_end);

/* returns 1 if dma irq issued, 0 otherwise */
static int __ide_dma_test_irq(ide_drive_t *drive)
{
	ide_hwif_t *hwif	= HWIF(drive);
	u8 dma_stat		= hwif->INB(hwif->dma_status);

	/* return 1 if INTR asserted */
	if ((dma_stat & 4) == 4)
		return 1;
	if (!drive->waiting_for_dma)
		printk(KERN_WARNING "%s: (%s) called while not waiting\n",
			drive->name, __FUNCTION__);
	return 0;
}
#else
static inline int config_drive_for_dma(ide_drive_t *drive) { return 0; }
#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */

int __ide_dma_bad_drive (ide_drive_t *drive)
{
	struct hd_driveid *id = drive->id;

	int blacklist = ide_in_drive_list(id, drive_blacklist);
	if (blacklist) {
		printk(KERN_WARNING "%s: Disabling (U)DMA for %s (blacklisted)\n",
				    drive->name, id->model);
		return blacklist;
	}
	return 0;
}

EXPORT_SYMBOL(__ide_dma_bad_drive);

static const u8 xfer_mode_bases[] = {
	XFER_UDMA_0,
	XFER_MW_DMA_0,
	XFER_SW_DMA_0,
};
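/* fastest class first: ide_find_dma_mode() uses the first class that yields a usable mode */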

static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base, u8 req_mode)
{
	struct hd_driveid *id = drive->id;
	ide_hwif_t *hwif = drive->hwif;
	const struct ide_port_ops *port_ops = hwif->port_ops;
	unsigned int mask = 0;

	switch(base) {
	case XFER_UDMA_0:
		if ((id->field_valid & 4) == 0)
			break;

		if (port_ops && port_ops->udma_filter)
			mask = port_ops->udma_filter(drive);
		else
			mask = hwif->ultra_mask;
		mask &= id->dma_ultra;

		/*
		 * avoid false cable warning from eighty_ninty_three()
		 */
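		/* 0x78 covers UDMA3-6; without an 80-wire cable cap the mask at UDMA2 (0x07) */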
		if (req_mode > XFER_UDMA_2) {
			if ((mask & 0x78) && (eighty_ninty_three(drive) == 0))
				mask &= 0x07;
		}
		break;
	case XFER_MW_DMA_0:
		if ((id->field_valid & 2) == 0)
			break;
		if (port_ops && port_ops->mdma_filter)
			mask = port_ops->mdma_filter(drive);
		else
			mask = hwif->mwdma_mask;
		mask &= id->dma_mword;
		break;
	case XFER_SW_DMA_0:
		if (id->field_valid & 2) {
			mask = id->dma_1word & hwif->swdma_mask;
		} else if (id->tDMA) {
			/*
			 * ide_fix_driveid() doesn't convert ->tDMA to the
			 * CPU endianness so we need to do it here
			 */
			u8 mode = le16_to_cpu(id->tDMA);

			/*
			 * if the mode is valid convert it to the mask
			 * (the maximum allowed mode is XFER_SW_DMA_2)
			 */
			if (mode <= 2)
				mask = ((2 << mode) - 1) & hwif->swdma_mask;
		}
		break;
	default:
		BUG();
		break;
	}

	return mask;
}

/**
 *	ide_find_dma_mode	-	compute DMA speed
 *	@drive: IDE device
 *	@req_mode: requested mode
 *
 *	Checks the drive/host capabilities and finds the speed to use for
 *	the DMA transfer.  The speed is then limited by the requested mode.
 *
 *	Returns 0 if the drive/host combination is incapable of DMA transfers
 *	or if the requested mode is not a DMA mode.
 */

u8 ide_find_dma_mode(ide_drive_t *drive, u8 req_mode)
{
	ide_hwif_t *hwif = drive->hwif;
	unsigned int mask;
	int x, i;
	u8 mode = 0;

	if (drive->media != ide_disk) {
		if (hwif->host_flags & IDE_HFLAG_NO_ATAPI_DMA)
			return 0;
	}

	for (i = 0; i < ARRAY_SIZE(xfer_mode_bases); i++) {
		if (req_mode < xfer_mode_bases[i])
			continue;
		mask = ide_get_mode_mask(drive, xfer_mode_bases[i], req_mode);
		x = fls(mask) - 1;
		if (x >= 0) {
			mode = xfer_mode_bases[i] + x;
			break;
		}
	}

	if (hwif->chipset == ide_acorn && mode == 0) {
		/*
		 * is this correct?
		 */
		if (ide_dma_good_drive(drive) && drive->id->eide_dma_time < 150)
			mode = XFER_MW_DMA_1;
	}

	mode = min(mode, req_mode);

	printk(KERN_INFO "%s: %s mode selected\n", drive->name,
			  mode ? ide_xfer_verbose(mode) : "no DMA");

	return mode;
}

EXPORT_SYMBOL_GPL(ide_find_dma_mode);

static int ide_tune_dma(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 speed;

	if (noautodma || drive->nodma || (drive->id->capability & 1) == 0)
		return 0;

	/* consult the list of known "bad" drives */
	if (__ide_dma_bad_drive(drive))
		return 0;

	if (ide_id_dma_bug(drive))
		return 0;

	if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
		return config_drive_for_dma(drive);

	speed = ide_max_dma_mode(drive);

	if (!speed)
		return 0;

	if (ide_set_dma_mode(drive, speed))
		return 0;

	return 1;
}

static int ide_dma_check(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	int vdma = (hwif->host_flags & IDE_HFLAG_VDMA)? 1 : 0;

	if (!vdma && ide_tune_dma(drive))
		return 0;

	/* TODO: always do PIO fallback */
	if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
		return -1;

	ide_set_max_pio(drive);

	return vdma ? 0 : -1;
}

int ide_id_dma_bug(ide_drive_t *drive)
{
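	/*
	 * ATA allows only one transfer mode to be enabled at a time, so
	 * identify data reporting an enabled UDMA mode together with an
	 * enabled MWDMA mode (or MWDMA together with SWDMA) is inconsistent.
	 */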
	struct hd_driveid *id = drive->id;

	if (id->field_valid & 4) {
		if ((id->dma_ultra >> 8) && (id->dma_mword >> 8))
			goto err_out;
	} else if (id->field_valid & 2) {
		if ((id->dma_mword >> 8) && (id->dma_1word >> 8))
			goto err_out;
	}
	return 0;
err_out:
	printk(KERN_ERR "%s: bad DMA info in identify block\n", drive->name);
	return 1;
}

int ide_set_dma(ide_drive_t *drive)
{
	int rc;

	/*
	 * Force DMAing for the beginning of the check.
	 * Some chipsets appear to do interesting
	 * things, if not checked and cleared.
	 *   PARANOIA!!!
	 */
	ide_dma_off_quietly(drive);

	rc = ide_dma_check(drive);
	if (rc)
		return rc;

	ide_dma_on(drive);

	return 0;
}

void ide_check_dma_crc(ide_drive_t *drive)
{
	u8 mode;

	ide_dma_off_quietly(drive);
	drive->crc_count = 0;
	mode = drive->current_speed;
	/*
	 * Don't try non Ultra-DMA modes without iCRC's.  Force the
	 * device to PIO and make the user enable SWDMA/MWDMA modes.
	 */
	if (mode > XFER_UDMA_0 && mode <= XFER_UDMA_7)
		mode--;
	else
		mode = XFER_PIO_4;
	ide_set_xfer_rate(drive, mode);
	if (drive->current_speed >= XFER_SW_DMA_0)
		ide_dma_on(drive);
}

#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
void ide_dma_lost_irq (ide_drive_t *drive)
{
	printk("%s: DMA interrupt recovery\n", drive->name);
}

EXPORT_SYMBOL(ide_dma_lost_irq);

void ide_dma_timeout (ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);

	printk(KERN_ERR "%s: timeout waiting for DMA\n", drive->name);

	if (hwif->ide_dma_test_irq(drive))
		return;

	hwif->ide_dma_end(drive);
}

EXPORT_SYMBOL(ide_dma_timeout);

static void ide_release_dma_engine(ide_hwif_t *hwif)
{
	if (hwif->dmatable_cpu) {
		struct pci_dev *pdev = to_pci_dev(hwif->dev);

		pci_free_consistent(pdev, PRD_ENTRIES * PRD_BYTES,
				    hwif->dmatable_cpu, hwif->dmatable_dma);
		hwif->dmatable_cpu = NULL;
	}
}

static int ide_release_iomio_dma(ide_hwif_t *hwif)
{
	release_region(hwif->dma_base, 8);
	if (hwif->extra_ports)
		release_region(hwif->extra_base, hwif->extra_ports);
	return 1;
}

/*
 * Needed for allowing full modular support of ide-driver
 */
int ide_release_dma(ide_hwif_t *hwif)
{
	ide_release_dma_engine(hwif);

	if (hwif->mmio)
		return 1;
	else
		return ide_release_iomio_dma(hwif);
}

static int ide_allocate_dma_engine(ide_hwif_t *hwif)
{
	struct pci_dev *pdev = to_pci_dev(hwif->dev);

	hwif->dmatable_cpu = pci_alloc_consistent(pdev,
						  PRD_ENTRIES * PRD_BYTES,
						  &hwif->dmatable_dma);

	if (hwif->dmatable_cpu)
		return 0;

	printk(KERN_ERR "%s: -- Error, unable to allocate DMA table.\n",
	       hwif->cds->name);

	return 1;
}

static int ide_mapped_mmio_dma(ide_hwif_t *hwif, unsigned long base)
{
	printk(KERN_INFO "    %s: MMIO-DMA ", hwif->name);

	return 0;
}

static int ide_iomio_dma(ide_hwif_t *hwif, unsigned long base)
{
	printk(KERN_INFO "    %s: BM-DMA at 0x%04lx-0x%04lx",
	       hwif->name, base, base + 7);

	if (!request_region(base, 8, hwif->name)) {
		printk(" -- Error, ports in use.\n");
		return 1;
	}

	if (hwif->cds->extra) {
		hwif->extra_base = base + (hwif->channel ? 8 : 16);

		if (!hwif->mate || !hwif->mate->extra_ports) {
			if (!request_region(hwif->extra_base,
					    hwif->cds->extra, hwif->cds->name)) {
				printk(" -- Error, extra ports in use.\n");
				release_region(base, 8);
				return 1;
			}
			hwif->extra_ports = hwif->cds->extra;
		}
	}

	return 0;
}

static int ide_dma_iobase(ide_hwif_t *hwif, unsigned long base)
{
	if (hwif->mmio)
		return ide_mapped_mmio_dma(hwif, base);

	return ide_iomio_dma(hwif, base);
}

void ide_setup_dma(ide_hwif_t *hwif, unsigned long base)
{
	u8 dma_stat;

	if (ide_dma_iobase(hwif, base))
		return;

	if (ide_allocate_dma_engine(hwif)) {
		ide_release_dma(hwif);
		return;
	}

	hwif->dma_base = base;

	if (!hwif->dma_command)
		hwif->dma_command	= hwif->dma_base + 0;
	if (!hwif->dma_vendor1)
		hwif->dma_vendor1	= hwif->dma_base + 1;
	if (!hwif->dma_status)
		hwif->dma_status	= hwif->dma_base + 2;
	if (!hwif->dma_vendor3)
		hwif->dma_vendor3	= hwif->dma_base + 3;
	if (!hwif->dma_prdtable)
		hwif->dma_prdtable	= hwif->dma_base + 4;

	if (!hwif->dma_host_set)
		hwif->dma_host_set = &ide_dma_host_set;
	if (!hwif->dma_setup)
		hwif->dma_setup = &ide_dma_setup;
	if (!hwif->dma_exec_cmd)
		hwif->dma_exec_cmd = &ide_dma_exec_cmd;
	if (!hwif->dma_start)
		hwif->dma_start = &ide_dma_start;
	if (!hwif->ide_dma_end)
		hwif->ide_dma_end = &__ide_dma_end;
	if (!hwif->ide_dma_test_irq)
		hwif->ide_dma_test_irq = &__ide_dma_test_irq;
	if (!hwif->dma_timeout)
		hwif->dma_timeout = &ide_dma_timeout;
	if (!hwif->dma_lost_irq)
		hwif->dma_lost_irq = &ide_dma_lost_irq;

	dma_stat = hwif->INB(hwif->dma_status);
	printk(KERN_CONT ", BIOS settings: %s:%s, %s:%s\n",
	       hwif->drives[0].name, (dma_stat & 0x20) ? "DMA" : "PIO",
	       hwif->drives[1].name, (dma_stat & 0x40) ? "DMA" : "PIO");
}

EXPORT_SYMBOL_GPL(ide_setup_dma);
#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */