sata_mv.c 71.4 KB
Newer Older
1
2
3
/*
 * sata_mv.c - Marvell SATA support
 *
4
 * Copyright 2005: EMC Corporation, all rights reserved.
5
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

Jeff Garzik's avatar
Jeff Garzik committed
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.

  4) Add NCQ support (easy to intermediate, once new-EH support appears)

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is quite often not
  worth the latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

  13) Verify that 7042 is fully supported.  I only have a 6042.

*/


61
62
63
64
65
66
67
68
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
69
#include <linux/device.h>
70
#include <scsi/scsi_host.h>
71
#include <scsi/scsi_cmnd.h>
Jeff Garzik's avatar
Jeff Garzik committed
72
#include <scsi/scsi_device.h>
73
74
75
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
Jeff Garzik's avatar
Jeff Garzik committed
76
#define DRV_VERSION	"1.01"
77
78
79
80
81
82
83
84
85
86
87
88

enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
89
90
91
92
93
94
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

95
	MV_SATAHC0_REG_BASE	= 0x20000,
96
	MV_FLASH_CTL		= 0x1046c,
97
98
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,
99
100
101
102
103
104

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

105
106
107
108
109
110
111
112
113
114
115
116
117
118
	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 176,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),

119
120
121
	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
122
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
123
124
125
126
127
	MV_PORT_MASK		= 3,

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
128
	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
129
130
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
131
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,
132

133
134
	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
135
136
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
137
138
139
140
141
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
142
143
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */
144
145
146

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

147
148
	/* PCI interface registers */

149
150
	PCI_COMMAND_OFS		= 0xc00,

151
152
153
154
155
	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

156
157
158
159
160
161
162
163
164
165
166
167
168
	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS		= 0x1d58,
	PCI_IRQ_MASK_OFS		= 0x1d5c,
169
170
171
172
173
174
175
176
177
178
179
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
180
181
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
182
183
184
185
186
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
187
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
188
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
189
190
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
191
192
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),
193
194
195
196
197

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
198
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
199
200
201
202
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
203
204
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */
205
206
207
208

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
209
	PHY_MODE3		= 0x310,
210
211
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
212
213
214
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
215
216
217
	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,
218
219
220

	/* Port registers */
	EDMA_CFG_OFS		= 0,
221
222
223
224
225
	EDMA_CFG_Q_DEPTH	= 0,			/* queueing disabled */
	EDMA_CFG_NCQ		= (1 << 5),
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),		/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),		/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),		/* write buffer 512B */
226
227
228

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
229
230
231
232
233
234
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
235
236
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
237
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
238
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
239
240
241
242
243
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */
	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
244
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
245
246
247
248
	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */
	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */
	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
249
250
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),
251
252
253
254
255
256
	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
257
				  EDMA_ERR_CRQB_PAR |
258
259
260
261
262
263
264
265
266
267
268
269
270
271
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,
	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
272
				  EDMA_ERR_CRQB_PAR |
273
274
275
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,
276

277
278
279
280
281
282
283
284
285
286
287
	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

Jeff Garzik's avatar
Jeff Garzik committed
288
289
290
291
	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	ATA_RST			= (1 << 2),	/* reset trans/link/phy */
292

293
	EDMA_IORDY_TMOUT	= 0x34,
294
295
	EDMA_ARB_CFG		= 0x38,

296
297
	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
298
299
300
301
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
302
	MV_HP_ERRATA_XX42A0	= (1 << 5),
Jeff Garzik's avatar
Jeff Garzik committed
303
304
305
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
306

307
	/* Port private flags (pp_flags) */
Jeff Garzik's avatar
Jeff Garzik committed
308
309
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),	/* 1st hard reset complete? */
310
311
};

312
313
#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
314
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
315

Jeff Garzik's avatar
Jeff Garzik committed
316
enum {
317
318
319
320
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill-sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,
Jeff Garzik's avatar
Jeff Garzik committed
321

Jeff Garzik's avatar
Jeff Garzik committed
322
323
324
	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
Jeff Garzik's avatar
Jeff Garzik committed
325
326
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

Jeff Garzik's avatar
Jeff Garzik committed
327
	/* ditto, for response queue */
Jeff Garzik's avatar
Jeff Garzik committed
328
329
330
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

331
332
333
334
335
336
enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
337
338
	chip_6042,
	chip_7042,
339
340
};

341
342
/* Command ReQuest Block: 32B */
struct mv_crqb {
343
344
345
346
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
347
};
348

349
struct mv_crqb_iie {
350
351
352
353
354
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
355
356
};

357
358
/* Command ResPonse Block: 8B */
struct mv_crpb {
359
360
361
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
362
363
};

364
365
/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
366
367
368
369
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
370
};
371

372
373
374
375
376
377
378
struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl;
	dma_addr_t		sg_tbl_dma;
379
380
381
382

	unsigned int		req_idx;
	unsigned int		resp_idx;

383
384
385
	u32			pp_flags;
};

386
387
388
389
390
/* Per-port PHY signal parameters (amplitude / pre-emphasis); filled in
 * by the generation-specific ->read_preamp hook at probe time.
 */
struct mv_port_signal {
	u32			amps;
	u32			pre;
};

391
392
struct mv_host_priv;
struct mv_hw_ops {
393
394
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
395
396
397
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
398
399
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
400
401
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
402
403
};

404
405
struct mv_host_priv {
	u32			hp_flags;
406
	struct mv_port_signal	signal[8];
407
	const struct mv_hw_ops	*ops;
408
409
410
};

static void mv_irq_clear(struct ata_port *ap);
411
412
413
414
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
415
416
417
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
418
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
419
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
420
421
422
423
static void mv_error_handler(struct ata_port *ap);
static void mv_post_int_cmd(struct ata_queued_cmd *qc);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
Jeff Garzik's avatar
Jeff Garzik committed
424
static int mv_slave_config(struct scsi_device *sdev);
425
426
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

427
428
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
429
430
431
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
432
433
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
434
435
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
436

437
438
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
439
440
441
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
442
443
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
444
445
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
446
447
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
448

449
450
451
452
453
454
455
static struct scsi_host_template mv5_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
456
	.sg_tablesize		= MV_MAX_SG_CT / 2,
457
458
459
460
461
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
Jeff Garzik's avatar
Jeff Garzik committed
462
	.slave_configure	= mv_slave_config,
463
464
465
466
467
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static struct scsi_host_template mv6_sht = {
468
469
470
471
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
472
	.can_queue		= ATA_DEF_QUEUE,
473
	.this_id		= ATA_SHT_THIS_ID,
474
	.sg_tablesize		= MV_MAX_SG_CT / 2,
475
476
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
477
	.use_clustering		= 1,
478
479
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
Jeff Garzik's avatar
Jeff Garzik committed
480
	.slave_configure	= mv_slave_config,
Tejun Heo's avatar
Tejun Heo committed
481
	.slave_destroy		= ata_scsi_slave_destroy,
482
483
484
	.bios_param		= ata_std_bios_param,
};

485
486
487
488
489
490
491
492
493
static const struct ata_port_operations mv5_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

494
	.cable_detect		= ata_cable_sata,
495
496
497

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
Tejun Heo's avatar
Tejun Heo committed
498
	.data_xfer		= ata_data_xfer,
499
500

	.irq_clear		= mv_irq_clear,
501
502
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
503

504
505
506
507
508
	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

509
510
511
512
513
514
515
516
	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_operations mv6_ops = {
517
518
519
520
521
522
523
524
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

525
	.cable_detect		= ata_cable_sata,
526

527
528
	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
Tejun Heo's avatar
Tejun Heo committed
529
	.data_xfer		= ata_data_xfer,
530
531

	.irq_clear		= mv_irq_clear,
532
533
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
534

535
536
537
538
539
	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

540
541
542
	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

543
544
	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
545
546
};

547
548
549
550
551
552
553
554
555
static const struct ata_port_operations mv_iie_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

556
	.cable_detect		= ata_cable_sata,
557
558
559

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
Tejun Heo's avatar
Tejun Heo committed
560
	.data_xfer		= ata_data_xfer,
561
562

	.irq_clear		= mv_irq_clear,
563
564
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,
565

566
567
568
569
570
	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

571
572
573
574
575
576
577
	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

578
static const struct ata_port_info mv_port_info[] = {
579
	{  /* chip_504x */
Jeff Garzik's avatar
Jeff Garzik committed
580
		.flags		= MV_COMMON_FLAGS,
581
		.pio_mask	= 0x1f,	/* pio0-4 */
582
		.udma_mask	= ATA_UDMA6,
583
		.port_ops	= &mv5_ops,
584
585
	},
	{  /* chip_508x */
586
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
587
		.pio_mask	= 0x1f,	/* pio0-4 */
588
		.udma_mask	= ATA_UDMA6,
589
		.port_ops	= &mv5_ops,
590
	},
591
	{  /* chip_5080 */
592
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
593
		.pio_mask	= 0x1f,	/* pio0-4 */
594
		.udma_mask	= ATA_UDMA6,
595
		.port_ops	= &mv5_ops,
596
	},
597
	{  /* chip_604x */
598
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
599
		.pio_mask	= 0x1f,	/* pio0-4 */
600
		.udma_mask	= ATA_UDMA6,
601
		.port_ops	= &mv6_ops,
602
603
	},
	{  /* chip_608x */
604
605
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  MV_FLAG_DUAL_HC,
606
		.pio_mask	= 0x1f,	/* pio0-4 */
607
		.udma_mask	= ATA_UDMA6,
608
		.port_ops	= &mv6_ops,
609
	},
610
	{  /* chip_6042 */
611
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
612
		.pio_mask	= 0x1f,	/* pio0-4 */
613
		.udma_mask	= ATA_UDMA6,
614
615
616
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
617
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
618
		.pio_mask	= 0x1f,	/* pio0-4 */
619
		.udma_mask	= ATA_UDMA6,
620
621
		.port_ops	= &mv_iie_ops,
	},
622
623
};

624
static const struct pci_device_id mv_pci_tbl[] = {
625
626
627
628
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
629
630
631
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },
632
633
634
635
636
637
638
639
640

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

641
642
643
	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

644
645
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

Morrison, Tom's avatar
Morrison, Tom committed
646
647
648
	/* add Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

649
	{ }			/* terminate list */
650
651
652
653
654
655
656
657
658
};

/* PCI driver glue: probe via mv_init_one, teardown via generic libata. */
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};

659
660
661
662
663
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
664
665
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
666
667
668
669
670
671
672
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
673
674
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
675
676
};

677
678
679
680
681
682
/*
 * module options
 */
static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */


683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
/* move to PCI layer or libata core? */
/* move to PCI layer or libata core? */
/*
 * Configure the widest DMA masks the platform accepts: prefer 64-bit
 * streaming + consistent, falling back to a 32-bit consistent mask and
 * finally to fully 32-bit operation.  Returns 0 on success or the
 * negative errno from the failing pci_set_*_dma_mask() call.
 */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		/* 64-bit streaming OK; try a matching consistent mask,
		 * then degrade the consistent mask alone to 32-bit.
		 */
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (!rc)
			return 0;
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc)
			dev_printk(KERN_ERR, &pdev->dev,
				   "64-bit DMA enable failed\n");
		return rc;
	}

	/* 64-bit addressing unavailable: fall back to 32-bit throughout */
	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "32-bit DMA enable failed\n");
		return rc;
	}
	rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
	if (rc)
		dev_printk(KERN_ERR, &pdev->dev,
			   "32-bit consistent DMA enable failed\n");
	return rc;
}

716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
/*
 * Functions
 */

/* Write a register, then read it back so the posted PCI write is
 * flushed before the caller proceeds ("writel-flush").
 */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

/* Return the MMIO base of SATA host controller @hc within chip @base. */
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
/* Map a 0-7 chip port number to its host controller index (0 or 1). */
static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

/* Map a 0-7 chip port number to its hard port (0-3) within one HC. */
static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

/* Return the MMIO base of the host controller that serves @port. */
static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

747
748
/* Return the per-port register window for @port: the owning HC's base,
 * past the arbiter window, plus the hard-port's own window.
 */
static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return  mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

/* Return the per-port register window for libata port @ap. */
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}

Jeff Garzik's avatar
Jeff Garzik committed
759
static inline int mv_get_hc_count(unsigned long port_flags)
760
{
Jeff Garzik's avatar
Jeff Garzik committed
761
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
762
763
764
}

/* Intentionally empty ->irq_clear hook: interrupt acknowledgment for
 * this hardware is handled elsewhere in the driver, not via the
 * generic libata irq_clear path.
 */
static void mv_irq_clear(struct ata_port *ap)
{
}

Jeff Garzik's avatar
Jeff Garzik committed
768
769
770
771
772
773
774
775
776
777
778
/*
 * Per-device SCSI configuration: run the standard libata setup, then
 * cap physical segments per request at MV_MAX_SG_CT / 2, leaving room
 * for the per-segment splitting done on length in mv_fill_sg()
 * (see the MV_DMA_BOUNDARY note above).
 */
static int mv_slave_config(struct scsi_device *sdev)
{
	int rc = ata_scsi_slave_config(sdev);
	if (rc)
		return rc;

	blk_queue_max_phys_segments(sdev->request_queue, MV_MAX_SG_CT / 2);

	return 0;	/* scsi layer doesn't check return value, sigh */
}

779
780
781
782
/**
 *      mv_set_edma_ptrs - program EDMA queue base and in/out pointers
 *      @port_mmio: per-port MMIO base
 *      @hpriv: host private data (consulted for errata flags)
 *      @pp: port private data holding CRQB/CRPB DMA addresses/indices
 *
 *      Loads the hardware request and response queue registers from
 *      the software state in @pp.  On chips with the XX42A0 erratum
 *      the low 32 bits of the queue base are written together with
 *      the index into the out/in pointer registers.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);	/* CRQB needs 1KB alignment */
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);	/* CRPB needs 256B alignment */
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

819
820
821
822
823
/**
 *      mv_start_dma - Enable eDMA engine
 *      @base: port base address
 *      @pp: port private data
 *
824
825
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
826
827
828
829
 *
 *      LOCKING:
 *      Inherited from caller.
 */
830
831
static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
			 struct mv_port_priv *pp)
832
{
833
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
834
835
836
837
838
		/* clear EDMA event indicators, if any */
		writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(base, hpriv, pp);

839
840
841
		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
842
	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
843
844
}

845
/**
Jeff Garzik's avatar
Jeff Garzik committed
846
 *      __mv_stop_dma - Disable eDMA engine
847
848
 *      @ap: ATA channel to manipulate
 *
849
850
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
851
852
853
854
 *
 *      LOCKING:
 *      Inherited from caller.
 */
Jeff Garzik's avatar
Jeff Garzik committed
855
static int __mv_stop_dma(struct ata_port *ap)
856
{
857
858
859
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp	= ap->private_data;
	u32 reg;
860
	int i, err = 0;
861

862
	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
863
		/* Disable EDMA if active.   The disable bit auto clears.
864
865
866
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
867
	} else {
868
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
869
  	}
870

871
872
873
	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
874
		if (!(reg & EDMA_EN))
875
			break;
876

877
878
879
		udelay(100);
	}

880
	if (reg & EDMA_EN) {
881
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
882
		err = -EIO;
883
	}
884
885

	return err;
886
887
}

Jeff Garzik's avatar
Jeff Garzik committed
888
889
890
891
892
893
894
895
896
897
898
899
/*
 * Locked wrapper around __mv_stop_dma: takes the host lock so EDMA
 * shutdown cannot race with code running under the same lock, then
 * returns __mv_stop_dma's result (0 or -EIO).
 */
static int mv_stop_dma(struct ata_port *ap)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ap->host->lock, flags);
	rc = __mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	return rc;
}

Jeff Garzik's avatar
Jeff Garzik committed
900
#ifdef ATA_DEBUG
901
/* Debug helper: hex-dump @bytes of MMIO space at @start, 4 words/line. */
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
Jeff Garzik's avatar
Jeff Garzik committed
913
914
#endif

915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
/* Debug helper: hex-dump the first @bytes of @pdev's PCI config space,
 * four dwords per line.  Compiled out unless ATA_DEBUG is defined.
 */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int off = 0;
	u32 dw;

	while (off < bytes) {
		int col;

		DPRINTK("%02x: ", off);
		for (col = 0; col < 4 && off < bytes; col++) {
			(void) pci_read_config_dword(pdev, off, &dw);
			printk("%08x ", dw);
			off += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
/* Debug helper: dump PCI config space, PCI regs, and the HC/EDMA/SATA
 * register blocks for one port (@port >= 0) or all ports (@port < 0).
 * Compiled out unless ATA_DEBUG is defined.
 */
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n",p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n",p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

/* Translate a libata SCR register index into this chip's per-port
 * register offset; 0xffffffffU signals an unsupported register.
 */
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		/* status/error/control live consecutively after 0x300 */
		return SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
	case SCR_ACTIVE:
		return SATA_ACTIVE_OFS;	/* active is not with the others */
	default:
		return 0xffffffffU;
	}
}

996
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
997
998
999
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

1000
	if (ofs != 0xffffffffU) {
For faster browsing, not all history is shown. View entire blame