/* drivers/net/ethernet/freescale/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 *  Gianfar:  AKA Lambda Draconis, "Dragon"
 *  RA 11 31 24.2
 *  Dec +69 19 52
 *  V 3.84
 *  B-V +1.62
 *
 *  Theory of operation
 *
 *  The driver is initialized through of_device. Configuration information
 *  is therefore conveyed through an OF-style device tree.
 *
 *  The Gianfar Ethernet Controller uses a ring of buffer
 *  descriptors.  The beginning is indicated by a register
 *  pointing to the physical address of the start of the ring.
 *  The end is determined by a "wrap" bit being set in the
 *  last descriptor of the ring.
 *
 *  When a packet is received, the RXF bit in the
 *  IEVENT register is set, triggering an interrupt when the
 *  corresponding bit in the IMASK register is also set (if
 *  interrupt coalescing is active, then the interrupt may not
 *  happen immediately, but will wait until either a set number
 *  of frames or amount of time has passed).  In NAPI, the
 *  interrupt handler will signal there is work to be done, and
 *  exit. This method will start at the last known empty
 *  descriptor, and process every subsequent descriptor until there
 *  are none left with data (NAPI will stop after a set number of
 *  packets to give time to other tasks, but will eventually
 *  process all the packets).  The data arrives inside a
 *  pre-allocated skb, and so after the skb is passed up to the
 *  stack, a new skb must be allocated, and the address field in
 *  the buffer descriptor must be updated to indicate this new
 *  skb.
 *
 *  When the kernel requests that a packet be transmitted, the
 *  driver starts where it left off last time, and points the
 *  descriptor at the buffer which was passed in.  The driver
 *  then informs the DMA engine that there are packets ready to
 *  be transmitted.  Once the controller is finished transmitting
 *  the packet, an interrupt may be triggered (under the same
 *  conditions as for reception, but depending on the TXF bit).
 *  The driver then cleans up the buffer.
 */
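
/* As a rough illustration of the receive path described above, the core
 * of the descriptor-ring walk reduces to something like the following
 * (pseudo-code sketch only -- the real logic, including FCB handling,
 * skb replenishing and statistics, lives in gfar_clean_rx_ring() below):
 *
 *	bdp = rx_queue->cur_rx;
 *	while (!(bdp->status & RXBD_EMPTY) && rx_work_limit--) {
 *		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
 *		... pass skb up the stack, attach a fresh skb and
 *		    mark the descriptor empty again ...
 *		bdp = (bdp->status & RXBD_WRAP) ? rx_queue->rx_bd_base
 *						: bdp + 1;
 *	}
 */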

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DEBUG

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/net_tstamp.h>

#include <asm/io.h>
#include <asm/reg.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>

#include "gianfar.h"

#define TX_TIMEOUT      (1*HZ)

const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			   struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *ofdev);
static int gfar_remove(struct platform_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			      int amount_pull, struct napi_struct *napi);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = buf;

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	/* Order the buffer pointer store before the status store, so the
	 * controller never sees an empty BD with a stale buffer pointer.
	 */
	eieio();

	bdp->lstatus = lstatus;
}

static int gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	int i, j;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status |= TXBD_WRAP;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->cur_rx = rx_queue->rx_bd_base;
		rx_queue->skb_currx = 0;
		rxbdp = rx_queue->rx_bd_base;

		for (j = 0; j < rx_queue->rx_ring_size; j++) {
			struct sk_buff *skb = rx_queue->rx_skbuff[j];

			if (skb) {
				gfar_init_rxbdp(rx_queue, rxbdp,
						rxbdp->bufPtr);
			} else {
				skb = gfar_new_skb(ndev);
				if (!skb) {
					netdev_err(ndev, "Can't allocate RX buffers\n");
					return -ENOMEM;
				}
				rx_queue->rx_skbuff[j] = skb;

				gfar_new_rxbdp(rx_queue, rxbdp, skb);
			}

			rxbdp++;
		}
	}

	return 0;
}

static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j, k;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = &priv->ofdev->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
			sizeof(struct txbd8) * priv->total_tx_ring_size +
			sizeof(struct rxbd8) * priv->total_rx_ring_size,
			&addr, GFP_KERNEL);
	if (!vaddr) {
		netif_err(priv, ifup, ndev,
			  "Could not allocate buffer descriptors!\n");
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr  += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->dev = ndev;
		addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}

	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff =
			kmalloc_array(tx_queue->tx_ring_size,
				      sizeof(*tx_queue->tx_skbuff),
				      GFP_KERNEL);
		if (!tx_queue->tx_skbuff)
			goto cleanup;

		for (k = 0; k < tx_queue->tx_ring_size; k++)
			tx_queue->tx_skbuff[k] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_skbuff =
			kmalloc_array(rx_queue->rx_ring_size,
				      sizeof(*rx_queue->rx_skbuff),
				      GFP_KERNEL);
		if (!rx_queue->rx_skbuff)
			goto cleanup;

		for (j = 0; j < rx_queue->rx_ring_size; j++)
			rx_queue->rx_skbuff[j] = NULL;
	}

	if (gfar_init_bds(ndev))
		goto cleanup;

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}

static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;	/* tbaseN/rbaseN regs sit on 64-bit boundaries */
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}

static void gfar_init_mac(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;
	u32 tctrl = 0;
	u32 attrs = 0;

	/* write the tx/rx base registers */
	gfar_init_tx_rx_base(priv);

	/* Configure the coalescing support */
	gfar_configure_coalescing(priv, 0xFF, 0xFF);

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN;
		/* Program the RIR0 reg with the required distribution */
		gfar_write(&regs->rir0, DEFAULT_RIR0);
	}

	/* Restore PROMISC mode */
	if (ndev->flags & IFF_PROMISC)
		rctrl |= RCTRL_PROM;

	if (ndev->features & NETIF_F_RXCSUM)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash) {
		rctrl |= RCTRL_EXTHASH;

		gfar_clear_exact_match(ndev);
		rctrl |= RCTRL_EMEN;
	}

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Insert receive time stamps into padding alignment bytes */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(8);
		priv->padding = 8;
	}

	/* Enable HW time stamping if requested from user space */
	if (priv->hwts_rx_en)
		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;

	if (ndev->features & NETIF_F_HW_VLAN_RX)
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);

	if (ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	if (priv->prio_sched_en) {
		tctrl |= TCTRL_TXSCHED_PRIO;
	} else {
		tctrl |= TCTRL_TXSCHED_WRRS;
		gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
		gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
	}

	gfar_write(&regs->tctrl, tctrl);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&regs->attreli, attrs);

	/* Start with defaults, and add stashing or locking
	 * depending on the appropriate variables
	 */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&regs->attr, attrs);

	gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
	gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
	gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
}

static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
	unsigned long tx_packets = 0, tx_bytes = 0;
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_packets += priv->rx_queue[i]->stats.rx_packets;
		rx_bytes   += priv->rx_queue[i]->stats.rx_bytes;
		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
	}

	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_bytes   = rx_bytes;
	dev->stats.rx_dropped = rx_dropped;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
		tx_packets += priv->tx_queue[i]->stats.tx_packets;
	}

	dev->stats.tx_bytes   = tx_bytes;
	dev->stats.tx_packets = tx_packets;

	return &dev->stats;
}

static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_features = gfar_set_features,
	.ndo_set_rx_mode = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_get_stats = gfar_get_stats,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};

void lock_rx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_lock(&priv->rx_queue[i]->rxlock);
}

void lock_tx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_lock(&priv->tx_queue[i]->txlock);
}

void unlock_rx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_unlock(&priv->rx_queue[i]->rxlock);
}

void unlock_tx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_unlock(&priv->tx_queue[i]->txlock);
}

static bool gfar_is_vlan_on(struct gfar_private *priv)
{
	return (priv->ndev->features & NETIF_F_HW_VLAN_RX) ||
	       (priv->ndev->features & NETIF_F_HW_VLAN_TX);
}

/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
	return gfar_is_vlan_on(priv) ||
	       (priv->ndev->features & NETIF_F_RXCSUM) ||
	       (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
}

static void free_tx_pointers(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void free_rx_pointers(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void free_gfar_dev(struct gfar_private *priv)
{
	int i, j;

	for (i = 0; i < priv->num_grps; i++)
		for (j = 0; j < GFAR_NUM_IRQS; j++) {
			kfree(priv->gfargrp[i].irqinfo[j]);
			priv->gfargrp[i].irqinfo[j] = NULL;
		}

	free_netdev(priv->ndev);
}

static void disable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++)
		napi_disable(&priv->gfargrp[i].napi);
}

static void enable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++)
		napi_enable(&priv->gfargrp[i].napi);
}

static int gfar_parse_group(struct device_node *np,
			    struct gfar_private *priv, const char *model)
{
	struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
	u32 *queue_mask;
	int i;

	for (i = 0; i < GFAR_NUM_IRQS; i++) {
		grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
					  GFP_KERNEL);
		if (!grp->irqinfo[i])
			return -ENOMEM;
	}

	grp->regs = of_iomap(np, 0);
	if (!grp->regs)
		return -ENOMEM;

	gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
		gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
		if (gfar_irq(grp, TX)->irq == NO_IRQ ||
		    gfar_irq(grp, RX)->irq == NO_IRQ ||
		    gfar_irq(grp, ER)->irq == NO_IRQ)
			return -EINVAL;
	}

	grp->grp_id = priv->num_grps;
	grp->priv = priv;
	spin_lock_init(&grp->grplock);
	if (priv->mode == MQ_MG_MODE) {
		queue_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
		grp->rx_bit_map = queue_mask ?
			*queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
		queue_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
		grp->tx_bit_map = queue_mask ?
			*queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
	} else {
		grp->rx_bit_map = 0xFF;
		grp->tx_bit_map = 0xFF;
	}
	priv->num_grps++;

	return 0;
}

static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
	const char *model;
	const char *ctype;
	const void *mac_addr;
	int err = 0, i;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->dev.of_node;
	struct device_node *child = NULL;
	const u32 *stash;
	const u32 *stash_len;
	const u32 *stash_idx;
	unsigned int num_tx_qs, num_rx_qs;
	u32 *tx_queues, *rx_queues;

	if (!np || !of_device_is_available(np))
		return -ENODEV;

	/* parse the num of tx and rx queues */
	tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
	num_tx_qs = tx_queues ? *tx_queues : 1;

	if (num_tx_qs > MAX_TX_QS) {
		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
		       num_tx_qs, MAX_TX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
	num_rx_qs = rx_queues ? *rx_queues : 1;

	if (num_rx_qs > MAX_RX_QS) {
		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
		       num_rx_qs, MAX_RX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (!dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->ndev = dev;

	priv->num_tx_queues = num_tx_qs;
	netif_set_real_num_rx_queues(dev, num_rx_qs);
	priv->num_rx_queues = num_rx_qs;
	priv->num_grps = 0x0;

	/* Init Rx queue filer rule set linked list */
	INIT_LIST_HEAD(&priv->rx_list.list);
	priv->rx_list.count = 0;
	mutex_init(&priv->rx_queue_access);

	model = of_get_property(np, "model", NULL);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;

	/* Parse and initialize group specific information */
	if (of_device_is_compatible(np, "fsl,etsec2")) {
		priv->mode = MQ_MG_MODE;
		for_each_child_of_node(np, child) {
			err = gfar_parse_group(child, priv, model);
			if (err)
				goto err_grp_init;
		}
	} else {
		priv->mode = SQ_SG_MODE;
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	for (i = 0; i < priv->num_tx_queues; i++)
		priv->tx_queue[i] = NULL;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->rx_queue[i] = NULL;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
					    GFP_KERNEL);
		if (!priv->tx_queue[i]) {
			err = -ENOMEM;
			goto tx_alloc_failed;
		}
		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = dev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
					    GFP_KERNEL);
		if (!priv->rx_queue[i]) {
			err = -ENOMEM;
			goto rx_alloc_failed;
		}
		priv->rx_queue[i]->rx_skbuff = NULL;
		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->dev = dev;
		spin_lock_init(&(priv->rx_queue[i]->rxlock));
	}

	stash = of_get_property(np, "bd-stash", NULL);

	if (stash) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	stash_len = of_get_property(np, "rx-stash-len", NULL);

	if (stash_len)
		priv->rx_stash_size = *stash_len;

	stash_idx = of_get_property(np, "rx-stash-idx", NULL);

	if (stash_idx)
		priv->rx_stash_index = *stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	mac_addr = of_get_mac_address(np);

	if (mac_addr)
		memcpy(dev->dev_addr, mac_addr, ETH_ALEN);

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
				     FSL_GIANFAR_DEV_HAS_COALESCE |
				     FSL_GIANFAR_DEV_HAS_RMON |
				     FSL_GIANFAR_DEV_HAS_MULTI_INTR;

	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
				     FSL_GIANFAR_DEV_HAS_COALESCE |
				     FSL_GIANFAR_DEV_HAS_RMON |
				     FSL_GIANFAR_DEV_HAS_MULTI_INTR |
				     FSL_GIANFAR_DEV_HAS_PADDING |
				     FSL_GIANFAR_DEV_HAS_CSUM |
				     FSL_GIANFAR_DEV_HAS_VLAN |
				     FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
				     FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
				     FSL_GIANFAR_DEV_HAS_TIMER;

	ctype = of_get_property(np, "phy-connection-type", NULL);

	/* We only care about rgmii-id.  The rest are autodetected */
	if (ctype && !strcmp(ctype, "rgmii-id"))
		priv->interface = PHY_INTERFACE_MODE_RGMII_ID;
	else
		priv->interface = PHY_INTERFACE_MODE_MII;

	if (of_get_property(np, "fsl,magic-packet", NULL))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* Find the TBI PHY.  If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

rx_alloc_failed:
	free_rx_pointers(priv);
tx_alloc_failed:
	free_tx_pointers(priv);
err_grp_init:
	unmap_group_regs(priv);
	free_gfar_dev(priv);
	return err;
}
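
/* For reference, the device-tree layout this parser expects looks
 * roughly like the sketch below (hypothetical values; see the
 * fsl,etsec2/gianfar bindings and the board dts files for the real
 * thing):
 *
 *	ethernet@24000 {
 *		compatible = "fsl,etsec2";
 *		model = "eTSEC";
 *		fsl,num_tx_queues = <8>;
 *		fsl,num_rx_queues = <8>;
 *		phy-handle = <&phy0>;
 *		queue-group@24000 {
 *			reg = <0x24000 0x1000>;
 *			fsl,rx-bit-map = <0xff>;
 *			fsl,tx-bit-map = <0xff>;
 *		};
 *	};
 */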

static int gfar_hwtstamp_ioctl(struct net_device *netdev,
			       struct ifreq *ifr, int cmd)
{
	struct hwtstamp_config config;
	struct gfar_private *priv = netdev_priv(netdev);

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		priv->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		if (priv->hwts_rx_en) {
			stop_gfar(netdev);
			priv->hwts_rx_en = 0;
			startup_gfar(netdev);
		}
		break;
	default:
		if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
			return -ERANGE;
		if (!priv->hwts_rx_en) {
			stop_gfar(netdev);
			priv->hwts_rx_en = 1;
			startup_gfar(netdev);
		}
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
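
/* For reference, user space reaches gfar_hwtstamp_ioctl() through the
 * SIOCSHWTSTAMP ioctl; a minimal, hypothetical caller would look like:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 */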

/* Ioctl MII Interface */
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct gfar_private *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	if (cmd == SIOCSHWTSTAMP)
		return gfar_hwtstamp_ioctl(dev, rq, cmd);

	if (!priv->phydev)
		return -ENODEV;

	return phy_mii_ioctl(priv->phydev, rq, cmd);
}

/* Reverse the bits of a queue bit map, e.g. for max_qs = 8,
 * reverse_bitmap(0x01, 8) returns 0x80.
 */
static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
{
	unsigned int new_bit_map = 0x0;
	int mask = 0x1 << (max_qs - 1), i;

	for (i = 0; i < max_qs; i++) {
		if (bit_map & mask)
			new_bit_map = new_bit_map + (1 << i);
		mask = mask >> 0x1;
	}
	return new_bit_map;
}

/* Write one four-entry filer cluster for the given protocol @class,
 * filling the table downwards from index @rqfar; the new lowest used
 * index is returned.
 */
static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
				   u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}

static void gfar_init_filer_table(struct gfar_private *priv)
{
	int i = 0x0;
	u32 rqfar = MAX_FILER_IDX;
	u32 rqfcr = 0x0;
	u32 rqfpr = FPR_FILER_MASK;

	/* Default rule */
	rqfcr = RQFCR_CMP_MATCH;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

	/* cur_filer_idx indicates the first non-masked rule */
	priv->cur_filer_idx = rqfar;

	/* Rest are masked rules */
	rqfcr = RQFCR_CMP_NOMATCH;
	for (i = 0; i < rqfar; i++) {
		priv->ftp_rqfcr[i] = rqfcr;
		priv->ftp_rqfpr[i] = rqfpr;
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}
}
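
/* The resulting filer table, from MAX_FILER_IDX downwards, is roughly
 * (a sketch; absolute indices depend on MAX_FILER_IDX):
 *
 *	[MAX_FILER_IDX]          default match-all rule
 *	[six 4-entry clusters]   IPv6, IPv6|UDP, IPv6|TCP,
 *	                         IPv4, IPv4|UDP, IPv4|TCP
 *	[0 .. cur_filer_idx-1]   masked no-match rules
 */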

static void gfar_detect_errata(struct gfar_private *priv)
{
	struct device *dev = &priv->ofdev->dev;
	unsigned int pvr = mfspr(SPRN_PVR);
	unsigned int svr = mfspr(SPRN_SVR);
	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
	unsigned int rev = svr & 0xffff;

	/* MPC8313 Rev 2.0 and higher; All MPC837x */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_74;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_76;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_A002;

	/* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) ||
	    (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020))
		priv->errata |= GFAR_ERRATA_12;

	if (priv->errata)
		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
			 priv->errata);
}

/* Set up the ethernet device structure, private data,
 * and anything else we need before we start
 */
static int gfar_probe(struct platform_device *ofdev)
{
	u32 tempval;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct gfar __iomem *regs = NULL;
	int err = 0, i, grp_idx = 0;
	u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
	u32 isrg = 0;
	u32 __iomem *baddr;

	err = gfar_of_init(ofdev, &dev);

	if (err)
		return err;

	priv = netdev_priv(dev);