/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/slab.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4_en.h"
#include "en_port.h"

static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
Eli Cohen's avatar
Eli Cohen committed
53
	int idx;
54

Jiri Pirko's avatar
Jiri Pirko committed
55
	en_dbg(HW, priv, "adding VLAN:%d\n", vid);
56

Jiri Pirko's avatar
Jiri Pirko committed
57
	set_bit(vid, priv->active_vlans);
58
59
60
61

	/* Add VID to port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (mdev->device_up && priv->port_up) {
Jiri Pirko's avatar
Jiri Pirko committed
62
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
63
		if (err)
64
			en_err(priv, "Failed configuring VLAN filter\n");
65
	}
Eli Cohen's avatar
Eli Cohen committed
66
67
	if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
		en_err(priv, "failed adding vlan %d\n", vid);
68
	mutex_unlock(&mdev->state_lock);
Eli Cohen's avatar
Eli Cohen committed
69

70
71
72
73
74
75
76
}

static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
Eli Cohen's avatar
Eli Cohen committed
77
	int idx;
78

Jiri Pirko's avatar
Jiri Pirko committed
79
	en_dbg(HW, priv, "Killing VID:%d\n", vid);
80

Jiri Pirko's avatar
Jiri Pirko committed
81
	clear_bit(vid, priv->active_vlans);
82
83
84

	/* Remove VID from port VLAN filter */
	mutex_lock(&mdev->state_lock);
Eli Cohen's avatar
Eli Cohen committed
85
86
87
88
89
	if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx))
		mlx4_unregister_vlan(mdev->dev, priv->port, idx);
	else
		en_err(priv, "could not find vid %d in cache\n", vid);

90
	if (mdev->device_up && priv->port_up) {
Jiri Pirko's avatar
Jiri Pirko committed
91
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
92
		if (err)
93
			en_err(priv, "Failed configuring VLAN filter\n");
94
95
96
97
	}
	mutex_unlock(&mdev->state_lock);
}

98
/* Pack a 6-byte Ethernet address into a u64 in big-endian byte order
 * (addr[0] ends up in the most significant populated byte). */
u64 mlx4_en_mac_to_u64(u8 *addr)
{
	u64 mac = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		mac <<= 8;
		mac |= addr[i];
	}
	return mac;
}

/* ndo_set_mac_address callback: validate and store the new address in
 * software, then defer the HW MAC replacement to the mac_task worker.
 * Returns 0 on success or -EADDRNOTAVAIL for an invalid address. */
static int mlx4_en_set_mac(struct net_device *dev, void *addr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	/* Update the software copies; the HW update happens in mac_task. */
	memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
	priv->mac = mlx4_en_mac_to_u64(dev->dev_addr);
	queue_work(priv->mdev->workqueue, &priv->mac_task);
	return 0;
}

static void mlx4_en_do_set_mac(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 mac_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		/* Remove old MAC and insert the new one */
135
136
		err = mlx4_replace_mac(mdev->dev, priv->port,
				       priv->base_qpn, priv->mac, 0);
137
		if (err)
138
			en_err(priv, "Failed changing HW MAC address\n");
139
	} else
140
141
		en_dbg(HW, priv, "Port is down while "
				 "registering mac, exiting...\n");
142
143
144
145
146
147
148
149

	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_clear_list(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

150
151
	kfree(priv->mc_addrs);
	priv->mc_addrs_cnt = 0;
152
153
154
155
156
}

static void mlx4_en_cache_mclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
157
	struct netdev_hw_addr *ha;
158
159
160
161
162
163
164
165
	char *mc_addrs;
	int mc_addrs_cnt = netdev_mc_count(dev);
	int i;

	mc_addrs = kmalloc(mc_addrs_cnt * ETH_ALEN, GFP_ATOMIC);
	if (!mc_addrs) {
		en_err(priv, "failed to allocate multicast list\n");
		return;
166
	}
167
	i = 0;
168
169
	netdev_for_each_mc_addr(ha, dev)
		memcpy(mc_addrs + i++ * ETH_ALEN, ha->addr, ETH_ALEN);
170
171
	priv->mc_addrs = mc_addrs;
	priv->mc_addrs_cnt = mc_addrs_cnt;
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
}


/* ndo_set_rx_mode callback: runs in atomic context, so only schedule the
 * mcast_task worker to do the actual (sleeping) filter reconfiguration.
 * Nothing to do while the port is down. */
static void mlx4_en_set_multicast(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (priv->port_up)
		queue_work(priv->mdev->workqueue, &priv->mcast_task);
}

static void mlx4_en_do_set_multicast(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 mcast_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;
	u64 mcast_addr = 0;
192
	u8 mc_list[16] = {0};
193
194
195
196
	int err;

	mutex_lock(&mdev->state_lock);
	if (!mdev->device_up) {
197
198
		en_dbg(HW, priv, "Card is not up, "
				 "ignoring multicast change.\n");
199
200
201
		goto out;
	}
	if (!priv->port_up) {
202
203
		en_dbg(HW, priv, "Port is down, "
				 "ignoring  multicast change.\n");
204
205
206
207
208
209
210
211
212
213
		goto out;
	}

	/*
	 * Promsicuous mode: disable all filters
	 */

	if (dev->flags & IFF_PROMISC) {
		if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
			if (netif_msg_rx_status(priv))
214
				en_warn(priv, "Entering promiscuous mode\n");
215
216
217
			priv->flags |= MLX4_EN_FLAG_PROMISC;

			/* Enable promiscouos mode */
218
219
			if (!(mdev->dev->caps.flags &
						MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
220
221
222
223
224
				err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
							     priv->base_qpn, 1);
			else
				err = mlx4_unicast_promisc_add(mdev->dev, priv->base_qpn,
							       priv->port);
225
			if (err)
226
				en_err(priv, "Failed enabling "
Lucas De Marchi's avatar
Lucas De Marchi committed
227
					     "promiscuous mode\n");
228
229
230
231
232

			/* Disable port multicast filter (unconditionally) */
			err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
						  0, MLX4_MCAST_DISABLE);
			if (err)
233
234
				en_err(priv, "Failed disabling "
					     "multicast filter\n");
235

236
237
238
239
240
241
242
243
244
			/* Add the default qp number as multicast promisc */
			if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
				err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
								 priv->port);
				if (err)
					en_err(priv, "Failed entering multicast promisc mode\n");
				priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			}

Jiri Pirko's avatar
Jiri Pirko committed
245
246
247
248
			/* Disable port VLAN filter */
			err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
			if (err)
				en_err(priv, "Failed disabling VLAN filter\n");
249
250
251
252
253
		}
		goto out;
	}

	/*
Lucas De Marchi's avatar
Lucas De Marchi committed
254
	 * Not in promiscuous mode
255
256
257
258
	 */

	if (priv->flags & MLX4_EN_FLAG_PROMISC) {
		if (netif_msg_rx_status(priv))
259
			en_warn(priv, "Leaving promiscuous mode\n");
260
261
262
		priv->flags &= ~MLX4_EN_FLAG_PROMISC;

		/* Disable promiscouos mode */
263
		if (!(mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
264
265
266
267
268
			err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
						     priv->base_qpn, 0);
		else
			err = mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
							  priv->port);
269
		if (err)
Lucas De Marchi's avatar
Lucas De Marchi committed
270
			en_err(priv, "Failed disabling promiscuous mode\n");
271

272
273
274
275
276
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
							    priv->port);
			if (err)
Lucas De Marchi's avatar
Lucas De Marchi committed
277
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
278
279
280
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}

281
		/* Enable port VLAN filter */
Jiri Pirko's avatar
Jiri Pirko committed
282
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
283
		if (err)
284
			en_err(priv, "Failed enabling VLAN filter\n");
285
286
287
288
289
290
291
	}

	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
292
			en_err(priv, "Failed disabling multicast filter\n");
293
294
295
296
297
298
299
300
301

		/* Add the default qp number as multicast promisc */
		if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
			err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
							 priv->port);
			if (err)
				en_err(priv, "Failed entering multicast promisc mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
		}
302
	} else {
303
		int i;
304
305
306
307
308
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
							    priv->port);
			if (err)
Lucas De Marchi's avatar
Lucas De Marchi committed
309
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
310
311
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
312

313
314
315
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
316
			en_err(priv, "Failed disabling multicast filter\n");
317

318
319
320
321
322
323
324
		/* Detach our qp from all the multicast addresses */
		for (i = 0; i < priv->mc_addrs_cnt; i++) {
			memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
			mc_list[5] = priv->port;
			mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
					      mc_list, MLX4_PROT_ETH);
		}
325
326
327
328
329
330
331
332
333
		/* Flush mcast filter and init it with broadcast address */
		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
				    1, MLX4_MCAST_CONFIG);

		/* Update multicast list - we cache all addresses so they won't
		 * change while HW is updated holding the command semaphor */
		netif_tx_lock_bh(dev);
		mlx4_en_cache_mclist(dev);
		netif_tx_unlock_bh(dev);
334
335
336
		for (i = 0; i < priv->mc_addrs_cnt; i++) {
			mcast_addr =
			      mlx4_en_mac_to_u64(priv->mc_addrs + i * ETH_ALEN);
337
338
339
340
			memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
			mc_list[5] = priv->port;
			mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp,
					      mc_list, 0, MLX4_PROT_ETH);
341
342
343
344
345
346
			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
					    mcast_addr, 0, MLX4_MCAST_CONFIG);
		}
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_ENABLE);
		if (err)
347
			en_err(priv, "Failed enabling multicast filter\n");
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
	}
out:
	mutex_unlock(&mdev->state_lock);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* ndo_poll_controller callback: poll every RX completion queue with
 * interrupts effectively masked (netconsole path).  Each CQ is drained
 * under its own lock after quiescing its NAPI context. */
static void mlx4_en_netpoll(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	unsigned long flags;
	int ring;

	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		struct mlx4_en_cq *cq = &priv->rx_cq[ring];

		spin_lock_irqsave(&cq->lock, flags);
		napi_synchronize(&cq->napi);
		mlx4_en_process_rx_cq(dev, cq, 0);
		spin_unlock_irqrestore(&cq->lock, flags);
	}
}
#endif

static void mlx4_en_tx_timeout(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	if (netif_msg_timer(priv))
377
		en_warn(priv, "Tx timeout called on port:%d\n", priv->port);
378

379
	priv->port_stats.tx_timeout++;
380
	en_dbg(DRV, priv, "Scheduling watchdog\n");
381
	queue_work(mdev->workqueue, &priv->watchdog_task);
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
}


/* ndo_get_stats callback: return a stable snapshot of the counters.
 * The copy into ret_stats is taken under stats_lock so concurrent stats
 * updates cannot tear the values handed back to the caller. */
static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct net_device_stats *snapshot = &priv->ret_stats;

	spin_lock_bh(&priv->stats_lock);
	memcpy(snapshot, &priv->stats, sizeof(priv->stats));
	spin_unlock_bh(&priv->stats_lock);

	return snapshot;
}

static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
	struct mlx4_en_cq *cq;
	int i;

	/* If we haven't received a specific coalescing setting
402
	 * (module param), we set the moderation parameters as follows:
403
404
405
406
	 * - moder_cnt is set to the number of mtu sized packets to
	 *   satisfy our coelsing target.
	 * - moder_time is set to a fixed value.
	 */
407
	priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
408
	priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
409
410
	en_dbg(INTR, priv, "Default coalesing params for mtu:%d - "
			   "rx_frames:%d rx_usecs:%d\n",
411
412
413
414
415
416
417
		 priv->dev->mtu, priv->rx_frames, priv->rx_usecs);

	/* Setup cq moderation params */
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = &priv->rx_cq[i];
		cq->moder_cnt = priv->rx_frames;
		cq->moder_time = priv->rx_usecs;
418
419
420
		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
		priv->last_moder_packets[i] = 0;
		priv->last_moder_bytes[i] = 0;
421
422
423
424
425
426
427
428
429
430
431
432
433
434
	}

	for (i = 0; i < priv->tx_ring_num; i++) {
		cq = &priv->tx_cq[i];
		cq->moder_cnt = MLX4_EN_TX_COAL_PKTS;
		cq->moder_time = MLX4_EN_TX_COAL_TIME;
	}

	/* Reset auto-moderation params */
	priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
	priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
	priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
	priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
	priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
435
	priv->adaptive_rx_coal = 1;
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
	priv->last_moder_jiffies = 0;
	priv->last_moder_tx_packets = 0;
}

static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
{
	unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
	struct mlx4_en_cq *cq;
	unsigned long packets;
	unsigned long rate;
	unsigned long avg_pkt_size;
	unsigned long rx_packets;
	unsigned long rx_bytes;
	unsigned long rx_pkt_diff;
	int moder_time;
451
	int ring, err;
452
453
454
455

	if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
		return;

456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		spin_lock_bh(&priv->stats_lock);
		rx_packets = priv->rx_ring[ring].packets;
		rx_bytes = priv->rx_ring[ring].bytes;
		spin_unlock_bh(&priv->stats_lock);

		rx_pkt_diff = ((unsigned long) (rx_packets -
				priv->last_moder_packets[ring]));
		packets = rx_pkt_diff;
		rate = packets * HZ / period;
		avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
				priv->last_moder_bytes[ring])) / packets : 0;

		/* Apply auto-moderation only when packet rate
		 * exceeds a rate that it matters */
		if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
		    avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
473
474
475
476
477
478
479
480
481
			if (rate < priv->pkt_rate_low)
				moder_time = priv->rx_usecs_low;
			else if (rate > priv->pkt_rate_high)
				moder_time = priv->rx_usecs_high;
			else
				moder_time = (rate - priv->pkt_rate_low) *
					(priv->rx_usecs_high - priv->rx_usecs_low) /
					(priv->pkt_rate_high - priv->pkt_rate_low) +
					priv->rx_usecs_low;
482
483
		} else {
			moder_time = priv->rx_usecs_low;
484
485
		}

486
487
488
		if (moder_time != priv->last_moder_time[ring]) {
			priv->last_moder_time[ring] = moder_time;
			cq = &priv->rx_cq[ring];
489
490
			cq->moder_time = moder_time;
			err = mlx4_en_set_cq_moder(priv, cq);
491
492
493
			if (err)
				en_err(priv, "Failed modifying moderation "
					     "for cq:%d\n", ring);
494
		}
495
496
		priv->last_moder_packets[ring] = rx_packets;
		priv->last_moder_bytes[ring] = rx_bytes;
497
498
499
500
501
502
503
	}

	priv->last_moder_jiffies = jiffies;
}

static void mlx4_en_do_get_stats(struct work_struct *work)
{
504
	struct delayed_work *delay = to_delayed_work(work);
505
506
507
508
509
510
511
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 stats_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
	if (err)
512
		en_dbg(HW, priv, "Could not update stats\n");
513
514
515
516
517
518
519
520

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		if (priv->port_up)
			mlx4_en_auto_moderation(priv);

		queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	}
521
522
523
524
	if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
		queue_work(mdev->workqueue, &priv->mac_task);
		mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
	}
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
	mutex_unlock(&mdev->state_lock);
}

/* Link-state worker (priv->linkstate_task): propagate the last reported
 * port event to the net stack carrier state, logging only on change. */
static void mlx4_en_linkstate(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 linkstate_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int linkstate = priv->link_state;

	mutex_lock(&mdev->state_lock);
	/* If observable port state changed set carrier state and
	 * report to system log */
	if (priv->last_link_state != linkstate) {
		if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
			en_info(priv, "Link Down\n");
			netif_carrier_off(priv->dev);
		} else {
			en_info(priv, "Link Up\n");
			netif_carrier_on(priv->dev);
		}
	}
	priv->last_link_state = linkstate;
	mutex_unlock(&mdev->state_lock);
}


552
int mlx4_en_start_port(struct net_device *dev)
553
554
555
556
557
558
559
560
561
562
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_cq *cq;
	struct mlx4_en_tx_ring *tx_ring;
	int rx_index = 0;
	int tx_index = 0;
	int err = 0;
	int i;
	int j;
563
	u8 mc_list[16] = {0};
564
565

	if (priv->port_up) {
566
		en_dbg(DRV, priv, "start port called while port already up\n");
567
568
569
570
571
572
		return 0;
	}

	/* Calculate Rx buf size */
	dev->mtu = min(dev->mtu, priv->max_mtu);
	mlx4_en_calc_rx_buf(dev);
573
	en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);
574

575
	/* Configure rx cq's and rings */
576
577
	err = mlx4_en_activate_rx_rings(priv);
	if (err) {
578
		en_err(priv, "Failed to activate RX rings\n");
579
580
		return err;
	}
581
582
583
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = &priv->rx_cq[i];

584
		err = mlx4_en_activate_cq(priv, cq, i);
585
		if (err) {
586
			en_err(priv, "Failed activating Rx CQ\n");
587
			goto cq_err;
588
589
590
591
592
		}
		for (j = 0; j < cq->size; j++)
			cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
593
			en_err(priv, "Failed setting cq moderation parameters");
594
595
596
597
			mlx4_en_deactivate_cq(priv, cq);
			goto cq_err;
		}
		mlx4_en_arm_cq(priv, cq);
598
		priv->rx_ring[i].cqn = cq->mcq.cqn;
599
600
601
		++rx_index;
	}

602
603
604
605
606
607
608
609
610
611
	/* Set port mac number */
	en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
	err = mlx4_register_mac(mdev->dev, priv->port,
				priv->mac, &priv->base_qpn, 0);
	if (err) {
		en_err(priv, "Failed setting port mac\n");
		goto cq_err;
	}
	mdev->mac_removed[priv->port] = 0;

612
613
	err = mlx4_en_config_rss_steer(priv);
	if (err) {
614
		en_err(priv, "Failed configuring rss steering\n");
615
		goto mac_err;
616
617
618
619
620
621
	}

	/* Configure tx cq's and rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		/* Configure cq */
		cq = &priv->tx_cq[i];
622
		err = mlx4_en_activate_cq(priv, cq, i);
623
		if (err) {
624
			en_err(priv, "Failed allocating Tx CQ\n");
625
626
627
628
			goto tx_err;
		}
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
629
			en_err(priv, "Failed setting cq moderation parameters");
630
631
632
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
633
		en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
634
635
636
637
		cq->buf->wqe_index = cpu_to_be16(0xffff);

		/* Configure ring */
		tx_ring = &priv->tx_ring[i];
638
		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn);
639
		if (err) {
640
			en_err(priv, "Failed allocating Tx ring\n");
641
642
643
644
645
646
647
648
649
650
651
652
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		/* Set initial ownership of all Tx TXBBs to SW (1) */
		for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
			*((u32 *) (tx_ring->buf + j)) = 0xffffffff;
		++tx_index;
	}

	/* Configure port */
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
653
654
655
656
				    priv->prof->tx_pause,
				    priv->prof->tx_ppp,
				    priv->prof->rx_pause,
				    priv->prof->rx_ppp);
657
	if (err) {
658
659
		en_err(priv, "Failed setting port general configurations "
			     "for port %d, with error %d\n", priv->port, err);
660
661
662
663
664
		goto tx_err;
	}
	/* Set default qp number */
	err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
	if (err) {
665
		en_err(priv, "Failed setting default qp numbers\n");
666
667
668
669
		goto tx_err;
	}

	/* Init port */
670
	en_dbg(HW, priv, "Initializing port\n");
671
672
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
673
		en_err(priv, "Failed Initializing port\n");
674
		goto tx_err;
675
676
	}

677
678
679
680
681
682
683
	/* Attach rx QP to bradcast address */
	memset(&mc_list[10], 0xff, ETH_ALEN);
	mc_list[5] = priv->port;
	if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
				  0, MLX4_PROT_ETH))
		mlx4_warn(mdev, "Failed Attaching Broadcast\n");

Herbert Xu's avatar
Herbert Xu committed
684
685
686
	/* Must redo promiscuous mode setup. */
	priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);

687
688
689
690
	/* Schedule multicast task to populate multicast list */
	queue_work(mdev->workqueue, &priv->mcast_task);

	priv->port_up = true;
691
	netif_tx_start_all_queues(dev);
692
693
694
695
696
697
698
699
700
	return 0;

tx_err:
	while (tx_index--) {
		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
		mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
	}

	mlx4_en_release_rss_steer(priv);
701
702
mac_err:
	mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
703
704
705
cq_err:
	while (rx_index--)
		mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
706
707
	for (i = 0; i < priv->rx_ring_num; i++)
		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
708
709
710
711
712

	return err; /* need to close devices */
}


713
void mlx4_en_stop_port(struct net_device *dev)
714
715
716
717
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;
718
	u8 mc_list[16] = {0};
719
720

	if (!priv->port_up) {
721
		en_dbg(DRV, priv, "stop port called while port already down\n");
722
723
724
725
726
		return;
	}

	/* Synchronize with tx routine */
	netif_tx_lock_bh(dev);
727
	netif_tx_stop_all_queues(dev);
728
729
	netif_tx_unlock_bh(dev);

730
	/* Set port as not active */
731
	priv->port_up = false;
732

733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
	/* Detach All multicasts */
	memset(&mc_list[10], 0xff, ETH_ALEN);
	mc_list[5] = priv->port;
	mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
			      MLX4_PROT_ETH);
	for (i = 0; i < priv->mc_addrs_cnt; i++) {
		memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
		mc_list[5] = priv->port;
		mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
				      mc_list, MLX4_PROT_ETH);
	}
	mlx4_en_clear_list(dev);
	/* Flush multicast filter */
	mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);

748
	/* Unregister Mac address for the port */
749
	mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
750
	mdev->mac_removed[priv->port] = 1;
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771

	/* Free TX Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
		mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]);
	}
	msleep(10);

	for (i = 0; i < priv->tx_ring_num; i++)
		mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]);

	/* Free RSS qps */
	mlx4_en_release_rss_steer(priv);

	/* Free RX Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
		while (test_bit(NAPI_STATE_SCHED, &priv->rx_cq[i].napi.state))
			msleep(1);
		mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
	}
772
773
774

	/* close port*/
	mlx4_CLOSE_PORT(mdev->dev, priv->port);
775
776
777
778
779
780
781
782
783
}

static void mlx4_en_restart(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 watchdog_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

784
	en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
785
786
787
788
789

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		mlx4_en_stop_port(dev);
		if (mlx4_en_start_port(dev))
790
			en_err(priv, "Failed restarting port %d\n", priv->port);
791
792
	}
	mutex_unlock(&mdev->state_lock);
793
794
795
796
797
798
799
800
801
802
803
804
805
}


static int mlx4_en_open(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;
	int err = 0;

	mutex_lock(&mdev->state_lock);

	if (!mdev->device_up) {
806
		en_err(priv, "Cannot open - device down/disabled\n");
807
808
809
810
811
812
		err = -EBUSY;
		goto out;
	}

	/* Reset HW statistics and performance counters */
	if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
813
		en_dbg(HW, priv, "Failed dumping statistics\n");
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828

	memset(&priv->stats, 0, sizeof(priv->stats));
	memset(&priv->pstats, 0, sizeof(priv->pstats));

	for (i = 0; i < priv->tx_ring_num; i++) {
		priv->tx_ring[i].bytes = 0;
		priv->tx_ring[i].packets = 0;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_ring[i].bytes = 0;
		priv->rx_ring[i].packets = 0;
	}

	err = mlx4_en_start_port(dev);
	if (err)
829
		en_err(priv, "Failed starting port:%d\n", priv->port);
830
831
832
833
834
835
836
837
838
839
840
841

out:
	mutex_unlock(&mdev->state_lock);
	return err;
}


static int mlx4_en_close(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

842
	en_dbg(IFDOWN, priv, "Close port called\n");
843
844
845
846
847
848
849
850
851
852

	mutex_lock(&mdev->state_lock);

	mlx4_en_stop_port(dev);
	netif_carrier_off(dev);

	mutex_unlock(&mdev->state_lock);
	return 0;
}

853
void mlx4_en_free_resources(struct mlx4_en_priv *priv)
854
855
856
857
858
859
860
{
	int i;

	for (i = 0; i < priv->tx_ring_num; i++) {
		if (priv->tx_ring[i].tx_info)
			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
		if (priv->tx_cq[i].buf)
861
			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
862
863
864
865
866
867
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		if (priv->rx_ring[i].rx_info)
			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i]);
		if (priv->rx_cq[i].buf)
868
			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
869
870
871
	}
}

872
int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
873
874
875
{
	struct mlx4_en_port_profile *prof = priv->prof;
	int i;
876
877
878
879
880
881
882
	int base_tx_qpn, err;

	err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &base_tx_qpn);
	if (err) {
		en_err(priv, "failed reserving range for TX rings\n");
		return err;
	}
883
884
885
886
887
888
889

	/* Create tx Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
				      prof->tx_ring_size, i, TX))
			goto err;

890
		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], base_tx_qpn + i,
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
					   prof->tx_ring_size, TXBB_SIZE))
			goto err;
	}

	/* Create rx Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
				      prof->rx_ring_size, i, RX))
			goto err;

		if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
					   prof->rx_ring_size, priv->stride))
			goto err;
	}

	return 0;

err:
909
	en_err(priv, "Failed to allocate NIC resources\n");
910
	mlx4_qp_release_range(priv->mdev->dev, base_tx_qpn, priv->tx_ring_num);
911
912
913
914
915
916
917
918
919
	return -ENOMEM;
}


void mlx4_en_destroy_netdev(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

920
	en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937

	/* Unregister device - this will close the port if it was up */
	if (priv->registered)
		unregister_netdev(dev);

	if (priv->allocated)
		mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);

	cancel_delayed_work(&priv->stats_task);
	/* flush any pending task for this netdev */
	flush_workqueue(mdev->workqueue);

	/* Detach the netdev so tasks would not attempt to access it */
	mutex_lock(&mdev->state_lock);
	mdev->pndev[priv->port] = NULL;
	mutex_unlock(&mdev->state_lock);

938
	mlx4_en_free_resources(priv);
939
940
941
942
943
944
945
946
947
	free_netdev(dev);
}

static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

948
	en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
949
950
951
		 dev->mtu, new_mtu);

	if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
952
		en_err(priv, "Bad MTU size:%d.\n", new_mtu);
953
954
955
956
957
958
959
960
961
		return -EPERM;
	}
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		mutex_lock(&mdev->state_lock);
		if (!mdev->device_up) {
			/* NIC is probably restarting - let watchdog task reset
			 * the port */
962
			en_dbg(DRV, priv, "Change MTU called with card down!?\n");
963
964
965
966
		} else {
			mlx4_en_stop_port(dev);
			err = mlx4_en_start_port(dev);
			if (err) {
967
				en_err(priv, "Failed restarting port:%d\n",
968
969
970
971
972
973
974
975
976
					 priv->port);
				queue_work(mdev->workqueue, &priv->watchdog_task);
			}
		}
		mutex_unlock(&mdev->state_lock);
	}
	return 0;
}

977
978
979
980
static const struct net_device_ops mlx4_netdev_ops = {
	.ndo_open		= mlx4_en_open,
	.ndo_stop		= mlx4_en_close,
	.ndo_start_xmit		= mlx4_en_xmit,
Yevgeny Petrilin's avatar
Yevgeny Petrilin committed
981
	.ndo_select_queue	= mlx4_en_select_queue,
982
	.ndo_get_stats		= mlx4_en_get_stats,
983
	.ndo_set_rx_mode	= mlx4_en_set_multicast,
984
	.ndo_set_mac_address	= mlx4_en_set_mac,
985
	.ndo_validate_addr	= eth_validate_addr,
986
987
988
989
990
991
992
993
994
	.ndo_change_mtu		= mlx4_en_change_mtu,
	.ndo_tx_timeout		= mlx4_en_tx_timeout,
	.ndo_vlan_rx_add_vid	= mlx4_en_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= mlx4_en_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mlx4_en_netpoll,
#endif
};

995
996
997
998
999
1000
1001
1002
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
			struct mlx4_en_port_profile *prof)
{
	struct net_device *dev;
	struct mlx4_en_priv *priv;
	int i;
	int err;

1003
1004
	dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
	    prof->tx_ring_num, prof->rx_ring_num);
1005
1006
1007
1008
1009
1010
	if (dev == NULL) {
		mlx4_err(mdev, "Net device allocation failed\n");
		return -ENOMEM;
	}

	SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
1011
	dev->dev_id =  port - 1;
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039

	/*
	 * Initialize driver private data
	 */

	priv = netdev_priv(dev);
	memset(priv, 0, sizeof(struct mlx4_en_priv));
	priv->dev = dev;
	priv->mdev = mdev;
	priv->prof = prof;
	priv->port = port;
	priv->port_up = false;
	priv->flags = prof->flags;
	priv->tx_ring_num = prof->tx_ring_num;
	priv->rx_ring_num = prof->rx_ring_num;
	priv->mac_index = -1;
	priv->msg_enable = MLX4_EN_MSG_LEVEL;
	spin_lock_init(&priv->stats_lock);
	INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast);
	INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
	INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
	INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);

	/* Query for default mac and max mtu */
	priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
	priv->mac = mdev->dev->caps.def_mac[priv->port];
	if (ILLEGAL_MAC(priv->mac)) {
1040
		en_err(priv, "Port: %d, invalid mac burned: 0x%llx, quiting\n",
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
			 priv->port, priv->mac);
		err = -EINVAL;
		goto out;
	}

	priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					  DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
	err = mlx4_en_alloc_resources(priv);
	if (err)
		goto out;

	/* Allocate page for receive rings */
	err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
				MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
	if (err) {
1056
		en_err(priv, "Failed to allocate page for rx qps\n");
1057
1058
1059
1060
1061
1062
1063
		goto out;
	}
	priv->allocated = 1;

	/*
	 * Initialize netdev entry points
	 */
1064
	dev->netdev_ops = &mlx4_netdev_ops;
1065
	dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
1066
1067
	netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
1068

1069
1070
1071
1072
	SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);

	/* Set defualt MAC */
	dev->addr_len = ETH_ALEN;
1073
1074
1075
1076
	for (i = 0; i < ETH_ALEN; i++) {
		dev->dev_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
		dev->perm_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
	}
1077
1078
1079
1080

	/*
	 * Set driver features
	 */
1081
1082
1083
1084
1085
1086
	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	if (mdev->LSO_support)
		dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;

	dev->vlan_features = dev->hw_features;

1087
	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
1088
1089
1090
	dev->features = dev->hw_features | NETIF_F_HIGHDMA |
			NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
			NETIF_F_HW_VLAN_FILTER;
1091
1092
1093
1094
1095
1096

	mdev->pndev[port] = dev;

	netif_carrier_off(dev);
	err = register_netdev(dev);
	if (err) {
1097
		en_err(priv, "Netdev registration failed for port %d\n", port);
1098
1099
		goto out;
	}
1100
	priv->registered = 1;
1101
1102
1103
1104

	en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
	en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);

1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
	/* Configure port */
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    MLX4_EN_MIN_MTU,
				    0, 0, 0, 0);
	if (err) {
		en_err(priv, "Failed setting port general configurations "
		       "for port %d, with error %d\n", priv->port, err);
		goto out;
	}

	/* Init port */
	en_warn(priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto out;
	}
1122
	mlx4_en_set_default_moderation(priv);
1123
1124
1125
1126
1127
1128
1129
1130
	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	return 0;

out:
	mlx4_en_destroy_netdev(dev);
	return err;
}