ntb_transport.c 44.2 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 *   redistributing this file, you may do so under either license.
 *
 *   GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   BSD LICENSE
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copy
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Intel PCIe NTB Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */
#include <linux/debugfs.h>
#include <linux/delay.h>
50
#include <linux/dmaengine.h>
51
52
53
54
55
56
57
58
59
60
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/types.h>
#include "ntb_hw.h"

Jon Mason's avatar
Jon Mason committed
61
#define NTB_TRANSPORT_VERSION	3

/* Max payload bytes per transport frame; both sides must agree. */
static unsigned int transport_mtu = 0x401E;
module_param(transport_mtu, uint, 0644);
MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");

/* 0 means "derive the qp limit from the hardware callback/MW counts". */
static unsigned char max_num_clients;
module_param(max_num_clients, byte, 0644);
MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");

/* Payloads at or below this size are copied by the CPU instead of DMA. */
static unsigned int copy_bytes = 1024;
module_param(copy_bytes, uint, 0644);
MODULE_PARM_DESC(copy_bytes, "Threshold under which NTB will use the CPU to copy instead of DMA");

75
76
77
78
79
80
81
82
struct ntb_queue_entry {
	/* ntb_queue list reference */
	struct list_head entry;
	/* pointers to data to be transfered */
	void *cb_data;
	void *buf;
	unsigned int len;
	unsigned int flags;

	struct ntb_transport_qp *qp;
	union {
		/* header in the peer's MW, written through iomem on TX */
		struct ntb_payload_header __iomem *tx_hdr;
		/* header in our local RX buffer */
		struct ntb_payload_header *rx_hdr;
	};
	/* ring slot this entry refers to */
	unsigned int index;
};

Jon Mason's avatar
Jon Mason committed
92
93
94
95
/* In-MW cursor: index of the last RX ring slot the receiver consumed. */
struct ntb_rx_info {
	unsigned int entry;
};

96
97
98
99
struct ntb_transport_qp {
	struct ntb_transport *transport;
	struct ntb_device *ndev;
	void *cb_data;			/* client cookie echoed in callbacks */
	struct dma_chan *dma_chan;	/* NULL when falling back to memcpy */

	bool client_ready;
	bool qp_link;
	u8 qp_num;	/* Only 64 QP's are allowed.  0-63 */

	/* peer's RX cursor, lives in the remote MW (iomem) */
	struct ntb_rx_info __iomem *rx_info;
	/* our RX cursor, lives at the end of our local RX slice */
	struct ntb_rx_info *remote_rx_info;

	void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
			   void *data, int len);
	struct list_head tx_free_q;
	spinlock_t ntb_tx_free_q_lock;
	void __iomem *tx_mw;		/* our TX slice of the peer's MW */
	dma_addr_t tx_mw_phys;
	unsigned int tx_index;
	unsigned int tx_max_entry;
	unsigned int tx_max_frame;

	void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
			   void *data, int len);
	struct list_head rx_pend_q;
	struct list_head rx_free_q;
	spinlock_t ntb_rx_pend_q_lock;
	spinlock_t ntb_rx_free_q_lock;
	void *rx_buff;			/* our RX slice of the local MW */
	unsigned int rx_index;
	unsigned int rx_max_entry;
	unsigned int rx_max_frame;
	dma_cookie_t last_cookie;

	void (*event_handler)(void *data, int status);
	struct delayed_work link_work;
	struct work_struct link_cleanup;

	struct dentry *debugfs_dir;
	struct dentry *debugfs_stats;

	/* Stats */
	u64 rx_bytes;
	u64 rx_pkts;
	u64 rx_ring_empty;
	u64 rx_err_no_buf;
	u64 rx_err_oflow;
	u64 rx_err_ver;
	u64 rx_memcpy;
	u64 rx_async;
	u64 tx_bytes;
	u64 tx_pkts;
	u64 tx_ring_full;
	u64 tx_err_no_buf;
	u64 tx_memcpy;
	u64 tx_async;
};

/* Local backing of one hardware memory window used for RX buffers. */
struct ntb_transport_mw {
	size_t size;		/* 4k-aligned allocation size; 0 = unset */
	void *virt_addr;	/* CPU address of the coherent buffer */
	dma_addr_t dma_addr;	/* bus address handed to ntb_set_mw_addr() */
};

/* One device instance on the ntb bus, created per transport per client. */
struct ntb_transport_client_dev {
	struct list_head entry;	/* node on ntb_transport.client_devs */
	struct device dev;
};

struct ntb_transport {
	struct list_head entry;		/* node on ntb_transport_list */
	struct list_head client_devs;	/* registered client devices */

	struct ntb_device *ndev;
	struct ntb_transport_mw *mw;	/* one slot per hardware MW */
	struct ntb_transport_qp *qps;
	unsigned int max_qps;
	/* set bit = qp free; cleanup paths treat cleared bits as in-use */
	unsigned long qp_bitmap;
	bool transport_link;
	struct delayed_work link_work;
	struct work_struct link_cleanup;
};

/* Bits used in ntb_payload_header.flags. */
enum {
	DESC_DONE_FLAG = 1 << 0,
	LINK_DOWN_FLAG = 1 << 1,
};

/* Per-frame header stored at the end of each ring slot in the MW. */
struct ntb_payload_header {
	/* frame version stamp; presumably checked against rx_pkts
	 * (rx_err_ver counts mismatches) -- confirm in RX path
	 */
	unsigned int ver;
	unsigned int len;
	unsigned int flags;	/* DESC_DONE_FLAG / LINK_DOWN_FLAG */
};

/* Scratchpad register layout used for link negotiation with the peer. */
enum {
	VERSION = 0,
	QP_LINKS,
	NUM_QPS,
	NUM_MWS,
	MW0_SZ_HIGH,
	MW0_SZ_LOW,
	MW1_SZ_HIGH,
	MW1_SZ_LOW,
	MAX_SPAD,
};

Jon Mason's avatar
Jon Mason committed
203
/* Queues are distributed round-robin across the available MWs. */
#define QP_TO_MW(ndev, qp)	((qp) % ntb_max_mw(ndev))

#define NTB_QP_DEF_NUM_ENTRIES	100
#define NTB_LINK_DOWN_TIMEOUT	10	/* ms between link retry attempts */

static int ntb_match_bus(struct device *dev, struct device_driver *drv)
{
	return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
}

/* Bus probe: pin the device, hand its parent pci_dev to the client's
 * probe hook, and drop the reference again if the probe fails.
 */
static int ntb_client_probe(struct device *dev)
{
	const struct ntb_client *client = container_of(dev->driver,
						       struct ntb_client,
						       driver);
	struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);
	int rc = -EINVAL;

	get_device(dev);
	if (client && client->probe)
		rc = client->probe(pdev);
	if (rc)
		put_device(dev);

	return rc;
}

/* Bus remove: invoke the client's remove hook (if any) and release the
 * device reference taken at probe time.
 */
static int ntb_client_remove(struct device *dev)
{
	const struct ntb_client *client = container_of(dev->driver,
						       struct ntb_client,
						       driver);
	struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);

	if (client && client->remove)
		client->remove(pdev);

	put_device(dev);

	return 0;
}

Jon Mason's avatar
Jon Mason committed
242
/* Virtual bus tying NTB client drivers to per-transport client devices. */
static struct bus_type ntb_bus_type = {
	.name = "ntb_bus",
	.match = ntb_match_bus,
	.probe = ntb_client_probe,
	.remove = ntb_client_remove,
};

static LIST_HEAD(ntb_transport_list);

251
/* Register the ntb bus type on first use and add this transport to the
 * global list.  Returns 0 or the bus_register() error.
 */
static int ntb_bus_init(struct ntb_transport *nt)
{
	if (list_empty(&ntb_transport_list)) {
		int rc = bus_register(&ntb_bus_type);
		if (rc)
			return rc;
	}

	list_add(&nt->entry, &ntb_transport_list);

	return 0;
}

264
/* Forcibly unregister any client devices still on the bus, then remove
 * this transport; the bus type itself goes away with the last transport.
 */
static void ntb_bus_remove(struct ntb_transport *nt)
{
	struct ntb_transport_client_dev *client_dev, *cd;

	list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
		dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
			dev_name(&client_dev->dev));
		list_del(&client_dev->entry);
		device_unregister(&client_dev->dev);
	}

	list_del(&nt->entry);

	if (list_empty(&ntb_transport_list))
		bus_unregister(&ntb_bus_type);
}

/* Device-model release hook: free the containing client_dev allocation. */
static void ntb_client_release(struct device *dev)
{
	kfree(container_of(dev, struct ntb_transport_client_dev, dev));
}

/**
 * ntb_unregister_client_dev - Unregister NTB client device
 * @device_name: Name of NTB client device
 *
 * Unregister an NTB client device with the NTB transport layer
 */
void ntb_unregister_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client, *cd;
	struct ntb_transport *nt;

	/* Prefix match: removes "<device_name><N>" on every transport. */
	list_for_each_entry(nt, &ntb_transport_list, entry)
		list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
			if (!strncmp(dev_name(&client->dev), device_name,
				     strlen(device_name))) {
				list_del(&client->entry);
				device_unregister(&client->dev);
			}
}
EXPORT_SYMBOL_GPL(ntb_unregister_client_dev);

/**
 * ntb_register_client_dev - Register NTB client device
 * @device_name: Name of NTB client device
 *
 * Register an NTB client device with the NTB transport layer
 */
int ntb_register_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client_dev;
	struct ntb_transport *nt;
	int rc, i = 0;

	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	/* Create one device per registered transport, named
	 * "<device_name><index>" so multiple NTB devices stay distinct.
	 */
	list_for_each_entry(nt, &ntb_transport_list, entry) {
		struct device *dev;

		client_dev = kzalloc(sizeof(struct ntb_transport_client_dev),
				     GFP_KERNEL);
		if (!client_dev) {
			rc = -ENOMEM;
			goto err;
		}

		dev = &client_dev->dev;

		/* setup and register client devices */
		dev_set_name(dev, "%s%d", device_name, i);
		dev->bus = &ntb_bus_type;
		dev->release = ntb_client_release;
		dev->parent = &ntb_query_pdev(nt->ndev)->dev;

		rc = device_register(dev);
		if (rc) {
			kfree(client_dev);
			goto err;
		}

		list_add_tail(&client_dev->entry, &nt->client_devs);
		i++;
	}

	return 0;

err:
	/* Unwind any devices created before the failure. */
	ntb_unregister_client_dev(device_name);

	return rc;
}
EXPORT_SYMBOL_GPL(ntb_register_client_dev);

/**
 * ntb_register_client - Register NTB client driver
 * @drv: NTB client driver to be registered
 *
 * Register an NTB client driver with the NTB transport layer
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_register_client(struct ntb_client *drv)
{
	drv->driver.bus = &ntb_bus_type;

	/* The bus only exists while at least one transport is registered. */
	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_register_client);

/**
 * ntb_unregister_client - Unregister NTB client driver
 * @drv: NTB client driver to be unregistered
 *
 * Unregister an NTB client driver with the NTB transport layer
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
void ntb_unregister_client(struct ntb_client *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_unregister_client);

/* debugfs "stats" read handler: format the qp counters into a bounded
 * kernel buffer and copy the requested window out to userspace.
 */
static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
			    loff_t *offp)
{
	struct ntb_transport_qp *qp;
	char *buf;
	ssize_t ret, out_offset, out_count;

	out_count = 1000;

	buf = kmalloc(out_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	qp = filp->private_data;
	out_offset = 0;
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "NTB QP stats\n");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_bytes - \t%llu\n", qp->rx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_pkts - \t%llu\n", qp->rx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_memcpy - \t%llu\n", qp->rx_memcpy);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_async - \t%llu\n", qp->rx_async);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_ring_empty - %llu\n", qp->rx_ring_empty);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_ver - \t%llu\n", qp->rx_err_ver);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_buff - \t%p\n", qp->rx_buff);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_index - \t%u\n", qp->rx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_max_entry - \t%u\n", qp->rx_max_entry);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_bytes - \t%llu\n", qp->tx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_pkts - \t%llu\n", qp->tx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_memcpy - \t%llu\n", qp->tx_memcpy);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_async - \t%llu\n", qp->tx_async);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_ring_full - \t%llu\n", qp->tx_ring_full);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_err_no_buf - %llu\n", qp->tx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_mw - \t%p\n", qp->tx_mw);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_index - \t%u\n", qp->tx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_max_entry - \t%u\n", qp->tx_max_entry);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "\nQP Link %s\n", (qp->qp_link == NTB_LINK_UP) ?
			       "Up" : "Down");

	/* snprintf reports would-have-written length; clamp to the buffer. */
	if (out_offset > out_count)
		out_offset = out_count;

	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
	kfree(buf);
	return ret;
}

/* File operations for the per-qp debugfs "stats" entry. */
static const struct file_operations ntb_qp_debugfs_stats = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = debugfs_read,
};

/* Append @entry to @list under @lock, safe against interrupt context. */
static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
			 struct list_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add_tail(entry, list);
	spin_unlock_irqrestore(lock, flags);
}

/* Pop the first queue entry from @list under @lock; NULL if empty. */
static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
					   struct list_head *list)
{
	struct ntb_queue_entry *entry = NULL;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	if (!list_empty(list)) {
		entry = list_first_entry(list, struct ntb_queue_entry, entry);
		list_del(&entry->entry);
	}
	spin_unlock_irqrestore(lock, flags);

	return entry;
}

/* Carve this qp's RX slice out of its memory window and initialize the
 * ring bookkeeping.  Must run after the MW buffer has been allocated.
 */
static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
				      unsigned int qp_num)
{
	struct ntb_transport_qp *qp = &nt->qps[qp_num];
	unsigned int rx_size, num_qps_mw;
	u8 mw_num, mw_max;
	unsigned int i;

	mw_max = ntb_max_mw(nt->ndev);
	mw_num = QP_TO_MW(nt->ndev, qp_num);

	WARN_ON(nt->mw[mw_num].virt_addr == NULL);

	/* Queues are striped across MWs; when the split is uneven the
	 * first (max_qps % mw_max) windows carry one extra queue.
	 */
	if (nt->max_qps % mw_max && mw_num < nt->max_qps % mw_max)
		num_qps_mw = nt->max_qps / mw_max + 1;
	else
		num_qps_mw = nt->max_qps / mw_max;

	rx_size = (unsigned int) nt->mw[mw_num].size / num_qps_mw;
	qp->rx_buff = nt->mw[mw_num].virt_addr + qp_num / mw_max * rx_size;
	rx_size -= sizeof(struct ntb_rx_info);

	/* The remote side's RX cursor lives at the end of our slice. */
	qp->remote_rx_info = qp->rx_buff + rx_size;

	/* Due to housekeeping, there must be atleast 2 buffs */
	qp->rx_max_frame = min(transport_mtu, rx_size / 2);
	qp->rx_max_entry = rx_size / qp->rx_max_frame;
	qp->rx_index = 0;

	qp->remote_rx_info->entry = qp->rx_max_entry - 1;

	/* setup the hdr offsets with 0's */
	for (i = 0; i < qp->rx_max_entry; i++) {
		void *offset = qp->rx_buff + qp->rx_max_frame * (i + 1) -
			       sizeof(struct ntb_payload_header);
		memset(offset, 0, sizeof(struct ntb_payload_header));
	}

	qp->rx_pkts = 0;
	qp->tx_pkts = 0;
	qp->tx_index = 0;
}

Jon Mason's avatar
Jon Mason committed
543
544
545
546
547
548
549
550
551
552
553
554
/* Release the coherent RX buffer backing memory window @num_mw.
 * Safe to call when the window was never allocated (virt_addr NULL).
 * NOTE(review): mw->size is left non-zero here; ntb_set_mw() relies on
 * comparing sizes before reallocating -- confirm intended.
 */
static void ntb_free_mw(struct ntb_transport *nt, int num_mw)
{
	struct ntb_transport_mw *mw = &nt->mw[num_mw];
	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);

	if (!mw->virt_addr)
		return;

	dma_free_coherent(&pdev->dev, mw->size, mw->virt_addr, mw->dma_addr);
	mw->virt_addr = NULL;
}

555
556
557
558
559
/* (Re)allocate the coherent RX buffer for memory window @num_mw to hold
 * @size bytes (rounded up to 4k) and program its bus address into the
 * hardware.  Returns 0 on success or -ENOMEM.
 */
static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
{
	struct ntb_transport_mw *mw = &nt->mw[num_mw];
	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);

	/* No need to re-setup */
	if (mw->size == ALIGN(size, 4096))
		return 0;

	if (mw->size != 0)
		ntb_free_mw(nt, num_mw);

	/* Alloc memory for receiving data.  Must be 4k aligned */
	mw->size = ALIGN(size, 4096);

	mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr,
					   GFP_KERNEL);
	if (!mw->virt_addr) {
		/* Report the size that was requested; the old code zeroed
		 * mw->size before printing it, so the message always said
		 * "size 0".
		 */
		dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n",
			(int) ALIGN(size, 4096));
		mw->size = 0;
		return -ENOMEM;
	}

	/* Notify HW the memory location of the receive buffer */
	ntb_set_mw_addr(nt->ndev, num_mw, mw->dma_addr);

	return 0;
}

585
/* Bring one qp's link down: cancel pending link work if it was already
 * down, otherwise notify the client and mark the qp down.
 */
static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
{
	struct ntb_transport *nt = qp->transport;
	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);

	if (qp->qp_link == NTB_LINK_DOWN) {
		cancel_delayed_work_sync(&qp->link_work);
		return;
	}

	if (qp->event_handler)
		qp->event_handler(qp->cb_data, NTB_LINK_DOWN);

	dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
	qp->qp_link = NTB_LINK_DOWN;
}

/* Work-queue wrapper around ntb_qp_link_cleanup(); if the transport is
 * still up, keep retrying to re-establish the qp link.
 */
static void ntb_qp_link_cleanup_work(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_cleanup);
	struct ntb_transport *nt = qp->transport;

	ntb_qp_link_cleanup(qp);

	if (nt->transport_link == NTB_LINK_UP)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

616
617
618
619
620
/* Defer qp link teardown to process context via the cleanup work item. */
static void ntb_qp_link_down(struct ntb_transport_qp *qp)
{
	schedule_work(&qp->link_cleanup);
}

621
/* Bring the whole transport down: clean up every in-use qp, stop or mark
 * the transport link down, and reset the scratchpad registers.
 */
static void ntb_transport_link_cleanup(struct ntb_transport *nt)
{
	int i;

	/* Pass along the info to any clients */
	for (i = 0; i < nt->max_qps; i++)
		if (!test_bit(i, &nt->qp_bitmap))
			ntb_qp_link_cleanup(&nt->qps[i]);

	if (nt->transport_link == NTB_LINK_DOWN)
		cancel_delayed_work_sync(&nt->link_work);
	else
		nt->transport_link = NTB_LINK_DOWN;

	/* The scratchpad registers keep the values if the remote side
	 * goes down, blast them now to give them a sane value the next
	 * time they are accessed
	 */
	for (i = 0; i < MAX_SPAD; i++)
		ntb_write_local_spad(nt->ndev, i, 0);
}

643
644
645
646
647
648
649
650
/* Work-queue wrapper so HW link-down events tear down in process context. */
static void ntb_transport_link_cleanup_work(struct work_struct *work)
{
	struct ntb_transport *nt = container_of(work, struct ntb_transport,
						link_cleanup);

	ntb_transport_link_cleanup(nt);
}

651
652
653
654
655
656
657
658
659
/* HW event callback: kick link negotiation on link-up, schedule teardown
 * on link-down.  Any other event is a programming error.
 */
static void ntb_transport_event_callback(void *data, enum ntb_hw_event event)
{
	struct ntb_transport *nt = data;

	switch (event) {
	case NTB_EVENT_HW_LINK_UP:
		schedule_delayed_work(&nt->link_work, 0);
		break;
	case NTB_EVENT_HW_LINK_DOWN:
		schedule_work(&nt->link_cleanup);
		break;
	default:
		BUG();
	}
}

/* Link negotiation state machine, run from a delayed work item.
 * Publishes our MW sizes / qp count / protocol version to the peer's
 * scratchpads, then reads back the peer's values; on full agreement it
 * allocates the MW buffers, marks the transport up and kicks each ready
 * qp's link work.  On any mismatch or failure it retries while the HW
 * link remains up.
 */
static void ntb_transport_link_work(struct work_struct *work)
{
	struct ntb_transport *nt = container_of(work, struct ntb_transport,
						link_work.work);
	struct ntb_device *ndev = nt->ndev;
	struct pci_dev *pdev = ntb_query_pdev(ndev);
	u32 val;
	int rc, i;

	/* send the local info, in the opposite order of the way we read it */
	for (i = 0; i < ntb_max_mw(ndev); i++) {
		rc = ntb_write_remote_spad(ndev, MW0_SZ_HIGH + (i * 2),
					   ntb_get_mw_size(ndev, i) >> 32);
		if (rc) {
			dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
				(u32)(ntb_get_mw_size(ndev, i) >> 32),
				MW0_SZ_HIGH + (i * 2));
			goto out;
		}

		rc = ntb_write_remote_spad(ndev, MW0_SZ_LOW + (i * 2),
					   (u32) ntb_get_mw_size(ndev, i));
		if (rc) {
			dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
				(u32) ntb_get_mw_size(ndev, i),
				MW0_SZ_LOW + (i * 2));
			goto out;
		}
	}

	rc = ntb_write_remote_spad(ndev, NUM_MWS, ntb_max_mw(ndev));
	if (rc) {
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			ntb_max_mw(ndev), NUM_MWS);
		goto out;
	}

	rc = ntb_write_remote_spad(ndev, NUM_QPS, nt->max_qps);
	if (rc) {
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			nt->max_qps, NUM_QPS);
		goto out;
	}

	/* VERSION written last: its presence implies everything above it. */
	rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION);
	if (rc) {
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			NTB_TRANSPORT_VERSION, VERSION);
		goto out;
	}

	/* Query the remote side for its info */
	rc = ntb_read_remote_spad(ndev, VERSION, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading remote spad %d\n", VERSION);
		goto out;
	}

	if (val != NTB_TRANSPORT_VERSION)
		goto out;
	dev_dbg(&pdev->dev, "Remote version = %d\n", val);

	rc = ntb_read_remote_spad(ndev, NUM_QPS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_QPS);
		goto out;
	}

	if (val != nt->max_qps)
		goto out;
	dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);

	rc = ntb_read_remote_spad(ndev, NUM_MWS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_MWS);
		goto out;
	}

	if (val != ntb_max_mw(ndev))
		goto out;
	dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);

	for (i = 0; i < ntb_max_mw(ndev); i++) {
		u64 val64;

		rc = ntb_read_remote_spad(ndev, MW0_SZ_HIGH + (i * 2), &val);
		if (rc) {
			dev_err(&pdev->dev, "Error reading remote spad %d\n",
				MW0_SZ_HIGH + (i * 2));
			goto out1;
		}

		val64 = (u64) val << 32;

		rc = ntb_read_remote_spad(ndev, MW0_SZ_LOW + (i * 2), &val);
		if (rc) {
			dev_err(&pdev->dev, "Error reading remote spad %d\n",
				MW0_SZ_LOW + (i * 2));
			goto out1;
		}

		val64 |= val;

		dev_dbg(&pdev->dev, "Remote MW%d size = %llu\n", i, val64);

		rc = ntb_set_mw(nt, i, val64);
		if (rc)
			goto out1;
	}

	nt->transport_link = NTB_LINK_UP;

	for (i = 0; i < nt->max_qps; i++) {
		struct ntb_transport_qp *qp = &nt->qps[i];

		ntb_transport_setup_qp_mw(nt, i);

		if (qp->client_ready == NTB_LINK_UP)
			schedule_delayed_work(&qp->link_work, 0);
	}

	return;

out1:
	for (i = 0; i < ntb_max_mw(ndev); i++)
		ntb_free_mw(nt, i);
out:
	/* Keep retrying as long as the hardware link is up. */
	if (ntb_hw_link_status(ndev))
		schedule_delayed_work(&nt->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

/* Per-qp link negotiation: advertise our ready bit in the peer's QP_LINKS
 * scratchpad, then check whether the peer has advertised the matching bit;
 * retry until both sides agree or the transport goes down.
 */
static void ntb_qp_link_work(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_work.work);
	struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
	struct ntb_transport *nt = qp->transport;
	int rc, val;

	WARN_ON(nt->transport_link != NTB_LINK_UP);

	rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
		return;
	}

	/* OR our bit into the value so other qps' bits are preserved. */
	rc = ntb_write_remote_spad(nt->ndev, QP_LINKS, val | 1 << qp->qp_num);
	if (rc)
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			val | 1 << qp->qp_num, QP_LINKS);

	/* query remote spad for qp ready bits */
	rc = ntb_read_remote_spad(nt->ndev, QP_LINKS, &val);
	if (rc)
		dev_err(&pdev->dev, "Error reading remote spad %d\n", QP_LINKS);

	dev_dbg(&pdev->dev, "Remote QP link status = %x\n", val);

	/* See if the remote side is up */
	if (1 << qp->qp_num & val) {
		qp->qp_link = NTB_LINK_UP;

		dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
		if (qp->event_handler)
			qp->event_handler(qp->cb_data, NTB_LINK_UP);
	} else if (nt->transport_link == NTB_LINK_UP)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

840
static int ntb_transport_init_queue(struct ntb_transport *nt,
Jon Mason's avatar
Jon Mason committed
841
				    unsigned int qp_num)
842
843
{
	struct ntb_transport_qp *qp;
844
	unsigned int num_qps_mw, tx_size;
Jon Mason's avatar
Jon Mason committed
845
	u8 mw_num, mw_max;
846
	u64 qp_offset;
Jon Mason's avatar
Jon Mason committed
847
848
849

	mw_max = ntb_max_mw(nt->ndev);
	mw_num = QP_TO_MW(nt->ndev, qp_num);
850
851
852
853
854
855
856
857
858

	qp = &nt->qps[qp_num];
	qp->qp_num = qp_num;
	qp->transport = nt;
	qp->ndev = nt->ndev;
	qp->qp_link = NTB_LINK_DOWN;
	qp->client_ready = NTB_LINK_DOWN;
	qp->event_handler = NULL;

Jon Mason's avatar
Jon Mason committed
859
860
	if (nt->max_qps % mw_max && mw_num < nt->max_qps % mw_max)
		num_qps_mw = nt->max_qps / mw_max + 1;
861
	else
Jon Mason's avatar
Jon Mason committed
862
		num_qps_mw = nt->max_qps / mw_max;
863

Jon Mason's avatar
Jon Mason committed
864
	tx_size = (unsigned int) ntb_get_mw_size(qp->ndev, mw_num) / num_qps_mw;
865
866
867
868
869
870
871
872
873
	qp_offset = qp_num / mw_max * tx_size;
	qp->tx_mw = ntb_get_mw_vbase(nt->ndev, mw_num) + qp_offset;
	if (!qp->tx_mw)
		return -EINVAL;

	qp->tx_mw_phys = ntb_get_mw_base(qp->ndev, mw_num) + qp_offset;
	if (!qp->tx_mw_phys)
		return -EINVAL;

Jon Mason's avatar
Jon Mason committed
874
	tx_size -= sizeof(struct ntb_rx_info);
875
	qp->rx_info = qp->tx_mw + tx_size;
Jon Mason's avatar
Jon Mason committed
876

877
878
	/* Due to housekeeping, there must be atleast 2 buffs */
	qp->tx_max_frame = min(transport_mtu, tx_size / 2);
Jon Mason's avatar
Jon Mason committed
879
	qp->tx_max_entry = tx_size / qp->tx_max_frame;
880

881
	if (ntb_query_debugfs(nt->ndev)) {
882
883
884
885
		char debugfs_name[4];

		snprintf(debugfs_name, 4, "qp%d", qp_num);
		qp->debugfs_dir = debugfs_create_dir(debugfs_name,
886
						 ntb_query_debugfs(nt->ndev));
887
888
889
890
891
892
893

		qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
							qp->debugfs_dir, qp,
							&ntb_qp_debugfs_stats);
	}

	INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
894
	INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);
895
896
897
898
899
900
901
902

	spin_lock_init(&qp->ntb_rx_pend_q_lock);
	spin_lock_init(&qp->ntb_rx_free_q_lock);
	spin_lock_init(&qp->ntb_tx_free_q_lock);

	INIT_LIST_HEAD(&qp->rx_pend_q);
	INIT_LIST_HEAD(&qp->rx_free_q);
	INIT_LIST_HEAD(&qp->tx_free_q);
903
904

	return 0;
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
}

/* Create and register the transport for an NTB PCI device: allocate the
 * MW and qp tables, initialize every qp, hook the HW event callback and
 * join the ntb bus.  Returns 0 or a -ERRNO; on failure everything is
 * unwound via the goto chain.
 */
int ntb_transport_init(struct pci_dev *pdev)
{
	struct ntb_transport *nt;
	int rc, i;

	nt = kzalloc(sizeof(struct ntb_transport), GFP_KERNEL);
	if (!nt)
		return -ENOMEM;

	nt->ndev = ntb_register_transport(pdev, nt);
	if (!nt->ndev) {
		rc = -EIO;
		goto err;
	}

	nt->mw = kcalloc(ntb_max_mw(nt->ndev), sizeof(struct ntb_transport_mw),
			 GFP_KERNEL);
	if (!nt->mw) {
		rc = -ENOMEM;
		goto err1;
	}

	/* max_num_clients == 0 means: bound qps by the MW count instead. */
	if (max_num_clients)
		nt->max_qps = min(ntb_max_cbs(nt->ndev), max_num_clients);
	else
		nt->max_qps = min(ntb_max_cbs(nt->ndev), ntb_max_mw(nt->ndev));

	nt->qps = kcalloc(nt->max_qps, sizeof(struct ntb_transport_qp),
			  GFP_KERNEL);
	if (!nt->qps) {
		rc = -ENOMEM;
		goto err2;
	}

	/* All qps start out free (bit set). */
	nt->qp_bitmap = ((u64) 1 << nt->max_qps) - 1;

	for (i = 0; i < nt->max_qps; i++) {
		rc = ntb_transport_init_queue(nt, i);
		if (rc)
			goto err3;
	}

	INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
	INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup_work);

	rc = ntb_register_event_callback(nt->ndev,
					 ntb_transport_event_callback);
	if (rc)
		goto err3;

	INIT_LIST_HEAD(&nt->client_devs);
	rc = ntb_bus_init(nt);
	if (rc)
		goto err4;

	if (ntb_hw_link_status(nt->ndev))
		schedule_delayed_work(&nt->link_work, 0);

	return 0;

err4:
	ntb_unregister_event_callback(nt->ndev);
err3:
	kfree(nt->qps);
err2:
	kfree(nt->mw);
err1:
	ntb_unregister_transport(nt->ndev);
err:
	kfree(nt);
	return rc;
}

/*
 * ntb_transport_free() - tear down the NTB transport layer
 * @transport: opaque context previously returned by ntb_transport_init()
 *
 * Reverses ntb_transport_init(): takes the link down, releases any
 * queues still held by clients, removes the client bus, cancels the
 * link work, and frees memory windows, arrays, and the context itself.
 */
void ntb_transport_free(void *transport)
{
	struct ntb_transport *nt = transport;
	struct ntb_device *ndev = nt->ndev;
	int i;

	ntb_transport_link_cleanup(nt);

	/* A clear bit in qp_bitmap means the queue is still allocated to a
	 * client; free it on the client's behalf, then drop its debugfs dir.
	 */
	for (i = 0; i < nt->max_qps; i++) {
		if (!test_bit(i, &nt->qp_bitmap))
			ntb_transport_free_queue(&nt->qps[i]);
		debugfs_remove_recursive(nt->qps[i].debugfs_dir);
	}

	ntb_bus_remove(nt);

	cancel_delayed_work_sync(&nt->link_work);

	ntb_unregister_event_callback(ndev);

	for (i = 0; i < ntb_max_mw(ndev); i++)
		ntb_free_mw(nt, i);

	kfree(nt->qps);
	kfree(nt->mw);
	ntb_unregister_transport(ndev);
	kfree(nt);
}

1010
static void ntb_rx_copy_callback(void *data)
1011
{
1012
1013
	struct ntb_queue_entry *entry = data;
	struct ntb_transport_qp *qp = entry->qp;
1014
1015
	void *cb_data = entry->cb_data;
	unsigned int len = entry->len;
1016
1017
1018
1019
1020
	struct ntb_payload_header *hdr = entry->rx_hdr;

	/* Ensure that the data is fully copied out before clearing the flag */
	wmb();
	hdr->flags = 0;
1021

1022
	iowrite32(entry->index, &qp->rx_info->entry);
1023
1024

	ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
1025
1026
1027

	if (qp->rx_handler && qp->client_ready == NTB_LINK_UP)
		qp->rx_handler(qp, qp->cb_data, cb_data, len);
1028
1029
}

1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
/*
 * ntb_memcpy_rx() - CPU fallback copy of a received payload
 * @entry:  receive descriptor holding the destination buffer and length
 * @offset: source location inside the receive memory window
 *
 * Used when no DMA channel is available or the transfer is unsuitable
 * for DMA; copies synchronously, then runs the shared completion path.
 */
static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
{
	void *dst = entry->buf;
	size_t count = entry->len;

	memcpy(dst, offset, count);

	ntb_rx_copy_callback(entry);
}

/*
 * ntb_async_rx() - receive a payload, preferring the DMA engine
 * @entry:  receive descriptor (destination buffer, queue back-pointer)
 * @offset: source location inside the receive memory window
 * @len:    number of payload bytes to transfer
 *
 * Attempts an asynchronous DMA copy into the client buffer; the DMA
 * completion invokes ntb_rx_copy_callback().  Falls back to a CPU
 * memcpy when there is no channel, the transfer is below the DMA
 * threshold, alignment is unsuitable, or any DMA setup step fails.
 */
static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
			 size_t len)
{
	struct dma_async_tx_descriptor *txd;
	struct ntb_transport_qp *qp = entry->qp;
	struct dma_chan *chan = qp->dma_chan;
	struct dma_device *device;
	size_t pay_off, buff_off;
	struct dmaengine_unmap_data *unmap;
	dma_cookie_t cookie;
	void *buf = entry->buf;

	entry->len = len;

	if (!chan)
		goto err;

	/* Small transfers are cheaper on the CPU than DMA setup. */
	if (len < copy_bytes)
		goto err_wait;

	device = chan->device;
	pay_off = (size_t) offset & ~PAGE_MASK;
	buff_off = (size_t) buf & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
		goto err_wait;

	unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
	if (!unmap)
		goto err_wait;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset),
				      pay_off, len, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[0]))
		goto err_get_unmap;

	unmap->to_cnt = 1;

	unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf),
				      buff_off, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[1]))
		goto err_get_unmap;

	unmap->from_cnt = 1;

	txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
					     unmap->addr[0], len,
					     DMA_PREP_INTERRUPT);
	if (!txd)
		goto err_get_unmap;

	txd->callback = ntb_rx_copy_callback;
	txd->callback_param = entry;
	dma_set_unmap(txd, unmap);

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err_set_unmap;

	/* dma_set_unmap() took its own reference; drop ours. */
	dmaengine_unmap_put(unmap);

	qp->last_cookie = cookie;

	qp->rx_async++;

	return;

err_set_unmap:
	/* Two puts: one for the descriptor's reference, one for ours. */
	dmaengine_unmap_put(unmap);
err_get_unmap:
	dmaengine_unmap_put(unmap);
err_wait:
	/* If the callbacks come out of order, the writing of the index to the
	 * last completed will be out of order.  This may result in the
	 * receive stalling forever.
	 */
	dma_sync_wait(chan, qp->last_cookie);
err:
	ntb_memcpy_rx(entry, offset);
	qp->rx_memcpy++;
}

1123
1124
1125
1126
1127
1128
static int ntb_process_rxc(struct ntb_transport_qp *qp)
{
	struct ntb_payload_header *hdr;
	struct ntb_queue_entry *entry;
	void *offset;

Jon Mason's avatar
Jon Mason committed
1129
1130
1131
	offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
	hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);

1132
1133
1134
	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
	if (!entry) {
		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
Jon Mason's avatar
Jon Mason committed
1135
			"no buffer - HDR ver %u, len %d, flags %x\n",
1136
1137
1138
1139
1140
1141
1142
			hdr->ver, hdr->len, hdr->flags);
		qp->rx_err_no_buf++;
		return -ENOMEM;
	}

	if (!(hdr->flags & DESC_DONE_FLAG)) {
		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
Jon Mason's avatar
Jon Mason committed
1143
			     &qp->rx_pend_q);
1144
1145
1146
1147
		qp->rx_ring_empty++;
		return -EAGAIN;
	}

Jon Mason's avatar
Jon Mason committed
1148
	if (hdr->ver != (u32) qp->rx_pkts) {
1149
		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
Jon Mason's avatar
Jon Mason committed
1150
			"qp %d: version mismatch, expected %llu - got %u\n",
1151
1152
			qp->qp_num, qp->rx_pkts, hdr->ver);
		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
Jon Mason's avatar
Jon Mason committed
1153
			     &qp->rx_pend_q);
1154
1155
1156
1157
1158
1159
1160
		qp->rx_err_ver++;
		return -EIO;
	}

	if (hdr->flags & LINK_DOWN_FLAG) {
		ntb_qp_link_down(qp);

1161
		goto err;
1162
1163
1164
	}

	dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
Jon Mason's avatar
Jon Mason committed
1165
		"rx offset %u, ver %u - %d payload received, buf size %d\n",
Jon Mason's avatar
Jon Mason committed
1166
		qp->rx_index, hdr->ver, hdr->len, entry->len);
1167

1168
1169
	qp->rx_bytes += hdr->len;
	qp->rx_pkts++;
1170

1171
	if (hdr->len > entry->len) {
1172
1173
1174
1175
		qp->rx_err_oflow++;
		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
			"RX overflow! Wanted %d got %d\n",
			hdr->len, entry->len);
1176
1177

		goto err;
1178
1179
	}

1180
1181
1182
1183
	entry->index = qp->rx_index;
	entry->rx_hdr = hdr;

	ntb_async_rx(entry, offset, hdr->len);
1184
1185

out:
1186
1187
1188
1189
1190
1191
	qp->rx_index++;
	qp->rx_index %= qp->rx_max_entry;

	return 0;

err:
Jon Mason's avatar
Jon Mason committed
1192
	ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);
Jon Mason's avatar
Jon Mason committed
1193
1194
1195
	/* Ensure that the data is fully copied out before clearing the flag */
	wmb();
	hdr->flags = 0;
Jon Mason's avatar
Jon Mason committed
1196
	iowrite32(qp->rx_index, &qp->rx_info->entry);
Jon Mason's avatar
Jon Mason committed
1197

1198
	goto out;
1199
1200
}

1201
static int ntb_transport_rxc_db(void *data, int db_num)
1202
{
1203
	struct ntb_transport_qp *qp = data;
Jon Mason's avatar
Jon Mason committed
1204
	int rc, i;
1205

1206
1207
1208
	dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n",
		__func__, db_num);

Jon Mason's avatar
Jon Mason committed
1209
1210
1211
1212
	/* Limit the number of packets processed in a single interrupt to
	 * provide fairness to others
	 */
	for (i = 0; i < qp->rx_max_entry; i++) {
1213
		rc = ntb_process_rxc(qp);
Jon Mason's avatar
Jon Mason committed
1214
1215
1216
		if (rc)
			break;
	}
1217
1218
1219

	if (qp->dma_chan)
		dma_async_issue_pending(qp->dma_chan);
1220

1221
	return i;
1222
1223
}

1224
static void ntb_tx_copy_callback(void *data)
1225
{
1226
1227
1228
	struct ntb_queue_entry *entry = data;
	struct ntb_transport_qp *qp = entry->qp;
	struct ntb_payload_header __iomem *hdr = entry->tx_hdr;
1229

1230
	/* Ensure that the data is fully copied out before setting the flags */
Jon Mason's avatar
Jon Mason committed
1231
	wmb();
Jon Mason's avatar
Jon Mason committed
1232
	iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);
1233

Jon Mason's avatar
Jon Mason committed
1234
	ntb_ring_doorbell(qp->ndev, qp->qp_num);
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250

	/* The entry length can only be zero if the packet is intended to be a
	 * "link down" or similar.  Since no payload is being sent in these
	 * cases, there is nothing to add to the completion queue.
	 */
	if (entry->len > 0) {
		qp->tx_bytes += entry->len;

		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, entry->cb_data,
				       entry->len);
	}

	ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
}

1251
/*
 * ntb_memcpy_tx() - CPU fallback copy of an outgoing payload
 * @entry:  transmit descriptor holding the source buffer and length
 * @offset: destination inside the peer's memory window (MMIO space)
 *
 * Copies synchronously into the memory window, then runs the shared
 * transmit completion path.
 */
static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
{
	memcpy_toio(offset, entry->buf, entry->len);

	ntb_tx_copy_callback(entry);
}

static void ntb_async_tx(struct ntb_transport_qp *qp,
			 struct ntb_queue_entry *entry)
{
	struct ntb_payload_header __iomem *hdr;
	struct dma_async_tx_descriptor *txd;
	struct dma_chan *chan = qp->dma_chan;
	struct dma_device *device;
	size_t dest_off, buff_off;
1266
1267
	struct dmaengine_unmap_data *unmap;
	dma_addr_t dest;
1268
	dma_cookie_t cookie;
Jon Mason's avatar
Jon Mason committed
1269
	void __iomem *offset;
1270
1271
	size_t len = entry->len;
	void *buf = entry->buf;
1272

Jon Mason's avatar
Jon Mason committed
1273
	offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
1274
1275
	hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
	entry->tx_hdr = hdr;
1276