/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 *   redistributing this file, you may do so under either license.
 *
 *   GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   BSD LICENSE
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Intel PCIe NTB Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/ntb.h>
#include "ntb_hw.h"

#define NTB_TRANSPORT_VERSION	3

static unsigned int transport_mtu = 0x401E;
module_param(transport_mtu, uint, 0644);
MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");

static unsigned char max_num_clients;
module_param(max_num_clients, byte, 0644);
MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");

static unsigned int copy_bytes = 1024;
module_param(copy_bytes, uint, 0644);
MODULE_PARM_DESC(copy_bytes, "Threshold under which NTB will use the CPU to copy instead of DMA");

struct ntb_queue_entry {
	/* ntb_queue list reference */
	struct list_head entry;
	/* pointers to data to be transferred */
	void *cb_data;
	void *buf;
	unsigned int len;
	unsigned int flags;

	struct ntb_transport_qp *qp;
	union {
		struct ntb_payload_header __iomem *tx_hdr;
		struct ntb_payload_header *rx_hdr;
	};
	unsigned int index;
};

struct ntb_rx_info {
	unsigned int entry;
};

struct ntb_transport_qp {
	struct ntb_transport *transport;
	struct ntb_device *ndev;
	void *cb_data;
	struct dma_chan *dma_chan;

	bool client_ready;
	bool qp_link;
	u8 qp_num;	/* Only 64 QPs are allowed (0-63) */

	struct ntb_rx_info __iomem *rx_info;
	struct ntb_rx_info *remote_rx_info;

	void (*tx_handler) (struct ntb_transport_qp *qp, void *qp_data,
			    void *data, int len);
	struct list_head tx_free_q;
	spinlock_t ntb_tx_free_q_lock;
	void __iomem *tx_mw;
	dma_addr_t tx_mw_phys;
	unsigned int tx_index;
	unsigned int tx_max_entry;
	unsigned int tx_max_frame;

	void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data,
			    void *data, int len);
	struct tasklet_struct rx_work;
	struct list_head rx_pend_q;
	struct list_head rx_free_q;
	spinlock_t ntb_rx_pend_q_lock;
	spinlock_t ntb_rx_free_q_lock;
	void *rx_buff;
	unsigned int rx_index;
	unsigned int rx_max_entry;
	unsigned int rx_max_frame;
	dma_cookie_t last_cookie;

	void (*event_handler) (void *data, int status);
	struct delayed_work link_work;
	struct work_struct link_cleanup;

	struct dentry *debugfs_dir;
	struct dentry *debugfs_stats;

	/* Stats */
	u64 rx_bytes;
	u64 rx_pkts;
	u64 rx_ring_empty;
	u64 rx_err_no_buf;
	u64 rx_err_oflow;
	u64 rx_err_ver;
	u64 rx_memcpy;
	u64 rx_async;
	u64 tx_bytes;
	u64 tx_pkts;
	u64 tx_ring_full;
	u64 tx_err_no_buf;
	u64 tx_memcpy;
	u64 tx_async;
};

struct ntb_transport_mw {
	size_t size;
	void *virt_addr;
	dma_addr_t dma_addr;
};

struct ntb_transport_client_dev {
	struct list_head entry;
	struct device dev;
};

struct ntb_transport {
	struct list_head entry;
	struct list_head client_devs;

	struct ntb_device *ndev;
	struct ntb_transport_mw *mw;
	struct ntb_transport_qp *qps;
	unsigned int max_qps;
	unsigned long qp_bitmap;
	bool transport_link;
	struct delayed_work link_work;
	struct work_struct link_cleanup;
};

enum {
	DESC_DONE_FLAG = 1 << 0,
	LINK_DOWN_FLAG = 1 << 1,
};

struct ntb_payload_header {
	unsigned int ver;
	unsigned int len;
	unsigned int flags;
};
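
/*
 * Note: each ring frame carries its payload first and this header last
 * (see ntb_transport_setup_qp_mw()); "ver" holds a rolling packet count
 * so ntb_process_rxc() can flag missed frames when hdr->ver does not
 * match the expected rx_pkts value.
 */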

enum {
	VERSION = 0,
	QP_LINKS,
	NUM_QPS,
	NUM_MWS,
	MW0_SZ_HIGH,
	MW0_SZ_LOW,
	MW1_SZ_HIGH,
	MW1_SZ_LOW,
	MAX_SPAD,
};
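
/*
 * Handshake note: ntb_transport_link_work() writes the local MW sizes,
 * then NUM_MWS, NUM_QPS, and finally VERSION into the remote
 * scratchpads -- the reverse of the order it reads them back -- so a
 * matching VERSION implies the rest of the peer's info is in place.
 */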

#define QP_TO_MW(ndev, qp)	((qp) % ntb_max_mw(ndev))
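/*
 * Example (illustrative): with 2 memory windows and 4 qps, QP_TO_MW()
 * puts qps 0 and 2 on MW0 and qps 1 and 3 on MW1; the qps sharing a
 * window split its size between them in ntb_transport_setup_qp_mw().
 */
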
#define NTB_QP_DEF_NUM_ENTRIES	100
#define NTB_LINK_DOWN_TIMEOUT	10

static int ntb_match_bus(struct device *dev, struct device_driver *drv)
{
	return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
}

static int ntb_client_probe(struct device *dev)
{
	const struct ntb_client *drv = container_of(dev->driver,
						    struct ntb_client, driver);
	struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);
	int rc = -EINVAL;

	get_device(dev);
	if (drv && drv->probe)
		rc = drv->probe(pdev);
	if (rc)
		put_device(dev);

	return rc;
}

static int ntb_client_remove(struct device *dev)
{
	const struct ntb_client *drv = container_of(dev->driver,
						    struct ntb_client, driver);
	struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);

	if (drv && drv->remove)
		drv->remove(pdev);

	put_device(dev);

	return 0;
}

static struct bus_type ntb_bus_type = {
	.name = "ntb_bus",
	.match = ntb_match_bus,
	.probe = ntb_client_probe,
	.remove = ntb_client_remove,
};

static LIST_HEAD(ntb_transport_list);

static int ntb_bus_init(struct ntb_transport *nt)
{
	if (list_empty(&ntb_transport_list)) {
		int rc = bus_register(&ntb_bus_type);
		if (rc)
			return rc;
	}

	list_add(&nt->entry, &ntb_transport_list);

	return 0;
}

static void ntb_bus_remove(struct ntb_transport *nt)
{
	struct ntb_transport_client_dev *client_dev, *cd;

	list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
		dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
			dev_name(&client_dev->dev));
		list_del(&client_dev->entry);
		device_unregister(&client_dev->dev);
	}

	list_del(&nt->entry);

	if (list_empty(&ntb_transport_list))
		bus_unregister(&ntb_bus_type);
}

static void ntb_client_release(struct device *dev)
{
	struct ntb_transport_client_dev *client_dev;
	client_dev = container_of(dev, struct ntb_transport_client_dev, dev);

	kfree(client_dev);
}

/**
 * ntb_unregister_client_dev - Unregister NTB client device
 * @device_name: Name of NTB client device
 *
 * Unregister an NTB client device with the NTB transport layer
 */
void ntb_unregister_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client, *cd;
	struct ntb_transport *nt;

	list_for_each_entry(nt, &ntb_transport_list, entry)
		list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
			if (!strncmp(dev_name(&client->dev), device_name,
				     strlen(device_name))) {
				list_del(&client->entry);
				device_unregister(&client->dev);
			}
}
EXPORT_SYMBOL_GPL(ntb_unregister_client_dev);

/**
 * ntb_register_client_dev - Register NTB client device
 * @device_name: Name of NTB client device
 *
 * Register an NTB client device with the NTB transport layer
 */
int ntb_register_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client_dev;
	struct ntb_transport *nt;
	int rc, i = 0;

	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	list_for_each_entry(nt, &ntb_transport_list, entry) {
		struct device *dev;

		client_dev = kzalloc(sizeof(struct ntb_transport_client_dev),
				     GFP_KERNEL);
		if (!client_dev) {
			rc = -ENOMEM;
			goto err;
		}

		dev = &client_dev->dev;

		/* setup and register client devices */
		dev_set_name(dev, "%s%d", device_name, i);
		dev->bus = &ntb_bus_type;
		dev->release = ntb_client_release;
		dev->parent = &ntb_query_pdev(nt->ndev)->dev;

		rc = device_register(dev);
		if (rc) {
			kfree(client_dev);
			goto err;
		}

		list_add_tail(&client_dev->entry, &nt->client_devs);
		i++;
	}

	return 0;

err:
	ntb_unregister_client_dev(device_name);

	return rc;
}
EXPORT_SYMBOL_GPL(ntb_register_client_dev);

/**
 * ntb_register_client - Register NTB client driver
 * @drv: NTB client driver to be registered
 *
 * Register an NTB client driver with the NTB transport layer
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_register_client(struct ntb_client *drv)
{
	drv->driver.bus = &ntb_bus_type;

	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_register_client);
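
/*
 * Usage sketch (hypothetical client, not part of this file): a client
 * supplies probe/remove callbacks and a driver name matching a device
 * name registered via ntb_register_client_dev():
 *
 *	static struct ntb_client demo_client = {
 *		.driver.name	= "ntb_demo",
 *		.driver.owner	= THIS_MODULE,
 *		.probe		= demo_probe,
 *		.remove		= demo_remove,
 *	};
 *
 *	rc = ntb_register_client_dev("ntb_demo");
 *	if (!rc)
 *		rc = ntb_register_client(&demo_client);
 *
 * ntb_match_bus() then pairs the two by name and ntb_client_probe()
 * invokes demo_probe() with the NTB's struct pci_dev.
 */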

/**
 * ntb_unregister_client - Unregister NTB client driver
 * @drv: NTB client driver to be unregistered
 *
 * Unregister an NTB client driver with the NTB transport layer
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
void ntb_unregister_client(struct ntb_client *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_unregister_client);

static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
			    loff_t *offp)
{
	struct ntb_transport_qp *qp;
	char *buf;
	ssize_t ret, out_offset, out_count;

	out_count = 1000;

	buf = kmalloc(out_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	qp = filp->private_data;
	out_offset = 0;
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "NTB QP stats\n");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_bytes - \t%llu\n", qp->rx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_pkts - \t%llu\n", qp->rx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_memcpy - \t%llu\n", qp->rx_memcpy);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_async - \t%llu\n", qp->rx_async);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_ring_empty - %llu\n", qp->rx_ring_empty);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_ver - \t%llu\n", qp->rx_err_ver);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_buff - \t%p\n", qp->rx_buff);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_index - \t%u\n", qp->rx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_max_entry - \t%u\n", qp->rx_max_entry);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_bytes - \t%llu\n", qp->tx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_pkts - \t%llu\n", qp->tx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_memcpy - \t%llu\n", qp->tx_memcpy);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_async - \t%llu\n", qp->tx_async);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_ring_full - \t%llu\n", qp->tx_ring_full);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_err_no_buf - %llu\n", qp->tx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_mw - \t%p\n", qp->tx_mw);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_index - \t%u\n", qp->tx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_max_entry - \t%u\n", qp->tx_max_entry);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "\nQP Link %s\n", (qp->qp_link == NTB_LINK_UP) ?
			       "Up" : "Down");
	if (out_offset > out_count)
		out_offset = out_count;

	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
	kfree(buf);
	return ret;
}

static const struct file_operations ntb_qp_debugfs_stats = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = debugfs_read,
};

static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
			 struct list_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add_tail(entry, list);
	spin_unlock_irqrestore(lock, flags);
}

static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
						struct list_head *list)
{
	struct ntb_queue_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	if (list_empty(list)) {
		entry = NULL;
		goto out;
	}
	entry = list_first_entry(list, struct ntb_queue_entry, entry);
	list_del(&entry->entry);
out:
	spin_unlock_irqrestore(lock, flags);

	return entry;
}

static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
				      unsigned int qp_num)
{
	struct ntb_transport_qp *qp = &nt->qps[qp_num];
	unsigned int rx_size, num_qps_mw;
	u8 mw_num, mw_max;
	unsigned int i;

	mw_max = ntb_max_mw(nt->ndev);
	mw_num = QP_TO_MW(nt->ndev, qp_num);

	WARN_ON(nt->mw[mw_num].virt_addr == NULL);

	if (nt->max_qps % mw_max && mw_num < nt->max_qps % mw_max)
		num_qps_mw = nt->max_qps / mw_max + 1;
	else
		num_qps_mw = nt->max_qps / mw_max;

	rx_size = (unsigned int) nt->mw[mw_num].size / num_qps_mw;
	qp->rx_buff = nt->mw[mw_num].virt_addr + qp_num / mw_max * rx_size;
	rx_size -= sizeof(struct ntb_rx_info);

	qp->remote_rx_info = qp->rx_buff + rx_size;

	/* Due to housekeeping, there must be at least 2 buffers */
	qp->rx_max_frame = min(transport_mtu, rx_size / 2);
	qp->rx_max_entry = rx_size / qp->rx_max_frame;
	qp->rx_index = 0;

	qp->remote_rx_info->entry = qp->rx_max_entry - 1;

	/* setup the hdr offsets with 0's */
	for (i = 0; i < qp->rx_max_entry; i++) {
		void *offset = qp->rx_buff + qp->rx_max_frame * (i + 1) -
			       sizeof(struct ntb_payload_header);
		memset(offset, 0, sizeof(struct ntb_payload_header));
	}

	qp->rx_pkts = 0;
	qp->tx_pkts = 0;
	qp->tx_index = 0;
}
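
/*
 * Resulting layout of the qp's slice of the memory window (illustrative):
 *
 *	rx_buff -> [ payload | hdr ][ payload | hdr ] ... [ ntb_rx_info ]
 *	            <- rx_max_frame ->  (rx_max_entry frames in total)
 *
 * Each frame ends in a struct ntb_payload_header, and the trailing
 * struct ntb_rx_info is written by the peer (through its tx_mw mapping)
 * to advertise the last rx entry it has freed, which the local tx side
 * reads as remote_rx_info to detect a full ring.
 */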

static void ntb_free_mw(struct ntb_transport *nt, int num_mw)
{
	struct ntb_transport_mw *mw = &nt->mw[num_mw];
	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);

	if (!mw->virt_addr)
		return;

	dma_free_coherent(&pdev->dev, mw->size, mw->virt_addr, mw->dma_addr);
	mw->virt_addr = NULL;
}

static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
{
	struct ntb_transport_mw *mw = &nt->mw[num_mw];
	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);

	/* No need to re-setup */
	if (mw->size == ALIGN(size, 4096))
		return 0;

	if (mw->size != 0)
		ntb_free_mw(nt, num_mw);

	/* Alloc memory for receiving data.  Must be 4k aligned */
	mw->size = ALIGN(size, 4096);

	mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr,
					   GFP_KERNEL);
	if (!mw->virt_addr) {
		mw->size = 0;
		dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n",
		       (int) mw->size);
		return -ENOMEM;
	}

	/* Notify HW the memory location of the receive buffer */
	ntb_set_mw_addr(nt->ndev, num_mw, mw->dma_addr);

	return 0;
}

static void ntb_qp_link_cleanup(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_cleanup);
	struct ntb_transport *nt = qp->transport;
	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);

	if (qp->qp_link == NTB_LINK_DOWN) {
		cancel_delayed_work_sync(&qp->link_work);
		return;
	}

	if (qp->event_handler)
		qp->event_handler(qp->cb_data, NTB_LINK_DOWN);

	dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
	qp->qp_link = NTB_LINK_DOWN;

	if (nt->transport_link == NTB_LINK_UP)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static void ntb_qp_link_down(struct ntb_transport_qp *qp)
{
	schedule_work(&qp->link_cleanup);
}

static void ntb_transport_link_cleanup(struct work_struct *work)
{
	struct ntb_transport *nt = container_of(work, struct ntb_transport,
						link_cleanup);
	int i;

	if (nt->transport_link == NTB_LINK_DOWN)
		cancel_delayed_work_sync(&nt->link_work);
	else
		nt->transport_link = NTB_LINK_DOWN;

	/* Pass along the info to any clients */
	for (i = 0; i < nt->max_qps; i++)
		if (!test_bit(i, &nt->qp_bitmap))
			ntb_qp_link_down(&nt->qps[i]);

	/* The scratchpad registers keep their values if the remote side
	 * goes down; blast them now to give them a sane value the next
	 * time they are accessed.
	 */
	for (i = 0; i < MAX_SPAD; i++)
		ntb_write_local_spad(nt->ndev, i, 0);
}

static void ntb_transport_event_callback(void *data, enum ntb_hw_event event)
{
	struct ntb_transport *nt = data;

	switch (event) {
	case NTB_EVENT_HW_LINK_UP:
		schedule_delayed_work(&nt->link_work, 0);
		break;
	case NTB_EVENT_HW_LINK_DOWN:
		schedule_work(&nt->link_cleanup);
		break;
	default:
		BUG();
	}
}

static void ntb_transport_link_work(struct work_struct *work)
{
	struct ntb_transport *nt = container_of(work, struct ntb_transport,
						link_work.work);
	struct ntb_device *ndev = nt->ndev;
	struct pci_dev *pdev = ntb_query_pdev(ndev);
	u32 val;
	int rc, i;

	/* send the local info, in the opposite order of the way we read it */
	for (i = 0; i < ntb_max_mw(ndev); i++) {
		rc = ntb_write_remote_spad(ndev, MW0_SZ_HIGH + (i * 2),
					   ntb_get_mw_size(ndev, i) >> 32);
		if (rc) {
			dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
				(u32)(ntb_get_mw_size(ndev, i) >> 32),
				MW0_SZ_HIGH + (i * 2));
			goto out;
		}

		rc = ntb_write_remote_spad(ndev, MW0_SZ_LOW + (i * 2),
					   (u32) ntb_get_mw_size(ndev, i));
		if (rc) {
			dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
				(u32) ntb_get_mw_size(ndev, i),
				MW0_SZ_LOW + (i * 2));
			goto out;
		}
	}

	rc = ntb_write_remote_spad(ndev, NUM_MWS, ntb_max_mw(ndev));
	if (rc) {
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			ntb_max_mw(ndev), NUM_MWS);
		goto out;
	}

	rc = ntb_write_remote_spad(ndev, NUM_QPS, nt->max_qps);
	if (rc) {
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			nt->max_qps, NUM_QPS);
		goto out;
	}

	rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION);
	if (rc) {
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			NTB_TRANSPORT_VERSION, VERSION);
		goto out;
	}

	/* Query the remote side for its info */
	rc = ntb_read_remote_spad(ndev, VERSION, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading remote spad %d\n", VERSION);
		goto out;
	}

	if (val != NTB_TRANSPORT_VERSION)
		goto out;
	dev_dbg(&pdev->dev, "Remote version = %d\n", val);

	rc = ntb_read_remote_spad(ndev, NUM_QPS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_QPS);
		goto out;
	}

	if (val != nt->max_qps)
		goto out;
	dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);

	rc = ntb_read_remote_spad(ndev, NUM_MWS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_MWS);
		goto out;
	}

	if (val != ntb_max_mw(ndev))
		goto out;
	dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);

	for (i = 0; i < ntb_max_mw(ndev); i++) {
		u64 val64;

		rc = ntb_read_remote_spad(ndev, MW0_SZ_HIGH + (i * 2), &val);
		if (rc) {
			dev_err(&pdev->dev, "Error reading remote spad %d\n",
				MW0_SZ_HIGH + (i * 2));
			goto out1;
		}

		val64 = (u64) val << 32;

		rc = ntb_read_remote_spad(ndev, MW0_SZ_LOW + (i * 2), &val);
		if (rc) {
			dev_err(&pdev->dev, "Error reading remote spad %d\n",
				MW0_SZ_LOW + (i * 2));
			goto out1;
		}

		val64 |= val;

		dev_dbg(&pdev->dev, "Remote MW%d size = %llu\n", i, val64);

		rc = ntb_set_mw(nt, i, val64);
		if (rc)
			goto out1;
	}

	nt->transport_link = NTB_LINK_UP;

	for (i = 0; i < nt->max_qps; i++) {
		struct ntb_transport_qp *qp = &nt->qps[i];

		ntb_transport_setup_qp_mw(nt, i);

		if (qp->client_ready == NTB_LINK_UP)
			schedule_delayed_work(&qp->link_work, 0);
	}

	return;

out1:
	for (i = 0; i < ntb_max_mw(ndev); i++)
		ntb_free_mw(nt, i);
out:
	if (ntb_hw_link_status(ndev))
		schedule_delayed_work(&nt->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static void ntb_qp_link_work(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_work.work);
	struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
	struct ntb_transport *nt = qp->transport;
	int rc, val;

	WARN_ON(nt->transport_link != NTB_LINK_UP);

	rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val);
	if (rc) {
		dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
		return;
	}

	rc = ntb_write_remote_spad(nt->ndev, QP_LINKS, val | 1 << qp->qp_num);
	if (rc)
		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
			val | 1 << qp->qp_num, QP_LINKS);

	/* query remote spad for qp ready bits */
	rc = ntb_read_remote_spad(nt->ndev, QP_LINKS, &val);
	if (rc)
		dev_err(&pdev->dev, "Error reading remote spad %d\n", QP_LINKS);

	dev_dbg(&pdev->dev, "Remote QP link status = %x\n", val);

	/* See if the remote side is up */
	if (1 << qp->qp_num & val) {
		qp->qp_link = NTB_LINK_UP;

		dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
		if (qp->event_handler)
			qp->event_handler(qp->cb_data, NTB_LINK_UP);
	} else if (nt->transport_link == NTB_LINK_UP)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static int ntb_transport_init_queue(struct ntb_transport *nt,
				     unsigned int qp_num)
{
	struct ntb_transport_qp *qp;
	unsigned int num_qps_mw, tx_size;
	u8 mw_num, mw_max;
	u64 qp_offset;

	mw_max = ntb_max_mw(nt->ndev);
	mw_num = QP_TO_MW(nt->ndev, qp_num);

	qp = &nt->qps[qp_num];
	qp->qp_num = qp_num;
	qp->transport = nt;
	qp->ndev = nt->ndev;
	qp->qp_link = NTB_LINK_DOWN;
	qp->client_ready = NTB_LINK_DOWN;
	qp->event_handler = NULL;

	if (nt->max_qps % mw_max && mw_num < nt->max_qps % mw_max)
		num_qps_mw = nt->max_qps / mw_max + 1;
	else
		num_qps_mw = nt->max_qps / mw_max;

	tx_size = (unsigned int) ntb_get_mw_size(qp->ndev, mw_num) / num_qps_mw;
	qp_offset = qp_num / mw_max * tx_size;
	qp->tx_mw = ntb_get_mw_vbase(nt->ndev, mw_num) + qp_offset;
	if (!qp->tx_mw)
		return -EINVAL;

	qp->tx_mw_phys = ntb_get_mw_base(qp->ndev, mw_num) + qp_offset;
	if (!qp->tx_mw_phys)
		return -EINVAL;

	tx_size -= sizeof(struct ntb_rx_info);
	qp->rx_info = qp->tx_mw + tx_size;

	/* Due to housekeeping, there must be at least 2 buffers */
	qp->tx_max_frame = min(transport_mtu, tx_size / 2);
	qp->tx_max_entry = tx_size / qp->tx_max_frame;

	if (ntb_query_debugfs(nt->ndev)) {
		char debugfs_name[8];

		snprintf(debugfs_name, sizeof(debugfs_name), "qp%d", qp_num);
		qp->debugfs_dir = debugfs_create_dir(debugfs_name,
						 ntb_query_debugfs(nt->ndev));

		qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
							qp->debugfs_dir, qp,
							&ntb_qp_debugfs_stats);
	}

	INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
	INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup);

	spin_lock_init(&qp->ntb_rx_pend_q_lock);
	spin_lock_init(&qp->ntb_rx_free_q_lock);
	spin_lock_init(&qp->ntb_tx_free_q_lock);

	INIT_LIST_HEAD(&qp->rx_pend_q);
	INIT_LIST_HEAD(&qp->rx_free_q);
	INIT_LIST_HEAD(&qp->tx_free_q);

	return 0;
}

int ntb_transport_init(struct pci_dev *pdev)
{
	struct ntb_transport *nt;
	int rc, i;

	nt = kzalloc(sizeof(struct ntb_transport), GFP_KERNEL);
	if (!nt)
		return -ENOMEM;

	nt->ndev = ntb_register_transport(pdev, nt);
	if (!nt->ndev) {
		rc = -EIO;
		goto err;
	}

	nt->mw = kcalloc(ntb_max_mw(nt->ndev), sizeof(struct ntb_transport_mw),
			 GFP_KERNEL);
	if (!nt->mw) {
		rc = -ENOMEM;
		goto err1;
	}

	if (max_num_clients)
		nt->max_qps = min(ntb_max_cbs(nt->ndev), max_num_clients);
	else
		nt->max_qps = min(ntb_max_cbs(nt->ndev), ntb_max_mw(nt->ndev));

	nt->qps = kcalloc(nt->max_qps, sizeof(struct ntb_transport_qp),
			  GFP_KERNEL);
	if (!nt->qps) {
		rc = -ENOMEM;
		goto err2;
	}

	nt->qp_bitmap = ((u64) 1 << nt->max_qps) - 1;
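	/* e.g. max_qps == 4 yields a bitmap of 0xf: a set bit marks a
	 * free qp, and a bit is cleared while a client owns that queue,
	 * which is why ntb_transport_free() treats clear bits as live.
	 */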

	for (i = 0; i < nt->max_qps; i++) {
		rc = ntb_transport_init_queue(nt, i);
		if (rc)
			goto err3;
	}

	INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
	INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup);

	rc = ntb_register_event_callback(nt->ndev,
					 ntb_transport_event_callback);
	if (rc)
		goto err3;

	INIT_LIST_HEAD(&nt->client_devs);
	rc = ntb_bus_init(nt);
	if (rc)
		goto err4;

	if (ntb_hw_link_status(nt->ndev))
		schedule_delayed_work(&nt->link_work, 0);

	return 0;

err4:
	ntb_unregister_event_callback(nt->ndev);
err3:
	kfree(nt->qps);
err2:
	kfree(nt->mw);
err1:
	ntb_unregister_transport(nt->ndev);
err:
	kfree(nt);
	return rc;
}

void ntb_transport_free(void *transport)
{
	struct ntb_transport *nt = transport;
	struct ntb_device *ndev = nt->ndev;
	int i;

	nt->transport_link = NTB_LINK_DOWN;

	/* verify that all the qp's are freed */
	for (i = 0; i < nt->max_qps; i++) {
		if (!test_bit(i, &nt->qp_bitmap))
			ntb_transport_free_queue(&nt->qps[i]);
		debugfs_remove_recursive(nt->qps[i].debugfs_dir);
	}

	ntb_bus_remove(nt);

	cancel_delayed_work_sync(&nt->link_work);

	ntb_unregister_event_callback(ndev);

	for (i = 0; i < ntb_max_mw(ndev); i++)
		ntb_free_mw(nt, i);

	kfree(nt->qps);
	kfree(nt->mw);
	ntb_unregister_transport(ndev);
	kfree(nt);
}

static void ntb_rx_copy_callback(void *data)
{
	struct ntb_queue_entry *entry = data;
	struct ntb_transport_qp *qp = entry->qp;
	void *cb_data = entry->cb_data;
	unsigned int len = entry->len;
	struct ntb_payload_header *hdr = entry->rx_hdr;

	/* Ensure that the data is fully copied out before clearing the flag */
	wmb();
	hdr->flags = 0;

	iowrite32(entry->index, &qp->rx_info->entry);

	ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);

	if (qp->rx_handler && qp->client_ready == NTB_LINK_UP)
		qp->rx_handler(qp, qp->cb_data, cb_data, len);
}

static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
{
	void *buf = entry->buf;
	size_t len = entry->len;

	memcpy(buf, offset, len);

	ntb_rx_copy_callback(entry);
}

static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
			 size_t len)
{
	struct dma_async_tx_descriptor *txd;
	struct ntb_transport_qp *qp = entry->qp;
	struct dma_chan *chan = qp->dma_chan;
	struct dma_device *device;
	size_t pay_off, buff_off;
	struct dmaengine_unmap_data *unmap;
	dma_cookie_t cookie;
	void *buf = entry->buf;

	entry->len = len;

	if (!chan)
		goto err;

	if (len < copy_bytes)
		goto err_wait;

	device = chan->device;
	pay_off = (size_t) offset & ~PAGE_MASK;
	buff_off = (size_t) buf & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
		goto err_wait;

	unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
	if (!unmap)
		goto err_wait;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset),
				      pay_off, len, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[0]))
		goto err_get_unmap;

	unmap->to_cnt = 1;

	unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf),
				      buff_off, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[1]))
		goto err_get_unmap;

	unmap->from_cnt = 1;

	txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
					     unmap->addr[0], len,
					     DMA_PREP_INTERRUPT);
	if (!txd)
		goto err_get_unmap;

	txd->callback = ntb_rx_copy_callback;
	txd->callback_param = entry;
	dma_set_unmap(txd, unmap);

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err_set_unmap;

	dmaengine_unmap_put(unmap);

	qp->last_cookie = cookie;

	qp->rx_async++;

	return;

err_set_unmap:
	dmaengine_unmap_put(unmap);
err_get_unmap:
	dmaengine_unmap_put(unmap);
err_wait:
	/* If the callbacks come out of order, the writing of the index to the
	 * last completed will be out of order.  This may result in the
	 * receive stalling forever.
	 */
	dma_sync_wait(chan, qp->last_cookie);
err:
	ntb_memcpy_rx(entry, offset);
	qp->rx_memcpy++;
}
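
/*
 * Summary of the rx copy policy above: DMA is attempted only when a
 * channel exists, len >= copy_bytes, and the engine accepts the src/dst
 * alignment; every fallback taken after a descriptor could already be
 * in flight first waits on qp->last_cookie, so CPU copies cannot
 * complete out of order with pending DMA.
 */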

static int ntb_process_rxc(struct ntb_transport_qp *qp)
{
	struct ntb_payload_header *hdr;
	struct ntb_queue_entry *entry;
	void *offset;

	offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
	hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);

	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
	if (!entry) {
		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
			"no buffer - HDR ver %u, len %d, flags %x\n",
			hdr->ver, hdr->len, hdr->flags);
		qp->rx_err_no_buf++;
		return -ENOMEM;
	}

	if (!(hdr->flags & DESC_DONE_FLAG)) {
		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
			     &qp->rx_pend_q);
		qp->rx_ring_empty++;
		return -EAGAIN;
	}

	if (hdr->ver != (u32) qp->rx_pkts) {
		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
			"qp %d: version mismatch, expected %llu - got %u\n",
			qp->qp_num, qp->rx_pkts, hdr->ver);
		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
			     &qp->rx_pend_q);
		qp->rx_err_ver++;
		return -EIO;
	}

	if (hdr->flags & LINK_DOWN_FLAG) {
		ntb_qp_link_down(qp);

		goto err;
	}

	dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
		"rx offset %u, ver %u - %d payload received, buf size %d\n",
		qp->rx_index, hdr->ver, hdr->len, entry->len);

	qp->rx_bytes += hdr->len;
	qp->rx_pkts++;

	if (hdr->len > entry->len) {
		qp->rx_err_oflow++;
		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
			"RX overflow! Wanted %d got %d\n",
			hdr->len, entry->len);

		goto err;
	}

	entry->index = qp->rx_index;
	entry->rx_hdr = hdr;

	ntb_async_rx(entry, offset, hdr->len);

out:
	qp->rx_index++;
	qp->rx_index %= qp->rx_max_entry;

	return 0;

err:
	ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
		     &qp->rx_pend_q);
	/* Ensure that the data is fully copied out before clearing the flag */
	wmb();
	hdr->flags = 0;
	iowrite32(qp->rx_index, &qp->rx_info->entry);

	goto out;
}

static void ntb_transport_rx(unsigned long data)
{
	struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data;
	int rc, i;

	/* Limit the number of packets processed in a single interrupt to
	 * provide fairness to others
	 */
	for (i = 0; i < qp->rx_max_entry; i++) {
		rc = ntb_process_rxc(qp);
		if (rc)
			break;
	}

	if (qp->dma_chan)
		dma_async_issue_pending(qp->dma_chan);
}

static void ntb_transport_rxc_db(void *data, int db_num)
{
	struct ntb_transport_qp *qp = data;

	dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n",
		__func__, db_num);

	tasklet_schedule(&qp->rx_work);
}

static void ntb_tx_copy_callback(void *data)
{
	struct ntb_queue_entry *entry = data;
	struct ntb_transport_qp *qp = entry->qp;
	struct ntb_payload_header __iomem *hdr = entry->tx_hdr;

	/* Ensure that the data is fully copied out before setting the flags */
	wmb();
	iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);

	ntb_ring_doorbell(qp->ndev, qp->qp_num);

	/* The entry length can only be zero if the packet is intended to be a
	 * "link down" or similar.  Since no payload is being sent in these
	 * cases, there is nothing to add to the completion queue.
	 */
	if (entry->len > 0) {
		qp->tx_bytes += entry->len;

		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, entry->cb_data,
				       entry->len);
	}

	ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
}

static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
{
	memcpy_toio(offset, entry->buf, entry->len);

	ntb_tx_copy_callback(entry);
}

static void ntb_async_tx(struct ntb_transport_qp *qp,
			 struct ntb_queue_entry *entry)
{
	struct ntb_payload_header __iomem *hdr;
	struct dma_async_tx_descriptor *txd;
	struct dma_chan *chan = qp->dma_chan;
	struct dma_device *device;
	size_t dest_off, buff_off;
	struct dmaengine_unmap_data *unmap;
	dma_addr_t dest;
	dma_cookie_t cookie;
	void __iomem *offset;
	size_t len = entry->len;
	void *buf = entry->buf;

	offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
	hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
	entry->tx_hdr = hdr;

	iowrite32(entry->len, &hdr->len);
	iowrite32((u32) qp->tx_pkts, &hdr->ver);

	if (!chan)
		goto err;

	if (len < copy_bytes)
		goto err;

	device = chan->device;
	dest = qp->tx_mw_phys + qp->tx_max_frame * qp->tx_index;
	buff_off = (size_t) buf & ~PAGE_MASK;
	dest_off = (size_t) dest & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, buff_off, dest_off, len))
		goto err;

	unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
	if (!unmap)
		goto err;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(buf),
				      buff_off, len, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[0]))
		goto err_get_unmap;

	unmap->to_cnt = 1;

	txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len,
					     DMA_PREP_INTERRUPT);
	if (!txd)
		goto err_get_unmap;

	txd->callback = ntb_tx_copy_callback;
	txd->callback_param = entry;
	dma_set_unmap(txd, unmap);

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err_set_unmap;

	dmaengine_unmap_put(unmap);