/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 *   redistributing this file, you may do so under either license.
 *
 *   GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   BSD LICENSE
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * PCIe NTB Transport Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include "linux/ntb.h"
#include "linux/ntb_transport.h"

#define NTB_TRANSPORT_VERSION	4
#define NTB_TRANSPORT_VER	"4"
#define NTB_TRANSPORT_NAME	"ntb_transport"
#define NTB_TRANSPORT_DESC	"Software Queue-Pair Transport over NTB"

MODULE_DESCRIPTION(NTB_TRANSPORT_DESC);
MODULE_VERSION(NTB_TRANSPORT_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");

static unsigned long max_mw_size;
module_param(max_mw_size, ulong, 0644);
MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows");

static unsigned int transport_mtu = 0x10000;
module_param(transport_mtu, uint, 0644);
MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");

static unsigned char max_num_clients;
module_param(max_num_clients, byte, 0644);
MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");

static unsigned int copy_bytes = 1024;
module_param(copy_bytes, uint, 0644);
MODULE_PARM_DESC(copy_bytes, "Threshold under which NTB will use the CPU to copy instead of DMA");

static bool use_dma;
module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "Use DMA engine to perform large data copy");

static struct dentry *nt_debugfs_dir;

struct ntb_queue_entry {
	/* ntb_queue list reference */
	struct list_head entry;
	/* pointers to data to be transferred */
	void *cb_data;
	void *buf;
	unsigned int len;
	unsigned int flags;

	struct ntb_transport_qp *qp;
	union {
		struct ntb_payload_header __iomem *tx_hdr;
		struct ntb_payload_header *rx_hdr;
	};
	unsigned int index;
};

struct ntb_rx_info {
	unsigned int entry;
};

struct ntb_transport_qp {
	struct ntb_transport_ctx *transport;
	struct ntb_dev *ndev;
	void *cb_data;
	struct dma_chan *tx_dma_chan;
	struct dma_chan *rx_dma_chan;

	bool client_ready;
	bool link_is_up;
	bool active;

	u8 qp_num;	/* Only 64 QPs are allowed.  0-63 */
	u64 qp_bit;

	struct ntb_rx_info __iomem *rx_info;
	struct ntb_rx_info *remote_rx_info;

	void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
			   void *data, int len);
	struct list_head tx_free_q;
	spinlock_t ntb_tx_free_q_lock;
	void __iomem *tx_mw;
	dma_addr_t tx_mw_phys;
	unsigned int tx_index;
	unsigned int tx_max_entry;
	unsigned int tx_max_frame;

	void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
			   void *data, int len);
	struct list_head rx_post_q;
	struct list_head rx_pend_q;
	struct list_head rx_free_q;
	/* ntb_rx_q_lock: synchronize access to rx_XXXX_q */
	spinlock_t ntb_rx_q_lock;
	void *rx_buff;
	unsigned int rx_index;
	unsigned int rx_max_entry;
	unsigned int rx_max_frame;
	unsigned int rx_alloc_entry;
	dma_cookie_t last_cookie;
	struct tasklet_struct rxc_db_work;

	void (*event_handler)(void *data, int status);
	struct delayed_work link_work;
	struct work_struct link_cleanup;

	struct dentry *debugfs_dir;
	struct dentry *debugfs_stats;

	/* Stats */
	u64 rx_bytes;
	u64 rx_pkts;
	u64 rx_ring_empty;
	u64 rx_err_no_buf;
	u64 rx_err_oflow;
	u64 rx_err_ver;
	u64 rx_memcpy;
	u64 rx_async;
	u64 dma_rx_prep_err;
	u64 tx_bytes;
	u64 tx_pkts;
	u64 tx_ring_full;
	u64 tx_err_no_buf;
	u64 tx_memcpy;
	u64 tx_async;
	u64 dma_tx_prep_err;
};

struct ntb_transport_mw {
	phys_addr_t phys_addr;
	resource_size_t phys_size;
	resource_size_t xlat_align;
	resource_size_t xlat_align_size;
	void __iomem *vbase;
	size_t xlat_size;
	size_t buff_size;
	void *virt_addr;
	dma_addr_t dma_addr;
};

struct ntb_transport_client_dev {
	struct list_head entry;
	struct ntb_transport_ctx *nt;
	struct device dev;
};

struct ntb_transport_ctx {
	struct list_head entry;
	struct list_head client_devs;

	struct ntb_dev *ndev;

	struct ntb_transport_mw *mw_vec;
	struct ntb_transport_qp *qp_vec;
	unsigned int mw_count;
	unsigned int qp_count;
	u64 qp_bitmap;
	u64 qp_bitmap_free;

	bool link_is_up;
	struct delayed_work link_work;
	struct work_struct link_cleanup;

	struct dentry *debugfs_node_dir;
};

enum {
	DESC_DONE_FLAG = BIT(0),
	LINK_DOWN_FLAG = BIT(1),
};

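/*
 * Per-frame trailer written at the end of each transport frame in the
 * memory window; flags carries DESC_DONE_FLAG / LINK_DOWN_FLAG.
 */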
struct ntb_payload_header {
	unsigned int ver;
	unsigned int len;
	unsigned int flags;
};

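/*
 * Scratchpad register indices used for the link-up handshake; each
 * memory window advertises its size to the peer as a 32-bit high/low
 * pair.
 */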
enum {
	VERSION = 0,
	QP_LINKS,
	NUM_QPS,
	NUM_MWS,
	MW0_SZ_HIGH,
	MW0_SZ_LOW,
	MW1_SZ_HIGH,
	MW1_SZ_LOW,
	MAX_SPAD,
};

#define dev_client_dev(__dev) \
	container_of((__dev), struct ntb_transport_client_dev, dev)

#define drv_client(__drv) \
	container_of((__drv), struct ntb_transport_client, driver)

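/* Queue pairs are spread across the memory windows round-robin: qp N uses MW (N % mw_count). */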
#define QP_TO_MW(nt, qp)	((qp) % nt->mw_count)
#define NTB_QP_DEF_NUM_ENTRIES	100
#define NTB_LINK_DOWN_TIMEOUT	10
#define DMA_RETRIES		20
#define DMA_OUT_RESOURCE_TO	50

static void ntb_transport_rxc_db(unsigned long data);
static const struct ntb_ctx_ops ntb_transport_ops;
static struct ntb_client ntb_transport_client;

static int ntb_transport_bus_match(struct device *dev,
				   struct device_driver *drv)
{
	return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
}

static int ntb_transport_bus_probe(struct device *dev)
{
	const struct ntb_transport_client *client;
	int rc = -EINVAL;

	get_device(dev);

	client = drv_client(dev->driver);
	rc = client->probe(dev);
	if (rc)
		put_device(dev);

	return rc;
}

static int ntb_transport_bus_remove(struct device *dev)
{
	const struct ntb_transport_client *client;

	client = drv_client(dev->driver);
	client->remove(dev);

	put_device(dev);

	return 0;
}

static struct bus_type ntb_transport_bus = {
	.name = "ntb_transport",
	.match = ntb_transport_bus_match,
	.probe = ntb_transport_bus_probe,
	.remove = ntb_transport_bus_remove,
};

static LIST_HEAD(ntb_transport_list);

static int ntb_bus_init(struct ntb_transport_ctx *nt)
{
	list_add_tail(&nt->entry, &ntb_transport_list);
	return 0;
}

static void ntb_bus_remove(struct ntb_transport_ctx *nt)
{
	struct ntb_transport_client_dev *client_dev, *cd;

	list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
		dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
			dev_name(&client_dev->dev));
		list_del(&client_dev->entry);
		device_unregister(&client_dev->dev);
	}

	list_del(&nt->entry);
}

static void ntb_transport_client_release(struct device *dev)
{
	struct ntb_transport_client_dev *client_dev;

	client_dev = dev_client_dev(dev);
	kfree(client_dev);
}

/**
 * ntb_transport_unregister_client_dev - Unregister NTB client device
 * @device_name: Name of NTB client device
 *
 * Unregister an NTB client device from the NTB transport layer
 */
void ntb_transport_unregister_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client, *cd;
	struct ntb_transport_ctx *nt;

	list_for_each_entry(nt, &ntb_transport_list, entry)
		list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
			if (!strncmp(dev_name(&client->dev), device_name,
				     strlen(device_name))) {
				list_del(&client->entry);
				device_unregister(&client->dev);
			}
}
EXPORT_SYMBOL_GPL(ntb_transport_unregister_client_dev);

/**
 * ntb_transport_register_client_dev - Register NTB client device
 * @device_name: Name of NTB client device
 *
 * Register an NTB client device with the NTB transport layer
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_register_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client_dev;
	struct ntb_transport_ctx *nt;
	int node;
	int rc, i = 0;

	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	list_for_each_entry(nt, &ntb_transport_list, entry) {
		struct device *dev;

		node = dev_to_node(&nt->ndev->dev);

		client_dev = kzalloc_node(sizeof(*client_dev),
					  GFP_KERNEL, node);
		if (!client_dev) {
			rc = -ENOMEM;
			goto err;
		}

		dev = &client_dev->dev;

		/* setup and register client devices */
		dev_set_name(dev, "%s%d", device_name, i);
		dev->bus = &ntb_transport_bus;
		dev->release = ntb_transport_client_release;
		dev->parent = &nt->ndev->dev;

		rc = device_register(dev);
		if (rc) {
			kfree(client_dev);
			goto err;
		}

		list_add_tail(&client_dev->entry, &nt->client_devs);
		i++;
	}

	return 0;

err:
	ntb_transport_unregister_client_dev(device_name);

	return rc;
}
EXPORT_SYMBOL_GPL(ntb_transport_register_client_dev);

/**
 * ntb_transport_register_client - Register NTB client driver
 * @drv: NTB client driver to be registered
 *
 * Register an NTB client driver with the NTB transport layer
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_register_client(struct ntb_transport_client *drv)
{
	drv->driver.bus = &ntb_transport_bus;

	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_transport_register_client);

/**
 * ntb_transport_unregister_client - Unregister NTB client driver
 * @drv: NTB client driver to be unregistered
 *
 * Unregister an NTB client driver from the NTB transport layer
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
void ntb_transport_unregister_client(struct ntb_transport_client *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_transport_unregister_client);

static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
			    loff_t *offp)
{
	struct ntb_transport_qp *qp;
	char *buf;
	ssize_t ret, out_offset, out_count;

	qp = filp->private_data;

	if (!qp || !qp->link_is_up)
		return 0;

	out_count = 1000;

	buf = kmalloc(out_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	out_offset = 0;
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "\nNTB QP stats:\n\n");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_bytes - \t%llu\n", qp->rx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_pkts - \t%llu\n", qp->rx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_memcpy - \t%llu\n", qp->rx_memcpy);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_async - \t%llu\n", qp->rx_async);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_ring_empty - %llu\n", qp->rx_ring_empty);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_ver - \t%llu\n", qp->rx_err_ver);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_buff - \t0x%p\n", qp->rx_buff);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_index - \t%u\n", qp->rx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_max_entry - \t%u\n", qp->rx_max_entry);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_alloc_entry - \t%u\n\n", qp->rx_alloc_entry);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_bytes - \t%llu\n", qp->tx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_pkts - \t%llu\n", qp->tx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_memcpy - \t%llu\n", qp->tx_memcpy);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_async - \t%llu\n", qp->tx_async);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_ring_full - \t%llu\n", qp->tx_ring_full);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_err_no_buf - %llu\n", qp->tx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_mw - \t0x%p\n", qp->tx_mw);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_index (H) - \t%u\n", qp->tx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "RRI (T) - \t%u\n",
			       qp->remote_rx_info->entry);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_max_entry - \t%u\n", qp->tx_max_entry);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "free tx - \t%u\n",
			       ntb_transport_tx_free_entry(qp));
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "DMA tx prep err - \t%llu\n",
			       qp->dma_tx_prep_err);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "DMA rx prep err - \t%llu\n",
			       qp->dma_rx_prep_err);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "\n");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Using TX DMA - \t%s\n",
			       qp->tx_dma_chan ? "Yes" : "No");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Using RX DMA - \t%s\n",
			       qp->rx_dma_chan ? "Yes" : "No");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "QP Link - \t%s\n",
			       qp->link_is_up ? "Up" : "Down");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "\n");

	if (out_offset > out_count)
		out_offset = out_count;

	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
	kfree(buf);
	return ret;
}

static const struct file_operations ntb_qp_debugfs_stats = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = debugfs_read,
};

static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
			 struct list_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add_tail(entry, list);
	spin_unlock_irqrestore(lock, flags);
}

static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
					   struct list_head *list)
{
	struct ntb_queue_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	if (list_empty(list)) {
		entry = NULL;
		goto out;
	}
	entry = list_first_entry(list, struct ntb_queue_entry, entry);
	list_del(&entry->entry);

out:
	spin_unlock_irqrestore(lock, flags);

	return entry;
}

static struct ntb_queue_entry *ntb_list_mv(spinlock_t *lock,
					   struct list_head *list,
					   struct list_head *to_list)
{
	struct ntb_queue_entry *entry;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);

	if (list_empty(list)) {
		entry = NULL;
	} else {
		entry = list_first_entry(list, struct ntb_queue_entry, entry);
		list_move_tail(&entry->entry, to_list);
	}

	spin_unlock_irqrestore(lock, flags);

	return entry;
}

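/*
 * Set up the receive side of a queue pair: carve this QP's slice out of
 * the memory window it shares with other QPs, size the rx ring, and top
 * up the pool of pre-allocated rx entries to match the ring size.
 */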
static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
				     unsigned int qp_num)
{
	struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
	struct ntb_transport_mw *mw;
	struct ntb_dev *ndev = nt->ndev;
	struct ntb_queue_entry *entry;
	unsigned int rx_size, num_qps_mw;
	unsigned int mw_num, mw_count, qp_count;
	unsigned int i;
	int node;

	mw_count = nt->mw_count;
	qp_count = nt->qp_count;

	mw_num = QP_TO_MW(nt, qp_num);
	mw = &nt->mw_vec[mw_num];

	if (!mw->virt_addr)
		return -ENOMEM;

	if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
		num_qps_mw = qp_count / mw_count + 1;
	else
		num_qps_mw = qp_count / mw_count;

	rx_size = (unsigned int)mw->xlat_size / num_qps_mw;
	qp->rx_buff = mw->virt_addr + rx_size * (qp_num / mw_count);
	rx_size -= sizeof(struct ntb_rx_info);

	qp->remote_rx_info = qp->rx_buff + rx_size;

	/* Due to housekeeping, there must be at least 2 buffs */
	qp->rx_max_frame = min(transport_mtu, rx_size / 2);
	qp->rx_max_entry = rx_size / qp->rx_max_frame;
	qp->rx_index = 0;

	/*
	 * Checking to see if we have more entries than the default.
	 * We should add additional entries if that is the case so we
	 * can be in sync with the transport frames.
	 */
	node = dev_to_node(&ndev->dev);
	for (i = qp->rx_alloc_entry; i < qp->rx_max_entry; i++) {
		entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
		if (!entry)
			return -ENOMEM;

		entry->qp = qp;
		ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
			     &qp->rx_free_q);
		qp->rx_alloc_entry++;
	}

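	/*
	 * The transmitter reads this as the last rx entry we consumed;
	 * starting at the final entry marks the whole ring as free.
	 */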
	qp->remote_rx_info->entry = qp->rx_max_entry - 1;

	/* set up the hdr offsets with 0's */
	for (i = 0; i < qp->rx_max_entry; i++) {
		void *offset = (qp->rx_buff + qp->rx_max_frame * (i + 1) -
				sizeof(struct ntb_payload_header));
		memset(offset, 0, sizeof(struct ntb_payload_header));
	}

	qp->rx_pkts = 0;
	qp->tx_pkts = 0;
	qp->tx_index = 0;

	return 0;
}

static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
{
	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
	struct pci_dev *pdev = nt->ndev->pdev;

	if (!mw->virt_addr)
		return;

	ntb_mw_clear_trans(nt->ndev, num_mw);
	dma_free_coherent(&pdev->dev, mw->buff_size,
			  mw->virt_addr, mw->dma_addr);
	mw->xlat_size = 0;
	mw->buff_size = 0;
	mw->virt_addr = NULL;
}

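/*
 * Allocate a DMA-coherent receive buffer for a memory window, rounded up
 * to the hardware alignment requirements, and program the NTB
 * translation so the peer's writes land in it.
 */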
static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
		      resource_size_t size)
{
	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
	struct pci_dev *pdev = nt->ndev->pdev;
	size_t xlat_size, buff_size;
	int rc;

	if (!size)
		return -EINVAL;

	xlat_size = round_up(size, mw->xlat_align_size);
	buff_size = round_up(size, mw->xlat_align);

	/* No need to re-setup */
	if (mw->xlat_size == xlat_size)
		return 0;

	if (mw->buff_size)
		ntb_free_mw(nt, num_mw);

	/* Alloc memory for receiving data.  Must be aligned */
	mw->xlat_size = xlat_size;
	mw->buff_size = buff_size;

	mw->virt_addr = dma_alloc_coherent(&pdev->dev, buff_size,
					   &mw->dma_addr, GFP_KERNEL);
	if (!mw->virt_addr) {
		mw->xlat_size = 0;
		mw->buff_size = 0;
		dev_err(&pdev->dev, "Unable to alloc MW buff of size %zu\n",
			buff_size);
		return -ENOMEM;
	}

	/*
	 * we must ensure that the memory address allocated is BAR size
	 * aligned in order for the XLAT register to take the value. This
	 * is a requirement of the hardware. It is recommended to setup CMA
	 * for BAR sizes equal or greater than 4MB.
	 */
	if (!IS_ALIGNED(mw->dma_addr, mw->xlat_align)) {
		dev_err(&pdev->dev, "DMA memory %pad is not aligned\n",
			&mw->dma_addr);
		ntb_free_mw(nt, num_mw);
		return -ENOMEM;
	}

	/* Notify HW the memory location of the receive buffer */
	rc = ntb_mw_set_trans(nt->ndev, num_mw, mw->dma_addr, mw->xlat_size);
	if (rc) {
		dev_err(&pdev->dev, "Unable to set mw%d translation", num_mw);
		ntb_free_mw(nt, num_mw);
		return -EIO;
	}

	return 0;
}

static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
{
	qp->link_is_up = false;
	qp->active = false;

	qp->tx_index = 0;
	qp->rx_index = 0;
	qp->rx_bytes = 0;
	qp->rx_pkts = 0;
	qp->rx_ring_empty = 0;
	qp->rx_err_no_buf = 0;
	qp->rx_err_oflow = 0;
	qp->rx_err_ver = 0;
	qp->rx_memcpy = 0;
	qp->rx_async = 0;
	qp->tx_bytes = 0;
	qp->tx_pkts = 0;
	qp->tx_ring_full = 0;
	qp->tx_err_no_buf = 0;
	qp->tx_memcpy = 0;
	qp->tx_async = 0;
	qp->dma_tx_prep_err = 0;
	qp->dma_rx_prep_err = 0;
}

static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
{
	struct ntb_transport_ctx *nt = qp->transport;
	struct pci_dev *pdev = nt->ndev->pdev;

	dev_info(&pdev->dev, "qp %d: Link Cleanup\n", qp->qp_num);

	cancel_delayed_work_sync(&qp->link_work);
	ntb_qp_link_down_reset(qp);

	if (qp->event_handler)
		qp->event_handler(qp->cb_data, qp->link_is_up);
}

static void ntb_qp_link_cleanup_work(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_cleanup);
	struct ntb_transport_ctx *nt = qp->transport;

	ntb_qp_link_cleanup(qp);

	if (nt->link_is_up)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static void ntb_qp_link_down(struct ntb_transport_qp *qp)
{
	schedule_work(&qp->link_cleanup);
}

static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt)
{
	struct ntb_transport_qp *qp;
	u64 qp_bitmap_alloc;
	int i;

	qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;

	/* Pass along the info to any clients */
	for (i = 0; i < nt->qp_count; i++)
		if (qp_bitmap_alloc & BIT_ULL(i)) {
			qp = &nt->qp_vec[i];
			ntb_qp_link_cleanup(qp);
			cancel_work_sync(&qp->link_cleanup);
			cancel_delayed_work_sync(&qp->link_work);
		}

	if (!nt->link_is_up)
		cancel_delayed_work_sync(&nt->link_work);

	/* The scratchpad registers keep the values if the remote side
	 * goes down, blast them now to give them a sane value the next
	 * time they are accessed
	 */
	for (i = 0; i < MAX_SPAD; i++)
		ntb_spad_write(nt->ndev, i, 0);
}

static void ntb_transport_link_cleanup_work(struct work_struct *work)
{
	struct ntb_transport_ctx *nt =
		container_of(work, struct ntb_transport_ctx, link_cleanup);

	ntb_transport_link_cleanup(nt);
}

static void ntb_transport_event_callback(void *data)
{
	struct ntb_transport_ctx *nt = data;

	if (ntb_link_is_up(nt->ndev, NULL, NULL) == 1)
		schedule_delayed_work(&nt->link_work, 0);
	else
		schedule_work(&nt->link_cleanup);
}

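/*
 * Link-up handshake: publish the local MW sizes, QP count and transport
 * version to the peer's scratchpads, read back the peer's values, and
 * allocate matching MW buffers before declaring the transport link up.
 */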
static void ntb_transport_link_work(struct work_struct *work)
{
	struct ntb_transport_ctx *nt =
		container_of(work, struct ntb_transport_ctx, link_work.work);
	struct ntb_dev *ndev = nt->ndev;
	struct pci_dev *pdev = ndev->pdev;
	resource_size_t size;
	u32 val;
	int rc = 0, i, spad;

	/* send the local info, in the opposite order of the way we read it */
	for (i = 0; i < nt->mw_count; i++) {
		size = nt->mw_vec[i].phys_size;

		if (max_mw_size && size > max_mw_size)
			size = max_mw_size;

		spad = MW0_SZ_HIGH + (i * 2);
		ntb_peer_spad_write(ndev, spad, upper_32_bits(size));

		spad = MW0_SZ_LOW + (i * 2);
		ntb_peer_spad_write(ndev, spad, lower_32_bits(size));
	}

	ntb_peer_spad_write(ndev, NUM_MWS, nt->mw_count);

	ntb_peer_spad_write(ndev, NUM_QPS, nt->qp_count);

	ntb_peer_spad_write(ndev, VERSION, NTB_TRANSPORT_VERSION);

	/* Query the remote side for its info */
	val = ntb_spad_read(ndev, VERSION);
	dev_dbg(&pdev->dev, "Remote version = %d\n", val);
	if (val != NTB_TRANSPORT_VERSION)
		goto out;

	val = ntb_spad_read(ndev, NUM_QPS);
	dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
	if (val != nt->qp_count)
		goto out;

	val = ntb_spad_read(ndev, NUM_MWS);
	dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);
	if (val != nt->mw_count)
		goto out;

	for (i = 0; i < nt->mw_count; i++) {
		u64 val64;

		val = ntb_spad_read(ndev, MW0_SZ_HIGH + (i * 2));
		val64 = (u64)val << 32;

		val = ntb_spad_read(ndev, MW0_SZ_LOW + (i * 2));
		val64 |= val;

		dev_dbg(&pdev->dev, "Remote MW%d size = %#llx\n", i, val64);

		rc = ntb_set_mw(nt, i, val64);
		if (rc)
			goto out1;
	}

	nt->link_is_up = true;

	for (i = 0; i < nt->qp_count; i++) {
		struct ntb_transport_qp *qp = &nt->qp_vec[i];

		ntb_transport_setup_qp_mw(nt, i);

		if (qp->client_ready)
			schedule_delayed_work(&qp->link_work, 0);
	}

	return;

out1:
	for (i = 0; i < nt->mw_count; i++)
		ntb_free_mw(nt, i);

	/* if there's an actual failure, we should just bail */
	if (rc < 0) {
		ntb_link_disable(ndev);
		return;
	}

out:
	if (ntb_link_is_up(ndev, NULL, NULL) == 1)
		schedule_delayed_work(&nt->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

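/*
 * Per-QP link negotiation: set this QP's ready bit in the peer's
 * QP_LINKS scratchpad and retry until the peer's bit shows up in ours.
 */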
static void ntb_qp_link_work(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_work.work);
	struct pci_dev *pdev = qp->ndev->pdev;
	struct ntb_transport_ctx *nt = qp->transport;
	int val;

	WARN_ON(!nt->link_is_up);

	val = ntb_spad_read(nt->ndev, QP_LINKS);

	ntb_peer_spad_write(nt->ndev, QP_LINKS, val | BIT(qp->qp_num));

	/* query remote spad for qp ready bits */
	ntb_peer_spad_read(nt->ndev, QP_LINKS);
	dev_dbg_ratelimited(&pdev->dev, "Remote QP link status = %x\n", val);

	/* See if the remote side is up */
	if (val & BIT(qp->qp_num)) {
		dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
		qp->link_is_up = true;
		qp->active = true;

		if (qp->event_handler)
			qp->event_handler(qp->cb_data, qp->link_is_up);

		if (qp->active)
			tasklet_schedule(&qp->rxc_db_work);
	} else if (nt->link_is_up)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

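/*
 * One-time queue-pair init: carve the transmit slice out of the peer's
 * memory window (mirroring the rx carve-up in ntb_transport_setup_qp_mw()
 * on the remote side) and set up the queue's locks, lists, work items
 * and debugfs entries.
 */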
static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
				    unsigned int qp_num)
{
	struct ntb_transport_qp *qp;
	phys_addr_t mw_base;
	resource_size_t mw_size;
	unsigned int num_qps_mw, tx_size;
	unsigned int mw_num, mw_count, qp_count;
	u64 qp_offset;

	mw_count = nt->mw_count;
	qp_count = nt->qp_count;

	mw_num = QP_TO_MW(nt, qp_num);

	qp = &nt->qp_vec[qp_num];
	qp->qp_num = qp_num;
	qp->transport = nt;
	qp->ndev = nt->ndev;
	qp->client_ready = false;
	qp->event_handler = NULL;
	ntb_qp_link_down_reset(qp);

	if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
		num_qps_mw = qp_count / mw_count + 1;
	else
		num_qps_mw = qp_count / mw_count;

	mw_base = nt->mw_vec[mw_num].phys_addr;
	mw_size = nt->mw_vec[mw_num].phys_size;

	tx_size = (unsigned int)mw_size / num_qps_mw;
	qp_offset = tx_size * (qp_num / mw_count);

	qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset;
	if (!qp->tx_mw)
		return -EINVAL;

	qp->tx_mw_phys = mw_base + qp_offset;
	if (!qp->tx_mw_phys)
		return -EINVAL;

	tx_size -= sizeof(struct ntb_rx_info);
	qp->rx_info = qp->tx_mw + tx_size;

	/* Due to housekeeping, there must be at least 2 buffs */
	qp->tx_max_frame = min(transport_mtu, tx_size / 2);
	qp->tx_max_entry = tx_size / qp->tx_max_frame;

	if (nt->debugfs_node_dir) {
		char debugfs_name[4];

		snprintf(debugfs_name, 4, "qp%d", qp_num);
		qp->debugfs_dir = debugfs_create_dir(debugfs_name,
						     nt->debugfs_node_dir);

		qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
							qp->debugfs_dir, qp,
							&ntb_qp_debugfs_stats);
	} else {
		qp->debugfs_dir = NULL;
		qp->debugfs_stats = NULL;
	}

	INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
	INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);

	spin_lock_init(&qp->ntb_rx_q_lock);
	spin_lock_init(&qp->ntb_tx_free_q_lock);

	INIT_LIST_HEAD(&qp->rx_post_q);
	INIT_LIST_HEAD(&qp->rx_pend_q);
	INIT_LIST_HEAD(&qp->rx_free_q);
	INIT_LIST_HEAD(&qp->tx_free_q);

	tasklet_init(&qp->rxc_db_work, ntb_transport_rxc_db,
		     (unsigned long)qp);

	return 0;
}

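/*
 * Device probe: map all memory windows, size the QP pool from the
 * doorbell mask, initialize each queue pair, register the NTB context
 * and client bus, then enable the link to start the handshake.
 */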
static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
{
	struct ntb_transport_ctx *nt;
	struct ntb_transport_mw *mw;
	unsigned int mw_count, qp_count;
	u64 qp_bitmap;
	int node;
	int rc, i;

	mw_count = ntb_mw_count(ndev);
	if (ntb_spad_count(ndev) < (NUM_MWS + 1 + mw_count * 2)) {
		dev_err(&ndev->dev, "Not enough scratch pad registers for %s",
			NTB_TRANSPORT_NAME);
		return -EIO;
	}

	if (ntb_db_is_unsafe(ndev))
		dev_dbg(&ndev->dev,
			"doorbell is unsafe, proceed anyway...\n");
	if (ntb_spad_is_unsafe(ndev))
		dev_dbg(&ndev->dev,
			"scratchpad is unsafe, proceed anyway...\n");

	node = dev_to_node(&ndev->dev);

	nt = kzalloc_node(sizeof(*nt), GFP_KERNEL, node);
	if (!nt)
		return -ENOMEM;

	nt->ndev = ndev;

	nt->mw_count = mw_count;

	nt->mw_vec = kzalloc_node(mw_count * sizeof(*nt->mw_vec),
				  GFP_KERNEL, node);
	if (!nt->mw_vec) {
		rc = -ENOMEM;
		goto err;
	}

	for (i = 0; i < mw_count; i++) {
		mw = &nt->mw_vec[i];

		rc = ntb_mw_get_range(ndev, i, &mw->phys_addr, &mw->phys_size,
				      &mw->xlat_align, &mw->xlat_align_size);
		if (rc)
			goto err1;

		mw->vbase = ioremap_wc(mw->phys_addr, mw->phys_size);
		if (!mw->vbase) {
			rc = -ENOMEM;
			goto err1;
		}

		mw->buff_size = 0;
		mw->xlat_size = 0;
		mw->virt_addr = NULL;
		mw->dma_addr = 0;
	}

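	/*
	 * One doorbell bit drives each queue pair; cap the QP count by the
	 * module parameter and the memory window count.
	 */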
	qp_bitmap = ntb_db_valid_mask(ndev);

	qp_count = ilog2(qp_bitmap);
	if (max_num_clients && max_num_clients < qp_count)
		qp_count = max_num_clients;
	else if (mw_count < qp_count)
		qp_count = mw_count;

	qp_bitmap &= BIT_ULL(qp_count) - 1;

	nt->qp_count = qp_count;
	nt->qp_bitmap = qp_bitmap;
	nt->qp_bitmap_free = qp_bitmap;

	nt->qp_vec = kzalloc_node(qp_count * sizeof(*nt->qp_vec),
				  GFP_KERNEL, node);
	if (!nt->qp_vec) {
		rc = -ENOMEM;
		goto err1;
	}

	if (nt_debugfs_dir) {
		nt->debugfs_node_dir =
			debugfs_create_dir(pci_name(ndev->pdev),
					   nt_debugfs_dir);
	}

	for (i = 0; i < qp_count; i++) {
		rc = ntb_transport_init_queue(nt, i);
		if (rc)
			goto err2;
	}

	INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
	INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup_work);

	rc = ntb_set_ctx(ndev, nt, &ntb_transport_ops);
	if (rc)
		goto err2;

	INIT_LIST_HEAD(&nt->client_devs);
	rc = ntb_bus_init(nt);
	if (rc)
		goto err3;

	nt->link_is_up = false;
	ntb_link_enable(ndev, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
	ntb_link_event(ndev);

	return 0;

err3:
	ntb_clear_ctx(ndev);
err2:
	kfree(nt->qp_vec);
err1:
	while (i--) {
		mw = &nt->mw_vec[i];
		iounmap(mw->vbase);
	}
	kfree(nt->mw_vec);
err:
	kfree(nt);
	return rc;
}

static void ntb_transport_free(struct ntb_client *self, struct ntb_dev *ndev)
{
	struct ntb_transport_ctx *nt = ndev->ctx;
	struct ntb_transport_qp *qp;
	u64 qp_bitmap_alloc;
	int i;

	ntb_transport_link_cleanup(nt);
	cancel_work_sync(&nt->link_cleanup);
	cancel_delayed_work_sync(&nt->link_work);

	qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;

	/* verify that all the qp's are freed */
	for (i = 0; i < nt->qp_count; i++) {
		qp = &nt->qp_vec[i];
		if (qp_bitmap_alloc & BIT_ULL(i))
			ntb_transport_free_queue(qp);
		debugfs_remove_recursive(qp->debugfs_dir);
	}

	ntb_link_disable(ndev);
	ntb_clear_ctx(ndev);

	ntb_bus_remove(nt);

	for (i = nt->mw_count; i--; ) {
		ntb_free_mw(nt, i);
		iounmap(nt->mw_vec[i].vbase);
	}

	kfree(nt->qp_vec);
	kfree(nt->mw_vec);
	kfree(nt);
}

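/*
 * Retire completed rx entries in order: pop finished entries off
 * rx_post_q, report the freed ring slot to the peer through rx_info,
 * and call the client's rx handler outside the queue lock.
 */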
static void ntb_complete_rxc(struct ntb_transport_qp *qp)
{
	struct ntb_queue_entry *entry;
	void *cb_data;
	unsigned int len;
	unsigned long irqflags;

	spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);

	while (!list_empty(&qp->rx_post_q)) {
		entry = list_first_entry(&qp->rx_post_q,
					 struct ntb_queue_entry, entry);
		if (!(entry->flags & DESC_DONE_FLAG))
			break;

		entry->rx_hdr->flags = 0;
		iowrite32(entry->index, &qp->rx_info->entry);

		cb_data = entry->cb_data;
		len = entry->len;

		list_move_tail(&entry->entry, &qp->rx_free_q);

		spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);

		if (qp->rx_handler && qp->client_ready)
			qp->rx_handler(qp, qp->cb_data, cb_data, len);

		spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
	}

	spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
}

static void ntb_rx_copy_callback(void *data)
{
	struct ntb_queue_entry *entry = data;

	entry->flags |= DESC_DONE_FLAG;

	ntb_complete_rxc(entry->qp);
}

static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void