ntb_transport.c 49.2 KB
Newer Older
1 2 3 4 5 6 7
/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 *   redistributing this file, you may do so under either license.
 *
 *   GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
8
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
9 10 11 12 13 14 15 16
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   BSD LICENSE
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
17
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copy
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
45
 * PCIe NTB Transport Linux driver
46 47 48 49 50 51
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */
#include <linux/debugfs.h>
#include <linux/delay.h>
52
#include <linux/dmaengine.h>
53 54 55 56 57 58 59 60
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/types.h>
61
#include <linux/uaccess.h>
62 63
#include "linux/ntb.h"
#include "linux/ntb_transport.h"
64

65 66 67 68 69 70 71 72 73 74 75 76 77
/* Protocol version exchanged via scratchpads; both peers must agree. */
#define NTB_TRANSPORT_VERSION	4
#define NTB_TRANSPORT_VER	"4"
#define NTB_TRANSPORT_NAME	"ntb_transport"
#define NTB_TRANSPORT_DESC	"Software Queue-Pair Transport over NTB"

MODULE_DESCRIPTION(NTB_TRANSPORT_DESC);
MODULE_VERSION(NTB_TRANSPORT_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");

/* Upper bound applied to each memory window size (0 = no limit). */
static unsigned long max_mw_size;
module_param(max_mw_size, ulong, 0644);
MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows");

/* Maximum transport frame size; rx/tx frame sizes are clamped to this. */
static unsigned int transport_mtu = 0x401E;
module_param(transport_mtu, uint, 0644);
MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport clients");

/* Cap on the number of transport clients (qps); 0 presumably means
 * "derive from hardware" — confirm against the probe path. */
static unsigned char max_num_clients;
module_param(max_num_clients, byte, 0644);
MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");

/* Payloads at or below this many bytes are copied by the CPU, not DMA. */
static unsigned int copy_bytes = 1024;
module_param(copy_bytes, uint, 0644);
MODULE_PARM_DESC(copy_bytes, "Threshold under which NTB will use the CPU to copy instead of DMA");

/* Root debugfs directory under which per-qp stats files are created. */
static struct dentry *nt_debugfs_dir;

93 94 95
/* One buffer queued on a qp, for either direction of transfer. */
struct ntb_queue_entry {
	/* ntb_queue list reference */
	struct list_head entry;
	/* pointers to data to be transferred */
	void *cb_data;
	void *buf;
	unsigned int len;
	unsigned int flags;

	struct ntb_transport_qp *qp;
	/* header of the in-flight frame: iomem on tx, local memory on rx */
	union {
		struct ntb_payload_header __iomem *tx_hdr;
		struct ntb_payload_header *rx_hdr;
	};
	unsigned int index;
};

/* Consumer index published to the peer at the end of the rx buffer. */
struct ntb_rx_info {
	unsigned int entry;
};

/* Per-queue-pair state: one tx ring in the peer-visible memory window
 * and one rx ring in locally allocated DMA memory. */
struct ntb_transport_qp {
	struct ntb_transport_ctx *transport;
	struct ntb_dev *ndev;
	void *cb_data;
	struct dma_chan *dma_chan;

	bool client_ready;
	bool link_is_up;

	u8 qp_num;	/* Only 64 QP's are allowed.  0-63 */
	u64 qp_bit;

	/* our rx_info lives in the peer-mapped tx window (iomem);
	 * remote_rx_info is the peer's copy inside our local rx buffer */
	struct ntb_rx_info __iomem *rx_info;
	struct ntb_rx_info *remote_rx_info;

	/* completion callback invoked when a tx entry finishes */
	void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
			   void *data, int len);
	struct list_head tx_free_q;
	spinlock_t ntb_tx_free_q_lock;
	void __iomem *tx_mw;
	dma_addr_t tx_mw_phys;
	unsigned int tx_index;
	unsigned int tx_max_entry;
	unsigned int tx_max_frame;

	/* completion callback invoked when an rx entry finishes */
	void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
			   void *data, int len);
	struct list_head rx_pend_q;
	struct list_head rx_free_q;
	spinlock_t ntb_rx_pend_q_lock;
	spinlock_t ntb_rx_free_q_lock;
	void *rx_buff;
	unsigned int rx_index;
	unsigned int rx_max_entry;
	unsigned int rx_max_frame;
	dma_cookie_t last_cookie;
	/* bottom half driven by the doorbell to drain the rx ring */
	struct tasklet_struct rxc_db_work;

	/* client notification for qp link up/down transitions */
	void (*event_handler)(void *data, int status);
	struct delayed_work link_work;
	struct work_struct link_cleanup;

	struct dentry *debugfs_dir;
	struct dentry *debugfs_stats;

	/* Stats */
	u64 rx_bytes;
	u64 rx_pkts;
	u64 rx_ring_empty;
	u64 rx_err_no_buf;
	u64 rx_err_oflow;
	u64 rx_err_ver;
	u64 rx_memcpy;
	u64 rx_async;
	u64 tx_bytes;
	u64 tx_pkts;
	u64 tx_ring_full;
	u64 tx_err_no_buf;
	u64 tx_memcpy;
	u64 tx_async;
};

/* One NTB memory window: hardware BAR mapping plus the local
 * DMA-coherent buffer the peer's window is translated onto. */
struct ntb_transport_mw {
	phys_addr_t phys_addr;
	resource_size_t phys_size;
	resource_size_t xlat_align;
	resource_size_t xlat_align_size;
	void __iomem *vbase;
	size_t xlat_size;
	size_t buff_size;
	void *virt_addr;
	dma_addr_t dma_addr;
};

/* A client device hanging off the ntb_transport bus. */
struct ntb_transport_client_dev {
	struct list_head entry;
	struct ntb_transport_ctx *nt;
	struct device dev;
};

/* Per-NTB-device transport context. */
struct ntb_transport_ctx {
	struct list_head entry;
	struct list_head client_devs;

	struct ntb_dev *ndev;

	struct ntb_transport_mw *mw_vec;
	struct ntb_transport_qp *qp_vec;
	unsigned int mw_count;
	unsigned int qp_count;
	u64 qp_bitmap;
	u64 qp_bitmap_free;

	bool link_is_up;
	struct delayed_work link_work;
	struct work_struct link_cleanup;
};

/* Flag bits carried in ntb_payload_header.flags. */
enum {
	DESC_DONE_FLAG = BIT(0),
	LINK_DOWN_FLAG = BIT(1),
};

/* Per-frame header written at the tail of each ring slot. */
struct ntb_payload_header {
	unsigned int ver;
	unsigned int len;
	unsigned int flags;
};

/* Scratchpad register layout used for link negotiation.  MW sizes use
 * two spads each (high/low 32 bits), starting at MW0_SZ_HIGH. */
enum {
	VERSION = 0,
	QP_LINKS,
	NUM_QPS,
	NUM_MWS,
	MW0_SZ_HIGH,
	MW0_SZ_LOW,
	MW1_SZ_HIGH,
	MW1_SZ_LOW,
	MAX_SPAD,
};

#define dev_client_dev(__dev) \
	container_of((__dev), struct ntb_transport_client_dev, dev)

#define drv_client(__drv) \
	container_of((__drv), struct ntb_transport_client, driver)

/* qps are assigned to memory windows round-robin */
#define QP_TO_MW(nt, qp)	((qp) % nt->mw_count)
#define NTB_QP_DEF_NUM_ENTRIES	100
#define NTB_LINK_DOWN_TIMEOUT	10

245 246 247 248 249 250
/* Forward declarations for objects defined later in the file. */
static void ntb_transport_rxc_db(unsigned long data);
static const struct ntb_ctx_ops ntb_transport_ops;
static struct ntb_client ntb_transport_client;

/*
 * Bus match: a client driver binds to any device whose name starts with
 * the driver name (devices are registered as "<driver-name><N>").
 */
static int ntb_transport_bus_match(struct device *dev,
				   struct device_driver *drv)
{
	return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
}

/*
 * Bus probe: take a reference on the device and hand it to the client's
 * probe; the reference is dropped again if the client rejects it.
 */
static int ntb_transport_bus_probe(struct device *dev)
{
	const struct ntb_transport_client *client;
	int rc = -EINVAL;

	get_device(dev);

	client = drv_client(dev->driver);
	rc = client->probe(dev);
	if (rc)
		put_device(dev);

	return rc;
}

270
/*
 * Bus remove: notify the client driver, then drop the reference taken
 * at probe time.
 */
static int ntb_transport_bus_remove(struct device *dev)
{
	const struct ntb_transport_client *client;

	client = drv_client(dev->driver);
	client->remove(dev);

	put_device(dev);

	return 0;
}

/* Virtual bus on which transport client devices/drivers are matched. */
static struct bus_type ntb_transport_bus = {
	.name = "ntb_transport",
	.match = ntb_transport_bus_match,
	.probe = ntb_transport_bus_probe,
	.remove = ntb_transport_bus_remove,
};

/* All live transport contexts, one per NTB device. */
static LIST_HEAD(ntb_transport_list);

/* Add a new transport context to the global list. */
static int ntb_bus_init(struct ntb_transport_ctx *nt)
{
	list_add(&nt->entry, &ntb_transport_list);
	return 0;
}

/*
 * Tear down a transport context: forcibly unregister any client devices
 * still attached (complaining about each), then unlink the context.
 */
static void ntb_bus_remove(struct ntb_transport_ctx *nt)
{
	struct ntb_transport_client_dev *client_dev, *cd;

	list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
		dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
			dev_name(&client_dev->dev));
		list_del(&client_dev->entry);
		device_unregister(&client_dev->dev);
	}

	list_del(&nt->entry);
}

311
/* Device ->release callback: frees the containing client_dev allocation
 * once the last reference to the embedded struct device is dropped. */
static void ntb_transport_client_release(struct device *dev)
{
	struct ntb_transport_client_dev *client_dev;

	client_dev = dev_client_dev(dev);
	kfree(client_dev);
}

/**
 * ntb_transport_unregister_client_dev - Unregister NTB client device
 * @device_name: Name of NTB client device
 *
 * Unregister an NTB client device with the NTB transport layer.
 * All devices on all transports whose name begins with @device_name are
 * unlinked and unregistered; the memory is released via ->release.
 */
void ntb_transport_unregister_client_dev(char *device_name)
{
	struct ntb_transport_client_dev *client, *cd;
	struct ntb_transport_ctx *nt;

	list_for_each_entry(nt, &ntb_transport_list, entry)
		list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
			if (!strncmp(dev_name(&client->dev), device_name,
				     strlen(device_name))) {
				list_del(&client->entry);
				device_unregister(&client->dev);
			}
}
EXPORT_SYMBOL_GPL(ntb_transport_unregister_client_dev);
339 340

/**
341
 * ntb_transport_register_client_dev - Register NTB client device
342 343 344 345
 * @device_name: Name of NTB client device
 *
 * Register an NTB client device with the NTB transport layer
 */
346
int ntb_transport_register_client_dev(char *device_name)
347 348
{
	struct ntb_transport_client_dev *client_dev;
349
	struct ntb_transport_ctx *nt;
350
	int node;
Jon Mason's avatar
Jon Mason committed
351
	int rc, i = 0;
352

353 354 355
	if (list_empty(&ntb_transport_list))
		return -ENODEV;

356 357 358
	list_for_each_entry(nt, &ntb_transport_list, entry) {
		struct device *dev;

359 360 361 362
		node = dev_to_node(&nt->ndev->dev);

		client_dev = kzalloc_node(sizeof(*client_dev),
					  GFP_KERNEL, node);
363 364 365 366 367 368 369 370
		if (!client_dev) {
			rc = -ENOMEM;
			goto err;
		}

		dev = &client_dev->dev;

		/* setup and register client devices */
Jon Mason's avatar
Jon Mason committed
371
		dev_set_name(dev, "%s%d", device_name, i);
372 373 374
		dev->bus = &ntb_transport_bus;
		dev->release = ntb_transport_client_release;
		dev->parent = &nt->ndev->dev;
375 376 377 378 379 380 381 382

		rc = device_register(dev);
		if (rc) {
			kfree(client_dev);
			goto err;
		}

		list_add_tail(&client_dev->entry, &nt->client_devs);
Jon Mason's avatar
Jon Mason committed
383
		i++;
384 385 386 387 388
	}

	return 0;

err:
389
	ntb_transport_unregister_client_dev(device_name);
390 391 392

	return rc;
}
393
EXPORT_SYMBOL_GPL(ntb_transport_register_client_dev);
394 395

/**
 * ntb_transport_register_client - Register NTB client driver
 * @drv: NTB client driver to be registered
 *
 * Register an NTB client driver with the NTB transport layer
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_register_client(struct ntb_transport_client *drv)
{
	drv->driver.bus = &ntb_transport_bus;

	/* refuse registration when no transport exists to bind against */
	if (list_empty(&ntb_transport_list))
		return -ENODEV;

	return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_transport_register_client);

/**
 * ntb_transport_unregister_client - Unregister NTB client driver
 * @drv: NTB client driver to be unregistered
 *
 * Unregister an NTB client driver with the NTB transport layer
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
void ntb_transport_unregister_client(struct ntb_transport_client *drv)
{
	driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_transport_unregister_client);
427 428 429 430 431

/*
 * debugfs read handler: format the qp's statistics counters and ring
 * state into a text buffer and copy the requested slice to user space.
 * The qp pointer was stashed in file->private_data by simple_open().
 */
static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
			    loff_t *offp)
{
	struct ntb_transport_qp *qp;
	char *buf;
	ssize_t ret, out_offset, out_count;

	/* fixed-size scratch buffer; output is clamped to this below */
	out_count = 1000;

	buf = kmalloc(out_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	qp = filp->private_data;
	out_offset = 0;
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "NTB QP stats\n");
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_bytes - \t%llu\n", qp->rx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_pkts - \t%llu\n", qp->rx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_memcpy - \t%llu\n", qp->rx_memcpy);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_async - \t%llu\n", qp->rx_async);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_ring_empty - %llu\n", qp->rx_ring_empty);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_err_ver - \t%llu\n", qp->rx_err_ver);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_buff - \t%p\n", qp->rx_buff);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_index - \t%u\n", qp->rx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "rx_max_entry - \t%u\n", qp->rx_max_entry);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_bytes - \t%llu\n", qp->tx_bytes);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_pkts - \t%llu\n", qp->tx_pkts);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_memcpy - \t%llu\n", qp->tx_memcpy);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_async - \t%llu\n", qp->tx_async);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_ring_full - \t%llu\n", qp->tx_ring_full);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_err_no_buf - %llu\n", qp->tx_err_no_buf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_mw - \t%p\n", qp->tx_mw);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_index - \t%u\n", qp->tx_index);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "tx_max_entry - \t%u\n", qp->tx_max_entry);

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "\nQP Link %s\n",
			       qp->link_is_up ? "Up" : "Down");
	/* snprintf reports would-be length on truncation; clamp it */
	if (out_offset > out_count)
		out_offset = out_count;

	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
	kfree(buf);
	return ret;
}

/* fops for the per-qp "stats" debugfs file. */
static const struct file_operations ntb_qp_debugfs_stats = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = debugfs_read,
};

/* Append @entry to @list under @lock (IRQ-safe). */
static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
			 struct list_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add_tail(entry, list);
	spin_unlock_irqrestore(lock, flags);
}

/*
 * Pop the first queue entry from @list under @lock (IRQ-safe).
 * Returns NULL when the list is empty.
 */
static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
					   struct list_head *list)
{
	struct ntb_queue_entry *head = NULL;
	unsigned long irqflags;

	spin_lock_irqsave(lock, irqflags);
	if (!list_empty(list)) {
		head = list_first_entry(list, struct ntb_queue_entry, entry);
		list_del(&head->entry);
	}
	spin_unlock_irqrestore(lock, irqflags);

	return head;
}

533 534
static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
				     unsigned int qp_num)
535
{
536 537
	struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
	struct ntb_transport_mw *mw;
538
	unsigned int rx_size, num_qps_mw;
539
	unsigned int mw_num, mw_count, qp_count;
Jon Mason's avatar
Jon Mason committed
540
	unsigned int i;
541

542 543
	mw_count = nt->mw_count;
	qp_count = nt->qp_count;
Jon Mason's avatar
Jon Mason committed
544

545 546 547 548 549
	mw_num = QP_TO_MW(nt, qp_num);
	mw = &nt->mw_vec[mw_num];

	if (!mw->virt_addr)
		return -ENOMEM;
550

551 552
	if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
		num_qps_mw = qp_count / mw_count + 1;
553
	else
554
		num_qps_mw = qp_count / mw_count;
555

556 557
	rx_size = (unsigned int)mw->xlat_size / num_qps_mw;
	qp->rx_buff = mw->virt_addr + rx_size * qp_num / mw_count;
Jon Mason's avatar
Jon Mason committed
558 559
	rx_size -= sizeof(struct ntb_rx_info);

560 561
	qp->remote_rx_info = qp->rx_buff + rx_size;

562 563
	/* Due to housekeeping, there must be atleast 2 buffs */
	qp->rx_max_frame = min(transport_mtu, rx_size / 2);
Jon Mason's avatar
Jon Mason committed
564 565 566
	qp->rx_max_entry = rx_size / qp->rx_max_frame;
	qp->rx_index = 0;

567
	qp->remote_rx_info->entry = qp->rx_max_entry - 1;
568

569
	/* setup the hdr offsets with 0's */
Jon Mason's avatar
Jon Mason committed
570
	for (i = 0; i < qp->rx_max_entry; i++) {
571 572
		void *offset = (qp->rx_buff + qp->rx_max_frame * (i + 1) -
				sizeof(struct ntb_payload_header));
573
		memset(offset, 0, sizeof(struct ntb_payload_header));
Jon Mason's avatar
Jon Mason committed
574
	}
575 576 577

	qp->rx_pkts = 0;
	qp->tx_pkts = 0;
Jon Mason's avatar
Jon Mason committed
578
	qp->tx_index = 0;
579 580

	return 0;
581 582
}

583
/*
 * ntb_free_mw() - tear down one memory window's receive buffer
 * @nt:		transport context
 * @num_mw:	memory window index
 *
 * Clears the hardware translation first so the peer can no longer write
 * into the buffer, then frees the DMA-coherent memory and resets the
 * bookkeeping.  Safe to call when nothing is allocated.
 */
static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
{
	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
	struct pci_dev *pdev = nt->ndev->pdev;

	if (!mw->virt_addr)
		return;

	ntb_mw_clear_trans(nt->ndev, num_mw);
	dma_free_coherent(&pdev->dev, mw->buff_size,
			  mw->virt_addr, mw->dma_addr);
	mw->xlat_size = 0;
	mw->buff_size = 0;
	mw->virt_addr = NULL;
}

599 600
/*
 * ntb_set_mw() - allocate and map the receive buffer for one memory window
 * @nt:		transport context
 * @num_mw:	memory window index
 * @size:	buffer size negotiated with the peer
 *
 * Rounds @size up to the window's alignment constraints, allocates a
 * DMA-coherent buffer, and programs the NTB translation so the peer's
 * window lands on it.  A matching existing setup is reused as-is.
 *
 * RETURNS: zero, -ENOMEM on allocation/alignment failure, or -EIO if
 * the translation cannot be set.
 */
static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
		      unsigned int size)
{
	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
	struct pci_dev *pdev = nt->ndev->pdev;
	unsigned int xlat_size, buff_size;
	int rc;

	xlat_size = round_up(size, mw->xlat_align_size);
	buff_size = round_up(size, mw->xlat_align);

	/* No need to re-setup */
	if (mw->xlat_size == xlat_size)
		return 0;

	if (mw->buff_size)
		ntb_free_mw(nt, num_mw);

	/* Alloc memory for receiving data.  Must be aligned */
	mw->xlat_size = xlat_size;
	mw->buff_size = buff_size;

	mw->virt_addr = dma_alloc_coherent(&pdev->dev, buff_size,
					   &mw->dma_addr, GFP_KERNEL);
	if (!mw->virt_addr) {
		mw->xlat_size = 0;
		mw->buff_size = 0;
		dev_err(&pdev->dev, "Unable to alloc MW buff of size %d\n",
			buff_size);
		return -ENOMEM;
	}

	/*
	 * we must ensure that the memory address allocated is BAR size
	 * aligned in order for the XLAT register to take the value. This
	 * is a requirement of the hardware. It is recommended to setup CMA
	 * for BAR sizes equal or greater than 4MB.
	 */
	if (!IS_ALIGNED(mw->dma_addr, mw->xlat_align)) {
		dev_err(&pdev->dev, "DMA memory %pad is not aligned\n",
			&mw->dma_addr);
		ntb_free_mw(nt, num_mw);
		return -ENOMEM;
	}

	/* Notify HW the memory location of the receive buffer */
	rc = ntb_mw_set_trans(nt->ndev, num_mw, mw->dma_addr, mw->xlat_size);
	if (rc) {
		dev_err(&pdev->dev, "Unable to set mw%d translation", num_mw);
		ntb_free_mw(nt, num_mw);
		return -EIO;
	}

	return 0;
}

655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676
/* Mark the qp link down, rewind both ring indices, and zero every
 * statistics counter so a fresh link starts from a clean slate. */
static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
{
	qp->link_is_up = false;

	qp->tx_index = 0;
	qp->rx_index = 0;
	qp->rx_bytes = 0;
	qp->rx_pkts = 0;
	qp->rx_ring_empty = 0;
	qp->rx_err_no_buf = 0;
	qp->rx_err_oflow = 0;
	qp->rx_err_ver = 0;
	qp->rx_memcpy = 0;
	qp->rx_async = 0;
	qp->tx_bytes = 0;
	qp->tx_pkts = 0;
	qp->tx_ring_full = 0;
	qp->tx_err_no_buf = 0;
	qp->tx_memcpy = 0;
	qp->tx_async = 0;
}

677
/*
 * Bring a single qp's link down: stop any pending link negotiation,
 * reset ring/stat state, and notify the client of the transition.
 */
static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
{
	struct ntb_transport_ctx *nt = qp->transport;
	struct pci_dev *pdev = nt->ndev->pdev;

	dev_info(&pdev->dev, "qp %d: Link Cleanup\n", qp->qp_num);

	cancel_delayed_work_sync(&qp->link_work);
	ntb_qp_link_down_reset(qp);

	/* link_is_up is false here, so the client sees "down" */
	if (qp->event_handler)
		qp->event_handler(qp->cb_data, qp->link_is_up);
}

/* Workqueue wrapper: clean the qp up, then retry negotiation later if
 * the transport-level link is still up. */
static void ntb_qp_link_cleanup_work(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_cleanup);
	struct ntb_transport_ctx *nt = qp->transport;

	ntb_qp_link_cleanup(qp);

	if (nt->link_is_up)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

/* Schedule qp link teardown from contexts that cannot sleep. */
static void ntb_qp_link_down(struct ntb_transport_qp *qp)
{
	schedule_work(&qp->link_cleanup);
}

710
/*
 * Bring the whole transport link down: clean up every allocated qp,
 * stop pending link work, and wipe the scratchpad registers.
 */
static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt)
{
	struct ntb_transport_qp *qp;
	u64 qp_bitmap_alloc;
	int i;

	/* bits cleared in qp_bitmap_free are qps handed out to clients */
	qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;

	/* Pass along the info to any clients */
	for (i = 0; i < nt->qp_count; i++)
		if (qp_bitmap_alloc & BIT_ULL(i)) {
			qp = &nt->qp_vec[i];
			ntb_qp_link_cleanup(qp);
			cancel_work_sync(&qp->link_cleanup);
			cancel_delayed_work_sync(&qp->link_work);
		}

	if (!nt->link_is_up)
		cancel_delayed_work_sync(&nt->link_work);

	/* The scratchpad registers keep the values if the remote side
	 * goes down, blast them now to give them a sane value the next
	 * time they are accessed
	 */
	for (i = 0; i < MAX_SPAD; i++)
		ntb_spad_write(nt->ndev, i, 0);
}

/* Workqueue wrapper for ntb_transport_link_cleanup(). */
static void ntb_transport_link_cleanup_work(struct work_struct *work)
{
	struct ntb_transport_ctx *nt =
		container_of(work, struct ntb_transport_ctx, link_cleanup);

	ntb_transport_link_cleanup(nt);
}

746
/*
 * NTB link event callback: kick off link negotiation immediately when
 * the hardware link comes up, or schedule teardown when it drops.
 */
static void ntb_transport_event_callback(void *data)
{
	struct ntb_transport_ctx *nt = data;

	if (ntb_link_is_up(nt->ndev, NULL, NULL) == 1)
		schedule_delayed_work(&nt->link_work, 0);
	else
		schedule_work(&nt->link_cleanup);
}

/*
 * Transport link negotiation, run from a delayed work item whenever the
 * hardware link is up.  Publishes our memory-window sizes, qp count, mw
 * count, and protocol version to the peer's scratchpads, then reads the
 * peer's values back.  On full agreement it sizes the windows, sets up
 * every qp's rx ring, and starts per-qp negotiation; on any mismatch it
 * retries after NTB_LINK_DOWN_TIMEOUT ms while the link stays up.
 */
static void ntb_transport_link_work(struct work_struct *work)
{
	struct ntb_transport_ctx *nt =
		container_of(work, struct ntb_transport_ctx, link_work.work);
	struct ntb_dev *ndev = nt->ndev;
	struct pci_dev *pdev = ndev->pdev;
	resource_size_t size;
	u32 val;
	int rc, i, spad;

	/* send the local info, in the opposite order of the way we read it */
	for (i = 0; i < nt->mw_count; i++) {
		size = nt->mw_vec[i].phys_size;

		if (max_mw_size && size > max_mw_size)
			size = max_mw_size;

		/* each window's size occupies two spads: high then low */
		spad = MW0_SZ_HIGH + (i * 2);
		ntb_peer_spad_write(ndev, spad, (u32)(size >> 32));

		spad = MW0_SZ_LOW + (i * 2);
		ntb_peer_spad_write(ndev, spad, (u32)size);
	}

	ntb_peer_spad_write(ndev, NUM_MWS, nt->mw_count);

	ntb_peer_spad_write(ndev, NUM_QPS, nt->qp_count);

	/* VERSION is written last: the peer reads it first as a gate */
	ntb_peer_spad_write(ndev, VERSION, NTB_TRANSPORT_VERSION);

	/* Query the remote side for its info */
	val = ntb_spad_read(ndev, VERSION);
	dev_dbg(&pdev->dev, "Remote version = %d\n", val);
	if (val != NTB_TRANSPORT_VERSION)
		goto out;

	val = ntb_spad_read(ndev, NUM_QPS);
	dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
	if (val != nt->qp_count)
		goto out;

	val = ntb_spad_read(ndev, NUM_MWS);
	dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);
	if (val != nt->mw_count)
		goto out;

	for (i = 0; i < nt->mw_count; i++) {
		u64 val64;

		/* reassemble the 64-bit window size from the two spads */
		val = ntb_spad_read(ndev, MW0_SZ_HIGH + (i * 2));
		val64 = (u64)val << 32;

		val = ntb_spad_read(ndev, MW0_SZ_LOW + (i * 2));
		val64 |= val;

		dev_dbg(&pdev->dev, "Remote MW%d size = %#llx\n", i, val64);

		rc = ntb_set_mw(nt, i, val64);
		if (rc)
			goto out1;
	}

	nt->link_is_up = true;

	for (i = 0; i < nt->qp_count; i++) {
		struct ntb_transport_qp *qp = &nt->qp_vec[i];

		ntb_transport_setup_qp_mw(nt, i);

		if (qp->client_ready)
			schedule_delayed_work(&qp->link_work, 0);
	}

	return;

out1:
	/* undo any windows set up before the failure */
	for (i = 0; i < nt->mw_count; i++)
		ntb_free_mw(nt, i);
out:
	if (ntb_link_is_up(ndev, NULL, NULL) == 1)
		schedule_delayed_work(&nt->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

/*
 * Per-qp link negotiation, run from a delayed work item once the
 * transport link is up.  Sets our ready bit in the peer's QP_LINKS
 * spad, then checks whether the peer has set its matching bit in ours;
 * retries after NTB_LINK_DOWN_TIMEOUT ms until it appears.
 */
static void ntb_qp_link_work(struct work_struct *work)
{
	struct ntb_transport_qp *qp = container_of(work,
						   struct ntb_transport_qp,
						   link_work.work);
	struct pci_dev *pdev = qp->ndev->pdev;
	struct ntb_transport_ctx *nt = qp->transport;
	int val;

	WARN_ON(!nt->link_is_up);

	val = ntb_spad_read(nt->ndev, QP_LINKS);

	ntb_peer_spad_write(nt->ndev, QP_LINKS, val | BIT(qp->qp_num));

	/* query remote spad for qp ready bits */
	/* NOTE(review): the read result is discarded and the check below
	 * uses the local QP_LINKS value read above — presumably the peer
	 * mirrors its ready bit into our spad; confirm this is intended */
	ntb_peer_spad_read(nt->ndev, QP_LINKS);
	dev_dbg_ratelimited(&pdev->dev, "Remote QP link status = %x\n", val);

	/* See if the remote side is up */
	if (val & BIT(qp->qp_num)) {
		dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
		qp->link_is_up = true;

		if (qp->event_handler)
			qp->event_handler(qp->cb_data, qp->link_is_up);
	} else if (nt->link_is_up)
		schedule_delayed_work(&qp->link_work,
				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

871
static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
Jon Mason's avatar
Jon Mason committed
872
				    unsigned int qp_num)
873 874
{
	struct ntb_transport_qp *qp;
875 876 877
	struct ntb_transport_mw *mw;
	phys_addr_t mw_base;
	resource_size_t mw_size;
878
	unsigned int num_qps_mw, tx_size;
879
	unsigned int mw_num, mw_count, qp_count;
880
	u64 qp_offset;
Jon Mason's avatar
Jon Mason committed
881

882 883
	mw_count = nt->mw_count;
	qp_count = nt->qp_count;
884

885 886 887 888
	mw_num = QP_TO_MW(nt, qp_num);
	mw = &nt->mw_vec[mw_num];

	qp = &nt->qp_vec[qp_num];
889 890 891
	qp->qp_num = qp_num;
	qp->transport = nt;
	qp->ndev = nt->ndev;
892
	qp->client_ready = false;
893
	qp->event_handler = NULL;
894
	ntb_qp_link_down_reset(qp);
895

896 897
	if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
		num_qps_mw = qp_count / mw_count + 1;
898
	else
899 900 901 902
		num_qps_mw = qp_count / mw_count;

	mw_base = nt->mw_vec[mw_num].phys_addr;
	mw_size = nt->mw_vec[mw_num].phys_size;
903

904 905 906 907
	tx_size = (unsigned int)mw_size / num_qps_mw;
	qp_offset = tx_size * qp_num / mw_count;

	qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset;
908 909 910
	if (!qp->tx_mw)
		return -EINVAL;

911
	qp->tx_mw_phys = mw_base + qp_offset;
912 913 914
	if (!qp->tx_mw_phys)
		return -EINVAL;

Jon Mason's avatar
Jon Mason committed
915
	tx_size -= sizeof(struct ntb_rx_info);
916
	qp->rx_info = qp->tx_mw + tx_size;
Jon Mason's avatar
Jon Mason committed