/*
 * Core IEEE1394 transaction logic
 *
 * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/bug.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/idr.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include <asm/byteorder.h>

#include "core.h"

#define HEADER_PRI(pri)			((pri) << 0)
#define HEADER_TCODE(tcode)		((tcode) << 4)
#define HEADER_RETRY(retry)		((retry) << 8)
#define HEADER_TLABEL(tlabel)		((tlabel) << 10)
#define HEADER_DESTINATION(destination)	((destination) << 16)
#define HEADER_SOURCE(source)		((source) << 16)
#define HEADER_RCODE(rcode)		((rcode) << 12)
#define HEADER_OFFSET_HIGH(offset_high)	((offset_high) << 0)
#define HEADER_DATA_LENGTH(length)	((length) << 16)
#define HEADER_EXTENDED_TCODE(tcode)	((tcode) << 0)

#define HEADER_GET_TCODE(q)		(((q) >> 4) & 0x0f)
#define HEADER_GET_TLABEL(q)		(((q) >> 10) & 0x3f)
#define HEADER_GET_RCODE(q)		(((q) >> 12) & 0x0f)
#define HEADER_GET_DESTINATION(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_SOURCE(q)		(((q) >> 16) & 0xffff)
#define HEADER_GET_OFFSET_HIGH(q)	(((q) >> 0) & 0xffff)
#define HEADER_GET_DATA_LENGTH(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_EXTENDED_TCODE(q)	(((q) >> 0) & 0xffff)

#define HEADER_DESTINATION_IS_BROADCAST(q) \
	(((q) & HEADER_DESTINATION(0x3f)) == HEADER_DESTINATION(0x3f))

#define PHY_PACKET_CONFIG	0x0
#define PHY_PACKET_LINK_ON	0x1
#define PHY_PACKET_SELF_ID	0x2

#define PHY_CONFIG_GAP_COUNT(gap_count)	(((gap_count) << 16) | (1 << 22))
#define PHY_CONFIG_ROOT_ID(node_id)	((((node_id) & 0x3f) << 24) | (1 << 23))
#define PHY_IDENTIFIER(id)		((id) << 30)

/* returns 0 if the split timeout handler is already running */
static int try_cancel_split_timeout(struct fw_transaction *t)
{
	if (t->is_split_transaction)
		return del_timer(&t->split_timeout_timer);
	else
		return 1;
}

static int close_transaction(struct fw_transaction *transaction,
			     struct fw_card *card, int rcode)
{
	struct fw_transaction *t;
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(t, &card->transaction_list, link) {
		if (t == transaction) {
			if (!try_cancel_split_timeout(t)) {
				spin_unlock_irqrestore(&card->lock, flags);
				goto timed_out;
			}
			list_del_init(&t->link);
			card->tlabel_mask &= ~(1ULL << t->tlabel);
			break;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);

	if (&t->link != &card->transaction_list) {
		t->callback(card, rcode, NULL, 0, t->callback_data);
		return 0;
	}

 timed_out:
	return -ENOENT;
}

/*
 * Only valid for transactions that are potentially pending (i.e., have
 * been sent).
 */
int fw_cancel_transaction(struct fw_card *card,
			  struct fw_transaction *transaction)
{
	/*
	 * Cancel the packet transmission if it's still queued.  That
	 * will call the packet transmission callback which cancels
	 * the transaction.
	 */

	if (card->driver->cancel_packet(card, &transaction->packet) == 0)
		return 0;

	/*
	 * If the request packet has already been sent, we need to see
	 * if the transaction is still pending and remove it in that case.
	 */

	return close_transaction(transaction, card, RCODE_CANCELLED);
}
EXPORT_SYMBOL(fw_cancel_transaction);

static void split_transaction_timeout_callback(unsigned long data)
{
	struct fw_transaction *t = (struct fw_transaction *)data;
	struct fw_card *card = t->card;
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);
	if (list_empty(&t->link)) {
		spin_unlock_irqrestore(&card->lock, flags);
		return;
	}
	list_del(&t->link);
	card->tlabel_mask &= ~(1ULL << t->tlabel);
	spin_unlock_irqrestore(&card->lock, flags);

	t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
}

static void start_split_transaction_timeout(struct fw_transaction *t,
					    struct fw_card *card)
{
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);

	if (list_empty(&t->link) || WARN_ON(t->is_split_transaction)) {
		spin_unlock_irqrestore(&card->lock, flags);
		return;
	}

	t->is_split_transaction = true;
	mod_timer(&t->split_timeout_timer,
		  jiffies + card->split_timeout_jiffies);

	spin_unlock_irqrestore(&card->lock, flags);
}

static void transmit_complete_callback(struct fw_packet *packet,
				       struct fw_card *card, int status)
{
	struct fw_transaction *t =
	    container_of(packet, struct fw_transaction, packet);

	switch (status) {
	case ACK_COMPLETE:
		close_transaction(t, card, RCODE_COMPLETE);
		break;
	case ACK_PENDING:
		start_split_transaction_timeout(t, card);
		break;
	case ACK_BUSY_X:
	case ACK_BUSY_A:
	case ACK_BUSY_B:
		close_transaction(t, card, RCODE_BUSY);
		break;
	case ACK_DATA_ERROR:
		close_transaction(t, card, RCODE_DATA_ERROR);
		break;
	case ACK_TYPE_ERROR:
		close_transaction(t, card, RCODE_TYPE_ERROR);
		break;
	default:
		/*
		 * In this case the ack is really a juju specific
		 * rcode, so just forward that to the callback.
		 */
		close_transaction(t, card, status);
		break;
	}
}

static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
		int destination_id, int source_id, int generation, int speed,
		unsigned long long offset, void *payload, size_t length)
{
	int ext_tcode;

	if (tcode == TCODE_STREAM_DATA) {
		packet->header[0] =
			HEADER_DATA_LENGTH(length) |
			destination_id |
			HEADER_TCODE(TCODE_STREAM_DATA);
		packet->header_length = 4;
		packet->payload = payload;
		packet->payload_length = length;

		goto common;
	}

	if (tcode > 0x10) {
		ext_tcode = tcode & ~0x10;
		tcode = TCODE_LOCK_REQUEST;
	} else
		ext_tcode = 0;

	packet->header[0] =
		HEADER_RETRY(RETRY_X) |
		HEADER_TLABEL(tlabel) |
		HEADER_TCODE(tcode) |
		HEADER_DESTINATION(destination_id);
	packet->header[1] =
		HEADER_OFFSET_HIGH(offset >> 32) | HEADER_SOURCE(source_id);
	packet->header[2] =
		offset;

	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		packet->header[3] = *(u32 *)payload;
		packet->header_length = 16;
		packet->payload_length = 0;
		break;

	case TCODE_LOCK_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		packet->header[3] =
			HEADER_DATA_LENGTH(length) |
			HEADER_EXTENDED_TCODE(ext_tcode);
		packet->header_length = 16;
		packet->payload = payload;
		packet->payload_length = length;
		break;

	case TCODE_READ_QUADLET_REQUEST:
		packet->header_length = 12;
		packet->payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST:
		packet->header[3] =
			HEADER_DATA_LENGTH(length) |
			HEADER_EXTENDED_TCODE(ext_tcode);
		packet->header_length = 16;
		packet->payload_length = 0;
		break;

	default:
		WARN(1, "wrong tcode %d\n", tcode);
	}
 common:
	packet->speed = speed;
	packet->generation = generation;
	packet->ack = 0;
	packet->payload_mapped = false;
}

static int allocate_tlabel(struct fw_card *card)
{
	int tlabel;

	tlabel = card->current_tlabel;
	while (card->tlabel_mask & (1ULL << tlabel)) {
		tlabel = (tlabel + 1) & 0x3f;
		if (tlabel == card->current_tlabel)
			return -EBUSY;
	}

	card->current_tlabel = (tlabel + 1) & 0x3f;
	card->tlabel_mask |= 1ULL << tlabel;

	return tlabel;
}

/**
 * fw_send_request() - submit a request packet for transmission
 * @card:		interface to send the request at
 * @t:			transaction instance to which the request belongs
 * @tcode:		transaction code
 * @destination_id:	destination node ID, consisting of bus_ID and phy_ID
 * @generation:		bus generation in which request and response are valid
 * @speed:		transmission speed
 * @offset:		48-bit wide offset into destination's address space
 * @payload:		data payload for the request subaction
 * @length:		length of the payload, in bytes
 * @callback:		function to be called when the transaction is completed
 * @callback_data:	data to be passed to the transaction completion callback
 *
 * Submit a request packet into the asynchronous request transmission queue.
 * Can be called from atomic context.  If you prefer a blocking API, use
 * fw_run_transaction() in a context that can sleep.
 *
 * In case of lock requests, specify one of the firewire-core specific %TCODE_
 * constants instead of %TCODE_LOCK_REQUEST in @tcode.
 *
 * Make sure that the value in @destination_id is not older than the one in
 * @generation.  Otherwise the request is in danger of being sent to the
 * wrong node.
 *
 * In case of asynchronous stream packets, i.e. %TCODE_STREAM_DATA, the caller
 * needs to synthesize @destination_id with fw_stream_packet_destination_id().
 * It will contain tag, channel, and sy data instead of a node ID then.
 *
 * The payload buffer at @payload is going to be DMA-mapped except in case of
 * @length <= 8 or of local (loopback) requests.  Hence make sure that the
 * buffer complies with the restrictions of the streaming DMA mapping API.
 * @payload must not be freed before the @callback is called.
 *
 * In case of request types without payload, @payload is NULL and @length is 0.
 *
 * After the transaction is completed successfully or unsuccessfully, the
 * @callback will be called.  Among its parameters is the response code which
 * is either one of the rcodes per IEEE 1394 or, in case of internal errors,
 * the firewire-core specific %RCODE_SEND_ERROR.  The other firewire-core
 * specific rcodes (%RCODE_CANCELLED, %RCODE_BUSY, %RCODE_GENERATION,
 * %RCODE_NO_ACK) denote transaction timeout, busy responder, stale request
 * generation, or missing ACK respectively.
 *
 * Note some timing corner cases:  fw_send_request() may complete much earlier
 * than when the request packet actually hits the wire.  On the other hand,
 * transaction completion and hence execution of @callback may happen even
 * before fw_send_request() returns.
 */
void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
		     int destination_id, int generation, int speed,
		     unsigned long long offset, void *payload, size_t length,
		     fw_transaction_callback_t callback, void *callback_data)
{
	unsigned long flags;
	int tlabel;

	/*
	 * Allocate tlabel from the bitmap and put the transaction on
	 * the list while holding the card spinlock.
	 */

	spin_lock_irqsave(&card->lock, flags);

	tlabel = allocate_tlabel(card);
	if (tlabel < 0) {
		spin_unlock_irqrestore(&card->lock, flags);
		callback(card, RCODE_SEND_ERROR, NULL, 0, callback_data);
		return;
	}

	t->node_id = destination_id;
	t->tlabel = tlabel;
	t->card = card;
	t->is_split_transaction = false;
	setup_timer(&t->split_timeout_timer,
		    split_transaction_timeout_callback, (unsigned long)t);
	t->callback = callback;
	t->callback_data = callback_data;

	fw_fill_request(&t->packet, tcode, t->tlabel,
			destination_id, card->node_id, generation,
			speed, offset, payload, length);
	t->packet.callback = transmit_complete_callback;

	list_add_tail(&t->link, &card->transaction_list);

	spin_unlock_irqrestore(&card->lock, flags);

	card->driver->send_request(card, &t->packet);
}
EXPORT_SYMBOL(fw_send_request);
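
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * an asynchronous quadlet write with fw_send_request().  The embedding
 * struct, its lifetime handling and all example_* names are hypothetical.
 */
#if 0
struct example_write {
	struct fw_transaction t;
	__be32 data;
};

static void example_write_done(struct fw_card *card, int rcode,
			       void *payload, size_t length, void *data)
{
	struct example_write *w = data;

	if (rcode != RCODE_COMPLETE)
		pr_err("write failed: %s\n", fw_rcode_string(rcode));
	kfree(w);	/* the payload must stay valid until this callback */
}

static void example_write_quadlet(struct fw_card *card, int node_id,
				  int generation, unsigned long long offset)
{
	struct example_write *w = kmalloc(sizeof(*w), GFP_ATOMIC);

	if (!w)
		return;
	w->data = cpu_to_be32(0x12345678);
	fw_send_request(card, &w->t, TCODE_WRITE_QUADLET_REQUEST, node_id,
			generation, SCODE_100, offset, &w->data, 4,
			example_write_done, w);
}
#endif  /*  0  */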

struct transaction_callback_data {
	struct completion done;
	void *payload;
	int rcode;
};

static void transaction_callback(struct fw_card *card, int rcode,
				 void *payload, size_t length, void *data)
{
	struct transaction_callback_data *d = data;

	if (rcode == RCODE_COMPLETE)
		memcpy(d->payload, payload, length);
	d->rcode = rcode;
	complete(&d->done);
}

/**
 * fw_run_transaction() - send request and sleep until transaction is completed
 *
 * Returns the RCODE.  See fw_send_request() for parameter documentation.
 * Unlike fw_send_request(), @payload points to the payload of the request
 * and/or to the payload of the response.  DMA mapping restrictions apply to
 * outbound request payloads of >= 8 bytes but not to inbound response
 * payloads.
 */
int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
		       int generation, int speed, unsigned long long offset,
		       void *payload, size_t length)
{
	struct transaction_callback_data d;
	struct fw_transaction t;

	init_timer_on_stack(&t.split_timeout_timer);
	init_completion(&d.done);
	d.payload = payload;
	fw_send_request(card, &t, tcode, destination_id, generation, speed,
			offset, payload, length, transaction_callback, &d);
	wait_for_completion(&d.done);
	destroy_timer_on_stack(&t.split_timeout_timer);

	return d.rcode;
}
EXPORT_SYMBOL(fw_run_transaction);
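
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * a blocking quadlet read from a remote node's Config ROM.  The fw_device
 * parameter and the example_* name are hypothetical; generation handling
 * is simplified.
 */
#if 0
static int example_read_rom_header(struct fw_device *device, u32 *value)
{
	__be32 quadlet;
	int rcode;

	rcode = fw_run_transaction(device->card, TCODE_READ_QUADLET_REQUEST,
				   device->node_id, device->generation,
				   device->max_speed,
				   CSR_REGISTER_BASE | CSR_CONFIG_ROM,
				   &quadlet, 4);
	if (rcode != RCODE_COMPLETE)
		return -EIO;

	*value = be32_to_cpu(quadlet);
	return 0;
}
#endif  /*  0  */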

static DEFINE_MUTEX(phy_config_mutex);
static DECLARE_COMPLETION(phy_config_done);

static void transmit_phy_packet_callback(struct fw_packet *packet,
					 struct fw_card *card, int status)
{
	complete(&phy_config_done);
}

static struct fw_packet phy_config_packet = {
	.header_length	= 12,
	.header[0]	= TCODE_LINK_INTERNAL << 4,
	.payload_length	= 0,
	.speed		= SCODE_100,
	.callback	= transmit_phy_packet_callback,
};

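/*
 * Editor's note (background, not from the original sources):  a PHY
 * configuration packet is a single quadlet sent at S100; on the wire the
 * quadlet is followed by its bitwise complement as a check quadlet, which
 * is why header[2] is set to ~data below.
 */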
void fw_send_phy_config(struct fw_card *card,
			int node_id, int generation, int gap_count)
{
	long timeout = DIV_ROUND_UP(HZ, 10);
	u32 data = PHY_IDENTIFIER(PHY_PACKET_CONFIG);

	if (node_id != FW_PHY_CONFIG_NO_NODE_ID)
		data |= PHY_CONFIG_ROOT_ID(node_id);

	if (gap_count == FW_PHY_CONFIG_CURRENT_GAP_COUNT) {
		gap_count = card->driver->read_phy_reg(card, 1);
		if (gap_count < 0)
			return;

		gap_count &= 63;
		if (gap_count == 63)
			return;
	}
	data |= PHY_CONFIG_GAP_COUNT(gap_count);

	mutex_lock(&phy_config_mutex);

	phy_config_packet.header[1] = data;
	phy_config_packet.header[2] = ~data;
	phy_config_packet.generation = generation;
	reinit_completion(&phy_config_done);

	card->driver->send_request(card, &phy_config_packet);
	wait_for_completion_timeout(&phy_config_done, timeout);

	mutex_unlock(&phy_config_mutex);
}

static struct fw_address_handler *lookup_overlapping_address_handler(
	struct list_head *list, unsigned long long offset, size_t length)
{
	struct fw_address_handler *handler;

	list_for_each_entry_rcu(handler, list, link) {
		if (handler->offset < offset + length &&
		    offset < handler->offset + handler->length)
			return handler;
	}

	return NULL;
}

static bool is_enclosing_handler(struct fw_address_handler *handler,
				 unsigned long long offset, size_t length)
{
	return handler->offset <= offset &&
		offset + length <= handler->offset + handler->length;
}

static struct fw_address_handler *lookup_enclosing_address_handler(
	struct list_head *list, unsigned long long offset, size_t length)
{
	struct fw_address_handler *handler;

	list_for_each_entry_rcu(handler, list, link) {
		if (is_enclosing_handler(handler, offset, length))
			return handler;
	}

	return NULL;
}

static DEFINE_SPINLOCK(address_handler_list_lock);
static LIST_HEAD(address_handler_list);

const struct fw_address_region fw_high_memory_region =
	{ .start = FW_MAX_PHYSICAL_RANGE, .end = 0xffffe0000000ULL, };
EXPORT_SYMBOL(fw_high_memory_region);

static const struct fw_address_region low_memory_region =
	{ .start = 0x000000000000ULL, .end = FW_MAX_PHYSICAL_RANGE, };

#if 0
const struct fw_address_region fw_private_region =
	{ .start = 0xffffe0000000ULL, .end = 0xfffff0000000ULL,  };
const struct fw_address_region fw_csr_region =
	{ .start = CSR_REGISTER_BASE,
	  .end   = CSR_REGISTER_BASE | CSR_CONFIG_ROM_END,  };
const struct fw_address_region fw_unit_space_region =
	{ .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, };
#endif  /*  0  */

static bool is_in_fcp_region(u64 offset, size_t length)
{
	return offset >= (CSR_REGISTER_BASE | CSR_FCP_COMMAND) &&
		offset + length <= (CSR_REGISTER_BASE | CSR_FCP_END);
}

/**
 * fw_core_add_address_handler() - register for incoming requests
 * @handler:	callback
 * @region:	region in the IEEE 1212 node space address range
 *
 * region->start, ->end, and handler->length have to be quadlet-aligned.
 *
 * When a request is received that falls within the specified address range,
 * the specified callback is invoked.  The parameters passed to the callback
 * give the details of the particular request.
 *
 * To be called in process context.
 * Return value:  0 on success, non-zero otherwise.
 *
 * The start offset of the handler's address region is determined by
 * fw_core_add_address_handler() and is returned in handler->offset.
 *
 * Address allocations are exclusive, except for the FCP registers.
 */
int fw_core_add_address_handler(struct fw_address_handler *handler,
				const struct fw_address_region *region)
{
	struct fw_address_handler *other;
	int ret = -EBUSY;

	if (region->start & 0xffff000000000003ULL ||
	    region->start >= region->end ||
	    region->end   > 0x0001000000000000ULL ||
	    handler->length & 3 ||
	    handler->length == 0)
		return -EINVAL;

	spin_lock(&address_handler_list_lock);

	handler->offset = region->start;
	while (handler->offset + handler->length <= region->end) {
		if (is_in_fcp_region(handler->offset, handler->length))
			other = NULL;
		else
			other = lookup_overlapping_address_handler
					(&address_handler_list,
					 handler->offset, handler->length);
		if (other != NULL) {
			handler->offset += other->length;
		} else {
			list_add_tail_rcu(&handler->link, &address_handler_list);
			ret = 0;
			break;
		}
	}

	spin_unlock(&address_handler_list_lock);

	return ret;
}
EXPORT_SYMBOL(fw_core_add_address_handler);

/**
 * fw_core_remove_address_handler() - unregister an address handler
 * @handler:	callback
 *
 * To be called in process context.
 *
 * When fw_core_remove_address_handler() returns, @handler->callback() is
 * guaranteed to not run on any CPU anymore.
 */
void fw_core_remove_address_handler(struct fw_address_handler *handler)
{
	spin_lock(&address_handler_list_lock);
	list_del_rcu(&handler->link);
	spin_unlock(&address_handler_list_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL(fw_core_remove_address_handler);
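
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * registering a read-only quadlet register in the high memory region.
 * All example_* names are hypothetical.
 */
#if 0
static void example_address_callback(struct fw_card *card,
		struct fw_request *request, int tcode, int destination,
		int source, int generation, unsigned long long offset,
		void *payload, size_t length, void *callback_data)
{
	if (tcode == TCODE_READ_QUADLET_REQUEST) {
		*(__be32 *)payload = cpu_to_be32(0x12345678);
		fw_send_response(card, request, RCODE_COMPLETE);
	} else {
		fw_send_response(card, request, RCODE_TYPE_ERROR);
	}
}

static struct fw_address_handler example_handler = {
	.length			= 4,
	.address_callback	= example_address_callback,
};

static int example_register_handler(void)
{
	/* On success, the allocated offset is in example_handler.offset. */
	return fw_core_add_address_handler(&example_handler,
					   &fw_high_memory_region);
}

static void example_unregister_handler(void)
{
	fw_core_remove_address_handler(&example_handler);
}
#endif  /*  0  */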

struct fw_request {
	struct fw_packet response;
	u32 request_header[4];
	int ack;
	u32 length;
	u32 data[0];
};

static void free_response_callback(struct fw_packet *packet,
				   struct fw_card *card, int status)
{
	struct fw_request *request;

	request = container_of(packet, struct fw_request, response);
	kfree(request);
}

int fw_get_response_length(struct fw_request *r)
{
	int tcode, ext_tcode, data_length;

	tcode = HEADER_GET_TCODE(r->request_header[0]);

	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		return 0;

	case TCODE_READ_QUADLET_REQUEST:
		return 4;

	case TCODE_READ_BLOCK_REQUEST:
		data_length = HEADER_GET_DATA_LENGTH(r->request_header[3]);
		return data_length;

	case TCODE_LOCK_REQUEST:
		ext_tcode = HEADER_GET_EXTENDED_TCODE(r->request_header[3]);
		data_length = HEADER_GET_DATA_LENGTH(r->request_header[3]);
		switch (ext_tcode) {
		case EXTCODE_FETCH_ADD:
		case EXTCODE_LITTLE_ADD:
			return data_length;
		default:
			return data_length / 2;
		}

	default:
		WARN(1, "wrong tcode %d\n", tcode);
		return 0;
	}
}

void fw_fill_response(struct fw_packet *response, u32 *request_header,
		      int rcode, void *payload, size_t length)
{
	int tcode, tlabel, extended_tcode, source, destination;

	tcode          = HEADER_GET_TCODE(request_header[0]);
	tlabel         = HEADER_GET_TLABEL(request_header[0]);
	source         = HEADER_GET_DESTINATION(request_header[0]);
	destination    = HEADER_GET_SOURCE(request_header[1]);
	extended_tcode = HEADER_GET_EXTENDED_TCODE(request_header[3]);

	response->header[0] =
		HEADER_RETRY(RETRY_1) |
		HEADER_TLABEL(tlabel) |
		HEADER_DESTINATION(destination);
	response->header[1] =
		HEADER_SOURCE(source) |
		HEADER_RCODE(rcode);
	response->header[2] = 0;

	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		response->header[0] |= HEADER_TCODE(TCODE_WRITE_RESPONSE);
		response->header_length = 12;
		response->payload_length = 0;
		break;

	case TCODE_READ_QUADLET_REQUEST:
		response->header[0] |=
			HEADER_TCODE(TCODE_READ_QUADLET_RESPONSE);
		if (payload != NULL)
			response->header[3] = *(u32 *)payload;
		else
			response->header[3] = 0;
		response->header_length = 16;
		response->payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_LOCK_REQUEST:
		response->header[0] |= HEADER_TCODE(tcode + 2);
		response->header[3] =
			HEADER_DATA_LENGTH(length) |
			HEADER_EXTENDED_TCODE(extended_tcode);
		response->header_length = 16;
		response->payload = payload;
		response->payload_length = length;
		break;

	default:
		WARN(1, "wrong tcode %d\n", tcode);
	}

	response->payload_mapped = false;
}
EXPORT_SYMBOL(fw_fill_response);

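/*
 * Editor's note (illustration, not from the original sources):  the
 * timestamp below uses the CYCLE_TIME layout, seconds in bits 13 and up,
 * cycle count (0..7999, 8000 cycles per second) in the low 13 bits.  E.g. a
 * request stamped at cycle 7900 with split_timeout_cycles == 800 expires at
 * 7900 + 800 = 8700, i.e. one second and 700 cycles later.
 */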
static u32 compute_split_timeout_timestamp(struct fw_card *card,
					   u32 request_timestamp)
{
	unsigned int cycles;
	u32 timestamp;

	cycles = card->split_timeout_cycles;
	cycles += request_timestamp & 0x1fff;

	timestamp = request_timestamp & ~0x1fff;
	timestamp += (cycles / 8000) << 13;
	timestamp |= cycles % 8000;

	return timestamp;
}

static struct fw_request *allocate_request(struct fw_card *card,
					   struct fw_packet *p)
{
	struct fw_request *request;
	u32 *data, length;
	int request_tcode;

	request_tcode = HEADER_GET_TCODE(p->header[0]);
	switch (request_tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		data = &p->header[3];
		length = 4;
		break;

	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_LOCK_REQUEST:
		data = p->payload;
		length = HEADER_GET_DATA_LENGTH(p->header[3]);
		break;

	case TCODE_READ_QUADLET_REQUEST:
		data = NULL;
		length = 4;
		break;

	case TCODE_READ_BLOCK_REQUEST:
		data = NULL;
		length = HEADER_GET_DATA_LENGTH(p->header[3]);
		break;

	default:
		fw_notice(card, "ERROR - corrupt request received - %08x %08x %08x\n",
			 p->header[0], p->header[1], p->header[2]);
		return NULL;
	}

	request = kmalloc(sizeof(*request) + length, GFP_ATOMIC);
	if (request == NULL)
		return NULL;

	request->response.speed = p->speed;
	request->response.timestamp =
			compute_split_timeout_timestamp(card, p->timestamp);
	request->response.generation = p->generation;
	request->response.ack = 0;
	request->response.callback = free_response_callback;
	request->ack = p->ack;
	request->length = length;
	if (data)
		memcpy(request->data, data, length);

	memcpy(request->request_header, p->header, sizeof(p->header));

	return request;
}

void fw_send_response(struct fw_card *card,
		      struct fw_request *request, int rcode)
{
	if (WARN_ONCE(!request, "invalid for FCP address handlers"))
		return;

	/* unified transaction or broadcast transaction: don't respond */
	if (request->ack != ACK_PENDING ||
	    HEADER_DESTINATION_IS_BROADCAST(request->request_header[0])) {
		kfree(request);
		return;
	}

	if (rcode == RCODE_COMPLETE)
		fw_fill_response(&request->response, request->request_header,
				 rcode, request->data,
				 fw_get_response_length(request));
	else
		fw_fill_response(&request->response, request->request_header,
				 rcode, NULL, 0);

	card->driver->send_response(card, &request->response);
}
EXPORT_SYMBOL(fw_send_response);

/**
 * fw_get_request_speed() - returns speed at which the @request was received
 * @request: firewire request data
 */
int fw_get_request_speed(struct fw_request *request)
{
	return request->response.speed;
}
EXPORT_SYMBOL(fw_get_request_speed);

static void handle_exclusive_region_request(struct fw_card *card,
					    struct fw_packet *p,
					    struct fw_request *request,
					    unsigned long long offset)
{
	struct fw_address_handler *handler;
	int tcode, destination, source;

	destination = HEADER_GET_DESTINATION(p->header[0]);
	source      = HEADER_GET_SOURCE(p->header[1]);
	tcode       = HEADER_GET_TCODE(p->header[0]);
	if (tcode == TCODE_LOCK_REQUEST)
		tcode = 0x10 + HEADER_GET_EXTENDED_TCODE(p->header[3]);

	rcu_read_lock();
	handler = lookup_enclosing_address_handler(&address_handler_list,
						   offset, request->length);
	if (handler)
		handler->address_callback(card, request,
					  tcode, destination, source,
					  p->generation, offset,
					  request->data, request->length,
					  handler->callback_data);
	rcu_read_unlock();

	if (!handler)
		fw_send_response(card, request, RCODE_ADDRESS_ERROR);
}

static void handle_fcp_region_request(struct fw_card *card,
				      struct fw_packet *p,
				      struct fw_request *request,
				      unsigned long long offset)
{
	struct fw_address_handler *handler;
	int tcode, destination, source;

	if ((offset != (CSR_REGISTER_BASE | CSR_FCP_COMMAND) &&
	     offset != (CSR_REGISTER_BASE | CSR_FCP_RESPONSE)) ||
	    request->length > 0x200) {
		fw_send_response(card, request, RCODE_ADDRESS_ERROR);

		return;
	}

	tcode       = HEADER_GET_TCODE(p->header[0]);
	destination = HEADER_GET_DESTINATION(p->header[0]);
	source      = HEADER_GET_SOURCE(p->header[1]);

	if (tcode != TCODE_WRITE_QUADLET_REQUEST &&
	    tcode != TCODE_WRITE_BLOCK_REQUEST) {
		fw_send_response(card, request, RCODE_TYPE_ERROR);

		return;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(handler, &address_handler_list, link) {
		if (is_enclosing_handler(handler, offset, request->length))
			handler->address_callback(card, NULL, tcode,
						  destination, source,
						  p->generation, offset,
						  request->data,
						  request->length,
						  handler->callback_data);
	}
	rcu_read_unlock();

	fw_send_response(card, request, RCODE_COMPLETE);
}

void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
{
	struct fw_request *request;
	unsigned long long offset;

	if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE)
		return;

	if (TCODE_IS_LINK_INTERNAL(HEADER_GET_TCODE(p->header[0]))) {
		fw_cdev_handle_phy_packet(card, p);
		return;
	}

	request = allocate_request(card, p);
	if (request == NULL) {
		/* FIXME: send statically allocated busy packet. */
		return;
	}

	offset = ((u64)HEADER_GET_OFFSET_HIGH(p->header[1]) << 32) |
		p->header[2];

	if (!is_in_fcp_region(offset, request->length))
		handle_exclusive_region_request(card, p, request, offset);
	else
		handle_fcp_region_request(card, p, request, offset);
}
EXPORT_SYMBOL(fw_core_handle_request);

void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
{
	struct fw_transaction *t;
	unsigned long flags;
	u32 *data;
	size_t data_length;
	int tcode, tlabel, source, rcode;

	tcode	= HEADER_GET_TCODE(p->header[0]);
	tlabel	= HEADER_GET_TLABEL(p->header[0]);
	source	= HEADER_GET_SOURCE(p->header[1]);
	rcode	= HEADER_GET_RCODE(p->header[1]);

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(t, &card->transaction_list, link) {
		if (t->node_id == source && t->tlabel == tlabel) {
			if (!try_cancel_split_timeout(t)) {
				spin_unlock_irqrestore(&card->lock, flags);
				goto timed_out;
			}
			list_del_init(&t->link);
			card->tlabel_mask &= ~(1ULL << t->tlabel);
			break;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);

	if (&t->link == &card->transaction_list) {
 timed_out:
		fw_notice(card, "unsolicited response (source %x, tlabel %x)\n",
			  source, tlabel);
		return;
	}

	/*
	 * FIXME: sanity check the packet: is the length correct, do the
	 * tcode and addresses match?
	 */

	switch (tcode) {
	case TCODE_READ_QUADLET_RESPONSE:
		data = (u32 *) &p->header[3];
		data_length = 4;
		break;

	case TCODE_WRITE_RESPONSE:
		data = NULL;
		data_length = 0;
		break;

	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_RESPONSE:
		data = p->payload;
		data_length = HEADER_GET_DATA_LENGTH(p->header[3]);
		break;

	default:
		/* Should never happen, this is just to shut up gcc. */
		data = NULL;
		data_length = 0;
		break;
	}

	/*
	 * The response handler may be executed while the request handler
	 * is still pending.  Cancel the request handler.
	 */
	card->driver->cancel_packet(card, &t->packet);

	t->callback(card, rcode, data, data_length, t->callback_data);
}
EXPORT_SYMBOL(fw_core_handle_response);

/**
 * fw_rcode_string - convert a firewire result code to an error description
 * @rcode: the result code
 */
const char *fw_rcode_string(int rcode)
{
	static const char *const names[] = {
		[RCODE_COMPLETE]       = "no error",
		[RCODE_CONFLICT_ERROR] = "conflict error",
		[RCODE_DATA_ERROR]     = "data error",
		[RCODE_TYPE_ERROR]     = "type error",
		[RCODE_ADDRESS_ERROR]  = "address error",
		[RCODE_SEND_ERROR]     = "send error",
		[RCODE_CANCELLED]      = "timeout",
		[RCODE_BUSY]           = "busy",
		[RCODE_GENERATION]     = "bus reset",
		[RCODE_NO_ACK]         = "no ack",
	};

	if ((unsigned int)rcode < ARRAY_SIZE(names) && names[rcode])
		return names[rcode];
	else
		return "unknown";
}
EXPORT_SYMBOL(fw_rcode_string);

static const struct fw_address_region topology_map_region =
	{ .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP,
	  .end   = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, };

static void handle_topology_map(struct fw_card *card, struct fw_request *request,
		int tcode, int destination, int source, int generation,
		unsigned long long offset, void *payload, size_t length,
		void *callback_data)
{
	int start;

	if (!TCODE_IS_READ_REQUEST(tcode)) {
		fw_send_response(card, request, RCODE_TYPE_ERROR);
		return;
	}

	if ((offset & 3) > 0 || (length & 3) > 0) {
		fw_send_response(card, request, RCODE_ADDRESS_ERROR);
		return;
	}

	start = (offset - topology_map_region.start) / 4;
	memcpy(payload, &card->topology_map[start], length);

	fw_send_response(card, request, RCODE_COMPLETE);
}

static struct fw_address_handler topology_map = {
	.length			= 0x400,
	.address_callback	= handle_topology_map,
};

static const struct fw_address_region registers_region =
	{ .start = CSR_REGISTER_BASE,
	  .end   = CSR_REGISTER_BASE | CSR_CONFIG_ROM, };

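/*
 * Editor's note (illustration, not from the original sources):  with the
 * power-on defaults of split_timeout_hi == 0 and split_timeout_lo ==
 * 800 << 19, this computes 800 cycles, i.e. 100 ms at 8000 bus cycles
 * per second.
 */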
static void update_split_timeout(struct fw_card *card)
{
	unsigned int cycles;

	cycles = card->split_timeout_hi * 8000 + (card->split_timeout_lo >> 19);

	/* minimum per IEEE 1394, maximum which doesn't overflow OHCI */
	cycles = clamp(cycles, 800u, 3u * 8000u);

	card->split_timeout_cycles = cycles;
	card->split_timeout_jiffies = DIV_ROUND_UP(cycles * HZ, 8000);
}

static void handle_registers(struct fw_card *card, struct fw_request *request,
		int tcode, int destination, int source, int generation,
		unsigned long long offset, void *payload, size_t length,
		void *callback_data)
{
	int reg = offset & ~CSR_REGISTER_BASE;
	__be32 *data = payload;
	int rcode = RCODE_COMPLETE;
	unsigned long flags;

	switch (reg) {
	case CSR_PRIORITY_BUDGET:
		if (!card->priority_budget_implemented) {
			rcode = RCODE_ADDRESS_ERROR;
			break;
		}
		/* else fall through */

	case CSR_NODE_IDS:
		/*
		 * per IEEE 1394-2008 8.3.22.3, not IEEE 1394.1-2004 3.2.8
		 * and 9.6, but interoperable with IEEE 1394.1-2004 bridges
		 */
		/* fall through */

	case CSR_STATE_CLEAR:
	case CSR_STATE_SET:
	case CSR_CYCLE_TIME:
	case CSR_BUS_TIME:
	case CSR_BUSY_TIMEOUT:
		if (tcode == TCODE_READ_QUADLET_REQUEST)
			*data = cpu_to_be32(card->driver->read_csr(card, reg));
		else if (tcode == TCODE_WRITE_QUADLET_REQUEST)
			card->driver->write_csr(card, reg, be32_to_cpu(*data));
		else
			rcode = RCODE_TYPE_ERROR;
		break;

	case CSR_RESET_START:
		if (tcode == TCODE_WRITE_QUADLET_REQUEST)
			card->driver->write_csr(card, CSR_STATE_CLEAR,
						CSR_STATE_BIT_ABDICATE);
		else
			rcode = RCODE_TYPE_ERROR;
		break;

	case CSR_SPLIT_TIMEOUT_HI:
		if (tcode == TCODE_READ_QUADLET_REQUEST) {
			*data = cpu_to_be32(card->split_timeout_hi);
		} else if (tcode == TCODE_WRITE_QUADLET_REQUEST) {
			spin_lock_irqsave(&card->lock, flags);
			card->split_timeout_hi = be32_to_cpu(*data) & 7;
			update_split_timeout(card);
			spin_unlock_irqrestore(&card->lock, flags);
		} else {
			rcode = RCODE_TYPE_ERROR;
		}
		break;

	case CSR_SPLIT_TIMEOUT_LO:
		if (tcode == TCODE_READ_QUADLET_REQUEST) {
			*data = cpu_to_be32(card->split_timeout_lo);
		} else if (tcode == TCODE_WRITE_QUADLET_REQUEST) {
			spin_lock_irqsave(&card->lock, flags);
			card->split_timeout_lo =
					be32_to_cpu(*data) & 0xfff80000;
			update_split_timeout(card);
			spin_unlock_irqrestore(&card->lock, flags);
		} else {
			rcode = RCODE_TYPE_ERROR;
		}
		break;

	case CSR_MAINT_UTILITY:
		if (tcode == TCODE_READ_QUADLET_REQUEST)
			*data = card->maint_utility_register;
		else if (tcode == TCODE_WRITE_QUADLET_REQUEST)
			card->maint_utility_register = *data;
		else
			rcode = RCODE_TYPE_ERROR;
		break;

	case CSR_BROADCAST_CHANNEL:
		if (tcode == TCODE_READ_QUADLET_REQUEST)
			*data = cpu_to_be32(card->broadcast_channel);
		else if (tcode == TCODE_WRITE_QUADLET_REQUEST)
			card->broadcast_channel =
			    (be32_to_cpu(*data) & BROADCAST_CHANNEL_VALID) |
			    BROADCAST_CHANNEL_INITIAL;
		else
			rcode = RCODE_TYPE_ERROR;
		break;

	case CSR_BUS_MANAGER_ID:
	case CSR_BANDWIDTH_AVAILABLE:
	case CSR_CHANNELS_AVAILABLE_HI:
	case CSR_CHANNELS_AVAILABLE_LO:
		/*
		 * FIXME: these are handled by the OHCI hardware and
		 * the stack never sees these requests.  If we add
		 * support for a new type of controller that doesn't
		 * handle this in hardware we need to deal with these
		 * transactions.
		 */
		BUG();
		break;

	default:
		rcode = RCODE_ADDRESS_ERROR;
		break;
	}

	fw_send_response(card, request, rcode);
}
static struct fw_address_handler registers = {
	.length			= 0x400,
	.address_callback	= handle_registers,
};

static void handle_low_memory(struct fw_card *card, struct fw_request *request,
		int tcode, int destination, int source, int generation,
		unsigned long long offset, void *payload, size_t length,
		void *callback_data)
{
	/*
	 * This catches requests not handled by the physical DMA unit,
	 * i.e., wrong transaction types or unauthorized source nodes.
	 */
	fw_send_response(card, request, RCODE_TYPE_ERROR);
}

static struct fw_address_handler low_memory = {
	.length			= FW_MAX_PHYSICAL_RANGE,
	.address_callback	= handle_low_memory,
};