/*
 * Copyright (C) ST-Ericsson AB 2010
 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
 * Author:  Daniel Martensson / daniel.martensson@stericsson.com
 *	    Dmitry.Tarnyagin  / dmitry.tarnyagin@stericsson.com
 * License terms: GNU General Public License (GPL) version 2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/if_arp.h>
#include <linux/timer.h>
#include <net/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <net/caif/caif_layer.h>
#include <net/caif/caif_hsi.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Martensson<daniel.martensson@stericsson.com>");
MODULE_DESCRIPTION("CAIF HSI driver");

/* Returns the number of padding bytes for alignment. */
#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\
				(((pow)-((x)&((pow)-1)))))
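/* E.g. PAD_POW2(5, 4) == 3 and PAD_POW2(8, 4) == 0. */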

static int inactivity_timeout = 1000;
module_param(inactivity_timeout, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(inactivity_timeout, "Inactivity timeout on HSI, ms.");

static int aggregation_timeout = 1;
module_param(aggregation_timeout, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(aggregation_timeout, "Aggregation timeout on HSI, ms.");

/*
 * HSI padding options.
 * Warning: must be a power of 2 (& operation used) and cannot be zero!
 */
static int hsi_head_align = 4;
module_param(hsi_head_align, int, S_IRUGO);
MODULE_PARM_DESC(hsi_head_align, "HSI head alignment.");

static int hsi_tail_align = 4;
module_param(hsi_tail_align, int, S_IRUGO);
MODULE_PARM_DESC(hsi_tail_align, "HSI tail alignment.");

/*
 * HSI link layer flow control thresholds.
 * Warning: A high threshold value might increase throughput but it will at
 * the same time prevent channel prioritization and increase the risk of
 * flooding the modem. The high threshold should be above the low.
 */
static int hsi_high_threshold = 100;
module_param(hsi_high_threshold, int, S_IRUGO);
MODULE_PARM_DESC(hsi_high_threshold, "HSI high threshold (FLOW OFF).");

static int hsi_low_threshold = 50;
module_param(hsi_low_threshold, int, S_IRUGO);
MODULE_PARM_DESC(hsi_low_threshold, "HSI low threshold (FLOW ON).");

#define ON 1
#define OFF 0

/*
 * Threshold values for the HSI packet queue. Flowcontrol will be asserted
 * when the number of packets exceeds HIGH_WATER_MARK. It will not be
 * de-asserted before the number of packets drops below LOW_WATER_MARK.
 */
#define LOW_WATER_MARK   hsi_low_threshold
#define HIGH_WATER_MARK  hsi_high_threshold

static LIST_HEAD(cfhsi_list);

static void cfhsi_inactivity_tout(unsigned long arg)
{
	struct cfhsi *cfhsi = (struct cfhsi *)arg;

	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	/* Schedule power down work queue. */
	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		queue_work(cfhsi->wq, &cfhsi->wake_down_work);
}

static void cfhsi_update_aggregation_stats(struct cfhsi *cfhsi,
					   const struct sk_buff *skb,
					   int direction)
{
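	/*
	 * Track the padded on-the-wire length of queued frames in
	 * aggregation_len: direction > 0 on enqueue, direction < 0
	 * when a frame is sent or dropped.
	 */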
	struct caif_payload_info *info;
	int hpad, tpad, len;

	info = (struct caif_payload_info *)&skb->cb;
	hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
	tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);
	len = skb->len + hpad + tpad;

	if (direction > 0)
		cfhsi->aggregation_len += len;
	else if (direction < 0)
		cfhsi->aggregation_len -= len;
}

static bool cfhsi_can_send_aggregate(struct cfhsi *cfhsi)
{
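	/*
	 * Send immediately if aggregation is disabled, if any frame
	 * above best-effort/background priority is queued, or if the
	 * bulk queue already holds a full transfer (CFHSI_MAX_PKTS).
	 */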
	int i;

	if (cfhsi->aggregation_timeout == 0)
		return true;

	for (i = 0; i < CFHSI_PRIO_BEBK; ++i) {
		if (cfhsi->qhead[i].qlen)
			return true;
	}

	/* TODO: Use aggregation_len instead */
	if (cfhsi->qhead[CFHSI_PRIO_BEBK].qlen >= CFHSI_MAX_PKTS)
		return true;

	return false;
}

static struct sk_buff *cfhsi_dequeue(struct cfhsi *cfhsi)
{
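	/* Dequeue in strict priority order: first non-empty queue wins. */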
	struct sk_buff *skb;
	int i;

	for (i = 0; i < CFHSI_PRIO_LAST; ++i) {
		skb = skb_dequeue(&cfhsi->qhead[i]);
		if (skb)
			break;
	}

	return skb;
}

static int cfhsi_tx_queue_len(struct cfhsi *cfhsi)
{
	int i, len = 0;
	for (i = 0; i < CFHSI_PRIO_LAST; ++i)
		len += skb_queue_len(&cfhsi->qhead[i]);
	return len;
}

static void cfhsi_abort_tx(struct cfhsi *cfhsi)
{
	struct sk_buff *skb;

	for (;;) {
		spin_lock_bh(&cfhsi->lock);
		skb = cfhsi_dequeue(cfhsi);
		if (!skb)
			break;

		cfhsi->ndev->stats.tx_errors++;
		cfhsi->ndev->stats.tx_dropped++;
		cfhsi_update_aggregation_stats(cfhsi, skb, -1);
		spin_unlock_bh(&cfhsi->lock);
		kfree_skb(skb);
	}
	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		mod_timer(&cfhsi->inactivity_timer,
			jiffies + cfhsi->inactivity_timeout);
	spin_unlock_bh(&cfhsi->lock);
}

static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
{
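	/*
	 * Drain stale data from the receive FIFO: read it chunk by chunk
	 * into a scratch buffer until the FIFO reports empty.
	 */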
	char buffer[32]; /* Any reasonable value */
	size_t fifo_occupancy;
	int ret;

	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	do {
		ret = cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
				&fifo_occupancy);
		if (ret) {
			netdev_warn(cfhsi->ndev,
				"%s: can't get FIFO occupancy: %d.\n",
				__func__, ret);
			break;
		} else if (!fifo_occupancy)
			/* No more data, exiting normally */
			break;

		fifo_occupancy = min(sizeof(buffer), fifo_occupancy);
		set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
		ret = cfhsi->ops->cfhsi_rx(buffer, fifo_occupancy,
				cfhsi->ops);
		if (ret) {
			clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
			netdev_warn(cfhsi->ndev,
				"%s: can't read data: %d.\n",
				__func__, ret);
			break;
		}

		ret = 5 * HZ;
		ret = wait_event_interruptible_timeout(cfhsi->flush_fifo_wait,
			 !test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret);

		if (ret < 0) {
			netdev_warn(cfhsi->ndev,
				"%s: can't wait for flush complete: %d.\n",
				__func__, ret);
			break;
		} else if (!ret) {
			ret = -ETIMEDOUT;
			netdev_warn(cfhsi->ndev,
				"%s: timeout waiting for flush complete.\n",
				__func__);
			break;
		}
	} while (1);

	return ret;
}

static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
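	/*
	 * Build one HSI transfer: a small CAIF frame may be embedded in
	 * the descriptor itself, the rest are appended as padded payload
	 * frames, at most CFHSI_MAX_PKTS per transfer.
	 */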
	int nfrms = 0;
	int pld_len = 0;
	struct sk_buff *skb;
	u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;

	skb = cfhsi_dequeue(cfhsi);
	if (!skb)
		return 0;

	/* Clear offset. */
	desc->offset = 0;

	/* Check if we can embed a CAIF frame. */
	if (skb->len < CFHSI_MAX_EMB_FRM_SZ) {
		struct caif_payload_info *info;
		int hpad;
		int tpad;

		/* Calculate needed head alignment and tail alignment. */
		info = (struct caif_payload_info *)&skb->cb;

		hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
		tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);

		/* Check if frame still fits with added alignment. */
		if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) {
			u8 *pemb = desc->emb_frm;
			desc->offset = CFHSI_DESC_SHORT_SZ;
			*pemb = (u8)(hpad - 1);
			pemb += hpad;

			/* Update network statistics. */
			spin_lock_bh(&cfhsi->lock);
			cfhsi->ndev->stats.tx_packets++;
			cfhsi->ndev->stats.tx_bytes += skb->len;
			cfhsi_update_aggregation_stats(cfhsi, skb, -1);
			spin_unlock_bh(&cfhsi->lock);

			/* Copy in embedded CAIF frame. */
			skb_copy_bits(skb, 0, pemb, skb->len);

			/* Consume the SKB */
			consume_skb(skb);
			skb = NULL;
		}
	}

	/* Create payload CAIF frames. */
	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
	while (nfrms < CFHSI_MAX_PKTS) {
		struct caif_payload_info *info;
		int hpad;
		int tpad;

		if (!skb)
			skb = cfhsi_dequeue(cfhsi);

		if (!skb)
			break;

		/* Calculate needed head alignment and tail alignment. */
		info = (struct caif_payload_info *)&skb->cb;

		hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
		tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);

		/* Fill in CAIF frame length in descriptor. */
		desc->cffrm_len[nfrms] = hpad + skb->len + tpad;

		/* Fill head padding information. */
		*pfrm = (u8)(hpad - 1);
		pfrm += hpad;

		/* Update network statistics. */
		spin_lock_bh(&cfhsi->lock);
		cfhsi->ndev->stats.tx_packets++;
		cfhsi->ndev->stats.tx_bytes += skb->len;
		cfhsi_update_aggregation_stats(cfhsi, skb, -1);
		spin_unlock_bh(&cfhsi->lock);

		/* Copy in CAIF frame. */
		skb_copy_bits(skb, 0, pfrm, skb->len);

		/* Update payload length. */
		pld_len += desc->cffrm_len[nfrms];

		/* Update frame pointer. */
		pfrm += skb->len + tpad;

		/* Consume the SKB */
		consume_skb(skb);
		skb = NULL;

		/* Update number of frames. */
		nfrms++;
	}

	/* Unused length fields should be zero-filled (according to SPEC). */
	while (nfrms < CFHSI_MAX_PKTS) {
		desc->cffrm_len[nfrms] = 0x0000;
		nfrms++;
	}

	/* Check if we can piggy-back another descriptor. */
	if (cfhsi_can_send_aggregate(cfhsi))
		desc->header |= CFHSI_PIGGY_DESC;
	else
		desc->header &= ~CFHSI_PIGGY_DESC;

	return CFHSI_DESC_SZ + pld_len;
}

static void cfhsi_start_tx(struct cfhsi *cfhsi)
{
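	/*
	 * Build and submit HSI frames until the TX queues are empty;
	 * when nothing is left, go idle and arm the inactivity timer.
	 */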
	struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
	int len, res;

	netdev_dbg(cfhsi->ndev, "%s.\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	do {
		/* Create HSI frame. */
		len = cfhsi_tx_frm(desc, cfhsi);
		if (!len) {
			spin_lock_bh(&cfhsi->lock);
			if (unlikely(cfhsi_tx_queue_len(cfhsi))) {
				spin_unlock_bh(&cfhsi->lock);
				res = -EAGAIN;
				continue;
			}
			cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
			/* Start inactivity timer. */
			mod_timer(&cfhsi->inactivity_timer,
				jiffies + cfhsi->inactivity_timeout);
			spin_unlock_bh(&cfhsi->lock);
			break;
		}

		/* Set up new transfer. */
		res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
		if (WARN_ON(res < 0))
			netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
				__func__, res);
	} while (res < 0);
}

static void cfhsi_tx_done(struct cfhsi *cfhsi)
{
	netdev_dbg(cfhsi->ndev, "%s.\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/*
	 * Send flow on if flow off has been previously signalled
	 * and number of packets is below low water mark.
	 */
	spin_lock_bh(&cfhsi->lock);
	if (cfhsi->flow_off_sent &&
			cfhsi_tx_queue_len(cfhsi) <= cfhsi->q_low_mark &&
			cfhsi->cfdev.flowctrl) {

		cfhsi->flow_off_sent = 0;
		cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
	}

	if (cfhsi_can_send_aggregate(cfhsi)) {
		spin_unlock_bh(&cfhsi->lock);
		cfhsi_start_tx(cfhsi);
	} else {
		mod_timer(&cfhsi->aggregation_timer,
			jiffies + cfhsi->aggregation_timeout);
		spin_unlock_bh(&cfhsi->lock);
	}

	return;
}

static void cfhsi_tx_done_cb(struct cfhsi_cb_ops *cb_ops)
{
	struct cfhsi *cfhsi;

	cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;
	cfhsi_tx_done(cfhsi);
}

static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
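	/*
	 * Parse a received descriptor: deliver any embedded CAIF frame
	 * and return the size of the payload transfer it announces,
	 * including a piggy-backed descriptor if one is flagged.
	 */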
	int xfer_sz = 0;
	int nfrms = 0;
	u16 *plen = NULL;
	u8 *pfrm = NULL;

	if ((desc->header & ~CFHSI_PIGGY_DESC) ||
			(desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
		netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
			__func__);
		return -EPROTO;
	}

	/* Check for embedded CAIF frame. */
	if (desc->offset) {
		struct sk_buff *skb;
		u8 *dst = NULL;
		int len = 0;
		pfrm = ((u8 *)desc) + desc->offset;

		/* Remove offset padding. */
		pfrm += *pfrm + 1;

		/* Read length of CAIF frame (little endian). */
		len = *pfrm;
		len |= ((*(pfrm+1)) << 8) & 0xFF00;
		len += 2;	/* Add FCS fields. */

		/* Sanity check length of CAIF frame. */
		if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
			netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
				__func__);
			return -EPROTO;
		}

		/* Allocate SKB (OK even in IRQ context). */
		skb = alloc_skb(len + 1, GFP_ATOMIC);
		if (!skb) {
			netdev_err(cfhsi->ndev, "%s: Out of memory!\n",
				__func__);
			return -ENOMEM;
		}
		caif_assert(skb != NULL);

		dst = skb_put(skb, len);
		memcpy(dst, pfrm, len);

		skb->protocol = htons(ETH_P_CAIF);
		skb_reset_mac_header(skb);
		skb->dev = cfhsi->ndev;

		/*
		 * We are in a callback handler and
		 * unfortunately we don't know what context we're
		 * running in.
		 */
		if (in_interrupt())
			netif_rx(skb);
		else
			netif_rx_ni(skb);

		/* Update network statistics. */
		cfhsi->ndev->stats.rx_packets++;
		cfhsi->ndev->stats.rx_bytes += len;
	}

	/* Calculate transfer length. */
	plen = desc->cffrm_len;
	while (nfrms < CFHSI_MAX_PKTS && *plen) {
		xfer_sz += *plen;
		plen++;
		nfrms++;
	}

	/* Check for piggy-backed descriptor. */
	if (desc->header & CFHSI_PIGGY_DESC)
		xfer_sz += CFHSI_DESC_SZ;

	if ((xfer_sz % 4) || (xfer_sz > (CFHSI_BUF_SZ_RX - CFHSI_DESC_SZ))) {
		netdev_err(cfhsi->ndev,
			"%s: Invalid payload len: %d, ignored.\n",
			__func__, xfer_sz);
		return -EPROTO;
	}
	return xfer_sz;
}

static int cfhsi_rx_desc_len(struct cfhsi_desc *desc)
{
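	/*
	 * Compute the expected size of the payload transfer announced by
	 * a descriptor without touching the payload itself.
	 */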
	int xfer_sz = 0;
	int nfrms = 0;
	u16 *plen;

	if ((desc->header & ~CFHSI_PIGGY_DESC) ||
			(desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {

		pr_err("Invalid descriptor. %x %x\n", desc->header,
				desc->offset);
		return -EPROTO;
	}

	/* Calculate transfer length. */
	plen = desc->cffrm_len;
	while (nfrms < CFHSI_MAX_PKTS && *plen) {
		xfer_sz += *plen;
		plen++;
		nfrms++;
	}

	if (xfer_sz % 4) {
		pr_err("Invalid payload len: %d, ignored.\n", xfer_sz);
		return -EPROTO;
	}
	return xfer_sz;
}

static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
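	/*
	 * Deliver the CAIF frames of a payload transfer, resuming after
	 * rx_state.nfrms frames if an earlier pass ran out of memory.
	 */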
	int rx_sz = 0;
	int nfrms = 0;
	u16 *plen = NULL;
	u8 *pfrm = NULL;

	/* Sanity check header and offset. */
	if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) ||
			(desc->offset > CFHSI_MAX_EMB_FRM_SZ))) {
		netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
			__func__);
		return -EPROTO;
	}

	/* Set frame pointer to start of payload. */
	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
	plen = desc->cffrm_len;

	/* Skip already processed frames. */
	while (nfrms < cfhsi->rx_state.nfrms) {
		pfrm += *plen;
		rx_sz += *plen;
		plen++;
		nfrms++;
	}

	/* Parse payload. */
	while (nfrms < CFHSI_MAX_PKTS && *plen) {
		struct sk_buff *skb;
		u8 *dst = NULL;
		u8 *pcffrm = NULL;
		int len;

		/* CAIF frame starts after head padding. */
		pcffrm = pfrm + *pfrm + 1;

		/* Read length of CAIF frame (little endian). */
		len = *pcffrm;
		len |= ((*(pcffrm + 1)) << 8) & 0xFF00;
		len += 2;	/* Add FCS fields. */

		/* Sanity check length of CAIF frames. */
		if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
			netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
				__func__);
			return -EPROTO;
		}

		/* Allocate SKB (OK even in IRQ context). */
		skb = alloc_skb(len + 1, GFP_ATOMIC);
		if (!skb) {
			netdev_err(cfhsi->ndev, "%s: Out of memory!\n",
				__func__);
			cfhsi->rx_state.nfrms = nfrms;
			return -ENOMEM;
		}
		caif_assert(skb != NULL);

		dst = skb_put(skb, len);
		memcpy(dst, pcffrm, len);

		skb->protocol = htons(ETH_P_CAIF);
		skb_reset_mac_header(skb);
		skb->dev = cfhsi->ndev;

		/*
		 * We're called in callback from HSI
		 * and don't know the context we're running in.
		 */
		if (in_interrupt())
			netif_rx(skb);
		else
			netif_rx_ni(skb);

		/* Update network statistics. */
		cfhsi->ndev->stats.rx_packets++;
		cfhsi->ndev->stats.rx_bytes += len;

		pfrm += *plen;
		rx_sz += *plen;
		plen++;
		nfrms++;
	}

	return rx_sz;
}

static void cfhsi_rx_done(struct cfhsi *cfhsi)
{
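	/*
	 * A piggy-backed descriptor at the end of a payload transfer
	 * announces the next transfer, so the next read can be started
	 * before the current buffer is parsed; rx_buf and rx_flip_buf
	 * are swapped to keep unparsed data stable in the meantime.
	 */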
	int res;
	int desc_pld_len = 0, rx_len, rx_state;
	struct cfhsi_desc *desc = NULL;
	u8 *rx_ptr, *rx_buf;
	struct cfhsi_desc *piggy_desc = NULL;

	desc = (struct cfhsi_desc *)cfhsi->rx_buf;

	netdev_dbg(cfhsi->ndev, "%s\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Update inactivity timer if pending. */
	spin_lock_bh(&cfhsi->lock);
	mod_timer_pending(&cfhsi->inactivity_timer,
			jiffies + cfhsi->inactivity_timeout);
	spin_unlock_bh(&cfhsi->lock);

	if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
		desc_pld_len = cfhsi_rx_desc_len(desc);

		if (desc_pld_len < 0)
			goto out_of_sync;

		rx_buf = cfhsi->rx_buf;
		rx_len = desc_pld_len;
		if (desc_pld_len > 0 && (desc->header & CFHSI_PIGGY_DESC))
			rx_len += CFHSI_DESC_SZ;
		if (desc_pld_len == 0)
			rx_buf = cfhsi->rx_flip_buf;
	} else {
		rx_buf = cfhsi->rx_flip_buf;

		rx_len = CFHSI_DESC_SZ;
		if (cfhsi->rx_state.pld_len > 0 &&
				(desc->header & CFHSI_PIGGY_DESC)) {

			piggy_desc = (struct cfhsi_desc *)
				(desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ +
						cfhsi->rx_state.pld_len);

			cfhsi->rx_state.piggy_desc = true;

			/* Extract payload len from piggy-backed descriptor. */
			desc_pld_len = cfhsi_rx_desc_len(piggy_desc);
			if (desc_pld_len < 0)
				goto out_of_sync;

			if (desc_pld_len > 0) {
				rx_len = desc_pld_len;
				if (piggy_desc->header & CFHSI_PIGGY_DESC)
					rx_len += CFHSI_DESC_SZ;
			}

			/*
			 * Copy needed information from the piggy-backed
			 * descriptor to the descriptor in the start.
			 */
			memcpy(rx_buf, (u8 *)piggy_desc,
					CFHSI_DESC_SHORT_SZ);
			/* Mark no embedded frame here */
			piggy_desc->offset = 0;
		}
	}

	if (desc_pld_len) {
		rx_state = CFHSI_RX_STATE_PAYLOAD;
		rx_ptr = rx_buf + CFHSI_DESC_SZ;
	} else {
		rx_state = CFHSI_RX_STATE_DESC;
		rx_ptr = rx_buf;
		rx_len = CFHSI_DESC_SZ;
	}

	/* Initiate next read */
	if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) {
		/* Set up new transfer. */
		netdev_dbg(cfhsi->ndev, "%s: Start RX.\n",
				__func__);

		res = cfhsi->ops->cfhsi_rx(rx_ptr, rx_len,
				cfhsi->ops);
		if (WARN_ON(res < 0)) {
			netdev_err(cfhsi->ndev, "%s: RX error %d.\n",
				__func__, res);
			cfhsi->ndev->stats.rx_errors++;
			cfhsi->ndev->stats.rx_dropped++;
		}
	}

	if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
		/* Extract payload from descriptor */
		if (cfhsi_rx_desc(desc, cfhsi) < 0)
			goto out_of_sync;
	} else {
		/* Extract payload */
		if (cfhsi_rx_pld(desc, cfhsi) < 0)
			goto out_of_sync;
		if (piggy_desc) {
			/* Extract any payload in piggyback descriptor. */
			if (cfhsi_rx_desc(piggy_desc, cfhsi) < 0)
				goto out_of_sync;
		}
	}

	/* Update state info */
	memset(&cfhsi->rx_state, 0, sizeof(cfhsi->rx_state));
	cfhsi->rx_state.state = rx_state;
	cfhsi->rx_ptr = rx_ptr;
	cfhsi->rx_len = rx_len;
	cfhsi->rx_state.pld_len = desc_pld_len;
	cfhsi->rx_state.piggy_desc = desc->header & CFHSI_PIGGY_DESC;

	if (rx_buf != cfhsi->rx_buf)
		swap(cfhsi->rx_buf, cfhsi->rx_flip_buf);
	return;

out_of_sync:
	netdev_err(cfhsi->ndev, "%s: Out of sync.\n", __func__);
	print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE,
			cfhsi->rx_buf, CFHSI_DESC_SZ);
	schedule_work(&cfhsi->out_of_sync_work);
}

static void cfhsi_rx_slowpath(unsigned long arg)
{
	struct cfhsi *cfhsi = (struct cfhsi *)arg;

	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	cfhsi_rx_done(cfhsi);
}

static void cfhsi_rx_done_cb(struct cfhsi_cb_ops *cb_ops)
{
	struct cfhsi *cfhsi;

	cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	if (test_and_clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits))
		wake_up_interruptible(&cfhsi->flush_fifo_wait);
	else
		cfhsi_rx_done(cfhsi);
}

static void cfhsi_wake_up(struct work_struct *work)
{
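	/*
	 * Power-up handshake: assert the wake line, wait for the peer's
	 * acknowledgement, then resume RX and any pending TX.
	 */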
	struct cfhsi *cfhsi = NULL;
	int res;
	int len;
	long ret;

	cfhsi = container_of(work, struct cfhsi, wake_up_work);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	if (unlikely(test_bit(CFHSI_AWAKE, &cfhsi->bits))) {
		/* This happens when wakeup is requested by
		 * both ends at the same time. */
		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
		return;
	}

	/* Activate wake line. */
	cfhsi->ops->cfhsi_wake_up(cfhsi->ops);

	netdev_dbg(cfhsi->ndev, "%s: Start waiting.\n",
		__func__);

	/* Wait for acknowledge. */
	ret = CFHSI_WAKE_TOUT;
	ret = wait_event_interruptible_timeout(cfhsi->wake_up_wait,
					test_and_clear_bit(CFHSI_WAKE_UP_ACK,
							&cfhsi->bits), ret);
	if (unlikely(ret < 0)) {
		/* Interrupted by signal. */
		netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
			__func__, ret);

		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
		return;
	} else if (!ret) {
		bool ca_wake = false;
		size_t fifo_occupancy = 0;

		/* Wakeup timeout */
		netdev_dbg(cfhsi->ndev, "%s: Timeout.\n",
			__func__);

		/* Check the FIFO to see if the modem has sent something. */
		WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
					&fifo_occupancy));

		netdev_dbg(cfhsi->ndev, "%s: Bytes in FIFO: %u.\n",
				__func__, (unsigned) fifo_occupancy);

		/* Check if we missed the interrupt. */
		WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
							&ca_wake));

		if (ca_wake) {
			netdev_err(cfhsi->ndev, "%s: CA Wake missed!\n",
				__func__);

			/* Clear the CFHSI_WAKE_UP_ACK bit to prevent race. */
			clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);

			/* Continue execution. */
			goto wake_ack;
		}

		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
		return;
	}
wake_ack:
	netdev_dbg(cfhsi->ndev, "%s: Woken.\n",
		__func__);

	/* Mark the link awake and clear the wake-up request. */
	set_bit(CFHSI_AWAKE, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);

	/* Resume read operation. */
	netdev_dbg(cfhsi->ndev, "%s: Start RX.\n", __func__);
	res = cfhsi->ops->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->ops);

	if (WARN_ON(res < 0))
		netdev_err(cfhsi->ndev, "%s: RX err %d.\n", __func__, res);

	/* Clear power up acknowledgement. */
	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);

	spin_lock_bh(&cfhsi->lock);

	/* Resume transmit if queues are not empty; otherwise just start
	 * the inactivity timer and return. */
	if (!cfhsi_tx_queue_len(cfhsi)) {
		netdev_dbg(cfhsi->ndev, "%s: Peer wake, start timer.\n",
			__func__);
		/* Start inactivity timer. */
		mod_timer(&cfhsi->inactivity_timer,
				jiffies + cfhsi->inactivity_timeout);
		spin_unlock_bh(&cfhsi->lock);
		return;
	}

	netdev_dbg(cfhsi->ndev, "%s: Host wake.\n",
		__func__);

	spin_unlock_bh(&cfhsi->lock);

	/* Create HSI frame. */
	len = cfhsi_tx_frm((struct cfhsi_desc *)cfhsi->tx_buf, cfhsi);

	if (likely(len > 0)) {
		/* Set up new transfer. */
		res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
		if (WARN_ON(res < 0)) {
			netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
				__func__, res);
			cfhsi_abort_tx(cfhsi);
		}
	} else {
		netdev_err(cfhsi->ndev,
				"%s: Failed to create HSI frame: %d.\n",
				__func__, len);
	}
}

static void cfhsi_wake_down(struct work_struct *work)
{
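	/*
	 * Power-down sequence: deactivate the wake line, wait for the
	 * peer's acknowledgement and let the FIFO drain before clearing
	 * AWAKE and cancelling outstanding RX requests.
	 */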
	long ret;
	struct cfhsi *cfhsi = NULL;
	size_t fifo_occupancy = 0;
	int retry = CFHSI_WAKE_TOUT;

	cfhsi = container_of(work, struct cfhsi, wake_down_work);
	netdev_dbg(cfhsi->ndev, "%s.\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Deactivate wake line. */
	cfhsi->ops->cfhsi_wake_down(cfhsi->ops);

	/* Wait for acknowledge. */
	ret = CFHSI_WAKE_TOUT;
	ret = wait_event_interruptible_timeout(cfhsi->wake_down_wait,
					test_and_clear_bit(CFHSI_WAKE_DOWN_ACK,
							&cfhsi->bits), ret);
	if (ret < 0) {
		/* Interrupted by signal. */
		netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
			__func__, ret);
		return;
	} else if (!ret) {
		bool ca_wake = true;

		/* Timeout */
		netdev_err(cfhsi->ndev, "%s: Timeout.\n", __func__);

		/* Check if we missed the interrupt. */
		WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
							&ca_wake));
		if (!ca_wake)
			netdev_err(cfhsi->ndev, "%s: CA Wake missed!\n",
				__func__);
	}

	/* Check FIFO occupancy. */
	while (retry) {
		WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
							&fifo_occupancy));

		if (!fifo_occupancy)
			break;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(1);
		retry--;
	}

	if (!retry)
		netdev_err(cfhsi->ndev, "%s: FIFO Timeout.\n", __func__);

	/* Clear AWAKE condition. */
	clear_bit(CFHSI_AWAKE, &cfhsi->bits);

	/* Cancel pending RX requests. */
	cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);
}

static void cfhsi_out_of_sync(struct work_struct *work)
{
	struct cfhsi *cfhsi = NULL;

	cfhsi = container_of(work, struct cfhsi, out_of_sync_work);

	rtnl_lock();
	dev_close(cfhsi->ndev);
	rtnl_unlock();
}

static void cfhsi_wake_up_cb(struct cfhsi_cb_ops *cb_ops)
{
	struct cfhsi *cfhsi = NULL;

	cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
	wake_up_interruptible(&cfhsi->wake_up_wait);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Schedule wake up work queue if the peer initiates. */
	if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
		queue_work(cfhsi->wq, &cfhsi->wake_up_work);
}

static void cfhsi_wake_down_cb(struct cfhsi_cb_ops *cb_ops)
{
	struct cfhsi *cfhsi = NULL;

	cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	/* Initiating low power is only permitted by the host (us). */
	set_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
	wake_up_interruptible(&cfhsi->wake_down_wait);
}

static void cfhsi_aggregation_tout(unsigned long arg)
{
	struct cfhsi *cfhsi = (struct cfhsi *)arg;

	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	cfhsi_start_tx(cfhsi);
}

static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct cfhsi *cfhsi = NULL;
	int start_xfer = 0;
	int timer_active;
	int prio;

	if (!dev)
		return -EINVAL;

	cfhsi = netdev_priv(dev);

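	/* Map the socket buffer priority onto the four CAIF HSI queues. */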
	switch (skb->priority) {
	case TC_PRIO_BESTEFFORT:
	case TC_PRIO_FILLER:
	case TC_PRIO_BULK:
		prio = CFHSI_PRIO_BEBK;
		break;
	case TC_PRIO_INTERACTIVE_BULK:
		prio = CFHSI_PRIO_VI;
		break;
	case TC_PRIO_INTERACTIVE:
		prio = CFHSI_PRIO_VO;
		break;
	case TC_PRIO_CONTROL:
	default:
		prio = CFHSI_PRIO_CTL;
		break;
	}

	spin_lock_bh(&cfhsi->lock);

	/* Update aggregation statistics  */
	cfhsi_update_aggregation_stats(cfhsi, skb, 1);

	/* Queue the SKB */
	skb_queue_tail(&cfhsi->qhead[prio], skb);

	/* Sanity check; xmit should not be called after unregister_netdev */
	if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) {
		spin_unlock_bh(&cfhsi->lock);
		cfhsi_abort_tx(cfhsi);
		return -EINVAL;
	}

	/* Send flow off if number of packets is above high water mark. */
	if (!cfhsi->flow_off_sent &&
		cfhsi_tx_queue_len(cfhsi) > cfhsi->q_high_mark &&
		cfhsi->cfdev.flowctrl) {
		cfhsi->flow_off_sent = 1;
		cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
	}

	if (cfhsi->tx_state == CFHSI_TX_STATE_IDLE) {
		cfhsi->tx_state = CFHSI_TX_STATE_XFER;
		start_xfer = 1;
	}

	if (!start_xfer) {
		/* Send aggregate if it is possible */
		bool aggregate_ready =
			cfhsi_can_send_aggregate(cfhsi) &&
			del_timer(&cfhsi->aggregation_timer) > 0;
		spin_unlock_bh(&cfhsi->lock);
		if (aggregate_ready)
			cfhsi_start_tx(cfhsi);
		return 0;
	}