/*
 * Copyright (C) ST-Ericsson AB 2010
 * Author:  Daniel Martensson / daniel.martensson@stericsson.com
 *	    Dmitry.Tarnyagin  / dmitry.tarnyagin@stericsson.com
 * License terms: GNU General Public License (GPL) version 2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/if_arp.h>
#include <linux/timer.h>
#include <net/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <net/caif/caif_layer.h>
#include <net/caif/caif_hsi.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Martensson<daniel.martensson@stericsson.com>");
MODULE_DESCRIPTION("CAIF HSI driver");

/* Returns the number of padding bytes for alignment. */
#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\
				(((pow)-((x)&((pow)-1)))))
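
/*
 * Worked example (illustrative numbers only, not used below): with pow = 4,
 * PAD_POW2(5, 4) = 4 - (5 & 3) = 3 padding bytes, while PAD_POW2(8, 4) = 0
 * since 8 is already aligned. "pow" must be a power of two for the mask
 * arithmetic to hold.
 */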

static const struct cfhsi_config hsi_default_config = {

	/* Inactivity timeout on HSI, ms */
	.inactivity_timeout = HZ,

	/* Aggregation timeout (ms) of zero means no aggregation is done */
	.aggregation_timeout = 1,

	/*
	 * HSI link layer flow-control thresholds.
	 * Threshold values for the HSI packet queue. Flow-control will be
	 * asserted when the number of packets exceeds q_high_mark. It will
	 * not be de-asserted before the number of packets drops below
	 * q_low_mark.
	 * Warning: A high threshold value might increase throughput but it
	 * will at the same time prevent channel prioritization and increase
	 * the risk of flooding the modem. The high threshold should be above
	 * the low one.
	 */
	.q_high_mark = 100,
	.q_low_mark = 50,

	/*
	 * HSI padding options.
	 * Warning: must be a power of 2 (the & operation is used) and cannot
	 * be zero!
	 */
	.head_align = 4,
	.tail_align = 4,
};
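
/*
 * Illustrative use of the queue thresholds above (these are the defaults,
 * not extra configuration): with q_high_mark = 100 and q_low_mark = 50,
 * cfhsi_xmit() signals flow-off once more than 100 packets are queued, and
 * cfhsi_tx_done() signals flow-on again only when the queues have drained
 * to 50 packets or fewer.
 */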

#define ON 1
#define OFF 0

static LIST_HEAD(cfhsi_list);

static void cfhsi_inactivity_tout(unsigned long arg)
{
	struct cfhsi *cfhsi = (struct cfhsi *)arg;

	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	/* Schedule power down work queue. */
	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		queue_work(cfhsi->wq, &cfhsi->wake_down_work);
}

static void cfhsi_update_aggregation_stats(struct cfhsi *cfhsi,
					   const struct sk_buff *skb,
					   int direction)
{
	struct caif_payload_info *info;
	int hpad, tpad, len;

	info = (struct caif_payload_info *)&skb->cb;
	hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
	tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
	len = skb->len + hpad + tpad;

	if (direction > 0)
		cfhsi->aggregation_len += len;
	else if (direction < 0)
		cfhsi->aggregation_len -= len;
}
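
/*
 * Padding arithmetic, with hypothetical numbers: using the default
 * head_align = 4 and tail_align = 4, a CAIF header of hdr_len = 2 gives
 * hpad = 1 + PAD_POW2(3, 4) = 2 bytes of head padding, and a 54-byte SKB
 * then needs tpad = PAD_POW2(56, 4) = 0 bytes of tail padding, so the
 * length accounted above is 54 + 2 + 0 = 56 bytes.
 */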

static bool cfhsi_can_send_aggregate(struct cfhsi *cfhsi)
{
	int i;

	if (cfhsi->cfg.aggregation_timeout == 0)
		return true;

	for (i = 0; i < CFHSI_PRIO_BEBK; ++i) {
		if (cfhsi->qhead[i].qlen)
			return true;
	}

	/* TODO: Use aggregation_len instead */
	if (cfhsi->qhead[CFHSI_PRIO_BEBK].qlen >= CFHSI_MAX_PKTS)
		return true;

	return false;
}

static struct sk_buff *cfhsi_dequeue(struct cfhsi *cfhsi)
{
	struct sk_buff *skb;
	int i;

	for (i = 0; i < CFHSI_PRIO_LAST; ++i) {
		skb = skb_dequeue(&cfhsi->qhead[i]);
		if (skb)
			break;
	}

	return skb;
}

static int cfhsi_tx_queue_len(struct cfhsi *cfhsi)
{
	int i, len = 0;
	for (i = 0; i < CFHSI_PRIO_LAST; ++i)
		len += skb_queue_len(&cfhsi->qhead[i]);
	return len;
}

static void cfhsi_abort_tx(struct cfhsi *cfhsi)
{
	struct sk_buff *skb;

	for (;;) {
		spin_lock_bh(&cfhsi->lock);
		skb = cfhsi_dequeue(cfhsi);
		if (!skb)
			break;

		cfhsi->ndev->stats.tx_errors++;
		cfhsi->ndev->stats.tx_dropped++;
		cfhsi_update_aggregation_stats(cfhsi, skb, -1);
		spin_unlock_bh(&cfhsi->lock);
		kfree_skb(skb);
	}
	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		mod_timer(&cfhsi->inactivity_timer,
			jiffies + cfhsi->cfg.inactivity_timeout);
	spin_unlock_bh(&cfhsi->lock);
}

static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
{
	char buffer[32]; /* Any reasonable value */
	size_t fifo_occupancy;
	int ret;

	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	do {
		ret = cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
				&fifo_occupancy);
		if (ret) {
			netdev_warn(cfhsi->ndev,
				"%s: can't get FIFO occupancy: %d.\n",
				__func__, ret);
			break;
		} else if (!fifo_occupancy)
			/* No more data, exiting normally */
			break;

		fifo_occupancy = min(sizeof(buffer), fifo_occupancy);
		set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
		ret = cfhsi->ops->cfhsi_rx(buffer, fifo_occupancy,
				cfhsi->ops);
		if (ret) {
			clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
			netdev_warn(cfhsi->ndev,
				"%s: can't read data: %d.\n",
				__func__, ret);
			break;
		}

		ret = 5 * HZ;
		ret = wait_event_interruptible_timeout(cfhsi->flush_fifo_wait,
			 !test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret);

		if (ret < 0) {
			netdev_warn(cfhsi->ndev,
				"%s: can't wait for flush complete: %d.\n",
				__func__, ret);
			break;
		} else if (!ret) {
			ret = -ETIMEDOUT;
			netdev_warn(cfhsi->ndev,
				"%s: timeout waiting for flush complete.\n",
				__func__);
			break;
		}
	} while (1);

	return ret;
}

static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int nfrms = 0;
	int pld_len = 0;
	struct sk_buff *skb;
	u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;

	skb = cfhsi_dequeue(cfhsi);
	if (!skb)
		return 0;

	/* Clear offset. */
	desc->offset = 0;

	/* Check if we can embed a CAIF frame. */
	if (skb->len < CFHSI_MAX_EMB_FRM_SZ) {
		struct caif_payload_info *info;
		int hpad;
		int tpad;

		/* Calculate needed head alignment and tail alignment. */
		info = (struct caif_payload_info *)&skb->cb;

		hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
		tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);

		/* Check if frame still fits with added alignment. */
		if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) {
			u8 *pemb = desc->emb_frm;
			desc->offset = CFHSI_DESC_SHORT_SZ;
			*pemb = (u8)(hpad - 1);
			pemb += hpad;

			/* Update network statistics. */
			spin_lock_bh(&cfhsi->lock);
			cfhsi->ndev->stats.tx_packets++;
			cfhsi->ndev->stats.tx_bytes += skb->len;
			cfhsi_update_aggregation_stats(cfhsi, skb, -1);
			spin_unlock_bh(&cfhsi->lock);

			/* Copy in embedded CAIF frame. */
			skb_copy_bits(skb, 0, pemb, skb->len);

			/* Consume the SKB */
			consume_skb(skb);
			skb = NULL;
		}
	}

	/* Create payload CAIF frames. */
	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
	while (nfrms < CFHSI_MAX_PKTS) {
		struct caif_payload_info *info;
		int hpad;
		int tpad;

		if (!skb)
			skb = cfhsi_dequeue(cfhsi);

		if (!skb)
			break;

		/* Calculate needed head alignment and tail alignment. */
		info = (struct caif_payload_info *)&skb->cb;

		hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
		tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);

		/* Fill in CAIF frame length in descriptor. */
		desc->cffrm_len[nfrms] = hpad + skb->len + tpad;

		/* Fill head padding information. */
		*pfrm = (u8)(hpad - 1);
		pfrm += hpad;

		/* Update network statistics. */
		spin_lock_bh(&cfhsi->lock);
		cfhsi->ndev->stats.tx_packets++;
		cfhsi->ndev->stats.tx_bytes += skb->len;
		cfhsi_update_aggregation_stats(cfhsi, skb, -1);
		spin_unlock_bh(&cfhsi->lock);

		/* Copy in CAIF frame. */
		skb_copy_bits(skb, 0, pfrm, skb->len);

		/* Update payload length. */
		pld_len += desc->cffrm_len[nfrms];

		/* Update frame pointer. */
		pfrm += skb->len + tpad;

		/* Consume the SKB */
		consume_skb(skb);
		skb = NULL;

		/* Update number of frames. */
		nfrms++;
	}

	/* Unused length fields should be zero-filled (according to SPEC). */
	while (nfrms < CFHSI_MAX_PKTS) {
		desc->cffrm_len[nfrms] = 0x0000;
		nfrms++;
	}

	/* Check if we can piggy-back another descriptor. */
	if (cfhsi_can_send_aggregate(cfhsi))
		desc->header |= CFHSI_PIGGY_DESC;
	else
		desc->header &= ~CFHSI_PIGGY_DESC;

	return CFHSI_DESC_SZ + pld_len;
}
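
/*
 * Sketch of the transmit buffer laid out by cfhsi_tx_frm() above (a reading
 * aid, not a normative format description): a cfhsi_desc header of
 * CFHSI_DESC_SZ bytes, whose emb_frm area may carry one small CAIF frame
 * (desc->offset then points to it), followed by up to CFHSI_MAX_PKTS padded
 * CAIF frames whose lengths are recorded in desc->cffrm_len[]. The returned
 * value, CFHSI_DESC_SZ + pld_len, is the total number of bytes handed to
 * the cfhsi_tx() operation.
 */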

static void cfhsi_start_tx(struct cfhsi *cfhsi)
{
	struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
	int len, res;

	netdev_dbg(cfhsi->ndev, "%s.\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	do {
		/* Create HSI frame. */
		len = cfhsi_tx_frm(desc, cfhsi);
		if (!len) {
			spin_lock_bh(&cfhsi->lock);
			if (unlikely(cfhsi_tx_queue_len(cfhsi))) {
				spin_unlock_bh(&cfhsi->lock);
				res = -EAGAIN;
				continue;
			}
			cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
			/* Start inactivity timer. */
			mod_timer(&cfhsi->inactivity_timer,
				jiffies + cfhsi->cfg.inactivity_timeout);
			spin_unlock_bh(&cfhsi->lock);
			break;
		}

		/* Set up new transfer. */
		res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
		if (WARN_ON(res < 0))
			netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
				__func__, res);
	} while (res < 0);
}

static void cfhsi_tx_done(struct cfhsi *cfhsi)
{
	netdev_dbg(cfhsi->ndev, "%s.\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/*
	 * Send flow on if flow off has been previously signalled
	 * and number of packets is below low water mark.
	 */
	spin_lock_bh(&cfhsi->lock);
	if (cfhsi->flow_off_sent &&
			cfhsi_tx_queue_len(cfhsi) <= cfhsi->cfg.q_low_mark &&
			cfhsi->cfdev.flowctrl) {

		cfhsi->flow_off_sent = 0;
		cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
	}

	if (cfhsi_can_send_aggregate(cfhsi)) {
		spin_unlock_bh(&cfhsi->lock);
		cfhsi_start_tx(cfhsi);
	} else {
		mod_timer(&cfhsi->aggregation_timer,
			jiffies + cfhsi->cfg.aggregation_timeout);
		spin_unlock_bh(&cfhsi->lock);
	}

	return;
}

static void cfhsi_tx_done_cb(struct cfhsi_cb_ops *cb_ops)
{
	struct cfhsi *cfhsi;

	cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;
	cfhsi_tx_done(cfhsi);
}

static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int xfer_sz = 0;
	int nfrms = 0;
	u16 *plen = NULL;
	u8 *pfrm = NULL;

	if ((desc->header & ~CFHSI_PIGGY_DESC) ||
			(desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
		netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
			__func__);
		return -EPROTO;
	}

	/* Check for embedded CAIF frame. */
	if (desc->offset) {
		struct sk_buff *skb;
		u8 *dst = NULL;
		int len = 0;
		pfrm = ((u8 *)desc) + desc->offset;

		/* Remove offset padding. */
		pfrm += *pfrm + 1;

		/* Read length of CAIF frame (little endian). */
		len = *pfrm;
		len |= ((*(pfrm+1)) << 8) & 0xFF00;
		len += 2;	/* Add FCS fields. */
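		/*
		 * Example with hypothetical bytes: pfrm[0] = 0x05 and
		 * pfrm[1] = 0x01 give a frame length of 0x0105 = 261, plus
		 * the 2 FCS bytes, so len = 263.
		 */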

		/* Sanity check length of CAIF frame. */
		if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
			netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
				__func__);
			return -EPROTO;
		}

		/* Allocate SKB (OK even in IRQ context). */
		skb = alloc_skb(len + 1, GFP_ATOMIC);
		if (!skb) {
			netdev_err(cfhsi->ndev, "%s: Out of memory !\n",
				__func__);
			return -ENOMEM;
		}
		caif_assert(skb != NULL);

		dst = skb_put(skb, len);
		memcpy(dst, pfrm, len);

		skb->protocol = htons(ETH_P_CAIF);
		skb_reset_mac_header(skb);
		skb->dev = cfhsi->ndev;

		/*
		 * We are in a callback handler and
		 * unfortunately we don't know what context we're
		 * running in.
		 */
		if (in_interrupt())
			netif_rx(skb);
		else
			netif_rx_ni(skb);

		/* Update network statistics. */
		cfhsi->ndev->stats.rx_packets++;
		cfhsi->ndev->stats.rx_bytes += len;
	}

	/* Calculate transfer length. */
	plen = desc->cffrm_len;
	while (nfrms < CFHSI_MAX_PKTS && *plen) {
		xfer_sz += *plen;
		plen++;
		nfrms++;
	}

	/* Check for piggy-backed descriptor. */
	if (desc->header & CFHSI_PIGGY_DESC)
		xfer_sz += CFHSI_DESC_SZ;

	if ((xfer_sz % 4) || (xfer_sz > (CFHSI_BUF_SZ_RX - CFHSI_DESC_SZ))) {
		netdev_err(cfhsi->ndev,
				"%s: Invalid payload len: %d, ignored.\n",
			__func__, xfer_sz);
		return -EPROTO;
	}
	return xfer_sz;
}

static int cfhsi_rx_desc_len(struct cfhsi_desc *desc)
{
	int xfer_sz = 0;
	int nfrms = 0;
	u16 *plen;

	if ((desc->header & ~CFHSI_PIGGY_DESC) ||
			(desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {

		pr_err("Invalid descriptor. %x %x\n", desc->header,
				desc->offset);
		return -EPROTO;
	}

	/* Calculate transfer length. */
	plen = desc->cffrm_len;
	while (nfrms < CFHSI_MAX_PKTS && *plen) {
		xfer_sz += *plen;
		plen++;
		nfrms++;
	}

	if (xfer_sz % 4) {
		pr_err("Invalid payload len: %d, ignored.\n", xfer_sz);
		return -EPROTO;
	}
	return xfer_sz;
}

static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int rx_sz = 0;
	int nfrms = 0;
	u16 *plen = NULL;
	u8 *pfrm = NULL;

	/* Sanity check header and offset. */
	if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) ||
			(desc->offset > CFHSI_MAX_EMB_FRM_SZ))) {
		netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
			__func__);
		return -EPROTO;
	}

	/* Set frame pointer to start of payload. */
	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
	plen = desc->cffrm_len;

	/* Skip already processed frames. */
	while (nfrms < cfhsi->rx_state.nfrms) {
		pfrm += *plen;
		rx_sz += *plen;
		plen++;
		nfrms++;
	}

	/* Parse payload. */
	while (nfrms < CFHSI_MAX_PKTS && *plen) {
		struct sk_buff *skb;
		u8 *dst = NULL;
		u8 *pcffrm = NULL;
		int len;

		/* CAIF frame starts after head padding. */
		pcffrm = pfrm + *pfrm + 1;

		/* Read length of CAIF frame (little endian). */
		len = *pcffrm;
		len |= ((*(pcffrm + 1)) << 8) & 0xFF00;
		len += 2;	/* Add FCS fields. */

		/* Sanity check length of CAIF frames. */
		if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
			netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
				__func__);
			return -EPROTO;
		}

		/* Allocate SKB (OK even in IRQ context). */
		skb = alloc_skb(len + 1, GFP_ATOMIC);
		if (!skb) {
			netdev_err(cfhsi->ndev, "%s: Out of memory !\n",
				__func__);
			cfhsi->rx_state.nfrms = nfrms;
			return -ENOMEM;
		}
		caif_assert(skb != NULL);

		dst = skb_put(skb, len);
		memcpy(dst, pcffrm, len);

		skb->protocol = htons(ETH_P_CAIF);
		skb_reset_mac_header(skb);
		skb->dev = cfhsi->ndev;

		/*
		 * We're called in callback from HSI
		 * and don't know the context we're running in.
		 */
		if (in_interrupt())
			netif_rx(skb);
		else
			netif_rx_ni(skb);

		/* Update network statistics. */
		cfhsi->ndev->stats.rx_packets++;
		cfhsi->ndev->stats.rx_bytes += len;

		pfrm += *plen;
		rx_sz += *plen;
		plen++;
		nfrms++;
	}

	return rx_sz;
}

static void cfhsi_rx_done(struct cfhsi *cfhsi)
{
	int res;
	int desc_pld_len = 0, rx_len, rx_state;
	struct cfhsi_desc *desc = NULL;
	u8 *rx_ptr, *rx_buf;
	struct cfhsi_desc *piggy_desc = NULL;

	desc = (struct cfhsi_desc *)cfhsi->rx_buf;

	netdev_dbg(cfhsi->ndev, "%s\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Update inactivity timer if pending. */
	spin_lock_bh(&cfhsi->lock);
	mod_timer_pending(&cfhsi->inactivity_timer,
			jiffies + cfhsi->cfg.inactivity_timeout);
	spin_unlock_bh(&cfhsi->lock);

	if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
		desc_pld_len = cfhsi_rx_desc_len(desc);

		if (desc_pld_len < 0)
			goto out_of_sync;

		rx_buf = cfhsi->rx_buf;
		rx_len = desc_pld_len;
		if (desc_pld_len > 0 && (desc->header & CFHSI_PIGGY_DESC))
			rx_len += CFHSI_DESC_SZ;
		if (desc_pld_len == 0)
			rx_buf = cfhsi->rx_flip_buf;
	} else {
		rx_buf = cfhsi->rx_flip_buf;

		rx_len = CFHSI_DESC_SZ;
		if (cfhsi->rx_state.pld_len > 0 &&
				(desc->header & CFHSI_PIGGY_DESC)) {

			piggy_desc = (struct cfhsi_desc *)
				(desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ +
						cfhsi->rx_state.pld_len);

			cfhsi->rx_state.piggy_desc = true;

			/* Extract payload len from piggy-backed descriptor. */
			desc_pld_len = cfhsi_rx_desc_len(piggy_desc);
			if (desc_pld_len < 0)
				goto out_of_sync;

			if (desc_pld_len > 0) {
				rx_len = desc_pld_len;
				if (piggy_desc->header & CFHSI_PIGGY_DESC)
					rx_len += CFHSI_DESC_SZ;
			}

			/*
			 * Copy needed information from the piggy-backed
			 * descriptor to the descriptor in the start.
			 */
			memcpy(rx_buf, (u8 *)piggy_desc,
					CFHSI_DESC_SHORT_SZ);
		}
	}

	if (desc_pld_len) {
		rx_state = CFHSI_RX_STATE_PAYLOAD;
		rx_ptr = rx_buf + CFHSI_DESC_SZ;
	} else {
		rx_state = CFHSI_RX_STATE_DESC;
		rx_ptr = rx_buf;
		rx_len = CFHSI_DESC_SZ;
	}

	/* Initiate next read */
	if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) {
		/* Set up new transfer. */
		netdev_dbg(cfhsi->ndev, "%s: Start RX.\n",
				__func__);

		res = cfhsi->ops->cfhsi_rx(rx_ptr, rx_len,
				cfhsi->ops);
		if (WARN_ON(res < 0)) {
			netdev_err(cfhsi->ndev, "%s: RX error %d.\n",
				__func__, res);
			cfhsi->ndev->stats.rx_errors++;
			cfhsi->ndev->stats.rx_dropped++;
		}
	}

	if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
		/* Extract payload from descriptor */
		if (cfhsi_rx_desc(desc, cfhsi) < 0)
			goto out_of_sync;
	} else {
		/* Extract payload */
		if (cfhsi_rx_pld(desc, cfhsi) < 0)
			goto out_of_sync;
		if (piggy_desc) {
			/* Extract any payload in piggyback descriptor. */
			if (cfhsi_rx_desc(piggy_desc, cfhsi) < 0)
				goto out_of_sync;
			/* Mark no embedded frame after extracting it */
			piggy_desc->offset = 0;
		}
	}

	/* Update state info */
	memset(&cfhsi->rx_state, 0, sizeof(cfhsi->rx_state));
	cfhsi->rx_state.state = rx_state;
	cfhsi->rx_ptr = rx_ptr;
	cfhsi->rx_len = rx_len;
	cfhsi->rx_state.pld_len = desc_pld_len;
	cfhsi->rx_state.piggy_desc = desc->header & CFHSI_PIGGY_DESC;

	if (rx_buf != cfhsi->rx_buf)
		swap(cfhsi->rx_buf, cfhsi->rx_flip_buf);
	return;

out_of_sync:
	netdev_err(cfhsi->ndev, "%s: Out of sync.\n", __func__);
	print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE,
			cfhsi->rx_buf, CFHSI_DESC_SZ);
	schedule_work(&cfhsi->out_of_sync_work);
}

static void cfhsi_rx_slowpath(unsigned long arg)
{
	struct cfhsi *cfhsi = (struct cfhsi *)arg;

	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	cfhsi_rx_done(cfhsi);
}

static void cfhsi_rx_done_cb(struct cfhsi_cb_ops *cb_ops)
{
	struct cfhsi *cfhsi;

	cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	if (test_and_clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits))
		wake_up_interruptible(&cfhsi->flush_fifo_wait);
	else
		cfhsi_rx_done(cfhsi);
}

static void cfhsi_wake_up(struct work_struct *work)
{
	struct cfhsi *cfhsi = NULL;
	int res;
	int len;
	long ret;

	cfhsi = container_of(work, struct cfhsi, wake_up_work);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	if (unlikely(test_bit(CFHSI_AWAKE, &cfhsi->bits))) {
		/* It happens when wakeup is requested by
		 * both ends at the same time. */
		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
		return;
	}

	/* Activate wake line. */
	cfhsi->ops->cfhsi_wake_up(cfhsi->ops);

	netdev_dbg(cfhsi->ndev, "%s: Start waiting.\n",
		__func__);

	/* Wait for acknowledge. */
	ret = CFHSI_WAKE_TOUT;
	ret = wait_event_interruptible_timeout(cfhsi->wake_up_wait,
					test_and_clear_bit(CFHSI_WAKE_UP_ACK,
							&cfhsi->bits), ret);
	if (unlikely(ret < 0)) {
		/* Interrupted by signal. */
		netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
			__func__, ret);

		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
		return;
	} else if (!ret) {
		bool ca_wake = false;
		size_t fifo_occupancy = 0;

		/* Wakeup timeout */
		netdev_dbg(cfhsi->ndev, "%s: Timeout.\n",
			__func__);

		/* Check the FIFO to see if the modem has sent something. */
		WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
					&fifo_occupancy));

		netdev_dbg(cfhsi->ndev, "%s: Bytes in FIFO: %u.\n",
				__func__, (unsigned) fifo_occupancy);

		/* Check if we missed the interrupt. */
		WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
							&ca_wake));

		if (ca_wake) {
			netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n",
				__func__);

			/* Clear the CFHSI_WAKE_UP_ACK bit to prevent race. */
			clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);

			/* Continue execution. */
			goto wake_ack;
		}

		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
		return;
	}
wake_ack:
	netdev_dbg(cfhsi->ndev, "%s: Woken.\n",
		__func__);

	/* Mark the link as awake and clear the pending wake-up request. */
	set_bit(CFHSI_AWAKE, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);

	/* Resume read operation. */
	netdev_dbg(cfhsi->ndev, "%s: Start RX.\n", __func__);
	res = cfhsi->ops->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->ops);

	if (WARN_ON(res < 0))
		netdev_err(cfhsi->ndev, "%s: RX err %d.\n", __func__, res);

	/* Clear power up acknowledgment. */
	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);

	spin_lock_bh(&cfhsi->lock);

	/* Resume transmit if queues are not empty. */
	if (!cfhsi_tx_queue_len(cfhsi)) {
		netdev_dbg(cfhsi->ndev, "%s: Peer wake, start timer.\n",
			__func__);
		/* Start inactivity timer. */
		mod_timer(&cfhsi->inactivity_timer,
				jiffies + cfhsi->cfg.inactivity_timeout);
		spin_unlock_bh(&cfhsi->lock);
		return;
	}

	netdev_dbg(cfhsi->ndev, "%s: Host wake.\n",
		__func__);

	spin_unlock_bh(&cfhsi->lock);

	/* Create HSI frame. */
	len = cfhsi_tx_frm((struct cfhsi_desc *)cfhsi->tx_buf, cfhsi);

	if (likely(len > 0)) {
		/* Set up new transfer. */
		res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
		if (WARN_ON(res < 0)) {
			netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
				__func__, res);
			cfhsi_abort_tx(cfhsi);
		}
	} else {
		netdev_err(cfhsi->ndev,
				"%s: Failed to create HSI frame: %d.\n",
				__func__, len);
	}
}
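
/*
 * Summary of the wake handshake driven above (descriptive comment only):
 * cfhsi_wake_up() asserts the wake line and waits for CFHSI_WAKE_UP_ACK,
 * which cfhsi_wake_up_cb() sets when the peer acknowledges. On timeout the
 * FIFO occupancy and peer wake state are checked to catch a missed
 * interrupt; once awake, the pending RX transfer is resumed and any queued
 * TX is started.
 */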

static void cfhsi_wake_down(struct work_struct *work)
{
	long ret;
	struct cfhsi *cfhsi = NULL;
	size_t fifo_occupancy = 0;
	int retry = CFHSI_WAKE_TOUT;

	cfhsi = container_of(work, struct cfhsi, wake_down_work);
	netdev_dbg(cfhsi->ndev, "%s.\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Deactivate wake line. */
	cfhsi->ops->cfhsi_wake_down(cfhsi->ops);

	/* Wait for acknowledge. */
	ret = CFHSI_WAKE_TOUT;
	ret = wait_event_interruptible_timeout(cfhsi->wake_down_wait,
					test_and_clear_bit(CFHSI_WAKE_DOWN_ACK,
							&cfhsi->bits), ret);
	if (ret < 0) {
		/* Interrupted by signal. */
		netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
			__func__, ret);
		return;
	} else if (!ret) {
		bool ca_wake = true;

		/* Timeout */
		netdev_err(cfhsi->ndev, "%s: Timeout.\n", __func__);

		/* Check if we missed the interrupt. */
		WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
							&ca_wake));
		if (!ca_wake)
			netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n",
				__func__);
	}

	/* Check FIFO occupancy. */
	while (retry) {
		WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
							&fifo_occupancy));

		if (!fifo_occupancy)
			break;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(1);
		retry--;
	}

	if (!retry)
		netdev_err(cfhsi->ndev, "%s: FIFO Timeout.\n", __func__);

	/* Clear AWAKE condition. */
	clear_bit(CFHSI_AWAKE, &cfhsi->bits);

	/* Cancel pending RX requests. */
	cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);
}

static void cfhsi_out_of_sync(struct work_struct *work)
{
	struct cfhsi *cfhsi = NULL;

	cfhsi = container_of(work, struct cfhsi, out_of_sync_work);

	rtnl_lock();
	dev_close(cfhsi->ndev);
	rtnl_unlock();
}

static void cfhsi_wake_up_cb(struct cfhsi_cb_ops *cb_ops)
{
	struct cfhsi *cfhsi = NULL;

	cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
	wake_up_interruptible(&cfhsi->wake_up_wait);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Schedule wake up work queue if the peer initiates. */
	if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
		queue_work(cfhsi->wq, &cfhsi->wake_up_work);
}

static void cfhsi_wake_down_cb(struct cfhsi_cb_ops *cb_ops)
{
	struct cfhsi *cfhsi = NULL;

	cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	/* Initiating low power is only permitted by the host (us). */
	set_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
	wake_up_interruptible(&cfhsi->wake_down_wait);
}

static void cfhsi_aggregation_tout(unsigned long arg)
{
	struct cfhsi *cfhsi = (struct cfhsi *)arg;

	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	cfhsi_start_tx(cfhsi);
}

static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct cfhsi *cfhsi = NULL;
	int start_xfer = 0;
	int timer_active;
	int prio;

	if (!dev)
		return -EINVAL;

	cfhsi = netdev_priv(dev);

	switch (skb->priority) {
	case TC_PRIO_BESTEFFORT:
	case TC_PRIO_FILLER:
	case TC_PRIO_BULK:
		prio = CFHSI_PRIO_BEBK;
		break;
	case TC_PRIO_INTERACTIVE_BULK:
		prio = CFHSI_PRIO_VI;
		break;
	case TC_PRIO_INTERACTIVE:
		prio = CFHSI_PRIO_VO;
		break;
	case TC_PRIO_CONTROL:
	default:
		prio = CFHSI_PRIO_CTL;
		break;
	}

	spin_lock_bh(&cfhsi->lock);

	/* Update aggregation statistics  */
	cfhsi_update_aggregation_stats(cfhsi, skb, 1);

	/* Queue the SKB */
	skb_queue_tail(&cfhsi->qhead[prio], skb);

	/* Sanity check; xmit should not be called after unregister_netdev */
	if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) {
		spin_unlock_bh(&cfhsi->lock);
		cfhsi_abort_tx(cfhsi);
		return -EINVAL;
	}

	/* Send flow off if number of packets is above high water mark. */
	if (!cfhsi->flow_off_sent &&
		cfhsi_tx_queue_len(cfhsi) > cfhsi->cfg.q_high_mark &&
		cfhsi->cfdev.flowctrl) {
		cfhsi->flow_off_sent = 1;
		cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
	}

	if (cfhsi->tx_state == CFHSI_TX_STATE_IDLE) {
		cfhsi->tx_state = CFHSI_TX_STATE_XFER;
		start_xfer = 1;
	}

	if (!start_xfer) {
		/* Send aggregate if it is possible */
		bool aggregate_ready =
			cfhsi_can_send_aggregate(cfhsi) &&
			del_timer(&cfhsi->aggregation_timer) > 0;
		spin_unlock_bh(&cfhsi->lock);
		if (aggregate_ready)
			cfhsi_start_tx(cfhsi);
		return 0;
	}

	/* Delete inactivity timer if started. */
	timer_active = del_timer_sync(&cfhsi->inactivity_timer);

	spin_unlock_bh(&cfhsi->lock);

	if (timer_active) {
		struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
		int len;
		int res;

		/* Create HSI frame. */
		len = cfhsi_tx_frm(desc, cfhsi);
		WARN_ON(!len);

		/* Set up new transfer. */
		res =