/*
 * Copyright (c) 2010-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "htc.h"

/* identify firmware images */
#define FIRMWARE_AR7010_1_1     "htc_7010.fw"
#define FIRMWARE_AR9271         "htc_9271.fw"
22
23
24
25

MODULE_FIRMWARE(FIRMWARE_AR7010_1_1);
MODULE_FIRMWARE(FIRMWARE_AR9271);

26
static struct usb_device_id ath9k_hif_usb_ids[] = {
27
28
29
30
31
32
	{ USB_DEVICE(0x0cf3, 0x9271) }, /* Atheros */
	{ USB_DEVICE(0x0cf3, 0x1006) }, /* Atheros */
	{ USB_DEVICE(0x0846, 0x9030) }, /* Netgear N150 */
	{ USB_DEVICE(0x07D1, 0x3A10) }, /* Dlink Wireless 150 */
	{ USB_DEVICE(0x13D3, 0x3327) }, /* Azurewave */
	{ USB_DEVICE(0x13D3, 0x3328) }, /* Azurewave */
33
	{ USB_DEVICE(0x13D3, 0x3346) }, /* IMC Networks */
34
35
36
	{ USB_DEVICE(0x13D3, 0x3348) }, /* Azurewave */
	{ USB_DEVICE(0x13D3, 0x3349) }, /* Azurewave */
	{ USB_DEVICE(0x13D3, 0x3350) }, /* Azurewave */
37
	{ USB_DEVICE(0x04CA, 0x4605) }, /* Liteon */
38
	{ USB_DEVICE(0x040D, 0x3801) }, /* VIA */
39
	{ USB_DEVICE(0x0cf3, 0xb003) }, /* Ubiquiti WifiStation Ext */
40
41
42

	{ USB_DEVICE(0x0cf3, 0x7015),
	  .driver_info = AR9287_USB },  /* Atheros */
43
	{ USB_DEVICE(0x1668, 0x1200),
44
45
46
47
48
49
50
51
	  .driver_info = AR9287_USB },  /* Verizon */

	{ USB_DEVICE(0x0cf3, 0x7010),
	  .driver_info = AR9280_USB },  /* Atheros */
	{ USB_DEVICE(0x0846, 0x9018),
	  .driver_info = AR9280_USB },  /* Netgear WNDA3200 */
	{ USB_DEVICE(0x083A, 0xA704),
	  .driver_info = AR9280_USB },  /* SMC Networks */
52
53
	{ USB_DEVICE(0x0411, 0x017f),
	  .driver_info = AR9280_USB },  /* Sony UWA-BR100 */
54

55
56
57
	{ USB_DEVICE(0x0cf3, 0x20ff),
	  .driver_info = STORAGE_DEVICE },

58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
	{ },
};

MODULE_DEVICE_TABLE(usb, ath9k_hif_usb_ids);

static int __hif_usb_tx(struct hif_device_usb *hif_dev);

static void hif_usb_regout_cb(struct urb *urb)
{
	struct cmd_buf *cmd = (struct cmd_buf *)urb->context;

	switch (urb->status) {
	case 0:
		break;
	case -ENOENT:
	case -ECONNRESET:
	case -ENODEV:
	case -ESHUTDOWN:
76
		goto free;
77
78
79
80
81
82
	default:
		break;
	}

	if (cmd) {
		ath9k_htc_txcompletion_cb(cmd->hif_dev->htc_handle,
83
					  cmd->skb, true);
84
85
		kfree(cmd);
	}
86
87
88

	return;
free:
89
	kfree_skb(cmd->skb);
90
	kfree(cmd);
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
}

static int hif_usb_send_regout(struct hif_device_usb *hif_dev,
			       struct sk_buff *skb)
{
	struct urb *urb;
	struct cmd_buf *cmd;
	int ret = 0;

	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (urb == NULL)
		return -ENOMEM;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	cmd->skb = skb;
	cmd->hif_dev = hif_dev;

113
114
	usb_fill_bulk_urb(urb, hif_dev->udev,
			 usb_sndbulkpipe(hif_dev->udev, USB_REG_OUT_PIPE),
115
			 skb->data, skb->len,
116
			 hif_usb_regout_cb, cmd);
117

118
	usb_anchor_urb(urb, &hif_dev->regout_submitted);
119
120
	ret = usb_submit_urb(urb, GFP_KERNEL);
	if (ret) {
121
		usb_unanchor_urb(urb);
122
123
		kfree(cmd);
	}
124
	usb_free_urb(urb);
125
126
127
128

	return ret;
}

129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
/*
 * Completion callback for management/beacon frames sent outside the
 * TX buffer pool.
 *
 * Fix: the original read cmd->hif_dev into a local *before* the
 * "!cmd" NULL check, i.e. a potential NULL dereference; take the
 * pointer only after the context has been validated.
 */
static void hif_usb_mgmt_cb(struct urb *urb)
{
	struct cmd_buf *cmd = (struct cmd_buf *)urb->context;
	struct hif_device_usb *hif_dev;
	bool txok = true;

	if (!cmd || !cmd->skb || !cmd->hif_dev)
		return;

	hif_dev = cmd->hif_dev;

	switch (urb->status) {
	case 0:
		break;
	case -ENOENT:
	case -ECONNRESET:
	case -ENODEV:
	case -ESHUTDOWN:
		txok = false;

		/*
		 * If the URBs are being flushed, no need to complete
		 * this packet.
		 */
		spin_lock(&hif_dev->tx.tx_lock);
		if (hif_dev->tx.flags & HIF_USB_TX_FLUSH) {
			spin_unlock(&hif_dev->tx.tx_lock);
			dev_kfree_skb_any(cmd->skb);
			kfree(cmd);
			return;
		}
		spin_unlock(&hif_dev->tx.tx_lock);

		break;
	default:
		txok = false;
		break;
	}

	/* Strip the 4-byte stream header added by hif_usb_send_mgmt(). */
	skb_pull(cmd->skb, 4);
	ath9k_htc_txcompletion_cb(cmd->hif_dev->htc_handle,
				  cmd->skb, txok);
	kfree(cmd);
}

/*
 * Send a management/beacon frame directly on the WLAN TX pipe,
 * bypassing the aggregating TX buffer pool.  Runs in atomic context.
 */
static int hif_usb_send_mgmt(struct hif_device_usb *hif_dev,
			     struct sk_buff *skb)
{
	struct cmd_buf *cmd;
	struct urb *urb;
	__le16 *hdr;
	int ret;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		return -ENOMEM;

	cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
	if (!cmd) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	cmd->skb = skb;
	cmd->hif_dev = hif_dev;

	/* Prepend the 4-byte stream-mode header: LE16 payload length
	 * followed by the LE16 stream tag. */
	hdr = (__le16 *) skb_push(skb, 4);
	*hdr++ = cpu_to_le16(skb->len - 4);
	*hdr++ = cpu_to_le16(ATH_USB_TX_STREAM_MODE_TAG);

	usb_fill_bulk_urb(urb, hif_dev->udev,
			 usb_sndbulkpipe(hif_dev->udev, USB_WLAN_TX_PIPE),
			 skb->data, skb->len,
			 hif_usb_mgmt_cb, cmd);

	usb_anchor_urb(urb, &hif_dev->mgmt_submitted);
	ret = usb_submit_urb(urb, GFP_ATOMIC);
	if (ret) {
		usb_unanchor_urb(urb);
		kfree(cmd);
	}
	usb_free_urb(urb);

	return ret;
}

Sujith's avatar
Sujith committed
213
214
215
216
217
218
219
/* Drop every SKB on 'list' without notifying the HTC layer. */
static inline void ath9k_skb_queue_purge(struct hif_device_usb *hif_dev,
					 struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(list)) != NULL)
		dev_kfree_skb_any(skb);
}

/*
 * Hand every SKB on 'queue' back to the HTC layer with the given TX
 * status, updating the debug counters accordingly.
 */
static inline void ath9k_skb_queue_complete(struct hif_device_usb *hif_dev,
					    struct sk_buff_head *queue,
					    bool txok)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(queue)) != NULL) {
		ath9k_htc_txcompletion_cb(hif_dev->htc_handle,
					  skb, txok);
		if (txok)
			TX_STAT_INC(skb_success);
		else
			TX_STAT_INC(skb_failed);
	}
}

239
240
241
static void hif_usb_tx_cb(struct urb *urb)
{
	struct tx_buf *tx_buf = (struct tx_buf *) urb->context;
242
	struct hif_device_usb *hif_dev;
243
	bool txok = true;
244

245
	if (!tx_buf || !tx_buf->hif_dev)
246
247
		return;

248
249
	hif_dev = tx_buf->hif_dev;

250
251
252
253
254
255
256
	switch (urb->status) {
	case 0:
		break;
	case -ENOENT:
	case -ECONNRESET:
	case -ENODEV:
	case -ESHUTDOWN:
257
		txok = false;
258
259
260
261
262
263
264
265

		/*
		 * If the URBs are being flushed, no need to add this
		 * URB to the free list.
		 */
		spin_lock(&hif_dev->tx.tx_lock);
		if (hif_dev->tx.flags & HIF_USB_TX_FLUSH) {
			spin_unlock(&hif_dev->tx.tx_lock);
266
			ath9k_skb_queue_purge(hif_dev, &tx_buf->skb_queue);
267
268
269
270
			return;
		}
		spin_unlock(&hif_dev->tx.tx_lock);

271
		break;
272
	default:
273
		txok = false;
274
275
276
		break;
	}

277
	ath9k_skb_queue_complete(hif_dev, &tx_buf->skb_queue, txok);
Sujith's avatar
Sujith committed
278
279
280
281
282
283
284
285
286
287
288
289
290

	/* Re-initialize the SKB queue */
	tx_buf->len = tx_buf->offset = 0;
	__skb_queue_head_init(&tx_buf->skb_queue);

	/* Add this TX buffer to the free list */
	spin_lock(&hif_dev->tx.tx_lock);
	list_move_tail(&tx_buf->list, &hif_dev->tx.tx_buf);
	hif_dev->tx.tx_buf_cnt++;
	if (!(hif_dev->tx.flags & HIF_USB_TX_STOP))
		__hif_usb_tx(hif_dev); /* Check for pending SKBs */
	TX_STAT_INC(buf_completed);
	spin_unlock(&hif_dev->tx.tx_lock);
291
292
}

293
294
295
296
297
298
/* TX lock has to be taken */
static int __hif_usb_tx(struct hif_device_usb *hif_dev)
{
	struct tx_buf *tx_buf = NULL;
	struct sk_buff *nskb = NULL;
	int ret = 0, i;
299
	u16 tx_skb_cnt = 0;
300
	u8 *buf;
301
	__le16 *hdr;
302
303
304
305
306
307
308
309
310

	if (hif_dev->tx.tx_skb_cnt == 0)
		return 0;

	/* Check if a free TX buffer is available */
	if (list_empty(&hif_dev->tx.tx_buf))
		return 0;

	tx_buf = list_first_entry(&hif_dev->tx.tx_buf, struct tx_buf, list);
Sujith's avatar
Sujith committed
311
	list_move_tail(&tx_buf->list, &hif_dev->tx.tx_pending);
312
313
314
315
316
317
318
319
320
321
322
323
324
325
	hif_dev->tx.tx_buf_cnt--;

	tx_skb_cnt = min_t(u16, hif_dev->tx.tx_skb_cnt, MAX_TX_AGGR_NUM);

	for (i = 0; i < tx_skb_cnt; i++) {
		nskb = __skb_dequeue(&hif_dev->tx.tx_skb_queue);

		/* Should never be NULL */
		BUG_ON(!nskb);

		hif_dev->tx.tx_skb_cnt--;

		buf = tx_buf->buf;
		buf += tx_buf->offset;
326
327
328
		hdr = (__le16 *)buf;
		*hdr++ = cpu_to_le16(nskb->len);
		*hdr++ = cpu_to_le16(ATH_USB_TX_STREAM_MODE_TAG);
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
		buf += 4;
		memcpy(buf, nskb->data, nskb->len);
		tx_buf->len = nskb->len + 4;

		if (i < (tx_skb_cnt - 1))
			tx_buf->offset += (((tx_buf->len - 1) / 4) + 1) * 4;

		if (i == (tx_skb_cnt - 1))
			tx_buf->len += tx_buf->offset;

		__skb_queue_tail(&tx_buf->skb_queue, nskb);
		TX_STAT_INC(skb_queued);
	}

	usb_fill_bulk_urb(tx_buf->urb, hif_dev->udev,
			  usb_sndbulkpipe(hif_dev->udev, USB_WLAN_TX_PIPE),
			  tx_buf->buf, tx_buf->len,
			  hif_usb_tx_cb, tx_buf);

	ret = usb_submit_urb(tx_buf->urb, GFP_ATOMIC);
	if (ret) {
		tx_buf->len = tx_buf->offset = 0;
351
		ath9k_skb_queue_complete(hif_dev, &tx_buf->skb_queue, false);
352
353
354
355
356
357
358
359
360
361
362
		__skb_queue_head_init(&tx_buf->skb_queue);
		list_move_tail(&tx_buf->list, &hif_dev->tx.tx_buf);
		hif_dev->tx.tx_buf_cnt++;
	}

	if (!ret)
		TX_STAT_INC(buf_queued);

	return ret;
}

363
static int hif_usb_send_tx(struct hif_device_usb *hif_dev, struct sk_buff *skb)
364
{
365
	struct ath9k_htc_tx_ctl *tx_ctl;
366
	unsigned long flags;
367
	int ret = 0;
368
369
370
371
372
373
374
375
376
377
378
379
380
381

	spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);

	if (hif_dev->tx.flags & HIF_USB_TX_STOP) {
		spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
		return -ENODEV;
	}

	/* Check if the max queue count has been reached */
	if (hif_dev->tx.tx_skb_cnt > MAX_TX_BUF_NUM) {
		spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
		return -ENOMEM;
	}

382
	spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
383

384
385
	tx_ctl = HTC_SKB_CB(skb);

386
387
388
389
390
391
392
393
394
395
396
397
398
	/* Mgmt/Beacon frames don't use the TX buffer pool */
	if ((tx_ctl->type == ATH9K_HTC_MGMT) ||
	    (tx_ctl->type == ATH9K_HTC_BEACON)) {
		ret = hif_usb_send_mgmt(hif_dev, skb);
	}

	spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);

	if ((tx_ctl->type == ATH9K_HTC_NORMAL) ||
	    (tx_ctl->type == ATH9K_HTC_AMPDU)) {
		__skb_queue_tail(&hif_dev->tx.tx_skb_queue, skb);
		hif_dev->tx.tx_skb_cnt++;
	}
399
400

	/* Check if AMPDUs have to be sent immediately */
401
	if ((hif_dev->tx.tx_buf_cnt == MAX_TX_URB_NUM) &&
402
403
404
405
406
407
	    (hif_dev->tx.tx_skb_cnt < 2)) {
		__hif_usb_tx(hif_dev);
	}

	spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);

408
	return ret;
409
410
}

411
static void hif_usb_start(void *hif_handle)
412
413
414
415
416
417
418
419
420
421
422
{
	struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
	unsigned long flags;

	hif_dev->flags |= HIF_USB_START;

	spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
	hif_dev->tx.flags &= ~HIF_USB_TX_STOP;
	spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
}

423
static void hif_usb_stop(void *hif_handle)
424
425
{
	struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
426
	struct tx_buf *tx_buf = NULL, *tx_buf_tmp = NULL;
427
428
429
	unsigned long flags;

	spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
430
	ath9k_skb_queue_complete(hif_dev, &hif_dev->tx.tx_skb_queue, false);
431
432
433
	hif_dev->tx.tx_skb_cnt = 0;
	hif_dev->tx.flags |= HIF_USB_TX_STOP;
	spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
434
435
436
437
438
439

	/* The pending URBs have to be canceled. */
	list_for_each_entry_safe(tx_buf, tx_buf_tmp,
				 &hif_dev->tx.tx_pending, list) {
		usb_kill_urb(tx_buf->urb);
	}
440
441

	usb_kill_anchored_urbs(&hif_dev->mgmt_submitted);
442
443
}

444
/* Dispatch an SKB to the appropriate outbound pipe. */
static int hif_usb_send(void *hif_handle, u8 pipe_id, struct sk_buff *skb)
{
	struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
	int ret;

	switch (pipe_id) {
	case USB_WLAN_TX_PIPE:
		ret = hif_usb_send_tx(hif_dev, skb);
		break;
	case USB_REG_OUT_PIPE:
		ret = hif_usb_send_regout(hif_dev, skb);
		break;
	default:
		dev_err(&hif_dev->udev->dev,
			"ath9k_htc: Invalid TX pipe: %d\n", pipe_id);
		ret = -EINVAL;
		break;
	}

	return ret;
}

466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
/* True if 'skb' is an AMPDU frame belonging to station index 'idx'. */
static inline bool check_index(struct sk_buff *skb, u8 idx)
{
	struct ath9k_htc_tx_ctl *tx_ctl = HTC_SKB_CB(skb);

	return (tx_ctl->type == ATH9K_HTC_AMPDU) &&
	       (tx_ctl->sta_idx == idx);
}

/*
 * Drop every queued AMPDU frame belonging to station 'idx', completing
 * each one back to the HTC layer as failed.  Called when a station is
 * being removed so its frames don't linger in the TX SKB queue.
 */
static void hif_usb_sta_drain(void *hif_handle, u8 idx)
{
	struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
	struct sk_buff *skb, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);

	/* Walk safely: entries are unlinked while iterating. */
	skb_queue_walk_safe(&hif_dev->tx.tx_skb_queue, skb, tmp) {
		if (check_index(skb, idx)) {
			__skb_unlink(skb, &hif_dev->tx.tx_skb_queue);
			ath9k_htc_txcompletion_cb(hif_dev->htc_handle,
						  skb, false);
			hif_dev->tx.tx_skb_cnt--;
			TX_STAT_INC(skb_failed);
		}
	}

	spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
}

500
501
502
503
504
505
506
507
508
static struct ath9k_htc_hif hif_usb = {
	.transport = ATH9K_HIF_USB,
	.name = "ath9k_hif_usb",

	.control_ul_pipe = USB_REG_OUT_PIPE,
	.control_dl_pipe = USB_REG_IN_PIPE,

	.start = hif_usb_start,
	.stop = hif_usb_stop,
509
	.sta_drain = hif_usb_sta_drain,
510
511
512
513
514
515
	.send = hif_usb_send,
};

static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
				    struct sk_buff *skb)
{
Sujith's avatar
Sujith committed
516
	struct sk_buff *nskb, *skb_pool[MAX_PKT_NUM_IN_TRANSFER];
517
518
519
	int index = 0, i = 0, len = skb->len;
	int rx_remain_len, rx_pkt_len;
	u16 pool_index = 0;
520
521
	u8 *ptr;

522
523
	spin_lock(&hif_dev->rx_lock);

524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
	rx_remain_len = hif_dev->rx_remain_len;
	rx_pkt_len = hif_dev->rx_transfer_len;

	if (rx_remain_len != 0) {
		struct sk_buff *remain_skb = hif_dev->remain_skb;

		if (remain_skb) {
			ptr = (u8 *) remain_skb->data;

			index = rx_remain_len;
			rx_remain_len -= hif_dev->rx_pad_len;
			ptr += rx_pkt_len;

			memcpy(ptr, skb->data, rx_remain_len);

			rx_pkt_len += rx_remain_len;
			hif_dev->rx_remain_len = 0;
			skb_put(remain_skb, rx_pkt_len);

			skb_pool[pool_index++] = remain_skb;

		} else {
			index = rx_remain_len;
		}
	}

550
551
	spin_unlock(&hif_dev->rx_lock);

552
	while (index < len) {
553
554
555
556
557
		u16 pkt_len;
		u16 pkt_tag;
		u16 pad_len;
		int chk_idx;

558
559
560
561
562
		ptr = (u8 *) skb->data;

		pkt_len = ptr[index] + (ptr[index+1] << 8);
		pkt_tag = ptr[index+2] + (ptr[index+3] << 8);

563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
		if (pkt_tag != ATH_USB_RX_STREAM_MODE_TAG) {
			RX_STAT_INC(skb_dropped);
			return;
		}

		pad_len = 4 - (pkt_len & 0x3);
		if (pad_len == 4)
			pad_len = 0;

		chk_idx = index;
		index = index + 4 + pkt_len + pad_len;

		if (index > MAX_RX_BUF_SIZE) {
			spin_lock(&hif_dev->rx_lock);
			hif_dev->rx_remain_len = index - MAX_RX_BUF_SIZE;
			hif_dev->rx_transfer_len =
				MAX_RX_BUF_SIZE - chk_idx - 4;
			hif_dev->rx_pad_len = pad_len;

			nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC);
			if (!nskb) {
				dev_err(&hif_dev->udev->dev,
					"ath9k_htc: RX memory allocation error\n");
586
				spin_unlock(&hif_dev->rx_lock);
587
				goto err;
588
			}
589
590
591
592
593
594
595
596
597
			skb_reserve(nskb, 32);
			RX_STAT_INC(skb_allocated);

			memcpy(nskb->data, &(skb->data[chk_idx+4]),
			       hif_dev->rx_transfer_len);

			/* Record the buffer pointer */
			hif_dev->remain_skb = nskb;
			spin_unlock(&hif_dev->rx_lock);
598
		} else {
599
600
601
602
603
604
605
606
607
608
609
610
			nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC);
			if (!nskb) {
				dev_err(&hif_dev->udev->dev,
					"ath9k_htc: RX memory allocation error\n");
				goto err;
			}
			skb_reserve(nskb, 32);
			RX_STAT_INC(skb_allocated);

			memcpy(nskb->data, &(skb->data[chk_idx+4]), pkt_len);
			skb_put(nskb, pkt_len);
			skb_pool[pool_index++] = nskb;
611
612
613
614
615
616
617
618
619
620
621
622
623
624
		}
	}

err:
	for (i = 0; i < pool_index; i++) {
		ath9k_htc_rx_msg(hif_dev->htc_handle, skb_pool[i],
				 skb_pool[i]->len, USB_WLAN_RX_PIPE);
		RX_STAT_INC(skb_completed);
	}
}

static void ath9k_hif_usb_rx_cb(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *) urb->context;
625
	struct hif_device_usb *hif_dev =
626
627
628
		usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
	int ret;

Sujith's avatar
Sujith committed
629
630
631
	if (!skb)
		return;

632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
	if (!hif_dev)
		goto free;

	switch (urb->status) {
	case 0:
		break;
	case -ENOENT:
	case -ECONNRESET:
	case -ENODEV:
	case -ESHUTDOWN:
		goto free;
	default:
		goto resubmit;
	}

	if (likely(urb->actual_length != 0)) {
		skb_put(skb, urb->actual_length);
		ath9k_hif_usb_rx_stream(hif_dev, skb);
	}

resubmit:
	skb_reset_tail_pointer(skb);
	skb_trim(skb, 0);

Sujith's avatar
Sujith committed
656
	usb_anchor_urb(urb, &hif_dev->rx_submitted);
657
	ret = usb_submit_urb(urb, GFP_ATOMIC);
Sujith's avatar
Sujith committed
658
659
	if (ret) {
		usb_unanchor_urb(urb);
660
		goto free;
Sujith's avatar
Sujith committed
661
	}
662
663
664

	return;
free:
665
	kfree_skb(skb);
666
667
668
669
670
671
}

static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *) urb->context;
	struct sk_buff *nskb;
672
	struct hif_device_usb *hif_dev =
673
674
675
		usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
	int ret;

Sujith's avatar
Sujith committed
676
677
678
	if (!skb)
		return;

679
680
681
682
683
684
685
686
687
688
689
690
	if (!hif_dev)
		goto free;

	switch (urb->status) {
	case 0:
		break;
	case -ENOENT:
	case -ECONNRESET:
	case -ENODEV:
	case -ESHUTDOWN:
		goto free;
	default:
691
692
693
		skb_reset_tail_pointer(skb);
		skb_trim(skb, 0);

694
695
696
697
698
699
		goto resubmit;
	}

	if (likely(urb->actual_length != 0)) {
		skb_put(skb, urb->actual_length);

700
701
702
703
704
		/* Process the command first */
		ath9k_htc_rx_msg(hif_dev->htc_handle, skb,
				 skb->len, USB_REG_IN_PIPE);


705
		nskb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_ATOMIC);
706
707
708
709
710
711
		if (!nskb) {
			dev_err(&hif_dev->udev->dev,
				"ath9k_htc: REG_IN memory allocation failure\n");
			urb->context = NULL;
			return;
		}
712

713
		usb_fill_bulk_urb(urb, hif_dev->udev,
714
715
				 usb_rcvbulkpipe(hif_dev->udev,
						 USB_REG_IN_PIPE),
716
				 nskb->data, MAX_REG_IN_BUF_SIZE,
717
				 ath9k_hif_usb_reg_in_cb, nskb);
718
719
720
	}

resubmit:
721
	usb_anchor_urb(urb, &hif_dev->reg_in_submitted);
722
	ret = usb_submit_urb(urb, GFP_ATOMIC);
723
724
	if (ret) {
		usb_unanchor_urb(urb);
725
		goto free;
726
	}
727
728
729

	return;
free:
730
	kfree_skb(skb);
Sujith's avatar
Sujith committed
731
	urb->context = NULL;
732
733
734
735
736
}

static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev)
{
	struct tx_buf *tx_buf = NULL, *tx_buf_tmp = NULL;
737
	unsigned long flags;
738

Sujith's avatar
Sujith committed
739
740
741
	list_for_each_entry_safe(tx_buf, tx_buf_tmp,
				 &hif_dev->tx.tx_buf, list) {
		usb_kill_urb(tx_buf->urb);
742
743
744
745
746
747
		list_del(&tx_buf->list);
		usb_free_urb(tx_buf->urb);
		kfree(tx_buf->buf);
		kfree(tx_buf);
	}

748
749
750
751
	spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
	hif_dev->tx.flags |= HIF_USB_TX_FLUSH;
	spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);

752
753
754
755
756
757
758
759
	list_for_each_entry_safe(tx_buf, tx_buf_tmp,
				 &hif_dev->tx.tx_pending, list) {
		usb_kill_urb(tx_buf->urb);
		list_del(&tx_buf->list);
		usb_free_urb(tx_buf->urb);
		kfree(tx_buf->buf);
		kfree(tx_buf);
	}
760
761

	usb_kill_anchored_urbs(&hif_dev->mgmt_submitted);
762
763
764
765
766
767
768
769
770
771
772
}

static int ath9k_hif_usb_alloc_tx_urbs(struct hif_device_usb *hif_dev)
{
	struct tx_buf *tx_buf;
	int i;

	INIT_LIST_HEAD(&hif_dev->tx.tx_buf);
	INIT_LIST_HEAD(&hif_dev->tx.tx_pending);
	spin_lock_init(&hif_dev->tx.tx_lock);
	__skb_queue_head_init(&hif_dev->tx.tx_skb_queue);
773
	init_usb_anchor(&hif_dev->mgmt_submitted);
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797

	for (i = 0; i < MAX_TX_URB_NUM; i++) {
		tx_buf = kzalloc(sizeof(struct tx_buf), GFP_KERNEL);
		if (!tx_buf)
			goto err;

		tx_buf->buf = kzalloc(MAX_TX_BUF_SIZE, GFP_KERNEL);
		if (!tx_buf->buf)
			goto err;

		tx_buf->urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!tx_buf->urb)
			goto err;

		tx_buf->hif_dev = hif_dev;
		__skb_queue_head_init(&tx_buf->skb_queue);

		list_add_tail(&tx_buf->list, &hif_dev->tx.tx_buf);
	}

	hif_dev->tx.tx_buf_cnt = MAX_TX_URB_NUM;

	return 0;
err:
798
799
800
801
	if (tx_buf) {
		kfree(tx_buf->buf);
		kfree(tx_buf);
	}
802
803
804
805
806
807
	ath9k_hif_usb_dealloc_tx_urbs(hif_dev);
	return -ENOMEM;
}

static void ath9k_hif_usb_dealloc_rx_urbs(struct hif_device_usb *hif_dev)
{
Sujith's avatar
Sujith committed
808
	usb_kill_anchored_urbs(&hif_dev->rx_submitted);
809
810
811
812
}

static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
{
Sujith's avatar
Sujith committed
813
814
	struct urb *urb = NULL;
	struct sk_buff *skb = NULL;
815
816
	int i, ret;

Sujith's avatar
Sujith committed
817
	init_usb_anchor(&hif_dev->rx_submitted);
818
	spin_lock_init(&hif_dev->rx_lock);
Sujith's avatar
Sujith committed
819

820
821
822
	for (i = 0; i < MAX_RX_URB_NUM; i++) {

		/* Allocate URB */
Sujith's avatar
Sujith committed
823
824
		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (urb == NULL) {
825
			ret = -ENOMEM;
Sujith's avatar
Sujith committed
826
			goto err_urb;
827
828
829
		}

		/* Allocate buffer */
830
		skb = alloc_skb(MAX_RX_BUF_SIZE, GFP_KERNEL);
Sujith's avatar
Sujith committed
831
832
833
834
		if (!skb) {
			ret = -ENOMEM;
			goto err_skb;
		}
835

Sujith's avatar
Sujith committed
836
837
838
839
840
841
842
843
		usb_fill_bulk_urb(urb, hif_dev->udev,
				  usb_rcvbulkpipe(hif_dev->udev,
						  USB_WLAN_RX_PIPE),
				  skb->data, MAX_RX_BUF_SIZE,
				  ath9k_hif_usb_rx_cb, skb);

		/* Anchor URB */
		usb_anchor_urb(urb, &hif_dev->rx_submitted);
844

Sujith's avatar
Sujith committed
845
846
847
848
849
850
		/* Submit URB */
		ret = usb_submit_urb(urb, GFP_KERNEL);
		if (ret) {
			usb_unanchor_urb(urb);
			goto err_submit;
		}
Sujith's avatar
Sujith committed
851
852
853
854
855
856

		/*
		 * Drop reference count.
		 * This ensures that the URB is freed when killing them.
		 */
		usb_free_urb(urb);
857
858
859
860
	}

	return 0;

Sujith's avatar
Sujith committed
861
err_submit:
862
	kfree_skb(skb);
Sujith's avatar
Sujith committed
863
864
865
err_skb:
	usb_free_urb(urb);
err_urb:
866
867
868
869
	ath9k_hif_usb_dealloc_rx_urbs(hif_dev);
	return ret;
}

870
static void ath9k_hif_usb_dealloc_reg_in_urbs(struct hif_device_usb *hif_dev)
871
{
872
	usb_kill_anchored_urbs(&hif_dev->reg_in_submitted);
873
874
}

875
static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev)
876
{
877
878
879
	struct urb *urb = NULL;
	struct sk_buff *skb = NULL;
	int i, ret;
880

881
	init_usb_anchor(&hif_dev->reg_in_submitted);
882

883
	for (i = 0; i < MAX_REG_IN_URB_NUM; i++) {
884

885
886
887
888
889
890
		/* Allocate URB */
		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (urb == NULL) {
			ret = -ENOMEM;
			goto err_urb;
		}
891

892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
		/* Allocate buffer */
		skb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_KERNEL);
		if (!skb) {
			ret = -ENOMEM;
			goto err_skb;
		}

		usb_fill_bulk_urb(urb, hif_dev->udev,
				  usb_rcvbulkpipe(hif_dev->udev,
						  USB_REG_IN_PIPE),
				  skb->data, MAX_REG_IN_BUF_SIZE,
				  ath9k_hif_usb_reg_in_cb, skb);

		/* Anchor URB */
		usb_anchor_urb(urb, &hif_dev->reg_in_submitted);

		/* Submit URB */
		ret = usb_submit_urb(urb, GFP_KERNEL);
		if (ret) {
			usb_unanchor_urb(urb);
			goto err_submit;
		}

		/*
		 * Drop reference count.
		 * This ensures that the URB is freed when killing them.
		 */
		usb_free_urb(urb);
	}
921
922
923

	return 0;

924
925
926
927
928
929
930
err_submit:
	kfree_skb(skb);
err_skb:
	usb_free_urb(urb);
err_urb:
	ath9k_hif_usb_dealloc_reg_in_urbs(hif_dev);
	return ret;
931
932
933
934
}

static int ath9k_hif_usb_alloc_urbs(struct hif_device_usb *hif_dev)
{
935
936
937
	/* Register Write */
	init_usb_anchor(&hif_dev->regout_submitted);

938
939
940
941
942
943
	/* TX */
	if (ath9k_hif_usb_alloc_tx_urbs(hif_dev) < 0)
		goto err;

	/* RX */
	if (ath9k_hif_usb_alloc_rx_urbs(hif_dev) < 0)
944
		goto err_rx;
945

946
	/* Register Read */
947
	if (ath9k_hif_usb_alloc_reg_in_urbs(hif_dev) < 0)
948
		goto err_reg;
949
950

	return 0;
951
952
953
954
err_reg:
	ath9k_hif_usb_dealloc_rx_urbs(hif_dev);
err_rx:
	ath9k_hif_usb_dealloc_tx_urbs(hif_dev);
955
956
957
958
err:
	return -ENOMEM;
}

959
960
961
/* Tear down every URB set created by ath9k_hif_usb_alloc_urbs(). */
static void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev)
{
	usb_kill_anchored_urbs(&hif_dev->regout_submitted);
	ath9k_hif_usb_dealloc_reg_in_urbs(hif_dev);
	ath9k_hif_usb_dealloc_tx_urbs(hif_dev);
	ath9k_hif_usb_dealloc_rx_urbs(hif_dev);
}

967
968
static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev,
				     u32 drv_info)
969
970
971
972
973
974
{
	int transfer, err;
	const void *data = hif_dev->firmware->data;
	size_t len = hif_dev->firmware->size;
	u32 addr = AR9271_FIRMWARE;
	u8 *buf = kzalloc(4096, GFP_KERNEL);
Sujith's avatar
Sujith committed
975
	u32 firm_offset;
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998

	if (!buf)
		return -ENOMEM;

	while (len) {
		transfer = min_t(int, len, 4096);
		memcpy(buf, data, transfer);

		err = usb_control_msg(hif_dev->udev,
				      usb_sndctrlpipe(hif_dev->udev, 0),
				      FIRMWARE_DOWNLOAD, 0x40 | USB_DIR_OUT,
				      addr >> 8, 0, buf, transfer, HZ);
		if (err < 0) {
			kfree(buf);
			return err;
		}

		len -= transfer;
		data += transfer;
		addr += transfer;
	}
	kfree(buf);

999
	if (IS_AR7010_DEVICE(drv_info))
Sujith's avatar
Sujith committed
1000
		firm_offset = AR7010_FIRMWARE_TEXT;
For faster browsing, not all history is shown. View entire blame