recv.c 18.9 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
/*
 * Copyright (c) 2008 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

Sujith's avatar
Sujith committed
17
#include "ath9k.h"
18

19
20
21
/*
 * Map a received frame to the mac80211 hw instance that should process
 * it, by matching the frame's A1 (receiver) address against the
 * permanent address of each active secondary wiphy.  Falls back to the
 * primary wiphy's hw when no secondary wiphy matches.
 */
static struct ieee80211_hw * ath_get_virt_hw(struct ath_softc *sc,
					     struct ieee80211_hdr *hdr)
{
	struct ieee80211_hw *found = sc->pri_wiphy->hw;
	int slot;

	/* sec_wiphy[] is mutated under wiphy_lock, so hold it while
	 * scanning the slots. */
	spin_lock_bh(&sc->wiphy_lock);
	for (slot = 0; slot < sc->num_sec_wiphy; slot++) {
		struct ath_wiphy *aphy = sc->sec_wiphy[slot];

		if (!aphy)
			continue;
		if (!compare_ether_addr(hdr->addr1,
					aphy->hw->wiphy->perm_addr)) {
			found = aphy->hw;
			break;
		}
	}
	spin_unlock_bh(&sc->wiphy_lock);

	return found;
}

40
41
42
43
44
45
46
47
48
49
/*
 * Setup and link descriptors.
 *
 * 11N: we can no longer afford to self link the last descriptor.
 * MAC acknowledges BA status as long as it copies frames to host
 * buffer (or rx fifo). This can incorrectly acknowledge packets
 * to a sender if last desc is self-linked.
 */
static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_desc *ds;
	struct sk_buff *skb;

	ATH_RXBUF_RESET(bf);

	ds = bf->bf_desc;
	ds->ds_link = 0; /* link to null */
	ds->ds_data = bf->bf_buf_addr;

	/* virtual addr of the beginning of the buffer. */
	skb = bf->bf_mpdu;
	ASSERT(skb != NULL);
	ds->ds_vdata = skb->data;

	/* setup rx descriptors. The rx.bufsize here tells the hardware
	 * how much data it can DMA to us and that we are prepared
	 * to process */
	ath9k_hw_setuprxdesc(ah, ds,
			     sc->rx.bufsize,
			     0);

	/* First buffer in the chain: hand its DMA address straight to the
	 * hardware; otherwise splice it onto the previous descriptor's
	 * link word so the DMA engine walks to it. */
	if (sc->rx.rxlink == NULL)
		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	else
		*sc->rx.rxlink = bf->bf_daddr;

	/* Remember where the next descriptor must be linked in. */
	sc->rx.rxlink = &ds->ds_link;
	ath9k_hw_rxena(ah);
}

Sujith's avatar
Sujith committed
81
82
83
84
/*
 * Program @antenna as the new default rx antenna and reset the
 * diversity counter used by the rx tasklet's antenna-switch heuristic.
 */
static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(sc->sc_ah, antenna);
	sc->rx.defant = antenna;
	sc->rx.rxotherant = 0;
}

/*
 *  Extend 15-bit time stamp from rx descriptor to
 *  a full 64-bit TSF using the current h/w TSF.
*/
static u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp)
{
	u64 tsf = ath9k_hw_gettsf64(sc->sc_ah);

	/* If the TSF's low 15 bits have already wrapped past the rx
	 * stamp, the frame was received in the previous 15-bit epoch,
	 * so step the TSF back one epoch before splicing. */
	if ((tsf & 0x7fff) < rstamp)
		tsf -= 0x8000;

	return (tsf & ~0x7fff) | rstamp;
}

Sujith's avatar
Sujith committed
103
/*
 * Allocate an rx skb whose payload start is aligned to a cache-line
 * boundary.  Returns NULL (after logging) on allocation failure.
 */
static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc, u32 len)
{
	struct sk_buff *skb;
	u32 misalign;

	/*
	 * Over-allocate by one cache line so the data pointer can be
	 * pushed up to a cache-line boundary.  Alignment is important
	 * (for the 5210 at least) as not doing so causes bogus data in
	 * rx'd frames.
	 *
	 * Note: the kernel can hand back more than we ask for.  We
	 * really only need 4 KB (the hardware maximum; at least 3849
	 * bytes for the max AMSDU size it supports), but 8 KB
	 * allocations are actually observed on some systems.
	 */
	skb = dev_alloc_skb(len + sc->cachelsz - 1);
	if (skb == NULL) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"skbuff alloc of size %u failed\n", len);
		return NULL;
	}

	misalign = ((unsigned long) skb->data) % sc->cachelsz;
	if (misalign)
		skb_reserve(skb, sc->cachelsz - misalign);

	return skb;
}

/*
 * For Decrypt or Demic errors, we only mark packet status here and always push
 * up the frame up to let mac80211 handle the actual error case, be it no
 * decryption key or real decryption error. This let us keep statistics there.
 */
/*
 * Translate a completed rx descriptor into a mac80211
 * ieee80211_rx_status.  Returns 1 when the frame should be delivered
 * to mac80211, 0 when it must be dropped (requeued by the caller).
 * *decrypt_error is set when the hardware reported a decryption error,
 * so the caller can suppress the DECRYPTED flag.
 */
static int ath_rx_prepare(struct sk_buff *skb, struct ath_desc *ds,
			  struct ieee80211_rx_status *rx_status, bool *decrypt_error,
			  struct ath_softc *sc)
{
	struct ieee80211_hdr *hdr;
	u8 ratecode;
	__le16 fc;
	struct ieee80211_hw *hw;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
	memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
	/* Route status (band/freq) through the virtual wiphy that owns
	 * this frame's receiver address. */
	hw = ath_get_virt_hw(sc, hdr);

	if (ds->ds_rxstat.rs_more) {
		/*
		 * Frame spans multiple descriptors; this cannot happen yet
		 * as we don't support jumbograms. If not in monitor mode,
		 * discard the frame. Enable this if you want to see
		 * error frames in Monitor mode.
		 */
		if (sc->sc_ah->opmode != NL80211_IFTYPE_MONITOR)
			goto rx_next;
	} else if (ds->ds_rxstat.rs_status != 0) {
		if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC)
			rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
		/* PHY errors are never delivered, regardless of mode. */
		if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY)
			goto rx_next;

		if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT) {
			*decrypt_error = true;
		} else if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC) {
			if (ieee80211_is_ctl(fc))
				/*
				 * Sometimes, we get invalid
				 * MIC failures on valid control frames.
				 * Remove these mic errors.
				 */
				ds->ds_rxstat.rs_status &= ~ATH9K_RXERR_MIC;
			else
				rx_status->flag |= RX_FLAG_MMIC_ERROR;
		}
		/*
		 * Reject error frames with the exception of
		 * decryption and MIC failures. For monitor mode,
		 * we also ignore the CRC error.
		 */
		if (sc->sc_ah->opmode == NL80211_IFTYPE_MONITOR) {
			if (ds->ds_rxstat.rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
			      ATH9K_RXERR_CRC))
				goto rx_next;
		} else {
			if (ds->ds_rxstat.rs_status &
			    ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
				goto rx_next;
			}
		}
	}

	ratecode = ds->ds_rxstat.rs_rate;

	/* Bit 7 of the hardware rate code distinguishes HT (MCS) rates
	 * from legacy rates. */
	if (ratecode & 0x80) {
		/* HT rate */
		rx_status->flag |= RX_FLAG_HT;
		if (ds->ds_rxstat.rs_flags & ATH9K_RX_2040)
			rx_status->flag |= RX_FLAG_40MHZ;
		if (ds->ds_rxstat.rs_flags & ATH9K_RX_GI)
			rx_status->flag |= RX_FLAG_SHORT_GI;
		rx_status->rate_idx = ratecode & 0x7f;
	} else {
		int i = 0, cur_band, n_rates;

		/* Legacy rate: search the current band's rate table for
		 * a matching long- or short-preamble hardware value. */
		cur_band = hw->conf.channel->band;
		n_rates = sc->sbands[cur_band].n_bitrates;

		for (i = 0; i < n_rates; i++) {
			if (sc->sbands[cur_band].bitrates[i].hw_value ==
			    ratecode) {
				rx_status->rate_idx = i;
				break;
			}

			if (sc->sbands[cur_band].bitrates[i].hw_value_short ==
			    ratecode) {
				rx_status->rate_idx = i;
				rx_status->flag |= RX_FLAG_SHORTPRE;
				break;
			}
		}
	}

	rx_status->mactime = ath_extend_tsf(sc, ds->ds_rxstat.rs_tstamp);
	rx_status->band = hw->conf.channel->band;
	rx_status->freq = hw->conf.channel->center_freq;
	rx_status->noise = sc->ani.noise_floor;
	/* rs_rssi is relative to the noise floor tracked by ANI. */
	rx_status->signal = rx_status->noise + ds->ds_rxstat.rs_rssi;
	rx_status->antenna = ds->ds_rxstat.rs_antenna;

	/* at 45 you will be able to use MCS 15 reliably. A more elaborate
	 * scheme can be used here but it requires tables of SNR/throughput for
	 * each possible mode used. */
	rx_status->qual =  ds->ds_rxstat.rs_rssi * 100 / 45;

	/* rssi can be more than 45 though, anything above that
	 * should be considered at 100% */
	if (rx_status->qual > 100)
		rx_status->qual = 100;

	rx_status->flag |= RX_FLAG_TSFT;

	return 1;
rx_next:
	return 0;
}

/*
 * (Re)program the PCU for the current operating mode: rx filter,
 * BSSID mask, opmode, MAC address and multicast filter.
 */
static void ath_opmode_init(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	u32 rfilt, mfilt[2];

	/* configure rx filter */
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* configure bssid mask */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
		ath9k_hw_setbssidmask(sc);

	/* configure operational mode */
	ath9k_hw_setopmode(ah);

	/* Handle any link-level address change. */
	ath9k_hw_setmac(ah, sc->sc_ah->macaddr);

	/* calculate and install multicast filter; ~0 in both hash words
	 * accepts all multicast frames */
	mfilt[0] = mfilt[1] = ~0;
	ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
}

/*
 * One-time rx path setup: allocate @nbufs descriptors, attach a
 * cache-aligned, DMA-mapped skb to each, and initialize the rx locks.
 * Returns 0 on success or a negative errno; on failure everything
 * allocated so far is torn down via ath_rx_cleanup().
 */
int ath_rx_init(struct ath_softc *sc, int nbufs)
{
	struct sk_buff *skb;
	struct ath_buf *bf;
	int error = 0;

	do {
		spin_lock_init(&sc->rx.rxflushlock);
		sc->sc_flags &= ~SC_OP_RXFLUSH;
		spin_lock_init(&sc->rx.rxbuflock);

		/* Buffer size: max MPDU rounded up to the cache-line
		 * unit used for alignment (capped at 64). */
		sc->rx.bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
					   min(sc->cachelsz,
					       (u16)64));

		DPRINTF(sc, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
			sc->cachelsz, sc->rx.bufsize);

		/* Initialize rx descriptors */

		error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
					  "rx", nbufs, 1);
		if (error != 0) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"failed to allocate rx descriptors: %d\n", error);
			break;
		}

		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
			skb = ath_rxbuf_alloc(sc, sc->rx.bufsize);
			if (skb == NULL) {
				error = -ENOMEM;
				break;
			}

			bf->bf_mpdu = skb;
			bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
							 sc->rx.bufsize,
							 DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(sc->dev,
				  bf->bf_buf_addr))) {
				/* bf_mpdu is cleared so cleanup knows this
				 * buffer holds no skb/mapping */
				dev_kfree_skb_any(skb);
				bf->bf_mpdu = NULL;
				DPRINTF(sc, ATH_DBG_CONFIG,
					"dma_mapping_error() on RX init\n");
				error = -ENOMEM;
				break;
			}
			bf->bf_dmacontext = bf->bf_buf_addr;
		}
		sc->rx.rxlink = NULL;

	} while (0);

	if (error)
		ath_rx_cleanup(sc);

	return error;
}

void ath_rx_cleanup(struct ath_softc *sc)
{
	struct sk_buff *skb;
	struct ath_buf *bf;

Sujith's avatar
Sujith committed
345
	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
346
347
348
349
350
		skb = bf->bf_mpdu;
		if (skb)
			dev_kfree_skb(skb);
	}

Sujith's avatar
Sujith committed
351
352
	if (sc->rx.rxdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when operating as a repeater so we see repeater-sta beacons
 *   - when scanning
 */

u32 ath_calcrxfilter(struct ath_softc *sc)
{
#define	RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)
Sujith's avatar
Sujith committed
377

378
379
380
381
382
383
384
	u32 rfilt;

	rfilt = (ath9k_hw_getrxfilter(sc->sc_ah) & RX_FILTER_PRESERVE)
		| ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
		| ATH9K_RX_FILTER_MCAST;

	/* If not a STA, enable processing of Probe Requests */
385
	if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION)
386
387
388
		rfilt |= ATH9K_RX_FILTER_PROBEREQ;

	/* Can't set HOSTAP into promiscous mode */
389
	if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) &&
Sujith's avatar
Sujith committed
390
	     (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) ||
391
	    (sc->sc_ah->opmode == NL80211_IFTYPE_MONITOR)) {
392
393
394
395
396
		rfilt |= ATH9K_RX_FILTER_PROM;
		/* ??? To prevent from sending ACK */
		rfilt &= ~ATH9K_RX_FILTER_UCAST;
	}

397
398
399
	if (sc->rx.rxfilter & FIF_CONTROL)
		rfilt |= ATH9K_RX_FILTER_CONTROL;

400
401
402
403
	if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
	    !(sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC))
		rfilt |= ATH9K_RX_FILTER_MYBEACON;
	else
404
405
		rfilt |= ATH9K_RX_FILTER_BEACON;

406
	/* If in HOSTAP mode, want to enable reception of PSPOLL frames */
407
	if (sc->sc_ah->opmode == NL80211_IFTYPE_AP)
408
		rfilt |= ATH9K_RX_FILTER_PSPOLL;
Sujith's avatar
Sujith committed
409

410
411
412
413
414
415
416
417
	if (sc->sec_wiphy) {
		/* TODO: only needed if more than one BSSID is in use in
		 * station/adhoc mode */
		/* TODO: for older chips, may need to add ATH9K_RX_FILTER_PROM
		 */
		rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
	}

418
	return rfilt;
Sujith's avatar
Sujith committed
419

420
421
422
423
424
#undef RX_FILTER_PRESERVE
}

/*
 * (Re)start reception: rebuild the rx descriptor chain from rxbuf,
 * point the hardware at its head, then program the PCU and enable
 * receive.  Always returns 0.
 */
int ath_startrecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_buf *bf, *tbf;

	spin_lock_bh(&sc->rx.rxbuflock);
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	/* Reset the link pointer so ath_rx_buf_link() treats the first
	 * buffer as the head of a fresh chain. */
	sc->rx.rxlink = NULL;
	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
		ath_rx_buf_link(sc, bf);
	}

	/* We could have deleted elements so the list may be empty now */
	if (list_empty(&sc->rx.rxbuf))
		goto start_recv;

	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
	ath9k_hw_rxena(ah);

start_recv:
	spin_unlock_bh(&sc->rx.rxbuflock);
	ath_opmode_init(sc);
	ath9k_hw_startpcureceive(ah);

	return 0;
}

/*
 * Stop reception: disable the PCU receive path and rx filter, then
 * halt rx DMA.  Returns the result of the DMA stop (false if the
 * engine failed to stop).
 */
bool ath_stoprecv(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	bool stopped;

	/* Order matters: quiesce the PCU and filter before asking the
	 * DMA engine to stop. */
	ath9k_hw_stoppcurecv(ah);
	ath9k_hw_setrxfilter(ah, 0);
	stopped = ath9k_hw_stopdmarecv(ah);
	/* Invalidate the s/w chain tail; ath_startrecv() rebuilds it. */
	sc->rx.rxlink = NULL;

	return stopped;
}

/*
 * Drain pending rx frames without delivering them: run the rx tasklet
 * in flush mode under rxflushlock.  SC_OP_RXFLUSH also makes any
 * concurrently-entered interrupt-path tasklet bail out early.
 */
void ath_flushrecv(struct ath_softc *sc)
{
	spin_lock_bh(&sc->rx.rxflushlock);
	sc->sc_flags |= SC_OP_RXFLUSH;
	ath_rx_tasklet(sc, 1);
	sc->sc_flags &= ~SC_OP_RXFLUSH;
	spin_unlock_bh(&sc->rx.rxflushlock);
}

int ath_rx_tasklet(struct ath_softc *sc, int flush)
{
#define PA2DESC(_sc, _pa)                                               \
Sujith's avatar
Sujith committed
478
479
	((struct ath_desc *)((caddr_t)(_sc)->rx.rxdma.dd_desc +		\
			     ((_pa) - (_sc)->rx.rxdma.dd_desc_paddr)))
480

Sujith's avatar
Sujith committed
481
	struct ath_buf *bf;
482
	struct ath_desc *ds;
483
	struct sk_buff *skb = NULL, *requeue_skb;
Sujith's avatar
Sujith committed
484
	struct ieee80211_rx_status rx_status;
485
	struct ath_hw *ah = sc->sc_ah;
Sujith's avatar
Sujith committed
486
487
488
489
490
	struct ieee80211_hdr *hdr;
	int hdrlen, padsize, retval;
	bool decrypt_error = false;
	u8 keyix;

Sujith's avatar
Sujith committed
491
	spin_lock_bh(&sc->rx.rxbuflock);
492
493
494

	do {
		/* If handling rx interrupt and flush is in progress => exit */
Sujith's avatar
Sujith committed
495
		if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
496
497
			break;

Sujith's avatar
Sujith committed
498
499
		if (list_empty(&sc->rx.rxbuf)) {
			sc->rx.rxlink = NULL;
500
501
502
			break;
		}

Sujith's avatar
Sujith committed
503
		bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
504
505
506
507
508
509
510
511
512
513
514
515
516
		ds = bf->bf_desc;

		/*
		 * Must provide the virtual address of the current
		 * descriptor, the physical address, and the virtual
		 * address of the next descriptor in the h/w chain.
		 * This allows the HAL to look ahead to see if the
		 * hardware is done with a descriptor by checking the
		 * done bit in the following descriptor and the address
		 * of the current descriptor the DMA engine is working
		 * on.  All this is necessary because of our use of
		 * a self-linked list to avoid rx overruns.
		 */
Sujith's avatar
Sujith committed
517
		retval = ath9k_hw_rxprocdesc(ah, ds,
518
519
520
521
522
523
524
					     bf->bf_daddr,
					     PA2DESC(sc, ds->ds_link),
					     0);
		if (retval == -EINPROGRESS) {
			struct ath_buf *tbf;
			struct ath_desc *tds;

Sujith's avatar
Sujith committed
525
526
			if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
				sc->rx.rxlink = NULL;
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
				break;
			}

			tbf = list_entry(bf->list.next, struct ath_buf, list);

			/*
			 * On some hardware the descriptor status words could
			 * get corrupted, including the done bit. Because of
			 * this, check if the next descriptor's done bit is
			 * set or not.
			 *
			 * If the next descriptor's done bit is set, the current
			 * descriptor has been corrupted. Force s/w to discard
			 * this descriptor and continue...
			 */

			tds = tbf->bf_desc;
Sujith's avatar
Sujith committed
544
545
			retval = ath9k_hw_rxprocdesc(ah, tds, tbf->bf_daddr,
					     PA2DESC(sc, tds->ds_link), 0);
546
547
548
549
550
551
			if (retval == -EINPROGRESS) {
				break;
			}
		}

		skb = bf->bf_mpdu;
Sujith's avatar
Sujith committed
552
		if (!skb)
553
554
			continue;

555
556
557
558
559
		/*
		 * Synchronize the DMA transfer with CPU before
		 * 1. accessing the frame
		 * 2. requeueing the same buffer to h/w
		 */
560
		dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
561
				sc->rx.bufsize,
562
				DMA_FROM_DEVICE);
563

564
		/*
Sujith's avatar
Sujith committed
565
566
		 * If we're asked to flush receive queue, directly
		 * chain it back at the queue without processing it.
567
		 */
Sujith's avatar
Sujith committed
568
		if (flush)
569
			goto requeue;
570

Sujith's avatar
Sujith committed
571
		if (!ds->ds_rxstat.rs_datalen)
572
			goto requeue;
573

Sujith's avatar
Sujith committed
574
		/* The status portion of the descriptor could get corrupted. */
Sujith's avatar
Sujith committed
575
		if (sc->rx.bufsize < ds->ds_rxstat.rs_datalen)
576
			goto requeue;
577

Sujith's avatar
Sujith committed
578
		if (!ath_rx_prepare(skb, ds, &rx_status, &decrypt_error, sc))
579
580
581
582
			goto requeue;

		/* Ensure we always have an skb to requeue once we are done
		 * processing the current buffer's skb */
Sujith's avatar
Sujith committed
583
		requeue_skb = ath_rxbuf_alloc(sc, sc->rx.bufsize);
584
585
586

		/* If there is no memory we ignore the current RX'd frame,
		 * tell hardware it can give us a new frame using the old
Sujith's avatar
Sujith committed
587
		 * skb and put it at the tail of the sc->rx.rxbuf list for
588
589
590
		 * processing. */
		if (!requeue_skb)
			goto requeue;
591

592
		/* Unmap the frame */
593
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
Sujith's avatar
Sujith committed
594
				 sc->rx.bufsize,
595
				 DMA_FROM_DEVICE);
596

Sujith's avatar
Sujith committed
597
598
599
600
601
602
603
		skb_put(skb, ds->ds_rxstat.rs_datalen);
		skb->protocol = cpu_to_be16(ETH_P_CONTROL);

		/* see if any padding is done by the hw and remove it */
		hdr = (struct ieee80211_hdr *)skb->data;
		hdrlen = ieee80211_get_hdrlen_from_skb(skb);

604
605
606
607
608
609
610
611
612
613
		/* The MAC header is padded to have 32-bit boundary if the
		 * packet payload is non-zero. The general calculation for
		 * padsize would take into account odd header lengths:
		 * padsize = (4 - hdrlen % 4) % 4; However, since only
		 * even-length headers are used, padding can only be 0 or 2
		 * bytes and we can optimize this a bit. In addition, we must
		 * not try to remove padding from short control frames that do
		 * not have payload. */
		padsize = hdrlen & 3;
		if (padsize && hdrlen >= 24) {
Sujith's avatar
Sujith committed
614
615
			memmove(skb->data + padsize, skb->data, hdrlen);
			skb_pull(skb, padsize);
616
617
		}

Sujith's avatar
Sujith committed
618
		keyix = ds->ds_rxstat.rs_keyix;
619

Sujith's avatar
Sujith committed
620
621
622
623
624
625
		if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error) {
			rx_status.flag |= RX_FLAG_DECRYPTED;
		} else if ((le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_PROTECTED)
			   && !decrypt_error && skb->len >= hdrlen + 4) {
			keyix = skb->data[hdrlen + 3] >> 6;

Sujith's avatar
Sujith committed
626
			if (test_bit(keyix, sc->keymap))
Sujith's avatar
Sujith committed
627
628
				rx_status.flag |= RX_FLAG_DECRYPTED;
		}
629
630
631
632
633
634
		if (ah->sw_mgmt_crypto &&
		    (rx_status.flag & RX_FLAG_DECRYPTED) &&
		    ieee80211_is_mgmt(hdr->frame_control)) {
			/* Use software decrypt for management frames. */
			rx_status.flag &= ~RX_FLAG_DECRYPTED;
		}
Sujith's avatar
Sujith committed
635
636

		/* Send the frame to mac80211 */
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
		if (hdr->addr1[5] & 0x01) {
			int i;
			/*
			 * Deliver broadcast/multicast frames to all suitable
			 * virtual wiphys.
			 */
			/* TODO: filter based on channel configuration */
			for (i = 0; i < sc->num_sec_wiphy; i++) {
				struct ath_wiphy *aphy = sc->sec_wiphy[i];
				struct sk_buff *nskb;
				if (aphy == NULL)
					continue;
				nskb = skb_copy(skb, GFP_ATOMIC);
				if (nskb)
					__ieee80211_rx(aphy->hw, nskb,
						       &rx_status);
			}
			__ieee80211_rx(sc->hw, skb, &rx_status);
		} else {
			/* Deliver unicast frames based on receiver address */
			__ieee80211_rx(ath_get_virt_hw(sc, hdr), skb,
				       &rx_status);
		}
660
661
662

		/* We will now give hardware our shiny new allocated skb */
		bf->bf_mpdu = requeue_skb;
663
		bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
Sujith's avatar
Sujith committed
664
					 sc->rx.bufsize,
665
666
					 DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(sc->dev,
667
668
669
670
			  bf->bf_buf_addr))) {
			dev_kfree_skb_any(requeue_skb);
			bf->bf_mpdu = NULL;
			DPRINTF(sc, ATH_DBG_CONFIG,
671
				"dma_mapping_error() on RX\n");
672
673
			break;
		}
674
		bf->bf_dmacontext = bf->bf_buf_addr;
675
676
677
678
679

		/*
		 * change the default rx antenna if rx diversity chooses the
		 * other antenna 3 times in a row.
		 */
Sujith's avatar
Sujith committed
680
681
		if (sc->rx.defant != ds->ds_rxstat.rs_antenna) {
			if (++sc->rx.rxotherant >= 3)
Sujith's avatar
Sujith committed
682
				ath_setdefantenna(sc, ds->ds_rxstat.rs_antenna);
683
		} else {
Sujith's avatar
Sujith committed
684
			sc->rx.rxotherant = 0;
685
		}
686
687
688
689
690
691

		if (ieee80211_is_beacon(hdr->frame_control) &&
				(sc->sc_flags & SC_OP_WAIT_FOR_BEACON)) {
			sc->sc_flags &= ~SC_OP_WAIT_FOR_BEACON;
			ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_NETWORK_SLEEP);
		}
692
requeue:
Sujith's avatar
Sujith committed
693
		list_move_tail(&bf->list, &sc->rx.rxbuf);
694
		ath_rx_buf_link(sc, bf);
Sujith's avatar
Sujith committed
695
696
	} while (1);

Sujith's avatar
Sujith committed
697
	spin_unlock_bh(&sc->rx.rxbuflock);
698
699
700
701

	return 0;
#undef PA2DESC
}