/*-
 * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
 * Copyright (c) 2004-2005 Atheros Communications, Inc.
 * Copyright (c) 2006 Devicescape Software, Inc.
 * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
 * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 *
 */

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/if.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/cache.h>
#include <linux/ethtool.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>

#include <net/ieee80211_radiotap.h>

#include <asm/unaligned.h>

#include "base.h"
#include "reg.h"
#include "debug.h"
#include "ani.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

int ath5k_modparam_nohwcrypt;
module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");

static int modparam_all_channels;
module_param_named(all_channels, modparam_all_channels, bool, S_IRUGO);
MODULE_PARM_DESC(all_channels, "Expose all channels the device can use.");

/* Module info */
MODULE_AUTHOR("Jiri Slaby");
MODULE_AUTHOR("Nick Kossifidis");
MODULE_DESCRIPTION("Support for 5xxx series of Atheros 802.11 wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");

static int ath5k_init(struct ieee80211_hw *hw);
static int ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan,
								bool skip_pcu);
int ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf);

/* Known SREVs */
static const struct ath5k_srev_name srev_names[] = {
#ifdef CONFIG_ATHEROS_AR231X
	{ "5312",	AR5K_VERSION_MAC,	AR5K_SREV_AR5312_R2 },
	{ "5312",	AR5K_VERSION_MAC,	AR5K_SREV_AR5312_R7 },
	{ "2313",	AR5K_VERSION_MAC,	AR5K_SREV_AR2313_R8 },
	{ "2315",	AR5K_VERSION_MAC,	AR5K_SREV_AR2315_R6 },
	{ "2315",	AR5K_VERSION_MAC,	AR5K_SREV_AR2315_R7 },
	{ "2317",	AR5K_VERSION_MAC,	AR5K_SREV_AR2317_R1 },
	{ "2317",	AR5K_VERSION_MAC,	AR5K_SREV_AR2317_R2 },
#else
	{ "5210",	AR5K_VERSION_MAC,	AR5K_SREV_AR5210 },
	{ "5311",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311 },
	{ "5311A",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311A },
	{ "5311B",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311B },
	{ "5211",	AR5K_VERSION_MAC,	AR5K_SREV_AR5211 },
	{ "5212",	AR5K_VERSION_MAC,	AR5K_SREV_AR5212 },
	{ "5213",	AR5K_VERSION_MAC,	AR5K_SREV_AR5213 },
	{ "5213A",	AR5K_VERSION_MAC,	AR5K_SREV_AR5213A },
	{ "2413",	AR5K_VERSION_MAC,	AR5K_SREV_AR2413 },
	{ "2414",	AR5K_VERSION_MAC,	AR5K_SREV_AR2414 },
	{ "5424",	AR5K_VERSION_MAC,	AR5K_SREV_AR5424 },
	{ "5413",	AR5K_VERSION_MAC,	AR5K_SREV_AR5413 },
	{ "5414",	AR5K_VERSION_MAC,	AR5K_SREV_AR5414 },
	{ "2415",	AR5K_VERSION_MAC,	AR5K_SREV_AR2415 },
	{ "5416",	AR5K_VERSION_MAC,	AR5K_SREV_AR5416 },
	{ "5418",	AR5K_VERSION_MAC,	AR5K_SREV_AR5418 },
	{ "2425",	AR5K_VERSION_MAC,	AR5K_SREV_AR2425 },
	{ "2417",	AR5K_VERSION_MAC,	AR5K_SREV_AR2417 },
#endif
	{ "xxxxx",	AR5K_VERSION_MAC,	AR5K_SREV_UNKNOWN },
	{ "5110",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5110 },
	{ "5111",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5111 },
	{ "5111A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5111A },
	{ "2111",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2111 },
	{ "5112",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112 },
	{ "5112A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112A },
	{ "5112B",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112B },
	{ "2112",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112 },
	{ "2112A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112A },
	{ "2112B",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112B },
	{ "2413",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2413 },
	{ "5413",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5413 },
	{ "5424",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5424 },
	{ "5133",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5133 },
#ifdef CONFIG_ATHEROS_AR231X
	{ "2316",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2316 },
	{ "2317",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2317 },
#endif
	{ "xxxxx",	AR5K_VERSION_RAD,	AR5K_SREV_UNKNOWN },
};

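/*
 * Rate table. mac80211 expresses bitrates in units of 100 kbit/s, so
 * .bitrate = 10 below is 1 Mbit/s. hw_value carries the AR5K rate code
 * the hardware expects; the 2, 5.5 and 11 Mbit/s entries also have a
 * short-preamble variant of that code.
 */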
static const struct ieee80211_rate ath5k_rates[] = {
	{ .bitrate = 10,
	  .hw_value = ATH5K_RATE_CODE_1M, },
	{ .bitrate = 20,
	  .hw_value = ATH5K_RATE_CODE_2M,
	  .hw_value_short = ATH5K_RATE_CODE_2M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55,
	  .hw_value = ATH5K_RATE_CODE_5_5M,
	  .hw_value_short = ATH5K_RATE_CODE_5_5M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110,
	  .hw_value = ATH5K_RATE_CODE_11M,
	  .hw_value_short = ATH5K_RATE_CODE_11M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 60,
	  .hw_value = ATH5K_RATE_CODE_6M,
	  .flags = 0 },
	{ .bitrate = 90,
	  .hw_value = ATH5K_RATE_CODE_9M,
	  .flags = 0 },
	{ .bitrate = 120,
	  .hw_value = ATH5K_RATE_CODE_12M,
	  .flags = 0 },
	{ .bitrate = 180,
	  .hw_value = ATH5K_RATE_CODE_18M,
	  .flags = 0 },
	{ .bitrate = 240,
	  .hw_value = ATH5K_RATE_CODE_24M,
	  .flags = 0 },
	{ .bitrate = 360,
	  .hw_value = ATH5K_RATE_CODE_36M,
	  .flags = 0 },
	{ .bitrate = 480,
	  .hw_value = ATH5K_RATE_CODE_48M,
	  .flags = 0 },
	{ .bitrate = 540,
	  .hw_value = ATH5K_RATE_CODE_54M,
	  .flags = 0 },
	/* XR missing */
};

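/*
 * Extend the 15-bit rx timestamp from the descriptor to a full 64-bit
 * TSF. If the low 15 bits of the current TSF are smaller than rstamp,
 * the TSF crossed a 0x8000 boundary after the frame was received, so
 * step back one period before splicing rstamp in.
 *
 * Illustrative example: tsf = 0x10003, rstamp = 0x7ffe. Since
 * 0x0003 < 0x7ffe, we use 0x10003 - 0x8000 = 0x8003 and return
 * (0x8003 & ~0x7fff) | 0x7ffe = 0xfffe, the most recent time whose
 * low 15 bits equal rstamp.
 */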
static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
{
	u64 tsf = ath5k_hw_get_tsf64(ah);

	if ((tsf & 0x7fff) < rstamp)
		tsf -= 0x8000;

	return (tsf & ~0x7fff) | rstamp;
}

const char *
ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val)
{
	const char *name = "xxxxx";
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(srev_names); i++) {
		if (srev_names[i].sr_type != type)
			continue;

		if ((val & 0xf0) == srev_names[i].sr_val)
			name = srev_names[i].sr_name;

		if ((val & 0xff) == srev_names[i].sr_val) {
			name = srev_names[i].sr_name;
			break;
		}
	}

	return name;
}
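
/*
 * Register accessor wrappers, adapting ath5k's register read/write
 * helpers to the function-pointer interface (struct ath_ops) used by
 * the code shared with the other Atheros drivers, which only sees an
 * opaque hw_priv pointer.
 */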
static unsigned int ath5k_ioread32(void *hw_priv, u32 reg_offset)
{
	struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv;
	return ath5k_hw_reg_read(ah, reg_offset);
}

static void ath5k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
{
	struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv;
	ath5k_hw_reg_write(ah, val, reg_offset);
}

static const struct ath_ops ath5k_common_ops = {
	.read = ath5k_ioread32,
	.write = ath5k_iowrite32,
};

/***********************\
* Driver Initialization *
\***********************/

static int ath5k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
{
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct ath5k_softc *sc = hw->priv;
	struct ath_regulatory *regulatory = ath5k_hw_regulatory(sc->ah);

	return ath_reg_notifier_apply(wiphy, request, regulatory);
}

/********************\
* Channel/mode setup *
\********************/

/*
 * Returns true for the channel numbers that are exposed without the
 * all_channels modparam.
 */
static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
{
	if (band == IEEE80211_BAND_2GHZ && chan <= 14)
		return true;

	return	/* UNII 1,2 */
		(((chan & 3) == 0 && chan >= 36 && chan <= 64) ||
		/* midband */
		((chan & 3) == 0 && chan >= 100 && chan <= 140) ||
		/* UNII-3 */
		((chan & 3) == 1 && chan >= 149 && chan <= 165) ||
		/* 802.11j 5.030-5.080 GHz (20MHz) */
		(chan == 8 || chan == 12 || chan == 16) ||
		/* 802.11j 4.9GHz (20MHz) */
		(chan == 184 || chan == 188 || chan == 192 || chan == 196));
}

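/*
 * Fill @channels with up to @max usable channels for @mode. Channel
 * numbers are mapped to center frequencies, checked against what the
 * chipset supports (ath5k_channel_ok) and, unless the all_channels
 * modparam is set, against the standard channel list above. Returns
 * the number of channels written.
 */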
static unsigned int
ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels,
		unsigned int mode, unsigned int max)
{
	unsigned int count, size, chfreq, freq, ch;
	enum ieee80211_band band;

	switch (mode) {
	case AR5K_MODE_11A:
		/* 1..220, but 2GHz frequencies are filtered by check_channel */
		size = 220;
		chfreq = CHANNEL_5GHZ;
		band = IEEE80211_BAND_5GHZ;
		break;
	case AR5K_MODE_11B:
	case AR5K_MODE_11G:
		size = 26;
		chfreq = CHANNEL_2GHZ;
		band = IEEE80211_BAND_2GHZ;
		break;
	default:
		ATH5K_WARN(ah->ah_sc, "bad mode, not copying channels\n");
		return 0;
	}

	count = 0;
	for (ch = 1; ch <= size && count < max; ch++) {
		freq = ieee80211_channel_to_frequency(ch, band);

		if (freq == 0) /* mapping failed - not a standard channel */
			continue;

		/* Check if channel is supported by the chipset */
		if (!ath5k_channel_ok(ah, freq, chfreq))
			continue;

		if (!modparam_all_channels &&
		    !ath5k_is_standard_channel(ch, band))
			continue;

		/* Write channel info and increment counter */
		channels[count].center_freq = freq;
		channels[count].band = band;
		switch (mode) {
		case AR5K_MODE_11A:
		case AR5K_MODE_11G:
			channels[count].hw_value = chfreq | CHANNEL_OFDM;
			break;
		case AR5K_MODE_11B:
			channels[count].hw_value = CHANNEL_B;
		}

		count++;
	}

	return count;
}

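/*
 * Build the reverse mapping from hardware rate codes to indices into
 * the band's bitrate table, used when processing rx/tx status. Codes
 * without a table entry map to -1. For example, with the G band table
 * above, ATH5K_RATE_CODE_2M and its short-preamble variant both map
 * to index 1 (2 Mbit/s).
 */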
static void
ath5k_setup_rate_idx(struct ath5k_softc *sc, struct ieee80211_supported_band *b)
{
	u8 i;

	for (i = 0; i < AR5K_MAX_RATES; i++)
		sc->rate_idx[b->band][i] = -1;

	for (i = 0; i < b->n_bitrates; i++) {
		sc->rate_idx[b->band][b->bitrates[i].hw_value] = i;
		if (b->bitrates[i].hw_value_short)
			sc->rate_idx[b->band][b->bitrates[i].hw_value_short] = i;
	}
}

static int
ath5k_setup_bands(struct ieee80211_hw *hw)
{
	struct ath5k_softc *sc = hw->priv;
	struct ath5k_hw *ah = sc->ah;
	struct ieee80211_supported_band *sband;
	int max_c, count_c = 0;
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(sc->sbands) < IEEE80211_NUM_BANDS);
	max_c = ARRAY_SIZE(sc->channels);

	/* 2GHz band */
	sband = &sc->sbands[IEEE80211_BAND_2GHZ];
	sband->band = IEEE80211_BAND_2GHZ;
	sband->bitrates = &sc->rates[IEEE80211_BAND_2GHZ][0];

	if (test_bit(AR5K_MODE_11G, sc->ah->ah_capabilities.cap_mode)) {
		/* G mode */
		memcpy(sband->bitrates, &ath5k_rates[0],
		       sizeof(struct ieee80211_rate) * 12);
		sband->n_bitrates = 12;

		sband->channels = sc->channels;
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
					AR5K_MODE_11G, max_c);

		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
		count_c = sband->n_channels;
		max_c -= count_c;
	} else if (test_bit(AR5K_MODE_11B, sc->ah->ah_capabilities.cap_mode)) {
		/* B mode */
		memcpy(sband->bitrates, &ath5k_rates[0],
		       sizeof(struct ieee80211_rate) * 4);
		sband->n_bitrates = 4;

		/* The 5211 only supports B rates and uses 4-bit rate codes
		 * (e.g. normally we have 0x1B for 1M, but on the 5211 it is
		 * 0x0B); fix them up here:
		 */
		if (ah->ah_version == AR5K_AR5211) {
			for (i = 0; i < 4; i++) {
				sband->bitrates[i].hw_value =
					sband->bitrates[i].hw_value & 0xF;
				sband->bitrates[i].hw_value_short =
					sband->bitrates[i].hw_value_short & 0xF;
			}
		}

		sband->channels = sc->channels;
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
					AR5K_MODE_11B, max_c);

		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
		count_c = sband->n_channels;
		max_c -= count_c;
	}
	ath5k_setup_rate_idx(sc, sband);

	/* 5GHz band, A mode */
	if (test_bit(AR5K_MODE_11A, sc->ah->ah_capabilities.cap_mode)) {
		sband = &sc->sbands[IEEE80211_BAND_5GHZ];
		sband->band = IEEE80211_BAND_5GHZ;
		sband->bitrates = &sc->rates[IEEE80211_BAND_5GHZ][0];

		memcpy(sband->bitrates, &ath5k_rates[4],
		       sizeof(struct ieee80211_rate) * 8);
		sband->n_bitrates = 8;

		sband->channels = &sc->channels[count_c];
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
					AR5K_MODE_11A, max_c);

		hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
	}
	ath5k_setup_rate_idx(sc, sband);

	ath5k_debug_dump_bands(sc);

	return 0;
}

/*
 * Set/change channels. We always reset the chip.
 * To accomplish this we must first clean up any pending DMA,
 * then restart things, a la ath5k_init.
 *
 * Called with sc->lock.
 */
int
ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan)
{
	ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
		  "channel set, resetting (%u -> %u MHz)\n",
		  sc->curchan->center_freq, chan->center_freq);

	/*
	 * To switch channels clear any pending DMA operations;
	 * wait long enough for the RX fifo to drain, reset the
	 * hardware at the new frequency, and then re-enable
	 * the relevant bits of the h/w.
	 */
	return ath5k_reset(sc, chan, true);
}

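/*
 * Interface iterator (used with ieee80211_iterate_active_interfaces).
 * Accumulates a BSSID mask with a 1 in every bit position where all
 * active interface addresses agree with the hardware MAC address, and
 * works out the combined operating mode. Illustrative example: if the
 * hardware address ends in 0x00 and a vif address ends in 0x01, the
 * last mask byte becomes 0xff & ~(0x00 ^ 0x01) = 0xfe.
 */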
void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct ath5k_vif_iter_data *iter_data = data;
	int i;
	struct ath5k_vif *avf = (void *)vif->drv_priv;

	if (iter_data->hw_macaddr)
		for (i = 0; i < ETH_ALEN; i++)
			iter_data->mask[i] &=
				~(iter_data->hw_macaddr[i] ^ mac[i]);

	if (!iter_data->found_active) {
		iter_data->found_active = true;
		memcpy(iter_data->active_mac, mac, ETH_ALEN);
	}

	if (iter_data->need_set_hw_addr && iter_data->hw_macaddr)
		if (compare_ether_addr(iter_data->hw_macaddr, mac) == 0)
			iter_data->need_set_hw_addr = false;

	if (!iter_data->any_assoc) {
		if (avf->assoc)
			iter_data->any_assoc = true;
	}

	/* Calculate combined mode - when APs are active, operate in AP mode.
	 * Otherwise use the mode of the new interface. This can currently
	 * only deal with combinations of APs and STAs. Only one ad-hoc
	 * interface is allowed.
	 */
	if (avf->opmode == NL80211_IFTYPE_AP)
		iter_data->opmode = NL80211_IFTYPE_AP;
	else {
		if (avf->opmode == NL80211_IFTYPE_STATION)
			iter_data->n_stas++;
		if (iter_data->opmode == NL80211_IFTYPE_UNSPECIFIED)
			iter_data->opmode = avf->opmode;
	}
}

void
ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
				   struct ieee80211_vif *vif)
{
	struct ath_common *common = ath5k_hw_common(sc->ah);
	struct ath5k_vif_iter_data iter_data;
	u32 rfilt;

	/*
	 * Use the hardware MAC address as reference, the hardware uses it
	 * together with the BSSID mask when matching addresses.
	 */
	iter_data.hw_macaddr = common->macaddr;
	memset(&iter_data.mask, 0xff, ETH_ALEN);
	iter_data.found_active = false;
	iter_data.need_set_hw_addr = true;
	iter_data.opmode = NL80211_IFTYPE_UNSPECIFIED;
	iter_data.n_stas = 0;

	if (vif)
		ath5k_vif_iter(&iter_data, vif->addr, vif);

	/* Get list of all active MAC addresses */
	ieee80211_iterate_active_interfaces_atomic(sc->hw, ath5k_vif_iter,
						   &iter_data);
	memcpy(sc->bssidmask, iter_data.mask, ETH_ALEN);

	sc->opmode = iter_data.opmode;
	if (sc->opmode == NL80211_IFTYPE_UNSPECIFIED)
		/* Nothing active, default to station mode */
		sc->opmode = NL80211_IFTYPE_STATION;

	ath5k_hw_set_opmode(sc->ah, sc->opmode);
	ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "mode setup opmode %d (%s)\n",
		  sc->opmode, ath_opmode_to_string(sc->opmode));

	if (iter_data.need_set_hw_addr && iter_data.found_active)
		ath5k_hw_set_lladdr(sc->ah, iter_data.active_mac);

	if (ath5k_hw_hasbssidmask(sc->ah))
		ath5k_hw_set_bssid_mask(sc->ah, sc->bssidmask);

	/* Set up RX Filter */
	if (iter_data.n_stas > 1) {
		/* If you have multiple STA interfaces connected to
		 * different APs, ARPs are not received (most of the time?)
		 * Enabling PROMISC appears to fix that problem.
		 */
		sc->filter_flags |= AR5K_RX_FILTER_PROM;
	}

	rfilt = sc->filter_flags;
	ath5k_hw_set_rx_filter(sc->ah, rfilt);
	ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);
}

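/*
 * Translate a hardware rate index from an rx/tx descriptor into an
 * index into the current band's bitrate table, falling back to the
 * base rate (index 0) on out-of-range or unmapped values.
 */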
static inline int
ath5k_hw_to_driver_rix(struct ath5k_softc *sc, int hw_rix)
{
	int rix;

	/* return base rate on errors */
	if (WARN(hw_rix < 0 || hw_rix >= AR5K_MAX_RATES,
			"hw_rix out of bounds: %x\n", hw_rix))
		return 0;

	rix = sc->rate_idx[sc->curchan->band][hw_rix];
	if (WARN(rix < 0, "invalid hw_rix: %x\n", hw_rix))
		rix = 0;

	return rix;
}

/***************\
* Buffers setup *
\***************/

static
struct sk_buff *ath5k_rx_skb_alloc(struct ath5k_softc *sc, dma_addr_t *skb_addr)
{
	struct ath_common *common = ath5k_hw_common(sc->ah);
	struct sk_buff *skb;

	/*
	 * Allocate buffer with headroom_needed space for the
	 * fake physical layer header at the start.
	 */
	skb = ath_rxbuf_alloc(common,
			      common->rx_bufsize,
			      GFP_ATOMIC);

	if (!skb) {
		ATH5K_ERR(sc, "can't alloc skbuff of size %u\n",
				common->rx_bufsize);
		return NULL;
	}

	*skb_addr = dma_map_single(sc->dev,
				   skb->data, common->rx_bufsize,
				   DMA_FROM_DEVICE);

	if (unlikely(dma_mapping_error(sc->dev, *skb_addr))) {
		ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__);
		dev_kfree_skb(skb);
		return NULL;
	}
	return skb;
}

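/*
 * (Re)initialize an rx buffer: allocate and DMA-map an skb if the
 * buffer has none, then hook its descriptor into the self-linked rx
 * descriptor chain (see the comment below for why the list is kept
 * self-linked).
 */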
static int
ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
{
	struct ath5k_hw *ah = sc->ah;
	struct sk_buff *skb = bf->skb;
	struct ath5k_desc *ds;
	int ret;

	if (!skb) {
		skb = ath5k_rx_skb_alloc(sc, &bf->skbaddr);
		if (!skb)
			return -ENOMEM;
		bf->skb = skb;
	}

	/*
	 * Setup descriptors.  For receive we always terminate
	 * the descriptor list with a self-linked entry so we'll
	 * not get overrun under high load (as can happen with a
	 * 5212 when ANI processing enables PHY error frames).
	 *
	 * To ensure the last descriptor is self-linked we create
	 * each descriptor as self-linked and add it to the end.  As
	 * each additional descriptor is added the previous self-linked
	 * entry is "fixed" naturally.  This should be safe even
	 * if DMA is happening.  When processing RX interrupts we
	 * never remove/process the last, self-linked, entry on the
	 * descriptor list.  This ensures the hardware always has
	 * someplace to write a new frame.
	 */
	ds = bf->desc;
	ds->ds_link = bf->daddr;	/* link to self */
	ds->ds_data = bf->skbaddr;
	ret = ath5k_hw_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0);
	if (ret) {
		ATH5K_ERR(sc, "%s: could not setup RX desc\n", __func__);
		return ret;
	}

	if (sc->rxlink != NULL)
		*sc->rxlink = bf->daddr;
	sc->rxlink = &ds->ds_link;
	return 0;
}

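/* Map a frame to the packet type the hardware expects in the tx
 * descriptor, based on the 802.11 frame_control field. */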
static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	enum ath5k_pkt_type htype;
	__le16 fc;

	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;

	if (ieee80211_is_beacon(fc))
		htype = AR5K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = AR5K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = AR5K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = AR5K_PKT_TYPE_PSPOLL;
	else
		htype = AR5K_PKT_TYPE_NORMAL;

	return htype;
}

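/*
 * Fill the tx descriptor for a frame: DMA-map the skb, set up rate,
 * key, RTS/CTS and multi-rate-retry parameters, then link the buffer
 * onto the hardware queue and (re)start tx DMA.
 */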
static int
ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
		  struct ath5k_txq *txq, int padsize)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath5k_desc *ds = bf->desc;
	struct sk_buff *skb = bf->skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	unsigned int pktlen, flags, keyidx = AR5K_TXKEYIX_INVALID;
	struct ieee80211_rate *rate;
	unsigned int mrr_rate[3], mrr_tries[3];
	int i, ret;
	u16 hw_rate;
	u16 cts_rate = 0;
	u16 duration = 0;
	u8 rc_flags;

	flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK;

	/* XXX endianness */
	bf->skbaddr = dma_map_single(sc->dev, skb->data, skb->len,
			DMA_TO_DEVICE);

	rate = ieee80211_get_tx_rate(sc->hw, info);
	if (!rate) {
		ret = -EINVAL;
		goto err_unmap;
	}

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= AR5K_TXDESC_NOACK;

	rc_flags = info->control.rates[0].flags;
	hw_rate = (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) ?
		rate->hw_value_short : rate->hw_value;

	pktlen = skb->len;

	/* FIXME: If we are in g mode and rate is a CCK rate
	 * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
	 * from tx power (value is in dB units already) */
	if (info->control.hw_key) {
		keyidx = info->control.hw_key->hw_key_idx;
		pktlen += info->control.hw_key->icv_len;
	}
	if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
		flags |= AR5K_TXDESC_RTSENA;
		cts_rate = ieee80211_get_rts_cts_rate(sc->hw, info)->hw_value;
		duration = le16_to_cpu(ieee80211_rts_duration(sc->hw,
			info->control.vif, pktlen, info));
	}
	if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
		flags |= AR5K_TXDESC_CTSENA;
		cts_rate = ieee80211_get_rts_cts_rate(sc->hw, info)->hw_value;
		duration = le16_to_cpu(ieee80211_ctstoself_duration(sc->hw,
			info->control.vif, pktlen, info));
	}
	ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
		ieee80211_get_hdrlen_from_skb(skb), padsize,
		get_hw_packet_type(skb),
		(sc->power_level * 2),
		hw_rate,
		info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags,
		cts_rate, duration);
	if (ret)
		goto err_unmap;

	memset(mrr_rate, 0, sizeof(mrr_rate));
	memset(mrr_tries, 0, sizeof(mrr_tries));
	for (i = 0; i < 3; i++) {
		rate = ieee80211_get_alt_retry_rate(sc->hw, info, i);
		if (!rate)
			break;

		mrr_rate[i] = rate->hw_value;
		mrr_tries[i] = info->control.rates[i + 1].count;
	}

	ath5k_hw_setup_mrr_tx_desc(ah, ds,
		mrr_rate[0], mrr_tries[0],
		mrr_rate[1], mrr_tries[1],
		mrr_rate[2], mrr_tries[2]);

	ds->ds_link = 0;
	ds->ds_data = bf->skbaddr;

	spin_lock_bh(&txq->lock);
	list_add_tail(&bf->list, &txq->q);
	txq->txq_len++;
	if (txq->link == NULL) /* is this first packet? */
		ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);
	else /* no, so only link it */
		*txq->link = bf->daddr;

	txq->link = &ds->ds_link;
	ath5k_hw_start_tx_dma(ah, txq->qnum);
	mmiowb();
	spin_unlock_bh(&txq->lock);

	return 0;
err_unmap:
	dma_unmap_single(sc->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
	return ret;
}

/*******************\
* Descriptors setup *
\*******************/

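/*
 * Allocate one DMA-coherent block holding all tx, rx and beacon
 * descriptors, plus a matching array of ath5k_buf entries, and
 * distribute them over the rxbuf, txbuf and bcbuf lists.
 */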
static int
ath5k_desc_alloc(struct ath5k_softc *sc)
{
	struct ath5k_desc *ds;
	struct ath5k_buf *bf;
	dma_addr_t da;
	unsigned int i;
	int ret;

	/* allocate descriptors */
	sc->desc_len = sizeof(struct ath5k_desc) *
			(ATH_TXBUF + ATH_RXBUF + ATH_BCBUF + 1);

	sc->desc = dma_alloc_coherent(sc->dev, sc->desc_len,
				&sc->desc_daddr, GFP_KERNEL);
	if (sc->desc == NULL) {
		ATH5K_ERR(sc, "can't allocate descriptors\n");
		ret = -ENOMEM;
		goto err;
	}
	ds = sc->desc;
	da = sc->desc_daddr;
	ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "DMA map: %p (%zu) -> %llx\n",
		ds, sc->desc_len, (unsigned long long)sc->desc_daddr);

	bf = kcalloc(1 + ATH_TXBUF + ATH_RXBUF + ATH_BCBUF,
			sizeof(struct ath5k_buf), GFP_KERNEL);
	if (bf == NULL) {
		ATH5K_ERR(sc, "can't allocate bufptr\n");
		ret = -ENOMEM;
		goto err_free;
	}
	sc->bufptr = bf;

	INIT_LIST_HEAD(&sc->rxbuf);
	for (i = 0; i < ATH_RXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &sc->rxbuf);
	}

	INIT_LIST_HEAD(&sc->txbuf);
	sc->txbuf_len = ATH_TXBUF;
	for (i = 0; i < ATH_TXBUF; i++, bf++, ds++,
			da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &sc->txbuf);
	}

	/* beacon buffers */
	INIT_LIST_HEAD(&sc->bcbuf);
	for (i = 0; i < ATH_BCBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &sc->bcbuf);
	}

	return 0;
err_free:
	dma_free_coherent(sc->dev, sc->desc_len, sc->desc, sc->desc_daddr);
err:
	sc->desc = NULL;
	return ret;
}

void
ath5k_txbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf)
{
	BUG_ON(!bf);
	if (!bf->skb)
		return;
	dma_unmap_single(sc->dev, bf->skbaddr, bf->skb->len,
			DMA_TO_DEVICE);
	dev_kfree_skb_any(bf->skb);
	bf->skb = NULL;
	bf->skbaddr = 0;
	bf->desc->ds_data = 0;
}

void
ath5k_rxbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath_common *common = ath5k_hw_common(ah);

	BUG_ON(!bf);
	if (!bf->skb)
		return;
	dma_unmap_single(sc->dev, bf->skbaddr, common->rx_bufsize,
			DMA_FROM_DEVICE);
	dev_kfree_skb_any(bf->skb);
	bf->skb = NULL;
	bf->skbaddr = 0;
	bf->desc->ds_data = 0;
}

static void
ath5k_desc_free(struct ath5k_softc *sc)
{
	struct ath5k_buf *bf;

	list_for_each_entry(bf, &sc->txbuf, list)
		ath5k_txbuf_free_skb(sc, bf);
	list_for_each_entry(bf, &sc->rxbuf, list)
		ath5k_rxbuf_free_skb(sc, bf);
	list_for_each_entry(bf, &sc->bcbuf, list)
		ath5k_txbuf_free_skb(sc, bf);

	/* Free memory associated with all descriptors */
	dma_free_coherent(sc->dev, sc->desc_len, sc->desc, sc->desc_daddr);
	sc->desc = NULL;
	sc->desc_daddr = 0;

	kfree(sc->bufptr);
	sc->bufptr = NULL;
}


/**************\
* Queues setup *
\**************/

static struct ath5k_txq *
ath5k_txq_setup(struct ath5k_softc *sc,
		int qtype, int subtype)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath5k_txq *txq;
	struct ath5k_txq_info qi = {
		.tqi_subtype = subtype,
		/* XXX: default values not correct for B and XR channels,
		 * but who cares? */
		.tqi_aifs = AR5K_TUNE_AIFS,
		.tqi_cw_min = AR5K_TUNE_CWMIN,
		.tqi_cw_max = AR5K_TUNE_CWMAX
	};
	int qnum;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors.  Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames.  Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 */
	qi.tqi_flags = AR5K_TXQ_FLAG_TXEOLINT_ENABLE |
				AR5K_TXQ_FLAG_TXDESCINT_ENABLE;
	qnum = ath5k_hw_setup_tx_queue(ah, qtype, &qi);
	if (qnum < 0) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return ERR_PTR(qnum);
	}
	if (qnum >= ARRAY_SIZE(sc->txqs)) {
		ATH5K_ERR(sc, "hw qnum %u out of range, max %tu!\n",
			qnum, ARRAY_SIZE(sc->txqs));
		ath5k_hw_release_tx_queue(ah, qnum);
		return ERR_PTR(-EINVAL);
	}
	txq = &sc->txqs[qnum];
	if (!txq->setup) {
		txq->qnum = qnum;
		txq->link = NULL;
		INIT_LIST_HEAD(&txq->q);
		spin_lock_init(&txq->lock);
		txq->setup = true;
		txq->txq_len = 0;
		txq->txq_max = ATH5K_TXQ_LEN_MAX;
		txq->txq_poll_mark = false;
		txq->txq_stuck = 0;
	}
	return &sc->txqs[qnum];
}

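/*
 * Set up the hardware beacon queue; returns the hw queue number
 * (which the driver keeps as sc->bhalq, see ath5k_beaconq_config)
 * or a negative error from ath5k_hw_setup_tx_queue().
 */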
static int
ath5k_beaconq_setup(struct ath5k_hw *ah)
{
	struct ath5k_txq_info qi = {
		/* XXX: default values not correct for B and XR channels,
		 * but who cares? */
		.tqi_aifs = AR5K_TUNE_AIFS,
		.tqi_cw_min = AR5K_TUNE_CWMIN,
		.tqi_cw_max = AR5K_TUNE_CWMAX,
		/* NB: for dynamic turbo, don't enable any other interrupts */
		.tqi_flags = AR5K_TXQ_FLAG_TXDESCINT_ENABLE
	};

	return ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_BEACON, &qi);
}

static int
ath5k_beaconq_config(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath5k_txq_info qi;
	int ret;

	ret = ath5k_hw_get_tx_queueprops(ah, sc->bhalq, &qi);
	if (ret)
		goto err;

	if (sc->opmode == NL80211_IFTYPE_AP ||
		sc->opmode == NL80211_IFTYPE_MESH_POINT) {
		/*
		 * Always burst out beacon and CAB traffic
		 * (aifs = cwmin = cwmax = 0)
		 */
		qi.tqi_aifs = 0;
		qi.tqi_cw_min = 0;
		qi.tqi_cw_max = 0;
	} else if (sc->opmode == NL80211_IFTYPE_ADHOC) {
		/*
		 * Adhoc mode; backoff between 0 and (2 * cw_min).
		 */
		qi.tqi_aifs = 0;
		qi.tqi_cw_min = 0;
		qi.tqi_cw_max = 2 * AR5K_TUNE_CWMIN;
	}

	ATH5K_DBG(sc, ATH5K_DEBUG_BEACON,
		"beacon queueprops tqi_aifs:%d tqi_cw_min:%d tqi_cw_max:%d\n",
		qi.tqi_aifs, qi.tqi_cw_min, qi.tqi_cw_max);

	ret = ath5k_hw_set_tx_queueprops(ah, sc->bhalq, &qi);
	if (ret) {
		ATH5K_ERR(sc, "%s: unable to update parameters for beacon "
			"hardware queue!\n", __func__);
		goto err;
	}
	ret = ath5k_hw_reset_tx_queue(ah, sc->bhalq); /* push to h/w */
	if (ret)
		goto err;

	/* reconfigure cabq with ready time to 80% of beacon_interval */
	ret = ath5k_hw_get_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
	if (ret)
		goto err;

	qi.tqi_ready_time = (sc->bintval * 80) / 100;
	ret = ath5k_hw_set_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
	if (ret)
		goto err;

	ret = ath5k_hw_reset_tx_queue(ah, AR5K_TX_QUEUE_ID_CAB);
err:
	return ret;
}

/**
 * ath5k_drain_tx_buffs - Empty tx buffers
 *
 * @sc: The &struct ath5k_softc
 *
 * Empty tx buffers from all queues in preparation
 * of a reset or during shutdown.
 *
 * NB:	this assumes output has been stopped and
 *	we do not need to block ath5k_tx_tasklet
 */
static void
ath5k_drain_tx_buffs(struct ath5k_softc *sc)
{
	struct ath5k_txq *txq;
	struct ath5k_buf *bf, *bf0;
	int i;

	for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) {
		if (sc->txqs[i].setup) {
			txq = &sc->txqs[i];
			spin_lock_bh(&txq->lock);
			list_for_each_entry_safe(bf, bf0, &txq->q, list) {
				ath5k_debug_printtxbuf(sc, bf);

				ath5k_txbuf_free_skb(sc, bf);

				spin_lock_bh(&sc->txbuflock);
				list_move_tail(&bf->list, &sc->txbuf);
				sc->txbuf_len++;
				txq->txq_len--;
				spin_unlock_bh(&sc->txbuflock);
			}
			txq->link = NULL;
			txq->txq_poll_mark = false;
			spin_unlock_bh(&txq->lock);
		}
	}
}

static void
ath5k_txq_release(struct ath5k_softc *sc)
{
	struct ath5k_txq *txq = sc->txqs;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(sc->txqs); i++, txq++)
		if (txq->setup) {
			ath5k_hw_release_tx_queue(sc->ah, txq->qnum);
			txq->setup = false;
		}
}


/*************\
* RX Handling *
\*************/

/*
 * Enable the receive h/w following a reset.
 */
static int
ath5k_rx_start(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath_common *common = ath5k_hw_common(ah);
	struct ath5k_buf *bf;
	int ret;

	common->rx_bufsize = roundup(IEEE80211_MAX_FRAME_LEN, common->cachelsz);

	ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "cachelsz %u rx_bufsize %u\n",
		  common->cachelsz, common->rx_bufsize);

	spin_lock_bh(&sc->rxbuflock);
	sc->rxlink = NULL;
	list_for_each_entry(bf, &sc->rxbuf, list) {
		ret = ath5k_rxbuf_setup(sc, bf);
		if (ret != 0) {
			spin_unlock_bh(&sc->rxbuflock);
			goto err;
		}
	}
	bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list);
	ath5k_hw_set_rxdp(ah, bf->daddr);
	spin_unlock_bh(&sc->rxbuflock);

	ath5k_hw_start_rx_dma(ah);	/* enable recv descriptors */
	ath5k_update_bssid_mask_and_opmode(sc, NULL); /* set filters, etc. */
	ath5k_hw_start_rx_pcu(ah);	/* re-enable PCU/DMA engine */

	return 0;
err:
	return ret;
}

/*
 * Disable the receive logic on the PCU (DRU)
 * in preparation for a shutdown.
 *
 * Note: this doesn't stop rx DMA; ath5k_hw_dma_stop
 * does.
 */
static void
ath5k_rx_stop(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;

	ath5k_hw_set_rx_filter(ah, 0);	/* clear recv filter */
	ath5k_hw_stop_rx_pcu(ah);	/* disable PCU */

	ath5k_debug_printrxbuffs(sc, ah);
}

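/*
 * Work out whether the hardware already decrypted this frame, so the
 * rx status can carry RX_FLAG_DECRYPTED to mac80211. Frames decrypted
 * with a default key need the key index recovered from the IV; see
 * the comment in the body below.
 */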
static unsigned int
ath5k_rx_decrypted(struct ath5k_softc *sc, struct sk_buff *skb,
		   struct ath5k_rx_status *rs)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath_common *common = ath5k_hw_common(ah);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int keyix, hlen;

	if (!(rs->rs_status & AR5K_RXERR_DECRYPT) &&
			rs->rs_keyix != AR5K_RXKEYIX_INVALID)
		return RX_FLAG_DECRYPTED;

	/* Apparently when a default key is used to decrypt the packet
	   the hw does not set the index used to decrypt.  In such cases
	   get the index from the packet. */
	hlen = ieee80211_hdrlen(hdr->frame_control);
	if (ieee80211_has_protected(hdr->frame_control) &&
	    !(rs->rs_status & AR5K_RXERR_DECRYPT) &&
	    skb->len >= hlen + 4) {
		keyix = skb->data[hlen + 3] >> 6;

		if (test_bit(keyix, common->keymap))
			return RX_FLAG_DECRYPTED;
	}

	return 0;
}

static void
ath5k_check_ibss_tsf