base.c 78.8 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
/*-
 * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
 * Copyright (c) 2004-2005 Atheros Communications, Inc.
 * Copyright (c) 2006 Devicescape Software, Inc.
 * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
 * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 *
 */

#include <linux/module.h>
#include <linux/delay.h>
Jiri Slaby's avatar
Jiri Slaby committed
45
#include <linux/hardirq.h>
46
#include <linux/if.h>
Jiri Slaby's avatar
Jiri Slaby committed
47
#include <linux/io.h>
48
49
50
51
#include <linux/netdevice.h>
#include <linux/cache.h>
#include <linux/ethtool.h>
#include <linux/uaccess.h>
52
#include <linux/slab.h>
53
#include <linux/etherdevice.h>
54
55
56
57
58
59
60
61

#include <net/ieee80211_radiotap.h>

#include <asm/unaligned.h>

#include "base.h"
#include "reg.h"
#include "debug.h"
62
#include "ani.h"
63

64
65
66
#define CREATE_TRACE_POINTS
#include "trace.h"

67
68
/* Module parameters */

/* Non-zero disables the hardware encryption engine (software crypto only).
 * Non-static: referenced from other ath5k translation units. */
int ath5k_modparam_nohwcrypt;
module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");

/* Non-zero registers every channel the chipset can tune to, not just the
 * regulatory-standard set filtered by ath5k_is_standard_channel(). */
static int modparam_all_channels;
module_param_named(all_channels, modparam_all_channels, bool, S_IRUGO);
MODULE_PARM_DESC(all_channels, "Expose all channels the device can use.");

/* Non-zero enables the faster (no full reset) channel-switch path on
 * radios that support it. */
static int modparam_fastchanswitch;
module_param_named(fastchanswitch, modparam_fastchanswitch, bool, S_IRUGO);
MODULE_PARM_DESC(fastchanswitch, "Enable fast channel switching for AR2413/AR5413 radios.");


80
81
82
83
84
85
86
/* Module info */
MODULE_AUTHOR("Jiri Slaby");
MODULE_AUTHOR("Nick Kossifidis");
MODULE_DESCRIPTION("Support for 5xxx series of Atheros 802.11 wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");

87
static int ath5k_init(struct ieee80211_hw *hw);
88
static int ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
89
								bool skip_pcu);
90
91

/* Known SREVs */
Jiri Slaby's avatar
Jiri Slaby committed
92
static const struct ath5k_srev_name srev_names[] = {
Felix Fietkau's avatar
Felix Fietkau committed
93
94
95
96
97
98
99
100
101
#ifdef CONFIG_ATHEROS_AR231X
	{ "5312",	AR5K_VERSION_MAC,	AR5K_SREV_AR5312_R2 },
	{ "5312",	AR5K_VERSION_MAC,	AR5K_SREV_AR5312_R7 },
	{ "2313",	AR5K_VERSION_MAC,	AR5K_SREV_AR2313_R8 },
	{ "2315",	AR5K_VERSION_MAC,	AR5K_SREV_AR2315_R6 },
	{ "2315",	AR5K_VERSION_MAC,	AR5K_SREV_AR2315_R7 },
	{ "2317",	AR5K_VERSION_MAC,	AR5K_SREV_AR2317_R1 },
	{ "2317",	AR5K_VERSION_MAC,	AR5K_SREV_AR2317_R2 },
#else
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
	{ "5210",	AR5K_VERSION_MAC,	AR5K_SREV_AR5210 },
	{ "5311",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311 },
	{ "5311A",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311A },
	{ "5311B",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311B },
	{ "5211",	AR5K_VERSION_MAC,	AR5K_SREV_AR5211 },
	{ "5212",	AR5K_VERSION_MAC,	AR5K_SREV_AR5212 },
	{ "5213",	AR5K_VERSION_MAC,	AR5K_SREV_AR5213 },
	{ "5213A",	AR5K_VERSION_MAC,	AR5K_SREV_AR5213A },
	{ "2413",	AR5K_VERSION_MAC,	AR5K_SREV_AR2413 },
	{ "2414",	AR5K_VERSION_MAC,	AR5K_SREV_AR2414 },
	{ "5424",	AR5K_VERSION_MAC,	AR5K_SREV_AR5424 },
	{ "5413",	AR5K_VERSION_MAC,	AR5K_SREV_AR5413 },
	{ "5414",	AR5K_VERSION_MAC,	AR5K_SREV_AR5414 },
	{ "2415",	AR5K_VERSION_MAC,	AR5K_SREV_AR2415 },
	{ "5416",	AR5K_VERSION_MAC,	AR5K_SREV_AR5416 },
	{ "5418",	AR5K_VERSION_MAC,	AR5K_SREV_AR5418 },
	{ "2425",	AR5K_VERSION_MAC,	AR5K_SREV_AR2425 },
	{ "2417",	AR5K_VERSION_MAC,	AR5K_SREV_AR2417 },
Felix Fietkau's avatar
Felix Fietkau committed
120
#endif
121
	{ "xxxxx",	AR5K_VERSION_MAC,	AR5K_SREV_UNKNOWN },
122
123
	{ "5110",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5110 },
	{ "5111",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5111 },
124
	{ "5111A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5111A },
125
126
127
	{ "2111",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2111 },
	{ "5112",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112 },
	{ "5112A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112A },
128
	{ "5112B",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112B },
129
130
	{ "2112",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112 },
	{ "2112A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112A },
131
132
133
134
	{ "2112B",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112B },
	{ "2413",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2413 },
	{ "5413",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5413 },
	{ "5424",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5424 },
135
	{ "5133",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5133 },
Felix Fietkau's avatar
Felix Fietkau committed
136
137
138
139
#ifdef CONFIG_ATHEROS_AR231X
	{ "2316",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2316 },
	{ "2317",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2317 },
#endif
140
141
142
	{ "xxxxx",	AR5K_VERSION_RAD,	AR5K_SREV_UNKNOWN },
};

Jiri Slaby's avatar
Jiri Slaby committed
143
static const struct ieee80211_rate ath5k_rates[] = {
Bruno Randolf's avatar
Bruno Randolf committed
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
	{ .bitrate = 10,
	  .hw_value = ATH5K_RATE_CODE_1M, },
	{ .bitrate = 20,
	  .hw_value = ATH5K_RATE_CODE_2M,
	  .hw_value_short = ATH5K_RATE_CODE_2M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55,
	  .hw_value = ATH5K_RATE_CODE_5_5M,
	  .hw_value_short = ATH5K_RATE_CODE_5_5M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110,
	  .hw_value = ATH5K_RATE_CODE_11M,
	  .hw_value_short = ATH5K_RATE_CODE_11M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 60,
	  .hw_value = ATH5K_RATE_CODE_6M,
	  .flags = 0 },
	{ .bitrate = 90,
	  .hw_value = ATH5K_RATE_CODE_9M,
	  .flags = 0 },
	{ .bitrate = 120,
	  .hw_value = ATH5K_RATE_CODE_12M,
	  .flags = 0 },
	{ .bitrate = 180,
	  .hw_value = ATH5K_RATE_CODE_18M,
	  .flags = 0 },
	{ .bitrate = 240,
	  .hw_value = ATH5K_RATE_CODE_24M,
	  .flags = 0 },
	{ .bitrate = 360,
	  .hw_value = ATH5K_RATE_CODE_36M,
	  .flags = 0 },
	{ .bitrate = 480,
	  .hw_value = ATH5K_RATE_CODE_48M,
	  .flags = 0 },
	{ .bitrate = 540,
	  .hw_value = ATH5K_RATE_CODE_54M,
	  .flags = 0 },
	/* XR missing */
};

185
186
187
188
189
190
191
192
193
194
/*
 * Extend a 15-bit hardware RX timestamp to a full 64-bit TSF value,
 * using the current hardware TSF as reference.  If the low 15 TSF bits
 * have already wrapped past rstamp, step back one 0x8000 period so the
 * reconstructed time is not in the future.
 */
static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
{
	u64 now = ath5k_hw_get_tsf64(ah);

	if (rstamp > (now & 0x7fff))
		now -= 0x8000;

	return (now & ~0x7fff) | rstamp;
}

195
const char *
196
197
198
199
200
201
202
203
ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val)
{
	const char *name = "xxxxx";
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(srev_names); i++) {
		if (srev_names[i].sr_type != type)
			continue;
204
205
206
207
208

		if ((val & 0xf0) == srev_names[i].sr_val)
			name = srev_names[i].sr_name;

		if ((val & 0xff) == srev_names[i].sr_val) {
209
210
211
212
213
214
215
			name = srev_names[i].sr_name;
			break;
		}
	}

	return name;
}
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
/* ath_common register-read hook: forward to the ath5k MMIO accessor. */
static unsigned int ath5k_ioread32(void *hw_priv, u32 reg_offset)
{
	return ath5k_hw_reg_read((struct ath5k_hw *)hw_priv, reg_offset);
}

/* ath_common register-write hook: forward to the ath5k MMIO accessor. */
static void ath5k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
{
	ath5k_hw_reg_write((struct ath5k_hw *)hw_priv, val, reg_offset);
}

/* Register access ops handed to the shared ath layer. */
static const struct ath_ops ath5k_common_ops = {
	.read = ath5k_ioread32,
	.write = ath5k_iowrite32,
};
232

233
234
235
236
237
/***********************\
* Driver Initialization *
\***********************/

static int ath5k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
238
{
239
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
240
241
	struct ath5k_hw *ah = hw->priv;
	struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);
242

243
244
	return ath_reg_notifier_apply(wiphy, request, regulatory);
}
245

246
247
248
/********************\
* Channel/mode setup *
\********************/
249

250
251
252
/*
 * Returns true for the channel numbers used without all_channels modparam.
 */
253
static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
254
{
255
256
257
258
259
	if (band == IEEE80211_BAND_2GHZ && chan <= 14)
		return true;

	return	/* UNII 1,2 */
		(((chan & 3) == 0 && chan >= 36 && chan <= 64) ||
260
261
262
		/* midband */
		((chan & 3) == 0 && chan >= 100 && chan <= 140) ||
		/* UNII-3 */
263
264
265
266
267
		((chan & 3) == 1 && chan >= 149 && chan <= 165) ||
		/* 802.11j 5.030-5.080 GHz (20MHz) */
		(chan == 8 || chan == 12 || chan == 16) ||
		/* 802.11j 4.9GHz (20MHz) */
		(chan == 184 || chan == 188 || chan == 192 || chan == 196));
268
}
269

270
static unsigned int
271
272
ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels,
		unsigned int mode, unsigned int max)
273
{
274
	unsigned int count, size, chfreq, freq, ch;
275
	enum ieee80211_band band;
276

277
278
279
	switch (mode) {
	case AR5K_MODE_11A:
		/* 1..220, but 2GHz frequencies are filtered by check_channel */
280
		size = 220;
281
		chfreq = CHANNEL_5GHZ;
282
		band = IEEE80211_BAND_5GHZ;
283
284
285
286
287
		break;
	case AR5K_MODE_11B:
	case AR5K_MODE_11G:
		size = 26;
		chfreq = CHANNEL_2GHZ;
288
		band = IEEE80211_BAND_2GHZ;
289
290
		break;
	default:
291
		ATH5K_WARN(ah, "bad mode, not copying channels\n");
292
		return 0;
293
294
	}

295
296
	count = 0;
	for (ch = 1; ch <= size && count < max; ch++) {
297
298
299
300
		freq = ieee80211_channel_to_frequency(ch, band);

		if (freq == 0) /* mapping failed - not a standard channel */
			continue;
301

302
303
304
		/* Check if channel is supported by the chipset */
		if (!ath5k_channel_ok(ah, freq, chfreq))
			continue;
305

306
307
		if (!modparam_all_channels &&
		    !ath5k_is_standard_channel(ch, band))
308
			continue;
309

310
311
		/* Write channel info and increment counter */
		channels[count].center_freq = freq;
312
		channels[count].band = band;
313
314
315
316
317
318
319
320
		switch (mode) {
		case AR5K_MODE_11A:
		case AR5K_MODE_11G:
			channels[count].hw_value = chfreq | CHANNEL_OFDM;
			break;
		case AR5K_MODE_11B:
			channels[count].hw_value = CHANNEL_B;
		}
321

322
323
		count++;
	}
324

325
326
	return count;
}
327

328
static void
329
ath5k_setup_rate_idx(struct ath5k_hw *ah, struct ieee80211_supported_band *b)
330
331
{
	u8 i;
332

333
	for (i = 0; i < AR5K_MAX_RATES; i++)
334
		ah->rate_idx[b->band][i] = -1;
335

336
	for (i = 0; i < b->n_bitrates; i++) {
337
		ah->rate_idx[b->band][b->bitrates[i].hw_value] = i;
338
		if (b->bitrates[i].hw_value_short)
339
			ah->rate_idx[b->band][b->bitrates[i].hw_value_short] = i;
340
	}
341
}
342

343
344
345
static int
ath5k_setup_bands(struct ieee80211_hw *hw)
{
346
	struct ath5k_hw *ah = hw->priv;
347
348
349
	struct ieee80211_supported_band *sband;
	int max_c, count_c = 0;
	int i;
350

351
352
	BUILD_BUG_ON(ARRAY_SIZE(ah->sbands) < IEEE80211_NUM_BANDS);
	max_c = ARRAY_SIZE(ah->channels);
353

354
	/* 2GHz band */
355
	sband = &ah->sbands[IEEE80211_BAND_2GHZ];
356
	sband->band = IEEE80211_BAND_2GHZ;
357
	sband->bitrates = &ah->rates[IEEE80211_BAND_2GHZ][0];
358

359
	if (test_bit(AR5K_MODE_11G, ah->ah_capabilities.cap_mode)) {
360
361
362
363
		/* G mode */
		memcpy(sband->bitrates, &ath5k_rates[0],
		       sizeof(struct ieee80211_rate) * 12);
		sband->n_bitrates = 12;
364

365
		sband->channels = ah->channels;
366
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
367
					AR5K_MODE_11G, max_c);
368

369
370
371
		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
		count_c = sband->n_channels;
		max_c -= count_c;
372
	} else if (test_bit(AR5K_MODE_11B, ah->ah_capabilities.cap_mode)) {
373
374
375
376
		/* B mode */
		memcpy(sband->bitrates, &ath5k_rates[0],
		       sizeof(struct ieee80211_rate) * 4);
		sband->n_bitrates = 4;
377

378
379
380
381
382
383
384
385
386
387
		/* 5211 only supports B rates and uses 4bit rate codes
		 * (e.g normally we have 0x1B for 1M, but on 5211 we have 0x0B)
		 * fix them up here:
		 */
		if (ah->ah_version == AR5K_AR5211) {
			for (i = 0; i < 4; i++) {
				sband->bitrates[i].hw_value =
					sband->bitrates[i].hw_value & 0xF;
				sband->bitrates[i].hw_value_short =
					sband->bitrates[i].hw_value_short & 0xF;
388
389
390
			}
		}

391
		sband->channels = ah->channels;
392
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
393
					AR5K_MODE_11B, max_c);
394

395
396
397
398
		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
		count_c = sband->n_channels;
		max_c -= count_c;
	}
399
	ath5k_setup_rate_idx(ah, sband);
400

401
	/* 5GHz band, A mode */
402
403
	if (test_bit(AR5K_MODE_11A, ah->ah_capabilities.cap_mode)) {
		sband = &ah->sbands[IEEE80211_BAND_5GHZ];
404
		sband->band = IEEE80211_BAND_5GHZ;
405
		sband->bitrates = &ah->rates[IEEE80211_BAND_5GHZ][0];
406

407
408
409
		memcpy(sband->bitrates, &ath5k_rates[4],
		       sizeof(struct ieee80211_rate) * 8);
		sband->n_bitrates = 8;
410

411
		sband->channels = &ah->channels[count_c];
412
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
413
					AR5K_MODE_11A, max_c);
414

415
416
		hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
	}
417
	ath5k_setup_rate_idx(ah, sband);
418

419
	ath5k_debug_dump_bands(ah);
420
421
422
423

	return 0;
}

424
425
426
427
428
/*
 * Set/change channels. We always reset the chip.
 * To accomplish this we must first cleanup any pending DMA,
 * then restart stuff after a la  ath5k_init.
 *
429
 * Called with ah->lock.
430
 */
431
int
432
ath5k_chan_set(struct ath5k_hw *ah, struct ieee80211_channel *chan)
433
{
434
	ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
435
		  "channel set, resetting (%u -> %u MHz)\n",
436
		  ah->curchan->center_freq, chan->center_freq);
437

438
	/*
439
440
441
442
	 * To switch channels clear any pending DMA operations;
	 * wait long enough for the RX fifo to drain, reset the
	 * hardware at the new frequency, and then re-enable
	 * the relevant bits of the h/w.
443
	 */
444
	return ath5k_reset(ah, chan, true);
445
446
}

447
/*
 * Per-interface iterator (also called directly for a vif being added):
 * accumulates into @data (struct ath5k_vif_iter_data) the common BSSID
 * mask, the first active MAC address, whether the hardware MAC is in
 * use, any-association state, a STA count and the combined opmode.
 */
void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct ath5k_vif_iter_data *iter_data = data;
	struct ath5k_vif *avf = (void *)vif->drv_priv;
	int i;

	/* narrow the BSSID mask to bits shared with the hw MAC address */
	if (iter_data->hw_macaddr) {
		for (i = 0; i < ETH_ALEN; i++)
			iter_data->mask[i] &=
				~(iter_data->hw_macaddr[i] ^ mac[i]);
	}

	/* remember the first active interface's address */
	if (!iter_data->found_active) {
		iter_data->found_active = true;
		memcpy(iter_data->active_mac, mac, ETH_ALEN);
	}

	/* no lladdr update needed if some vif already uses the hw MAC */
	if (iter_data->need_set_hw_addr && iter_data->hw_macaddr &&
	    compare_ether_addr(iter_data->hw_macaddr, mac) == 0)
		iter_data->need_set_hw_addr = false;

	if (!iter_data->any_assoc && avf->assoc)
		iter_data->any_assoc = true;

	/* Calculate combined mode - when APs are active, operate in AP mode.
	 * Otherwise use the mode of the new interface. This can currently
	 * only deal with combinations of APs and STAs. Only one ad-hoc
	 * interfaces is allowed.
	 */
	if (avf->opmode == NL80211_IFTYPE_AP) {
		iter_data->opmode = NL80211_IFTYPE_AP;
	} else {
		if (avf->opmode == NL80211_IFTYPE_STATION)
			iter_data->n_stas++;
		if (iter_data->opmode == NL80211_IFTYPE_UNSPECIFIED)
			iter_data->opmode = avf->opmode;
	}
}

487
void
488
ath5k_update_bssid_mask_and_opmode(struct ath5k_hw *ah,
489
				   struct ieee80211_vif *vif)
490
{
491
	struct ath_common *common = ath5k_hw_common(ah);
492
493
	struct ath5k_vif_iter_data iter_data;
	u32 rfilt;
494
495
496
497
498
499
500
501
502

	/*
	 * Use the hardware MAC address as reference, the hardware uses it
	 * together with the BSSID mask when matching addresses.
	 */
	iter_data.hw_macaddr = common->macaddr;
	memset(&iter_data.mask, 0xff, ETH_ALEN);
	iter_data.found_active = false;
	iter_data.need_set_hw_addr = true;
503
	iter_data.opmode = NL80211_IFTYPE_UNSPECIFIED;
504
	iter_data.n_stas = 0;
505
506

	if (vif)
507
		ath5k_vif_iter(&iter_data, vif->addr, vif);
508
509

	/* Get list of all active MAC addresses */
510
	ieee80211_iterate_active_interfaces_atomic(ah->hw, ath5k_vif_iter,
511
						   &iter_data);
512
	memcpy(ah->bssidmask, iter_data.mask, ETH_ALEN);
513

514
515
	ah->opmode = iter_data.opmode;
	if (ah->opmode == NL80211_IFTYPE_UNSPECIFIED)
516
		/* Nothing active, default to station mode */
517
		ah->opmode = NL80211_IFTYPE_STATION;
518

519
520
521
	ath5k_hw_set_opmode(ah, ah->opmode);
	ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "mode setup opmode %d (%s)\n",
		  ah->opmode, ath_opmode_to_string(ah->opmode));
522

523
	if (iter_data.need_set_hw_addr && iter_data.found_active)
524
		ath5k_hw_set_lladdr(ah, iter_data.active_mac);
525

526
527
	if (ath5k_hw_hasbssidmask(ah))
		ath5k_hw_set_bssid_mask(ah, ah->bssidmask);
528

529
530
531
532
	/* Set up RX Filter */
	if (iter_data.n_stas > 1) {
		/* If you have multiple STA interfaces connected to
		 * different APs, ARPs are not received (most of the time?)
533
		 * Enabling PROMISC appears to fix that problem.
534
		 */
535
		ah->filter_flags |= AR5K_RX_FILTER_PROM;
536
	}
537

538
539
540
	rfilt = ah->filter_flags;
	ath5k_hw_set_rx_filter(ah, rfilt);
	ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);
541
}
542

543
static inline int
544
ath5k_hw_to_driver_rix(struct ath5k_hw *ah, int hw_rix)
545
546
{
	int rix;
547

548
549
550
551
552
	/* return base rate on errors */
	if (WARN(hw_rix < 0 || hw_rix >= AR5K_MAX_RATES,
			"hw_rix out of bounds: %x\n", hw_rix))
		return 0;

553
	rix = ah->rate_idx[ah->curchan->band][hw_rix];
554
555
556
557
558
559
560
561
562
563
564
	if (WARN(rix < 0, "invalid hw_rix: %x\n", hw_rix))
		rix = 0;

	return rix;
}

/***************\
* Buffers setup *
\***************/

static
565
struct sk_buff *ath5k_rx_skb_alloc(struct ath5k_hw *ah, dma_addr_t *skb_addr)
566
{
567
	struct ath_common *common = ath5k_hw_common(ah);
568
	struct sk_buff *skb;
569
570

	/*
571
572
	 * Allocate buffer with headroom_needed space for the
	 * fake physical layer header at the start.
573
	 */
574
575
576
	skb = ath_rxbuf_alloc(common,
			      common->rx_bufsize,
			      GFP_ATOMIC);
577

578
	if (!skb) {
579
		ATH5K_ERR(ah, "can't alloc skbuff of size %u\n",
580
581
				common->rx_bufsize);
		return NULL;
582
583
	}

584
	*skb_addr = dma_map_single(ah->dev,
585
				   skb->data, common->rx_bufsize,
586
587
				   DMA_FROM_DEVICE);

588
589
	if (unlikely(dma_mapping_error(ah->dev, *skb_addr))) {
		ATH5K_ERR(ah, "%s: DMA mapping failed\n", __func__);
590
591
		dev_kfree_skb(skb);
		return NULL;
592
	}
593
594
	return skb;
}
595

596
static int
597
ath5k_rxbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf)
598
599
600
601
{
	struct sk_buff *skb = bf->skb;
	struct ath5k_desc *ds;
	int ret;
602

603
	if (!skb) {
604
		skb = ath5k_rx_skb_alloc(ah, &bf->skbaddr);
605
606
607
		if (!skb)
			return -ENOMEM;
		bf->skb = skb;
608
609
	}

610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
	/*
	 * Setup descriptors.  For receive we always terminate
	 * the descriptor list with a self-linked entry so we'll
	 * not get overrun under high load (as can happen with a
	 * 5212 when ANI processing enables PHY error frames).
	 *
	 * To ensure the last descriptor is self-linked we create
	 * each descriptor as self-linked and add it to the end.  As
	 * each additional descriptor is added the previous self-linked
	 * entry is "fixed" naturally.  This should be safe even
	 * if DMA is happening.  When processing RX interrupts we
	 * never remove/process the last, self-linked, entry on the
	 * descriptor list.  This ensures the hardware always has
	 * someplace to write a new frame.
	 */
	ds = bf->desc;
	ds->ds_link = bf->daddr;	/* link to self */
	ds->ds_data = bf->skbaddr;
	ret = ath5k_hw_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0);
629
	if (ret) {
630
		ATH5K_ERR(ah, "%s: could not setup RX desc\n", __func__);
631
		return ret;
632
633
	}

634
635
636
	if (ah->rxlink != NULL)
		*ah->rxlink = bf->daddr;
	ah->rxlink = &ds->ds_link;
637
638
639
	return 0;
}

640
static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb)
641
{
642
643
644
	struct ieee80211_hdr *hdr;
	enum ath5k_pkt_type htype;
	__le16 fc;
645

646
647
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
648

649
650
651
652
653
654
655
656
	if (ieee80211_is_beacon(fc))
		htype = AR5K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = AR5K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = AR5K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = AR5K_PKT_TYPE_PSPOLL;
657
	else
658
		htype = AR5K_PKT_TYPE_NORMAL;
659

660
	return htype;
661
662
}

663
static int
664
ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
665
		  struct ath5k_txq *txq, int padsize)
666
{
667
668
669
670
671
672
673
674
675
676
677
	struct ath5k_desc *ds = bf->desc;
	struct sk_buff *skb = bf->skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	unsigned int pktlen, flags, keyidx = AR5K_TXKEYIX_INVALID;
	struct ieee80211_rate *rate;
	unsigned int mrr_rate[3], mrr_tries[3];
	int i, ret;
	u16 hw_rate;
	u16 cts_rate = 0;
	u16 duration = 0;
	u8 rc_flags;
678

679
	flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK;
680

681
	/* XXX endianness */
682
	bf->skbaddr = dma_map_single(ah->dev, skb->data, skb->len,
683
			DMA_TO_DEVICE);
684

685
	rate = ieee80211_get_tx_rate(ah->hw, info);
686
687
688
689
	if (!rate) {
		ret = -EINVAL;
		goto err_unmap;
	}
690

691
692
	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= AR5K_TXDESC_NOACK;
693

694
695
696
	rc_flags = info->control.rates[0].flags;
	hw_rate = (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) ?
		rate->hw_value_short : rate->hw_value;
697

698
699
700
701
702
703
704
705
706
707
708
	pktlen = skb->len;

	/* FIXME: If we are in g mode and rate is a CCK rate
	 * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
	 * from tx power (value is in dB units already) */
	if (info->control.hw_key) {
		keyidx = info->control.hw_key->hw_key_idx;
		pktlen += info->control.hw_key->icv_len;
	}
	if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
		flags |= AR5K_TXDESC_RTSENA;
709
710
		cts_rate = ieee80211_get_rts_cts_rate(ah->hw, info)->hw_value;
		duration = le16_to_cpu(ieee80211_rts_duration(ah->hw,
711
			info->control.vif, pktlen, info));
712
713
714
	}
	if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
		flags |= AR5K_TXDESC_CTSENA;
715
716
		cts_rate = ieee80211_get_rts_cts_rate(ah->hw, info)->hw_value;
		duration = le16_to_cpu(ieee80211_ctstoself_duration(ah->hw,
717
			info->control.vif, pktlen, info));
718
719
720
721
	}
	ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
		ieee80211_get_hdrlen_from_skb(skb), padsize,
		get_hw_packet_type(skb),
722
		(ah->power_level * 2),
723
724
725
726
727
728
729
730
731
		hw_rate,
		info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags,
		cts_rate, duration);
	if (ret)
		goto err_unmap;

	memset(mrr_rate, 0, sizeof(mrr_rate));
	memset(mrr_tries, 0, sizeof(mrr_tries));
	for (i = 0; i < 3; i++) {
732
		rate = ieee80211_get_alt_retry_rate(ah->hw, info, i);
733
		if (!rate)
734
			break;
735

736
737
		mrr_rate[i] = rate->hw_value;
		mrr_tries[i] = info->control.rates[i + 1].count;
738
739
	}

740
741
742
743
	ath5k_hw_setup_mrr_tx_desc(ah, ds,
		mrr_rate[0], mrr_tries[0],
		mrr_rate[1], mrr_tries[1],
		mrr_rate[2], mrr_tries[2]);
744

745
746
	ds->ds_link = 0;
	ds->ds_data = bf->skbaddr;
Bruno Randolf's avatar
Bruno Randolf committed
747

748
749
	spin_lock_bh(&txq->lock);
	list_add_tail(&bf->list, &txq->q);
750
	txq->txq_len++;
751
752
753
754
	if (txq->link == NULL) /* is this first packet? */
		ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);
	else /* no, so only link it */
		*txq->link = bf->daddr;
Bruno Randolf's avatar
Bruno Randolf committed
755

756
757
758
759
760
761
762
	txq->link = &ds->ds_link;
	ath5k_hw_start_tx_dma(ah, txq->qnum);
	mmiowb();
	spin_unlock_bh(&txq->lock);

	return 0;
err_unmap:
763
	dma_unmap_single(ah->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
764
	return ret;
Bruno Randolf's avatar
Bruno Randolf committed
765
766
}

767
768
769
770
/*******************\
* Descriptors setup *
\*******************/

771
static int
772
ath5k_desc_alloc(struct ath5k_hw *ah)
773
{
774
775
776
777
778
	struct ath5k_desc *ds;
	struct ath5k_buf *bf;
	dma_addr_t da;
	unsigned int i;
	int ret;
779

780
	/* allocate descriptors */
781
	ah->desc_len = sizeof(struct ath5k_desc) *
782
			(ATH_TXBUF + ATH_RXBUF + ATH_BCBUF + 1);
783

784
785
786
787
	ah->desc = dma_alloc_coherent(ah->dev, ah->desc_len,
				&ah->desc_daddr, GFP_KERNEL);
	if (ah->desc == NULL) {
		ATH5K_ERR(ah, "can't allocate descriptors\n");
788
789
790
		ret = -ENOMEM;
		goto err;
	}
791
792
793
794
	ds = ah->desc;
	da = ah->desc_daddr;
	ATH5K_DBG(ah, ATH5K_DEBUG_ANY, "DMA map: %p (%zu) -> %llx\n",
		ds, ah->desc_len, (unsigned long long)ah->desc_daddr);
795

796
797
798
	bf = kcalloc(1 + ATH_TXBUF + ATH_RXBUF + ATH_BCBUF,
			sizeof(struct ath5k_buf), GFP_KERNEL);
	if (bf == NULL) {
799
		ATH5K_ERR(ah, "can't allocate bufptr\n");
800
801
802
		ret = -ENOMEM;
		goto err_free;
	}
803
	ah->bufptr = bf;
804

805
	INIT_LIST_HEAD(&ah->rxbuf);
806
807
808
	for (i = 0; i < ATH_RXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
809
		list_add_tail(&bf->list, &ah->rxbuf);
810
	}
811

812
813
	INIT_LIST_HEAD(&ah->txbuf);
	ah->txbuf_len = ATH_TXBUF;
814
	for (i = 0; i < ATH_TXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
815
816
		bf->desc = ds;
		bf->daddr = da;
817
		list_add_tail(&bf->list, &ah->txbuf);
818
819
	}

820
	/* beacon buffers */
821
	INIT_LIST_HEAD(&ah->bcbuf);
822
823
824
	for (i = 0; i < ATH_BCBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
825
		list_add_tail(&bf->list, &ah->bcbuf);
826
	}
827

828
829
	return 0;
err_free:
830
	dma_free_coherent(ah->dev, ah->desc_len, ah->desc, ah->desc_daddr);
831
err:
832
	ah->desc = NULL;
833
834
	return ret;
}
835

836
void
837
ath5k_txbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf)
838
839
840
841
{
	BUG_ON(!bf);
	if (!bf->skb)
		return;
842
	dma_unmap_single(ah->dev, bf->skbaddr, bf->skb->len,
843
844
845
846
847
848
849
850
			DMA_TO_DEVICE);
	dev_kfree_skb_any(bf->skb);
	bf->skb = NULL;
	bf->skbaddr = 0;
	bf->desc->ds_data = 0;
}

void
851
ath5k_rxbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf)
852
853
854
855
856
857
{
	struct ath_common *common = ath5k_hw_common(ah);

	BUG_ON(!bf);
	if (!bf->skb)
		return;
858
	dma_unmap_single(ah->dev, bf->skbaddr, common->rx_bufsize,
859
860
861
862
863
864
865
			DMA_FROM_DEVICE);
	dev_kfree_skb_any(bf->skb);
	bf->skb = NULL;
	bf->skbaddr = 0;
	bf->desc->ds_data = 0;
}

866
static void
867
ath5k_desc_free(struct ath5k_hw *ah)
868
869
{
	struct ath5k_buf *bf;
870

871
872
873
874
875
876
	list_for_each_entry(bf, &ah->txbuf, list)
		ath5k_txbuf_free_skb(ah, bf);
	list_for_each_entry(bf, &ah->rxbuf, list)
		ath5k_rxbuf_free_skb(ah, bf);
	list_for_each_entry(bf, &ah->bcbuf, list)
		ath5k_txbuf_free_skb(ah, bf);
877

878
	/* Free memory associated with all descriptors */
879
880
881
	dma_free_coherent(ah->dev, ah->desc_len, ah->desc, ah->desc_daddr);
	ah->desc = NULL;
	ah->desc_daddr = 0;
882

883
884
	kfree(ah->bufptr);
	ah->bufptr = NULL;
885
886
}

887
888
889
890
891
892

/**************\
* Queues setup *
\**************/

static struct ath5k_txq *
893
ath5k_txq_setup(struct ath5k_hw *ah,
894
		int qtype, int subtype)
895
{
896
897
898
	struct ath5k_txq *txq;
	struct ath5k_txq_info qi = {
		.tqi_subtype = subtype,
899
900
901
902
903
		/* XXX: default values not correct for B and XR channels,
		 * but who cares? */
		.tqi_aifs = AR5K_TUNE_AIFS,
		.tqi_cw_min = AR5K_TUNE_CWMIN,
		.tqi_cw_max = AR5K_TUNE_CWMAX
904
905
	};
	int qnum;
906

907
	/*
908
909
910
911
912
913
914
915
916
917
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors.  Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames.  Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
918
	 */
919
920
921
922
923
924
925
926
927
928
	qi.tqi_flags = AR5K_TXQ_FLAG_TXEOLINT_ENABLE |
				AR5K_TXQ_FLAG_TXDESCINT_ENABLE;
	qnum = ath5k_hw_setup_tx_queue(ah, qtype, &qi);
	if (qnum < 0) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return ERR_PTR(qnum);
	}
929
930
931
	if (qnum >= ARRAY_SIZE(ah->txqs)) {
		ATH5K_ERR(ah, "hw qnum %u out of range, max %tu!\n",
			qnum, ARRAY_SIZE(ah->txqs));
932
933
934
		ath5k_hw_release_tx_queue(ah, qnum);
		return ERR_PTR(-EINVAL);
	}
935
	txq = &ah->txqs[qnum];
936
937
938
939
940
941
	if (!txq->setup) {
		txq->qnum = qnum;
		txq->link = NULL;
		INIT_LIST_HEAD(&txq->q);
		spin_lock_init(&txq->lock);
		txq->setup = true;
942
		txq->txq_len = 0;
943
		txq->txq_max = ATH5K_TXQ_LEN_MAX;
944
		txq->txq_poll_mark = false;
945
		txq->txq_stuck = 0;
946
	}
947
	return &ah->txqs[qnum];
948
949
}

950
951
static int
ath5k_beaconq_setup(struct ath5k_hw *ah)
952
{
953
	struct ath5k_txq_info qi = {
954
955
956
957
958
		/* XXX: default values not correct for B and XR channels,
		 * but who cares? */
		.tqi_aifs = AR5K_TUNE_AIFS,
		.tqi_cw_min = AR5K_TUNE_CWMIN,
		.tqi_cw_max = AR5K_TUNE_CWMAX,
959
960
961
		/* NB: for dynamic turbo, don't enable any other interrupts */
		.tqi_flags = AR5K_TXQ_FLAG_TXDESCINT_ENABLE
	};
962

963
	return ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_BEACON, &qi);
964
965
}

966
static int
967
ath5k_beaconq_config(struct ath5k_hw *ah)
968
{
969
970
	struct ath5k_txq_info qi;
	int ret;
971

972
	ret = ath5k_hw_get_tx_queueprops(ah, ah->bhalq, &qi);
973
974
	if (ret)
		goto err;
975

976
977
	if (ah->opmode == NL80211_IFTYPE_AP ||
	    ah->opmode == NL80211_IFTYPE_MESH_POINT) {
978
979
980
981
982
983
984
		/*
		 * Always burst out beacon and CAB traffic
		 * (aifs = cwmin = cwmax = 0)
		 */
		qi.tqi_aifs = 0;
		qi.tqi_cw_min = 0;
		qi.tqi_cw_max = 0;
985
	} else if (ah->opmode == NL80211_IFTYPE_ADHOC) {
986
987
988
989
990
		/*
		 * Adhoc mode; backoff between 0 and (2 * cw_min).
		 */
		qi.tqi_aifs = 0;
		qi.tqi_cw_min = 0;
991
		qi.tqi_cw_max = 2 * AR5K_TUNE_CWMIN;
992
	}
993

994
	ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
995
996
		"beacon queueprops tqi_aifs:%d tqi_cw_min:%d tqi_cw_max:%d\n",
		qi.tqi_aifs, qi.tqi_cw_min, qi.tqi_cw_max);
997

998
	ret = ath5k_hw_set_tx_queueprops(ah, ah->bhalq, &qi);
999
	if (ret) {
1000
		ATH5K_ERR(ah, "%s: unable to update parameters for beacon "
1001
1002
1003
			"hardware queue!\n", __func__);
		goto err;
	}
1004
	ret = ath5k_hw_reset_tx_queue(ah, ah->bhalq); /* push to h/w */
1005
1006
	if (ret)
		goto err;
1007