base.c 81 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
/*-
 * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
 * Copyright (c) 2004-2005 Atheros Communications, Inc.
 * Copyright (c) 2006 Devicescape Software, Inc.
 * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
 * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 *
 */

43
44
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

45
46
#include <linux/module.h>
#include <linux/delay.h>
47
#include <linux/dma-mapping.h>
Jiri Slaby's avatar
Jiri Slaby committed
48
#include <linux/hardirq.h>
49
#include <linux/if.h>
Jiri Slaby's avatar
Jiri Slaby committed
50
#include <linux/io.h>
51
52
53
54
#include <linux/netdevice.h>
#include <linux/cache.h>
#include <linux/ethtool.h>
#include <linux/uaccess.h>
55
#include <linux/slab.h>
56
#include <linux/etherdevice.h>
57
#include <linux/nl80211.h>
58
59
60
61
62
63
64
65

#include <net/ieee80211_radiotap.h>

#include <asm/unaligned.h>

#include "base.h"
#include "reg.h"
#include "debug.h"
66
#include "ani.h"
67
68
#include "ath5k.h"
#include "../regd.h"
69

70
71
72
#define CREATE_TRACE_POINTS
#include "trace.h"

73
/* Module parameters (all read-only after load, S_IRUGO). */

/* Disable use of the hardware cipher engine; forces software crypto. */
bool ath5k_modparam_nohwcrypt;
module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");

/* Register every channel the chipset reports, not just the standard set
 * filtered by ath5k_is_standard_channel(). */
static bool modparam_all_channels;
module_param_named(all_channels, modparam_all_channels, bool, S_IRUGO);
MODULE_PARM_DESC(all_channels, "Expose all channels the device can use.");

static bool modparam_fastchanswitch;
module_param_named(fastchanswitch, modparam_fastchanswitch, bool, S_IRUGO);
MODULE_PARM_DESC(fastchanswitch, "Enable fast channel switching for AR2413/AR5413 radios.");

static bool ath5k_modparam_no_hw_rfkill_switch;
module_param_named(no_hw_rfkill_switch, ath5k_modparam_no_hw_rfkill_switch,
								bool, S_IRUGO);
MODULE_PARM_DESC(no_hw_rfkill_switch, "Ignore the GPIO RFKill switch state");

/* Module info */
MODULE_AUTHOR("Jiri Slaby");
MODULE_AUTHOR("Nick Kossifidis");
MODULE_DESCRIPTION("Support for 5xxx series of Atheros 802.11 wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");

/* Forward declarations for routines defined later in this file. */
static int ath5k_init(struct ieee80211_hw *hw);
static int ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
								bool skip_pcu);
102

/* Known SREVs */
Jiri Slaby's avatar
Jiri Slaby committed
103
/* Silicon-revision -> human-readable name table, consumed by
 * ath5k_chip_name(). MAC entries come first, then radio (RAD) entries;
 * each group ends with an "xxxxx"/UNKNOWN catch-all sentinel. */
static const struct ath5k_srev_name srev_names[] = {
#ifdef CONFIG_ATHEROS_AR231X
	{ "5312",	AR5K_VERSION_MAC,	AR5K_SREV_AR5312_R2 },
	{ "5312",	AR5K_VERSION_MAC,	AR5K_SREV_AR5312_R7 },
	{ "2313",	AR5K_VERSION_MAC,	AR5K_SREV_AR2313_R8 },
	{ "2315",	AR5K_VERSION_MAC,	AR5K_SREV_AR2315_R6 },
	{ "2315",	AR5K_VERSION_MAC,	AR5K_SREV_AR2315_R7 },
	{ "2317",	AR5K_VERSION_MAC,	AR5K_SREV_AR2317_R1 },
	{ "2317",	AR5K_VERSION_MAC,	AR5K_SREV_AR2317_R2 },
#else
	{ "5210",	AR5K_VERSION_MAC,	AR5K_SREV_AR5210 },
	{ "5311",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311 },
	{ "5311A",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311A },
	{ "5311B",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311B },
	{ "5211",	AR5K_VERSION_MAC,	AR5K_SREV_AR5211 },
	{ "5212",	AR5K_VERSION_MAC,	AR5K_SREV_AR5212 },
	{ "5213",	AR5K_VERSION_MAC,	AR5K_SREV_AR5213 },
	{ "5213A",	AR5K_VERSION_MAC,	AR5K_SREV_AR5213A },
	{ "2413",	AR5K_VERSION_MAC,	AR5K_SREV_AR2413 },
	{ "2414",	AR5K_VERSION_MAC,	AR5K_SREV_AR2414 },
	{ "5424",	AR5K_VERSION_MAC,	AR5K_SREV_AR5424 },
	{ "5413",	AR5K_VERSION_MAC,	AR5K_SREV_AR5413 },
	{ "5414",	AR5K_VERSION_MAC,	AR5K_SREV_AR5414 },
	{ "2415",	AR5K_VERSION_MAC,	AR5K_SREV_AR2415 },
	{ "5416",	AR5K_VERSION_MAC,	AR5K_SREV_AR5416 },
	{ "5418",	AR5K_VERSION_MAC,	AR5K_SREV_AR5418 },
	{ "2425",	AR5K_VERSION_MAC,	AR5K_SREV_AR2425 },
	{ "2417",	AR5K_VERSION_MAC,	AR5K_SREV_AR2417 },
#endif
	{ "xxxxx",	AR5K_VERSION_MAC,	AR5K_SREV_UNKNOWN },
	{ "5110",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5110 },
	{ "5111",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5111 },
	{ "5111A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5111A },
	{ "2111",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2111 },
	{ "5112",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112 },
	{ "5112A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112A },
	{ "5112B",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112B },
	{ "2112",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112 },
	{ "2112A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112A },
	{ "2112B",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112B },
	{ "2413",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2413 },
	{ "5413",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5413 },
	{ "5424",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5424 },
	{ "5133",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5133 },
#ifdef CONFIG_ATHEROS_AR231X
	{ "2316",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2316 },
	{ "2317",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2317 },
#endif
	{ "xxxxx",	AR5K_VERSION_RAD,	AR5K_SREV_UNKNOWN },
};

Jiri Slaby's avatar
Jiri Slaby committed
154
/* Master rate table. Bitrates are in units of 100 kbit/s.
 * Entries 0..3 are the CCK (11b) rates, entries 4..11 the OFDM rates;
 * ath5k_setup_bands() copies 12 entries for G mode, 4 for B mode and
 * the OFDM subset starting at index 4 for A mode. CCK rates above 1M
 * also carry a short-preamble hardware code variant. */
static const struct ieee80211_rate ath5k_rates[] = {
	{ .bitrate = 10,
	  .hw_value = ATH5K_RATE_CODE_1M, },
	{ .bitrate = 20,
	  .hw_value = ATH5K_RATE_CODE_2M,
	  .hw_value_short = ATH5K_RATE_CODE_2M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55,
	  .hw_value = ATH5K_RATE_CODE_5_5M,
	  .hw_value_short = ATH5K_RATE_CODE_5_5M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110,
	  .hw_value = ATH5K_RATE_CODE_11M,
	  .hw_value_short = ATH5K_RATE_CODE_11M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 60,
	  .hw_value = ATH5K_RATE_CODE_6M,
	  .flags = 0 },
	{ .bitrate = 90,
	  .hw_value = ATH5K_RATE_CODE_9M,
	  .flags = 0 },
	{ .bitrate = 120,
	  .hw_value = ATH5K_RATE_CODE_12M,
	  .flags = 0 },
	{ .bitrate = 180,
	  .hw_value = ATH5K_RATE_CODE_18M,
	  .flags = 0 },
	{ .bitrate = 240,
	  .hw_value = ATH5K_RATE_CODE_24M,
	  .flags = 0 },
	{ .bitrate = 360,
	  .hw_value = ATH5K_RATE_CODE_36M,
	  .flags = 0 },
	{ .bitrate = 480,
	  .hw_value = ATH5K_RATE_CODE_48M,
	  .flags = 0 },
	{ .bitrate = 540,
	  .hw_value = ATH5K_RATE_CODE_54M,
	  .flags = 0 },
};

195
196
197
198
199
200
201
202
203
204
/*
 * Extend a 15-bit hardware RX timestamp to a full 64-bit TSF, using the
 * current hardware TSF as the reference. If the low 15 TSF bits have
 * already wrapped past the timestamp, step back one wrap period so the
 * merged value refers to the earlier (correct) interval.
 */
static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
{
	u64 now = ath5k_hw_get_tsf64(ah);

	if ((now & 0x7fff) < rstamp)
		now -= 0x8000;

	return (now & ~0x7fff) | rstamp;
}

205
const char *
206
207
208
209
210
211
212
213
ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val)
{
	const char *name = "xxxxx";
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(srev_names); i++) {
		if (srev_names[i].sr_type != type)
			continue;
214
215
216
217
218

		if ((val & 0xf0) == srev_names[i].sr_val)
			name = srev_names[i].sr_name;

		if ((val & 0xff) == srev_names[i].sr_val) {
219
220
221
222
223
224
225
			name = srev_names[i].sr_name;
			break;
		}
	}

	return name;
}
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
/* Register-access glue for the shared ath layer: forward ath_common
 * reads/writes to ath5k's own MMIO accessors. */
static unsigned int ath5k_ioread32(void *hw_priv, u32 reg_offset)
{
	return ath5k_hw_reg_read((struct ath5k_hw *) hw_priv, reg_offset);
}

static void ath5k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
{
	ath5k_hw_reg_write((struct ath5k_hw *) hw_priv, val, reg_offset);
}

/* ath_common I/O operations backed by the helpers above. */
static const struct ath_ops ath5k_common_ops = {
	.read = ath5k_ioread32,
	.write = ath5k_iowrite32,
};
242

243
244
245
246
247
/***********************\
* Driver Initialization *
\***********************/

static int ath5k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
248
{
249
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
250
251
	struct ath5k_hw *ah = hw->priv;
	struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);
252

253
254
	return ath_reg_notifier_apply(wiphy, request, regulatory);
}
255

256
257
258
/********************\
* Channel/mode setup *
\********************/
259

260
261
262
/*
 * Returns true for the channel numbers used without all_channels modparam.
 */
263
static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
264
{
265
266
267
268
269
	if (band == IEEE80211_BAND_2GHZ && chan <= 14)
		return true;

	return	/* UNII 1,2 */
		(((chan & 3) == 0 && chan >= 36 && chan <= 64) ||
270
271
272
		/* midband */
		((chan & 3) == 0 && chan >= 100 && chan <= 140) ||
		/* UNII-3 */
273
274
275
276
277
		((chan & 3) == 1 && chan >= 149 && chan <= 165) ||
		/* 802.11j 5.030-5.080 GHz (20MHz) */
		(chan == 8 || chan == 12 || chan == 16) ||
		/* 802.11j 4.9GHz (20MHz) */
		(chan == 184 || chan == 188 || chan == 192 || chan == 196));
278
}
279

280
static unsigned int
281
282
ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels,
		unsigned int mode, unsigned int max)
283
{
284
	unsigned int count, size, freq, ch;
285
	enum ieee80211_band band;
286

287
288
289
	switch (mode) {
	case AR5K_MODE_11A:
		/* 1..220, but 2GHz frequencies are filtered by check_channel */
290
		size = 220;
291
		band = IEEE80211_BAND_5GHZ;
292
293
294
295
		break;
	case AR5K_MODE_11B:
	case AR5K_MODE_11G:
		size = 26;
296
		band = IEEE80211_BAND_2GHZ;
297
298
		break;
	default:
299
		ATH5K_WARN(ah, "bad mode, not copying channels\n");
300
		return 0;
301
302
	}

303
304
	count = 0;
	for (ch = 1; ch <= size && count < max; ch++) {
305
306
307
308
		freq = ieee80211_channel_to_frequency(ch, band);

		if (freq == 0) /* mapping failed - not a standard channel */
			continue;
309

310
311
312
313
314
		/* Write channel info, needed for ath5k_channel_ok() */
		channels[count].center_freq = freq;
		channels[count].band = band;
		channels[count].hw_value = mode;

315
		/* Check if channel is supported by the chipset */
316
		if (!ath5k_channel_ok(ah, &channels[count]))
317
			continue;
318

319
320
		if (!modparam_all_channels &&
		    !ath5k_is_standard_channel(ch, band))
321
			continue;
322

323
324
		count++;
	}
325

326
327
	return count;
}
328

329
static void
330
ath5k_setup_rate_idx(struct ath5k_hw *ah, struct ieee80211_supported_band *b)
331
332
{
	u8 i;
333

334
	for (i = 0; i < AR5K_MAX_RATES; i++)
335
		ah->rate_idx[b->band][i] = -1;
336

337
	for (i = 0; i < b->n_bitrates; i++) {
338
		ah->rate_idx[b->band][b->bitrates[i].hw_value] = i;
339
		if (b->bitrates[i].hw_value_short)
340
			ah->rate_idx[b->band][b->bitrates[i].hw_value_short] = i;
341
	}
342
}
343

344
345
346
static int
ath5k_setup_bands(struct ieee80211_hw *hw)
{
347
	struct ath5k_hw *ah = hw->priv;
348
349
350
	struct ieee80211_supported_band *sband;
	int max_c, count_c = 0;
	int i;
351

352
353
	BUILD_BUG_ON(ARRAY_SIZE(ah->sbands) < IEEE80211_NUM_BANDS);
	max_c = ARRAY_SIZE(ah->channels);
354

355
	/* 2GHz band */
356
	sband = &ah->sbands[IEEE80211_BAND_2GHZ];
357
	sband->band = IEEE80211_BAND_2GHZ;
358
	sband->bitrates = &ah->rates[IEEE80211_BAND_2GHZ][0];
359

360
	if (test_bit(AR5K_MODE_11G, ah->ah_capabilities.cap_mode)) {
361
362
363
364
		/* G mode */
		memcpy(sband->bitrates, &ath5k_rates[0],
		       sizeof(struct ieee80211_rate) * 12);
		sband->n_bitrates = 12;
365

366
		sband->channels = ah->channels;
367
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
368
					AR5K_MODE_11G, max_c);
369

370
371
372
		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
		count_c = sband->n_channels;
		max_c -= count_c;
373
	} else if (test_bit(AR5K_MODE_11B, ah->ah_capabilities.cap_mode)) {
374
375
376
377
		/* B mode */
		memcpy(sband->bitrates, &ath5k_rates[0],
		       sizeof(struct ieee80211_rate) * 4);
		sband->n_bitrates = 4;
378

379
380
381
382
383
384
385
386
387
388
		/* 5211 only supports B rates and uses 4bit rate codes
		 * (e.g normally we have 0x1B for 1M, but on 5211 we have 0x0B)
		 * fix them up here:
		 */
		if (ah->ah_version == AR5K_AR5211) {
			for (i = 0; i < 4; i++) {
				sband->bitrates[i].hw_value =
					sband->bitrates[i].hw_value & 0xF;
				sband->bitrates[i].hw_value_short =
					sband->bitrates[i].hw_value_short & 0xF;
389
390
391
			}
		}

392
		sband->channels = ah->channels;
393
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
394
					AR5K_MODE_11B, max_c);
395

396
397
398
399
		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
		count_c = sband->n_channels;
		max_c -= count_c;
	}
400
	ath5k_setup_rate_idx(ah, sband);
401

402
	/* 5GHz band, A mode */
403
404
	if (test_bit(AR5K_MODE_11A, ah->ah_capabilities.cap_mode)) {
		sband = &ah->sbands[IEEE80211_BAND_5GHZ];
405
		sband->band = IEEE80211_BAND_5GHZ;
406
		sband->bitrates = &ah->rates[IEEE80211_BAND_5GHZ][0];
407

408
409
410
		memcpy(sband->bitrates, &ath5k_rates[4],
		       sizeof(struct ieee80211_rate) * 8);
		sband->n_bitrates = 8;
411

412
		sband->channels = &ah->channels[count_c];
413
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
414
					AR5K_MODE_11A, max_c);
415

416
417
		hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
	}
418
	ath5k_setup_rate_idx(ah, sband);
419

420
	ath5k_debug_dump_bands(ah);
421
422
423
424

	return 0;
}

425
426
427
428
429
/*
 * Set/change channels. We always reset the chip.
 * To accomplish this we must first cleanup any pending DMA,
 * then restart stuff after a la  ath5k_init.
 *
430
 * Called with ah->lock.
431
 */
432
int
433
ath5k_chan_set(struct ath5k_hw *ah, struct ieee80211_channel *chan)
434
{
435
	ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
436
		  "channel set, resetting (%u -> %u MHz)\n",
437
		  ah->curchan->center_freq, chan->center_freq);
438

439
	/*
440
441
442
443
	 * To switch channels clear any pending DMA operations;
	 * wait long enough for the RX fifo to drain, reset the
	 * hardware at the new frequency, and then re-enable
	 * the relevant bits of the h/w.
444
	 */
445
	return ath5k_reset(ah, chan, true);
446
447
}

448
/*
 * Per-interface iterator: accumulate into @data (struct
 * ath5k_vif_iter_data) the common BSSID mask, the first active MAC,
 * whether the hardware MAC needs re-programming, association state,
 * the STA count and the combined operating mode.
 */
void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct ath5k_vif_iter_data *it = data;
	struct ath5k_vif *avf = (void *)vif->drv_priv;
	int i;

	/* Clear mask bits wherever this MAC differs from the hw MAC. */
	if (it->hw_macaddr)
		for (i = 0; i < ETH_ALEN; i++)
			it->mask[i] &= ~(it->hw_macaddr[i] ^ mac[i]);

	/* Remember the first active interface's address. */
	if (!it->found_active) {
		it->found_active = true;
		memcpy(it->active_mac, mac, ETH_ALEN);
	}

	/* If some vif already uses the hw MAC, no lladdr update needed. */
	if (it->need_set_hw_addr && it->hw_macaddr &&
	    compare_ether_addr(it->hw_macaddr, mac) == 0)
		it->need_set_hw_addr = false;

	if (!it->any_assoc && avf->assoc)
		it->any_assoc = true;

	/* Calculate combined mode - when APs are active, operate in AP mode.
	 * Otherwise use the mode of the new interface. This can currently
	 * only deal with combinations of APs and STAs. Only one ad-hoc
	 * interfaces is allowed.
	 */
	if (avf->opmode == NL80211_IFTYPE_AP) {
		it->opmode = NL80211_IFTYPE_AP;
	} else {
		if (avf->opmode == NL80211_IFTYPE_STATION)
			it->n_stas++;
		if (it->opmode == NL80211_IFTYPE_UNSPECIFIED)
			it->opmode = avf->opmode;
	}
}

488
void
489
ath5k_update_bssid_mask_and_opmode(struct ath5k_hw *ah,
490
				   struct ieee80211_vif *vif)
491
{
492
	struct ath_common *common = ath5k_hw_common(ah);
493
494
	struct ath5k_vif_iter_data iter_data;
	u32 rfilt;
495
496
497
498
499
500
501
502
503

	/*
	 * Use the hardware MAC address as reference, the hardware uses it
	 * together with the BSSID mask when matching addresses.
	 */
	iter_data.hw_macaddr = common->macaddr;
	memset(&iter_data.mask, 0xff, ETH_ALEN);
	iter_data.found_active = false;
	iter_data.need_set_hw_addr = true;
504
	iter_data.opmode = NL80211_IFTYPE_UNSPECIFIED;
505
	iter_data.n_stas = 0;
506
507

	if (vif)
508
		ath5k_vif_iter(&iter_data, vif->addr, vif);
509
510

	/* Get list of all active MAC addresses */
511
	ieee80211_iterate_active_interfaces_atomic(ah->hw, ath5k_vif_iter,
512
						   &iter_data);
513
	memcpy(ah->bssidmask, iter_data.mask, ETH_ALEN);
514

515
516
	ah->opmode = iter_data.opmode;
	if (ah->opmode == NL80211_IFTYPE_UNSPECIFIED)
517
		/* Nothing active, default to station mode */
518
		ah->opmode = NL80211_IFTYPE_STATION;
519

520
521
522
	ath5k_hw_set_opmode(ah, ah->opmode);
	ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "mode setup opmode %d (%s)\n",
		  ah->opmode, ath_opmode_to_string(ah->opmode));
523

524
	if (iter_data.need_set_hw_addr && iter_data.found_active)
525
		ath5k_hw_set_lladdr(ah, iter_data.active_mac);
526

527
528
	if (ath5k_hw_hasbssidmask(ah))
		ath5k_hw_set_bssid_mask(ah, ah->bssidmask);
529

530
531
532
533
	/* Set up RX Filter */
	if (iter_data.n_stas > 1) {
		/* If you have multiple STA interfaces connected to
		 * different APs, ARPs are not received (most of the time?)
534
		 * Enabling PROMISC appears to fix that problem.
535
		 */
536
		ah->filter_flags |= AR5K_RX_FILTER_PROM;
537
	}
538

539
540
541
	rfilt = ah->filter_flags;
	ath5k_hw_set_rx_filter(ah, rfilt);
	ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);
542
}
543

544
static inline int
545
ath5k_hw_to_driver_rix(struct ath5k_hw *ah, int hw_rix)
546
547
{
	int rix;
548

549
550
551
552
553
	/* return base rate on errors */
	if (WARN(hw_rix < 0 || hw_rix >= AR5K_MAX_RATES,
			"hw_rix out of bounds: %x\n", hw_rix))
		return 0;

554
	rix = ah->rate_idx[ah->curchan->band][hw_rix];
555
556
557
558
559
560
561
562
563
564
565
	if (WARN(rix < 0, "invalid hw_rix: %x\n", hw_rix))
		rix = 0;

	return rix;
}

/***************\
* Buffers setup *
\***************/

static
566
struct sk_buff *ath5k_rx_skb_alloc(struct ath5k_hw *ah, dma_addr_t *skb_addr)
567
{
568
	struct ath_common *common = ath5k_hw_common(ah);
569
	struct sk_buff *skb;
570
571

	/*
572
573
	 * Allocate buffer with headroom_needed space for the
	 * fake physical layer header at the start.
574
	 */
575
576
577
	skb = ath_rxbuf_alloc(common,
			      common->rx_bufsize,
			      GFP_ATOMIC);
578

579
	if (!skb) {
580
		ATH5K_ERR(ah, "can't alloc skbuff of size %u\n",
581
582
				common->rx_bufsize);
		return NULL;
583
584
	}

585
	*skb_addr = dma_map_single(ah->dev,
586
				   skb->data, common->rx_bufsize,
587
588
				   DMA_FROM_DEVICE);

589
590
	if (unlikely(dma_mapping_error(ah->dev, *skb_addr))) {
		ATH5K_ERR(ah, "%s: DMA mapping failed\n", __func__);
591
592
		dev_kfree_skb(skb);
		return NULL;
593
	}
594
595
	return skb;
}
596

597
static int
598
ath5k_rxbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf)
599
600
601
602
{
	struct sk_buff *skb = bf->skb;
	struct ath5k_desc *ds;
	int ret;
603

604
	if (!skb) {
605
		skb = ath5k_rx_skb_alloc(ah, &bf->skbaddr);
606
607
608
		if (!skb)
			return -ENOMEM;
		bf->skb = skb;
609
610
	}

611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
	/*
	 * Setup descriptors.  For receive we always terminate
	 * the descriptor list with a self-linked entry so we'll
	 * not get overrun under high load (as can happen with a
	 * 5212 when ANI processing enables PHY error frames).
	 *
	 * To ensure the last descriptor is self-linked we create
	 * each descriptor as self-linked and add it to the end.  As
	 * each additional descriptor is added the previous self-linked
	 * entry is "fixed" naturally.  This should be safe even
	 * if DMA is happening.  When processing RX interrupts we
	 * never remove/process the last, self-linked, entry on the
	 * descriptor list.  This ensures the hardware always has
	 * someplace to write a new frame.
	 */
	ds = bf->desc;
	ds->ds_link = bf->daddr;	/* link to self */
	ds->ds_data = bf->skbaddr;
	ret = ath5k_hw_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0);
630
	if (ret) {
631
		ATH5K_ERR(ah, "%s: could not setup RX desc\n", __func__);
632
		return ret;
633
634
	}

635
636
637
	if (ah->rxlink != NULL)
		*ah->rxlink = bf->daddr;
	ah->rxlink = &ds->ds_link;
638
639
640
	return 0;
}

641
static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb)
642
{
643
644
645
	struct ieee80211_hdr *hdr;
	enum ath5k_pkt_type htype;
	__le16 fc;
646

647
648
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
649

650
651
652
653
654
655
656
657
	if (ieee80211_is_beacon(fc))
		htype = AR5K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = AR5K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = AR5K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = AR5K_PKT_TYPE_PSPOLL;
658
	else
659
		htype = AR5K_PKT_TYPE_NORMAL;
660

661
	return htype;
662
663
}

664
static int
665
ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
666
		  struct ath5k_txq *txq, int padsize)
667
{
668
669
670
671
672
673
674
675
676
677
678
	struct ath5k_desc *ds = bf->desc;
	struct sk_buff *skb = bf->skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	unsigned int pktlen, flags, keyidx = AR5K_TXKEYIX_INVALID;
	struct ieee80211_rate *rate;
	unsigned int mrr_rate[3], mrr_tries[3];
	int i, ret;
	u16 hw_rate;
	u16 cts_rate = 0;
	u16 duration = 0;
	u8 rc_flags;
679

680
	flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK;
681

682
	/* XXX endianness */
683
	bf->skbaddr = dma_map_single(ah->dev, skb->data, skb->len,
684
			DMA_TO_DEVICE);
685

686
	rate = ieee80211_get_tx_rate(ah->hw, info);
687
688
689
690
	if (!rate) {
		ret = -EINVAL;
		goto err_unmap;
	}
691

692
693
	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= AR5K_TXDESC_NOACK;
694

695
696
697
	rc_flags = info->control.rates[0].flags;
	hw_rate = (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) ?
		rate->hw_value_short : rate->hw_value;
698

699
700
701
702
703
704
705
706
707
708
709
	pktlen = skb->len;

	/* FIXME: If we are in g mode and rate is a CCK rate
	 * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
	 * from tx power (value is in dB units already) */
	if (info->control.hw_key) {
		keyidx = info->control.hw_key->hw_key_idx;
		pktlen += info->control.hw_key->icv_len;
	}
	if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
		flags |= AR5K_TXDESC_RTSENA;
710
711
		cts_rate = ieee80211_get_rts_cts_rate(ah->hw, info)->hw_value;
		duration = le16_to_cpu(ieee80211_rts_duration(ah->hw,
712
			info->control.vif, pktlen, info));
713
714
715
	}
	if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
		flags |= AR5K_TXDESC_CTSENA;
716
717
		cts_rate = ieee80211_get_rts_cts_rate(ah->hw, info)->hw_value;
		duration = le16_to_cpu(ieee80211_ctstoself_duration(ah->hw,
718
			info->control.vif, pktlen, info));
719
720
721
722
	}
	ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
		ieee80211_get_hdrlen_from_skb(skb), padsize,
		get_hw_packet_type(skb),
723
		(ah->power_level * 2),
724
725
726
727
728
729
		hw_rate,
		info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags,
		cts_rate, duration);
	if (ret)
		goto err_unmap;

730
731
732
733
734
735
736
737
	/* Set up MRR descriptor */
	if (ah->ah_capabilities.cap_has_mrr_support) {
		memset(mrr_rate, 0, sizeof(mrr_rate));
		memset(mrr_tries, 0, sizeof(mrr_tries));
		for (i = 0; i < 3; i++) {
			rate = ieee80211_get_alt_retry_rate(ah->hw, info, i);
			if (!rate)
				break;
738

739
740
741
			mrr_rate[i] = rate->hw_value;
			mrr_tries[i] = info->control.rates[i + 1].count;
		}
742

743
744
745
746
747
		ath5k_hw_setup_mrr_tx_desc(ah, ds,
			mrr_rate[0], mrr_tries[0],
			mrr_rate[1], mrr_tries[1],
			mrr_rate[2], mrr_tries[2]);
	}
748

749
750
	ds->ds_link = 0;
	ds->ds_data = bf->skbaddr;
Bruno Randolf's avatar
Bruno Randolf committed
751

752
753
	spin_lock_bh(&txq->lock);
	list_add_tail(&bf->list, &txq->q);
754
	txq->txq_len++;
755
756
757
758
	if (txq->link == NULL) /* is this first packet? */
		ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);
	else /* no, so only link it */
		*txq->link = bf->daddr;
Bruno Randolf's avatar
Bruno Randolf committed
759

760
761
762
763
764
765
766
	txq->link = &ds->ds_link;
	ath5k_hw_start_tx_dma(ah, txq->qnum);
	mmiowb();
	spin_unlock_bh(&txq->lock);

	return 0;
err_unmap:
767
	dma_unmap_single(ah->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
768
	return ret;
Bruno Randolf's avatar
Bruno Randolf committed
769
770
}

771
772
773
774
/*******************\
* Descriptors setup *
\*******************/

775
static int
776
ath5k_desc_alloc(struct ath5k_hw *ah)
777
{
778
779
780
781
782
	struct ath5k_desc *ds;
	struct ath5k_buf *bf;
	dma_addr_t da;
	unsigned int i;
	int ret;
783

784
	/* allocate descriptors */
785
	ah->desc_len = sizeof(struct ath5k_desc) *
786
			(ATH_TXBUF + ATH_RXBUF + ATH_BCBUF + 1);
787

788
789
790
791
	ah->desc = dma_alloc_coherent(ah->dev, ah->desc_len,
				&ah->desc_daddr, GFP_KERNEL);
	if (ah->desc == NULL) {
		ATH5K_ERR(ah, "can't allocate descriptors\n");
792
793
794
		ret = -ENOMEM;
		goto err;
	}
795
796
797
798
	ds = ah->desc;
	da = ah->desc_daddr;
	ATH5K_DBG(ah, ATH5K_DEBUG_ANY, "DMA map: %p (%zu) -> %llx\n",
		ds, ah->desc_len, (unsigned long long)ah->desc_daddr);
799

800
801
802
	bf = kcalloc(1 + ATH_TXBUF + ATH_RXBUF + ATH_BCBUF,
			sizeof(struct ath5k_buf), GFP_KERNEL);
	if (bf == NULL) {
803
		ATH5K_ERR(ah, "can't allocate bufptr\n");
804
805
806
		ret = -ENOMEM;
		goto err_free;
	}
807
	ah->bufptr = bf;
808

809
	INIT_LIST_HEAD(&ah->rxbuf);
810
811
812
	for (i = 0; i < ATH_RXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
813
		list_add_tail(&bf->list, &ah->rxbuf);
814
	}
815

816
817
	INIT_LIST_HEAD(&ah->txbuf);
	ah->txbuf_len = ATH_TXBUF;
818
	for (i = 0; i < ATH_TXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
819
820
		bf->desc = ds;
		bf->daddr = da;
821
		list_add_tail(&bf->list, &ah->txbuf);
822
823
	}

824
	/* beacon buffers */
825
	INIT_LIST_HEAD(&ah->bcbuf);
826
827
828
	for (i = 0; i < ATH_BCBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
829
		list_add_tail(&bf->list, &ah->bcbuf);
830
	}
831

832
833
	return 0;
err_free:
834
	dma_free_coherent(ah->dev, ah->desc_len, ah->desc, ah->desc_daddr);
835
err:
836
	ah->desc = NULL;
837
838
	return ret;
}
839

840
void
841
ath5k_txbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf)
842
843
844
845
{
	BUG_ON(!bf);
	if (!bf->skb)
		return;
846
	dma_unmap_single(ah->dev, bf->skbaddr, bf->skb->len,
847
848
849
850
851
852
853
854
			DMA_TO_DEVICE);
	dev_kfree_skb_any(bf->skb);
	bf->skb = NULL;
	bf->skbaddr = 0;
	bf->desc->ds_data = 0;
}

void
855
ath5k_rxbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf)
856
857
858
859
860
861
{
	struct ath_common *common = ath5k_hw_common(ah);

	BUG_ON(!bf);
	if (!bf->skb)
		return;
862
	dma_unmap_single(ah->dev, bf->skbaddr, common->rx_bufsize,
863
864
865
866
867
868
869
			DMA_FROM_DEVICE);
	dev_kfree_skb_any(bf->skb);
	bf->skb = NULL;
	bf->skbaddr = 0;
	bf->desc->ds_data = 0;
}

870
static void
871
ath5k_desc_free(struct ath5k_hw *ah)
872
873
{
	struct ath5k_buf *bf;
874

875
876
877
878
879
880
	list_for_each_entry(bf, &ah->txbuf, list)
		ath5k_txbuf_free_skb(ah, bf);
	list_for_each_entry(bf, &ah->rxbuf, list)
		ath5k_rxbuf_free_skb(ah, bf);
	list_for_each_entry(bf, &ah->bcbuf, list)
		ath5k_txbuf_free_skb(ah, bf);
881

882
	/* Free memory associated with all descriptors */
883
884
885
	dma_free_coherent(ah->dev, ah->desc_len, ah->desc, ah->desc_daddr);
	ah->desc = NULL;
	ah->desc_daddr = 0;
886

887
888
	kfree(ah->bufptr);
	ah->bufptr = NULL;
889
890
}

891
892
893
894
895
896

/**************\
* Queues setup *
\**************/

static struct ath5k_txq *
897
ath5k_txq_setup(struct ath5k_hw *ah,
898
		int qtype, int subtype)
899
{
900
901
902
	struct ath5k_txq *txq;
	struct ath5k_txq_info qi = {
		.tqi_subtype = subtype,
903
904
905
906
907
		/* XXX: default values not correct for B and XR channels,
		 * but who cares? */
		.tqi_aifs = AR5K_TUNE_AIFS,
		.tqi_cw_min = AR5K_TUNE_CWMIN,
		.tqi_cw_max = AR5K_TUNE_CWMAX
908
909
	};
	int qnum;
910

911
	/*
912
913
914
915
916
917
918
919
920
921
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors.  Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames.  Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
922
	 */
923
924
925
926
927
928
929
930
931
932
	qi.tqi_flags = AR5K_TXQ_FLAG_TXEOLINT_ENABLE |
				AR5K_TXQ_FLAG_TXDESCINT_ENABLE;
	qnum = ath5k_hw_setup_tx_queue(ah, qtype, &qi);
	if (qnum < 0) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return ERR_PTR(qnum);
	}
933
	txq = &ah->txqs[qnum];
934
935
936
937
938
939
	if (!txq->setup) {
		txq->qnum = qnum;
		txq->link = NULL;
		INIT_LIST_HEAD(&txq->q);
		spin_lock_init(&txq->lock);
		txq->setup = true;
940
		txq->txq_len = 0;
941
		txq->txq_max = ATH5K_TXQ_LEN_MAX;
942
		txq->txq_poll_mark = false;
943
		txq->txq_stuck = 0;
944
	}
945
	return &ah->txqs[qnum];
946
947
}

948
949
static int
ath5k_beaconq_setup(struct ath5k_hw *ah)
950
{
951
	struct ath5k_txq_info qi = {
952
953
954
955
956
		/* XXX: default values not correct for B and XR channels,
		 * but who cares? */
		.tqi_aifs = AR5K_TUNE_AIFS,
		.tqi_cw_min = AR5K_TUNE_CWMIN,
		.tqi_cw_max = AR5K_TUNE_CWMAX,
957
958
959
		/* NB: for dynamic turbo, don't enable any other interrupts */
		.tqi_flags = AR5K_TXQ_FLAG_TXDESCINT_ENABLE
	};
960

961
	return ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_BEACON, &qi);
962
963
}

964
static int
965
ath5k_beaconq_config(struct ath5k_hw *ah)
966
{
967
968
	struct ath5k_txq_info qi;
	int ret;
969

970
	ret = ath5k_hw_get_tx_queueprops(ah, ah->bhalq, &qi);
971
972
	if (ret)
		goto err;
973

974
975
	if (ah->opmode == NL80211_IFTYPE_AP ||
	    ah->opmode == NL80211_IFTYPE_MESH_POINT) {
976
977
978
979
980
981
982
		/*
		 * Always burst out beacon and CAB traffic
		 * (aifs = cwmin = cwmax = 0)
		 */
		qi.tqi_aifs = 0;
		qi.tqi_cw_min = 0;
		qi.tqi_cw_max = 0;
983
	} else if (ah->opmode == NL80211_IFTYPE_ADHOC) {
984
985
986
987
988
		/*
		 * Adhoc mode; backoff between 0 and (2 * cw_min).
		 */
		qi.tqi_aifs = 0;
		qi.tqi_cw_min = 0;
989
		qi.tqi_cw_max = 2 * AR5K_TUNE_CWMIN;
990
	}
991

992
	ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
993
994
		"beacon queueprops tqi_aifs:%d tqi_cw_min:%d tqi_cw_max:%d\n",
		qi.tqi_aifs, qi.tqi_cw_min, qi.tqi_cw_max);
995

996
	ret = ath5k_hw_set_tx_queueprops(ah, ah->bhalq, &qi);
997
	if (ret) {