base.c 78.7 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
/*-
 * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
 * Copyright (c) 2004-2005 Atheros Communications, Inc.
 * Copyright (c) 2006 Devicescape Software, Inc.
 * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
 * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 *
 */

#include <linux/module.h>
#include <linux/delay.h>
45
#include <linux/dma-mapping.h>
Jiri Slaby's avatar
Jiri Slaby committed
46
#include <linux/hardirq.h>
47
#include <linux/if.h>
Jiri Slaby's avatar
Jiri Slaby committed
48
#include <linux/io.h>
49
50
51
52
#include <linux/netdevice.h>
#include <linux/cache.h>
#include <linux/ethtool.h>
#include <linux/uaccess.h>
53
#include <linux/slab.h>
54
#include <linux/etherdevice.h>
55
#include <linux/nl80211.h>
56
57
58
59
60
61
62
63

#include <net/ieee80211_radiotap.h>

#include <asm/unaligned.h>

#include "base.h"
#include "reg.h"
#include "debug.h"
64
#include "ani.h"
65
66
#include "ath5k.h"
#include "../regd.h"
67

68
69
70
#define CREATE_TRACE_POINTS
#include "trace.h"

71
72
/* Module parameters; all are read-only after load (S_IRUGO).
 *
 * NOTE(review): the backing variables are declared int but registered with
 * the "bool" module_param type; newer kernels require the variable type to
 * match the param type -- verify against the target kernel version.
 * ath5k_modparam_nohwcrypt is non-static, presumably referenced from
 * another file of this driver (confirm before changing its type). */
int ath5k_modparam_nohwcrypt;
module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");

static int modparam_all_channels;
module_param_named(all_channels, modparam_all_channels, bool, S_IRUGO);
MODULE_PARM_DESC(all_channels, "Expose all channels the device can use.");

static int modparam_fastchanswitch;
module_param_named(fastchanswitch, modparam_fastchanswitch, bool, S_IRUGO);
MODULE_PARM_DESC(fastchanswitch, "Enable fast channel switching for AR2413/AR5413 radios.");


84
85
86
87
88
89
90
/* Module info */
MODULE_AUTHOR("Jiri Slaby");
MODULE_AUTHOR("Nick Kossifidis");
MODULE_DESCRIPTION("Support for 5xxx series of Atheros 802.11 wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");

/* Forward declarations for functions defined later in this file */
static int ath5k_init(struct ieee80211_hw *hw);
static int ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
								bool skip_pcu);
94
95

/* Known SREVs
 * Maps silicon revision codes (MAC and radio/RAD) to printable chip names;
 * consumed by ath5k_chip_name(). The trailing "xxxxx" entries are
 * catch-alls for unknown revisions. */
static const struct ath5k_srev_name srev_names[] = {
#ifdef CONFIG_ATHEROS_AR231X
	/* SoC (AHB) parts */
	{ "5312",	AR5K_VERSION_MAC,	AR5K_SREV_AR5312_R2 },
	{ "5312",	AR5K_VERSION_MAC,	AR5K_SREV_AR5312_R7 },
	{ "2313",	AR5K_VERSION_MAC,	AR5K_SREV_AR2313_R8 },
	{ "2315",	AR5K_VERSION_MAC,	AR5K_SREV_AR2315_R6 },
	{ "2315",	AR5K_VERSION_MAC,	AR5K_SREV_AR2315_R7 },
	{ "2317",	AR5K_VERSION_MAC,	AR5K_SREV_AR2317_R1 },
	{ "2317",	AR5K_VERSION_MAC,	AR5K_SREV_AR2317_R2 },
#else
	/* PCI/PCIe parts */
	{ "5210",	AR5K_VERSION_MAC,	AR5K_SREV_AR5210 },
	{ "5311",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311 },
	{ "5311A",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311A },
	{ "5311B",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311B },
	{ "5211",	AR5K_VERSION_MAC,	AR5K_SREV_AR5211 },
	{ "5212",	AR5K_VERSION_MAC,	AR5K_SREV_AR5212 },
	{ "5213",	AR5K_VERSION_MAC,	AR5K_SREV_AR5213 },
	{ "5213A",	AR5K_VERSION_MAC,	AR5K_SREV_AR5213A },
	{ "2413",	AR5K_VERSION_MAC,	AR5K_SREV_AR2413 },
	{ "2414",	AR5K_VERSION_MAC,	AR5K_SREV_AR2414 },
	{ "5424",	AR5K_VERSION_MAC,	AR5K_SREV_AR5424 },
	{ "5413",	AR5K_VERSION_MAC,	AR5K_SREV_AR5413 },
	{ "5414",	AR5K_VERSION_MAC,	AR5K_SREV_AR5414 },
	{ "2415",	AR5K_VERSION_MAC,	AR5K_SREV_AR2415 },
	{ "5416",	AR5K_VERSION_MAC,	AR5K_SREV_AR5416 },
	{ "5418",	AR5K_VERSION_MAC,	AR5K_SREV_AR5418 },
	{ "2425",	AR5K_VERSION_MAC,	AR5K_SREV_AR2425 },
	{ "2417",	AR5K_VERSION_MAC,	AR5K_SREV_AR2417 },
#endif
	{ "xxxxx",	AR5K_VERSION_MAC,	AR5K_SREV_UNKNOWN },
	/* Radio (RF) revisions */
	{ "5110",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5110 },
	{ "5111",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5111 },
	{ "5111A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5111A },
	{ "2111",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2111 },
	{ "5112",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112 },
	{ "5112A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112A },
	{ "5112B",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112B },
	{ "2112",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112 },
	{ "2112A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112A },
	{ "2112B",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112B },
	{ "2413",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2413 },
	{ "5413",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5413 },
	{ "5424",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5424 },
	{ "5133",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5133 },
#ifdef CONFIG_ATHEROS_AR231X
	{ "2316",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2316 },
	{ "2317",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2317 },
#endif
	{ "xxxxx",	AR5K_VERSION_RAD,	AR5K_SREV_UNKNOWN },
};

Jiri Slaby's avatar
Jiri Slaby committed
147
/* Rate table used to populate the mac80211 supported bands.
 * .bitrate is in units of 100 kbit/s. Entries 0-3 are the CCK (11b)
 * rates (the three fastest also advertise a short-preamble hw code);
 * entries 4-11 are the OFDM (11a/g) rates. */
static const struct ieee80211_rate ath5k_rates[] = {
	{ .bitrate = 10,
	  .hw_value = ATH5K_RATE_CODE_1M, },
	{ .bitrate = 20,
	  .hw_value = ATH5K_RATE_CODE_2M,
	  .hw_value_short = ATH5K_RATE_CODE_2M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55,
	  .hw_value = ATH5K_RATE_CODE_5_5M,
	  .hw_value_short = ATH5K_RATE_CODE_5_5M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110,
	  .hw_value = ATH5K_RATE_CODE_11M,
	  .hw_value_short = ATH5K_RATE_CODE_11M | AR5K_SET_SHORT_PREAMBLE,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 60,
	  .hw_value = ATH5K_RATE_CODE_6M,
	  .flags = 0 },
	{ .bitrate = 90,
	  .hw_value = ATH5K_RATE_CODE_9M,
	  .flags = 0 },
	{ .bitrate = 120,
	  .hw_value = ATH5K_RATE_CODE_12M,
	  .flags = 0 },
	{ .bitrate = 180,
	  .hw_value = ATH5K_RATE_CODE_18M,
	  .flags = 0 },
	{ .bitrate = 240,
	  .hw_value = ATH5K_RATE_CODE_24M,
	  .flags = 0 },
	{ .bitrate = 360,
	  .hw_value = ATH5K_RATE_CODE_36M,
	  .flags = 0 },
	{ .bitrate = 480,
	  .hw_value = ATH5K_RATE_CODE_48M,
	  .flags = 0 },
	{ .bitrate = 540,
	  .hw_value = ATH5K_RATE_CODE_54M,
	  .flags = 0 },
	/* XR missing */
};

189
190
191
192
193
194
195
196
197
198
/*
 * Extend a 15-bit rx timestamp to a full 64-bit TSF value using the
 * current hardware TSF. If the low 15 TSF bits have already wrapped
 * past @rstamp, the frame belongs to the previous 0x8000 window, so
 * step the TSF back one window before splicing in the timestamp.
 */
static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
{
	u64 now = ath5k_hw_get_tsf64(ah);

	if (rstamp > (now & 0x7fff))
		now -= 0x8000;

	return (now & ~0x7fff) | rstamp;
}

199
const char *
200
201
202
203
204
205
206
207
ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val)
{
	const char *name = "xxxxx";
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(srev_names); i++) {
		if (srev_names[i].sr_type != type)
			continue;
208
209
210
211
212

		if ((val & 0xf0) == srev_names[i].sr_val)
			name = srev_names[i].sr_name;

		if ((val & 0xff) == srev_names[i].sr_val) {
213
214
215
216
217
218
219
			name = srev_names[i].sr_name;
			break;
		}
	}

	return name;
}
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
/* Register read callback for the shared ath layer; hw_priv is our ath5k_hw */
static unsigned int ath5k_ioread32(void *hw_priv, u32 reg_offset)
{
	return ath5k_hw_reg_read((struct ath5k_hw *)hw_priv, reg_offset);
}

/* Register write callback for the shared ath layer; hw_priv is our ath5k_hw */
static void ath5k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
{
	ath5k_hw_reg_write((struct ath5k_hw *)hw_priv, val, reg_offset);
}

/* Register accessors handed to the shared ath layer (ath_common) */
static const struct ath_ops ath5k_common_ops = {
	.read = ath5k_ioread32,
	.write = ath5k_iowrite32,
};
236

237
238
239
240
241
/***********************\
* Driver Initialization *
\***********************/

static int ath5k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
242
{
243
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
244
245
	struct ath5k_hw *ah = hw->priv;
	struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);
246

247
248
	return ath_reg_notifier_apply(wiphy, request, regulatory);
}
249

250
251
252
/********************\
* Channel/mode setup *
\********************/
253

254
255
256
/*
 * Returns true for the channel numbers used without all_channels modparam.
 */
257
static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
258
{
259
260
261
262
263
	if (band == IEEE80211_BAND_2GHZ && chan <= 14)
		return true;

	return	/* UNII 1,2 */
		(((chan & 3) == 0 && chan >= 36 && chan <= 64) ||
264
265
266
		/* midband */
		((chan & 3) == 0 && chan >= 100 && chan <= 140) ||
		/* UNII-3 */
267
268
269
270
271
		((chan & 3) == 1 && chan >= 149 && chan <= 165) ||
		/* 802.11j 5.030-5.080 GHz (20MHz) */
		(chan == 8 || chan == 12 || chan == 16) ||
		/* 802.11j 4.9GHz (20MHz) */
		(chan == 184 || chan == 188 || chan == 192 || chan == 196));
272
}
273

274
/*
 * Fill @channels with the channels the hardware supports for @mode,
 * writing at most @max entries. Returns the number of channels written.
 * Channels rejected by the chipset (ath5k_channel_ok) or, unless the
 * all_channels modparam is set, by ath5k_is_standard_channel() are skipped.
 */
static unsigned int
ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels,
		unsigned int mode, unsigned int max)
{
	unsigned int count, size, freq, ch;
	enum ieee80211_band band;

	switch (mode) {
	case AR5K_MODE_11A:
		/* 1..220, but 2GHz frequencies are filtered by check_channel */
		size = 220;
		band = IEEE80211_BAND_5GHZ;
		break;
	case AR5K_MODE_11B:
	case AR5K_MODE_11G:
		size = 26;
		band = IEEE80211_BAND_2GHZ;
		break;
	default:
		ATH5K_WARN(ah, "bad mode, not copying channels\n");
		return 0;
	}

	count = 0;
	for (ch = 1; ch <= size && count < max; ch++) {
		freq = ieee80211_channel_to_frequency(ch, band);

		if (freq == 0) /* mapping failed - not a standard channel */
			continue;

		/* Write channel info, needed for ath5k_channel_ok() */
		channels[count].center_freq = freq;
		channels[count].band = band;
		channels[count].hw_value = mode;

		/* Check if channel is supported by the chipset */
		if (!ath5k_channel_ok(ah, &channels[count]))
			continue;

		if (!modparam_all_channels &&
		    !ath5k_is_standard_channel(ch, band))
			continue;

		/* only now is slot 'count' considered occupied */
		count++;
	}

	return count;
}
322

323
static void
324
ath5k_setup_rate_idx(struct ath5k_hw *ah, struct ieee80211_supported_band *b)
325
326
{
	u8 i;
327

328
	for (i = 0; i < AR5K_MAX_RATES; i++)
329
		ah->rate_idx[b->band][i] = -1;
330

331
	for (i = 0; i < b->n_bitrates; i++) {
332
		ah->rate_idx[b->band][b->bitrates[i].hw_value] = i;
333
		if (b->bitrates[i].hw_value_short)
334
			ah->rate_idx[b->band][b->bitrates[i].hw_value_short] = i;
335
	}
336
}
337

338
339
340
/*
 * Populate the 2GHz (G, else B) and 5GHz (A) supported-band structures
 * and register them with mac80211 via hw->wiphy->bands.
 *
 * The 2GHz band consumes the head of ah->channels; the 5GHz band uses
 * the remainder (&ah->channels[count_c], max_c entries left).
 * Always returns 0.
 */
static int
ath5k_setup_bands(struct ieee80211_hw *hw)
{
	struct ath5k_hw *ah = hw->priv;
	struct ieee80211_supported_band *sband;
	int max_c, count_c = 0;
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(ah->sbands) < IEEE80211_NUM_BANDS);
	max_c = ARRAY_SIZE(ah->channels);

	/* 2GHz band */
	sband = &ah->sbands[IEEE80211_BAND_2GHZ];
	sband->band = IEEE80211_BAND_2GHZ;
	sband->bitrates = &ah->rates[IEEE80211_BAND_2GHZ][0];

	if (test_bit(AR5K_MODE_11G, ah->ah_capabilities.cap_mode)) {
		/* G mode: all 12 entries of ath5k_rates (CCK + OFDM) */
		memcpy(sband->bitrates, &ath5k_rates[0],
		       sizeof(struct ieee80211_rate) * 12);
		sband->n_bitrates = 12;

		sband->channels = ah->channels;
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
					AR5K_MODE_11G, max_c);

		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
		count_c = sband->n_channels;
		max_c -= count_c;
	} else if (test_bit(AR5K_MODE_11B, ah->ah_capabilities.cap_mode)) {
		/* B mode: only the first 4 (CCK) entries */
		memcpy(sband->bitrates, &ath5k_rates[0],
		       sizeof(struct ieee80211_rate) * 4);
		sband->n_bitrates = 4;

		/* 5211 only supports B rates and uses 4bit rate codes
		 * (e.g normally we have 0x1B for 1M, but on 5211 we have 0x0B)
		 * fix them up here:
		 */
		if (ah->ah_version == AR5K_AR5211) {
			for (i = 0; i < 4; i++) {
				sband->bitrates[i].hw_value =
					sband->bitrates[i].hw_value & 0xF;
				sband->bitrates[i].hw_value_short =
					sband->bitrates[i].hw_value_short & 0xF;
			}
		}

		sband->channels = ah->channels;
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
					AR5K_MODE_11B, max_c);

		hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
		count_c = sband->n_channels;
		max_c -= count_c;
	}
	ath5k_setup_rate_idx(ah, sband);

	/* 5GHz band, A mode: OFDM rates only (ath5k_rates[4..11]) */
	if (test_bit(AR5K_MODE_11A, ah->ah_capabilities.cap_mode)) {
		sband = &ah->sbands[IEEE80211_BAND_5GHZ];
		sband->band = IEEE80211_BAND_5GHZ;
		sband->bitrates = &ah->rates[IEEE80211_BAND_5GHZ][0];

		memcpy(sband->bitrates, &ath5k_rates[4],
		       sizeof(struct ieee80211_rate) * 8);
		sband->n_bitrates = 8;

		sband->channels = &ah->channels[count_c];
		sband->n_channels = ath5k_setup_channels(ah, sband->channels,
					AR5K_MODE_11A, max_c);

		hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
	}
	/* NOTE(review): when 11A is unsupported this re-runs on the 2GHz
	 * sband set above -- looks like a harmless duplicate; confirm. */
	ath5k_setup_rate_idx(ah, sband);

	ath5k_debug_dump_bands(ah);

	return 0;
}

419
420
421
422
423
/*
 * Set/change channels. We always reset the chip.
 * To accomplish this we must first cleanup any pending DMA,
 * then restart stuff after a la  ath5k_init.
 *
 * Called with ah->lock.
 *
 * Returns the result of ath5k_reset() (0 on success, negative errno
 * on failure); skip_pcu is passed as true.
 */
int
ath5k_chan_set(struct ath5k_hw *ah, struct ieee80211_channel *chan)
{
	ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
		  "channel set, resetting (%u -> %u MHz)\n",
		  ah->curchan->center_freq, chan->center_freq);

	/*
	 * To switch channels clear any pending DMA operations;
	 * wait long enough for the RX fifo to drain, reset the
	 * hardware at the new frequency, and then re-enable
	 * the relevant bits of the h/w.
	 */
	return ath5k_reset(ah, chan, true);
}

442
/*
 * Interface iterator callback (also invoked directly): accumulate per-vif
 * state into the ath5k_vif_iter_data in @data.
 *
 * - narrows the BSSID mask to bits common to all MACs vs the hw address
 * - records the first active MAC seen
 * - clears need_set_hw_addr once a vif already uses the hw MAC
 * - tracks whether any vif is associated, counts STA vifs, and derives
 *   the combined operating mode (AP wins over everything else)
 */
void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct ath5k_vif_iter_data *iter_data = data;
	int i;
	struct ath5k_vif *avf = (void *)vif->drv_priv;

	/* keep only mask bits where this MAC agrees with the hw MAC */
	if (iter_data->hw_macaddr)
		for (i = 0; i < ETH_ALEN; i++)
			iter_data->mask[i] &=
				~(iter_data->hw_macaddr[i] ^ mac[i]);

	if (!iter_data->found_active) {
		iter_data->found_active = true;
		memcpy(iter_data->active_mac, mac, ETH_ALEN);
	}

	if (iter_data->need_set_hw_addr && iter_data->hw_macaddr)
		if (compare_ether_addr(iter_data->hw_macaddr, mac) == 0)
			iter_data->need_set_hw_addr = false;

	if (!iter_data->any_assoc) {
		if (avf->assoc)
			iter_data->any_assoc = true;
	}

	/* Calculate combined mode - when APs are active, operate in AP mode.
	 * Otherwise use the mode of the new interface. This can currently
	 * only deal with combinations of APs and STAs. Only one ad-hoc
	 * interfaces is allowed.
	 */
	if (avf->opmode == NL80211_IFTYPE_AP)
		iter_data->opmode = NL80211_IFTYPE_AP;
	else {
		if (avf->opmode == NL80211_IFTYPE_STATION)
			iter_data->n_stas++;
		if (iter_data->opmode == NL80211_IFTYPE_UNSPECIFIED)
			iter_data->opmode = avf->opmode;
	}
}

482
/*
 * Recompute and program the BSSID mask, operating mode, MAC address and
 * RX filter from the set of active interfaces (plus @vif, if given,
 * which may not yet be counted as active by mac80211).
 */
void
ath5k_update_bssid_mask_and_opmode(struct ath5k_hw *ah,
				   struct ieee80211_vif *vif)
{
	struct ath_common *common = ath5k_hw_common(ah);
	struct ath5k_vif_iter_data iter_data;
	u32 rfilt;

	/*
	 * Use the hardware MAC address as reference, the hardware uses it
	 * together with the BSSID mask when matching addresses.
	 */
	iter_data.hw_macaddr = common->macaddr;
	memset(&iter_data.mask, 0xff, ETH_ALEN);
	iter_data.found_active = false;
	iter_data.need_set_hw_addr = true;
	iter_data.opmode = NL80211_IFTYPE_UNSPECIFIED;
	iter_data.n_stas = 0;

	/* fold in the (possibly not-yet-active) vif first */
	if (vif)
		ath5k_vif_iter(&iter_data, vif->addr, vif);

	/* Get list of all active MAC addresses */
	ieee80211_iterate_active_interfaces_atomic(ah->hw, ath5k_vif_iter,
						   &iter_data);
	memcpy(ah->bssidmask, iter_data.mask, ETH_ALEN);

	ah->opmode = iter_data.opmode;
	if (ah->opmode == NL80211_IFTYPE_UNSPECIFIED)
		/* Nothing active, default to station mode */
		ah->opmode = NL80211_IFTYPE_STATION;

	ath5k_hw_set_opmode(ah, ah->opmode);
	ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "mode setup opmode %d (%s)\n",
		  ah->opmode, ath_opmode_to_string(ah->opmode));

	/* program the lladdr only if no vif already uses the hw MAC */
	if (iter_data.need_set_hw_addr && iter_data.found_active)
		ath5k_hw_set_lladdr(ah, iter_data.active_mac);

	if (ath5k_hw_hasbssidmask(ah))
		ath5k_hw_set_bssid_mask(ah, ah->bssidmask);

	/* Set up RX Filter */
	if (iter_data.n_stas > 1) {
		/* If you have multiple STA interfaces connected to
		 * different APs, ARPs are not received (most of the time?)
		 * Enabling PROMISC appears to fix that problem.
		 */
		ah->filter_flags |= AR5K_RX_FILTER_PROM;
	}

	rfilt = ah->filter_flags;
	ath5k_hw_set_rx_filter(ah, rfilt);
	ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);
}
537

538
static inline int
539
ath5k_hw_to_driver_rix(struct ath5k_hw *ah, int hw_rix)
540
541
{
	int rix;
542

543
544
545
546
547
	/* return base rate on errors */
	if (WARN(hw_rix < 0 || hw_rix >= AR5K_MAX_RATES,
			"hw_rix out of bounds: %x\n", hw_rix))
		return 0;

548
	rix = ah->rate_idx[ah->curchan->band][hw_rix];
549
550
551
552
553
554
555
556
557
558
559
	if (WARN(rix < 0, "invalid hw_rix: %x\n", hw_rix))
		rix = 0;

	return rix;
}

/***************\
* Buffers setup *
\***************/

/*
 * Allocate an rx skb sized/aligned by the shared ath layer and map it
 * for device DMA. On success returns the skb and stores the bus address
 * in *skb_addr; on allocation or mapping failure returns NULL (the skb
 * is freed on mapping failure).
 */
static
struct sk_buff *ath5k_rx_skb_alloc(struct ath5k_hw *ah, dma_addr_t *skb_addr)
{
	struct ath_common *common = ath5k_hw_common(ah);
	struct sk_buff *skb;

	/*
	 * Allocate buffer with headroom_needed space for the
	 * fake physical layer header at the start.
	 */
	skb = ath_rxbuf_alloc(common,
			      common->rx_bufsize,
			      GFP_ATOMIC);

	if (!skb) {
		ATH5K_ERR(ah, "can't alloc skbuff of size %u\n",
				common->rx_bufsize);
		return NULL;
	}

	*skb_addr = dma_map_single(ah->dev,
				   skb->data, common->rx_bufsize,
				   DMA_FROM_DEVICE);

	if (unlikely(dma_mapping_error(ah->dev, *skb_addr))) {
		ATH5K_ERR(ah, "%s: DMA mapping failed\n", __func__);
		dev_kfree_skb(skb);
		return NULL;
	}
	return skb;
}
590

591
/*
 * Attach an rx skb to @bf (allocating one if it has none) and append its
 * descriptor to the hardware rx chain. Returns 0 on success or a
 * negative errno.
 */
static int
ath5k_rxbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf)
{
	struct sk_buff *skb = bf->skb;
	struct ath5k_desc *ds;
	int ret;

	if (!skb) {
		skb = ath5k_rx_skb_alloc(ah, &bf->skbaddr);
		if (!skb)
			return -ENOMEM;
		bf->skb = skb;
	}

	/*
	 * Setup descriptors.  For receive we always terminate
	 * the descriptor list with a self-linked entry so we'll
	 * not get overrun under high load (as can happen with a
	 * 5212 when ANI processing enables PHY error frames).
	 *
	 * To ensure the last descriptor is self-linked we create
	 * each descriptor as self-linked and add it to the end.  As
	 * each additional descriptor is added the previous self-linked
	 * entry is "fixed" naturally.  This should be safe even
	 * if DMA is happening.  When processing RX interrupts we
	 * never remove/process the last, self-linked, entry on the
	 * descriptor list.  This ensures the hardware always has
	 * someplace to write a new frame.
	 */
	ds = bf->desc;
	ds->ds_link = bf->daddr;	/* link to self */
	ds->ds_data = bf->skbaddr;
	ret = ath5k_hw_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0);
	if (ret) {
		ATH5K_ERR(ah, "%s: could not setup RX desc\n", __func__);
		return ret;
	}

	/* chain onto the previous descriptor, if there is one */
	if (ah->rxlink != NULL)
		*ah->rxlink = bf->daddr;
	ah->rxlink = &ds->ds_link;
	return 0;
}

635
static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb)
636
{
637
638
639
	struct ieee80211_hdr *hdr;
	enum ath5k_pkt_type htype;
	__le16 fc;
640

641
642
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
643

644
645
646
647
648
649
650
651
	if (ieee80211_is_beacon(fc))
		htype = AR5K_PKT_TYPE_BEACON;
	else if (ieee80211_is_probe_resp(fc))
		htype = AR5K_PKT_TYPE_PROBE_RESP;
	else if (ieee80211_is_atim(fc))
		htype = AR5K_PKT_TYPE_ATIM;
	else if (ieee80211_is_pspoll(fc))
		htype = AR5K_PKT_TYPE_PSPOLL;
652
	else
653
		htype = AR5K_PKT_TYPE_NORMAL;
654

655
	return htype;
656
657
}

658
/*
 * Map @bf's skb for DMA, fill in its tx descriptor (rate, retries, key,
 * RTS/CTS protection) and queue it on @txq, kicking tx DMA.
 *
 * @padsize: header padding that was inserted, passed to the descriptor
 * setup. Returns 0 on success or a negative errno (the DMA mapping is
 * undone on error).
 */
static int
ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
		  struct ath5k_txq *txq, int padsize)
{
	struct ath5k_desc *ds = bf->desc;
	struct sk_buff *skb = bf->skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	unsigned int pktlen, flags, keyidx = AR5K_TXKEYIX_INVALID;
	struct ieee80211_rate *rate;
	unsigned int mrr_rate[3], mrr_tries[3];
	int i, ret;
	u16 hw_rate;
	u16 cts_rate = 0;
	u16 duration = 0;
	u8 rc_flags;

	flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK;

	/* XXX endianness */
	/* NOTE(review): the mapping result is not checked with
	 * dma_mapping_error() before use -- verify intended. */
	bf->skbaddr = dma_map_single(ah->dev, skb->data, skb->len,
			DMA_TO_DEVICE);

	rate = ieee80211_get_tx_rate(ah->hw, info);
	if (!rate) {
		ret = -EINVAL;
		goto err_unmap;
	}

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		flags |= AR5K_TXDESC_NOACK;

	rc_flags = info->control.rates[0].flags;
	hw_rate = (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) ?
		rate->hw_value_short : rate->hw_value;

	pktlen = skb->len;

	/* FIXME: If we are in g mode and rate is a CCK rate
	 * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
	 * from tx power (value is in dB units already) */
	if (info->control.hw_key) {
		keyidx = info->control.hw_key->hw_key_idx;
		pktlen += info->control.hw_key->icv_len;
	}
	if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
		flags |= AR5K_TXDESC_RTSENA;
		cts_rate = ieee80211_get_rts_cts_rate(ah->hw, info)->hw_value;
		duration = le16_to_cpu(ieee80211_rts_duration(ah->hw,
			info->control.vif, pktlen, info));
	}
	if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
		flags |= AR5K_TXDESC_CTSENA;
		cts_rate = ieee80211_get_rts_cts_rate(ah->hw, info)->hw_value;
		duration = le16_to_cpu(ieee80211_ctstoself_duration(ah->hw,
			info->control.vif, pktlen, info));
	}
	ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
		ieee80211_get_hdrlen_from_skb(skb), padsize,
		get_hw_packet_type(skb),
		(ah->power_level * 2),
		hw_rate,
		info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags,
		cts_rate, duration);
	if (ret)
		goto err_unmap;

	/* multi-rate-retry fallback rates (up to 3) */
	memset(mrr_rate, 0, sizeof(mrr_rate));
	memset(mrr_tries, 0, sizeof(mrr_tries));
	for (i = 0; i < 3; i++) {
		rate = ieee80211_get_alt_retry_rate(ah->hw, info, i);
		if (!rate)
			break;

		mrr_rate[i] = rate->hw_value;
		mrr_tries[i] = info->control.rates[i + 1].count;
	}

	ath5k_hw_setup_mrr_tx_desc(ah, ds,
		mrr_rate[0], mrr_tries[0],
		mrr_rate[1], mrr_tries[1],
		mrr_rate[2], mrr_tries[2]);

	ds->ds_link = 0;
	ds->ds_data = bf->skbaddr;

	/* queue the buffer and (re)start tx DMA under the queue lock */
	spin_lock_bh(&txq->lock);
	list_add_tail(&bf->list, &txq->q);
	txq->txq_len++;
	if (txq->link == NULL) /* is this first packet? */
		ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);
	else /* no, so only link it */
		*txq->link = bf->daddr;

	txq->link = &ds->ds_link;
	ath5k_hw_start_tx_dma(ah, txq->qnum);
	mmiowb();
	spin_unlock_bh(&txq->lock);

	return 0;
err_unmap:
	dma_unmap_single(ah->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
	return ret;
}

762
763
764
765
/*******************\
* Descriptors setup *
\*******************/

766
/*
 * Allocate one coherent DMA region holding all tx/rx/beacon descriptors
 * (plus one spare) and a matching array of ath5k_buf bookkeeping
 * structures, then distribute them onto the rxbuf, txbuf and bcbuf
 * lists. Returns 0 on success or a negative errno, with everything
 * freed on failure.
 */
static int
ath5k_desc_alloc(struct ath5k_hw *ah)
{
	struct ath5k_desc *ds;
	struct ath5k_buf *bf;
	dma_addr_t da;
	unsigned int i;
	int ret;

	/* allocate descriptors */
	ah->desc_len = sizeof(struct ath5k_desc) *
			(ATH_TXBUF + ATH_RXBUF + ATH_BCBUF + 1);

	ah->desc = dma_alloc_coherent(ah->dev, ah->desc_len,
				&ah->desc_daddr, GFP_KERNEL);
	if (ah->desc == NULL) {
		ATH5K_ERR(ah, "can't allocate descriptors\n");
		ret = -ENOMEM;
		goto err;
	}
	ds = ah->desc;
	da = ah->desc_daddr;
	ATH5K_DBG(ah, ATH5K_DEBUG_ANY, "DMA map: %p (%zu) -> %llx\n",
		ds, ah->desc_len, (unsigned long long)ah->desc_daddr);

	bf = kcalloc(1 + ATH_TXBUF + ATH_RXBUF + ATH_BCBUF,
			sizeof(struct ath5k_buf), GFP_KERNEL);
	if (bf == NULL) {
		ATH5K_ERR(ah, "can't allocate bufptr\n");
		ret = -ENOMEM;
		goto err_free;
	}
	ah->bufptr = bf;

	/* rx buffers first, each paired with a descriptor slot */
	INIT_LIST_HEAD(&ah->rxbuf);
	for (i = 0; i < ATH_RXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &ah->rxbuf);
	}

	INIT_LIST_HEAD(&ah->txbuf);
	ah->txbuf_len = ATH_TXBUF;
	for (i = 0; i < ATH_TXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &ah->txbuf);
	}

	/* beacon buffers */
	INIT_LIST_HEAD(&ah->bcbuf);
	for (i = 0; i < ATH_BCBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &ah->bcbuf);
	}

	return 0;
err_free:
	dma_free_coherent(ah->dev, ah->desc_len, ah->desc, ah->desc_daddr);
err:
	ah->desc = NULL;
	return ret;
}
830

831
void
832
ath5k_txbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf)
833
834
835
836
{
	BUG_ON(!bf);
	if (!bf->skb)
		return;
837
	dma_unmap_single(ah->dev, bf->skbaddr, bf->skb->len,
838
839
840
841
842
843
844
845
			DMA_TO_DEVICE);
	dev_kfree_skb_any(bf->skb);
	bf->skb = NULL;
	bf->skbaddr = 0;
	bf->desc->ds_data = 0;
}

void
846
ath5k_rxbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf)
847
848
849
850
851
852
{
	struct ath_common *common = ath5k_hw_common(ah);

	BUG_ON(!bf);
	if (!bf->skb)
		return;
853
	dma_unmap_single(ah->dev, bf->skbaddr, common->rx_bufsize,
854
855
856
857
858
859
860
			DMA_FROM_DEVICE);
	dev_kfree_skb_any(bf->skb);
	bf->skb = NULL;
	bf->skbaddr = 0;
	bf->desc->ds_data = 0;
}

861
static void
862
ath5k_desc_free(struct ath5k_hw *ah)
863
864
{
	struct ath5k_buf *bf;
865

866
867
868
869
870
871
	list_for_each_entry(bf, &ah->txbuf, list)
		ath5k_txbuf_free_skb(ah, bf);
	list_for_each_entry(bf, &ah->rxbuf, list)
		ath5k_rxbuf_free_skb(ah, bf);
	list_for_each_entry(bf, &ah->bcbuf, list)
		ath5k_txbuf_free_skb(ah, bf);
872

873
	/* Free memory associated with all descriptors */
874
875
876
	dma_free_coherent(ah->dev, ah->desc_len, ah->desc, ah->desc_daddr);
	ah->desc = NULL;
	ah->desc_daddr = 0;
877

878
879
	kfree(ah->bufptr);
	ah->bufptr = NULL;
880
881
}

882
883
884
885
886
887

/**************\
* Queues setup *
\**************/

/*
 * Create (or reuse) a hardware tx queue of @qtype/@subtype and return the
 * matching driver-side ath5k_txq. Returns an ERR_PTR on failure; the
 * caller must check with IS_ERR().
 */
static struct ath5k_txq *
ath5k_txq_setup(struct ath5k_hw *ah,
		int qtype, int subtype)
{
	struct ath5k_txq *txq;
	struct ath5k_txq_info qi = {
		.tqi_subtype = subtype,
		/* XXX: default values not correct for B and XR channels,
		 * but who cares? */
		.tqi_aifs = AR5K_TUNE_AIFS,
		.tqi_cw_min = AR5K_TUNE_CWMIN,
		.tqi_cw_max = AR5K_TUNE_CWMAX
	};
	int qnum;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors.  Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames.  Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up in which case the top half of the kernel may backup
	 * due to a lack of tx descriptors.
	 */
	qi.tqi_flags = AR5K_TXQ_FLAG_TXEOLINT_ENABLE |
				AR5K_TXQ_FLAG_TXDESCINT_ENABLE;
	qnum = ath5k_hw_setup_tx_queue(ah, qtype, &qi);
	if (qnum < 0) {
		/*
		 * NB: don't print a message, this happens
		 * normally on parts with too few tx queues
		 */
		return ERR_PTR(qnum);
	}
	if (qnum >= ARRAY_SIZE(ah->txqs)) {
		ATH5K_ERR(ah, "hw qnum %u out of range, max %tu!\n",
			qnum, ARRAY_SIZE(ah->txqs));
		ath5k_hw_release_tx_queue(ah, qnum);
		return ERR_PTR(-EINVAL);
	}
	txq = &ah->txqs[qnum];
	/* initialize driver-side state only on first use of this qnum */
	if (!txq->setup) {
		txq->qnum = qnum;
		txq->link = NULL;
		INIT_LIST_HEAD(&txq->q);
		spin_lock_init(&txq->lock);
		txq->setup = true;
		txq->txq_len = 0;
		txq->txq_max = ATH5K_TXQ_LEN_MAX;
		txq->txq_poll_mark = false;
		txq->txq_stuck = 0;
	}
	return &ah->txqs[qnum];
}

945
946
/*
 * Create the hardware beacon tx queue. Returns the hw queue number on
 * success or a negative error code from ath5k_hw_setup_tx_queue().
 */
static int
ath5k_beaconq_setup(struct ath5k_hw *ah)
{
	struct ath5k_txq_info qi = {
		/* XXX: default values not correct for B and XR channels,
		 * but who cares? */
		.tqi_aifs = AR5K_TUNE_AIFS,
		.tqi_cw_min = AR5K_TUNE_CWMIN,
		.tqi_cw_max = AR5K_TUNE_CWMAX,
		/* NB: for dynamic turbo, don't enable any other interrupts */
		.tqi_flags = AR5K_TXQ_FLAG_TXDESCINT_ENABLE
	};

	return ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_BEACON, &qi);
}

961
static int
962
ath5k_beaconq_config(struct ath5k_hw *ah)
963
{
964
965
	struct ath5k_txq_info qi;
	int ret;
966

967
	ret = ath5k_hw_get_tx_queueprops(ah, ah->bhalq, &qi);
968
969
	if (ret)
		goto err;
970

971
972
	if (ah->opmode == NL80211_IFTYPE_AP ||
	    ah->opmode == NL80211_IFTYPE_MESH_POINT) {
973
974
975
976
977
978
979
		/*
		 * Always burst out beacon and CAB traffic
		 * (aifs = cwmin = cwmax = 0)
		 */
		qi.tqi_aifs = 0;
		qi.tqi_cw_min = 0;
		qi.tqi_cw_max = 0;
980
	} else if (ah->opmode == NL80211_IFTYPE_ADHOC) {
981
982
983
984
985
		/*
		 * Adhoc mode; backoff between 0 and (2 * cw_min).
		 */
		qi.tqi_aifs = 0;
		qi.tqi_cw_min = 0;
986
		qi.tqi_cw_max = 2 * AR5K_TUNE_CWMIN;
987
	}
988

989
	ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
990
991
		"beacon queueprops tqi_aifs:%d tqi_cw_min:%d tqi_cw_max:%d\n",
		qi.tqi_aifs, qi.tqi_cw_min, qi.tqi_cw_max);
992

993
	ret = ath5k_hw_set_tx_queueprops(ah, ah->bhalq, &qi);
994
	if (ret) {
995
		ATH5K_ERR(ah, "%s: unable to update parameters for beacon "
996
997
998
			"hardware queue!\n", __func__);
		goto err;
	}
999
	ret = ath5k_hw_reset_tx_queue(ah, ah->bhalq); /* push to h/w */
1000
1001
	if (ret)
		goto err;
1002

1003
1004
1005
1006
	/* reconfigure cabq with ready time to 80% of beacon_interval */
	ret = ath5k_hw_get_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
	if (ret)