/******************************************************************************
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>

#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
#include "iwl-agn-hw.h"
#include "iwl-agn.h"
#include "iwl-sta.h"
#include "iwl-trans.h"

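/*
 * The scheduler SSN (start sequence number) follows the per-frame status
 * entries in the Tx response; it tells us how far the uCode scheduler has
 * advanced this queue.
 */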
static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp)
{
	return le32_to_cpup((__le32 *)&tx_resp->status +
			    tx_resp->frame_count) & MAX_SN;
}

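/* Bump the per-reason debug counter for a failed (non-aggregation) Tx status */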
static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status)
{
	status &= TX_STATUS_MSK;

	switch (status) {
	case TX_STATUS_POSTPONE_DELAY:
		priv->_agn.reply_tx_stats.pp_delay++;
		break;
	case TX_STATUS_POSTPONE_FEW_BYTES:
		priv->_agn.reply_tx_stats.pp_few_bytes++;
		break;
	case TX_STATUS_POSTPONE_BT_PRIO:
		priv->_agn.reply_tx_stats.pp_bt_prio++;
		break;
	case TX_STATUS_POSTPONE_QUIET_PERIOD:
		priv->_agn.reply_tx_stats.pp_quiet_period++;
		break;
	case TX_STATUS_POSTPONE_CALC_TTAK:
		priv->_agn.reply_tx_stats.pp_calc_ttak++;
		break;
	case TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY:
		priv->_agn.reply_tx_stats.int_crossed_retry++;
		break;
	case TX_STATUS_FAIL_SHORT_LIMIT:
		priv->_agn.reply_tx_stats.short_limit++;
		break;
	case TX_STATUS_FAIL_LONG_LIMIT:
		priv->_agn.reply_tx_stats.long_limit++;
		break;
	case TX_STATUS_FAIL_FIFO_UNDERRUN:
		priv->_agn.reply_tx_stats.fifo_underrun++;
		break;
	case TX_STATUS_FAIL_DRAIN_FLOW:
		priv->_agn.reply_tx_stats.drain_flow++;
		break;
	case TX_STATUS_FAIL_RFKILL_FLUSH:
		priv->_agn.reply_tx_stats.rfkill_flush++;
		break;
	case TX_STATUS_FAIL_LIFE_EXPIRE:
		priv->_agn.reply_tx_stats.life_expire++;
		break;
	case TX_STATUS_FAIL_DEST_PS:
		priv->_agn.reply_tx_stats.dest_ps++;
		break;
	case TX_STATUS_FAIL_HOST_ABORTED:
		priv->_agn.reply_tx_stats.host_abort++;
		break;
	case TX_STATUS_FAIL_BT_RETRY:
		priv->_agn.reply_tx_stats.bt_retry++;
		break;
	case TX_STATUS_FAIL_STA_INVALID:
		priv->_agn.reply_tx_stats.sta_invalid++;
		break;
	case TX_STATUS_FAIL_FRAG_DROPPED:
		priv->_agn.reply_tx_stats.frag_drop++;
		break;
	case TX_STATUS_FAIL_TID_DISABLE:
		priv->_agn.reply_tx_stats.tid_disable++;
		break;
	case TX_STATUS_FAIL_FIFO_FLUSHED:
		priv->_agn.reply_tx_stats.fifo_flush++;
		break;
	case TX_STATUS_FAIL_INSUFFICIENT_CF_POLL:
		priv->_agn.reply_tx_stats.insuff_cf_poll++;
		break;
	case TX_STATUS_FAIL_PASSIVE_NO_RX:
		priv->_agn.reply_tx_stats.fail_hw_drop++;
		break;
	case TX_STATUS_FAIL_NO_BEACON_ON_RADAR:
		priv->_agn.reply_tx_stats.sta_color_mismatch++;
		break;
	default:
		priv->_agn.reply_tx_stats.unknown++;
		break;
	}
}

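/* Same accounting as above, but for the per-frame status words of an A-MPDU */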
static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status)
{
	status &= AGG_TX_STATUS_MSK;

	switch (status) {
	case AGG_TX_STATE_UNDERRUN_MSK:
		priv->_agn.reply_agg_tx_stats.underrun++;
		break;
	case AGG_TX_STATE_BT_PRIO_MSK:
		priv->_agn.reply_agg_tx_stats.bt_prio++;
		break;
	case AGG_TX_STATE_FEW_BYTES_MSK:
		priv->_agn.reply_agg_tx_stats.few_bytes++;
		break;
	case AGG_TX_STATE_ABORT_MSK:
		priv->_agn.reply_agg_tx_stats.abort++;
		break;
	case AGG_TX_STATE_LAST_SENT_TTL_MSK:
		priv->_agn.reply_agg_tx_stats.last_sent_ttl++;
		break;
	case AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK:
		priv->_agn.reply_agg_tx_stats.last_sent_try++;
		break;
	case AGG_TX_STATE_LAST_SENT_BT_KILL_MSK:
		priv->_agn.reply_agg_tx_stats.last_sent_bt_kill++;
		break;
	case AGG_TX_STATE_SCD_QUERY_MSK:
		priv->_agn.reply_agg_tx_stats.scd_query++;
		break;
	case AGG_TX_STATE_TEST_BAD_CRC32_MSK:
		priv->_agn.reply_agg_tx_stats.bad_crc32++;
		break;
	case AGG_TX_STATE_RESPONSE_MSK:
		priv->_agn.reply_agg_tx_stats.response++;
		break;
	case AGG_TX_STATE_DUMP_TX_MSK:
		priv->_agn.reply_agg_tx_stats.dump_tx++;
		break;
	case AGG_TX_STATE_DELAY_TX_MSK:
		priv->_agn.reply_agg_tx_stats.delay_tx++;
		break;
	default:
		priv->_agn.reply_agg_tx_stats.unknown++;
		break;
	}
}

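/*
 * Fill in the mac80211 tx_info status (retry count, success flags, rate)
 * for a completed frame and update the Tx error counters.
 */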
static void iwlagn_set_tx_status(struct iwl_priv *priv,
				 struct ieee80211_tx_info *info,
				 struct iwl_rxon_context *ctx,
				 struct iwlagn_tx_resp *tx_resp,
				 int txq_id, bool is_agg)
{
	u16  status = le16_to_cpu(tx_resp->status.status);

	info->status.rates[0].count = tx_resp->failure_frame + 1;
	if (is_agg)
		info->flags &= ~IEEE80211_TX_CTL_AMPDU;
	info->flags |= iwl_tx_status_to_mac80211(status);
	iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
				    info);
	if (!iwl_is_tx_success(status))
		iwlagn_count_tx_err_status(priv, status);

	if (status == TX_STATUS_FAIL_PASSIVE_NO_RX &&
	    iwl_is_associated_ctx(ctx) && ctx->vif &&
	    ctx->vif->type == NL80211_IFTYPE_STATION) {
		ctx->last_tx_rejected = true;
		iwl_stop_queue(priv, &priv->txq[txq_id]);
	}

	IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) rate_n_flags "
			   "0x%x retries %d\n",
			   txq_id,
			   iwl_get_tx_fail_reason(status), status,
			   le32_to_cpu(tx_resp->rate_n_flags),
			   tx_resp->failure_frame);
}

#ifdef CONFIG_IWLWIFI_DEBUG
#define AGG_TX_STATE_FAIL(x) case AGG_TX_STATE_ ## x: return #x

const char *iwl_get_agg_tx_fail_reason(u16 status)
{
	status &= AGG_TX_STATUS_MSK;
	switch (status) {
	case AGG_TX_STATE_TRANSMITTED:
		return "SUCCESS";
		AGG_TX_STATE_FAIL(UNDERRUN_MSK);
		AGG_TX_STATE_FAIL(BT_PRIO_MSK);
		AGG_TX_STATE_FAIL(FEW_BYTES_MSK);
		AGG_TX_STATE_FAIL(ABORT_MSK);
		AGG_TX_STATE_FAIL(LAST_SENT_TTL_MSK);
		AGG_TX_STATE_FAIL(LAST_SENT_TRY_CNT_MSK);
		AGG_TX_STATE_FAIL(LAST_SENT_BT_KILL_MSK);
		AGG_TX_STATE_FAIL(SCD_QUERY_MSK);
		AGG_TX_STATE_FAIL(TEST_BAD_CRC32_MSK);
		AGG_TX_STATE_FAIL(RESPONSE_MSK);
		AGG_TX_STATE_FAIL(DUMP_TX_MSK);
		AGG_TX_STATE_FAIL(DELAY_TX_MSK);
	}

	return "UNKNOWN";
}
#endif /* CONFIG_IWLWIFI_DEBUG */

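/*
 * Handle the Tx response for an aggregation queue: build a bitmap of the
 * frames still pending in the 64-frame window so that the following
 * block-ack notification can be matched against it.
 */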
static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv,
				      struct iwl_ht_agg *agg,
				      struct iwlagn_tx_resp *tx_resp,
				      int txq_id, u16 start_idx)
{
	u16 status;
	struct agg_tx_status *frame_status = &tx_resp->status;
	struct ieee80211_hdr *hdr = NULL;
	int i, sh, idx;
	u16 seq;

	if (agg->wait_for_ba)
		IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");

	agg->frame_count = tx_resp->frame_count;
	agg->start_idx = start_idx;
	agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
	agg->bitmap = 0;

	/* # frames attempted by Tx command */
	if (agg->frame_count == 1) {
		struct iwl_tx_info *txb;

		/* Only one frame was attempted; no block-ack will arrive */
		idx = start_idx;

		IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
				   agg->frame_count, agg->start_idx, idx);
		txb = &priv->txq[txq_id].txb[idx];
		iwlagn_set_tx_status(priv, IEEE80211_SKB_CB(txb->skb),
				     txb->ctx, tx_resp, txq_id, true);
		agg->wait_for_ba = 0;
	} else {
		/* Two or more frames were attempted; expect block-ack */
		u64 bitmap = 0;

		/*
		 * Start is the lowest frame sent. It may not be the first
		 * frame in the batch; we figure this out dynamically during
		 * the following loop.
		 */
		int start = agg->start_idx;

		/* Construct bit-map of pending frames within Tx window */
		for (i = 0; i < agg->frame_count; i++) {
			u16 sc;
			status = le16_to_cpu(frame_status[i].status);
			seq  = le16_to_cpu(frame_status[i].sequence);
			idx = SEQ_TO_INDEX(seq);
			txq_id = SEQ_TO_QUEUE(seq);

			if (status & AGG_TX_STATUS_MSK)
				iwlagn_count_agg_tx_err_status(priv, status);

			if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
				      AGG_TX_STATE_ABORT_MSK))
				continue;

			IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
					   agg->frame_count, txq_id, idx);
			IWL_DEBUG_TX_REPLY(priv, "status %s (0x%08x), "
					   "try-count (0x%08x)\n",
					   iwl_get_agg_tx_fail_reason(status),
					   status & AGG_TX_STATUS_MSK,
					   status & AGG_TX_TRY_MSK);

			hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
			if (!hdr) {
				IWL_ERR(priv,
					"BUG_ON idx doesn't point to valid skb"
					" idx=%d, txq_id=%d\n", idx, txq_id);
				return -1;
			}

			sc = le16_to_cpu(hdr->seq_ctrl);
			if (idx != (SEQ_TO_SN(sc) & 0xff)) {
				IWL_ERR(priv,
					"BUG_ON idx doesn't match seq control"
					" idx=%d, seq_idx=%d, seq=%d\n",
					  idx, SEQ_TO_SN(sc),
					  hdr->seq_ctrl);
				return -1;
			}

			IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
					   i, idx, SEQ_TO_SN(sc));

			/*
			 * sh -> how many frames ahead of the starting frame is
			 * the current one?
			 *
			 * Note that all frames sent in the batch must be in a
			 * 64-frame window, so this number should be in [0,63].
			 * If outside of this window, then we've found a new
			 * "first" frame in the batch and need to change start.
			 */
			sh = idx - start;

			/*
			 * If >= 64, out of window. start must be at the front
			 * of the circular buffer, idx must be near the end of
			 * the buffer, and idx is the new "first" frame. Shift
			 * the indices around.
			 */
			if (sh >= 64) {
				/* Shift bitmap by start - idx, wrapped */
				sh = 0x100 - idx + start;
				bitmap = bitmap << sh;
				/* Now idx is the new start so sh = 0 */
				sh = 0;
				start = idx;
			/*
			 * If <= -64 then wraps the 256-pkt circular buffer
			 * (e.g., start = 255 and idx = 0, sh should be 1)
			 */
			} else if (sh <= -64) {
				sh  = 0x100 - start + idx;
			/*
			 * If < 0 but > -64, out of window. idx is before start
			 * but not wrapped. Shift the indices around.
			 */
			} else if (sh < 0) {
				/* Shift by how far start is ahead of idx */
				sh = start - idx;
				bitmap = bitmap << sh;
				/* Now idx is the new start so sh = 0 */
				start = idx;
				sh = 0;
			}
			/* Sequence number start + sh was sent in this batch */
			bitmap |= 1ULL << sh;
			IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
					   start, (unsigned long long)bitmap);
		}

		/*
		 * Store the bitmap and possibly the new start, if we wrapped
		 * the buffer above
		 */
		agg->bitmap = bitmap;
		agg->start_idx = start;
		IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
				   agg->frame_count, agg->start_idx,
				   (unsigned long long)agg->bitmap);

		if (bitmap)
			agg->wait_for_ba = 1;
	}
	return 0;
}

void iwl_check_abort_status(struct iwl_priv *priv,
			    u8 frame_count, u32 status)
{
	if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
		IWL_ERR(priv, "Tx flush command to flush out all frames\n");
		if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
			queue_work(priv->workqueue, &priv->tx_flush);
	}
}

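/*
 * REPLY_TX handler: reclaim the transmitted frame(s), report their status
 * to mac80211 and wake the queue again if there is room.
 */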
static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct ieee80211_tx_info *info;
	struct iwlagn_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
	struct iwl_tx_info *txb;
	u32 status = le16_to_cpu(tx_resp->status.status);
	int tid;
	int sta_id;
	int freed;
	unsigned long flags;

	if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
		IWL_ERR(priv, "%s: Read index for DMA queue txq_id (%d) "
			  "index %d is out of range [0-%d] %d %d\n", __func__,
			  txq_id, index, txq->q.n_bd, txq->q.write_ptr,
			  txq->q.read_ptr);
		return;
	}

	txq->time_stamp = jiffies;
	txb = &txq->txb[txq->q.read_ptr];
	info = IEEE80211_SKB_CB(txb->skb);
	memset(&info->status, 0, sizeof(info->status));

	tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
		IWLAGN_TX_RES_TID_POS;
	sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
		IWLAGN_TX_RES_RA_POS;

	spin_lock_irqsave(&priv->sta_lock, flags);
	if (txq->sched_retry) {
		const u32 scd_ssn = iwlagn_get_scd_ssn(tx_resp);
		struct iwl_ht_agg *agg;

		agg = &priv->stations[sta_id].tid[tid].agg;
		/*
		 * If the BT kill count is non-zero, we'll get this
		 * notification again.
		 */
		if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
		    priv->cfg->bt_params &&
		    priv->cfg->bt_params->advanced_bt_coexist) {
			IWL_DEBUG_COEX(priv, "receive reply tx with bt_kill\n");
		}
		iwlagn_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);

		/* check if BAR is needed */
		if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status))
			info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

		if (txq->q.read_ptr != (scd_ssn & 0xff)) {
			index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
			IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim "
					"scd_ssn=%d idx=%d txq=%d swq=%d\n",
					scd_ssn , index, txq_id, txq->swq_id);

			freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
			iwl_free_tfds_in_queue(priv, sta_id, tid, freed);

			if (priv->mac80211_registered &&
			    (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
			    (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
				iwl_wake_queue(priv, txq);
		}
	} else {
		iwlagn_set_tx_status(priv, info, txb->ctx, tx_resp,
				     txq_id, false);
		freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
		iwl_free_tfds_in_queue(priv, sta_id, tid, freed);

		if (priv->mac80211_registered &&
		    iwl_queue_space(&txq->q) > txq->q.low_mark &&
		    status != TX_STATUS_FAIL_PASSIVE_NO_RX)
			iwl_wake_queue(priv, txq);
	}

	iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);

	iwl_check_abort_status(priv, tx_resp->frame_count, status);
	spin_unlock_irqrestore(&priv->sta_lock, flags);
}

void iwlagn_rx_handler_setup(struct iwl_priv *priv)
{
	/* init calibration handlers */
	priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] =
					iwlagn_rx_calib_result;
	priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;

	/* set up notification wait support */
	spin_lock_init(&priv->_agn.notif_wait_lock);
	INIT_LIST_HEAD(&priv->_agn.notif_waits);
	init_waitqueue_head(&priv->_agn.notif_waitq);
}

void iwlagn_setup_deferred_work(struct iwl_priv *priv)
{
	/*
	 * Nothing needs to be done here anymore;
	 * kept for future use if needed.
	 */
}

int iwlagn_hw_valid_rtc_data_addr(u32 addr)
{
	return (addr >= IWLAGN_RTC_DATA_LOWER_BOUND) &&
		(addr < IWLAGN_RTC_DATA_UPPER_BOUND);
}

int iwlagn_send_tx_power(struct iwl_priv *priv)
{
	struct iwlagn_tx_power_dbm_cmd tx_power_cmd;
	u8 tx_ant_cfg_cmd;

	if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
		      "TX Power requested while scanning!\n"))
		return -EAGAIN;

	/* half dBm need to multiply */
	tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt);

	if (priv->tx_power_lmt_in_half_dbm &&
	    priv->tx_power_lmt_in_half_dbm < tx_power_cmd.global_lmt) {
		/*
		 * Newer devices use an enhanced/extended tx power table in
		 * the EEPROM, with values in half-dBm. The driver has to
		 * convert to dBm before reporting to mac80211, which may
		 * lose the 1/2 dBm resolution. The driver rounds up before
		 * reporting, which could push the tx power 1/2 dBm over the
		 * regulatory limit. Check here: if "tx_power_user_lmt" is
		 * higher than the EEPROM value (in half-dBm), lower the tx
		 * power to the EEPROM limit.
		 */
		tx_power_cmd.global_lmt = priv->tx_power_lmt_in_half_dbm;
	}
	tx_power_cmd.flags = IWLAGN_TX_POWER_NO_CLOSED;
	tx_power_cmd.srv_chan_lmt = IWLAGN_TX_POWER_AUTO;

	if (IWL_UCODE_API(priv->ucode_ver) == 1)
		tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD_V1;
	else
		tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;

	return trans_send_cmd_pdu(priv, tx_ant_cfg_cmd, CMD_SYNC,
			sizeof(tx_power_cmd), &tx_power_cmd);
}

void iwlagn_temperature(struct iwl_priv *priv)
{
	/* store temperature from correct statistics (in Celsius) */
	priv->temperature = le32_to_cpu(priv->statistics.common.temperature);
	iwl_tt_handler(priv);
}

u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv)
{
	struct iwl_eeprom_calib_hdr {
		u8 version;
		u8 pa_type;
		u16 voltage;
	} *hdr;

	hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
							EEPROM_CALIB_ALL);
	return hdr->version;

}

/*
 * EEPROM
 */
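/*
 * Indirectly-addressed EEPROM locations encode a link type in the upper
 * bits; the real offset is read from the matching EEPROM link entry,
 * which is stored in words (hence the shift to bytes below).
 */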
static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
{
	u16 offset = 0;

	if ((address & INDIRECT_ADDRESS) == 0)
		return address;

	switch (address & INDIRECT_TYPE_MSK) {
	case INDIRECT_HOST:
		offset = iwl_eeprom_query16(priv, EEPROM_LINK_HOST);
		break;
	case INDIRECT_GENERAL:
		offset = iwl_eeprom_query16(priv, EEPROM_LINK_GENERAL);
		break;
	case INDIRECT_REGULATORY:
		offset = iwl_eeprom_query16(priv, EEPROM_LINK_REGULATORY);
		break;
	case INDIRECT_TXP_LIMIT:
		offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT);
		break;
	case INDIRECT_TXP_LIMIT_SIZE:
		offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT_SIZE);
		break;
	case INDIRECT_CALIBRATION:
		offset = iwl_eeprom_query16(priv, EEPROM_LINK_CALIBRATION);
		break;
	case INDIRECT_PROCESS_ADJST:
		offset = iwl_eeprom_query16(priv, EEPROM_LINK_PROCESS_ADJST);
		break;
	case INDIRECT_OTHERS:
		offset = iwl_eeprom_query16(priv, EEPROM_LINK_OTHERS);
		break;
	default:
		IWL_ERR(priv, "illegal indirect type: 0x%X\n",
		address & INDIRECT_TYPE_MSK);
		break;
	}

	/* translate the offset from words to byte */
	return (address & ADDRESS_MSK) + (offset << 1);
}

const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
{
	u32 address = eeprom_indirect_address(priv, offset);
	BUG_ON(address >= priv->cfg->base_params->eeprom_size);
	return &priv->eeprom[address];
}

struct iwl_mod_params iwlagn_mod_params = {
	.amsdu_size_8K = 1,
	.restart_fw = 1,
	.plcp_check = true,
	.bt_coex_active = true,
	.no_sleep_autoadjust = true,
	.power_level = IWL_POWER_INDEX_1,
	/* the rest are 0 by default */
};

int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
	u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */

	rb_timeout = RX_RB_TIMEOUT;

	if (iwlagn_mod_params.amsdu_size_8K)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
			   rb_size|
			   (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	return 0;
}

static void iwlagn_set_pwr_vmain(struct iwl_priv *priv)
{
/*
 * (for documentation purposes)
 * to set power to V_AUX, do:

		if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
			iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
					       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
					       ~APMG_PS_CTRL_MSK_PWR_SRC);
 */

	iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
			       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			       ~APMG_PS_CTRL_MSK_PWR_SRC);
}

int iwlagn_hw_nic_init(struct iwl_priv *priv)
{
	unsigned long flags;
	struct iwl_rx_queue *rxq = &priv->rxq;

	/* nic_init */
	spin_lock_irqsave(&priv->lock, flags);
	iwl_apm_init(priv);

	/* Set interrupt coalescing calibration timer to default (512 usecs) */
	iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);

	spin_unlock_irqrestore(&priv->lock, flags);

	iwlagn_set_pwr_vmain(priv);

	priv->cfg->lib->nic_config(priv);

	/* Allocate the RX queue, or reset if it is already allocated */
	trans_rx_init(priv);

	iwlagn_rx_replenish(priv);

	iwlagn_rx_init(priv, rxq);

	spin_lock_irqsave(&priv->lock, flags);

	rxq->need_update = 1;
	iwl_rx_queue_update_write_ptr(priv, rxq);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Allocate or reset and init all Tx and Command queues */
	if (trans_tx_init(priv))
		return -ENOMEM;

	if (priv->cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(priv, CSR_MAC_SHADOW_REG_CTRL,
			0x800FFFFF);
	}

	set_bit(STATUS_INIT, &priv->status);

	return 0;
}

/**
 * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwlagn_dma_addr2rbd_ptr(struct iwl_priv *priv,
					  dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}

/**
 * iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
void iwlagn_rx_queue_restock(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	unsigned long flags;

	spin_lock_irqsave(&rxq->lock, flags);
	while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		element = rxq->rx_free.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
		list_del(element);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(priv,
							      rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);
	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		queue_work(priv->workqueue, &priv->rx_replenish);


	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		iwl_rx_queue_update_write_ptr(priv, rxq);
	}
}

/**
 * iwlagn_rx_replenish - Move all used packets from rx_used to rx_free
 *
 * When moving to rx_free an SKB is allocated for the slot.
 *
 * Also restock the Rx queue via iwl_rx_queue_restock.
 * This is called as a scheduled work item (except during initialization).
 */
void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;
	unsigned long flags;
	gfp_t gfp_mask = priority;

	while (1) {
		spin_lock_irqsave(&rxq->lock, flags);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&rxq->lock, flags);

		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		if (priv->hw_params.rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Alloc a new receive buffer */
		page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
		if (!page) {
			if (net_ratelimit())
				IWL_DEBUG_INFO(priv, "alloc_pages failed, "
					       "order: %d\n",
					       priv->hw_params.rx_page_order);

			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
			    net_ratelimit())
				IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
					 priority == GFP_ATOMIC ?  "GFP_ATOMIC" : "GFP_KERNEL",
					 rxq->free_count);
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			return;
		}

		spin_lock_irqsave(&rxq->lock, flags);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			__free_pages(page, priv->hw_params.rx_page_order);
			return;
		}
		element = rxq->rx_used.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
		list_del(element);

		spin_unlock_irqrestore(&rxq->lock, flags);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma = dma_map_page(priv->bus.dev, page, 0,
				PAGE_SIZE << priv->hw_params.rx_page_order,
				DMA_FROM_DEVICE);
		/* dma address must be no more than 36 bits */
		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
		/* and also 256 byte aligned! */
		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

		spin_lock_irqsave(&rxq->lock, flags);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock_irqrestore(&rxq->lock, flags);
	}
}

void iwlagn_rx_replenish(struct iwl_priv *priv)
{
	unsigned long flags;

	iwlagn_rx_allocate(priv, GFP_KERNEL);

	spin_lock_irqsave(&priv->lock, flags);
	iwlagn_rx_queue_restock(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
}

void iwlagn_rx_replenish_now(struct iwl_priv *priv)
{
	iwlagn_rx_allocate(priv, GFP_ATOMIC);

	iwlagn_rx_queue_restock(priv);
}

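/*
 * Map a uCode rate_n_flags value to a mac80211 rate index: HT rates carry
 * the MCS number directly, legacy rates are looked up in the PLCP rate
 * table for the current band.
 */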
int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
{
	int idx = 0;
	int band_offset = 0;

	/* HT rate format: mac80211 wants an MCS number, which is just LSB */
	if (rate_n_flags & RATE_MCS_HT_MSK) {
		idx = (rate_n_flags & 0xff);
		return idx;
	/* Legacy rate format, search for match in table */
	} else {
		if (band == IEEE80211_BAND_5GHZ)
			band_offset = IWL_FIRST_OFDM_RATE;
		for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
			if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
				return idx - band_offset;
	}

	return -1;
}

static int iwl_get_single_channel_for_scan(struct iwl_priv *priv,
					   struct ieee80211_vif *vif,
					   enum ieee80211_band band,
					   struct iwl_scan_channel *scan_ch)
{
	const struct ieee80211_supported_band *sband;
	u16 passive_dwell = 0;
	u16 active_dwell = 0;
	int added = 0;
	u16 channel = 0;

	sband = iwl_get_hw_mode(priv, band);
	if (!sband) {
		IWL_ERR(priv, "invalid band\n");
		return added;
	}

	active_dwell = iwl_get_active_dwell_time(priv, band, 0);
	passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);

	if (passive_dwell <= active_dwell)
		passive_dwell = active_dwell + 1;

	channel = iwl_get_single_channel_number(priv, band);
	if (channel) {
		scan_ch->channel = cpu_to_le16(channel);
		scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
		scan_ch->active_dwell = cpu_to_le16(active_dwell);
		scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
		/* Set txpower levels to defaults */
		scan_ch->dsp_atten = 110;
		if (band == IEEE80211_BAND_5GHZ)
			scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
		else
			scan_ch->tx_gain = ((1 << 5) | (5 << 3));
		added++;
	} else
		IWL_ERR(priv, "no valid channel found\n");
	return added;
}

static int iwl_get_channels_for_scan(struct iwl_priv *priv,
				     struct ieee80211_vif *vif,
				     enum ieee80211_band band,
				     u8 is_active, u8 n_probes,
				     struct iwl_scan_channel *scan_ch)
{
	struct ieee80211_channel *chan;
	const struct ieee80211_supported_band *sband;
	const struct iwl_channel_info *ch_info;
	u16 passive_dwell = 0;
	u16 active_dwell = 0;
	int added, i;
	u16 channel;

	sband = iwl_get_hw_mode(priv, band);
	if (!sband)
		return 0;

	active_dwell = iwl_get_active_dwell_time(priv, band, n_probes);
	passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);

	if (passive_dwell <= active_dwell)
		passive_dwell = active_dwell + 1;

	for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
		chan = priv->scan_request->channels[i];

		if (chan->band != band)
			continue;

		channel = chan->hw_value;