/******************************************************************************
 *
 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/wireless.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>

#include <net/mac80211.h>

#include <asm/div64.h>

#define DRV_NAME        "iwlagn"

#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
#include "iwl-sta.h"
#include "iwl-calib.h"

/******************************************************************************
 *
 * module boilerplate
 *
 ******************************************************************************/

/*
 * module name, copyright, version, etc.
 */
#define DRV_DESCRIPTION	"Intel(R) Wireless WiFi Link AGN driver for Linux"

#ifdef CONFIG_IWLWIFI_DEBUG
#define VD "d"
#else
#define VD
#endif

#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT
#define VS "s"
#else
#define VS
#endif

#define DRV_VERSION     IWLWIFI_VERSION VD VS


MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");
MODULE_ALIAS("iwl4965");

/*************** STATION TABLE MANAGEMENT ****
 * mac80211 should be examined to determine if sta_info is duplicating
 * the functionality provided here
 */

/**************************************************************/

/**
 * iwl_commit_rxon - commit staging_rxon to hardware
 *
 * The RXON command in staging_rxon is committed to the hardware and
 * the active_rxon structure is updated with the new data.  This
 * function correctly transitions out of the RXON_ASSOC_MSK state if
 * a HW tune is required based on the RXON structure changes.
 */
int iwl_commit_rxon(struct iwl_priv *priv)
{
	/* cast away the const for active_rxon in this function */
	struct iwl_rxon_cmd *active_rxon = (void *)&priv->active_rxon;
	int ret;
	bool new_assoc =
		!!(priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK);

	if (!iwl_is_alive(priv))
		return -EBUSY;

	/* always get timestamp with Rx frame */
	priv->staging_rxon.flags |= RXON_FLG_TSF2HOST_MSK;

	/* allow CTS-to-self if possible. this is relevant only for
	 * 5000, but will not damage 4965 */
	priv->staging_rxon.flags |= RXON_FLG_SELF_CTS_EN;

	ret = iwl_check_rxon_cmd(priv);
	if (ret) {
		IWL_ERR(priv, "Invalid RXON configuration.  Not committing.\n");
		return -EINVAL;
	}

	/* If we don't need to send a full RXON, we can use
	 * iwl_rxon_assoc_cmd which is used to reconfigure filter
	 * and other flags for the current radio configuration. */
	if (!iwl_full_rxon_required(priv)) {
		ret = iwl_send_rxon_assoc(priv);
		if (ret) {
			IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
			return ret;
		}

		memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
		return 0;
	}

	/* station table will be cleared */
	priv->assoc_station_added = 0;

	/* If we are currently associated and the new config requires
	 * an RXON_ASSOC and the new config wants the associated mask enabled,
	 * we must clear the associated from the active configuration
	 * before we apply the new config */
	if (iwl_is_associated(priv) && new_assoc) {
		IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;

		ret = iwl_send_cmd_pdu(priv, REPLY_RXON,
				      sizeof(struct iwl_rxon_cmd),
				      &priv->active_rxon);

		/* If the mask clearing failed then we set
		 * active_rxon back to what it was previously */
		if (ret) {
			active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
			IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret);
			return ret;
		}
	}

	IWL_DEBUG_INFO(priv, "Sending RXON\n"
		       "* with%s RXON_FILTER_ASSOC_MSK\n"
		       "* channel = %d\n"
		       "* bssid = %pM\n",
		       (new_assoc ? "" : "out"),
		       le16_to_cpu(priv->staging_rxon.channel),
		       priv->staging_rxon.bssid_addr);

	iwl_set_rxon_hwcrypto(priv, !priv->cfg->mod_params->sw_crypto);

	/* Apply the new configuration
	 * RXON unassoc clears the station table in uCode, send it before
	 * we add the bcast station. If assoc bit is set, we will send RXON
	 * after having added the bcast and bssid station.
	 */
	if (!new_assoc) {
		ret = iwl_send_cmd_pdu(priv, REPLY_RXON,
			      sizeof(struct iwl_rxon_cmd), &priv->staging_rxon);
		if (ret) {
			IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
			return ret;
		}
		memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
	}

	iwl_clear_stations_table(priv);

	priv->start_calib = 0;

	/* Add the broadcast address so we can send broadcast frames */
	if (iwl_rxon_add_station(priv, iwl_bcast_addr, 0) ==
						IWL_INVALID_STATION) {
		IWL_ERR(priv, "Error adding BROADCAST address for transmit.\n");
		return -EIO;
	}

	/* If we have set the ASSOC_MSK and we are in BSS mode then
	 * add the IWL_AP_ID to the station rate table */
	if (new_assoc) {
		if (priv->iw_mode == NL80211_IFTYPE_STATION) {
			ret = iwl_rxon_add_station(priv,
					   priv->active_rxon.bssid_addr, 1);
			if (ret == IWL_INVALID_STATION) {
				IWL_ERR(priv,
					"Error adding AP address for TX.\n");
				return -EIO;
			}
			priv->assoc_station_added = 1;
			if (priv->default_wep_key &&
			    iwl_send_static_wepkey_cmd(priv, 0))
				IWL_ERR(priv,
					"Could not send WEP static key.\n");
		}

		/* Apply the new configuration
		 * RXON assoc doesn't clear the station table in uCode,
		 */
		ret = iwl_send_cmd_pdu(priv, REPLY_RXON,
			      sizeof(struct iwl_rxon_cmd), &priv->staging_rxon);
		if (ret) {
			IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
			return ret;
		}
		memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
	}

	iwl_init_sensitivity(priv);

	/* If we issue a new RXON command which required a tune then we must
	 * send a new TXPOWER command or we won't be able to Tx any frames */
	ret = iwl_set_tx_power(priv, priv->tx_power_user_lmt, true);
	if (ret) {
		IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
		return ret;
	}

	return 0;
}

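/*
 * Added description: calls the device-specific set_rxon_chain hook, if
 * one is provided, and then commits the RXON via iwlcore_commit_rxon().
 */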
void iwl_update_chain_flags(struct iwl_priv *priv)
{
	if (priv->cfg->ops->hcmd->set_rxon_chain)
		priv->cfg->ops->hcmd->set_rxon_chain(priv);
	iwlcore_commit_rxon(priv);
}

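/*
 * Added description: free every frame left on the pre-allocated
 * free_frames list, reset the frames_count bookkeeping and warn if any
 * frames appear to be lost.
 */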
static void iwl_clear_free_frames(struct iwl_priv *priv)
{
	struct list_head *element;

	IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n",
		       priv->frames_count);

	while (!list_empty(&priv->free_frames)) {
		element = priv->free_frames.next;
		list_del(element);
		kfree(list_entry(element, struct iwl_frame, list));
		priv->frames_count--;
	}

	if (priv->frames_count) {
		IWL_WARN(priv, "%d frames still in use.  Did we lose one?\n",
			    priv->frames_count);
		priv->frames_count = 0;
	}
}

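/*
 * Added description: return a frame from the free_frames list, or
 * allocate a new one (and bump frames_count) when the list is empty.
 * Returns NULL on allocation failure.
 */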
static struct iwl_frame *iwl_get_free_frame(struct iwl_priv *priv)
{
	struct iwl_frame *frame;
	struct list_head *element;
	if (list_empty(&priv->free_frames)) {
		frame = kzalloc(sizeof(*frame), GFP_KERNEL);
		if (!frame) {
			IWL_ERR(priv, "Could not allocate frame!\n");
			return NULL;
		}

		priv->frames_count++;
		return frame;
	}

	element = priv->free_frames.next;
	list_del(element);
	return list_entry(element, struct iwl_frame, list);
}

static void iwl_free_frame(struct iwl_priv *priv, struct iwl_frame *frame)
{
	memset(frame, 0, sizeof(*frame));
	list_add(&frame->list, &priv->free_frames);
}

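/*
 * Added description: copy the cached beacon (priv->ibss_beacon) into @hdr,
 * provided we are associated, a beacon is cached, the interface is in
 * IBSS or AP mode and the beacon fits in @left bytes.  Returns the number
 * of bytes copied, or 0 if nothing was copied.
 */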
static unsigned int iwl_fill_beacon_frame(struct iwl_priv *priv,
					  struct ieee80211_hdr *hdr,
					  int left)
{
	if (!iwl_is_associated(priv) || !priv->ibss_beacon ||
	    ((priv->iw_mode != NL80211_IFTYPE_ADHOC) &&
	     (priv->iw_mode != NL80211_IFTYPE_AP)))
		return 0;

	if (priv->ibss_beacon->len > left)
		return 0;

	memcpy(hdr, priv->ibss_beacon->data, priv->ibss_beacon->len);

	return priv->ibss_beacon->len;
}

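/*
 * Added description: build a beacon Tx command in @frame: fill in the
 * beacon payload, broadcast station id, infinite lifetime, length, rate
 * and TX flags.  Returns the total command size (header plus beacon
 * payload) in bytes.
 */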
static unsigned int iwl_hw_get_beacon_cmd(struct iwl_priv *priv,
				       struct iwl_frame *frame, u8 rate)
{
	struct iwl_tx_beacon_cmd *tx_beacon_cmd;
	unsigned int frame_size;

	tx_beacon_cmd = &frame->u.beacon;
	memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));

	tx_beacon_cmd->tx.sta_id = priv->hw_params.bcast_sta_id;
	tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	frame_size = iwl_fill_beacon_frame(priv, tx_beacon_cmd->frame,
				sizeof(frame->u) - sizeof(*tx_beacon_cmd));

	BUG_ON(frame_size > MAX_MPDU_SIZE);
	tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);

	if ((rate == IWL_RATE_1M_PLCP) || (rate >= IWL_RATE_2M_PLCP))
		tx_beacon_cmd->tx.rate_n_flags =
			iwl_hw_set_rate_n_flags(rate, RATE_MCS_CCK_MSK);
	else
		tx_beacon_cmd->tx.rate_n_flags =
			iwl_hw_set_rate_n_flags(rate, 0);

	tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK |
				     TX_CMD_FLG_TSF_MSK |
				     TX_CMD_FLG_STA_RATE_MSK;

	return sizeof(*tx_beacon_cmd) + frame_size;
}
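
/*
 * Added description: obtain a frame buffer, build the beacon command at
 * the lowest usable PLCP rate and send it to the uCode as REPLY_TX_BEACON.
 */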
static int iwl_send_beacon_cmd(struct iwl_priv *priv)
{
	struct iwl_frame *frame;
	unsigned int frame_size;
	int rc;
	u8 rate;

	frame = iwl_get_free_frame(priv);

	if (!frame) {
		IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
			  "command.\n");
		return -ENOMEM;
	}

	rate = iwl_rate_get_lowest_plcp(priv);

	frame_size = iwl_hw_get_beacon_cmd(priv, frame, rate);

	rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
			      &frame->u.cmd[0]);

	iwl_free_frame(priv, frame);

	return rc;
}

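/*
 * Added description: helpers for the Tx Buffer entries inside a TFD.
 * Each entry packs a 36-bit DMA address and a 12-bit length into the
 * lo/hi_n_len fields read and written below.
 */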
static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}

static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				  dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}

static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}

/**
 * iwl_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @priv - driver private data
 * @txq - tx queue
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)txq->tfds;
	struct iwl_tfd *tfd;
	struct pci_dev *dev = priv->pci_dev;
	int index = txq->q.read_ptr;
	int i;
	int num_tbs;

	tfd = &tfd_tmp[index];

	/* Sanity check on number of chunks */
	num_tbs = iwl_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite a serious situation */
		return;
	}

	/* Unmap tx_cmd */
	if (num_tbs)
		pci_unmap_single(dev,
				pci_unmap_addr(&txq->cmd[index]->meta, mapping),
				pci_unmap_len(&txq->cmd[index]->meta, len),
				PCI_DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++) {
		pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
				iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE);

		if (txq->txb) {
			dev_kfree_skb(txq->txb[txq->q.read_ptr].skb[i - 1]);
			txq->txb[txq->q.read_ptr].skb[i - 1] = NULL;
		}
	}
}

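/*
 * Added description: append a DMA buffer (addr/len) as the next Tx Buffer
 * of the TFD at the queue's write pointer, optionally zeroing the TFD
 * first when @reset is set.  Fails if the TFD already holds
 * IWL_NUM_OF_TBS buffers.
 */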
int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
				 struct iwl_tx_queue *txq,
				 dma_addr_t addr, u16 len,
				 u8 reset, u8 pad)
{
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = (struct iwl_tfd *)txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum of 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(priv, "Error can not send more than %d chunks\n",
			  IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	BUG_ON(addr & ~DMA_BIT_MASK(36));
	if (unlikely(addr & ~IWL_TX_DMA_MASK))
		IWL_ERR(priv, "Unaligned address = %llx\n",
			  (unsigned long long)addr);

	iwl_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}

/*
 * Tell nic where to find circular buffer of Tx Frame Descriptors for
 * given Tx queue, and enable the DMA channel used for that queue.
 *
 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
 * channels supported in hardware.
 */
int iwl_hw_tx_queue_init(struct iwl_priv *priv,
			 struct iwl_tx_queue *txq)
{
	int txq_id = txq->q.id;

	/* Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
			     txq->q.dma_addr >> 8);

	return 0;
}


/******************************************************************************
 *
 * Misc. internal state and helper functions
 *
 ******************************************************************************/

#define MAX_UCODE_BEACON_INTERVAL	4096

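/*
 * Added description: scale a beacon interval down by an integer factor so
 * that it does not exceed MAX_UCODE_BEACON_INTERVAL; a zero result falls
 * back to the maximum.
 */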
static u16 iwl_adjust_beacon_interval(u16 beacon_val)
{
	u16 new_val = 0;
	u16 beacon_factor = 0;

	beacon_factor = (beacon_val + MAX_UCODE_BEACON_INTERVAL)
					/ MAX_UCODE_BEACON_INTERVAL;
	new_val = beacon_val / beacon_factor;

	if (!new_val)
		new_val = MAX_UCODE_BEACON_INTERVAL;

	return new_val;
}

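/*
 * Added description: fill priv->rxon_timing (timestamp, listen interval,
 * beacon interval, ATIM window and beacon timer init value) from the
 * current mac80211 configuration, under priv->lock.
 */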
static void iwl_setup_rxon_timing(struct iwl_priv *priv)
{
	u64 tsf;
	s32 interval_tm, rem;
	unsigned long flags;
	struct ieee80211_conf *conf = NULL;
	u16 beacon_int = 0;

	conf = ieee80211_get_hw_conf(priv->hw);

	spin_lock_irqsave(&priv->lock, flags);
	priv->rxon_timing.timestamp = cpu_to_le64(priv->timestamp);
	priv->rxon_timing.listen_interval = cpu_to_le16(conf->listen_interval);

	if (priv->iw_mode == NL80211_IFTYPE_STATION) {
		beacon_int = iwl_adjust_beacon_interval(priv->beacon_int);
		priv->rxon_timing.atim_window = 0;
	} else {
		beacon_int = iwl_adjust_beacon_interval(
			priv->vif->bss_conf.beacon_int);

		/* TODO: we need to get atim_window from upper stack
		 * for now we set to 0 */
		priv->rxon_timing.atim_window = 0;
	}

	priv->rxon_timing.beacon_interval = cpu_to_le16(beacon_int);

	tsf = priv->timestamp; /* tsf is modified by do_div: copy it */
	interval_tm = beacon_int * 1024;
	rem = do_div(tsf, interval_tm);
	priv->rxon_timing.beacon_init_val = cpu_to_le32(interval_tm - rem);

	spin_unlock_irqrestore(&priv->lock, flags);
	IWL_DEBUG_ASSOC(priv, "beacon interval %d beacon timer %d beacon tim %d\n",
			le16_to_cpu(priv->rxon_timing.beacon_interval),
			le32_to_cpu(priv->rxon_timing.beacon_init_val),
			le16_to_cpu(priv->rxon_timing.atim_window));
}

/******************************************************************************
 *
 * Generic RX handler implementations
 *
 ******************************************************************************/
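
/*
 * Added description: handle the REPLY_ALIVE notification: save the
 * init-time or runtime "alive" data from the uCode and, if the response
 * is valid, schedule the matching (init_)alive_start work after a short
 * delay.
 */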
static void iwl_rx_reply_alive(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
	struct iwl_alive_resp *palive;
	struct delayed_work *pwork;

	palive = &pkt->u.alive_frame;

	IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
		       "0x%01X 0x%01X\n",
		       palive->is_valid, palive->ver_type,
		       palive->ver_subtype);

	if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
		IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
		memcpy(&priv->card_alive_init,
		       &pkt->u.alive_frame,
		       sizeof(struct iwl_init_alive_resp));
		pwork = &priv->init_alive_start;
	} else {
		IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
		memcpy(&priv->card_alive, &pkt->u.alive_frame,
		       sizeof(struct iwl_alive_resp));
		pwork = &priv->alive_start;
	}

	/* We delay the ALIVE response by 5ms to
	 * give the HW RF Kill time to activate... */
	if (palive->is_valid == UCODE_VALID_OK)
		queue_delayed_work(priv->workqueue, pwork,
				   msecs_to_jiffies(5));
	else
		IWL_WARN(priv, "uCode did not respond OK.\n");
}

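/*
 * Added description: work handler that fetches a fresh beacon from
 * mac80211, replaces the cached priv->ibss_beacon under the mutex and
 * pushes it to the uCode via iwl_send_beacon_cmd().
 */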
static void iwl_bg_beacon_update(struct work_struct *work)
{
	struct iwl_priv *priv =
		container_of(work, struct iwl_priv, beacon_update);
	struct sk_buff *beacon;

	/* Pull updated AP beacon from mac80211. will fail if not in AP mode */
	beacon = ieee80211_beacon_get(priv->hw, priv->vif);

	if (!beacon) {
		IWL_ERR(priv, "update beacon failed\n");
		return;
	}

	mutex_lock(&priv->mutex);
	/* new beacon skb is allocated every time; dispose previous.*/
	if (priv->ibss_beacon)
		dev_kfree_skb(priv->ibss_beacon);

	priv->ibss_beacon = beacon;
	mutex_unlock(&priv->mutex);

	iwl_send_beacon_cmd(priv);
}

/**
 * iwl_bg_statistics_periodic - Timer callback to queue statistics
 *
 * This callback is provided in order to send a statistics request.
 *
 * This timer function is continually reset to execute within
 * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
 * was received.  We need to ensure we receive the statistics in order
 * to update the temperature used for calibrating the TXPOWER.
 */
static void iwl_bg_statistics_periodic(unsigned long data)
{
	struct iwl_priv *priv = (struct iwl_priv *)data;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	/* don't send host command if rf-kill is on */
	if (!iwl_is_ready_rf(priv))
		return;

	iwl_send_statistics_request(priv, CMD_ASYNC);
}

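/*
 * Added description: handle a BEACON_NOTIFICATION from the uCode: log its
 * contents when debugging is enabled and, in AP mode, schedule the
 * beacon_update work.
 */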
static void iwl_rx_beacon_notif(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
#ifdef CONFIG_IWLWIFI_DEBUG
	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
	struct iwl4965_beacon_notif *beacon =
		(struct iwl4965_beacon_notif *)pkt->u.raw;
	u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);

	IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
		"tsf %d %d rate %d\n",
		le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
		beacon->beacon_notify_hdr.failure_frame,
		le32_to_cpu(beacon->ibss_mgr_status),
		le32_to_cpu(beacon->high_tsf),
		le32_to_cpu(beacon->low_tsf), rate);
#endif

	if ((priv->iw_mode == NL80211_IFTYPE_AP) &&
	    (!test_bit(STATUS_EXIT_PENDING, &priv->status)))
		queue_work(priv->workqueue, &priv->beacon_update);
}

/* Handle notification from uCode that card's power state is changing
 * due to software, hardware, or critical temperature RFKILL */
static void iwl_rx_card_state_notif(struct iwl_priv *priv,
				    struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
	unsigned long status = priv->status;
	unsigned long reg_flags;

	IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s\n",
			  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & SW_CARD_DISABLED) ? "Kill" : "On");

	if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
		     RF_CARD_DISABLED)) {

		iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
			    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

		iwl_write_direct32(priv, HBUS_TARG_MBX_C,
					HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);

		if (!(flags & RXON_CARD_DISABLED)) {
			iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
				    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
			iwl_write_direct32(priv, HBUS_TARG_MBX_C,
					HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);

		}

		if (flags & RF_CARD_DISABLED) {
			iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
				    CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
			iwl_read32(priv, CSR_UCODE_DRV_GP1);
			spin_lock_irqsave(&priv->reg_lock, reg_flags);
			if (!iwl_grab_nic_access(priv))
				iwl_release_nic_access(priv);
			spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
		}
	}

	if (flags & HW_CARD_DISABLED)
		set_bit(STATUS_RF_KILL_HW, &priv->status);
	else
		clear_bit(STATUS_RF_KILL_HW, &priv->status);

	if (!(flags & RXON_CARD_DISABLED))
		iwl_scan_cancel(priv);

	if ((test_bit(STATUS_RF_KILL_HW, &status) !=
	     test_bit(STATUS_RF_KILL_HW, &priv->status)))
		wiphy_rfkill_set_hw_state(priv->hw->wiphy,
			test_bit(STATUS_RF_KILL_HW, &priv->status));
	else
		wake_up_interruptible(&priv->wait_command_queue);
}

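/*
 * Added description: select the device power source (VAUX or VMAIN) by
 * programming APMG_PS_CTRL_REG; VAUX is only chosen when the PCI device
 * can wake from D3cold.
 */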
int iwl_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src)
{
	if (src == IWL_PWR_SRC_VAUX) {
		if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
			iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
					       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
					       ~APMG_PS_CTRL_MSK_PWR_SRC);
	} else {
		iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
	}

	return 0;
}

/**
 * iwl_setup_rx_handlers - Initialize Rx handler callbacks
 *
 * Setup the RX handlers for each of the reply types sent from the uCode
 * to the host.
 *
 * This function chains into the hardware specific files for them to setup
 * any hardware specific handlers as well.
 */
static void iwl_setup_rx_handlers(struct iwl_priv *priv)
{
	priv->rx_handlers[REPLY_ALIVE] = iwl_rx_reply_alive;
	priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error;
	priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa;
	priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
	priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
	    iwl_rx_pm_debug_statistics_notif;
	priv->rx_handlers[BEACON_NOTIFICATION] = iwl_rx_beacon_notif;

	/*
	 * The same handler is used for both the REPLY to a discrete
	 * statistics request from the host as well as for the periodic
	 * statistics notifications (after received beacons) from the uCode.
	 */
	priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl_rx_statistics;
	priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl_rx_statistics;

	iwl_setup_spectrum_handlers(priv);
	iwl_setup_rx_scan_handlers(priv);

	/* status change handler */
	priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl_rx_card_state_notif;

	priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
	    iwl_rx_missed_beacon_notif;
	/* Rx handlers */
	priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl_rx_reply_rx_phy;
	priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl_rx_reply_rx;
	/* block ack */
	priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl_rx_reply_compressed_ba;
	/* Set up hardware specific Rx handlers */
	priv->cfg->ops->lib->rx_handler_setup(priv);
}

/**
 * iwl_rx_handle - Main entry function for receiving responses from uCode
 *
 * Uses the priv->rx_handlers callback function array to invoke
 * the appropriate handlers, including command responses,
 * frame-received notifications, and other notifications.
 */
void iwl_rx_handle(struct iwl_priv *priv)
{
	struct iwl_rx_mem_buffer *rxb;
	struct iwl_rx_packet *pkt;
	struct iwl_rx_queue *rxq = &priv->rxq;
	u32 r, i;
	int reclaim;
	unsigned long flags;
	u8 fill_rx = 0;
	u32 count = 8;
	int total_empty;

	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) &  0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);

	/* calculate the total number of frames that need to be restocked
	 * after handling RX */
	total_empty = r - priv->rxq.write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;

	if (total_empty > (RX_QUEUE_SIZE / 2))
		fill_rx = 1;

	while (i != r) {
		rxb = rxq->queue[i];

		/* If an RXB doesn't have a Rx queue slot associated with it,
		 * then a bug has been introduced in the queue refilling
		 * routines -- catch it here */
		BUG_ON(rxb == NULL);

		rxq->queue[i] = NULL;

		pci_unmap_single(priv->pci_dev, rxb->real_dma_addr,
				 priv->hw_params.rx_buf_size + 256,
				 PCI_DMA_FROMDEVICE);
		pkt = (struct iwl_rx_packet *)rxb->skb->data;

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
			(pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
			(pkt->hdr.cmd != REPLY_RX) &&
			(pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
			(pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
			(pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
			(pkt->hdr.cmd != REPLY_TX);

		/* Based on type of command response or notification,
		 *   handle those that need handling via function in
		 *   rx_handlers table.  See iwl_setup_rx_handlers() */
		if (priv->rx_handlers[pkt->hdr.cmd]) {
			IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
				i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
			priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
			priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
		} else {
			/* No handling needed */
			IWL_DEBUG_RX(priv,
				"r %d i %d No handler needed for %s, 0x%02x\n",
				r, i, get_cmd_string(pkt->hdr.cmd),
				pkt->hdr.cmd);
		}

		if (reclaim) {
			/* Invoke any callbacks, transfer the skb to caller, and
			 * fire off the (possibly) blocking iwl_send_cmd()
			 * as we reclaim the driver command queue */
			if (rxb && rxb->skb)
				iwl_tx_cmd_complete(priv, rxb);
			else
				IWL_WARN(priv, "Claim null rxb?\n");
		}

		/* For now we just don't re-use anything.  We can tweak this
		 * later to try and re-use notification packets and SKBs that
		 * fail to Rx correctly */
		if (rxb->skb != NULL) {
			priv->alloc_rxb_skb--;
			dev_kfree_skb_any(rxb->skb);
			rxb->skb = NULL;
		}

		spin_lock_irqsave(&rxq->lock, flags);
		list_add_tail(&rxb->list, &priv->rxq.rx_used);
		spin_unlock_irqrestore(&rxq->lock, flags);
		i = (i + 1) & RX_QUEUE_MASK;
		/* If there are a lot of unused frames,
		 * restock the Rx queue so ucode won't assert. */
		if (fill_rx) {
			count++;
			if (count >= 8) {
				priv->rxq.read = i;
				iwl_rx_replenish_now(priv);
				count = 0;
			}
		}
	}

	/* Backtrack one entry */
	priv->rxq.read = i;
	if (fill_rx)
		iwl_rx_replenish_now(priv);
	else
		iwl_rx_queue_restock(priv);
}

/* call this function to flush any scheduled tasklet */
static inline void iwl_synchronize_irq(struct iwl_priv *priv)
{
	/* wait to make sure we flush pending tasklet */
	synchronize_irq(priv->pci_dev->irq);
	tasklet_kill(&priv->irq_tasklet);
}

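/*
 * Added description: interrupt tasklet for the legacy interrupt path.
 * Acks the pending CSR_INT and CSR_FH_INT_STATUS bits, folds the FH bits
 * into inta, and then services the interrupt sources that were raised.
 */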
static void iwl_irq_tasklet_legacy(struct iwl_priv *priv)
{
	u32 inta, handled = 0;
	u32 inta_fh;
	unsigned long flags;
#ifdef CONFIG_IWLWIFI_DEBUG
	u32 inta_mask;
#endif

	spin_lock_irqsave(&priv->lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 *  and will clear only when CSR_FH_INT_STATUS gets cleared. */
	inta = iwl_read32(priv, CSR_INT);
	iwl_write32(priv, CSR_INT, inta);

	/* Ack/clear/reset pending flow-handler (DMA) interrupts.
	 * Any new interrupts that happen after this, either while we're
	 * in this tasklet, or later, will show up in next ISR/tasklet. */
	inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
	iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);

#ifdef CONFIG_IWLWIFI_DEBUG
	if (priv->debug_level & IWL_DL_ISR) {
		/* just for debug */
		inta_mask = iwl_read32(priv, CSR_INT_MASK);
		IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
			      inta, inta_mask, inta_fh);
	}
#endif

	/* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
	 * atomic, make sure that inta covers all the interrupts that
	 * we've discovered, even if FH interrupt came in just after
	 * reading CSR_INT. */
	if (inta_fh & CSR49_FH_INT_RX_MASK)
		inta |= CSR_INT_BIT_FH_RX;
	if (inta_fh & CSR49_FH_INT_TX_MASK)
		inta |= CSR_INT_BIT_FH_TX;

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(priv, "Microcode HW error detected.  Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_disable_interrupts(priv);

		priv->isr_stats.hw++;
		iwl_irq_handle_error(priv);

		handled |= CSR_INT_BIT_HW_ERR;

		spin_unlock_irqrestore(&priv->lock, flags);

		return;
	}
