/******************************************************************************
 *
 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

30
31
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

32
33
34
35
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
36
#include <linux/pci-aspm.h>
37
#include <linux/slab.h>
38
39
#include <linux/dma-mapping.h>
#include <linux/delay.h>
40
#include <linux/sched.h>
41
42
43
44
45
46
47
48
49
50
51
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/wireless.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>

#include <net/mac80211.h>

#include <asm/div64.h>

52
53
#define DRV_NAME        "iwlagn"

54
#include "iwl-eeprom.h"
55
#include "iwl-dev.h"
56
#include "iwl-core.h"
57
#include "iwl-io.h"
58
#include "iwl-helpers.h"
59
#include "iwl-sta.h"
Johannes Berg's avatar
Johannes Berg committed
60
#include "iwl-agn-calib.h"
61
#include "iwl-agn.h"
62
#include "iwl-agn-led.h"
63

64

65
66
67
68
69
70
71
72
73
/******************************************************************************
 *
 * module boiler plate
 *
 ******************************************************************************/

/*
 * module name, copyright, version, etc.
 */
74
#define DRV_DESCRIPTION	"Intel(R) Wireless WiFi Link AGN driver for Linux"
75

76
#ifdef CONFIG_IWLWIFI_DEBUG
77
78
79
80
81
#define VD "d"
#else
#define VD
#endif

82
#define DRV_VERSION     IWLWIFI_VERSION VD
83
84
85
86


MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_VERSION(DRV_VERSION);
87
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
88
89
MODULE_LICENSE("GPL");

90
static int iwlagn_ant_coupling;
91
static bool iwlagn_bt_ch_announce = 1;
92

93
void iwl_update_chain_flags(struct iwl_priv *priv)
94
{
95
	struct iwl_rxon_context *ctx;
96

97
98
99
	if (priv->cfg->ops->hcmd->set_rxon_chain) {
		for_each_context(priv, ctx) {
			priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
100
101
			if (ctx->active.rx_chain != ctx->staging.rx_chain)
				iwlcore_commit_rxon(priv, ctx);
102
103
		}
	}
104
105
}

106
static void iwl_clear_free_frames(struct iwl_priv *priv)
107
108
109
{
	struct list_head *element;

110
	IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n",
111
112
113
114
115
		       priv->frames_count);

	while (!list_empty(&priv->free_frames)) {
		element = priv->free_frames.next;
		list_del(element);
116
		kfree(list_entry(element, struct iwl_frame, list));
117
118
119
120
		priv->frames_count--;
	}

	if (priv->frames_count) {
121
		IWL_WARN(priv, "%d frames still in use.  Did we lose one?\n",
122
123
124
125
126
			    priv->frames_count);
		priv->frames_count = 0;
	}
}

127
static struct iwl_frame *iwl_get_free_frame(struct iwl_priv *priv)
128
{
129
	struct iwl_frame *frame;
130
131
132
133
	struct list_head *element;
	if (list_empty(&priv->free_frames)) {
		frame = kzalloc(sizeof(*frame), GFP_KERNEL);
		if (!frame) {
134
			IWL_ERR(priv, "Could not allocate frame!\n");
135
136
137
138
139
140
141
142
143
			return NULL;
		}

		priv->frames_count++;
		return frame;
	}

	element = priv->free_frames.next;
	list_del(element);
144
	return list_entry(element, struct iwl_frame, list);
145
146
}

147
static void iwl_free_frame(struct iwl_priv *priv, struct iwl_frame *frame)
148
149
150
151
152
{
	memset(frame, 0, sizeof(*frame));
	list_add(&frame->list, &priv->free_frames);
}

153
static u32 iwl_fill_beacon_frame(struct iwl_priv *priv,
154
155
				 struct ieee80211_hdr *hdr,
				 int left)
156
{
157
158
	lockdep_assert_held(&priv->mutex);

159
	if (!priv->beacon_skb)
160
161
		return 0;

162
	if (priv->beacon_skb->len > left)
163
164
		return 0;

165
	memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len);
166

167
	return priv->beacon_skb->len;
168
169
}

170
171
/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
static void iwl_set_beacon_tim(struct iwl_priv *priv,
			       struct iwl_tx_beacon_cmd *tx_beacon_cmd,
			       u8 *beacon, u32 frame_size)
{
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
	u16 tim_idx;

	/*
	 * The index is relative to frame start but we start looking at the
	 * variable-length part of the beacon.
	 */
	tim_idx = mgmt->u.beacon.variable - beacon;

	/*
	 * Walk the IE list (each element is <id, len, data...>) until a TIM
	 * element is found or we run out of room for another element header.
	 */
	while ((tim_idx < (frame_size - 2)) &&
			(beacon[tim_idx] != WLAN_EID_TIM))
		tim_idx += beacon[tim_idx+1] + 2;

	/* If TIM field was found, set variables */
	if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
		tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
		tx_beacon_cmd->tim_size = beacon[tim_idx+1];
	} else
		IWL_WARN(priv, "Unable to find TIM Element in beacon\n");
}

197
static unsigned int iwl_hw_get_beacon_cmd(struct iwl_priv *priv,
198
				       struct iwl_frame *frame)
199
200
{
	struct iwl_tx_beacon_cmd *tx_beacon_cmd;
201
202
203
204
205
206
207
	u32 frame_size;
	u32 rate_flags;
	u32 rate;
	/*
	 * We have to set up the TX command, the TX Beacon command, and the
	 * beacon contents.
	 */
208

209
210
211
212
	lockdep_assert_held(&priv->mutex);

	if (!priv->beacon_ctx) {
		IWL_ERR(priv, "trying to build beacon w/o beacon context!\n");
213
		return 0;
214
215
	}

216
	/* Initialize memory */
217
218
219
	tx_beacon_cmd = &frame->u.beacon;
	memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));

220
	/* Set up TX beacon contents */
221
222
	frame_size = iwl_fill_beacon_frame(priv, tx_beacon_cmd->frame,
				sizeof(frame->u) - sizeof(*tx_beacon_cmd));
223
224
	if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
		return 0;
225
226
	if (!frame_size)
		return 0;
227

228
	/* Set up TX command fields */
229
	tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
230
	tx_beacon_cmd->tx.sta_id = priv->beacon_ctx->bcast_sta_id;
231
232
233
	tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK |
		TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK;
234

235
236
	/* Set up TX beacon command fields */
	iwl_set_beacon_tim(priv, tx_beacon_cmd, (u8 *)tx_beacon_cmd->frame,
237
			   frame_size);
238

239
	/* Set up packet rate and flags */
240
	rate = iwl_rate_get_lowest_plcp(priv, priv->beacon_ctx);
241
242
	priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
					      priv->hw_params.valid_tx_ant);
243
244
245
246
247
	rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
	if ((rate >= IWL_FIRST_CCK_RATE) && (rate <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;
	tx_beacon_cmd->tx.rate_n_flags = iwl_hw_set_rate_n_flags(rate,
			rate_flags);
248
249
250

	return sizeof(*tx_beacon_cmd) + frame_size;
}
251
252

int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
253
{
254
	struct iwl_frame *frame;
255
256
257
	unsigned int frame_size;
	int rc;

258
	frame = iwl_get_free_frame(priv);
259
	if (!frame) {
260
		IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
261
262
263
264
			  "command.\n");
		return -ENOMEM;
	}

265
266
267
268
269
270
	frame_size = iwl_hw_get_beacon_cmd(priv, frame);
	if (!frame_size) {
		IWL_ERR(priv, "Error configuring the beacon command\n");
		iwl_free_frame(priv, frame);
		return -EINVAL;
	}
271

272
	rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
273
274
			      &frame->u.cmd[0]);

275
	iwl_free_frame(priv, frame);
276
277
278
279

	return rc;
}

280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
/*
 * iwl_tfd_tb_get_addr - read the DMA address of TB @idx out of a TFD
 *
 * The TFD stores a 36-bit address: 32 low bits in tb->lo (unaligned LE32)
 * and 4 high bits in the low nibble of tb->hi_n_len.  The high bits only
 * matter when dma_addr_t is wider than 32 bits.
 */
static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	dma_addr_t addr = get_unaligned_le32(&tb->lo);

	if (sizeof(dma_addr_t) > sizeof(u32))
		/* two 16-bit shifts avoid UB on 32-bit dma_addr_t */
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}

/*
 * iwl_tfd_tb_get_len - length in bytes of TB @idx; stored in the upper
 * 12 bits of hi_n_len (the low nibble holds address bits 32-35).
 */
static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	return le16_to_cpu(tfd->tbs[idx].hi_n_len) >> 4;
}

/*
 * iwl_tfd_set_tb - fill TB slot @idx of a TFD with a DMA address + length
 *
 * Packs the 32 low address bits into tb->lo and {addr[35:32], len} into
 * hi_n_len, then records idx+1 as the number of valid TBs.
 */
static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				  dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		/* double 16-bit shift keeps 32-bit dma_addr_t builds legal */
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}

/* Number of valid TB entries in the TFD (low 5 bits of num_tbs). */
static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}

/**
 * iwl_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @priv - driver private data
 * @txq - tx queue
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
329
	struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)txq->tfds;
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
	struct iwl_tfd *tfd;
	struct pci_dev *dev = priv->pci_dev;
	int index = txq->q.read_ptr;
	int i;
	int num_tbs;

	tfd = &tfd_tmp[index];

	/* Sanity check on number of chunks */
	num_tbs = iwl_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* Unmap tx_cmd */
	if (num_tbs)
		pci_unmap_single(dev,
350
351
				dma_unmap_addr(&txq->meta[index], mapping),
				dma_unmap_len(&txq->meta[index], len),
352
				PCI_DMA_BIDIRECTIONAL);
353
354

	/* Unmap chunks, if any. */
355
	for (i = 1; i < num_tbs; i++)
356
357
358
		pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
				iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE);

359
360
361
	/* free SKB */
	if (txq->txb) {
		struct sk_buff *skb;
Johannes Berg's avatar
Johannes Berg committed
362

363
		skb = txq->txb[txq->q.read_ptr].skb;
Johannes Berg's avatar
Johannes Berg committed
364

365
366
367
368
		/* can be called from irqs-disabled context */
		if (skb) {
			dev_kfree_skb_any(skb);
			txq->txb[txq->q.read_ptr].skb = NULL;
369
370
371
372
373
374
375
376
377
378
		}
	}
}

/*
 * iwl_hw_txq_attach_buf_to_tfd - append one DMA chunk to the current TFD
 *
 * @addr:  bus address of the buffer (must fit in 36 bits)
 * @len:   buffer length in bytes
 * @reset: non-zero to zero the TFD before adding the first chunk
 * @pad:   unused here; kept for the shared call signature
 *
 * Returns 0 on success, -EINVAL when the TFD already holds the maximum
 * number of chunks.
 */
int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
				 struct iwl_tx_queue *txq,
				 dma_addr_t addr, u16 len,
				 u8 reset, u8 pad)
{
	struct iwl_queue *q = &txq->q;
	struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)txq->tfds;
	struct iwl_tfd *tfd = &tfd_tmp[q->write_ptr];
	u32 num_tbs;

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(priv, "Error can not send more than %d chunks\n",
			  IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	/* Hardware only supports 36-bit DMA addresses */
	BUG_ON(addr & ~DMA_BIT_MASK(36));
	if (unlikely(addr & ~IWL_TX_DMA_MASK))
		IWL_ERR(priv, "Unaligned address = %llx\n",
			  (unsigned long long)addr);

	iwl_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}

408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
/*
 * Tell nic where to find circular buffer of Tx Frame Descriptors for
 * given Tx queue, and enable the DMA channel used for that queue.
 *
 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
 * channels supported in hardware.
 */
int iwl_hw_tx_queue_init(struct iwl_priv *priv,
			 struct iwl_tx_queue *txq)
{
	/* Circular buffer (TFD queue in DRAM) physical base address;
	 * the register takes the address shifted right by 8 bits. */
	iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq->q.id),
			     txq->q.dma_addr >> 8);

	return 0;
}

427
static void iwl_bg_beacon_update(struct work_struct *work)
428
{
429
430
	struct iwl_priv *priv =
		container_of(work, struct iwl_priv, beacon_update);
431
432
	struct sk_buff *beacon;

433
434
435
436
437
	mutex_lock(&priv->mutex);
	if (!priv->beacon_ctx) {
		IWL_ERR(priv, "updating beacon w/o beacon context!\n");
		goto out;
	}
438

439
440
441
442
443
444
445
446
447
448
	if (priv->beacon_ctx->vif->type != NL80211_IFTYPE_AP) {
		/*
		 * The ucode will send beacon notifications even in
		 * IBSS mode, but we don't want to process them. But
		 * we need to defer the type check to here due to
		 * requiring locking around the beacon_ctx access.
		 */
		goto out;
	}

449
450
	/* Pull updated AP beacon from mac80211. will fail if not in AP mode */
	beacon = ieee80211_beacon_get(priv->hw, priv->beacon_ctx->vif);
451
	if (!beacon) {
452
		IWL_ERR(priv, "update beacon failed -- keeping old\n");
453
		goto out;
454
455
456
	}

	/* new beacon skb is allocated every time; dispose previous.*/
457
	dev_kfree_skb(priv->beacon_skb);
458

459
	priv->beacon_skb = beacon;
460

461
	iwlagn_send_beacon_cmd(priv);
462
463
 out:
	mutex_unlock(&priv->mutex);
464
465
}

466
467
468
469
470
471
472
473
474
475
476
477
478
479
/*
 * iwl_bg_bt_runtime_config - workqueue handler: resend the BT config
 *
 * Re-issues the Bluetooth coexistence configuration command unless the
 * driver is tearing down or the radio is rf-killed.
 */
static void iwl_bg_bt_runtime_config(struct work_struct *work)
{
	struct iwl_priv *priv =
		container_of(work, struct iwl_priv, bt_runtime_config);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	/* dont send host command if rf-kill is on */
	if (!iwl_is_ready_rf(priv))
		return;

	priv->cfg->ops->hcmd->send_bt_config(priv);
}

480
481
482
483
static void iwl_bg_bt_full_concurrency(struct work_struct *work)
{
	struct iwl_priv *priv =
		container_of(work, struct iwl_priv, bt_full_concurrency);
484
	struct iwl_rxon_context *ctx;
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	/* dont send host command if rf-kill is on */
	if (!iwl_is_ready_rf(priv))
		return;

	IWL_DEBUG_INFO(priv, "BT coex in %s mode\n",
		       priv->bt_full_concurrent ?
		       "full concurrency" : "3-wire");

	/*
	 * LQ & RXON updated cmds must be sent before BT Config cmd
	 * to avoid 3-wire collisions
	 */
501
502
503
504
505
506
507
	mutex_lock(&priv->mutex);
	for_each_context(priv, ctx) {
		if (priv->cfg->ops->hcmd->set_rxon_chain)
			priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
		iwlcore_commit_rxon(priv, ctx);
	}
	mutex_unlock(&priv->mutex);
508
509
510
511

	priv->cfg->ops->hcmd->send_bt_config(priv);
}

512
/**
513
 * iwl_bg_statistics_periodic - Timer callback to queue statistics
514
515
516
517
518
519
520
521
 *
 * This callback is provided in order to send a statistics request.
 *
 * This timer function is continually reset to execute within
 * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
 * was received.  We need to ensure we receive the statistics in order
 * to update the temperature used for calibrating the TXPOWER.
 */
522
static void iwl_bg_statistics_periodic(unsigned long data)
523
524
525
526
527
528
{
	struct iwl_priv *priv = (struct iwl_priv *)data;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

529
530
531
532
	/* dont send host command if rf-kill is on */
	if (!iwl_is_ready_rf(priv))
		return;

533
	iwl_send_statistics_request(priv, CMD_ASYNC, false);
534
535
}

536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582

/*
 * iwl_print_cont_event_trace - stream a slice of the uCode event log
 *
 * @base:      SRAM byte address of the event log header
 * @start_idx: first event index to read
 * @num_events: number of events to read
 * @mode:      0 = 2 words/event (no timestamp), 1 = 3 words/event
 *
 * Reads events from device SRAM (auto-incrementing reads) and emits them
 * through the iwlwifi_dev_ucode_cont_event tracepoint.
 */
static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
					u32 start_idx, u32 num_events,
					u32 mode)
{
	u32 i;
	u32 ptr;        /* SRAM byte address of log data */
	u32 ev, time, data; /* event log data */
	unsigned long reg_flags;

	/* Data starts after the 4-word header; entry size depends on mode */
	if (mode == 0)
		ptr = base + (4 * sizeof(u32)) + (start_idx * 2 * sizeof(u32));
	else
		ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));

	/* Make sure device is powered up for SRAM reads */
	spin_lock_irqsave(&priv->reg_lock, reg_flags);
	if (iwl_grab_nic_access(priv)) {
		spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
		return;
	}

	/* Set starting address; reads will auto-increment */
	_iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
	rmb();

	/*
	 * "time" is actually "data" for mode 0 (no timestamp).
	 * place event id # at far right for easier visual parsing.
	 */
	for (i = 0; i < num_events; i++) {
		ev = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
		time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
		if (mode == 0) {
			trace_iwlwifi_dev_ucode_cont_event(priv,
							0, time, ev);
		} else {
			data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
			trace_iwlwifi_dev_ucode_cont_event(priv,
						time, data, ev);
		}
	}
	/* Allow device to power down */
	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
}

Johannes Berg's avatar
Johannes Berg committed
583
static void iwl_continuous_event_trace(struct iwl_priv *priv)
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
{
	u32 capacity;   /* event log capacity in # entries */
	u32 base;       /* SRAM byte address of event log header */
	u32 mode;       /* 0 - no timestamp, 1 - timestamp recorded */
	u32 num_wraps;  /* # times uCode wrapped to top of log */
	u32 next_entry; /* index of next entry to be written by uCode */

	if (priv->ucode_type == UCODE_INIT)
		base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
	else
		base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
	if (priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
		capacity = iwl_read_targ_mem(priv, base);
		num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
		mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
		next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
	} else
		return;

	if (num_wraps == priv->event_log.num_wraps) {
		iwl_print_cont_event_trace(priv,
				       base, priv->event_log.next_entry,
				       next_entry - priv->event_log.next_entry,
				       mode);
		priv->event_log.non_wraps_count++;
	} else {
		if ((num_wraps - priv->event_log.num_wraps) > 1)
			priv->event_log.wraps_more_count++;
		else
			priv->event_log.wraps_once_count++;
		trace_iwlwifi_dev_ucode_wrap_event(priv,
				num_wraps - priv->event_log.num_wraps,
				next_entry, priv->event_log.next_entry);
		if (next_entry < priv->event_log.next_entry) {
			iwl_print_cont_event_trace(priv, base,
			       priv->event_log.next_entry,
			       capacity - priv->event_log.next_entry,
			       mode);

			iwl_print_cont_event_trace(priv, base, 0,
				next_entry, mode);
		} else {
			iwl_print_cont_event_trace(priv, base,
			       next_entry, capacity - next_entry,
			       mode);

			iwl_print_cont_event_trace(priv, base, 0,
				next_entry, mode);
		}
	}
	priv->event_log.num_wraps = num_wraps;
	priv->event_log.next_entry = next_entry;
}

/**
 * iwl_bg_ucode_trace - Timer callback to log ucode event
 *
 * The timer is continually set to execute every
 * UCODE_TRACE_PERIOD milliseconds after the last timer expired
 * this function is to perform continuous uCode event logging operation
 * if enabled
 */
static void iwl_bg_ucode_trace(unsigned long data)
{
	struct iwl_priv *priv = (struct iwl_priv *)data;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	if (!priv->event_log.ucode_trace)
		return;

	iwl_continuous_event_trace(priv);
	/* Reschedule the timer to occur in UCODE_TRACE_PERIOD */
	mod_timer(&priv->ucode_trace,
		 jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
}

661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
/*
 * iwl_bg_tx_flush - workqueue handler: flush all pending TX frames
 *
 * Honors the device's request to drop everything queued for transmit,
 * unless the driver is exiting or rf-kill is active.
 */
static void iwl_bg_tx_flush(struct work_struct *work)
{
	struct iwl_priv *priv =
		container_of(work, struct iwl_priv, tx_flush);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	/* do nothing if rf-kill is on */
	if (!iwl_is_ready_rf(priv))
		return;

	if (priv->cfg->ops->lib->txfifo_flush) {
		IWL_DEBUG_INFO(priv, "device request: flush all tx frames\n");
		iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL);
	}
}

679
/**
680
 * iwl_rx_handle - Main entry function for receiving responses from uCode
681
682
683
684
685
 *
 * Uses the priv->rx_handlers callback function array to invoke
 * the appropriate handlers, including command responses,
 * frame-received notifications, and other notifications.
 */
686
static void iwl_rx_handle(struct iwl_priv *priv)
687
{
688
	struct iwl_rx_mem_buffer *rxb;
689
	struct iwl_rx_packet *pkt;
690
	struct iwl_rx_queue *rxq = &priv->rxq;
691
692
693
	u32 r, i;
	int reclaim;
	unsigned long flags;
694
	u8 fill_rx = 0;
Mohamed Abbas's avatar
Mohamed Abbas committed
695
	u32 count = 8;
696
	int total_empty;
697

698
699
	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
700
	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) &  0x0FFF;
701
702
703
704
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
705
		IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
706

707
	/* calculate total frames need to be restock after handling RX */
708
	total_empty = r - rxq->write_actual;
709
710
711
712
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;

	if (total_empty > (RX_QUEUE_SIZE / 2))
713
714
		fill_rx = 1;

715
	while (i != r) {
Johannes Berg's avatar
Johannes Berg committed
716
717
		int len;

718
719
		rxb = rxq->queue[i];

720
		/* If an RXB doesn't have a Rx queue slot associated with it,
721
722
723
724
725
726
		 * then a bug has been introduced in the queue refilling
		 * routines -- catch it here */
		BUG_ON(rxb == NULL);

		rxq->queue[i] = NULL;

Zhu Yi's avatar
Zhu Yi committed
727
728
729
730
		pci_unmap_page(priv->pci_dev, rxb->page_dma,
			       PAGE_SIZE << priv->hw_params.rx_page_order,
			       PCI_DMA_FROMDEVICE);
		pkt = rxb_addr(rxb);
731

Johannes Berg's avatar
Johannes Berg committed
732
733
734
		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_dev_rx(priv, pkt, len);
Johannes Berg's avatar
Johannes Berg committed
735

736
737
738
739
740
741
742
743
		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
			(pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
744
			(pkt->hdr.cmd != REPLY_RX) &&
Daniel Halperin's avatar
Daniel Halperin committed
745
			(pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
746
			(pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
747
748
749
			(pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
			(pkt->hdr.cmd != REPLY_TX);

750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
		/*
		 * Do the notification wait before RX handlers so
		 * even if the RX handler consumes the RXB we have
		 * access to it in the notification wait entry.
		 */
		if (!list_empty(&priv->_agn.notif_waits)) {
			struct iwl_notification_wait *w;

			spin_lock(&priv->_agn.notif_wait_lock);
			list_for_each_entry(w, &priv->_agn.notif_waits, list) {
				if (w->cmd == pkt->hdr.cmd) {
					w->triggered = true;
					if (w->fn)
						w->fn(priv, pkt);
				}
			}
			spin_unlock(&priv->_agn.notif_wait_lock);

			wake_up_all(&priv->_agn.notif_waitq);
		}

771
772
		/* Based on type of command response or notification,
		 *   handle those that need handling via function in
773
		 *   rx_handlers table.  See iwl_setup_rx_handlers() */
774
		if (priv->rx_handlers[pkt->hdr.cmd]) {
775
			IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
776
				i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
777
			priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
778
			priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
779
780
		} else {
			/* No handling needed */
781
			IWL_DEBUG_RX(priv,
782
783
784
785
786
				"r %d i %d No handler needed for %s, 0x%02x\n",
				r, i, get_cmd_string(pkt->hdr.cmd),
				pkt->hdr.cmd);
		}

787
788
789
790
791
792
793
		/*
		 * XXX: After here, we should always check rxb->page
		 * against NULL before touching it or its virtual
		 * memory (pkt). Because some rx_handler might have
		 * already taken or freed the pages.
		 */

794
		if (reclaim) {
Zhu Yi's avatar
Zhu Yi committed
795
796
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking iwl_send_cmd()
797
			 * as we reclaim the driver command queue */
798
			if (rxb->page)
799
				iwl_tx_cmd_complete(priv, rxb);
800
			else
801
				IWL_WARN(priv, "Claim null rxb?\n");
802
803
		}

804
805
806
807
		/* Reuse the page if possible. For notification packets and
		 * SKBs that fail to Rx correctly, add them back into the
		 * rx_free list for reuse later. */
		spin_lock_irqsave(&rxq->lock, flags);
Zhu Yi's avatar
Zhu Yi committed
808
		if (rxb->page != NULL) {
809
810
811
812
813
814
815
			rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
				0, PAGE_SIZE << priv->hw_params.rx_page_order,
				PCI_DMA_FROMDEVICE);
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		} else
			list_add_tail(&rxb->list, &rxq->rx_used);
816
817

		spin_unlock_irqrestore(&rxq->lock, flags);
818

819
		i = (i + 1) & RX_QUEUE_MASK;
820
821
822
823
824
		/* If there are a lot of unused frames,
		 * restock the Rx queue so ucode wont assert. */
		if (fill_rx) {
			count++;
			if (count >= 8) {
825
				rxq->read = i;
826
				iwlagn_rx_replenish_now(priv);
827
828
829
				count = 0;
			}
		}
830
831
832
	}

	/* Backtrack one entry */
833
	rxq->read = i;
834
	if (fill_rx)
835
		iwlagn_rx_replenish_now(priv);
836
	else
837
		iwlagn_rx_queue_restock(priv);
838
839
}

840
841
842
/* call this function to flush any scheduled tasklet */
static inline void iwl_synchronize_irq(struct iwl_priv *priv)
{
	/* wait to make sure we flush pending tasklet*/
	synchronize_irq(priv->pci_dev->irq);
	tasklet_kill(&priv->irq_tasklet);
}

Mohamed Abbas's avatar
Mohamed Abbas committed
848
static void iwl_irq_tasklet_legacy(struct iwl_priv *priv)
849
850
851
852
{
	u32 inta, handled = 0;
	u32 inta_fh;
	unsigned long flags;
853
	u32 i;
854
#ifdef CONFIG_IWLWIFI_DEBUG
855
856
857
858
859
860
861
862
	u32 inta_mask;
#endif

	spin_lock_irqsave(&priv->lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 *  and will clear only when CSR_FH_INT_STATUS gets cleared. */
863
864
	inta = iwl_read32(priv, CSR_INT);
	iwl_write32(priv, CSR_INT, inta);
865
866
867
868

	/* Ack/clear/reset pending flow-handler (DMA) interrupts.
	 * Any new interrupts that happen after this, either while we're
	 * in this tasklet, or later, will show up in next ISR/tasklet. */
869
870
	inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
	iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
871

872
#ifdef CONFIG_IWLWIFI_DEBUG
873
	if (iwl_get_debug_level(priv) & IWL_DL_ISR) {
874
		/* just for debug */
875
		inta_mask = iwl_read32(priv, CSR_INT_MASK);
876
		IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
877
878
879
880
			      inta, inta_mask, inta_fh);
	}
#endif

Zhu Yi's avatar
Zhu Yi committed
881
882
	spin_unlock_irqrestore(&priv->lock, flags);

883
884
885
886
	/* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
	 * atomic, make sure that inta covers all the interrupts that
	 * we've discovered, even if FH interrupt came in just after
	 * reading CSR_INT. */
Tomas Winkler's avatar
Tomas Winkler committed
887
	if (inta_fh & CSR49_FH_INT_RX_MASK)
888
		inta |= CSR_INT_BIT_FH_RX;
Tomas Winkler's avatar
Tomas Winkler committed
889
	if (inta_fh & CSR49_FH_INT_TX_MASK)
890
891
892
893
		inta |= CSR_INT_BIT_FH_TX;

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
894
		IWL_ERR(priv, "Hardware error detected.  Restarting.\n");
895
896

		/* Tell the device to stop sending interrupts */
897
		iwl_disable_interrupts(priv);
898

899
		priv->isr_stats.hw++;
900
		iwl_irq_handle_error(priv);
901
902
903
904
905
906

		handled |= CSR_INT_BIT_HW_ERR;

		return;
	}

907
#ifdef CONFIG_IWLWIFI_DEBUG
908
	if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
909
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
910
		if (inta & CSR_INT_BIT_SCD) {
911
			IWL_DEBUG_ISR(priv, "Scheduler finished to transmit "
912
				      "the frame/frames.\n");
913
914
			priv->isr_stats.sch++;
		}
915
916

		/* Alive notification via Rx interrupt will do the real work */
917
		if (inta & CSR_INT_BIT_ALIVE) {
918
			IWL_DEBUG_ISR(priv, "Alive interrupt\n");
919
920
			priv->isr_stats.alive++;
		}
921
922
923
	}
#endif
	/* Safely ignore these bits for debug checks below */
924
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
925

926
	/* HW RF KILL switch toggled */
927
928
	if (inta & CSR_INT_BIT_RF_KILL) {
		int hw_rf_kill = 0;
929
		if (!(iwl_read32(priv, CSR_GP_CNTRL) &
930
931
932
				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
			hw_rf_kill = 1;

933
		IWL_WARN(priv, "RF_KILL bit toggled to %s.\n",
934
				hw_rf_kill ? "disable radio" : "enable radio");
935

936
937
		priv->isr_stats.rfkill++;

938
		/* driver only loads ucode once setting the interface up.
939
940
941
		 * the driver allows loading the ucode even if the radio
		 * is killed. Hence update the killswitch state here. The
		 * rfkill handler will care about restarting if needed.
942
		 */
943
944
945
946
947
		if (!test_bit(STATUS_ALIVE, &priv->status)) {
			if (hw_rf_kill)
				set_bit(STATUS_RF_KILL_HW, &priv->status);
			else
				clear_bit(STATUS_RF_KILL_HW, &priv->status);
948
			wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill);
949
		}
950
951
952
953

		handled |= CSR_INT_BIT_RF_KILL;
	}

954
	/* Chip got too hot and stopped itself */
955
	if (inta & CSR_INT_BIT_CT_KILL) {
956
		IWL_ERR(priv, "Microcode CT kill error detected.\n");
957
		priv->isr_stats.ctkill++;
958
959
960
961
962
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
963
964
		IWL_ERR(priv, "Microcode SW error detected. "
			" Restarting 0x%X.\n", inta);
965
		priv->isr_stats.sw++;
966
		iwl_irq_handle_error(priv);
967
968
969
		handled |= CSR_INT_BIT_SW_ERR;
	}

970
971
972
973
974
	/*
	 * uCode wakes up after power-down sleep.
	 * Tell device about any new tx or host commands enqueued,
	 * and about any Rx buffers made available while asleep.
	 */
975
	if (inta & CSR_INT_BIT_WAKEUP) {
976
		IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
977
		iwl_rx_queue_update_write_ptr(priv, &priv->rxq);
978
979
		for (i = 0; i < priv->hw_params.max_txq_num; i++)
			iwl_txq_update_write_ptr(priv, &priv->txq[i]);
980
		priv->isr_stats.wakeup++;
981
982
983
984
985
986
987
		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here*/
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
988
		iwl_rx_handle(priv);
989
		priv->isr_stats.rx++;
990
991
992
		handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
	}

Ben Cahill's avatar
Ben Cahill committed
993
	/* This "Tx" DMA channel is used only for loading uCode */
994
	if (inta & CSR_INT_BIT_FH_TX) {
Ben Cahill's avatar
Ben Cahill committed
995
		IWL_DEBUG_ISR(priv, "uCode load interrupt\n");
996
		priv->isr_stats.tx++;
997
		handled |= CSR_INT_BIT_FH_TX;
Ben Cahill's avatar
Ben Cahill committed
998
		/* Wake up uCode load routine, now that load is complete */
999
1000
		priv->ucode_write_complete = 1;
		wake_up_interruptible(&priv->wait_command_queue);
1001
1002
	}

1003
	if (inta & ~handled) {
1004
		IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
1005
1006
		priv->isr_stats.unhandled++;
	}
1007

1008
	if (inta & ~(priv->inta_mask)) {
1009
		IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
1010
			 inta & ~priv->inta_mask);
1011
		IWL_WARN(priv, "   with FH_INT = 0x%08x\n", inta_fh);
1012
1013
1014
	}

	/* Re-enable all interrupts */
1015
	/* only Re-enable if disabled by irq */
1016
	if (test_bit(STATUS_INT_ENABLED, &priv->status))
1017
		iwl_enable_interrupts(priv);
1018
1019
1020
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		iwl_enable_rfkill_int(priv);
1021

1022
#ifdef CONFIG_IWLWIFI_DEBUG
1023
	if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
1024
1025
1026
		inta = iwl_read32(priv, CSR_INT);
		inta_mask = iwl_read32(priv, CSR_INT_MASK);
		inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
1027
		IWL_DEBUG_ISR(priv, "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
1028
1029
1030
1031
1032
			"flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
	}
#endif
}

/* tasklet for iwlagn interrupt */
static void iwl_irq_tasklet(struct iwl_priv *priv)
{
	u32 inta = 0;
	u32 handled = 0;
	unsigned long flags;
1039
	u32 i;
Mohamed Abbas's avatar
Mohamed Abbas committed
1040
1041
1042
1043
1044
1045
1046
1047
1048
#ifdef CONFIG_IWLWIFI_DEBUG
	u32 inta_mask;
#endif

	spin_lock_irqsave(&priv->lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 */
1049
1050
1051
1052
1053
1054
1055
1056
	/* There is a hardware bug in the interrupt mask function that some
	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
	 * they are disabled in the CSR_INT_MASK register. Furthermore the
	 * ICT interrupt handling mechanism has another bug that might cause
	 * these unmasked interrupts fail to be detected. We workaround the
	 * hardware bugs here by ACKing all the possible interrupts so that
	 * interrupt coalescing can still be achieved.
	 */
1057
	iwl_write32(priv, CSR_INT, priv->_agn.inta | ~priv->inta_mask);
Mohamed Abbas's avatar
Mohamed Abbas committed
1058

1059
	inta = priv->_agn.inta;
Mohamed Abbas's avatar
Mohamed Abbas committed
1060
1061

#ifdef CONFIG_IWLWIFI_DEBUG
1062
	if (iwl_get_debug_level(priv) & IWL_DL_ISR) {
Mohamed Abbas's avatar
Mohamed Abbas committed
1063
1064
1065
1066
1067
1068
		/* just for debug */
		inta_mask = iwl_read32(priv, CSR_INT_MASK);
		IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x\n ",
				inta, inta_mask);
	}
#endif
Zhu Yi's avatar
Zhu Yi committed
1069
1070
1071

	spin_unlock_irqrestore(&priv->lock, flags);

1072
1073
	/* saved interrupt in inta variable now we can reset priv->_agn.inta */
	priv->_agn.inta = 0;
Mohamed Abbas's avatar
Mohamed Abbas committed
1074
1075
1076

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
1077
		IWL_ERR(priv, "Hardware error detected.  Restarting.\n");
Mohamed Abbas's avatar
Mohamed Abbas committed
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090

		/* Tell the device to stop sending interrupts */
		iwl_disable_interrupts(priv);

		priv->isr_stats.hw++;
		iwl_irq_handle_error(priv);

		handled |= CSR_INT_BIT_HW_ERR;

		return;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
1091
	if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
Mohamed Abbas's avatar
Mohamed Abbas committed
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			IWL_DEBUG_ISR(priv, "Scheduler finished to transmit "
				      "the frame/frames.\n");
			priv->isr_stats.sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			IWL_DEBUG_ISR(priv, "Alive interrupt\n");
			priv->isr_stats.alive++;
		}
	}
#endif
	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		int hw_rf_kill = 0;
		if (!(iwl_read32(priv, CSR_GP_CNTRL) &
				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
			hw_rf_kill = 1;

1116
		IWL_WARN(priv, "RF_KILL bit toggled to %s.\n",
Mohamed Abbas's avatar
Mohamed Abbas committed
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
				hw_rf_kill ? "disable radio" : "enable radio");

		priv->isr_stats.rfkill++;

		/* driver only loads ucode once setting the interface up.
		 * the driver allows loading the ucode even if the radio
		 * is killed. Hence update the killswitch state here. The
		 * rfkill handler will care about restarting if needed.
		 */
		if (!test_bit(STATUS_ALIVE, &priv->status)) {
			if (hw_rf_kill)
				set_bit(STATUS_RF_KILL_HW, &priv->status);
			else
				clear_bit(STATUS_RF_KILL_HW, &priv->status);
1131
			wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill);
Mohamed Abbas's avatar
Mohamed Abbas committed
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
		}

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(priv, "Microcode CT kill error detected.\n");
		priv->isr_stats.ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(priv, "Microcode SW error detected. "
			" Restarting 0x%X.\n", inta);
		priv->isr_stats.sw++;
		iwl_irq_handle_error(priv);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
		iwl_rx_queue_update_write_ptr(priv, &priv->rxq);
1157
1158
		for (i = 0; i < priv->hw_params.max_txq_num; i++)
			iwl_txq_update_write_ptr(priv, &priv->txq[i]);
Mohamed Abbas's avatar
Mohamed Abbas committed
1159
1160
1161
1162
1163
1164
1165
1166
1167

		priv->isr_stats.wakeup++;

		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here*/
1168
1169
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
			CSR_INT_BIT_RX_PERIODIC)) {
Mohamed Abbas's avatar
Mohamed Abbas committed
1170
		IWL_DEBUG_ISR(priv, "Rx interrupt\n");
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
			iwl_write32(priv, CSR_FH_INT_STATUS,
					CSR49_FH_INT_RX_MASK);
		}
		if (inta & CSR_INT_BIT_RX_PERIODIC) {
			handled |= CSR_INT_BIT_RX_PERIODIC;
			iwl_write32(priv, CSR_INT, CSR_INT_BIT_RX_PERIODIC);
		}
		/* Sending RX interrupt require many steps to be done in the
		 * the device:
		 * 1- write interrupt to current index in ICT table.
		 * 2- dma RX frame.
		 * 3- update RX shared data to indicate last write index.
		 * 4- send interrupt.
		 * This could lead to RX race, driver could receive RX interrupt
1187
1188
		 * but the shared data changes does not reflect this;
		 * periodic interrupt will detect any dangling Rx activity.
1189
		 */
1190
1191
1192

		/* Disable periodic interrupt; we use it as just a one-shot. */
		iwl_write8(priv, CSR_INT_PERIODIC_REG,
1193
			    CSR_INT_PERIODIC_DIS);
Mohamed Abbas's avatar
Mohamed Abbas committed
1194
		iwl_rx_handle(priv);
1195
1196
1197
1198
1199
1200
1201
1202

		/*
		 * Enable periodic interrupt in 8 msec only if we received
		 * real RX interrupt (instead of just periodic int), to catch
		 * any dangling Rx interrupt.  If it was just the periodic
		 * interrupt, there was no dangling Rx activity, and no need
		 * to extend the periodic interrupt; one-shot is enough.
		 */
1203
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
1204
			iwl_write8(priv, CSR_INT_PERIODIC_REG,
1205
1206
				    CSR_INT_PERIODIC_ENA);

Mohamed Abbas's avatar
Mohamed Abbas committed
1207
1208
1209
		priv->isr_stats.rx++;
	}

Ben Cahill's avatar
Ben Cahill committed
1210
	/* This "Tx" DMA channel is used only for loading uCode */
Mohamed Abbas's avatar
Mohamed Abbas committed
1211
1212
	if (inta & CSR_INT_BIT_FH_TX) {
		iwl_write32(priv, CSR_FH_INT_STATUS, CSR49_FH_INT_TX_MASK);
Ben Cahill's avatar
Ben Cahill committed
1213
		IWL_DEBUG_ISR(priv, "uCode load interrupt\n");
Mohamed Abbas's avatar
Mohamed Abbas committed
1214
1215
		priv->isr_stats.tx++;
		handled |= CSR_INT_BIT_FH_TX;
Ben Cahill's avatar
Ben Cahill committed
1216
		/* Wake up uCode load routine, now that load is complete */
Mohamed Abbas's avatar
Mohamed Abbas committed
1217
1218
1219
1220
1221
1222
1223
1224
1225
		priv->ucode_write_complete = 1;
		wake_up_interruptible(&priv->wait_command_queue);
	}

	if (inta & ~handled) {
		IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		priv->isr_stats.unhandled++;
	}

1226
	if (inta & ~(priv->inta_mask)) {
Mohamed Abbas's avatar