/******************************************************************************
 *
 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/wireless.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>

#include <net/mac80211.h>

#include <asm/div64.h>

#define DRV_NAME        "iwlagn"

#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
#include "iwl-sta.h"
#include "iwl-agn-calib.h"
#include "iwl-agn.h"
#include "iwl-agn-led.h"


/******************************************************************************
 *
 * module boiler plate
 *
 ******************************************************************************/

/*
 * module name, copyright, version, etc.
 */
#define DRV_DESCRIPTION	"Intel(R) Wireless WiFi Link AGN driver for Linux"

#ifdef CONFIG_IWLWIFI_DEBUG
#define VD "d"
#else
#define VD
#endif

#define DRV_VERSION     IWLWIFI_VERSION VD


MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");

static int iwlagn_ant_coupling;
static bool iwlagn_bt_ch_announce = 1;

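/*
 * Recalculate the RX chain selection for every active context and, if the
 * staging RXON differs from the active one, commit the new configuration.
 */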
void iwl_update_chain_flags(struct iwl_priv *priv)
{
	struct iwl_rxon_context *ctx;

	if (priv->cfg->ops->hcmd->set_rxon_chain) {
		for_each_context(priv, ctx) {
			priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
			if (ctx->active.rx_chain != ctx->staging.rx_chain)
				iwlcore_commit_rxon(priv, ctx);
		}
	}
}

static void iwl_clear_free_frames(struct iwl_priv *priv)
{
	struct list_head *element;

	IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n",
		       priv->frames_count);

	while (!list_empty(&priv->free_frames)) {
		element = priv->free_frames.next;
		list_del(element);
		kfree(list_entry(element, struct iwl_frame, list));
		priv->frames_count--;
	}

	if (priv->frames_count) {
		IWL_WARN(priv, "%d frames still in use.  Did we lose one?\n",
			    priv->frames_count);
		priv->frames_count = 0;
	}
}

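/*
 * Beacon commands are built in pre-allocated iwl_frame buffers; the helpers
 * below take a frame from priv->free_frames (allocating a new one when the
 * list is empty) and return it to the list once the command has been sent.
 */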
static struct iwl_frame *iwl_get_free_frame(struct iwl_priv *priv)
{
	struct iwl_frame *frame;
	struct list_head *element;
	if (list_empty(&priv->free_frames)) {
		frame = kzalloc(sizeof(*frame), GFP_KERNEL);
		if (!frame) {
			IWL_ERR(priv, "Could not allocate frame!\n");
			return NULL;
		}

		priv->frames_count++;
		return frame;
	}

	element = priv->free_frames.next;
	list_del(element);
	return list_entry(element, struct iwl_frame, list);
}

static void iwl_free_frame(struct iwl_priv *priv, struct iwl_frame *frame)
{
	memset(frame, 0, sizeof(*frame));
	list_add(&frame->list, &priv->free_frames);
}

static u32 iwl_fill_beacon_frame(struct iwl_priv *priv,
				 struct ieee80211_hdr *hdr,
				 int left)
{
	lockdep_assert_held(&priv->mutex);

	if (!priv->beacon_skb)
		return 0;

	if (priv->beacon_skb->len > left)
		return 0;

	memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len);

	return priv->beacon_skb->len;
}

/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
static void iwl_set_beacon_tim(struct iwl_priv *priv,
			       struct iwl_tx_beacon_cmd *tx_beacon_cmd,
			       u8 *beacon, u32 frame_size)
{
	u16 tim_idx;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;

	/*
	 * The index is relative to frame start but we start looking at the
	 * variable-length part of the beacon.
	 */
	tim_idx = mgmt->u.beacon.variable - beacon;

	/* Parse variable-length elements of beacon to find WLAN_EID_TIM */
	while ((tim_idx < (frame_size - 2)) &&
			(beacon[tim_idx] != WLAN_EID_TIM))
		tim_idx += beacon[tim_idx+1] + 2;

	/* If TIM field was found, set variables */
	if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
		tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
		tx_beacon_cmd->tim_size = beacon[tim_idx+1];
	} else
		IWL_WARN(priv, "Unable to find TIM Element in beacon\n");
}

static unsigned int iwl_hw_get_beacon_cmd(struct iwl_priv *priv,
				       struct iwl_frame *frame)
{
	struct iwl_tx_beacon_cmd *tx_beacon_cmd;
	u32 frame_size;
	u32 rate_flags;
	u32 rate;
	/*
	 * We have to set up the TX command, the TX Beacon command, and the
	 * beacon contents.
	 */

	lockdep_assert_held(&priv->mutex);

	if (!priv->beacon_ctx) {
		IWL_ERR(priv, "trying to build beacon w/o beacon context!\n");
		return 0;
	}

	/* Initialize memory */
	tx_beacon_cmd = &frame->u.beacon;
	memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));

	/* Set up TX beacon contents */
	frame_size = iwl_fill_beacon_frame(priv, tx_beacon_cmd->frame,
				sizeof(frame->u) - sizeof(*tx_beacon_cmd));
	if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
		return 0;
	if (!frame_size)
		return 0;

	/* Set up TX command fields */
	tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
	tx_beacon_cmd->tx.sta_id = priv->beacon_ctx->bcast_sta_id;
	tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK |
		TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK;

	/* Set up TX beacon command fields */
	iwl_set_beacon_tim(priv, tx_beacon_cmd, (u8 *)tx_beacon_cmd->frame,
			   frame_size);

	/* Set up packet rate and flags */
	rate = iwl_rate_get_lowest_plcp(priv, priv->beacon_ctx);
	priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
					      priv->hw_params.valid_tx_ant);
	rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
	if ((rate >= IWL_FIRST_CCK_RATE) && (rate <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;
	tx_beacon_cmd->tx.rate_n_flags = iwl_hw_set_rate_n_flags(rate,
			rate_flags);

	return sizeof(*tx_beacon_cmd) + frame_size;
}

int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
{
	struct iwl_frame *frame;
	unsigned int frame_size;
	int rc;

	frame = iwl_get_free_frame(priv);
	if (!frame) {
		IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
			  "command.\n");
		return -ENOMEM;
	}

	frame_size = iwl_hw_get_beacon_cmd(priv, frame);
	if (!frame_size) {
		IWL_ERR(priv, "Error configuring the beacon command\n");
		iwl_free_frame(priv, frame);
		return -EINVAL;
	}

	rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
			      &frame->u.cmd[0]);

	iwl_free_frame(priv, frame);

	return rc;
}

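/*
 * Each transmit buffer (TB) entry in a TFD packs a 36-bit DMA address and a
 * 12-bit length: the low 32 address bits live in tb->lo, while hi_n_len holds
 * the upper 4 address bits in its low nibble and the length in its upper
 * 12 bits.  The helpers below encode and decode that layout.
 */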
static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}

static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				  dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}

static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}

/**
 * iwl_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @priv - driver private data
 * @txq - tx queue
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)txq->tfds;
	struct iwl_tfd *tfd;
	struct pci_dev *dev = priv->pci_dev;
	int index = txq->q.read_ptr;
	int i;
	int num_tbs;

	tfd = &tfd_tmp[index];

	/* Sanity check on number of chunks */
	num_tbs = iwl_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* Unmap tx_cmd */
	if (num_tbs)
		pci_unmap_single(dev,
				dma_unmap_addr(&txq->meta[index], mapping),
				dma_unmap_len(&txq->meta[index], len),
				PCI_DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++)
		pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
				iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE);

	/* free SKB */
	if (txq->txb) {
		struct sk_buff *skb;

		skb = txq->txb[txq->q.read_ptr].skb;

		/* can be called from irqs-disabled context */
		if (skb) {
			dev_kfree_skb_any(skb);
			txq->txb[txq->q.read_ptr].skb = NULL;
		}
	}
}

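/*
 * Append one buffer (addr/len) to the TFD at the queue's write pointer.
 * When 'reset' is set the TFD is cleared first; -EINVAL is returned if the
 * TFD already holds IWL_NUM_OF_TBS buffers.
 */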
int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
				 struct iwl_tx_queue *txq,
				 dma_addr_t addr, u16 len,
				 u8 reset, u8 pad)
{
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = (struct iwl_tfd *)txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum of 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(priv, "Error can not send more than %d chunks\n",
			  IWL_NUM_OF_TBS);
		return -EINVAL;
	}

	BUG_ON(addr & ~DMA_BIT_MASK(36));
	if (unlikely(addr & ~IWL_TX_DMA_MASK))
		IWL_ERR(priv, "Unaligned address = %llx\n",
			  (unsigned long long)addr);

	iwl_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}

/*
 * Tell nic where to find circular buffer of Tx Frame Descriptors for
 * given Tx queue, and enable the DMA channel used for that queue.
 *
 * supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
 * channels supported in hardware.
 */
int iwl_hw_tx_queue_init(struct iwl_priv *priv,
			 struct iwl_tx_queue *txq)
{
	int txq_id = txq->q.id;

	/* Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
			     txq->q.dma_addr >> 8);

	return 0;
}

static void iwl_bg_beacon_update(struct work_struct *work)
{
	struct iwl_priv *priv =
		container_of(work, struct iwl_priv, beacon_update);
	struct sk_buff *beacon;

	mutex_lock(&priv->mutex);
	if (!priv->beacon_ctx) {
		IWL_ERR(priv, "updating beacon w/o beacon context!\n");
		goto out;
	}

	if (priv->beacon_ctx->vif->type != NL80211_IFTYPE_AP) {
		/*
		 * The ucode will send beacon notifications even in
		 * IBSS mode, but we don't want to process them. But
		 * we need to defer the type check to here due to
		 * requiring locking around the beacon_ctx access.
		 */
		goto out;
	}

	/* Pull updated AP beacon from mac80211. will fail if not in AP mode */
	beacon = ieee80211_beacon_get(priv->hw, priv->beacon_ctx->vif);
	if (!beacon) {
		IWL_ERR(priv, "update beacon failed -- keeping old\n");
		goto out;
	}

	/* new beacon skb is allocated every time; dispose previous.*/
	dev_kfree_skb(priv->beacon_skb);

	priv->beacon_skb = beacon;

	iwlagn_send_beacon_cmd(priv);

 out:
	mutex_unlock(&priv->mutex);
}

static void iwl_bg_bt_runtime_config(struct work_struct *work)
{
	struct iwl_priv *priv =
		container_of(work, struct iwl_priv, bt_runtime_config);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	/* don't send host command if rf-kill is on */
	if (!iwl_is_ready_rf(priv))
		return;
	priv->cfg->ops->hcmd->send_bt_config(priv);
}

static void iwl_bg_bt_full_concurrency(struct work_struct *work)
{
	struct iwl_priv *priv =
		container_of(work, struct iwl_priv, bt_full_concurrency);
	struct iwl_rxon_context *ctx;

	mutex_lock(&priv->mutex);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		goto out;

	/* don't send host command if rf-kill is on */
	if (!iwl_is_ready_rf(priv))
		goto out;

	IWL_DEBUG_INFO(priv, "BT coex in %s mode\n",
		       priv->bt_full_concurrent ?
		       "full concurrency" : "3-wire");

	/*
	 * LQ & RXON updated cmds must be sent before BT Config cmd
	 * to avoid 3-wire collisions
	 */
	for_each_context(priv, ctx) {
		if (priv->cfg->ops->hcmd->set_rxon_chain)
			priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
		iwlcore_commit_rxon(priv, ctx);
	}

	priv->cfg->ops->hcmd->send_bt_config(priv);
out:
	mutex_unlock(&priv->mutex);
}

/**
 * iwl_bg_statistics_periodic - Timer callback to queue statistics
 *
 * This callback is provided in order to send a statistics request.
 *
 * This timer function is continually reset to execute within
 * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
 * was received.  We need to ensure we receive the statistics in order
 * to update the temperature used for calibrating the TXPOWER.
 */
static void iwl_bg_statistics_periodic(unsigned long data)
{
	struct iwl_priv *priv = (struct iwl_priv *)data;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	/* don't send host command if rf-kill is on */
	if (!iwl_is_ready_rf(priv))
		return;

	iwl_send_statistics_request(priv, CMD_ASYNC, false);
}


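/*
 * Dump 'num_events' entries of the continuous event log from SRAM, starting
 * at 'start_idx'.  In mode 0 each entry is two words (event id, data); in
 * timestamped mode each entry is three words (event id, time, data).
 */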
static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
					u32 start_idx, u32 num_events,
					u32 mode)
{
	u32 i;
	u32 ptr;        /* SRAM byte address of log data */
	u32 ev, time, data; /* event log data */
	unsigned long reg_flags;

	if (mode == 0)
		ptr = base + (4 * sizeof(u32)) + (start_idx * 2 * sizeof(u32));
	else
		ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));

	/* Make sure device is powered up for SRAM reads */
	spin_lock_irqsave(&priv->reg_lock, reg_flags);
	if (iwl_grab_nic_access(priv)) {
		spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
		return;
	}

	/* Set starting address; reads will auto-increment */
	_iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
	rmb();

	/*
	 * "time" is actually "data" for mode 0 (no timestamp).
	 * place event id # at far right for easier visual parsing.
	 */
	for (i = 0; i < num_events; i++) {
		ev = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
		time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
		if (mode == 0) {
			trace_iwlwifi_dev_ucode_cont_event(priv,
							0, time, ev);
		} else {
			data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
			trace_iwlwifi_dev_ucode_cont_event(priv,
						time, data, ev);
		}
	}
	/* Allow device to power down */
	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
}

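/*
 * Poll the uCode event log header in SRAM (capacity, mode, wrap count and
 * next-entry index) and trace any entries added since the previous poll,
 * taking care of the case where the log has wrapped in the meantime.
 */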
static void iwl_continuous_event_trace(struct iwl_priv *priv)
{
	u32 capacity;   /* event log capacity in # entries */
	u32 base;       /* SRAM byte address of event log header */
	u32 mode;       /* 0 - no timestamp, 1 - timestamp recorded */
	u32 num_wraps;  /* # times uCode wrapped to top of log */
	u32 next_entry; /* index of next entry to be written by uCode */

	if (priv->ucode_type == UCODE_INIT)
		base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
	else
		base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
	if (priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
		capacity = iwl_read_targ_mem(priv, base);
		num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
		mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
		next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
	} else
		return;

	if (num_wraps == priv->event_log.num_wraps) {
		iwl_print_cont_event_trace(priv,
				       base, priv->event_log.next_entry,
				       next_entry - priv->event_log.next_entry,
				       mode);
		priv->event_log.non_wraps_count++;
	} else {
		if ((num_wraps - priv->event_log.num_wraps) > 1)
			priv->event_log.wraps_more_count++;
		else
			priv->event_log.wraps_once_count++;
		trace_iwlwifi_dev_ucode_wrap_event(priv,
				num_wraps - priv->event_log.num_wraps,
				next_entry, priv->event_log.next_entry);
		if (next_entry < priv->event_log.next_entry) {
			iwl_print_cont_event_trace(priv, base,
			       priv->event_log.next_entry,
			       capacity - priv->event_log.next_entry,
			       mode);

			iwl_print_cont_event_trace(priv, base, 0,
				next_entry, mode);
		} else {
			iwl_print_cont_event_trace(priv, base,
			       next_entry, capacity - next_entry,
			       mode);

			iwl_print_cont_event_trace(priv, base, 0,
				next_entry, mode);
		}
	}
	priv->event_log.num_wraps = num_wraps;
	priv->event_log.next_entry = next_entry;
}

/**
 * iwl_bg_ucode_trace - Timer callback to log ucode event
 *
 * The timer is continually set to execute every
 * UCODE_TRACE_PERIOD milliseconds after the last timer expired;
 * this function performs the continuous uCode event logging operation,
 * if enabled.
 */
static void iwl_bg_ucode_trace(unsigned long data)
{
	struct iwl_priv *priv = (struct iwl_priv *)data;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	if (priv->event_log.ucode_trace) {
		iwl_continuous_event_trace(priv);
		/* Reschedule the timer to occur in UCODE_TRACE_PERIOD */
		mod_timer(&priv->ucode_trace,
			 jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
	}
}

static void iwl_bg_tx_flush(struct work_struct *work)
{
	struct iwl_priv *priv =
		container_of(work, struct iwl_priv, tx_flush);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	/* do nothing if rf-kill is on */
	if (!iwl_is_ready_rf(priv))
		return;

	if (priv->cfg->ops->lib->txfifo_flush) {
		IWL_DEBUG_INFO(priv, "device request: flush all tx frames\n");
		iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL);
	}
}

/**
 * iwl_rx_handle - Main entry function for receiving responses from uCode
 *
 * Uses the priv->rx_handlers callback function array to invoke
 * the appropriate handlers, including command responses,
 * frame-received notifications, and other notifications.
 */
static void iwl_rx_handle(struct iwl_priv *priv)
{
	struct iwl_rx_mem_buffer *rxb;
	struct iwl_rx_packet *pkt;
	struct iwl_rx_queue *rxq = &priv->rxq;
	u32 r, i;
	int reclaim;
	unsigned long flags;
	u8 fill_rx = 0;
	u32 count = 8;
	int total_empty;

	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) &  0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);

	/* calculate total frames that need to be restocked after handling RX */
	total_empty = r - rxq->write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;

	if (total_empty > (RX_QUEUE_SIZE / 2))
		fill_rx = 1;

	while (i != r) {
		int len;

		rxb = rxq->queue[i];

		/* If an RXB doesn't have a Rx queue slot associated with it,
		 * then a bug has been introduced in the queue refilling
		 * routines -- catch it here */
		BUG_ON(rxb == NULL);

		rxq->queue[i] = NULL;

		pci_unmap_page(priv->pci_dev, rxb->page_dma,
			       PAGE_SIZE << priv->hw_params.rx_page_order,
			       PCI_DMA_FROMDEVICE);
		pkt = rxb_addr(rxb);

		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_dev_rx(priv, pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
			(pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
			(pkt->hdr.cmd != REPLY_RX) &&
			(pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
			(pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
			(pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
			(pkt->hdr.cmd != REPLY_TX);

		/*
		 * Do the notification wait before RX handlers so
		 * even if the RX handler consumes the RXB we have
		 * access to it in the notification wait entry.
		 */
		if (!list_empty(&priv->_agn.notif_waits)) {
			struct iwl_notification_wait *w;

			spin_lock(&priv->_agn.notif_wait_lock);
			list_for_each_entry(w, &priv->_agn.notif_waits, list) {
				if (w->cmd == pkt->hdr.cmd) {
					w->triggered = true;
					if (w->fn)
						w->fn(priv, pkt);
				}
			}
			spin_unlock(&priv->_agn.notif_wait_lock);

			wake_up_all(&priv->_agn.notif_waitq);
		}

		/* Based on type of command response or notification,
		 *   handle those that need handling via function in
		 *   rx_handlers table.  See iwl_setup_rx_handlers() */
		if (priv->rx_handlers[pkt->hdr.cmd]) {
			IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
				i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
			priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
			priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
		} else {
			/* No handling needed */
			IWL_DEBUG_RX(priv,
				"r %d i %d No handler needed for %s, 0x%02x\n",
				r, i, get_cmd_string(pkt->hdr.cmd),
				pkt->hdr.cmd);
		}

		/*
		 * XXX: After here, we should always check rxb->page
		 * against NULL before touching it or its virtual
		 * memory (pkt). Because some rx_handler might have
		 * already taken or freed the pages.
		 */

		if (reclaim) {
			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking iwl_send_cmd()
			 * as we reclaim the driver command queue */
			if (rxb->page)
				iwl_tx_cmd_complete(priv, rxb);
			else
				IWL_WARN(priv, "Claim null rxb?\n");
		}

		/* Reuse the page if possible. For notification packets and
		 * SKBs that fail to Rx correctly, add them back into the
		 * rx_free list for reuse later. */
		spin_lock_irqsave(&rxq->lock, flags);
		if (rxb->page != NULL) {
			rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
				0, PAGE_SIZE << priv->hw_params.rx_page_order,
				PCI_DMA_FROMDEVICE);
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		} else
			list_add_tail(&rxb->list, &rxq->rx_used);

		spin_unlock_irqrestore(&rxq->lock, flags);

		i = (i + 1) & RX_QUEUE_MASK;

		/* If there are a lot of unused frames,
		 * restock the Rx queue so ucode won't assert. */
		if (fill_rx) {
			count++;
			if (count >= 8) {
				rxq->read = i;
				iwlagn_rx_replenish_now(priv);
				count = 0;
			}
		}
	}

	/* Backtrack one entry */
	rxq->read = i;
	if (fill_rx)
		iwlagn_rx_replenish_now(priv);
	else
		iwlagn_rx_queue_restock(priv);
}

/* call this function to flush any scheduled tasklet */
static inline void iwl_synchronize_irq(struct iwl_priv *priv)
{
	/* wait to make sure we flush pending tasklet*/
	synchronize_irq(priv->pci_dev->irq);
	tasklet_kill(&priv->irq_tasklet);
}

/* tasklet for iwlagn interrupt */
static void iwl_irq_tasklet(struct iwl_priv *priv)
{
	u32 inta = 0;
	u32 handled = 0;
	unsigned long flags;
	u32 i;
#ifdef CONFIG_IWLWIFI_DEBUG
	u32 inta_mask;
#endif

	spin_lock_irqsave(&priv->lock, flags);

	/* Ack/clear/reset pending uCode interrupts.
	 * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 */
	/* There is a hardware bug in the interrupt mask function that some
	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
	 * they are disabled in the CSR_INT_MASK register. Furthermore the
	 * ICT interrupt handling mechanism has another bug that might cause
	 * these unmasked interrupts fail to be detected. We workaround the
	 * hardware bugs here by ACKing all the possible interrupts so that
	 * interrupt coalescing can still be achieved.
	 */
	iwl_write32(priv, CSR_INT, priv->_agn.inta | ~priv->inta_mask);

	inta = priv->_agn.inta;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_get_debug_level(priv) & IWL_DL_ISR) {
		/* just for debug */
		inta_mask = iwl_read32(priv, CSR_INT_MASK);
		IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x\n ",
				inta, inta_mask);
	}
#endif

	spin_unlock_irqrestore(&priv->lock, flags);

	/* saved interrupt in inta variable now we can reset priv->_agn.inta */
	priv->_agn.inta = 0;

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(priv, "Hardware error detected.  Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_disable_interrupts(priv);

		priv->isr_stats.hw++;
		iwl_irq_handle_error(priv);

		handled |= CSR_INT_BIT_HW_ERR;

		return;
	}

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
		/* NIC fires this, but we don't use it, redundant with WAKEUP */
		if (inta & CSR_INT_BIT_SCD) {
			IWL_DEBUG_ISR(priv, "Scheduler finished to transmit "
				      "the frame/frames.\n");
			priv->isr_stats.sch++;
		}

		/* Alive notification via Rx interrupt will do the real work */
		if (inta & CSR_INT_BIT_ALIVE) {
			IWL_DEBUG_ISR(priv, "Alive interrupt\n");
			priv->isr_stats.alive++;
		}
	}
#endif
	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		int hw_rf_kill = 0;
		if (!(iwl_read32(priv, CSR_GP_CNTRL) &
				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
			hw_rf_kill = 1;

		IWL_WARN(priv, "RF_KILL bit toggled to %s.\n",
				hw_rf_kill ? "disable radio" : "enable radio");

		priv->isr_stats.rfkill++;

		/* driver only loads ucode once setting the interface up.
		 * the driver allows loading the ucode even if the radio
		 * is killed. Hence update the killswitch state here. The
		 * rfkill handler will care about restarting if needed.
		 */
		if (!test_bit(STATUS_ALIVE, &priv->status)) {
			if (hw_rf_kill)
				set_bit(STATUS_RF_KILL_HW, &priv->status);
			else
				clear_bit(STATUS_RF_KILL_HW, &priv->status);
			wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill);
		}

		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(priv, "Microcode CT kill error detected.\n");
		priv->isr_stats.ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(priv, "Microcode SW error detected. "
			" Restarting 0x%X.\n", inta);
		priv->isr_stats.sw++;
		iwl_irq_handle_error(priv);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
		iwl_rx_queue_update_write_ptr(priv, &priv->rxq);
		for (i = 0; i < priv->hw_params.max_txq_num; i++)
			iwl_txq_update_write_ptr(priv, &priv->txq[i]);

		priv->isr_stats.wakeup++;

		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here*/
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
			CSR_INT_BIT_RX_PERIODIC)) {
		IWL_DEBUG_ISR(priv, "Rx interrupt\n");
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
			iwl_write32(priv, CSR_FH_INT_STATUS,
					CSR_FH_INT_RX_MASK);
		}
		if (inta & CSR_INT_BIT_RX_PERIODIC) {
			handled |= CSR_INT_BIT_RX_PERIODIC;
			iwl_write32(priv, CSR_INT, CSR_INT_BIT_RX_PERIODIC);
		}
		/* Sending an RX interrupt requires many steps to be done in
		 * the device:
		 * 1- write interrupt to current index in ICT table.
		 * 2- dma RX frame.
		 * 3- update RX shared data to indicate last write index.
		 * 4- send interrupt.
		 * This could lead to an RX race: the driver could receive an RX
		 * interrupt but the shared data changes do not reflect this;
		 * the periodic interrupt will detect any dangling Rx activity.
		 */

		/* Disable periodic interrupt; we use it as just a one-shot. */
		iwl_write8(priv, CSR_INT_PERIODIC_REG,
			    CSR_INT_PERIODIC_DIS);
		iwl_rx_handle(priv);

		/*
		 * Enable periodic interrupt in 8 msec only if we received
		 * real RX interrupt (instead of just periodic int), to catch
		 * any dangling Rx interrupt.  If it was just the periodic
		 * interrupt, there was no dangling Rx activity, and no need
		 * to extend the periodic interrupt; one-shot is enough.
		 */
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
			iwl_write8(priv, CSR_INT_PERIODIC_REG,
				    CSR_INT_PERIODIC_ENA);

		priv->isr_stats.rx++;
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		iwl_write32(priv, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
		IWL_DEBUG_ISR(priv, "uCode load interrupt\n");
		priv->isr_stats.tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		priv->ucode_write_complete = 1;
		wake_up_interruptible(&priv->wait_command_queue);
	}

	if (inta & ~handled) {
		IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		priv->isr_stats.unhandled++;
	}

	if (inta & ~(priv->inta_mask)) {
		IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~priv->inta_mask);
	}

	/* Re-enable all interrupts */
	/* only Re-enable if disabled by irq */
	if (test_bit(STATUS_INT_ENABLED, &priv->status))
		iwl_enable_interrupts(priv);
	/* Re-enable RF_KILL if it occurred */
	else if (handled & CSR_INT_BIT_RF_KILL)
		iwl_enable_rfkill_int(priv);
}

/*****************************************************************************
 *
 * sysfs attributes
 *
 *****************************************************************************/

#ifdef CONFIG_IWLWIFI_DEBUG

/*
 * The following adds a new attribute to the sysfs representation
 * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
 * used for controlling the debug level.
 *
 * See the level definitions in iwl for details.
 *
 * The debug_level being managed using sysfs below is a per device debug
 * level that is used instead of the global debug level if it (the per
 * device debug level) is set.
 */
static ssize_t show_debug_level(struct device *d,
				struct device_attribute *attr, char *buf)
{
	struct iwl_priv *priv = dev_get_drvdata(d);
	return sprintf(buf, "0x%08X\n", iwl_get_debug_level(priv));
}
static ssize_t store_debug_level(struct device *d,
				struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct iwl_priv *priv = dev_get_drvdata(d);
	unsigned long val;
	int ret;

	ret = strict_strtoul(buf, 0, &val);
	if (ret)
		IWL_ERR(priv, "%s is not in hex or decimal form.\n", buf);
	else {
		priv->debug_level = val;
		if (iwl_alloc_traffic_mem(priv))
			IWL_ERR(priv,
				"Not enough memory to generate traffic log\n");
	}
	return strnlen(buf, count);
}

static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
			show_debug_level, store_debug_level);


#endif /* CONFIG_IWLWIFI_DEBUG */


static ssize_t show_temperature(struct device *d,
				struct device_attribute *attr, char *buf)
{
	struct iwl_priv *priv = dev_get_drvdata(d);

	if (!iwl_is_alive(priv))
		return -EAGAIN;

	return sprintf(buf, "%d\n", priv->temperature);
}

static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL);

static ssize_t show_tx_power(struct device *d,
			     struct device_attribute *attr, char *buf)
{
	struct iwl_priv *priv = dev_get_drvdata(d);

	if (!iwl_is_ready_rf(priv))
		return sprintf(buf, "off\n");
	else
		return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
}

static ssize_t store_tx_power(struct device *d,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct iwl_priv *priv = dev_get_drvdata(d);
	unsigned long val;
	int ret;

	ret = strict_strtoul(buf, 10, &val);
	if (ret)
		IWL_INFO(priv, "%s is not in decimal form.\n", buf);
	else {
		ret = iwl_set_tx_power(priv, val, false);
		if (ret)
			IWL_ERR(priv, "failed setting tx power (0x%d).\n",
				ret);
		else
			ret = count;
	}
	return ret;
}

static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power);

static struct attribute *iwl_sysfs_entries[] = {
	&dev_attr_temperature.attr,
	&dev_attr_tx_power.attr,
#ifdef CONFIG_IWLWIFI_DEBUG
	&dev_attr_debug_level.attr,
#endif
	NULL
};

static struct attribute_group iwl_attribute_group = {
	.name = NULL,		/* put in device directory */
	.attrs = iwl_sysfs_entries,
};

/******************************************************************************
 *
 * uCode download functions
 *
 ******************************************************************************/

static void iwl_dealloc_ucode_pci(struct iwl_priv *priv)
{
	iwl_free_fw_desc(priv->pci_dev, &priv->ucode_code);
	iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data);
	iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
	iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init);
	iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
}

static void iwl_nic_start(struct iwl_priv *priv)
{
	/* Remove all resets to allow NIC to operate */
	iwl_write32(priv, CSR_RESET, 0);
}

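/* Capabilities advertised by the firmware image, filled in while parsing it
 * and used when setting up the mac80211 registration. */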
struct iwlagn_ucode_capabilities {
	u32 max_probe_length;
	u32 standard_phy_calibration_size;
	bool pan;
};

static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context);
static int iwl_mac_setup_register(struct iwl_priv *priv,
				  struct iwlagn_ucode_capabilities *capa);

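/* With CONFIG_IWLWIFI_DEBUG_EXPERIMENTAL_UCODE, an "exp"-tagged experimental
 * image is tried before falling back to the numbered API versions. */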
#define UCODE_EXPERIMENTAL_INDEX	100
#define UCODE_EXPERIMENTAL_TAG		"exp"

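/*
 * Build the firmware file name as <fw_name_pre><api>.ucode and request it
 * asynchronously.  When called again with first == false, the API index is
 * stepped down until ucode_api_min is passed and -ENOENT is returned.
 */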
static int __must_check iwl_request_firmware(struct iwl_priv *priv, bool first)
{
	const char *name_pre = priv->cfg->fw_name_pre;
	char tag[8];

	if (first) {
#ifdef CONFIG_IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
		priv->fw_index = UCODE_EXPERIMENTAL_INDEX;
		strcpy(tag, UCODE_EXPERIMENTAL_TAG);
	} else if (priv->fw_index == UCODE_EXPERIMENTAL_INDEX) {
#endif
		priv->fw_index = priv->cfg->ucode_api_max;
		sprintf(tag, "%d", priv->fw_index);
	} else {
		priv->fw_index--;
		sprintf(tag, "%d", priv->fw_index);
	}

	if (priv->fw_index < priv->cfg->ucode_api_min) {
		IWL_ERR(priv, "no suitable firmware found!\n");
		return -ENOENT;
	}

	sprintf(priv->firmware_name, "%s%s%s", name_pre, tag, ".ucode");

	IWL_DEBUG_INFO(priv, "attempting to load firmware %s'%s'\n",
		       (priv->fw_index == UCODE_EXPERIMENTAL_INDEX)
				? "EXPERIMENTAL " : "",
		       priv->firmware_name);

	return request_firmware_nowait(THIS_MODULE, 1, priv->firmware_name,
				       &priv->pci_dev->dev, GFP_KERNEL, priv,
				       iwl_ucode_callback);
}

struct iwlagn_firmware_pieces {
	const void *inst, *data, *init, *init_data;
	size_t inst_size, data_size, init_size, init_data_size;

	u32 build;

	u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
	u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
};

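/*
 * Parse a pre-TLV ("legacy") firmware image.  The header is 24 bytes for API
 * versions 0-2 and 28 bytes (adding a build number) otherwise; the inst,
 * data, init and init_data sections follow back to back and their combined
 * size must match the file size.
 */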
static int iwlagn_load_legacy_firmware(struct iwl_priv *priv,
				       const struct firmware *ucode_raw,
				       struct iwlagn_firmware_pieces *pieces)
{
	struct iwl_ucode_header *ucode = (void *)ucode_raw->data;
	u32 api_ver, hdr_size;
	const u8 *src;

	priv->ucode_ver = le32_to_cpu(ucode->ver);
	api_ver = IWL_UCODE_API(priv->ucode_ver);

	switch (api_ver) {
	default:
		hdr_size = 28;
		if (ucode_raw->size < hdr_size) {
			IWL_ERR(priv, "File size too small!\n");
			return -EINVAL;
		}
		pieces->build = le32_to_cpu(ucode->u.v2.build);
		pieces->inst_size = le32_to_cpu(ucode->u.v2.inst_size);
		pieces->data_size = le32_to_cpu(ucode->u.v2.data_size);
		pieces->init_size = le32_to_cpu(ucode->u.v2.init_size);
		pieces->init_data_size = le32_to_cpu(ucode->u.v2.init_data_size);
		src = ucode->u.v2.data;
		break;
	case 0:
	case 1:
	case 2:
		hdr_size = 24;
		if (ucode_raw->size < hdr_size) {
			IWL_ERR(priv, "File size too small!\n");
			return -EINVAL;
		}
		pieces->build = 0;
		pieces->inst_size = le32_to_cpu(ucode->u.v1.inst_size);
		pieces->data_size = le32_to_cpu(ucode->u.v1.data_size);
		pieces->init_size = le32_to_cpu(ucode->u.v1.init_size);
		pieces->init_data_size = le32_to_cpu(ucode->u.v1.init_data_size);
		src = ucode->u.v1.data;
		break;
	}

	/* Verify size of file vs. image size info in file's header */
	if (ucode_raw->size != hdr_size + pieces->inst_size +
				pieces->data_size + pieces->init_size +
				pieces->init_data_size) {

		IWL_ERR(priv,
			"uCode file size %d does not match expected size\n",
			(int)ucode_raw->size);
		return -EINVAL;
	}

	pieces->inst = src;
	src += pieces->inst_size;
	pieces->data = src;
	src += pieces->data_size;
	pieces->init = src;
	src += pieces->init_size;
	pieces->init_data = src;
	src += pieces->init_data_size;

	return 0;
}

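/*
 * iwlagn_load_firmware() parses the newer TLV firmware format: after
 * validating the IWL_TLV_UCODE_MAGIC header it walks the (type, length,
 * data) entries; iwlagn_wanted_ucode_alternative selects which uCode
 * "alternative" within the file is preferred.
 */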
static int iwlagn_wanted_ucode_alternative = 1;

static int iwlagn_load_firmware(struct iwl_priv *priv,
				const struct firmware *ucode_raw,
				struct iwlagn_firmware_pieces *pieces,
				struct iwlagn_ucode_capabilities *capa)
{
	struct iwl_tlv_ucode_header *ucode = (void *)ucode_raw->data;
	struct iwl_ucode_tlv *tlv;
	size_t len = ucode_raw->size;
	const u8 *data;
	int wanted_alternative = iwlagn_wanted_ucode_alternative, tmp;
	u64 alternatives;
	u32 tlv_len;
	enum iwl_ucode_tlv_type tlv_type;
	const u8 *tlv_data;

	if (len < sizeof(*ucode)) {
		IWL_ERR(priv, "uCode has invalid length: %zd\n", len);
		return -EINVAL;
	}

	if (ucode->magic != cpu_to_le32(IWL_TLV_UCODE_MAGIC)) {
		IWL_ERR(priv, "invalid uCode magic: 0X%x\n",
			le32_to_cpu(ucode->magic));