/******************************************************************************
 *
 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/wireless.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>

#include <net/mac80211.h>

#include <asm/div64.h>

#define DRV_NAME        "iwlagn"

#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
#include "iwl-sta.h"
#include "iwl-calib.h"
#include "iwl-agn.h"


/******************************************************************************
 *
 * module boilerplate
 *
 ******************************************************************************/

/*
 * module name, copyright, version, etc.
 */
#define DRV_DESCRIPTION	"Intel(R) Wireless WiFi Link AGN driver for Linux"

#ifdef CONFIG_IWLWIFI_DEBUG
#define VD "d"
#else
#define VD
#endif

#define DRV_VERSION     IWLWIFI_VERSION VD


MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");
MODULE_ALIAS("iwl4965");

/**
 * iwl_commit_rxon - commit staging_rxon to hardware
 *
 * The RXON command in staging_rxon is committed to the hardware and
 * the active_rxon structure is updated with the new data.  This
 * function correctly transitions out of the RXON_ASSOC_MSK state if
 * a HW tune is required based on the RXON structure changes.
 */
int iwl_commit_rxon(struct iwl_priv *priv)
{
	/* cast away the const for active_rxon in this function */
	struct iwl_rxon_cmd *active_rxon = (void *)&priv->active_rxon;
	int ret;
	bool new_assoc =
		!!(priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK);

	if (!iwl_is_alive(priv))
		return -EBUSY;

	/* always get timestamp with Rx frame */
	priv->staging_rxon.flags |= RXON_FLG_TSF2HOST_MSK;

	ret = iwl_check_rxon_cmd(priv);
	if (ret) {
		IWL_ERR(priv, "Invalid RXON configuration.  Not committing.\n");
		return -EINVAL;
	}

	/*
	 * receive commit_rxon request
	 * abort any previous channel switch if still in process
	 */
	if (priv->switch_rxon.switch_in_progress &&
	    (priv->switch_rxon.channel != priv->staging_rxon.channel)) {
		IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
		      le16_to_cpu(priv->switch_rxon.channel));
		iwl_chswitch_done(priv, false);
	}

	/* If we don't need to send a full RXON, we can use
	 * iwl_rxon_assoc_cmd which is used to reconfigure filter
	 * and other flags for the current radio configuration. */
	if (!iwl_full_rxon_required(priv)) {
		ret = iwl_send_rxon_assoc(priv);
		if (ret) {
			IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
			return ret;
		}

		memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
		iwl_print_rx_config_cmd(priv);
		return 0;
	}

	/* If we are currently associated and the new config requires
	 * an RXON_ASSOC and the new config wants the associated mask enabled,
	 * we must clear the association from the active configuration
	 * before we apply the new config */
	if (iwl_is_associated(priv) && new_assoc) {
		IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;

		ret = iwl_send_cmd_pdu(priv, REPLY_RXON,
				      sizeof(struct iwl_rxon_cmd),
				      &priv->active_rxon);

		/* If the mask clearing failed then we set
		 * active_rxon back to what it was previously */
		if (ret) {
			active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
			IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret);
			return ret;
		}
		iwl_clear_ucode_stations(priv);
		iwl_restore_stations(priv);
		ret = iwl_restore_default_wep_keys(priv);
		if (ret) {
			IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
			return ret;
		}
	}

	IWL_DEBUG_INFO(priv, "Sending RXON\n"
		       "* with%s RXON_FILTER_ASSOC_MSK\n"
		       "* channel = %d\n"
		       "* bssid = %pM\n",
		       (new_assoc ? "" : "out"),
		       le16_to_cpu(priv->staging_rxon.channel),
		       priv->staging_rxon.bssid_addr);

	iwl_set_rxon_hwcrypto(priv, !priv->cfg->mod_params->sw_crypto);

	/* Apply the new configuration
	 * RXON unassoc clears the station table in uCode so restoration of
	 * stations is needed after it (the RXON command) completes
	 */
	if (!new_assoc) {
		ret = iwl_send_cmd_pdu(priv, REPLY_RXON,
			      sizeof(struct iwl_rxon_cmd), &priv->staging_rxon);
		if (ret) {
			IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
			return ret;
		}
		IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n");
		memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
		iwl_clear_ucode_stations(priv);
		iwl_restore_stations(priv);
		ret = iwl_restore_default_wep_keys(priv);
		if (ret) {
			IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
			return ret;
		}
	}

	priv->start_calib = 0;
	if (new_assoc) {
		/* Apply the new configuration; RXON assoc does not clear the
		 * station table in uCode, so no station restore is needed. */
		ret = iwl_send_cmd_pdu(priv, REPLY_RXON,
			      sizeof(struct iwl_rxon_cmd), &priv->staging_rxon);
		if (ret) {
			IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
			return ret;
		}
		memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
	}
	iwl_print_rx_config_cmd(priv);

	iwl_init_sensitivity(priv);

	/* If we issue a new RXON command which required a tune then we must
	 * send a new TXPOWER command or we won't be able to Tx any frames */
	ret = iwl_set_tx_power(priv, priv->tx_power_user_lmt, true);
	if (ret) {
		IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
		return ret;
	}

	return 0;
}

void iwl_update_chain_flags(struct iwl_priv *priv)
{

	if (priv->cfg->ops->hcmd->set_rxon_chain)
		priv->cfg->ops->hcmd->set_rxon_chain(priv);
	iwlcore_commit_rxon(priv);
}

static void iwl_clear_free_frames(struct iwl_priv *priv)
{
	struct list_head *element;

	IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n",
		       priv->frames_count);

	while (!list_empty(&priv->free_frames)) {
		element = priv->free_frames.next;
		list_del(element);
		kfree(list_entry(element, struct iwl_frame, list));
		priv->frames_count--;
	}

	if (priv->frames_count) {
		IWL_WARN(priv, "%d frames still in use.  Did we lose one?\n",
			    priv->frames_count);
		priv->frames_count = 0;
	}
}

static struct iwl_frame *iwl_get_free_frame(struct iwl_priv *priv)
{
	struct iwl_frame *frame;
	struct list_head *element;
	if (list_empty(&priv->free_frames)) {
		frame = kzalloc(sizeof(*frame), GFP_KERNEL);
		if (!frame) {
			IWL_ERR(priv, "Could not allocate frame!\n");
			return NULL;
		}

		priv->frames_count++;
		return frame;
	}

	element = priv->free_frames.next;
	list_del(element);
	return list_entry(element, struct iwl_frame, list);
}

static void iwl_free_frame(struct iwl_priv *priv, struct iwl_frame *frame)
{
	memset(frame, 0, sizeof(*frame));
	list_add(&frame->list, &priv->free_frames);
}

static u32 iwl_fill_beacon_frame(struct iwl_priv *priv,
					  struct ieee80211_hdr *hdr,
					  int left)
{
	if (!priv->ibss_beacon)
		return 0;

	if (priv->ibss_beacon->len > left)
		return 0;

	memcpy(hdr, priv->ibss_beacon->data, priv->ibss_beacon->len);

	return priv->ibss_beacon->len;
}

/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
static void iwl_set_beacon_tim(struct iwl_priv *priv,
		struct iwl_tx_beacon_cmd *tx_beacon_cmd,
		u8 *beacon, u32 frame_size)
{
	u16 tim_idx;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;

	/*
	 * The index is relative to frame start but we start looking at the
	 * variable-length part of the beacon.
	 */
	tim_idx = mgmt->u.beacon.variable - beacon;

	/* Parse variable-length elements of beacon to find WLAN_EID_TIM */
	while ((tim_idx < (frame_size - 2)) &&
			(beacon[tim_idx] != WLAN_EID_TIM))
		tim_idx += beacon[tim_idx+1] + 2;

	/* If TIM field was found, set variables */
	if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
		tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
		tx_beacon_cmd->tim_size = beacon[tim_idx+1];
	} else
		IWL_WARN(priv, "Unable to find TIM Element in beacon\n");
}

static unsigned int iwl_hw_get_beacon_cmd(struct iwl_priv *priv,
				       struct iwl_frame *frame)
{
	struct iwl_tx_beacon_cmd *tx_beacon_cmd;
	u32 frame_size;
	u32 rate_flags;
	u32 rate;
	/*
	 * We have to set up the TX command, the TX Beacon command, and the
	 * beacon contents.
	 */

	/* Initialize memory */
	tx_beacon_cmd = &frame->u.beacon;
	memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));

	/* Set up TX beacon contents */
	frame_size = iwl_fill_beacon_frame(priv, tx_beacon_cmd->frame,
				sizeof(frame->u) - sizeof(*tx_beacon_cmd));
	if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
		return 0;

	/* Set up TX command fields */
	tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
	tx_beacon_cmd->tx.sta_id = priv->hw_params.bcast_sta_id;
	tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK |
		TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK;

	/* Set up TX beacon command fields */
	iwl_set_beacon_tim(priv, tx_beacon_cmd, (u8 *)tx_beacon_cmd->frame,
			frame_size);

	/* Set up packet rate and flags */
	rate = iwl_rate_get_lowest_plcp(priv);
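	/* Rotate the management-frame TX antenna among the valid antennas
	 * so successive beacons are not pinned to a single chain. */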
	priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
					      priv->hw_params.valid_tx_ant);
	rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
	if ((rate >= IWL_FIRST_CCK_RATE) && (rate <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;
	tx_beacon_cmd->tx.rate_n_flags = iwl_hw_set_rate_n_flags(rate,
			rate_flags);

	return sizeof(*tx_beacon_cmd) + frame_size;
}
static int iwl_send_beacon_cmd(struct iwl_priv *priv)
{
	struct iwl_frame *frame;
	unsigned int frame_size;
	int rc;

	frame = iwl_get_free_frame(priv);
	if (!frame) {
		IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
			  "command.\n");
		return -ENOMEM;
	}

	frame_size = iwl_hw_get_beacon_cmd(priv, frame);
	if (!frame_size) {
		IWL_ERR(priv, "Error configuring the beacon command\n");
		iwl_free_frame(priv, frame);
		return -EINVAL;
	}

	rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
			      &frame->u.cmd[0]);

	iwl_free_frame(priv, frame);

	return rc;
}

static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
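	/* A TB packs a 36-bit DMA address: the low 32 bits live in 'lo',
	 * bits 32-35 in the low nibble of 'hi_n_len', and the upper 12
	 * bits of 'hi_n_len' hold the buffer length. */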

	dma_addr_t addr = get_unaligned_le32(&tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		addr |=
		((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}

static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
				  dma_addr_t addr, u16 len)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	if (sizeof(dma_addr_t) > sizeof(u32))
		hi_n_len |= ((addr >> 16) >> 16) & 0xF;

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd->num_tbs = idx + 1;
}

static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
	return tfd->num_tbs & 0x1f;
}

/**
 * iwl_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @priv - driver private data
 * @txq - tx queue
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
	struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)txq->tfds;
	struct iwl_tfd *tfd;
	struct pci_dev *dev = priv->pci_dev;
	int index = txq->q.read_ptr;
	int i;
	int num_tbs;

	tfd = &tfd_tmp[index];

	/* Sanity check on number of chunks */
	num_tbs = iwl_tfd_get_num_tbs(tfd);

	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite serious situation */
		return;
	}

	/* Unmap tx_cmd */
	if (num_tbs)
		pci_unmap_single(dev,
				dma_unmap_addr(&txq->meta[index], mapping),
				dma_unmap_len(&txq->meta[index], len),
				PCI_DMA_BIDIRECTIONAL);

	/* Unmap chunks, if any. */
	for (i = 1; i < num_tbs; i++)
		pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
				iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE);

	/* free SKB */
	if (txq->txb) {
		struct sk_buff *skb;

		skb = txq->txb[txq->q.read_ptr].skb;

		/* can be called from irqs-disabled context */
		if (skb) {
			dev_kfree_skb_any(skb);
			txq->txb[txq->q.read_ptr].skb = NULL;
		}
	}
}

int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
				 struct iwl_tx_queue *txq,
				 dma_addr_t addr, u16 len,
				 u8 reset, u8 pad)
{
	struct iwl_queue *q;
	struct iwl_tfd *tfd, *tfd_tmp;
	u32 num_tbs;

	q = &txq->q;
	tfd_tmp = (struct iwl_tfd *)txq->tfds;
	tfd = &tfd_tmp[q->write_ptr];

	if (reset)
		memset(tfd, 0, sizeof(*tfd));

	num_tbs = iwl_tfd_get_num_tbs(tfd);

	/* Each TFD can point to a maximum of 20 Tx buffers */
	if (num_tbs >= IWL_NUM_OF_TBS) {
		IWL_ERR(priv, "Error can not send more than %d chunks\n",
			  IWL_NUM_OF_TBS);
		return -EINVAL;
	}
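	/* TBs hold 36-bit DMA addresses; anything beyond that range means
	 * the DMA mapping is broken.  A merely misaligned address is only
	 * logged and the buffer is still attached. */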

	BUG_ON(addr & ~DMA_BIT_MASK(36));
	if (unlikely(addr & ~IWL_TX_DMA_MASK))
		IWL_ERR(priv, "Unaligned address = %llx\n",
			  (unsigned long long)addr);

	iwl_tfd_set_tb(tfd, num_tbs, addr, len);

	return 0;
}

/*
 * Tell nic where to find circular buffer of Tx Frame Descriptors for
 * given Tx queue, and enable the DMA channel used for that queue.
 *
 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
 * channels supported in hardware.
 */
int iwl_hw_tx_queue_init(struct iwl_priv *priv,
			 struct iwl_tx_queue *txq)
{
	int txq_id = txq->q.id;

	/* Circular buffer (TFD queue in DRAM) physical base address */
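	/* FH_MEM_CBBC_QUEUE takes the base address in 256-byte units,
	 * hence the ">> 8" below. */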
	iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
			     txq->q.dma_addr >> 8);

	return 0;
}

/******************************************************************************
 *
 * Generic RX handler implementations
 *
 ******************************************************************************/
static void iwl_rx_reply_alive(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_alive_resp *palive;
	struct delayed_work *pwork;

	palive = &pkt->u.alive_frame;

	IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
		       "0x%01X 0x%01X\n",
		       palive->is_valid, palive->ver_type,
		       palive->ver_subtype);

	if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
		IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
		memcpy(&priv->card_alive_init,
		       &pkt->u.alive_frame,
		       sizeof(struct iwl_init_alive_resp));
		pwork = &priv->init_alive_start;
	} else {
		IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
		memcpy(&priv->card_alive, &pkt->u.alive_frame,
		       sizeof(struct iwl_alive_resp));
		pwork = &priv->alive_start;
	}

	/* We delay the ALIVE response by 5ms to
	 * give the HW RF Kill time to activate... */
	if (palive->is_valid == UCODE_VALID_OK)
		queue_delayed_work(priv->workqueue, pwork,
				   msecs_to_jiffies(5));
	else
		IWL_WARN(priv, "uCode did not respond OK.\n");
}

static void iwl_bg_beacon_update(struct work_struct *work)
{
	struct iwl_priv *priv =
		container_of(work, struct iwl_priv, beacon_update);
	struct sk_buff *beacon;

	/* Pull updated AP beacon from mac80211. will fail if not in AP mode */
	beacon = ieee80211_beacon_get(priv->hw, priv->vif);

	if (!beacon) {
		IWL_ERR(priv, "update beacon failed\n");
		return;
	}

	mutex_lock(&priv->mutex);
	/* new beacon skb is allocated every time; dispose previous.*/
	if (priv->ibss_beacon)
		dev_kfree_skb(priv->ibss_beacon);

	priv->ibss_beacon = beacon;
	mutex_unlock(&priv->mutex);

	iwl_send_beacon_cmd(priv);
}

/**
 * iwl_bg_statistics_periodic - Timer callback to queue statistics
 *
 * This callback is provided in order to send a statistics request.
 *
 * This timer function is continually reset to execute within
 * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
 * was received.  We need to ensure we receive the statistics in order
 * to update the temperature used for calibrating the TXPOWER.
 */
static void iwl_bg_statistics_periodic(unsigned long data)
{
	struct iwl_priv *priv = (struct iwl_priv *)data;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	/* don't send host command if rf-kill is on */
	if (!iwl_is_ready_rf(priv))
		return;

	iwl_send_statistics_request(priv, CMD_ASYNC, false);
}


static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
					u32 start_idx, u32 num_events,
					u32 mode)
{
	u32 i;
	u32 ptr;        /* SRAM byte address of log data */
	u32 ev, time, data; /* event log data */
	unsigned long reg_flags;
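	/* The event log header occupies 4 words; each entry is 2 words
	 * without a timestamp (mode 0) or 3 words with one. */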

	if (mode == 0)
		ptr = base + (4 * sizeof(u32)) + (start_idx * 2 * sizeof(u32));
	else
		ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));

	/* Make sure device is powered up for SRAM reads */
	spin_lock_irqsave(&priv->reg_lock, reg_flags);
	if (iwl_grab_nic_access(priv)) {
		spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
		return;
	}

	/* Set starting address; reads will auto-increment */
	_iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
	rmb();

	/*
	 * "time" is actually "data" for mode 0 (no timestamp).
	 * place event id # at far right for easier visual parsing.
	 */
	for (i = 0; i < num_events; i++) {
		ev = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
		time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
		if (mode == 0) {
			trace_iwlwifi_dev_ucode_cont_event(priv,
							0, time, ev);
		} else {
			data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
			trace_iwlwifi_dev_ucode_cont_event(priv,
						time, data, ev);
		}
	}
	/* Allow device to power down */
	iwl_release_nic_access(priv);
	spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
}

static void iwl_continuous_event_trace(struct iwl_priv *priv)
{
	u32 capacity;   /* event log capacity in # entries */
	u32 base;       /* SRAM byte address of event log header */
	u32 mode;       /* 0 - no timestamp, 1 - timestamp recorded */
	u32 num_wraps;  /* # times uCode wrapped to top of log */
	u32 next_entry; /* index of next entry to be written by uCode */

	if (priv->ucode_type == UCODE_INIT)
		base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
	else
		base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
	if (priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
		capacity = iwl_read_targ_mem(priv, base);
		num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
		mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
		next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
	} else
		return;

	if (num_wraps == priv->event_log.num_wraps) {
		iwl_print_cont_event_trace(priv,
				       base, priv->event_log.next_entry,
				       next_entry - priv->event_log.next_entry,
				       mode);
		priv->event_log.non_wraps_count++;
	} else {
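		/*
		 * The uCode wrapped the log since the last poll: record the
		 * wrap, then dump the entries in two chunks so the part at
		 * the end of the log is printed before the part that wrapped
		 * back to the top.
		 */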
		if ((num_wraps - priv->event_log.num_wraps) > 1)
			priv->event_log.wraps_more_count++;
		else
			priv->event_log.wraps_once_count++;
		trace_iwlwifi_dev_ucode_wrap_event(priv,
				num_wraps - priv->event_log.num_wraps,
				next_entry, priv->event_log.next_entry);
		if (next_entry < priv->event_log.next_entry) {
			iwl_print_cont_event_trace(priv, base,
			       priv->event_log.next_entry,
			       capacity - priv->event_log.next_entry,
			       mode);

			iwl_print_cont_event_trace(priv, base, 0,
				next_entry, mode);
		} else {
			iwl_print_cont_event_trace(priv, base,
			       next_entry, capacity - next_entry,
			       mode);

			iwl_print_cont_event_trace(priv, base, 0,
				next_entry, mode);
		}
	}
	priv->event_log.num_wraps = num_wraps;
	priv->event_log.next_entry = next_entry;
}

/**
 * iwl_bg_ucode_trace - Timer callback to log ucode event
 *
 * The timer is continually set to execute every
 * UCODE_TRACE_PERIOD milliseconds after the last timer expired.
 * This function performs the continuous uCode event logging operation
 * if it is enabled.
 */
static void iwl_bg_ucode_trace(unsigned long data)
{
	struct iwl_priv *priv = (struct iwl_priv *)data;

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	if (priv->event_log.ucode_trace) {
		iwl_continuous_event_trace(priv);
		/* Reschedule the timer to occur in UCODE_TRACE_PERIOD */
		mod_timer(&priv->ucode_trace,
			 jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
	}
}

static void iwl_rx_beacon_notif(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl4965_beacon_notif *beacon =
		(struct iwl4965_beacon_notif *)pkt->u.raw;
#ifdef CONFIG_IWLWIFI_DEBUG
	u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);

	IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
		"tsf %d %d rate %d\n",
		le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
		beacon->beacon_notify_hdr.failure_frame,
		le32_to_cpu(beacon->ibss_mgr_status),
		le32_to_cpu(beacon->high_tsf),
		le32_to_cpu(beacon->low_tsf), rate);
#endif

	priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);

	if ((priv->iw_mode == NL80211_IFTYPE_AP) &&
	    (!test_bit(STATUS_EXIT_PENDING, &priv->status)))
		queue_work(priv->workqueue, &priv->beacon_update);
}

/* Handle notification from uCode that card's power state is changing
 * due to software, hardware, or critical temperature RFKILL */
static void iwl_rx_card_state_notif(struct iwl_priv *priv,
				    struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
	unsigned long status = priv->status;

	IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
			  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & CT_CARD_DISABLED) ?
			  "Reached" : "Not reached");

	if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
		     CT_CARD_DISABLED)) {

		iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
			    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

		iwl_write_direct32(priv, HBUS_TARG_MBX_C,
					HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);

		if (!(flags & RXON_CARD_DISABLED)) {
			iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
				    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
			iwl_write_direct32(priv, HBUS_TARG_MBX_C,
					HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
		}
		if (flags & CT_CARD_DISABLED)
			iwl_tt_enter_ct_kill(priv);
	}
	if (!(flags & CT_CARD_DISABLED))
		iwl_tt_exit_ct_kill(priv);

	if (flags & HW_CARD_DISABLED)
		set_bit(STATUS_RF_KILL_HW, &priv->status);
	else
		clear_bit(STATUS_RF_KILL_HW, &priv->status);


	if (!(flags & RXON_CARD_DISABLED))
		iwl_scan_cancel(priv);

	if ((test_bit(STATUS_RF_KILL_HW, &status) !=
	     test_bit(STATUS_RF_KILL_HW, &priv->status)))
		wiphy_rfkill_set_hw_state(priv->hw->wiphy,
			test_bit(STATUS_RF_KILL_HW, &priv->status));
	else
		wake_up_interruptible(&priv->wait_command_queue);
}

int iwl_set_pwr_src(struct iwl_priv *priv, enum iwl_pwr_src src)
{
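	/* Switching to VAUX only takes effect when the device can remain
	 * powered in D3cold; otherwise the request is silently ignored. */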
	if (src == IWL_PWR_SRC_VAUX) {
		if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
			iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
					       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
					       ~APMG_PS_CTRL_MSK_PWR_SRC);
	} else {
		iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
	}

	return 0;
}

static void iwl_bg_tx_flush(struct work_struct *work)
{
	struct iwl_priv *priv =
		container_of(work, struct iwl_priv, tx_flush);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
		return;

	/* do nothing if rf-kill is on */
	if (!iwl_is_ready_rf(priv))
		return;

	if (priv->cfg->ops->lib->txfifo_flush) {
		IWL_DEBUG_INFO(priv, "device request: flush all tx frames\n");
		iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL);
	}
}

/**
 * iwl_setup_rx_handlers - Initialize Rx handler callbacks
 *
 * Setup the RX handlers for each of the reply types sent from the uCode
 * to the host.
 *
 * This function chains into the hardware specific files for them to setup
 * any hardware specific handlers as well.
 */
static void iwl_setup_rx_handlers(struct iwl_priv *priv)
{
	priv->rx_handlers[REPLY_ALIVE] = iwl_rx_reply_alive;
	priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error;
	priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa;
	priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
			iwl_rx_spectrum_measure_notif;
	priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
	priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
	    iwl_rx_pm_debug_statistics_notif;
	priv->rx_handlers[BEACON_NOTIFICATION] = iwl_rx_beacon_notif;

	/*
	 * The same handler is used for both the REPLY to a discrete
	 * statistics request from the host as well as for the periodic
	 * statistics notifications (after received beacons) from the uCode.
	 */
	priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl_reply_statistics;
	priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl_rx_statistics;

	iwl_setup_rx_scan_handlers(priv);

	/* status change handler */
	priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl_rx_card_state_notif;

	priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
	    iwl_rx_missed_beacon_notif;
	/* Rx handlers */
	priv->rx_handlers[REPLY_RX_PHY_CMD] = iwlagn_rx_reply_rx_phy;
	priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwlagn_rx_reply_rx;
	/* block ack */
	priv->rx_handlers[REPLY_COMPRESSED_BA] = iwlagn_rx_reply_compressed_ba;
	/* Set up hardware specific Rx handlers */
	priv->cfg->ops->lib->rx_handler_setup(priv);
}

/**
 * iwl_rx_handle - Main entry function for receiving responses from uCode
 *
 * Uses the priv->rx_handlers callback function array to invoke
 * the appropriate handlers, including command responses,
 * frame-received notifications, and other notifications.
 */
void iwl_rx_handle(struct iwl_priv *priv)
{
	struct iwl_rx_mem_buffer *rxb;
	struct iwl_rx_packet *pkt;
	struct iwl_rx_queue *rxq = &priv->rxq;
	u32 r, i;
	int reclaim;
	unsigned long flags;
	u8 fill_rx = 0;
	u32 count = 8;
	int total_empty;

	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) &  0x0FFF;
	i = rxq->read;

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);

	/* calculate total frames need to be restock after handling RX */
	total_empty = r - rxq->write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;
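	/* If more than half of the queue has been consumed, arrange to
	 * restock RX buffers while still inside the handling loop so the
	 * DMA engine does not run out of free buffers during a long burst. */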

	if (total_empty > (RX_QUEUE_SIZE / 2))
		fill_rx = 1;

	while (i != r) {
		int len;

		rxb = rxq->queue[i];

		/* If an RXB doesn't have a Rx queue slot associated with it,
		 * then a bug has been introduced in the queue refilling
		 * routines -- catch it here */
		BUG_ON(rxb == NULL);

		rxq->queue[i] = NULL;

		pci_unmap_page(priv->pci_dev, rxb->page_dma,
			       PAGE_SIZE << priv->hw_params.rx_page_order,
			       PCI_DMA_FROMDEVICE);
		pkt = rxb_addr(rxb);

		len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
		len += sizeof(u32); /* account for status word */
		trace_iwlwifi_dev_rx(priv, pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
			(pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
			(pkt->hdr.cmd != REPLY_RX) &&
			(pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
			(pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
			(pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
			(pkt->hdr.cmd != REPLY_TX);

		/* Based on type of command response or notification,
		 *   handle those that need handling via function in
		 *   rx_handlers table.  See iwl_setup_rx_handlers() */
		if (priv->rx_handlers[pkt->hdr.cmd]) {
			IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
				i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
			priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
			priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
		} else {