/*
 * This file is part of wl1271
 *
 * Copyright (C) 2008-2010 Nokia Corporation
 *
 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */

#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/delay.h>
#include <linux/spi/spi.h>
#include <linux/crc32.h>
#include <linux/etherdevice.h>
31
#include <linux/vmalloc.h>
32
#include <linux/platform_device.h>
33
#include <linux/slab.h>
34
#include <linux/wl12xx.h>
35
#include <linux/sched.h>
36
#include <linux/interrupt.h>
37

38
#include "wlcore.h"
39
#include "debug.h"
40
#include "wl12xx_80211.h"
41
42
43
44
45
46
47
48
49
50
51
#include "io.h"
#include "event.h"
#include "tx.h"
#include "rx.h"
#include "ps.h"
#include "init.h"
#include "debugfs.h"
#include "cmd.h"
#include "boot.h"
#include "testmode.h"
#include "scan.h"
52
#include "hw_ops.h"
53

54
55
#define WL1271_BOOT_RETRIES 3

56
#define WL1271_BOOT_RETRIES 3
57

58
static char *fwlog_param;
59
static bool bug_on_recovery;
60
static bool no_recovery;
61

62
static void __wl1271_op_remove_interface(struct wl1271 *wl,
63
					 struct ieee80211_vif *vif,
64
					 bool reset_tx_queues);
Eliad Peller's avatar
Eliad Peller committed
65
static void wl1271_op_stop(struct ieee80211_hw *hw);
66
static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
67

68
69
static int wl12xx_set_authorized(struct wl1271 *wl,
				 struct wl12xx_vif *wlvif)
70
71
{
	int ret;
Eliad Peller's avatar
Eliad Peller committed
72

73
74
75
76
	if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
		return -EINVAL;

	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
77
78
		return 0;

79
	if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
80
81
		return 0;

82
	ret = wl12xx_cmd_set_peer_state(wl, wlvif->sta.hlid);
83
84
85
	if (ret < 0)
		return ret;

Eliad Peller's avatar
Eliad Peller committed
86
	wl12xx_croc(wl, wlvif->role_id);
87

88
89
90
	wl1271_info("Association completed.");
	return 0;
}
91

92
static int wl1271_reg_notify(struct wiphy *wiphy,
93
94
			     struct regulatory_request *request)
{
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
	struct ieee80211_supported_band *band;
	struct ieee80211_channel *ch;
	int i;

	band = wiphy->bands[IEEE80211_BAND_5GHZ];
	for (i = 0; i < band->n_channels; i++) {
		ch = &band->channels[i];
		if (ch->flags & IEEE80211_CHAN_DISABLED)
			continue;

		if (ch->flags & IEEE80211_CHAN_RADAR)
			ch->flags |= IEEE80211_CHAN_NO_IBSS |
				     IEEE80211_CHAN_PASSIVE_SCAN;

	}

	return 0;
}

114
115
/*
 * Enable or disable FW rx-streaming for @wlvif and mirror the result
 * in the WLVIF_FLAG_RX_STREAMING_STARTED flag.
 *
 * Returns the ACX command status (<0 on failure, flag untouched).
 * Caller must hold wl->mutex.
 */
static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				   bool enable)
{
	int ret = 0;

	/* we should hold wl->mutex */
	ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
	if (ret < 0)
		goto out;

	if (enable)
		set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
	else
		clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
out:
	return ret;
}

/*
 * this function is being called when the rx_streaming interval
 * has beed changed or rx_streaming should be disabled
 */
136
int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
137
138
139
140
141
{
	int ret = 0;
	int period = wl->conf.rx_streaming.interval;

	/* don't reconfigure if rx_streaming is disabled */
142
	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
143
144
145
146
		goto out;

	/* reconfigure/disable according to new streaming_period */
	if (period &&
147
	    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
148
149
	    (wl->conf.rx_streaming.always ||
	     test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
150
		ret = wl1271_set_rx_streaming(wl, wlvif, true);
151
	else {
152
		ret = wl1271_set_rx_streaming(wl, wlvif, false);
153
		/* don't cancel_work_sync since we might deadlock */
154
		del_timer_sync(&wlvif->rx_streaming_timer);
155
156
157
158
159
160
161
162
	}
out:
	return ret;
}

static void wl1271_rx_streaming_enable_work(struct work_struct *work)
{
	int ret;
163
164
165
	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
						rx_streaming_enable_work);
	struct wl1271 *wl = wlvif->wl;
166
167
168

	mutex_lock(&wl->mutex);

169
	if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
170
	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
171
172
173
174
175
176
177
178
179
180
181
	    (!wl->conf.rx_streaming.always &&
	     !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
		goto out;

	if (!wl->conf.rx_streaming.interval)
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

182
	ret = wl1271_set_rx_streaming(wl, wlvif, true);
183
184
185
186
	if (ret < 0)
		goto out_sleep;

	/* stop it after some time of inactivity */
187
	mod_timer(&wlvif->rx_streaming_timer,
188
189
190
191
192
193
194
195
196
197
198
		  jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));

out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}

static void wl1271_rx_streaming_disable_work(struct work_struct *work)
{
	int ret;
199
200
201
	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
						rx_streaming_disable_work);
	struct wl1271 *wl = wlvif->wl;
202
203
204

	mutex_lock(&wl->mutex);

205
	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
206
207
208
209
210
211
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

212
	ret = wl1271_set_rx_streaming(wl, wlvif, false);
213
214
215
216
217
218
219
220
221
222
223
	if (ret)
		goto out_sleep;

out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}

static void wl1271_rx_streaming_timer(unsigned long data)
{
224
225
226
	struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
	struct wl1271 *wl = wlvif->wl;
	ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
227
228
}

229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
/* wl->mutex must be taken */
void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
{
	/* if the watchdog is not armed, don't do anything */
	if (wl->tx_allocated_blocks == 0)
		return;

	/* restart the timeout window from now */
	cancel_delayed_work(&wl->tx_watchdog_work);
	ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
		msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
}

/*
 * Tx watchdog: fires when no Tx blocks were freed by the FW for
 * tx_watchdog_timeout ms while blocks are still allocated.  Benign
 * causes (ROC, scan, AP buffering for sleeping stations) re-arm the
 * watchdog; otherwise the FW is assumed stuck and recovery is queued.
 */
static void wl12xx_tx_watchdog_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct wl1271 *wl;

	dwork = container_of(work, struct delayed_work, work);
	wl = container_of(dwork, struct wl1271, tx_watchdog_work);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state == WL1271_STATE_OFF))
		goto out;

	/* Tx went out in the meantime - everything is ok */
	if (unlikely(wl->tx_allocated_blocks == 0))
		goto out;

	/*
	 * if a ROC is in progress, we might not have any Tx for a long
	 * time (e.g. pending Tx on the non-ROC channels)
	 */
	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
			     wl->conf.tx.tx_watchdog_timeout);
		wl12xx_rearm_tx_watchdog_locked(wl);
		goto out;
	}

	/*
	 * if a scan is in progress, we might not have any Tx for a long
	 * time
	 */
	if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
			     wl->conf.tx.tx_watchdog_timeout);
		wl12xx_rearm_tx_watchdog_locked(wl);
		goto out;
	}

	/*
	* AP might cache a frame for a long time for a sleeping station,
	* so rearm the timer if there's an AP interface with stations. If
	* Tx is genuinely stuck we will most hopefully discover it when all
	* stations are removed due to inactivity.
	*/
	if (wl->active_sta_count) {
		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
			     " %d stations",
			      wl->conf.tx.tx_watchdog_timeout,
			      wl->active_sta_count);
		wl12xx_rearm_tx_watchdog_locked(wl);
		goto out;
	}

	/* no benign explanation - assume the FW is stuck and recover */
	wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
		     wl->conf.tx.tx_watchdog_timeout);
	wl12xx_queue_recovery_work(wl);

out:
	mutex_unlock(&wl->mutex);
}

303
static void wlcore_adjust_conf(struct wl1271 *wl)
304
{
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
	/* Adjust settings according to optional module parameters */
	if (fwlog_param) {
		if (!strcmp(fwlog_param, "continuous")) {
			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
		} else if (!strcmp(fwlog_param, "ondemand")) {
			wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
		} else if (!strcmp(fwlog_param, "dbgpins")) {
			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
		} else if (!strcmp(fwlog_param, "disable")) {
			wl->conf.fwlog.mem_blocks = 0;
			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
		} else {
			wl1271_error("Unknown fwlog parameter %s", fwlog_param);
		}
	}
}
322

323
324
/*
 * Initialize the chip for PLT (production line testing) mode.
 *
 * Runs the chip-specific hw init, configures FW memory, enables the
 * data path and forces CAM (always-awake) power saving.  On any
 * failure after memory configuration, the cached target memory map is
 * released.  Returns 0 on success, negative error code otherwise.
 */
static int wl1271_plt_init(struct wl1271 *wl)
{
	int ret;

	ret = wl->ops->hw_init(wl);
	if (ret < 0)
		return ret;

	ret = wl1271_acx_init_mem_config(wl);
	if (ret < 0)
		return ret;

	ret = wl12xx_acx_mem_cfg(wl);
	if (ret < 0)
		goto out_free_memmap;

	/* Enable data path */
	ret = wl1271_cmd_data_path(wl, 1);
	if (ret < 0)
		goto out_free_memmap;

	/* Configure for CAM power saving (ie. always active) */
	ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
	if (ret < 0)
		goto out_free_memmap;

	/* configure PM */
	ret = wl1271_acx_pm_config(wl);
	if (ret < 0)
		goto out_free_memmap;

	return 0;

 out_free_memmap:
	kfree(wl->target_mem_map);
	wl->target_mem_map = NULL;

	return ret;
}

363
364
365
/*
 * Regulate host-side (high-level) power save for one AP link, based
 * on the FW-reported PS state of the station and the number of its
 * packets still held in FW (@tx_pkts).
 */
static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
					struct wl12xx_vif *wlvif,
					u8 hlid, u8 tx_pkts)
{
	bool fw_ps, single_sta;

	fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
	single_sta = (wl->active_sta_count == 1);

	/*
	 * Wake up from high level PS if the STA is asleep with too little
	 * packets in FW or if the STA is awake.
	 */
	if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
		wl12xx_ps_link_end(wl, wlvif, hlid);

	/*
	 * Start high-level PS if the STA is asleep with enough blocks in FW.
	 * Make an exception if this is the only connected station. In this
	 * case FW-memory congestion is not a problem.
	 */
	else if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
		wl12xx_ps_link_start(wl, wlvif, hlid, true);
}

388
static void wl12xx_irq_update_links_status(struct wl1271 *wl,
389
					   struct wl12xx_vif *wlvif,
390
					   struct wl_fw_status *status)
391
{
392
	struct wl1271_link *lnk;
393
	u32 cur_fw_ps_map;
394
395
396
	u8 hlid, cnt;

	/* TODO: also use link_fast_bitmap here */
397
398
399
400
401
402
403
404
405
406
407

	cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
	if (wl->ap_fw_ps_map != cur_fw_ps_map) {
		wl1271_debug(DEBUG_PSM,
			     "link ps prev 0x%x cur 0x%x changed 0x%x",
			     wl->ap_fw_ps_map, cur_fw_ps_map,
			     wl->ap_fw_ps_map ^ cur_fw_ps_map);

		wl->ap_fw_ps_map = cur_fw_ps_map;
	}

408
409
	for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS) {
		lnk = &wl->links[hlid];
410
411
		cnt = status->counters.tx_lnk_free_pkts[hlid] -
			lnk->prev_freed_pkts;
412

413
		lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[hlid];
414
		lnk->allocated_pkts -= cnt;
415

416
417
		wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
					    lnk->allocated_pkts);
418
419
420
	}
}

Eliad Peller's avatar
Eliad Peller committed
421
static void wl12xx_fw_status(struct wl1271 *wl,
422
			     struct wl_fw_status *status)
423
{
424
	struct wl12xx_vif *wlvif;
425
	struct timespec ts;
426
	u32 old_tx_blk_count = wl->tx_blocks_available;
Eliad Peller's avatar
Eliad Peller committed
427
	int avail, freed_blocks;
428
	int i;
429
430
431
	size_t status_len;

	status_len = sizeof(*status) + wl->fw_status_priv_len;
432

433
	wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, status,
434
			     status_len, false);
435

436
437
438
439
440
441
442
	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
		     "drv_rx_counter = %d, tx_results_counter = %d)",
		     status->intr,
		     status->fw_rx_counter,
		     status->drv_rx_counter,
		     status->tx_results_counter);

443
444
	for (i = 0; i < NUM_TX_QUEUES; i++) {
		/* prevent wrap-around in freed-packets counter */
445
		wl->tx_allocated_pkts[i] -=
446
				(status->counters.tx_released_pkts[i] -
447
448
				wl->tx_pkts_freed[i]) & 0xff;

449
		wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
450
451
	}

452
453
454
455
456
457
458
459
460
	/* prevent wrap-around in total blocks counter */
	if (likely(wl->tx_blocks_freed <=
		   le32_to_cpu(status->total_released_blks)))
		freed_blocks = le32_to_cpu(status->total_released_blks) -
			       wl->tx_blocks_freed;
	else
		freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
			       le32_to_cpu(status->total_released_blks);

Eliad Peller's avatar
Eliad Peller committed
461
	wl->tx_blocks_freed = le32_to_cpu(status->total_released_blks);
462

463
464
	wl->tx_allocated_blocks -= freed_blocks;

465
466
467
468
469
470
471
472
473
474
475
476
	/*
	 * If the FW freed some blocks:
	 * If we still have allocated blocks - re-arm the timer, Tx is
	 * not stuck. Otherwise, cancel the timer (no Tx currently).
	 */
	if (freed_blocks) {
		if (wl->tx_allocated_blocks)
			wl12xx_rearm_tx_watchdog_locked(wl);
		else
			cancel_delayed_work(&wl->tx_watchdog_work);
	}

Eliad Peller's avatar
Eliad Peller committed
477
	avail = le32_to_cpu(status->tx_total) - wl->tx_allocated_blocks;
478

Eliad Peller's avatar
Eliad Peller committed
479
480
481
482
483
484
485
486
487
488
	/*
	 * The FW might change the total number of TX memblocks before
	 * we get a notification about blocks being released. Thus, the
	 * available blocks calculation might yield a temporary result
	 * which is lower than the actual available blocks. Keeping in
	 * mind that only blocks that were allocated can be moved from
	 * TX to RX, tx_blocks_available should never decrease here.
	 */
	wl->tx_blocks_available = max((int)wl->tx_blocks_available,
				      avail);
489

Ido Yariv's avatar
Ido Yariv committed
490
	/* if more blocks are available now, tx work can be scheduled */
491
	if (wl->tx_blocks_available > old_tx_blk_count)
Ido Yariv's avatar
Ido Yariv committed
492
		clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
493

Eliad Peller's avatar
Eliad Peller committed
494
	/* for AP update num of allocated TX blocks per link and ps status */
495
	wl12xx_for_each_wlvif_ap(wl, wlvif) {
496
		wl12xx_irq_update_links_status(wl, wlvif, status);
497
	}
Eliad Peller's avatar
Eliad Peller committed
498

499
	/* update the host-chipset time offset */
500
501
502
	getnstimeofday(&ts);
	wl->time_offset = (timespec_to_ns(&ts) >> 10) -
		(s64)le32_to_cpu(status->fw_localtime);
503
504
}

505
506
507
508
509
510
511
512
513
514
/*
 * Drain both deferred skb queues: hand received frames and completed
 * Tx statuses to mac80211 (process-context variants).
 */
static void wl1271_flush_deferred_work(struct wl1271 *wl)
{
	struct sk_buff *skb;

	/* Pass all received frames to the network stack */
	while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
		ieee80211_rx_ni(wl->hw, skb);

	/* Return sent skbs to the network stack */
	while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
		ieee80211_tx_status_ni(wl->hw, skb);
}

/*
 * Work item: flush the deferred rx/tx queues, repeating until the
 * deferred rx queue stays empty (new frames may arrive concurrently).
 */
static void wl1271_netstack_work(struct work_struct *work)
{
	struct wl1271 *wl =
		container_of(work, struct wl1271, netstack_work);

	for (;;) {
		wl1271_flush_deferred_work(wl);
		if (!skb_queue_len(&wl->deferred_rx_queue))
			break;
	}
}
527

528
529
#define WL1271_IRQ_MAX_LOOPS 256

530
static irqreturn_t wl1271_irq(int irq, void *cookie)
531
532
{
	int ret;
533
	u32 intr;
534
	int loopcount = WL1271_IRQ_MAX_LOOPS;
535
536
537
	struct wl1271 *wl = (struct wl1271 *)cookie;
	bool done = false;
	unsigned int defer_count;
Ido Yariv's avatar
Ido Yariv committed
538
539
540
541
542
	unsigned long flags;

	/* TX might be handled here, avoid redundant work */
	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
	cancel_work_sync(&wl->tx_work);
543

544
545
546
547
548
549
550
	/*
	 * In case edge triggered interrupt must be used, we cannot iterate
	 * more than once without introducing race conditions with the hardirq.
	 */
	if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
		loopcount = 1;

551
552
553
554
	mutex_lock(&wl->mutex);

	wl1271_debug(DEBUG_IRQ, "IRQ work");

555
	if (unlikely(wl->state == WL1271_STATE_OFF))
556
557
		goto out;

558
	ret = wl1271_ps_elp_wakeup(wl);
559
560
561
	if (ret < 0)
		goto out;

562
563
564
565
566
567
568
569
	while (!done && loopcount--) {
		/*
		 * In order to avoid a race with the hardirq, clear the flag
		 * before acknowledging the chip. Since the mutex is held,
		 * wl1271_ps_elp_wakeup cannot be called concurrently.
		 */
		clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
		smp_mb__after_clear_bit();
570

Eliad Peller's avatar
Eliad Peller committed
571
		wl12xx_fw_status(wl, wl->fw_status);
572
573
574

		wlcore_hw_tx_immediate_compl(wl);

Eliad Peller's avatar
Eliad Peller committed
575
		intr = le32_to_cpu(wl->fw_status->intr);
576
		intr &= WL1271_INTR_MASK;
577
		if (!intr) {
578
			done = true;
579
580
			continue;
		}
581

582
583
584
		if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
			wl1271_error("watchdog interrupt received! "
				     "starting recovery.");
585
			wl12xx_queue_recovery_work(wl);
586
587
588
589
590

			/* restarting the chip. ignore any other interrupt. */
			goto out;
		}

591
		if (likely(intr & WL1271_ACX_INTR_DATA)) {
592
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
593

Eliad Peller's avatar
Eliad Peller committed
594
			wl12xx_rx(wl, wl->fw_status);
595

Ido Yariv's avatar
Ido Yariv committed
596
			/* Check if any tx blocks were freed */
Ido Yariv's avatar
Ido Yariv committed
597
			spin_lock_irqsave(&wl->wl_lock, flags);
Ido Yariv's avatar
Ido Yariv committed
598
			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
599
			    wl1271_tx_total_queue_count(wl) > 0) {
Ido Yariv's avatar
Ido Yariv committed
600
				spin_unlock_irqrestore(&wl->wl_lock, flags);
Ido Yariv's avatar
Ido Yariv committed
601
602
603
604
				/*
				 * In order to avoid starvation of the TX path,
				 * call the work function directly.
				 */
605
				wl1271_tx_work_locked(wl);
Ido Yariv's avatar
Ido Yariv committed
606
607
			} else {
				spin_unlock_irqrestore(&wl->wl_lock, flags);
Ido Yariv's avatar
Ido Yariv committed
608
609
			}

610
			/* check for tx results */
611
			wlcore_hw_tx_delayed_compl(wl);
612
613
614
615
616
617

			/* Make sure the deferred queues don't get too long */
			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
				      skb_queue_len(&wl->deferred_rx_queue);
			if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
				wl1271_flush_deferred_work(wl);
618
		}
619

620
621
622
623
		if (intr & WL1271_ACX_INTR_EVENT_A) {
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
			wl1271_event_handle(wl, 0);
		}
624

625
626
627
628
		if (intr & WL1271_ACX_INTR_EVENT_B) {
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
			wl1271_event_handle(wl, 1);
		}
629

630
631
632
		if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
			wl1271_debug(DEBUG_IRQ,
				     "WL1271_ACX_INTR_INIT_COMPLETE");
633

634
635
		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
636
	}
637
638
639
640

	wl1271_ps_elp_sleep(wl);

out:
Ido Yariv's avatar
Ido Yariv committed
641
642
643
644
	spin_lock_irqsave(&wl->wl_lock, flags);
	/* In case TX was not handled here, queue TX work */
	clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
645
	    wl1271_tx_total_queue_count(wl) > 0)
Ido Yariv's avatar
Ido Yariv committed
646
647
648
		ieee80211_queue_work(wl->hw, &wl->tx_work);
	spin_unlock_irqrestore(&wl->wl_lock, flags);

649
	mutex_unlock(&wl->mutex);
650
651

	return IRQ_HANDLED;
652
653
}

654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
/* accumulator for counting active interfaces (see wl12xx_get_vif_count) */
struct vif_counter_data {
	u8 counter;		/* number of active vifs seen so far */

	struct ieee80211_vif *cur_vif;	/* vif we are asking about */
	bool cur_vif_running;		/* set if cur_vif was seen in the iteration */
};

/* iterator callback: count each active vif, noting whether it is cur_vif */
static void wl12xx_vif_count_iter(void *data, u8 *mac,
				  struct ieee80211_vif *vif)
{
	struct vif_counter_data *vdata = data;

	++vdata->counter;
	if (vif == vdata->cur_vif)
		vdata->cur_vif_running = true;
}

/* caller must not hold wl->mutex, as it might deadlock */
static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
			       struct ieee80211_vif *cur_vif,
			       struct vif_counter_data *data)
{
	/* start from a zeroed accumulator, remembering which vif to track */
	memset(data, 0, sizeof(*data));
	data->cur_vif = cur_vif;

	ieee80211_iterate_active_interfaces(hw, wl12xx_vif_count_iter, data);
}

683
/*
 * Fetch the appropriate firmware image (PLT, multi-role or
 * single-role) into wl->fw.
 *
 * A no-op when the requested type is already loaded.  On success
 * wl->fw/wl->fw_len/wl->fw_type describe the new image; on failure
 * wl->fw_type is left as WL12XX_FW_TYPE_NONE with the old image
 * freed.  Returns 0 on success, negative error code otherwise.
 */
static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
{
	const struct firmware *fw;
	const char *fw_name;
	enum wl12xx_fw_type fw_type;
	int ret;

	if (plt) {
		fw_type = WL12XX_FW_TYPE_PLT;
		fw_name = wl->plt_fw_name;
	} else {
		/*
		 * we can't call wl12xx_get_vif_count() here because
		 * wl->mutex is taken, so use the cached last_vif_count value
		 */
		if (wl->last_vif_count > 1) {
			fw_type = WL12XX_FW_TYPE_MULTI;
			fw_name = wl->mr_fw_name;
		} else {
			fw_type = WL12XX_FW_TYPE_NORMAL;
			fw_name = wl->sr_fw_name;
		}
	}

	if (wl->fw_type == fw_type)
		return 0;

	wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);

	ret = request_firmware(&fw, fw_name, wl->dev);
	if (ret < 0) {
		wl1271_error("could not get firmware %s: %d", fw_name, ret);
		return ret;
	}

	if (fw->size % 4) {
		wl1271_error("firmware size is not multiple of 32 bits: %zu",
			     fw->size);
		ret = -EILSEQ;
		goto out;
	}

	/* release any previously loaded image before copying the new one */
	vfree(wl->fw);
	wl->fw_type = WL12XX_FW_TYPE_NONE;
	wl->fw_len = fw->size;
	wl->fw = vmalloc(wl->fw_len);

	if (!wl->fw) {
		wl1271_error("could not allocate memory for the firmware");
		ret = -ENOMEM;
		goto out;
	}

	memcpy(wl->fw, fw->data, wl->fw_len);
	ret = 0;
	wl->fw_type = fw_type;
out:
	release_firmware(fw);

	return ret;
}

static int wl1271_fetch_nvs(struct wl1271 *wl)
{
	const struct firmware *fw;
	int ret;

751
	ret = request_firmware(&fw, WL12XX_NVS_NAME, wl->dev);
752
753

	if (ret < 0) {
754
755
		wl1271_error("could not get nvs file %s: %d", WL12XX_NVS_NAME,
			     ret);
756
757
758
		return ret;
	}

759
	wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
760
761
762
763
764
765
766

	if (!wl->nvs) {
		wl1271_error("could not allocate memory for the nvs file");
		ret = -ENOMEM;
		goto out;
	}

767
768
	wl->nvs_len = fw->size;

769
770
771
772
773
774
out:
	release_firmware(fw);

	return ret;
}

775
776
777
778
779
780
/* schedule FW recovery, unless one is already in progress */
void wl12xx_queue_recovery_work(struct wl1271 *wl)
{
	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
		return;

	ieee80211_queue_work(wl->hw, &wl->recovery_work);
}

781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
/*
 * Append one FW log memory block to the host-side fwlog buffer.
 * The block is a length-value list; copy up to its terminator,
 * bounded by @maxlen and the remaining room in wl->fwlog (one page).
 * Returns the number of bytes copied.
 */
size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
{
	size_t len = 0;

	/* walk the length-value entries until a zero length or overrun */
	for (;;) {
		size_t entry;

		if (len >= maxlen)
			break;
		entry = memblock[len];
		if (entry == 0 || len + entry + 1 > maxlen)
			break;
		len += entry + 1;
	}

	/* Make sure we have enough room */
	len = min(len, (size_t)(PAGE_SIZE - wl->fwlog_size));

	/* Fill the FW log file, consumed by the sysfs fwlog entry */
	memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
	wl->fwlog_size += len;

	return len;
}

static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
{
	u32 addr;
	u32 first_addr;
	u8 *block;

810
	if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
	    (wl->conf.fwlog.mode != WL12XX_FWLOG_ON_DEMAND) ||
	    (wl->conf.fwlog.mem_blocks == 0))
		return;

	wl1271_info("Reading FW panic log");

	block = kmalloc(WL12XX_HW_BLOCK_SIZE, GFP_KERNEL);
	if (!block)
		return;

	/*
	 * Make sure the chip is awake and the logger isn't active.
	 * This might fail if the firmware hanged.
	 */
	if (!wl1271_ps_elp_wakeup(wl))
		wl12xx_cmd_stop_fwlog(wl);

	/* Read the first memory block address */
Eliad Peller's avatar
Eliad Peller committed
829
830
	wl12xx_fw_status(wl, wl->fw_status);
	first_addr = le32_to_cpu(wl->fw_status->log_start_addr);
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
	if (!first_addr)
		goto out;

	/* Traverse the memory blocks linked list */
	addr = first_addr;
	do {
		memset(block, 0, WL12XX_HW_BLOCK_SIZE);
		wl1271_read_hwaddr(wl, addr, block, WL12XX_HW_BLOCK_SIZE,
				   false);

		/*
		 * Memory blocks are linked to one another. The first 4 bytes
		 * of each memory block hold the hardware address of the next
		 * one. The last memory block points to the first one.
		 */
Eliad Peller's avatar
Eliad Peller committed
846
		addr = le32_to_cpup((__le32 *)block);
847
848
849
850
851
852
853
854
855
856
857
		if (!wl12xx_copy_fwlog(wl, block + sizeof(addr),
				       WL12XX_HW_BLOCK_SIZE - sizeof(addr)))
			break;
	} while (addr && (addr != first_addr));

	wake_up_interruptible(&wl->fwlog_waitq);

out:
	kfree(block);
}

858
859
860
861
static void wl1271_recovery_work(struct work_struct *work)
{
	struct wl1271 *wl =
		container_of(work, struct wl1271, recovery_work);
862
	struct wl12xx_vif *wlvif;
863
	struct ieee80211_vif *vif;
864
865
866

	mutex_lock(&wl->mutex);

867
	if (wl->state != WL1271_STATE_ON || wl->plt)
Eliad Peller's avatar
Eliad Peller committed
868
		goto out_unlock;
869

870
871
872
	/* Avoid a recursive recovery */
	set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);

873
874
	wl12xx_read_fwlog_panic(wl);

875
	wl1271_info("Hardware recovery in progress. FW ver: %s pc: 0x%x",
876
877
		    wl->chip.fw_ver_str,
		    wlcore_read_reg(wl, REG_PC_ON_RECOVERY));
878

879
880
	BUG_ON(bug_on_recovery &&
	       !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
881

882
883
884
885
886
887
888
889
	if (no_recovery) {
		wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
		clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
		goto out_unlock;
	}

	BUG_ON(bug_on_recovery);

890
891
892
893
894
	/*
	 * Advance security sequence number to overcome potential progress
	 * in the firmware during recovery. This doens't hurt if the network is
	 * not encrypted.
	 */
895
	wl12xx_for_each_wlvif(wl, wlvif) {
896
		if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
897
		    test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
898
899
900
			wlvif->tx_security_seq +=
				WL1271_TX_SQN_POST_RECOVERY_PADDING;
	}
901

902
903
904
	/* Prevent spurious TX during FW restart */
	ieee80211_stop_queues(wl->hw);

905
906
907
908
909
	if (wl->sched_scanning) {
		ieee80211_sched_scan_stopped(wl->hw);
		wl->sched_scanning = false;
	}

910
	/* reboot the chipset */
911
912
913
914
915
916
	while (!list_empty(&wl->wlvif_list)) {
		wlvif = list_first_entry(&wl->wlvif_list,
				       struct wl12xx_vif, list);
		vif = wl12xx_wlvif_to_vif(wlvif);
		__wl1271_op_remove_interface(wl, vif, false);
	}
Eliad Peller's avatar
Eliad Peller committed
917
918
	mutex_unlock(&wl->mutex);
	wl1271_op_stop(wl->hw);
919
920
921

	clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);

922
923
	ieee80211_restart_hw(wl->hw);

924
925
926
927
928
	/*
	 * Its safe to enable TX now - the queues are stopped after a request
	 * to restart the HW.
	 */
	ieee80211_wake_queues(wl->hw);
Eliad Peller's avatar
Eliad Peller committed
929
930
	return;
out_unlock:
931
932
933
	mutex_unlock(&wl->mutex);
}

934
935
static void wl1271_fw_wakeup(struct wl1271 *wl)
{
936
	wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
}

static int wl1271_setup(struct wl1271 *wl)
{
	wl->fw_status = kmalloc(sizeof(*wl->fw_status), GFP_KERNEL);
	if (!wl->fw_status)
		return -ENOMEM;

	wl->tx_res_if = kmalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
	if (!wl->tx_res_if) {
		kfree(wl->fw_status);
		return -ENOMEM;
	}

	return 0;
}

954
static int wl12xx_set_power_on(struct wl1271 *wl)
955
{
956
	int ret;
957

958
	msleep(WL1271_PRE_POWER_ON_SLEEP);
959
960
961
	ret = wl1271_power_on(wl);
	if (ret < 0)
		goto out;
962
	msleep(WL1271_POWER_ON_SLEEP);
963
964
	wl1271_io_reset(wl);
	wl1271_io_init(wl);
965

966
	wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
967
968
969
970

	/* ELP module wake up */
	wl1271_fw_wakeup(wl);

971
972
973
out:
	return ret;
}
974

975
static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
976
977
978
979
980
981
{
	int ret = 0;

	ret = wl12xx_set_power_on(wl);
	if (ret < 0)
		goto out;
982

983
984
985
986
987
988
989
990
	/*
	 * For wl127x based devices we could use the default block
	 * size (512 bytes), but due to a bug in the sdio driver, we
	 * need to set it explicitly after the chip is powered on.  To
	 * simplify the code and since the performance impact is
	 * negligible, we use the same block size for all different
	 * chip types.
	 */
991
992
	if (wl1271_set_block_size(wl))
		wl->quirks |= WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
993

994
995
996
	ret = wl->ops->identify_chip(wl);
	if (ret < 0)
		goto out;
997

998
	/* TODO: make sure the lower driver has set things up correctly */
999

1000
	ret = wl1271_setup(wl);