/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/bitops.h>
#include <linux/gfp.h>

#include "iwl-trans.h"
#include "iwl-trans-pcie-int.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-shared.h"
#include "iwl-eeprom.h"
#include "iwl-agn-hw.h"

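/**
 * iwl_trans_rx_alloc - allocate the RX queue DMA resources
 *
 * Allocates the circular buffer of Read Buffer Descriptors (RBDs) and the
 * receive buffer status area in coherent DMA memory. Called from
 * iwl_rx_init() the first time the RX queue is set up.
 */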
static int iwl_trans_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	struct device *dev = bus(trans)->dev;

	memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));

	spin_lock_init(&rxq->lock);

	if (WARN_ON(rxq->bd || rxq->rb_stts))
		return -EINVAL;

	/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
	rxq->bd = dma_alloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
				     &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err_bd;
	memset(rxq->bd, 0, sizeof(__le32) * RX_QUEUE_SIZE);

	/* Allocate the driver's pointer to receive buffer status */
	rxq->rb_stts = dma_alloc_coherent(dev, sizeof(*rxq->rb_stts),
					  &rxq->rb_stts_dma, GFP_KERNEL);
	if (!rxq->rb_stts)
		goto err_rb_stts;
	memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));

	return 0;

err_rb_stts:
	dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
			rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;
err_bd:
	return -ENOMEM;
}

static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;
	int i;

	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].page != NULL) {
			dma_unmap_page(bus(trans)->dev, rxq->pool[i].page_dma,
				PAGE_SIZE << hw_params(trans).rx_page_order,
				DMA_FROM_DEVICE);
			__free_pages(rxq->pool[i].page,
				     hw_params(trans).rx_page_order);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}
}

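/**
 * iwl_trans_rx_hw_init - program the flow handler for the RX path
 *
 * Stops RX DMA, points the device at the RBD circular buffer and the RX
 * status area, then re-enables RX DMA with the configured RB size, RB
 * timeout and RBD count, and sets the interrupt coalescing timer.
 */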
static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
				 struct iwl_rx_queue *rxq)
{
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
	u32 rb_timeout = RX_RB_TIMEOUT; /* FIXME: RX_RB_TIMEOUT for all devices? */

	if (iwlagn_mod_params.amsdu_size_8K)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(bus(trans), FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(bus(trans), FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(bus(trans), FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
			   rb_size |
			   (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(bus(trans), CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
}

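/**
 * iwl_rx_init - initialize (or reset) the RX queue
 *
 * Allocates the RX queue on first use, returns all RX buffers to the
 * rx_used list, resets the read/write pointers, replenishes the queue
 * with fresh buffers and programs the RX hardware.
 */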
static int iwl_rx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;

	int i, err;
	unsigned long flags;

	if (!rxq->bd) {
		err = iwl_trans_rx_alloc(trans);
		if (err)
			return err;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);

	iwl_trans_rxq_free_rx_bufs(trans);

	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);

	iwlagn_rx_replenish(trans);

	iwl_trans_rx_hw_init(trans, rxq);

	spin_lock_irqsave(&trans->shrd->lock, flags);
	rxq->need_update = 1;
	iwl_rx_queue_update_write_ptr(trans, rxq);
	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	return 0;
}

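/**
 * iwl_trans_pcie_rx_free - free the RX queue DMA resources
 *
 * Counterpart of iwl_trans_rx_alloc(): frees the RX buffers, the RBD
 * circular buffer and the RX status area. Does nothing if the queue
 * was never allocated.
 */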
static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_queue *rxq = &trans_pcie->rxq;

	unsigned long flags;

	/* if rxq->bd is NULL, it means that nothing has been allocated,
	 * exit now */
	if (!rxq->bd) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	spin_lock_irqsave(&rxq->lock, flags);
	iwl_trans_rxq_free_rx_bufs(trans);
	spin_unlock_irqrestore(&rxq->lock, flags);

	dma_free_coherent(bus(trans)->dev, sizeof(__le32) * RX_QUEUE_SIZE,
			  rxq->bd, rxq->bd_dma);
	memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
	rxq->bd = NULL;

	if (rxq->rb_stts)
		dma_free_coherent(bus(trans)->dev,
				  sizeof(struct iwl_rb_status),
				  rxq->rb_stts, rxq->rb_stts_dma);
	else
		IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
	memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
	rxq->rb_stts = NULL;
}

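/**
 * iwl_trans_rx_stop - stop RX DMA and wait for the channel to go idle
 */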
static int iwl_trans_rx_stop(struct iwl_trans *trans)
{
	/* stop Rx DMA */
	iwl_write_direct32(bus(trans), FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	return iwl_poll_direct_bit(bus(trans), FH_MEM_RSSR_RX_STATUS_REG,
			    FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}

static inline int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
				    struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(bus(trans)->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

static inline void iwlagn_free_dma_ptr(struct iwl_trans *trans,
				    struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(bus(trans)->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

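/**
 * iwl_trans_txq_alloc - allocate the per-queue TX structures
 *
 * Allocates the command and meta arrays, the per-TFD skb pointers (data
 * queues only) and the TFD circular buffer shared with the device. On
 * failure, everything allocated so far is freed again.
 */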
static int iwl_trans_txq_alloc(struct iwl_trans *trans,
				struct iwl_tx_queue *txq, int slots_num,
				u32 txq_id)
{
	size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
	int i;

	if (WARN_ON(txq->meta || txq->cmd || txq->skbs || txq->tfds))
		return -EINVAL;

	txq->q.n_window = slots_num;

	txq->meta = kcalloc(slots_num, sizeof(txq->meta[0]), GFP_KERNEL);
	txq->cmd = kcalloc(slots_num, sizeof(txq->cmd[0]), GFP_KERNEL);

	if (!txq->meta || !txq->cmd)
		goto error;

	if (txq_id == trans->shrd->cmd_queue)
		for (i = 0; i < slots_num; i++) {
			txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd),
						GFP_KERNEL);
			if (!txq->cmd[i])
				goto error;
		}

	/* Alloc driver data array and TFD circular buffer */
	/* Driver private data, only for Tx (not command) queues,
	 * not shared with device. */
	if (txq_id != trans->shrd->cmd_queue) {
		txq->skbs = kcalloc(TFD_QUEUE_SIZE_MAX, sizeof(txq->skbs[0]),
				    GFP_KERNEL);
		if (!txq->skbs) {
			IWL_ERR(trans, "kmalloc for auxiliary BD "
				  "structures failed\n");
			goto error;
		}
	} else {
		txq->skbs = NULL;
	}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(bus(trans)->dev, tfd_sz,
				       &txq->q.dma_addr, GFP_KERNEL);
	if (!txq->tfds) {
		IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
		goto error;
	}
	txq->q.id = txq_id;

	return 0;
error:
	kfree(txq->skbs);
	txq->skbs = NULL;
	/* since txq->cmd has been zeroed,
	 * all non allocated cmd[i] will be NULL */
	if (txq->cmd && txq_id == trans->shrd->cmd_queue)
		for (i = 0; i < slots_num; i++)
			kfree(txq->cmd[i]);
	kfree(txq->meta);
	kfree(txq->cmd);
	txq->meta = NULL;
	txq->cmd = NULL;

	return -ENOMEM;
}

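/**
 * iwl_trans_txq_init - initialize an already allocated TX queue
 *
 * Resets the queue state, initializes the read/write pointers and
 * high/low-water marks, and tells the device where the TFD circular
 * buffer of this queue lives in DRAM.
 */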
static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
		      int slots_num, u32 txq_id)
{
	int ret;

	txq->need_update = 0;
	memset(txq->meta, 0, sizeof(txq->meta[0]) * slots_num);

	/*
	 * For the default queues 0-3, set up the swq_id
	 * already -- all others need to get one later
	 * (if they need one at all).
	 */
	if (txq_id < 4)
		iwl_set_swq_id(txq, txq_id, txq_id);

	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
			txq_id);
	if (ret)
		return ret;

	/*
	 * Tell nic where to find circular buffer of Tx Frame Descriptors for
	 * given Tx queue, and enable the DMA channel used for that queue.
	 * Circular buffer (TFD queue in DRAM) physical base address */
	iwl_write_direct32(bus(trans), FH_MEM_CBBC_QUEUE(txq_id),
			     txq->q.dma_addr >> 8);

	return 0;
}

/**
 * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
 */
static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct iwl_queue *q = &txq->q;
	enum dma_data_direction dma_dir;
	unsigned long flags;
	spinlock_t *lock;

	if (!q->n_bd)
		return;

	/* In the command queue, all the TBs are mapped as BIDI
	 * so unmap them as such.
	 */
	if (txq_id == trans->shrd->cmd_queue) {
		dma_dir = DMA_BIDIRECTIONAL;
		lock = &trans->hcmd_lock;
	} else {
		dma_dir = DMA_TO_DEVICE;
		lock = &trans->shrd->sta_lock;
	}

	spin_lock_irqsave(lock, flags);
	while (q->write_ptr != q->read_ptr) {
		/* The read_ptr needs to be bound by q->n_window */
		iwlagn_txq_free_tfd(trans, txq, get_cmd_index(q, q->read_ptr),
				    dma_dir);
		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
	}
	spin_unlock_irqrestore(lock, flags);
}

/**
 * iwl_tx_queue_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
	struct device *dev = bus(trans)->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_tx_queue_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans->shrd->cmd_queue)
		for (i = 0; i < txq->q.n_window; i++)
			kfree(txq->cmd[i]);

	/* De-alloc circular buffer of TFDs */
	if (txq->q.n_bd) {
		dma_free_coherent(dev, sizeof(struct iwl_tfd) *
				  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
		memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
	}

	/* De-alloc array of per-TFD driver data */
	kfree(txq->skbs);
	txq->skbs = NULL;

	/* deallocate arrays */
	kfree(txq->cmd);
	kfree(txq->meta);
	txq->cmd = NULL;
	txq->meta = NULL;

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

/**
 * iwl_trans_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Tx queues */
	if (trans_pcie->txq) {
		for (txq_id = 0;
		     txq_id < hw_params(trans).max_txq_num; txq_id++)
			iwl_tx_queue_free(trans, txq_id);
	}

	kfree(trans_pcie->txq);
	trans_pcie->txq = NULL;

	iwlagn_free_dma_ptr(trans, &trans_pcie->kw);

	iwlagn_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
}

/**
 * iwl_trans_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 *
 * @param priv
 * @return error code
 */
static int iwl_trans_tx_alloc(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	u16 scd_bc_tbls_size = hw_params(trans).max_txq_num *
			sizeof(struct iwlagn_scd_bc_tbl);

	/* It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(trans_pcie->txq)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
				   scd_bc_tbls_size);
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(trans, "Keep Warm allocation failed\n");
		goto error;
	}

	trans_pcie->txq = kcalloc(hw_params(trans).max_txq_num,
				  sizeof(struct iwl_tx_queue), GFP_KERNEL);
	if (!trans_pcie->txq) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) {
		slots_num = (txq_id == trans->shrd->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_alloc(trans, &trans_pcie->txq[txq_id],
					  slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
	}

	return 0;

error:
	iwl_trans_pcie_tx_free(trans);

	return ret;
}
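
/**
 * iwl_tx_init - initialize (or reset) the TX path
 *
 * Allocates the TX context on first use, turns off all TX DMA FIFOs,
 * programs the keep-warm buffer address and initializes every TX queue,
 * including the command queue.
 */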
static int iwl_tx_init(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	unsigned long flags;
	bool alloc = false;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!trans_pcie->txq) {
		ret = iwl_trans_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock_irqsave(&trans->shrd->lock, flags);

	/* Turn off all Tx DMA fifos */
	iwl_write_prph(bus(trans), SCD_TXFACT, 0);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(bus(trans), FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++) {
		slots_num = (txq_id == trans->shrd->cmd_queue) ?
					TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_trans_txq_init(trans, &trans_pcie->txq[txq_id],
					 slots_num, txq_id);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}
	}

	return 0;
error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		iwl_trans_pcie_tx_free(trans);
	return ret;
}

static void iwl_set_pwr_vmain(struct iwl_trans *trans)
{
/*
 * (for documentation purposes)
 * to set power to V_AUX, do:

		if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
			iwl_set_bits_mask_prph(bus(trans), APMG_PS_CTRL_REG,
					       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
					       ~APMG_PS_CTRL_MSK_PWR_SRC);
 */

	iwl_set_bits_mask_prph(bus(trans), APMG_PS_CTRL_REG,
			       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
			       ~APMG_PS_CTRL_MSK_PWR_SRC);
}

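/**
 * iwl_nic_init - bring up the basic NIC state
 *
 * Initializes the APM, interrupt coalescing and power source, applies
 * the NIC configuration and sets up the RX and TX paths. Sets
 * STATUS_INIT on success.
 */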
static int iwl_nic_init(struct iwl_trans *trans)
{
	unsigned long flags;

	/* nic_init */
	spin_lock_irqsave(&trans->shrd->lock, flags);
	iwl_apm_init(priv(trans));

	/* Set interrupt coalescing calibration timer to default (512 usecs) */
	iwl_write8(bus(trans), CSR_INT_COALESCING,
		IWL_HOST_INT_CALIB_TIMEOUT_DEF);

	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	iwl_set_pwr_vmain(trans);

	iwl_nic_config(priv(trans));

	/* Allocate the RX queue, or reset if it is already allocated */
	iwl_rx_init(trans);

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_tx_init(trans))
		return -ENOMEM;

	if (hw_params(trans).shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(bus(trans), CSR_MAC_SHADOW_REG_CTRL,
			0x800FFFFF);
	}

	set_bit(STATUS_INIT, &trans->shrd->status);

	return 0;
}

#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
		CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
				CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
				CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
				HW_READY_TIMEOUT);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}

/* Note: returns standard 0/-ERROR code */
static int iwl_trans_pcie_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_set_hw_ready(trans);
	if (ret >= 0)
		return 0;

	/* If HW is not ready, prepare the conditions to check again */
	iwl_set_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
			CSR_HW_IF_CONFIG_REG_PREPARE);

	ret = iwl_poll_bit(bus(trans), CSR_HW_IF_CONFIG_REG,
			~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
			CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);

	if (ret < 0)
		return ret;

	/* HW should be ready by now, check again. */
	ret = iwl_set_hw_ready(trans);
	if (ret >= 0)
		return 0;
	return ret;
}

#define IWL_AC_UNSET -1

struct queue_to_fifo_ac {
	s8 fifo, ac;
};

static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
	{ IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
	{ IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
};

static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
	{ IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
	{ IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
	{ IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_BE_IPAN, 2, },
	{ IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_AUX, IWL_AC_UNSET, },
};

static const u8 iwlagn_bss_ac_to_fifo[] = {
	IWL_TX_FIFO_VO,
	IWL_TX_FIFO_VI,
	IWL_TX_FIFO_BE,
	IWL_TX_FIFO_BK,
};
static const u8 iwlagn_bss_ac_to_queue[] = {
	0, 1, 2, 3,
};
static const u8 iwlagn_pan_ac_to_fifo[] = {
	IWL_TX_FIFO_VO_IPAN,
	IWL_TX_FIFO_VI_IPAN,
	IWL_TX_FIFO_BE_IPAN,
	IWL_TX_FIFO_BK_IPAN,
};
static const u8 iwlagn_pan_ac_to_queue[] = {
	7, 6, 5, 4,
};

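/**
 * iwl_trans_pcie_start_device - prepare the NIC for firmware load
 *
 * Sets up the AC/queue/FIFO mappings, checks hardware readiness and the
 * RF-kill state, clears pending interrupts, runs iwl_nic_init() and
 * re-enables host interrupts.
 */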
static int iwl_trans_pcie_start_device(struct iwl_trans *trans)
{
	int ret;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	trans->shrd->ucode_owner = IWL_OWNERSHIP_DRIVER;

	trans_pcie->ac_to_queue[IWL_RXON_CTX_BSS] = iwlagn_bss_ac_to_queue;
	trans_pcie->ac_to_queue[IWL_RXON_CTX_PAN] = iwlagn_pan_ac_to_queue;

	trans_pcie->ac_to_fifo[IWL_RXON_CTX_BSS] = iwlagn_bss_ac_to_fifo;
	trans_pcie->ac_to_fifo[IWL_RXON_CTX_PAN] = iwlagn_pan_ac_to_fifo;

	trans_pcie->mcast_queue[IWL_RXON_CTX_BSS] = 0;
	trans_pcie->mcast_queue[IWL_RXON_CTX_PAN] = IWL_IPAN_MCAST_QUEUE;

	if ((hw_params(trans).sku & EEPROM_SKU_CAP_AMT_ENABLE) &&
	     iwl_trans_pcie_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		return -EIO;
	}

	/* If platform's RF_KILL switch is NOT set to KILL */
	if (iwl_read32(bus(trans), CSR_GP_CNTRL) &
			CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
		clear_bit(STATUS_RF_KILL_HW, &trans->shrd->status);
	else
		set_bit(STATUS_RF_KILL_HW, &trans->shrd->status);

	if (iwl_is_rfkill(trans->shrd)) {
		iwl_set_hw_rfkill_state(priv(trans), true);
		iwl_enable_interrupts(trans);
		return -ERFKILL;
	}

	iwl_write32(bus(trans), CSR_INT, 0xFFFFFFFF);

	ret = iwl_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(bus(trans), CSR_INT, 0xFFFFFFFF);
	iwl_enable_interrupts(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(bus(trans), CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	return 0;
}

/*
 * Activate/Deactivate Tx DMA/FIFO channels according to the Tx FIFOs mask
 * must be called under priv->shrd->lock and mac access
 */
static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
{
	iwl_write_prph(bus(trans), SCD_TXFACT, mask);
}

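/**
 * iwl_trans_pcie_tx_start - program the TX scheduler and start TX DMA
 *
 * Clears the scheduler context and status areas in SRAM, points the
 * scheduler at the byte-count tables, enables the FH TX DMA channels,
 * initializes each TX queue's scheduler context and maps the default
 * queues to their FIFOs.
 */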
static void iwl_trans_pcie_tx_start(struct iwl_trans *trans)
{
	const struct queue_to_fifo_ac *queue_to_fifo;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;

	spin_lock_irqsave(&trans->shrd->lock, flags);

	trans_pcie->scd_base_addr =
		iwl_read_prph(bus(trans), SCD_SRAM_BASE_ADDR);
	a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
	/* reset context data memory */
	for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
		a += 4)
		iwl_write_targ_mem(bus(trans), a, 0);
	/* reset tx status memory */
	for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
		a += 4)
		iwl_write_targ_mem(bus(trans), a, 0);
	for (; a < trans_pcie->scd_base_addr +
	       SCD_TRANS_TBL_OFFSET_QUEUE(hw_params(trans).max_txq_num);
	       a += 4)
		iwl_write_targ_mem(bus(trans), a, 0);

	iwl_write_prph(bus(trans), SCD_DRAM_BASE_ADDR,
		       trans_pcie->scd_bc_tbls.dma >> 10);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
		iwl_write_direct32(bus(trans), FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(bus(trans), FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(bus(trans), FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwl_write_prph(bus(trans), SCD_QUEUECHAIN_SEL,
		SCD_QUEUECHAIN_SEL_ALL(trans));
	iwl_write_prph(bus(trans), SCD_AGGR_SEL, 0);

	/* initiate the queues */
	for (i = 0; i < hw_params(trans).max_txq_num; i++) {
		iwl_write_prph(bus(trans), SCD_QUEUE_RDPTR(i), 0);
		iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR, 0 | (i << 8));
		iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(i), 0);
		iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(i) +
				sizeof(u32),
				((SCD_WIN_SIZE <<
				SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
				((SCD_FRAME_LIMIT <<
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
	}

	iwl_write_prph(bus(trans), SCD_INTERRUPT_MASK,
			IWL_MASK(0, hw_params(trans).max_txq_num));

	/* Activate all Tx DMA/FIFO channels */
	iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));

	/* map queues to FIFOs */
	if (trans->shrd->valid_contexts != BIT(IWL_RXON_CTX_BSS))
		queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
	else
		queue_to_fifo = iwlagn_default_queue_to_tx_fifo;

	iwl_trans_set_wr_ptrs(trans, trans->shrd->cmd_queue, 0);

	/* make sure all queues are not stopped */
	memset(&trans_pcie->queue_stopped[0], 0,
		sizeof(trans_pcie->queue_stopped));
	for (i = 0; i < 4; i++)
		atomic_set(&trans_pcie->queue_stop_count[i], 0);

	/* reset to 0 to enable all the queues first */
	trans_pcie->txq_ctx_active_msk = 0;

	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) <
						IWLAGN_FIRST_AMPDU_QUEUE);
	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) <
						IWLAGN_FIRST_AMPDU_QUEUE);

	for (i = 0; i < IWLAGN_FIRST_AMPDU_QUEUE; i++) {
		int fifo = queue_to_fifo[i].fifo;
		int ac = queue_to_fifo[i].ac;

		iwl_txq_ctx_activate(trans_pcie, i);

		if (fifo == IWL_TX_FIFO_UNUSED)
			continue;

		if (ac != IWL_AC_UNSET)
			iwl_set_swq_id(&trans_pcie->txq[i], ac, i);
		iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[i],
					      fifo, 0);
	}

	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	/* Enable L1-Active */
	iwl_clear_bits_prph(bus(trans), APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

/**
 * iwl_trans_tx_stop - Stop all Tx DMA channels
 */
static int iwl_trans_tx_stop(struct iwl_trans *trans)
{
	int ch, txq_id;
	unsigned long flags;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* Turn off all Tx DMA fifos */
	spin_lock_irqsave(&trans->shrd->lock, flags);

	iwl_trans_txq_set_sched(trans, 0);

	/* Stop each Tx DMA channel, and wait for it to be idle */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write_direct32(bus(trans),
				   FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		if (iwl_poll_direct_bit(bus(trans), FH_TSSR_TX_STATUS_REG,
				    FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
				    1000))
			IWL_ERR(trans, "Failing on timeout while stopping"
			    " DMA channel %d [0x%08x]", ch,
			    iwl_read_direct32(bus(trans),
					      FH_TSSR_TX_STATUS_REG));
	}
	spin_unlock_irqrestore(&trans->shrd->lock, flags);

	if (!trans_pcie->txq) {
		IWL_WARN(trans, "Stopping tx queues that aren't allocated...");
		return 0;
	}

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++)
		iwl_tx_queue_unmap(trans, txq_id);

	return 0;
}

static void iwl_trans_pcie_disable_sync_irq(struct iwl_trans *trans)
{
	unsigned long flags;
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_irqsave(&trans->shrd->lock, flags);
	iwl_disable_interrupts(trans);