/*
	Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Module: rt2x00pci
	Abstract: rt2x00 generic pci device routines.
 */

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "rt2x00.h"
#include "rt2x00pci.h"

/*
 * TX data handlers.
 */
int rt2x00pci_write_tx_data(struct rt2x00_dev *rt2x00dev,
			    struct data_queue *queue, struct sk_buff *skb,
			    struct ieee80211_tx_control *control)
{
	struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
	struct queue_entry_priv_pci_tx *priv_tx = entry->priv_data;
	struct skb_frame_desc *skbdesc;
	u32 word;

	if (rt2x00queue_full(queue))
		return -EINVAL;

	rt2x00_desc_read(priv_tx->desc, 0, &word);

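	/*
	 * A free entry must have both OWNER_NIC and VALID cleared:
	 * OWNER_NIC means the device is still transmitting the frame,
	 * VALID means the frame has not been reaped by the txdone
	 * handler yet.  Hitting either bit while the queue claims to
	 * be non-full indicates a driver bug.
	 */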
	if (rt2x00_get_field32(word, TXD_ENTRY_OWNER_NIC) ||
	    rt2x00_get_field32(word, TXD_ENTRY_VALID)) {
		ERROR(rt2x00dev,
		      "Arrived at non-free entry in the non-full queue %d.\n"
		      "Please file a bug report to %s.\n",
		      control->queue, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Fill in skb descriptor
	 */
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->data = skb->data;
	skbdesc->data_len = queue->data_size;
	skbdesc->desc = priv_tx->desc;
	skbdesc->desc_len = queue->desc_size;
	skbdesc->entry = entry;

	memcpy(priv_tx->data, skb->data, skb->len);
	rt2x00lib_write_tx_desc(rt2x00dev, skb, control);

	rt2x00queue_index_inc(queue, Q_INDEX);

	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00pci_write_tx_data);

/*
 * TX/RX data handlers.
 */
void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue = rt2x00dev->rx;
	struct queue_entry *entry;
	struct queue_entry_priv_pci_rx *priv_rx;
	struct ieee80211_hdr *hdr;
	struct skb_frame_desc *skbdesc;
	struct rxdone_entry_desc rxdesc;
	int header_size;
	int align;
	u32 word;

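	/*
	 * Walk the RX ring from the current index, handing completed
	 * frames to the stack until we reach an entry the NIC still
	 * owns (i.e. one it has not filled with a frame yet).
	 */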
	while (1) {
		entry = rt2x00queue_get_entry(queue, Q_INDEX);
		priv_rx = entry->priv_data;
		rt2x00_desc_read(priv_rx->desc, 0, &word);

		if (rt2x00_get_field32(word, RXD_ENTRY_OWNER_NIC))
			break;

		memset(&rxdesc, 0, sizeof(rxdesc));
		rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc);

		hdr = (struct ieee80211_hdr *)priv_rx->data;
		header_size =
		    ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control));

		/*
		 * The data behind the ieee80211 header must be
		 * aligned on a 4-byte boundary.
		 */
		align = header_size % 4;

		/*
		 * Allocate the sk_buff, initialize it, and copy
		 * all data into it.
		 */
		entry->skb = dev_alloc_skb(rxdesc.size + align);
		if (!entry->skb)
			return;

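		/*
		 * Reserving 'align' bytes (header_size % 4) shifts the
		 * copied frame so that the payload behind the 802.11
		 * header ends up on a 4-byte boundary.
		 */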
		skb_reserve(entry->skb, align);
		memcpy(skb_put(entry->skb, rxdesc.size),
		       priv_rx->data, rxdesc.size);

		/*
		 * Fill in skb descriptor
		 */
		skbdesc = get_skb_frame_desc(entry->skb);
		memset(skbdesc, 0, sizeof(*skbdesc));
		skbdesc->data = entry->skb->data;
		skbdesc->data_len = queue->data_size;
		skbdesc->desc = priv_rx->desc;
		skbdesc->desc_len = queue->desc_size;
		skbdesc->entry = entry;

		/*
		 * Send the frame to rt2x00lib for further processing.
		 */
		rt2x00lib_rxdone(entry, &rxdesc);

		if (test_bit(DEVICE_ENABLED_RADIO, &queue->rt2x00dev->flags)) {
			rt2x00_set_field32(&word, RXD_ENTRY_OWNER_NIC, 1);
			rt2x00_desc_write(priv_rx->desc, 0, word);
		}

		rt2x00queue_index_inc(queue, Q_INDEX);
	}
}
EXPORT_SYMBOL_GPL(rt2x00pci_rxdone);

void rt2x00pci_txdone(struct rt2x00_dev *rt2x00dev, struct queue_entry *entry,
		      struct txdone_entry_desc *txdesc)
{
	struct queue_entry_priv_pci_tx *priv_tx = entry->priv_data;
	u32 word;

	txdesc->control = &priv_tx->control;
	rt2x00lib_txdone(entry, txdesc);

	/*
	 * Make this entry available for reuse.
	 */
	entry->flags = 0;

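	/*
	 * Hand the descriptor back: clearing OWNER_NIC and VALID is
	 * what allows rt2x00pci_write_tx_data() to accept this entry
	 * for the next frame.
	 */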
	rt2x00_desc_read(priv_tx->desc, 0, &word);
	rt2x00_set_field32(&word, TXD_ENTRY_OWNER_NIC, 0);
	rt2x00_set_field32(&word, TXD_ENTRY_VALID, 0);
	rt2x00_desc_write(priv_tx->desc, 0, word);

	rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE);

	/*
	 * If the data queue was full before the txdone handler ran,
	 * make sure the packet queue in the mac80211 stack
	 * is re-enabled now that an entry has been released.
	 */
	if (!rt2x00queue_full(entry->queue))
		ieee80211_wake_queue(rt2x00dev->hw, priv_tx->control.queue);
}
EXPORT_SYMBOL_GPL(rt2x00pci_txdone);

/*
 * Device initialization handlers.
 */
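
/*
 * Each queue uses a single physically contiguous DMA allocation that
 * holds all data buffers first, followed by all descriptors:
 *
 *	base: [ data 0 | ... | data N-1 | desc 0 | ... | desc N-1 ]
 *
 * The helpers below compute the size of that region and the offsets
 * of entry __i's descriptor and data buffer within it.
 */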
#define desc_size(__queue)			\
({						\
	 ((__queue)->limit * (__queue)->desc_size);\
})

#define data_size(__queue)			\
({						\
	 ((__queue)->limit * (__queue)->data_size);\
})

#define dma_size(__queue)			\
({						\
	data_size(__queue) + desc_size(__queue);\
})

#define desc_offset(__queue, __base, __i)	\
({						\
	(__base) + data_size(__queue) + 	\
	    ((__i) * (__queue)->desc_size);	\
})

#define data_offset(__queue, __base, __i)	\
({						\
	(__base) +				\
	    ((__i) * (__queue)->data_size);	\
})

static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
				     struct data_queue *queue)
{
	struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);
	struct queue_entry_priv_pci_rx *priv_rx;
	struct queue_entry_priv_pci_tx *priv_tx;
	void *desc;
	void *data_addr;
	void *data;
	dma_addr_t data_dma;
	dma_addr_t dma;
	unsigned int i;

	/*
	 * Allocate DMA memory for descriptor and buffer.
	 */
	data_addr = pci_alloc_consistent(pci_dev, dma_size(queue), &data_dma);
	if (!data_addr)
		return -ENOMEM;

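	/*
	 * Memory returned by pci_alloc_consistent() is not guaranteed
	 * to be zeroed on all architectures, so clear the whole region
	 * before handing out addresses.
	 */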
	memset(data_addr, 0, dma_size(queue));

	/*
	 * Initialize all queue entries to contain valid addresses.
	 */
	for (i = 0; i < queue->limit; i++) {
		desc = desc_offset(queue, data_addr, i);
		data = data_offset(queue, data_addr, i);
		dma = data_offset(queue, data_dma, i);

		if (queue->qid == QID_RX) {
			priv_rx = queue->entries[i].priv_data;
			priv_rx->desc = desc;
			priv_rx->data = data;
			priv_rx->dma = dma;
		} else {
			priv_tx = queue->entries[i].priv_data;
			priv_tx->desc = desc;
			priv_tx->data = data;
			priv_tx->dma = dma;
		}
	}

	return 0;
}

static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev,
				     struct data_queue *queue)
{
	struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);
	struct queue_entry_priv_pci_rx *priv_rx;
	struct queue_entry_priv_pci_tx *priv_tx;
	void *data_addr;
	dma_addr_t data_dma;

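	/*
	 * All entries of a queue share one consistent allocation, so
	 * the base address and DMA handle stored in entry 0 (see
	 * data_offset() with __i == 0) cover the entire queue.
	 */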
	if (queue->qid == QID_RX) {
		priv_rx = queue->entries[0].priv_data;
		data_addr = priv_rx->data;
		data_dma = priv_rx->dma;

		priv_rx->data = NULL;
	} else {
		priv_tx = queue->entries[0].priv_data;
		data_addr = priv_tx->data;
		data_dma = priv_tx->dma;

		priv_tx->data = NULL;
	}

	if (data_addr)
		pci_free_consistent(pci_dev, dma_size(queue),
				    data_addr, data_dma);
}

int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);
	struct data_queue *queue;
	int status;

	/*
	 * Allocate DMA
	 */
	queue_for_each(rt2x00dev, queue) {
		status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue);
		if (status)
			goto exit;
	}

	/*
	 * Register interrupt handler.
	 */
	status = request_irq(pci_dev->irq, rt2x00dev->ops->lib->irq_handler,
			     IRQF_SHARED, pci_name(pci_dev), rt2x00dev);
	if (status) {
		ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n",
		      pci_dev->irq, status);
		goto exit;
	}

	return 0;

exit:
	/*
	 * Only queue DMA can have been (partially) allocated at this
	 * point, so free exactly that: rt2x00pci_free_queue_dma() is a
	 * no-op for queues that were never allocated, while calling
	 * rt2x00pci_uninitialize() here would free_irq() a line we
	 * never requested.
	 */
	queue_for_each(rt2x00dev, queue)
		rt2x00pci_free_queue_dma(rt2x00dev, queue);

	return status;
}
EXPORT_SYMBOL_GPL(rt2x00pci_initialize);

void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * Free irq line.
	 */
	free_irq(rt2x00dev_pci(rt2x00dev)->irq, rt2x00dev);

	/*
	 * Free DMA
	 */
	queue_for_each(rt2x00dev, queue)
		rt2x00pci_free_queue_dma(rt2x00dev, queue);
}
EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize);

/*
 * PCI driver handlers.
 */
static void rt2x00pci_free_reg(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rf);
	rt2x00dev->rf = NULL;

	kfree(rt2x00dev->eeprom);
	rt2x00dev->eeprom = NULL;

	if (rt2x00dev->csr.base) {
		iounmap(rt2x00dev->csr.base);
		rt2x00dev->csr.base = NULL;
	}
}

static int rt2x00pci_alloc_reg(struct rt2x00_dev *rt2x00dev)
{
	struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);

	rt2x00dev->csr.base = ioremap(pci_resource_start(pci_dev, 0),
				      pci_resource_len(pci_dev, 0));
	if (!rt2x00dev->csr.base)
		goto exit;

	rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL);
	if (!rt2x00dev->eeprom)
		goto exit;

	rt2x00dev->rf = kzalloc(rt2x00dev->ops->rf_size, GFP_KERNEL);
	if (!rt2x00dev->rf)
		goto exit;

	return 0;

exit:
	ERROR_PROBE("Failed to allocate registers.\n");

	rt2x00pci_free_reg(rt2x00dev);

	return -ENOMEM;
}

int rt2x00pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
	struct rt2x00_ops *ops = (struct rt2x00_ops *)id->driver_data;
	struct ieee80211_hw *hw;
	struct rt2x00_dev *rt2x00dev;
	int retval;

	retval = pci_request_regions(pci_dev, pci_name(pci_dev));
	if (retval) {
		ERROR_PROBE("PCI request regions failed.\n");
		return retval;
	}

	retval = pci_enable_device(pci_dev);
	if (retval) {
		ERROR_PROBE("Enable device failed.\n");
		goto exit_release_regions;
	}

	pci_set_master(pci_dev);

	if (pci_set_mwi(pci_dev))
		ERROR_PROBE("MWI not available.\n");

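	/*
	 * Prefer a 64-bit DMA mask and fall back to 32 bits; only if
	 * the device can do neither do we have to give up.
	 */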
	if (pci_set_dma_mask(pci_dev, DMA_64BIT_MASK) &&
	    pci_set_dma_mask(pci_dev, DMA_32BIT_MASK)) {
		ERROR_PROBE("PCI DMA not supported.\n");
		retval = -EIO;
		goto exit_disable_device;
	}

	hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
	if (!hw) {
		ERROR_PROBE("Failed to allocate hardware.\n");
		retval = -ENOMEM;
		goto exit_disable_device;
	}

	pci_set_drvdata(pci_dev, hw);

	rt2x00dev = hw->priv;
	rt2x00dev->dev = pci_dev;
	rt2x00dev->ops = ops;
	rt2x00dev->hw = hw;

	retval = rt2x00pci_alloc_reg(rt2x00dev);
	if (retval)
		goto exit_free_device;

	retval = rt2x00lib_probe_dev(rt2x00dev);
	if (retval)
		goto exit_free_reg;

	return 0;

exit_free_reg:
	rt2x00pci_free_reg(rt2x00dev);

exit_free_device:
	ieee80211_free_hw(hw);

exit_disable_device:
	if (retval != -EBUSY)
		pci_disable_device(pci_dev);

exit_release_regions:
	pci_release_regions(pci_dev);

	pci_set_drvdata(pci_dev, NULL);

	return retval;
}
EXPORT_SYMBOL_GPL(rt2x00pci_probe);

void rt2x00pci_remove(struct pci_dev *pci_dev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
	struct rt2x00_dev *rt2x00dev = hw->priv;

	/*
	 * Free all allocated data.
	 */
	rt2x00lib_remove_dev(rt2x00dev);
	rt2x00pci_free_reg(rt2x00dev);
	ieee80211_free_hw(hw);

	/*
	 * Free the PCI device data.
	 */
	pci_set_drvdata(pci_dev, NULL);
	pci_disable_device(pci_dev);
	pci_release_regions(pci_dev);
}
EXPORT_SYMBOL_GPL(rt2x00pci_remove);

#ifdef CONFIG_PM
int rt2x00pci_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
	struct rt2x00_dev *rt2x00dev = hw->priv;
	int retval;

	retval = rt2x00lib_suspend(rt2x00dev, state);
	if (retval)
		return retval;

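	/*
	 * The CSR mapping and the EEPROM/RF register copies are freed
	 * here and reallocated by rt2x00pci_alloc_reg() on resume.
	 */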
	rt2x00pci_free_reg(rt2x00dev);

	pci_save_state(pci_dev);
	pci_disable_device(pci_dev);
	return pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
}
EXPORT_SYMBOL_GPL(rt2x00pci_suspend);

int rt2x00pci_resume(struct pci_dev *pci_dev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
	struct rt2x00_dev *rt2x00dev = hw->priv;
	int retval;

	if (pci_set_power_state(pci_dev, PCI_D0) ||
	    pci_enable_device(pci_dev) ||
	    pci_restore_state(pci_dev)) {
		ERROR(rt2x00dev, "Failed to resume device.\n");
		return -EIO;
	}

	retval = rt2x00pci_alloc_reg(rt2x00dev);
	if (retval)
		return retval;

	retval = rt2x00lib_resume(rt2x00dev);
	if (retval)
		goto exit_free_reg;

	return 0;

exit_free_reg:
	rt2x00pci_free_reg(rt2x00dev);

	return retval;
}
EXPORT_SYMBOL_GPL(rt2x00pci_resume);
#endif /* CONFIG_PM */
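
/*
 * A minimal sketch of how a chipset driver is expected to hook these
 * handlers into its struct pci_driver (the rt2xxxpci names below are
 * illustrative, not defined in this file):
 *
 *	static struct pci_driver rt2xxxpci_driver = {
 *		.name		= "rt2xxxpci",
 *		.id_table	= rt2xxxpci_device_table,
 *		.probe		= rt2x00pci_probe,
 *		.remove		= rt2x00pci_remove,
 *	#ifdef CONFIG_PM
 *		.suspend	= rt2x00pci_suspend,
 *		.resume		= rt2x00pci_resume,
 *	#endif
 *	};
 */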

/*
 * rt2x00pci module information.
 */
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("rt2x00 pci library");
MODULE_LICENSE("GPL");