/*

  Broadcom B43 wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <m@bues.ch>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "b43.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <asm/div64.h>

/* Required number of TX DMA slots per TX frame.
 * This is currently 2, because we put the header and the ieee80211 frame
 * into separate slots. */
#define TX_SLOTS_PER_FRAME	2


/* 32bit DMA ops. */
static
struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}

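/* Program one 32-bit hardware descriptor: the DMA address with the SSB
 * translation bits forms the address word; the byte count, the address
 * extension bits and the start/end/IRQ flags form the control word. */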
static void op32_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	slot = (int)(&(desc->dma32) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addr = (u32) (dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (u32) (dmaaddr & SSB_DMA_TRANSLATION_MASK)
	    >> SSB_DMA_TRANSLATION_SHIFT;
	addr |= ring->dev->dma.translation;
	ctl = bufsize & B43_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= B43_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= B43_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= B43_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= B43_DMA32_DCTL_IRQ;
	ctl |= (addrext << B43_DMA32_DCTL_ADDREXT_SHIFT)
	    & B43_DMA32_DCTL_ADDREXT_MASK;

	desc->dma32.control = cpu_to_le32(ctl);
	desc->dma32.address = cpu_to_le32(addr);
}

static void op32_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static void op32_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      | B43_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      & ~B43_DMA32_TXSUSPEND);
}

static int op32_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA32_RXSTATUS);
	val &= B43_DMA32_RXDPTR;

	return (val / sizeof(struct b43_dmadesc32));
}

static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static const struct b43_dma_ops dma32_ops = {
	.idx2desc = op32_idx2desc,
	.fill_descriptor = op32_fill_descriptor,
	.poke_tx = op32_poke_tx,
	.tx_suspend = op32_tx_suspend,
	.tx_resume = op32_tx_resume,
	.get_current_rxslot = op32_get_current_rxslot,
	.set_current_rxslot = op32_set_current_rxslot,
};

/* 64bit DMA ops. */
static
struct b43_dmadesc_generic *op64_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc64 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}

static void op64_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc64 *descbase = ring->descbase;
	int slot;
	u32 ctl0 = 0, ctl1 = 0;
	u32 addrlo, addrhi;
	u32 addrext;

	slot = (int)(&(desc->dma64) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addrlo = (u32) (dmaaddr & 0xFFFFFFFF);
	addrhi = (((u64) dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (((u64) dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
	    >> SSB_DMA_TRANSLATION_SHIFT;
	addrhi |= ring->dev->dma.translation;
	if (slot == ring->nr_slots - 1)
		ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
	if (start)
		ctl0 |= B43_DMA64_DCTL0_FRAMESTART;
	if (end)
		ctl0 |= B43_DMA64_DCTL0_FRAMEEND;
	if (irq)
		ctl0 |= B43_DMA64_DCTL0_IRQ;
	ctl1 |= bufsize & B43_DMA64_DCTL1_BYTECNT;
	ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT)
	    & B43_DMA64_DCTL1_ADDREXT_MASK;

	desc->dma64.control0 = cpu_to_le32(ctl0);
	desc->dma64.control1 = cpu_to_le32(ctl1);
	desc->dma64.address_low = cpu_to_le32(addrlo);
	desc->dma64.address_high = cpu_to_le32(addrhi);
}

static void op64_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static void op64_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      | B43_DMA64_TXSUSPEND);
}

static void op64_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      & ~B43_DMA64_TXSUSPEND);
}

static int op64_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA64_RXSTATUS);
	val &= B43_DMA64_RXSTATDPTR;

	return (val / sizeof(struct b43_dmadesc64));
}

static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static const struct b43_dma_ops dma64_ops = {
	.idx2desc = op64_idx2desc,
	.fill_descriptor = op64_fill_descriptor,
	.poke_tx = op64_poke_tx,
	.tx_suspend = op64_tx_suspend,
	.tx_resume = op64_tx_resume,
	.get_current_rxslot = op64_get_current_rxslot,
	.set_current_rxslot = op64_set_current_rxslot,
};

static inline int free_slots(struct b43_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

static inline int prev_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}

#ifdef CONFIG_B43_DEBUG
static void update_max_used_slots(struct b43_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) {
		b43dbg(ring->dev->wl,
		       "max_used_slots increased to %d on %s ring %d\n",
		       ring->max_used_slots,
		       ring->tx ? "TX" : "RX", ring->index);
	}
}
#else
static inline
    void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots)
{
}
#endif /* DEBUG */

/* Request a slot for usage. */
static inline int request_slot(struct b43_dmaring *ring)
{
	int slot;

	B43_WARN_ON(!ring->tx);
	B43_WARN_ON(ring->stopped);
	B43_WARN_ON(free_slots(ring) == 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	update_max_used_slots(ring, ring->used_slots);

	return slot;
}

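/* Return the MMIO base offset of the 32-bit or 64-bit DMA controller
 * with the given index. */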
static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx)
{
	static const u16 map64[] = {
		B43_MMIO_DMA64_BASE0,
		B43_MMIO_DMA64_BASE1,
		B43_MMIO_DMA64_BASE2,
		B43_MMIO_DMA64_BASE3,
		B43_MMIO_DMA64_BASE4,
		B43_MMIO_DMA64_BASE5,
	};
	static const u16 map32[] = {
		B43_MMIO_DMA32_BASE0,
		B43_MMIO_DMA32_BASE1,
		B43_MMIO_DMA32_BASE2,
		B43_MMIO_DMA32_BASE3,
		B43_MMIO_DMA32_BASE4,
		B43_MMIO_DMA32_BASE5,
	};

	if (type == B43_DMA_64BIT) {
		B43_WARN_ON(!(controller_idx >= 0 &&
			      controller_idx < ARRAY_SIZE(map64)));
		return map64[controller_idx];
	}
	B43_WARN_ON(!(controller_idx >= 0 &&
		      controller_idx < ARRAY_SIZE(map32)));
	return map32[controller_idx];
}

static inline
    dma_addr_t map_descbuffer(struct b43_dmaring *ring,
			      unsigned char *buf, size_t len, int tx)
{
	dma_addr_t dmaaddr;

	if (tx) {
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len, DMA_TO_DEVICE);
	} else {
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len, DMA_FROM_DEVICE);
	}

	return dmaaddr;
}

static inline
    void unmap_descbuffer(struct b43_dmaring *ring,
			  dma_addr_t addr, size_t len, int tx)
{
	if (tx) {
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len, DMA_TO_DEVICE);
	} else {
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len, DMA_FROM_DEVICE);
	}
}

static inline
    void sync_descbuffer_for_cpu(struct b43_dmaring *ring,
				 dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
				addr, len, DMA_FROM_DEVICE);
}

static inline
    void sync_descbuffer_for_device(struct b43_dmaring *ring,
				    dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_device(ring->dev->dev->dma_dev,
				   addr, len, DMA_FROM_DEVICE);
}

static inline
    void free_descriptor_buffer(struct b43_dmaring *ring,
				struct b43_dmadesc_meta *meta)
{
	if (meta->skb) {
		dev_kfree_skb_any(meta->skb);
		meta->skb = NULL;
	}
}

static int alloc_ringmemory(struct b43_dmaring *ring)
{
	gfp_t flags = GFP_KERNEL;

	/* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
	 * alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing
	 * has shown that 4K is sufficient for the latter as long as the buffer
	 * does not cross an 8K boundary.
	 *
	 * For unknown reasons - possibly a hardware error - the BCM4311 rev
	 * 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
	 * which accounts for the GFP_DMA flag below.
	 *
	 * The flags here must match the flags in free_ringmemory below!
	 */
	if (ring->type == B43_DMA_64BIT)
		flags |= GFP_DMA;
	ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
					    B43_DMA_RINGMEMSIZE,
					    &(ring->dmabase), flags);
	if (!ring->descbase) {
		b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
		return -ENOMEM;
	}
	memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE);

	return 0;
}

static void free_ringmemory(struct b43_dmaring *ring)
{
	dma_free_coherent(ring->dev->dev->dma_dev, B43_DMA_RINGMEMSIZE,
			  ring->descbase, ring->dmabase);
}

/* Reset the RX DMA channel */
static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXSTATUS :
						   B43_DMA32_RXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_RXSTAT;
			if (value == B43_DMA64_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_RXSTATE;
			if (value == B43_DMA32_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA RX reset timed out\n");
		return -ENODEV;
	}

	return 0;
}

/* Reset the TX DMA channel */
static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED ||
			    value == B43_DMA64_TXSTAT_IDLEWAIT ||
			    value == B43_DMA64_TXSTAT_STOPPED)
				break;
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED ||
			    value == B43_DMA32_TXSTAT_IDLEWAIT ||
			    value == B43_DMA32_TXSTAT_STOPPED)
				break;
		}
		msleep(1);
	}
	offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}

/* Check if a DMA mapping address is invalid. */
static bool b43_dma_mapping_error(struct b43_dmaring *ring,
				  dma_addr_t addr,
				  size_t buffersize, bool dma_to_device)
{
	if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
		return 1;

	switch (ring->type) {
	case B43_DMA_30BIT:
		if ((u64)addr + buffersize > (1ULL << 30))
			goto address_error;
		break;
	case B43_DMA_32BIT:
		if ((u64)addr + buffersize > (1ULL << 32))
			goto address_error;
		break;
	case B43_DMA_64BIT:
		/* Currently we can't have addresses beyond
		 * 64bit in the kernel. */
		break;
	}

	/* The address is OK. */
	return 0;

address_error:
	/* We can't support this address. Unmap it again. */
	unmap_descbuffer(ring, addr, buffersize, dma_to_device);

	return 1;
}

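/* Check whether the poison pattern written by b43_poison_rx_buffer() is
 * still intact, i.e. the device never DMA'd a frame into this buffer. */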
static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
{
	unsigned char *f = skb->data + ring->frameoffset;

	return ((f[0] & f[1] & f[2] & f[3] & f[4] & f[5] & f[6] & f[7]) == 0xFF);
}

static void b43_poison_rx_buffer(struct b43_dmaring *ring, struct sk_buff *skb)
{
	struct b43_rxhdr_fw4 *rxhdr;
	unsigned char *frame;

	/* This poisons the RX buffer to detect DMA failures. */

	rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
	rxhdr->frame_len = 0;

	B43_WARN_ON(ring->rx_buffersize < ring->frameoffset + sizeof(struct b43_plcp_hdr6) + 2);
	frame = skb->data + ring->frameoffset;
	memset(frame, 0xFF, sizeof(struct b43_plcp_hdr6) + 2 /* padding */);
}

static int setup_rx_descbuffer(struct b43_dmaring *ring,
			       struct b43_dmadesc_generic *desc,
			       struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
{
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	B43_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	b43_poison_rx_buffer(ring, skb);
	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		b43_poison_rx_buffer(ring, skb);
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
		if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
			b43err(ring->dev->wl, "RX DMA buffer allocation failed\n");
			dev_kfree_skb_any(skb);
			return -EIO;
		}
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	ring->ops->fill_descriptor(ring, desc, dmaaddr,
				   ring->rx_buffersize, 0, 0, 0);

	return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43_dmaring *ring)
{
	int i, err = -ENOMEM;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43err(ring->dev->wl,
			       "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb();
	ring->used_slots = ring->nr_slots;
	err = 0;
      out:
	return err;

      err_unwind:
	for (i--; i >= 0; i--) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	u32 trans = ring->dev->dma.translation;
	bool parity = ring->dev->dma.parity;

	if (ring->tx) {
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43_DMA64_TXENABLE;
			value |= (addrext << B43_DMA64_TXADDREXT_SHIFT)
			    & B43_DMA64_TXADDREXT_MASK;
			if (!parity)
				value |= B43_DMA64_TXPARITYDISABLE;
			b43_dma_write(ring, B43_DMA64_TXCTL, value);
			b43_dma_write(ring, B43_DMA64_TXRINGLO,
				      (ringbase & 0xFFFFFFFF));
			b43_dma_write(ring, B43_DMA64_TXRINGHI,
				      ((ringbase >> 32) &
				       ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
		} else {
			u32 ringbase = (u32) (ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43_DMA32_TXENABLE;
			value |= (addrext << B43_DMA32_TXADDREXT_SHIFT)
			    & B43_DMA32_TXADDREXT_MASK;
			if (!parity)
				value |= B43_DMA32_TXPARITYDISABLE;
			b43_dma_write(ring, B43_DMA32_TXCTL, value);
			b43_dma_write(ring, B43_DMA32_TXRING,
				      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
		}
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT);
			value |= B43_DMA64_RXENABLE;
			value |= (addrext << B43_DMA64_RXADDREXT_SHIFT)
			    & B43_DMA64_RXADDREXT_MASK;
			if (!parity)
				value |= B43_DMA64_RXPARITYDISABLE;
			b43_dma_write(ring, B43_DMA64_RXCTL, value);
			b43_dma_write(ring, B43_DMA64_RXRINGLO,
				      (ringbase & 0xFFFFFFFF));
			b43_dma_write(ring, B43_DMA64_RXRINGHI,
				      ((ringbase >> 32) &
				       ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
			b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc64));
		} else {
			u32 ringbase = (u32) (ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT);
			value |= B43_DMA32_RXENABLE;
			value |= (addrext << B43_DMA32_RXADDREXT_SHIFT)
			    & B43_DMA32_RXADDREXT_MASK;
			if (!parity)
				value |= B43_DMA32_RXPARITYDISABLE;
			b43_dma_write(ring, B43_DMA32_RXCTL, value);
			b43_dma_write(ring, B43_DMA32_RXRING,
				      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
			b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc32));
		}
	}

out:
	return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43_dmaring *ring)
{
	if (ring->tx) {
		b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_TXRING, 0);
	} else {
		b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_RXRING, 0);
	}
}

static void free_all_descbuffers(struct b43_dmaring *ring)
{
	struct b43_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		/* get meta - ignore returned value */
		ring->ops->idx2desc(ring, i, &meta);

		if (!meta->skb || b43_dma_ptr_is_poisoned(meta->skb)) {
			B43_WARN_ON(!ring->tx);
			continue;
		}
		if (ring->tx) {
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		}
		free_descriptor_buffer(ring, meta);
	}
}

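/* Probe the DMA address width supported by the core: 64-bit if the core
 * flags advertise DMA64, otherwise 32-bit if the address-extension bits of
 * the first 32-bit TX engine are writable, otherwise only 30-bit. */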
static u64 supported_dma_mask(struct b43_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	tmp = b43_read32(dev, SSB_TMSHIGH);
	if (tmp & SSB_TMSHIGH_DMA64)
		return DMA_BIT_MASK(64);
	mmio_base = b43_dmacontroller_base(0, 0);
	b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
	tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
	if (tmp & B43_DMA32_TXADDREXT_MASK)
		return DMA_BIT_MASK(32);

	return DMA_BIT_MASK(30);
}

static enum b43_dmatype dma_mask_to_engine_type(u64 dmamask)
{
	if (dmamask == DMA_BIT_MASK(30))
		return B43_DMA_30BIT;
	if (dmamask == DMA_BIT_MASK(32))
		return B43_DMA_32BIT;
	if (dmamask == DMA_BIT_MASK(64))
		return B43_DMA_64BIT;
	B43_WARN_ON(1);
	return B43_DMA_30BIT;
}

/* Main initialization function. */
static
struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
				      int controller_index,
				      int for_tx,
				      enum b43_dmatype type)
{
	struct b43_dmaring *ring;
	int i, err;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;

	ring->nr_slots = B43_RXRING_SLOTS;
	if (for_tx)
		ring->nr_slots = B43_TXRING_SLOTS;

	ring->meta = kcalloc(ring->nr_slots, sizeof(struct b43_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;
	for (i = 0; i < ring->nr_slots; i++)
		ring->meta->skb = B43_DMA_PTR_POISON;

	ring->type = type;
	ring->dev = dev;
	ring->mmio_base = b43_dmacontroller_base(type, controller_index);
	ring->index = controller_index;
	if (type == B43_DMA_64BIT)
		ring->ops = &dma64_ops;
	else
		ring->ops = &dma32_ops;
	if (for_tx) {
		ring->tx = 1;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			switch (dev->fw.hdr_format) {
			case B43_FW_HDR_598:
				ring->rx_buffersize = B43_DMA0_RX_FW598_BUFSIZE;
				ring->frameoffset = B43_DMA0_RX_FW598_FO;
				break;
			case B43_FW_HDR_410:
			case B43_FW_HDR_351:
				ring->rx_buffersize = B43_DMA0_RX_FW351_BUFSIZE;
				ring->frameoffset = B43_DMA0_RX_FW351_FO;
				break;
			}
		} else
			B43_WARN_ON(1);
	}
#ifdef CONFIG_B43_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	if (for_tx) {
		/* Assumption: B43_TXRING_SLOTS can be divided by TX_SLOTS_PER_FRAME */
		BUILD_BUG_ON(B43_TXRING_SLOTS % TX_SLOTS_PER_FRAME != 0);

		ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
					    b43_txhdr_size(dev),
					    GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = dma_map_single(dev->dev->dma_dev,
					  ring->txhdr_cache,
					  b43_txhdr_size(dev),
					  DMA_TO_DEVICE);

		if (b43_dma_mapping_error(ring, dma_test,
					  b43_txhdr_size(dev), 1)) {
			/* ugh realloc */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
						    b43_txhdr_size(dev),
						    GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = dma_map_single(dev->dev->dma_dev,
						  ring->txhdr_cache,
						  b43_txhdr_size(dev),
						  DMA_TO_DEVICE);

			if (b43_dma_mapping_error(ring, dma_test,
						  b43_txhdr_size(dev), 1)) {

				b43err(dev->wl,
				       "TXHDR DMA allocation failed\n");
				goto err_kfree_txhdr_cache;
			}
		}

		dma_unmap_single(dev->dev->dma_dev,
				 dma_test, b43_txhdr_size(dev),
				 DMA_TO_DEVICE);
	}

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

      out:
	return ring;

      err_free_ringmemory:
	free_ringmemory(ring);
      err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
      err_kfree_meta:
	kfree(ring->meta);
      err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}

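/* do_div() divides the 64-bit dividend in place and returns the remainder,
 * so these helpers provide 64-bit division/modulo that also works on
 * 32-bit architectures. */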
#define divide(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
	__a;			\
  })

#define modulo(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
  })

/* Main cleanup function. */
static void b43_destroy_dmaring(struct b43_dmaring *ring,
				const char *ringname)
{
	if (!ring)
		return;

#ifdef CONFIG_B43_DEBUG
	{
		/* Print some statistics. */
		u64 failed_packets = ring->nr_failed_tx_packets;
		u64 succeed_packets = ring->nr_succeed_tx_packets;
		u64 nr_packets = failed_packets + succeed_packets;
		u64 permille_failed = 0, average_tries = 0;

		if (nr_packets)
			permille_failed = divide(failed_packets * 1000, nr_packets);
		if (nr_packets)
			average_tries = divide(ring->nr_total_packet_tries * 100, nr_packets);

		b43dbg(ring->dev->wl, "DMA-%u %s: "
		       "Used slots %d/%d, Failed frames %llu/%llu = %llu.%01llu%%, "
		       "Average tries %llu.%02llu\n",
		       (unsigned int)(ring->type), ringname,
		       ring->max_used_slots,
		       ring->nr_slots,
		       (unsigned long long)failed_packets,
		       (unsigned long long)nr_packets,
		       (unsigned long long)divide(permille_failed, 10),
		       (unsigned long long)modulo(permille_failed, 10),
		       (unsigned long long)divide(average_tries, 100),
		       (unsigned long long)modulo(average_tries, 100));
	}
#endif /* DEBUG */

	/* Device IRQs are disabled prior to entering this function,
	 * so no need to take care of concurrency with rx handler stuff.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}

#define destroy_ring(dma, ring) do {				\
	b43_destroy_dmaring((dma)->ring, __stringify(ring));	\
	(dma)->ring = NULL;					\
    } while (0)

void b43_dma_free(struct b43_wldev *dev)
{
	struct b43_dma *dma;

	if (b43_using_pio_transfers(dev))
		return;
	dma = &dev->dma;

	destroy_ring(dma, rx_ring);
	destroy_ring(dma, tx_ring_AC_BK);
	destroy_ring(dma, tx_ring_AC_BE);
	destroy_ring(dma, tx_ring_AC_VI);
	destroy_ring(dma, tx_ring_AC_VO);
	destroy_ring(dma, tx_ring_mcast);
}

static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
{
	u64 orig_mask = mask;
	bool fallback = 0;
	int err;

	/* Try to set the DMA mask. If it fails, try falling back to a
	 * lower mask, as we can always also support a lower one. */
	while (1) {
		err = dma_set_mask(dev->dev->dma_dev, mask);
		if (!err) {
			err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
			if (!err)
				break;
		}
		if (mask == DMA_BIT_MASK(64)) {
			mask = DMA_BIT_MASK(32);
			fallback = 1;
			continue;
		}
		if (mask == DMA_BIT_MASK(32)) {
			mask = DMA_BIT_MASK(30);
			fallback = 1;
			continue;
		}
		b43err(dev->wl, "The machine/kernel does not support "
		       "the required %u-bit DMA mask\n",
		       (unsigned int)dma_mask_to_engine_type(orig_mask));
		return -EOPNOTSUPP;
	}
	if (fallback) {
		b43info(dev->wl, "DMA mask fallback from %u-bit to %u-bit\n",
			(unsigned int)dma_mask_to_engine_type(orig_mask),
			(unsigned int)dma_mask_to_engine_type(mask));
	}

	return 0;
}

int b43_dma_init(struct b43_wldev *dev)
{
	struct b43_dma *dma = &dev->dma;
	int err;
	u64 dmamask;
	enum b43_dmatype type;

	dmamask = supported_dma_mask(dev);
	type = dma_mask_to_engine_type(dmamask);
	err = b43_dma_set_mask(dev, dmamask);
	if (err)
		return err;

	switch (dev->dev->bus_type) {
#ifdef CONFIG_B43_BCMA
	case B43_BUS_BCMA:
		dma->translation = bcma_core_dma_translation(dev->dev->bdev);
		break;
#endif
#ifdef CONFIG_B43_SSB
	case B43_BUS_SSB:
		dma->translation = ssb_dma_translation(dev->dev->sdev);
		break;
#endif
	}

	dma->parity = true;
#ifdef CONFIG_B43_BCMA
	/* TODO: find out which SSB devices need disabling parity */
	if (dev->dev->bus_type == B43_BUS_BCMA)
		dma->parity = false;
#endif

	err = -ENOMEM;
	/* setup TX DMA channels. */
	dma->tx_ring_AC_BK = b43_setup_dmaring(dev, 0, 1, type);
	if (!dma->tx_ring_AC_BK)
		goto out;

	dma->tx_ring_AC_BE = b43_setup_dmaring(dev, 1, 1, type);
	if (!dma->tx_ring_AC_BE)
		goto err_destroy_bk;

	dma->tx_ring_AC_VI = b43_setup_dmaring(dev, 2, 1, type);
	if (!dma->tx_ring_AC_VI)
		goto err_destroy_be;

	dma->tx_ring_AC_VO = b43_setup_dmaring(dev, 3, 1, type);
	if (!dma->tx_ring_AC_VO)
		goto err_destroy_vi;

	dma->tx_ring_mcast = b43_setup_dmaring(dev, 4, 1, type);
	if (!dma->tx_ring_mcast)
		goto err_destroy_vo;

	/* setup RX DMA channel. */
	dma->rx_ring = b43_setup_dmaring(dev, 0, 0, type);
	if (!dma->rx_ring)
		goto err_destroy_mcast;

	/* No support for the TX status DMA ring. */
	B43_WARN_ON(dev->dev->core_rev < 5);

	b43dbg(dev->wl, "%u-bit DMA initialized\n",
	       (unsigned int)type);
	err = 0;
out:
	return err;

err_destroy_mcast:
	destroy_ring(dma, tx_ring_mcast);
err_destroy_vo:
	destroy_ring(dma, tx_ring_AC_VO);
err_destroy_vi:
	destroy_ring(dma, tx_ring_AC_VI);
err_destroy_be:
	destroy_ring(dma, tx_ring_AC_BE);
err_destroy_bk:
	destroy_ring(dma, tx_ring_AC_BK);
	return err;
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43_dmaring *ring, int slot)
{
	u16 cookie;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in RX path.
	 * It can also not be 0xFFFF because that is special
	 * for multicast frames.
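	 *
	 * For example, ring->index 1 (the AC_BE controller) and slot 5
	 * yield cookie 0x2005; parse_cookie() below reverses this mapping.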
	 */
	cookie = (((u16)ring->index + 1) << 12);
	B43_WARN_ON(slot & ~0x0FFF);
	cookie |= (u16)slot;

	return cookie;
}

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
{
	struct b43_dma *dma = &dev->dma;
	struct b43_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0x1000:
		ring = dma->tx_ring_AC_BK;
		break;
	case 0x2000:
		ring = dma->tx_ring_AC_BE;
		break;
	case 0x3000:
		ring = dma->tx_ring_AC_VI;
		break;
	case 0x4000:
		ring = dma->tx_ring_AC_VO;
		break;
	case 0x5000:
		ring = dma->tx_ring_mcast;
		break;
	}
	*slot = (cookie & 0x0FFF);
	if (unlikely(!ring || *slot < 0 || *slot >= ring->nr_slots)) {
		b43dbg(dev->wl, "TX-status contains "
		       "invalid cookie: 0x%04X\n", cookie);
		return NULL;
	}

	return ring;
}

static int dma_tx_fragment(struct b43_dmaring *ring,
			   struct sk_buff *skb)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct b43_private_tx_info *priv_info = b43_get_priv_tx_info(info);
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_dmadesc_meta *meta_hdr;
	u16 cookie;
	size_t hdrsize = b43_txhdr_size(ring->dev);

	/* Important note: If the number of used DMA slots per TX frame
	 * is changed here, the TX_SLOTS_PER_FRAME definition at the top of
	 * the file has to be updated, too!
	 */

	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[(slot / TX_SLOTS_PER_FRAME) * hdrsize]);
	cookie = generate_cookie(ring, slot);
	err = b43_generate_txhdr(ring->dev, header,
				 skb, info, cookie);
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   hdrsize, 1);
	if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize, 1)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     hdrsize, 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	meta->skb = skb;
	meta->is_last_fragment = 1;
	priv_info->bouncebuffer = NULL;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
		priv_info->bouncebuffer = kmemdup(skb->data, skb->len,
						  GFP_ATOMIC | GFP_DMA);
		if (!priv_info->bouncebuffer) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		meta->dmaaddr = map_descbuffer(ring, priv_info->bouncebuffer, skb->len, 1);
		if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
			kfree(priv_info->bouncebuffer);
			priv_info->bouncebuffer = NULL;
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_unmap_hdr;
		}
	}

	ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);

	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* Tell the firmware about the cookie of the last
		 * mcast frame, so it can clear the more-data bit in it. */
		b43_shm_write16(ring->dev, B43_SHM_SHARED,
				B43_SHM_SH_MCASTCOOKIE, cookie);
	}
	/* Now transfer the whole frame. */
	wmb();
	ops->poke_tx(ring, next_slot(ring, slot));
	return 0;

out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 hdrsize, 1);
	return err;
}

static inline int should_inject_overflow(struct b43_dmaring *ring)
{
#ifdef CONFIG_B43_DEBUG
	if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43dbg(ring->dev->wl,
			       "Injecting TX ring overflow on "
			       "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_B43_DEBUG */
	return 0;
}

/* Static mapping of mac80211's queues (priorities) to b43 DMA rings. */
static struct b43_dmaring *select_ring_by_priority(struct b43_wldev *dev,
						   u8 queue_prio)
{
	struct b43_dmaring *ring;

	if (dev->qos_enabled) {
		/* 0 = highest priority */
		switch (queue_prio) {
		default:
			B43_WARN_ON(1);
			/* fallthrough */
		case 0:
			ring = dev->dma.tx_ring_AC_VO;
			break;
		case 1:
			ring = dev->dma.tx_ring_AC_VI;
			break;
		case 2:
			ring = dev->dma.tx_ring_AC_BE;
			break;
		case 3:
			ring = dev->dma.tx_ring_AC_BK;
			break;
		}
	} else
		ring = dev->dma.tx_ring_AC_BE;

	return ring;
}

int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
{
	struct b43_dmaring *ring;
	struct ieee80211_hdr *hdr;
	int err = 0;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	hdr = (struct ieee80211_hdr *)skb->data;
	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* The multicast ring will be sent after the DTIM */
		ring = dev->dma.tx_ring_mcast;
		/* Set the more-data bit. Ucode will clear it on
		 * the last frame for us. */
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else {
		/* Decide by priority where to put this frame. */
		ring = select_ring_by_priority(
			dev, skb_get_queue_mapping(skb));
	}

	B43_WARN_ON(!ring->tx);

	if (unlikely(ring->stopped)) {
		/* We get here only because of a bug in mac80211.
		 * Because of a race, one packet may be queued after
		 * the queue is stopped, thus we got called when we shouldn't.
		 * For now, just refuse the transmit. */
		if (b43_debug(dev, B43_DBG_DMAVERBOSE))
			b43err(dev->wl, "Packet after queue stopped\n");
		err = -ENOSPC;
		goto out;
	}

	if (unlikely(WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME))) {
		/* If we get here, we have a real error with the queue
		 * full, but queues not stopped. */
		b43err(dev->wl, "DMA queue overflow\n");
		err = -ENOSPC;
		goto out;
	}

	/* Assign the queue number to the ring (if not already done before)
	 * so TX status handling can use it. The queue to ring mapping is
	 * static, so we don't need to store it per frame. */
	ring->queue_prio = skb_get_queue_mapping(skb);

	err = dma_tx_fragment(ring, skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		dev_kfree_skb_any(skb);
		err = 0;
		goto out;