/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 as published by the
 * Free Software Foundation.
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t blocks_per_page;
	size_t size;
	struct device *dev;
	size_t allocation;
	char name[32];
	wait_queue_head_t waitq;
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned in_use;
	unsigned long bitmap[0];
};

#define	POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)

static DEFINE_MUTEX(pools_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
				 pool->name,
				 blocks, pages * pool->blocks_per_page,
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If allocation is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t allocation)
{
	struct dma_pool *retval;

	if (align == 0) {
		align = 1;
	} else if (align & (align - 1)) {
		return NULL;
	}

	if (size == 0)
		return NULL;

	if ((size % align) != 0)
		size = ALIGN(size, align);

	if (allocation == 0) {
		if (PAGE_SIZE < size)
			allocation = size;
		else
			allocation = PAGE_SIZE;
		/* FIXME: round up for less fragmentation */
	} else if (allocation < size)
		return NULL;

	retval = kmalloc_node(sizeof *retval, GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof retval->name);

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->allocation = allocation;
	retval->blocks_per_page = allocation / size;
	init_waitqueue_head(&retval->waitq);

	if (dev) {
		int ret;

		mutex_lock(&pools_lock);
		if (list_empty(&dev->dma_pools))
			ret = device_create_file(dev, &dev_attr_pools);
		else
			ret = 0;
		/* note:  not currently insisting "name" be unique */
		if (!ret)
			list_add(&retval->pools, &dev->dma_pools);
		else {
			kfree(retval);
			retval = NULL;
		}
		mutex_unlock(&pools_lock);
	} else
		INIT_LIST_HEAD(&retval->pools);

	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
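
/*
 * Example (illustrative sketch only, not part of this file): a typical
 * driver creates one pool per fixed object size at probe time and tears
 * it down at remove time.  The device pointer "mydev" and the sizes used
 * here are hypothetical.
 *
 *	struct dma_pool *td_pool;
 *
 *	td_pool = dma_pool_create("td", mydev, 32, 16, 0);
 *	if (!td_pool)
 *		return -ENOMEM;
 *	...
 *	dma_pool_destroy(td_pool);
 */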

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;
	int mapsize;

	mapsize = pool->blocks_per_page;
	mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG;
	mapsize *= sizeof(long);

	page = kmalloc(mapsize + sizeof *page, mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev,
					 pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
		memset(page->bitmap, 0xff, mapsize);	/* bit set == free */
#ifdef	CONFIG_DEBUG_SLAB
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		list_add(&page->page_list, &pool->page_list);
		page->in_use = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline int is_page_busy(int blocks, unsigned long *bitmap)
{
	while (blocks > 0) {
		if (*bitmap++ != ~0UL)
			return 1;
		blocks -= BITS_PER_LONG;
	}
	return 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef	CONFIG_DEBUG_SLAB
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(pool->blocks_per_page, page->bitmap)) {
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				printk(KERN_ERR
				       "dma_pool_destroy %s, %p busy\n",
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	int map, block;
	size_t offset;
	void *retval;

	spin_lock_irqsave(&pool->lock, flags);
 restart:
	list_for_each_entry(page, &pool->page_list, page_list) {
		int i;
	/* only cacheable accesses here ... */
		for (map = 0, i = 0;
		     i < pool->blocks_per_page; i += BITS_PER_LONG, map++) {
			if (page->bitmap[map] == 0)
				continue;
			block = ffz(~page->bitmap[map]);
			if ((i + block) < pool->blocks_per_page) {
				clear_bit(block, &page->bitmap[map]);
				offset = (BITS_PER_LONG * map) + block;
				offset *= pool->size;
				goto ready;
			}
		}
	}
	page = pool_alloc_page(pool, GFP_ATOMIC);
	if (!page) {
		if (mem_flags & __GFP_WAIT) {
			DECLARE_WAITQUEUE(wait, current);

			__set_current_state(TASK_INTERRUPTIBLE);
			__add_wait_queue(&pool->waitq, &wait);
			spin_unlock_irqrestore(&pool->lock, flags);

			schedule_timeout(POOL_TIMEOUT_JIFFIES);

			spin_lock_irqsave(&pool->lock, flags);
			__remove_wait_queue(&pool->waitq, &wait);
			goto restart;
		}
		retval = NULL;
		goto done;
	}

	clear_bit(0, &page->bitmap[0]);
	offset = 0;
 ready:
	page->in_use++;
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	CONFIG_DEBUG_SLAB
	memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
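
/*
 * Example (illustrative sketch, hypothetical names): allocate one block
 * from the hypothetical "td_pool" above and keep both addresses; the CPU
 * uses "vaddr", the device is handed "dma_handle".
 *
 *	dma_addr_t dma_handle;
 *	void *vaddr;
 *
 *	vaddr = dma_pool_alloc(td_pool, GFP_KERNEL, &dma_handle);
 *	if (!vaddr)
 *		return -ENOMEM;
 */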

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	unsigned long flags;
	struct dma_page *page;

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			goto done;
	}
	page = NULL;
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return page;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	int map, block;

	page = pool_find_page(pool, dma);
	if (!page) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	block = dma - page->dma;
	block /= pool->size;
	map = block / BITS_PER_LONG;
	block %= BITS_PER_LONG;

#ifdef	CONFIG_DEBUG_SLAB
	if (((dma - page->dma) + (void *)page->vaddr) != vaddr) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
			       pool->name, vaddr, (unsigned long long)dma);
		return;
	}
	if (page->bitmap[map] & (1UL << block)) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, dma %Lx already free\n",
				pool->name, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, dma %Lx already free\n",
			       pool->name, (unsigned long long)dma);
		return;
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	spin_lock_irqsave(&pool->lock, flags);
	page->in_use--;
	set_bit(block, &page->bitmap[map]);
	if (waitqueue_active(&pool->waitq))
		wake_up_locked(&pool->waitq);
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(bpp, page->bitmap)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
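
/*
 * Example (illustrative, continuing the hypothetical sketch above): once
 * the hardware is done with the block, return it using the same virtual
 * and dma addresses that dma_pool_alloc() reported.
 *
 *	dma_pool_free(td_pool, vaddr, dma_handle);
 */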

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
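
/*
 * Example (illustrative sketch, hypothetical device pointer "pdev"): with
 * the managed variant the pool is released automatically by devres on
 * driver detach, so no explicit dma_pool_destroy() call is needed.
 *
 *	pool = dmam_pool_create("buf", &pdev->dev, 64, 8, 0);
 *	if (!pool)
 *		return -ENOMEM;
 */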

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	dma_pool_destroy(pool);
	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);