/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 as published by the
 * Free Software Foundation.
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	size_t allocation;
	size_t boundary;
	char name[32];
	wait_queue_head_t waitq;
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

#define	POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)

static DEFINE_MUTEX(pools_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		spin_lock_irq(&pool->lock);
		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}
		spin_unlock_irq(&pool->lock);

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);
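
/*
 * Illustrative note (not part of the original file): with the attribute
 * above registered, reading a device's "pools" file produces one line per
 * pool in the format used by show_pools(), e.g. for a hypothetical device:
 *
 *	$ cat /sys/devices/.../pools
 *	poolinfo - 0.1
 *	buffer-2048        12   32 2048 16
 *	qh-pool             5  128   64  2
 *
 * i.e. pool name, blocks currently in use, total blocks, block size and
 * the number of pages backing the pool.
 */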

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;

	if (align == 0) {
		align = 1;
	} else if (align & (align - 1)) {
		return NULL;
	}

	if (size == 0) {
		return NULL;
	} else if (size < 4) {
		size = 4;
	}

	if ((size % align) != 0)
		size = ALIGN(size, align);

	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary) {
		boundary = allocation;
	} else if ((boundary < size) || (boundary & (boundary - 1))) {
		return NULL;
	}

	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;
	init_waitqueue_head(&retval->waitq);

	if (dev) {
		int ret;

		mutex_lock(&pools_lock);
		if (list_empty(&dev->dma_pools))
			ret = device_create_file(dev, &dev_attr_pools);
		else
			ret = 0;
		/* note:  not currently insisting "name" be unique */
		if (!ret)
			list_add(&retval->pools, &dev->dma_pools);
		else {
			kfree(retval);
			retval = NULL;
		}
		mutex_unlock(&pools_lock);
	} else
		INIT_LIST_HEAD(&retval->pools);

	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
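
/*
 * Usage sketch (illustrative only, not part of the original file): a driver
 * typically creates its pool once, e.g. at probe time.  The name, device,
 * block size and alignment below are hypothetical.
 *
 *	struct dma_pool *pool;
 *
 *	pool = dma_pool_create("foo-buf", &pdev->dev, 64, 8, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *
 * Passing 0 as @boundary means blocks only need to fit within a single
 * 'allocation' (at least a page), with no further crossing restriction.
 */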

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}
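
/*
 * Worked example (illustrative, not part of the original file): for a
 * hypothetical pool with size = 384, boundary = 1024 and allocation = 4096,
 * the loop above stores the offset of the next free block in the first int
 * of each free block, producing the chain
 *
 *	0 -> 384 -> 1024 -> 1408 -> 2048 -> 2432 -> 3072 -> 3456 -> 4096
 *
 * A third 384-byte block at offset 768 would cross the 1024-byte boundary,
 * so the chain jumps straight to the boundary instead.  An offset >=
 * pool->allocation marks the end of the list; dma_pool_alloc() pops blocks
 * from page->offset and dma_pool_free() pushes them back.
 */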

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef	DMAPOOL_DEBUG
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		list_add(&page->page_list, &pool->page_list);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline int is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef	DMAPOOL_DEBUG
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(page)) {
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				printk(KERN_ERR
				       "dma_pool_destroy %s, %p busy\n",
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);
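
/*
 * Teardown sketch (illustrative, hypothetical driver code): every block
 * obtained from the pool must already have been returned with
 * dma_pool_free() before the pool is destroyed, typically in the driver's
 * remove path:
 *
 *	dma_pool_free(pool, buf->vaddr, buf->dma);
 *	dma_pool_destroy(pool);
 */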

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	might_sleep_if(mem_flags & __GFP_WAIT);

	spin_lock_irqsave(&pool->lock, flags);
 restart:
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}
	page = pool_alloc_page(pool, GFP_ATOMIC);
	if (!page) {
		if (mem_flags & __GFP_WAIT) {
			DECLARE_WAITQUEUE(wait, current);

			__set_current_state(TASK_UNINTERRUPTIBLE);
			__add_wait_queue(&pool->waitq, &wait);
			spin_unlock_irqrestore(&pool->lock, flags);

			schedule_timeout(POOL_TIMEOUT_JIFFIES);

			spin_lock_irqsave(&pool->lock, flags);
			__remove_wait_queue(&pool->waitq, &wait);
			goto restart;
		}
		retval = NULL;
		goto done;
	}

 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	DMAPOOL_DEBUG
	memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
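
/*
 * Usage sketch (illustrative, hypothetical driver code): allocate one block
 * and hand its bus address to the hardware; keep both the kernel virtual
 * address and the dma_addr_t so the block can be freed later.
 *
 *	dma_addr_t dma;
 *	void *vaddr;
 *
 *	vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	if (!vaddr)
 *		return -ENOMEM;
 *	buf->vaddr = vaddr;	(the CPU uses this mapping)
 *	buf->dma = dma;		(the device is handed this address)
 */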

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			return page;
	}
	return NULL;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	spin_lock_irqsave(&pool->lock, flags);
	page = pool_find_page(pool, dma);
	if (!page) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	offset = vaddr - page->vaddr;
#ifdef	DMAPOOL_DEBUG
	if ((dma - page->dma) != offset) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
			       pool->name, vaddr, (unsigned long long)dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			spin_unlock_irqrestore(&pool->lock, flags);
			if (pool->dev)
				dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
					"already free\n", pool->name,
					(unsigned long long)dma);
			else
				printk(KERN_ERR "dma_pool_free %s, dma %Lx "
					"already free\n", pool->name,
					(unsigned long long)dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	if (waitqueue_active(&pool->waitq))
		wake_up_locked(&pool->waitq);
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
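
/*
 * Usage sketch (illustrative, hypothetical driver code): the same vaddr/dma
 * pair returned by dma_pool_alloc() is handed back once the device has
 * finished with the block, e.g. from a completion handler:
 *
 *	dma_pool_free(pool, buf->vaddr, buf->dma);
 */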

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
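
/*
 * Usage sketch (illustrative, hypothetical driver code): the managed
 * variant ties the pool's lifetime to the device, so the driver does not
 * need an explicit dma_pool_destroy() on its error or remove paths:
 *
 *	pool = dmam_pool_create("foo-buf", &pdev->dev, 64, 8, 0);
 *	if (!pool)
 *		return -ENOMEM;
 */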

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
	dma_pool_destroy(pool);
}
EXPORT_SYMBOL(dmam_pool_destroy);