/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 as published by the
 * Free Software Foundation.
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
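
/*
 * Illustrative usage sketch (added for exposition, not part of the original
 * file): a hypothetical driver carving fixed-size descriptors out of a pool
 * might look roughly like this; the pool name, sizes and 'dev' pointer are
 * assumptions for the example.
 *
 *	struct dma_pool *pool;
 *	dma_addr_t dma;
 *	void *vaddr;
 *
 *	pool = dma_pool_create("example", dev, 64, 16, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *	vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	if (vaddr) {
 *		... hand 'dma' to the device, touch the block through 'vaddr' ...
 *		dma_pool_free(pool, vaddr, dma);
 *	}
 *	dma_pool_destroy(pool);
 */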

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	size_t allocation;
	size_t boundary;
	char name[32];
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

static DEFINE_MUTEX(pools_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		spin_lock_irq(&pool->lock);
		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}
		spin_unlock_irq(&pool->lock);

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);
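
/*
 * For illustration only (not captured output): reading the resulting
 * "pools" attribute under the device's sysfs directory yields one header
 * line plus one line per pool, e.g.
 *
 *	poolinfo - 0.1
 *	example              5    8 2048  4
 *
 * i.e. pool name, blocks in use, total blocks, block size and pages held;
 * the name and numbers above are made up.
 */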

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;

	if (align == 0) {
		align = 1;
	} else if (align & (align - 1)) {
		return NULL;
	}

	if (size == 0) {
		return NULL;
	} else if (size < 4) {
		size = 4;
	}

	if ((size % align) != 0)
		size = ALIGN(size, align);

	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary) {
		boundary = allocation;
	} else if ((boundary < size) || (boundary & (boundary - 1))) {
		return NULL;
	}

	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;

	if (dev) {
		int ret;

		mutex_lock(&pools_lock);
		if (list_empty(&dev->dma_pools))
			ret = device_create_file(dev, &dev_attr_pools);
		else
			ret = 0;
		/* note:  not currently insisting "name" be unique */
		if (!ret)
			list_add(&retval->pools, &dev->dma_pools);
		else {
			kfree(retval);
			retval = NULL;
		}
		mutex_unlock(&pools_lock);
	} else
		INIT_LIST_HEAD(&retval->pools);

	return retval;
}
EXPORT_SYMBOL(dma_pool_create);

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}
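
/*
 * Worked example (exposition only, values made up): with size = 64 and
 * boundary = allocation = 4096, the loop above writes 64 into the first
 * word of the block at offset 0, 128 into the block at offset 64, and so
 * on, so each free block's first word is the offset of the next free
 * block; the chain terminates once an offset reaches 'allocation'.  With
 * a smaller boundary, say 1024, the candidate block at offset 960 is
 * skipped and the chain jumps straight to 1024, so no returned block
 * straddles a 1024-byte boundary.
 */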

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef	DMAPOOL_DEBUG
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline int is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef	DMAPOOL_DEBUG
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(page)) {
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				printk(KERN_ERR
				       "dma_pool_destroy %s, %p busy\n",
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	might_sleep_if(mem_flags & __GFP_WAIT);

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}

	/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
	spin_unlock_irqrestore(&pool->lock, flags);

	page = pool_alloc_page(pool, mem_flags);
	if (!page)
		return NULL;

	spin_lock_irqsave(&pool->lock, flags);

	list_add(&page->page_list, &pool->page_list);
 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	DMAPOOL_DEBUG
	{
		int i;
		u8 *data = retval;
		/* page->offset is stored in first 4 bytes */
		for (i = sizeof(page->offset); i < pool->size; i++) {
			if (data[i] == POOL_POISON_FREED)
				continue;
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_alloc %s, %p (corrupted)\n",
					pool->name, retval);
			else
				pr_err("dma_pool_alloc %s, %p (corrupted)\n",
					pool->name, retval);

			/*
			 * Dump the first 4 bytes even if they are not
			 * POOL_POISON_FREED
			 */
			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
					data, pool->size, 1);
			break;
		}
	}
	memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
	spin_unlock_irqrestore(&pool->lock, flags);
	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			return page;
	}
	return NULL;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	spin_lock_irqsave(&pool->lock, flags);
	page = pool_find_page(pool, dma);
	if (!page) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	offset = vaddr - page->vaddr;
#ifdef	DMAPOOL_DEBUG
	if ((dma - page->dma) != offset) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
			       pool->name, vaddr, (unsigned long long)dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			spin_unlock_irqrestore(&pool->lock, flags);
			if (pool->dev)
				dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
					"already free\n", pool->name,
					(unsigned long long)dma);
			else
				printk(KERN_ERR "dma_pool_free %s, dma %Lx "
					"already free\n", pool->name,
					(unsigned long long)dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
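
/*
 * Illustrative sketch (not part of the original file): from a hypothetical
 * probe() routine the managed variant needs no explicit cleanup, since the
 * devres action registered above destroys the pool on driver detach.  The
 * name, sizes and 'pdev' below are assumptions for the example.
 *
 *	pool = dmam_pool_create("example", &pdev->dev, 64, 16, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *	... then use dma_pool_alloc()/dma_pool_free() against it as usual ...
 */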

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
	dma_pool_destroy(pool);
}
EXPORT_SYMBOL(dmam_pool_destroy);