/* dmapool.c */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <asm/io.h>		/* Needed for i386 to build */

/*
 * Pool allocator ... wraps the dma_alloc_coherent page allocator, so
 * small blocks are easily used by drivers for bus mastering controllers.
 * This should probably be sharing the guts of the slab allocator.
 */

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t blocks_per_page;
	size_t size;
	struct device *dev;
	size_t allocation;
	char name[32];
	wait_queue_head_t waitq;
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned in_use;
	unsigned long bitmap[0];
};
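
/*
 * Worked example (illustration only, not part of the original source): with
 * the default allocation of PAGE_SIZE (4096 bytes on most architectures) and
 * a block size of 64 bytes, blocks_per_page is 64, so each dma_page needs a
 * single unsigned long of bitmap on a 64-bit system (two on 32-bit) to track
 * which blocks are free.
 */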

#define	POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)

static DEFINE_MUTEX(pools_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
				 pool->name,
				 blocks, pages * pool->blocks_per_page,
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If allocation is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t allocation)
{
	struct dma_pool *retval;

	if (align == 0) {
		align = 1;
	} else if (align & (align - 1)) {
		return NULL;
	}

	if (size == 0)
		return NULL;

	if ((size % align) != 0)
		size = ALIGN(size, align);

	if (allocation == 0) {
		if (PAGE_SIZE < size)
			allocation = size;
		else
			allocation = PAGE_SIZE;
		/* FIXME: round up for less fragmentation */
	} else if (allocation < size)
		return NULL;

	retval = kmalloc_node(sizeof *retval, GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof retval->name);

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->allocation = allocation;
	retval->blocks_per_page = allocation / size;
	init_waitqueue_head(&retval->waitq);

	if (dev) {
		int ret;

		mutex_lock(&pools_lock);
		if (list_empty(&dev->dma_pools))
			ret = device_create_file(dev, &dev_attr_pools);
		else
			ret = 0;
		/* note:  not currently insisting "name" be unique */
		if (!ret)
			list_add(&retval->pools, &dev->dma_pools);
		else {
			kfree(retval);
			retval = NULL;
		}
		mutex_unlock(&pools_lock);
	} else
		INIT_LIST_HEAD(&retval->pools);

	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
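
/*
 * Example (a minimal sketch, not part of the original file): a driver that
 * needs many small coherent buffers might create a pool in its probe routine
 * and destroy it on remove.  The "mydev-desc" name, the &pdev->dev pointer
 * and the 64-byte block size are hypothetical.
 *
 *	struct dma_pool *pool;
 *
 *	pool = dma_pool_create("mydev-desc", &pdev->dev, 64, 8, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *	...
 *	dma_pool_destroy(pool);
 */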

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;
	int mapsize;

	mapsize = pool->blocks_per_page;
	mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG;
	mapsize *= sizeof(long);

	page = kmalloc(mapsize + sizeof *page, mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev,
					 pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
		memset(page->bitmap, 0xff, mapsize);	/* bit set == free */
#ifdef	CONFIG_DEBUG_SLAB
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		list_add(&page->page_list, &pool->page_list);
		page->in_use = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline int is_page_busy(int blocks, unsigned long *bitmap)
{
	while (blocks > 0) {
		if (*bitmap++ != ~0UL)
			return 1;
		blocks -= BITS_PER_LONG;
	}
	return 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef	CONFIG_DEBUG_SLAB
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(pool->blocks_per_page, page->bitmap)) {
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				printk(KERN_ERR
				       "dma_pool_destroy %s, %p busy\n",
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, null is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	int map, block;
	size_t offset;
	void *retval;

	spin_lock_irqsave(&pool->lock, flags);
 restart:
	list_for_each_entry(page, &pool->page_list, page_list) {
		int i;
		/* only cachable accesses here ... */
		for (map = 0, i = 0;
		     i < pool->blocks_per_page; i += BITS_PER_LONG, map++) {
			if (page->bitmap[map] == 0)
				continue;
			block = ffz(~page->bitmap[map]);
			if ((i + block) < pool->blocks_per_page) {
				clear_bit(block, &page->bitmap[map]);
				offset = (BITS_PER_LONG * map) + block;
				offset *= pool->size;
				goto ready;
			}
		}
	}
	page = pool_alloc_page(pool, GFP_ATOMIC);
	if (!page) {
		if (mem_flags & __GFP_WAIT) {
			DECLARE_WAITQUEUE(wait, current);

			__set_current_state(TASK_INTERRUPTIBLE);
			__add_wait_queue(&pool->waitq, &wait);
			spin_unlock_irqrestore(&pool->lock, flags);

			schedule_timeout(POOL_TIMEOUT_JIFFIES);

			spin_lock_irqsave(&pool->lock, flags);
			__remove_wait_queue(&pool->waitq, &wait);
			goto restart;
		}
		retval = NULL;
		goto done;
	}

	clear_bit(0, &page->bitmap[0]);
	offset = 0;
 ready:
	page->in_use++;
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	CONFIG_DEBUG_SLAB
	memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
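
/*
 * Example (a minimal sketch, not part of the original file): allocating one
 * block from the pool created above and handing its bus address to the
 * device.  "cpu_addr" and "dma_handle" are hypothetical names.
 *
 *	dma_addr_t dma_handle;
 *	void *cpu_addr;
 *
 *	cpu_addr = dma_pool_alloc(pool, GFP_KERNEL, &dma_handle);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *
 *	The device then addresses the block through dma_handle, while the CPU
 *	uses cpu_addr.
 */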

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	unsigned long flags;
	struct dma_page *page;

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			goto done;
	}
	page = NULL;
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return page;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	int map, block;

	page = pool_find_page(pool, dma);
	if (!page) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	block = dma - page->dma;
	block /= pool->size;
	map = block / BITS_PER_LONG;
	block %= BITS_PER_LONG;

#ifdef	CONFIG_DEBUG_SLAB
	if (((dma - page->dma) + (void *)page->vaddr) != vaddr) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
			       pool->name, vaddr, (unsigned long long)dma);
		return;
	}
	if (page->bitmap[map] & (1UL << block)) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, dma %Lx already free\n",
				pool->name, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, dma %Lx already free\n",
			       pool->name, (unsigned long long)dma);
		return;
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	spin_lock_irqsave(&pool->lock, flags);
	page->in_use--;
	set_bit(block, &page->bitmap[map]);
	if (waitqueue_active(&pool->waitq))
		wake_up_locked(&pool->waitq);
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(bpp, page->bitmap)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
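
/*
 * Example (a minimal sketch, not part of the original file): returning the
 * block from the dma_pool_alloc() example once the device has finished with
 * it.  The same pool, cpu_addr and dma_handle are assumed.
 *
 *	dma_pool_free(pool, cpu_addr, dma_handle);
 */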

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
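
/*
 * Example (a minimal sketch, not part of the original file): with the managed
 * variant the pool is torn down automatically by devres on driver detach, so
 * no explicit dma_pool_destroy() is needed in the error or remove paths.
 * The names below are hypothetical.
 *
 *	pool = dmam_pool_create("mydev-desc", &pdev->dev, 64, 8, 0);
 *	if (!pool)
 *		return -ENOMEM;
 */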

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	dma_pool_destroy(pool);
	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);