drm_mm.c 12.4 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is just an
 * unordered stack of free regions. This could easily be improved if an RB-tree
 * is used instead. At least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"
#include "drm_mm.h"
#include <linux/slab.h>
#include <linux/seq_file.h>

#define MM_UNUSED_TARGET 4

Dave Airlie's avatar
Dave Airlie committed
51
unsigned long drm_mm_tail_space(struct drm_mm *mm)
52
53
{
	struct list_head *tail_node;
Dave Airlie's avatar
Dave Airlie committed
54
	struct drm_mm_node *entry;
55
56

	tail_node = mm->ml_entry.prev;
Dave Airlie's avatar
Dave Airlie committed
57
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
58
59
60
61
62
63
	if (!entry->free)
		return 0;

	return entry->size;
}

Dave Airlie's avatar
Dave Airlie committed
64
int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
65
66
{
	struct list_head *tail_node;
Dave Airlie's avatar
Dave Airlie committed
67
	struct drm_mm_node *entry;
68
69

	tail_node = mm->ml_entry.prev;
Dave Airlie's avatar
Dave Airlie committed
70
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
71
72
73
74
75
76
77
78
79
80
	if (!entry->free)
		return -ENOMEM;

	if (entry->size <= size)
		return -ENOMEM;

	entry->size -= size;
	return 0;
}

81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
/*
 * Allocate a drm_mm_node, falling back to the pre-allocated pool in
 * mm->unused_nodes when the allocator cannot satisfy the request
 * (typical in atomic context).  Returns NULL when both sources fail.
 */
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
	struct drm_mm_node *node;

	node = kmalloc(sizeof(*node), atomic ? GFP_ATOMIC : GFP_KERNEL);
	if (likely(node != NULL))
		return node;

	/* Allocator failed: try to take a node out of the cache. */
	spin_lock(&mm->unused_lock);
	if (!list_empty(&mm->unused_nodes)) {
		node = list_entry(mm->unused_nodes.next,
				  struct drm_mm_node, fl_entry);
		list_del(&node->fl_entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);
	return node;
}

106
107
108
109
110
/* drm_mm_pre_get() - pre allocate drm_mm_node structure
 * drm_mm:	memory manager struct we are pre-allocating for
 *
 * Tops up mm->unused_nodes to MM_UNUSED_TARGET entries so that later
 * atomic allocations (drm_mm_kmalloc() with atomic != 0) have a
 * fallback pool to draw from.
 *
 * Returns 0 on success or -ENOMEM if allocation fails.
 */
int drm_mm_pre_get(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	spin_lock(&mm->unused_lock);
	while (mm->num_unused < MM_UNUSED_TARGET) {
		/* Drop the spinlock around the sleeping GFP_KERNEL
		 * allocation, then retake it to re-check the count. */
		spin_unlock(&mm->unused_lock);
		node = kmalloc(sizeof(*node), GFP_KERNEL);
		spin_lock(&mm->unused_lock);

		if (unlikely(node == NULL)) {
			/* Two cached nodes still cover the worst case of a
			 * single aligned allocation (block + alignment
			 * split-off), so only report failure when the pool
			 * is nearly empty. */
			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
			spin_unlock(&mm->unused_lock);
			return ret;
		}
		++mm->num_unused;
		list_add_tail(&node->fl_entry, &mm->unused_nodes);
	}
	spin_unlock(&mm->unused_lock);
	return 0;
}
EXPORT_SYMBOL(drm_mm_pre_get);
133

Dave Airlie's avatar
Dave Airlie committed
134
static int drm_mm_create_tail_node(struct drm_mm *mm,
135
136
				   unsigned long start,
				   unsigned long size, int atomic)
137
{
Dave Airlie's avatar
Dave Airlie committed
138
	struct drm_mm_node *child;
139

140
141
	child = drm_mm_kmalloc(mm, atomic);
	if (unlikely(child == NULL))
142
143
144
145
146
147
148
149
150
151
152
153
154
		return -ENOMEM;

	child->free = 1;
	child->size = size;
	child->start = start;
	child->mm = mm;

	list_add_tail(&child->ml_entry, &mm->ml_entry);
	list_add_tail(&child->fl_entry, &mm->fl_entry);

	return 0;
}

155
int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic)
156
157
{
	struct list_head *tail_node;
Dave Airlie's avatar
Dave Airlie committed
158
	struct drm_mm_node *entry;
159
160

	tail_node = mm->ml_entry.prev;
Dave Airlie's avatar
Dave Airlie committed
161
	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
162
	if (!entry->free) {
163
164
		return drm_mm_create_tail_node(mm, entry->start + entry->size,
					       size, atomic);
165
166
167
168
169
	}
	entry->size += size;
	return 0;
}

Dave Airlie's avatar
Dave Airlie committed
170
static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
171
172
						 unsigned long size,
						 int atomic)
173
{
Dave Airlie's avatar
Dave Airlie committed
174
	struct drm_mm_node *child;
175

176
177
	child = drm_mm_kmalloc(parent->mm, atomic);
	if (unlikely(child == NULL))
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
		return NULL;

	INIT_LIST_HEAD(&child->fl_entry);

	child->free = 0;
	child->size = size;
	child->start = parent->start;
	child->mm = parent->mm;

	list_add_tail(&child->ml_entry, &parent->ml_entry);
	INIT_LIST_HEAD(&child->fl_entry);

	parent->size -= size;
	parent->start += size;
	return child;
}


196
197
198
199
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
					     unsigned long size,
					     unsigned alignment,
					     int atomic)
200
201
{

Dave Airlie's avatar
Dave Airlie committed
202
	struct drm_mm_node *align_splitoff = NULL;
203
	unsigned tmp = 0;
204
205

	if (alignment)
206
		tmp = node->start % alignment;
207
208

	if (tmp) {
209
		align_splitoff =
210
		    drm_mm_split_at_start(node, alignment - tmp, atomic);
211
		if (unlikely(align_splitoff == NULL))
212
213
			return NULL;
	}
214

215
216
217
	if (node->size == size) {
		list_del_init(&node->fl_entry);
		node->free = 0;
218
	} else {
219
		node = drm_mm_split_at_start(node, size, atomic);
220
	}
221

222
223
	if (align_splitoff)
		drm_mm_put_block(align_splitoff);
224

225
	return node;
226
}
227
EXPORT_SYMBOL(drm_mm_get_block_generic);
228

229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
/*
 * Allocate @size bytes out of free node @node, restricted to the
 * [start, end] window and honouring @alignment.  Space skipped before
 * @start or for alignment is split off and returned to the free pool.
 * Returns the allocated node, or NULL on allocation failure.
 */
struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end,
						int atomic)
{
	struct drm_mm_node *align_splitoff = NULL;
	unsigned tmp = 0;
	/* Must be unsigned long: (start - node->start) is an unsigned
	 * long difference and would be truncated by an unsigned int on
	 * 64-bit for large address ranges. */
	unsigned long wasted = 0;

	/* Bytes below the window's start cannot be used. */
	if (node->start < start)
		wasted += start - node->start;
	if (alignment)
		tmp = ((node->start + wasted) % alignment);

	if (tmp)
		wasted += alignment - tmp;
	if (wasted) {
		align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
		if (unlikely(align_splitoff == NULL))
			return NULL;
	}

	if (node->size == size) {
		/* Exact fit: take the whole node off the free stack. */
		list_del_init(&node->fl_entry);
		node->free = 0;
	} else {
		node = drm_mm_split_at_start(node, size, atomic);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return node;
}
EXPORT_SYMBOL(drm_mm_get_block_range_generic);

267
268
269
270
271
/*
 * Put a block. Merge with the previous and / or next block if they are free.
 * Otherwise add to the free stack.
 */

void drm_mm_put_block(struct drm_mm_node *cur)
{

	struct drm_mm *mm = cur->mm;
	struct list_head *cur_head = &cur->ml_entry;
	struct list_head *root_head = &mm->ml_entry;
	struct drm_mm_node *prev_node = NULL;
	struct drm_mm_node *next_node;

	int merged = 0;

	/* Try to absorb cur into a free predecessor (skip if cur is the
	 * first node in the allocation list). */
	if (cur_head->prev != root_head) {
		prev_node =
		    list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
		if (prev_node->free) {
			prev_node->size += cur->size;
			merged = 1;
		}
	}
	/* Likewise look at the successor. */
	if (cur_head->next != root_head) {
		next_node =
		    list_entry(cur_head->next, struct drm_mm_node, ml_entry);
		if (next_node->free) {
			if (merged) {
				/* Both neighbours free: fold next_node into
				 * prev_node and recycle next_node's struct
				 * via the unused-node cache (or kfree when
				 * the cache is full). */
				prev_node->size += next_node->size;
				list_del(&next_node->ml_entry);
				list_del(&next_node->fl_entry);
				spin_lock(&mm->unused_lock);
				if (mm->num_unused < MM_UNUSED_TARGET) {
					list_add(&next_node->fl_entry,
						 &mm->unused_nodes);
					++mm->num_unused;
				} else
					kfree(next_node);
				spin_unlock(&mm->unused_lock);
			} else {
				/* Grow the free successor downwards over
				 * cur's range. */
				next_node->size += cur->size;
				next_node->start = cur->start;
				merged = 1;
			}
		}
	}
	if (!merged) {
		/* No free neighbour: cur itself becomes a free block. */
		cur->free = 1;
		list_add(&cur->fl_entry, &mm->fl_entry);
	} else {
		/* cur's space was absorbed by a neighbour; recycle or free
		 * the now-unused node structure. */
		list_del(&cur->ml_entry);
		spin_lock(&mm->unused_lock);
		if (mm->num_unused < MM_UNUSED_TARGET) {
			list_add(&cur->fl_entry, &mm->unused_nodes);
			++mm->num_unused;
		} else
			kfree(cur);
		spin_unlock(&mm->unused_lock);
	}
}

EXPORT_SYMBOL(drm_mm_put_block);
330

331
332
333
struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
				       unsigned long size,
				       unsigned alignment, int best_match)
334
335
{
	struct list_head *list;
336
	const struct list_head *free_stack = &mm->fl_entry;
Dave Airlie's avatar
Dave Airlie committed
337
338
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
339
	unsigned long best_size;
340
	unsigned wasted;
341
342
343
344
345

	best = NULL;
	best_size = ~0UL;

	list_for_each(list, free_stack) {
Dave Airlie's avatar
Dave Airlie committed
346
		entry = list_entry(list, struct drm_mm_node, fl_entry);
347
348
349
350
351
352
353
354
355
356
357
358
		wasted = 0;

		if (entry->size < size)
			continue;

		if (alignment) {
			register unsigned tmp = entry->start % alignment;
			if (tmp)
				wasted += alignment - tmp;
		}

		if (entry->size >= size + wasted) {
359
360
			if (!best_match)
				return entry;
361
			if (entry->size < best_size) {
362
363
364
365
366
367
368
369
				best = entry;
				best_size = entry->size;
			}
		}
	}

	return best;
}
370
EXPORT_SYMBOL(drm_mm_search_free);
371

372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
/*
 * Like drm_mm_search_free(), but only considers space lying inside
 * the [start, end] window.  With @best_match set, the smallest
 * suitable block is chosen; otherwise the first fit wins.  Returns
 * NULL when nothing fits.
 */
struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
						unsigned long size,
						unsigned alignment,
						unsigned long start,
						unsigned long end,
						int best_match)
{
	struct list_head *list;
	const struct list_head *free_stack = &mm->fl_entry;
	struct drm_mm_node *entry;
	struct drm_mm_node *best;
	unsigned long best_size;
	/* Must be unsigned long: (start - entry->start) is an unsigned
	 * long difference and would be truncated by an unsigned int on
	 * 64-bit for large address ranges. */
	unsigned long wasted;

	best = NULL;
	best_size = ~0UL;

	list_for_each(list, free_stack) {
		entry = list_entry(list, struct drm_mm_node, fl_entry);
		wasted = 0;

		if (entry->size < size)
			continue;

		/* Skip blocks entirely outside the requested window. */
		if (entry->start > end || (entry->start+entry->size) < start)
			continue;

		/* Bytes below the window's start cannot be used. */
		if (entry->start < start)
			wasted += start - entry->start;

		if (alignment) {
			unsigned tmp = (entry->start + wasted) % alignment;
			if (tmp)
				wasted += alignment - tmp;
		}

		if (entry->size >= size + wasted) {
			if (!best_match)
				return entry;
			if (entry->size < best_size) {
				best = entry;
				best_size = entry->size;
			}
		}
	}

	return best;
}
EXPORT_SYMBOL(drm_mm_search_free_in_range);

Dave Airlie's avatar
Dave Airlie committed
422
int drm_mm_clean(struct drm_mm * mm)
423
{
424
	struct list_head *head = &mm->ml_entry;
425

426
427
	return (head->next->next == head);
}
428
EXPORT_SYMBOL(drm_mm_clean);
429

Dave Airlie's avatar
Dave Airlie committed
430
/*
 * Initialize the memory manager to cover one free region of @size
 * bytes starting at @start.  Also sets up the empty unused-node cache.
 * Returns 0 on success or a negative errno from the node allocation.
 */
int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
	INIT_LIST_HEAD(&mm->ml_entry);
	INIT_LIST_HEAD(&mm->fl_entry);
	INIT_LIST_HEAD(&mm->unused_nodes);
	mm->num_unused = 0;
	spin_lock_init(&mm->unused_lock);

	return drm_mm_create_tail_node(mm, start, size, 0);
}
EXPORT_SYMBOL(drm_mm_init);
441

Dave Airlie's avatar
Dave Airlie committed
442
/*
 * Tear down a memory manager.  Only legal when the manager is clean
 * (one single free node); otherwise an error is logged and the
 * takedown is skipped.  Also drains the unused-node cache.
 */
void drm_mm_takedown(struct drm_mm * mm)
{
	struct list_head *bnode = mm->fl_entry.next;
	struct drm_mm_node *entry;
	struct drm_mm_node *next;

	entry = list_entry(bnode, struct drm_mm_node, fl_entry);

	/* A clean manager has exactly one node, which is both the sole
	 * member of the allocation list and of the free stack. */
	if (entry->ml_entry.next != &mm->ml_entry ||
	    entry->fl_entry.next != &mm->fl_entry) {
		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
		return;
	}

	list_del(&entry->fl_entry);
	list_del(&entry->ml_entry);
	kfree(entry);

	/* Free every node cached by drm_mm_pre_get()/drm_mm_put_block(). */
	spin_lock(&mm->unused_lock);
	list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) {
		list_del(&entry->fl_entry);
		kfree(entry);
		--mm->num_unused;
	}
	spin_unlock(&mm->unused_lock);

	BUG_ON(mm->num_unused != 0);
}
EXPORT_SYMBOL(drm_mm_takedown);
471

472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
/*
 * Dump every node of the manager to the kernel log, one line per node,
 * followed by a summary line.  Totals are kept in unsigned long (not
 * int) so they cannot overflow on managers larger than INT_MAX bytes,
 * and are printed with matching %lu conversions.
 */
void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	list_for_each_entry(entry, &mm->ml_entry, ml_entry) {
		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: %s\n",
			prefix, entry->start, entry->start + entry->size,
			entry->size, entry->free ? "free" : "used");
		total += entry->size;
		if (entry->free)
			total_free += entry->size;
		else
			total_used += entry->size;
	}
	printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
		total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_debug_table);

492
493
494
495
496
497
498
499
500
501
502
503
504
505
#if defined(CONFIG_DEBUG_FS)
/*
 * seq_file (debugfs) counterpart of drm_mm_debug_table().  Totals are
 * unsigned long (not int) so they cannot overflow on managers larger
 * than INT_MAX bytes, printed with matching %lu conversions.  Always
 * returns 0.
 */
int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
{
	struct drm_mm_node *entry;
	unsigned long total_used = 0, total_free = 0, total = 0;

	list_for_each_entry(entry, &mm->ml_entry, ml_entry) {
		seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: %s\n",
			   entry->start, entry->start + entry->size,
			   entry->size, entry->free ? "free" : "used");
		total += entry->size;
		if (entry->free)
			total_free += entry->size;
		else
			total_used += entry->size;
	}
	seq_printf(m, "total: %lu, used %lu free %lu\n", total,
		   total_used, total_free);
	return 0;
}
EXPORT_SYMBOL(drm_mm_dump_table);
#endif