/*
 * SRAM allocator for Blackfin on-chip memory
 *
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/ioport.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/rtc.h>
#include <linux/slab.h>
#include <asm/blackfin.h>
#include <asm/mem_map.h>
#include "blackfin_sram.h"

/* the data structure used to track each piece of L1 scratchpad,
 * L1 data/instruction, and L2 SRAM
 */
struct sram_piece {
	void *paddr;
	int size;
	pid_t pid;
	struct sram_piece *next;
};
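
/*
 * Each SRAM region is managed with two singly linked lists headed by a
 * dummy sram_piece: a "free" list, kept sorted by ascending paddr so
 * neighbouring pieces can be coalesced when they are freed, and a
 * "used" list recording the owning pid of every allocation.  A purely
 * illustrative sketch (not real state) after one 16-byte allocation
 * from a 48-byte region starting at "base":
 *
 *	free_head.next -> { paddr = base + 16, size = 32, pid = 0 }
 *	used_head.next -> { paddr = base,      size = 16, pid = <caller> }
 */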

static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1sram_lock);
static DEFINE_PER_CPU(struct sram_piece, free_l1_ssram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_ssram_head);

#if L1_DATA_A_LENGTH != 0
static DEFINE_PER_CPU(struct sram_piece, free_l1_data_A_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_data_A_sram_head);
#endif

#if L1_DATA_B_LENGTH != 0
static DEFINE_PER_CPU(struct sram_piece, free_l1_data_B_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_data_B_sram_head);
#endif

#if L1_DATA_A_LENGTH || L1_DATA_B_LENGTH
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_data_sram_lock);
#endif

#if L1_CODE_LENGTH != 0
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_inst_sram_lock);
static DEFINE_PER_CPU(struct sram_piece, free_l1_inst_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_inst_sram_head);
#endif

#if L2_LENGTH != 0
static spinlock_t l2_sram_lock ____cacheline_aligned_in_smp;
static struct sram_piece free_l2_sram_head, used_l2_sram_head;
#endif

static struct kmem_cache *sram_piece_cache;

/* L1 Scratchpad SRAM initialization function */
static void __init l1sram_init(void)
{
	unsigned int cpu;
	unsigned long reserve;

#ifdef CONFIG_SMP
	reserve = 0;
#else
	reserve = sizeof(struct l1_scratch_task_info);
#endif

	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_ssram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_ssram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize Scratchpad data SRAM.\n");
			return;
		}

		per_cpu(free_l1_ssram_head, cpu).next->paddr =
			(void *)get_l1_scratch_start_cpu(cpu) + reserve;
		per_cpu(free_l1_ssram_head, cpu).next->size =
			L1_SCRATCH_LENGTH - reserve;
		per_cpu(free_l1_ssram_head, cpu).next->pid = 0;
		per_cpu(free_l1_ssram_head, cpu).next->next = NULL;

		per_cpu(used_l1_ssram_head, cpu).next = NULL;

		/* mutex initialize */
		spin_lock_init(&per_cpu(l1sram_lock, cpu));
		printk(KERN_INFO "Blackfin Scratchpad data SRAM: %d KB\n",
			L1_SCRATCH_LENGTH >> 10);
	}
}

static void __init l1_data_sram_init(void)
{
#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
	unsigned int cpu;
#endif
#if L1_DATA_A_LENGTH != 0
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_data_A_sram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_data_A_sram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize L1 Data A SRAM.\n");
			return;
		}

		per_cpu(free_l1_data_A_sram_head, cpu).next->paddr =
			(void *)get_l1_data_a_start_cpu(cpu) + (_ebss_l1 - _sdata_l1);
		per_cpu(free_l1_data_A_sram_head, cpu).next->size =
			L1_DATA_A_LENGTH - (_ebss_l1 - _sdata_l1);
		per_cpu(free_l1_data_A_sram_head, cpu).next->pid = 0;
		per_cpu(free_l1_data_A_sram_head, cpu).next->next = NULL;

		per_cpu(used_l1_data_A_sram_head, cpu).next = NULL;

		printk(KERN_INFO "Blackfin L1 Data A SRAM: %d KB (%d KB free)\n",
			L1_DATA_A_LENGTH >> 10,
			per_cpu(free_l1_data_A_sram_head, cpu).next->size >> 10);
	}
#endif
#if L1_DATA_B_LENGTH != 0
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_data_B_sram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_data_B_sram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize L1 Data B SRAM.\n");
			return;
		}

		per_cpu(free_l1_data_B_sram_head, cpu).next->paddr =
			(void *)get_l1_data_b_start_cpu(cpu) + (_ebss_b_l1 - _sdata_b_l1);
		per_cpu(free_l1_data_B_sram_head, cpu).next->size =
			L1_DATA_B_LENGTH - (_ebss_b_l1 - _sdata_b_l1);
		per_cpu(free_l1_data_B_sram_head, cpu).next->pid = 0;
		per_cpu(free_l1_data_B_sram_head, cpu).next->next = NULL;

		per_cpu(used_l1_data_B_sram_head, cpu).next = NULL;

		printk(KERN_INFO "Blackfin L1 Data B SRAM: %d KB (%d KB free)\n",
			L1_DATA_B_LENGTH >> 10,
			per_cpu(free_l1_data_B_sram_head, cpu).next->size >> 10);
	}
#endif

#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
		spin_lock_init(&per_cpu(l1_data_sram_lock, cpu));
#endif
}

static void __init l1_inst_sram_init(void)
{
#if L1_CODE_LENGTH != 0
	unsigned int cpu;

	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_inst_sram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_inst_sram_head, cpu).next) {
			printk(KERN_INFO "Failed to initialize L1 Instruction SRAM.\n");
			return;
		}

		per_cpu(free_l1_inst_sram_head, cpu).next->paddr =
			(void *)get_l1_code_start_cpu(cpu) + (_etext_l1 - _stext_l1);
		per_cpu(free_l1_inst_sram_head, cpu).next->size =
			L1_CODE_LENGTH - (_etext_l1 - _stext_l1);
		per_cpu(free_l1_inst_sram_head, cpu).next->pid = 0;
		per_cpu(free_l1_inst_sram_head, cpu).next->next = NULL;

		per_cpu(used_l1_inst_sram_head, cpu).next = NULL;

		printk(KERN_INFO "Blackfin L1 Instruction SRAM: %d KB (%d KB free)\n",
			L1_CODE_LENGTH >> 10,
			per_cpu(free_l1_inst_sram_head, cpu).next->size >> 10);

		/* mutex initialize */
		spin_lock_init(&per_cpu(l1_inst_sram_lock, cpu));
	}
#endif
}

static void __init l2_sram_init(void)
{
#if L2_LENGTH != 0
	free_l2_sram_head.next =
		kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
	if (!free_l2_sram_head.next) {
		printk(KERN_INFO "Failed to initialize L2 SRAM.\n");
		return;
	}

	free_l2_sram_head.next->paddr =
		(void *)L2_START + (_ebss_l2 - _stext_l2);
	free_l2_sram_head.next->size =
		L2_LENGTH - (_ebss_l2 - _stext_l2);
	free_l2_sram_head.next->pid = 0;
	free_l2_sram_head.next->next = NULL;

	used_l2_sram_head.next = NULL;

	printk(KERN_INFO "Blackfin L2 SRAM: %d KB (%d KB free)\n",
		L2_LENGTH >> 10,
		free_l2_sram_head.next->size >> 10);

	/* mutex initialize */
	spin_lock_init(&l2_sram_lock);
#endif
}

static int __init bfin_sram_init(void)
{
	sram_piece_cache = kmem_cache_create("sram_piece_cache",
				sizeof(struct sram_piece),
				0, SLAB_PANIC, NULL);

	l1sram_init();
	l1_data_sram_init();
	l1_inst_sram_init();
	l2_sram_init();

	return 0;
}
pure_initcall(bfin_sram_init);

/* SRAM allocate function */
static void *_sram_alloc(size_t size, struct sram_piece *pfree_head,
		struct sram_piece *pused_head)
{
	struct sram_piece *pslot, *plast, *pavail;

	if (size <= 0 || !pfree_head || !pused_head)
		return NULL;

	/* Align the size */
	size = (size + 3) & ~3;

	pslot = pfree_head->next;
	plast = pfree_head;

	/* search an available piece slot */
	while (pslot != NULL && size > pslot->size) {
		plast = pslot;
		pslot = pslot->next;
	}

	if (!pslot)
		return NULL;

	if (pslot->size == size) {
		plast->next = pslot->next;
		pavail = pslot;
	} else {
		pavail = kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);

		if (!pavail)
			return NULL;

		pavail->paddr = pslot->paddr;
		pavail->size = size;
		pslot->paddr += size;
		pslot->size -= size;
	}

	pavail->pid = current->pid;

	pslot = pused_head->next;
	plast = pused_head;

	/* insert new piece into used piece list !!! */
	while (pslot != NULL && pavail->paddr < pslot->paddr) {
		plast = pslot;
		pslot = pslot->next;
	}

	pavail->next = pslot;
	plast->next = pavail;

	return pavail->paddr;
}
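
/*
 * Illustrative example of the first-fit behaviour above (the numbers
 * are hypothetical, not real addresses): a request for 10 bytes is
 * rounded up to 12.  If the first free piece that fits is
 * { paddr = 0xff800000, size = 32 }, it is split into a used piece
 * { paddr = 0xff800000, size = 12, pid = caller } and a remaining free
 * piece { paddr = 0xff80000c, size = 20 }.
 */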

/* Allocate the largest available block.  */
static void *_sram_alloc_max(struct sram_piece *pfree_head,
				struct sram_piece *pused_head,
				unsigned long *psize)
{
	struct sram_piece *pslot, *pmax;

	if (!pfree_head || !pused_head)
		return NULL;

	pmax = pslot = pfree_head->next;

	/* search an available piece slot */
	while (pslot != NULL) {
		if (pslot->size > pmax->size)
			pmax = pslot;
		pslot = pslot->next;
	}

	if (!pmax)
		return NULL;

	*psize = pmax->size;

	return _sram_alloc(*psize, pfree_head, pused_head);
}

/* SRAM free function */
static int _sram_free(const void *addr,
			struct sram_piece *pfree_head,
			struct sram_piece *pused_head)
{
	struct sram_piece *pslot, *plast, *pavail;

	if (!pfree_head || !pused_head)
		return -1;

	/* search the relevant memory slot */
	pslot = pused_head->next;
	plast = pused_head;

	/* search an available piece slot */
	while (pslot != NULL && pslot->paddr != addr) {
		plast = pslot;
		pslot = pslot->next;
	}

	if (!pslot)
		return -1;

	plast->next = pslot->next;
	pavail = pslot;
	pavail->pid = 0;

	/* insert free pieces back to the free list */
	pslot = pfree_head->next;
	plast = pfree_head;

	while (pslot != NULL && addr > pslot->paddr) {
		plast = pslot;
		pslot = pslot->next;
	}

	if (plast != pfree_head && plast->paddr + plast->size == pavail->paddr) {
		plast->size += pavail->size;
		kmem_cache_free(sram_piece_cache, pavail);
	} else {
		pavail->next = plast->next;
		plast->next = pavail;
		plast = pavail;
	}

	if (pslot && plast->paddr + plast->size == pslot->paddr) {
		plast->size += pslot->size;
		plast->next = pslot->next;
		kmem_cache_free(sram_piece_cache, pslot);
	}

	return 0;
}
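
/*
 * Illustrative example of the coalescing above (hypothetical layout):
 * if the free list holds { 0x100, 16 } and { 0x130, 16 } and the used
 * piece at 0x110 (size 32) is freed, the freed piece is first merged
 * into its lower neighbour ({ 0x100, 48 }) and the upper neighbour is
 * then merged into that, leaving a single free piece { 0x100, 64 }.
 */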

int sram_free(const void *addr)
{

#if L1_CODE_LENGTH != 0
	if (addr >= (void *)get_l1_code_start()
		 && addr < (void *)(get_l1_code_start() + L1_CODE_LENGTH))
		return l1_inst_sram_free(addr);
	else
#endif
#if L1_DATA_A_LENGTH != 0
	if (addr >= (void *)get_l1_data_a_start()
		 && addr < (void *)(get_l1_data_a_start() + L1_DATA_A_LENGTH))
		return l1_data_A_sram_free(addr);
	else
#endif
#if L1_DATA_B_LENGTH != 0
	if (addr >= (void *)get_l1_data_b_start()
		 && addr < (void *)(get_l1_data_b_start() + L1_DATA_B_LENGTH))
		return l1_data_B_sram_free(addr);
	else
#endif
#if L2_LENGTH != 0
	if (addr >= (void *)L2_START
		 && addr < (void *)(L2_START + L2_LENGTH))
		return l2_sram_free(addr);
	else
#endif
		return -1;
}
EXPORT_SYMBOL(sram_free);

void *l1_data_A_sram_alloc(size_t size)
{
#if L1_DATA_A_LENGTH != 0
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* add mutex operation */
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_data_A_sram_head, cpu),
			&per_cpu(used_l1_data_A_sram_head, cpu));

	/* add mutex operation */
	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

	pr_debug("Allocated address in l1_data_A_sram_alloc is 0x%lx+0x%lx\n",
		 (long unsigned int)addr, size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l1_data_A_sram_alloc);

int l1_data_A_sram_free(const void *addr)
{
#if L1_DATA_A_LENGTH != 0
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* add mutex operation */
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_data_A_sram_head, cpu),
			&per_cpu(used_l1_data_A_sram_head, cpu));

	/* add mutex operation */
	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l1_data_A_sram_free);

void *l1_data_B_sram_alloc(size_t size)
{
#if L1_DATA_B_LENGTH != 0
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* add mutex operation */
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_data_B_sram_head, cpu),
			&per_cpu(used_l1_data_B_sram_head, cpu));

	/* add mutex operation */
	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

	pr_debug("Allocated address in l1_data_B_sram_alloc is 0x%lx+0x%lx\n",
		 (long unsigned int)addr, size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l1_data_B_sram_alloc);

int l1_data_B_sram_free(const void *addr)
{
#if L1_DATA_B_LENGTH != 0
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* add mutex operation */
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_data_B_sram_head, cpu),
			&per_cpu(used_l1_data_B_sram_head, cpu));

	/* add mutex operation */
	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l1_data_B_sram_free);

void *l1_data_sram_alloc(size_t size)
{
	void *addr = l1_data_A_sram_alloc(size);

	if (!addr)
		addr = l1_data_B_sram_alloc(size);

	return addr;
}
EXPORT_SYMBOL(l1_data_sram_alloc);

void *l1_data_sram_zalloc(size_t size)
{
	void *addr = l1_data_sram_alloc(size);

	if (addr)
		memset(addr, 0x00, size);

	return addr;
}
EXPORT_SYMBOL(l1_data_sram_zalloc);

int l1_data_sram_free(const void *addr)
{
	int ret;
	ret = l1_data_A_sram_free(addr);
	if (ret == -1)
		ret = l1_data_B_sram_free(addr);
	return ret;
}
EXPORT_SYMBOL(l1_data_sram_free);
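
/*
 * Typical use of the L1 data wrappers above (an illustrative sketch
 * only; the buffer name and size are made up, not part of this file):
 *
 *	short *coeffs = l1_data_sram_zalloc(64 * sizeof(*coeffs));
 *	if (!coeffs)
 *		return -ENOMEM;		(or fall back to kmalloc())
 *	...
 *	l1_data_sram_free(coeffs);
 */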

void *l1_inst_sram_alloc(size_t size)
{
#if L1_CODE_LENGTH != 0
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* add mutex operation */
	spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_inst_sram_head, cpu),
			&per_cpu(used_l1_inst_sram_head, cpu));

	/* add mutex operation */
	spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);

	pr_debug("Allocated address in l1_inst_sram_alloc is 0x%lx+0x%lx\n",
		 (long unsigned int)addr, size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l1_inst_sram_alloc);

int l1_inst_sram_free(const void *addr)
{
#if L1_CODE_LENGTH != 0
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* add mutex operation */
	spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_inst_sram_head, cpu),
			&per_cpu(used_l1_inst_sram_head, cpu));

	/* add mutex operation */
	spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l1_inst_sram_free);

/* L1 Scratchpad memory allocate function */
void *l1sram_alloc(size_t size)
{
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* add mutex operation */
	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_ssram_head, cpu),
			&per_cpu(used_l1_ssram_head, cpu));

	/* add mutex operation */
	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);

	return addr;
}

/* Allocate the largest available block of L1 Scratchpad memory */
void *l1sram_alloc_max(size_t *psize)
{
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* add mutex operation */
	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

	addr = _sram_alloc_max(&per_cpu(free_l1_ssram_head, cpu),
			&per_cpu(used_l1_ssram_head, cpu), psize);

	/* add mutex operation */
	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);

	return addr;
}

/* L1 Scratchpad memory free function */
int l1sram_free(const void *addr)
{
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* add mutex operation */
	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_ssram_head, cpu),
			&per_cpu(used_l1_ssram_head, cpu));

	/* add mutex operation */
	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);

	return ret;
}

void *l2_sram_alloc(size_t size)
{
#if L2_LENGTH != 0
	unsigned long flags;
	void *addr;

	/* add mutex operation */
	spin_lock_irqsave(&l2_sram_lock, flags);

	addr = _sram_alloc(size, &free_l2_sram_head,
			&used_l2_sram_head);

	/* add mutex operation */
	spin_unlock_irqrestore(&l2_sram_lock, flags);

	pr_debug("Allocated address in l2_sram_alloc is 0x%lx+0x%lx\n",
		 (long unsigned int)addr, size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l2_sram_alloc);

void *l2_sram_zalloc(size_t size)
{
	void *addr = l2_sram_alloc(size);

	if (addr)
		memset(addr, 0x00, size);

	return addr;
}
EXPORT_SYMBOL(l2_sram_zalloc);

int l2_sram_free(const void *addr)
{
#if L2_LENGTH != 0
	unsigned long flags;
	int ret;

	/* add mutex operation */
	spin_lock_irqsave(&l2_sram_lock, flags);

	ret = _sram_free(addr, &free_l2_sram_head,
			&used_l2_sram_head);

	/* add mutex operation */
	spin_unlock_irqrestore(&l2_sram_lock, flags);

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l2_sram_free);

int sram_free_with_lsl(const void *addr)
{
	struct sram_list_struct *lsl, **tmp;
	struct mm_struct *mm = current->mm;

	for (tmp = &mm->context.sram_list; *tmp; tmp = &(*tmp)->next)
		if ((*tmp)->addr == addr)
			goto found;
	return -1;
found:
	lsl = *tmp;
	sram_free(addr);
	*tmp = lsl->next;
	kfree(lsl);

	return 0;
}
EXPORT_SYMBOL(sram_free_with_lsl);

/* Allocate memory and keep in L1 SRAM List (lsl) so that the resources are
 * tracked.  These are designed for userspace so that when a process exits,
 * we can safely reap their resources.
 */
void *sram_alloc_with_lsl(size_t size, unsigned long flags)
{
	void *addr = NULL;
	struct sram_list_struct *lsl = NULL;
	struct mm_struct *mm = current->mm;

731
	lsl = kzalloc(sizeof(struct sram_list_struct), GFP_KERNEL);
Bryan Wu's avatar
Bryan Wu committed
732 733 734 735 736 737 738 739 740 741 742 743
	if (!lsl)
		return NULL;

	if (flags & L1_INST_SRAM)
		addr = l1_inst_sram_alloc(size);

	if (addr == NULL && (flags & L1_DATA_A_SRAM))
		addr = l1_data_A_sram_alloc(size);

	if (addr == NULL && (flags & L1_DATA_B_SRAM))
		addr = l1_data_B_sram_alloc(size);

	if (addr == NULL && (flags & L2_SRAM))
		addr = l2_sram_alloc(size);

	if (addr == NULL) {
		kfree(lsl);
		return NULL;
	}
	lsl->addr = addr;
	lsl->length = size;
	lsl->next = mm->context.sram_list;
	mm->context.sram_list = lsl;
	return addr;
}
EXPORT_SYMBOL(sram_alloc_with_lsl);
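
/*
 * Illustrative sketch of a caller using the tracked interface (the flag
 * combination and size are made-up examples, not taken from this file):
 *
 *	void *buf = sram_alloc_with_lsl(512, L1_DATA_A_SRAM | L2_SRAM);
 *	if (buf) {
 *		...
 *		sram_free_with_lsl(buf);
 *	}
 *
 * Anything still on current->mm->context.sram_list when the process
 * exits is what the teardown path walks to reap its SRAM, per the
 * comment above.
 */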

#ifdef CONFIG_PROC_FS
/* Once we get a real allocator, we'll throw all of this away.
 * Until then, we need some sort of visibility into the L1 alloc.
 */
/* Need to keep line of output the same.  Currently, that is 44 bytes
 * (including newline).
 */
static int _sram_proc_read(char *buf, int *len, int count, const char *desc,
		struct sram_piece *pfree_head,
		struct sram_piece *pused_head)
{
	struct sram_piece *pslot;

	if (!pfree_head || !pused_head)
		return -1;

	*len += sprintf(&buf[*len], "--- SRAM %-14s Size   PID State     \n", desc);

	/* search the relevant memory slot */
	pslot = pused_head->next;

	while (pslot != NULL) {
		*len += sprintf(&buf[*len], "%p-%p %10i %5i %-10s\n",
			pslot->paddr, pslot->paddr + pslot->size,
			pslot->size, pslot->pid, "ALLOCATED");

		pslot = pslot->next;
	}

	pslot = pfree_head->next;

	while (pslot != NULL) {
		*len += sprintf(&buf[*len], "%p-%p %10i %5i %-10s\n",
			pslot->paddr, pslot->paddr + pslot->size,
			pslot->size, pslot->pid, "FREE");

		pslot = pslot->next;
	}

	return 0;
}
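
/*
 * Example of the resulting /proc/sram output (addresses, sizes and pids
 * are hypothetical and only illustrate the fixed-width line format):
 *
 *	--- SRAM Scratchpad     Size   PID State
 *	ffb00000-ffb00020         32   142 ALLOCATED
 *	ffb00020-ffb01000       4064     0 FREE
 */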
static int sram_proc_read(char *buf, char **start, off_t offset, int count,
		int *eof, void *data)
{
	int len = 0;
	unsigned int cpu;

	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		if (_sram_proc_read(buf, &len, count, "Scratchpad",
			&per_cpu(free_l1_ssram_head, cpu), &per_cpu(used_l1_ssram_head, cpu)))
			goto not_done;
#if L1_DATA_A_LENGTH != 0
		if (_sram_proc_read(buf, &len, count, "L1 Data A",
			&per_cpu(free_l1_data_A_sram_head, cpu),
			&per_cpu(used_l1_data_A_sram_head, cpu)))
			goto not_done;
#endif
#if L1_DATA_B_LENGTH != 0
		if (_sram_proc_read(buf, &len, count, "L1 Data B",
			&per_cpu(free_l1_data_B_sram_head, cpu),
			&per_cpu(used_l1_data_B_sram_head, cpu)))
			goto not_done;
#endif
#if L1_CODE_LENGTH != 0
		if (_sram_proc_read(buf, &len, count, "L1 Instruction",
			&per_cpu(free_l1_inst_sram_head, cpu),
			&per_cpu(used_l1_inst_sram_head, cpu)))
			goto not_done;
#endif
	}
#if L2_LENGTH != 0
	if (_sram_proc_read(buf, &len, count, "L2", &free_l2_sram_head,
		&used_l2_sram_head))
		goto not_done;
#endif
	*eof = 1;
 not_done:
	return len;
}

static int __init sram_proc_init(void)
{
	struct proc_dir_entry *ptr;
	ptr = create_proc_entry("sram", S_IFREG | S_IRUGO, NULL);
	if (!ptr) {
		printk(KERN_WARNING "unable to create /proc/sram\n");
		return -1;
	}
	ptr->read_proc = sram_proc_read;
	return 0;
}
late_initcall(sram_proc_init);
#endif