#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
#ifdef __KERNEL__

#ifdef CONFIG_HAVE_MEMBLOCK
/*
 * Logical memory blocks.
 *
 * Copyright (C) 2001 Peter Bergner, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/init.h>
#include <linux/mm.h>

/* Default sizes of the statically allocated region arrays */
#define INIT_MEMBLOCK_REGIONS	128
#define INIT_PHYSMEM_REGIONS	4

/* Definition of memblock flags. */
enum {
	MEMBLOCK_NONE		= 0x0,	/* No special request */
	MEMBLOCK_HOTPLUG	= 0x1,	/* hotpluggable region */
	MEMBLOCK_MIRROR		= 0x2,	/* mirrored region */
	MEMBLOCK_NOMAP		= 0x4,	/* don't add to kernel direct mapping */
};
struct memblock_region {
32 33
	phys_addr_t base;
	phys_addr_t size;
34
	unsigned long flags;
Tejun Heo's avatar
Tejun Heo committed
35 36 37
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	int nid;
#endif
Yinghai Lu's avatar
Yinghai Lu committed
38 39
};

struct memblock_type {
41 42
	unsigned long cnt;	/* number of regions */
	unsigned long max;	/* size of the allocated array */
43
	phys_addr_t total_size;	/* size of all regions */
44
	struct memblock_region *regions;
Yinghai Lu's avatar
Yinghai Lu committed
45 46 47
};

struct memblock {
48
	bool bottom_up;  /* is bottom up direction? */
49
	phys_addr_t current_limit;
50 51
	struct memblock_type memory;
	struct memblock_type reserved;
52 53 54
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	struct memblock_type physmem;
#endif
Yinghai Lu's avatar
Yinghai Lu committed
55 56 57
};

extern struct memblock memblock;	/* the single global memblock instance */
extern int memblock_debug;		/* non-zero enables memblock_dbg() output */
#ifdef CONFIG_MOVABLE_NODE
/* If movable_node boot option specified */
extern bool movable_node_enabled;
#endif /* CONFIG_MOVABLE_NODE */

/*
 * Print debug output only when memblock debugging is enabled.
 * Wrapped in do { } while (0) so the macro behaves as a single
 * statement and does not create a dangling-else hazard when used
 * as the body of an unbraced if/else.
 */
#define memblock_dbg(fmt, ...)						\
	do {								\
		if (memblock_debug)					\
			printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__);	\
	} while (0)
Yinghai Lu's avatar
Yinghai Lu committed
66

67 68
phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
					    phys_addr_t start, phys_addr_t end,
69
					    int nid, ulong flags);
70 71
phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
				   phys_addr_t size, phys_addr_t align);
72
phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr);
73
phys_addr_t get_allocated_memblock_memory_regions_info(phys_addr_t *addr);
74
void memblock_allow_resize(void);
75
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
76 77 78 79
int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
80
void memblock_trim_memory(phys_addr_t align);
81 82
bool memblock_overlaps_region(struct memblock_type *type,
			      phys_addr_t base, phys_addr_t size);
83 84
int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
85
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
86
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
87
ulong choose_memblock_flags(void);
88 89 90 91 92 93

/* Low level functions */
int memblock_add_range(struct memblock_type *type,
		       phys_addr_t base, phys_addr_t size,
		       int nid, unsigned long flags);

94 95
void __next_mem_range(u64 *idx, int nid, ulong flags,
		      struct memblock_type *type_a,
96 97 98
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid);

99 100
void __next_mem_range_rev(u64 *idx, int nid, ulong flags,
			  struct memblock_type *type_a,
101 102 103
			  struct memblock_type *type_b, phys_addr_t *out_start,
			  phys_addr_t *out_end, int *out_nid);

104 105 106
void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
			       phys_addr_t *out_end);

/**
 * for_each_mem_range - iterate through memblock areas from type_a and not
 * included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range(i, type_a, type_b, nid, flags,		\
			   p_start, p_end, p_nid)			\
	for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,	\
				     p_start, p_end, p_nid);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range(&i, nid, flags, type_a, type_b,		\
			      p_start, p_end, p_nid))

/**
 * for_each_mem_range_rev - reverse iterate through memblock areas from
 * type_a and not included in type_b. Or just type_a if type_b is NULL.
 * @i: u64 used as loop variable
 * @type_a: ptr to memblock_type to iterate
 * @type_b: ptr to memblock_type which excludes from the iteration
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 */
#define for_each_mem_range_rev(i, type_a, type_b, nid, flags,		\
			       p_start, p_end, p_nid)			\
	for (i = (u64)ULLONG_MAX,					\
		     __next_mem_range_rev(&i, nid, flags, type_a, type_b,\
					 p_start, p_end, p_nid);	\
	     i != (u64)ULLONG_MAX;					\
	     __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
				  p_start, p_end, p_nid))

/**
 * for_each_reserved_mem_region - iterate over all reserved memblock areas
 * @i: u64 used as loop variable
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 *
 * Walks over reserved areas of memblock. Available as soon as memblock
 * is initialized.
 */
#define for_each_reserved_mem_region(i, p_start, p_end)			\
	for (i = 0UL,							\
	     __next_reserved_mem_region(&i, p_start, p_end);		\
	     i != (u64)ULLONG_MAX;					\
	     __next_reserved_mem_region(&i, p_start, p_end))

#ifdef CONFIG_MOVABLE_NODE
/* True if the region carries the MEMBLOCK_HOTPLUG flag */
static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_HOTPLUG;
}

/* True if the "movable_node" boot option was specified */
static inline bool movable_node_is_enabled(void)
{
	return movable_node_enabled;
}
#else
/* Without CONFIG_MOVABLE_NODE, hotplug/movable queries are constant false */
static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
	return false;
}
static inline bool movable_node_is_enabled(void)
{
	return false;
}
#endif

184 185 186 187 188
static inline bool memblock_is_mirror(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_MIRROR;
}

189 190 191 192 193
static inline bool memblock_is_nomap(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_NOMAP;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
			    unsigned long  *end_pfn);
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
			  unsigned long *out_end_pfn, int *out_nid);

/**
 * for_each_mem_pfn_range - early memory pfn range iterator
 * @i: an integer used as loop variable
 * @nid: node selector, %MAX_NUMNODES for all nodes
 * @p_start: ptr to ulong for start pfn of the range, can be %NULL
 * @p_end: ptr to ulong for end pfn of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over configured memory ranges.
 */
#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

/**
 * for_each_free_mem_range - iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock.  Available as
 * soon as memblock is initialized.
 */
#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)	\
	for_each_mem_range(i, &memblock.memory, &memblock.reserved,	\
			   nid, flags, p_start, p_end, p_nid)

/**
 * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
 * @i: u64 used as loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @p_nid: ptr to int for nid of the range, can be %NULL
 *
 * Walks over free (memory && !reserved) areas of memblock in reverse
 * order.  Available as soon as memblock is initialized.
 */
#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end,	\
					p_nid)				\
	for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved,	\
			       nid, flags, p_start, p_end, p_nid)

/* OR the given MEMBLOCK_* flags into a region's flags word */
static inline void memblock_set_region_flags(struct memblock_region *r,
					     unsigned long flags)
{
	r->flags |= flags;
}

/* Clear the given MEMBLOCK_* flags from a region's flags word */
static inline void memblock_clear_region_flags(struct memblock_region *r,
					       unsigned long flags)
{
	r->flags &= ~flags;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int memblock_set_node(phys_addr_t base, phys_addr_t size,
		      struct memblock_type *type, int nid);

/* Record the NUMA node id in a region */
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
	r->nid = nid;
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return r->nid;
}
#else
/* Without a node map the setter is a no-op and every region reports node 0 */
static inline void memblock_set_region_node(struct memblock_region *r, int nid)
{
}

static inline int memblock_get_region_node(const struct memblock_region *r)
{
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

/* Boot-time physical allocators; each returns a physical address */
phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);

phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);

289 290 291 292
#ifdef CONFIG_MOVABLE_NODE
/*
 * Set the allocation direction to bottom-up or top-down.
 */
293
static inline void __init memblock_set_bottom_up(bool enable)
294 295 296 297 298 299 300 301 302 303 304 305 306 307
{
	memblock.bottom_up = enable;
}

/*
 * Check if the allocation direction is bottom-up or not.
 * if this is true, that said, memblock will allocate memory
 * in bottom-up direction.
 */
static inline bool memblock_bottom_up(void)
{
	return memblock.bottom_up;
}
#else
308
static inline void __init memblock_set_bottom_up(bool enable) {}
309 310 311
static inline bool memblock_bottom_up(void) { return false; }
#endif

/* Flags for memblock_alloc_base() and __memblock_alloc_base() */
#define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
#define MEMBLOCK_ALLOC_ACCESSIBLE	0

316
phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
317 318
					phys_addr_t start, phys_addr_t end,
					ulong flags);
319 320 321 322 323
phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
				phys_addr_t max_addr);
phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
				  phys_addr_t max_addr);
phys_addr_t memblock_phys_mem_size(void);
Yinghai Lu's avatar
Yinghai Lu committed
324
phys_addr_t memblock_mem_size(unsigned long limit_pfn);
325 326 327
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
328
bool memblock_is_memory(phys_addr_t addr);
329
int memblock_is_map_memory(phys_addr_t addr);
330
int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
331
bool memblock_is_reserved(phys_addr_t addr);
332
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
333

extern void __memblock_dump_all(void);

/* Dump all memblock state, but only when memblock debugging is enabled */
static inline void memblock_dump_all(void)
{
	if (memblock_debug)
		__memblock_dump_all();
}
Yinghai Lu's avatar
Yinghai Lu committed
341

342 343 344 345 346 347
/**
 * memblock_set_current_limit - Set the current allocation limit to allow
 *                         limiting allocations to what is currently
 *                         accessible during boot
 * @limit: New limit value (physical address)
 */
348
void memblock_set_current_limit(phys_addr_t limit);
349

350

351 352
phys_addr_t memblock_get_current_limit(void);

353 354 355 356 357 358 359 360 361
/*
 * pfn conversion functions
 *
 * While the memory MEMBLOCKs should always be page aligned, the reserved
 * MEMBLOCKs may not be. This accessor attempt to provide a very clear
 * idea of what they return for such non aligned MEMBLOCKs.
 */

/**
362
 * memblock_region_memory_base_pfn - Return the lowest pfn intersecting with the memory region
363 364
 * @reg: memblock_region structure
 */
365
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
366
{
367
	return PFN_UP(reg->base);
368 369 370
}

/**
371
 * memblock_region_memory_end_pfn - Return the end_pfn this region
372 373
 * @reg: memblock_region structure
 */
374
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
375
{
376
	return PFN_DOWN(reg->base + reg->size);
377 378 379
}

/**
380
 * memblock_region_reserved_base_pfn - Return the lowest pfn intersecting with the reserved region
381 382
 * @reg: memblock_region structure
 */
383
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
384
{
385
	return PFN_DOWN(reg->base);
386 387 388
}

/**
389
 * memblock_region_reserved_end_pfn - Return the end_pfn this region
390 391
 * @reg: memblock_region structure
 */
392
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
393
{
394
	return PFN_UP(reg->base + reg->size);
395 396 397 398 399 400 401 402
}

/* Iterate over every region of the named memblock type (memory, reserved, ...) */
#define for_each_memblock(memblock_type, region)					\
	for (region = memblock.memblock_type.regions;				\
	     region < (memblock.memblock_type.regions + memblock.memblock_type.cnt);	\
	     region++)


/* Section annotations that let memblock code/data be discarded after init
 * when the architecture opts in via CONFIG_ARCH_DISCARD_MEMBLOCK. */
#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
#define __init_memblock __meminit
#define __initdata_memblock __meminitdata
#else
#define __init_memblock
#define __initdata_memblock
#endif

Vladimir Murzin's avatar
Vladimir Murzin committed
411
#ifdef CONFIG_MEMTEST
412
extern void early_memtest(phys_addr_t start, phys_addr_t end);
Vladimir Murzin's avatar
Vladimir Murzin committed
413
#else
414
static inline void early_memtest(phys_addr_t start, phys_addr_t end)
Vladimir Murzin's avatar
Vladimir Murzin committed
415 416 417 418
{
}
#endif

419 420 421
#else
static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
{
Tejun Heo's avatar
Tejun Heo committed
422
	return 0;
423 424
}

#endif /* CONFIG_HAVE_MEMBLOCK */

#endif /* __KERNEL__ */

#endif /* _LINUX_MEMBLOCK_H */