/*
 * ram_map.c
 *
 * Code for mapping RAM memory objects into the LCD's address
 * spaces.
 */

#include <lcd_config/pre_hook.h>

#include <linux/slab.h>
#include <liblcd/allocator.h>
#include <liblcd/mem.h>
#include <asm/lcd_domains/liblcd.h>
#include <lcd_domains/liblcd.h>

#include <lcd_config/post_hook.h>

struct lcd_page_allocator *ram_map_allocator;

/* LOW-LEVEL SYSTEM CALLS ---------------------------------------- */

int _lcd_mmap(cptr_t mo, unsigned int order, gpa_t base)
{
	int ret;
	/*
	 * BEWARE: This code is a bit fragile. You must do the actual
	 * map *before* inserting into the memory interval tree. This
	 * is because the mem itree code uses kmalloc (after we've
	 * booted). Scenario: the heap is calling this function to map
	 * fresh pages; mem itree is going to call kmalloc before
	 * this function returns; kmalloc may need to grow the slab
	 * cache, which leads into the heap again; but if we've alloc'd
	 * and mapped the pages, it should all be OK. Just a bit
	 * of scary and risky recursion.
	 *
	 * Do low level syscall to map memory object
	 */
	ret = lcd_syscall_mmap(mo, base);
	if (ret) {
		LIBLCD_ERR("low level mmap failed");
		goto fail1;
	}
	/*
	 * Insert into resource tree (unlike kliblcd, all of our
	 * memory objects are always contiguous in guest physical)
	 */
	ret = __liblcd_mem_itree_insert(base, (1UL << (PAGE_SHIFT + order)),
					mo);
	if (ret) {
		LIBLCD_ERR("error inserting into mem itree");
		goto fail2;
	}

	return 0;

fail2:
	lcd_syscall_munmap(mo);
fail1:
	return ret;
}

void _lcd_munmap(cptr_t mo, gpa_t base)
{
	int ret;
	struct lcd_resource_node *n;
	/*
	 * Look up resource node for memory object in itree
	 */
	ret = lcd_phys_to_resource_node(base, &n);
	if (ret) {
		LIBLCD_ERR("couldn't find memory object in tree; base gpa is 0x%lx",
			gpa_val(base));
		return;
	}
	/*
	 * Remove from tree
	 */
	__liblcd_mem_itree_delete(n);
	/*
	 * Unmap memory object
	 */
	lcd_syscall_munmap(mo);
}
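
/*
 * Note that _lcd_mmap and _lcd_munmap are paired: the memory object
 * mapped at `base` must later be unmapped with the same cptr and base.
 * The higher-level map/unmap routines below recover the cptr from the
 * resource node at unmap time, so their callers only need to remember
 * the address and order.
 */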

/* RAM MAP INTERNALS ---------------------------------------- */

static int 
ram_alloc_map_metadata_memory(const struct lcd_page_allocator_cbs *cbs,
			unsigned int alloc_order,
			unsigned long metadata_sz,
			void **metadata_addr)
{
	int ret;
	/*
	 * Just do a kmalloc
	 */
	*metadata_addr = kmalloc(metadata_sz, GFP_KERNEL);
	if (!*metadata_addr) {
		LIBLCD_ERR("error getting metadata mem for ram map");
		ret = -ENOMEM;
		goto fail1;
	}
	
	return 0;

fail1:
	return ret;
}

static void
ram_free_unmap_metadata_memory(const struct lcd_page_allocator_cbs *cbs,
			void *metadata_addr,
			unsigned long metadata_sz,
			unsigned int alloc_order)
{
	/*
	 * We alloc'd metadata with kmalloc (above)
	 */
	kfree(metadata_addr);
}

static inline gva_t ram_page_block_to_addr(struct lcd_page_block *pb)
{
	return gva_add(LCD_RAM_MAP_GV_ADDR, 
		lcd_page_block_to_offset(ram_map_allocator, pb));
}

static inline struct lcd_page_block *ram_addr_to_page_block(gva_t addr)
{
	return lcd_offset_to_page_block(
		ram_map_allocator,
		gva_val(addr) - gva_val(LCD_RAM_MAP_GV_ADDR));
}
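
/*
 * These two helpers are intended to be inverses: a page block maps to
 * the guest virtual address at its offset from LCD_RAM_MAP_GV_ADDR,
 * and that address maps back to the same block (assuming
 * lcd_page_block_to_offset and lcd_offset_to_page_block in the
 * allocator library invert each other).
 */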

/* INTERFACE -------------------------------------------------- */

static int do_map_into_phys(cptr_t pages, unsigned int order, gpa_t *base_out)
{
	int ret;
	struct lcd_page_block *pb;
	gpa_t addr;
	/*
	 * Adjust order so that it's >= LCD_RAM_MAP_MIN_ORDER
	 */
	if (order < LCD_RAM_MAP_MIN_ORDER)
		order = LCD_RAM_MAP_MIN_ORDER;
	/*
	 * Try to alloc a block of phys address space
	 */
	pb = lcd_page_allocator_alloc(ram_map_allocator, order);
	if (!pb) {
		LIBLCD_ERR("failed to get free ram map region");
		ret = -ENOMEM;
		goto fail1;
	}
	addr = lcd_gva2gpa(ram_page_block_to_addr(pb));
	/*
	 * Got some; map in guest physical
	 */
	ret = _lcd_mmap(pages, order, addr);
	if (ret) {
		LIBLCD_ERR("failed to map in guest physical");
		goto fail2;
	}

	*base_out = addr;

	return 0;

fail2:
	lcd_page_allocator_free(ram_map_allocator, pb, order);
fail1:
	return ret;
}

int lcd_map_phys(cptr_t pages, unsigned int order, gpa_t *base_out)
{
	return do_map_into_phys(pages, order, base_out);
}

int lcd_map_virt(cptr_t pages, unsigned int order, gva_t *gva_out)
{
	gpa_t gpa;
	int ret;

	ret = do_map_into_phys(pages, order, &gpa);
	if (ret)
		return ret;
	*gva_out = lcd_gpa2gva(gpa);

	return 0;
}

static void do_unmap_from_phys(gpa_t base, unsigned int order)
{
	int ret;
	struct lcd_resource_node *n;
	cptr_t pages;
	/*
	 * Adjust order so that it's >= LCD_RAM_MAP_MIN_ORDER
	 */
	if (order < LCD_RAM_MAP_MIN_ORDER)
		order = LCD_RAM_MAP_MIN_ORDER;
	/*
	 * Resolve address to resource node
	 */
	ret = lcd_phys_to_resource_node(base, &n);
	if (ret) { 
		LIBLCD_ERR("error looking up resource node");
		return;
	}
	pages = n->cptr;
	/*
	 * Free address block from RAM region
	 */
	lcd_page_allocator_free(
		ram_map_allocator,
		ram_addr_to_page_block(lcd_gpa2gva(base)),
		order);
	/*
	 * Unmap from guest physical
	 */
	_lcd_munmap(pages, base);
}

void lcd_unmap_phys(gpa_t base, unsigned int order)
{
	do_unmap_from_phys(base, order);
}

void lcd_unmap_virt(gva_t base, unsigned int order)
{
	gpa_t gp_base = lcd_gva2gpa(base);
	do_unmap_from_phys(gp_base, order);
}
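
/*
 * Example (illustrative): a caller holding a capability to a
 * 2^2-page memory object (pages_cptr below is a placeholder for a
 * cptr_t obtained elsewhere) could map it into guest virtual memory
 * and later tear it down like so:
 *
 *	gva_t va;
 *	ret = lcd_map_virt(pages_cptr, 2, &va);
 *	if (ret)
 *		return ret;
 *	... touch memory starting at (void *)gva_val(va) ...
 *	lcd_unmap_virt(va, 2);
 *
 * Note that orders below LCD_RAM_MAP_MIN_ORDER are rounded up
 * internally, so pass the same order to both map and unmap.
 */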

void *lcd_ioremap_cache(unsigned long phys_addr, unsigned long size)
{
	/* 
	 * Assumes someone already mapped the memory; we just
	 * translate phys -> virt.
	 */
	return (void *)gva_val(isolated_lcd_gpa2gva(__gpa(phys_addr)));
}
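
/*
 * Illustrative pairing (pages_cptr and size are placeholders): glue
 * code that needs a usable pointer for a mapped memory object can
 * combine lcd_map_phys with the translation above:
 *
 *	gpa_t base;
 *	ret = lcd_map_phys(pages_cptr, order, &base);
 *	if (ret)
 *		return ret;
 *	ioaddr = lcd_ioremap_cache(gpa_val(base), size);
 *
 * and later undo it with lcd_unmap_phys(base, order); lcd_iounmap
 * itself is a no-op, per the comment below.
 */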

void lcd_iounmap(void *addr)
{
	/*
	 * No-op. Someone (like the glue) will call a lower-level
	 * phys unmap.
	 */
	return;
}

/* INIT/EXIT ---------------------------------------- */

struct lcd_page_allocator_cbs ram_map_page_allocator_cbs = {
	.alloc_map_metadata_memory = ram_alloc_map_metadata_memory,
	.free_unmap_metadata_memory = ram_free_unmap_metadata_memory,
};
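
/*
 * Because __liblcd_ram_map_init passes 0 for "embed metadata" below,
 * the page allocator obtains its bookkeeping memory through these
 * callbacks (i.e., from kmalloc) rather than carving it out of the
 * RAM map region itself.
 */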

int __liblcd_ram_map_init(void)
{
	int ret;
	/*
	 * Create the RAM map allocator
	 */
	ret = lcd_page_allocator_create(LCD_RAM_MAP_NR_PAGES_ORDER,
					LCD_RAM_MAP_MIN_ORDER,
					LCD_RAM_MAP_MAX_ORDER,
					&ram_map_page_allocator_cbs,
					0, /* don't embed metadata */
					&ram_map_allocator);
	if (ret) {
		LIBLCD_ERR("failed to initialize RAM map allocator");
		goto fail1;
	}

	return 0;

fail1:
	return ret;
}