Commit fc402b18 authored by cl349

merge?

parent d5836be2
@@ -14,6 +14,7 @@ LD = $(CROSS_COMPILE)ld
CC = $(CROSS_COMPILE)gcc
CPP = $(CROSS_COMPILE)gcc -E
AR = $(CROSS_COMPILE)ar
RANLIB = $(CROSS_COMPILE)ranlib
NM = $(CROSS_COMPILE)nm
STRIP = $(CROSS_COMPILE)strip
OBJCOPY = $(CROSS_COMPILE)objcopy
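Every binutils tool here takes the same CROSS_COMPILE prefix, so a cross build only needs to override a single variable on the make command line. A purely illustrative invocation (the toolchain prefix is an example, not something this tree provides):

    make CROSS_COMPILE=x86_64-unknown-linux-gnu-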
@@ -43,3 +44,7 @@ KERNEL_REPO = http://www.kernel.org
# ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY
# ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY
ACM_USE_SECURITY_POLICY ?= ACM_NULL_POLICY
# Optional components
XENSTAT_XENTOP ?= y
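Since both settings are assigned with ?=, they can be overridden from the environment or the make command line without editing the file. For example, to keep the null ACM policy but skip building xentop (illustrative values only):

    make ACM_USE_SECURITY_POLICY=ACM_NULL_POLICY XENSTAT_XENTOP=n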
@@ -405,54 +405,6 @@ void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
balloon_unlock(flags);
}
unsigned long allocate_empty_lowmem_region(unsigned long pages)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
unsigned long *pfn_array;
unsigned long vstart;
unsigned long i;
unsigned int order = get_order(pages*PAGE_SIZE);
vstart = __get_free_pages(GFP_KERNEL, order);
if (vstart == 0)
return 0UL;
scrub_pages(vstart, 1 << order);
pfn_array = vmalloc((1<<order) * sizeof(*pfn_array));
BUG_ON(pfn_array == NULL);
for (i = 0; i < (1<<order); i++) {
pgd = pgd_offset_k( (vstart + (i*PAGE_SIZE)));
pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
pfn_array[i] = pte_mfn(*pte);
#ifdef CONFIG_X86_64
xen_l1_entry_update(pte, __pte(0));
#else
BUG_ON(HYPERVISOR_update_va_mapping(vstart + (i*PAGE_SIZE),
__pte_ma(0), 0));
#endif
phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
INVALID_P2M_ENTRY;
}
flush_tlb_all();
balloon_put_pages(pfn_array, 1 << order);
vfree(pfn_array);
return vstart;
}
EXPORT_SYMBOL(allocate_empty_lowmem_region);
/*
* Local variables:
* c-file-style: "linux"
......
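The removed function also documents the standard four-level walk from a kernel virtual address down to its PTE, which is worth keeping in mind when reading the rest of this tree. A minimal sketch of just that walk, assuming the same 2.6-era accessors used above (the helper name is ours, not the kernel's):

    /* Hypothetical helper: machine frame backing a mapped kernel vaddr.
     * Sketch only; no locking, and the address must be mapped. */
    static unsigned long virt_to_pte_mfn(unsigned long vaddr)
    {
        pgd_t *pgd = pgd_offset_k(vaddr);
        pud_t *pud = pud_offset(pgd, vaddr);
        pmd_t *pmd = pmd_offset(pud, vaddr);
        pte_t *pte = pte_offset_kernel(pmd, vaddr);

        return pte_mfn(*pte);
    }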
@@ -368,35 +368,37 @@ int direct_remap_area_pages(struct mm_struct *mm,
EXPORT_SYMBOL(direct_remap_area_pages);
static int lookup_pte_fn(
pte_t *pte, struct page *pte_page, unsigned long addr, void *data)
{
unsigned long *ptep = (unsigned long *)data;
if (ptep)
*ptep = (pfn_to_mfn(page_to_pfn(pte_page)) <<
PAGE_SHIFT) |
((unsigned long)pte & ~PAGE_MASK);
return 0;
}
int create_lookup_pte_addr(struct mm_struct *mm,
unsigned long address,
unsigned long *ptep)
{
int f(pte_t *pte, struct page *pte_page, unsigned long addr,
void *data) {
unsigned long *ptep = (unsigned long *)data;
if (ptep)
*ptep = (pfn_to_mfn(page_to_pfn(pte_page)) <<
PAGE_SHIFT) |
((unsigned long)pte & ~PAGE_MASK);
return 0;
}
return generic_page_range(mm, address, PAGE_SIZE, f, ptep);
return generic_page_range(mm, address, PAGE_SIZE, lookup_pte_fn, ptep);
}
EXPORT_SYMBOL(create_lookup_pte_addr);
static int noop_fn(
pte_t *pte, struct page *pte_page, unsigned long addr, void *data)
{
return 0;
}
int touch_pte_range(struct mm_struct *mm,
unsigned long address,
unsigned long size)
{
int f(pte_t *pte, struct page *pte_page, unsigned long addr,
void *data) {
return 0;
}
return generic_page_range(mm, address, size, f, NULL);
return generic_page_range(mm, address, size, noop_fn, NULL);
}
EXPORT_SYMBOL(touch_pte_range);
......
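This hunk (and the x86_64 copy of the file below) hoists GCC nested functions out to file-scope statics. Nested functions are a GCC extension, and taking one's address makes the compiler build a trampoline on the stack, which in turn requires an executable stack; a plain static callback avoids both problems. A sketch of the resulting pattern, reusing the generic_page_range() signature visible in this hunk (the counting callback itself is hypothetical):

    /* Hypothetical example of the callback pattern: count the PTEs that
     * generic_page_range() visits in [address, address + size). */
    static int count_fn(pte_t *pte, struct page *pte_page,
                        unsigned long addr, void *data)
    {
        (*(unsigned long *)data)++;
        return 0;                /* nonzero would abort the walk */
    }

    static unsigned long count_ptes(struct mm_struct *mm,
                                    unsigned long address, unsigned long size)
    {
        unsigned long n = 0;
        generic_page_range(mm, address, size, count_fn, &n);
        return n;
    }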
@@ -465,33 +465,35 @@ int direct_remap_area_pages(struct mm_struct *mm,
EXPORT_SYMBOL(direct_remap_area_pages);
static int lookup_pte_fn(
pte_t *pte, struct page *pte_page, unsigned long addr, void *data)
{
unsigned long *ptep = (unsigned long *)data;
if (ptep) *ptep = (pfn_to_mfn(page_to_pfn(pte_page)) << PAGE_SHIFT)
| ((unsigned long)pte & ~PAGE_MASK);
return 0;
}
int create_lookup_pte_addr(struct mm_struct *mm,
unsigned long address,
unsigned long *ptep)
{
int f(pte_t *pte, struct page *pte_page, unsigned long addr, void *data)
{
unsigned long *ptep = (unsigned long *)data;
if (ptep) *ptep = (pfn_to_mfn(page_to_pfn(pte_page)) << PAGE_SHIFT)
| ((unsigned long)pte & ~PAGE_MASK);
return 0;
}
return generic_page_range(mm, address, PAGE_SIZE, f, ptep);
return generic_page_range(mm, address, PAGE_SIZE, lookup_pte_fn, ptep);
}
EXPORT_SYMBOL(create_lookup_pte_addr);
static int noop_fn(
pte_t *pte, struct page *pte_page, unsigned long addr, void *data)
{
return 0;
}
int touch_pte_range(struct mm_struct *mm,
unsigned long address,
unsigned long size)
{
int f(pte_t *pte, struct page *pte_page, unsigned long addr, void *data)
{
return 0;
}
return generic_page_range(mm, address, size, f, NULL);
}
return generic_page_range(mm, address, size, noop_fn, NULL);
}
EXPORT_SYMBOL(touch_pte_range);
@@ -83,12 +83,15 @@ static struct timer_list balloon_timer;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
/* Use the private and mapping fields of struct page as a list. */
#define PAGE_TO_LIST(p) ( (struct list_head *)&p->private )
#define LIST_TO_PAGE(l) ( list_entry( ((unsigned long *)l), \
struct page, private ) )
#define UNLIST_PAGE(p) do { list_del(PAGE_TO_LIST(p)); \
p->mapping = NULL; \
p->private = 0; } while(0)
#define PAGE_TO_LIST(p) ((struct list_head *)&p->private)
#define LIST_TO_PAGE(l) \
(list_entry(((unsigned long *)l), struct page, private))
#define UNLIST_PAGE(p) \
do { \
list_del(PAGE_TO_LIST(p)); \
p->mapping = NULL; \
p->private = 0; \
} while(0)
#else
/* There's a dedicated list field in struct page we can use. */
#define PAGE_TO_LIST(p) ( &p->list )
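On the 2.6 branch struct page has no dedicated list field, so these macros overlay a struct list_head on the private and mapping members (which UNLIST_PAGE then clears), assuming those two fields are adjacent and pointer-sized. A toy model of the overlay, using a hypothetical struct rather than the real struct page:

    /* Toy illustration of the PAGE_TO_LIST trick: two adjacent
     * pointer-sized fields reinterpreted as list_head.next/.prev.
     * Hypothetical struct; the real layout lives in struct page. */
    struct toy_page {
        unsigned long private;             /* stands in for .next */
        struct address_space *mapping;     /* stands in for .prev */
    };
    #define TOY_TO_LIST(p) ((struct list_head *)&(p)->private)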
@@ -104,56 +107,53 @@ static struct timer_list balloon_timer;
#endif
#define IPRINTK(fmt, args...) \
printk(KERN_INFO "xen_mem: " fmt, ##args)
printk(KERN_INFO "xen_mem: " fmt, ##args)
#define WPRINTK(fmt, args...) \
printk(KERN_WARNING "xen_mem: " fmt, ##args)
printk(KERN_WARNING "xen_mem: " fmt, ##args)
/* balloon_append: add the given page to the balloon. */
static void balloon_append(struct page *page)
{
/* Low memory is re-populated first, so highmem pages go at list tail. */
if ( PageHighMem(page) )
{
list_add_tail(PAGE_TO_LIST(page), &ballooned_pages);
balloon_high++;
}
else
{
list_add(PAGE_TO_LIST(page), &ballooned_pages);
balloon_low++;
}
/* Lowmem is re-populated first, so highmem pages go at list tail. */
if (PageHighMem(page)) {
list_add_tail(PAGE_TO_LIST(page), &ballooned_pages);
balloon_high++;
} else {
list_add(PAGE_TO_LIST(page), &ballooned_pages);
balloon_low++;
}
}
/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
static struct page *balloon_retrieve(void)
{
struct page *page;
struct page *page;
if ( list_empty(&ballooned_pages) )
return NULL;
if (list_empty(&ballooned_pages))
return NULL;
page = LIST_TO_PAGE(ballooned_pages.next);
UNLIST_PAGE(page);
page = LIST_TO_PAGE(ballooned_pages.next);
UNLIST_PAGE(page);
if ( PageHighMem(page) )
balloon_high--;
else
balloon_low--;
if (PageHighMem(page))
balloon_high--;
else
balloon_low--;
return page;
return page;
}
static void balloon_alarm(unsigned long unused)
{
schedule_work(&balloon_worker);
schedule_work(&balloon_worker);
}
static unsigned long current_target(void)
{
unsigned long target = min(target_pages, hard_limit);
if ( target > (current_pages + balloon_low + balloon_high) )
target = current_pages + balloon_low + balloon_high;
return target;
unsigned long target = min(target_pages, hard_limit);
if (target > (current_pages + balloon_low + balloon_high))
target = current_pages + balloon_low + balloon_high;
return target;
}
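A quick worked example of the clamp: with target_pages = 200000, hard_limit still at ~0UL, current_pages = 100000 and 5000 ballooned pages in total, min() leaves the target at 200000, but the second check lowers it to 105000, since the driver can never grow beyond the pages it currently owns plus those parked in the balloon.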
/*
@@ -164,161 +164,147 @@ static unsigned long current_target(void)
*/
static void balloon_process(void *unused)
{
unsigned long *mfn_list, pfn, i, flags;
struct page *page;
long credit, debt, rc;
void *v;
unsigned long *mfn_list, pfn, i, flags;
struct page *page;
long credit, debt, rc;
void *v;
down(&balloon_mutex);
down(&balloon_mutex);
retry:
mfn_list = NULL;
if ( (credit = current_target() - current_pages) > 0 )
{
mfn_list = (unsigned long *)vmalloc(credit * sizeof(*mfn_list));
if ( mfn_list == NULL )
goto out;
balloon_lock(flags);
rc = HYPERVISOR_dom_mem_op(
MEMOP_increase_reservation, mfn_list, credit, 0);
balloon_unlock(flags);
if ( rc < credit )
{
/* We hit the Xen hard limit: reprobe. */
if ( HYPERVISOR_dom_mem_op(
MEMOP_decrease_reservation, mfn_list, rc, 0) != rc )
BUG();
hard_limit = current_pages + rc - driver_pages;
vfree(mfn_list);
goto retry;
}
for ( i = 0; i < credit; i++ )
{
if ( (page = balloon_retrieve()) == NULL )
BUG();
pfn = page - mem_map;
if ( phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY )
BUG();
/* Update P->M and M->P tables. */
phys_to_machine_mapping[pfn] = mfn_list[i];
xen_machphys_update(mfn_list[i], pfn);
mfn_list = NULL;
if ((credit = current_target() - current_pages) > 0) {
mfn_list = vmalloc(credit * sizeof(*mfn_list));
if (mfn_list == NULL)
goto out;
balloon_lock(flags);
rc = HYPERVISOR_dom_mem_op(
MEMOP_increase_reservation, mfn_list, credit, 0);
balloon_unlock(flags);
if (rc < credit) {
/* We hit the Xen hard limit: reprobe. */
BUG_ON(HYPERVISOR_dom_mem_op(
MEMOP_decrease_reservation,
mfn_list, rc, 0) != rc);
hard_limit = current_pages + rc - driver_pages;
vfree(mfn_list);
goto retry;
}
for (i = 0; i < credit; i++) {
page = balloon_retrieve();
BUG_ON(page == NULL);
pfn = page - mem_map;
if (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY)
BUG();
/* Update P->M and M->P tables. */
phys_to_machine_mapping[pfn] = mfn_list[i];
xen_machphys_update(mfn_list[i], pfn);
/* Link back into the page tables if it's not a highmem page. */
if ( pfn < max_low_pfn )
{
BUG_ON(HYPERVISOR_update_va_mapping(
(unsigned long)__va(pfn << PAGE_SHIFT),
pfn_pte_ma(mfn_list[i], PAGE_KERNEL), 0));
}
/* Finally, relinquish the memory back to the system allocator. */
ClearPageReserved(page);
set_page_count(page, 1);
__free_page(page);
}
current_pages += credit;
}
else if ( credit < 0 )
{
debt = -credit;
mfn_list = (unsigned long *)vmalloc(debt * sizeof(*mfn_list));
if ( mfn_list == NULL )
goto out;
for ( i = 0; i < debt; i++ )
{
if ( (page = alloc_page(GFP_HIGHUSER)) == NULL )
{
debt = i;
break;
}
pfn = page - mem_map;
mfn_list[i] = phys_to_machine_mapping[pfn];
if ( !PageHighMem(page) )
{
v = phys_to_virt(pfn << PAGE_SHIFT);
scrub_pages(v, 1);
BUG_ON(HYPERVISOR_update_va_mapping(
(unsigned long)v, __pte_ma(0), 0));
}
/* Link back into the page tables if not highmem. */
if (pfn < max_low_pfn)
BUG_ON(HYPERVISOR_update_va_mapping(
(unsigned long)__va(pfn << PAGE_SHIFT),
pfn_pte_ma(mfn_list[i], PAGE_KERNEL),
0));
/* Relinquish the page back to the allocator. */
ClearPageReserved(page);
set_page_count(page, 1);
__free_page(page);
}
current_pages += credit;
} else if (credit < 0) {
debt = -credit;
mfn_list = vmalloc(debt * sizeof(*mfn_list));
if (mfn_list == NULL)
goto out;
for (i = 0; i < debt; i++) {
if ((page = alloc_page(GFP_HIGHUSER)) == NULL) {
debt = i;
break;
}
pfn = page - mem_map;
mfn_list[i] = phys_to_machine_mapping[pfn];
if (!PageHighMem(page)) {
v = phys_to_virt(pfn << PAGE_SHIFT);
scrub_pages(v, 1);
BUG_ON(HYPERVISOR_update_va_mapping(
(unsigned long)v, __pte_ma(0), 0));
}
#ifdef CONFIG_XEN_SCRUB_PAGES
else
{
v = kmap(page);
scrub_pages(v, 1);
kunmap(page);
}
else {
v = kmap(page);
scrub_pages(v, 1);
kunmap(page);
}
#endif
}
}
/* Ensure that ballooned highmem pages don't have cached mappings. */
kmap_flush_unused();
flush_tlb_all();
/* Ensure that ballooned highmem pages don't have kmaps. */
kmap_flush_unused();
flush_tlb_all();
/* No more mappings: invalidate pages in P2M and add to balloon. */
for ( i = 0; i < debt; i++ )
{
pfn = mfn_to_pfn(mfn_list[i]);
phys_to_machine_mapping[pfn] = INVALID_P2M_ENTRY;
balloon_append(pfn_to_page(pfn));
}
/* No more mappings: invalidate P2M and add to balloon. */
for (i = 0; i < debt; i++) {
pfn = mfn_to_pfn(mfn_list[i]);
phys_to_machine_mapping[pfn] = INVALID_P2M_ENTRY;
balloon_append(pfn_to_page(pfn));
}
if ( HYPERVISOR_dom_mem_op(
MEMOP_decrease_reservation, mfn_list, debt, 0) != debt )
BUG();
BUG_ON(HYPERVISOR_dom_mem_op(
MEMOP_decrease_reservation, mfn_list, debt, 0) != debt);
current_pages -= debt;
}
current_pages -= debt;
}
out:
if ( mfn_list != NULL )
vfree(mfn_list);
if (mfn_list != NULL)
vfree(mfn_list);
/* Schedule more work if there is some still to be done. */
if ( current_target() != current_pages )
mod_timer(&balloon_timer, jiffies + HZ);
/* Schedule more work if there is some still to be done. */
if (current_target() != current_pages)
mod_timer(&balloon_timer, jiffies + HZ);
up(&balloon_mutex);
up(&balloon_mutex);
}
/* Resets the Xen limit, sets new target, and kicks off processing. */
static void set_new_target(unsigned long target)
{
/* No need for lock. Not read-modify-write updates. */
hard_limit = ~0UL;
target_pages = target;
schedule_work(&balloon_worker);
/* No need for lock. Not read-modify-write updates. */
hard_limit = ~0UL;
target_pages = target;
schedule_work(&balloon_worker);
}
static struct xenbus_watch target_watch =
{
.node = "memory/target"
.node = "memory/target"
};
/* React to a change in the target key */
static void watch_target(struct xenbus_watch *watch, const char *node)
{
unsigned long new_target;
int err;
err = xenbus_scanf("memory", "target", "%lu", &new_target);
if(err != 1)
{
printk(KERN_ERR "Unable to read memory/target\n");
return;
}
unsigned long new_target;
int err;
err = xenbus_scanf("memory", "target", "%lu", &new_target);
if (err != 1) {
printk(KERN_ERR "Unable to read memory/target\n");
return;
}
set_new_target(new_target >> PAGE_SHIFT);
set_new_target(new_target >> PAGE_SHIFT);
}
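At its core balloon_process() is a credit computation wrapped around two hypercalls: a positive credit reclaims frames from Xen, a negative one releases them. A sketch of the increase path, with names taken from the driver above (this standalone helper is hypothetical):

    /* Hypothetical helper mirroring the increase path above: request
     * credit machine frames; on a partial grant, hand the frames
     * straight back and record the hypervisor's hard limit. */
    static long try_increase(unsigned long *mfn_list, long credit)
    {
        long rc = HYPERVISOR_dom_mem_op(MEMOP_increase_reservation,
                                        mfn_list, credit, 0);
        if (rc < credit) {
            BUG_ON(HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation,
                                         mfn_list, rc, 0) != rc);
            hard_limit = current_pages + rc - driver_pages;
        }
        return rc;
    }

Note also that watch_target() receives memory/target in bytes and shifts right by PAGE_SHIFT, so everything below the xenbus layer deals in page frames.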
@@ -329,141 +315,185 @@ int balloon_init_watcher(struct notifier_block *notifier,
unsigned long event,
void *data)
{
int err;
BUG_ON(down_trylock(&xenbus_lock) == 0);
int err;
err = register_xenbus_watch(&target_watch);
BUG_ON(down_trylock(&xenbus_lock) == 0);
if (err) {
printk(KERN_ERR "Failed to set balloon watcher\n");
}
err = register_xenbus_watch(&target_watch);
if (err)
printk(KERN_ERR "Failed to set balloon watcher\n");
return NOTIFY_DONE;
return NOTIFY_DONE;
}
static int balloon_write(struct file *file, const char __user *buffer,
unsigned long count, void *data)
{
char memstring[64], *endchar;
unsigned long long target_bytes;
char memstring[64], *endchar;
unsigned long long target_bytes;
if ( !capable(CAP_SYS_ADMIN) )
return -EPERM;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if ( count <= 1 )
return -EBADMSG; /* runt */
if ( count > sizeof(memstring) )
return -EFBIG; /* too long */
if (count <= 1)
return -EBADMSG; /* runt */
if (count > sizeof(memstring))
return -EFBIG; /* too long */
if ( copy_from_user(memstring, buffer, count) )
return -EFAULT;
memstring[sizeof(memstring)-1] = '\0';
if (copy_from_user(memstring, buffer, count))
return -EFAULT;
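The hunk is truncated after the copy_from_user() check. Given the memstring, endchar and target_bytes declarations, the remainder of the function presumably parses the size string and applies it; a guessed continuation using the stock kernel helper memparse(), offered as our reconstruction rather than the commit's code:

    /* Hypothetical continuation; memparse() understands K/M/G suffixes. */
    memstring[sizeof(memstring)-1] = '\0';
    target_bytes = memparse(memstring, &endchar);
    set_new_target(target_bytes >> PAGE_SHIFT);
    return count;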