Commit fb19420b authored by Vikram Narayanan

merge/v4.8: Remove unneeded code

Some of it crept in silently during the merge; the rest comes from pmfs. Since pmfs
won't work with v4.8 anymore, remove it.
http://lists.infradead.org/pipermail/linux-pmfs/2016-June/000148.html



Signed-off-by: Vikram Narayanan <vikram186@gmail.com>
parent 1ec68386
@@ -386,4 +386,3 @@
377 i386 copy_file_range sys_copy_file_range
378 i386 preadv2 sys_preadv2 compat_sys_preadv2
379 i386 pwritev2 sys_pwritev2 compat_sys_pwritev2
380 i386 init_lcd sys_init_lcd
@@ -491,8 +491,6 @@ ENTRY(phys_base)
#include "../../x86/xen/xen-head.S"
#include "../../x86/lcdguest/lcd-head.S"
__PAGE_ALIGNED_BSS
NEXT_PAGE(empty_zero_page)
.skip PAGE_SIZE
......
@@ -215,152 +215,3 @@ bottomup:
*/
return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
}
static unsigned long arch_get_unmapped_area_bottomup_sz(struct file *file,
unsigned long addr, unsigned long len, unsigned long align_size,
unsigned long pgoff, unsigned long flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long start_addr;
if (len > mm->cached_hole_size) {
start_addr = mm->free_area_cache;
} else {
start_addr = TASK_UNMAPPED_BASE;
mm->cached_hole_size = 0;
}
full_search:
addr = ALIGN(start_addr, align_size);
for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
/* At this point: (!vma || addr < vma->vm_end). */
if (TASK_SIZE - len < addr) {
/*
* Start a new search - just in case we missed
* some holes.
*/
if (start_addr != TASK_UNMAPPED_BASE) {
start_addr = TASK_UNMAPPED_BASE;
mm->cached_hole_size = 0;
goto full_search;
}
return -ENOMEM;
}
if (!vma || addr + len <= vma->vm_start) {
mm->free_area_cache = addr + len;
return addr;
}
if (addr + mm->cached_hole_size < vma->vm_start)
mm->cached_hole_size = vma->vm_start - addr;
addr = ALIGN(vma->vm_end, align_size);
}
}
static unsigned long arch_get_unmapped_area_topdown_sz(struct file *file,
unsigned long addr0, unsigned long len, unsigned long align_size,
unsigned long pgoff, unsigned long flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma, *prev_vma;
unsigned long base = mm->mmap_base, addr = addr0;
unsigned long largest_hole = mm->cached_hole_size;
unsigned long align_mask = ~(align_size - 1);
int first_time = 1;
/* don't allow allocations above current base */
if (mm->free_area_cache > base)
mm->free_area_cache = base;
if (len <= largest_hole) {
largest_hole = 0;
mm->free_area_cache = base;
}
try_again:
/* make sure it can fit in the remaining address space */
if (mm->free_area_cache < len)
goto fail;
/* either no address requested or can't fit in requested address hole */
addr = (mm->free_area_cache - len) & align_mask;
do {
/*
* Lookup failure means no vma is above this address,
* i.e. return with success:
*/
vma = find_vma(mm, addr);
if (!vma)
return addr;
/*
* new region fits between prev_vma->vm_end and
* vma->vm_start, use it:
*/
prev_vma = vma->vm_prev;
if (addr + len <= vma->vm_start &&
(!prev_vma || (addr >= prev_vma->vm_end))) {
/* remember the address as a hint for next time */
mm->cached_hole_size = largest_hole;
return (mm->free_area_cache = addr);
} else {
/* pull free_area_cache down to the first hole */
if (mm->free_area_cache == vma->vm_end) {
mm->free_area_cache = vma->vm_start;
mm->cached_hole_size = largest_hole;
}
}
/* remember the largest hole we saw so far */
if (addr + largest_hole < vma->vm_start)
largest_hole = vma->vm_start - addr;
/* try just below the current vma->vm_start */
addr = (vma->vm_start - len) & align_mask;
} while (len <= vma->vm_start);
fail:
/*
* if hint left us with no space for the requested
* mapping then try again:
*/
if (first_time) {
mm->free_area_cache = base;
largest_hole = 0;
first_time = 0;
goto try_again;
}
/*
* A failed mmap() very likely causes application failure,
* so fall back to the bottom-up function here. This scenario
* can happen with large stack limits and large mmap()
* allocations.
*/
mm->free_area_cache = TASK_UNMAPPED_BASE;
mm->cached_hole_size = ~0UL;
addr = arch_get_unmapped_area_bottomup_sz(file, addr0, len, align_size,
pgoff, flags);
/*
* Restore the topdown base:
*/
mm->free_area_cache = base;
mm->cached_hole_size = ~0UL;
return addr;
}
unsigned long arch_get_unmapped_area_sz(struct file *file,
unsigned long addr, unsigned long len, unsigned long align_size,
unsigned long pgoff, unsigned long flags)
{
struct mm_struct *mm = current->mm;
if (mm->get_unmapped_area == arch_get_unmapped_area)
return arch_get_unmapped_area_bottomup_sz(file, addr, len, align_size,
pgoff, flags);
return arch_get_unmapped_area_topdown_sz(file, addr, len, align_size,
pgoff, flags);
}
EXPORT_SYMBOL(arch_get_unmapped_area_sz);
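For context: the helpers above rely on mm->free_area_cache and mm->cached_hole_size, which were removed from struct mm_struct well before v4.8 (around v3.10, when the generic vm_unmapped_area() search replaced them), so this code no longer builds. A minimal sketch of how an aligned bottom-up lookup could be expressed against the v4.8 API; the helper name is hypothetical and not part of this patch:

static unsigned long lcd_get_unmapped_area_aligned(unsigned long len,
		unsigned long align_size)
{
	struct vm_unmapped_area_info info;

	info.flags = 0;				/* 0 = bottom-up search */
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = align_size - 1;	/* align_size must be a power of two */
	info.align_offset = 0;

	/* Returns the found address, or a negative errno encoded as unsigned long. */
	return vm_unmapped_area(&info);
}

A top-down variant would set VM_UNMAPPED_AREA_TOPDOWN in info.flags and bound the search with current->mm->mmap_base, mirroring the removed *_topdown_sz() logic.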
#ifndef LCD_LCD_DEFS_H
#define LCD_LCD_DEFS_H
#include <linux/bitmap.h>
#include <uapi/asm/bootparam.h>
#include <xen/interface/xen.h>
#include <asm/vmx.h>
#include <lcd/ipc.h>
#include <lcd/lcd.h>
#if !defined(VMX_EPT_AD_BIT)
#define VMX_EPT_AD_BIT (1ull << 21)
#define VMX_EPT_AD_ENABLE_BIT (1ull << 6)
#endif
/* Where did this come from ? it's not documented in ia32 manual
#ifndef VMX_EPT_EXTENT_INDIVIDUAL_BIT
#define VMX_EPT_EXTENT_INDIVIDUAL_BIT (1ull << 24)
#endif
*/
#ifndef X86_CR4_PCIDE
#define X86_CR4_PCIDE 0x00020000 /* enable PCID support */
#endif
#ifndef SECONDARY_EXEC_ENABLE_INVPCID
#define SECONDARY_EXEC_ENABLE_INVPCID 0x00001000
#endif
#ifndef SEG_TYPE_DATA
#define SEG_TYPE_DATA (0 << 3)
#endif
#ifndef SEG_TYPE_READ_WRITE
#define SEG_TYPE_READ_WRITE (1 << 1)
#endif
#ifndef SEG_TYPE_CODE
#define SEG_TYPE_CODE (1 << 3)
#endif
#ifndef SEG_TYPE_EXEC_READ
#define SEG_TYPE_EXEC_READ (1 << 1)
#endif
#ifndef SEG_TYPE_TSS
#define SEG_TYPE_TSS ((1 << 3) | (1 << 0))
#endif
#ifndef SEG_OP_SIZE_32BIT
#define SEG_OP_SIZE_32BIT (1 << 0)
#endif
#ifndef SEG_GRANULARITY_4KB
#define SEG_GRANULARITY_4KB (1 << 0)
#endif
#ifndef DESC_TYPE_CODE_DATA
#define DESC_TYPE_CODE_DATA (1 << 0)
#endif
/* Memory management */
#define EPT_LEVELS 4
#define VMX_EPT_FAULT_READ 0x01
typedef unsigned long epte_t;
#define __EPTE_READ 0x01
#define __EPTE_WRITE 0x02
#define __EPTE_EXEC 0x04
#define __EPTE_IPAT 0x40
#define __EPTE_SZ 0x80
#define __EPTE_A 0x100
#define __EPTE_D 0x200
#define __EPTE_TYPE(n) (((n) & 0x7) << 3)
enum {
EPTE_TYPE_UC = 0, /* uncachable */
EPTE_TYPE_WC = 1, /* write combining */
EPTE_TYPE_WT = 4, /* write through */
EPTE_TYPE_WP = 5, /* write protected */
EPTE_TYPE_WB = 6, /* write back */
};
#define __EPTE_NONE 0
#define __EPTE_FULL (__EPTE_READ | __EPTE_WRITE | __EPTE_EXEC)
#define EPTE_ADDR (~(PAGE_SIZE - 1))
#define EPTE_FLAGS (PAGE_SIZE - 1)
#define ADDR_TO_IDX(la, n) \
((((unsigned long) (la)) >> (12 + 9 * (n))) & ((1 << 9) - 1))
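/*
 * Illustrative examples (not part of the original header), assuming the
 * definitions above:
 *
 *   - A leaf EPTE for ordinary write-back guest RAM could be composed as
 *       __EPTE_FULL | __EPTE_TYPE(EPTE_TYPE_WB) | __EPTE_IPAT | (hpa & EPTE_ADDR)
 *
 *   - ADDR_TO_IDX(gpa, n) yields the 9-bit index into the EPT table at level
 *     n + 1. For example, with gpa = 0x403000:
 *       ADDR_TO_IDX(gpa, 0) = (0x403000 >> 12) & 0x1ff = 0x3   (lowest-level index)
 *       ADDR_TO_IDX(gpa, 3) = (0x403000 >> 39) & 0x1ff = 0x0   (root-table index)
 */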
/* VMCS related */
/* moved to lcd-vmx.h */
/* struct vmcs_config { */
/* int size; */
/* int order; */
/* u32 revision_id; */
/* u32 pin_based_exec_ctrl; */
/* u32 cpu_based_exec_ctrl; */
/* u32 cpu_based_2nd_exec_ctrl; */
/* u32 vmexit_ctrl; */
/* u32 vmentry_ctrl; */
/* }; */
/* struct vmcs { */
/* u32 revision_id; */
/* u32 abort; */
/* char data[0]; */
/* }; */
/* struct vmx_capability { */
/* u32 ept; */
/* u32 vpid; */
/* int has_load_efer:1; */
/* }; */
/* extern struct vmx_capability vmx_capability; */
/* extern struct vmcs_config vmcs_config; */
struct lcd_tss_struct {
struct x86_hw_tss tss;
u8 io_bitmap[1];
} __attribute__((packed));
struct ipc_waitq {
u32 partner_id;
struct list_head list;
};
/*
* Guest Physical and Virtual Memory Layout
* ========================================
*
* IA-32e paging is used, with a 4-level page table hierarchy (see
* Intel Manual 4.5). IA-32e paging maps 48-bit guest virtual addresses
* to 52-bit guest physical addresses; the upper 16 bits of a virtual
* address must be the sign extension of bit 47 (canonical form).
*
*
* +---------------------------+ 0xFFFF FFFF FFFF FFFF
* | |
* : :
* : Kernel Module :
* : Mapping Area :
* : :
* | |
* LCD_HEAP_END----------> +---------------------------+ TASK_SIZE (arch-dep)
* LCD_MODULE_START | |
* | |
* : :
* : HEAP :
* : (grows up) :
* : :
* : :
* | |
* LCD_HEAP_START--------> +---------------------------+ 0x0000 0000 0040 3000
* | Initial Guest Virtual |
* | Paging Memory | (4 MBs)
* LCD_PAGING_MEM_START--> +---------------------------+ 0x0000 0000 0000 3000
* | GDT | (4 KBs)
* LCD_GDT---------------> +---------------------------+ 0x0000 0000 0000 2000
* | |
* | Stack |
* : (grows down) : (4 KBs)
* : :
* | |
* | IPC Message Registers |
* LCD_ROOT_TCB----------> +---------------------------+ 0x0000 0000 0000 1000
* | Stack Canary Page | (4 KBs)
* LCD_STACK_CANARY,-----> +---------------------------+ 0x0000 0000 0000 0000
* LCD_BOTTOM
*/
#define LCD_BOTTOM 0x0000000000000000UL
#define LCD_STACK_CANARY LCD_BOTTOM
#define LCD_ROOT_TCB 0x0000000000001000UL
#define LCD_GDT 0x0000000000002000UL
#define LCD_PAGING_MEM_START 0x0000000000003000UL
#define LCD_HEAP_START 0x0000000000403000UL
#define LCD_HEAP_END TASK_SIZE
#define LCD_MODULE_START TASK_SIZE
#define LCD_PAGING_MEM_NUM_PAGES ((LCD_HEAP_START - LCD_PAGING_MEM_START) \
/ PAGE_SIZE)
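/*
 * Illustrative sanity check (not part of the original header; assumes 4 KiB
 * pages): the initial guest-virtual paging area spans
 *   LCD_HEAP_START - LCD_PAGING_MEM_START = 0x403000 - 0x3000 = 0x400000 bytes,
 * i.e. 1024 pages = 4 MiB, matching the "(4 MBs)" label in the layout above.
 */
#if (LCD_HEAP_START - LCD_PAGING_MEM_START) != 0x400000UL
#error "initial guest-virtual paging area is expected to be 4 MiB"
#endif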
#endif
ccflags-y += -Ivirt/kvm -Iarch/x86/kvm
obj-$(CONFIG_LCD) += lcd-domains.o
#
#obj-m += lcd.o
lcd-domains-y += $(addprefix ../../../virt/lcd/, core.o cap.o \
ipc.o cap-cache.o api.o)
lcd-domains-y += lcd-vtx.o utils.o
@@ -150,27 +150,6 @@ void __init early_alloc_pgt_buf(void)
int after_bootmem;
early_param_on_off("gbpages", "nogbpages", direct_gbpages, CONFIG_X86_DIRECT_GBPAGES);
//FIXME: Pass it via early param
// Place it here - should resolve after moving to v4.8
int direct_gbpages
#ifdef CONFIG_DIRECT_GBPAGES
= 1
#endif
;
static void __init init_gbpages(void)
{
#ifdef CONFIG_X86_64
if (direct_gbpages && cpu_has_gbpages)
printk(KERN_INFO "Using GB pages for direct mapping\n");
else
{
printk(KERN_INFO "direct_gbpages(%d). cpu_has_gbpages(%d)."
"GB pages not supported.\n", direct_gbpages, cpu_has_gbpages);
direct_gbpages = 0;
}
#endif
}
struct map_range {
unsigned long start;
......
@@ -20,7 +20,6 @@
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>
#include <asm/cpufeature.h>
#include "physaddr.h"
@@ -80,9 +79,8 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
* have to convert them into an offset in a page-aligned mapping, but the
* caller shouldn't need to know that small detail.
*/
static void __iomem *___ioremap_caller(resource_size_t phys_addr,
unsigned long size, unsigned long prot_val, void *caller,
unsigned int hpages, unsigned int readonly)
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
unsigned long size, enum page_cache_mode pcm, void *caller)
{
unsigned long offset, vaddr;
resource_size_t pfn, last_pfn, last_addr;
@@ -173,10 +171,6 @@ static void __iomem *___ioremap_caller(resource_size_t phys_addr,
break;
}
/* Map pages RO */
if (readonly)
prot = __pgprot((unsigned long)prot.pgprot & ~_PAGE_RW);
/*
* Ok, go for it..
*/
@@ -189,16 +183,8 @@ static void __iomem *___ioremap_caller(resource_size_t phys_addr,
if (kernel_map_sync_memtype(phys_addr, size, pcm))
goto err_free_area;
if (hpages)
{
if (ioremap_hpage_range(vaddr, vaddr + size, phys_addr, prot))
goto err_free_area;
}
else
{
if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
goto err_free_area;
}
ret_addr = (void __iomem *) (vaddr + offset);
mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);
@@ -218,21 +204,6 @@ err_free_memtype:
return NULL;
}
/*
* Remap an arbitrary physical address space into the kernel virtual
* address space. Needed when the kernel wants to access high addresses
* directly.
*
* NOTE! We need to allow non-page-aligned mappings too: we will obviously
* have to convert them into an offset in a page-aligned mapping, but the
* caller shouldn't need to know that small detail.
*/
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
unsigned long size, unsigned long prot_val, void *caller)
{
return ___ioremap_caller(phys_addr, size, prot_val, caller, 0, 0);
}
/**
* ioremap_nocache - map bus memory into CPU space
* @phys_addr: bus address of the memory
@@ -340,42 +311,12 @@ EXPORT_SYMBOL(ioremap_wt);
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
/* Map using 4k pages */
return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
__builtin_return_address(0));
void __iomem *
ioremap_hpage_cache(resource_size_t phys_addr, unsigned long size)
{
/* Map using hugepages */
return ___ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
__builtin_return_address(0), 1, 0);
}
EXPORT_SYMBOL(ioremap_hpage_cache);
void __iomem *
ioremap_hpage_cache_ro(resource_size_t phys_addr, unsigned long size)
{
/* Map using hugepages */
return ___ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
__builtin_return_address(0), 1, 1);
}
EXPORT_SYMBOL(ioremap_hpage_cache_ro);
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
/* Map using 4k pages */
return ___ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
__builtin_return_address(0), 0, 0);
}
EXPORT_SYMBOL(ioremap_cache);
void __iomem *ioremap_cache_ro(resource_size_t phys_addr, unsigned long size)
{
/* Map using 4k pages */
return ___ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
__builtin_return_address(0), 0, 1);
}
EXPORT_SYMBOL(ioremap_cache_ro);
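With the pmfs-only ioremap_hpage_cache()/ioremap_cache_ro() exports gone, a v4.8 caller that wants a cacheable mapping of a physical range would normally go through the stock memremap() interface (available since v4.3, declared in linux/io.h). A minimal sketch with hypothetical names:

static int map_pmem_region(resource_size_t phys, size_t size)
{
	void *virt = memremap(phys, size, MEMREMAP_WB);	/* write-back cached mapping */

	if (!virt)
		return -ENOMEM;
	/* ... access the region through virt ... */
	memunmap(virt);
	return 0;
}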
void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
unsigned long prot_val)
{
......
@@ -120,16 +120,6 @@
* POSIX.1 2.4: an empty pathname is invalid (ENOENT).
* PATH_MAX includes the nul terminator --RR.
*/
void final_putname(struct filename *name)
{
if (name->separate) {
__putname(name->name);
kfree(name);
} else {
__putname(name);
}
}
EXPORT_SYMBOL(final_putname);
#define EMBEDDED_NAME_MAX (PATH_MAX - offsetof(struct filename, iname))
......
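For context: v4.8 has no final_putname(); struct filename is reference-counted and released with putname(), which frees a separately-allocated name only when it is not embedded (the same name->iname layout the EMBEDDED_NAME_MAX definition above depends on). Roughly, paraphrased rather than quoted from v4.8:

void putname(struct filename *name)
{
	if (--name->refcnt > 0)			/* other references remain */
		return;

	if (name->name != name->iname) {	/* long name, allocated separately */
		__putname(name->name);
		kfree(name);
	} else {
		__putname(name);
	}
}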
@@ -197,8 +197,6 @@ obj-$(CONFIG_PERCPU_TEST) += percpu_test.o
# Always include interval tree (for LCD Domains)
obj-y += interval_tree.o
interval_tree_test-objs := interval_tree_test_main.o interval_tree.o
obj-$(CONFIG_ASN1) += asn1_decoder.o
obj-$(CONFIG_FONT_SUPPORT) += fonts/
......
@@ -70,26 +70,15 @@ static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
}
static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
unsigned long end, phys_addr_t phys_addr, pgprot_t prot, int hpages)
unsigned long end, phys_addr_t phys_addr, pgprot_t prot)//, int hpages)
{
pmd_t *pmd_page, *pmd;
pmd_t *pmd;
unsigned long next;
phys_addr -= addr;
pmd_page = pmd_alloc(&init_mm, pud, addr);
if (!pmd_page)
pmd = pmd_alloc(&init_mm, pud, addr);
if (!pmd)
return -ENOMEM;
if (hpages)
{
printk (KERN_INFO "PMD_MAPPING (START) [%s,%d]"
" VA START(0x%lx), VA END(0x%lx), "
"PA(0x%lx), SIZE(0x%lx)\n", __FUNCTION__, __LINE__,
addr, end, (unsigned long)(phys_addr+addr), (end-addr));
}
pmd = pmd_page;
do {
next = pmd_addr_end(addr, end);
@@ -102,36 +91,6 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, prot))
return -ENOMEM;
#if 1
//FIXME: Check if pmd_set_huge is good enough to remove the below block
#error "PMFS: feature Already enabled in the latest kernel"
if (hpages && cpu_has_pse && ((next-addr)>=PMD_SIZE))
{
u64 pfn = ((u64)(phys_addr + addr)) >> PAGE_SHIFT;
prot = __pgprot((unsigned long)prot.pgprot | _PAGE_PSE);
if ((s64)pfn < 0)
{
printk (KERN_INFO "MAPPING ERROR [%s, %d] : phys_addr(0x%lx)"
"addr(0x%lx), next(0x%lx), end(0x%lx),"
"pfn(0x%lx)\n", __FUNCTION__, __LINE__,
(unsigned long)phys_addr,
(unsigned long)addr, (unsigned long)next,
(unsigned long)end, (unsigned long)pfn);
return -ENOMEM;
}
spin_lock(&init_mm.page_table_lock);
set_pte((pte_t *)pmd, pfn_pte(pfn, prot));
spin_unlock(&init_mm.page_table_lock);
}
else
{
if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, prot))
return -ENOMEM;
}
#endif
} while (pmd++, addr = next, addr != end);
return 0;
}
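The FIXME above is answered upstream: since roughly v4.1, lib/ioremap.c can install PMD-sized mappings on its own when CONFIG_HAVE_ARCH_HUGE_VMAP is enabled, which makes the pmfs hpages path redundant. The per-PMD step of the mainline loop looks approximately like this (paraphrased, not verbatim):

if (ioremap_pmd_enabled() &&
    ((next - addr) == PMD_SIZE) &&
    IS_ALIGNED(phys_addr + addr, PMD_SIZE)) {
	if (pmd_set_huge(pmd, phys_addr + addr, prot))
		continue;	/* whole 2 MiB PMD mapped, skip the PTE loop */
}
if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, prot))
	return -ENOMEM;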
@@ -139,23 +98,13 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
unsigned long end, phys_addr_t phys_addr, pgprot_t prot, int hpages)
{
pud_t *pud_page, *pud;
pud_t *pud;
unsigned long next;
phys_addr -= addr;
pud_page = pud_alloc(&init_mm, pgd, addr);
if (!pud_page)
pud = pud_alloc(&init_mm, pgd, addr);
if (!pud)
return -ENOMEM;
if (hpages)