Commit 3a6f1b20 authored by Vikram Narayanan

merge/v4.8/lcd/mm: Update mm codebase

Use our old mm codebase (based on v3.10) and push in the changes required
to make it compile with v4.8's header files. To make this work, I pulled in
3.10's slab_def.h to kick out 4.8's slab_def.h.

The changes are not that significant:
* Removed struct slab and struct slab_rcu (struct page is used instead; see the sketch after this list)
* A few memcg-related APIs changed
* Some minor optimizations
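
As a rough illustration of dropping struct slab (a sketch only: virt_to_head_page() and the struct page fields are the v4.8 ones, the helper name is made up), the metadata the old allocator kept in a separate struct slab now lives in the struct page that backs the slab, so an object maps straight to its management data:

#include <linux/mm.h>	/* virt_to_head_page(), struct page */

static struct kmem_cache *cache_of_object(void *obj)
{
	struct page *page = virt_to_head_page(obj);

	/*
	 * page->slab_cache, page->freelist and page->active now carry what
	 * struct slab and kmem_bufctl_t used to track.
	 */
	return page->slab_cache;
}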

Merged from
45eed508 slab: remove SLAB_LIMIT
16025177 slab: remove kmem_bufctl_t
b1cb0982 slab: change the management method of free objects of the slab
a57a4988 slab: use __GFP_COMP flag for allocating slab pages
56f295ef slab: use well-defined macro, virt_to_slab()
68126702 slab: overloading the RCU head over the LRU for RCU free
07d417a1 slab: remove cachep in struct slab_rcu
1ea991b0 slab: remove nodeid in struct slab
ac2b54ed slab: remove colouroff in struct slab
0c3aa83e slab: change return type of kmem_getpages() to struct page
73293c2f slab: correct pfmemalloc check
276a2439 mm/slab: Give s_next and s_stop slab-specific names
e25839f6 mm/slab: Sharing s_next and s_stop between slab and slub
0fa8103b mm/slab: Fix drain freelist excessively
Signed-off-by: Vikram Narayanan <vikram186@gmail.com>
parent a982422b
@@ -86,3 +86,6 @@
#define __MM_INTERNAL_H
#endif
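/*
 * Presumably the point of pre-defining the stock guard below: v4.8's
 * include/linux/slab_def.h compiles to nothing once _LINUX_SLAB_DEF_H is
 * set, leaving the 3.10-based slab_def.h carried in this tree as the only
 * definition of struct kmem_cache.
 */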
#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H
#endif
@@ -19,14 +19,16 @@
#include <linux/mm.h>
#define GFP_SLAB_BUG_MASK (__GFP_DMA32|__GFP_HIGHMEM|~__GFP_BITS_MASK)
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
#define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
__GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
__GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
__GFP_ATOMIC)
void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
unsigned long floor, unsigned long ceiling);
static inline void set_page_count(struct page *page, int v)
{
atomic_set(&page->_count, v);
}
/*
* Turn a non-refcounted page (->_count == 0) into refcounted with
* a count of one.
@@ -34,61 +36,9 @@ static inline void set_page_refcounted(struct page *page)
static inline void set_page_refcounted(struct page *page)
{
VM_BUG_ON(PageTail(page));
VM_BUG_ON(atomic_read(&page->_count));
VM_BUG_ON(atomic_read(&page->_refcount));
set_page_count(page, 1);
}
static inline void __put_page(struct page *page)
{
atomic_dec(&page->_count);
}
static inline void __get_page_tail_foll(struct page *page,
bool get_page_head)
{
/*
* If we're getting a tail page, the elevated page->_count is
* required only in the head page and we will elevate the head
* page->_count and tail page->_mapcount.
*
* We elevate page_tail->_mapcount for tail pages to force
* page_tail->_count to be zero at all times to avoid getting
* false positives from get_page_unless_zero() with
* speculative page access (like in
* page_cache_get_speculative()) on tail pages.
*/
VM_BUG_ON(atomic_read(&page->first_page->_count) <= 0);
VM_BUG_ON(atomic_read(&page->_count) != 0);
VM_BUG_ON(page_mapcount(page) < 0);
if (get_page_head)
atomic_inc(&page->first_page->_count);
atomic_inc(&page->_mapcount);
}
/*
* This is meant to be called as the FOLL_GET operation of
* follow_page() and it must be called while holding the proper PT
* lock while the pte (or pmd_trans_huge) is still mapping the page.
*/
static inline void get_page_foll(struct page *page)
{
if (unlikely(PageTail(page)))
/*
* This is safe only because
* __split_huge_page_refcount() can't run under
* get_page_foll() because we hold the proper PT lock.
*/
__get_page_tail_foll(page, true);
else {
/*
* Getting a normal page or the head of a compound page
* requires to already have an elevated page->_count.
*/
VM_BUG_ON(atomic_read(&page->_count) <= 0);
atomic_inc(&page->_count);
}
}
extern unsigned long highest_memmap_pfn;
/*
......
This diff is collapsed.
@@ -74,7 +74,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
SLAB_TRACE | SLAB_DEBUG_FREE)
SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif
@@ -228,7 +228,7 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
* to not do even the assignment. In that case, slab_equal_or_root
* will also be a constant.
*/
if (!memcg_kmem_enabled() && !unlikely(s->flags & SLAB_DEBUG_FREE))
if (!memcg_kmem_enabled() && !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
return s;
page = virt_to_head_page(x);
@@ -275,3 +275,6 @@ struct kmem_cache_node {
};
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
@@ -24,6 +24,7 @@
#include <asm/page.h>
#include <linux/memcontrol.h>
#include "slab_def.h"
#include "slab.h"
/* BEGIN LCD */
@@ -35,6 +36,25 @@ LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;
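/*
 * A guess from the bodies below: the isolated (LCD) build does not carry
 * cgroup kmem accounting, so the memcg hooks the old slab code calls are
 * reduced to no-op stubs here.
 */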
static inline int
memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
struct kmem_cache *root_cache)
{
return 0;
}
static inline void memcg_release_cache(struct kmem_cache *cachep)
{
}
static inline void memcg_cache_list_add(struct mem_cgroup *memcg,
struct kmem_cache *s)
{
}
static inline void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
{
}
#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
@@ -290,7 +310,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
}
EXPORT_SYMBOL(kmem_cache_destroy);
int slab_is_available(void)
bool slab_is_available(void)
{
return slab_state >= UP;
}
@@ -506,6 +526,13 @@ void __init create_kmalloc_caches(unsigned long flags)
#ifdef CONFIG_SLABINFO
#ifdef CONFIG_SLAB
#define SLABINFO_RIGHTS (S_IWUSR | S_IRUSR)
#else
#define SLABINFO_RIGHTS S_IRUSR
#endif
void print_slabinfo_header(struct seq_file *m)
{
/*
@@ -540,12 +567,12 @@ static void *s_start(struct seq_file *m, loff_t *pos)
return seq_list_start(&slab_caches, *pos);
}
static void *s_next(struct seq_file *m, void *p, loff_t *pos)
void *slab_next(struct seq_file *m, void *p, loff_t *pos)
{
return seq_list_next(p, &slab_caches, pos);
}
static void s_stop(struct seq_file *m, void *p)
void slab_stop(struct seq_file *m, void *p)
{
mutex_unlock(&slab_mutex);
}
@@ -622,8 +649,8 @@ static int s_show(struct seq_file *m, void *p)
*/
static const struct seq_operations slabinfo_op = {
.start = s_start,
.next = s_next,
.stop = s_stop,
.next = slab_next,
.stop = slab_stop,
.show = s_show,
};
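
/*
 * Sketch (hypothetical names, assumes <linux/seq_file.h>) of what exporting
 * the iterators through slab.h enables: another seq_file that walks
 * slab_caches can reuse the shared .next/.stop instead of duplicating
 * s_next/s_stop; only .start and .show stay local.
 */
static void *leaks_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&slab_mutex);	/* released by slab_stop() */
	return seq_list_start(&slab_caches, *pos);
}

static int leaks_show(struct seq_file *m, void *p)
{
	return 0;	/* per-cache reporting would go here */
}

static const struct seq_operations slabstats_op = {
	.start = leaks_start,
	.next  = slab_next,	/* shared list walk */
	.stop  = slab_stop,	/* shared unlock */
	.show  = leaks_show,
};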
@@ -642,7 +669,8 @@ static const struct file_operations proc_slabinfo_operations = {
static int __init slab_proc_init(void)
{
proc_create("slabinfo", S_IRUSR, NULL, &proc_slabinfo_operations);
proc_create("slabinfo", SLABINFO_RIGHTS, NULL,
&proc_slabinfo_operations);
return 0;
}
module_init(slab_proc_init);
......
#ifndef _LCD_LINUX_SLAB_DEF_H
#define _LCD_LINUX_SLAB_DEF_H
/*
* Definitions unique to the original Linux SLAB allocator.
*
* What we provide here is a way to optimize the frequent kmalloc
* calls in the kernel by selecting the appropriate general cache
* if kmalloc was called with a size that can be established at
* compile time.
*/
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/reciprocal_div.h>
/*
* struct kmem_cache
*
* manages a cache.
*/
struct kmem_cache {
/* 1) Cache tunables. Protected by cache_chain_mutex */
unsigned int batchcount;
unsigned int limit;
unsigned int shared;
unsigned int size;
// u32 reciprocal_buffer_size;
struct reciprocal_value reciprocal_buffer_size;
/* 2) touched by every alloc & free from the backend */
unsigned int flags; /* constant flags */
unsigned int num; /* # of objs per slab */
/* 3) cache_grow/shrink */
/* order of pgs per slab (2^n) */
unsigned int gfporder;
/* force GFP flags, e.g. GFP_DMA */
gfp_t allocflags;
size_t colour; /* cache colouring range */
unsigned int colour_off; /* colour offset */
struct kmem_cache *freelist_cache;
unsigned int freelist_size;
/* constructor func */
void (*ctor)(void *obj);
/* 4) cache creation/removal */
const char *name;
struct list_head list;
int refcount;
int object_size;
int align;
/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
unsigned long num_active;
unsigned long num_allocations;
unsigned long high_mark;
unsigned long grown;
unsigned long reaped;
unsigned long errors;
unsigned long max_freeable;
unsigned long node_allocs;
unsigned long node_frees;
unsigned long node_overflow;
atomic_t allochit;
atomic_t allocmiss;
atomic_t freehit;
atomic_t freemiss;
/*
* If debugging is enabled, then the allocator can add additional
* fields and/or padding to every object. size contains the total
* object size including these internal fields, the following two
* variables contain the offset to the user object and its size.
*/
int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */
#ifdef CONFIG_MEMCG_KMEM
struct memcg_cache_params *memcg_params;
#endif
/* 6) per-cpu/per-node data, touched during every alloc/free */
/*
* We put array[] at the end of kmem_cache, because we want to size
* this array to nr_cpu_ids slots instead of NR_CPUS
* (see kmem_cache_init())
* We still use [NR_CPUS] and not [1] or [0] because cache_cache
* is statically defined, so we reserve the max number of cpus.
*
* We also need to guarantee that the list is able to accommodate a
* pointer for each node since "nodelists" uses the remainder of
* available pointers.
*/
struct kmem_cache_node **node;
struct array_cache *array[NR_CPUS + MAX_NUMNODES];
/*
* Do not add fields after array[]
*/
};
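
/*
 * Why reciprocal_buffer_size became a struct reciprocal_value: the alloc/free
 * paths need "offset within the slab / buffer size" to locate an object's
 * index, and reciprocal_divide() turns that division into a multiply+shift.
 * A sketch in the spirit of upstream's obj_to_index() (hypothetical name;
 * assumes struct page and its s_mem field from <linux/mm_types.h>):
 */
static inline unsigned int obj_index_sketch(const struct kmem_cache *cache,
					    const struct page *page, void *obj)
{
	u32 offset = obj - page->s_mem;	/* bytes from the slab's first object */

	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}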
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);
#if 0
#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
#else
static __always_inline void *
kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
{
return kmem_cache_alloc(cachep, flags);
}
#endif
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
struct kmem_cache *cachep;
void *ret;
if (__builtin_constant_p(size)) {
int i;
if (!size)
return ZERO_SIZE_PTR;
if (WARN_ON_ONCE(size > KMALLOC_MAX_SIZE))
return NULL;
i = kmalloc_index(size);
#ifdef CONFIG_ZONE_DMA
if (flags & GFP_DMA)
cachep = kmalloc_dma_caches[i];
else
#endif
cachep = kmalloc_caches[i];
ret = kmem_cache_alloc_trace(cachep, flags, size);
return ret;
}
return __kmalloc(size, flags);
}
#endif //if 0
#ifdef CONFIG_NUMA
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
gfp_t flags,
int nodeid,
size_t size);
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
gfp_t flags,
int nodeid,
size_t size)
{
return kmem_cache_alloc_node(cachep, flags, nodeid);
}
#endif
static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
struct kmem_cache *cachep;
if (__builtin_constant_p(size)) {
int i;
if (!size)
return ZERO_SIZE_PTR;
if (WARN_ON_ONCE(size > KMALLOC_MAX_SIZE))
return NULL;
i = kmalloc_index(size);
#ifdef CONFIG_ZONE_DMA
if (flags & GFP_DMA)
cachep = kmalloc_dma_caches[i];
else
#endif
cachep = kmalloc_caches[i];
return kmem_cache_alloc_node_trace(cachep, flags, node, size);
}
return __kmalloc_node(size, flags, node);
}
#endif /* CONFIG_NUMA */
#endif /* _LINUX_SLAB_DEF_H */
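
/*
 * For context on the "size established at compile time" comment at the top
 * of this header (the inline fast path is disabled under "#if 0" in this
 * port, so this is only illustrative of the upstream behaviour): with a
 * constant size, kmalloc_index() folds at compile time and the call becomes
 * a direct general-cache allocation. kmalloc_index(64) is 6, so the call
 * below is effectively kmem_cache_alloc(kmalloc_caches[6], GFP_KERNEL).
 */
static void *alloc_example(void)
{
	return kmalloc(64, GFP_KERNEL);
}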
@@ -31,7 +31,19 @@
/* BEGIN LCD */
#include <lcd_config/post_hook.h>
/* END LCD */
/*
 * The x86 exception table stores its fixup target as an offset relative to
 * the entry itself; recover the absolute address.
 */
static inline unsigned long
ex_fixup_addr(const struct exception_table_entry *x)
{
return (unsigned long)&x->fixup + x->fixup;
}

/* Resume execution at the fixup location and hand the trap number back in %ax. */
bool ex_handler_fault(const struct exception_table_entry *fixup,
struct pt_regs *regs, int trapnr)
{
regs->ip = ex_fixup_addr(fixup);
regs->ax = trapnr;
return true;
}
/**
* kstrdup - allocate space for and copy an existing string
* @s: the string to duplicate
......