Commit e7d54728 authored by Vikram Narayanan

mm/priv_mempool: Switch to spinlocks

Also switch from possible CPUs to online CPUs.
Signed-off-by: Vikram Narayanan <vikram186@gmail.com>
parent f3d64da8
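The locking change is a mechanical mutex-to-spinlock conversion on the global-pool lock; since a spinlock busy-waits instead of sleeping, the pool can then be locked from atomic context. A minimal before/after sketch (demo_pool is a hypothetical stand-in for priv_pool_t, not code from this commit):

	#include <linux/spinlock.h>

	struct demo_pool {
		spinlock_t pool_spin_lock;	/* was: struct mutex pool_lock */
	};

	static void demo_init(struct demo_pool *p)
	{
		spin_lock_init(&p->pool_spin_lock);	/* was: mutex_init(&p->pool_lock) */
	}

	static void demo_op(struct demo_pool *p)
	{
		spin_lock(&p->pool_spin_lock);	/* was: mutex_lock(); code must not
						 * sleep while the lock is held */
		/* short critical section: global-pool stack manipulation */
		spin_unlock(&p->pool_spin_lock);	/* was: mutex_unlock() */
	}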
@@ -8,6 +8,8 @@
 #define DATA_ALIGNED_SZ (SKB_DATA_ALIGN(MTU + HEADERS + SKB_LCD_MEMBERS_SZ))
 #define SKB_DATA_SIZE (DATA_ALIGNED_SZ + SKB_DATA_ALIGN(SKB_SHARED_INFO))
+#define SKB_CONTAINER_SIZE 128
 typedef enum {
 	/* for skb->data */
 	SKB_DATA_POOL = 0,
@@ -15,6 +17,9 @@ typedef enum {
 	/* for skb->page_frag */
 	SKB_FRAG_POOL,
+	/* for skb_container */
+	SKB_CONTAINER_POOL,
 	POOL_MAX,
 } pool_type_t;
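For illustration, a hypothetical caller of the new pool type, sketched only from the enum above and the priv_alloc()/priv_free() calls later in this diff:

	/* hypothetical usage: take one 128-byte (SKB_CONTAINER_SIZE) object
	 * from the new container pool, then return it */
	void *skbc = priv_alloc(SKB_CONTAINER_POOL);

	if (skbc) {
		/* ... use as an skb container ... */
		priv_free(skbc, SKB_CONTAINER_POOL);
	}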
@@ -46,7 +51,7 @@ typedef struct {
 	void *gpool;
 	struct atom stack;
 	unsigned int pool_order;
-	struct mutex pool_lock;
+	spinlock_t pool_spin_lock;
 } priv_pool_t;
@@ -28,7 +28,7 @@ void construct_global_pool(priv_pool_t *p)
 {
 	unsigned int list_sz = p->num_objs_percpu;
 	unsigned int obj_size = p->obj_size;
-	unsigned int gpool_objs = list_sz * p->num_cpus;
+	unsigned int gpool_objs = list_sz * num_possible_cpus();
 	char *gpool = p->gpool;
 	int i, b;
@@ -42,8 +42,8 @@ void construct_global_pool(priv_pool_t *p)
 	 */
 	bundles = gpool_objs / CACHE_SIZE;
-	printk("%s, bundles %u | list_sz %u\n",
-		__func__, bundles, list_sz);
+	printk("%s, gpool_objs %d | list_sz %u | bundles %u\n",
+		__func__, gpool_objs, list_sz, bundles);
 	for (b = 0; b < bundles; b++) {
 		printk("bundle ===> %d\n", b);
@@ -146,16 +146,16 @@ priv_pool_t *priv_pool_init(pool_type_t type, unsigned int num_objs,
 	/* calculate num_pages per cpu */
 	num_pages = PAGE_ALIGN(num_objs * obj_size) / PAGE_SIZE;
-	p->num_cpus = num_cpus = num_possible_cpus();
+	p->num_cpus = num_cpus = num_online_cpus();
 	/* allocate twice the amount of requested pages
 	 * one set is for the percpu buf, the remaining pages
 	 * would be given to the global buffer
 	 */
-	p->total_pages = total_pages = num_pages * num_cpus * 2;
+	p->total_pages = total_pages = num_pages * (num_cpus + num_possible_cpus());
 	printk("num objs %d | num_cpus %d | num_pages %d | num_objs_percpu %d "
 		"| total_pages %d | page order %d\npcpu_pool %p | global_pool %p\n",
-		num_objs, num_possible_cpus(), num_pages, num_objs_percpu,
+		num_objs, num_online_cpus(), num_pages, num_objs_percpu,
 		total_pages, get_order(total_pages * PAGE_SIZE), pcpu_pool,
 		global_pool);
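To make the new sizing concrete, a worked example with illustrative numbers only (4 pages per CPU, 2 CPUs online out of 8 possible):

	old: total_pages = num_pages * num_cpus * 2           = 4 * 8 * 2   = 64  (num_cpus was num_possible_cpus())
	new: total_pages = num_pages * (num_cpus + possible)  = 4 * (2 + 8) = 40  (num_cpus is now num_online_cpus())

Per-CPU buffers are now sized for online CPUs only, while the global pool still gets num_possible_cpus() worth of pages, matching the num_possible_cpus() factor introduced in construct_global_pool() above.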
@@ -175,10 +175,10 @@ priv_pool_t *priv_pool_init(pool_type_t type, unsigned int num_objs,
 	/* split the total pages between pcpu pool and the global pool */
 	pcpu_pool = pool;
 	p->gpool = global_pool =
-		pool + (num_possible_cpus() * num_pages * PAGE_SIZE);
+		pool + (num_cpus * num_pages * PAGE_SIZE);
 	/* update percpu vars */
-	for_each_possible_cpu(cpu) {
+	for_each_online_cpu(cpu) {
 		*per_cpu_ptr(p->marker, cpu) =
 			*per_cpu_ptr(p->head, cpu) = (struct object*) NULL;
 		*per_cpu_ptr(p->buf, cpu) =
@@ -193,7 +193,7 @@ priv_pool_t *priv_pool_init(pool_type_t type, unsigned int num_objs,
 	}
 	construct_global_pool(p);
-	mutex_init(&p->pool_lock);
+	spin_lock_init(&p->pool_spin_lock);
 	return p;
 }
 EXPORT_SYMBOL(priv_pool_init);
@@ -255,7 +255,7 @@ void *priv_alloc(pool_type_t type)
 	struct atom snapshot, new;
 	/* lock global pool */
-	mutex_lock(&p->pool_lock);
+	spin_lock(&p->pool_spin_lock);
 	snapshot = p->stack;
@@ -269,7 +269,7 @@ void *priv_alloc(pool_type_t type)
 	this_cpu_write(*(p->cached), CACHE_SIZE);
 	/* unlock global pool */
-	mutex_unlock(&p->pool_lock);
+	spin_unlock(&p->pool_spin_lock);
 	m = (struct object*) snapshot.head;
@@ -320,7 +320,7 @@ void priv_free(void *addr, pool_type_t type)
 	struct atom snapshot, new;
 	/* lock global pool */
-	mutex_lock(&pool->pool_lock);
+	spin_lock(&pool->pool_spin_lock);
 	new.head = donation;
 	donation->list = ((struct object*)*this_cpu_ptr(pool->head))->next;
@@ -337,7 +337,7 @@ void priv_free(void *addr, pool_type_t type)
 	pool->stack.version = new.version;
 	/* unlock global pool */
-	mutex_unlock(&pool->pool_lock);
+	spin_unlock(&pool->pool_spin_lock);
 	WARN_ON(!new.head);
 	pr_debug("update gpchain %p to %p | ohead: %p/%ld, nhead: %p/%ld\n",
 		donation, snapshot.head,
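Note that the conversion uses plain spin_lock()/spin_unlock() rather than the IRQ-safe variants, which presumes priv_alloc()/priv_free() are never called from hard-IRQ context. If that assumption ever broke, the usual kernel pattern would be:

	unsigned long flags;

	spin_lock_irqsave(&pool->pool_spin_lock, flags);	/* also masks local IRQs */
	/* ... short critical section ... */
	spin_unlock_irqrestore(&pool->pool_spin_lock, flags);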