Commit fc4c8ed8 authored by Charlie Jacobsen

static-cptr-cache: Updates cptr and cspace code for new cache, and tests.

I'm seeing what appear to be race conditions in the tests, so
we're not out of the woods yet. I think I just need to introduce
a lock for the cptr cache. It wasn't originally designed to be
thread-safe, since only one thread at a time was using it, but we
need that now.

There are a few other miscellaneous changes:

  - Moves cptr manipulation functions into the public header. Doc cleanup.
  - cptr_init returns an integer now (non-zero signals failure).
  - Adds CAP_BUG macro. Library code invokes this to abort or signal
    a serious internal library error (e.g., unexpected switch case).

              kernel:   CAP_BUG ==> BUG
              user:     CAP_BUG ==> abort
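
              This is wired up via a per-environment __cap_bug hook
              (rough sketch; the actual definitions are in the header
              changes below):

                  #define CAP_BUG() __cap_bug()

                  /* kernel */
                  #define __cap_bug() BUG()

                  /* userspace */
                  #define __cap_bug() abort()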

  - Aside from the cptr cache code updates for the modified struct,
    separates cptr cache initialization into two parts: alloc and init.
    Motivation: Some users of libcap will have already allocated the
    cptr cache (e.g., declared it as a static global), and only need
    it initialized. So, to fully initialize a cptr cache, you now need
    to do, e.g.,

                int ret;
                struct cptr_cache *cache;

                ret = cptr_cache_alloc(&cache);
                if (ret)
                      ... handle error ...

                ret = cptr_cache_init(cache);
                if (ret) {
                      cptr_cache_free(cache);
                      ... handle error ...
                }
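
    And when completely done with the cache (cptr_cache_destroy is
    currently a no-op, but you must call it before freeing):

                cptr_cache_destroy(cache);
                cptr_cache_free(cache);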

  - Updates test apps to use new cptr cache API (alloc then init). Adds
    some extra error handling/cleanup code.
parent 48ee0d11
......@@ -57,28 +57,193 @@ struct cap_type_ops {
#define CAP_TYPE_MAX 256
#endif
#define CAP_BUG() __cap_bug()
/**
* Initialize the cptr cache subsystem
*/
void cptr_init(void);
/**
* For now, put debug macros in the user-accessible part; convenient.
*/
extern int cap_debug_level;
#define CAP_ERR __cap_err
#define CAP_WARN __cap_warn
#define CAP_MSG __cap_msg
#define CAP_DEBUG_ERR 1
#define CAP_DEBUG_WARN 2
#define CAP_DEBUG_MSG 3
#define CAP_DEBUG(lvl, msg, ...) { \
if (lvl <= cap_debug_level) \
__cap_debug(msg,## __VA_ARGS__); \
}
/* CPTRs -------------------------------------------------- */
/**
* Allocate and initialize a new cptr_cache.
*/
int cptr_cache_init(struct cptr_cache **c_out);
/**
* __cptr -- Construct a cptr from an unsigned long
* @cptr: the unsigned long to use
*
* This is a low-level function. You need to know how to pack
* the bits into the unsigned long.
*/
static inline cptr_t __cptr(unsigned long cptr)
{
return (cptr_t) {cptr};
}
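/*
* For example, __cptr(5) names capability slot 5 in the root cnode
* table: the fanout and level bits are all zero, so the level is 0.
*/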
/**
* Free and delete a cptr_cache
*/
void cptr_cache_destroy(struct cptr_cache *c);
/**
* cptr_val -- Extract the unsigned long (bits) in the cptr
* @c: the cptr to extract
*
* This can be useful if you want to pass a cptr in a register,
* as a scalar.
*/
static inline unsigned long cptr_val(cptr_t c)
{
return c.cptr;
}
/**
* Allocate a new cptr in the given cptr_cache. The cptr is stored in the memory
* pointed to by 'free_cptr'.
*/
/**
* cap_cptr_slot -- Returns the slot index into the final cnode table
* @c: the cptr
*
* Once you have arrived at the correct cnode table in the cspace
* radix tree, this is the index into that table to get the
* capability that @c refers to.
*/
static inline unsigned long cap_cptr_slot(cptr_t c)
{
/*
* Keep only the low (slot index) bits
*/
return cptr_val(c) & ((1 << (CAP_CSPACE_CNODE_TABLE_BITS - 1)) - 1);
}
/**
* cap_cptr_fanout -- Gives fanout index for going *from* @lvl to @lvl + 1
* @c: the cptr
* @lvl: the level in the cspace radix tree, where 0 <= lvl < CAP_CSPACE_DEPTH - 1
*
* Each node in the cspace radix tree is a cnode table. Each cnode
* table is split in half: the first half are capability slots, and
* the other half are pointers to further nodes in the tree. If a
* cptr refers to a slot in a deeper level in the tree, you need to
* follow these pointers. The fanout index tells you which pointers
* to follow at each level.
*/
static inline unsigned long cap_cptr_fanout(cptr_t c, int lvl)
{
unsigned long i;
if (lvl >= CAP_CSPACE_DEPTH - 1)
CAP_BUG();
i = cptr_val(c);
/*
* Shift and mask off bits at correct section
*/
i >>= ((lvl + 1) * (CAP_CSPACE_CNODE_TABLE_BITS - 1));
i &= ((1 << (CAP_CSPACE_CNODE_TABLE_BITS - 1)) - 1);
return i;
}
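/*
* Example layout, with CAP_CSPACE_CNODE_TABLE_BITS = 6 (so each
* slot/fanout section is 5 bits wide): bits 0-4 hold the slot index,
* bits 5-9 the fanout index from level 0 to 1, bits 10-14 the fanout
* index from level 1 to 2, and so on; the level field sits above the
* fanout sections.
*/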
/**
* cap_cptr_level -- The zero-indexed level in the cspace radix tree
* @c: the cptr
*
* Returns the level of the slot which @c refers to. 0 means the root
* cnode table.
*/
static inline unsigned long cap_cptr_level(cptr_t c)
{
unsigned long i;
i = cptr_val(c);
/*
* Shift and mask
*/
i >>= (CAP_CSPACE_DEPTH * (CAP_CSPACE_CNODE_TABLE_BITS - 1));
i &= ((1 << CAP_CSPACE_DEPTH_BITS) - 1);
return i;
}
/**
* cap_cptr_set_level -- Sets the level for @c
* @c: the cptr
* @lvl: the level in the cspace radix tree, 0 <= lvl < CAP_CSPACE_DEPTH
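*
* Note: the level is OR'd in, so this assumes the level bits in @c
* are currently zero.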
*/
static inline void cap_cptr_set_level(cptr_t *c, int lvl)
{
/* Shift and OR to store lvl */
c->cptr |= (lvl <<
(CAP_CSPACE_DEPTH * (CAP_CSPACE_CNODE_TABLE_BITS - 1)));
}
/**
* cptr_is_null -- Returns non-zero if cptr is the special null cptr
* @c: cptr to test
*/
static inline int cptr_is_null(cptr_t c)
{
return cptr_val(c) == cptr_val(CAP_CPTR_NULL);
}
/* CPTR CACHEs -------------------------------------------------- */
/**
* cptr_init -- Initialize the cptr cache subsystem
*/
int cptr_init(void);
/**
* cptr_fini -- Tear down the cptr cache subsystem.
*/
void cptr_fini(void);
/**
* cptr_cache_alloc -- Allocate a cptr cache data structure (not initialized)
* @out: out param, pointer to newly alloc'd cache
*
* You should call cptr_cache_free when done with the cache. Returns
* non-zero on error.
*/
int cptr_cache_alloc(struct cptr_cache **out);
/**
* cptr_cache_free -- Free a cptr cache alloc'd via cptr_cache_alloc
* @cache: the cptr cache to free
*/
void cptr_cache_free(struct cptr_cache *cache);
/**
* cptr_cache_init -- Initialize the data in a cptr cache
* @cache: the cptr cache to initialize
*
* Zeros out things, and sets some initial values. You *must* call this
* function before using the cptr cache.
*/
int cptr_cache_init(struct cptr_cache *cache);
/**
* cptr_cache_destroy -- Destroys internals of cptr cache
* @cache: cache to destroy
*
* For now, this is a no-op. You *must* call this before freeing the
* cache. (Yes, for now it is a no-op, but perhaps it won't be in the
* future.)
*/
void cptr_cache_destroy(struct cptr_cache *cache);
/**
* cptr_alloc -- Allocate a new cptr from the cache
* @cache: the cptr cache to allocate from
* @free_cptr: out param, points to the allocated cptr
*
* Returns non-zero if there are no more slots left.
*/
int cptr_alloc(struct cptr_cache *cptr_cache, cptr_t *free_cptr);
/**
* cptr_free -- Return a cptr to the cache
* @cache: the cptr cache to return the cptr to
* @c: the cptr to return
*
* Fails silently if the cptr is free already.
*/
void cptr_free(struct cptr_cache *cptr_cache, cptr_t c);
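/*
* A minimal usage sketch (assuming an alloc'd and init'd cache):
*
*     cptr_t slot;
*     if (cptr_alloc(cache, &slot))
*             ... handle slot exhaustion ...
*     ... install a capability at slot ...
*     cptr_free(cache, slot);
*/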
/* CSPACES -------------------------------------------------- */
/**
* Initializes caches, etc. in the capability subsystem. Called when the
* microkernel initializes.
......@@ -89,6 +254,24 @@ int cap_init(void);
* is exiting.
*/
void cap_fini(void);
/**
* cap_cspace_slots_in_level -- Return total number of slots in cspace at lvl
* @lvl: the level to query
*
* Returns the total number of *capability* slots in all of the
* cnode tables at a given @lvl of the cspace radix tree.
*/
static inline int cap_cspace_slots_in_level(int lvl)
{
int out = CAP_CSPACE_CNODE_TABLE_SIZE/2;
if (lvl < 0 || lvl >= CAP_CSPACE_DEPTH)
CAP_BUG();
for ( ; lvl > 0; lvl-- )
out *= CAP_CSPACE_CNODE_TABLE_SIZE/2;
return out;
}
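/*
* For example, with CAP_CSPACE_CNODE_TABLE_SIZE = 64: level 0 holds
* 32 capability slots, level 1 holds 32 * 32 = 1024, and level 3
* holds 32^4 = 1,048,576 - the "over 1 million slot capacity"
* mentioned in the cspace sizing comments.
*/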
/**
* Register a new capability object type. If you pass type == 0, the
* system will select the next available identifier and return it. You
......@@ -215,22 +398,5 @@ cap_type_t cap_cnode_type(struct cnode *cnode);
*/
struct cspace * cap_cnode_cspace(struct cnode *cnode);
/**
* For now, put debug macros in the user-accessible part; convenient.
*/
extern int cap_debug_level;
#define CAP_ERR __cap_err
#define CAP_WARN __cap_warn
#define CAP_MSG __cap_msg
#define CAP_DEBUG_ERR 1
#define CAP_DEBUG_WARN 2
#define CAP_DEBUG_MSG 3
#define CAP_DEBUG(lvl, msg, ...) { \
if (lvl <= cap_debug_level) \
__cap_debug(msg,## __VA_ARGS__); \
}
#endif /* __LIBCAP_H__ */
......@@ -39,7 +39,7 @@ struct cnode {
};
struct cnode_table {
struct cnode cnode[CAP_CNODE_TABLE_NUM_SLOTS];
struct cnode cnode[CAP_CSPACE_CNODE_TABLE_SIZE];
uint8_t table_level;
struct list_head table_list;
};
......@@ -61,7 +61,7 @@ struct cdt_root_node {
/* The init and finish routines are defined in their own components. The
* implementations differ between the kernel and userspace. */
void __cptr_init(void);
int __cptr_init(void);
void __cptr_fini(void);
/**
......
......@@ -85,7 +85,7 @@ static inline void __cap_cache_free(cap_cache_t *cache, void *obj)
#define __cap_zalloc(nmemb,size) kzalloc((nmemb)*(size),GFP_KERNEL)
#define __cap_free(addr) kfree(addr)
static inline void __cptr_init(void) { }
static inline int __cptr_init(void) { return 0; }
static inline void __cptr_fini(void) { }
#endif /* __LIBCAP_INTERNAL_KERNEL_H__ */
......@@ -3,6 +3,7 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/bug.h>
CAP_BUILD_CORE_TYPES_NOBUILTIN();
......@@ -15,4 +16,6 @@ CAP_BUILD_CORE_TYPES_NOBUILTIN();
#define __cap_debug(format,...) \
printk(KERN_DEBUG "cap: %s:%d: "format,__FUNCTION__,__LINE__,##__VA_ARGS__)
#define __cap_bug() BUG()
#endif /* __LIBCAP_KERNEL_H__ */
......@@ -41,7 +41,7 @@
* cptr allocation algorithm works, and (2) because a cnode table needs at
* least one capability slot and one pointer slot.
*/
#define CAP_CSPACE_CNODE_TABLE_BITS 8
#define CAP_CSPACE_CNODE_TABLE_BITS 6
#define CAP_CSPACE_CNODE_TABLE_SIZE (1 << CAP_CSPACE_CNODE_TABLE_BITS)
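/*
* E.g., CAP_CSPACE_CNODE_TABLE_BITS = 6 gives 64-slot cnode tables:
* 32 capability slots and 32 pointers to further cnode tables.
*/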
#if (CAP_CSPACE_CNODE_TABLE_SIZE < 2)
......@@ -52,10 +52,12 @@
* All of the data - the level, fanout sections, and slot - must fit
* inside an unsigned long. The current configuration was chosen so
* that this works on 32- and 64-bit. The cspace size is fairly
* significant - over 200 million slot capacity.
* significant - over 1 million slot capacity. You don't want it to
* be too big or else the (inefficient) cptr cache with bitmaps will
* be enormous.
*/
#if ((CAP_CSPACE_DEPTH * (CAP_CSPACE_CNODE_TABLE_BITS - 1) + \
CAP_CSPACE_DEPTH_BITS) > SIZEOF_UNSIGNED_LONG)
CAP_CSPACE_DEPTH_BITS) > (SIZEOF_UNSIGNED_LONG * 8))
#error "Adjust cspace sizing, otherwise cptrs won't work."
#endif
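/*
* E.g., assuming CAP_CSPACE_DEPTH = 4 and CAP_CSPACE_DEPTH_BITS = 2,
* this is 4 * (6 - 1) + 2 = 22 bits, which fits in an unsigned long
* on both 32- and 64-bit targets.
*/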
......@@ -81,92 +83,30 @@
#else
#error "cspace depth not 4, you need to update this"
#endif
static inline int cap_cspace_slots_in_level(int lvl)
{
int out = CAP_CSPACE_CNODE_TABLE_SIZE/2;
if (lvl < 0 || lvl >= CAP_CSPACE_DEPTH)
BUG();
for ( ; lvl > 0; lvl-- )
out *= CAP_CSPACE_CNODE_TABLE_SIZE/2;
return out;
}
/* CPTRs -------------------------------------------------- */
/**
* cptr_t -- Index into cspace radix tree (like a file descriptor)
*
* We wrap it inside a struct def so that the compiler will do strong
* type checking.
*/
typedef struct {
unsigned long cptr;
} cptr_t;
static inline cptr_t __cptr(unsigned long cptr)
{
return (cptr_t) {cptr};
}
static inline unsigned long cptr_val(cptr_t c)
{
return c.cptr;
}
static inline unsigned long cap_cptr_slot(cptr_t c)
{
/*
* Mask off low bits
*/
return cptr_val(c) & ((1 << (CAP_CSPACE_CNODE_TABLE_BITS - 1)) - 1);
}
/*
* Gives fanout index for going *from* lvl to lvl + 1, where
* 0 <= lvl < CAP_CSPACE_DEPTH.
*/
static inline unsigned long cap_cptr_fanout(cptr_t c, int lvl)
{
unsigned long i;
if (unlikely(lvl >= 3))
BUG();
i = cptr_val(c);
/*
* Shift and mask off bits at correct section
*/
i >>= ((lvl + 1) * (CAP_CSPACE_CNODE_TABLE_BITS - 1));
i &= ((1 << (CAP_CSPACE_CNODE_TABLE_BITS - 1)) - 1);
return i;
}
/*
* Gives depth/level of cptr, zero indexed (0 means the root cnode table)
*/
static inline unsigned long cap_cptr_level(cptr_t c)
{
unsigned long i;
i = cptr_val(c);
/*
* Shift and mask
*/
i >>= (CAP_CSPACE_DEPTH * (CAP_CSPACE_CNODE_TABLE_BITS - 1));
i &= ((1 << CAP_CSPACE_DEPTH_BITS) - 1);
return i;
}
/*
* Reserved cnodes:
*
* cptr = 0 is always null
*/
#define CAP_CPTR_NULL __cptr(0)
#define CAP_CPTR_NULL ((cptr_t){0})
static inline int cptr_is_null(cptr_t c)
{
return cptr_val(c) == cptr_val(CAP_CPTR_NULL);
}
/* CPTR CACHE -------------------------------------------------- */
#if (CAP_CSPACE_DEPTH == 4)
struct cptr_cache {
......@@ -180,23 +120,6 @@ struct cptr_cache {
unsigned long bmap3[CAP_BITS_TO_LONGS(CAP_CSPACE_SLOTS_IN_LEVEL(3))];
};
static inline unsigned long*
cap_cptr_cache_bmap_for_level(struct cptr_cache *c, int lvl)
{
switch (lvl) {
case 0:
return c->bmap0;
case 1:
return c->bmap1;
case 2:
return c->bmap2;
case 3:
return c->bmap3;
default:
BUG();
}
}
#else
#error "You need to adjust the cptr cache def."
#endif
......
......@@ -2,6 +2,7 @@
#define __LIBCAP_USER_H__
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
CAP_BUILD_CORE_TYPES_NOBUILTIN();
......@@ -15,4 +16,8 @@ CAP_BUILD_CORE_TYPES_NOBUILTIN();
#define __cap_debug(format,...) \
fprintf(stderr,"CDEBUG: %s:%d: "format,__FUNCTION__,__LINE__,## __VA_ARGS__)
#define __cap_bug() \
abort()
#endif /* __LIBCAP_USER_H__ */
......@@ -159,7 +159,7 @@ static int make_empty_cnode_table(struct cspace *cspace, uint8_t level,
* We delay some of the other set up until the cnode is
* actually used.
*/
for (i = 0; i < CAP_CNODE_TABLE_NUM_SLOTS; i++) {
for (i = 0; i < CAP_CSPACE_CNODE_TABLE_SIZE; i++) {
new->cnode[i].type = CAP_TYPE_FREE;
INIT_LIST_HEAD(&new->cnode[i].children);
INIT_LIST_HEAD(&new->cnode[i].siblings);
......@@ -262,7 +262,7 @@ static int update_cnode_table(struct cspace *cspace,
* pointers. Skip over cap slots by adding half the number of slots
* to level_id.
*/
index = level_id + (CAP_CNODE_TABLE_NUM_SLOTS >> 1);
index = level_id + (CAP_CSPACE_CNODE_TABLE_SIZE >> 1);
if (old->cnode[index].type == CAP_TYPE_CNODE) {
/*
......@@ -324,6 +324,11 @@ static int find_cnode(struct cspace *cspace, struct cnode_table *old,
/*
* invalid indexing, etc.
*/
CAP_DEBUG(1,
"Error in lookup: cnode is %s, and we are%s trying to alloc\n",
old->cnode[level_id].type == CAP_TYPE_FREE ?
"free" : "occupied",
alloc ? "" : " not");
return -EINVAL; /* signal an error in look up */
}
}
......@@ -381,7 +386,7 @@ static int __cap_cnode_lookup(struct cspace *cspace, cptr_t c, bool alloc,
/*
* If cptr is null, fail
*/
if (cptr_val(c) == cptr_val(CAP_CPTR_NULL))
if (cptr_is_null(c))
return -EINVAL;
/*
......@@ -1079,7 +1084,7 @@ static void cnode_table_tear_down(struct cnode_table *t, struct cspace *cspace)
/*
* Loop over cap slots (first half), and tear down each cnode
*/
for (i = 0; i < (CAP_CNODE_TABLE_NUM_SLOTS >> 1); i++) {
for (i = 0; i < (CAP_CSPACE_CNODE_TABLE_SIZE >> 1); i++) {
cnode = &t->cnode[i];
cnode_tear_down(cnode, cspace);
}
......
......@@ -10,9 +10,31 @@
#include "libcap_types.h"
#include "libcap_internal.h"
void cptr_init(void)
{
__cptr_init();
}

#if (CAP_CSPACE_DEPTH == 4)
static inline unsigned long*
cap_cptr_cache_bmap_for_level(struct cptr_cache *c, int lvl)
{
switch (lvl) {
case 0:
return c->bmap0;
case 1:
return c->bmap1;
case 2:
return c->bmap2;
case 3:
return c->bmap3;
default:
CAP_BUG();
}
}
#else
#error "You need to adjust this function def."
#endif

int cptr_init(void)
{
return __cptr_init();
}
void cptr_fini(void)
......@@ -20,70 +42,52 @@ void cptr_fini(void)
__cptr_fini();
}
int cptr_cache_init(struct cptr_cache **out)
{
struct cptr_cache *cache;
int ret;
int i, j;
int nbits;
/*
* Allocate the container
*/
cache = cap_zalloc(1, sizeof(*cache));
if (!cache) {
ret = -ENOMEM;
goto fail1;
}
/*
* Allocate the bitmaps
*/
for (i = 0; i < (1 << CAP_CPTR_DEPTH_BITS); i++) {
/*
* For level i, we use the slot bits plus i * fanout bits
*
* So e.g. for level 0, we use only slot bits, so there
* are only 2^(num slot bits) cap slots at level 0.
*/
nbits = 1 << (CAP_CPTR_SLOT_BITS + i * CAP_CPTR_FANOUT_BITS);
/*
* Alloc bitmap
*/
cache->bmaps[i] = cap_zalloc(BITS_TO_LONGS(nbits),
sizeof(unsigned long));
if (!cache->bmaps[i]) {
ret = -ENOMEM;
goto fail2; /* i = level we failed at */
}
}
/*
* Mark reserved cptr's as allocated
*/
cap_set_bit(0, cache->bmaps[0]);
*out = cache;
return 0;
fail2:
for (j = 0; j < i; j++)
cap_free(cache->bmaps[j]);
/*
* Free container
*/
cap_free(cache);
fail1:
return ret;
}

int cptr_cache_alloc(struct cptr_cache **out)
{
struct cptr_cache *cache;
/*
* Allocate the container
*/
cache = cap_zalloc(1, sizeof(*cache));
if (!cache)
return -ENOMEM;
*out = cache;
return 0;
}

void cptr_cache_free(struct cptr_cache *cache)
{
/*
* Free container
*/
cap_free(cache);
}

int cptr_cache_init(struct cptr_cache *cache)
{
int i;
unsigned long *bmap;
/*
* Zero out the bitmaps. (The caller may not have
* necessarily used zalloc.)
*/
for (i = 0; i < CAP_CSPACE_DEPTH; i++) {
bmap = cap_cptr_cache_bmap_for_level(cache, i);
memset(bmap, 0,
CAP_BITS_TO_LONGS(cap_cspace_slots_in_level(i)) *
sizeof(unsigned long)); /* size in bytes */
}
/*
* Mark reserved cptr's as allocated
*/
cap_set_bit(0, cap_cptr_cache_bmap_for_level(cache, 0));
return 0;
}

void cptr_cache_destroy(struct cptr_cache *cache)
{
/* No-op for now */
}
int __cap_alloc_cptr_from_bmap(unsigned long *bmap, int size,
......@@ -114,14 +118,15 @@ int cptr_alloc(struct cptr_cache *cptr_cache, cptr_t *free_cptr)
unsigned long *bmap;
unsigned long idx;
int size;