Commit 6e36dcfe authored by Charlie Jacobsen's avatar Charlie Jacobsen
Browse files

Simple example builds.

parent f7b227a6
......@@ -108,6 +108,10 @@ AC_CONFIG_FILES(
src/common/Makefile
src/include/Makefile
src/tests/Kbuild
src/tests/Makefile
src/tests/simple/Kbuild
)
AC_OUTPUT
......
......@@ -33,7 +33,7 @@ LIBASYNC_TESTS_KBUILD=@abs_top_builddir@/src/tests
# are relative to this directory (the build version).
LIBASYNC_PATH=libasync.a
LIBFIPC_LIB=$(shell @abs_top_srcdir@/scripts/relpath.py \
$(LIBFIPC_DIR)/lib/libfipc.a @abs_top_builddir@)
$(LIBFIPC_DIR)/lib/libfipc.a @abs_top_builddir@/src)
export LIBASYNC_TESTS_KBUILD LIBASYNC_PATH LIBFIPC_LIB
......
......@@ -163,5 +163,5 @@ int thc_channel_group_item_add(struct thc_channel_group* channel_group,
return 0;
}
EXPORT_SYMBOL(thc_channel_group_add);
EXPORT_SYMBOL(thc_channel_group_item_add);
......@@ -110,12 +110,9 @@ typedef int errval_t;
awe_t _awe; \
extern void * CONT_RET_FN_NAME(_C) (void); \
\
_awe.status = LAZY_AWE; \
_awe.lazy_stack = NULL; \
_awe.pts = NULL; \
\
/* Define nested function containing the body */ \
noinline auto void _thc_nested_async(FORCE_ARGS_STACK awe_t *awe) __asm__(NESTED_FN_STRING(_C)); \
\
noinline void _thc_nested_async(FORCE_ARGS_STACK awe_t *awe) { \
void *_my_fb = _fb_info; \
_awe.current_fb = _my_fb; \
......@@ -130,6 +127,11 @@ typedef int errval_t;
/* Otherwise, return */ \
RETURN_CONT(CONT_RET_FN_STRING(_C)); \
} \
\
_awe.status = LAZY_AWE; \
_awe.lazy_stack = NULL; \
_awe.pts = NULL; \
\
SCHEDULE_CONT(&_awe, _thc_nested_async); \
__asm__ volatile ( \
" .globl " CONT_RET_FN_STRING(_C) "\n\t" \
......@@ -244,6 +246,10 @@ typedef void (*THCIdleFn_t)(void *);
// possibly producing additional AWEs which may be run subsequently.
typedef struct awe_t awe_t;
// Invoke these to initialize/tear down the thc runtime
void thc_init(void);
void thc_done(void);
// Finish the current AWE, and initialize (*awe_ptr_ptr) with a pointer
// to an AWE for its continuation. Typically, this will be stashed
// away in a data structure from which it will subsequently be fetched
......
obj-m += simple/
#obj-m += dispatch_loop/
\ No newline at end of file
# D=install dir (something/lib)
install-tests:
cp $(LIBASYNC_TESTS_KBUILD)/simple/libasync_test_simple.ko $(D)
#cp $(LIBASYNC_TESTS_KBUILD)/dispatch_loop/libfipc_test_dispatch_loop.ko $(D)
ASYNC_OBJS_DIR=../../src/common
RPC_OBJS_DIR=../../../fast-ipc-module/src/common
CFLAGS_ipc.o = -O2 -DPOLL -fno-ipa-cp -fno-ipa-sra
#CFLAGS_ring-channel.o = -I$(RPC_OBJS_DIR)../../IPC -I$(RING_CHAN_DIR) -fno-ipa-cp -fno-ipa-sra
EXTRA_CFLAGS=-DDEBUG_OUTPUT
obj-m := libasync_test.o
ccflags-y += -O0 -fno-ipa-cp -fno-ipa-sra -I$(LIB_TEST_ASYNC_DIR)/../src/include -I$(LIB_TEST_ASYNC_DIR)/../../fast-ipc-module/src/include -I$(LIB_TEST_ASYNC_DIR)/../../fast-ipc-module/src/platform/kernel/include -DUSE_ASYNC -DCONFIG_LAZY_THC
libasync_test-objs = $(ASYNC_OBJS_DIR)/awe-mapper.o $(ASYNC_OBJS_DIR)/thc.o $(ASYNC_OBJS_DIR)/thc_ipc.o $(ASYNC_OBJS_DIR)/thcsync.o $(RPC_OBJS_DIR)/ipc.o callee.o caller.o main.o
# @ASYNC_AUTOCONF_NOTICE@
# Magic line so we can do out-of-source build
src = @abs_top_srcdir@/src/tests/simple
obj-m = libasync_test_simple.o
# Path are relative to root test/ dir
libasync_test_simple-y += main.o callee.o caller.o
# libasync.a is relative to src/
libasync_test_simple-y += ../../$(LIBASYNC_PATH)
# libfipc.a is relative to src/
libasync_test_simple-y += ../../$(LIBFIPC_LIB)
# Add -DCHECK_MESSAGES to check that message values contain what is expected
ccflags-y += $(CFLAGS) $(AM_CPPFLAGS) $(AM_CFLAGS)
......@@ -9,7 +9,7 @@
#include <linux/kernel.h>
#include <libfipc.h>
#include "rpc.h"
#include "../../../fast-ipc-module/src/platform/kernel/tests/test_helpers.h"
#include "../test_helpers.h"
#include <thc_ipc.h>
/*
......
......@@ -11,8 +11,8 @@
#include <thc_ipc.h>
#include <thc.h>
#include <thcinternal.h>
#include <awe-mapper.h>
#include "../../../fast-ipc-module/src/platform/kernel/tests/test_helpers.h"
#include <awe_mapper.h>
#include "../test_helpers.h"
static inline int send_and_get_response(
struct fipc_ring_channel *chan,
......@@ -144,7 +144,6 @@ int caller(void *_caller_channel_header)
{
struct fipc_ring_channel *chan = _caller_channel_header;
unsigned long transaction_id = 0;
unsigned long start, end;
int ret = 0;
volatile void ** frame = (volatile void**)__builtin_frame_address(0);
volatile void *ret_addr = *(frame + 1);
......
......@@ -8,7 +8,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include "../../../fast-ipc-module/src/platform/kernel/tests/test_helpers.h"
#include "../test_helpers.h"
#include "rpc.h"
#define CALLER_CPU 1
......
/*
* test_helpers.h
*
* Some common utilities for setting up threads, pinning them
* to cores, setting up message buffers, etc.
*
* Copyright: University of Utah
*/
#ifndef FIPC_KERNEL_TEST_HELPERS_H
#define FIPC_KERNEL_TEST_HELPERS_H
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <libfipc.h>
/*
 * Spawn a kernel thread running threadfn(channel) and pin it to CPU
 * cpu_pin.
 *
 * On success, returns the task_struct with an extra reference taken
 * (get_task_struct), so the caller can safely wait on the thread even
 * if it exits early.  The thread is created but NOT woken; the caller
 * is responsible for wake_up_process() and for dropping the reference
 * (see test_fipc_release_thread / test_fipc_wait_for_thread).
 *
 * On failure, returns NULL (bad cpu_pin) or an ERR_PTR from
 * kthread_create; check with IS_ERR_OR_NULL.
 */
static inline
struct task_struct *
test_fipc_spawn_thread_with_channel(struct fipc_ring_channel *channel,
				int (*threadfn)(void *data),
				int cpu_pin)
{
	struct cpumask cpu_core;
	struct task_struct* thread = NULL;
	/*
	 * CPU ids are 0-based, so cpu_pin == num_online_cpus() is already
	 * out of range.  The original check used '>' and accepted one
	 * invalid id.  (NOTE(review): this assumes online CPUs are
	 * contiguous from 0, which holds on typical test boxes without
	 * CPU hotplug.)
	 */
	if (cpu_pin >= num_online_cpus()) {
		pr_err("Trying to pin on cpu > than avail # cpus\n");
		goto fail1;
	}
	/*
	 * Create kernel thread (it starts out sleeping)
	 */
	thread = kthread_create(threadfn, channel, "AsyncIPC.%d", cpu_pin);
	if (IS_ERR(thread)) {
		pr_err("Error while creating kernel thread\n");
		goto fail2;
	}
	/*
	 * Bump reference count, so even if thread dies before we have
	 * a chance to wait on it, we won't crash
	 */
	get_task_struct(thread);
	/*
	 * Assign thread to cpu_pin
	 */
	cpumask_clear(&cpu_core);
	cpumask_set_cpu(cpu_pin, &cpu_core);
	set_cpus_allowed_ptr(thread, &cpu_core);
	return thread;

fail2:
fail1:
	return thread; /* NULL or ERR_PTR - caller distinguishes */
}
/*
 * Drop the reference we took on the thread when it was spawned
 * (pairs with the get_task_struct in test_fipc_spawn_thread_with_channel).
 */
static inline void test_fipc_release_thread(struct task_struct *thread)
{
	put_task_struct(thread);
}
/*
 * Ask the thread to stop, wait for it to exit, and release our
 * reference to it.  Returns the thread function's exit code
 * (as reported by kthread_stop).
 */
static inline int test_fipc_wait_for_thread(struct task_struct *thread)
{
	int exit_code = kthread_stop(thread);

	test_fipc_release_thread(thread);

	return exit_code;
}
/*
 * Allocate and initialize a pair of connected fipc ring channels.
 *
 * buf_nr_pages_order: size of EACH of the two message buffers, as a
 *                     power-of-two number of pages (order passed to
 *                     __get_free_pages).
 * header_1/header_2:  out params; on success each points to a
 *                     kmalloc'd, initialized fipc_ring_channel.
 *                     header_1's tx buffer is header_2's rx buffer
 *                     and vice versa.
 *
 * Returns 0 on success, negative errno on failure (everything
 * allocated so far is freed).  On success, free with
 * test_fipc_free_channel.
 */
static inline
int
test_fipc_create_channel(unsigned int buf_nr_pages_order, /* in pages */
			struct fipc_ring_channel **header_1,
			struct fipc_ring_channel **header_2)
{
	int ret;
	void *buf1, *buf2;
	struct fipc_ring_channel *h1, *h2;
	/* fipc wants the buffer size in bytes, as an order */
	unsigned int buf_order = buf_nr_pages_order + PAGE_SHIFT;
	/*
	 * Allocate buffer pages
	 */
	buf1 = (void *)__get_free_pages(GFP_KERNEL, buf_nr_pages_order);
	if (!buf1) {
		ret = -ENOMEM;
		goto fail1;
	}
	buf2 = (void *)__get_free_pages(GFP_KERNEL, buf_nr_pages_order);
	if (!buf2) {
		ret = -ENOMEM;
		goto fail2;
	}
	/*
	 * Initialize them
	 */
	ret = fipc_prep_buffers(buf_order, buf1, buf2);
	if (ret)
		goto fail3;
	/*
	 * Allocate and initialize headers
	 */
	h1 = kmalloc(sizeof(*h1), GFP_KERNEL);
	if (!h1) {
		ret = -ENOMEM;
		goto fail4;
	}
	h2 = kmalloc(sizeof(*h2), GFP_KERNEL);
	if (!h2) {
		ret = -ENOMEM;
		goto fail5;
	}
	ret = fipc_ring_channel_init(h1, buf_order, buf1, buf2);
	if (ret)
		goto fail6;
	ret = fipc_ring_channel_init(h2, buf_order, buf2, buf1);
	if (ret)
		goto fail7;

	*header_1 = h1;
	*header_2 = h2;

	return 0;

fail7:
fail6:
	kfree(h2);
fail5:
	kfree(h1);
fail4:
fail3:
	/*
	 * Bug fix: this label used to free buf1 (which fail2 frees again
	 * below), double-freeing buf1 and leaking buf2 on any failure
	 * after both buffers were allocated.  It must free buf2.
	 */
	free_pages((unsigned long)buf2, buf_nr_pages_order);
fail2:
	free_pages((unsigned long)buf1, buf_nr_pages_order);
fail1:
	return ret;
}
/*
 * Tear down a channel pair built by test_fipc_create_channel: free
 * both message buffers and both headers.  buf_nr_pages_order must be
 * the same order that was passed at creation time.
 *
 * Note: header_1's tx buffer is header_2's rx buffer (and vice versa),
 * so freeing header_1's two buffers covers both channels.
 */
static inline void test_fipc_free_channel(unsigned int buf_nr_pages_order,
					struct fipc_ring_channel *header_1,
					struct fipc_ring_channel *header_2)
{
	/* Buffers first ... */
	free_pages((unsigned long)header_1->tx.buffer, buf_nr_pages_order);
	free_pages((unsigned long)header_1->rx.buffer, buf_nr_pages_order);
	/* ... then the headers themselves. */
	kfree(header_1);
	kfree(header_2);
}
/*
 * Busy-wait until a message is available on chnl (or a hard error
 * occurs).  Returns 0 with *out pointing at the received message, or
 * a negative errno other than -EWOULDBLOCK.
 */
static inline int test_fipc_blocking_recv_start(struct fipc_ring_channel *chnl,
						struct fipc_message **out)
{
	int ret;

	/* -EWOULDBLOCK means "nothing yet" - keep polling. */
	while ((ret = fipc_recv_msg_start(chnl, out)) == -EWOULDBLOCK)
		cpu_relax();

	return ret;
}
/*
 * Busy-wait until a free message slot is available on chnl (or a hard
 * error occurs).  Returns 0 with *out pointing at the slot to fill, or
 * a negative errno other than -EWOULDBLOCK.
 */
static inline int test_fipc_blocking_send_start(struct fipc_ring_channel *chnl,
						struct fipc_message **out)
{
	int ret;

	/* -EWOULDBLOCK means "ring is full" - keep polling. */
	while ((ret = fipc_send_msg_start(chnl, out)) == -EWOULDBLOCK)
		cpu_relax();

	return ret;
}
/*
 * Read the x86 time-stamp counter to start a timing interval.
 * Returns the full 64-bit cycle count.
 */
static inline unsigned long test_fipc_start_stopwatch(void)
{
	unsigned long stamp;
	/*
	 * Assumes x86
	 *
	 * rdtsc returns current cycle counter on cpu;
	 * low 32 bits in %rax, high 32 bits in %rdx.
	 *
	 * Note: We use rdtsc to start the stopwatch because it won't
	 * wait for prior instructions to complete (that we don't care
	 * about). It is not exact - meaning that instructions after
	 * it in program order may start executing before the read
	 * is completed (so we may slightly underestimate the time to
	 * execute the intervening instructions). But also note that
	 * the two subsequent move instructions are also counted against
	 * us (overestimate).
	 */
	/* Combine the two 32-bit halves into one 64-bit value in %rax. */
	asm volatile(
		"rdtsc\n\t"
		"shl $32, %%rdx\n\t"
		"or %%rdx, %%rax\n\t"
		: "=a" (stamp)
		:
		: "rdx");
	return stamp;
}
/*
 * Read the x86 time-stamp counter to end a timing interval.
 * Returns the full 64-bit cycle count.
 */
static inline unsigned long test_fipc_stop_stopwatch(void)
{
	unsigned long stamp;
	/*
	 * Assumes x86
	 *
	 * Unlike start_stopwatch, we want to wait until all prior
	 * instructions are done, so we use rdtscp. (We don't care
	 * about the tsc aux value.)
	 */
	/*
	 * rdtscp also writes the TSC_AUX value into %rcx, hence the
	 * extra "rcx" clobber compared to the start_stopwatch asm.
	 */
	asm volatile(
		"rdtscp\n\t"
		"shl $32, %%rdx\n\t"
		"or %%rdx, %%rax\n\t"
		: "=a" (stamp)
		:
		: "rdx", "rcx");
	return stamp;
}
#endif /* FIPC_KERNEL_TEST_HELPERS_H */
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment