Commit 45c40870 authored by Charlie Jacobsen's avatar Charlie Jacobsen
Browse files

lcd: First draft of libfipc interface.

parent 893457a8
/*
* libfipc.h
*
* Fast, asynchronous IPC library using shared
* memory.
*
* The main object here is a duplex, ring channel, used by two
* threads, or processes, etc., shown below:
*
* | |
* | Shared Memory |
* Thread 1 | | Thread 2
* -------- | | --------
* | |
* header | | header
* +----+ | +--------------------------+ | +-----+
* | rx ---------------->| memory buffer 1 | | | |
* | | | +---------------------------+ |<--------- tx |
* | tx ------------->| memory buffer 2 |-+ | | |
* | | | | |<----------- rx |
* +----+ | +---------------------------+ | +-----+
* | |
* | |
*
* There are a couple things to note here.
*
* 1 - The ring channel consists of two buffers in shared memory, and
* two distinct headers on each side. These headers *need not be* in
* a region of memory shared by the two threads.
*
* 2 - Memory buffer 1 is used by Thread 2 to send messages to Thread 1
* (Thread 2's tx points to memory buffer 1, and Thread 1's rx points
* to memory buffer 1.) Similarly, for memory buffer 2. So, data
* can flow both directions.
*
* Each header is a struct fipc_ring_channel. struct fipc_ring_channel
* consists of two struct fipc_ring_buf's -- one for each direction (tx/rx).
*
* The memory buffers are treated as circular buffers, whose slots are
* struct fipc_message's.
*
* memory buffer layout
* +--------------+--------------+--------------+--------------+---- ...
* | struct | struct | struct | struct |
* | fipc_message | fipc_message | fipc_message | fipc_message |
* +--------------+--------------+--------------+--------------+---- ...
*
* Ring Channel Initialization
* ---------------------------
*
* There are a few steps:
*
* 1 - Allocate the shared memory buffers
*
* 2 - Allocate the headers (struct fipc_ring_channel's). These
* can be statically allocated (e.g. global variables).
*
* 3 - Initialize the headers
*
* In a typical scenario, Thread 1 will allocate both memory buffers and
* share them with Thread 2 (how this is done depends on the environment).
* Thread 1 and Thread 2 will allocate their private headers, and initialize
* them to point to the allocated memory buffers. Here is how this looks
* for Thread 1 using the libfipc interface:
*
* --------
* Thread 1
* --------
*
* struct fipc_ring_channel t1_chnl_header;
*
* // Allocate shared memory buffers
* unsigned int buf_nr_pages_order = .. buffers are 2^buf_nr_pages_order pages each ..
* char *buffer_1 = ... alloc memory buffer 1 ...
* char *buffer_2 = ... alloc memory buffer 2 ...
*
* .... share buffers with Thread 2 ...
*
* // Initialize my struct fipc_ring_channel
* fipc_ring_channel_init(&t1_chnl_header, buf_nr_pages_order,
* buffer_1,
* buffer_2);
*
* Send/Receive
* ------------
*
* A typical send/receive sequence is as follows, assuming the headers
* have been initialized already:
*
* --------
* Thread 1
* --------
*
* struct fipc_message *msg;
* int ret;
*
* do {
*
* // Allocate a slot to put message in
* ret = fipc_send_msg_start(t1_chnl_header, &msg);
*
* } while (ret == -EWOULDBLOCK);
*
* msg->regs[0] = 1;
* msg->regs[1] = 2;
* ...
*
* // Mark message as sent (receiver will see status change)
* fipc_send_msg_end(t1_chnl_header, msg);
*
* --------
* Thread 2
* --------
*
* struct fipc_message *msg;
* int ret;
*
* do {
*
* // Wait for message to receive
* ret = fipc_recv_msg_start(t2_chnl_header, &msg);
*
* } while (ret == -EWOULDBLOCK);
*
* ... do something with msg ...
*
* // Mark message as received (sender will see slot as available)
* fipc_recv_msg_end(t2_chnl_header, msg);
*
* Ring Channel Tear down
* ----------------------
*
* When you're done using the ring channel (you should probably wait until
* both threads are done -- using some other mechanism), there is no tear
* down required. (You are the one responsible for allocating the headers
* and shared memory buffers, so you need to tear them down.)
*
*
* Authors: Anton Burtsev, Scotty Bauer
* Date: October 2011, February 2015
*
* Copyright: University of Utah
*/
#ifndef LIBFIPC_H
#define LIBFIPC_H
#include <libfipc_types.h>
/**
 * fipc_init -- Initialize libfipc.
 *
 * This should be invoked before any use of libfipc functions.
 *
 * Note: This is a no-op for now, but gives us a chance later to have
 * some init code if necessary (internal caches, whatever).
 *
 * Returns 0 on success, non-zero on failure. (For now, this never
 * fails, but check the return anyway in case init work is added later.)
 */
int fipc_init(void);
/**
 * fipc_fini -- Tear down libfipc.
 *
 * This should be invoked when finished using libfipc functions.
 * Pairs with fipc_init(); call it exactly once, after all channels
 * built on libfipc are no longer in use.
 */
void fipc_fini(void);
/**
 * fipc_ring_channel_init -- Initialize ring channel header with buffers
 * @chnl: the struct fipc_ring_channel to initialize
 * @buf_order: buffers are 2^buf_order bytes
 * @buffer_tx: buffer to use for tx (send) direction
 * @buffer_rx: buffer to use for rx (receive) direction
 *
 * NOTE(review): the unit of @buf_order is inconsistent in this header --
 * this comment says *bytes*, but struct fipc_ring_buf stores it as
 * nr_pages_order and computes the slot mask as
 * 2^nr_pages_order * PAGE_SIZE / sizeof(struct fipc_message), i.e.
 * *pages*. Confirm against the implementation which unit is meant.
 *
 * This function must be called before trying to do a send or receive
 * on the channel.
 *
 * The buffers are required to be at least sizeof(struct fipc_message)
 * bytes (at least one message should fit). (Note that because they are
 * a power of 2 number of bytes, they will automatically be a multiple
 * of sizeof(struct fipc_message).)
 *
 * Returns 0 on success, non-zero on failure.
 */
int fipc_ring_channel_init(struct fipc_ring_channel *chnl,
			   unsigned int buf_order,
			   void *buffer_tx, void *buffer_rx);
/**
 * fipc_send_msg_start -- Allocate a slot from tx buffer for sending
 * @chnl: the ring channel, whose tx we should allocate from
 * @msg: out param, the allocated slot
 *
 * Returns 0 and sets @msg when a slot was allocated.
 * If there are no free slots, returns -EWOULDBLOCK.
 *
 * IMPORTANT: If the sender fails to invoke fipc_send_msg_end, this could
 * introduce some delay and re-ordering of messages. (The slot will not
 * be marked as ready to receive, for the receiver to pick up.) So, make
 * sure the code in between start and end cannot fail.
 *
 * This function is thread safe -- if you are returned a message slot,
 * you know that no other thread got a hold of it.
 */
int fipc_send_msg_start(struct fipc_ring_channel *chnl,
			struct fipc_message **msg);
/**
 * fipc_send_msg_end -- Mark a message as ready for receipt from receiver
 * @chnl: the ring channel containing @msg in tx
 * @msg: the message we are sending
 *
 * The receiver learns of the send via the reserved msg_status field in
 * the slot itself (see struct fipc_message); no other signaling is done.
 *
 * Returns non-zero on failure. (For now, this never fails, but in case
 * failure is possible in the future, we provide for this possibility.)
 */
int fipc_send_msg_end(struct fipc_ring_channel *chnl,
		      struct fipc_message *msg);
/**
 * fipc_recv_msg_start -- Receive the next message from rx, if available
 * @chnl: the ring channel, whose rx we should receive from
 * @msg: out param, the received message
 *
 * Messages are received in increasing slot order (wrapping around when
 * we reach the end of the memory buffer). Internally, we maintain a
 * cursor to the next slot where we expect a message, and wait until the
 * sender puts one there. When the sender puts a message there, and a
 * thread on the receiving side receives the message (by invoking this
 * function), we increment the cursor by 1.
 *
 * XXX: This implies that if the sender screws up and doesn't send messages
 * in increasing slot order, the receiver will be stuck waiting. (This
 * can happen if a thread on the sending side allocates a slot to send
 * a message in, but doesn't mark the message as ready to be
 * received -- i.e., failing to call fipc_send_msg_end.)
 *
 * Returns 0 and sets @msg when a message was received.
 * If there are no messages to be received, returns -EWOULDBLOCK. (More
 * precisely, if the current slot under the cursor does not contain a
 * ready message, returns -EWOULDBLOCK.)
 *
 * IMPORTANT: If the caller fails to invoke fipc_recv_msg_end, the sender
 * will potentially block waiting for the slot to become free. So, make sure
 * your code cannot fail between start/end.
 *
 * This function is thread safe -- if you are returned a message,
 * you know that no other thread got a hold of it.
 */
int fipc_recv_msg_start(struct fipc_ring_channel *chnl,
			struct fipc_message **msg);
/**
 * fipc_recv_msg_if -- Like fipc_recv_msg_start, but conditioned on a predicate
 * @chnl: the ring channel, whose rx we should receive from
 * @pred: the condition under which we should receive a message
 * @data: context data to pass to @pred
 * @msg: out param, the received message
 *
 * This is like fipc_recv_msg_start, but if there is a message to be
 * received, libfipc will allow @pred to peek at the message to see if the
 * caller wants to receive it (by looking at values in the message).
 * libfipc will pass along @data to @pred, providing context.
 *
 * @pred should return non-zero to indicate the caller should receive the
 * message, and zero if no.
 *
 * NOTE(review): presumably this also returns -EWOULDBLOCK when @pred
 * declines the message (the cursor cannot advance past it) -- confirm
 * against the implementation.
 *
 * IMPORTANT: @pred should be simple, as it is executed inside of a
 * critical section.
 */
int fipc_recv_msg_if(struct fipc_ring_channel *chnl,
		     int (*pred)(struct fipc_message *, void *),
		     void *data,
		     struct fipc_message **msg);
/**
 * fipc_recv_msg_end -- Mark a message as received, so sender can re-use slot
 * @chnl: the ring channel containing @msg in rx
 * @msg: the message to mark as received
 *
 * This is the rx-side counterpart of fipc_send_msg_end: the slot's
 * msg_status is what tells the sender the slot is free again.
 *
 * Returns non-zero on failure. (For now, this never fails, but in case
 * failure is possible in the future, we provide for this possibility.)
 */
int fipc_recv_msg_end(struct fipc_ring_channel *chnl,
		      struct fipc_message *msg);
#endif /* LIBFIPC_H */
/*
* libfipc_types.h
*
* Struct definitions, etc. for libfipc library.
*
* Copyright: University of Utah
*/
#ifndef LIBFIPC_TYPES_H
#define LIBFIPC_TYPES_H
/**
 * struct fipc_message
 *
 * This is the data in each slot in an IPC ring buffer. It should fit
 * into one cache line. All fields are available for use, except
 * msg_status - this is reserved and is used internally by libfipc
 * to track the status of individual message slots in the IPC ring buffer.
 *
 * Size: 4 + 4 + 7 * sizeof(unsigned long) = 64 bytes when
 * sizeof(unsigned long) == 8 (LP64) -- one cache line on x86-64.
 * NOTE(review): on 32-bit targets this is 36 bytes, which is neither a
 * power of 2 nor cacheline-sized; see the XXX notes below.
 *
 * XXX: This probably needs to be arch-specific in order to fit in a
 * cache line.
 *
 * XXX: The size of the message must be a power of 2.
 */
#define FIPC_NR_REGS 7	/* number of general-purpose data words per message */
struct fipc_message {
	/**
	 * Reserved. Used internally to track message status.
	 * volatile: both sides poll this field through shared memory.
	 */
	volatile uint32_t msg_status;
	/**
	 * Not touched by libfipc.
	 */
	uint32_t flags;
	/**
	 * Not touched by libfipc.
	 */
	unsigned long regs[FIPC_NR_REGS];
};
/**
 * struct fipc_ring_buf
 *
 * This is the header (metadata) for an IPC ring buffer (circular
 * buffer). Each IPC ring buffer has a producer and consumer; each
 * will maintain its own struct fipc_ring_buf header. (Producers and consumers
 * communicate "where they are" in the IPC buffer using a special message
 * status field in the message slots in the buffer; see struct fipc_message
 * above.) The header itself need not live in shared memory.
 *
 * XXX: This definition may need to be arch-specific in general if we want
 * it to be cacheline-aligned.
 */
struct fipc_ring_buf {
	/**
	 * Where *I* am in the IPC buffer. (The other guy knows where I am
	 * by looking at message statuses.) Monotonically increasing;
	 * masked with order_two_mask to get the slot index.
	 */
	unsigned long slot;
	/**
	 * IPC ring buffer is 2^nr_pages_order pages
	 */
	unsigned long nr_pages_order;
	/**
	 * This is pre-computed so that we can quickly calculate the
	 * message slot index for slot allocations. It is given by:
	 *
	 * [2^nr_pages_order * PAGE_SIZE / sizeof(struct fipc_message)] - 1
	 *
	 * Notice that because struct fipc_message and PAGE_SIZE are powers
	 * of 2, this mask will be 2^x - 1 for some x.
	 */
	unsigned long order_two_mask;
	/**
	 * Pointer to the actual IPC buffer (the shared memory region)
	 */
	char *recs;
	/**
	 * Pad the struct up to cacheline size
	 * (4 x 8-byte words above + 32 = 64 bytes on LP64 --
	 * NOTE(review): confirm for 32-bit targets)
	 */
	uint8_t padding[32];
};
/**
 * struct fipc_ring_channel
 *
 * A full duplex IPC channel, made up of two, one-way IPC ring buffers,
 * @tx and @rx. Both ring buffer headers are embedded by value (not
 * pointed to), so the channel header is self-contained and can live in
 * private (non-shared) memory; only the buffers the headers' recs
 * fields point at must be shared.
 */
struct fipc_ring_channel {
	/**
	 * Header for our sending ring buffer (embedded, not a pointer).
	 */
	struct fipc_ring_buf tx;
	/**
	 * Header for our receiving ring buffer (embedded, not a pointer).
	 */
	struct fipc_ring_buf rx;
};
#endif /* LIBFIPC_TYPES_H */
......@@ -20,54 +20,54 @@
#include <linux/slab.h>
#include "ipc.h"
/*
 * Header (metadata) for one direction of a ring channel; the
 * predecessor of struct fipc_ring_buf above. The buffer pointed to by
 * @recs holds @size_in_pages pages of fixed-size records.
 */
struct ttd_buf {
	/* PRODUCER */
	unsigned long slot;		/* my current position in the buffer */
	unsigned long size_of_a_rec;	/* size of a single record */
	unsigned long order_two_mask;	/* slot-index mask; presumably
					 * (record capacity - 1) -- TODO confirm */
	unsigned long size_in_pages;
	char *recs;			/* pointer to buffer data areas */
	uint8_t padding[16];		/* pad the struct up to cache line size */
};
/* struct ttd_buf { */
/* /\* PRODUCER *\/ */
/* unsigned long slot; */
/* unsigned long size_of_a_rec; /\* size of a single record *\/ */
/* unsigned long order_two_mask; */
/* unsigned long size_in_pages; */
/* char *recs; /\* pointer to buffer data areas *\/ */
/* uint8_t padding[16]; /\* pad the struct up to cache line size *\/ */
/* }; */
//from ipc.h
struct ipc_message;	/* forward decl; full definition lives in ipc.h */
/*
 * Full duplex ring channel: one embedded ttd_buf header per direction,
 * plus an optional per-channel message dispatch callback.
 */
struct ttd_ring_channel {
	struct ttd_buf tx;	/* sending direction */
	struct ttd_buf rx;	/* receiving direction */
	/* TODO NECESSARY? */
	int (*dispatch_fn)(struct ttd_ring_channel*, struct ipc_message*);
	uint8_t padding[56];	/* pad the struct to cacheline size */
};
/*
 * A set of ring channels serviced together, e.g. polled by a single
 * kernel thread (@thread). @chans is a kzalloc'd array of
 * @chans_length channel pointers; see channel_group_alloc/free below.
 */
struct ttd_ring_channel_group
{
	struct ttd_ring_channel **chans;
	size_t chans_length;
	struct task_struct *thread;
};
/**
 * channel_group_alloc -- Allocate the channel pointer array for a group
 * @channel_group: the group whose @chans array should be allocated
 * @chans_length: number of channel pointers the array should hold
 *
 * On success, @chans points to a zeroed array and @chans_length is set.
 * On allocation failure, logs an error and leaves the group in a
 * well-defined empty state (@chans == NULL, @chans_length == 0) so that
 * callers and channel_group_free() never act on stale/uninitialized
 * fields. (The original left the group untouched on failure.)
 */
static inline void channel_group_alloc(struct ttd_ring_channel_group *channel_group,
				       size_t chans_length)
{
	struct ttd_ring_channel **chans_arr;

	/* kzalloc returns void *, so no cast is needed in C */
	chans_arr = kzalloc(sizeof(*chans_arr) * chans_length, GFP_KERNEL);
	if (!chans_arr) {
		pr_err("could not allocate memory for ring channel group\n");
		channel_group->chans = NULL;
		channel_group->chans_length = 0;
		return;
	}
	channel_group->chans = chans_arr;
	channel_group->chans_length = chans_length;
}
static inline void channel_group_free(struct ttd_ring_channel_group* channel_group)
{
kfree(channel_group->chans);
}
/* struct ipc_message; */
/* struct ttd_ring_channel { */
/* struct ttd_buf tx; */
/* struct ttd_buf rx; */
/* /\* TODO NECESSARY? *\/ */
/* int (*dispatch_fn)(struct ttd_ring_channel*, struct ipc_message*); */
/* uint8_t padding[56]; /\* pad the struct to cacheline size *\/ */
/* }; */
/* struct ttd_ring_channel_group */
/* { */
/* struct ttd_ring_channel **chans; */
/* size_t chans_length; */
/* struct task_struct *thread; */
/* }; */
/* static inline void channel_group_alloc(struct ttd_ring_channel_group* channel_group, size_t chans_length) */
/* { */
/* struct ttd_ring_channel **chans_arr = (struct ttd_ring_channel **)kzalloc( */
/* sizeof(struct ttd_ring_channel*)*chans_length, */
/* GFP_KERNEL); */
/* if( !chans_arr ) */
/* { */
/* pr_err("could not allocate memory for ring channel group\n"); */
/* return; */
/* } */
/* channel_group->chans = chans_arr; */
/* channel_group->chans_length = chans_length; */
/* } */
/* static inline void channel_group_free(struct ttd_ring_channel_group* channel_group) */
/* { */
/* kfree(channel_group->chans); */
/* } */
static inline void ttd_ring_channel_init(struct ttd_ring_channel *ring_channel)
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment