Commit 6404e62e, authored by Anton Burtsev, committed by Vikram Narayanan

Re-implemented ipc send and receive functions

parent 0b4401a9
......@@ -13,15 +13,15 @@
#include <linux/delay.h>
#include <linux/kfifo.h>
#include <asm/page.h>
#include <uapi/linux/lcd-cap.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR ("FLUX-LAB University of Utah");
MODULE_AUTHOR ("Flux Research Group, University of Utah");
/* XXX: some temporary crap from Jithu to test IPC */
void * get_cap_obj(u32 cap_id);
typedef uint32_t lcd_cnode; // a pointer to the cnode
typedef uint64_t capability_t ; // a locally unique identifier (address within cspace)
typedef uint32_t lcd_cnode_entry; // a pointer to an entry within a cnode
typedef uint64_t lcd_tcb; // a pointer/handle to the thread contrl block
typedef uint16_t lcd_cap_rights; // holds the rights associated with a capability.
......
#ifndef HOST_IPC_H
#define HOST_IPC_H
#include <uapi/linux/lcd-ipc.h>
#include <lcd/cap.h>
#include "ipc_common_defs.h"
/* States of a task engaged in synchronous IPC (used by the older
 * sync_ipc scheme; see struct sync_ipc below). */
enum ipc_state {
IPC_DONT_CARE = 0, /* no rendezvous in progress */
IPC_RCV_WAIT = 1,  /* blocked in recv, waiting for a matching sender */
IPC_SND_WAIT = 2,  /* blocked in send, waiting for a matching receiver */
IPC_RUNNING = 3,   /* not blocked; NOTE(review): only referenced in commented-out code */
};
/* Stack-allocated queue node a sender parks on a peer's snd_q while it
 * sleeps waiting for the rendezvous (old ipc_send path). */
struct ipc_wait_list_elem {
u32 peer; /* capability id of the sleeping sender itself (set from 'myself') */
struct list_head list; /* linkage into sync_ipc.snd_q */
struct task_struct *task; /* task to wake_up_process() when the receiver matches */
};
/*
 * Per-endpoint synchronous-IPC state.  The re-implemented ipc_send/ipc_recv
 * use only 'senders', 'receivers' and 'lock'; the remaining fields belong
 * to the older scheme and appear to be kept only during the rewrite.
 */
struct sync_ipc {
// either we put an explicit capid here
// so that given the capid we can fetch
// the peers sync_ipc or lcd_struct
u32 state; /* enum ipc_state of this endpoint (old scheme) */
u32 my_capid; /* capability id naming this endpoint (old scheme) */
//u32 dir;
u32 expected_sender; /* capid a receiver in IPC_RCV_WAIT is blocked on (old scheme) */
// void *waiting_on; -> this might not be reqd as we are modelling spl states
//struct lcd_struct *lcd_mine;
//struct lcd_struct *lcd_partner;
// some waitq
spinlock_t snd_lock;
u32 snd_sleepers; /* count of senders parked on snd_q (old scheme) */
struct list_head snd_q; /* queue of struct ipc_wait_list_elem (old scheme) */
struct task_struct *task; /* task to wake on rendezvous (old scheme) */
//spinlock_t rcv_lock;
// struct list_head rcv_q;
struct list_head senders; /* tasks blocked in ipc_send, linked via task->sync_rendezvous */
struct list_head receivers; /* tasks blocked in ipc_recv, linked via task->sync_rendezvous */
spinlock_t lock; /* protects the senders/receivers lists */
};
struct lcd_message_info {
unsigned char regs;
unsigned char cap_regs;
/* Per thread kernel stack unified on a single page. */
union utcb_union {
struct utcb utcb;
char kstack[PAGE_SIZE];
};
//headers used by host for ipc
//int ipc_send(u32 myself, u32 recv_capid);
int ipc_send(capability_t cap, struct lcd_message_info *msg);
int ipc_recv(u32 myself, u32 send_capid);
void display_mr(utcb_t *p_utcb);
int ipc_send(capability_t cap, struct message_info *msg);
int ipc_recv(capability_t rvp_cap, struct message_info *msg);
#endif
#ifndef LCD_IPC_GUEST_DEFS_H
#define LCD_IPC_GUEST_DEFS_H
/* Guest-side UTCB: 32-bit message registers plus save slots and irq
 * notification words.  Overlaid on the kernel stack page via union
 * utcb_union below. */
typedef struct {
u32 mr[6]; /* MRs that are mapped to real registers */
u32 saved_tag; /* Saved tag field for stacked ipcs */
u32 saved_sender; /* Saved sender field for stacked ipcs */
u32 notify[8]; /* Irq notification slots */
u32 mr_rest[2]; /* Complete the utcb for up to 64 words */
} utcb_t;
/* Flat, individually-named view of the six register-mapped message
 * registers; presumably mirrors utcb_t.mr[6] above — confirm intended
 * correspondence. */
typedef struct {
uint mr0;
uint mr1;
uint mr2;
uint mr3;
uint mr4;
uint mr5;
} msg_regs_t;
//#define IPC_SEND_SND 1
//#define IPC_SEND_RCV 2
//#define IPC_SEND_SND_RCV 3
/*
 * A 64-bit send/receive descriptor packs the transfer direction
 * (enum IPC_DIR) into the upper 32 bits and the peer id into the
 * lower 32 bits.  IPC_PEER_ANY is the wildcard peer id.
 *
 * Macro arguments are fully parenthesized so expansions such as
 * LCD_IPC_PEER(a | b) keep the intended precedence ('|' binds looser
 * than '&', '>>' binds looser than '+').
 */
#define IPC_PEER_ANY 0xffffffff
#define LCD_IPC_DIR(x) ((x) >> 32)
#define LCD_IPC_PEER(x) ((x) & 0xffffffff)
/* Direction of an IPC operation; extracted from the upper 32 bits of
 * the descriptor via LCD_IPC_DIR(). */
enum IPC_DIR {
IPC_INVALID = 0,
IPC_SEND = 1, /* send only */
IPC_RECV = 2, /* receive only */
IPC_SENDRECV = 3, /* combined send then receive */
};
/* Per-thread kernel stack unified on a single page: the utcb shares
 * the thread's PAGE_SIZE kernel-stack page (the two members overlap). */
union utcb_union {
utcb_t utcb;
char kstack[PAGE_SIZE];
};
#endif
......@@ -76,7 +76,6 @@ struct lcd {
struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
} msr_autoload;
struct sync_ipc sync_ipc;
struct vmcs *vmcs;
void *shared;
......
......@@ -15,6 +15,14 @@
#include <net/net_namespace.h>
#include <linux/sched/rt.h>
#ifdef CONFIG_LCD
#define INIT_LCD(tsk) \
.sync_rendezvous = LIST_HEAD_INIT(tsk.sync_rendezvous), \
.utcb = NULL,
#else
#define INIT_LCD(tsk)
#endif
#ifdef CONFIG_SMP
# define INIT_PUSHABLE_TASKS(tsk) \
.pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO),
......
......@@ -62,6 +62,7 @@ struct sched_param {
#ifdef CONFIG_LCD
#include <lcd/cap.h>
#include <lcd/ipc.h>
#endif
#include <asm/processor.h>
......@@ -1469,6 +1470,8 @@ struct task_struct {
unsigned int ptrace;
#ifdef CONFIG_LCD
struct cap_space cspace;
struct list_head sync_rendezvous;
struct utcb *utcb;
#endif
#ifdef CONFIG_SMP
struct llist_node wake_entry;
......
/*
* lcd.h - interface to LCD domains
*
* Author: Anton Burtsev
* Copyright: University of Utah
*/
#ifndef __LINUX_PUBLIC_LCD_CAP_H__
#define __LINUX_PUBLIC_LCD_CAP_H__
/* A capability: a locally unique identifier (an address within a cspace). */
typedef uint64_t capability_t;
#endif
/*
* lcd.h - interface to LCD domains
*
* Author: Anton Burtsev
* Copyright: University of Utah
*/
#ifndef __LINUX_PUBLIC_LCD_IPC_H__
#define __LINUX_PUBLIC_LCD_IPC_H__
#include <linux/lcd-cap.h>
/*
 * Message registers exchanged on a synchronous IPC rendezvous.
 * ipc_send/ipc_recv copy 'valid_regs' entries of regs[] from the
 * sender's utcb into the receiver's; capability transfer via
 * cap_regs[] is still a TODO in ipc.c.
 */
struct message_info {
u64 regs[8]; /* general-purpose message registers */
capability_t cap_regs[8]; /* capabilities to grant to the receiver */
u8 valid_regs; /* number of valid entries in regs[] */
u8 valid_cap_regs; /* number of valid entries in cap_regs[] */
};
/* Per-thread user-level thread control block; reached from a task via
 * task_struct->utcb and used as the IPC message buffer. */
struct utcb {
struct message_info msg_info;
};
#endif
......@@ -82,7 +82,7 @@ bool lcd_cap_delete_internal(struct cte *cap, bool *last_reference)
struct cte *node;
bool done = false;
BUG_ON(cap != NULL);
BUG_ON(cap == NULL);
if(last_reference == NULL)
return false;
......@@ -116,7 +116,7 @@ uint32_t lcd_cap_delete_capability(struct cap_space *cspace, capability_t cid)
bool last_reference = false;
struct semaphore *sem_cdt_backup;
BUG_ON(cid);
BUG_ON(cid == 0);
cap = lcd_cap_lookup_capability(cspace, cid, true);
if (cap == NULL)
......@@ -145,7 +145,7 @@ uint32_t lcd_cap_revoke_capability(struct cap_space *cspace, capability_t cid)
int size = sizeof(struct cte);
bool dummy = false, last_reference = false;
BUG_ON(cid);
BUG_ON(cid == 0);
if (kfifo_alloc(&cap_q, sizeof(struct cte) * 512, GFP_KERNEL) != 0)
{
......@@ -156,7 +156,7 @@ uint32_t lcd_cap_revoke_capability(struct cap_space *cspace, capability_t cid)
cap = lcd_cap_lookup_capability(cspace, cid, true);
if (cap == NULL)
{
LCD_PANIC("lcd_cap_delete_capability: Capability not found\n");
LCD_PANIC("lcd_cap_revoke_capability: Capability not found\n");
return -1;
}
cdt = cap->cap.cdt_node;
......@@ -200,7 +200,7 @@ void lcd_cap_destroy_cspace(struct cap_space *cspace)
struct semaphore *sem_cdt_backup;
bool cspace_locked = false, table_visited = false, last_reference = false;
BUG_ON(cspace);
BUG_ON(cspace == NULL);
if (kfifo_alloc(&cnode_q, sizeof(struct cte) * 512, GFP_KERNEL) != 0)
{
......@@ -312,7 +312,7 @@ capability_t lcd_cap_grant_capability(struct cap_space *src_space, capability_t
bool done = false;
struct cap_derivation_tree *dst_cdt_node;
BUG_ON(src_space != NULL && dst_space != NULL && src_cid != 0);
BUG_ON(src_space == NULL || dst_space == NULL || src_cid == 0);
dst_cdt_node = kmalloc(sizeof(struct cap_derivation_tree), GFP_KERNEL);
if(!dst_cdt_node) {
......@@ -405,7 +405,7 @@ uint32_t lcd_cap_get_rights(struct cap_space *cspace, capability_t cid, lcd_cap_
{
struct cte *cap;
BUG_ON(cspace && cid != 0 && rights);
BUG_ON(cspace == NULL || cid == 0 || rights == NULL);
cap = lcd_cap_lookup_capability(cspace, cid, false);
if (cap == NULL || cap->ctetype != lcd_type_capability)
......@@ -427,12 +427,12 @@ struct cte * lcd_cap_lookup_capability(struct cap_space *cspace, capability_t ci
int index = 0;
int mask = (~0);
BUG_ON(cspace && cid != 0);
BUG_ON(cspace == NULL || cid == 0);
mask = mask << (CNODE_INDEX_BITS);
mask = ~mask;
BUG_ON(cspace->root_cnode.cnode.table);
BUG_ON(cspace->root_cnode.cnode.table == NULL);
node = cspace->root_cnode.cnode.table;
......@@ -479,7 +479,7 @@ capability_t lcd_cap_create_capability(struct cap_space *cspace, void * hobject,
capability_t cid;
struct cap_derivation_tree *cdtnode;
BUG_ON(cspace && cspace->root_cnode.cnode.table);
BUG_ON(cspace == NULL || cspace->root_cnode.cnode.table == NULL);
cdtnode = kmalloc(sizeof(struct cap_derivation_tree), GFP_KERNEL);
if (cdtnode == NULL)
......@@ -641,7 +641,7 @@ bool lcd_cap_initialize_freelist(struct cap_space *cspace, struct cte *cnode, bo
int startid = 1;
int i;
BUG_ON(cnode && cspace);
BUG_ON(cnode == NULL || cspace == NULL);
if (bFirstCNode)
{
......@@ -681,7 +681,7 @@ bool lcd_cap_initialize_freelist(struct cap_space *cspace, struct cte *cnode, bo
struct cte * lcd_cap_reserve_slot(struct cte *cnode, capability_t *cid, int free_slot)
{
struct cte *node = cnode->cnode.table;
BUG_ON(node[free_slot].ctetype == lcd_type_free);
BUG_ON(node[free_slot].ctetype != lcd_type_free);
// a valid empty slot
node[0].slot.next_free_cap_slot = node[free_slot].slot.next_free_cap_slot;
lcd_set_bits_at_level(cnode, cid, free_slot);
......@@ -702,7 +702,7 @@ capability_t lcd_cap_lookup_freeslot(struct cap_space *cspace, struct cte **cap)
int size = sizeof(struct cte);
struct kfifo cnode_q;
BUG_ON(cspace && cspace->root_cnode.cnode.table && cap);
BUG_ON(cspace == NULL || cspace->root_cnode.cnode.table == NULL || cap == NULL);
if (kfifo_alloc(&cnode_q, sizeof(struct cte) * 512, GFP_KERNEL) != 0)
{
......@@ -798,7 +798,7 @@ capability_t lcd_cap_mint_capability(struct cap_space *cspace, capability_t cid,
struct cap_derivation_tree *dst_cdt;
bool done = false;
BUG_ON(cid);
BUG_ON(cid == 0);
dst_cdt = kmalloc(sizeof(struct cap_derivation_tree), GFP_KERNEL);
if (dst_cdt == NULL)
......
......@@ -7,130 +7,127 @@
#include <lcd/cap.h>
#include <lcd/lcd.h>
//#include "lcd_defs.h"
int ipc_send(capability_t rvp_cap, struct message_info *msg)
{
struct task_struct *recv_task;
struct sync_ipc *sync_ipc;
struct cte *rvp_cte;
unsigned long flags;
/*
 * display_mr - debug helper: dump a few message registers of a utcb.
 * @p_utcb: guest-side utcb whose mr[] slots are printed.
 *
 * Fix: mr[] entries are u32, so print them with %u instead of %d
 * (the old format treated them as signed).
 *
 * NOTE(review): this prints mr[0], mr[1] and mr[3] — mr[2] is skipped;
 * that looks like a typo for mr[2], confirm intent before changing.
 */
void display_mr(utcb_t *p_utcb) {
printk(KERN_ERR "Message Regs at utcb %p - %u ,%u , %u\n", p_utcb, p_utcb->mr[0], p_utcb->mr[1], p_utcb->mr[3]);
}
printk(KERN_ERR "ipc_send:%s: sending on cap %lld\n", current->comm, rvp_cap);
rvp_cte = lcd_cap_lookup_capability(&current->cspace, rvp_cap, true);
if (rvp_cte == NULL) {
printk(KERN_ERR "ipc_send: can't resolve rendezvous capabilty: %lld\n", rvp_cap);
return -EINVAL;
}
//int ipc_send(capability_t recv, struct lcd_message *msg) {
//
//};
sync_ipc = (struct sync_ipc *) rvp_cte->cap.hobject;
BUG_ON(!sync_ipc);
// XXX: BU: Maybe I need to do some reference counting for IPC
// objects here (before releasing the lock)
up(rvp_cte->cap.cdt_node->sem_cdt);
spin_lock_irqsave(&sync_ipc->lock, flags);
if (list_empty(&sync_ipc->receivers)) {
int ipc_send(capability_t cap, struct lcd_message_info *msg)
{
struct lcd *recv_lcd, *snd_lcd;
struct ipc_wait_list_elem stack_elem;
#if 0
printk(KERN_ERR "ipc_send : myself %p re %d\n", current->utcb, cap);
//chk if the reciever is ready
// fetch the reciever task struct from if
recv_lcd = (struct lcd *) get_cap_obj(recv);
if (recv_lcd == NULL) {
printk(KERN_ERR "ipc_send : Cant get object for reciever %d\n", recv_capid);
return -1;
}
set_current_state(TASK_INTERRUPTIBLE);
list_add_tail(&current->sync_rendezvous, &sync_ipc->senders);
printk(KERN_ERR "ipc_send:%s: putting myself to sleep\n", current->comm);
spin_unlock_irqrestore(&sync_ipc->lock, flags);
schedule();
printk(KERN_ERR "ipc_send: somone woke me up\n");
return 0;
snd_lcd = (struct lcd *) get_cap_obj(myself);
if (snd_lcd == NULL) {
printk(KERN_ERR "ipc_send : Cant get object for myself %d\n", myself);
return -1;
}
if (recv_lcd->sync_ipc.state == IPC_RCV_WAIT && \
recv_lcd->sync_ipc.expected_sender == myself) {
printk(KERN_ERR "ipc_send : partner %d expecting me\n", recv_capid);
//copy the message registers
memcpy(recv_lcd->shared, snd_lcd->shared, sizeof(utcb_t));
//awaken the thread
wake_up_process(recv_lcd->sync_ipc.task);
//looks like there is no need for a reciever queue
//as if a process invokes a recv and finds no
//corresponding senders , then it puts itself to sleep
recv_lcd->sync_ipc.state = IPC_DONT_CARE;
recv_lcd->sync_ipc.expected_sender = 0;
//No case of
} else {
// put him in the Q
recv_lcd->sync_ipc.snd_sleepers++;
set_current_state(TASK_INTERRUPTIBLE);
stack_elem.peer = myself;
stack_elem.task = current;
// recv_lcd->sync_ipc.status = IPC_SND_WAIT;
list_add_tail(&stack_elem.list, &recv_lcd->sync_ipc.snd_q);
printk(KERN_ERR "ipc_send : putting myself to sleep %p\n", current);
recv_task = list_first_entry(&sync_ipc->receivers,
struct task_struct,
sync_rendezvous);
schedule();
list_del(&recv_task->sync_rendezvous);
spin_unlock_irqrestore(&sync_ipc->lock, flags);
printk(KERN_ERR "ipc_send: found other end %s\n", recv_task->comm);
// copy the message registers
// XXX: BU: maybe MIN(of valid_regs)?
memcpy(recv_task->utcb->msg_info.regs,
current->utcb->msg_info.regs,
sizeof(uint64_t)*recv_task->utcb->msg_info.valid_regs);
}
#endif
printk(KERN_ERR "ipc_send : Finished\n");
// BU: TODO: transfer capabilities
wake_up_process(recv_task);
printk(KERN_ERR "ipc_send: finished\n");
return 0;
}
EXPORT_SYMBOL(ipc_send);
int ipc_recv(u32 myself, u32 send_capid)
int ipc_recv(capability_t rvp_cap, struct message_info *msg)
{
struct lcd *recv_lcd, *snd_lcd;
struct list_head *ptr;
struct ipc_wait_list_elem *entry;
struct task_struct *send_task;
struct sync_ipc *sync_ipc;
struct cte *rvp_cte;
unsigned long flags;
printk(KERN_ERR "ipc_recv : myself %d sender %d\n", myself, send_capid);
recv_lcd = (struct lcd *) get_cap_obj(myself);
if (recv_lcd == NULL) {
printk(KERN_ERR "ipc_recv : Cant get object for my id %d\n", myself);
return -1;
printk(KERN_ERR "ipc_recv:%s: receiving on cap %lld\n", current->comm, rvp_cap);
rvp_cte = lcd_cap_lookup_capability(&current->cspace, rvp_cap, true);
if (rvp_cte == NULL) {
printk(KERN_ERR "ipc_recv: can't resolve capability: %lld\n", rvp_cap);
return -EINVAL;
}
snd_lcd = (struct lcd *) get_cap_obj(send_capid);
if (snd_lcd == NULL) {
printk(KERN_ERR "ipc_recv : Cant get object for peer id %d\n", send_capid);
//return -1;
}
sync_ipc = (struct sync_ipc *) rvp_cte->cap.hobject;
BUG_ON(!sync_ipc);
//check if one of the senders in the snd q is our intended
// recipient
if (recv_lcd->sync_ipc.snd_sleepers > 0) {
printk(KERN_ERR "ipc_recv : Num of senders in Q %d \n", \
recv_lcd->sync_ipc.snd_sleepers);
list_for_each(ptr, &recv_lcd->sync_ipc.snd_q) {
entry = list_entry(ptr, struct ipc_wait_list_elem, list);
if (entry->peer == send_capid) {
printk(KERN_ERR "ipc_recv : Found expected sender %d\n", send_capid);
recv_lcd->sync_ipc.snd_sleepers--;
//copy the message registers
memcpy(recv_lcd->shared, snd_lcd->shared, sizeof(utcb_t));
//remove the entry
list_del(ptr);
//wakeup
wake_up_process(entry->task);
// we dont care for state in snd_wait
//recv_lcd->sync_ipc.status = IPC_RUNNING;
printk(KERN_ERR "ipc_recv : Returning after waking up sender\n");
return 0;
}
}
// XXX: BU: Maybe I need to do some reference counting for IPC
// objects here (before releasing the lock)
up(rvp_cte->cap.cdt_node->sem_cdt);
spin_lock_irqsave(&sync_ipc->lock, flags);
if (list_empty(&sync_ipc->senders)) {
set_current_state(TASK_INTERRUPTIBLE);
list_add_tail(&current->sync_rendezvous, &sync_ipc->receivers);
printk(KERN_ERR "ipc_recv:%s: putting myself to sleep\n", current->comm);
spin_unlock_irqrestore(&sync_ipc->lock, flags);
schedule();
printk(KERN_ERR "ipc_recv: somone woke me up\n");
return 0;
}
printk(KERN_ERR "ipc_recv : Scheduling out myself\n");
// we cant proceed further
recv_lcd->sync_ipc.state = IPC_RCV_WAIT ;
recv_lcd->sync_ipc.expected_sender = send_capid;
set_current_state(TASK_INTERRUPTIBLE);
schedule();
printk(KERN_ERR "ipc_recv : Somebody woke me\n");
send_task = list_first_entry(&sync_ipc->senders,
struct task_struct,
sync_rendezvous);
list_del(&send_task->sync_rendezvous);
spin_unlock_irqrestore(&sync_ipc->lock, flags);
printk(KERN_ERR "ipc_send: other end %s\n", send_task->comm);
// copy the message registers
// XXX: BU: maybe MIN(of valid_regs)?
memcpy(current->utcb->msg_info.regs,
send_task->utcb->msg_info.regs,
sizeof(uint64_t)*send_task->utcb->msg_info.valid_regs);
// BU: TODO: transfer capabilities
wake_up_process(send_task);
printk(KERN_ERR "ipc_recv: finished\n");
return 0;
}
EXPORT_SYMBOL(ipc_recv);
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment