Commit 7625fcd9 authored by Vikram Narayanan's avatar Vikram Narayanan

test_mods/nullnet: Add new APIs for single crossing

parent 6083097b
......@@ -31,11 +31,6 @@ int dispatch_async_loop(struct thc_channel *channel,
/* wait until uninit is called */
dummy_done = 1;
return -1;
case NDO_START_XMIT:
// trace(NDO_START_XMIT);
/* XXX: xmit never reaches the dispatch loop */
return ndo_start_xmit_async_bare_callee(message,
channel, cspace, sync_ep);
case NDO_VALIDATE_ADDR:
trace(NDO_VALIDATE_ADDR);
return ndo_validate_addr_callee(message,
......
......@@ -677,10 +677,7 @@ int prep_xmit_channels_lcd(void)
LIBLCD_ERR("async channel creation failed\n");
#elif NUM_LCDS == 4
if (current_lcd_id < 2)
node_id = 0;
else
node_id = 1;
node_id = current_lcd_id;
if (create_one_async_channel_on_node(node_id, &xmit, &tx[i], &rx[i]))
LIBLCD_ERR("async channel creation failed\n");
#elif NUM_LCDS == 6
......@@ -1412,7 +1409,70 @@ int ndo_start_xmit_bare_callee(struct fipc_message *_request, struct thc_channel
return 0;
}
int ndo_start_xmit_noawe_callee(struct fipc_message *_request, struct thc_channel *channel, struct glue_cspace *cspace, struct cptr sync_ep)
/*
 * Single-crossing ("1c") synchronous xmit callee.
 *
 * Decodes an NDO_START_XMIT request from the fipc ring, rebuilds a
 * stack-local sk_buff whose data aliases the shared data pool, and sends
 * a one-register status response back on the same channel.
 *
 * cspace and sync_ep are part of the common callee signature but are not
 * used on this path.  Returns NETDEV_TX_OK on success, -EIO if no
 * response slot could be obtained.
 */
int ndo_start_xmit_noasync_1c_callee(struct fipc_message *_request, struct thc_channel *channel, struct glue_cspace *cspace, struct cptr sync_ep)
{
/* skb container lives on the stack: no allocation on the fast path */
struct lcd_sk_buff_container static_skb_c;
struct lcd_sk_buff_container *skb_c = &static_skb_c;
struct sk_buff *skb = &skb_c->skbuff;
struct fipc_message *response;
int ret = NETDEV_TX_OK;
#ifdef COPY
struct skbuff_members *skb_lcd;
#endif
unsigned long skbh_offset, skb_end;
__be16 proto;
u32 len;
cptr_t skb_ref;
/*
 * Unmarshal all request registers BEFORE fipc_recv_msg_end releases
 * the message slot.  skb_ref and proto are decoded but not otherwise
 * used on this path.
 */
skb_ref = __cptr(fipc_get_reg2(_request));
skbh_offset = fipc_get_reg3(_request);
skb_end = fipc_get_reg4(_request);
proto = fipc_get_reg5(_request);
len = fipc_get_reg6(_request);
/* done with the request message; _request must not be touched after this */
fipc_recv_msg_end(thc_channel_to_fipc(channel),
_request);
/* rebuild the skb: head points into the shared pool at the
 * sender-provided offset */
skb->head = (char*)data_pool + skbh_offset;
skb->end = skb_end;
skb->len = len;
/* presumably marks head as externally owned (pool memory, not to be
 * freed with the skb) — TODO confirm against lcd sk_buff definition */
skb->private = true;
#ifdef COPY
/* copy the caller-side skb fields staged in the shared region */
skb_lcd = SKB_LCD_MEMBERS(skb);
P(len);
P(data_len);
P(queue_mapping);
P(xmit_more);
P(tail);
P(truesize);
P(ip_summed);
P(csum_start);
P(network_header);
P(csum_offset);
P(transport_header);
skb->data = skb->head + skb_lcd->head_data_off;
#endif
skb_c->chnl = channel;
if (async_msg_blocking_send_start(channel, &response)) {
LIBLCD_ERR("error getting response msg");
return -EIO;
}
/* reply with the xmit status (always NETDEV_TX_OK here) */
fipc_set_reg1(response, ret);
thc_set_msg_type(response, msg_type_response);
fipc_send_msg_end(thc_channel_to_fipc(channel), response);
//printk("%s, response sent! chnl: %p", __func__, channel);
return ret;
}
int ndo_start_xmit_noasync_callee(struct fipc_message *_request, struct thc_channel *channel, struct glue_cspace *cspace, struct cptr sync_ep)
{
struct lcd_sk_buff_container static_skb_c;
struct lcd_sk_buff_container *skb_c = &static_skb_c;
......@@ -1477,11 +1537,84 @@ int ndo_start_xmit_noawe_callee(struct fipc_message *_request, struct thc_channe
return ret;
}
#define MARSHAL
/* xmit_callee for async. This function receives the IPC and
* sends back a response
*/
int ndo_start_xmit_async_bare_callee(struct fipc_message *_request, struct thc_channel *channel, struct glue_cspace *cspace, struct cptr sync_ep)
int ndo_start_xmit_async_1c_callee(struct fipc_message *_request, struct thc_channel *channel, struct glue_cspace *cspace, struct cptr sync_ep)
{
struct fipc_message *response;
unsigned int request_cookie;
#ifdef MARSHAL
struct lcd_sk_buff_container static_skb_c;
struct lcd_sk_buff_container *skb_c = &static_skb_c;
struct sk_buff *skb = &skb_c->skbuff;
#endif
#ifdef COPY
struct skbuff_members *skb_lcd;
#endif
#ifdef MARSHAL
unsigned long skbh_offset, skb_end;
__be16 proto;
u32 len;
cptr_t skb_ref;
#endif
request_cookie = thc_get_request_cookie(_request);
#ifdef MARSHAL
skb_ref = __cptr(fipc_get_reg2(_request));
skbh_offset = fipc_get_reg3(_request);
skb_end = fipc_get_reg4(_request);
proto = fipc_get_reg5(_request);
len = fipc_get_reg6(_request);
#endif
fipc_recv_msg_end(thc_channel_to_fipc(channel),
_request);
#ifdef MARSHAL
skb->head = (char*)data_pool + skbh_offset;
skb->end = skb_end;
skb->len = len;
skb->private = true;
#endif
#ifdef COPY
skb_lcd = SKB_LCD_MEMBERS(skb);
P(len);
P(data_len);
P(queue_mapping);
P(xmit_more);
P(tail);
P(truesize);
P(ip_summed);
P(csum_start);
P(network_header);
P(csum_offset);
P(transport_header);
skb->data = skb->head + skb_lcd->head_data_off;
#endif
#ifdef MARSHAL
skb_c->chnl = channel;
skb_c->cookie = request_cookie;
#endif
if (async_msg_blocking_send_start(channel, &response)) {
LIBLCD_ERR("error getting response msg");
return -EIO;
}
return thc_ipc_reply(channel, request_cookie, response);
}
/* xmit_callee for async. This function receives the IPC and
* sends back a response
*/
int ndo_start_xmit_async_callee(struct fipc_message *_request, struct thc_channel *channel, struct glue_cspace *cspace, struct cptr sync_ep)
{
struct fipc_message *response;
unsigned int request_cookie;
......
......@@ -133,14 +133,14 @@ static void main_and_loop(void)
//printk("%s, got msg on xmit ch: %p", __func__, curr_item->channel);
if (fipc_get_reg0(msg)) {
ret = ndo_start_xmit_async_bare_callee(msg,
ret = ndo_start_xmit_async_callee(msg,
curr_item->channel,
nullnet_cspace,
nullnet_sync_endpoints[current_lcd_id]);
} else {
//printk("%s, LCD:%d got XMIT msg on chnl: %p",
// __func__, current_lcd_id, curr_item->channel);
ret = ndo_start_xmit_noawe_callee(msg,
ret = ndo_start_xmit_noasync_callee(msg,
curr_item->channel,
nullnet_cspace,
nullnet_sync_endpoints[current_lcd_id]);
......
......@@ -26,14 +26,30 @@ int ndo_start_xmit_noawe_callee(struct fipc_message *_request,
struct glue_cspace *cspace,
struct cptr sync_ep);
int ndo_start_xmit_async_bare_callee(struct fipc_message *_request,
int ndo_start_xmit_bare_callee(struct fipc_message *_request,
struct thc_channel *_channel,
struct glue_cspace *cspace,
struct cptr sync_ep);
int ndo_start_xmit_bare_callee(struct fipc_message *_request,
int ndo_start_xmit_noasync_1c_callee(struct fipc_message *_request,
struct thc_channel *_channel,
struct glue_cspace *cspace,
struct cptr sync_ep);
int ndo_start_xmit_noasync_callee(struct fipc_message *_request,
struct thc_channel *_channel,
struct glue_cspace *cspace,
struct cptr sync_ep);
int ndo_start_xmit_async_1c_callee(struct fipc_message *_request,
struct thc_channel *_channel,
struct glue_cspace *cspace,
struct cptr sync_ep);
int ndo_start_xmit_async_callee(struct fipc_message *_request,
struct thc_channel *_channel,
struct glue_cspace *cspace,
struct cptr sync_ep);
#endif /* __NULLNET_CALLER_H__ */
......@@ -82,6 +82,8 @@ struct skbuff_members {
#define CONSUME_SKB_NO_HASHING
#define SENDER_DISPATCH_LOOP
#define CONFIG_PREALLOC_XMIT_CHANNELS
#define COPY
#define MARSHAL
/* TODO: Use num_online_cpus/ num_online_nodes from Linux */
#define NUM_CPUS 32
......
......@@ -137,8 +137,6 @@ struct lcd_smp {
int ndo_start_xmit_async_landing(struct sk_buff *first, struct net_device *dev, struct trampoline_hidden_args *hidden_args);
int __ndo_start_xmit_dummy(struct sk_buff *skb, struct net_device *dev, struct trampoline_hidden_args *hidden_args);
int __ndo_start_xmit_bare_fipc_nomarshal(struct sk_buff *skb, struct net_device *dev, struct trampoline_hidden_args *hidden_args);
void skb_data_pool_init(void)
{
......@@ -801,8 +799,6 @@ int LCD_TRAMPOLINE_LINKAGE(ndo_start_xmit_trampoline)
struct trampoline_hidden_args *hidden_args;
LCD_TRAMPOLINE_PROLOGUE(hidden_args, ndo_start_xmit_trampoline);
ndo_start_xmit_fp = ndo_start_xmit_async_landing;
//ndo_start_xmit_fp = __ndo_start_xmit_dummy;
//ndo_start_xmit_fp = __ndo_start_xmit_bare_fipc_nomarshal;
return ndo_start_xmit_fp(skb, dev, hidden_args);
}
......
......@@ -329,6 +329,104 @@ free:
return NETDEV_TX_OK;
}
/*
 * Single-crossing ("1c") async xmit caller (KLCD side).
 *
 * Marshals the skb into one fipc request on this thread's async channel,
 * sends it with an awe cookie, waits inline for the response, and frees
 * the skb.  Always reports NETDEV_TX_OK to the stack; the remote status
 * (reg1 of the response) is read but deliberately dropped.
 *
 * Skbs whose data lies outside the shared pool (VOLUNTEER_XMIT) are not
 * supported on this path and are dropped.
 */
int ndo_start_xmit_async_1c(struct sk_buff *skb, struct net_device *dev, struct trampoline_hidden_args *hidden_args)
{
struct fipc_message *_request;
struct fipc_message *_response;
struct thc_channel *async_chnl = NULL;
xmit_type_t xmit_type;
unsigned int request_cookie;
struct net_device_container *net_dev_container;
/* stack-local container; my_ref stays null-initialized on this path */
struct sk_buff_container static_skbc = {0};
struct sk_buff_container *skb_c = &static_skbc;
int ret;
#ifdef COPY
struct skbuff_members *skb_lcd;
#endif
xmit_type = check_skb_range(skb);
if (xmit_type == VOLUNTEER_XMIT) {
printk("%s, skb->proto %02X | len %d\n",
__func__, ntohs(skb->protocol),
skb->len);
goto free;
}
/* one-time per-thread setup (async channel, ptstate) */
if (unlikely(!current->ptstate)) {
if (setup_once(hidden_args))
goto free;
}
net_dev_container = container_of(dev,
struct net_device_container, net_device);
async_chnl = (struct thc_channel*) PTS()->thc_chnl;
/*
 * doesn't free the packet NUM_TRANSACTIONS times
 * frees the packet only once
 */
ret = fipc_test_blocking_send_start(
async_chnl, &_request);
if (unlikely(ret)) {
LIBLCD_ERR("failed to get a send slot");
goto fail_async;
}
async_msg_set_fn_type(_request, NDO_START_XMIT);
/* inform LCD that it is async */
fipc_set_reg0(_request, 1);
fipc_set_reg1(_request,
net_dev_container->other_ref.cptr);
fipc_set_reg2(_request,
skb_c->my_ref.cptr);
/* skb data is referenced by its offset into the shared pool */
fipc_set_reg3(_request,
(unsigned long)
((void*)skb->head - skb_pool->base));
fipc_set_reg4(_request, skb->end);
fipc_set_reg5(_request, skb->protocol);
fipc_set_reg6(_request, skb->len);
#ifdef COPY
/* stage the skb fields the LCD side copies back out */
skb_lcd = SKB_LCD_MEMBERS(skb);
C(len);
C(data_len);
C(queue_mapping);
C(xmit_more);
C(tail);
C(truesize);
C(ip_summed);
C(csum_start);
C(network_header);
C(csum_offset);
C(transport_header);
skb_lcd->head_data_off = skb->data - skb->head;
#endif
ret = thc_ipc_send_request(async_chnl, _request, &request_cookie);
/*
 * Bail out if the send failed: previously this result was clobbered
 * unchecked by the recv call, which would then block on a response
 * that was never requested (and remove a possibly invalid awe id).
 */
if (unlikely(ret)) {
LIBLCD_ERR("thc_ipc_send_request");
goto fail_ipc;
}
ret = thc_ipc_recv_response_inline(async_chnl, request_cookie,
&_response);
awe_mapper_remove_id(request_cookie);
if (unlikely(ret)) {
LIBLCD_ERR("thc_ipc_call");
goto fail_ipc;
}
/* remote xmit status; read to release the slot, then dropped */
ret = fipc_get_reg1(_response);
fipc_recv_msg_end(thc_channel_to_fipc( async_chnl), _response);
fail_ipc:
fail_async:
free:
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
/*
* This function measures the overhead of bare fipc in KLCD/LCD setting
*/
......@@ -535,6 +633,79 @@ free:
return NETDEV_TX_OK;
}
/*
 * Single-crossing ("1c") synchronous xmit caller (KLCD side).
 *
 * Marshals the skb into one fipc request on this thread's async channel,
 * blocks for the response without any awe machinery, and frees the skb.
 * Always reports NETDEV_TX_OK to the stack; the remote status (reg1 of
 * the response) is read but deliberately dropped.
 *
 * Skbs whose data lies outside the shared pool (VOLUNTEER_XMIT) are not
 * supported on this path and are dropped.
 */
int ndo_start_xmit_noasync_1c(struct sk_buff *skb, struct net_device *dev, struct trampoline_hidden_args *hidden_args)
{
struct fipc_message *_request;
struct fipc_message *_response;
xmit_type_t xmit_type;
struct thc_channel *async_chnl;
struct net_device_container *net_dev_container;
/* stack-local container; my_ref stays null-initialized on this path */
struct sk_buff_container static_skbc = {0};
struct sk_buff_container *skb_c = &static_skbc;
int ret;
net_dev_container = container_of(dev,
struct net_device_container, net_device);
skb_c->skb = skb;
xmit_type = check_skb_range(skb);
if (xmit_type == VOLUNTEER_XMIT) {
printk("%s, skb->proto %02X | len %d\n",
__func__, ntohs(skb->protocol),
skb->len);
goto free;
}
/* setup once for this thread */
if (unlikely(!current->ptstate)) {
if (setup_once(hidden_args))
goto free;
printk("%s, Got async_chnl %p\n", __func__, current->ptstate->thc_chnl);
}
/* get the async channel */
async_chnl = current->ptstate->thc_chnl;
/*
 * Check the send slot like the async_1c variant does: previously the
 * result was ignored and _request would be used uninitialized on
 * failure.
 */
ret = fipc_test_blocking_send_start( async_chnl, &_request);
if (unlikely(ret)) {
LIBLCD_ERR("failed to get a send slot");
goto free;
}
async_msg_set_fn_type(_request, NDO_START_XMIT);
thc_set_msg_type(_request, msg_type_request);
/* chain skb or not */
fipc_set_reg0(_request, false);
fipc_set_reg1(_request,
net_dev_container->other_ref.cptr);
fipc_set_reg2(_request,
skb_c->my_ref.cptr);
/* skb data is referenced by its offset into the shared pool */
fipc_set_reg3(_request,
(unsigned long)
((void*)skb->head - skb_pool->base));
fipc_set_reg4(_request, skb->end);
fipc_set_reg5(_request, skb->protocol);
fipc_set_reg6(_request, skb->len);
fipc_send_msg_end(thc_channel_to_fipc(
async_chnl), _request);
/* guard nonlcd case with all macros */
fipc_test_blocking_recv_start( async_chnl, &_response);
fipc_recv_msg_end(thc_channel_to_fipc(
async_chnl), _response);
/* remote xmit status; read to release the slot, then dropped */
ret = fipc_get_reg1(_response);
free:
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
/*
* This function gets called when there is a chained skb in flight. For packet sizes > mtu, skbs are chained
* at the IP layer if NETIF_CHAIN_SKB feature is enabled in the driver
......@@ -666,6 +837,7 @@ int ndo_start_xmit_async_landing(struct sk_buff *first, struct net_device *dev,
skb->chain_skb = false;
if (!skb->chain_skb)
//return ndo_start_xmit_noasync_1c(skb, dev, hidden_args);
return ndo_start_xmit_noasync(skb, dev, hidden_args);
/* chain skb */
......@@ -681,7 +853,7 @@ int ndo_start_xmit_async_landing(struct sk_buff *first, struct net_device *dev,
ASYNC_({
skb->chain_skb = true;
rc = ndo_start_xmit_async(skb, dev, hidden_args);
//rc = __ndo_start_xmit_bare_async(skb, dev, hidden_args);
//rc = ndo_start_xmit_async_1c(skb, dev, hidden_args);
if (unlikely(!dev_xmit_complete(rc))) {
skb->next = next;
printk("%s, xmit failed\n", __func__);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment