Commit f35339ec authored by Vikram Narayanan

lcd/ixgbe: Use hashtable for skbs



skbs are used extensively in the tx/rx paths. Use a simple hashtable instead of
polluting the kernel sources with sk_buff_container structs.

Signed-off-by: Vikram Narayanan <vikram186@gmail.com>
parent 678269e1
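The hunks below call glue_insert_skbuff(), glue_lookup_skbuff() and glue_remove_skbuff() against a cptr_table, but those helpers are not part of this diff. The following is a minimal sketch of how they could be built on the kernel's <linux/hashtable.h> API, keyed on the sk_buff pointer; the bucket count, the absence of locking, and the use of the raw pointer value as both the hash key and my_ref are illustrative assumptions, not code from this commit (struct sk_buff_container, cptr_t, __cptr and cptr_val come from the LCD headers).

#include <linux/hashtable.h>
#include <linux/skbuff.h>

#define CPTR_HASH_BITS	5
static DEFINE_HASHTABLE(cptr_table, CPTR_HASH_BITS);

/*
 * The htable argument mirrors the call sites in the glue code; the hashtable
 * macros need the array itself, so this sketch uses the file-scope cptr_table.
 */
void glue_insert_skbuff(struct hlist_head *htable, struct sk_buff_container *skb_c)
{
	BUG_ON(!skb_c->skb);

	/* key the container on the skb pointer and hand that value out as my_ref */
	skb_c->my_ref = __cptr((unsigned long)skb_c->skb);
	hash_add(cptr_table, &skb_c->hentry, (unsigned long)skb_c->skb);
}

void glue_lookup_skbuff(struct hlist_head *htable, struct cptr c, struct sk_buff_container **skb_cout)
{
	struct sk_buff_container *skb_c;

	/* walk the bucket for this key and match on the stored skb pointer */
	hash_for_each_possible(cptr_table, skb_c, hentry, cptr_val(c)) {
		if (skb_c->skb == (struct sk_buff *)cptr_val(c))
			*skb_cout = skb_c;
	}
}

void glue_remove_skbuff(struct sk_buff_container *skb_c)
{
	hash_del(&skb_c->hentry);
}

With helpers along these lines, the caller can pass skb_c->my_ref across the fipc channel and the callee can store it as other_ref, so either side can map a remote reference back to its local container without embedding glue state in struct sk_buff itself, which is what the commit message above is after.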
@@ -46,11 +46,24 @@ struct rtnl_link_stats64_container {
struct cptr other_ref;
struct cptr my_ref;
};
struct sk_buff_container {
struct sk_buff sk_buff;
/* just store the pointer */
struct sk_buff *skb;
/* store the page orders recorded when the memory was volunteered; handy during unmap */
unsigned int skb_ord, skbd_ord;
cptr_t skb_cptr, skbh_cptr;
/*
 * the head and data pointers differ between the LCD and the KLCD,
 * so store them while crossing the boundary
 */
unsigned char *head, *data;
/* for hashtable insertion */
struct hlist_node hentry;
struct cptr other_ref;
struct cptr my_ref;
};
struct trampoline_hidden_args {
void *struct_container;
struct glue_cspace *cspace;
......
@@ -1199,14 +1199,60 @@ void consume_skb(struct sk_buff *skb)
int ret;
struct fipc_message *_request;
struct fipc_message *_response;
unsigned long skb_sz, skb_off, skbh_sz, skbh_off;
cptr_t skb_cptr, skbh_cptr;
struct sk_buff_container *skb_c;
ret = async_msg_blocking_send_start(ixgbe_async,
&_request);
glue_lookup_skbuff(cptr_table,
__cptr((unsigned long)skb), &skb_c);
if (ret) {
LIBLCD_ERR("failed to get a send slot");
goto fail_async;
}
async_msg_set_fn_type(_request,
CONSUME_SKB);
fipc_set_reg0(_request, skb_c->other_ref.cptr);
ret = lcd_virt_to_cptr(__gva((unsigned long)skb),
&skb_cptr,
&skb_sz,
&skb_off);
if (ret) {
LIBLCD_ERR("lcd_virt_to_cptr");
goto fail_virt;
}
ret = lcd_virt_to_cptr(__gva((unsigned long)skb->head),
&skbh_cptr,
&skbh_sz,
&skbh_off);
if (ret) {
LIBLCD_ERR("lcd_virt_to_cptr");
goto fail_virt;
}
#ifdef IOMMU_ASSIGN
ret = lcd_syscall_iommu_unmap_page(lcd_gva2gpa(__gva((unsigned long) skb->head)),
get_order(skbh_sz));
if (ret)
LIBLCD_ERR("unMapping failed for packet %p",
__pa(skb->data));
#endif
lcd_unmap_virt(__gva((unsigned long)skb->head), get_order(skbh_sz));
lcd_unmap_virt(__gva((unsigned long)skb), get_order(skb_sz));
lcd_cap_delete(skb_cptr);
lcd_cap_delete(skbh_cptr);
glue_remove_skbuff(skb_c);
kfree(skb_c);
ret = thc_ipc_call(ixgbe_async,
_request,
&_response);
@@ -1256,6 +1302,7 @@ void unregister_netdev(struct net_device *dev)
return;
fail_async:
fail_ipc:
fail_virt:
return;
}
@@ -2223,6 +2270,7 @@ int ndo_start_xmit_callee(struct fipc_message *_request,
struct cptr sync_ep)
{
struct sk_buff *skb;
struct sk_buff_container *skb_c;
struct net_device_container *dev_container;
int ret;
struct fipc_message *_response;
@@ -2233,6 +2281,7 @@ int ndo_start_xmit_callee(struct fipc_message *_request,
unsigned long skbd_ord, skbd_off;
gva_t skb_gva, skbd_gva;
unsigned int data_off;
cptr_t skb_ref;
request_cookie = thc_get_request_cookie(_request);
ret = glue_cap_lookup_net_device_type(cspace,
@@ -2242,9 +2291,14 @@ int ndo_start_xmit_callee(struct fipc_message *_request,
LIBLCD_ERR("lookup");
goto fail_lookup;
}
skb_ref = __cptr(fipc_get_reg2(_request));
fipc_recv_msg_end(thc_channel_to_fipc(_channel),
_request);
ret = lcd_cptr_alloc(&skb_cptr);
if (ret) {
LIBLCD_ERR("failed to get cptr");
goto fail_sync;
@@ -2285,6 +2339,15 @@ int ndo_start_xmit_callee(struct fipc_message *_request,
skb = (void*)(gva_val(skb_gva) + skb_off);
skb->head = (void*)(gva_val(skbd_gva) + skbd_off);
skb->data = skb->head + data_off;
skb_c = kzalloc(sizeof(*skb_c), GFP_KERNEL);
if (!skb_c)
LIBLCD_MSG("no memory");
skb_c->skb = skb;
skb_c->skbd_ord = skbd_ord;
glue_insert_skbuff(cptr_table, skb_c);
skb_c->other_ref = skb_ref;
#ifdef IOMMU_ASSIGN
ret = lcd_syscall_iommu_map_page(lcd_gva2gpa(skbd_gva),
@@ -2840,11 +2903,10 @@ int unsync_callee(struct fipc_message *_request,
LIBLCD_ERR("lookup");
goto fail_lookup;
}
m.mac_addr_l = fipc_get_reg3(_request);
fipc_recv_msg_end(thc_channel_to_fipc(_channel),
_request);
m.mac_addr_l = fipc_get_reg3(_request);
func_ret = unsync_container->unsync(( &dev_container->net_device ),
m.mac_addr);
if (async_msg_blocking_send_start(_channel,
......
@@ -1106,8 +1106,8 @@ int ndo_start_xmit_user(struct sk_buff *skb,
struct fipc_message *_response;
int func_ret;
unsigned int request_cookie;
cptr_t sync_end;
struct sk_buff_container *skb_c;
unsigned long skb_ord, skb_off;
unsigned long skbd_ord, skbd_off;
cptr_t skb_cptr, skbd_cptr;
@@ -1116,14 +1116,32 @@ int ndo_start_xmit_user(struct sk_buff *skb,
struct net_device_container,
net_device);
skb_c = kzalloc(sizeof(*skb_c), GFP_KERNEL);
if (!skb_c)
LIBLCD_MSG("no memory");
skb_c->skb = skb;
glue_insert_skbuff(cptr_table, skb_c);
/* save original head, data */
skb_c->head = skb->head;
skb_c->data = skb->data;
/* enter LCD mode to have cspace tree */
lcd_enter();
ret = grant_sync_ep(&sync_end, hidden_args->sync_ep);
ret = sync_setup_memory(skb, sizeof(struct sk_buff), &skb_ord, &skb_cptr, &skb_off);
ret = sync_setup_memory(skb, sizeof(struct sk_buff),
&skb_ord, &skb_cptr, &skb_off);
ret = sync_setup_memory(skb->head, skb_end_offset(skb) + sizeof(struct skb_shared_info), &skbd_ord, &skbd_cptr, &skbd_off);
ret = sync_setup_memory(skb->head,
skb_end_offset(skb) + sizeof(struct skb_shared_info),
&skbd_ord, &skbd_cptr, &skbd_off);
/* skb_ord is valid only after sync_setup_memory has filled it in */
skb_c->skb_ord = skb_ord;
skb_c->skb_cptr = skb_cptr;
skb_c->skbh_cptr = skbd_cptr;
ret = async_msg_blocking_send_start(hidden_args->async_chnl,
&_request);
@@ -1137,7 +1155,11 @@ int ndo_start_xmit_user(struct sk_buff *skb,
fipc_set_reg1(_request,
dev_container->other_ref.cptr);
ret = thc_ipc_send_request(hidden_args->async_chnl, _request, &request_cookie);
fipc_set_reg2(_request,
skb_c->my_ref.cptr);
ret = thc_ipc_send_request(hidden_args->async_chnl,
_request, &request_cookie);
if (ret) {
LIBLCD_ERR("thc_ipc_call");
@@ -1153,8 +1175,8 @@ int ndo_start_xmit_user(struct sk_buff *skb,
lcd_set_r3(skbd_off);
lcd_set_r4(skb->data - skb->head);
LIBLCD_MSG("skb->data %p | nr_frags %d",
__pa(skb->data),
LIBLCD_MSG("skb %p | skb->data %p | nr_frags %d",
skb, skb->data,
skb_shinfo(skb)->nr_frags);
ret = lcd_sync_send(sync_end);
@@ -1187,7 +1209,6 @@ fail_async:
fail_ipc:
lcd_exit(0);
return func_ret;
}
int ndo_start_xmit(struct sk_buff *skb,
@@ -1198,7 +1219,13 @@ int ndo_start_xmit(struct sk_buff *skb,
int ret;
struct fipc_message *_request;
struct fipc_message *_response;
unsigned int request_cookie;
int func_ret;
struct sk_buff_container *skb_c;
unsigned long skb_ord, skb_off;
unsigned long skbd_ord, skbd_off;
cptr_t skb_cptr, skbd_cptr;
if (!current->ptstate) {
LIBLCD_MSG("Calling %s from a non-LCD context! creating thc runtime!",
__func__);
@@ -1214,6 +1241,33 @@ int ndo_start_xmit(struct sk_buff *skb,
dev_container = container_of(dev,
struct net_device_container,
net_device);
skb_c = kzalloc(sizeof(*skb_c), GFP_KERNEL);
if (!skb_c) {
LIBLCD_MSG("no memory");
goto fail_alloc;
}
skb_c->skb = skb;
glue_insert_skbuff(cptr_table, skb_c);
/* save original head, data */
skb_c->head = skb->head;
skb_c->data = skb->data;
ret = sync_setup_memory(skb, sizeof(struct sk_buff),
&skb_ord, &skb_cptr, &skb_off);
ret = sync_setup_memory(skb->head,
skb_end_offset(skb) + sizeof(struct skb_shared_info),
&skbd_ord, &skbd_cptr, &skbd_off);
/* skb_ord is valid only after sync_setup_memory has filled it in */
skb_c->skb_ord = skb_ord;
skb_c->skb_cptr = skb_cptr;
skb_c->skbh_cptr = skbd_cptr;
ret = async_msg_blocking_send_start(hidden_args->async_chnl,
&_request);
if (ret) {
@@ -1224,9 +1278,40 @@ int ndo_start_xmit(struct sk_buff *skb,
NDO_START_XMIT);
fipc_set_reg1(_request,
dev_container->other_ref.cptr);
ret = thc_ipc_call(hidden_args->async_chnl,
_request,
&_response);
fipc_set_reg2(_request,
skb_c->my_ref.cptr);
ret = thc_ipc_send_request(hidden_args->async_chnl,
_request, &request_cookie);
if (ret) {
LIBLCD_ERR("thc_ipc_call");
goto fail_ipc;
}
/* sync half */
lcd_set_cr0(skb_cptr);
lcd_set_cr1(skbd_cptr);
lcd_set_r0(skb_ord);
lcd_set_r1(skb_off);
lcd_set_r2(skbd_ord);
lcd_set_r3(skbd_off);
lcd_set_r4(skb->data - skb->head);
LIBLCD_MSG("skb %p | skb->data %p | nr_frags %d",
skb, skb->data,
skb_shinfo(skb)->nr_frags);
ret = lcd_sync_send(hidden_args->sync_ep);
lcd_set_cr0(CAP_CPTR_NULL);
lcd_set_cr1(CAP_CPTR_NULL);
if (ret) {
LIBLCD_ERR("failed to send");
goto fail_sync;
}
ret = thc_ipc_recv_response(hidden_args->async_chnl, request_cookie, &_response);
if (ret) {
LIBLCD_ERR("thc_ipc_call");
goto fail_ipc;
@@ -1235,7 +1320,9 @@ int ndo_start_xmit(struct sk_buff *skb,
fipc_recv_msg_end(thc_channel_to_fipc(hidden_args->async_chnl),
_response);
return func_ret;
fail_alloc:
fail_async:
fail_sync:
fail_ipc:
return ret;
......