Commit 4894d8a0 authored by Vikram Narayanan's avatar Vikram Narayanan
Browse files

lcd/ixgbe: Add skb_add_rx_frag and eth_get_headlen for Rx path



Rx packets larger than 256 bytes require page fragments (the 256-byte limit is
imposed by the IXGBE driver, according to its internal buffer size). Add
skb_add_rx_frag to attach a page fragment to the skb. This is not an optimized
solution; it is added only to make the Rx path complete.
Signed-off-by: Vikram Narayanan <vikram186@gmail.com>
parent 85c0c5e9
......@@ -43,7 +43,6 @@ enum dispatch_t {
DEV_ADDR_ADD,
DEV_ADDR_DEL,
DEVICE_SET_WAKEUP_ENABLE,
ETH_GET_HEADLEN,
NETIF_TX_STOP_ALL_QUEUES,
NETIF_TX_WAKE_ALL_QUEUES,
NETIF_NAPI_ADD,
......@@ -53,6 +52,8 @@ enum dispatch_t {
NAPI_GRO_RECEIVE,
__NAPI_ALLOC_SKB,
ETH_TYPE_TRANS,
SKB_ADD_RX_FRAG,
ETH_GET_HEADLEN,
PCI_DISABLE_PCIE_ERROR_REPORTING,
PCI_BUS_READ_CONFIG_WORD,
PCI_BUS_WRITE_CONFIG_WORD,
......@@ -168,6 +169,4 @@ async_msg_blocking_send_start(struct thc_channel *chnl,
return -EIO;
}
}
#endif /* __IXGBE_COMMON_H__ */
......@@ -62,6 +62,7 @@ struct sk_buff_container {
struct hlist_node hentry;
struct cptr other_ref;
struct cptr my_ref;
struct task_struct *tsk;
};
struct trampoline_hidden_args {
......
......@@ -76,9 +76,7 @@ int glue_lookup_skbuff(struct hlist_head *htable, struct cptr c, struct sk_buff_
struct sk_buff_container *skb_c;
hash_for_each_possible(cptr_table, skb_c, hentry, (unsigned long) cptr_val(c)) {
printk("lookup skb %p\n", skb_c->skb);
if (skb_c->skb == (struct sk_buff*) c.cptr) {
printk("match %p <==> %lx", skb_c->skb, c.cptr);
*skb_cout = skb_c;
}
}
......@@ -448,7 +446,9 @@ int __must_check __pci_register_driver(struct pci_driver *drv,
func_ret = fipc_get_reg3(_response);
fipc_recv_msg_end(thc_channel_to_fipc(ixgbe_async),
_response);
return func_ret;
fail_async:
fail_insert:
fail_ipc:
......@@ -1614,64 +1614,6 @@ fail_ipc:
return ret;
}
/*
 * eth_get_headlen - LCD-side glue proxy (old sync+async variant).
 *
 * Marshals @data/@len to the KLCD over the async fipc channel and grants
 * the backing memory capability over the synchronous endpoint so the KLCD
 * can map the buffer before running the real eth_get_headlen().
 *
 * Ordering is critical here: the async request must be sent first (so the
 * KLCD is waiting in the callee), then the capability is granted over the
 * sync channel, and only then is the async response received.
 *
 * Returns the header length from the KLCD, or a negative errno-style value
 * on IPC failure. Memory-translation failures call lcd_exit() (fatal).
 */
unsigned int eth_get_headlen(void *data,
unsigned int len)
{
int ret;
struct fipc_message *_request;
struct fipc_message *_response;
int sync_ret;
unsigned long data_mem_sz;
unsigned long data_offset;
cptr_t data_cptr;
unsigned int request_cookie;
unsigned int func_ret;
/* Grab a send slot on the async ixgbe channel. */
ret = async_msg_blocking_send_start(ixgbe_async,
&_request);
if (ret) {
LIBLCD_ERR("failed to get a send slot");
goto fail_async;
}
async_msg_set_fn_type(_request,
ETH_GET_HEADLEN);
/* Resolve the capability + size + offset backing the data buffer. */
sync_ret = lcd_virt_to_cptr(__gva(( unsigned long )data),
&data_cptr,
&data_mem_sz,
&data_offset);
if (sync_ret) {
LIBLCD_ERR("virt to cptr failed");
lcd_exit(-1);
}
fipc_set_reg1(_request,
len);
/* Send the async half first; the cookie pairs it with the response. */
ret = thc_ipc_send_request(ixgbe_async,
_request,
&request_cookie);
if (ret) {
LIBLCD_ERR("thc_ipc_send_request");
goto fail_ipc;
}
/* Sync half: grant the memory capability plus order and offset. */
lcd_set_r0(ilog2(( data_mem_sz ) >> ( PAGE_SHIFT )));
lcd_set_r1(data_offset);
lcd_set_cr0(data_cptr);
sync_ret = lcd_sync_send(ixgbe_sync_endpoint);
/* Always clear cr0 so a stale capability is not granted later. */
lcd_set_cr0(CAP_CPTR_NULL);
if (sync_ret) {
LIBLCD_ERR("failed to send");
lcd_exit(-1);
}
/* NOTE(review): this return value is not checked; on failure
 * _response may be invalid before fipc_get_reg1 below — confirm. */
ret = thc_ipc_recv_response(ixgbe_async,
request_cookie,
&_response);
func_ret = fipc_get_reg1(_response);
fipc_recv_msg_end(thc_channel_to_fipc(ixgbe_async),
_response);
return func_ret;
fail_async:
fail_ipc:
return ret;
}
void netif_tx_stop_all_queues(struct net_device *dev)
{
int ret;
......@@ -2784,7 +2726,6 @@ int ixgbe_service_event_schedule_callee(struct fipc_message *_request,
request_cookie = thc_get_request_cookie(_request);
fipc_recv_msg_end(thc_channel_to_fipc(_channel),
_request);
LIBLCD_MSG("service timer callback");
__ixgbe_service_event_schedule(&dev_container->net_device);
fail_lookup:
......@@ -3430,6 +3371,8 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
glue_insert_skbuff(cptr_table, skb_c);
skb_c->other_ref = skb_ref;
skb_c->skb_cptr = skb_cptr;
skb_c->skbh_cptr = skbd_cptr;
ret = thc_ipc_recv_response(ixgbe_async,
request_cookie,
......@@ -3490,7 +3433,6 @@ __be16 eth_type_trans(struct sk_buff *skb,
goto fail_ipc;
}
func_ret = fipc_get_reg1(_response);
LIBLCD_MSG("%s, got %x", __func__, func_ret);
/* restore pointers */
skb->head = skb_c->head;
......@@ -3503,3 +3445,113 @@ fail_async:
fail_ipc:
return ret;
}
void skb_add_rx_frag(struct sk_buff *skb,
int i,
struct page *page,
int off,
int size,
unsigned int truesize)
{
int ret;
struct fipc_message *_request;
struct fipc_message *_response;
struct sk_buff_container *skb_c;
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
glue_lookup_skbuff(cptr_table,
__cptr((unsigned long)skb), &skb_c);
ret = async_msg_blocking_send_start(ixgbe_async,
&_request);
if (ret) {
LIBLCD_ERR("failed to get a send slot");
goto fail_async;
}
async_msg_set_fn_type(_request,
SKB_ADD_RX_FRAG);
fipc_set_reg0(_request,
skb_c->other_ref.cptr);
fipc_set_reg1(_request,
i);
fipc_set_reg2(_request,
gpa_val(lcd_gva2gpa(
__gva(
(unsigned long)lcd_page_address(page)))));
fipc_set_reg3(_request,
off);
fipc_set_reg4(_request,
size);
fipc_set_reg5(_request,
truesize);
/* save pointers */
skb_c->head = skb->head;
skb_c->data = skb->data;
printk("calling rpc rx_flag");
ret = thc_ipc_call(ixgbe_async,
_request,
&_response);
if (ret) {
LIBLCD_ERR("thc_ipc_call");
goto fail_ipc;
}
fipc_recv_msg_end(thc_channel_to_fipc(ixgbe_async),
_response);
/* restore pointers */
skb->head = skb_c->head;
skb->data = skb_c->data;
/* set LCD page's address */
frag->page.p = page;
printk("call returns rx_frag");
return;
fail_async:
fail_ipc:
return;
}
/*
 * eth_get_headlen - LCD-side glue proxy (async-only variant).
 *
 * Ships the buffer's guest physical address plus @len to the KLCD in a
 * single thc_ipc_call and returns the header length from the reply.
 * On IPC failure, the (negative) error code is returned instead.
 */
unsigned int eth_get_headlen(void *data,
		unsigned int len)
{
	struct fipc_message *req;
	struct fipc_message *resp;
	unsigned int headlen;
	int err;

	err = async_msg_blocking_send_start(ixgbe_async, &req);
	if (err) {
		LIBLCD_ERR("failed to get a send slot");
		goto fail_async;
	}

	async_msg_set_fn_type(req, ETH_GET_HEADLEN);
	/* gva -> gpa so the callee can resolve the buffer via the EPT. */
	fipc_set_reg1(req,
		gpa_val(lcd_gva2gpa(__gva((unsigned long)data))));
	fipc_set_reg2(req, len);

	err = thc_ipc_call(ixgbe_async, req, &resp);
	if (err) {
		LIBLCD_ERR("thc_ipc_call");
		goto fail_ipc;
	}

	headlen = fipc_get_reg1(resp);
	fipc_recv_msg_end(thc_channel_to_fipc(ixgbe_async), resp);
	return headlen;

fail_async:
fail_ipc:
	return err;
}
......@@ -31,7 +31,6 @@ int dispatch_async_loop(struct thc_channel *_channel,
sync_ep);
case NDO_START_XMIT:
trace(NDO_START_XMIT);
return ndo_start_xmit_callee(message,
_channel,
cspace,
......@@ -115,7 +114,6 @@ int dispatch_async_loop(struct thc_channel *_channel,
sync_ep);
case SERVICE_EVENT_SCHED:
trace(SERVICE_EVENT_SCHED);
return ixgbe_service_event_schedule_callee(message,
_channel,
cspace,
......
......@@ -54,9 +54,6 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask)
/* Dummy stub: log and return the skb pointer itself as the "data".
 * NOTE(review): returns (char*) from an unsigned char* function — the
 * implicit conversion is an incompatible-pointer-type warning; presumably
 * harmless for a placeholder, but confirm callers never use the result. */
unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta) { LIBLCD_MSG("================>$$$$$$ Dummy %s called", __func__); return (char*)skb; }
/* Dummy stub: no-op placeholder until the real glue proxy is wired up. */
void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
int size, unsigned int truesize) { LIBLCD_MSG("================>$$$$$$ Dummy %s called", __func__); }
/* Dummy stub: leaves the RSS key buffer untouched (not filled). */
void netdev_rss_key_fill(void *buffer, size_t len) { LIBLCD_MSG("================>$$$$$$ Dummy %s called", __func__); }
unsigned long dev_trans_start(struct net_device *dev)
......
......@@ -916,7 +916,6 @@ int ndo_open_user(struct net_device *dev,
goto fail_ipc;
}
func_ret = fipc_get_reg1(_response);
printk("%s, returned %d\n", __func__, func_ret);
mod_timer(&service_timer, jiffies + msecs_to_jiffies(5000));
napi_enable(napi_q0);
fipc_recv_msg_end(thc_channel_to_fipc(hidden_args->async_chnl),
......@@ -1252,6 +1251,7 @@ int ndo_start_xmit(struct sk_buff *skb,
}
skb_c->skb = skb;
skb_c->tsk = current;
glue_insert_skbuff(cptr_table, skb_c);
/* save original head, data */
......@@ -3227,62 +3227,6 @@ int device_set_wakeup_enable_callee(struct fipc_message *_request,
return ret;
}
/*
 * eth_get_headlen_callee - KLCD-side handler (old sync+async variant).
 *
 * Receives the data-buffer capability over the sync endpoint, maps it,
 * runs the real eth_get_headlen(), and replies with the result.
 * Capability alloc / sync recv / map must happen in exactly this order
 * to pair with the caller's grant sequence.
 *
 * NOTE(review): `ret` is never assigned, so the final `return ret;`
 * returns an uninitialized value — confirm the dispatch loop ignores it.
 * NOTE(review): fipc_get_reg1(_request) is read *after*
 * fipc_recv_msg_end() released the slot — looks like a use-after-release
 * on the ring buffer; verify against the fipc ring semantics.
 */
int eth_get_headlen_callee(struct fipc_message *_request,
struct thc_channel *_channel,
struct glue_cspace *cspace,
struct cptr sync_ep)
{
int sync_ret;
unsigned long mem_order;
unsigned long data_offset;
cptr_t data_cptr;
gva_t data_gva;
unsigned int len;
int ret;
struct fipc_message *_response;
unsigned int request_cookie;
unsigned int func_ret;
request_cookie = thc_get_request_cookie(_request);
fipc_recv_msg_end(thc_channel_to_fipc(_channel),
_request);
/* Allocate a slot to receive the data-buffer capability into. */
sync_ret = lcd_cptr_alloc(&data_cptr);
if (sync_ret) {
LIBLCD_ERR("failed to get cptr");
lcd_exit(-1);
}
lcd_set_cr0(data_cptr);
sync_ret = lcd_sync_recv(sync_ep);
/* Clear cr0 so the receive slot is not reused accidentally. */
lcd_set_cr0(CAP_CPTR_NULL);
if (sync_ret) {
LIBLCD_ERR("failed to recv");
lcd_exit(-1);
}
/* Caller passed page order in r0 and byte offset in r1. */
mem_order = lcd_r0();
data_offset = lcd_r1();
sync_ret = lcd_map_virt(data_cptr,
mem_order,
&data_gva);
if (sync_ret) {
LIBLCD_ERR("failed to map void *data");
lcd_exit(-1);
}
len = fipc_get_reg1(_request);
/* Call the real function on the mapped buffer + offset. */
func_ret = eth_get_headlen(( void * )( ( gva_val(data_gva) ) + ( data_offset ) ),
len);
if (async_msg_blocking_send_start(_channel,
&_response)) {
LIBLCD_ERR("error getting response msg");
return -EIO;
}
fipc_set_reg1(_response,
func_ret);
thc_ipc_reply(_channel,
request_cookie,
_response);
return ret;
}
int netif_tx_stop_all_queues_callee(struct fipc_message *_request,
struct thc_channel *_channel,
struct glue_cspace *cspace,
......@@ -3855,7 +3799,6 @@ int trigger_exit_to_lcd(struct thc_channel *_channel)
int ret;
unsigned int request_cookie;
dump_stack();
ret = async_msg_blocking_send_start(_channel,
&_request);
if (ret) {
......@@ -4768,6 +4711,8 @@ int netif_receive_skb_callee(struct fipc_message *_request,
return ret;
}
extern struct lcd *iommu_lcd;
int napi_gro_receive_callee(struct fipc_message *_request,
struct thc_channel *_channel,
struct glue_cspace *cspace,
......@@ -4862,12 +4807,10 @@ int __napi_alloc_skb_callee(struct fipc_message *_request,
LIBLCD_MSG("skb_c allocation failed");
goto fail_alloc;
}
skb_c->tsk = current;
skb_c->skb = skb;
skb_c->head = skb->head;
skb_c->data = skb->data;
LIBLCD_MSG("%s, skb alloc for %d | skb->dev %p",
__func__, len, skb->dev);
glue_insert_skbuff(cptr_table, skb_c);
......@@ -4880,6 +4823,9 @@ int __napi_alloc_skb_callee(struct fipc_message *_request,
skb_c->skb_ord = skb_ord;
skb_c->skbd_ord = skbd_ord;
skb_c->skb_cptr = skb_cptr;
skb_c->skbh_cptr = skbd_cptr;
/* sync half */
lcd_set_cr0(skb_cptr);
lcd_set_cr1(skbd_cptr);
......@@ -4952,7 +4898,6 @@ int eth_type_trans_callee(struct fipc_message *_request,
func_ret = eth_type_trans(skb,
( &dev_container->net_device ));
LIBLCD_MSG("%s, got %x", __func__, func_ret);
/* save */
skb_c->head = skb->head;
......@@ -4972,3 +4917,132 @@ int eth_type_trans_callee(struct fipc_message *_request,
fail_lookup:
return ret;
}
/*
 * skb_add_rx_frag_callee - KLCD-side handler for SKB_ADD_RX_FRAG.
 *
 * Unmarshals the fragment parameters, translates the LCD's guest
 * physical page address to a host virtual address via the EPT, restores
 * the skb's head/data pointers saved by the caller, and invokes the real
 * skb_add_rx_frag(). Always replies, even on failure, so the caller's
 * thc_ipc_call does not hang.
 *
 * Fixes vs. original: skb_c was uninitialized and glue_lookup_skbuff()
 * only writes it on a hash hit (UB on a miss); ret is now initialized so
 * every path returns a defined value.
 */
int skb_add_rx_frag_callee(struct fipc_message *_request,
		struct thc_channel *_channel,
		struct glue_cspace *cspace,
		struct cptr sync_ep,
		cptr_t lcd_cptr)
{
	struct sk_buff *skb;
	int i;
	unsigned long page;
	int off;
	int size;
	unsigned int truesize;
	int ret = 0;
	struct fipc_message *_response;
	unsigned int request_cookie;
	cptr_t skb_ref;
	struct sk_buff_container *skb_c = NULL;
	struct lcd *lcd_struct;
	hva_t hva_out;

	/* Unmarshal everything before releasing the request slot. */
	request_cookie = thc_get_request_cookie(_request);
	skb_ref = __cptr(fipc_get_reg0(_request));
	i = fipc_get_reg1(_request);
	page = fipc_get_reg2(_request);		/* gpa of the frag page */
	off = fipc_get_reg3(_request);
	size = fipc_get_reg4(_request);
	truesize = fipc_get_reg5(_request);
	fipc_recv_msg_end(thc_channel_to_fipc(_channel),
			_request);
	glue_lookup_skbuff(cptr_table, skb_ref, &skb_c);
	if (!skb_c) {
		LIBLCD_ERR("skb lookup failed");
		ret = -ENOENT;
		goto reply;
	}
	skb = skb_c->skb;
	lcd_struct = iommu_lcd;
	/* Resolve the LCD's gpa to an hva through its EPT. */
	ret = lcd_arch_ept_gpa_to_hva(lcd_struct->lcd_arch, __gpa(page), &hva_out);
	if (ret) {
		LIBLCD_WARN("Couldn't get gpa:hpa mapping");
		goto reply;
	}
	printk("got gpa:hpa %lx:%lx from ept\n", page,
			hva_val(hva_out));
	/* restore pointers saved by the LCD-side proxy */
	skb->head = skb_c->head;
	skb->data = skb_c->data;
	LIBLCD_MSG("%s ,calling real function", __func__);
	skb_add_rx_frag(skb,
			i,
			virt_to_page(hva_val(hva_out)),
			off,
			size,
			truesize);
	/* XXX: Is it ok to mess up with page ref?
	 * In the native driver, they do it. But with all our
	 * hpa:gpa stuff, I am skeptical if this is correct.
	 * If there are any page related bugs, this can be the
	 * reason.
	 */
	page_ref_inc(virt_to_page(hva_val(hva_out)));
	/* save pointers back for the proxy to restore */
	skb_c->head = skb->head;
	skb_c->data = skb->data;
reply:
	if (async_msg_blocking_send_start(_channel,
			&_response)) {
		LIBLCD_ERR("error getting response msg");
		return -EIO;
	}
	thc_ipc_reply(_channel,
			request_cookie,
			_response);
	return ret;
}
/*
 * eth_get_headlen_callee - KLCD-side handler (async-only variant).
 *
 * Translates the LCD's guest physical buffer address to a host virtual
 * address via the EPT, runs the real eth_get_headlen(), and replies with
 * the result. Always replies so the caller's thc_ipc_call returns.
 *
 * Fix vs. original: func_ret was left uninitialized when the EPT lookup
 * failed, so fipc_set_reg1() replied with an indeterminate value (UB);
 * it now defaults to 0.
 */
int eth_get_headlen_callee(struct fipc_message *_request,
		struct thc_channel *_channel,
		struct glue_cspace *cspace,
		struct cptr sync_ep)
{
	unsigned long data;
	unsigned int len;
	int ret = 0;
	struct fipc_message *_response;
	unsigned int request_cookie;
	unsigned int func_ret = 0;
	struct lcd *lcd_struct;
	hva_t hva_out;

	request_cookie = thc_get_request_cookie(_request);
	fipc_recv_msg_end(thc_channel_to_fipc(_channel),
			_request);
	data = fipc_get_reg1(_request);		/* gpa of the packet data */
	len = fipc_get_reg2(_request);
	lcd_struct = iommu_lcd;
	printk("req gpa %lx from ept\n", data);
	/* Resolve the LCD's gpa to an hva through its EPT. */
	ret = lcd_arch_ept_gpa_to_hva(lcd_struct->lcd_arch, __gpa(data), &hva_out);
	if (ret) {
		LIBLCD_WARN("Couldn't get gpa:hpa mapping");
		goto reply;
	}
	func_ret = eth_get_headlen((void *)hva_val(hva_out),
			len);
reply:
	/* Reply unconditionally; a failed lookup reports headlen 0. */
	ret = 0;
	if (async_msg_blocking_send_start(_channel,
			&_response)) {
		LIBLCD_ERR("error getting response msg");
		return -EIO;
	}
	fipc_set_reg1(_response,
			func_ret);
	thc_ipc_reply(_channel,
			request_cookie,
			_response);
	return ret;
}
......@@ -126,14 +126,12 @@ int dispatch_async_loop(struct thc_channel *_channel,
sync_ep);
case CONSUME_SKB:
trace(CONSUME_SKB);
return consume_skb_callee(message,
_channel,
cspace,
sync_ep);
case NAPI_CONSUME_SKB:
trace(NAPI_CONSUME_SKB);
return napi_consume_skb_callee(message,
_channel,
cspace,
......@@ -375,33 +373,35 @@ int dispatch_async_loop(struct thc_channel *_channel,
sync_ep);
case NETIF_RECEIVE_SKB:
trace(NETIF_RECEIVE_SKB);
return netif_receive_skb_callee(message,
_channel,
cspace,
sync_ep);
case NAPI_GRO_RECEIVE:
trace(NAPI_GRO_RECEIVE);
return napi_gro_receive_callee(message,
_channel,
cspace,
sync_ep);
case __NAPI_ALLOC_SKB:
trace(__NAPI_ALLOC_SKB);
return __napi_alloc_skb_callee(message,
_channel,
cspace,
sync_ep);
case ETH_TYPE_TRANS:
trace(ETH_TYPE_TRANS);
return eth_type_trans_callee(message,
_channel,
cspace,
sync_ep);
case SKB_ADD_RX_FRAG:
trace(SKB_ADD_RX_FRAG);
return skb_add_rx_frag_callee(message,
_channel,
cspace,
sync_ep);
default:
LIBLCD_ERR("unexpected function label: %d",
......@@ -412,4 +412,3 @@ int dispatch_async_loop(struct thc_channel *_channel,
return 0;
}
......@@ -227,7 +227,10 @@ int eth_type_trans_callee(struct fipc_message *_request,
struct thc_channel *_channel,