......@@ -36,6 +36,7 @@
#include <asm/smap.h>
#include <asm/pgtable_types.h>
#include <linux/err.h>
#include <linux/lcd_trace.h>
/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
#include <linux/elf-em.h>
......@@ -619,12 +620,16 @@ END(irq_entries_start)
/* We entered an interrupt context - irqs are off: */
TRACE_IRQS_OFF
/* No matter which path we took, save current LCD stack */
movq vmfunc_state_page + VMFUNC_kernel_esp, %r13
#ifdef CONFIG_LCD_TRACE_BUFFER
/* Trace IRQ into the LCD trace buffer; rdi = trap frame, rsi = event type */
pushq %rdi
movq $4, %rsi
movq $EVENT_IRQ, %rsi
call add_trace_entry_tf
popq %rdi
#endif
call \func /* rdi points to pt_regs */
.endm
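For readers mapping the hook above to C: per the SysV AMD64 calling convention %rdi carries the first argument and %rsi the second, so the sequence hands the interrupt's trap frame and an EVENT_* id to the helper defined later in this series. That is also why the hooks push and pop any argument registers they still need after the call (%rdi here, %rsi as well in the exception path below): the helper may clobber caller-saved registers.

/* C-side contract assumed by the asm hooks (declared in <linux/lcd_trace.h>):
 * %rdi -> regs (the pt_regs built by the entry code), %rsi -> type (EVENT_*). */
asmlinkage __visible notrace void add_trace_entry_tf(struct pt_regs *regs,
		unsigned type);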
......@@ -641,6 +646,7 @@ common_interrupt:
ret_from_intr:
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
decl PER_CPU_VAR(irq_count)
/* Restore saved previous stack */
......@@ -649,6 +655,16 @@ ret_from_intr:
* valid gs after swapgs, save the IST pointer here */
//movq PER_CPU_VAR(lvd_irq_stack_addr), %r13
/* Restore the kernel stack in the VMFUNC state page */
movq %r13, vmfunc_state_page + VMFUNC_kernel_esp
#ifdef CONFIG_LCD_TRACE_BUFFER
/* Trace IRQ exit into the LCD trace buffer; rdi = trap frame, rsi = event type */
movq %rsp, %rdi
movq $EVENT_IRQ_EXIT, %rsi
call add_trace_entry_tf
#endif
testb $3, CS(%rsp)
jz retint_vmfunc_kernel
......@@ -690,7 +706,6 @@ GLOBAL(restore_vmfunc_regs_and_iret)
testq %r12, %r12
jz 2f
movq %r12, vmfunc_state_page + VMFUNC_entered_lcd
movq %r13, vmfunc_state_page + VMFUNC_kernel_esp
/* Stack to switch to, i.e., IST. Save it here while we have gs */
movq PER_CPU_VAR(lvd_irq_stack_addr), %r13
......@@ -957,13 +972,15 @@ ENTRY(\sym)
subq $EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
.endif
#ifdef CONFIG_LCD_TRACE_BUFFER
/* Trace exception into the LCD trace buffer; rdi = trap frame, rsi = event type */
pushq %rdi
pushq %rsi
movq $6, %rsi
movq $EVENT_EXCEPTION, %rsi
call add_trace_entry_tf
popq %rsi
popq %rdi
#endif
call \do_sym
......@@ -1539,11 +1556,12 @@ ENTRY(nmi_vmfunc_simple)
* done with the NMI stack.
*/
#ifdef CONFIG_LCD_TRACE_BUFFER
/* Trace NMI into the LCD trace buffer; rdi = trap frame, rsi = event type */
movq %rsp, %rdi
movq $5, %rsi
movq $EVENT_NMI, %rsi
call add_trace_entry_tf
#endif
movq %rsp, %rdi
movq $-1, %rsi
......@@ -1603,10 +1621,12 @@ ENTRY(nmi_vmfunc_simple)
call paranoid_entry_vmfunc_nmi
// call paranoid_entry
#ifdef CONFIG_LCD_TRACE_BUFFER
/* Trace NMI into the LCD trace buffer; rdi = trap frame, rsi = event type */
movq %rsp, %rdi
movq $5, %rsi
movq $EVENT_NMI, %rsi
call add_trace_entry_tf
#endif
/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
movq %rsp, %rdi
......
......@@ -22,7 +22,7 @@
int panic_on_unrecovered_nmi;
int panic_on_io_nmi;
unsigned int code_bytes = 64;
int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE;
int kstack_depth_to_print = 20 * STACKSLOTS_PER_LINE;
static int die_counter;
static void printk_stack_address(unsigned long address, int reliable,
......
#include <asm/msr.h>
#include <asm/ptrace.h>
#include <asm/current.h>
#include <linux/percpu.h>
......@@ -5,22 +6,60 @@
#include <linux/lcd_trace.h>
DEFINE_PER_CPU_PAGE_ALIGNED(unsigned char, ring_buffer[RING_BUFFER_SIZE]);
EXPORT_PER_CPU_SYMBOL(ring_buffer);
DEFINE_PER_CPU(unsigned char, ring_head);
EXPORT_PER_CPU_SYMBOL(ring_head);
#define NUM_TRACE_ENTRIES (RING_BUFFER_SIZE / sizeof(struct ring_trace_entry))
char *trace_event_type_string[] = {
"xmit",
"msix_hndlr",
"napi_comp_done",
"irq",
"nmi",
"exception",
"softirq_rx_poll",
"net_rx_action"
};
asmlinkage __visible notrace void __add_trace_entry(trace_event_t type,
static const char *event_type_to_string(unsigned type)
{
switch (type) {
case EVENT_XMIT:
return "XMIT";
case EVENT_MSIX_HANDLER:
return "MSIX_HANDLER";
case EVENT_NAPI_COMPLETE_DONE:
return "NAPI_COMP_DONE";
case EVENT_IRQ:
return "IRQ";
case EVENT_NMI:
return "NMI";
case EVENT_EXCEPTION:
return "EXCEPTION";
case EVENT_IRQ_EXIT:
return "IRQ_EXIT";
case EVENT_SOFTIRQ_POLL:
return "SOFTIRQ_POLL";
case EVENT_NET_RX_ACTION:
return "NET_RX_ACTION";
case EVENT_VMFUNC_TRAMP_ENTRY:
return "TRAMP_ENTRY";
case EVENT_VMFUNC_TRAMP_EXIT:
return "TRAMP_EXIT";
case EVENT_VMFUNC_SBOARD_KLCD_ENTER:
return "SBOARD_ENTER";
case EVENT_VMFUNC_SBOARD_KLCD_LEAVE:
return "SBOARD_LEAVE";
default:
return "Undefined item";
}
}
asmlinkage __visible notrace void __add_trace_entry(unsigned type,
unsigned long rdi, unsigned long rsp, unsigned long eflags,
unsigned long ip)
{
......@@ -28,20 +67,25 @@ asmlinkage __visible notrace void __add_trace_entry(trace_event_t type,
unsigned char *ring_buf_base = per_cpu(ring_buffer, cpu);
struct ring_trace_entry *ring_buf = (struct ring_trace_entry*) ring_buf_base;
unsigned char head_idx = __this_cpu_read(ring_head);
struct ring_trace_entry *entry = (struct ring_trace_entry*) &ring_buf[head_idx];
struct ring_trace_entry *entry;
/* Advance the head first, with wrap, so it always points at the entry being written */
head_idx = (head_idx + 1) % NUM_TRACE_ENTRIES;
__this_cpu_write(ring_head, head_idx);
entry = &ring_buf[head_idx];
entry->rip = ip;
entry->rsp = rsp;
entry->rdi = rdi;
entry->eflags = eflags;
entry->lcd_stack = (unsigned long) current->lcd_stack;
entry->lcd_stack_bit = current->lcd_stack_bit;
entry->lcd_nc = current->nested_count;
entry->pid = current->pid;
entry->gsbase = native_read_msr(MSR_GS_BASE);
entry->context = (!!in_irq() << IN_IRQ_SHIFT)
| (!!in_softirq() << IN_SOFTIRQ_SHIFT)
| (!!in_nmi() << IN_NMI_SHIFT);
entry->type = type;
snprintf(entry->name, PROC_NAME_MAX, "%s", current->comm);
__this_cpu_write(ring_head, (++head_idx % NUM_TRACE_ENTRIES));
}
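For completeness, the exported add_trace_entry() wrapper gives C code the same one-line tracing the assembly hooks get; the call sites added later in this series (vmfunc_klcd_wrapper and net_rx_action) use it exactly this way. A minimal usage sketch, with an illustrative event and argument:

#include <linux/lcd_trace.h>

/* Sketch only: EVENT_XMIT and 'len' are illustrative; the second argument is
 * simply stored in the entry's rdi slot for later inspection in the dump. */
static void example_trace_xmit(unsigned long len)
{
	add_trace_entry(EVENT_XMIT, len);
}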
__asm__(
......@@ -63,7 +107,7 @@ EXPORT_SYMBOL(add_trace_entry);
asmlinkage __visible notrace void add_trace_entry_tf(struct pt_regs *regs,
trace_event_t type)
unsigned type)
{
__add_trace_entry(type, regs->di, regs->sp, regs->flags, regs->ip);
}
......@@ -90,17 +134,18 @@ void dump_ring_trace_buffer(void)
struct ring_trace_entry *entry = &trace_entries[head_idx % NUM_TRACE_ENTRIES];
if (i == 0)
printk("head ==> ");
printk("type:%16s cpu: %d [%c|%c|%c] comm: %s pid: %d rip: %08lx rsp: %08lx "
"rdi: %08lx lcd_stack: %08lx[esp_lcd_bmap: %x] "
"eflags: %lx [IF: %d]\n",
trace_event_type_string[entry->type - 1],
printk("type:%16s(%x) cpu: %d [%c|%c|%c] comm: %s pid: %d rip: %16lx rsp: %16lx "
"rdi: %09lx gsbase: %16lx lcd_stack: %16lx[bmap: %x nc:%u] "
"eflags: %08lx [IF: %d]\n",
event_type_to_string(entry->type),
entry->type,
raw_smp_processor_id(),
entry->context & (IN_NMI) ? 'N' : '-',
entry->context & (IN_SOFTIRQ) ? 'S' : '-',
entry->context & (IN_IRQ) ? 'I' : '-',
entry->name, entry->pid, entry->rip,
entry->rsp, entry->rdi, entry->lcd_stack,
entry->lcd_stack_bit, entry->eflags,
entry->rsp, entry->rdi, entry->gsbase, entry->lcd_stack,
entry->lcd_stack_bit, entry->lcd_nc, entry->eflags,
!!(entry->eflags & IF_FLAG));
}
}
......
......@@ -557,7 +557,7 @@ NOKPROBE_SYMBOL(sync_regs_to_ist);
asmlinkage __visible notrace void save_lcd_stack(struct pt_regs *eregs)
{
current->lcd_stack = eregs->sp;
current->lcd_stack = (void*) eregs->sp;
return;
}
NOKPROBE_SYMBOL(save_lcd_stack);
......
#ifndef LCD_TRACE_H
#define LCD_TRACE_H
#define PROC_NAME_MAX 16
#define IF_FLAG (1 << 9)
#define RING_BUFFER_SIZE (2 * PAGE_SIZE)
#define IN_IRQ_SHIFT 0
#define IN_SOFTIRQ_SHIFT 1
#define IN_NMI_SHIFT 2
#define IN_IRQ (1 << IN_IRQ_SHIFT)
#define IN_SOFTIRQ (1 << IN_SOFTIRQ_SHIFT)
#define IN_NMI (1 << IN_NMI_SHIFT)
#define EVENT_XMIT 1
#define EVENT_MSIX_HANDLER 2
#define EVENT_NAPI_COMPLETE_DONE 3
#define EVENT_IRQ 4
#define EVENT_NMI 5
#define EVENT_EXCEPTION 6
#define EVENT_IRQ_EXIT 7
#define EVENT_SOFTIRQ_POLL 8
#define EVENT_NET_RX_ACTION 9
#define EVENT_VMFUNC_TRAMP_ENTRY 10
#define EVENT_VMFUNC_TRAMP_EXIT 11
#define EVENT_VMFUNC_SBOARD_KLCD_ENTER 12
#define EVENT_VMFUNC_SBOARD_KLCD_LEAVE 13
#ifndef __ASSEMBLY__
struct ring_trace_entry {
unsigned long rip;
unsigned long eflags;
unsigned long rsp;
unsigned long rdi;
unsigned long lcd_stack;
unsigned long gsbase;
unsigned char context;
unsigned char lcd_stack_bit;
unsigned char lcd_nc;
unsigned short pid;
unsigned type;
unsigned orig_type;
char name[PROC_NAME_MAX];
};
void add_trace_entry(unsigned type, unsigned long rdi);
void dump_ring_trace_buffer(void);
asmlinkage __visible notrace void add_trace_entry_tf(struct pt_regs *regs, unsigned type);
DECLARE_PER_CPU_PAGE_ALIGNED(unsigned char, ring_buffer[RING_BUFFER_SIZE]);
DECLARE_PER_CPU(unsigned char, ring_head);
#endif /* __ASSEMBLY__ */
#endif /* LCD_TRACE_H */
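One constraint worth noting: ring_head is an unsigned char, so the modulo arithmetic in __add_trace_entry() only works if the buffer holds at most 256 entries. With the fields above, sizeof(struct ring_trace_entry) comes to roughly 80 bytes on x86-64 after padding, so the two-page buffer holds about 8192 / 80 ≈ 102 entries, comfortably under the limit. A compile-time check (a suggestion, not part of this patch) could pin that down, e.g. next to the per-CPU definitions in lcd_trace.c:

#include <linux/bug.h>
#include <linux/lcd_trace.h>

static inline void lcd_trace_check_geometry(void)
{
	/* ring_head is an unsigned char, so the index space must fit in it */
	BUILD_BUG_ON(RING_BUFFER_SIZE / sizeof(struct ring_trace_entry) > 256);
}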
......@@ -3301,6 +3301,9 @@ void dump_stack_print_info(const char *log_lvl)
printk("%sHardware name: %s\n",
log_lvl, dump_stack_arch_desc_str);
printk("vmfunc_lcd: %p lcd_stack: %p[bitmap: %x] nested: %d\n",
current->vmfunc_lcd, current->lcd_stack,
current->lcd_stack_bit, current->nested_count);
print_worker_info(log_lvl, current);
}
......
......@@ -18,6 +18,10 @@
#include <lcd_domains/microkernel.h>
#include <asm/liblcd/address_spaces.h>
#ifdef CONFIG_MAP_TRACE_BUFFER_BFLANK
#include <linux/lcd_trace.h>
#endif
/* These are initialized by init.c */
struct kmem_cache *lcd_arch_cache;
struct lcd_vmcs_config lcd_global_vmcs_config;
......@@ -966,6 +970,11 @@ int lcd_arch_create(struct lcd_arch **out)
for_each_online_cpu(cpu) {
/* Get the EPT VMFUNC switching page for this CPU */
struct page *eptp_list_page = (struct page *) per_cpu(vmfunc_eptp_list_page, cpu);
#ifdef CONFIG_MAP_TRACE_BUFFER_BFLANK
unsigned char *this_ring_buf = per_cpu(ring_buffer, cpu);
unsigned char this_ring_head = per_cpu(ring_head, cpu);
#endif
u64 *eptp_list = phys_to_virt(page_to_phys(eptp_list_page));
u64 eptp;
u64 *root;
......@@ -984,6 +993,14 @@ int lcd_arch_create(struct lcd_arch **out)
/* Add EPT to the VMFUNC switching page */
eptp_list[lcd_arch->ept_id] = eptp;
#ifdef CONFIG_MAP_TRACE_BUFFER_BFLANK
/* pass GVA of this buffer on idx:3 */
eptp_list[3] = (uint64_t) this_ring_buf;
/* pass num_pages of ring buffer on idx:4 */
eptp_list[4] = RING_BUFFER_SIZE >> PAGE_SHIFT;
/* pass curr_head of ring_buffer on idx:5 */
eptp_list[5] = this_ring_head;
#endif
}
#else
......
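The hunk above repurposes otherwise unused EPTP-list slots to hand the per-CPU trace buffer's kernel GVA (idx 3), its size in pages (idx 4), and the head index at LCD-creation time (idx 5) to the isolated side. The consumer is not part of this diff; roughly, the LCD side would read those slots back from its mapping of the VMFUNC EPTP list page, along the lines of the sketch below (everything except the slot layout is an assumption):

/* Hypothetical LCD-side consumer of the idx 3/4/5 convention shown above;
 * 'eptp_list' stands for the guest-visible mapping of the EPTP list page. */
struct lcd_trace_buf_info {
	unsigned long gva;	/* slot 3: kernel GVA of ring_buffer        */
	unsigned long npages;	/* slot 4: RING_BUFFER_SIZE >> PAGE_SHIFT   */
	unsigned long head;	/* slot 5: ring_head when the LCD was made  */
};

static void read_trace_buf_info(const u64 *eptp_list,
				struct lcd_trace_buf_info *info)
{
	info->gva    = eptp_list[3];
	info->npages = eptp_list[4];
	info->head   = eptp_list[5];
}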
......@@ -7,6 +7,7 @@
#include <libfipc.h>
#include <linux/kallsyms.h>
#include <asm/pgtable_64.h>
#include <linux/lcd_trace.h>
#define NUM_LCDS 5
/* this is the only function Intel VT-x support */
......@@ -221,8 +222,12 @@ int vmfunc_klcd_wrapper(struct fipc_message *msg, unsigned int ept)
msg->rpc_id,
current->lcd_stack);
#endif
add_trace_entry(EVENT_VMFUNC_TRAMP_ENTRY, msg->rpc_id);
vmfunc_trampoline_entry(msg);
add_trace_entry(EVENT_VMFUNC_TRAMP_EXIT, msg->rpc_id);
local_irq_save(flags);
if (--current->nested_count == 0)
drop_stack(ept);
......
......@@ -17,8 +17,8 @@ MICROKERNEL_BASE_CFLAGS= \
-DVMFUNC_PAGE_REMAP \
-DOTHER_DOMAIN=1 \
-DCONFIG_LVD \
-DCONFIG_DEFEAT_LAZY_TLB
# -DCONFIG_LVD_DISABLE_IRQS
-DCONFIG_DEFEAT_LAZY_TLB \
-DCONFIG_MAP_TRACE_BUFFER_BFLANK
MICROKERNEL_CFLAGS= \
$(MICROKERNEL_BASE_CFLAGS) \
......
......@@ -34,6 +34,7 @@ enum glue_type {
GLUE_TYPE_RTNL_LINK_STATS64_CONTAINER,
GLUE_TYPE_SK_BUFF_CONTAINER,
GLUE_TYPE_NAPI_STRUCT_CONTAINER,
GLUE_TYPE_IRQ_HANDLER_CONTAINER,
GLUE_NR_TYPES,
};
......@@ -116,7 +117,13 @@ static struct type_ops_id glue_libcap_type_ops[GLUE_NR_TYPES] = {
.revoke = dummy_func,
}
},
{
{
.name = "struct irq_handler_container",
.delete = dummy_func,
.revoke = dummy_func,
}
},
};
int glue_cap_init(void)
......
......@@ -26,6 +26,7 @@
#define LOCAL_SKB
#define SENDER_DISPATCH_LOOP
#define PASS_DEV_ADDR_IN_REG
#define NAPI_STRUCT_ARRAY
enum dispatch_t {
__PCI_REGISTER_DRIVER,
......@@ -52,6 +53,7 @@ enum dispatch_t {
DEVICE_SET_WAKEUP_ENABLE,
NETIF_TX_STOP_ALL_QUEUES,
NETIF_TX_WAKE_ALL_QUEUES,
__NETIF_TX_DISABLE,
NETIF_NAPI_ADD,
NETIF_NAPI_DEL,
NETIF_WAKE_SUBQUEUE,
......@@ -59,6 +61,8 @@ enum dispatch_t {
NAPI_GRO_RECEIVE,
__NAPI_ALLOC_SKB,
__NAPI_SCHEDULE_IRQOFF,
___NAPI_SCHEDULE_IRQOFF,
__NAPI_ENABLE,
NAPI_DISABLE,
NAPI_COMPLETE_DONE,
ETH_TYPE_TRANS,
......@@ -112,6 +116,7 @@ enum dispatch_t {
MODULE_EXIT,
SYNC_PROBE,
SYNC_NDO_SET_MAC_ADDRESS,
SYNCHRONIZE_IRQ,
};
typedef enum {
......@@ -121,7 +126,7 @@ typedef enum {
typedef enum {
VOLUNTEER_XMIT = 0x1,
SHARED_DATA_XMIT = 0x2,
SHARED_DATA_XMIT = 0x2abcd,
} xmit_type_t;
typedef enum {
......@@ -183,6 +188,7 @@ void glue_cap_remove(
struct glue_cspace *cspace,
cptr_t c);
#define INIT_IPC_MSG(m) memset(m, 0x0, sizeof(*m))
/* ASYNC HELPERS -------------------------------------------------- */
static inline
int
......
......@@ -238,3 +238,24 @@ int glue_cap_lookup_napi_struct_type(struct glue_cspace *cspace,
( void ** )napi_struct_container);
}
int glue_cap_insert_irqhandler_type(struct glue_cspace *cspace,
struct irqhandler_t_container *irqhandler_container,
struct cptr *c_out)
{
return glue_cspace_insert(cspace,
irqhandler_container,
glue_libcap_type_ops[ GLUE_TYPE_IRQ_HANDLER_CONTAINER ].libcap_type,
c_out);
}
int glue_cap_lookup_irqhandler_type(struct glue_cspace *cspace,
struct cptr c,
struct irqhandler_t_container **irqhandler_container)
{
return glue_cspace_lookup(cspace,
c,
glue_libcap_type_ops[ GLUE_TYPE_IRQ_HANDLER_CONTAINER ].libcap_type,
( void ** )irqhandler_container);
}
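The two helpers above mirror the existing napi_struct insert/lookup pair; a typical round trip (a sketch only, with allocation and error handling elided, and 'cspace'/'hc' assumed to exist at the call site) looks like:

static int example_irqhandler_roundtrip(struct glue_cspace *cspace,
		struct irqhandler_t_container *hc)
{
	struct irqhandler_t_container *looked_up;
	struct cptr c;
	int ret;

	/* register the container and get back the cptr that is marshalled
	 * across the domain boundary */
	ret = glue_cap_insert_irqhandler_type(cspace, hc, &c);
	if (ret)
		return ret;

	/* the receiving side resolves the cptr back to the container */
	return glue_cap_lookup_irqhandler_type(cspace, c, &looked_up);
}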
......@@ -184,4 +184,11 @@ int glue_cap_lookup_napi_struct_type(struct glue_cspace *cspace,
struct cptr c,
struct napi_struct_container **napi_struct_container);
int glue_cap_insert_irqhandler_type(struct glue_cspace *cspace,
struct irqhandler_t_container *irqhandler_container,
struct cptr *c_out);
int glue_cap_lookup_irqhandler_type(struct glue_cspace *cspace,
struct cptr c,
struct irqhandler_t_container **irqhandler_container);
#endif /* __IXGBE_GLUE_HELPER_H__ */
......@@ -907,7 +907,8 @@ static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
void __ixgbe_dump(struct net_device *dev)
{
ixgbe_dump(netdev_priv(dev));
/* ixgbe_dump(netdev_priv(dev)); */
ixgbe_dump(g_adapter);
}
static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
......@@ -1258,6 +1259,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
total_bytes += tx_buffer->bytecount;
total_packets += tx_buffer->gso_segs;
//printk("%s, calling napi_consume_skb %p", __func__, tx_buffer->skb);
/* free the skb */
napi_consume_skb(tx_buffer->skb, napi_budget);
......@@ -2966,6 +2968,8 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
return IRQ_HANDLED;
}
void ___napi_schedule_irqoff(struct napi_struct *napi);
static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
{
struct ixgbe_q_vector *q_vector = data;
......@@ -2973,7 +2977,7 @@ static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
/* EIAM disabled interrupts (on this vector) for us */
if (q_vector->rx.ring || q_vector->tx.ring)
napi_schedule_irqoff(&q_vector->napi);
___napi_schedule_irqoff(&q_vector->napi);
return IRQ_HANDLED;
}
......@@ -3021,14 +3025,8 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
once++;
}
ixgbe_for_each_ring(ring, q_vector->rx) {
int cleaned;
if (0)
printk("%s, checking rx_ring %p\n", __func__, ring);
cleaned = ixgbe_clean_rx_irq(q_vector, ring,
int cleaned = ixgbe_clean_rx_irq(q_vector, ring,
per_ring_budget);
if (0)
printk("%s, clean_rx_irq ret %d\n", __func__, cleaned);
work_done += cleaned;
if (cleaned >= per_ring_budget)
clean_complete = false;
......@@ -3039,9 +3037,6 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
if (!clean_complete)
return budget;
/* always poll */
return budget;
/* all work done, exit the polling mode */
napi_complete_done(napi, work_done);
if (adapter->rx_itr_setting & 1)
......@@ -3840,7 +3835,7 @@ void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
/* the hardware may take up to 100us to really disable the rx queue */
do {
udelay(10);
udelay(100);
rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
......@@ -4751,7 +4746,7 @@ static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
/* FIXME: enabling this causes undefined instruction
* to be generated by the compiler. Disable it for now
*/
napi_enable(&adapter->q_vector[q_idx]->napi);
__napi_enable(&adapter->q_vector[q_idx]->napi);
}
}
......@@ -4760,6 +4755,14 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
int q_idx;
for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) {
/*
 * XXX: napi_disable() calls out to the kernel, sets the
 * NAPI_STATE_DISABLE bit, and waits for NAPI_STATE_SCHED to clear,
 * but ixgbe_msix_clean_rings() runs against a stale copy of
 * napi->state. Set NAPI_STATE_DISABLE locally here as well so the
 * two views of the state stay in sync.
 */
set_bit(NAPI_STATE_DISABLE, &adapter->q_vector[q_idx]->napi.state);
napi_disable(&adapter->q_vector[q_idx]->napi);
while (!ixgbe_qv_disable(adapter->q_vector[q_idx])) {
pr_info("QV %d locked\n", q_idx);
......@@ -5676,19 +5679,9 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
/* call carrier off first to avoid false dev_watchdog timeouts */
netif_carrier_off(netdev);
#ifndef LCD_ISOLATE
netif_tx_disable(netdev);
#else
/* netif_tx_disable does local_bh_disable/enable which translates
* to percpu macros that cause ept violation. So, pull out the
* below loop from that function and have it here in case of LCDs
*/
for (i = 0; i < netdev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(netdev, i);
netif_tx_stop_queue(txq);
}
#endif
/* XXX: calls out to perform this */
__netif_tx_disable(netdev);
/* disable any upper devices */
netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
......@@ -5698,7 +5691,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
if (vlan->fwd_priv) {
netif_tx_stop_all_queues(upper);
netif_carrier_off(upper);
netif_tx_disable(upper);
/* XXX: calls out to perform this */
__netif_tx_disable(upper);
}
}
}
......
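__netif_tx_disable() replaces the open-coded per-queue loop removed above because, as the old comment explains, netif_tx_disable() does local_bh_disable()/enable() and the resulting percpu accesses fault (EPT violation) inside the LCD; the new helper calls out so the real work runs in the non-isolated kernel. Its body is not in this diff; a minimal callee-side sketch, assuming the dispatch loop routes the new __NETIF_TX_DISABLE id to __netif_tx_disable_callee() and that recovering the net_device from the message follows the usual glue pattern:

/* Illustrative only -- the real callee body is not part of this diff. */
int __netif_tx_disable_callee(struct fipc_message *_request)
{
	struct net_device *dev = NULL;

	/* looking 'dev' up from the reference carried in _request is elided */
	if (dev)
		netif_tx_disable(dev);	/* safe here: local_bh_disable() runs
					 * in the non-isolated kernel */
	return 0;
}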
......@@ -28,6 +28,8 @@ void napi_disable(struct napi_struct *n) {
void napi_complete_done(struct napi_struct *n, int work_done) {
LIBLCD_MSG("================>$$$$$$ Dummy %s called", __func__);
}
void synchronize_irq(unsigned int irq) { LIBLCD_MSG("================>$$$$$$ Dummy %s called", __func__); }
#endif
int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
......@@ -41,9 +43,6 @@ bool cancel_work_sync(struct work_struct *work) { LIBLCD_MSG("================>$
*/
int rtnl_is_locked(void) { LIBLCD_MSG("================>$$$$$$ Dummy %s called", __func__); return 1; }
void synchronize_irq(unsigned int irq) { LIBLCD_MSG("================>$$$$$$ Dummy %s called", __func__); }
void napi_gro_flush(struct napi_struct *napi, bool flush_old) { LIBLCD_MSG("================>$$$$$$ Dummy %s called", __func__); }
unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
......
......@@ -36,5 +36,7 @@ int msix_vector_handler_callee(struct fipc_message *_request);
int trigger_dump_callee(struct fipc_message *_request);
int poll_callee(struct fipc_message *_request);
void __napi_enable(struct napi_struct *napi);
void __netif_tx_disable(struct net_device *dev);
#endif /* __IXGBE_CALLER_H__ */
......@@ -13,6 +13,7 @@ int eth_mac_addr_callee(struct fipc_message *_request);
int eth_validate_addr_callee(struct fipc_message *_request);
int free_netdev_callee(struct fipc_message *_request);
int netif_carrier_off_callee(struct fipc_message *_request);
int __netif_tx_disable_callee(struct fipc_message *_request);
int netif_carrier_on_callee(struct fipc_message *_request);
int netif_device_attach_callee(struct fipc_message *_request);
int netif_device_detach_callee(struct fipc_message *_request);
......@@ -62,8 +63,11 @@ int napi_gro_receive_callee(struct fipc_message *_request);
int __napi_alloc_skb_callee(struct fipc_message *_request);
int __napi_schedule_irqoff_callee(struct fipc_message *_request);
int ___napi_schedule_irqoff_callee(struct fipc_message *_request);
int __napi_enable_callee(struct fipc_message *_request);
int napi_disable_callee(struct fipc_message *_request);
int napi_complete_done_callee(struct fipc_message *_request);
int synchronize_irq_callee(struct fipc_message *_request);
int eth_type_trans_callee(struct fipc_message *_request);
int skb_add_rx_frag_callee(struct fipc_message *_request);
......
......@@ -141,6 +141,7 @@
#include <linux/netfilter_ingress.h>
#include <linux/sctp.h>
#include <linux/crash_dump.h>
#include <linux/lcd_trace.h>
#include "net-sysfs.h"
......@@ -5243,6 +5244,9 @@ static void net_rx_action(struct softirq_action *h)
}
n = list_first_entry(&list, struct napi_struct, poll_list);
#ifdef CONFIG_LCD_TRACE_BUFFER
add_trace_entry(EVENT_NET_RX_ACTION, 0);
#endif
budget -= napi_poll(n, &repoll);
/* If softirq window is exhausted then punt.
......