diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d7ad030a4c49874ca874795be94d972426770726..9175ce91b8f6923bbfaf7e883bcfc0fc27b81fd6 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -269,7 +269,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 	if (unlikely(!tracer_enabled))
 		return;
 
-	raw_local_irq_save(flags);
+	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
 	disabled = atomic_inc_return(&data->disabled);
@@ -278,7 +278,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
 		ftrace(tr, data, ip, parent_ip, flags);
 
 	atomic_dec(&data->disabled);
-	raw_local_irq_restore(flags);
+	local_irq_restore(flags);
 }
 
 static struct ftrace_ops trace_ops __read_mostly =
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 3e4771d3b8905164f768d95679fb8c9cbd7f6857..2715267be4696913b4b89269b981d30766a0a754 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -29,7 +29,7 @@ ctx_switch_func(struct task_struct *prev, struct task_struct *next)
 	if (!tracer_enabled)
 		return;
 
-	raw_local_irq_save(flags);
+	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
 	disabled = atomic_inc_return(&data->disabled);
@@ -38,7 +38,7 @@ ctx_switch_func(struct task_struct *prev, struct task_struct *next)
 		tracing_sched_switch_trace(tr, data, prev, next, flags);
 
 	atomic_dec(&data->disabled);
-	raw_local_irq_restore(flags);
+	local_irq_restore(flags);
 }
 
 void ftrace_ctx_switch(struct task_struct *prev, struct task_struct *next)
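
For reference, below is a minimal sketch of the guard pattern both hunks touch after this change: local interrupts are saved and disabled with the lockdep-aware local_irq_save() (the variant this patch switches to, replacing raw_local_irq_save()), a per-CPU atomic "disabled" counter is bumped so the tracer cannot recurse into itself, the entry is recorded only on the first (non-recursive) level, and everything is unwound in reverse order. This is an illustrative sketch only, not the kernel's actual trace_array layout; sketch_cpu_data, sketch_data and sketch_trace_one() are made-up names.

/*
 * Sketch of the pattern shared by function_trace_call() and
 * ctx_switch_func() as patched above.  Illustrative names only.
 */
#include <linux/kernel.h>
#include <linux/irqflags.h>
#include <linux/atomic.h>
#include <linux/smp.h>

struct sketch_cpu_data {
	atomic_t disabled;		/* recursion guard for this CPU */
};

static struct sketch_cpu_data sketch_data[NR_CPUS];

static void sketch_trace_one(struct sketch_cpu_data *data,
			     unsigned long ip, unsigned long flags)
{
	/* record one trace entry; elided in this sketch */
}

static void sketch_trace_call(unsigned long ip)
{
	struct sketch_cpu_data *data;
	unsigned long flags;
	long disabled;
	int cpu;

	local_irq_save(flags);		/* lockdep-aware variant, as in the hunks above */
	cpu = raw_smp_processor_id();
	data = &sketch_data[cpu];
	disabled = atomic_inc_return(&data->disabled);

	/* trace only if this is the first, non-recursive entry on this CPU */
	if (likely(disabled == 1))
		sketch_trace_one(data, ip, flags);

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}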