diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index ce70677afbf99eb0cb19fc72cce0f59581e341d4..1399f372b5dcc54709fdfc162ef8bbe143f5b249 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -8,3 +8,16 @@ config TRACING
 	bool
 	select DEBUG_FS
 
+config FTRACE
+	bool "Kernel Function Tracer"
+	depends on DEBUG_KERNEL && HAVE_FTRACE
+	select FRAME_POINTER
+	select TRACING
+	help
+	  Enable the kernel to trace every kernel function. This is done
+	  by using a compiler feature to insert a small, 5-byte No-Operation
+	  instruction at the beginning of every kernel function; this NOP
+	  sequence is then dynamically patched into a tracer call when
+	  tracing is enabled by the administrator. If tracing is disabled at
+	  runtime (the boot-up default), the overhead of the NOPs is very
+	  small and not measurable even in micro-benchmarks.
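
The mechanism described above can be pictured as a per-function hook
that is either a no-op or a call into the tracer. Below is a minimal
user-space sketch of that enable/disable logic, not part of the patch:
every name in it (trace_fn_t, nop_hook, trace_hook, traced_fn,
current_hook) is hypothetical, and the real implementation rewrites
the instruction bytes at each call site instead of dispatching through
a function pointer.

#include <stdio.h>

/* Signature modeled on the ftrace hook: ip is the address of the
 * traced function, parent_ip the address of its caller. */
typedef void (*trace_fn_t)(unsigned long ip, unsigned long parent_ip);

/* Stand-in for the 5-byte NOP: tracing disabled, do nothing. */
static void nop_hook(unsigned long ip, unsigned long parent_ip)
{
	(void)ip;
	(void)parent_ip;
}

/* Stand-in for the patched-in tracer call. */
static void trace_hook(unsigned long ip, unsigned long parent_ip)
{
	printf("entered %#lx from %#lx\n", ip, parent_ip);
}

static trace_fn_t current_hook = nop_hook;

/* Every instrumented function begins by invoking the current hook. */
static void traced_fn(void)
{
	current_hook((unsigned long)traced_fn,
		     (unsigned long)__builtin_return_address(0));
}

int main(void)
{
	traced_fn();               /* disabled: hook is a no-op      */
	current_hook = trace_hook; /* the administrator enables it   */
	traced_fn();               /* enabled: prints one trace line */
	return 0;
}
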
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 7af4031752552653f8ac354f88349f205c56c8f3..6bb5e50b4a40f0d2df62cc7f5929e5b24310d547 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -1,5 +1,6 @@
 obj-$(CONFIG_FTRACE) += libftrace.o
 
 obj-$(CONFIG_TRACING) += trace.o
+obj-$(CONFIG_FTRACE) += trace_functions.o
 
 libftrace-y := ftrace.o
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
new file mode 100644
index 0000000000000000000000000000000000000000..82988c5336e0ffaae9367ab8a5df970a4b65b8e9
--- /dev/null
+++ b/kernel/trace/trace_functions.c
@@ -0,0 +1,79 @@
+/*
+ * ring buffer based function tracer
+ *
+ * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
+ * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
+ *
+ * Based on code from the latency_tracer, that is:
+ *
+ *  Copyright (C) 2004-2006 Ingo Molnar
+ *  Copyright (C) 2004 William Lee Irwin III
+ */
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/ftrace.h>
+
+#include "trace.h"
+
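+/* Zero the per-CPU trace buffers and record a fresh start time. */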
+static notrace void function_reset(struct trace_array *tr)
+{
+	int cpu;
+
+	tr->time_start = now(tr->cpu);
+
+	for_each_online_cpu(cpu)
+		tracing_reset(tr->data[cpu]);
+}
+
+static notrace void start_function_trace(struct trace_array *tr)
+{
+	function_reset(tr);
+	tracing_start_function_trace();
+}
+
+static notrace void stop_function_trace(struct trace_array *tr)
+{
+	tracing_stop_function_trace();
+}
+
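+/*
+ * Tracer callbacks: ->init runs when this tracer is selected and
+ * ->reset when it is deselected; ->ctrl_update runs whenever the
+ * tr->ctrl flag is toggled from user space.
+ */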
+static notrace void function_trace_init(struct trace_array *tr)
+{
+	if (tr->ctrl)
+		start_function_trace(tr);
+}
+
+static notrace void function_trace_reset(struct trace_array *tr)
+{
+	if (tr->ctrl)
+		stop_function_trace(tr);
+}
+
+static notrace void function_trace_ctrl_update(struct trace_array *tr)
+{
+	if (tr->ctrl)
+		start_function_trace(tr);
+	else
+		stop_function_trace(tr);
+}
+
+static struct tracer function_trace __read_mostly =
+{
+	.name	     = "ftrace",
+	.init	     = function_trace_init,
+	.reset	     = function_trace_reset,
+	.ctrl_update = function_trace_ctrl_update,
+};
+
+static __init int init_function_trace(void)
+{
+	return register_tracer(&function_trace);
+}
+
+device_initcall(init_function_trace);
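
With the tracer registered at boot via device_initcall(), selecting it
becomes a runtime decision: the administrator writes the tracer's name,
"ftrace", to the current_tracer control file in the tracing debugfs
directory and toggles tracing on or off, which reaches
function_trace_ctrl_update() above. (The control-file names and paths
are an assumption about the tracing core this patch builds on.)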