Commit c17488d0 authored by Linus Torvalds

Merge tag 'trace-v4.5' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing updates from Steven Rostedt:
 "Not much new with tracing for this release.  Mostly just clean ups and
  minor fixes.

  Here's what else is new:

   - A new TRACE_EVENT_FN_COND macro, combining both _FN and _COND for
     those that want both.

   - A new selftest to test instance creation and deletion

   - Better debug output when ftrace fails"

* tag 'trace-v4.5' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (24 commits)
  ftrace: Fix the race between ftrace and insmod
  ftrace: Add infrastructure for delayed enabling of module functions
  x86: ftrace: Fix the comments for ftrace_modify_code_direct()
  tracing: Fix comment to use tracing_on over tracing_enable
  metag: ftrace: Fix the comments for ftrace_modify_code
  sh: ftrace: Fix the comments for ftrace_modify_code()
  ia64: ftrace: Fix the comments for ftrace_modify_code()
  ftrace: Clean up ftrace_module_init() code
  ftrace: Join functions ftrace_module_init() and ftrace_init_module()
  tracing: Introduce TRACE_EVENT_FN_COND macro
  tracing: Use seq_buf_used() in seq_buf_to_user() instead of len
  bpf: Constify bpf_verifier_ops structure
  ftrace: Have ftrace_ops_get_func() handle RCU and PER_CPU flags too
  ftrace: Remove use of control list and ops
  ftrace: Fix output of enabled_functions for showing tramp
  ftrace: Fix a typo in comment
  ftrace: Show all tramps registered to a record on ftrace_bug()
  ftrace: Add variable ftrace_expected for archs to show expected code
  ftrace: Add new type to distinguish what kind of ftrace_bug()
  tracing: Update cond flag when enabling or disabling a trigger
  ...
parents 34a9304a 5156dca3
@@ -97,13 +97,11 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
unsigned char replaced[MCOUNT_INSN_SIZE];
/*
* Note: Due to modules and __init, code can
* disappear and change, we need to protect against faulting
* as well as code changing. We do this by using the
* probe_kernel_* functions.
*
* No real locking needed, this code is run through
* kstop_machine, or before SMP starts.
* Note:
* We are paranoid about modifying text, as if a bug was to happen, it
* could cause us to read or write to someplace that could cause harm.
* Carefully read and modify the code with probe_kernel_*(), and make
* sure what we read is what we expected it to be before modifying it.
*/
if (!do_check)
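The replacement comment above describes a read-verify-write discipline. A minimal sketch of that pattern, assuming the v4.5-era probe_kernel_*() helpers; the function name is hypothetical and not part of this commit:

```c
#include <linux/ftrace.h>	/* MCOUNT_INSN_SIZE */
#include <linux/string.h>	/* memcmp */
#include <linux/uaccess.h>	/* probe_kernel_read/probe_kernel_write */

/* Hypothetical helper sketching the pattern the comment describes. */
static int modify_code_checked(unsigned long ip,
			       const unsigned char *old_code,
			       const unsigned char *new_code)
{
	unsigned char replaced[MCOUNT_INSN_SIZE];

	/* Read via probe_kernel_read() so text that vanished with a
	 * module or __init section faults gracefully, not fatally. */
	if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Make sure what we read is what we expected it to be. */
	if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	/* Only then patch the site, again tolerating faults. */
	if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
		return -EPERM;

	return 0;
}
```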
@@ -54,12 +54,11 @@ static int ftrace_modify_code(unsigned long pc, unsigned char *old_code,
unsigned char replaced[MCOUNT_INSN_SIZE];
/*
* Note: Due to modules and __init, code can
* disappear and change, we need to protect against faulting
* as well as code changing.
*
* No real locking needed, this code is run through
* kstop_machine.
* Note:
* We are paranoid about modifying text, as if a bug was to happen, it
* could cause us to read or write to someplace that could cause harm.
* Carefully read and modify the code with probe_kernel_*(), and make
* sure what we read is what we expected it to be before modifying it.
*/
/* read the text we want to modify */
@@ -212,13 +212,11 @@ static int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
unsigned char replaced[MCOUNT_INSN_SIZE];
/*
* Note: Due to modules and __init, code can
* disappear and change, we need to protect against faulting
* as well as code changing. We do this by using the
* probe_kernel_* functions.
*
* No real locking needed, this code is run through
* kstop_machine, or before SMP starts.
* Note:
* We are paranoid about modifying text, as if a bug was to happen, it
* could cause us to read or write to someplace that could cause harm.
* Carefully read and modify the code with probe_kernel_*(), and make
* sure what we read is what we expected it to be before modifying it.
*/
/* read the text we want to modify */
@@ -105,14 +105,14 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
{
unsigned char replaced[MCOUNT_INSN_SIZE];
ftrace_expected = old_code;
/*
* Note: Due to modules and __init, code can
* disappear and change, we need to protect against faulting
* as well as code changing. We do this by using the
* probe_kernel_* functions.
*
* No real locking needed, this code is run through
* kstop_machine, or before SMP starts.
* Note:
* We are paranoid about modifying text, as if a bug was to happen, it
* could cause us to read or write to someplace that could cause harm.
* Carefully read and modify the code with probe_kernel_*(), and make
* sure what we read is what we expected it to be before modifying it.
*/
/* read the text we want to modify */
@@ -154,6 +154,8 @@ int ftrace_make_nop(struct module *mod,
if (addr == MCOUNT_ADDR)
return ftrace_modify_code_direct(rec->ip, old, new);
ftrace_expected = NULL;
/* Normal cases use add_brk_on_nop */
WARN_ONCE(1, "invalid use of ftrace_make_nop");
return -EINVAL;
@@ -220,6 +222,7 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr)
{
WARN_ON(1);
ftrace_expected = NULL;
return -EINVAL;
}
@@ -314,6 +317,8 @@ static int add_break(unsigned long ip, const char *old)
if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
return -EFAULT;
ftrace_expected = old;
/* Make sure it is what we expect it to be */
if (memcmp(replaced, old, MCOUNT_INSN_SIZE) != 0)
return -EINVAL;
@@ -413,6 +418,8 @@ static int remove_breakpoint(struct dyn_ftrace *rec)
ftrace_addr = ftrace_get_addr_curr(rec);
nop = ftrace_call_replace(ip, ftrace_addr);
ftrace_expected = nop;
if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0)
return -EINVAL;
}
@@ -76,8 +76,8 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
* ENABLED - set/unset when ftrace_ops is registered/unregistered
* DYNAMIC - set when ftrace_ops is registered to denote dynamically
* allocated ftrace_ops which need special care
* CONTROL - set manualy by ftrace_ops user to denote the ftrace_ops
* could be controled by following calls:
* PER_CPU - set manualy by ftrace_ops user to denote the ftrace_ops
* could be controlled by following calls:
* ftrace_function_local_enable
* ftrace_function_local_disable
* SAVE_REGS - The ftrace_ops wants regs saved at each function called
@@ -121,7 +121,7 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
enum {
FTRACE_OPS_FL_ENABLED = 1 << 0,
FTRACE_OPS_FL_DYNAMIC = 1 << 1,
FTRACE_OPS_FL_CONTROL = 1 << 2,
FTRACE_OPS_FL_PER_CPU = 1 << 2,
FTRACE_OPS_FL_SAVE_REGS = 1 << 3,
FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 4,
FTRACE_OPS_FL_RECURSION_SAFE = 1 << 5,
@@ -134,6 +134,7 @@ enum {
FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12,
FTRACE_OPS_FL_IPMODIFY = 1 << 13,
FTRACE_OPS_FL_PID = 1 << 14,
FTRACE_OPS_FL_RCU = 1 << 15,
};
#ifdef CONFIG_DYNAMIC_FTRACE
@@ -146,11 +147,11 @@ struct ftrace_ops_hash {
#endif
/*
* Note, ftrace_ops can be referenced outside of RCU protection.
* (Although, for perf, the control ops prevent that). If ftrace_ops is
* allocated and not part of kernel core data, the unregistering of it will
* perform a scheduling on all CPUs to make sure that there are no more users.
* Depending on the load of the system that may take a bit of time.
* Note, ftrace_ops can be referenced outside of RCU protection, unless
* the RCU flag is set. If ftrace_ops is allocated and not part of kernel
* core data, the unregistering of it will perform a scheduling on all CPUs
* to make sure that there are no more users. Depending on the load of the
* system that may take a bit of time.
*
* Any private data added must also take care not to be freed and if private
* data is added to a ftrace_ops that is in core code, the user of the
@@ -196,34 +197,34 @@ int unregister_ftrace_function(struct ftrace_ops *ops);
void clear_ftrace_function(void);
/**
* ftrace_function_local_enable - enable controlled ftrace_ops on current cpu
* ftrace_function_local_enable - enable ftrace_ops on current cpu
*
* This function enables tracing on current cpu by decreasing
* the per cpu control variable.
* It must be called with preemption disabled and only on ftrace_ops
* registered with FTRACE_OPS_FL_CONTROL. If called without preemption
* registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
* disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
*/
static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
{
if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
return;
(*this_cpu_ptr(ops->disabled))--;
}
/**
* ftrace_function_local_disable - enable controlled ftrace_ops on current cpu
* ftrace_function_local_disable - disable ftrace_ops on current cpu
*
* This function enables tracing on current cpu by decreasing
* This function disables tracing on current cpu by increasing
* the per cpu control variable.
* It must be called with preemption disabled and only on ftrace_ops
* registered with FTRACE_OPS_FL_CONTROL. If called without preemption
* registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
* disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
*/
static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
{
if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
return;
(*this_cpu_ptr(ops->disabled))++;
@@ -235,12 +236,12 @@ static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
*
* This function returns value of ftrace_ops::disabled on current cpu.
* It must be called with preemption disabled and only on ftrace_ops
* registered with FTRACE_OPS_FL_CONTROL. If called without preemption
* registered with FTRACE_OPS_FL_PER_CPU. If called without preemption
* disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
*/
static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
{
WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL));
WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU));
return *this_cpu_ptr(ops->disabled);
}
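A hedged usage sketch for these per-CPU helpers; my_ops and my_tracer_func are hypothetical, while the flag and helper names come from the hunks above:

```c
#include <linux/ftrace.h>
#include <linux/preempt.h>

static void my_tracer_func(unsigned long ip, unsigned long parent_ip,
			   struct ftrace_ops *op, struct pt_regs *regs);

static struct ftrace_ops my_ops = {
	.func  = my_tracer_func,
	.flags = FTRACE_OPS_FL_PER_CPU,	/* was FTRACE_OPS_FL_CONTROL */
};

static void pause_tracing_on_this_cpu(void)
{
	/* this_cpu_ptr() inside the helper requires preemption off */
	preempt_disable();
	ftrace_function_local_disable(&my_ops);	/* bumps ops->disabled */
	preempt_enable();
}
```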
@@ -296,6 +297,21 @@ int ftrace_arch_code_modify_post_process(void);
struct dyn_ftrace;
enum ftrace_bug_type {
FTRACE_BUG_UNKNOWN,
FTRACE_BUG_INIT,
FTRACE_BUG_NOP,
FTRACE_BUG_CALL,
FTRACE_BUG_UPDATE,
};
extern enum ftrace_bug_type ftrace_bug_type;
/*
* Archs can set this to point to a variable that holds the value that was
* expected at the call site before calling ftrace_bug().
*/
extern const void *ftrace_expected;
void ftrace_bug(int err, struct dyn_ftrace *rec);
struct seq_file;
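A sketch of the pattern the new comment describes, mirroring the x86 hunks earlier in this diff; the verifier helper itself is hypothetical:

```c
#include <linux/ftrace.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static int arch_verify_site(struct dyn_ftrace *rec, const unsigned char *old)
{
	unsigned char cur[MCOUNT_INSN_SIZE];

	if (probe_kernel_read(cur, (void *)rec->ip, MCOUNT_INSN_SIZE))
		return -EFAULT;

	/* Point ftrace_expected at the wanted bytes so that, when the
	 * core reacts to our error with ftrace_bug(), the report shows
	 * an "expected:" line next to the "actual:" bytes. */
	ftrace_expected = old;
	if (memcmp(cur, old, MCOUNT_INSN_SIZE) != 0)
		return -EINVAL;

	ftrace_expected = NULL;
	return 0;
}
```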
@@ -341,6 +357,7 @@ bool is_ftrace_trampoline(unsigned long addr);
* REGS - the record wants the function to save regs
* REGS_EN - the function is set up to save regs.
* IPMODIFY - the record allows for the IP address to be changed.
* DISABLED - the record is not ready to be touched yet
*
* When a new ftrace_ops is registered and wants a function to save
* pt_regs, the rec->flag REGS is set. When the function has been
@@ -355,10 +372,11 @@ enum {
FTRACE_FL_TRAMP = (1UL << 28),
FTRACE_FL_TRAMP_EN = (1UL << 27),
FTRACE_FL_IPMODIFY = (1UL << 26),
FTRACE_FL_DISABLED = (1UL << 25),
};
#define FTRACE_REF_MAX_SHIFT 26
#define FTRACE_FL_BITS 6
#define FTRACE_REF_MAX_SHIFT 25
#define FTRACE_FL_BITS 7
#define FTRACE_FL_MASKED_BITS ((1UL << FTRACE_FL_BITS) - 1)
#define FTRACE_FL_MASK (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT)
#define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1)
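The arithmetic behind the two constant changes, assuming the conventional values of the higher flag bits in this release:

```c
/*
 * rec->flags layout once FTRACE_FL_DISABLED (bit 25) exists:
 *
 *   bit 31  FTRACE_FL_ENABLED     bit 27  FTRACE_FL_TRAMP_EN
 *   bit 30  FTRACE_FL_REGS        bit 26  FTRACE_FL_IPMODIFY
 *   bit 29  FTRACE_FL_REGS_EN     bit 25  FTRACE_FL_DISABLED
 *   bit 28  FTRACE_FL_TRAMP
 *
 * Seven flag bits (FTRACE_FL_BITS = 7) now occupy bits 31..25, so the
 * reference count below them loses a bit: FTRACE_REF_MAX_SHIFT drops
 * from 26 to 25 and FTRACE_REF_MAX becomes (1UL << 25) - 1.
 */
```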
@@ -479,6 +479,10 @@ extern void syscall_unregfunc(void);
#define TRACE_EVENT_FN(name, proto, args, struct, \
assign, print, reg, unreg) \
DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
#define TRACE_EVENT_FN_COND(name, proto, args, cond, struct, \
assign, print, reg, unreg) \
DECLARE_TRACE_CONDITION(name, PARAMS(proto), \
PARAMS(args), PARAMS(cond))
#define TRACE_EVENT_CONDITION(name, proto, args, cond, \
struct, assign, print) \
DECLARE_TRACE_CONDITION(name, PARAMS(proto), \
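How the new macro might be used, following the argument order above. This is a schematic sketch; every name is hypothetical, and a real definition lives in a trace-events header with the usual TRACE_SYSTEM boilerplate:

```c
TRACE_EVENT_FN_COND(sample_event,

	TP_PROTO(int cpu, unsigned long ip),

	TP_ARGS(cpu, ip),

	/* the _COND part: the event fires only when this is true */
	TP_CONDITION(cpu != 0),

	TP_STRUCT__entry(
		__field(int,		cpu)
		__field(unsigned long,	ip)
	),

	TP_fast_assign(
		__entry->cpu = cpu;
		__entry->ip  = ip;
	),

	TP_printk("cpu=%d ip=0x%lx", __entry->cpu, __entry->ip),

	/* the _FN part: called when the event is registered/unregistered */
	sample_event_reg, sample_event_unreg
);
```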
@@ -40,6 +40,11 @@
assign, print, reg, unreg) \
DEFINE_TRACE_FN(name, reg, unreg)
#undef TRACE_EVENT_FN_COND
#define TRACE_EVENT_FN_COND(name, proto, args, cond, tstruct, \
assign, print, reg, unreg) \
DEFINE_TRACE_FN(name, reg, unreg)
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args) \
DEFINE_TRACE(name)
@@ -93,6 +98,7 @@
#undef TRACE_EVENT
#undef TRACE_EVENT_FN
#undef TRACE_EVENT_FN_COND
#undef TRACE_EVENT_CONDITION
#undef DECLARE_EVENT_CLASS
#undef DEFINE_EVENT
@@ -123,6 +123,12 @@ TRACE_MAKE_SYSTEM_STR();
TRACE_EVENT(name, PARAMS(proto), PARAMS(args), \
PARAMS(tstruct), PARAMS(assign), PARAMS(print)) \
#undef TRACE_EVENT_FN_COND
#define TRACE_EVENT_FN_COND(name, proto, args, cond, tstruct, \
assign, print, reg, unreg) \
TRACE_EVENT_CONDITION(name, PARAMS(proto), PARAMS(args), PARAMS(cond), \
PARAMS(tstruct), PARAMS(assign), PARAMS(print)) \
#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(name, value) \
__TRACE_EVENT_FLAGS(name, value)
@@ -316,7 +316,7 @@ static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type
return true;
}
static struct bpf_verifier_ops kprobe_prog_ops = {
static const struct bpf_verifier_ops kprobe_prog_ops = {
.get_func_proto = kprobe_prog_func_proto,
.is_valid_access = kprobe_prog_is_valid_access,
};
@@ -62,8 +62,6 @@
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12
#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_CONTROL)
#ifdef CONFIG_DYNAMIC_FTRACE
#define INIT_OPS_HASH(opsname) \
.func_hash = &opsname.local_hash, \
@@ -113,14 +111,9 @@ static int ftrace_disabled __read_mostly;
static DEFINE_MUTEX(ftrace_lock);
static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;
static struct ftrace_ops control_ops;
static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *regs);
#if ARCH_SUPPORTS_FTRACE_OPS
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
@@ -203,7 +196,7 @@ void clear_ftrace_function(void)
ftrace_trace_function = ftrace_stub;
}
static void control_ops_disable_all(struct ftrace_ops *ops)
static void per_cpu_ops_disable_all(struct ftrace_ops *ops)
{
int cpu;
@@ -211,16 +204,19 @@ static void control_ops_disable_all(struct ftrace_ops *ops)
*per_cpu_ptr(ops->disabled, cpu) = 1;
}
static int control_ops_alloc(struct ftrace_ops *ops)
static int per_cpu_ops_alloc(struct ftrace_ops *ops)
{
int __percpu *disabled;
if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_PER_CPU)))
return -EINVAL;
disabled = alloc_percpu(int);
if (!disabled)
return -ENOMEM;
ops->disabled = disabled;
control_ops_disable_all(ops);
per_cpu_ops_disable_all(ops);
return 0;
}
@@ -256,10 +252,11 @@ static inline void update_function_graph_func(void) { }
static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
{
/*
* If this is a dynamic ops or we force list func,
* If this is a dynamic, RCU, or per CPU ops, or we force list func,
* then it needs to call the list anyway.
*/
if (ops->flags & FTRACE_OPS_FL_DYNAMIC || FTRACE_FORCE_LIST_FUNC)
if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU |
FTRACE_OPS_FL_RCU) || FTRACE_FORCE_LIST_FUNC)
return ftrace_ops_list_func;
return ftrace_ops_get_func(ops);
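The reason these flags force the list function, sketched from what the assist wrapper in this release roughly does; the function name here is illustrative, not the kernel's:

```c
#include <linux/ftrace.h>
#include <linux/rcupdate.h>

static void ops_assist_sketch(unsigned long ip, unsigned long parent_ip,
			      struct ftrace_ops *op, struct pt_regs *regs)
{
	/* RCU ops must not run while RCU is not watching (e.g. idle) */
	if ((op->flags & FTRACE_OPS_FL_RCU) && !rcu_is_watching())
		return;

	/* PER_CPU ops honor the per-cpu disabled counter */
	if ((op->flags & FTRACE_OPS_FL_PER_CPU) &&
	    ftrace_function_local_disabled(op))
		return;

	op->func(ip, parent_ip, op, regs);
}
```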
@@ -383,26 +380,6 @@ static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
return 0;
}
static void add_ftrace_list_ops(struct ftrace_ops **list,
struct ftrace_ops *main_ops,
struct ftrace_ops *ops)
{
int first = *list == &ftrace_list_end;
add_ftrace_ops(list, ops);
if (first)
add_ftrace_ops(&ftrace_ops_list, main_ops);
}
static int remove_ftrace_list_ops(struct ftrace_ops **list,
struct ftrace_ops *main_ops,
struct ftrace_ops *ops)
{
int ret = remove_ftrace_ops(list, ops);
if (!ret && *list == &ftrace_list_end)
ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
return ret;
}
static void ftrace_update_trampoline(struct ftrace_ops *ops);
static int __register_ftrace_function(struct ftrace_ops *ops)
@@ -430,14 +407,12 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
if (!core_kernel_data((unsigned long)ops))
ops->flags |= FTRACE_OPS_FL_DYNAMIC;
if (ops->flags & FTRACE_OPS_FL_CONTROL) {
if (control_ops_alloc(ops))
if (ops->flags & FTRACE_OPS_FL_PER_CPU) {
if (per_cpu_ops_alloc(ops))
return -ENOMEM;
add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
/* The control_ops needs the trampoline update */
ops = &control_ops;
} else
add_ftrace_ops(&ftrace_ops_list, ops);
}
add_ftrace_ops(&ftrace_ops_list, ops);
/* Always save the function, and reset at unregistering */
ops->saved_func = ops->func;
@@ -460,11 +435,7 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
return -EBUSY;
if (ops->flags & FTRACE_OPS_FL_CONTROL) {
ret = remove_ftrace_list_ops(&ftrace_control_list,
&control_ops, ops);
} else
ret = remove_ftrace_ops(&ftrace_ops_list, ops);
ret = remove_ftrace_ops(&ftrace_ops_list, ops);
if (ret < 0)
return ret;
@@ -1687,6 +1658,9 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
int in_hash = 0;
int match = 0;
if (rec->flags & FTRACE_FL_DISABLED)
continue;
if (all) {
/*
* Only the filter_hash affects all records.
@@ -1940,7 +1914,7 @@ static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
}
static void print_ip_ins(const char *fmt, unsigned char *p)
static void print_ip_ins(const char *fmt, const unsigned char *p)
{
int i;
@@ -1952,6 +1926,31 @@ static void print_ip_ins(const char *fmt, unsigned char *p)
static struct ftrace_ops *
ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
static struct ftrace_ops *
ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
enum ftrace_bug_type ftrace_bug_type;
const void *ftrace_expected;
static void print_bug_type(void)
{
switch (ftrace_bug_type) {
case FTRACE_BUG_UNKNOWN:
break;
case FTRACE_BUG_INIT:
pr_info("Initializing ftrace call sites\n");
break;
case FTRACE_BUG_NOP:
pr_info("Setting ftrace call site to NOP\n");
break;
case FTRACE_BUG_CALL:
pr_info("Setting ftrace call site to call ftrace function\n");
break;
case FTRACE_BUG_UPDATE:
pr_info("Updating ftrace call site to call a different ftrace function\n");
break;
}
}
/**
* ftrace_bug - report and shutdown function tracer
@@ -1979,8 +1978,12 @@ void ftrace_bug(int failed, struct dyn_ftrace *rec)
FTRACE_WARN_ON_ONCE(1);
pr_info("ftrace failed to modify ");
print_ip_sym(ip);
print_ip_ins(" actual: ", (unsigned char *)ip);
print_ip_ins(" actual: ", (unsigned char *)ip);
pr_cont("\n");
if (ftrace_expected) {
print_ip_ins(" expected: ", ftrace_expected);
pr_cont("\n");
}
break;
case -EPERM:
FTRACE_WARN_ON_ONCE(1);
@@ -1992,6 +1995,7 @@ void ftrace_bug(int failed, struct dyn_ftrace *rec)
pr_info("ftrace faulted on unknown error ");
print_ip_sym(ip);
}
print_bug_type();
if (rec) {
struct ftrace_ops *ops = NULL;
@@ -2000,15 +2004,19 @@ void ftrace_bug(int failed, struct dyn_ftrace *rec)
rec->flags & FTRACE_FL_REGS ? " R" : " ");
if (rec->flags & FTRACE_FL_TRAMP_EN) {
ops = ftrace_find_tramp_ops_any(rec);
if (ops)
pr_cont("\ttramp: %pS",
(void *)ops->trampoline);
else
if (ops) {
do {
pr_cont("\ttramp: %pS (%pS)",
(void *)ops->trampoline,
(void *)ops->func);
ops = ftrace_find_tramp_ops_next(rec, ops);
} while (ops);
} else
pr_cont("\ttramp: ERROR!");
}
ip = ftrace_get_addr_curr(rec);
pr_cont(" expected tramp: %lx\n", ip);
pr_cont("\n expected tramp: %lx\n", ip);
}
}
@@ -2016,6 +2024,11 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
{
unsigned long flag = 0UL;
ftrace_bug_type = FTRACE_BUG_UNKNOWN;
if (rec->flags & FTRACE_FL_DISABLED)
return FTRACE_UPDATE_IGNORE;
/*
* If we are updating calls:
*
@@ -2077,9 +2090,12 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
* from the save regs, to a non-save regs function or
* vice versa, or from a trampoline call.
*/
if (flag & FTRACE_FL_ENABLED)
if (flag & FTRACE_FL_ENABLED) {
ftrace_bug_type = FTRACE_BUG_CALL;
return FTRACE_UPDATE_MAKE_CALL;
}
ftrace_bug_type = FTRACE_BUG_UPDATE;
return FTRACE_UPDATE_MODIFY_CALL;
}
@@ -2096,6 +2112,7 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
FTRACE_FL_REGS_EN);
}
ftrace_bug_type = FTRACE_BUG_NOP;
return FTRACE_UPDATE_MAKE_NOP;
}
@@ -2144,6 +2161,24 @@ ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
return NULL;
}
static struct ftrace_ops *
ftrace_find_tramp_ops_next(struct dyn_ftrace *rec,
struct ftrace_ops *op)
{
unsigned long ip = rec->ip;
while_for_each_ftrace_op(op) {
if (!op->trampoline)
continue;
if (hash_contains_ip(ip, op->func_hash))
return op;
}
return NULL;
}
static struct ftrace_ops *
ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
{
@@ -2307,17 +2342,22 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
ret = ftrace_update_record(rec, enable);
ftrace_bug_type = FTRACE_BUG_UNKNOWN;
switch (ret) {
case FTRACE_UPDATE_IGNORE:
return 0;
case FTRACE_UPDATE_MAKE_CALL:
ftrace_bug_type = FTRACE_BUG_CALL;
return ftrace_make_call(rec, ftrace_addr);
case FTRACE_UPDATE_MAKE_NOP:
ftrace_bug_type = FTRACE_BUG_NOP;
return ftrace_make_nop(NULL, rec, ftrace_old_addr);
case FTRACE_UPDATE_MODIFY_CALL:
ftrace_bug_type = FTRACE_BUG_UPDATE;
return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
}
@@ -2425,6 +2465,7 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
if (ret) {
ftrace_bug_type = FTRACE_BUG_INIT;
ftrace_bug(ret, rec);
return 0;
}
@@ -2566,7 +2607,7 @@ void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
}
static void control_ops_free(struct ftrace_ops *ops)