@@ -23,6 +23,7 @@ config ARM64
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_API_DEBUG
select HAVE_DMA_ATTRS
+ select HAVE_DYNAMIC_FTRACE
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_GENERIC_DMA_COHERENT
@@ -17,6 +17,23 @@
#ifndef __ASSEMBLY__
extern void _mcount(unsigned long);
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+struct dyn_arch_ftrace {
+ /* No extra data needed for arm64 */
+};
+
+extern unsigned long ftrace_graph_call;
+
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+ /*
+ * addr is the address of the mcount call instruction.
+ * recordmcount does the necessary offset calculation.
+ */
+ return addr;
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_FUNCTION_TRACER */
@@ -88,6 +88,7 @@
add \reg, \reg, #8
.endm
+#ifndef CONFIG_DYNAMIC_FTRACE
/*
* void _mcount(unsigned long return_address)
* @return_address: return address to instrumented function (callsite)
@@ -132,6 +133,44 @@ skip_ftrace_call:
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
ENDPROC(_mcount)
+#else /* CONFIG_DYNAMIC_FTRACE */
+/*
+ * This code is executed only at boot time.
+ * Once initialization is complete, each "bl _mcount" generated by the gcc
+ * -pg option is replaced with "nop" if tracing is disabled, or with
+ * "b ftrace_caller" if it is enabled.
+ */
+ENTRY(_mcount)
+ ret
+ENDPROC(_mcount)
+
+/*
+ * void ftrace_caller(unsigned long return_address)
+ * @return_address: return address to instrumented function (callsite)
+ *
+ * Call any kind of tracer function and/or function graph tracer.
+ */
+ENTRY(ftrace_caller)
+ mcount_enter
+
+ mcount_get_pc0 x0 // pc in callsite
+ mcount_get_lr x1 // callsite's lr (adjusted)
+
+ .global ftrace_call
+ftrace_call: // tracer(pc, lr);
+ nop // This will be replaced with "bl xxx"
+ // where xxx can be any kind of tracer.
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ .global ftrace_graph_call
+ftrace_graph_call: // ftrace_graph_caller();
+ nop // If enabled, this will be replaced
+ // with "b ftrace_graph_caller"
+#endif
+
+ mcount_exit
+ENDPROC(ftrace_caller)
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
ENTRY(ftrace_stub)
ret
ENDPROC(ftrace_stub)
@@ -17,6 +17,76 @@
#include <asm/ftrace.h>
#include <asm/insn.h>
+#ifdef CONFIG_DYNAMIC_FTRACE
+/* FIXME: can be replaced by aarch64_insn_patch_text_nosync() */
+static int ftrace_modify_code(unsigned long pc, unsigned int old,
+ unsigned int new, bool validate)
+{
+ unsigned int replaced;
+
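+ /*
+ * A64 instructions are always stored little-endian; on a big-endian
+ * kernel, byte-swap the expected and new encodings so that the compare
+ * and write below operate on the in-memory representation.
+ */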
+#ifdef __AARCH64EB__
+ old = swab32(old);
+ new = swab32(new);
+#endif
+
+ if (validate) {
+ if (probe_kernel_read(&replaced, (void *)pc, MCOUNT_INSN_SIZE))
+ return -EFAULT;
+
+ if (replaced != old)
+ return -EINVAL;
+ }
+
+ if (probe_kernel_write((void *)pc, &new, MCOUNT_INSN_SIZE))
+ return -EPERM;
+
+ flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);
+
+ return 0;
+}
+
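+/*
+ * Redirect the "ftrace_call" slot inside ftrace_caller so that it
+ * branches-and-links to the tracer function currently registered with
+ * the ftrace core. No validation: the slot's old contents vary.
+ */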
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+ unsigned long pc;
+ unsigned int new;
+
+ pc = (unsigned long)&ftrace_call;
+ new = aarch64_insn_gen_branch_imm(pc, (unsigned long)func, true);
+
+ return ftrace_modify_code(pc, 0, new, false);
+}
+
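+/* Turn the NOP at rec->ip into "bl <addr>" to start tracing a function. */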
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned long pc = rec->ip;
+ unsigned int old, new;
+
+ old = aarch64_insn_gen_nop();
+ new = aarch64_insn_gen_branch_imm(pc, addr, true);
+
+ return ftrace_modify_code(pc, old, new, true);
+}
+
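+/*
+ * Turn the "bl <addr>" at rec->ip back into a NOP. This is also how each
+ * "bl _mcount" call site emitted by -pg is disabled at boot.
+ */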
+int ftrace_make_nop(struct module *mod,
+ struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned long pc = rec->ip;
+ unsigned int old, new;
+
+ old = aarch64_insn_gen_branch_imm(pc, addr, true);
+ new = aarch64_insn_gen_nop();
+
+ return ftrace_modify_code(pc, old, new, true);
+}
+
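+/* The ftrace core reads an error code back through *data; 0 is success. */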
+int __init ftrace_dyn_arch_init(void *data)
+{
+ *(unsigned long *)data = 0;
+
+ return 0;
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
unsigned long frame_pointer)
@@ -80,4 +150,37 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
return;
}
}
+
+#ifdef CONFIG_DYNAMIC_FTRACE
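+/* Flip a patchable call site between a NOP and a plain branch (B) to func. */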
+static int __ftrace_modify_caller(unsigned long *callsite,
+ void (*func) (void), bool enable)
+{
+ unsigned long pc = (unsigned long)callsite;
+ unsigned int branch, nop, old, new;
+
+ branch = aarch64_insn_gen_branch_imm(pc, (unsigned long)func, false);
+ nop = aarch64_insn_gen_nop();
+ old = enable ? nop : branch;
+ new = enable ? branch : nop;
+
+ return ftrace_modify_code(pc, old, new, true);
+}
+
+static int ftrace_modify_graph_caller(bool enable)
+{
+ return __ftrace_modify_caller(&ftrace_graph_call,
+ ftrace_graph_caller,
+ enable);
+}
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+ return ftrace_modify_graph_caller(true);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+ return ftrace_modify_graph_caller(false);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
This enables the DYNAMIC_FTRACE configuration option. ftrace_caller() is
the entry function that replaces _mcount(). When tracing is enabled for a
function, the branch instruction to _mcount() is replaced with a branch
to ftrace_caller(); when it is disabled, it is replaced with a nop.
See Documentation/trace/ftrace-design.txt

Signed-off-by: AKASHI Takahiro <takahiro.akashi@linaro.org>
---
 arch/arm64/Kconfig               |   1 +
 arch/arm64/include/asm/ftrace.h  |  17 +++++++
 arch/arm64/kernel/entry-ftrace.S |  39 +++++++
 arch/arm64/kernel/ftrace.c       | 103 ++++++++++++++++++++++++++++++++++++++
 4 files changed, 160 insertions(+)
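A note for reviewers who want to see concretely what gets written into a
call site: below is a small stand-alone C sketch of the A64 encodings
involved. The gen_branch_imm() helper and the sample addresses are
invented for illustration; in the kernel the real encoders are
aarch64_insn_gen_branch_imm() and aarch64_insn_gen_nop() in
arch/arm64/kernel/insn.c, which additionally validate the +/-128MB
branch range.

  /*
   * Illustration only: the instructions that dynamic ftrace patches in.
   * gen_branch_imm() is a hypothetical stand-in for the kernel's
   * aarch64_insn_gen_branch_imm(); the addresses in main() are made up.
   */
  #include <stdint.h>
  #include <stdio.h>

  #define A64_NOP 0xd503201fu	/* HINT #0 */
  #define A64_B   0x14000000u	/* B  <imm26> */
  #define A64_BL  0x94000000u	/* BL <imm26> */

  /* Encode a PC-relative B/BL; target must lie within +/-128MB of pc. */
  static uint32_t gen_branch_imm(uint64_t pc, uint64_t target, int link)
  {
  	int64_t offset = (int64_t)(target - pc);
  	uint32_t imm26 = ((uint64_t)offset >> 2) & 0x03ffffffu;

  	return (link ? A64_BL : A64_B) | imm26;
  }

  int main(void)
  {
  	uint64_t callsite = 0xffff000008081000ull; /* stands in for rec->ip */
  	uint64_t tracer   = 0xffff000008090000ull; /* stands in for ftrace_caller */

  	printf("disabled: nop       = %08x\n", A64_NOP);
  	printf("enabled:  bl tracer = %08x\n",
  	       gen_branch_imm(callsite, tracer, 1));
  	return 0;
  }

The link flag mirrors the distinction in the patch: ftrace_make_call()
and ftrace_update_ftrace_func() generate a BL (link=true), while
__ftrace_modify_caller() uses a plain B (link=false) for the
ftrace_graph_call slot.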