pstore/ftrace: Convert to its own enable/disable debugfs knob

With this patch we no longer reuse the function tracer infrastructure; instead,
we register our own tracer back-end via a debugfs knob (a rough sketch of its
shape follows the list below).

It's a bit more code, but that is the only downside. On the bright side we
have:

- Ability to make the persistent_ram module removable (when needed, we can
  move the ftrace_ops struct into the module). Note that persistent_ram is
  still not removable for other reasons, but with this patch it is one less
  thing to worry about;

- The pstore part is more isolated from the generic function tracer. We
  already tried registering our own tracer in available_tracers, but that way
  we lose the ability to see the traces while recording them to pstore. This
  solution is somewhere in the middle: we register only the "internal ftracer"
  back-end, but not the "front-end";

- When only pstore tracing is enabled, the kernel writes only to the pstore
  buffer, omitting the function tracer buffer (which, of course, can still be
  enabled via 'echo function > current_tracer').
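
For reference, a minimal sketch of the shape of the new back-end. It is
illustrative only: the knob name ("record_ftrace"), the helper names and the
pstore_debugfs_dir argument are assumptions, not necessarily what
fs/pstore/ftrace.c ends up using; the callback keeps the two-argument
pstore_ftrace_call() signature seen in the hunk below:

	#include <linux/debugfs.h>
	#include <linux/ftrace.h>
	#include <linux/fs.h>
	#include <linux/kernel.h>
	#include <linux/pstore.h>
	#include <linux/uaccess.h>

	static struct ftrace_ops pstore_ftrace_ops __read_mostly = {
		/*
		 * pstore_ftrace_call() writes the ip/parent_ip pair into the
		 * persistent ram zone; it has to be notrace itself.
		 */
		.func	= pstore_ftrace_call,
	};

	static ssize_t pstore_ftrace_knob_write(struct file *f,
			const char __user *buf, size_t count, loff_t *ppos)
	{
		u8 on;
		ssize_t ret;

		ret = kstrtou8_from_user(buf, count, 2, &on);
		if (ret)
			return ret;

		/*
		 * Register/unregister our ftrace_ops directly with the
		 * function tracer core; no tracer "front-end" is involved.
		 */
		if (on)
			ret = register_ftrace_function(&pstore_ftrace_ops);
		else
			ret = unregister_ftrace_function(&pstore_ftrace_ops);

		return ret ? ret : count;
	}

	static const struct file_operations pstore_knob_fops = {
		.open	= simple_open,
		.write	= pstore_ftrace_knob_write,
	};

	/*
	 * Called from pstore init code; 'dir' is assumed to be the pstore
	 * debugfs directory created elsewhere.
	 */
	static void pstore_register_ftrace_knob(struct dentry *dir)
	{
		debugfs_create_file("record_ftrace", 0600, dir, NULL,
				    &pstore_knob_fops);
	}

With something like the above, recording would be toggled with e.g.
'echo 1 > /sys/kernel/debug/pstore/record_ftrace' (the exact path depends on
where the knob is created).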

Suggested-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Anton Vorontsov <anton.vorontsov@linaro.org>
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index a426f41..0ad83e3 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -13,7 +13,6 @@
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
-#include <linux/pstore.h>
 #include <linux/fs.h>
 
 #include "trace.h"
@@ -75,10 +74,9 @@
 	preempt_enable_notrace();
 }
 
-/* Our two options */
+/* Our option */
 enum {
 	TRACE_FUNC_OPT_STACK	= 0x1,
-	TRACE_FUNC_OPT_PSTORE	= 0x2,
 };
 
 static struct tracer_flags func_flags;
@@ -106,12 +104,6 @@
 	disabled = atomic_inc_return(&data->disabled);
 
 	if (likely(disabled == 1)) {
-		/*
-		 * So far tracing doesn't support multiple buffers, so
-		 * we make an explicit call for now.
-		 */
-		if (unlikely(func_flags.val & TRACE_FUNC_OPT_PSTORE))
-			pstore_ftrace_call(ip, parent_ip);
 		pc = preempt_count();
 		trace_function(tr, ip, parent_ip, flags, pc);
 	}
@@ -177,9 +169,6 @@
 #ifdef CONFIG_STACKTRACE
 	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
 #endif
-#ifdef CONFIG_PSTORE_FTRACE
-	{ TRACER_OPT(func_pstore, TRACE_FUNC_OPT_PSTORE) },
-#endif
 	{ } /* Always set a last empty entry */
 };
 
@@ -232,8 +221,6 @@
 		}
 
 		break;
-	case TRACE_FUNC_OPT_PSTORE:
-		break;
 	default:
 		return -EINVAL;
 	}