// SPDX-License-Identifier: GPL-2.0-or-later
/* Rewritten by Rusty Russell, on the backs of many others...
   Copyright (C) 2001 Rusty Russell, 2002 Rusty Russell IBM.

*/
#include <linux/ftrace.h>
#include <linux/memory.h>
#include <linux/extable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/filter.h>

#include <asm/sections.h>
#include <linux/uaccess.h>

/*
 * Mutex protecting text section modification (dynamic code patching).
 * Some users need to sleep (allocating memory...) while they hold this lock.
 *
 * Note: this also protects SMP-alternatives modification on x86.
 *
 * NOT exported to modules - patching kernel text is a really delicate matter.
 */
DEFINE_MUTEX(text_mutex);
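
/*
 * Hedged usage sketch (hypothetical, not part of this file): a text-patching
 * path is expected to hold text_mutex across the whole modification, and may
 * sleep while holding it. patch_site() is an invented name for illustration;
 * text_poke() is the x86 patching primitive, other architectures have their
 * own equivalents:
 *
 *	static void patch_site(void *addr, const void *insn, size_t len)
 *	{
 *		mutex_lock(&text_mutex);
 *		text_poke(addr, insn, len);
 *		mutex_unlock(&text_mutex);
 *	}
 */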

extern struct exception_table_entry __start___ex_table[];
extern struct exception_table_entry __stop___ex_table[];

/* Cleared by build time tools if the table is already sorted. */
u32 __initdata __visible main_extable_sort_needed = 1;

/* Sort the kernel's built-in exception table */
void __init sort_main_extable(void)
{
	if (main_extable_sort_needed &&
	    &__stop___ex_table > &__start___ex_table) {
		pr_notice("Sorting __ex_table...\n");
		sort_extable(__start___ex_table, __stop___ex_table);
	}
}
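
/*
 * Being sorted by the address of the faulting instruction is what lets
 * lookups binary-search the table. A simplified sketch of what the generic
 * search_extable() in lib/extable.c does; extable_bsearch() and insn_addr()
 * are invented names (real entries usually store relative offsets that an
 * arch-specific helper decodes):
 *
 *	static const struct exception_table_entry *
 *	extable_bsearch(const struct exception_table_entry *base,
 *			unsigned int num, unsigned long addr)
 *	{
 *		unsigned int lo = 0, hi = num;
 *
 *		while (lo < hi) {
 *			unsigned int mid = (lo + hi) / 2;
 *
 *			if (insn_addr(&base[mid]) < addr)
 *				lo = mid + 1;
 *			else
 *				hi = mid;
 *		}
 *		if (lo < num && insn_addr(&base[lo]) == addr)
 *			return &base[lo];
 *		return NULL;
 *	}
 */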

/* Given an address, look for it in the kernel exception table */
const
struct exception_table_entry *search_kernel_exception_table(unsigned long addr)
{
	return search_extable(__start___ex_table,
			      __stop___ex_table - __start___ex_table, addr);
}

/* Given an address, look for it in the exception tables. */
const struct exception_table_entry *search_exception_tables(unsigned long addr)
{
	const struct exception_table_entry *e;

	e = search_kernel_exception_table(addr);
	if (!e)
		e = search_module_extables(addr);
	if (!e)
		e = search_bpf_extables(addr);
	return e;
}
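
/*
 * Hedged sketch of how an architecture's fault path typically consumes this
 * lookup (each arch has its own fixup_exception() variant; the helper names
 * below are illustrative, not a generic API):
 *
 *	static int fixup_exception_sketch(struct pt_regs *regs)
 *	{
 *		const struct exception_table_entry *e;
 *
 *		e = search_exception_tables(instruction_pointer(regs));
 *		if (!e)
 *			return 0;
 *		// Resume at the fixup stub instead of oopsing.
 *		instruction_pointer_set(regs, extable_fixup(e));
 *		return 1;
 *	}
 */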

int init_kernel_text(unsigned long addr)
{
	if (addr >= (unsigned long)_sinittext &&
	    addr < (unsigned long)_einittext)
		return 1;
	return 0;
}

int notrace core_kernel_text(unsigned long addr)
{
	if (addr >= (unsigned long)_stext &&
	    addr < (unsigned long)_etext)
		return 1;

	if (system_state < SYSTEM_RUNNING &&
	    init_kernel_text(addr))
		return 1;
	return 0;
}

/**
 * core_kernel_data - tell if addr points to kernel data
 * @addr: address to test
 *
 * Returns true if @addr passed in is from the core kernel data
 * section.
 *
 * Note: On some archs it may return true for core RODATA, and false
 * on others. But it will always be true for core RW data.
 */
int core_kernel_data(unsigned long addr)
{
	if (addr >= (unsigned long)_sdata &&
	    addr < (unsigned long)_edata)
		return 1;
	return 0;
}
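
/*
 * Example of the kind of check this enables (a hedged sketch modelled on
 * how ftrace uses it): an ops structure that does not live in core kernel
 * data must belong to a module and can go away, so it is flagged dynamic:
 *
 *	if (!core_kernel_data((unsigned long)ops))
 *		ops->flags |= FTRACE_OPS_FL_DYNAMIC;
 */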

int __kernel_text_address(unsigned long addr)
{
	if (kernel_text_address(addr))
		return 1;
	/*
	 * There might be init symbols in saved stacktraces.
	 * Give those symbols a chance to be printed in
	 * backtraces (such as lockdep traces).
	 *
	 * Since we are after the module-symbols check, there's
	 * no danger of address overlap:
	 */
	if (init_kernel_text(addr))
		return 1;
	return 0;
}
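
/*
 * Typical consumer: a stack dumper scanning stack slots for plausible
 * return addresses. A minimal, hypothetical sketch (the function and
 * variable names are illustrative, not taken from any particular arch):
 *
 *	static void dump_stack_sketch(unsigned long *sp, unsigned long *end)
 *	{
 *		for (; sp < end; sp++) {
 *			if (__kernel_text_address(*sp))
 *				printk("%pS\n", (void *)*sp);
 *		}
 *	}
 */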

int kernel_text_address(unsigned long addr)
{
	bool no_rcu;
	int ret = 1;

	if (core_kernel_text(addr))
		return 1;

	/*
	 * If a stack dump happens while RCU is not watching, then
	 * RCU needs to be notified that it must start watching
	 * again. This can happen either through tracing that
	 * triggers a stack trace, or through a WARN() that happens
	 * while coming back from idle, or while a CPU is being
	 * brought online or offline.
	 *
	 * is_module_text_address(), the kprobe slot checks,
	 * is_bpf_text_address() and is_bpf_image_address() all
	 * require RCU to be watching.
	 */
	no_rcu = !rcu_is_watching();

	/* Treat this like an NMI as it can happen anywhere */
	if (no_rcu)
		rcu_nmi_enter();

	if (is_module_text_address(addr))
		goto out;
	if (is_ftrace_trampoline(addr))
		goto out;
	if (is_kprobe_optinsn_slot(addr) || is_kprobe_insn_slot(addr))
		goto out;
	if (is_bpf_text_address(addr))
		goto out;
	ret = 0;
out:
	if (no_rcu)
		rcu_nmi_exit();

	return ret;
}

/*
 * On some architectures (PPC64, IA64) function pointers are
 * actually only tokens (function descriptors) pointing to some
 * data that holds the real function address. As a result, to
 * find out whether a function pointer is part of the kernel
 * text, we need to do some special dereferencing first.
 */
int func_ptr_is_kernel_text(void *ptr)
{
	unsigned long addr;

	addr = (unsigned long) dereference_function_descriptor(ptr);
	if (core_kernel_text(addr))
		return 1;
	return is_module_text_address(addr);
}
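
/*
 * Example use (a hedged sketch of the CONFIG_DEBUG_NOTIFIERS-style sanity
 * check in kernel/notifier.c): refuse to call through a pointer that does
 * not resolve to kernel or module text, since an attacker-controlled
 * callback would otherwise be a convenient exploit primitive:
 *
 *	if (unlikely(!func_ptr_is_kernel_text(nb->notifier_call))) {
 *		WARN(1, "Invalid notifier called!");
 *		continue;
 *	}
 */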