// SPDX-License-Identifier: GPL-2.0-or-later
/* Rewritten by Rusty Russell, on the backs of many others...
   Copyright (C) 2001 Rusty Russell, 2002 Rusty Russell IBM.

*/
#include <linux/ftrace.h>
#include <linux/memory.h>
#include <linux/extable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/filter.h>

#include <asm/sections.h>
#include <linux/uaccess.h>

/*
 * mutex protecting text section modification (dynamic code patching).
 * some users need to sleep (allocating memory...) while they hold this lock.
 *
 * Note: Also protects SMP-alternatives modification on x86.
 *
 * NOT exported to modules - patching kernel text is a really delicate matter.
 */
DEFINE_MUTEX(text_mutex);
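
/*
 * Illustrative sketch only (not part of the upstream file): code that patches
 * live kernel text is expected to hold text_mutex across the whole update and
 * may sleep while holding it. text_poke() is the x86 patching helper (from
 * <asm/text-patching.h>); the wrapper name below is hypothetical.
 */
static void __maybe_unused patch_text_example(void *addr, const void *opcode,
					       size_t len)
{
	mutex_lock(&text_mutex);	/* may sleep; never from atomic context */
	text_poke(addr, opcode, len);	/* x86: asserts text_mutex is held */
	mutex_unlock(&text_mutex);
}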

extern struct exception_table_entry __start___ex_table[];
extern struct exception_table_entry __stop___ex_table[];

/* Cleared by build time tools if the table is already sorted. */
u32 __initdata __visible main_extable_sort_needed = 1;

/* Sort the kernel's built-in exception table */
void __init sort_main_extable(void)
{
	if (main_extable_sort_needed &&
	    &__stop___ex_table > &__start___ex_table) {
		pr_notice("Sorting __ex_table...\n");
		sort_extable(__start___ex_table, __stop___ex_table);
	}
}

/* Given an address, look for it in the kernel exception table */
const
struct exception_table_entry *search_kernel_exception_table(unsigned long addr)
{
	return search_extable(__start___ex_table,
			      __stop___ex_table - __start___ex_table, addr);
}

/* Given an address, look for it in the exception tables. */
const struct exception_table_entry *search_exception_tables(unsigned long addr)
{
	const struct exception_table_entry *e;

	e = search_kernel_exception_table(addr);
	if (!e)
		e = search_module_extables(addr);
	if (!e)
		e = search_bpf_extables(addr);
	return e;
}
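
/*
 * Illustrative sketch only (not part of the upstream file): roughly how an
 * architecture's page-fault path consumes search_exception_tables(). The
 * helper name and the relative-offset fixup decoding are assumptions; each
 * architecture provides its own fixup_exception() with its own entry layout.
 */
static bool __maybe_unused example_fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *e;

	/* Is the faulting instruction whitelisted by an __ex_table entry? */
	e = search_exception_tables(instruction_pointer(regs));
	if (!e)
		return false;	/* no fixup: let the caller oops */

	/* Relative encoding: ->fixup is an offset from its own address. */
	instruction_pointer_set(regs, (unsigned long)&e->fixup + e->fixup);
	return true;
}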

int notrace core_kernel_text(unsigned long addr)
{
	if (addr >= (unsigned long)_stext &&
	    addr < (unsigned long)_etext)
		return 1;

	if (system_state < SYSTEM_FREEING_INITMEM &&
	    is_kernel_inittext(addr))
		return 1;
	return 0;
}

int __kernel_text_address(unsigned long addr)
{
	if (kernel_text_address(addr))
		return 1;
	/*
	 * There might be init symbols in saved stacktraces.
	 * Give those symbols a chance to be printed in
	 * backtraces (such as lockdep traces).
	 *
	 * Since we are after the module-symbols check, there's
	 * no danger of address overlap:
	 */
	if (is_kernel_inittext(addr))
		return 1;
	return 0;
}

int kernel_text_address(unsigned long addr)
{
	bool no_rcu;
	int ret = 1;

	if (core_kernel_text(addr))
		return 1;

	/*
	 * If a stack dump happens while RCU is not watching, then
	 * RCU needs to be notified that it has to start watching
	 * again. This can happen either by tracing that triggers a
	 * stack trace, or by a WARN() while coming back from idle,
	 * or during CPU onlining or offlining.
	 *
	 * is_module_text_address() as well as the kprobe slots,
	 * is_bpf_text_address() and is_bpf_image_address() require
	 * RCU to be watching.
	 */
	no_rcu = !rcu_is_watching();

	/* Treat this like an NMI as it can happen anywhere */
	if (no_rcu)
		rcu_nmi_enter();

	if (is_module_text_address(addr))
		goto out;
	if (is_ftrace_trampoline(addr))
		goto out;
	if (is_kprobe_optinsn_slot(addr) || is_kprobe_insn_slot(addr))
		goto out;
	if (is_bpf_text_address(addr))
		goto out;
	ret = 0;
out:
	if (no_rcu)
		rcu_nmi_exit();

	return ret;
}
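
/*
 * Illustrative sketch only (not part of the upstream file): stack dumpers
 * typically scan raw stack slots and use __kernel_text_address() to decide
 * which values look like return addresses worth printing. The helper name
 * below is hypothetical; %pS resolves the address to a symbol name.
 */
static void __maybe_unused example_dump_stack_text(unsigned long *sp,
						   unsigned long *end)
{
	for (; sp < end; sp++)
		if (__kernel_text_address(*sp))
			pr_info(" [<%px>] %pS\n", (void *)*sp, (void *)*sp);
}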

/*
 * On some architectures (PPC64, IA64) function pointers
 * are actually only tokens to some data that then holds the
 * real function address. As a result, to find if a function
 * pointer is part of the kernel text, we need to do some
 * special dereferencing first.
 */
int func_ptr_is_kernel_text(void *ptr)
{
	unsigned long addr;
	addr = (unsigned long) dereference_function_descriptor(ptr);
	if (core_kernel_text(addr))
		return 1;
	return is_module_text_address(addr);
}
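
/*
 * Illustrative sketch only (not part of the upstream file): a debug-style
 * sanity check before invoking a callback stored in writable memory, in the
 * spirit of what CONFIG_DEBUG_NOTIFIERS does. The helper name is hypothetical.
 */
static int __maybe_unused example_call_checked(int (*fn)(void *), void *arg)
{
	if (unlikely(!func_ptr_is_kernel_text((void *)fn))) {
		WARN(1, "invalid callback %pS\n", fn);
		return -EINVAL;
	}
	return fn(arg);
}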
Arjan van de Venab7476c2008-08-15 15:29:38 -0700149}