// SPDX-License-Identifier: GPL-2.0-or-later
/* Rewritten by Rusty Russell, on the backs of many others...
   Copyright (C) 2001 Rusty Russell, 2002 Rusty Russell IBM.

*/
#include <linux/ftrace.h>
#include <linux/memory.h>
#include <linux/extable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/filter.h>

#include <asm/sections.h>
#include <linux/uaccess.h>

/*
 * Mutex protecting text section modification (dynamic code patching).
 * Some users need to sleep (e.g. when allocating memory...) while they
 * hold this lock.
 *
 * Note: this also protects SMP-alternatives modification on x86.
 *
 * NOT exported to modules - patching kernel text is a really delicate matter.
 */
DEFINE_MUTEX(text_mutex);
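
/*
 * Sketch (not part of the original file): the expected calling pattern
 * for a text-patching user. patch_one_insn() below is a hypothetical
 * stand-in for an arch-specific helper; holding text_mutex may sleep,
 * so callers must be in process context:
 *
 *	mutex_lock(&text_mutex);
 *	patch_one_insn(addr, insn);
 *	mutex_unlock(&text_mutex);
 */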

extern struct exception_table_entry __start___ex_table[];
extern struct exception_table_entry __stop___ex_table[];

/* Cleared by build time tools if the table is already sorted. */
u32 __initdata __visible main_extable_sort_needed = 1;

/* Sort the kernel's built-in exception table */
void __init sort_main_extable(void)
{
	if (main_extable_sort_needed &&
	    &__stop___ex_table > &__start___ex_table) {
		pr_notice("Sorting __ex_table...\n");
		sort_extable(__start___ex_table, __stop___ex_table);
	}
}
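
/*
 * Why the sort matters (illustrative): each exception_table_entry maps
 * the address of a potentially faulting instruction to its fixup, and
 * search_extable() binary-searches the table, so entries must be
 * ordered by instruction address. Conceptually:
 *
 *	insn addrs: 0x100  0x180  0x240 ...   (sorted)
 *	lookup(0x180) -> found by bisection in O(log n)
 *
 * Normally the build-time sort (scripts/sorttable) has already run and
 * cleared main_extable_sort_needed, so this path is a no-op.
 */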

/* Given an address, look for it in the kernel exception table */
const
struct exception_table_entry *search_kernel_exception_table(unsigned long addr)
{
	return search_extable(__start___ex_table,
			      __stop___ex_table - __start___ex_table, addr);
}

/* Given an address, look for it in the exception tables. */
const struct exception_table_entry *search_exception_tables(unsigned long addr)
{
	const struct exception_table_entry *e;

	e = search_kernel_exception_table(addr);
	if (!e)
		e = search_module_extables(addr);
	if (!e)
		e = search_bpf_extables(addr);
	return e;
}
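
/*
 * Typical use (sketch; the arch fault path differs in detail): on a
 * kernel-mode fault, ask whether the faulting instruction has a
 * registered fixup and resume there instead of oopsing:
 *
 *	const struct exception_table_entry *e;
 *
 *	e = search_exception_tables(instruction_pointer(regs));
 *	if (e)
 *		... redirect regs to the entry's fixup and return ...
 */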
64
Marcin Nowakowskic0d80dd2017-07-06 15:35:31 -070065int notrace core_kernel_text(unsigned long addr)
Linus Torvalds1da177e2005-04-16 15:20:36 -070066{
Kefeng Wang808b6452021-11-08 18:34:09 -080067 if (is_kernel_text(addr))
Linus Torvalds1da177e2005-04-16 15:20:36 -070068 return 1;
69
Christophe Leroyd2635f22021-11-05 13:40:40 -070070 if (system_state < SYSTEM_FREEING_INITMEM &&
Kefeng Wangb9ad8fe2021-11-08 18:33:54 -080071 is_kernel_inittext(addr))
Linus Torvalds1da177e2005-04-16 15:20:36 -070072 return 1;
73 return 0;
74}
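
/*
 * For reference (an assumption about the generic helpers, not code in
 * this file): is_kernel_text() reduces to a range check against the
 * linker-provided text section bounds, roughly
 *
 *	addr >= (unsigned long)_stext && addr < (unsigned long)_etext
 *
 * and is_kernel_inittext() checks _sinittext/_einittext the same way,
 * which is why init text stops counting here once initmem is freed.
 */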

int __kernel_text_address(unsigned long addr)
{
	if (kernel_text_address(addr))
		return 1;
	/*
	 * There might be init symbols in saved stacktraces.
	 * Give those symbols a chance to be printed in
	 * backtraces (such as lockdep traces).
	 *
	 * Since we are after the module-symbols check, there's
	 * no danger of address overlap:
	 */
	if (is_kernel_inittext(addr))
		return 1;
	return 0;
}

int kernel_text_address(unsigned long addr)
{
	bool no_rcu;
	int ret = 1;

	if (core_kernel_text(addr))
		return 1;

	/*
	 * If a stack dump happens while RCU is not watching, RCU needs
	 * to be told to start watching again. This can happen when
	 * tracing triggers a stack trace, or when a WARN() fires while
	 * coming back from idle or during CPU on- or offlining.
	 *
	 * is_module_text_address() as well as the kprobe slots,
	 * is_bpf_text_address() and is_bpf_image_address() require
	 * RCU to be watching.
	 */
	no_rcu = !rcu_is_watching();

	/* Treat this like an NMI as it can happen anywhere */
	if (no_rcu)
		rcu_nmi_enter();

	if (is_module_text_address(addr))
		goto out;
	if (is_ftrace_trampoline(addr))
		goto out;
	if (is_kprobe_optinsn_slot(addr) || is_kprobe_insn_slot(addr))
		goto out;
	if (is_bpf_text_address(addr))
		goto out;
	ret = 0;
out:
	if (no_rcu)
		rcu_nmi_exit();

	return ret;
}
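
/*
 * Usage sketch (not from this file): stack unwinders use this test to
 * decide whether a value found on the stack is plausibly a return
 * address before reporting it:
 *
 *	if (kernel_text_address(addr))
 *		... record addr as a stack trace entry ...
 *
 * The RCU dance above is what makes that safe from NMI-like contexts,
 * where RCU may not be watching.
 */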

/*
 * On some architectures (e.g. PPC64, IA64) a function pointer actually
 * points to a function descriptor that in turn holds the real function
 * address. As a result, to find out whether a function pointer is part
 * of the kernel text, we need to do some special dereferencing first.
 */
int func_ptr_is_kernel_text(void *ptr)
{
	unsigned long addr;

	addr = (unsigned long) dereference_function_descriptor(ptr);
	if (core_kernel_text(addr))
		return 1;
	return is_module_text_address(addr);
}
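
/*
 * Usage sketch (hypothetical caller): sanity-checking a registered
 * callback before invoking it; "cb" stands in for any function pointer
 * obtained from a registration interface:
 *
 *	if (!func_ptr_is_kernel_text(cb))
 *		return -EINVAL;
 */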