blob: 674906fad43b11fc09ee3b1f96feb0182332d71f [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001// SPDX-License-Identifier: GPL-2.0
Jason Barond9f5ab72010-09-17 11:09:22 -04002/*
3 * jump label x86 support
4 *
5 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
6 *
7 */
8#include <linux/jump_label.h>
9#include <linux/memory.h>
10#include <linux/uaccess.h>
11#include <linux/module.h>
12#include <linux/list.h>
13#include <linux/jhash.h>
14#include <linux/cpu.h>
15#include <asm/kprobes.h>
16#include <asm/alternative.h>
Andy Lutomirski35de5b02016-04-26 12:23:24 -070017#include <asm/text-patching.h>
Peter Zijlstrae7bf1ba2021-05-06 21:34:01 +020018#include <asm/insn.h>
Jason Barond9f5ab72010-09-17 11:09:22 -040019
Peter Zijlstrafa5e5dc2021-05-06 21:33:58 +020020int arch_jump_entry_size(struct jump_entry *entry)
21{
Peter Zijlstrae7bf1ba2021-05-06 21:34:01 +020022 struct insn insn = {};
23
24 insn_decode_kernel(&insn, (void *)jump_entry_code(entry));
25 BUG_ON(insn.length != 2 && insn.length != 5);
26
27 return insn.length;
Peter Zijlstrafa5e5dc2021-05-06 21:33:58 +020028}
29
/*
 * One prepared patch for a jump-label site: the instruction bytes to
 * install (a JMP or a NOP) and their length (2 or 5 bytes, as returned
 * by arch_jump_entry_size()).
 */
struct jump_label_patch {
	const void *code;	/* bytes to write at the site */
	int size;		/* instruction length in bytes */
};
34
35static struct jump_label_patch
36__jump_label_patch(struct jump_entry *entry, enum jump_label_type type)
Jason Barond9f5ab72010-09-17 11:09:22 -040037{
Peter Zijlstra001951b2021-05-06 21:33:59 +020038 const void *expect, *code, *nop;
Peter Zijlstra63f62ad2019-10-03 14:50:42 +020039 const void *addr, *dest;
Peter Zijlstra001951b2021-05-06 21:33:59 +020040 int size;
Ard Biesheuvel9fc0f792018-09-18 23:51:40 -070041
Peter Zijlstra63f62ad2019-10-03 14:50:42 +020042 addr = (void *)jump_entry_code(entry);
43 dest = (void *)jump_entry_target(entry);
Peter Zijlstra18cbc8b2019-08-26 13:38:58 +020044
Peter Zijlstra001951b2021-05-06 21:33:59 +020045 size = arch_jump_entry_size(entry);
46 switch (size) {
47 case JMP8_INSN_SIZE:
48 code = text_gen_insn(JMP8_INSN_OPCODE, addr, dest);
49 nop = x86_nops[size];
50 break;
51
52 case JMP32_INSN_SIZE:
53 code = text_gen_insn(JMP32_INSN_OPCODE, addr, dest);
54 nop = x86_nops[size];
55 break;
56
57 default: BUG();
58 }
Jason Barond9f5ab72010-09-17 11:09:22 -040059
Peter Zijlstraf9510fa2021-05-06 21:33:57 +020060 if (type == JUMP_LABEL_JMP)
Peter Zijlstra001951b2021-05-06 21:33:59 +020061 expect = nop;
Peter Zijlstraf9510fa2021-05-06 21:33:57 +020062 else
63 expect = code;
Jeremy Fitzhardingee71a5be2011-09-29 11:11:09 -070064
Peter Zijlstra001951b2021-05-06 21:33:59 +020065 if (memcmp(addr, expect, size)) {
Peter Zijlstraf9510fa2021-05-06 21:33:57 +020066 /*
67 * The location is not an op that we were expecting.
68 * Something went wrong. Crash the box, as something could be
69 * corrupting the kernel.
70 */
Peter Zijlstra001951b2021-05-06 21:33:59 +020071 pr_crit("jump_label: Fatal kernel bug, unexpected op at %pS [%p] (%5ph != %5ph)) size:%d type:%d\n",
72 addr, addr, addr, expect, size, type);
Peter Zijlstraf9510fa2021-05-06 21:33:57 +020073 BUG();
74 }
Ard Biesheuvel9fc0f792018-09-18 23:51:40 -070075
Daniel Bristot de Oliveira4cc66202019-06-12 11:57:27 +020076 if (type == JUMP_LABEL_NOP)
Peter Zijlstra001951b2021-05-06 21:33:59 +020077 code = nop;
Peter Zijlstra18cbc8b2019-08-26 13:38:58 +020078
Peter Zijlstra001951b2021-05-06 21:33:59 +020079 return (struct jump_label_patch){.code = code, .size = size};
Daniel Bristot de Oliveira4cc66202019-06-12 11:57:27 +020080}
81
Randy Dunlap4de49522020-03-26 14:16:58 -070082static inline void __jump_label_transform(struct jump_entry *entry,
Peter Zijlstra18cbc8b2019-08-26 13:38:58 +020083 enum jump_label_type type,
84 int init)
Daniel Bristot de Oliveira4cc66202019-06-12 11:57:27 +020085{
Peter Zijlstra001951b2021-05-06 21:33:59 +020086 const struct jump_label_patch jlp = __jump_label_patch(entry, type);
Daniel Bristot de Oliveira4cc66202019-06-12 11:57:27 +020087
Jiri Kosina51b2c072013-07-12 11:22:09 +020088 /*
Nadav Amitbb0a0082019-04-25 17:11:32 -070089 * As long as only a single processor is running and the code is still
90 * not marked as RO, text_poke_early() can be used; Checking that
91 * system_state is SYSTEM_BOOTING guarantees it. It will be set to
92 * SYSTEM_SCHEDULING before other cores are awaken and before the
93 * code is write-protected.
Jiri Kosina51b2c072013-07-12 11:22:09 +020094 *
95 * At the time the change is being done, just ignore whether we
96 * are doing nop -> jump or jump -> nop transition, and assume
97 * always nop being the 'currently valid' instruction
Jiri Kosina51b2c072013-07-12 11:22:09 +020098 */
Nadav Amitbb0a0082019-04-25 17:11:32 -070099 if (init || system_state == SYSTEM_BOOTING) {
Peter Zijlstra001951b2021-05-06 21:33:59 +0200100 text_poke_early((void *)jump_entry_code(entry), jlp.code, jlp.size);
Ard Biesheuvel9fc0f792018-09-18 23:51:40 -0700101 return;
102 }
103
Peter Zijlstra001951b2021-05-06 21:33:59 +0200104 text_poke_bp((void *)jump_entry_code(entry), jlp.code, jlp.size, NULL);
Peter Zijlstra18cbc8b2019-08-26 13:38:58 +0200105}
106
/*
 * Serialized wrapper around __jump_label_transform(): all text patching
 * runs under text_mutex.  __ref silences section-mismatch warnings for
 * the init-time path taken inside __jump_label_transform().
 */
static void __ref jump_label_transform(struct jump_entry *entry,
				       enum jump_label_type type,
				       int init)
{
	mutex_lock(&text_mutex);
	__jump_label_transform(entry, type, init);
	mutex_unlock(&text_mutex);
}
115
/*
 * Patch a single jump-label site at runtime (init == 0: the live,
 * breakpoint-based patching path is used unless still booting).
 */
void arch_jump_label_transform(struct jump_entry *entry,
			       enum jump_label_type type)
{
	jump_label_transform(entry, type, 0);
}
121
Daniel Bristot de Oliveiraba54f0c2019-06-12 11:57:31 +0200122bool arch_jump_label_transform_queue(struct jump_entry *entry,
123 enum jump_label_type type)
124{
Peter Zijlstra001951b2021-05-06 21:33:59 +0200125 struct jump_label_patch jlp;
Daniel Bristot de Oliveiraba54f0c2019-06-12 11:57:31 +0200126
127 if (system_state == SYSTEM_BOOTING) {
128 /*
129 * Fallback to the non-batching mode.
130 */
131 arch_jump_label_transform(entry, type);
132 return true;
133 }
134
Peter Zijlstra18cbc8b2019-08-26 13:38:58 +0200135 mutex_lock(&text_mutex);
Peter Zijlstra001951b2021-05-06 21:33:59 +0200136 jlp = __jump_label_patch(entry, type);
137 text_poke_queue((void *)jump_entry_code(entry), jlp.code, jlp.size, NULL);
Peter Zijlstra18cbc8b2019-08-26 13:38:58 +0200138 mutex_unlock(&text_mutex);
Daniel Bristot de Oliveiraba54f0c2019-06-12 11:57:31 +0200139 return true;
140}
141
/*
 * Flush all patches queued via arch_jump_label_transform_queue() in one
 * text_poke_finish() batch, under text_mutex.
 */
void arch_jump_label_transform_apply(void)
{
	mutex_lock(&text_mutex);
	text_poke_finish();
	mutex_unlock(&text_mutex);
}
148
/*
 * State for the boot-time/module-load "static" transform pass.
 * NOTE(review): nothing in this file ever assigns JL_STATE_UPDATE (or
 * JL_STATE_NO_UPDATE) to jlstate, so as seen from here the function
 * below never patches anything — verify against the rest of the tree
 * whether this state machine is still wired up.
 */
static enum {
	JL_STATE_START,
	JL_STATE_NO_UPDATE,
	JL_STATE_UPDATE,
} jlstate __initdata_or_module = JL_STATE_START;

/*
 * Early/module-load transform: only acts when an update pass was
 * requested via jlstate; init == 1 forces the text_poke_early() path
 * in __jump_label_transform().
 */
__init_or_module void arch_jump_label_transform_static(struct jump_entry *entry,
						       enum jump_label_type type)
{
	if (jlstate == JL_STATE_UPDATE)
		jump_label_transform(entry, type, 1);
}