blob: 5d4d9bbdec36c9f8f3dd5450ff3f5704b3ee40b7 [file] [log] [blame]
Sami Tolvanend08b9f02020-04-27 09:00:07 -07001// SPDX-License-Identifier: GPL-2.0
2/*
3 * Shadow Call Stack support.
4 *
5 * Copyright (C) 2019 Google LLC
6 */
7
8#include <linux/kasan.h>
Sami Tolvanen628d06a2020-04-27 09:00:08 -07009#include <linux/mm.h>
Sami Tolvanend08b9f02020-04-27 09:00:07 -070010#include <linux/scs.h>
11#include <linux/slab.h>
Sami Tolvanen628d06a2020-04-27 09:00:08 -070012#include <linux/vmstat.h>
Sami Tolvanend08b9f02020-04-27 09:00:07 -070013
/* Slab cache backing all shadow call stack allocations; set up in scs_init(). */
static struct kmem_cache *scs_cache;
15
Will Deaconbee348f2020-05-15 14:43:11 +010016static void __scs_account(void *s, int account)
17{
18 struct page *scs_page = virt_to_page(s);
19
20 mod_zone_page_state(page_zone(scs_page), NR_KERNEL_SCS_KB,
21 account * (SCS_SIZE / SZ_1K));
22}
23
Sami Tolvanend08b9f02020-04-27 09:00:07 -070024static void *scs_alloc(int node)
25{
Will Deaconbee348f2020-05-15 14:43:11 +010026 void *s = kmem_cache_alloc_node(scs_cache, GFP_SCS, node);
Sami Tolvanend08b9f02020-04-27 09:00:07 -070027
Will Deaconbee348f2020-05-15 14:43:11 +010028 if (!s)
29 return NULL;
Sami Tolvanend08b9f02020-04-27 09:00:07 -070030
Will Deaconbee348f2020-05-15 14:43:11 +010031 *__scs_magic(s) = SCS_END_MAGIC;
32
33 /*
34 * Poison the allocation to catch unintentional accesses to
35 * the shadow stack when KASAN is enabled.
36 */
37 kasan_poison_object_data(scs_cache, s);
38 __scs_account(s, 1);
Sami Tolvanend08b9f02020-04-27 09:00:07 -070039 return s;
40}
41
42static void scs_free(void *s)
43{
Will Deaconbee348f2020-05-15 14:43:11 +010044 __scs_account(s, -1);
Sami Tolvanend08b9f02020-04-27 09:00:07 -070045 kasan_unpoison_object_data(scs_cache, s);
46 kmem_cache_free(scs_cache, s);
47}
48
/*
 * Create the slab cache used for all shadow call stack allocations.
 * Called once at boot, before any task needs a shadow stack.
 */
void __init scs_init(void)
{
	scs_cache = kmem_cache_create("scs_cache", SCS_SIZE, 0, 0, NULL);
}
53
54int scs_prepare(struct task_struct *tsk, int node)
55{
56 void *s = scs_alloc(node);
57
58 if (!s)
59 return -ENOMEM;
60
Will Deacon51189c72020-05-15 14:11:05 +010061 task_scs(tsk) = task_scs_sp(tsk) = s;
Sami Tolvanend08b9f02020-04-27 09:00:07 -070062 return 0;
63}
64
Sami Tolvanen5bbaf9d2020-04-27 09:00:09 -070065static void scs_check_usage(struct task_struct *tsk)
66{
67 static unsigned long highest;
68
69 unsigned long *p, prev, curr = highest, used = 0;
70
71 if (!IS_ENABLED(CONFIG_DEBUG_STACK_USAGE))
72 return;
73
74 for (p = task_scs(tsk); p < __scs_magic(tsk); ++p) {
75 if (!READ_ONCE_NOCHECK(*p))
76 break;
Will Deacon333ed742020-06-03 12:16:37 +010077 used += sizeof(*p);
Sami Tolvanen5bbaf9d2020-04-27 09:00:09 -070078 }
79
80 while (used > curr) {
81 prev = cmpxchg_relaxed(&highest, curr, used);
82
83 if (prev == curr) {
84 pr_info("%s (%d): highest shadow stack usage: %lu bytes\n",
85 tsk->comm, task_pid_nr(tsk), used);
86 break;
87 }
88
89 curr = prev;
90 }
91}
92
/*
 * Free @tsk's shadow call stack on task teardown, after sanity-checking
 * the end marker and recording usage statistics. Safe to call for tasks
 * that never had a shadow stack.
 */
void scs_release(struct task_struct *tsk)
{
	void *stack = task_scs(tsk);

	if (stack) {
		/* The end-of-stack marker must still be intact here. */
		WARN(task_scs_end_corrupted(tsk),
		     "corrupted shadow stack detected when freeing task\n");
		scs_check_usage(tsk);
		scs_free(stack);
	}
}