/*
 * SRAM protect-exec region helper functions
 *
 * Copyright (C) 2017 Texas Instruments Incorporated - https://www.ti.com/
 *	Dave Gerlach
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/device.h>
#include <linux/genalloc.h>
#include <linux/mm.h>
#include <linux/sram.h>

#include <asm/fncpy.h>
#include <asm/set_memory.h>

#include "sram.h"

static DEFINE_MUTEX(exec_pool_list_mutex);
static LIST_HEAD(exec_pool_list);

int sram_check_protect_exec(struct sram_dev *sram, struct sram_reserve *block,
			    struct sram_partition *part)
{
	unsigned long base = (unsigned long)part->base;
	unsigned long end = base + block->size;

	if (!PAGE_ALIGNED(base) || !PAGE_ALIGNED(end)) {
		dev_err(sram->dev,
			"SRAM pool marked with 'protect-exec' is not page aligned and will not be created.\n");
		return -ENOMEM;
	}

	return 0;
}

int sram_add_protect_exec(struct sram_partition *part)
{
	mutex_lock(&exec_pool_list_mutex);
	list_add_tail(&part->list, &exec_pool_list);
	mutex_unlock(&exec_pool_list_mutex);

	return 0;
}

/**
 * sram_exec_copy - copy data to a protected executable region of sram
 *
 * @pool: struct gen_pool that backs this SRAM partition
 * @dst: Destination address for the copy; it must lie inside @pool
 * @src: Source address for the data to copy
 * @size: Size of the copy to perform; the region starting at @dst must
 *	  reside entirely in @pool
 *
 * Return: Address of the copied data that can safely be called through a
 * function pointer, or NULL on failure.
 *
 * This helper function allows the sram driver to act as the central control
 * location for 'protect-exec' pools, which are normal sram pools that are
 * always set read-only and executable except when copying data to them, at
 * which point they are set to read-write non-executable, to make sure no
 * memory is writeable and executable at the same time. This region must be
 * page-aligned and is checked during probe, otherwise page attribute
 * manipulation would not be possible. Care must be taken to only call the
 * returned address, as the @dst address is not guaranteed to be safely
 * callable.
 *
 * NOTE: This function uses the fncpy macro to move code to the executable
 * region. Some architectures have strict requirements for relocating
 * executable code, so fncpy is a macro that must be defined by any arch
 * making use of this functionality that guarantees a safe copy of exec
 * data and returns a safe address that can be called as a C function
 * pointer.
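 *
 * A minimal usage sketch follows; it is illustrative only. The names
 * sram_pool, sram_suspend_fn, sram_suspend_sz and suspend_fn_t are
 * hypothetical and not part of this driver; a real caller obtains the
 * gen_pool for the 'protect-exec' partition and allocates space from it
 * with gen_pool_alloc() before copying:
 *
 *	typedef void (*suspend_fn_t)(void);
 *	extern void sram_suspend_fn(void);	// code to run from SRAM
 *	extern size_t sram_suspend_sz;		// size of that code
 *	suspend_fn_t run_from_sram;
 *	unsigned long dst;
 *
 *	dst = gen_pool_alloc(sram_pool, sram_suspend_sz);
 *	if (!dst)
 *		return -ENOMEM;
 *
 *	run_from_sram = sram_exec_copy(sram_pool, (void *)dst,
 *				       sram_suspend_fn, sram_suspend_sz);
 *	if (!run_from_sram)
 *		return -EINVAL;
 *
 *	run_from_sram();	// call the relocated copy, not sram_suspend_fn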
 */
void *sram_exec_copy(struct gen_pool *pool, void *dst, void *src,
		     size_t size)
{
	struct sram_partition *part = NULL, *p;
	unsigned long base;
	int pages;
	void *dst_cpy;
	int ret;

	mutex_lock(&exec_pool_list_mutex);
	list_for_each_entry(p, &exec_pool_list, list) {
		if (p->pool == pool)
			part = p;
	}
	mutex_unlock(&exec_pool_list_mutex);

	if (!part)
		return NULL;

	if (!gen_pool_has_addr(pool, (unsigned long)dst, size))
		return NULL;

	base = (unsigned long)part->base;
	pages = PAGE_ALIGN(size) / PAGE_SIZE;

	mutex_lock(&part->lock);

	ret = set_memory_nx((unsigned long)base, pages);
	if (ret)
		goto error_out;
	ret = set_memory_rw((unsigned long)base, pages);
	if (ret)
		goto error_out;

	dst_cpy = fncpy(dst, src, size);

	ret = set_memory_ro((unsigned long)base, pages);
	if (ret)
		goto error_out;
	ret = set_memory_x((unsigned long)base, pages);
	if (ret)
		goto error_out;

	mutex_unlock(&part->lock);

	return dst_cpy;

error_out:
	mutex_unlock(&part->lock);
	return NULL;
}
EXPORT_SYMBOL_GPL(sram_exec_copy);