// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
 * Copyright (C) 2015 Linaro Ltd.
 */

#include <linux/slab.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/qcom_scm.h>
#include <linux/arm-smccc.h>
#include <linux/dma-mapping.h>

#include "qcom_scm.h"

#define QCOM_SCM_FLAG_COLDBOOT_CPU0	0x00
#define QCOM_SCM_FLAG_COLDBOOT_CPU1	0x01
#define QCOM_SCM_FLAG_COLDBOOT_CPU2	0x08
#define QCOM_SCM_FLAG_COLDBOOT_CPU3	0x20

#define QCOM_SCM_FLAG_WARMBOOT_CPU0	0x04
#define QCOM_SCM_FLAG_WARMBOOT_CPU1	0x02
#define QCOM_SCM_FLAG_WARMBOOT_CPU2	0x10
#define QCOM_SCM_FLAG_WARMBOOT_CPU3	0x40

struct qcom_scm_entry {
        int flag;
        void *entry;
};

static struct qcom_scm_entry qcom_scm_wb[] = {
        { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU0 },
        { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU1 },
        { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU2 },
        { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 },
};

static DEFINE_MUTEX(qcom_scm_lock);

#define MAX_QCOM_SCM_ARGS 10
#define MAX_QCOM_SCM_RETS 3

enum qcom_scm_arg_types {
        QCOM_SCM_VAL,		/* plain value argument */
        QCOM_SCM_RO,		/* pointer to memory the secure world only reads */
        QCOM_SCM_RW,		/* pointer to memory the secure world may write */
        QCOM_SCM_BUFVAL,
};

#define QCOM_SCM_ARGS_IMPL(num, a, b, c, d, e, f, g, h, i, j, ...) (\
                        (((a) & 0x3) << 4) | \
                        (((b) & 0x3) << 6) | \
                        (((c) & 0x3) << 8) | \
                        (((d) & 0x3) << 10) | \
                        (((e) & 0x3) << 12) | \
                        (((f) & 0x3) << 14) | \
                        (((g) & 0x3) << 16) | \
                        (((h) & 0x3) << 18) | \
                        (((i) & 0x3) << 20) | \
                        (((j) & 0x3) << 22) | \
                        ((num) & 0xf))

#define QCOM_SCM_ARGS(...) QCOM_SCM_ARGS_IMPL(__VA_ARGS__, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
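
/*
 * For illustration: QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW) describes a
 * call taking two arguments, a plain value followed by a read-write buffer,
 * and expands to ((0x0 & 0x3) << 4) | ((0x2 & 0x3) << 6) | (2 & 0xf) == 0x82.
 * Argument types beyond those listed default to QCOM_SCM_VAL (0).
 */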

/**
 * struct qcom_scm_desc
 * @svc: Service identifier
 * @cmd: Command identifier
 * @arginfo: Metadata describing the arguments in args[]
 * @args: The array of arguments for the secure syscall
 * @owner: The owning entity of this call
 */
struct qcom_scm_desc {
        u32 svc;
        u32 cmd;
        u32 arginfo;
        u64 args[MAX_QCOM_SCM_ARGS];
        u32 owner;
};

/**
 * struct qcom_scm_res
 * @result: The values returned by the secure syscall
 */
struct qcom_scm_res {
        u64 result[MAX_QCOM_SCM_RETS];
};

/**
 * struct arm_smccc_args
 * @args: The array of values used in registers in smc instruction
 */
struct arm_smccc_args {
        unsigned long args[8];
};

#define SCM_LEGACY_FNID(s, c)	(((s) << 10) | ((c) & 0x3ff))
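
/*
 * For example, assuming the usual definitions QCOM_SCM_SVC_BOOT == 0x1 and
 * QCOM_SCM_BOOT_SET_ADDR == 0x1 from qcom_scm.h, SCM_LEGACY_FNID() packs them
 * as (0x1 << 10) | 0x1 == 0x401.
 */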

/**
 * struct scm_legacy_command - one SCM command buffer
 * @len: total available memory for command and response
 * @buf_offset: start of command buffer
 * @resp_hdr_offset: start of response buffer
 * @id: command to be executed
 * @buf: buffer returned from scm_legacy_get_command_buffer()
 *
 * An SCM command is laid out in memory as follows:
 *
 *	------------------- <--- struct scm_legacy_command
 *	| command header  |
 *	------------------- <--- scm_legacy_get_command_buffer()
 *	| command buffer  |
 *	------------------- <--- struct scm_legacy_response and
 *	| response header |      scm_legacy_command_to_response()
 *	------------------- <--- scm_legacy_get_response_buffer()
 *	| response buffer |
 *	-------------------
 *
 * There can be arbitrary padding between the headers and buffers so
 * you should always use the appropriate scm_legacy_get_*_buffer() routines
 * to access the buffers in a safe manner.
 */
struct scm_legacy_command {
        __le32 len;
        __le32 buf_offset;
        __le32 resp_hdr_offset;
        __le32 id;
        __le32 buf[0];
};

/**
 * struct scm_legacy_response - one SCM response buffer
 * @len: total available memory for response
 * @buf_offset: start of response data relative to start of scm_legacy_response
 * @is_complete: indicates if the command has finished processing
 */
struct scm_legacy_response {
        __le32 len;
        __le32 buf_offset;
        __le32 is_complete;
};

/**
 * scm_legacy_command_to_response() - Get a pointer to a scm_legacy_response
 * @cmd: command
 *
 * Returns a pointer to a response for a command.
 */
static inline struct scm_legacy_response *scm_legacy_command_to_response(
                const struct scm_legacy_command *cmd)
{
        return (void *)cmd + le32_to_cpu(cmd->resp_hdr_offset);
}

/**
 * scm_legacy_get_command_buffer() - Get a pointer to a command buffer
 * @cmd: command
 *
 * Returns a pointer to the command buffer of a command.
 */
static inline void *scm_legacy_get_command_buffer(
                const struct scm_legacy_command *cmd)
{
        return (void *)cmd->buf;
}

/**
 * scm_legacy_get_response_buffer() - Get a pointer to a response buffer
 * @rsp: response
 *
 * Returns a pointer to a response buffer of a response.
 */
static inline void *scm_legacy_get_response_buffer(
                const struct scm_legacy_response *rsp)
{
        return (void *)rsp + le32_to_cpu(rsp->buf_offset);
}

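/**
 * __scm_legacy_do() - Issue the SMC, retrying if the call was interrupted
 * @smc: register values for the smc instruction
 * @res: values returned in the result registers
 *
 * The secure world returns QCOM_SCM_INTERRUPTED when the call was preempted
 * by an interrupt, in which case the call is simply reissued.
 */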
static void __scm_legacy_do(const struct arm_smccc_args *smc,
                            struct arm_smccc_res *res)
{
        do {
                arm_smccc_smc(smc->args[0], smc->args[1], smc->args[2],
                              smc->args[3], smc->args[4], smc->args[5],
                              smc->args[6], smc->args[7], res);
        } while (res->a0 == QCOM_SCM_INTERRUPTED);
}

/**
 * qcom_scm_call() - Sends a command to the SCM and waits for the command to
 * finish processing.
 * @dev: Device, used to map the command buffer for the secure world
 * @desc: Descriptor with the service, command and arguments of the call
 * @res: Values returned by the secure syscall, may be NULL
 *
 * A note on cache maintenance:
 * Note that any buffers that are expected to be accessed by the secure world
 * must be flushed before invoking qcom_scm_call and invalidated in the cache
 * immediately after qcom_scm_call returns. Cache maintenance on the command
 * and response buffers is taken care of by qcom_scm_call; however, callers are
 * responsible for any other cached buffers passed over to the secure world.
 */
static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
                         struct qcom_scm_res *res)
{
        u8 arglen = desc->arginfo & 0xf;
        int ret = 0, context_id;
        unsigned int i;
        struct scm_legacy_command *cmd;
        struct scm_legacy_response *rsp;
        struct arm_smccc_args smc = {0};
        struct arm_smccc_res smc_res;
        const size_t cmd_len = arglen * sizeof(__le32);
        const size_t resp_len = MAX_QCOM_SCM_RETS * sizeof(__le32);
        size_t alloc_len = sizeof(*cmd) + cmd_len + sizeof(*rsp) + resp_len;
        dma_addr_t cmd_phys;
        __le32 *arg_buf;
        const __le32 *res_buf;

        cmd = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

        cmd->len = cpu_to_le32(alloc_len);
        cmd->buf_offset = cpu_to_le32(sizeof(*cmd));
        cmd->resp_hdr_offset = cpu_to_le32(sizeof(*cmd) + cmd_len);
        cmd->id = cpu_to_le32(SCM_LEGACY_FNID(desc->svc, desc->cmd));

        arg_buf = scm_legacy_get_command_buffer(cmd);
        for (i = 0; i < arglen; i++)
                arg_buf[i] = cpu_to_le32(desc->args[i]);

        rsp = scm_legacy_command_to_response(cmd);

        cmd_phys = dma_map_single(dev, cmd, alloc_len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, cmd_phys)) {
                kfree(cmd);
                return -ENOMEM;
        }

        /*
         * Legacy register layout: a pointer to a local context id word and
         * the physical address of the marshalled command buffer.
         */
        smc.args[0] = 1;
        smc.args[1] = (unsigned long)&context_id;
        smc.args[2] = cmd_phys;

        mutex_lock(&qcom_scm_lock);
        __scm_legacy_do(&smc, &smc_res);
        if (smc_res.a0)
                ret = qcom_scm_remap_error(smc_res.a0);
        mutex_unlock(&qcom_scm_lock);
        if (ret)
                goto out;

        /* Poll, with cache invalidation, until the secure world marks the
         * response as complete.
         */
        do {
                dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len,
                                        sizeof(*rsp), DMA_FROM_DEVICE);
        } while (!rsp->is_complete);

        dma_sync_single_for_cpu(dev, cmd_phys + sizeof(*cmd) + cmd_len +
                                le32_to_cpu(rsp->buf_offset),
                                resp_len, DMA_FROM_DEVICE);

        if (res) {
                res_buf = scm_legacy_get_response_buffer(rsp);
                for (i = 0; i < MAX_QCOM_SCM_RETS; i++)
                        res->result[i] = le32_to_cpu(res_buf[i]);
        }
out:
        dma_unmap_single(dev, cmd_phys, alloc_len, DMA_TO_DEVICE);
        kfree(cmd);
        return ret;
}

#define SCM_LEGACY_CLASS_REGISTER	(0x2 << 8)
#define SCM_LEGACY_MASK_IRQS		BIT(5)
#define SCM_LEGACY_ATOMIC_ID(svc, cmd, n) \
        ((SCM_LEGACY_FNID(svc, cmd) << 12) | \
        SCM_LEGACY_CLASS_REGISTER | \
        SCM_LEGACY_MASK_IRQS | \
        (n & 0xf))
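
/*
 * Continuing the example above: with fnid 0x401 and a single argument, the
 * atomic call id would be (0x401 << 12) | (0x2 << 8) | BIT(5) | 1 == 0x401221.
 */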

/**
 * qcom_scm_call_atomic1() - Send an atomic SCM command with one argument
 * @svc: service identifier
 * @cmd: command identifier
 * @arg1: first argument
 *
 * This shall only be used with commands that are guaranteed to be
 * uninterruptible, atomic and SMP safe.
 */
static s32 qcom_scm_call_atomic1(u32 svc, u32 cmd, u32 arg1)
{
        int context_id;
        struct arm_smccc_res res;

        arm_smccc_smc(SCM_LEGACY_ATOMIC_ID(svc, cmd, 1),
                      (unsigned long)&context_id, arg1, 0, 0, 0, 0, 0, &res);

        return res.a0;
}

/**
 * qcom_scm_call_atomic2() - Send an atomic SCM command with two arguments
 * @svc: service identifier
 * @cmd: command identifier
 * @arg1: first argument
 * @arg2: second argument
 *
 * This shall only be used with commands that are guaranteed to be
 * uninterruptible, atomic and SMP safe.
 */
static s32 qcom_scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2)
{
        int context_id;
        struct arm_smccc_res res;

        arm_smccc_smc(SCM_LEGACY_ATOMIC_ID(svc, cmd, 2),
                      (unsigned long)&context_id, arg1, arg2, 0, 0, 0, 0, &res);

        return res.a0;
}

/**
 * __qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Set the cold boot address of the cpus. Any cpu outside the supported
 * range would be removed from the cpu present mask.
 */
int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
{
        int flags = 0;
        int cpu;
        int scm_cb_flags[] = {
                QCOM_SCM_FLAG_COLDBOOT_CPU0,
                QCOM_SCM_FLAG_COLDBOOT_CPU1,
                QCOM_SCM_FLAG_COLDBOOT_CPU2,
                QCOM_SCM_FLAG_COLDBOOT_CPU3,
        };

        if (!cpus || cpumask_empty(cpus))
                return -EINVAL;

        for_each_cpu(cpu, cpus) {
                if (cpu < ARRAY_SIZE(scm_cb_flags))
                        flags |= scm_cb_flags[cpu];
                else
                        set_cpu_present(cpu, false);
        }

        return qcom_scm_call_atomic2(QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_SET_ADDR,
                                     flags, virt_to_phys(entry));
}

/**
 * __qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
 * @dev: Device, passed on to qcom_scm_call()
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Set the Linux entry point for the SCM to transfer control to when coming
 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
 */
int __qcom_scm_set_warm_boot_addr(struct device *dev, void *entry,
                                  const cpumask_t *cpus)
{
        int ret;
        int flags = 0;
        int cpu;
        struct qcom_scm_desc desc = {
                .svc = QCOM_SCM_SVC_BOOT,
                .cmd = QCOM_SCM_BOOT_SET_ADDR,
        };

        /*
         * Reassign only if we are switching from hotplug entry point
         * to cpuidle entry point or vice versa.
         */
        for_each_cpu(cpu, cpus) {
                if (entry == qcom_scm_wb[cpu].entry)
                        continue;
                flags |= qcom_scm_wb[cpu].flag;
        }

        /* No change in entry function */
        if (!flags)
                return 0;

        desc.args[0] = flags;
        desc.args[1] = virt_to_phys(entry);
        desc.arginfo = QCOM_SCM_ARGS(2);

        ret = qcom_scm_call(dev, &desc, NULL);
        if (!ret) {
                for_each_cpu(cpu, cpus)
                        qcom_scm_wb[cpu].entry = entry;
        }

        return ret;
}

/**
 * __qcom_scm_cpu_power_down() - Power down the cpu
 * @flags: Flags to flush cache
 *
 * This is an end point to power down cpu. If there was a pending interrupt,
 * the control would return from this function, otherwise, the cpu jumps to the
 * warm boot entry point set for this cpu upon reset.
 */
void __qcom_scm_cpu_power_down(u32 flags)
{
        qcom_scm_call_atomic1(QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_TERMINATE_PC,
                              flags & QCOM_SCM_FLUSH_FLAG_MASK);
}

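/**
 * __qcom_scm_is_call_available() - Check if a secure world call exists
 * @dev: Device, passed on to qcom_scm_call()
 * @svc_id: service identifier of the call to probe
 * @cmd_id: command identifier of the call to probe
 *
 * Returns nonzero if the call is implemented by the secure world, zero if
 * not, or a negative errno if the probe itself failed.
 */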
int __qcom_scm_is_call_available(struct device *dev, u32 svc_id, u32 cmd_id)
{
        int ret;
        struct qcom_scm_desc desc = {
                .svc = QCOM_SCM_SVC_INFO,
                .cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
                .args[0] = SCM_LEGACY_FNID(svc_id, cmd_id),
                .arginfo = QCOM_SCM_ARGS(1),
        };
        struct qcom_scm_res res;

        ret = qcom_scm_call(dev, &desc, &res);

        return ret ? : res.result[0];
}

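/**
 * __qcom_scm_hdcp_req() - Invoke the secure HDCP service
 * @dev: Device, passed on to qcom_scm_call()
 * @req: Array of HDCP register address/value request pairs
 * @req_cnt: Number of requests, at most QCOM_SCM_HDCP_MAX_REQ_CNT
 * @resp: Response value returned by the secure world
 */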
int __qcom_scm_hdcp_req(struct device *dev, struct qcom_scm_hdcp_req *req,
                        u32 req_cnt, u32 *resp)
{
        int ret;
        struct qcom_scm_desc desc = {
                .svc = QCOM_SCM_SVC_HDCP,
                .cmd = QCOM_SCM_HDCP_INVOKE,
        };
        struct qcom_scm_res res;

        if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
                return -ERANGE;

        desc.args[0] = req[0].addr;
        desc.args[1] = req[0].val;
        desc.args[2] = req[1].addr;
        desc.args[3] = req[1].val;
        desc.args[4] = req[2].addr;
        desc.args[5] = req[2].val;
        desc.args[6] = req[3].addr;
        desc.args[7] = req[3].val;
        desc.args[8] = req[4].addr;
        desc.args[9] = req[4].val;
        desc.arginfo = QCOM_SCM_ARGS(10);

        ret = qcom_scm_call(dev, &desc, &res);
        *resp = res.result[0];

        return ret;
}

int __qcom_scm_ocmem_lock(struct device *dev, u32 id, u32 offset, u32 size,
                          u32 mode)
{
        struct qcom_scm_desc desc = {
                .svc = QCOM_SCM_SVC_OCMEM,
                .cmd = QCOM_SCM_OCMEM_LOCK_CMD,
        };

        desc.args[0] = id;
        desc.args[1] = offset;
        desc.args[2] = size;
        desc.args[3] = mode;
        desc.arginfo = QCOM_SCM_ARGS(4);

        return qcom_scm_call(dev, &desc, NULL);
}

int __qcom_scm_ocmem_unlock(struct device *dev, u32 id, u32 offset, u32 size)
{
        struct qcom_scm_desc desc = {
                .svc = QCOM_SCM_SVC_OCMEM,
                .cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
        };

        desc.args[0] = id;
        desc.args[1] = offset;
        desc.args[2] = size;
        desc.arginfo = QCOM_SCM_ARGS(3);

        return qcom_scm_call(dev, &desc, NULL);
}

void __qcom_scm_init(void)
{
}

bool __qcom_scm_pas_supported(struct device *dev, u32 peripheral)
{
        int ret;
        struct qcom_scm_desc desc = {
                .svc = QCOM_SCM_SVC_PIL,
                .cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
        };
        struct qcom_scm_res res;

        desc.args[0] = peripheral;
        desc.arginfo = QCOM_SCM_ARGS(1);

        ret = qcom_scm_call(dev, &desc, &res);

        return ret ? false : !!res.result[0];
}

int __qcom_scm_pas_init_image(struct device *dev, u32 peripheral,
                              dma_addr_t metadata_phys)
{
        int ret;
        struct qcom_scm_desc desc = {
                .svc = QCOM_SCM_SVC_PIL,
                .cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
        };
        struct qcom_scm_res res;

        desc.args[0] = peripheral;
        desc.args[1] = metadata_phys;
        desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW);

        ret = qcom_scm_call(dev, &desc, &res);

        return ret ? : res.result[0];
}

int __qcom_scm_pas_mem_setup(struct device *dev, u32 peripheral,
                             phys_addr_t addr, phys_addr_t size)
{
        int ret;
        struct qcom_scm_desc desc = {
                .svc = QCOM_SCM_SVC_PIL,
                .cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
        };
        struct qcom_scm_res res;

        desc.args[0] = peripheral;
        desc.args[1] = addr;
        desc.args[2] = size;
        desc.arginfo = QCOM_SCM_ARGS(3);

        ret = qcom_scm_call(dev, &desc, &res);

        return ret ? : res.result[0];
}

int __qcom_scm_pas_auth_and_reset(struct device *dev, u32 peripheral)
{
        int ret;
        struct qcom_scm_desc desc = {
                .svc = QCOM_SCM_SVC_PIL,
                .cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
        };
        struct qcom_scm_res res;

        desc.args[0] = peripheral;
        desc.arginfo = QCOM_SCM_ARGS(1);

        ret = qcom_scm_call(dev, &desc, &res);

        return ret ? : res.result[0];
}

int __qcom_scm_pas_shutdown(struct device *dev, u32 peripheral)
{
        int ret;
        struct qcom_scm_desc desc = {
                .svc = QCOM_SCM_SVC_PIL,
                .cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
        };
        struct qcom_scm_res res;

        desc.args[0] = peripheral;
        desc.arginfo = QCOM_SCM_ARGS(1);

        ret = qcom_scm_call(dev, &desc, &res);

        return ret ? : res.result[0];
}

int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
{
        struct qcom_scm_desc desc = {
                .svc = QCOM_SCM_SVC_PIL,
                .cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
        };
        struct qcom_scm_res res;
        int ret;

        desc.args[0] = reset;
        desc.args[1] = 0;
        desc.arginfo = QCOM_SCM_ARGS(2);

        ret = qcom_scm_call(dev, &desc, &res);

        return ret ? : res.result[0];
}

int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
{
        return qcom_scm_call_atomic2(QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_SET_DLOAD_MODE,
                                     enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0, 0);
}

int __qcom_scm_set_remote_state(struct device *dev, u32 state, u32 id)
{
        struct qcom_scm_desc desc = {
                .svc = QCOM_SCM_SVC_BOOT,
                .cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
        };
        struct qcom_scm_res res;
        int ret;

        desc.args[0] = state;
        desc.args[1] = id;
        desc.arginfo = QCOM_SCM_ARGS(2);

        ret = qcom_scm_call(dev, &desc, &res);

        return ret ? : res.result[0];
}

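/* Memory ownership assignment is not implemented for the legacy convention. */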
int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
                          size_t mem_sz, phys_addr_t src, size_t src_sz,
                          phys_addr_t dest, size_t dest_sz)
{
        return -ENODEV;
}

int __qcom_scm_restore_sec_cfg(struct device *dev, u32 device_id,
                               u32 spare)
{
        struct qcom_scm_desc desc = {
                .svc = QCOM_SCM_SVC_MP,
                .cmd = QCOM_SCM_MP_RESTORE_SEC_CFG,
        };
        struct qcom_scm_res res;
        int ret;

        desc.args[0] = device_id;
        desc.args[1] = spare;
        desc.arginfo = QCOM_SCM_ARGS(2);

        ret = qcom_scm_call(dev, &desc, &res);

        return ret ? : res.result[0];
}

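/* The secure IOMMU page table calls are not implemented for the legacy
 * convention either.
 */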
int __qcom_scm_iommu_secure_ptbl_size(struct device *dev, u32 spare,
                                      size_t *size)
{
        return -ENODEV;
}

int __qcom_scm_iommu_secure_ptbl_init(struct device *dev, u64 addr, u32 size,
                                      u32 spare)
{
        return -ENODEV;
}

int __qcom_scm_io_readl(struct device *dev, phys_addr_t addr,
                        unsigned int *val)
{
        int ret;

        ret = qcom_scm_call_atomic1(QCOM_SCM_SVC_IO, QCOM_SCM_IO_READ, addr);
        if (ret >= 0)
                *val = ret;

        return ret < 0 ? ret : 0;
}

int __qcom_scm_io_writel(struct device *dev, phys_addr_t addr, unsigned int val)
{
        return qcom_scm_call_atomic2(QCOM_SCM_SVC_IO, QCOM_SCM_IO_WRITE,
                                     addr, val);
}

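/* The QSMMU500 safe-toggle call is likewise not implemented here. */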
int __qcom_scm_qsmmu500_wait_safe_toggle(struct device *dev, bool enable)
{
        return -ENODEV;
}