// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 - 2015 UNISYS CORPORATION
 * All rights reserved.
 */

#include <linux/acpi.h>
#include <linux/crash_dump.h>
#include <linux/visorbus.h>

#include "visorbus_private.h"

/* {72120008-4AAB-11DC-8530-444553544200} */
#define VISOR_SIOVM_GUID GUID_INIT(0x72120008, 0x4AAB, 0x11DC, 0x85, 0x30, \
				   0x44, 0x45, 0x53, 0x54, 0x42, 0x00)

static const guid_t visor_vhba_channel_guid = VISOR_VHBA_CHANNEL_GUID;
static const guid_t visor_siovm_guid = VISOR_SIOVM_GUID;
static const guid_t visor_controlvm_channel_guid = VISOR_CONTROLVM_CHANNEL_GUID;

#define POLLJIFFIES_CONTROLVM_FAST 1
#define POLLJIFFIES_CONTROLVM_SLOW 100

#define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)

#define UNISYS_VISOR_LEAF_ID 0x40000000

/* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
#define UNISYS_VISOR_ID_EBX 0x73696e55
#define UNISYS_VISOR_ID_ECX 0x70537379
#define UNISYS_VISOR_ID_EDX 0x34367261

/*
 * When the controlvm channel is idle for at least MIN_IDLE_SECONDS, we switch
 * to slow polling mode. As soon as we get a controlvm message, we switch back
 * to fast polling mode.
 */
#define MIN_IDLE_SECONDS 10

struct parser_context {
	unsigned long allocbytes;
	unsigned long param_bytes;
	u8 *curr;
	unsigned long bytes_remaining;
	bool byte_stream;
	struct visor_controlvm_parameters_header data;
};

/* VMCALL_CONTROLVM_ADDR: Used by all guests, not just IO. */
#define VMCALL_CONTROLVM_ADDR 0x0501

enum vmcall_result {
	VMCALL_RESULT_SUCCESS = 0,
	VMCALL_RESULT_INVALID_PARAM = 1,
	VMCALL_RESULT_DATA_UNAVAILABLE = 2,
	VMCALL_RESULT_FAILURE_UNAVAILABLE = 3,
	VMCALL_RESULT_DEVICE_ERROR = 4,
	VMCALL_RESULT_DEVICE_NOT_READY = 5
};

/*
 * struct vmcall_io_controlvm_addr_params - Structure for IO VMCALLs. Carries
 *                                          the parameters of the
 *                                          VMCALL_CONTROLVM_ADDR interface.
 * @address:       The guest-relative physical address of the ControlVm
 *                 channel. This VMCall fills this in with the appropriate
 *                 address. Contents provided by this VMCALL (OUT).
 * @channel_bytes: The size of the ControlVm channel in bytes. This VMCall
 *                 fills this in with the appropriate size. Contents provided
 *                 by this VMCALL (OUT).
 * @unused:        Unused bytes in the 64-bit aligned struct.
 */
struct vmcall_io_controlvm_addr_params {
	u64 address;
	u32 channel_bytes;
	u8 unused[4];
} __packed;

struct visorchipset_device {
	struct acpi_device *acpi_device;
	unsigned long poll_jiffies;
	/* when we got our last controlvm message */
	unsigned long most_recent_message_jiffies;
	struct delayed_work periodic_controlvm_work;
	struct visorchannel *controlvm_channel;
	unsigned long controlvm_payload_bytes_buffered;
	/*
	 * The following variables are used to handle the scenario where we are
	 * unable to offload the payload from a controlvm message due to memory
	 * requirements. In this scenario, we simply stash the controlvm
	 * message, then attempt to process it again the next time
	 * controlvm_periodic_work() runs.
	 */
	struct controlvm_message controlvm_pending_msg;
	bool controlvm_pending_msg_valid;
	struct vmcall_io_controlvm_addr_params controlvm_params;
};

static struct visorchipset_device *chipset_dev;

struct parahotplug_request {
	struct list_head list;
	int id;
	unsigned long expiration;
	struct controlvm_message msg;
};

/* sysfs attribute show/store functions */
static ssize_t toolaction_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	u8 tool_action = 0;
	int err;

	err = visorchannel_read(chipset_dev->controlvm_channel,
				offsetof(struct visor_controlvm_channel,
					 tool_action),
				&tool_action, sizeof(u8));
	if (err)
		return err;
	return sprintf(buf, "%u\n", tool_action);
}

static ssize_t toolaction_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	u8 tool_action;
	int err;

	if (kstrtou8(buf, 10, &tool_action))
		return -EINVAL;
	err = visorchannel_write(chipset_dev->controlvm_channel,
				 offsetof(struct visor_controlvm_channel,
					  tool_action),
				 &tool_action, sizeof(u8));
	if (err)
		return err;
	return count;
}
static DEVICE_ATTR_RW(toolaction);

static ssize_t boottotool_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct efi_visor_indication efi_visor_indication;
	int err;

	err = visorchannel_read(chipset_dev->controlvm_channel,
				offsetof(struct visor_controlvm_channel,
					 efi_visor_ind),
				&efi_visor_indication,
				sizeof(struct efi_visor_indication));
	if (err)
		return err;
	return sprintf(buf, "%u\n", efi_visor_indication.boot_to_tool);
}

static ssize_t boottotool_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	int val, err;
	struct efi_visor_indication efi_visor_indication;

	if (kstrtoint(buf, 10, &val))
		return -EINVAL;
	efi_visor_indication.boot_to_tool = val;
	err = visorchannel_write(chipset_dev->controlvm_channel,
				 offsetof(struct visor_controlvm_channel,
					  efi_visor_ind),
				 &(efi_visor_indication),
				 sizeof(struct efi_visor_indication));
	if (err)
		return err;
	return count;
}
static DEVICE_ATTR_RW(boottotool);

static ssize_t error_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	u32 error = 0;
	int err;

	err = visorchannel_read(chipset_dev->controlvm_channel,
				offsetof(struct visor_controlvm_channel,
					 installation_error),
				&error, sizeof(u32));
	if (err)
		return err;
	return sprintf(buf, "%u\n", error);
}

static ssize_t error_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	u32 error;
	int err;

	if (kstrtou32(buf, 10, &error))
		return -EINVAL;
	err = visorchannel_write(chipset_dev->controlvm_channel,
				 offsetof(struct visor_controlvm_channel,
					  installation_error),
				 &error, sizeof(u32));
	if (err)
		return err;
	return count;
}
static DEVICE_ATTR_RW(error);

static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	u32 text_id = 0;
	int err;

	err = visorchannel_read(chipset_dev->controlvm_channel,
				offsetof(struct visor_controlvm_channel,
					 installation_text_id),
				&text_id, sizeof(u32));
	if (err)
		return err;
	return sprintf(buf, "%u\n", text_id);
}

static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	u32 text_id;
	int err;

	if (kstrtou32(buf, 10, &text_id))
		return -EINVAL;
	err = visorchannel_write(chipset_dev->controlvm_channel,
				 offsetof(struct visor_controlvm_channel,
					  installation_text_id),
				 &text_id, sizeof(u32));
	if (err)
		return err;
	return count;
}
static DEVICE_ATTR_RW(textid);

static ssize_t remaining_steps_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	u16 remaining_steps = 0;
	int err;

	err = visorchannel_read(chipset_dev->controlvm_channel,
				offsetof(struct visor_controlvm_channel,
					 installation_remaining_steps),
				&remaining_steps, sizeof(u16));
	if (err)
		return err;
	return sprintf(buf, "%hu\n", remaining_steps);
}

static ssize_t remaining_steps_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	u16 remaining_steps;
	int err;

	if (kstrtou16(buf, 10, &remaining_steps))
		return -EINVAL;
	err = visorchannel_write(chipset_dev->controlvm_channel,
				 offsetof(struct visor_controlvm_channel,
					  installation_remaining_steps),
				 &remaining_steps, sizeof(u16));
	if (err)
		return err;
	return count;
}
static DEVICE_ATTR_RW(remaining_steps);

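/*
 * controlvm_init_response() - init a response message from a request header
 * @msg:      the response message to initialize
 * @msg_hdr:  the header of the request being responded to
 * @response: the result code; a negative value marks the response as failed
 *
 * Zeroes @msg, copies in @msg_hdr, clears the payload fields and, when
 * @response is negative, sets the failed flag and completion status.
 */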
static void controlvm_init_response(struct controlvm_message *msg,
				    struct controlvm_message_header *msg_hdr,
				    int response)
{
	memset(msg, 0, sizeof(struct controlvm_message));
	memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
	msg->hdr.payload_bytes = 0;
	msg->hdr.payload_vm_offset = 0;
	msg->hdr.payload_max_bytes = 0;
	if (response < 0) {
		msg->hdr.flags.failed = 1;
		msg->hdr.completion_status = (u32)(-response);
	}
}

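/*
 * controlvm_respond_chipset_init() - respond to a CHIPSET_INIT request
 * @msg_hdr:  header of the CHIPSET_INIT request
 * @response: result code to report back
 * @features: the feature bits negotiated in chipset_init()
 *
 * Return: 0 on success, or the error from visorchannel_signalinsert().
 */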
static int controlvm_respond_chipset_init(
				struct controlvm_message_header *msg_hdr,
				int response,
				enum visor_chipset_feature features)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	outmsg.cmd.init_chipset.features = features;
	return visorchannel_signalinsert(chipset_dev->controlvm_channel,
					 CONTROLVM_QUEUE_REQUEST, &outmsg);
}

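/*
 * chipset_init() - handle a CHIPSET_INIT controlvm message
 * @inmsg: the CHIPSET_INIT message
 *
 * Only the first CHIPSET_INIT is accepted; repeats are reported back as
 * CONTROLVM_RESP_ALREADY_DONE. The negotiated feature bits (parahotplug
 * support plus the "reply" bit) are returned to Command when a response
 * is expected.
 *
 * Return: 0 on success, negative value on failure.
 */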
static int chipset_init(struct controlvm_message *inmsg)
{
	static int chipset_inited;
	enum visor_chipset_feature features = 0;
	int rc = CONTROLVM_RESP_SUCCESS;
	int res = 0;

	if (chipset_inited) {
		rc = -CONTROLVM_RESP_ALREADY_DONE;
		res = -EIO;
		goto out_respond;
	}
	chipset_inited = 1;
	/*
	 * Set features to indicate we support parahotplug (if Command also
	 * supports it). Set the "reply" bit so Command knows this is a
	 * features-aware driver.
	 */
	features = inmsg->cmd.init_chipset.features &
		   VISOR_CHIPSET_FEATURE_PARA_HOTPLUG;
	features |= VISOR_CHIPSET_FEATURE_REPLY;

out_respond:
	if (inmsg->hdr.flags.response_expected)
		res = controlvm_respond_chipset_init(&inmsg->hdr, rc, features);

	return res;
}

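/*
 * controlvm_respond() - send a response message on the controlvm channel
 * @msg_hdr:  header of the request being responded to
 * @response: result code to report back
 * @state:    if non-NULL, segment state to report in a DEVICE_CHANGESTATE
 *            response (the phys_device flag is also set)
 *
 * Test messages are not answered.
 *
 * Return: 0 on success, negative value on failure.
 */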
static int controlvm_respond(struct controlvm_message_header *msg_hdr,
			     int response, struct visor_segment_state *state)
{
	struct controlvm_message outmsg;

	controlvm_init_response(&outmsg, msg_hdr, response);
	if (outmsg.hdr.flags.test_message == 1)
		return -EINVAL;
	if (state) {
		outmsg.cmd.device_change_state.state = *state;
		outmsg.cmd.device_change_state.flags.phys_device = 1;
	}
	return visorchannel_signalinsert(chipset_dev->controlvm_channel,
					 CONTROLVM_QUEUE_REQUEST, &outmsg);
}

enum crash_obj_type {
	CRASH_DEV,
	CRASH_BUS,
};

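/*
 * save_crash_message() - save a controlvm message for use after a crash
 * @msg:     the bus-create or device-create message to save
 * @cr_type: CRASH_BUS or CRASH_DEV, selecting which saved-message slot to use
 *
 * The message is written into the crash-recovery area of the controlvm
 * channel so the bus and device can be re-created when booting the crash
 * (kdump) kernel.
 *
 * Return: 0 on success, negative errno on failure.
 */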
static int save_crash_message(struct controlvm_message *msg,
			      enum crash_obj_type cr_type)
{
	u32 local_crash_msg_offset;
	u16 local_crash_msg_count;
	int err;

	err = visorchannel_read(chipset_dev->controlvm_channel,
				offsetof(struct visor_controlvm_channel,
					 saved_crash_message_count),
				&local_crash_msg_count, sizeof(u16));
	if (err) {
		dev_err(&chipset_dev->acpi_device->dev,
			"failed to read message count\n");
		return err;
	}
	if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
		dev_err(&chipset_dev->acpi_device->dev,
			"invalid number of messages\n");
		return -EIO;
	}
	err = visorchannel_read(chipset_dev->controlvm_channel,
				offsetof(struct visor_controlvm_channel,
					 saved_crash_message_offset),
				&local_crash_msg_offset, sizeof(u32));
	if (err) {
		dev_err(&chipset_dev->acpi_device->dev,
			"failed to read offset\n");
		return err;
	}
	switch (cr_type) {
	case CRASH_DEV:
		local_crash_msg_offset += sizeof(struct controlvm_message);
		err = visorchannel_write(chipset_dev->controlvm_channel,
					 local_crash_msg_offset, msg,
					 sizeof(struct controlvm_message));
		if (err) {
			dev_err(&chipset_dev->acpi_device->dev,
				"failed to write dev msg\n");
			return err;
		}
		break;
	case CRASH_BUS:
		err = visorchannel_write(chipset_dev->controlvm_channel,
					 local_crash_msg_offset, msg,
					 sizeof(struct controlvm_message));
		if (err) {
			dev_err(&chipset_dev->acpi_device->dev,
				"failed to write bus msg\n");
			return err;
		}
		break;
	default:
		dev_err(&chipset_dev->acpi_device->dev,
			"Invalid crash_obj_type\n");
		break;
	}
	return 0;
}

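/*
 * controlvm_responder() - respond to a previously stashed request
 * @cmd_id:          the message id the response is for
 * @pending_msg_hdr: the saved request header
 * @response:        result code to report back
 *
 * Return: -EINVAL if @pending_msg_hdr does not match @cmd_id, otherwise the
 * result of controlvm_respond().
 */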
static int controlvm_responder(enum controlvm_id cmd_id,
			       struct controlvm_message_header *pending_msg_hdr,
			       int response)
{
	if (pending_msg_hdr->id != (u32)cmd_id)
		return -EINVAL;

	return controlvm_respond(pending_msg_hdr, response, NULL);
}

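/*
 * device_changestate_responder() - respond to a DEVICE_CHANGESTATE request
 * @cmd_id:   the message id the response is for
 * @p:        the device whose state changed
 * @response: result code to report back
 * @state:    the segment state to report
 *
 * The response echoes the device's bus and device numbers along with @state.
 */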
static int device_changestate_responder(enum controlvm_id cmd_id,
					struct visor_device *p, int response,
					struct visor_segment_state state)
{
	struct controlvm_message outmsg;

	if (p->pending_msg_hdr->id != cmd_id)
		return -EINVAL;

	controlvm_init_response(&outmsg, p->pending_msg_hdr, response);
	outmsg.cmd.device_change_state.bus_no = p->chipset_bus_no;
	outmsg.cmd.device_change_state.dev_no = p->chipset_dev_no;
	outmsg.cmd.device_change_state.state = state;
	return visorchannel_signalinsert(chipset_dev->controlvm_channel,
					 CONTROLVM_QUEUE_REQUEST, &outmsg);
}

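/*
 * visorbus_create() - handle a controlvm BUS_CREATE message
 * @inmsg: the BUS_CREATE message
 *
 * Allocates a visor_device for the new bus, creates its controlvm-described
 * channel, and registers it via visorbus_create_instance(), which sends the
 * response on success. A create message for the SIOVM bus is also saved for
 * crash recovery.
 *
 * Return: 0 on success, negative errno on failure.
 */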
static int visorbus_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	struct controlvm_message_header *pmsg_hdr;
	u32 bus_no = cmd->create_bus.bus_no;
	struct visor_device *bus_info;
	struct visorchannel *visorchannel;
	int err;

	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (bus_info && bus_info->state.created == 1) {
		dev_err(&chipset_dev->acpi_device->dev,
			"failed %s: already exists\n", __func__);
		err = -EEXIST;
		goto err_respond;
	}
	bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
	if (!bus_info) {
		err = -ENOMEM;
		goto err_respond;
	}
	INIT_LIST_HEAD(&bus_info->list_all);
	bus_info->chipset_bus_no = bus_no;
	bus_info->chipset_dev_no = BUS_ROOT_DEVICE;
	if (guid_equal(&cmd->create_bus.bus_inst_guid, &visor_siovm_guid)) {
		err = save_crash_message(inmsg, CRASH_BUS);
		if (err)
			goto err_free_bus_info;
	}
	if (inmsg->hdr.flags.response_expected == 1) {
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			err = -ENOMEM;
			goto err_free_bus_info;
		}
		memcpy(pmsg_hdr, &inmsg->hdr,
		       sizeof(struct controlvm_message_header));
		bus_info->pending_msg_hdr = pmsg_hdr;
	}
	visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
					   GFP_KERNEL,
					   &cmd->create_bus.bus_data_type_guid,
					   false);
	if (!visorchannel) {
		err = -ENOMEM;
		goto err_free_pending_msg;
	}
	bus_info->visorchannel = visorchannel;
	/* Response will be handled by visorbus_create_instance on success */
	err = visorbus_create_instance(bus_info);
	if (err)
		goto err_destroy_channel;
	return 0;

err_destroy_channel:
	visorchannel_destroy(visorchannel);

err_free_pending_msg:
	kfree(bus_info->pending_msg_hdr);

err_free_bus_info:
	kfree(bus_info);

err_respond:
	if (inmsg->hdr.flags.response_expected == 1)
		controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
	return err;
}

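/*
 * visorbus_destroy() - handle a controlvm BUS_DESTROY message
 * @inmsg: the BUS_DESTROY message
 *
 * Validates the bus, stashes the request header when a response is expected,
 * and removes the bus via visorbus_remove_instance(), which sends the
 * response.
 *
 * Return: 0 on success, negative errno on failure.
 */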
static int visorbus_destroy(struct controlvm_message *inmsg)
{
	struct controlvm_message_header *pmsg_hdr;
	u32 bus_no = inmsg->cmd.destroy_bus.bus_no;
	struct visor_device *bus_info;
	int err;

	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (!bus_info) {
		err = -ENODEV;
		goto err_respond;
	}
	if (bus_info->state.created == 0) {
		err = -ENOENT;
		goto err_respond;
	}
	if (bus_info->pending_msg_hdr) {
		/* only non-NULL if dev is still waiting on a response */
		err = -EEXIST;
		goto err_respond;
	}
	if (inmsg->hdr.flags.response_expected == 1) {
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			err = -ENOMEM;
			goto err_respond;
		}
		memcpy(pmsg_hdr, &inmsg->hdr,
		       sizeof(struct controlvm_message_header));
		bus_info->pending_msg_hdr = pmsg_hdr;
	}
	/* Response will be handled by visorbus_remove_instance */
	visorbus_remove_instance(bus_info);
	return 0;

err_respond:
	if (inmsg->hdr.flags.response_expected == 1)
		controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
	return err;
}

static const guid_t *parser_id_get(struct parser_context *ctx)
{
	return &ctx->data.id;
}

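/*
 * parser_string_get() - copy a bounded byte sequence into a new string
 * @pscan: start of the bytes to copy
 * @nscan: maximum number of bytes to scan
 *
 * Return: a kzalloc'd, NUL-terminated copy of at most @nscan bytes from
 * @pscan (the caller must kfree it), or NULL if @nscan is 0 or the
 * allocation fails.
 */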
static void *parser_string_get(u8 *pscan, int nscan)
{
	int value_length;
	void *value;

	if (nscan == 0)
		return NULL;

	value_length = strnlen(pscan, nscan);
	value = kzalloc(value_length + 1, GFP_KERNEL);
	if (!value)
		return NULL;
	if (value_length > 0)
		memcpy(value, pscan, value_length);
	return value;
}

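/*
 * parser_name_get() - extract the name string from parsed parameter data
 * @ctx: the parser context holding the parameter header and payload
 *
 * Return: a newly allocated copy of the name described by the parameter
 * header, or NULL if the name does not fit within the parameter area.
 */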
static void *parser_name_get(struct parser_context *ctx)
{
	struct visor_controlvm_parameters_header *phdr;

	phdr = &ctx->data;
	if ((unsigned long)phdr->name_offset +
	    (unsigned long)phdr->name_length > ctx->param_bytes)
		return NULL;
	ctx->curr = (char *)phdr + phdr->name_offset;
	ctx->bytes_remaining = phdr->name_length;
	return parser_string_get(ctx->curr, phdr->name_length);
}

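/*
 * visorbus_configure() - handle a controlvm BUS_CONFIGURE message
 * @inmsg:      the BUS_CONFIGURE message
 * @parser_ctx: parsed parameter payload, or NULL if none was supplied
 *
 * Points the bus channel at the guest partition handle and, when parameter
 * data is present, records the partition GUID and name on the bus.
 *
 * Return: 0 on success, negative errno on failure.
 */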
static int visorbus_configure(struct controlvm_message *inmsg,
			      struct parser_context *parser_ctx)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	u32 bus_no;
	struct visor_device *bus_info;
	int err = 0;

	bus_no = cmd->configure_bus.bus_no;
	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (!bus_info) {
		err = -EINVAL;
		goto err_respond;
	}
	if (bus_info->state.created == 0) {
		err = -EINVAL;
		goto err_respond;
	}
	if (bus_info->pending_msg_hdr) {
		err = -EIO;
		goto err_respond;
	}
	err = visorchannel_set_clientpartition(bus_info->visorchannel,
					       cmd->configure_bus.guest_handle);
	if (err)
		goto err_respond;
	if (parser_ctx) {
		const guid_t *partition_guid = parser_id_get(parser_ctx);

		guid_copy(&bus_info->partition_guid, partition_guid);
		bus_info->name = parser_name_get(parser_ctx);
	}
	if (inmsg->hdr.flags.response_expected == 1)
		controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
	return 0;

err_respond:
	dev_err(&chipset_dev->acpi_device->dev,
		"%s exited with err: %d\n", __func__, err);
	if (inmsg->hdr.flags.response_expected == 1)
		controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
	return err;
}

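/*
 * visorbus_device_create() - handle a controlvm DEVICE_CREATE message
 * @inmsg: the DEVICE_CREATE message
 *
 * Allocates a visor_device on the indicated bus, creates its channel, saves
 * vHBA create messages for crash recovery, and registers the device via
 * create_visor_device(), which sends the response.
 *
 * Return: 0 on success, negative errno on failure.
 */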
static int visorbus_device_create(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	struct controlvm_message_header *pmsg_hdr;
	u32 bus_no = cmd->create_device.bus_no;
	u32 dev_no = cmd->create_device.dev_no;
	struct visor_device *dev_info;
	struct visor_device *bus_info;
	struct visorchannel *visorchannel;
	int err;

	bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
	if (!bus_info) {
		dev_err(&chipset_dev->acpi_device->dev,
			"failed to get bus by id: %d\n", bus_no);
		err = -ENODEV;
		goto err_respond;
	}
	if (bus_info->state.created == 0) {
		dev_err(&chipset_dev->acpi_device->dev,
			"bus not created, id: %d\n", bus_no);
		err = -EINVAL;
		goto err_respond;
	}
	dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
	if (dev_info && dev_info->state.created == 1) {
		dev_err(&chipset_dev->acpi_device->dev,
			"device already exists: %d/%d\n", bus_no, dev_no);
		err = -EEXIST;
		goto err_respond;
	}

	dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
	if (!dev_info) {
		err = -ENOMEM;
		goto err_respond;
	}
	dev_info->chipset_bus_no = bus_no;
	dev_info->chipset_dev_no = dev_no;
	guid_copy(&dev_info->inst, &cmd->create_device.dev_inst_guid);
	dev_info->device.parent = &bus_info->device;
	visorchannel = visorchannel_create(cmd->create_device.channel_addr,
					   GFP_KERNEL,
					   &cmd->create_device.data_type_guid,
					   true);
	if (!visorchannel) {
		dev_err(&chipset_dev->acpi_device->dev,
			"failed to create visorchannel: %d/%d\n",
			bus_no, dev_no);
		err = -ENOMEM;
		goto err_free_dev_info;
	}
	dev_info->visorchannel = visorchannel;
	guid_copy(&dev_info->channel_type_guid,
		  &cmd->create_device.data_type_guid);
	if (guid_equal(&cmd->create_device.data_type_guid,
		       &visor_vhba_channel_guid)) {
		err = save_crash_message(inmsg, CRASH_DEV);
		if (err)
			goto err_destroy_visorchannel;
	}
	if (inmsg->hdr.flags.response_expected == 1) {
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			err = -ENOMEM;
			goto err_destroy_visorchannel;
		}
		memcpy(pmsg_hdr, &inmsg->hdr,
		       sizeof(struct controlvm_message_header));
		dev_info->pending_msg_hdr = pmsg_hdr;
	}
	/* create_visor_device will send response */
	err = create_visor_device(dev_info);
	if (err)
		goto err_destroy_visorchannel;

	return 0;

err_destroy_visorchannel:
	visorchannel_destroy(visorchannel);

err_free_dev_info:
	kfree(dev_info);

err_respond:
	if (inmsg->hdr.flags.response_expected == 1)
		controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err);
	return err;
}

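/*
 * visorbus_device_changestate() - handle a controlvm DEVICE_CHANGESTATE
 *                                 message
 * @inmsg: the DEVICE_CHANGESTATE message
 *
 * Resumes or pauses the device to match the requested segment state; the
 * response is sent from the resume/pause completion paths.
 *
 * Return: 0 on success, negative errno on failure.
 */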
static int visorbus_device_changestate(struct controlvm_message *inmsg)
{
	struct controlvm_message_packet *cmd = &inmsg->cmd;
	struct controlvm_message_header *pmsg_hdr;
	u32 bus_no = cmd->device_change_state.bus_no;
	u32 dev_no = cmd->device_change_state.dev_no;
	struct visor_segment_state state = cmd->device_change_state.state;
	struct visor_device *dev_info;
	int err = 0;

	dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
	if (!dev_info) {
		err = -ENODEV;
		goto err_respond;
	}
	if (dev_info->state.created == 0) {
		err = -EINVAL;
		goto err_respond;
	}
	if (dev_info->pending_msg_hdr) {
		/* only non-NULL if dev is still waiting on a response */
		err = -EIO;
		goto err_respond;
	}

	if (inmsg->hdr.flags.response_expected == 1) {
		pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
		if (!pmsg_hdr) {
			err = -ENOMEM;
			goto err_respond;
		}
		memcpy(pmsg_hdr, &inmsg->hdr,
		       sizeof(struct controlvm_message_header));
		dev_info->pending_msg_hdr = pmsg_hdr;
	}
	if (state.alive == segment_state_running.alive &&
	    state.operating == segment_state_running.operating)
		/* Response will be sent from visorchipset_device_resume */
		err = visorchipset_device_resume(dev_info);
	/* ServerNotReady / ServerLost / SegmentStateStandby */
	else if (state.alive == segment_state_standby.alive &&
		 state.operating == segment_state_standby.operating)
| 768 | /* |
| 769 | 		 * Technically this is the standby case, where the server is lost.
Sameer Wadgaonkar | c0b4413 | 2017-05-19 16:17:41 -0400 | [diff] [blame] | 770 | * Response will be sent from visorchipset_device_pause. |
David Kershner | 8e609b5 | 2016-11-03 11:44:26 -0400 | [diff] [blame] | 771 | */ |
Sameer Wadgaonkar | c0b4413 | 2017-05-19 16:17:41 -0400 | [diff] [blame] | 772 | err = visorchipset_device_pause(dev_info); |
David Kershner | b4a8e6a | 2017-03-28 09:34:54 -0400 | [diff] [blame] | 773 | if (err) |
| 774 | goto err_respond; |
David Kershner | 40fc79f | 2016-12-22 11:09:06 -0500 | [diff] [blame] | 775 | return 0; |
David Kershner | 0825f19 | 2016-11-03 11:44:25 -0400 | [diff] [blame] | 776 | |
| 777 | err_respond: |
David Kershner | 03662df | 2017-04-18 16:55:20 -0400 | [diff] [blame] | 778 | dev_err(&chipset_dev->acpi_device->dev, "failed: %d\n", err); |
David Kershner | 8e609b5 | 2016-11-03 11:44:26 -0400 | [diff] [blame] | 779 | if (inmsg->hdr.flags.response_expected == 1) |
David Kershner | 4fb2539 | 2017-03-28 09:34:39 -0400 | [diff] [blame] | 780 | controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err); |
David Kershner | 40fc79f | 2016-12-22 11:09:06 -0500 | [diff] [blame] | 781 | return err; |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 782 | } |
| 783 | |
Charles Daniels | e80ffd4 | 2017-08-22 13:26:57 -0400 | [diff] [blame] | 784 | static int visorbus_device_destroy(struct controlvm_message *inmsg) |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 785 | { |
Benjamin Romer | 2ea5117 | 2014-10-23 14:30:25 -0400 | [diff] [blame] | 786 | struct controlvm_message_packet *cmd = &inmsg->cmd; |
David Kershner | ef7b9dc | 2017-09-27 13:14:37 -0400 | [diff] [blame] | 787 | struct controlvm_message_header *pmsg_hdr; |
Jes Sorensen | 52063ec | 2015-04-13 10:28:41 -0400 | [diff] [blame] | 788 | u32 bus_no = cmd->destroy_device.bus_no; |
| 789 | u32 dev_no = cmd->destroy_device.dev_no; |
Don Zickus | a298bc0 | 2015-06-04 09:22:42 -0400 | [diff] [blame] | 790 | struct visor_device *dev_info; |
David Kershner | e795491 | 2016-12-22 11:09:07 -0500 | [diff] [blame] | 791 | int err; |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 792 | |
Don Zickus | a298bc0 | 2015-06-04 09:22:42 -0400 | [diff] [blame] | 793 | dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL); |
David Kershner | 9e9eec6 | 2016-11-03 11:44:27 -0400 | [diff] [blame] | 794 | if (!dev_info) { |
David Kershner | e795491 | 2016-12-22 11:09:07 -0500 | [diff] [blame] | 795 | err = -ENODEV; |
David Kershner | 9e9eec6 | 2016-11-03 11:44:27 -0400 | [diff] [blame] | 796 | goto err_respond; |
| 797 | } |
| 798 | if (dev_info->state.created == 0) { |
David Kershner | e795491 | 2016-12-22 11:09:07 -0500 | [diff] [blame] | 799 | err = -EINVAL; |
David Kershner | 9e9eec6 | 2016-11-03 11:44:27 -0400 | [diff] [blame] | 800 | goto err_respond; |
| 801 | } |
David Kershner | 9e9eec6 | 2016-11-03 11:44:27 -0400 | [diff] [blame] | 802 | if (dev_info->pending_msg_hdr) { |
| 803 | /* only non-NULL if dev is still waiting on a response */ |
David Kershner | e795491 | 2016-12-22 11:09:07 -0500 | [diff] [blame] | 804 | err = -EIO; |
David Kershner | 9e9eec6 | 2016-11-03 11:44:27 -0400 | [diff] [blame] | 805 | goto err_respond; |
| 806 | } |
| 807 | if (inmsg->hdr.flags.response_expected == 1) { |
| 808 | pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL); |
| 809 | if (!pmsg_hdr) { |
David Kershner | e795491 | 2016-12-22 11:09:07 -0500 | [diff] [blame] | 810 | err = -ENOMEM; |
David Kershner | 9e9eec6 | 2016-11-03 11:44:27 -0400 | [diff] [blame] | 811 | goto err_respond; |
| 812 | } |
| 813 | |
| 814 | memcpy(pmsg_hdr, &inmsg->hdr, |
| 815 | sizeof(struct controlvm_message_header)); |
| 816 | dev_info->pending_msg_hdr = pmsg_hdr; |
| 817 | } |
Zachary Dremann | 661a215 | 2017-07-17 16:17:03 -0400 | [diff] [blame] | 818 | kfree(dev_info->name); |
Sameer Wadgaonkar | b74856b | 2017-08-22 13:27:35 -0400 | [diff] [blame] | 819 | remove_visor_device(dev_info); |
David Kershner | e795491 | 2016-12-22 11:09:07 -0500 | [diff] [blame] | 820 | return 0; |
David Kershner | 9e9eec6 | 2016-11-03 11:44:27 -0400 | [diff] [blame] | 821 | |
| 822 | err_respond: |
| 823 | if (inmsg->hdr.flags.response_expected == 1) |
David Kershner | 4fb2539 | 2017-03-28 09:34:39 -0400 | [diff] [blame] | 824 | controlvm_responder(inmsg->hdr.id, &inmsg->hdr, err); |
David Kershner | e795491 | 2016-12-22 11:09:07 -0500 | [diff] [blame] | 825 | return err; |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 826 | } |
| 827 | |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 828 | /* |
David Binder | 5d501ef | 2016-12-22 11:09:02 -0500 | [diff] [blame] | 829 | * The general parahotplug flow works as follows. The visorchipset receives |
| 830 | * a DEVICE_CHANGESTATE message from Command specifying a physical device |
| 831 | * to enable or disable. The CONTROLVM message handler calls |
| 832 | * parahotplug_process_message, which then adds the message to a global list |
| 833 | * and kicks off a udev event which causes a user level script to enable or |
| 834 | * disable the specified device. The udev script then writes to |
| 835 | * /sys/devices/platform/visorchipset/parahotplug, which causes the |
| 836 | * parahotplug store functions to get called, at which point the |
David Binder | 904ee62 | 2017-07-17 16:16:42 -0400 | [diff] [blame] | 837 | * appropriate CONTROLVM message is retrieved from the list and responded to. |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 838 | */ |
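| | /*
| | * Illustrative sketch only (the script itself is not part of this driver):
| | * a support script launched by the udev event could complete a disable
| | * request by echoing the request id back through the sysfs attribute, e.g.
| | *
| | *   echo "$VISOR_PARAHOTPLUG_ID" > \
| | *       /sys/devices/platform/visorchipset/parahotplug/devicedisabled
| | */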
| 839 | |
| 840 | #define PARAHOTPLUG_TIMEOUT_MS 2000 |
| 841 | |
David Binder | 04dbfea | 2017-02-21 12:53:25 -0500 | [diff] [blame] | 842 | /* |
David Binder | 5d501ef | 2016-12-22 11:09:02 -0500 | [diff] [blame] | 843 | * parahotplug_next_id() - generate unique int to match an outstanding |
| 844 | * CONTROLVM message with a udev script /sys |
| 845 | * response |
David Binder | ec17f45 | 2016-06-10 21:48:18 -0400 | [diff] [blame] | 846 | * |
| 847 | * Return: a unique integer value |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 848 | */ |
Charles Daniels | e80ffd4 | 2017-08-22 13:26:57 -0400 | [diff] [blame] | 849 | static int parahotplug_next_id(void) |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 850 | { |
| 851 | static atomic_t id = ATOMIC_INIT(0); |
Benjamin Romer | 26eb2c0 | 2014-08-18 09:34:53 -0400 | [diff] [blame] | 852 | |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 853 | return atomic_inc_return(&id); |
| 854 | } |
| 855 | |
David Binder | 04dbfea | 2017-02-21 12:53:25 -0500 | [diff] [blame] | 856 | /* |
David Binder | ec17f45 | 2016-06-10 21:48:18 -0400 | [diff] [blame] | 857 | * parahotplug_next_expiration() - returns the time (in jiffies) when a |
| 858 | * CONTROLVM message on the list should expire |
| 859 | * -- PARAHOTPLUG_TIMEOUT_MS in the future |
| 860 | * |
| 861 | * Return: expected expiration time (in jiffies) |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 862 | */ |
Charles Daniels | e80ffd4 | 2017-08-22 13:26:57 -0400 | [diff] [blame] | 863 | static unsigned long parahotplug_next_expiration(void) |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 864 | { |
Nicholas Mc Guire | 2cc1a1b | 2015-01-31 12:02:08 +0100 | [diff] [blame] | 865 | return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS); |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 866 | } |
| 867 | |
David Binder | 04dbfea | 2017-02-21 12:53:25 -0500 | [diff] [blame] | 868 | /* |
David Binder | ec17f45 | 2016-06-10 21:48:18 -0400 | [diff] [blame] | 869 | * parahotplug_request_create() - create a parahotplug_request, which is |
| 870 | * basically a wrapper for a CONTROLVM_MESSAGE |
| 871 | * that we can stick on a list |
| 872 | * @msg: the message to insert in the request |
| 873 | * |
| 874 | * Return: the request containing the provided message |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 875 | */ |
Charles Daniels | e80ffd4 | 2017-08-22 13:26:57 -0400 | [diff] [blame] | 876 | static struct parahotplug_request *parahotplug_request_create( |
| 877 | struct controlvm_message *msg) |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 878 | { |
Quentin Lambert | ea0dcfc | 2015-02-10 15:12:07 +0100 | [diff] [blame] | 879 | struct parahotplug_request *req; |
| 880 | |
David Kershner | 8c8c975 | 2017-04-18 16:55:08 -0400 | [diff] [blame] | 881 | req = kmalloc(sizeof(*req), GFP_KERNEL); |
Benjamin Romer | 38f736e | 2015-03-16 13:58:13 -0400 | [diff] [blame] | 882 | if (!req) |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 883 | return NULL; |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 884 | req->id = parahotplug_next_id(); |
| 885 | req->expiration = parahotplug_next_expiration(); |
| 886 | req->msg = *msg; |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 887 | return req; |
| 888 | } |
| 889 | |
David Binder | 04dbfea | 2017-02-21 12:53:25 -0500 | [diff] [blame] | 890 | /* |
David Binder | ec17f45 | 2016-06-10 21:48:18 -0400 | [diff] [blame] | 891 | * parahotplug_request_destroy() - free a parahotplug_request |
| 892 | * @req: the request to deallocate |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 893 | */ |
Charles Daniels | e80ffd4 | 2017-08-22 13:26:57 -0400 | [diff] [blame] | 894 | static void parahotplug_request_destroy(struct parahotplug_request *req) |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 895 | { |
| 896 | kfree(req); |
| 897 | } |
| 898 | |
David Kershner | 5131966 | 2016-09-19 17:09:37 -0400 | [diff] [blame] | 899 | static LIST_HEAD(parahotplug_request_list); |
Sameer Wadgaonkar | ac0aba6 | 2017-06-30 15:43:08 -0400 | [diff] [blame] | 900 | /* lock to protect parahotplug_request_list */
| 901 | static DEFINE_SPINLOCK(parahotplug_request_list_lock); |
David Kershner | 5131966 | 2016-09-19 17:09:37 -0400 | [diff] [blame] | 902 | |
David Binder | 04dbfea | 2017-02-21 12:53:25 -0500 | [diff] [blame] | 903 | /* |
David Binder | ec17f45 | 2016-06-10 21:48:18 -0400 | [diff] [blame] | 904 | * parahotplug_request_complete() - mark request as complete |
| 905 | * @id: the id of the request |
| 906 | * @active: indicates whether the request is assigned to active partition |
| 907 | * |
David Binder | 5d501ef | 2016-12-22 11:09:02 -0500 | [diff] [blame] | 908 | * Called from the /sys handler, which means the user script has |
David Binder | ec17f45 | 2016-06-10 21:48:18 -0400 | [diff] [blame] | 909 | * finished the enable/disable. Find the matching identifier, and |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 910 | * respond to the CONTROLVM message with success. |
David Binder | ec17f45 | 2016-06-10 21:48:18 -0400 | [diff] [blame] | 911 | * |
| 912 | * Return: 0 on success or -EINVAL on failure |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 913 | */ |
Charles Daniels | e80ffd4 | 2017-08-22 13:26:57 -0400 | [diff] [blame] | 914 | static int parahotplug_request_complete(int id, u16 active) |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 915 | { |
Jes Sorensen | e82ba62 | 2015-05-05 18:35:45 -0400 | [diff] [blame] | 916 | struct list_head *pos; |
| 917 | struct list_head *tmp; |
David Kershner | 040b78f | 2017-09-27 13:14:10 -0400 | [diff] [blame] | 918 | struct parahotplug_request *req; |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 919 | |
Benjamin Romer | ddf5de5 | 2015-03-16 13:58:41 -0400 | [diff] [blame] | 920 | spin_lock(¶hotplug_request_list_lock); |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 921 | /* Look for a request matching "id". */ |
Benjamin Romer | ddf5de5 | 2015-03-16 13:58:41 -0400 | [diff] [blame] | 922 | list_for_each_safe(pos, tmp, ¶hotplug_request_list) { |
David Kershner | 040b78f | 2017-09-27 13:14:10 -0400 | [diff] [blame] | 923 | req = list_entry(pos, struct parahotplug_request, list); |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 924 | if (req->id == id) { |
David Binder | ec17f45 | 2016-06-10 21:48:18 -0400 | [diff] [blame] | 925 | /* |
| 926 | * Found a match. Remove it from the list and |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 927 | * respond. |
| 928 | */ |
| 929 | list_del(pos); |
Benjamin Romer | ddf5de5 | 2015-03-16 13:58:41 -0400 | [diff] [blame] | 930 | spin_unlock(¶hotplug_request_list_lock); |
Benjamin Romer | 2ea5117 | 2014-10-23 14:30:25 -0400 | [diff] [blame] | 931 | req->msg.cmd.device_change_state.state.active = active; |
Benjamin Romer | 98d7b59 | 2014-10-23 14:30:26 -0400 | [diff] [blame] | 932 | if (req->msg.hdr.flags.response_expected) |
David Kershner | 4c0e65f | 2017-04-18 16:55:01 -0400 | [diff] [blame] | 933 | controlvm_respond( |
| 934 | &req->msg.hdr, CONTROLVM_RESP_SUCCESS, |
| 935 | &req->msg.cmd.device_change_state.state); |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 936 | parahotplug_request_destroy(req); |
| 937 | return 0; |
| 938 | } |
| 939 | } |
Benjamin Romer | ddf5de5 | 2015-03-16 13:58:41 -0400 | [diff] [blame] | 940 | spin_unlock(¶hotplug_request_list_lock); |
Erik Arfvidson | 119296e | 2016-05-13 23:17:20 -0400 | [diff] [blame] | 941 | return -EINVAL; |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 942 | } |
| 943 | |
David Binder | 04dbfea | 2017-02-21 12:53:25 -0500 | [diff] [blame] | 944 | /* |
David Kershner | ebeff05 | 2016-09-19 17:09:34 -0400 | [diff] [blame] | 945 | * devicedisabled_store() - disables the hotplug device |
| 946 | * @dev: sysfs interface variable not utilized in this function |
| 947 | * @attr: sysfs interface variable not utilized in this function |
| 948 | * @buf: buffer containing the device id |
| 949 | * @count: the size of the buffer |
| 950 | * |
| 951 | * The parahotplug/devicedisabled interface gets called by our support script |
| 952 | * when an SR-IOV device has been shut down. The ID is passed to the script |
| 953 | * and then passed back when the device has been removed. |
| 954 | * |
| 955 | * Return: the size of the buffer for success or negative for error |
| 956 | */ |
| 957 | static ssize_t devicedisabled_store(struct device *dev, |
| 958 | struct device_attribute *attr, |
| 959 | const char *buf, size_t count) |
| 960 | { |
| 961 | unsigned int id; |
| 962 | int err; |
| 963 | |
| 964 | if (kstrtouint(buf, 10, &id)) |
| 965 | return -EINVAL; |
David Kershner | ebeff05 | 2016-09-19 17:09:34 -0400 | [diff] [blame] | 966 | err = parahotplug_request_complete(id, 0); |
| 967 | if (err < 0) |
| 968 | return err; |
| 969 | return count; |
| 970 | } |
| 971 | static DEVICE_ATTR_WO(devicedisabled); |
| 972 | |
David Binder | 04dbfea | 2017-02-21 12:53:25 -0500 | [diff] [blame] | 973 | /* |
David Kershner | ebeff05 | 2016-09-19 17:09:34 -0400 | [diff] [blame] | 974 | * deviceenabled_store() - enables the hotplug device |
| 975 | * @dev: sysfs interface variable not utilized in this function |
| 976 | * @attr: sysfs interface variable not utilized in this function |
| 977 | * @buf: buffer containing the device id |
| 978 | * @count: the size of the buffer |
| 979 | * |
| 980 | * The parahotplug/deviceenabled interface gets called by our support script |
| 981 | * when an SR-IOV device has been recovered. The ID is passed to the script |
| 982 | * and then passed back when the device has been brought back up. |
| 983 | * |
| 984 | * Return: the size of the buffer for success or negative for error |
| 985 | */ |
| 986 | static ssize_t deviceenabled_store(struct device *dev, |
| 987 | struct device_attribute *attr, |
| 988 | const char *buf, size_t count) |
| 989 | { |
| 990 | unsigned int id; |
| 991 | |
| 992 | if (kstrtouint(buf, 10, &id)) |
| 993 | return -EINVAL; |
David Kershner | ebeff05 | 2016-09-19 17:09:34 -0400 | [diff] [blame] | 994 | parahotplug_request_complete(id, 1); |
| 995 | return count; |
| 996 | } |
| 997 | static DEVICE_ATTR_WO(deviceenabled); |
| 998 | |
| 999 | static struct attribute *visorchipset_install_attrs[] = { |
| 1000 | &dev_attr_toolaction.attr, |
| 1001 | &dev_attr_boottotool.attr, |
| 1002 | &dev_attr_error.attr, |
| 1003 | &dev_attr_textid.attr, |
| 1004 | &dev_attr_remaining_steps.attr, |
| 1005 | NULL |
| 1006 | }; |
| 1007 | |
Mihaela Muraru | a2d1e42 | 2016-10-16 01:41:14 +0300 | [diff] [blame] | 1008 | static const struct attribute_group visorchipset_install_group = { |
David Kershner | ebeff05 | 2016-09-19 17:09:34 -0400 | [diff] [blame] | 1009 | .name = "install", |
| 1010 | .attrs = visorchipset_install_attrs |
| 1011 | }; |
| 1012 | |
| 1013 | static struct attribute *visorchipset_parahotplug_attrs[] = { |
| 1014 | &dev_attr_devicedisabled.attr, |
| 1015 | &dev_attr_deviceenabled.attr, |
| 1016 | NULL |
| 1017 | }; |
| 1018 | |
Arvind Yadav | 1722270 | 2017-07-18 13:40:54 +0530 | [diff] [blame] | 1019 | static const struct attribute_group visorchipset_parahotplug_group = { |
David Kershner | ebeff05 | 2016-09-19 17:09:34 -0400 | [diff] [blame] | 1020 | .name = "parahotplug", |
| 1021 | .attrs = visorchipset_parahotplug_attrs |
| 1022 | }; |
| 1023 | |
| 1024 | static const struct attribute_group *visorchipset_dev_groups[] = { |
| 1025 | &visorchipset_install_group, |
| 1026 | &visorchipset_parahotplug_group, |
| 1027 | NULL |
| 1028 | }; |
| 1029 | |
David Binder | 04dbfea | 2017-02-21 12:53:25 -0500 | [diff] [blame] | 1030 | /* |
David Kershner | ebeff05 | 2016-09-19 17:09:34 -0400 | [diff] [blame] | 1031 | * parahotplug_request_kickoff() - initiate parahotplug request |
| 1032 | * @req: the request to initiate |
| 1033 | * |
| 1034 | * Cause uevent to run the user level script to do the disable/enable specified |
| 1035 | * in the parahotplug_request. |
| 1036 | */ |
Charles Daniels | e80ffd4 | 2017-08-22 13:26:57 -0400 | [diff] [blame] | 1037 | static int parahotplug_request_kickoff(struct parahotplug_request *req) |
David Kershner | ebeff05 | 2016-09-19 17:09:34 -0400 | [diff] [blame] | 1038 | { |
| 1039 | struct controlvm_message_packet *cmd = &req->msg.cmd; |
| 1040 | char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40], |
David Kershner | da56cb0 | 2017-09-27 13:14:39 -0400 | [diff] [blame] | 1041 | env_func[40]; |
| 1042 | char *envp[] = { env_cmd, env_id, env_state, env_bus, env_dev, |
| 1043 | env_func, NULL |
David Kershner | ebeff05 | 2016-09-19 17:09:34 -0400 | [diff] [blame] | 1044 | }; |
| 1045 | |
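| | /*
| | * dev_no appears to pack the PCI device number in the upper bits and the
| | * function number in the low three bits; the DEVICE and FUNCTION
| | * environment variables below are derived from that split.
| | */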
Sameer Wadgaonkar | c5a2890 | 2017-05-19 16:17:48 -0400 | [diff] [blame] | 1046 | sprintf(env_cmd, "VISOR_PARAHOTPLUG=1"); |
| 1047 | sprintf(env_id, "VISOR_PARAHOTPLUG_ID=%d", req->id); |
| 1048 | sprintf(env_state, "VISOR_PARAHOTPLUG_STATE=%d", |
David Kershner | ebeff05 | 2016-09-19 17:09:34 -0400 | [diff] [blame] | 1049 | cmd->device_change_state.state.active); |
Sameer Wadgaonkar | c5a2890 | 2017-05-19 16:17:48 -0400 | [diff] [blame] | 1050 | sprintf(env_bus, "VISOR_PARAHOTPLUG_BUS=%d", |
David Kershner | ebeff05 | 2016-09-19 17:09:34 -0400 | [diff] [blame] | 1051 | cmd->device_change_state.bus_no); |
Sameer Wadgaonkar | c5a2890 | 2017-05-19 16:17:48 -0400 | [diff] [blame] | 1052 | sprintf(env_dev, "VISOR_PARAHOTPLUG_DEVICE=%d", |
David Kershner | ebeff05 | 2016-09-19 17:09:34 -0400 | [diff] [blame] | 1053 | cmd->device_change_state.dev_no >> 3); |
Sameer Wadgaonkar | c5a2890 | 2017-05-19 16:17:48 -0400 | [diff] [blame] | 1054 | sprintf(env_func, "VISOR_PARAHOTPLUG_FUNCTION=%d", |
David Kershner | ebeff05 | 2016-09-19 17:09:34 -0400 | [diff] [blame] | 1055 | cmd->device_change_state.dev_no & 0x7); |
David Kershner | ae0fa82 | 2017-03-28 09:34:21 -0400 | [diff] [blame] | 1056 | return kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj, |
| 1057 | KOBJ_CHANGE, envp); |
David Kershner | ebeff05 | 2016-09-19 17:09:34 -0400 | [diff] [blame] | 1058 | } |
| 1059 | |
David Binder | 04dbfea | 2017-02-21 12:53:25 -0500 | [diff] [blame] | 1060 | /* |
David Binder | ec17f45 | 2016-06-10 21:48:18 -0400 | [diff] [blame] | 1061 | * parahotplug_process_message() - enables or disables a PCI device by kicking |
| 1062 | * off a udev script |
| 1063 | * @inmsg: the message indicating whether to enable or disable |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 1064 | */ |
Charles Daniels | e80ffd4 | 2017-08-22 13:26:57 -0400 | [diff] [blame] | 1065 | static int parahotplug_process_message(struct controlvm_message *inmsg) |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 1066 | { |
| 1067 | struct parahotplug_request *req; |
David Kershner | ae0fa82 | 2017-03-28 09:34:21 -0400 | [diff] [blame] | 1068 | int err; |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 1069 | |
| 1070 | req = parahotplug_request_create(inmsg); |
Benjamin Romer | 38f736e | 2015-03-16 13:58:13 -0400 | [diff] [blame] | 1071 | if (!req) |
David Kershner | 114d5dc | 2017-03-28 09:34:18 -0400 | [diff] [blame] | 1072 | return -ENOMEM; |
David Kershner | d02bde9 | 2017-04-18 16:55:05 -0400 | [diff] [blame] | 1073 | /* |
| 1074 | * For enable messages, just respond with success right away; we don't
| 1075 | * need to wait to see if the enable was successful. |
| 1076 | */ |
Benjamin Romer | 2ea5117 | 2014-10-23 14:30:25 -0400 | [diff] [blame] | 1077 | if (inmsg->cmd.device_change_state.state.active) { |
David Kershner | ae0fa82 | 2017-03-28 09:34:21 -0400 | [diff] [blame] | 1078 | err = parahotplug_request_kickoff(req); |
| 1079 | if (err) |
| 1080 | goto err_respond; |
David Kershner | 4c0e65f | 2017-04-18 16:55:01 -0400 | [diff] [blame] | 1081 | controlvm_respond(&inmsg->hdr, CONTROLVM_RESP_SUCCESS, |
| 1082 | &inmsg->cmd.device_change_state.state); |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 1083 | parahotplug_request_destroy(req); |
David Kershner | ae0fa82 | 2017-03-28 09:34:21 -0400 | [diff] [blame] | 1084 | return 0; |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 1085 | } |
David Kershner | ae0fa82 | 2017-03-28 09:34:21 -0400 | [diff] [blame] | 1086 | /* |
David Kershner | 6577cbf | 2017-08-30 13:36:29 -0400 | [diff] [blame] | 1087 | * For disable messages, add the request to the request list before |
| 1088 | * kicking off the udev script. It won't get responded to until the |
| 1089 | * script has indicated it's done. |
David Kershner | ae0fa82 | 2017-03-28 09:34:21 -0400 | [diff] [blame] | 1090 | */ |
| 1091 | spin_lock(¶hotplug_request_list_lock); |
| 1092 | list_add_tail(&req->list, ¶hotplug_request_list); |
| 1093 | spin_unlock(¶hotplug_request_list_lock); |
David Kershner | ae0fa82 | 2017-03-28 09:34:21 -0400 | [diff] [blame] | 1094 | err = parahotplug_request_kickoff(req); |
| 1095 | if (err) |
| 1096 | goto err_respond; |
David Kershner | 114d5dc | 2017-03-28 09:34:18 -0400 | [diff] [blame] | 1097 | return 0; |
David Kershner | ae0fa82 | 2017-03-28 09:34:21 -0400 | [diff] [blame] | 1098 | |
| 1099 | err_respond: |
David Kershner | 4c0e65f | 2017-04-18 16:55:01 -0400 | [diff] [blame] | 1100 | controlvm_respond(&inmsg->hdr, err, |
| 1101 | &inmsg->cmd.device_change_state.state); |
David Kershner | ae0fa82 | 2017-03-28 09:34:21 -0400 | [diff] [blame] | 1102 | return err; |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 1103 | } |
| 1104 | |
David Binder | 7289a8d | 2017-02-01 17:38:56 -0500 | [diff] [blame] | 1105 | /* |
| 1106 | * chipset_ready_uevent() - sends chipset_ready action |
David Kershner | ebeff05 | 2016-09-19 17:09:34 -0400 | [diff] [blame] | 1107 | * |
| 1108 | * Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset. |
| 1109 | * |
David Binder | 7289a8d | 2017-02-01 17:38:56 -0500 | [diff] [blame] | 1110 | * Return: 0 on success, negative on failure |
David Kershner | ebeff05 | 2016-09-19 17:09:34 -0400 | [diff] [blame] | 1111 | */ |
Charles Daniels | e80ffd4 | 2017-08-22 13:26:57 -0400 | [diff] [blame] | 1112 | static int chipset_ready_uevent(struct controlvm_message_header *msg_hdr) |
David Kershner | ebeff05 | 2016-09-19 17:09:34 -0400 | [diff] [blame] | 1113 | { |
David Kershner | deeeca6 | 2017-03-28 09:34:22 -0400 | [diff] [blame] | 1114 | int res; |
| 1115 | |
David Kershner | 040b78f | 2017-09-27 13:14:10 -0400 | [diff] [blame] | 1116 | res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj, KOBJ_ONLINE); |
David Binder | 7289a8d | 2017-02-01 17:38:56 -0500 | [diff] [blame] | 1117 | if (msg_hdr->flags.response_expected) |
David Kershner | 4c0e65f | 2017-04-18 16:55:01 -0400 | [diff] [blame] | 1118 | controlvm_respond(msg_hdr, res, NULL); |
David Kershner | deeeca6 | 2017-03-28 09:34:22 -0400 | [diff] [blame] | 1119 | return res; |
David Kershner | ebeff05 | 2016-09-19 17:09:34 -0400 | [diff] [blame] | 1120 | } |
| 1121 | |
David Binder | 7289a8d | 2017-02-01 17:38:56 -0500 | [diff] [blame] | 1122 | /* |
| 1123 | * chipset_selftest_uevent() - sends chipset_selftest action |
| 1124 | * |
| 1125 | * Send ACTION=change with SPARSP_SELFTEST=1 for DEVPATH=/sys/devices/platform/visorchipset.
| 1126 | * |
| 1127 | * Return: 0 on success, negative on failure |
| 1128 | */ |
Charles Daniels | e80ffd4 | 2017-08-22 13:26:57 -0400 | [diff] [blame] | 1129 | static int chipset_selftest_uevent(struct controlvm_message_header *msg_hdr) |
David Kershner | ebeff05 | 2016-09-19 17:09:34 -0400 | [diff] [blame] | 1130 | { |
| 1131 | char env_selftest[20]; |
| 1132 | char *envp[] = { env_selftest, NULL }; |
David Kershner | deeeca6 | 2017-03-28 09:34:22 -0400 | [diff] [blame] | 1133 | int res; |
David Kershner | ebeff05 | 2016-09-19 17:09:34 -0400 | [diff] [blame] | 1134 | |
| 1135 | sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1); |
David Kershner | deeeca6 | 2017-03-28 09:34:22 -0400 | [diff] [blame] | 1136 | res = kobject_uevent_env(&chipset_dev->acpi_device->dev.kobj, |
| 1137 | KOBJ_CHANGE, envp); |
David Binder | 7289a8d | 2017-02-01 17:38:56 -0500 | [diff] [blame] | 1138 | if (msg_hdr->flags.response_expected) |
David Kershner | 4c0e65f | 2017-04-18 16:55:01 -0400 | [diff] [blame] | 1139 | controlvm_respond(msg_hdr, res, NULL); |
David Kershner | deeeca6 | 2017-03-28 09:34:22 -0400 | [diff] [blame] | 1140 | return res; |
David Kershner | ebeff05 | 2016-09-19 17:09:34 -0400 | [diff] [blame] | 1141 | } |
| 1142 | |
David Binder | 7289a8d | 2017-02-01 17:38:56 -0500 | [diff] [blame] | 1143 | /* |
| 1144 | * chipset_notready_uevent() - sends chipset_notready action |
David Kershner | ebeff05 | 2016-09-19 17:09:34 -0400 | [diff] [blame] | 1145 | * |
| 1146 | * Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset. |
| 1147 | * |
David Binder | 7289a8d | 2017-02-01 17:38:56 -0500 | [diff] [blame] | 1148 | * Return: 0 on success, negative on failure |
David Kershner | ebeff05 | 2016-09-19 17:09:34 -0400 | [diff] [blame] | 1149 | */ |
Charles Daniels | e80ffd4 | 2017-08-22 13:26:57 -0400 | [diff] [blame] | 1150 | static int chipset_notready_uevent(struct controlvm_message_header *msg_hdr) |
David Kershner | ebeff05 | 2016-09-19 17:09:34 -0400 | [diff] [blame] | 1151 | { |
David Binder | 904ee62 | 2017-07-17 16:16:42 -0400 | [diff] [blame] | 1152 | int res = kobject_uevent(&chipset_dev->acpi_device->dev.kobj, |
David Kershner | 34fbf6a | 2017-09-27 13:14:11 -0400 | [diff] [blame] | 1153 | KOBJ_OFFLINE); |
David Binder | 904ee62 | 2017-07-17 16:16:42 -0400 | [diff] [blame] | 1154 | |
David Kershner | ebeff05 | 2016-09-19 17:09:34 -0400 | [diff] [blame] | 1155 | if (msg_hdr->flags.response_expected) |
David Kershner | 4c0e65f | 2017-04-18 16:55:01 -0400 | [diff] [blame] | 1156 | controlvm_respond(msg_hdr, res, NULL); |
David Kershner | deeeca6 | 2017-03-28 09:34:22 -0400 | [diff] [blame] | 1157 | return res; |
David Kershner | ebeff05 | 2016-09-19 17:09:34 -0400 | [diff] [blame] | 1158 | } |
| 1159 | |
David Kershner | 88845f4 | 2017-03-28 09:35:00 -0400 | [diff] [blame] | 1160 | static int unisys_vmcall(unsigned long tuple, unsigned long param) |
| 1161 | { |
| 1162 | int result = 0; |
| 1163 | unsigned int cpuid_eax, cpuid_ebx, cpuid_ecx, cpuid_edx; |
| 1164 | unsigned long reg_ebx; |
| 1165 | unsigned long reg_ecx; |
| 1166 | |
| 1167 | reg_ebx = param & 0xFFFFFFFF; |
| 1168 | reg_ecx = param >> 32; |
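| | /*
| | * CPUID leaf 1, ECX bit 31 is the "hypervisor present" bit; refuse to
| | * issue the vmcall when we are not running under a hypervisor.
| | */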
David Kershner | 88845f4 | 2017-03-28 09:35:00 -0400 | [diff] [blame] | 1169 | cpuid(0x00000001, &cpuid_eax, &cpuid_ebx, &cpuid_ecx, &cpuid_edx); |
| 1170 | if (!(cpuid_ecx & 0x80000000)) |
| 1171 | return -EPERM; |
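| | /* 0x0f 0x01 0xc1 is the raw opcode encoding of the VMCALL instruction */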
David Kershner | 88845f4 | 2017-03-28 09:35:00 -0400 | [diff] [blame] | 1172 | __asm__ __volatile__(".byte 0x00f, 0x001, 0x0c1" : "=a"(result) : |
David Kershner | 34fbf6a | 2017-09-27 13:14:11 -0400 | [diff] [blame] | 1173 | "a"(tuple), "b"(reg_ebx), "c"(reg_ecx)); |
David Kershner | bd801a0 | 2017-03-28 09:35:01 -0400 | [diff] [blame] | 1174 | if (result) |
| 1175 | goto error; |
David Kershner | bd801a0 | 2017-03-28 09:35:01 -0400 | [diff] [blame] | 1176 | return 0; |
David Kershner | 9116ae7 | 2017-09-27 13:14:26 -0400 | [diff] [blame] | 1177 | |
Sameer Wadgaonkar | ac0aba6 | 2017-06-30 15:43:08 -0400 | [diff] [blame] | 1178 | /* Need to convert from VMCALL error codes to Linux */ |
| 1179 | error: |
David Kershner | bd801a0 | 2017-03-28 09:35:01 -0400 | [diff] [blame] | 1180 | switch (result) { |
| 1181 | case VMCALL_RESULT_INVALID_PARAM: |
| 1182 | return -EINVAL; |
| 1183 | case VMCALL_RESULT_DATA_UNAVAILABLE: |
| 1184 | return -ENODEV; |
| 1185 | default: |
| 1186 | return -EFAULT; |
| 1187 | } |
David Kershner | 88845f4 | 2017-03-28 09:35:00 -0400 | [diff] [blame] | 1188 | } |
Zohaib Javed | ab61097 | 2017-08-22 13:26:53 -0400 | [diff] [blame] | 1189 | |
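| | /*
| | * controlvm_channel_create() - obtain the physical address of the
| | * controlvm channel via the VMCALL_CONTROLVM_ADDR vmcall and map it
| | */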
David Kershner | f1f537c | 2017-08-30 13:36:19 -0400 | [diff] [blame] | 1190 | static int controlvm_channel_create(struct visorchipset_device *dev) |
David Kershner | 5f3a7e3 | 2015-05-13 13:22:10 -0400 | [diff] [blame] | 1191 | { |
David Kershner | f1f537c | 2017-08-30 13:36:19 -0400 | [diff] [blame] | 1192 | struct visorchannel *chan; |
| 1193 | u64 addr; |
David Kershner | 800da5f | 2017-08-30 13:36:13 -0400 | [diff] [blame] | 1194 | int err; |
David Kershner | 5f3a7e3 | 2015-05-13 13:22:10 -0400 | [diff] [blame] | 1195 | |
David Kershner | f1f537c | 2017-08-30 13:36:19 -0400 | [diff] [blame] | 1196 | err = unisys_vmcall(VMCALL_CONTROLVM_ADDR, |
| 1197 | virt_to_phys(&dev->controlvm_params)); |
David Kershner | 800da5f | 2017-08-30 13:36:13 -0400 | [diff] [blame] | 1198 | if (err) |
| 1199 | return err; |
David Kershner | f1f537c | 2017-08-30 13:36:19 -0400 | [diff] [blame] | 1200 | addr = dev->controlvm_params.address; |
Sameer Wadgaonkar | 9047667 | 2017-09-27 13:14:44 -0400 | [diff] [blame] | 1201 | chan = visorchannel_create(addr, GFP_KERNEL, |
| 1202 | &visor_controlvm_channel_guid, true); |
David Kershner | f1f537c | 2017-08-30 13:36:19 -0400 | [diff] [blame] | 1203 | if (!chan) |
| 1204 | return -ENOMEM; |
| 1205 | dev->controlvm_channel = chan; |
David Kershner | bd801a0 | 2017-03-28 09:35:01 -0400 | [diff] [blame] | 1206 | return 0; |
David Kershner | 5f3a7e3 | 2015-05-13 13:22:10 -0400 | [diff] [blame] | 1207 | } |
| 1208 | |
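| | /*
| | * setup_crash_devices_work_queue() - replay the saved crash bus and
| | * storage device CREATE messages from the controlvm channel
| | */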
Charles Daniels | e80ffd4 | 2017-08-22 13:26:57 -0400 | [diff] [blame] | 1209 | static void setup_crash_devices_work_queue(struct work_struct *work) |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 1210 | { |
Benjamin Romer | e6bdb90 | 2015-03-16 13:58:33 -0400 | [diff] [blame] | 1211 | struct controlvm_message local_crash_bus_msg; |
| 1212 | struct controlvm_message local_crash_dev_msg; |
Benjamin Romer | 3ab4770 | 2014-10-23 14:30:31 -0400 | [diff] [blame] | 1213 | struct controlvm_message msg; |
Benjamin Romer | e6bdb90 | 2015-03-16 13:58:33 -0400 | [diff] [blame] | 1214 | u32 local_crash_msg_offset; |
| 1215 | u16 local_crash_msg_count; |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 1216 | |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 1217 | /* send init chipset msg */ |
Benjamin Romer | 98d7b59 | 2014-10-23 14:30:26 -0400 | [diff] [blame] | 1218 | msg.hdr.id = CONTROLVM_CHIPSET_INIT; |
Benjamin Romer | 2ea5117 | 2014-10-23 14:30:25 -0400 | [diff] [blame] | 1219 | msg.cmd.init_chipset.bus_count = 23; |
| 1220 | msg.cmd.init_chipset.switch_count = 0; |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 1221 | chipset_init(&msg); |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 1222 | /* get saved message count */ |
Sameer Wadgaonkar | 765b2f8 | 2017-02-21 12:53:29 -0500 | [diff] [blame] | 1223 | if (visorchannel_read(chipset_dev->controlvm_channel, |
Sameer Wadgaonkar | 545f091 | 2017-05-19 16:17:49 -0400 | [diff] [blame] | 1224 | offsetof(struct visor_controlvm_channel, |
Benjamin Romer | d19642f | 2014-10-23 14:30:34 -0400 | [diff] [blame] | 1225 | saved_crash_message_count), |
Benjamin Romer | e6bdb90 | 2015-03-16 13:58:33 -0400 | [diff] [blame] | 1226 | &local_crash_msg_count, sizeof(u16)) < 0) { |
David Kershner | 0f7453a | 2017-04-18 16:55:22 -0400 | [diff] [blame] | 1227 | dev_err(&chipset_dev->acpi_device->dev, |
| 1228 | "failed to read channel\n"); |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 1229 | return; |
| 1230 | } |
Benjamin Romer | e6bdb90 | 2015-03-16 13:58:33 -0400 | [diff] [blame] | 1231 | if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) { |
David Kershner | 040b78f | 2017-09-27 13:14:10 -0400 | [diff] [blame] | 1232 | dev_err(&chipset_dev->acpi_device->dev, "invalid count\n"); |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 1233 | return; |
| 1234 | } |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 1235 | /* get saved crash message offset */ |
Sameer Wadgaonkar | 765b2f8 | 2017-02-21 12:53:29 -0500 | [diff] [blame] | 1236 | if (visorchannel_read(chipset_dev->controlvm_channel, |
Sameer Wadgaonkar | 545f091 | 2017-05-19 16:17:49 -0400 | [diff] [blame] | 1237 | offsetof(struct visor_controlvm_channel, |
Benjamin Romer | d19642f | 2014-10-23 14:30:34 -0400 | [diff] [blame] | 1238 | saved_crash_message_offset), |
Benjamin Romer | e6bdb90 | 2015-03-16 13:58:33 -0400 | [diff] [blame] | 1239 | &local_crash_msg_offset, sizeof(u32)) < 0) { |
David Kershner | 0f7453a | 2017-04-18 16:55:22 -0400 | [diff] [blame] | 1240 | dev_err(&chipset_dev->acpi_device->dev, |
| 1241 | "failed to read channel\n"); |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 1242 | return; |
| 1243 | } |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 1244 | /* read create device message for storage bus offset */ |
Sameer Wadgaonkar | 765b2f8 | 2017-02-21 12:53:29 -0500 | [diff] [blame] | 1245 | if (visorchannel_read(chipset_dev->controlvm_channel, |
Benjamin Romer | e6bdb90 | 2015-03-16 13:58:33 -0400 | [diff] [blame] | 1246 | local_crash_msg_offset, |
| 1247 | &local_crash_bus_msg, |
Benjamin Romer | 3ab4770 | 2014-10-23 14:30:31 -0400 | [diff] [blame] | 1248 | sizeof(struct controlvm_message)) < 0) { |
David Kershner | 0f7453a | 2017-04-18 16:55:22 -0400 | [diff] [blame] | 1249 | dev_err(&chipset_dev->acpi_device->dev, |
| 1250 | "failed to read channel\n"); |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 1251 | return; |
| 1252 | } |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 1253 | /* read create device message for storage device */ |
Sameer Wadgaonkar | 765b2f8 | 2017-02-21 12:53:29 -0500 | [diff] [blame] | 1254 | if (visorchannel_read(chipset_dev->controlvm_channel, |
Benjamin Romer | e6bdb90 | 2015-03-16 13:58:33 -0400 | [diff] [blame] | 1255 | local_crash_msg_offset + |
Benjamin Romer | 3ab4770 | 2014-10-23 14:30:31 -0400 | [diff] [blame] | 1256 | sizeof(struct controlvm_message), |
Benjamin Romer | e6bdb90 | 2015-03-16 13:58:33 -0400 | [diff] [blame] | 1257 | &local_crash_dev_msg, |
Benjamin Romer | 3ab4770 | 2014-10-23 14:30:31 -0400 | [diff] [blame] | 1258 | sizeof(struct controlvm_message)) < 0) { |
David Kershner | 0f7453a | 2017-04-18 16:55:22 -0400 | [diff] [blame] | 1259 | dev_err(&chipset_dev->acpi_device->dev, |
| 1260 | "failed to read channel\n"); |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 1261 | return; |
| 1262 | } |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 1263 | /* reuse IOVM create bus message */ |
David Kershner | d9b89ef | 2017-04-18 16:55:21 -0400 | [diff] [blame] | 1264 | if (!local_crash_bus_msg.cmd.create_bus.channel_addr) { |
David Kershner | 0f7453a | 2017-04-18 16:55:22 -0400 | [diff] [blame] | 1265 | dev_err(&chipset_dev->acpi_device->dev, |
| 1266 | "no valid create_bus message\n"); |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 1267 | return; |
| 1268 | } |
Sameer Wadgaonkar | ec17cb8 | 2017-05-19 16:17:36 -0400 | [diff] [blame] | 1269 | visorbus_create(&local_crash_bus_msg); |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 1270 | /* reuse create device message for storage device */ |
David Kershner | d9b89ef | 2017-04-18 16:55:21 -0400 | [diff] [blame] | 1271 | if (!local_crash_dev_msg.cmd.create_device.channel_addr) { |
David Kershner | 0f7453a | 2017-04-18 16:55:22 -0400 | [diff] [blame] | 1272 | dev_err(&chipset_dev->acpi_device->dev, |
| 1273 | "no valid create_device message\n"); |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 1274 | return; |
| 1275 | } |
Sameer Wadgaonkar | 8b0a6cf | 2017-05-19 16:17:43 -0400 | [diff] [blame] | 1276 | visorbus_device_create(&local_crash_dev_msg); |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 1277 | } |
| 1278 | |
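| | /*
| | * visorbus_response() - respond to the controlvm message still pending
| | * for @bus_info with @response, then free the saved message header
| | */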
Sameer Wadgaonkar | 76956aa | 2017-08-22 13:27:03 -0400 | [diff] [blame] | 1279 | void visorbus_response(struct visor_device *bus_info, int response, |
| 1280 | int controlvm_id) |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 1281 | { |
David Kershner | fd9e450 | 2017-09-27 13:14:16 -0400 | [diff] [blame] | 1282 | if (!bus_info->pending_msg_hdr) |
| 1283 | return; |
Don Zickus | 0274b5a | 2015-06-01 13:00:27 -0400 | [diff] [blame] | 1284 | |
David Kershner | fd9e450 | 2017-09-27 13:14:16 -0400 | [diff] [blame] | 1285 | controlvm_responder(controlvm_id, bus_info->pending_msg_hdr, response); |
Don Zickus | 0274b5a | 2015-06-01 13:00:27 -0400 | [diff] [blame] | 1286 | kfree(bus_info->pending_msg_hdr); |
| 1287 | bus_info->pending_msg_hdr = NULL; |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 1288 | } |
| 1289 | |
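| | /*
| | * visorbus_device_changestate_response() - complete the pending
| | * DEVICE_CHANGESTATE request for @dev_info with @response and @state,
| | * then free the saved message header
| | */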
Sameer Wadgaonkar | 722e73d | 2017-08-22 13:27:04 -0400 | [diff] [blame] | 1290 | void visorbus_device_changestate_response(struct visor_device *dev_info, |
| 1291 | int response, |
| 1292 | struct visor_segment_state state) |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 1293 | { |
David Kershner | fd9e450 | 2017-09-27 13:14:16 -0400 | [diff] [blame] | 1294 | if (!dev_info->pending_msg_hdr) |
| 1295 | return; |
| 1296 | |
David Kershner | 040b78f | 2017-09-27 13:14:10 -0400 | [diff] [blame] | 1297 | device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE, dev_info, |
| 1298 | response, state); |
Don Zickus | 0274b5a | 2015-06-01 13:00:27 -0400 | [diff] [blame] | 1299 | kfree(dev_info->pending_msg_hdr); |
| 1300 | dev_info->pending_msg_hdr = NULL; |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 1301 | } |
| 1302 | |
David Kershner | 39b486d | 2017-08-30 13:36:27 -0400 | [diff] [blame] | 1303 | static void parser_done(struct parser_context *ctx) |
| 1304 | { |
| 1305 | chipset_dev->controlvm_payload_bytes_buffered -= ctx->param_bytes; |
| 1306 | kfree(ctx); |
| 1307 | } |
| 1308 | |
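| | /*
| | * parser_init_stream() - copy a controlvm message payload out of guest
| | * physical memory into a newly allocated parser_context
| | * @addr:  physical address of the payload
| | * @bytes: size of the payload in bytes
| | * @retry: set to true if the caller should retry later (the payload
| | *         buffering limit was hit or the allocation failed)
| | *
| | * Return: the new context, or NULL on failure
| | */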
David Kershner | 4531143 | 2017-08-30 13:36:28 -0400 | [diff] [blame] | 1309 | static struct parser_context *parser_init_stream(u64 addr, u32 bytes, |
| 1310 | bool *retry) |
David Kershner | 612b81c | 2016-09-19 17:09:22 -0400 | [diff] [blame] | 1311 | { |
Tim Sell | a5eb218 | 2017-11-17 12:27:38 -0500 | [diff] [blame] | 1312 | unsigned long allocbytes; |
David Kershner | 612b81c | 2016-09-19 17:09:22 -0400 | [diff] [blame] | 1313 | struct parser_context *ctx; |
Erik Arfvidson | a35e326 | 2017-07-17 16:17:09 -0400 | [diff] [blame] | 1314 | void *mapping; |
David Kershner | 612b81c | 2016-09-19 17:09:22 -0400 | [diff] [blame] | 1315 | |
David Binder | 3e4273d | 2016-12-22 11:09:00 -0500 | [diff] [blame] | 1316 | *retry = false; |
David Kershner | 26a42c2 | 2017-08-30 13:36:24 -0400 | [diff] [blame] | 1317 | /* alloc an extra byte to ensure payload is \0 terminated */ |
Tim Sell | a5eb218 | 2017-11-17 12:27:38 -0500 | [diff] [blame] | 1318 | allocbytes = (unsigned long)bytes + 1 + (sizeof(struct parser_context) - |
David Kershner | 26a42c2 | 2017-08-30 13:36:24 -0400 | [diff] [blame] | 1319 | sizeof(struct visor_controlvm_parameters_header)); |
David Kershner | 040b78f | 2017-09-27 13:14:10 -0400 | [diff] [blame] | 1320 | if ((chipset_dev->controlvm_payload_bytes_buffered + bytes) > |
| 1321 | MAX_CONTROLVM_PAYLOAD_BYTES) { |
David Binder | 3e4273d | 2016-12-22 11:09:00 -0500 | [diff] [blame] | 1322 | *retry = true; |
David Kershner | 612b81c | 2016-09-19 17:09:22 -0400 | [diff] [blame] | 1323 | return NULL; |
| 1324 | } |
David Kershner | 8c8c975 | 2017-04-18 16:55:08 -0400 | [diff] [blame] | 1325 | ctx = kzalloc(allocbytes, GFP_KERNEL); |
David Kershner | 612b81c | 2016-09-19 17:09:22 -0400 | [diff] [blame] | 1326 | if (!ctx) { |
David Binder | 3e4273d | 2016-12-22 11:09:00 -0500 | [diff] [blame] | 1327 | *retry = true; |
David Kershner | 612b81c | 2016-09-19 17:09:22 -0400 | [diff] [blame] | 1328 | return NULL; |
| 1329 | } |
David Kershner | 612b81c | 2016-09-19 17:09:22 -0400 | [diff] [blame] | 1330 | ctx->allocbytes = allocbytes; |
| 1331 | ctx->param_bytes = bytes; |
Erik Arfvidson | a35e326 | 2017-07-17 16:17:09 -0400 | [diff] [blame] | 1332 | mapping = memremap(addr, bytes, MEMREMAP_WB); |
| 1333 | if (!mapping) |
| 1334 | goto err_finish_ctx; |
David Kershner | 26a42c2 | 2017-08-30 13:36:24 -0400 | [diff] [blame] | 1335 | memcpy(&ctx->data, mapping, bytes); |
Erik Arfvidson | a35e326 | 2017-07-17 16:17:09 -0400 | [diff] [blame] | 1336 | memunmap(mapping); |
David Kershner | 612b81c | 2016-09-19 17:09:22 -0400 | [diff] [blame] | 1337 | ctx->byte_stream = true; |
Sameer Wadgaonkar | 765b2f8 | 2017-02-21 12:53:29 -0500 | [diff] [blame] | 1338 | chipset_dev->controlvm_payload_bytes_buffered += ctx->param_bytes; |
David Kershner | 612b81c | 2016-09-19 17:09:22 -0400 | [diff] [blame] | 1339 | return ctx; |
| 1340 | |
| 1341 | err_finish_ctx: |
Sameer Wadgaonkar | 90544cb | 2017-08-30 13:36:09 -0400 | [diff] [blame] | 1342 | kfree(ctx); |
David Kershner | 612b81c | 2016-09-19 17:09:22 -0400 | [diff] [blame] | 1343 | return NULL; |
| 1344 | } |
| 1345 | |
David Binder | 04dbfea | 2017-02-21 12:53:25 -0500 | [diff] [blame] | 1346 | /* |
David Kershner | 511474a | 2016-09-19 17:09:21 -0400 | [diff] [blame] | 1347 | * handle_command() - process a controlvm message |
| 1348 | * @inmsg: the message to process |
| 1349 | * @channel_addr: address of the controlvm channel |
| 1350 | * |
| 1351 | * Return: |
David Kershner | 25a5128 | 2017-03-28 09:34:19 -0400 | [diff] [blame] | 1352 | * 0 - Successfully processed the message |
| 1353 | * -EAGAIN - ControlVM message was not processed and should be retried |
| 1354 | * reading the next controlvm message; a scenario where this can |
| 1355 | * occur is when we need to throttle the allocation of memory in |
| 1356 | * which to copy out controlvm payload data. |
| 1357 | * < 0 - error: ControlVM message was processed but an error occurred. |
David Kershner | 511474a | 2016-09-19 17:09:21 -0400 | [diff] [blame] | 1358 | */ |
Charles Daniels | e80ffd4 | 2017-08-22 13:26:57 -0400 | [diff] [blame] | 1359 | static int handle_command(struct controlvm_message inmsg, u64 channel_addr) |
David Kershner | 511474a | 2016-09-19 17:09:21 -0400 | [diff] [blame] | 1360 | { |
| 1361 | struct controlvm_message_packet *cmd = &inmsg.cmd; |
| 1362 | u64 parm_addr; |
| 1363 | u32 parm_bytes; |
| 1364 | struct parser_context *parser_ctx = NULL; |
David Kershner | 511474a | 2016-09-19 17:09:21 -0400 | [diff] [blame] | 1365 | struct controlvm_message ackmsg; |
David Kershner | 25a5128 | 2017-03-28 09:34:19 -0400 | [diff] [blame] | 1366 | int err = 0; |
David Kershner | 511474a | 2016-09-19 17:09:21 -0400 | [diff] [blame] | 1367 | |
| 1368 | /* create parsing context if necessary */ |
David Kershner | 511474a | 2016-09-19 17:09:21 -0400 | [diff] [blame] | 1369 | parm_addr = channel_addr + inmsg.hdr.payload_vm_offset; |
| 1370 | parm_bytes = inmsg.hdr.payload_bytes; |
David Kershner | 511474a | 2016-09-19 17:09:21 -0400 | [diff] [blame] | 1371 | /* |
| 1372 | * Parameter and channel addresses within test messages actually lie |
| 1373 | * within our OS-controlled memory. We need to know that, because it |
| 1374 | * makes a difference in how we compute the virtual address. |
| 1375 | */ |
David Kershner | 4d77e60 | 2017-08-30 13:36:22 -0400 | [diff] [blame] | 1376 | if (parm_bytes) { |
David Kershner | ef7b9dc | 2017-09-27 13:14:37 -0400 | [diff] [blame] | 1377 | bool retry; |
David Kershner | 511474a | 2016-09-19 17:09:21 -0400 | [diff] [blame] | 1378 | |
David Kershner | 4531143 | 2017-08-30 13:36:28 -0400 | [diff] [blame] | 1379 | parser_ctx = parser_init_stream(parm_addr, parm_bytes, &retry); |
David Kershner | 511474a | 2016-09-19 17:09:21 -0400 | [diff] [blame] | 1380 | if (!parser_ctx && retry) |
David Kershner | 25a5128 | 2017-03-28 09:34:19 -0400 | [diff] [blame] | 1381 | return -EAGAIN; |
David Kershner | 511474a | 2016-09-19 17:09:21 -0400 | [diff] [blame] | 1382 | } |
Erik Arfvidson | a35e326 | 2017-07-17 16:17:09 -0400 | [diff] [blame] | 1383 | controlvm_init_response(&ackmsg, &inmsg.hdr, CONTROLVM_RESP_SUCCESS); |
| 1384 | err = visorchannel_signalinsert(chipset_dev->controlvm_channel, |
| 1385 | CONTROLVM_QUEUE_ACK, &ackmsg); |
| 1386 | if (err) |
| 1387 | return err; |
David Kershner | 511474a | 2016-09-19 17:09:21 -0400 | [diff] [blame] | 1388 | switch (inmsg.hdr.id) { |
| 1389 | case CONTROLVM_CHIPSET_INIT: |
David Kershner | 25a5128 | 2017-03-28 09:34:19 -0400 | [diff] [blame] | 1390 | err = chipset_init(&inmsg); |
David Kershner | 511474a | 2016-09-19 17:09:21 -0400 | [diff] [blame] | 1391 | break; |
| 1392 | case CONTROLVM_BUS_CREATE: |
Sameer Wadgaonkar | ec17cb8 | 2017-05-19 16:17:36 -0400 | [diff] [blame] | 1393 | err = visorbus_create(&inmsg); |
David Kershner | 511474a | 2016-09-19 17:09:21 -0400 | [diff] [blame] | 1394 | break; |
| 1395 | case CONTROLVM_BUS_DESTROY: |
Sameer Wadgaonkar | ec17cb8 | 2017-05-19 16:17:36 -0400 | [diff] [blame] | 1396 | err = visorbus_destroy(&inmsg); |
David Kershner | 511474a | 2016-09-19 17:09:21 -0400 | [diff] [blame] | 1397 | break; |
| 1398 | case CONTROLVM_BUS_CONFIGURE: |
Sameer Wadgaonkar | ec17cb8 | 2017-05-19 16:17:36 -0400 | [diff] [blame] | 1399 | err = visorbus_configure(&inmsg, parser_ctx); |
David Kershner | 511474a | 2016-09-19 17:09:21 -0400 | [diff] [blame] | 1400 | break; |
| 1401 | case CONTROLVM_DEVICE_CREATE: |
Sameer Wadgaonkar | 8b0a6cf | 2017-05-19 16:17:43 -0400 | [diff] [blame] | 1402 | err = visorbus_device_create(&inmsg); |
David Kershner | 511474a | 2016-09-19 17:09:21 -0400 | [diff] [blame] | 1403 | break; |
| 1404 | case CONTROLVM_DEVICE_CHANGESTATE: |
| 1405 | if (cmd->device_change_state.flags.phys_device) { |
David Kershner | 25a5128 | 2017-03-28 09:34:19 -0400 | [diff] [blame] | 1406 | err = parahotplug_process_message(&inmsg); |
David Kershner | 511474a | 2016-09-19 17:09:21 -0400 | [diff] [blame] | 1407 | } else { |
| 1408 | /* |
David Kershner | 6577cbf | 2017-08-30 13:36:29 -0400 | [diff] [blame] | 1409 | * save the hdr and cmd structures for later use when |
| 1410 | * sending back the response to Command |
David Kershner | 511474a | 2016-09-19 17:09:21 -0400 | [diff] [blame] | 1411 | */ |
Sameer Wadgaonkar | 8b0a6cf | 2017-05-19 16:17:43 -0400 | [diff] [blame] | 1412 | err = visorbus_device_changestate(&inmsg); |
David Kershner | 511474a | 2016-09-19 17:09:21 -0400 | [diff] [blame] | 1413 | break; |
| 1414 | } |
| 1415 | break; |
| 1416 | case CONTROLVM_DEVICE_DESTROY: |
Sameer Wadgaonkar | 8b0a6cf | 2017-05-19 16:17:43 -0400 | [diff] [blame] | 1417 | err = visorbus_device_destroy(&inmsg); |
David Kershner | 511474a | 2016-09-19 17:09:21 -0400 | [diff] [blame] | 1418 | break; |
| 1419 | case CONTROLVM_DEVICE_CONFIGURE: |
David Kershner | 25a5128 | 2017-03-28 09:34:19 -0400 | [diff] [blame] | 1420 | /* no op; just send a response indicating success */
David Kershner | 511474a | 2016-09-19 17:09:21 -0400 | [diff] [blame] | 1421 | if (inmsg.hdr.flags.response_expected) |
David Kershner | 4c0e65f | 2017-04-18 16:55:01 -0400 | [diff] [blame] | 1422 | controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS, |
| 1423 | NULL); |
David Kershner | 511474a | 2016-09-19 17:09:21 -0400 | [diff] [blame] | 1424 | break; |
| 1425 | case CONTROLVM_CHIPSET_READY: |
David Kershner | 25a5128 | 2017-03-28 09:34:19 -0400 | [diff] [blame] | 1426 | err = chipset_ready_uevent(&inmsg.hdr); |
David Kershner | 511474a | 2016-09-19 17:09:21 -0400 | [diff] [blame] | 1427 | break; |
| 1428 | case CONTROLVM_CHIPSET_SELFTEST: |
David Kershner | 25a5128 | 2017-03-28 09:34:19 -0400 | [diff] [blame] | 1429 | err = chipset_selftest_uevent(&inmsg.hdr); |
David Kershner | 511474a | 2016-09-19 17:09:21 -0400 | [diff] [blame] | 1430 | break; |
| 1431 | case CONTROLVM_CHIPSET_STOP: |
David Kershner | 25a5128 | 2017-03-28 09:34:19 -0400 | [diff] [blame] | 1432 | err = chipset_notready_uevent(&inmsg.hdr); |
David Kershner | 511474a | 2016-09-19 17:09:21 -0400 | [diff] [blame] | 1433 | break; |
| 1434 | default: |
David Kershner | 25a5128 | 2017-03-28 09:34:19 -0400 | [diff] [blame] | 1435 | err = -ENOMSG; |
David Kershner | 511474a | 2016-09-19 17:09:21 -0400 | [diff] [blame] | 1436 | if (inmsg.hdr.flags.response_expected) |
David Kershner | 25a5128 | 2017-03-28 09:34:19 -0400 | [diff] [blame] | 1437 | controlvm_respond(&inmsg.hdr, |
David Kershner | 4c0e65f | 2017-04-18 16:55:01 -0400 | [diff] [blame] | 1438 | -CONTROLVM_RESP_ID_UNKNOWN, NULL); |
David Kershner | 511474a | 2016-09-19 17:09:21 -0400 | [diff] [blame] | 1439 | break; |
| 1440 | } |
David Kershner | 511474a | 2016-09-19 17:09:21 -0400 | [diff] [blame] | 1441 | if (parser_ctx) { |
| 1442 | parser_done(parser_ctx); |
| 1443 | parser_ctx = NULL; |
| 1444 | } |
David Kershner | 25a5128 | 2017-03-28 09:34:19 -0400 | [diff] [blame] | 1445 | return err; |
David Kershner | 511474a | 2016-09-19 17:09:21 -0400 | [diff] [blame] | 1446 | } |
| 1447 | |
David Binder | 04dbfea | 2017-02-21 12:53:25 -0500 | [diff] [blame] | 1448 | /* |
David Kershner | 8a28532 | 2016-09-19 17:09:23 -0400 | [diff] [blame] | 1449 | * read_controlvm_event() - retrieves the next message from the
| 1450 | * CONTROLVM_QUEUE_EVENT queue in the controlvm |
| 1451 | * channel |
| 1452 | * @msg: pointer to the retrieved message |
| 1453 | * |
David Kershner | 25a5128 | 2017-03-28 09:34:19 -0400 | [diff] [blame] | 1454 | * Return: 0 if a valid message was retrieved, or a negative error code
David Kershner | 8a28532 | 2016-09-19 17:09:23 -0400 | [diff] [blame] | 1455 | */ |
Charles Daniels | e80ffd4 | 2017-08-22 13:26:57 -0400 | [diff] [blame] | 1456 | static int read_controlvm_event(struct controlvm_message *msg) |
David Kershner | 8a28532 | 2016-09-19 17:09:23 -0400 | [diff] [blame] | 1457 | { |
David Binder | 904ee62 | 2017-07-17 16:16:42 -0400 | [diff] [blame] | 1458 | int err = visorchannel_signalremove(chipset_dev->controlvm_channel, |
David Kershner | da56cb0 | 2017-09-27 13:14:39 -0400 | [diff] [blame] | 1459 | CONTROLVM_QUEUE_EVENT, msg); |
David Kershner | 9116ae7 | 2017-09-27 13:14:26 -0400 | [diff] [blame] | 1460 | |
David Kershner | 25a5128 | 2017-03-28 09:34:19 -0400 | [diff] [blame] | 1461 | if (err) |
| 1462 | return err; |
David Kershner | 25a5128 | 2017-03-28 09:34:19 -0400 | [diff] [blame] | 1463 | /* got a message */ |
| 1464 | if (msg->hdr.flags.test_message == 1) |
| 1465 | return -EINVAL; |
David Kershner | 25a5128 | 2017-03-28 09:34:19 -0400 | [diff] [blame] | 1466 | return 0; |
David Kershner | 8a28532 | 2016-09-19 17:09:23 -0400 | [diff] [blame] | 1467 | } |
| 1468 | |
David Binder | 04dbfea | 2017-02-21 12:53:25 -0500 | [diff] [blame] | 1469 | /* |
David Kershner | a9c7393 | 2016-09-19 17:09:24 -0400 | [diff] [blame] | 1470 | * parahotplug_process_list() - remove any request from the list that's been on |
| 1471 | * there too long and respond with an error |
| 1472 | */ |
Charles Daniels | e80ffd4 | 2017-08-22 13:26:57 -0400 | [diff] [blame] | 1473 | static void parahotplug_process_list(void) |
David Kershner | a9c7393 | 2016-09-19 17:09:24 -0400 | [diff] [blame] | 1474 | { |
| 1475 | struct list_head *pos; |
| 1476 | struct list_head *tmp; |
| 1477 | |
| 1478 | spin_lock(¶hotplug_request_list_lock); |
David Kershner | a9c7393 | 2016-09-19 17:09:24 -0400 | [diff] [blame] | 1479 | list_for_each_safe(pos, tmp, ¶hotplug_request_list) { |
| 1480 | struct parahotplug_request *req = |
| 1481 | list_entry(pos, struct parahotplug_request, list); |
| 1482 | |
| 1483 | if (!time_after_eq(jiffies, req->expiration)) |
| 1484 | continue; |
David Kershner | a9c7393 | 2016-09-19 17:09:24 -0400 | [diff] [blame] | 1485 | list_del(pos); |
| 1486 | if (req->msg.hdr.flags.response_expected) |
David Kershner | 4c0e65f | 2017-04-18 16:55:01 -0400 | [diff] [blame] | 1487 | controlvm_respond( |
David Kershner | a9c7393 | 2016-09-19 17:09:24 -0400 | [diff] [blame] | 1488 | &req->msg.hdr, |
Sameer Wadgaonkar | 98f9ed9 | 2016-12-22 11:08:58 -0500 | [diff] [blame] | 1489 | CONTROLVM_RESP_DEVICE_UDEV_TIMEOUT, |
David Kershner | 4c0e65f | 2017-04-18 16:55:01 -0400 | [diff] [blame] | 1490 | &req->msg.cmd.device_change_state.state); |
David Kershner | a9c7393 | 2016-09-19 17:09:24 -0400 | [diff] [blame] | 1491 | parahotplug_request_destroy(req); |
| 1492 | } |
David Kershner | a9c7393 | 2016-09-19 17:09:24 -0400 | [diff] [blame] | 1493 | spin_unlock(¶hotplug_request_list_lock); |
| 1494 | } |
| 1495 | |
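| | /*
| | * controlvm_periodic_work() - periodic poll of the controlvm channel:
| | * drain the response queue, handle pending and newly arriving event
| | * messages, expire stale parahotplug requests, and adjust the poll rate
| | */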
Charles Daniels | e80ffd4 | 2017-08-22 13:26:57 -0400 | [diff] [blame] | 1496 | static void controlvm_periodic_work(struct work_struct *work) |
David Kershner | 3d8394c | 2016-09-19 17:09:20 -0400 | [diff] [blame] | 1497 | { |
| 1498 | struct controlvm_message inmsg; |
David Kershner | 04dbc09 | 2017-04-18 16:55:06 -0400 | [diff] [blame] | 1499 | int count = 0; |
David Kershner | fbc1023 | 2017-03-28 09:34:20 -0400 | [diff] [blame] | 1500 | int err; |
David Kershner | 3d8394c | 2016-09-19 17:09:20 -0400 | [diff] [blame] | 1501 | |
David Kershner | fbc1023 | 2017-03-28 09:34:20 -0400 | [diff] [blame] | 1502 | /* Drain the RESPONSE queue until it is empty */
| 1503 | do { |
| 1504 | err = visorchannel_signalremove(chipset_dev->controlvm_channel, |
| 1505 | CONTROLVM_QUEUE_RESPONSE, |
| 1506 | &inmsg); |
David Kershner | 04dbc09 | 2017-04-18 16:55:06 -0400 | [diff] [blame] | 1507 | } while ((!err) && (++count < CONTROLVM_MESSAGE_MAX)); |
David Kershner | fbc1023 | 2017-03-28 09:34:20 -0400 | [diff] [blame] | 1508 | if (err != -EAGAIN) |
| 1509 | goto schedule_out; |
David Kershner | fbc1023 | 2017-03-28 09:34:20 -0400 | [diff] [blame] | 1510 | if (chipset_dev->controlvm_pending_msg_valid) { |
| 1511 | /* |
David Kershner | 6577cbf | 2017-08-30 13:36:29 -0400 | [diff] [blame] | 1512 | * we throttled processing of a prior msg, so try to process |
| 1513 | * it again rather than reading a new one |
David Kershner | fbc1023 | 2017-03-28 09:34:20 -0400 | [diff] [blame] | 1514 | */ |
| 1515 | inmsg = chipset_dev->controlvm_pending_msg; |
| 1516 | chipset_dev->controlvm_pending_msg_valid = false; |
| 1517 | err = 0; |
| 1518 | } else { |
| 1519 | err = read_controlvm_event(&inmsg); |
David Kershner | 3d8394c | 2016-09-19 17:09:20 -0400 | [diff] [blame] | 1520 | } |
David Kershner | fbc1023 | 2017-03-28 09:34:20 -0400 | [diff] [blame] | 1521 | while (!err) { |
Sameer Wadgaonkar | 765b2f8 | 2017-02-21 12:53:29 -0500 | [diff] [blame] | 1522 | chipset_dev->most_recent_message_jiffies = jiffies; |
David Kershner | fbc1023 | 2017-03-28 09:34:20 -0400 | [diff] [blame] | 1523 | err = handle_command(inmsg, |
| 1524 | visorchannel_get_physaddr |
| 1525 | (chipset_dev->controlvm_channel)); |
| 1526 | if (err == -EAGAIN) { |
Sameer Wadgaonkar | 765b2f8 | 2017-02-21 12:53:29 -0500 | [diff] [blame] | 1527 | chipset_dev->controlvm_pending_msg = inmsg; |
| 1528 | chipset_dev->controlvm_pending_msg_valid = true; |
David Kershner | fbc1023 | 2017-03-28 09:34:20 -0400 | [diff] [blame] | 1529 | break; |
David Kershner | 3d8394c | 2016-09-19 17:09:20 -0400 | [diff] [blame] | 1530 | } |
David Kershner | fbc1023 | 2017-03-28 09:34:20 -0400 | [diff] [blame] | 1531 | |
| 1532 | err = read_controlvm_event(&inmsg); |
David Kershner | 3d8394c | 2016-09-19 17:09:20 -0400 | [diff] [blame] | 1533 | } |
David Kershner | 3d8394c | 2016-09-19 17:09:20 -0400 | [diff] [blame] | 1534 | /* Process any outstanding parahotplug requests */ |
| 1535 | parahotplug_process_list(); |
| 1536 | |
Sameer Wadgaonkar | d36c485 | 2017-05-19 16:17:58 -0400 | [diff] [blame] | 1537 | /* |
 | 1538 | * Controlvm messages are sent in bursts. If we start receiving |
 | 1539 | * messages, we want the polling to be fast. If we do not receive |
 | 1540 | * any message for MIN_IDLE_SECONDS, we can slow down the polling. |
| 1541 | */ |
David Kershner | fbc1023 | 2017-03-28 09:34:20 -0400 | [diff] [blame] | 1542 | schedule_out: |
Sameer Wadgaonkar | 765b2f8 | 2017-02-21 12:53:29 -0500 | [diff] [blame] | 1543 | if (time_after(jiffies, chipset_dev->most_recent_message_jiffies + |
| 1544 | (HZ * MIN_IDLE_SECONDS))) { |
David Kershner | 3d8394c | 2016-09-19 17:09:20 -0400 | [diff] [blame] | 1545 | /* |
David Kershner | 6577cbf | 2017-08-30 13:36:29 -0400 | [diff] [blame] | 1546 | * it's been longer than MIN_IDLE_SECONDS since we processed |
| 1547 | * our last controlvm message; slow down the polling |
David Kershner | 3d8394c | 2016-09-19 17:09:20 -0400 | [diff] [blame] | 1548 | */ |
David Kershner | 3fbee19 | 2017-09-27 13:14:38 -0400 | [diff] [blame] | 1549 | if (chipset_dev->poll_jiffies != POLLJIFFIES_CONTROLVM_SLOW) |
| 1550 | chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_SLOW; |
David Kershner | 3d8394c | 2016-09-19 17:09:20 -0400 | [diff] [blame] | 1551 | } else { |
David Kershner | 3fbee19 | 2017-09-27 13:14:38 -0400 | [diff] [blame] | 1552 | if (chipset_dev->poll_jiffies != POLLJIFFIES_CONTROLVM_FAST) |
| 1553 | chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_FAST; |
David Kershner | 3d8394c | 2016-09-19 17:09:20 -0400 | [diff] [blame] | 1554 | } |
Sameer Wadgaonkar | 765b2f8 | 2017-02-21 12:53:29 -0500 | [diff] [blame] | 1555 | schedule_delayed_work(&chipset_dev->periodic_controlvm_work, |
| 1556 | chipset_dev->poll_jiffies); |
David Kershner | 3d8394c | 2016-09-19 17:09:20 -0400 | [diff] [blame] | 1557 | } |
| 1558 | |
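 | | /* |
 | | * visorchipset_init() - ACPI add callback: allocate chipset_dev, create |
 | | * and validate the controlvm channel, create the sysfs groups, start the |
 | | * periodic controlvm work (or the crash-device setup work when running |
 | | * in a kdump kernel), and initialize visorbus. |
 | | */ |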
Charles Daniels | e80ffd4 | 2017-08-22 13:26:57 -0400 | [diff] [blame] | 1559 | static int visorchipset_init(struct acpi_device *acpi_device) |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 1560 | { |
David Kershner | 1366a3d | 2016-04-04 23:31:37 -0400 | [diff] [blame] | 1561 | int err = -ENODEV; |
Sameer Wadgaonkar | 765b2f8 | 2017-02-21 12:53:29 -0500 | [diff] [blame] | 1562 | struct visorchannel *controlvm_channel; |
Jes Sorensen | d3368a5 | 2015-05-13 13:21:57 -0400 | [diff] [blame] | 1563 | |
Sameer Wadgaonkar | 765b2f8 | 2017-02-21 12:53:29 -0500 | [diff] [blame] | 1564 | chipset_dev = kzalloc(sizeof(*chipset_dev), GFP_KERNEL); |
 | 1565 | if (!chipset_dev) { |
 | | err = -ENOMEM; |
David Kershner | 1366a3d | 2016-04-04 23:31:37 -0400 | [diff] [blame] | 1566 | goto error; |
 | | } |
David Kershner | f1f537c | 2017-08-30 13:36:19 -0400 | [diff] [blame] | 1567 | err = controlvm_channel_create(chipset_dev); |
| 1568 | if (err) |
| 1569 | goto error_free_chipset_dev; |
Sameer Wadgaonkar | 765b2f8 | 2017-02-21 12:53:29 -0500 | [diff] [blame] | 1570 | acpi_device->driver_data = chipset_dev; |
Sameer Wadgaonkar | 765b2f8 | 2017-02-21 12:53:29 -0500 | [diff] [blame] | 1571 | chipset_dev->acpi_device = acpi_device; |
David Kershner | 3fbee19 | 2017-09-27 13:14:38 -0400 | [diff] [blame] | 1572 | chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_FAST; |
Sameer Wadgaonkar | 15c012d | 2017-02-21 12:53:31 -0500 | [diff] [blame] | 1573 | err = sysfs_create_groups(&chipset_dev->acpi_device->dev.kobj, |
| 1574 | visorchipset_dev_groups); |
| 1575 | if (err < 0) |
| 1576 | goto error_destroy_channel; |
David Kershner | f1f537c | 2017-08-30 13:36:19 -0400 | [diff] [blame] | 1577 | controlvm_channel = chipset_dev->controlvm_channel; |
Sameer Wadgaonkar | 403043c | 2017-07-17 16:17:02 -0400 | [diff] [blame] | 1578 | if (!visor_check_channel(visorchannel_get_header(controlvm_channel), |
Sameer Wadgaonkar | e25201d | 2017-08-30 13:36:35 -0400 | [diff] [blame] | 1579 | &chipset_dev->acpi_device->dev, |
Andy Shevchenko | b32c5cb | 2017-08-22 13:26:54 -0400 | [diff] [blame] | 1580 | &visor_controlvm_channel_guid, |
Sameer Wadgaonkar | 403043c | 2017-07-17 16:17:02 -0400 | [diff] [blame] | 1581 | "controlvm", |
| 1582 | sizeof(struct visor_controlvm_channel), |
| 1583 | VISOR_CONTROLVM_CHANNEL_VERSIONID, |
 | 1584 | VISOR_CHANNEL_SIGNATURE)) { |
 | | err = -ENODEV; |
Sameer Wadgaonkar | 15c012d | 2017-02-21 12:53:31 -0500 | [diff] [blame] | 1585 | goto error_delete_groups; |
 | | } |
David Kershner | 4da3336 | 2015-05-05 18:36:39 -0400 | [diff] [blame] | 1586 | /* if booting in a crash kernel */ |
| 1587 | if (is_kdump_kernel()) |
Sameer Wadgaonkar | 765b2f8 | 2017-02-21 12:53:29 -0500 | [diff] [blame] | 1588 | INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work, |
David Kershner | 4da3336 | 2015-05-05 18:36:39 -0400 | [diff] [blame] | 1589 | setup_crash_devices_work_queue); |
| 1590 | else |
Sameer Wadgaonkar | 765b2f8 | 2017-02-21 12:53:29 -0500 | [diff] [blame] | 1591 | INIT_DELAYED_WORK(&chipset_dev->periodic_controlvm_work, |
David Kershner | 4da3336 | 2015-05-05 18:36:39 -0400 | [diff] [blame] | 1592 | controlvm_periodic_work); |
Sameer Wadgaonkar | 765b2f8 | 2017-02-21 12:53:29 -0500 | [diff] [blame] | 1593 | chipset_dev->most_recent_message_jiffies = jiffies; |
David Kershner | 3fbee19 | 2017-09-27 13:14:38 -0400 | [diff] [blame] | 1594 | chipset_dev->poll_jiffies = POLLJIFFIES_CONTROLVM_FAST; |
Sameer Wadgaonkar | 765b2f8 | 2017-02-21 12:53:29 -0500 | [diff] [blame] | 1595 | schedule_delayed_work(&chipset_dev->periodic_controlvm_work, |
| 1596 | chipset_dev->poll_jiffies); |
David Kershner | 1366a3d | 2016-04-04 23:31:37 -0400 | [diff] [blame] | 1597 | err = visorbus_init(); |
| 1598 | if (err < 0) |
Sameer Wadgaonkar | 15c012d | 2017-02-21 12:53:31 -0500 | [diff] [blame] | 1599 | goto error_cancel_work; |
David Kershner | 1366a3d | 2016-04-04 23:31:37 -0400 | [diff] [blame] | 1600 | return 0; |
| 1601 | |
David Kershner | 1366a3d | 2016-04-04 23:31:37 -0400 | [diff] [blame] | 1602 | error_cancel_work: |
Sameer Wadgaonkar | 765b2f8 | 2017-02-21 12:53:29 -0500 | [diff] [blame] | 1603 | cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work); |
David Kershner | 1366a3d | 2016-04-04 23:31:37 -0400 | [diff] [blame] | 1604 | |
Sameer Wadgaonkar | 15c012d | 2017-02-21 12:53:31 -0500 | [diff] [blame] | 1605 | error_delete_groups: |
| 1606 | sysfs_remove_groups(&chipset_dev->acpi_device->dev.kobj, |
| 1607 | visorchipset_dev_groups); |
| 1608 | |
David Kershner | 1366a3d | 2016-04-04 23:31:37 -0400 | [diff] [blame] | 1609 | error_destroy_channel: |
Sameer Wadgaonkar | 765b2f8 | 2017-02-21 12:53:29 -0500 | [diff] [blame] | 1610 | visorchannel_destroy(chipset_dev->controlvm_channel); |
| 1611 | |
| 1612 | error_free_chipset_dev: |
| 1613 | kfree(chipset_dev); |
David Kershner | 1366a3d | 2016-04-04 23:31:37 -0400 | [diff] [blame] | 1614 | |
| 1615 | error: |
David Kershner | 372b9f2 | 2017-04-18 16:55:23 -0400 | [diff] [blame] | 1616 | dev_err(&acpi_device->dev, "failed with error %d\n", err); |
David Kershner | 1366a3d | 2016-04-04 23:31:37 -0400 | [diff] [blame] | 1617 | return err; |
Erik Arfvidson | e3420ed | 2015-05-05 18:36:13 -0400 | [diff] [blame] | 1618 | } |
| 1619 | |
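 | | /* |
 | | * visorchipset_exit() - ACPI remove callback: tear down everything set |
 | | * up by visorchipset_init() in reverse order. |
 | | */ |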
Charles Daniels | e80ffd4 | 2017-08-22 13:26:57 -0400 | [diff] [blame] | 1620 | static int visorchipset_exit(struct acpi_device *acpi_device) |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 1621 | { |
Prarit Bhargava | c79b28f | 2015-05-05 18:36:15 -0400 | [diff] [blame] | 1622 | visorbus_exit(); |
Sameer Wadgaonkar | 765b2f8 | 2017-02-21 12:53:29 -0500 | [diff] [blame] | 1623 | cancel_delayed_work_sync(&chipset_dev->periodic_controlvm_work); |
Sameer Wadgaonkar | 15c012d | 2017-02-21 12:53:31 -0500 | [diff] [blame] | 1624 | sysfs_remove_groups(&chipset_dev->acpi_device->dev.kobj, |
| 1625 | visorchipset_dev_groups); |
Sameer Wadgaonkar | 765b2f8 | 2017-02-21 12:53:29 -0500 | [diff] [blame] | 1626 | visorchannel_destroy(chipset_dev->controlvm_channel); |
Sameer Wadgaonkar | 765b2f8 | 2017-02-21 12:53:29 -0500 | [diff] [blame] | 1627 | kfree(chipset_dev); |
Prarit Bhargava | 55c67dc | 2015-05-05 18:37:02 -0400 | [diff] [blame] | 1628 | return 0; |
| 1629 | } |
| 1630 | |
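 | | /* The ACPI device IDs that the visorchipset driver binds to */ |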
| 1631 | static const struct acpi_device_id unisys_device_ids[] = { |
| 1632 | {"PNP0A07", 0}, |
| 1633 | {"", 0}, |
| 1634 | }; |
Prarit Bhargava | 55c67dc | 2015-05-05 18:37:02 -0400 | [diff] [blame] | 1635 | |
| 1636 | static struct acpi_driver unisys_acpi_driver = { |
| 1637 | .name = "unisys_acpi", |
| 1638 | .class = "unisys_acpi_class", |
| 1639 | .owner = THIS_MODULE, |
| 1640 | .ids = unisys_device_ids, |
| 1641 | .ops = { |
| 1642 | .add = visorchipset_init, |
| 1643 | .remove = visorchipset_exit, |
David Kershner | 027b03e | 2017-04-18 16:55:04 -0400 | [diff] [blame] | 1644 | }, |
Prarit Bhargava | 55c67dc | 2015-05-05 18:37:02 -0400 | [diff] [blame] | 1645 | }; |
David Kershner | 1fc07f9 | 2015-07-09 13:27:53 -0400 | [diff] [blame] | 1646 | |
| 1647 | MODULE_DEVICE_TABLE(acpi, unisys_device_ids); |
| 1648 | |
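 | | /* |
 | | * visorutil_spar_detect() - return nonzero if we are running as a guest |
 | | * on the Unisys s-Par hypervisor, based on the hypervisor CPUID leaf |
 | | * matching the expected Unisys signature values. |
 | | */ |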
David Kershner | c1d28da | 2017-04-18 16:55:03 -0400 | [diff] [blame] | 1649 | static __init int visorutil_spar_detect(void) |
Erik Arfvidson | d5b3f1d | 2015-05-05 18:37:04 -0400 | [diff] [blame] | 1650 | { |
| 1651 | unsigned int eax, ebx, ecx, edx; |
| 1652 | |
Borislav Petkov | 0c9f3536 | 2016-03-29 17:41:55 +0200 | [diff] [blame] | 1653 | if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { |
Erik Arfvidson | d5b3f1d | 2015-05-05 18:37:04 -0400 | [diff] [blame] | 1654 | /* check the ID */ |
Sameer Wadgaonkar | a27ded9 | 2017-05-19 16:17:47 -0400 | [diff] [blame] | 1655 | cpuid(UNISYS_VISOR_LEAF_ID, &eax, &ebx, &ecx, &edx); |
| 1656 | return (ebx == UNISYS_VISOR_ID_EBX) && |
| 1657 | (ecx == UNISYS_VISOR_ID_ECX) && |
| 1658 | (edx == UNISYS_VISOR_ID_EDX); |
Erik Arfvidson | d5b3f1d | 2015-05-05 18:37:04 -0400 | [diff] [blame] | 1659 | } |
David Kershner | e4a0643 | 2017-08-30 13:36:25 -0400 | [diff] [blame] | 1660 | return 0; |
Erik Arfvidson | d5b3f1d | 2015-05-05 18:37:04 -0400 | [diff] [blame] | 1661 | } |
Prarit Bhargava | 55c67dc | 2015-05-05 18:37:02 -0400 | [diff] [blame] | 1662 | |
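 | | /* |
 | | * init_unisys() - module entry point: bail out unless s-Par is detected, |
 | | * then register the ACPI driver that probes the visorchipset device. |
 | | */ |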
Arnd Bergmann | 056e4fc | 2017-09-15 21:23:13 +0200 | [diff] [blame] | 1663 | static int __init init_unisys(void) |
Prarit Bhargava | 55c67dc | 2015-05-05 18:37:02 -0400 | [diff] [blame] | 1664 | { |
| 1665 | int result; |
Alessandro Parini | 35e606d | 2015-06-13 17:40:49 +0200 | [diff] [blame] | 1666 | |
Erik Arfvidson | d5b3f1d | 2015-05-05 18:37:04 -0400 | [diff] [blame] | 1667 | if (!visorutil_spar_detect()) |
Prarit Bhargava | 55c67dc | 2015-05-05 18:37:02 -0400 | [diff] [blame] | 1668 | return -ENODEV; |
Prarit Bhargava | 55c67dc | 2015-05-05 18:37:02 -0400 | [diff] [blame] | 1669 | result = acpi_bus_register_driver(&unisys_acpi_driver); |
| 1670 | if (result) |
| 1671 | return -ENODEV; |
Prarit Bhargava | 55c67dc | 2015-05-05 18:37:02 -0400 | [diff] [blame] | 1672 | pr_info("Unisys Visorchipset Driver Loaded.\n"); |
| 1673 | return 0; |
 | 1674 | } |
| 1675 | |
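 | | /* exit_unisys() - module exit point: unregister the ACPI driver */ |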
Arnd Bergmann | 056e4fc | 2017-09-15 21:23:13 +0200 | [diff] [blame] | 1676 | static void __exit exit_unisys(void) |
Prarit Bhargava | 55c67dc | 2015-05-05 18:37:02 -0400 | [diff] [blame] | 1677 | { |
| 1678 | acpi_bus_unregister_driver(&unisys_acpi_driver); |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 1679 | } |
| 1680 | |
Prarit Bhargava | 55c67dc | 2015-05-05 18:37:02 -0400 | [diff] [blame] | 1681 | module_init(init_unisys); |
| 1682 | module_exit(exit_unisys); |
Ken Cox | 12e364b | 2014-03-04 07:58:07 -0600 | [diff] [blame] | 1683 | |
| 1684 | MODULE_AUTHOR("Unisys"); |
| 1685 | MODULE_LICENSE("GPL"); |
Jon Frisch | bff8c1a | 2016-09-26 11:03:46 -0400 | [diff] [blame] | 1686 | MODULE_DESCRIPTION("s-Par visorbus driver for virtual device buses"); |