Peter Oruba | 80cc9f1 | 2008-07-28 18:44:22 +0200 | [diff] [blame] | 1 | /* |
| 2 | * AMD CPU Microcode Update Driver for Linux |
Borislav Petkov | fe05589 | 2015-10-20 11:54:45 +0200 | [diff] [blame] | 3 | * |
 | 4 |  * This driver allows upgrading microcode on AMD F10h |
 | 5 |  * CPUs and later. |
| 6 | * |
Borislav Petkov | 597e11a | 2011-12-02 18:09:23 +0100 | [diff] [blame] | 7 | * Copyright (C) 2008-2011 Advanced Micro Devices Inc. |
Peter Oruba | 80cc9f1 | 2008-07-28 18:44:22 +0200 | [diff] [blame] | 8 | * |
| 9 | * Author: Peter Oruba <peter.oruba@amd.com> |
| 10 | * |
| 11 | * Based on work by: |
| 12 | * Tigran Aivazian <tigran@aivazian.fsnet.co.uk> |
| 13 | * |
Borislav Petkov | fe05589 | 2015-10-20 11:54:45 +0200 | [diff] [blame] | 14 | * early loader: |
| 15 | * Copyright (C) 2013 Advanced Micro Devices, Inc. |
| 16 | * |
| 17 | * Author: Jacob Shin <jacob.shin@amd.com> |
| 18 | * Fixes: Borislav Petkov <bp@suse.de> |
Peter Oruba | 80cc9f1 | 2008-07-28 18:44:22 +0200 | [diff] [blame] | 19 | * |
Andreas Herrmann | 2a3282a7 | 2008-12-16 19:08:53 +0100 | [diff] [blame] | 20 | * Licensed under the terms of the GNU General Public |
Peter Oruba | 80cc9f1 | 2008-07-28 18:44:22 +0200 | [diff] [blame] | 21 | * License version 2. See file COPYING for details. |
Ingo Molnar | 4bae196 | 2009-03-11 11:19:46 +0100 | [diff] [blame] | 22 | */ |
Borislav Petkov | 6b26e1b | 2015-10-20 11:54:46 +0200 | [diff] [blame] | 23 | #define pr_fmt(fmt) "microcode: " fmt |
Joe Perches | f58e1f5 | 2009-12-08 22:30:50 -0800 | [diff] [blame] | 24 | |
Borislav Petkov | fe05589 | 2015-10-20 11:54:45 +0200 | [diff] [blame] | 25 | #include <linux/earlycpio.h> |
Ingo Molnar | 4bae196 | 2009-03-11 11:19:46 +0100 | [diff] [blame] | 26 | #include <linux/firmware.h> |
Andreas Herrmann | be95776 | 2008-12-16 19:11:23 +0100 | [diff] [blame] | 27 | #include <linux/uaccess.h> |
Ingo Molnar | 4bae196 | 2009-03-11 11:19:46 +0100 | [diff] [blame] | 28 | #include <linux/vmalloc.h> |
Borislav Petkov | fe05589 | 2015-10-20 11:54:45 +0200 | [diff] [blame] | 29 | #include <linux/initrd.h> |
Ingo Molnar | 4bae196 | 2009-03-11 11:19:46 +0100 | [diff] [blame] | 30 | #include <linux/kernel.h> |
Ingo Molnar | 4bae196 | 2009-03-11 11:19:46 +0100 | [diff] [blame] | 31 | #include <linux/pci.h> |
Peter Oruba | 80cc9f1 | 2008-07-28 18:44:22 +0200 | [diff] [blame] | 32 | |
Borislav Petkov | fe05589 | 2015-10-20 11:54:45 +0200 | [diff] [blame] | 33 | #include <asm/microcode_amd.h> |
Peter Oruba | 80cc9f1 | 2008-07-28 18:44:22 +0200 | [diff] [blame] | 34 | #include <asm/microcode.h> |
Ingo Molnar | 4bae196 | 2009-03-11 11:19:46 +0100 | [diff] [blame] | 35 | #include <asm/processor.h> |
Borislav Petkov | fe05589 | 2015-10-20 11:54:45 +0200 | [diff] [blame] | 36 | #include <asm/setup.h> |
| 37 | #include <asm/cpu.h> |
Ingo Molnar | 4bae196 | 2009-03-11 11:19:46 +0100 | [diff] [blame] | 38 | #include <asm/msr.h> |
Peter Oruba | 80cc9f1 | 2008-07-28 18:44:22 +0200 | [diff] [blame] | 39 | |
/* Equivalence table mapping this CPU's signature (CPUID(1).EAX) to an ID. */
static struct equiv_cpu_entry *equiv_cpu_table;

/* One cached microcode patch, keyed by the equivalence ID it applies to. */
struct ucode_patch {
	struct list_head plist;		/* link into pcache below */
	void *data;			/* raw patch bytes (heap copy) */
	u32 patch_id;			/* microcode revision of this patch */
	u16 equiv_cpu;			/* equivalence ID this patch matches */
};

/* Simple per-family patch cache, used for CPU hotplug and resume. */
static LIST_HEAD(pcache);

/*
 * This points to the current valid container of microcode patches which we will
 * save from the initrd before jettisoning its contents.
 */
static u8 *container;
static size_t container_size;
/* true when the container came from builtin firmware rather than initrd */
static bool ucode_builtin;

/* Newest patch level applied during early load; 0 when nothing was applied. */
static u32 ucode_new_rev;
/* Copy of the patch applied on the BSP, reused for APs and on resume. */
static u8 amd_ucode_patch[PATCH_MAX_SIZE];
/* Equivalence ID found on the BSP; lets 64-bit APs reuse amd_ucode_patch. */
static u16 this_equiv_id;

/* Location/size of the microcode blob found in the initrd (cpio archive). */
static struct cpio_data ucode_cpio;
| 64 | |
/*
 * Locate the AMD microcode container file in the initrd.
 *
 * Returns a cpio_data describing the blob, or { NULL, 0, "" } when initrd
 * support is compiled out or the file is not present.
 */
static struct cpio_data __init find_ucode_in_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	char *path;
	void *start;
	size_t size;

	/*
	 * Microcode patch container file is prepended to the initrd in cpio
	 * format. See Documentation/x86/early-microcode.txt
	 */
	static __initdata char ucode_path[] = "kernel/x86/microcode/AuthenticAMD.bin";

#ifdef CONFIG_X86_32
	struct boot_params *p;

	/*
	 * On 32-bit, early load occurs before paging is turned on so we need
	 * to use physical addresses.
	 */
	p    = (struct boot_params *)__pa_nodebug(&boot_params);
	path = (char *)__pa_nodebug(ucode_path);
	start = (void *)p->hdr.ramdisk_image;
	size = p->hdr.ramdisk_size;
#else
	/* 64-bit runs with paging on; the ramdisk is mapped at PAGE_OFFSET. */
	path = ucode_path;
	start = (void *)(boot_params.hdr.ramdisk_image + PAGE_OFFSET);
	size = boot_params.hdr.ramdisk_size;
#endif /* !CONFIG_X86_32 */

	return find_cpio_data(path, start, size, NULL);
#else
	return (struct cpio_data){ NULL, 0, "" };
#endif
}
| 100 | |
| 101 | static size_t compute_container_size(u8 *data, u32 total_size) |
| 102 | { |
| 103 | size_t size = 0; |
| 104 | u32 *header = (u32 *)data; |
| 105 | |
| 106 | if (header[0] != UCODE_MAGIC || |
| 107 | header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */ |
| 108 | header[2] == 0) /* size */ |
| 109 | return size; |
| 110 | |
| 111 | size = header[2] + CONTAINER_HDR_SZ; |
| 112 | total_size -= size; |
| 113 | data += size; |
| 114 | |
| 115 | while (total_size) { |
| 116 | u16 patch_size; |
| 117 | |
| 118 | header = (u32 *)data; |
| 119 | |
| 120 | if (header[0] != UCODE_UCODE_TYPE) |
| 121 | break; |
| 122 | |
| 123 | /* |
| 124 | * Sanity-check patch size. |
| 125 | */ |
| 126 | patch_size = header[1]; |
| 127 | if (patch_size > PATCH_MAX_SIZE) |
| 128 | break; |
| 129 | |
| 130 | size += patch_size + SECTION_HDR_SIZE; |
| 131 | data += patch_size + SECTION_HDR_SIZE; |
| 132 | total_size -= patch_size + SECTION_HDR_SIZE; |
| 133 | } |
| 134 | |
| 135 | return size; |
| 136 | } |
| 137 | |
/*
 * Early load occurs before we can vmalloc(). So we look for the microcode
 * patch container file in initrd, traverse equivalent cpu table, look for a
 * matching microcode patch, and update, all in initrd memory in place.
 * When vmalloc() is available for use later -- on 64-bit during first AP load,
 * and on 32-bit during save_microcode_in_initrd_amd() -- we can call
 * load_microcode_amd() to save equivalent cpu table and microcode patches in
 * kernel heap memory.
 *
 * @ucode:	start of the (possibly concatenated) container blob
 * @size:	number of bytes at @ucode
 * @save_patch:	when true, additionally copy the applied patch into
 *		amd_ucode_patch for later reuse (AP load, resume)
 */
static void apply_ucode_in_initrd(void *ucode, size_t size, bool save_patch)
{
	struct equiv_cpu_entry *eq;
	size_t *cont_sz;
	u32 *header;
	u8 *data, **cont;
	u8 (*patch)[PATCH_MAX_SIZE];
	u16 eq_id = 0;
	int offset, left;
	u32 rev, eax, ebx, ecx, edx;
	u32 *new_rev;

#ifdef CONFIG_X86_32
	/* Paging may be off: reference the file-scope variables physically. */
	new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
	cont_sz = (size_t *)__pa_nodebug(&container_size);
	cont	= (u8 **)__pa_nodebug(&container);
	patch	= (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch);
#else
	new_rev = &ucode_new_rev;
	cont_sz = &container_size;
	cont	= &container;
	patch	= &amd_ucode_patch;
#endif

	data   = ucode;
	left   = size;
	header = (u32 *)data;

	/* find equiv cpu table */
	if (header[0] != UCODE_MAGIC ||
	    header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
	    header[2] == 0)                            /* size */
		return;

	/* This CPU's signature: CPUID(1).EAX */
	eax = 0x00000001;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);

	/* Outer loop: scan appended container files for a matching one. */
	while (left > 0) {
		eq = (struct equiv_cpu_entry *)(data + CONTAINER_HDR_SZ);

		/* Remember the start of the container currently examined. */
		*cont = data;

		/* Advance past the container header */
		offset = header[2] + CONTAINER_HDR_SZ;
		data  += offset;
		left  -= offset;

		eq_id = find_equiv_id(eq, eax);
		if (eq_id) {
			this_equiv_id = eq_id;
			*cont_sz = compute_container_size(*cont, left + offset);

			/*
			 * truncate how much we need to iterate over in the
			 * ucode update loop below
			 */
			left = *cont_sz - offset;
			break;
		}

		/*
		 * support multiple container files appended together. if this
		 * one does not have a matching equivalent cpu entry, we fast
		 * forward to the next container file.
		 */
		while (left > 0) {
			header = (u32 *)data;
			if (header[0] == UCODE_MAGIC &&
			    header[1] == UCODE_EQUIV_CPU_TABLE_TYPE)
				break;

			offset = header[1] + SECTION_HDR_SIZE;
			data  += offset;
			left  -= offset;
		}

		/* mark where the next microcode container file starts */
		offset = data - (u8 *)ucode;
		ucode  = data;
	}

	if (!eq_id) {
		/* No container matches this CPU: forget what we saw. */
		*cont = NULL;
		*cont_sz = 0;
		return;
	}

	/* rev receives the current patch level; stop if it is final. */
	if (check_current_patch_level(&rev, true))
		return;

	/* Walk the patch sections, applying each newer matching patch. */
	while (left > 0) {
		struct microcode_amd *mc;

		header = (u32 *)data;
		if (header[0] != UCODE_UCODE_TYPE || /* type */
		    header[1] == 0)                  /* size */
			break;

		mc = (struct microcode_amd *)(data + SECTION_HDR_SIZE);

		if (eq_id == mc->hdr.processor_rev_id && rev < mc->hdr.patch_id) {

			if (!__apply_microcode_amd(mc)) {
				rev = mc->hdr.patch_id;
				*new_rev = rev;

				if (save_patch)
					memcpy(patch, mc,
					       min_t(u32, header[1], PATCH_MAX_SIZE));
			}
		}

		offset = header[1] + SECTION_HDR_SIZE;
		data  += offset;
		left  -= offset;
	}
}
| 265 | |
| 266 | static bool __init load_builtin_amd_microcode(struct cpio_data *cp, |
| 267 | unsigned int family) |
| 268 | { |
| 269 | #ifdef CONFIG_X86_64 |
| 270 | char fw_name[36] = "amd-ucode/microcode_amd.bin"; |
| 271 | |
| 272 | if (family >= 0x15) |
| 273 | snprintf(fw_name, sizeof(fw_name), |
| 274 | "amd-ucode/microcode_amd_fam%.2xh.bin", family); |
| 275 | |
| 276 | return get_builtin_firmware(cp, fw_name); |
| 277 | #else |
| 278 | return false; |
| 279 | #endif |
| 280 | } |
| 281 | |
/*
 * Early-load microcode on the boot CPU: locate a blob (builtin firmware or
 * initrd), remember where it is for the APs, and patch the BSP in place.
 */
void __init load_ucode_amd_bsp(unsigned int family)
{
	struct cpio_data cp;
	bool *builtin;
	void **data;
	size_t *size;

#ifdef CONFIG_X86_32
	/* Pre-paging: reference the file-scope variables physically. */
	data = (void **)__pa_nodebug(&ucode_cpio.data);
	size = (size_t *)__pa_nodebug(&ucode_cpio.size);
	builtin = (bool *)__pa_nodebug(&ucode_builtin);
#else
	data = &ucode_cpio.data;
	size = &ucode_cpio.size;
	builtin = &ucode_builtin;
#endif

	/* Prefer builtin firmware; fall back to scanning the initrd. */
	*builtin = load_builtin_amd_microcode(&cp, family);
	if (!*builtin)
		cp = find_ucode_in_initrd();

	if (!(cp.data && cp.size))
		return;

	/* Stash the blob's location for the APs, then patch the BSP. */
	*data = cp.data;
	*size = cp.size;

	apply_ucode_in_initrd(cp.data, cp.size, true);
}
| 311 | |
#ifdef CONFIG_X86_32
/*
 * On 32-bit, since AP's early load occurs before paging is turned on, we
 * cannot traverse cpu_equiv_table and pcache in kernel heap memory. So during
 * cold boot, AP will apply_ucode_in_initrd() just like the BSP. During
 * save_microcode_in_initrd_amd() BSP's patch is copied to amd_ucode_patch,
 * which is used upon resume from suspend.
 */
void load_ucode_amd_ap(void)
{
	struct microcode_amd *mc;
	size_t *usize;
	void **ucode;

	/* Resume path: a previously saved patch can be applied directly. */
	mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
	if (mc->hdr.patch_id && mc->hdr.processor_rev_id) {
		__apply_microcode_amd(mc);
		return;
	}

	/* Cold boot: walk the container the BSP found (physical addresses). */
	ucode = (void *)__pa_nodebug(&container);
	usize = (size_t *)__pa_nodebug(&container_size);

	if (!*ucode || !*usize)
		return;

	apply_ucode_in_initrd(*ucode, *usize, false);
}
| 340 | |
| 341 | static void __init collect_cpu_sig_on_bsp(void *arg) |
| 342 | { |
| 343 | unsigned int cpu = smp_processor_id(); |
| 344 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; |
| 345 | |
| 346 | uci->cpu_sig.sig = cpuid_eax(0x00000001); |
| 347 | } |
| 348 | |
| 349 | static void __init get_bsp_sig(void) |
| 350 | { |
| 351 | unsigned int bsp = boot_cpu_data.cpu_index; |
| 352 | struct ucode_cpu_info *uci = ucode_cpu_info + bsp; |
| 353 | |
| 354 | if (!uci->cpu_sig.sig) |
| 355 | smp_call_function_single(bsp, collect_cpu_sig_on_bsp, NULL, 1); |
| 356 | } |
| 357 | #else |
/*
 * 64-bit AP early load: find and apply the matching patch for this AP.
 * Runs with paging enabled, so file-scope variables are directly usable.
 */
void load_ucode_amd_ap(void)
{
	unsigned int cpu = smp_processor_id();
	struct equiv_cpu_entry *eq;
	struct microcode_amd *mc;
	u8 *cont = container;
	u32 rev, eax;
	u16 eq_id;

	/* Exit if called on the BSP. */
	if (!cpu)
		return;

	/* No container was found during BSP load - nothing to apply. */
	if (!container)
		return;

	/*
	 * 64-bit runs with paging enabled, thus early==false.
	 */
	if (check_current_patch_level(&rev, false))
		return;

	/* Add CONFIG_RANDOMIZE_MEMORY offset. */
	if (!ucode_builtin)
		cont += PAGE_OFFSET - __PAGE_OFFSET_BASE;

	eax = cpuid_eax(0x00000001);
	eq  = (struct equiv_cpu_entry *)(cont + CONTAINER_HDR_SZ);

	eq_id = find_equiv_id(eq, eax);
	if (!eq_id)
		return;

	if (eq_id == this_equiv_id) {
		/* Same stepping as the BSP: reuse its saved patch. */
		mc = (struct microcode_amd *)amd_ucode_patch;

		/*
		 * NOTE(review): mc points at a static array and can never be
		 * NULL here - the "mc &&" test is effectively always true.
		 */
		if (mc && rev < mc->hdr.patch_id) {
			if (!__apply_microcode_amd(mc))
				ucode_new_rev = mc->hdr.patch_id;
		}

	} else {
		if (!ucode_cpio.data)
			return;

		/*
		 * AP has a different equivalence ID than BSP, looks like
		 * mixed-steppings silicon so go through the ucode blob anew.
		 */
		apply_ucode_in_initrd(ucode_cpio.data, ucode_cpio.size, false);
	}
}
| 410 | #endif |
| 411 | |
/*
 * Called once kernel heap allocation works: hand the container found during
 * early load to load_microcode_amd() (which builds the patch cache), then
 * drop the references into the soon-to-be-freed initrd.
 *
 * Returns 0 on success, -EINVAL when there is no container or loading fails.
 */
int __init save_microcode_in_initrd_amd(void)
{
	unsigned long cont;
	int retval = 0;
	enum ucode_state ret;
	u8 *cont_va;
	u32 eax;

	if (!container)
		return -EINVAL;

#ifdef CONFIG_X86_32
	get_bsp_sig();
	cont	= (unsigned long)container;
	cont_va	= __va(container);
#else
	/*
	 * We need the physical address of the container for both bitness since
	 * boot_params.hdr.ramdisk_image is a physical address.
	 */
	cont	= __pa(container);
	cont_va	= container;
#endif

	/*
	 * Take into account the fact that the ramdisk might get relocated and
	 * therefore we need to recompute the container's position in virtual
	 * memory space.
	 */
	if (relocated_ramdisk)
		container = (u8 *)(__va(relocated_ramdisk) +
			     (cont - boot_params.hdr.ramdisk_image));
	else
		container = cont_va;

	/* Add CONFIG_RANDOMIZE_MEMORY offset. */
	if (!ucode_builtin)
		container += PAGE_OFFSET - __PAGE_OFFSET_BASE;

	/* Compute the family from CPUID(1).EAX: base + extended family. */
	eax   = cpuid_eax(0x00000001);
	eax   = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);

	ret = load_microcode_amd(smp_processor_id(), eax, container, container_size);
	if (ret != UCODE_OK)
		retval = -EINVAL;

	/*
	 * This will be freed any msec now, stash patches for the current
	 * family and switch to patch cache for cpu hotplug, etc later.
	 */
	container = NULL;
	container_size = 0;

	return retval;
}
| 467 | |
| 468 | void reload_ucode_amd(void) |
| 469 | { |
| 470 | struct microcode_amd *mc; |
| 471 | u32 rev; |
| 472 | |
| 473 | /* |
| 474 | * early==false because this is a syscore ->resume path and by |
| 475 | * that time paging is long enabled. |
| 476 | */ |
| 477 | if (check_current_patch_level(&rev, false)) |
| 478 | return; |
| 479 | |
| 480 | mc = (struct microcode_amd *)amd_ucode_patch; |
| 481 | |
| 482 | if (mc && rev < mc->hdr.patch_id) { |
| 483 | if (!__apply_microcode_amd(mc)) { |
| 484 | ucode_new_rev = mc->hdr.patch_id; |
Borislav Petkov | a58017c | 2016-02-03 12:33:34 +0100 | [diff] [blame] | 485 | pr_info("reload patch_level=0x%08x\n", ucode_new_rev); |
Borislav Petkov | fe05589 | 2015-10-20 11:54:45 +0200 | [diff] [blame] | 486 | } |
| 487 | } |
| 488 | } |
Jacob Shin | a76096a | 2013-05-30 14:09:18 -0500 | [diff] [blame] | 489 | static u16 __find_equiv_id(unsigned int cpu) |
Borislav Petkov | c96d2c0 | 2012-08-01 14:55:01 +0200 | [diff] [blame] | 490 | { |
| 491 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; |
Jacob Shin | a76096a | 2013-05-30 14:09:18 -0500 | [diff] [blame] | 492 | return find_equiv_id(equiv_cpu_table, uci->cpu_sig.sig); |
Borislav Petkov | c96d2c0 | 2012-08-01 14:55:01 +0200 | [diff] [blame] | 493 | } |
| 494 | |
| 495 | static u32 find_cpu_family_by_equiv_cpu(u16 equiv_cpu) |
| 496 | { |
| 497 | int i = 0; |
| 498 | |
| 499 | BUG_ON(!equiv_cpu_table); |
| 500 | |
| 501 | while (equiv_cpu_table[i].equiv_cpu != 0) { |
| 502 | if (equiv_cpu == equiv_cpu_table[i].equiv_cpu) |
| 503 | return equiv_cpu_table[i].installed_cpu; |
| 504 | i++; |
| 505 | } |
| 506 | return 0; |
| 507 | } |
| 508 | |
Borislav Petkov | a3eb3b4 | 2012-08-01 15:38:18 +0200 | [diff] [blame] | 509 | /* |
| 510 | * a small, trivial cache of per-family ucode patches |
| 511 | */ |
| 512 | static struct ucode_patch *cache_find_patch(u16 equiv_cpu) |
| 513 | { |
| 514 | struct ucode_patch *p; |
| 515 | |
| 516 | list_for_each_entry(p, &pcache, plist) |
| 517 | if (p->equiv_cpu == equiv_cpu) |
| 518 | return p; |
| 519 | return NULL; |
| 520 | } |
| 521 | |
| 522 | static void update_cache(struct ucode_patch *new_patch) |
| 523 | { |
| 524 | struct ucode_patch *p; |
| 525 | |
| 526 | list_for_each_entry(p, &pcache, plist) { |
| 527 | if (p->equiv_cpu == new_patch->equiv_cpu) { |
| 528 | if (p->patch_id >= new_patch->patch_id) |
| 529 | /* we already have the latest patch */ |
| 530 | return; |
| 531 | |
| 532 | list_replace(&p->plist, &new_patch->plist); |
| 533 | kfree(p->data); |
| 534 | kfree(p); |
| 535 | return; |
| 536 | } |
| 537 | } |
| 538 | /* no patch found, add it */ |
| 539 | list_add_tail(&new_patch->plist, &pcache); |
| 540 | } |
| 541 | |
| 542 | static void free_cache(void) |
| 543 | { |
Dan Carpenter | 2d29748 | 2012-09-05 15:30:42 +0300 | [diff] [blame] | 544 | struct ucode_patch *p, *tmp; |
Borislav Petkov | a3eb3b4 | 2012-08-01 15:38:18 +0200 | [diff] [blame] | 545 | |
Dan Carpenter | 2d29748 | 2012-09-05 15:30:42 +0300 | [diff] [blame] | 546 | list_for_each_entry_safe(p, tmp, &pcache, plist) { |
Borislav Petkov | a3eb3b4 | 2012-08-01 15:38:18 +0200 | [diff] [blame] | 547 | __list_del(p->plist.prev, p->plist.next); |
| 548 | kfree(p->data); |
| 549 | kfree(p); |
| 550 | } |
| 551 | } |
| 552 | |
| 553 | static struct ucode_patch *find_patch(unsigned int cpu) |
| 554 | { |
| 555 | u16 equiv_id; |
| 556 | |
Jacob Shin | a76096a | 2013-05-30 14:09:18 -0500 | [diff] [blame] | 557 | equiv_id = __find_equiv_id(cpu); |
Borislav Petkov | a3eb3b4 | 2012-08-01 15:38:18 +0200 | [diff] [blame] | 558 | if (!equiv_id) |
| 559 | return NULL; |
| 560 | |
| 561 | return cache_find_patch(equiv_id); |
| 562 | } |
| 563 | |
Dmitry Adamushko | d45de40 | 2008-08-20 00:22:26 +0200 | [diff] [blame] | 564 | static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig) |
Peter Oruba | 80cc9f1 | 2008-07-28 18:44:22 +0200 | [diff] [blame] | 565 | { |
Andreas Herrmann | 3b2e3d8 | 2010-01-22 21:34:56 +0100 | [diff] [blame] | 566 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
Jacob Shin | 757885e | 2013-05-30 14:09:19 -0500 | [diff] [blame] | 567 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; |
| 568 | struct ucode_patch *p; |
Peter Oruba | 80cc9f1 | 2008-07-28 18:44:22 +0200 | [diff] [blame] | 569 | |
Borislav Petkov | 5f5b747 | 2012-07-25 20:06:54 +0200 | [diff] [blame] | 570 | csig->sig = cpuid_eax(0x00000001); |
Borislav Petkov | bcb80e5 | 2011-10-17 16:34:36 +0200 | [diff] [blame] | 571 | csig->rev = c->microcode; |
Jacob Shin | 757885e | 2013-05-30 14:09:19 -0500 | [diff] [blame] | 572 | |
| 573 | /* |
| 574 | * a patch could have been loaded early, set uci->mc so that |
| 575 | * mc_bp_resume() can call apply_microcode() |
| 576 | */ |
| 577 | p = find_patch(cpu); |
| 578 | if (p && (p->patch_id == csig->rev)) |
| 579 | uci->mc = p->data; |
| 580 | |
Borislav Petkov | 258721e | 2011-01-05 18:13:19 +0100 | [diff] [blame] | 581 | pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev); |
| 582 | |
Dmitry Adamushko | d45de40 | 2008-08-20 00:22:26 +0200 | [diff] [blame] | 583 | return 0; |
Peter Oruba | 80cc9f1 | 2008-07-28 18:44:22 +0200 | [diff] [blame] | 584 | } |
| 585 | |
Torsten Kaiser | 8451609 | 2013-08-08 19:38:18 +0200 | [diff] [blame] | 586 | static unsigned int verify_patch_size(u8 family, u32 patch_size, |
Borislav Petkov | be62adb | 2011-12-02 18:02:17 +0100 | [diff] [blame] | 587 | unsigned int size) |
Peter Oruba | 80cc9f1 | 2008-07-28 18:44:22 +0200 | [diff] [blame] | 588 | { |
Borislav Petkov | be62adb | 2011-12-02 18:02:17 +0100 | [diff] [blame] | 589 | u32 max_size; |
| 590 | |
| 591 | #define F1XH_MPB_MAX_SIZE 2048 |
| 592 | #define F14H_MPB_MAX_SIZE 1824 |
| 593 | #define F15H_MPB_MAX_SIZE 4096 |
Boris Ostrovsky | 36c46ca | 2012-11-15 13:41:50 -0500 | [diff] [blame] | 594 | #define F16H_MPB_MAX_SIZE 3458 |
Borislav Petkov | be62adb | 2011-12-02 18:02:17 +0100 | [diff] [blame] | 595 | |
Torsten Kaiser | 8451609 | 2013-08-08 19:38:18 +0200 | [diff] [blame] | 596 | switch (family) { |
Borislav Petkov | be62adb | 2011-12-02 18:02:17 +0100 | [diff] [blame] | 597 | case 0x14: |
| 598 | max_size = F14H_MPB_MAX_SIZE; |
| 599 | break; |
| 600 | case 0x15: |
| 601 | max_size = F15H_MPB_MAX_SIZE; |
| 602 | break; |
Boris Ostrovsky | 36c46ca | 2012-11-15 13:41:50 -0500 | [diff] [blame] | 603 | case 0x16: |
| 604 | max_size = F16H_MPB_MAX_SIZE; |
| 605 | break; |
Borislav Petkov | be62adb | 2011-12-02 18:02:17 +0100 | [diff] [blame] | 606 | default: |
| 607 | max_size = F1XH_MPB_MAX_SIZE; |
| 608 | break; |
| 609 | } |
| 610 | |
| 611 | if (patch_size > min_t(u32, size, max_size)) { |
| 612 | pr_err("patch size mismatch\n"); |
| 613 | return 0; |
| 614 | } |
| 615 | |
| 616 | return patch_size; |
| 617 | } |
| 618 | |
/*
 * Those patch levels cannot be updated to newer ones and thus should be final.
 */
static u32 final_levels[] = {
	0x01000098,
	0x0100009f,
	0x010000af,
	0, /* T-101 terminator */
};

/*
 * Check the current patch level on this CPU.
 *
 * @rev: Use it to return the patch level. It is set to 0 when the current
 * level is one of the final ones above.
 * @early: true when called before paging is enabled (32-bit early load);
 * final_levels is then accessed through its physical address.
 *
 * Returns:
 *  - true: if update should stop
 *  - false: otherwise
 */
bool check_current_patch_level(u32 *rev, bool early)
{
	u32 lvl, dummy, i;
	bool ret = false;
	u32 *levels;

	/* The current microcode revision is in MSR_AMD64_PATCH_LEVEL. */
	native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);

	if (IS_ENABLED(CONFIG_X86_32) && early)
		levels = (u32 *)__pa_nodebug(&final_levels);
	else
		levels = final_levels;

	for (i = 0; levels[i]; i++) {
		/* Final level: report 0 and tell the caller to stop. */
		if (lvl == levels[i]) {
			lvl = 0;
			ret = true;
			break;
		}
	}

	if (rev)
		*rev = lvl;

	return ret;
}
| 665 | |
Jacob Shin | a76096a | 2013-05-30 14:09:18 -0500 | [diff] [blame] | 666 | int __apply_microcode_amd(struct microcode_amd *mc_amd) |
| 667 | { |
| 668 | u32 rev, dummy; |
| 669 | |
Borislav Petkov | 5335ba5 | 2013-11-29 14:58:44 +0100 | [diff] [blame] | 670 | native_wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc_amd->hdr.data_code); |
Jacob Shin | a76096a | 2013-05-30 14:09:18 -0500 | [diff] [blame] | 671 | |
| 672 | /* verify patch application was successful */ |
Borislav Petkov | 5335ba5 | 2013-11-29 14:58:44 +0100 | [diff] [blame] | 673 | native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); |
Jacob Shin | a76096a | 2013-05-30 14:09:18 -0500 | [diff] [blame] | 674 | if (rev != mc_amd->hdr.patch_id) |
| 675 | return -1; |
| 676 | |
| 677 | return 0; |
| 678 | } |
| 679 | |
/*
 * Apply the cached patch for @cpu if it is newer than the running revision.
 *
 * Returns 0 when nothing needed doing or the update succeeded, -1 when the
 * current level is final or the MSR write did not take effect.
 */
int apply_microcode_amd(int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct microcode_amd *mc_amd;
	struct ucode_cpu_info *uci;
	struct ucode_patch *p;
	u32 rev;

	/* The MSR write must happen on the target CPU itself. */
	BUG_ON(raw_smp_processor_id() != cpu);

	uci = ucode_cpu_info + cpu;

	p = find_patch(cpu);
	if (!p)
		return 0;

	/* Record the patch on uci before any early-out below. */
	mc_amd  = p->data;
	uci->mc = p->data;

	if (check_current_patch_level(&rev, false))
		return -1;

	/* need to apply patch? */
	if (rev >= mc_amd->hdr.patch_id) {
		c->microcode = rev;
		uci->cpu_sig.rev = rev;
		return 0;
	}

	if (__apply_microcode_amd(mc_amd)) {
		pr_err("CPU%d: update failed for patch_level=0x%08x\n",
			cpu, mc_amd->hdr.patch_id);
		return -1;
	}
	pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
		mc_amd->hdr.patch_id);

	uci->cpu_sig.rev = mc_amd->hdr.patch_id;
	c->microcode = mc_amd->hdr.patch_id;

	return 0;
}
| 722 | |
Andreas Herrmann | 0657d9e | 2008-12-16 19:14:05 +0100 | [diff] [blame] | 723 | static int install_equiv_cpu_table(const u8 *buf) |
Peter Oruba | 80cc9f1 | 2008-07-28 18:44:22 +0200 | [diff] [blame] | 724 | { |
Borislav Petkov | 10de52d | 2010-12-30 22:10:12 +0100 | [diff] [blame] | 725 | unsigned int *ibuf = (unsigned int *)buf; |
| 726 | unsigned int type = ibuf[1]; |
| 727 | unsigned int size = ibuf[2]; |
Peter Oruba | 80cc9f1 | 2008-07-28 18:44:22 +0200 | [diff] [blame] | 728 | |
Borislav Petkov | 10de52d | 2010-12-30 22:10:12 +0100 | [diff] [blame] | 729 | if (type != UCODE_EQUIV_CPU_TABLE_TYPE || !size) { |
Borislav Petkov | 258721e | 2011-01-05 18:13:19 +0100 | [diff] [blame] | 730 | pr_err("empty section/" |
| 731 | "invalid type field in container file section header\n"); |
Borislav Petkov | 10de52d | 2010-12-30 22:10:12 +0100 | [diff] [blame] | 732 | return -EINVAL; |
Peter Oruba | 80cc9f1 | 2008-07-28 18:44:22 +0200 | [diff] [blame] | 733 | } |
| 734 | |
Jesper Juhl | 8e5e952 | 2010-11-09 00:08:11 +0100 | [diff] [blame] | 735 | equiv_cpu_table = vmalloc(size); |
Peter Oruba | 80cc9f1 | 2008-07-28 18:44:22 +0200 | [diff] [blame] | 736 | if (!equiv_cpu_table) { |
Joe Perches | f58e1f5 | 2009-12-08 22:30:50 -0800 | [diff] [blame] | 737 | pr_err("failed to allocate equivalent CPU table\n"); |
Borislav Petkov | 10de52d | 2010-12-30 22:10:12 +0100 | [diff] [blame] | 738 | return -ENOMEM; |
Peter Oruba | 80cc9f1 | 2008-07-28 18:44:22 +0200 | [diff] [blame] | 739 | } |
| 740 | |
Borislav Petkov | e7e632f | 2012-07-20 14:12:21 +0200 | [diff] [blame] | 741 | memcpy(equiv_cpu_table, buf + CONTAINER_HDR_SZ, size); |
Peter Oruba | 80cc9f1 | 2008-07-28 18:44:22 +0200 | [diff] [blame] | 742 | |
Borislav Petkov | 40b7f3d | 2011-06-15 15:34:57 +0200 | [diff] [blame] | 743 | /* add header length */ |
| 744 | return size + CONTAINER_HDR_SZ; |
Peter Oruba | 80cc9f1 | 2008-07-28 18:44:22 +0200 | [diff] [blame] | 745 | } |
| 746 | |
Dmitry Adamushko | a0a29b6 | 2008-09-11 23:27:52 +0200 | [diff] [blame] | 747 | static void free_equiv_cpu_table(void) |
Peter Oruba | 80cc9f1 | 2008-07-28 18:44:22 +0200 | [diff] [blame] | 748 | { |
Figo.zhang | aeef50b | 2009-06-07 22:30:36 +0800 | [diff] [blame] | 749 | vfree(equiv_cpu_table); |
| 750 | equiv_cpu_table = NULL; |
Dmitry Adamushko | a0a29b6 | 2008-09-11 23:27:52 +0200 | [diff] [blame] | 751 | } |
Peter Oruba | 80cc9f1 | 2008-07-28 18:44:22 +0200 | [diff] [blame] | 752 | |
Borislav Petkov | 2efb05e | 2012-08-01 16:16:13 +0200 | [diff] [blame] | 753 | static void cleanup(void) |
Dmitry Adamushko | a0a29b6 | 2008-09-11 23:27:52 +0200 | [diff] [blame] | 754 | { |
Borislav Petkov | 2efb05e | 2012-08-01 16:16:13 +0200 | [diff] [blame] | 755 | free_equiv_cpu_table(); |
| 756 | free_cache(); |
| 757 | } |
Peter Oruba | 80cc9f1 | 2008-07-28 18:44:22 +0200 | [diff] [blame] | 758 | |
Borislav Petkov | 2efb05e | 2012-08-01 16:16:13 +0200 | [diff] [blame] | 759 | /* |
| 760 | * We return the current size even if some of the checks failed so that |
| 761 | * we can skip over the next patch. If we return a negative value, we |
| 762 | * signal a grave error like a memory allocation has failed and the |
| 763 | * driver cannot continue functioning normally. In such cases, we tear |
| 764 | * down everything we've used up so far and exit. |
| 765 | */ |
Torsten Kaiser | 8451609 | 2013-08-08 19:38:18 +0200 | [diff] [blame] | 766 | static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover) |
Borislav Petkov | 2efb05e | 2012-08-01 16:16:13 +0200 | [diff] [blame] | 767 | { |
Borislav Petkov | 2efb05e | 2012-08-01 16:16:13 +0200 | [diff] [blame] | 768 | struct microcode_header_amd *mc_hdr; |
| 769 | struct ucode_patch *patch; |
| 770 | unsigned int patch_size, crnt_size, ret; |
| 771 | u32 proc_fam; |
| 772 | u16 proc_id; |
| 773 | |
| 774 | patch_size = *(u32 *)(fw + 4); |
| 775 | crnt_size = patch_size + SECTION_HDR_SIZE; |
| 776 | mc_hdr = (struct microcode_header_amd *)(fw + SECTION_HDR_SIZE); |
| 777 | proc_id = mc_hdr->processor_rev_id; |
| 778 | |
| 779 | proc_fam = find_cpu_family_by_equiv_cpu(proc_id); |
| 780 | if (!proc_fam) { |
| 781 | pr_err("No patch family for equiv ID: 0x%04x\n", proc_id); |
| 782 | return crnt_size; |
| 783 | } |
| 784 | |
| 785 | /* check if patch is for the current family */ |
| 786 | proc_fam = ((proc_fam >> 8) & 0xf) + ((proc_fam >> 20) & 0xff); |
Torsten Kaiser | 8451609 | 2013-08-08 19:38:18 +0200 | [diff] [blame] | 787 | if (proc_fam != family) |
Borislav Petkov | 2efb05e | 2012-08-01 16:16:13 +0200 | [diff] [blame] | 788 | return crnt_size; |
| 789 | |
| 790 | if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) { |
| 791 | pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n", |
| 792 | mc_hdr->patch_id); |
| 793 | return crnt_size; |
| 794 | } |
| 795 | |
Torsten Kaiser | 8451609 | 2013-08-08 19:38:18 +0200 | [diff] [blame] | 796 | ret = verify_patch_size(family, patch_size, leftover); |
Borislav Petkov | 2efb05e | 2012-08-01 16:16:13 +0200 | [diff] [blame] | 797 | if (!ret) { |
| 798 | pr_err("Patch-ID 0x%08x: size mismatch.\n", mc_hdr->patch_id); |
| 799 | return crnt_size; |
| 800 | } |
| 801 | |
| 802 | patch = kzalloc(sizeof(*patch), GFP_KERNEL); |
| 803 | if (!patch) { |
| 804 | pr_err("Patch allocation failure.\n"); |
| 805 | return -EINVAL; |
| 806 | } |
| 807 | |
Andrzej Hajda | 9cc6f74 | 2016-02-16 09:43:20 +0100 | [diff] [blame] | 808 | patch->data = kmemdup(fw + SECTION_HDR_SIZE, patch_size, GFP_KERNEL); |
Borislav Petkov | 2efb05e | 2012-08-01 16:16:13 +0200 | [diff] [blame] | 809 | if (!patch->data) { |
| 810 | pr_err("Patch data allocation failure.\n"); |
| 811 | kfree(patch); |
| 812 | return -EINVAL; |
| 813 | } |
| 814 | |
Borislav Petkov | 2efb05e | 2012-08-01 16:16:13 +0200 | [diff] [blame] | 815 | INIT_LIST_HEAD(&patch->plist); |
| 816 | patch->patch_id = mc_hdr->patch_id; |
| 817 | patch->equiv_cpu = proc_id; |
| 818 | |
Borislav Petkov | 5335ba5 | 2013-11-29 14:58:44 +0100 | [diff] [blame] | 819 | pr_debug("%s: Added patch_id: 0x%08x, proc_id: 0x%04x\n", |
| 820 | __func__, patch->patch_id, proc_id); |
| 821 | |
Borislav Petkov | 2efb05e | 2012-08-01 16:16:13 +0200 | [diff] [blame] | 822 | /* ... and add to cache. */ |
| 823 | update_cache(patch); |
| 824 | |
| 825 | return crnt_size; |
| 826 | } |
| 827 | |
Torsten Kaiser | 8451609 | 2013-08-08 19:38:18 +0200 | [diff] [blame] | 828 | static enum ucode_state __load_microcode_amd(u8 family, const u8 *data, |
| 829 | size_t size) |
Borislav Petkov | 2efb05e | 2012-08-01 16:16:13 +0200 | [diff] [blame] | 830 | { |
| 831 | enum ucode_state ret = UCODE_ERROR; |
| 832 | unsigned int leftover; |
| 833 | u8 *fw = (u8 *)data; |
| 834 | int crnt_size = 0; |
| 835 | int offset; |
| 836 | |
| 837 | offset = install_equiv_cpu_table(data); |
Borislav Petkov | 10de52d | 2010-12-30 22:10:12 +0100 | [diff] [blame] | 838 | if (offset < 0) { |
Joe Perches | f58e1f5 | 2009-12-08 22:30:50 -0800 | [diff] [blame] | 839 | pr_err("failed to create equivalent cpu table\n"); |
Borislav Petkov | 2efb05e | 2012-08-01 16:16:13 +0200 | [diff] [blame] | 840 | return ret; |
Peter Oruba | 80cc9f1 | 2008-07-28 18:44:22 +0200 | [diff] [blame] | 841 | } |
Borislav Petkov | 2efb05e | 2012-08-01 16:16:13 +0200 | [diff] [blame] | 842 | fw += offset; |
Dmitry Adamushko | a0a29b6 | 2008-09-11 23:27:52 +0200 | [diff] [blame] | 843 | leftover = size - offset; |
| 844 | |
Borislav Petkov | 2efb05e | 2012-08-01 16:16:13 +0200 | [diff] [blame] | 845 | if (*(u32 *)fw != UCODE_UCODE_TYPE) { |
Borislav Petkov | be62adb | 2011-12-02 18:02:17 +0100 | [diff] [blame] | 846 | pr_err("invalid type field in container file section header\n"); |
Borislav Petkov | 2efb05e | 2012-08-01 16:16:13 +0200 | [diff] [blame] | 847 | free_equiv_cpu_table(); |
| 848 | return ret; |
Borislav Petkov | be62adb | 2011-12-02 18:02:17 +0100 | [diff] [blame] | 849 | } |
| 850 | |
Dmitry Adamushko | a0a29b6 | 2008-09-11 23:27:52 +0200 | [diff] [blame] | 851 | while (leftover) { |
Torsten Kaiser | 8451609 | 2013-08-08 19:38:18 +0200 | [diff] [blame] | 852 | crnt_size = verify_and_add_patch(family, fw, leftover); |
Borislav Petkov | 2efb05e | 2012-08-01 16:16:13 +0200 | [diff] [blame] | 853 | if (crnt_size < 0) |
| 854 | return ret; |
Borislav Petkov | d733689 | 2011-12-07 17:26:56 +0100 | [diff] [blame] | 855 | |
Borislav Petkov | 2efb05e | 2012-08-01 16:16:13 +0200 | [diff] [blame] | 856 | fw += crnt_size; |
| 857 | leftover -= crnt_size; |
Peter Oruba | 80cc9f1 | 2008-07-28 18:44:22 +0200 | [diff] [blame] | 858 | } |
Dmitry Adamushko | a0a29b6 | 2008-09-11 23:27:52 +0200 | [diff] [blame] | 859 | |
Borislav Petkov | 2efb05e | 2012-08-01 16:16:13 +0200 | [diff] [blame] | 860 | return UCODE_OK; |
Dmitry Adamushko | a0a29b6 | 2008-09-11 23:27:52 +0200 | [diff] [blame] | 861 | } |
| 862 | |
Borislav Petkov | 2ef84b3 | 2014-12-01 11:12:21 +0100 | [diff] [blame] | 863 | enum ucode_state load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size) |
Jacob Shin | a76096a | 2013-05-30 14:09:18 -0500 | [diff] [blame] | 864 | { |
| 865 | enum ucode_state ret; |
| 866 | |
| 867 | /* free old equiv table */ |
| 868 | free_equiv_cpu_table(); |
| 869 | |
Torsten Kaiser | 8451609 | 2013-08-08 19:38:18 +0200 | [diff] [blame] | 870 | ret = __load_microcode_amd(family, data, size); |
Jacob Shin | a76096a | 2013-05-30 14:09:18 -0500 | [diff] [blame] | 871 | |
| 872 | if (ret != UCODE_OK) |
| 873 | cleanup(); |
| 874 | |
Borislav Petkov | fe05589 | 2015-10-20 11:54:45 +0200 | [diff] [blame] | 875 | #ifdef CONFIG_X86_32 |
Jacob Shin | 757885e | 2013-05-30 14:09:19 -0500 | [diff] [blame] | 876 | /* save BSP's matching patch for early load */ |
Borislav Petkov | 2ef84b3 | 2014-12-01 11:12:21 +0100 | [diff] [blame] | 877 | if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) { |
| 878 | struct ucode_patch *p = find_patch(cpu); |
Jacob Shin | 757885e | 2013-05-30 14:09:19 -0500 | [diff] [blame] | 879 | if (p) { |
Borislav Petkov | 5335ba5 | 2013-11-29 14:58:44 +0100 | [diff] [blame] | 880 | memset(amd_ucode_patch, 0, PATCH_MAX_SIZE); |
| 881 | memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data), |
| 882 | PATCH_MAX_SIZE)); |
Jacob Shin | 757885e | 2013-05-30 14:09:19 -0500 | [diff] [blame] | 883 | } |
| 884 | } |
| 885 | #endif |
Jacob Shin | a76096a | 2013-05-30 14:09:18 -0500 | [diff] [blame] | 886 | return ret; |
| 887 | } |
| 888 | |
Andreas Herrmann | 5b68edc | 2012-01-20 17:44:12 +0100 | [diff] [blame] | 889 | /* |
| 890 | * AMD microcode firmware naming convention, up to family 15h they are in |
| 891 | * the legacy file: |
| 892 | * |
| 893 | * amd-ucode/microcode_amd.bin |
| 894 | * |
| 895 | * This legacy file is always smaller than 2K in size. |
| 896 | * |
Borislav Petkov | 2efb05e | 2012-08-01 16:16:13 +0200 | [diff] [blame] | 897 | * Beginning with family 15h, they are in family-specific firmware files: |
Andreas Herrmann | 5b68edc | 2012-01-20 17:44:12 +0100 | [diff] [blame] | 898 | * |
| 899 | * amd-ucode/microcode_amd_fam15h.bin |
| 900 | * amd-ucode/microcode_amd_fam16h.bin |
| 901 | * ... |
| 902 | * |
| 903 | * These might be larger than 2K. |
| 904 | */ |
Borislav Petkov | 48e3068 | 2012-07-26 15:51:00 +0200 | [diff] [blame] | 905 | static enum ucode_state request_microcode_amd(int cpu, struct device *device, |
| 906 | bool refresh_fw) |
Dmitry Adamushko | a0a29b6 | 2008-09-11 23:27:52 +0200 | [diff] [blame] | 907 | { |
Andreas Herrmann | 5b68edc | 2012-01-20 17:44:12 +0100 | [diff] [blame] | 908 | char fw_name[36] = "amd-ucode/microcode_amd.bin"; |
Andreas Herrmann | 5b68edc | 2012-01-20 17:44:12 +0100 | [diff] [blame] | 909 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
Borislav Petkov | 2efb05e | 2012-08-01 16:16:13 +0200 | [diff] [blame] | 910 | enum ucode_state ret = UCODE_NFOUND; |
| 911 | const struct firmware *fw; |
| 912 | |
| 913 | /* reload ucode container only on the boot cpu */ |
| 914 | if (!refresh_fw || c->cpu_index != boot_cpu_data.cpu_index) |
| 915 | return UCODE_OK; |
Dmitry Adamushko | a0a29b6 | 2008-09-11 23:27:52 +0200 | [diff] [blame] | 916 | |
Andreas Herrmann | 5b68edc | 2012-01-20 17:44:12 +0100 | [diff] [blame] | 917 | if (c->x86 >= 0x15) |
| 918 | snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86); |
| 919 | |
Takashi Iwai | 75da02b | 2013-12-02 15:38:17 +0100 | [diff] [blame] | 920 | if (request_firmware_direct(&fw, (const char *)fw_name, device)) { |
Thomas Renninger | 11f918d3e | 2013-11-12 17:39:43 +0100 | [diff] [blame] | 921 | pr_debug("failed to load file %s\n", fw_name); |
Borislav Petkov | ffc7e8a | 2010-12-30 21:06:01 +0100 | [diff] [blame] | 922 | goto out; |
Andreas Herrmann | 3b2e3d8 | 2010-01-22 21:34:56 +0100 | [diff] [blame] | 923 | } |
Dmitry Adamushko | a0a29b6 | 2008-09-11 23:27:52 +0200 | [diff] [blame] | 924 | |
Borislav Petkov | ffc7e8a | 2010-12-30 21:06:01 +0100 | [diff] [blame] | 925 | ret = UCODE_ERROR; |
| 926 | if (*(u32 *)fw->data != UCODE_MAGIC) { |
Borislav Petkov | 258721e | 2011-01-05 18:13:19 +0100 | [diff] [blame] | 927 | pr_err("invalid magic value (0x%08x)\n", *(u32 *)fw->data); |
Borislav Petkov | ffc7e8a | 2010-12-30 21:06:01 +0100 | [diff] [blame] | 928 | goto fw_release; |
Borislav Petkov | 506f90e | 2009-10-29 14:45:52 +0100 | [diff] [blame] | 929 | } |
| 930 | |
Borislav Petkov | 2ef84b3 | 2014-12-01 11:12:21 +0100 | [diff] [blame] | 931 | ret = load_microcode_amd(cpu, c->x86, fw->data, fw->size); |
Borislav Petkov | 2efb05e | 2012-08-01 16:16:13 +0200 | [diff] [blame] | 932 | |
| 933 | fw_release: |
Borislav Petkov | ffc7e8a | 2010-12-30 21:06:01 +0100 | [diff] [blame] | 934 | release_firmware(fw); |
Andreas Herrmann | 3b2e3d8 | 2010-01-22 21:34:56 +0100 | [diff] [blame] | 935 | |
Borislav Petkov | 2efb05e | 2012-08-01 16:16:13 +0200 | [diff] [blame] | 936 | out: |
Dmitry Adamushko | a0a29b6 | 2008-09-11 23:27:52 +0200 | [diff] [blame] | 937 | return ret; |
| 938 | } |
| 939 | |
Dmitry Adamushko | 871b72d | 2009-05-11 23:48:27 +0200 | [diff] [blame] | 940 | static enum ucode_state |
| 941 | request_microcode_user(int cpu, const void __user *buf, size_t size) |
Dmitry Adamushko | a0a29b6 | 2008-09-11 23:27:52 +0200 | [diff] [blame] | 942 | { |
Dmitry Adamushko | 871b72d | 2009-05-11 23:48:27 +0200 | [diff] [blame] | 943 | return UCODE_ERROR; |
Peter Oruba | 80cc9f1 | 2008-07-28 18:44:22 +0200 | [diff] [blame] | 944 | } |
| 945 | |
Peter Oruba | 80cc9f1 | 2008-07-28 18:44:22 +0200 | [diff] [blame] | 946 | static void microcode_fini_cpu_amd(int cpu) |
| 947 | { |
| 948 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; |
| 949 | |
Dmitry Adamushko | 18dbc91 | 2008-09-23 12:08:44 +0200 | [diff] [blame] | 950 | uci->mc = NULL; |
Peter Oruba | 80cc9f1 | 2008-07-28 18:44:22 +0200 | [diff] [blame] | 951 | } |
| 952 | |
/* AMD callbacks registered with the arch-generic microcode core. */
static struct microcode_ops microcode_amd_ops = {
	.request_microcode_user           = request_microcode_user,
	.request_microcode_fw             = request_microcode_amd,
	.collect_cpu_info                 = collect_cpu_info_amd,
	.apply_microcode                  = apply_microcode_amd,
	.microcode_fini_cpu               = microcode_fini_cpu_amd,
};
| 960 | |
Dmitry Adamushko | 18dbc91 | 2008-09-23 12:08:44 +0200 | [diff] [blame] | 961 | struct microcode_ops * __init init_amd_microcode(void) |
Peter Oruba | 80cc9f1 | 2008-07-28 18:44:22 +0200 | [diff] [blame] | 962 | { |
Borislav Petkov | 9a2bc33 | 2015-10-20 11:54:44 +0200 | [diff] [blame] | 963 | struct cpuinfo_x86 *c = &boot_cpu_data; |
Andreas Herrmann | 283c1f2 | 2012-04-12 16:51:57 +0200 | [diff] [blame] | 964 | |
| 965 | if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) { |
Chen Yucong | 1b74dde | 2016-02-02 11:45:02 +0800 | [diff] [blame] | 966 | pr_warn("AMD CPU family 0x%x not supported\n", c->x86); |
Andreas Herrmann | 283c1f2 | 2012-04-12 16:51:57 +0200 | [diff] [blame] | 967 | return NULL; |
| 968 | } |
| 969 | |
Borislav Petkov | f7eb59d | 2016-02-03 12:33:44 +0100 | [diff] [blame] | 970 | if (ucode_new_rev) |
| 971 | pr_info_once("microcode updated early to new patch_level=0x%08x\n", |
| 972 | ucode_new_rev); |
| 973 | |
Dmitry Adamushko | 18dbc91 | 2008-09-23 12:08:44 +0200 | [diff] [blame] | 974 | return µcode_amd_ops; |
Peter Oruba | 80cc9f1 | 2008-07-28 18:44:22 +0200 | [diff] [blame] | 975 | } |
Borislav Petkov | f72c1a5 | 2011-12-02 16:50:04 +0100 | [diff] [blame] | 976 | |
/* Module exit: release the equivalence table and the patch cache. */
void __exit exit_amd_microcode(void)
{
	cleanup();
}