Vincent Chen | 6f4eea9 | 2021-03-22 22:26:03 +0800 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0-only |
| 2 | /* |
| 3 | * alternative runtime patching |
| 4 | * inspired by the ARM64 and x86 version |
| 5 | * |
| 6 | * Copyright (C) 2021 Sifive. |
| 7 | */ |
| 8 | |
| 9 | #include <linux/init.h> |
| 10 | #include <linux/cpu.h> |
| 11 | #include <linux/uaccess.h> |
| 12 | #include <asm/alternative.h> |
| 13 | #include <asm/sections.h> |
| 14 | #include <asm/vendorid_list.h> |
| 15 | #include <asm/sbi.h> |
| 16 | #include <asm/csr.h> |
| 17 | |
/*
 * Identity of the boot hart as reported by the hardware/firmware:
 * who built the core (vendor), which microarchitecture (arch) and
 * which implementation revision (imp).
 */
static struct cpu_manufacturer_info_t {
	unsigned long vendor_id;
	unsigned long arch_id;
	unsigned long imp_id;
} cpu_mfr_info;

/*
 * Vendor-specific errata patching hook, selected by init_alternative()
 * from the vendor ID.  NULL when no errata patching applies.
 * __initdata: only needed during early boot patching, freed afterwards
 * along with the rest of init memory.
 */
static void (*vendor_patch_func)(struct alt_entry *begin, struct alt_entry *end,
				 unsigned long archid,
				 unsigned long impid) __initdata;
Vincent Chen | 6f4eea9 | 2021-03-22 22:26:03 +0800 | [diff] [blame] | 27 | |
/*
 * Populate cpu_mfr_info with the boot hart's vendor/arch/implementation
 * IDs.  When the kernel runs in M-mode it can read the machine-level
 * CSRs directly; otherwise those CSRs are not accessible and the values
 * are obtained from the SBI firmware instead.
 */
static inline void __init riscv_fill_cpu_mfr_info(void)
{
#ifdef CONFIG_RISCV_M_MODE
	cpu_mfr_info.vendor_id = csr_read(CSR_MVENDORID);
	cpu_mfr_info.arch_id = csr_read(CSR_MARCHID);
	cpu_mfr_info.imp_id = csr_read(CSR_MIMPID);
#else
	cpu_mfr_info.vendor_id = sbi_get_mvendorid();
	cpu_mfr_info.arch_id = sbi_get_marchid();
	cpu_mfr_info.imp_id = sbi_get_mimpid();
#endif
}
| 40 | |
| 41 | static void __init init_alternative(void) |
| 42 | { |
| 43 | riscv_fill_cpu_mfr_info(); |
| 44 | |
| 45 | switch (cpu_mfr_info.vendor_id) { |
Vincent Chen | 1a0e5db | 2021-03-22 22:26:04 +0800 | [diff] [blame] | 46 | #ifdef CONFIG_ERRATA_SIFIVE |
| 47 | case SIFIVE_VENDOR_ID: |
| 48 | vendor_patch_func = sifive_errata_patch_func; |
| 49 | break; |
| 50 | #endif |
Vincent Chen | 6f4eea9 | 2021-03-22 22:26:03 +0800 | [diff] [blame] | 51 | default: |
| 52 | vendor_patch_func = NULL; |
| 53 | } |
| 54 | } |
| 55 | |
| 56 | /* |
| 57 | * This is called very early in the boot process (directly after we run |
| 58 | * a feature detect on the boot CPU). No need to worry about other CPUs |
| 59 | * here. |
| 60 | */ |
| 61 | void __init apply_boot_alternatives(void) |
| 62 | { |
| 63 | /* If called on non-boot cpu things could go wrong */ |
| 64 | WARN_ON(smp_processor_id() != 0); |
| 65 | |
| 66 | init_alternative(); |
| 67 | |
| 68 | if (!vendor_patch_func) |
| 69 | return; |
| 70 | |
| 71 | vendor_patch_func((struct alt_entry *)__alt_start, |
| 72 | (struct alt_entry *)__alt_end, |
| 73 | cpu_mfr_info.arch_id, cpu_mfr_info.imp_id); |
| 74 | } |
| 75 | |