/* SPDX-License-Identifier: GPL-2.0 */

/*
 * Linux-specific definitions for managing interactions with Microsoft's
 * Hyper-V hypervisor. The definitions in this file are architecture
 * independent. See arch/<arch>/include/asm/mshyperv.h for definitions
 * that are specific to architecture <arch>.
 *
 * Definitions that are specified in the Hyper-V Top Level Functional
 * Spec (TLFS) should not go in this file, but should instead go in
 * hyperv-tlfs.h.
 *
 * Copyright (C) 2019, Microsoft, Inc.
 *
 * Author : Michael Kelley <mikelley@microsoft.com>
 */

#ifndef _ASM_GENERIC_MSHYPERV_H
#define _ASM_GENERIC_MSHYPERV_H

#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <asm/ptrace.h>
#include <asm/hyperv-tlfs.h>

struct ms_hyperv_info {
	u32 features;
	u32 misc_features;
	u32 hints;
	u32 nested_features;
	u32 max_vp_index;
	u32 max_lp_index;
};
extern struct ms_hyperv_info ms_hyperv;

extern u64 hv_do_hypercall(u64 control, void *inputaddr, void *outputaddr);
extern u64 hv_do_fast_hypercall8(u16 control, u64 input8);


/* Generate the guest OS identifier as described in the Hyper-V TLFS */
static inline __u64 generate_guest_id(__u64 d_info1, __u64 kernel_version,
				      __u64 d_info2)
{
	__u64 guest_id = 0;

	guest_id = (((__u64)HV_LINUX_VENDOR_ID) << 48);
	guest_id |= (d_info1 << 48);
	guest_id |= (kernel_version << 16);
	guest_id |= d_info2;

	return guest_id;
}
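
/*
 * Illustrative sketch (not part of this header): on x86, the Hyper-V init
 * code builds the guest ID from the running kernel version and writes it
 * to the HV_X64_MSR_GUEST_OS_ID synthetic MSR, roughly as below. The zero
 * values for d_info1/d_info2 are shown only as an assumption for the
 * example.
 *
 *	u64 guest_id = generate_guest_id(0, LINUX_VERSION_CODE, 0);
 *	wrmsrl(HV_X64_MSR_GUEST_OS_ID, guest_id);
 */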


/* Free the message slot and signal end-of-message if required */
static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
{
	/*
	 * On crash we're reading some other CPU's message page and we need
	 * to be careful: this other CPU may already have cleared the header
	 * and the host may already have delivered some other message there.
	 * If we blindly write msg->header.message_type, we're going
	 * to lose it. We can still lose a message of the same type, but
	 * we count on the fact that there can only be one
	 * CHANNELMSG_UNLOAD_RESPONSE and we don't care about other messages
	 * on crash.
	 */
	if (cmpxchg(&msg->header.message_type, old_msg_type,
		    HVMSG_NONE) != old_msg_type)
		return;

	/*
	 * The cmpxchg() above provides an implicit memory barrier to
	 * ensure the write to MessageType (i.e. setting it to
	 * HVMSG_NONE) happens before we read MessagePending and
	 * send the EOM. Otherwise, the EOM will not result in any
	 * more messages being delivered since there is no empty slot.
	 */
	if (msg->header.message_flags.msg_pending) {
		/*
		 * This will cause a message queue rescan to
		 * possibly deliver another msg from the
		 * hypervisor
		 */
		hv_signal_eom();
	}
}

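/*
 * Illustrative caller sketch (hypothetical local names): a per-CPU VMBus
 * message handler snapshots the message type before processing and passes
 * that snapshot back so the slot is freed only if the header was not
 * overwritten in the meantime, e.g. on crash by another CPU.
 *
 *	u32 msgtype = READ_ONCE(msg->header.message_type);
 *
 *	if (msgtype != HVMSG_NONE)
 *		dispatch_message(msg);	// hypothetical dispatch helper
 *	vmbus_signal_eom(msg, msgtype);
 */
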
void hv_setup_vmbus_irq(void (*handler)(void));
void hv_remove_vmbus_irq(void);
void hv_enable_vmbus_irq(void);
void hv_disable_vmbus_irq(void);

void hv_setup_kexec_handler(void (*handler)(void));
void hv_remove_kexec_handler(void);
void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs));
void hv_remove_crash_handler(void);

#if IS_ENABLED(CONFIG_HYPERV)
/*
 * The hypervisor's notion of virtual processor ID is different from
 * Linux's notion of CPU ID. This information can only be retrieved
 * in the context of the calling CPU. Set up a map for easy access
 * to this information.
 */
extern u32 *hv_vp_index;
extern u32 hv_max_vp_index;

/* Sentinel value for an uninitialized entry in hv_vp_index array */
#define VP_INVAL	U32_MAX

/**
 * hv_cpu_number_to_vp_number() - Map CPU to VP.
 * @cpu_number: CPU number in Linux terms
 *
 * This function returns the mapping between the Linux processor
 * number and the hypervisor's virtual processor number, which is
 * useful when making hypercalls and performing other operations
 * that refer to specific processors.
 *
 * Return: Virtual processor number in Hyper-V terms
 */
static inline int hv_cpu_number_to_vp_number(int cpu_number)
{
	return hv_vp_index[cpu_number];
}
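
/*
 * Illustrative sketch: callers typically translate a Linux CPU number right
 * before handing it to the hypervisor, for example the current CPU:
 *
 *	int vp = hv_cpu_number_to_vp_number(smp_processor_id());
 *
 * smp_processor_id() is just one possible source of a CPU number; any valid
 * CPU number already known to the caller works the same way.
 */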

static inline int cpumask_to_vpset(struct hv_vpset *vpset,
				   const struct cpumask *cpus)
{
	int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;

	/* valid_bank_mask can represent up to 64 banks */
	if (hv_max_vp_index / 64 >= 64)
		return 0;

	/*
	 * Clear all banks up to the maximum possible bank, as hv_tlb_flush_ex
	 * structs are not cleared between calls; we risk flushing unneeded
	 * vCPUs otherwise.
	 */
	for (vcpu_bank = 0; vcpu_bank <= hv_max_vp_index / 64; vcpu_bank++)
		vpset->bank_contents[vcpu_bank] = 0;

	/* Some banks may end up being empty but this is acceptable. */
	for_each_cpu(cpu, cpus) {
		vcpu = hv_cpu_number_to_vp_number(cpu);
		if (vcpu == VP_INVAL)
			return -1;
		vcpu_bank = vcpu / 64;
		vcpu_offset = vcpu % 64;
		__set_bit(vcpu_offset, (unsigned long *)
			  &vpset->bank_contents[vcpu_bank]);
		if (vcpu_bank >= nr_bank)
			nr_bank = vcpu_bank + 1;
	}
	vpset->valid_bank_mask = GENMASK_ULL(nr_bank - 1, 0);
	return nr_bank;
}
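
/*
 * Illustrative caller sketch (hypothetical local names): "ex" hypercalls that
 * take a sparse VP set typically build it with cpumask_to_vpset() and fall
 * back to a non-sparse path when a CPU's VP index is not yet known:
 *
 *	nr_bank = cpumask_to_vpset(&flush->hv_vp_set, cpus);
 *	if (nr_bank < 0)
 *		goto do_non_sparse;	// some hv_vp_index entry was VP_INVAL
 *	// the hypercall input then carries nr_bank bank_contents[] entries
 */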

void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die);
void hyperv_report_panic_msg(phys_addr_t pa, size_t size);
bool hv_is_hyperv_initialized(void);
bool hv_is_hibernation_supported(void);
void hyperv_cleanup(void);
void hv_setup_sched_clock(void *sched_clock);
#else /* CONFIG_HYPERV */
static inline bool hv_is_hyperv_initialized(void) { return false; }
static inline bool hv_is_hibernation_supported(void) { return false; }
static inline void hyperv_cleanup(void) {}
#endif /* CONFIG_HYPERV */

#if IS_ENABLED(CONFIG_HYPERV)
extern int hv_setup_stimer0_irq(int *irq, int *vector, void (*handler)(void));
extern void hv_remove_stimer0_irq(int irq);
#endif

#endif