/*
 * This program is used to generate definitions needed by
 * assembly language modules.
 *
 * We use the technique used in the OSF Mach kernel code:
 * generate asm statements containing #defines,
 * compile this file to assembler, and then extract the
 * #defines from the assembly-language output.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
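/*
 * Illustrative sketch (not part of the original file): DEFINE() comes
 * from <linux/kbuild.h> and, in kernels of this vintage, expands roughly
 * to
 *
 *      #define DEFINE(sym, val) \
 *              asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))
 *
 * so each DEFINE() below emits a "->SYMBOL value" marker into the
 * compiler's assembly output.  The Kbuild machinery then post-processes
 * that output (a sed pass over asm-offsets.s) into plain #defines in
 * include/generated/asm-offsets.h, which the assembly sources include.
 */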

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/hrtimer.h>
#ifdef CONFIG_PPC64
#include <linux/time.h>
#include <linux/hardirq.h>
#endif
#include <linux/kbuild.h>

#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/rtas.h>
#include <asm/vdso_datapage.h>
#include <asm/dbell.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/lppaca.h>
#include <asm/cache.h>
#include <asm/compat.h>
#include <asm/mmu.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#endif
#ifdef CONFIG_PPC_POWERNV
#include <asm/opal.h>
#endif
#if defined(CONFIG_KVM) || defined(CONFIG_KVM_GUEST)
#include <linux/kvm_host.h>
#endif
#if defined(CONFIG_KVM) && defined(CONFIG_PPC_BOOK3S)
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#endif

#ifdef CONFIG_PPC32
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
#include "head_booke.h"
#endif
#endif

#if defined(CONFIG_PPC_FSL_BOOK3E)
#include "../mm/mmu_decl.h"
#endif

#ifdef CONFIG_PPC_8xx
#include <asm/fixmap.h>
#endif

int main(void)
{
        DEFINE(THREAD, offsetof(struct task_struct, thread));
        DEFINE(MM, offsetof(struct task_struct, mm));
        DEFINE(MMCONTEXTID, offsetof(struct mm_struct, context.id));
#ifdef CONFIG_PPC64
        DEFINE(SIGSEGV, SIGSEGV);
        DEFINE(NMI_MASK, NMI_MASK);
        DEFINE(TASKTHREADPPR, offsetof(struct task_struct, thread.ppr));
#else
        DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
        DEFINE(THREAD_INFO_GAP, _ALIGN_UP(sizeof(struct thread_info), 16));
        DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit));
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_LIVEPATCH
        DEFINE(TI_livepatch_sp, offsetof(struct thread_info, livepatch_sp));
#endif

#ifdef CONFIG_CC_STACKPROTECTOR
        DEFINE(TSK_STACK_CANARY, offsetof(struct task_struct, stack_canary));
#endif
        DEFINE(KSP, offsetof(struct thread_struct, ksp));
        DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
#ifdef CONFIG_BOOKE
        DEFINE(THREAD_NORMSAVES, offsetof(struct thread_struct, normsave[0]));
#endif
        DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode));
        DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fp_state));
        DEFINE(THREAD_FPSAVEAREA, offsetof(struct thread_struct, fp_save_area));
        DEFINE(FPSTATE_FPSCR, offsetof(struct thread_fp_state, fpscr));
        DEFINE(THREAD_LOAD_FP, offsetof(struct thread_struct, load_fp));
#ifdef CONFIG_ALTIVEC
        DEFINE(THREAD_VRSTATE, offsetof(struct thread_struct, vr_state));
        DEFINE(THREAD_VRSAVEAREA, offsetof(struct thread_struct, vr_save_area));
        DEFINE(THREAD_VRSAVE, offsetof(struct thread_struct, vrsave));
        DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr));
        DEFINE(VRSTATE_VSCR, offsetof(struct thread_vr_state, vscr));
        DEFINE(THREAD_LOAD_VEC, offsetof(struct thread_struct, load_vec));
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
        DEFINE(THREAD_USED_VSR, offsetof(struct thread_struct, used_vsr));
#endif /* CONFIG_VSX */
#ifdef CONFIG_PPC64
        DEFINE(KSP_VSID, offsetof(struct thread_struct, ksp_vsid));
#else /* CONFIG_PPC64 */
        DEFINE(PGDIR, offsetof(struct thread_struct, pgdir));
#ifdef CONFIG_SPE
        DEFINE(THREAD_EVR0, offsetof(struct thread_struct, evr[0]));
        DEFINE(THREAD_ACC, offsetof(struct thread_struct, acc));
        DEFINE(THREAD_SPEFSCR, offsetof(struct thread_struct, spefscr));
        DEFINE(THREAD_USED_SPE, offsetof(struct thread_struct, used_spe));
#endif /* CONFIG_SPE */
#endif /* CONFIG_PPC64 */
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
        DEFINE(THREAD_DBCR0, offsetof(struct thread_struct, debug.dbcr0));
#endif
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
        DEFINE(THREAD_KVM_SVCPU, offsetof(struct thread_struct, kvm_shadow_vcpu));
#endif
#if defined(CONFIG_KVM) && defined(CONFIG_BOOKE)
        DEFINE(THREAD_KVM_VCPU, offsetof(struct thread_struct, kvm_vcpu));
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        DEFINE(PACATMSCRATCH, offsetof(struct paca_struct, tm_scratch));
        DEFINE(THREAD_TM_TFHAR, offsetof(struct thread_struct, tm_tfhar));
        DEFINE(THREAD_TM_TEXASR, offsetof(struct thread_struct, tm_texasr));
        DEFINE(THREAD_TM_TFIAR, offsetof(struct thread_struct, tm_tfiar));
        DEFINE(THREAD_TM_TAR, offsetof(struct thread_struct, tm_tar));
        DEFINE(THREAD_TM_PPR, offsetof(struct thread_struct, tm_ppr));
        DEFINE(THREAD_TM_DSCR, offsetof(struct thread_struct, tm_dscr));
        DEFINE(PT_CKPT_REGS, offsetof(struct thread_struct, ckpt_regs));
        DEFINE(THREAD_CKVRSTATE, offsetof(struct thread_struct,
                                          ckvr_state));
        DEFINE(THREAD_CKVRSAVE, offsetof(struct thread_struct,
                                         ckvrsave));
        DEFINE(THREAD_CKFPSTATE, offsetof(struct thread_struct,
                                          ckfp_state));
        /* Local pt_regs on stack for Transactional Memory funcs. */
        DEFINE(TM_FRAME_SIZE, STACK_FRAME_OVERHEAD +
               sizeof(struct pt_regs) + 16);
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

        DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
        DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
        DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
        DEFINE(TI_TASK, offsetof(struct thread_info, task));
        DEFINE(TI_CPU, offsetof(struct thread_info, cpu));

#ifdef CONFIG_PPC64
        DEFINE(DCACHEL1LINESIZE, offsetof(struct ppc64_caches, dline_size));
        DEFINE(DCACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_dline_size));
        DEFINE(DCACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, dlines_per_page));
        DEFINE(ICACHEL1LINESIZE, offsetof(struct ppc64_caches, iline_size));
        DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size));
        DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page));
        /* paca */
        DEFINE(PACA_SIZE, sizeof(struct paca_struct));
        DEFINE(PACAPACAINDEX, offsetof(struct paca_struct, paca_index));
        DEFINE(PACAPROCSTART, offsetof(struct paca_struct, cpu_start));
        DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack));
        DEFINE(PACACURRENT, offsetof(struct paca_struct, __current));
        DEFINE(PACASAVEDMSR, offsetof(struct paca_struct, saved_msr));
        DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr));
        DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1));
        DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc));
        DEFINE(PACAKBASE, offsetof(struct paca_struct, kernelbase));
        DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr));
        DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
        DEFINE(PACAIRQHAPPENED, offsetof(struct paca_struct, irq_happened));
#ifdef CONFIG_PPC_BOOK3S
        DEFINE(PACACONTEXTID, offsetof(struct paca_struct, mm_ctx_id));
#ifdef CONFIG_PPC_MM_SLICES
        DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct,
                                            mm_ctx_low_slices_psize));
        DEFINE(PACAHIGHSLICEPSIZE, offsetof(struct paca_struct,
                                            mm_ctx_high_slices_psize));
        DEFINE(MMUPSIZEDEFSIZE, sizeof(struct mmu_psize_def));
#endif /* CONFIG_PPC_MM_SLICES */
#endif

#ifdef CONFIG_PPC_BOOK3E
        DEFINE(PACAPGD, offsetof(struct paca_struct, pgd));
        DEFINE(PACA_KERNELPGD, offsetof(struct paca_struct, kernel_pgd));
        DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen));
        DEFINE(PACA_EXTLB, offsetof(struct paca_struct, extlb));
        DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc));
        DEFINE(PACA_EXCRIT, offsetof(struct paca_struct, excrit));
        DEFINE(PACA_EXDBG, offsetof(struct paca_struct, exdbg));
        DEFINE(PACA_MC_STACK, offsetof(struct paca_struct, mc_kstack));
        DEFINE(PACA_CRIT_STACK, offsetof(struct paca_struct, crit_kstack));
        DEFINE(PACA_DBG_STACK, offsetof(struct paca_struct, dbg_kstack));
        DEFINE(PACA_TCD_PTR, offsetof(struct paca_struct, tcd_ptr));

        DEFINE(TCD_ESEL_NEXT,
               offsetof(struct tlb_core_data, esel_next));
        DEFINE(TCD_ESEL_MAX,
               offsetof(struct tlb_core_data, esel_max));
        DEFINE(TCD_ESEL_FIRST,
               offsetof(struct tlb_core_data, esel_first));
#endif /* CONFIG_PPC_BOOK3E */

#ifdef CONFIG_PPC_STD_MMU_64
        DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
        DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
        DEFINE(PACAVMALLOCSLLP, offsetof(struct paca_struct, vmalloc_sllp));
#ifdef CONFIG_PPC_MM_SLICES
        DEFINE(MMUPSIZESLLP, offsetof(struct mmu_psize_def, sllp));
#else
        DEFINE(PACACONTEXTSLLP, offsetof(struct paca_struct, mm_ctx_sllp));
#endif /* CONFIG_PPC_MM_SLICES */
        DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen));
        DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc));
        DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb));
        DEFINE(PACALPPACAPTR, offsetof(struct paca_struct, lppaca_ptr));
        DEFINE(PACA_SLBSHADOWPTR, offsetof(struct paca_struct, slb_shadow_ptr));
        DEFINE(SLBSHADOW_STACKVSID,
               offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid));
        DEFINE(SLBSHADOW_STACKESID,
               offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid));
        DEFINE(SLBSHADOW_SAVEAREA, offsetof(struct slb_shadow, save_area));
        DEFINE(LPPACA_PMCINUSE, offsetof(struct lppaca, pmcregs_in_use));
        DEFINE(LPPACA_DTLIDX, offsetof(struct lppaca, dtl_idx));
        DEFINE(LPPACA_YIELDCOUNT, offsetof(struct lppaca, yield_count));
        DEFINE(PACA_DTL_RIDX, offsetof(struct paca_struct, dtl_ridx));
#endif /* CONFIG_PPC_STD_MMU_64 */
        DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
#ifdef CONFIG_PPC_BOOK3S_64
        DEFINE(PACAMCEMERGSP, offsetof(struct paca_struct, mc_emergency_sp));
        DEFINE(PACA_IN_MCE, offsetof(struct paca_struct, in_mce));
#endif
        DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
        DEFINE(PACAKEXECSTATE, offsetof(struct paca_struct, kexec_state));
        DEFINE(PACA_DSCR_DEFAULT, offsetof(struct paca_struct, dscr_default));
        DEFINE(ACCOUNT_STARTTIME,
               offsetof(struct paca_struct, accounting.starttime));
        DEFINE(ACCOUNT_STARTTIME_USER,
               offsetof(struct paca_struct, accounting.starttime_user));
        DEFINE(ACCOUNT_USER_TIME,
               offsetof(struct paca_struct, accounting.user_time));
        DEFINE(ACCOUNT_SYSTEM_TIME,
               offsetof(struct paca_struct, accounting.system_time));
        DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
        DEFINE(PACA_NAPSTATELOST, offsetof(struct paca_struct, nap_state_lost));
        DEFINE(PACA_SPRG_VDSO, offsetof(struct paca_struct, sprg_vdso));
#else /* CONFIG_PPC64 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
        DEFINE(ACCOUNT_STARTTIME,
               offsetof(struct thread_info, accounting.starttime));
        DEFINE(ACCOUNT_STARTTIME_USER,
               offsetof(struct thread_info, accounting.starttime_user));
        DEFINE(ACCOUNT_USER_TIME,
               offsetof(struct thread_info, accounting.user_time));
        DEFINE(ACCOUNT_SYSTEM_TIME,
               offsetof(struct thread_info, accounting.system_time));
#endif
#endif /* CONFIG_PPC64 */
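        /*
         * Usage note (illustrative, not from the original file): on 64-bit
         * kernels r13 holds the per-CPU paca pointer, so low-level assembly
         * typically reaches the fields above with forms such as
         * "ld r1,PACAKSAVE(r13)" or "ld r2,PACATOC(r13)".
         */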

        /* RTAS */
        DEFINE(RTASBASE, offsetof(struct rtas_t, base));
        DEFINE(RTASENTRY, offsetof(struct rtas_t, entry));

        /* Interrupt register frame */
        DEFINE(INT_FRAME_SIZE, STACK_INT_FRAME_SIZE);
        DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
#ifdef CONFIG_PPC64
        /* Create extra stack space for SRR0 and SRR1 when calling prom/rtas. */
        DEFINE(PROM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
        DEFINE(RTAS_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
#endif /* CONFIG_PPC64 */
        DEFINE(GPR0, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[0]));
        DEFINE(GPR1, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[1]));
        DEFINE(GPR2, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[2]));
        DEFINE(GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[3]));
        DEFINE(GPR4, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[4]));
        DEFINE(GPR5, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[5]));
        DEFINE(GPR6, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[6]));
        DEFINE(GPR7, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[7]));
        DEFINE(GPR8, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[8]));
        DEFINE(GPR9, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[9]));
        DEFINE(GPR10, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[10]));
        DEFINE(GPR11, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[11]));
        DEFINE(GPR12, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[12]));
        DEFINE(GPR13, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[13]));
#ifndef CONFIG_PPC64
        DEFINE(GPR14, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[14]));
#endif /* CONFIG_PPC64 */
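        /*
         * Usage note (illustrative, not from the original file): pt_regs sits
         * just above the minimal stack frame, so the exception entry/exit
         * assembly saves and restores registers relative to the frame pointer,
         * e.g. accesses of the form "std r3,GPR3(r1)", instead of open-coding
         * STACK_FRAME_OVERHEAD plus a structure offset.
         */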
        /*
         * Note: these symbols include _ because they overlap with special
         * register names
         */
        DEFINE(_NIP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, nip));
        DEFINE(_MSR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, msr));
        DEFINE(_CTR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ctr));
        DEFINE(_LINK, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, link));
        DEFINE(_CCR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ccr));
        DEFINE(_XER, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, xer));
        DEFINE(_DAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar));
        DEFINE(_DSISR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
        DEFINE(ORIG_GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, orig_gpr3));
        DEFINE(RESULT, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, result));
        DEFINE(_TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap));
#ifndef CONFIG_PPC64
        /*
         * The PowerPC 400-class & Book-E processors have neither the DAR
         * nor the DSISR SPRs. Hence, we overload them to hold the similar
         * DEAR and ESR SPRs for such processors. For critical interrupts
         * we use them to hold SRR0 and SRR1.
         */
        DEFINE(_DEAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar));
        DEFINE(_ESR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
#else /* CONFIG_PPC64 */
        DEFINE(SOFTE, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, softe));

        /* These _only_ to be used with {PROM,RTAS}_FRAME_SIZE!!! */
        DEFINE(_SRR0, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs));
        DEFINE(_SRR1, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)+8);
#endif /* CONFIG_PPC64 */

#if defined(CONFIG_PPC32)
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
        DEFINE(EXC_LVL_SIZE, STACK_EXC_LVL_FRAME_SIZE);
        DEFINE(MAS0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0));
        /* we overload MMUCR for 44x on MAS0 since they are mutually exclusive */
        DEFINE(MMUCR, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas0));
        DEFINE(MAS1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas1));
        DEFINE(MAS2, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas2));
        DEFINE(MAS3, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas3));
        DEFINE(MAS6, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas6));
        DEFINE(MAS7, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, mas7));
        DEFINE(_SRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr0));
        DEFINE(_SRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, srr1));
        DEFINE(_CSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr0));
        DEFINE(_CSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, csrr1));
        DEFINE(_DSRR0, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr0));
        DEFINE(_DSRR1, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, dsrr1));
        DEFINE(SAVED_KSP_LIMIT, STACK_INT_FRAME_SIZE+offsetof(struct exception_regs, saved_ksp_limit));
#endif
#endif

#ifndef CONFIG_PPC64
        DEFINE(MM_PGD, offsetof(struct mm_struct, pgd));
#endif /* ! CONFIG_PPC64 */

        /* About the CPU features table */
        DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features));
        DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup));
        DEFINE(CPU_SPEC_RESTORE, offsetof(struct cpu_spec, cpu_restore));

        DEFINE(pbe_address, offsetof(struct pbe, address));
        DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
        DEFINE(pbe_next, offsetof(struct pbe, next));

#ifndef CONFIG_PPC64
        DEFINE(TASK_SIZE, TASK_SIZE);
        DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28);
#endif /* ! CONFIG_PPC64 */

        /* datapage offsets for use by vdso */
        DEFINE(CFG_TB_ORIG_STAMP, offsetof(struct vdso_data, tb_orig_stamp));
        DEFINE(CFG_TB_TICKS_PER_SEC, offsetof(struct vdso_data, tb_ticks_per_sec));
        DEFINE(CFG_TB_TO_XS, offsetof(struct vdso_data, tb_to_xs));
        DEFINE(CFG_TB_UPDATE_COUNT, offsetof(struct vdso_data, tb_update_count));
        DEFINE(CFG_TZ_MINUTEWEST, offsetof(struct vdso_data, tz_minuteswest));
        DEFINE(CFG_TZ_DSTTIME, offsetof(struct vdso_data, tz_dsttime));
        DEFINE(CFG_SYSCALL_MAP32, offsetof(struct vdso_data, syscall_map_32));
        DEFINE(WTOM_CLOCK_SEC, offsetof(struct vdso_data, wtom_clock_sec));
        DEFINE(WTOM_CLOCK_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
        DEFINE(STAMP_XTIME, offsetof(struct vdso_data, stamp_xtime));
        DEFINE(STAMP_SEC_FRAC, offsetof(struct vdso_data, stamp_sec_fraction));
        DEFINE(CFG_ICACHE_BLOCKSZ, offsetof(struct vdso_data, icache_block_size));
        DEFINE(CFG_DCACHE_BLOCKSZ, offsetof(struct vdso_data, dcache_block_size));
        DEFINE(CFG_ICACHE_LOGBLOCKSZ, offsetof(struct vdso_data, icache_log_block_size));
        DEFINE(CFG_DCACHE_LOGBLOCKSZ, offsetof(struct vdso_data, dcache_log_block_size));
#ifdef CONFIG_PPC64
        DEFINE(CFG_SYSCALL_MAP64, offsetof(struct vdso_data, syscall_map_64));
        DEFINE(TVAL64_TV_SEC, offsetof(struct timeval, tv_sec));
        DEFINE(TVAL64_TV_USEC, offsetof(struct timeval, tv_usec));
        DEFINE(TVAL32_TV_SEC, offsetof(struct compat_timeval, tv_sec));
        DEFINE(TVAL32_TV_USEC, offsetof(struct compat_timeval, tv_usec));
        DEFINE(TSPC64_TV_SEC, offsetof(struct timespec, tv_sec));
        DEFINE(TSPC64_TV_NSEC, offsetof(struct timespec, tv_nsec));
        DEFINE(TSPC32_TV_SEC, offsetof(struct compat_timespec, tv_sec));
        DEFINE(TSPC32_TV_NSEC, offsetof(struct compat_timespec, tv_nsec));
#else
        DEFINE(TVAL32_TV_SEC, offsetof(struct timeval, tv_sec));
        DEFINE(TVAL32_TV_USEC, offsetof(struct timeval, tv_usec));
        DEFINE(TSPC32_TV_SEC, offsetof(struct timespec, tv_sec));
        DEFINE(TSPC32_TV_NSEC, offsetof(struct timespec, tv_nsec));
#endif
        /* timeval/timezone offsets for use by vdso */
        DEFINE(TZONE_TZ_MINWEST, offsetof(struct timezone, tz_minuteswest));
        DEFINE(TZONE_TZ_DSTTIME, offsetof(struct timezone, tz_dsttime));

        /* Other bits used by the vdso */
        DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
        DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
        DEFINE(NSEC_PER_SEC, NSEC_PER_SEC);
        DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
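        /*
         * Usage note (illustrative, not from the original file): the VDSO
         * assembly under arch/powerpc/kernel/vdso32/ and vdso64/ reads the
         * shared vdso_data page through the CFG_, STAMP_ and WTOM_ offsets
         * above rather than relying on the C structure layout directly.
         */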

#ifdef CONFIG_BUG
        DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry));
#endif

#ifdef MAX_PGD_TABLE_SIZE
        DEFINE(PGD_TABLE_SIZE, MAX_PGD_TABLE_SIZE);
#else
        DEFINE(PGD_TABLE_SIZE, PGD_TABLE_SIZE);
#endif
        DEFINE(PTE_SIZE, sizeof(pte_t));

#ifdef CONFIG_KVM
        DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
        DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
        DEFINE(VCPU_GUEST_PID, offsetof(struct kvm_vcpu, arch.pid));
        DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
        DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave));
        DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fp.fpr));
#ifdef CONFIG_ALTIVEC
        DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr.vr));
#endif
        DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
        DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
        DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
#ifdef CONFIG_PPC_BOOK3S
        DEFINE(VCPU_TAR, offsetof(struct kvm_vcpu, arch.tar));
#endif
        DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
        DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.shregs.msr));
        DEFINE(VCPU_SRR0, offsetof(struct kvm_vcpu, arch.shregs.srr0));
        DEFINE(VCPU_SRR1, offsetof(struct kvm_vcpu, arch.shregs.srr1));
        DEFINE(VCPU_SPRG0, offsetof(struct kvm_vcpu, arch.shregs.sprg0));
        DEFINE(VCPU_SPRG1, offsetof(struct kvm_vcpu, arch.shregs.sprg1));
        DEFINE(VCPU_SPRG2, offsetof(struct kvm_vcpu, arch.shregs.sprg2));
        DEFINE(VCPU_SPRG3, offsetof(struct kvm_vcpu, arch.shregs.sprg3));
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
        DEFINE(VCPU_TB_RMENTRY, offsetof(struct kvm_vcpu, arch.rm_entry));
        DEFINE(VCPU_TB_RMINTR, offsetof(struct kvm_vcpu, arch.rm_intr));
        DEFINE(VCPU_TB_RMEXIT, offsetof(struct kvm_vcpu, arch.rm_exit));
        DEFINE(VCPU_TB_GUEST, offsetof(struct kvm_vcpu, arch.guest_time));
        DEFINE(VCPU_TB_CEDE, offsetof(struct kvm_vcpu, arch.cede_time));
        DEFINE(VCPU_CUR_ACTIVITY, offsetof(struct kvm_vcpu, arch.cur_activity));
        DEFINE(VCPU_ACTIVITY_START, offsetof(struct kvm_vcpu, arch.cur_tb_start));
        DEFINE(TAS_SEQCOUNT, offsetof(struct kvmhv_tb_accumulator, seqcount));
        DEFINE(TAS_TOTAL, offsetof(struct kvmhv_tb_accumulator, tb_total));
        DEFINE(TAS_MIN, offsetof(struct kvmhv_tb_accumulator, tb_min));
        DEFINE(TAS_MAX, offsetof(struct kvmhv_tb_accumulator, tb_max));
#endif
        DEFINE(VCPU_SHARED_SPRG3, offsetof(struct kvm_vcpu_arch_shared, sprg3));
        DEFINE(VCPU_SHARED_SPRG4, offsetof(struct kvm_vcpu_arch_shared, sprg4));
        DEFINE(VCPU_SHARED_SPRG5, offsetof(struct kvm_vcpu_arch_shared, sprg5));
        DEFINE(VCPU_SHARED_SPRG6, offsetof(struct kvm_vcpu_arch_shared, sprg6));
        DEFINE(VCPU_SHARED_SPRG7, offsetof(struct kvm_vcpu_arch_shared, sprg7));
        DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid));
        DEFINE(VCPU_SHADOW_PID1, offsetof(struct kvm_vcpu, arch.shadow_pid1));
        DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared));
        DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr));
        DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
        DEFINE(VCPU_SHAREDBE, offsetof(struct kvm_vcpu, arch.shared_big_endian));
#endif

        DEFINE(VCPU_SHARED_MAS0, offsetof(struct kvm_vcpu_arch_shared, mas0));
        DEFINE(VCPU_SHARED_MAS1, offsetof(struct kvm_vcpu_arch_shared, mas1));
        DEFINE(VCPU_SHARED_MAS2, offsetof(struct kvm_vcpu_arch_shared, mas2));
        DEFINE(VCPU_SHARED_MAS7_3, offsetof(struct kvm_vcpu_arch_shared, mas7_3));
        DEFINE(VCPU_SHARED_MAS4, offsetof(struct kvm_vcpu_arch_shared, mas4));
        DEFINE(VCPU_SHARED_MAS6, offsetof(struct kvm_vcpu_arch_shared, mas6));

        DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
        DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid));

        /* book3s */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        DEFINE(KVM_TLB_SETS, offsetof(struct kvm, arch.tlb_sets));
        DEFINE(KVM_SDR1, offsetof(struct kvm, arch.sdr1));
        DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid));
        DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr));
        DEFINE(KVM_HOST_SDR1, offsetof(struct kvm, arch.host_sdr1));
        DEFINE(KVM_NEED_FLUSH, offsetof(struct kvm, arch.need_tlb_flush.bits));
        DEFINE(KVM_ENABLED_HCALLS, offsetof(struct kvm, arch.enabled_hcalls));
        DEFINE(KVM_VRMA_SLB_V, offsetof(struct kvm, arch.vrma_slb_v));
        DEFINE(VCPU_DSISR, offsetof(struct kvm_vcpu, arch.shregs.dsisr));
        DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
        DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr));
        DEFINE(VCPU_VPA_DIRTY, offsetof(struct kvm_vcpu, arch.vpa.dirty));
        DEFINE(VCPU_HEIR, offsetof(struct kvm_vcpu, arch.emul_inst));
        DEFINE(VCPU_CPU, offsetof(struct kvm_vcpu, cpu));
        DEFINE(VCPU_THREAD_CPU, offsetof(struct kvm_vcpu, arch.thread_cpu));
#endif
#ifdef CONFIG_PPC_BOOK3S
        DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr));
        DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr));
        DEFINE(VCPU_IC, offsetof(struct kvm_vcpu, arch.ic));
        DEFINE(VCPU_DSCR, offsetof(struct kvm_vcpu, arch.dscr));
        DEFINE(VCPU_AMR, offsetof(struct kvm_vcpu, arch.amr));
        DEFINE(VCPU_UAMOR, offsetof(struct kvm_vcpu, arch.uamor));
        DEFINE(VCPU_IAMR, offsetof(struct kvm_vcpu, arch.iamr));
        DEFINE(VCPU_CTRL, offsetof(struct kvm_vcpu, arch.ctrl));
        DEFINE(VCPU_DABR, offsetof(struct kvm_vcpu, arch.dabr));
        DEFINE(VCPU_DABRX, offsetof(struct kvm_vcpu, arch.dabrx));
        DEFINE(VCPU_DAWR, offsetof(struct kvm_vcpu, arch.dawr));
        DEFINE(VCPU_DAWRX, offsetof(struct kvm_vcpu, arch.dawrx));
        DEFINE(VCPU_CIABR, offsetof(struct kvm_vcpu, arch.ciabr));
        DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
        DEFINE(VCPU_DEC, offsetof(struct kvm_vcpu, arch.dec));
        DEFINE(VCPU_DEC_EXPIRES, offsetof(struct kvm_vcpu, arch.dec_expires));
        DEFINE(VCPU_PENDING_EXC, offsetof(struct kvm_vcpu, arch.pending_exceptions));
        DEFINE(VCPU_CEDED, offsetof(struct kvm_vcpu, arch.ceded));
        DEFINE(VCPU_PRODDED, offsetof(struct kvm_vcpu, arch.prodded));
        DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr));
        DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc));
        DEFINE(VCPU_SPMC, offsetof(struct kvm_vcpu, arch.spmc));
        DEFINE(VCPU_SIAR, offsetof(struct kvm_vcpu, arch.siar));
        DEFINE(VCPU_SDAR, offsetof(struct kvm_vcpu, arch.sdar));
        DEFINE(VCPU_SIER, offsetof(struct kvm_vcpu, arch.sier));
        DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb));
        DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max));
        DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));
        DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr));
        DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, arch.fault_dar));
        DEFINE(VCPU_INTR_MSR, offsetof(struct kvm_vcpu, arch.intr_msr));
        DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
        DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap));
        DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar));
        DEFINE(VCPU_PPR, offsetof(struct kvm_vcpu, arch.ppr));
        DEFINE(VCPU_FSCR, offsetof(struct kvm_vcpu, arch.fscr));
        DEFINE(VCPU_PSPB, offsetof(struct kvm_vcpu, arch.pspb));
        DEFINE(VCPU_EBBHR, offsetof(struct kvm_vcpu, arch.ebbhr));
        DEFINE(VCPU_EBBRR, offsetof(struct kvm_vcpu, arch.ebbrr));
        DEFINE(VCPU_BESCR, offsetof(struct kvm_vcpu, arch.bescr));
        DEFINE(VCPU_CSIGR, offsetof(struct kvm_vcpu, arch.csigr));
        DEFINE(VCPU_TACR, offsetof(struct kvm_vcpu, arch.tacr));
        DEFINE(VCPU_TCSCR, offsetof(struct kvm_vcpu, arch.tcscr));
        DEFINE(VCPU_ACOP, offsetof(struct kvm_vcpu, arch.acop));
        DEFINE(VCPU_WORT, offsetof(struct kvm_vcpu, arch.wort));
        DEFINE(VCPU_TID, offsetof(struct kvm_vcpu, arch.tid));
        DEFINE(VCPU_PSSCR, offsetof(struct kvm_vcpu, arch.psscr));
        DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_map));
        DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest));
        DEFINE(VCORE_NAPPING_THREADS, offsetof(struct kvmppc_vcore, napping_threads));
        DEFINE(VCORE_KVM, offsetof(struct kvmppc_vcore, kvm));
        DEFINE(VCORE_TB_OFFSET, offsetof(struct kvmppc_vcore, tb_offset));
        DEFINE(VCORE_LPCR, offsetof(struct kvmppc_vcore, lpcr));
        DEFINE(VCORE_PCR, offsetof(struct kvmppc_vcore, pcr));
        DEFINE(VCORE_DPDES, offsetof(struct kvmppc_vcore, dpdes));
        DEFINE(VCORE_VTB, offsetof(struct kvmppc_vcore, vtb));
        DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige));
        DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv));
        DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb));
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        DEFINE(VCPU_TFHAR, offsetof(struct kvm_vcpu, arch.tfhar));
        DEFINE(VCPU_TFIAR, offsetof(struct kvm_vcpu, arch.tfiar));
        DEFINE(VCPU_TEXASR, offsetof(struct kvm_vcpu, arch.texasr));
        DEFINE(VCPU_GPR_TM, offsetof(struct kvm_vcpu, arch.gpr_tm));
        DEFINE(VCPU_FPRS_TM, offsetof(struct kvm_vcpu, arch.fp_tm.fpr));
        DEFINE(VCPU_VRS_TM, offsetof(struct kvm_vcpu, arch.vr_tm.vr));
        DEFINE(VCPU_VRSAVE_TM, offsetof(struct kvm_vcpu, arch.vrsave_tm));
        DEFINE(VCPU_CR_TM, offsetof(struct kvm_vcpu, arch.cr_tm));
        DEFINE(VCPU_XER_TM, offsetof(struct kvm_vcpu, arch.xer_tm));
        DEFINE(VCPU_LR_TM, offsetof(struct kvm_vcpu, arch.lr_tm));
        DEFINE(VCPU_CTR_TM, offsetof(struct kvm_vcpu, arch.ctr_tm));
        DEFINE(VCPU_AMR_TM, offsetof(struct kvm_vcpu, arch.amr_tm));
        DEFINE(VCPU_PPR_TM, offsetof(struct kvm_vcpu, arch.ppr_tm));
        DEFINE(VCPU_DSCR_TM, offsetof(struct kvm_vcpu, arch.dscr_tm));
        DEFINE(VCPU_TAR_TM, offsetof(struct kvm_vcpu, arch.tar_tm));
#endif

#ifdef CONFIG_PPC_BOOK3S_64
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
        DEFINE(PACA_SVCPU, offsetof(struct paca_struct, shadow_vcpu));
# define SVCPU_FIELD(x, f)      DEFINE(x, offsetof(struct paca_struct, shadow_vcpu.f))
#else
# define SVCPU_FIELD(x, f)
#endif
# define HSTATE_FIELD(x, f)     DEFINE(x, offsetof(struct paca_struct, kvm_hstate.f))
#else   /* 32-bit */
# define SVCPU_FIELD(x, f)      DEFINE(x, offsetof(struct kvmppc_book3s_shadow_vcpu, f))
# define HSTATE_FIELD(x, f)     DEFINE(x, offsetof(struct kvmppc_book3s_shadow_vcpu, hstate.f))
#endif
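        /*
         * Usage note (illustrative, not from the original file): on Book3S
         * 64-bit the shadow-vcpu and host-state fields live inside the paca,
         * so the KVM low-level handlers reach them via r13, with accesses
         * along the lines of "ld r1,HSTATE_HOST_R1(r13)"; on 32-bit the same
         * symbol names resolve to offsets within kvmppc_book3s_shadow_vcpu.
         */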
Paul Mackerras | 3c42bf8 | 2011-06-29 00:20:58 +0000 | [diff] [blame] | 599 | |
| 600 | SVCPU_FIELD(SVCPU_CR, cr); |
| 601 | SVCPU_FIELD(SVCPU_XER, xer); |
| 602 | SVCPU_FIELD(SVCPU_CTR, ctr); |
| 603 | SVCPU_FIELD(SVCPU_LR, lr); |
| 604 | SVCPU_FIELD(SVCPU_PC, pc); |
| 605 | SVCPU_FIELD(SVCPU_R0, gpr[0]); |
| 606 | SVCPU_FIELD(SVCPU_R1, gpr[1]); |
| 607 | SVCPU_FIELD(SVCPU_R2, gpr[2]); |
| 608 | SVCPU_FIELD(SVCPU_R3, gpr[3]); |
| 609 | SVCPU_FIELD(SVCPU_R4, gpr[4]); |
| 610 | SVCPU_FIELD(SVCPU_R5, gpr[5]); |
| 611 | SVCPU_FIELD(SVCPU_R6, gpr[6]); |
| 612 | SVCPU_FIELD(SVCPU_R7, gpr[7]); |
| 613 | SVCPU_FIELD(SVCPU_R8, gpr[8]); |
| 614 | SVCPU_FIELD(SVCPU_R9, gpr[9]); |
| 615 | SVCPU_FIELD(SVCPU_R10, gpr[10]); |
| 616 | SVCPU_FIELD(SVCPU_R11, gpr[11]); |
| 617 | SVCPU_FIELD(SVCPU_R12, gpr[12]); |
| 618 | SVCPU_FIELD(SVCPU_R13, gpr[13]); |
| 619 | SVCPU_FIELD(SVCPU_FAULT_DSISR, fault_dsisr); |
| 620 | SVCPU_FIELD(SVCPU_FAULT_DAR, fault_dar); |
| 621 | SVCPU_FIELD(SVCPU_LAST_INST, last_inst); |
| 622 | SVCPU_FIELD(SVCPU_SHADOW_SRR1, shadow_srr1); |
| 623 | #ifdef CONFIG_PPC_BOOK3S_32 |
| 624 | SVCPU_FIELD(SVCPU_SR, sr); |
| 625 | #endif |
| 626 | #ifdef CONFIG_PPC64 |
| 627 | SVCPU_FIELD(SVCPU_SLB, slb); |
| 628 | SVCPU_FIELD(SVCPU_SLB_MAX, slb_max); |
Alexander Graf | 616dff8 | 2014-04-29 16:48:44 +0200 | [diff] [blame] | 629 | SVCPU_FIELD(SVCPU_SHADOW_FSCR, shadow_fscr); |
Paul Mackerras | 3c42bf8 | 2011-06-29 00:20:58 +0000 | [diff] [blame] | 630 | #endif |
| 631 | |
| 632 | HSTATE_FIELD(HSTATE_HOST_R1, host_r1); |
| 633 | HSTATE_FIELD(HSTATE_HOST_R2, host_r2); |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 634 | HSTATE_FIELD(HSTATE_HOST_MSR, host_msr); |
Paul Mackerras | 3c42bf8 | 2011-06-29 00:20:58 +0000 | [diff] [blame] | 635 | HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler); |
| 636 | HSTATE_FIELD(HSTATE_SCRATCH0, scratch0); |
| 637 | HSTATE_FIELD(HSTATE_SCRATCH1, scratch1); |
Aneesh Kumar K.V | 36e7bb3 | 2013-11-11 19:29:47 +0530 | [diff] [blame] | 638 | HSTATE_FIELD(HSTATE_SCRATCH2, scratch2); |
Paul Mackerras | 3c42bf8 | 2011-06-29 00:20:58 +0000 | [diff] [blame] | 639 | HSTATE_FIELD(HSTATE_IN_GUEST, in_guest); |
Paul Mackerras | 0214394 | 2011-07-23 17:41:44 +1000 | [diff] [blame] | 640 | HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5); |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 641 | HSTATE_FIELD(HSTATE_NAPPING, napping); |
Paul Mackerras | 3c42bf8 | 2011-06-29 00:20:58 +0000 | [diff] [blame] | 642 | |
Aneesh Kumar K.V | 9975f5e | 2013-10-07 22:17:52 +0530 | [diff] [blame] | 643 | #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE |
Paul Mackerras | 7657f40 | 2012-03-05 21:42:25 +0000 | [diff] [blame] | 644 | HSTATE_FIELD(HSTATE_HWTHREAD_REQ, hwthread_req); |
| 645 | HSTATE_FIELD(HSTATE_HWTHREAD_STATE, hwthread_state); |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 646 | HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu); |
Paul Mackerras | 371fefd | 2011-06-29 00:23:08 +0000 | [diff] [blame] | 647 | HSTATE_FIELD(HSTATE_KVM_VCORE, kvm_vcore); |
| 648 | HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys); |
Benjamin Herrenschmidt | 54695c3 | 2013-04-17 20:30:50 +0000 | [diff] [blame] | 649 | HSTATE_FIELD(HSTATE_SAVED_XIRR, saved_xirr); |
| 650 | HSTATE_FIELD(HSTATE_HOST_IPI, host_ipi); |
Paul Mackerras | e0b7ec0 | 2014-01-08 21:25:20 +1100 | [diff] [blame] | 651 | HSTATE_FIELD(HSTATE_PTID, ptid); |
Michael Ellerman | 9a4fc4e | 2014-07-10 19:34:31 +1000 | [diff] [blame] | 652 | HSTATE_FIELD(HSTATE_MMCR0, host_mmcr[0]); |
| 653 | HSTATE_FIELD(HSTATE_MMCR1, host_mmcr[1]); |
| 654 | HSTATE_FIELD(HSTATE_MMCRA, host_mmcr[2]); |
| 655 | HSTATE_FIELD(HSTATE_SIAR, host_mmcr[3]); |
| 656 | HSTATE_FIELD(HSTATE_SDAR, host_mmcr[4]); |
| 657 | HSTATE_FIELD(HSTATE_MMCR2, host_mmcr[5]); |
| 658 | HSTATE_FIELD(HSTATE_SIER, host_mmcr[6]); |
| 659 | HSTATE_FIELD(HSTATE_PMC1, host_pmc[0]); |
| 660 | HSTATE_FIELD(HSTATE_PMC2, host_pmc[1]); |
| 661 | HSTATE_FIELD(HSTATE_PMC3, host_pmc[2]); |
| 662 | HSTATE_FIELD(HSTATE_PMC4, host_pmc[3]); |
| 663 | HSTATE_FIELD(HSTATE_PMC5, host_pmc[4]); |
| 664 | HSTATE_FIELD(HSTATE_PMC6, host_pmc[5]); |
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 665 | HSTATE_FIELD(HSTATE_PURR, host_purr); |
| 666 | HSTATE_FIELD(HSTATE_SPURR, host_spurr); |
| 667 | HSTATE_FIELD(HSTATE_DSCR, host_dscr); |
| 668 | HSTATE_FIELD(HSTATE_DABR, dabr); |
| 669 | HSTATE_FIELD(HSTATE_DECEXP, dec_expires); |
Paul Mackerras | b4deba5 | 2015-07-02 20:38:16 +1000 | [diff] [blame] | 670 | HSTATE_FIELD(HSTATE_SPLIT_MODE, kvm_split_mode); |
Paul Mackerras | 19ccb76 | 2011-07-23 17:42:46 +1000 | [diff] [blame] | 671 | DEFINE(IPI_PRIORITY, IPI_PRIORITY); |
Paul Mackerras | b4deba5 | 2015-07-02 20:38:16 +1000 | [diff] [blame] | 672 | DEFINE(KVM_SPLIT_RPR, offsetof(struct kvm_split_mode, rpr)); |
| 673 | DEFINE(KVM_SPLIT_PMMAR, offsetof(struct kvm_split_mode, pmmar)); |
| 674 | DEFINE(KVM_SPLIT_LDBAR, offsetof(struct kvm_split_mode, ldbar)); |
Paul Mackerras | b4deba5 | 2015-07-02 20:38:16 +1000 | [diff] [blame] | 675 | DEFINE(KVM_SPLIT_DO_NAP, offsetof(struct kvm_split_mode, do_nap)); |
| 676 | DEFINE(KVM_SPLIT_NAPPED, offsetof(struct kvm_split_mode, napped)); |
Aneesh Kumar K.V | 9975f5e | 2013-10-07 22:17:52 +0530 | [diff] [blame] | 677 | #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ |
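/*
 * The offsets in the HV-only block above are consumed by the real-mode
 * HV entry/exit assembly (e.g. book3s_hv_rmhandlers.S): coordinating the
 * secondary hardware threads of a core, saving/restoring the host PMU
 * state (host_mmcr/host_pmc), and, via the KVM_SPLIT_* fields, driving
 * dynamic micro-threading (core split mode) on POWER8.
 */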
Paul Mackerras | de56a94 | 2011-06-29 00:21:34 +0000 | [diff] [blame] | 678 | |
Paul Mackerras | 0acb911 | 2013-02-04 18:10:51 +0000 | [diff] [blame] | 679 | #ifdef CONFIG_PPC_BOOK3S_64 |
| 680 | HSTATE_FIELD(HSTATE_CFAR, cfar); |
Paul Mackerras | 4b8473c | 2013-09-20 14:52:39 +1000 | [diff] [blame] | 681 | HSTATE_FIELD(HSTATE_PPR, ppr); |
Alexander Graf | 616dff8 | 2014-04-29 16:48:44 +0200 | [diff] [blame] | 682 | HSTATE_FIELD(HSTATE_HOST_FSCR, host_fscr); |
Paul Mackerras | 0acb911 | 2013-02-04 18:10:51 +0000 | [diff] [blame] | 683 | #endif /* CONFIG_PPC_BOOK3S_64 */ |
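/*
 * On Book3S-64 the hstate additionally carries CFAR, PPR and FSCR slots
 * so those SPRs can be preserved across guest entry and exit.
 */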
| 684 | |
Paul Mackerras | 3c42bf8 | 2011-06-29 00:20:58 +0000 | [diff] [blame] | 685 | #else /* CONFIG_PPC_BOOK3S */ |
Alexander Graf | 7e57cba | 2010-01-08 02:58:03 +0100 | [diff] [blame] | 686 | DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr)); |
| 687 | DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer)); |
Alexander Graf | 0604675 | 2010-04-16 00:11:44 +0200 | [diff] [blame] | 688 | DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr)); |
| 689 | DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr)); |
| 690 | DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc)); |
Bharat Bhushan | 99e99d1 | 2014-07-21 11:23:26 +0530 | [diff] [blame] | 691 | DEFINE(VCPU_SPRG9, offsetof(struct kvm_vcpu, arch.sprg9)); |
Alexander Graf | 0604675 | 2010-04-16 00:11:44 +0200 | [diff] [blame] | 692 | DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst)); |
| 693 | DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear)); |
| 694 | DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr)); |
Bharat Bhushan | 15b708b | 2013-02-27 18:13:10 +0000 | [diff] [blame] | 695 | DEFINE(VCPU_CRIT_SAVE, offsetof(struct kvm_vcpu, arch.crit_save)); |
Alexander Graf | 00c3a37 | 2010-04-16 00:11:42 +0200 | [diff] [blame] | 696 | #endif /* CONFIG_PPC_BOOK3S */ |
Paul Mackerras | 3c42bf8 | 2011-06-29 00:20:58 +0000 | [diff] [blame] | 697 | #endif /* CONFIG_KVM */ |
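/*
 * For the non-Book3S (BookE) targets handled in the #else branch above,
 * the guest register state lives directly in kvm_vcpu.arch, so plain
 * VCPU_* offsets are emitted for the BookE interrupt assembly (e.g.
 * booke_interrupts.S).
 */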
Alexander Graf | d17051c | 2010-07-29 14:47:57 +0200 | [diff] [blame] | 698 | |
| 699 | #ifdef CONFIG_KVM_GUEST |
| 700 | DEFINE(KVM_MAGIC_SCRATCH1, offsetof(struct kvm_vcpu_arch_shared, |
| 701 | scratch1)); |
| 702 | DEFINE(KVM_MAGIC_SCRATCH2, offsetof(struct kvm_vcpu_arch_shared, |
| 703 | scratch2)); |
| 704 | DEFINE(KVM_MAGIC_SCRATCH3, offsetof(struct kvm_vcpu_arch_shared, |
| 705 | scratch3)); |
| 706 | DEFINE(KVM_MAGIC_INT, offsetof(struct kvm_vcpu_arch_shared, |
| 707 | int_pending)); |
| 708 | DEFINE(KVM_MAGIC_MSR, offsetof(struct kvm_vcpu_arch_shared, msr)); |
| 709 | DEFINE(KVM_MAGIC_CRITICAL, offsetof(struct kvm_vcpu_arch_shared, |
| 710 | critical)); |
Alexander Graf | cbe487f | 2010-08-03 10:39:35 +0200 | [diff] [blame] | 711 | DEFINE(KVM_MAGIC_SR, offsetof(struct kvm_vcpu_arch_shared, sr)); |
Alexander Graf | d17051c | 2010-07-29 14:47:57 +0200 | [diff] [blame] | 712 | #endif |
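/*
 * KVM_MAGIC_* are offsets into struct kvm_vcpu_arch_shared, which a
 * paravirtualized guest maps at its "magic page"; the patched-in guest
 * code (e.g. kvm_emul.S) uses them to access the shared MSR, scratch and
 * critical-section fields without trapping to the hypervisor.
 */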
| 713 | |
Ilya Yanok | ca9153a | 2008-12-11 04:55:41 +0300 | [diff] [blame] | 714 | #ifdef CONFIG_44x |
| 715 | DEFINE(PGD_T_LOG2, PGD_T_LOG2); |
| 716 | DEFINE(PTE_T_LOG2, PTE_T_LOG2); |
| 717 | #endif |
Kumar Gala | 55fd766 | 2009-10-16 18:48:40 -0500 | [diff] [blame] | 718 | #ifdef CONFIG_PPC_FSL_BOOK3E |
Kumar Gala | 78f6223 | 2010-05-13 14:38:21 -0500 | [diff] [blame] | 719 | DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam)); |
| 720 | DEFINE(TLBCAM_MAS0, offsetof(struct tlbcam, MAS0)); |
| 721 | DEFINE(TLBCAM_MAS1, offsetof(struct tlbcam, MAS1)); |
| 722 | DEFINE(TLBCAM_MAS2, offsetof(struct tlbcam, MAS2)); |
| 723 | DEFINE(TLBCAM_MAS3, offsetof(struct tlbcam, MAS3)); |
| 724 | DEFINE(TLBCAM_MAS7, offsetof(struct tlbcam, MAS7)); |
| 725 | #endif |
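/*
 * The tlbcam size/offsets let the Freescale BookE TLB code reload the
 * pinned TLB1 (CAM) entries from assembly (e.g. loadcam_entry) by
 * writing the saved MAS0-MAS3/MAS7 values back to the MAS registers.
 */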
Hollis Blanchard | bbf45ba | 2008-04-16 23:28:09 -0500 | [diff] [blame] | 726 | |
Scott Wood | 4cd35f6 | 2011-06-14 18:34:31 -0500 | [diff] [blame] | 727 | #if defined(CONFIG_KVM) && defined(CONFIG_SPE) |
| 728 | DEFINE(VCPU_EVR, offsetof(struct kvm_vcpu, arch.evr[0])); |
| 729 | DEFINE(VCPU_ACC, offsetof(struct kvm_vcpu, arch.acc)); |
| 730 | DEFINE(VCPU_SPEFSCR, offsetof(struct kvm_vcpu, arch.spefscr)); |
| 731 | DEFINE(VCPU_HOST_SPEFSCR, offsetof(struct kvm_vcpu, arch.host_spefscr)); |
| 732 | #endif |
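/*
 * With SPE support these offsets are presumably used by the e500 guest
 * entry/exit code to save/restore the guest's SPE upper GPR halves
 * (evr), accumulator and SPEFSCR alongside the host SPEFSCR.
 */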
| 733 | |
Scott Wood | d30f6e4 | 2011-12-20 15:34:43 +0000 | [diff] [blame] | 734 | #ifdef CONFIG_KVM_BOOKE_HV |
| 735 | DEFINE(VCPU_HOST_MAS4, offsetof(struct kvm_vcpu, arch.host_mas4)); |
| 736 | DEFINE(VCPU_HOST_MAS6, offsetof(struct kvm_vcpu, arch.host_mas6)); |
Scott Wood | d30f6e4 | 2011-12-20 15:34:43 +0000 | [diff] [blame] | 737 | #endif |
| 738 | |
Hollis Blanchard | 73e75b4 | 2008-12-02 15:51:57 -0600 | [diff] [blame] | 739 | #ifdef CONFIG_KVM_EXIT_TIMING |
| 740 | DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu, |
| 741 | arch.timing_exit.tv32.tbu)); |
| 742 | DEFINE(VCPU_TIMING_EXIT_TBL, offsetof(struct kvm_vcpu, |
| 743 | arch.timing_exit.tv32.tbl)); |
| 744 | DEFINE(VCPU_TIMING_LAST_ENTER_TBU, offsetof(struct kvm_vcpu, |
| 745 | arch.timing_last_enter.tv32.tbu)); |
| 746 | DEFINE(VCPU_TIMING_LAST_ENTER_TBL, offsetof(struct kvm_vcpu, |
| 747 | arch.timing_last_enter.tv32.tbl)); |
| 748 | #endif |
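/*
 * The exit-timing offsets expose the 32-bit halves (tbu/tbl) of the
 * timebase snapshots so that exit accounting can be updated cheaply from
 * 32-bit assembly on every guest exit and re-entry.
 */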
| 749 | |
Shreyas B. Prabhu | 7cba160 | 2014-12-10 00:26:52 +0530 | [diff] [blame] | 750 | #ifdef CONFIG_PPC_POWERNV |
| 751 | DEFINE(PACA_CORE_IDLE_STATE_PTR, |
| 752 | offsetof(struct paca_struct, core_idle_state_ptr)); |
| 753 | DEFINE(PACA_THREAD_IDLE_STATE, |
| 754 | offsetof(struct paca_struct, thread_idle_state)); |
| 755 | DEFINE(PACA_THREAD_MASK, |
| 756 | offsetof(struct paca_struct, thread_mask)); |
Shreyas B. Prabhu | 77b54e9f | 2014-12-10 00:26:53 +0530 | [diff] [blame] | 757 | DEFINE(PACA_SUBCORE_SIBLING_MASK, |
| 758 | offsetof(struct paca_struct, subcore_sibling_mask)); |
Shreyas B. Prabhu | 7cba160 | 2014-12-10 00:26:52 +0530 | [diff] [blame] | 759 | #endif |
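/*
 * These PACA fields are shared with the POWER8 idle entry/exit assembly,
 * which uses them to track which threads of a core (and which subcore
 * siblings) are in a deep idle state and therefore when per-core state
 * must be saved or restored.
 */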
| 760 | |
Paul Mackerras | 66feed6 | 2015-03-28 14:21:12 +1100 | [diff] [blame] | 761 | DEFINE(PPC_DBELL_SERVER, PPC_DBELL_SERVER); |
| 762 | |
Christophe Leroy | f86ef74 | 2016-05-17 09:02:43 +0200 | [diff] [blame] | 763 | #ifdef CONFIG_PPC_8xx |
Scott Wood | 9f595fd | 2016-07-09 03:22:39 -0500 | [diff] [blame] | 764 | DEFINE(VIRT_IMMR_BASE, (u64)__fix_to_virt(FIX_IMMR_BASE)); |
Christophe Leroy | f86ef74 | 2016-05-17 09:02:43 +0200 | [diff] [blame] | 765 | #endif |
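/*
 * VIRT_IMMR_BASE is the fixmap virtual address of the 8xx IMMR block,
 * made available here so that early assembly (e.g. the head_8xx.S TLB
 * setup) can reference it before the C fixmap accessors are usable.
 */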
| 766 | |
Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 767 | return 0; |
| 768 | } |