/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * This file contains the power_save function for 6xx & 7xxx CPUs
 * rewritten in assembler
 *
 * Warning ! This code assumes that if your machine has a 750fx
 * it will have PLL 1 set to low speed mode (used during NAP/DOZE).
 * If this is not the case, some additional changes will have to
 * be done to check a runtime variable (a bit like powersave-nap).
 */
11
Paul Mackerras14cf11a2005-09-26 16:04:21 +100012#include <linux/threads.h>
Paul Mackerrasb3b8dc62005-10-10 22:20:10 +100013#include <asm/reg.h>
Paul Mackerras14cf11a2005-09-26 16:04:21 +100014#include <asm/page.h>
15#include <asm/cputable.h>
16#include <asm/thread_info.h>
17#include <asm/ppc_asm.h>
18#include <asm/asm-offsets.h>
Christophe Leroy2c86cd12018-07-05 16:25:01 +000019#include <asm/feature-fixups.h>
Paul Mackerras14cf11a2005-09-26 16:04:21 +100020
Paul Mackerras14cf11a2005-09-26 16:04:21 +100021 .text
22
23/*
24 * Init idle, called at early CPU setup time from head.S for each CPU
25 * Make sure no rest of NAP mode remains in HID0, save default
26 * values for some CPU specific registers. Called with r24
27 * containing CPU number and r3 reloc offset
28 */
29_GLOBAL(init_idle_6xx)
30BEGIN_FTR_SECTION
31 mfspr r4,SPRN_HID0
32 rlwinm r4,r4,0,10,8 /* Clear NAP */
33 mtspr SPRN_HID0, r4
34 b 1f
35END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
36 blr
371:
38 slwi r5,r24,2
39 add r5,r5,r3
40BEGIN_FTR_SECTION
41 mfspr r4,SPRN_MSSCR0
42 addis r6,r5, nap_save_msscr0@ha
43 stw r4,nap_save_msscr0@l(r6)
44END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
45BEGIN_FTR_SECTION
46 mfspr r4,SPRN_HID1
47 addis r6,r5,nap_save_hid1@ha
48 stw r4,nap_save_hid1@l(r6)
49END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
50 blr
51
52/*
53 * Here is the power_save_6xx function. This could eventually be
54 * split into several functions & changing the function pointer
55 * depending on the various features.
56 */
57_GLOBAL(ppc6xx_idle)
58 /* Check if we can nap or doze, put HID0 mask in r3
59 */
60 lis r3, 0
61BEGIN_FTR_SECTION
62 lis r3,HID0_DOZE@h
63END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
64BEGIN_FTR_SECTION
65 /* We must dynamically check for the NAP feature as it
66 * can be cleared by CPU init after the fixups are done
67 */
68 lis r4,cur_cpu_spec@ha
69 lwz r4,cur_cpu_spec@l(r4)
70 lwz r4,CPU_SPEC_FEATURES(r4)
71 andi. r0,r4,CPU_FTR_CAN_NAP
72 beq 1f
73 /* Now check if user or arch enabled NAP mode */
74 lis r4,powersave_nap@ha
75 lwz r4,powersave_nap@l(r4)
76 cmpwi 0,r4,0
77 beq 1f
78 lis r3,HID0_NAP@h
791:
80END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
81 cmpwi 0,r3,0
82 beqlr
83
Paul Mackerras14cf11a2005-09-26 16:04:21 +100084 /* Some pre-nap cleanups needed on some CPUs */
85 andis. r0,r3,HID0_NAP@h
86 beq 2f
87BEGIN_FTR_SECTION
88 /* Disable L2 prefetch on some 745x and try to ensure
89 * L2 prefetch engines are idle. As explained by errata
90 * text, we can't be sure they are, we just hope very hard
91 * that well be enough (sic !). At least I noticed Apple
92 * doesn't even bother doing the dcbf's here...
93 */
94 mfspr r4,SPRN_MSSCR0
95 rlwinm r4,r4,0,0,29
96 sync
97 mtspr SPRN_MSSCR0,r4
98 sync
99 isync
100 lis r4,KERNELBASE@h
101 dcbf 0,r4
102 dcbf 0,r4
103 dcbf 0,r4
104 dcbf 0,r4
105END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
Paul Mackerras14cf11a2005-09-26 16:04:21 +10001062:
107BEGIN_FTR_SECTION
108 /* Go to low speed mode on some 750FX */
109 lis r4,powersave_lowspeed@ha
110 lwz r4,powersave_lowspeed@l(r4)
111 cmpwi 0,r4,0
112 beq 1f
113 mfspr r4,SPRN_HID1
114 oris r4,r4,0x0001
115 mtspr SPRN_HID1,r4
1161:
117END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
118
119 /* Go to NAP or DOZE now */
120 mfspr r4,SPRN_HID0
121 lis r5,(HID0_NAP|HID0_SLEEP)@h
122BEGIN_FTR_SECTION
123 oris r5,r5,HID0_DOZE@h
124END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
125 andc r4,r4,r5
126 or r4,r4,r3
127BEGIN_FTR_SECTION
128 oris r4,r4,HID0_DPM@h /* that should be done once for all */
129END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
130 mtspr SPRN_HID0,r4
131BEGIN_FTR_SECTION
132 DSSALL
133 sync
134END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
Christophe Leroyf7354cc2019-01-31 10:09:04 +0000135 lwz r8,TI_LOCAL_FLAGS(r2) /* set napping bit */
Paul Mackerrasf39224a2006-04-18 21:49:11 +1000136 ori r8,r8,_TLF_NAPPING /* so when we take an exception */
Christophe Leroyf7354cc2019-01-31 10:09:04 +0000137 stw r8,TI_LOCAL_FLAGS(r2) /* it will return to our caller */
Paul Mackerrasff2e6d7e2006-03-28 09:28:14 +1100138 mfmsr r7
139 ori r7,r7,MSR_EE
Paul Mackerras14cf11a2005-09-26 16:04:21 +1000140 oris r7,r7,MSR_POW@h
Paul Mackerrasf39224a2006-04-18 21:49:11 +10001411: sync
Paul Mackerras14cf11a2005-09-26 16:04:21 +1000142 mtmsr r7
143 isync
Paul Mackerrasf39224a2006-04-18 21:49:11 +1000144 b 1b
145
Paul Mackerras14cf11a2005-09-26 16:04:21 +1000146/*
147 * Return from NAP/DOZE mode, restore some CPU specific registers,
148 * we are called with DR/IR still off and r2 containing physical
Paul Mackerrasf39224a2006-04-18 21:49:11 +1000149 * address of current. R11 points to the exception frame (physical
150 * address). We have to preserve r10.
Paul Mackerras14cf11a2005-09-26 16:04:21 +1000151 */
Kumar Galafc4033b2008-06-18 16:26:52 -0500152_GLOBAL(power_save_ppc32_restore)
Paul Mackerrasf39224a2006-04-18 21:49:11 +1000153 lwz r9,_LINK(r11) /* interrupted in ppc6xx_idle: */
154 stw r9,_NIP(r11) /* make it do a blr */
Paul Mackerras14cf11a2005-09-26 16:04:21 +1000155
Paul Mackerrasf39224a2006-04-18 21:49:11 +1000156#ifdef CONFIG_SMP
Christophe Leroyf7354cc2019-01-31 10:09:04 +0000157 lwz r11,TASK_CPU(r2) /* get cpu number * 4 */
Paul Mackerras14cf11a2005-09-26 16:04:21 +1000158 slwi r11,r11,2
Paul Mackerrasf39224a2006-04-18 21:49:11 +1000159#else
160 li r11,0
161#endif
Paul Mackerras14cf11a2005-09-26 16:04:21 +1000162 /* Todo make sure all these are in the same page
Paul Mackerrasf39224a2006-04-18 21:49:11 +1000163 * and load r11 (@ha part + CPU offset) only once
Paul Mackerras14cf11a2005-09-26 16:04:21 +1000164 */
165BEGIN_FTR_SECTION
Paul Mackerrasf39224a2006-04-18 21:49:11 +1000166 mfspr r9,SPRN_HID0
167 andis. r9,r9,HID0_NAP@h
168 beq 1f
Christophe Leroy477f3482020-02-14 06:53:00 +0000169#ifdef CONFIG_VMAP_STACK
170 addis r9, r11, nap_save_msscr0@ha
171#else
Paul Mackerras14cf11a2005-09-26 16:04:21 +1000172 addis r9,r11,(nap_save_msscr0-KERNELBASE)@ha
Christophe Leroy477f3482020-02-14 06:53:00 +0000173#endif
Paul Mackerras14cf11a2005-09-26 16:04:21 +1000174 lwz r9,nap_save_msscr0@l(r9)
175 mtspr SPRN_MSSCR0, r9
176 sync
177 isync
1781:
179END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
180BEGIN_FTR_SECTION
Christophe Leroy477f3482020-02-14 06:53:00 +0000181#ifdef CONFIG_VMAP_STACK
182 addis r9, r11, nap_save_hid1@ha
183#else
Paul Mackerras14cf11a2005-09-26 16:04:21 +1000184 addis r9,r11,(nap_save_hid1-KERNELBASE)@ha
Christophe Leroy477f3482020-02-14 06:53:00 +0000185#endif
Paul Mackerras14cf11a2005-09-26 16:04:21 +1000186 lwz r9,nap_save_hid1@l(r9)
187 mtspr SPRN_HID1, r9
188END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
189 b transfer_to_handler_cont
Christophe Leroy5f32e832020-03-31 16:03:44 +0000190_ASM_NOKPROBE_SYMBOL(power_save_ppc32_restore)
Paul Mackerras14cf11a2005-09-26 16:04:21 +1000191
192 .data
193
194_GLOBAL(nap_save_msscr0)
195 .space 4*NR_CPUS
196
197_GLOBAL(nap_save_hid1)
198 .space 4*NR_CPUS
199
Paul Mackerras14cf11a2005-09-26 16:04:21 +1000200_GLOBAL(powersave_lowspeed)
201 .long 0