#include <linux/percpu.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/pgtable.h>
#include <asm/memory.h>
#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>

extern int __cpu_suspend_enter(unsigned long arg, int (*fn)(unsigned long));
/*
 * This is called by __cpu_suspend_enter() to save the state, and do whatever
 * flushing is required to ensure that when the CPU goes to sleep we have
 * the necessary data available when the caches are not searched.
 *
 * ptr: CPU context virtual address
 * save_ptr: address of the location where the context physical address
 *           must be saved
 */
void notrace __cpu_suspend_save(struct cpu_suspend_ctx *ptr,
                                phys_addr_t *save_ptr)
{
        *save_ptr = virt_to_phys(ptr);

        cpu_do_suspend(ptr);
        /*
         * Only flush the context that must be retrieved with the MMU
         * off. VA primitives ensure the flush is applied to all
         * cache levels so context is pushed to DRAM.
         */
        __flush_dcache_area(ptr, sizeof(*ptr));
        __flush_dcache_area(save_ptr, sizeof(*save_ptr));
}

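/*
 * For reference: __cpu_suspend_save() is invoked from the assembly helper
 * __cpu_suspend_enter() in sleep.S, with ptr pointing at a struct
 * cpu_suspend_ctx allocated on the suspending task's stack and save_ptr
 * pointing at this CPU's slot in the sleep_save_sp.save_ptr_stash array set
 * up in cpu_suspend_init() below. A rough sketch of that context follows;
 * see <asm/suspend.h> for the authoritative definition, which must stay in
 * sync with cpu_do_{suspend,resume} in mm/proc.S:
 *
 *      struct cpu_suspend_ctx {
 *              u64 ctx_regs[NR_CTX_REGS];  // system registers saved by cpu_do_suspend()
 *              u64 sp;                     // stack pointer to restore on resume
 *      } __aligned(16);
 */
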
/*
 * This hook is provided so that cpu_suspend code can restore HW
 * breakpoints as early as possible in the resume path, before reenabling
 * debug exceptions. This cannot be done from a CPU PM notifier since by the
 * time the notifier runs, debug exceptions might have been enabled already,
 * with the HW breakpoint registers' contents still in an unknown state.
 */
void (*hw_breakpoint_restore)(void *);
void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
{
        /* Prevent multiple restore hook initializations */
        if (WARN_ON(hw_breakpoint_restore))
                return;
        hw_breakpoint_restore = hw_bp_restore;
}

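/*
 * For reference: the consumer of this hook is the arm64 HW breakpoint
 * driver, which (when CONFIG_CPU_PM is enabled) registers its restore
 * callback during early init, roughly as sketched below. The exact shape of
 * the registration in hw_breakpoint.c may differ; treat this as an
 * illustration of the intended usage rather than a verbatim copy:
 *
 *      static void hw_breakpoint_reset(void *unused)
 *      {
 *              // reprogram the breakpoint/watchpoint value and control
 *              // registers from the per-cpu slots
 *      }
 *
 *      static int __init arch_hw_breakpoint_init(void)
 *      {
 *              ...
 *              cpu_suspend_set_dbg_restorer(hw_breakpoint_reset);
 *              ...
 *      }
 */
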
/*
 * __cpu_suspend
 *
 * arg: argument to pass to the finisher function
 * fn: finisher function pointer
 */
int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
        struct mm_struct *mm = current->active_mm;
        int ret;
        unsigned long flags;

        /*
         * From this point debug exceptions are disabled to prevent
         * updates to mdscr register (saved and restored along with
         * general purpose registers) from kernel debuggers.
         */
        local_dbg_save(flags);

        /*
         * The mm context is saved on the stack; it will be restored when
         * the CPU comes out of reset through the identity mapped
         * page tables, so that the thread address space is properly
         * set up on function return.
         */
        ret = __cpu_suspend_enter(arg, fn);
        if (ret == 0) {
                /*
                 * We are resuming from reset with TTBR0_EL1 set to the
                 * idmap to enable the MMU; restore the active_mm mappings in
                 * TTBR0_EL1 unless the active_mm == &init_mm, in which case
                 * the thread entered __cpu_suspend with TTBR0_EL1 set to
                 * reserved TTBR0 page tables and should be restored as such.
                 */
                if (mm == &init_mm)
                        cpu_set_reserved_ttbr0();
                else
                        cpu_switch_mm(mm->pgd, mm);

                flush_tlb_all();

                /*
                 * Restore per-cpu offset before any kernel
                 * subsystem relying on it has a chance to run.
                 */
                set_my_cpu_offset(per_cpu_offset(smp_processor_id()));

                /*
                 * Restore HW breakpoint registers to sane values
                 * before debug exceptions are possibly reenabled
                 * through local_dbg_restore.
                 */
                if (hw_breakpoint_restore)
                        hw_breakpoint_restore(NULL);
        }

        /*
         * Restore pstate flags. The OS lock and mdscr have already been
         * restored, so from this point onwards debugging is fully
         * re-enabled if it was enabled when the core started shutdown.
         */
        local_dbg_restore(flags);

        return ret;
}
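
/*
 * The finisher passed to __cpu_suspend() is the platform back end that
 * actually enters the low power state. As an illustration, a PSCI-based
 * caller looks roughly like the sketch below (simplified, not a verbatim
 * copy of psci.c; power_state_for() stands in for the per-cpu power state
 * lookup and is not a real helper):
 *
 *      static int psci_suspend_finisher(unsigned long index)
 *      {
 *              // ask firmware to power the CPU down, resuming at cpu_resume
 *              return psci_ops.cpu_suspend(power_state_for(index),
 *                                          virt_to_phys(cpu_resume));
 *      }
 *
 *      static int cpu_psci_cpu_suspend(unsigned long index)
 *      {
 *              return __cpu_suspend(index, psci_suspend_finisher);
 *      }
 */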

struct sleep_save_sp sleep_save_sp;
phys_addr_t sleep_idmap_phys;

static int __init cpu_suspend_init(void)
{
        void *ctx_ptr;

        /* ctx_ptr is an array of physical addresses */
        ctx_ptr = kcalloc(mpidr_hash_size(), sizeof(phys_addr_t), GFP_KERNEL);

        if (WARN_ON(!ctx_ptr))
                return -ENOMEM;

        sleep_save_sp.save_ptr_stash = ctx_ptr;
        sleep_save_sp.save_ptr_stash_phys = virt_to_phys(ctx_ptr);
        sleep_idmap_phys = virt_to_phys(idmap_pg_dir);
        __flush_dcache_area(&sleep_save_sp, sizeof(struct sleep_save_sp));
        __flush_dcache_area(&sleep_idmap_phys, sizeof(sleep_idmap_phys));

        return 0;
}
early_initcall(cpu_suspend_init);
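
/*
 * The data stashed by cpu_suspend_init() is consumed by the assembly resume
 * path (cpu_resume in sleep.S), which runs with the MMU and caches off:
 * sleep_idmap_phys provides the page tables used to re-enable the MMU
 * through the idmap, and the save_ptr_stash array is indexed by the resuming
 * CPU's MPIDR hash to locate the context saved by __cpu_suspend_save().
 * Conceptually (pseudocode, not the actual assembly):
 *
 *      ctx_phys = ((phys_addr_t *)save_ptr_stash_phys)[mpidr_hash(MPIDR_EL1)];
 *
 * which is why both variables are flushed to memory above, so they can be
 * read with the caches disabled.
 */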