Greg Kroah-Hartman | b244131 | 2017-11-01 15:07:57 +0100 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0 |
Martin Schwidefsky | b5f87f1 | 2014-10-01 10:57:57 +0200 | [diff] [blame] | 2 | /* |
| 3 | * Idle functions for s390. |
| 4 | * |
| 5 | * Copyright IBM Corp. 2014 |
| 6 | * |
| 7 | * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> |
| 8 | */ |
| 9 | |
| 10 | #include <linux/kernel.h> |
| 11 | #include <linux/kernel_stat.h> |
Martin Schwidefsky | b5f87f1 | 2014-10-01 10:57:57 +0200 | [diff] [blame] | 12 | #include <linux/notifier.h> |
| 13 | #include <linux/init.h> |
| 14 | #include <linux/cpu.h> |
Ingo Molnar | 32ef551 | 2017-02-05 11:48:36 +0100 | [diff] [blame] | 15 | #include <linux/sched/cputime.h> |
Sven Schnelle | 6589c93 | 2020-07-08 11:21:25 +0200 | [diff] [blame] | 16 | #include <trace/events/power.h> |
Sven Schnelle | 56e62a7 | 2020-11-21 11:14:56 +0100 | [diff] [blame] | 17 | #include <asm/cpu_mf.h> |
Martin Schwidefsky | b5f87f1 | 2014-10-01 10:57:57 +0200 | [diff] [blame] | 18 | #include <asm/nmi.h> |
| 19 | #include <asm/smp.h> |
| 20 | #include "entry.h" |
| 21 | |
/* Per-CPU idle accounting state (idle enter/exit clocks, totals, seqcount). */
static DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
| 23 | |
/*
 * Account the time spent in enabled wait (idle) when an interrupt ends it.
 * Called on interrupt entry; reads the enter snapshots taken before the idle
 * PSW was loaded and charges the elapsed wall-clock time as steal time and
 * the elapsed CPU timer delta as system time.
 */
void account_idle_time_irq(void)
{
	struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
	u64 cycles_new[8];
	int i;

	/* This CPU is no longer in enabled wait. */
	clear_cpu_flag(CIF_ENABLED_WAIT);
	if (smp_cpu_mtid) {
		/*
		 * SMT is active: fold the MT diagnostic cycle counters that
		 * accumulated since idle entry into the per-CPU totals.
		 */
		stcctm(MT_DIAG, smp_cpu_mtid, cycles_new);
		for (i = 0; i < smp_cpu_mtid; i++)
			this_cpu_add(mt_cycles[i], cycles_new[i] - idle->mt_cycles_enter[i]);
	}

	/* Snapshot exit times from the lowcore values saved at interrupt entry. */
	idle->clock_idle_exit = S390_lowcore.int_clock;
	idle->timer_idle_exit = S390_lowcore.sys_enter_timer;

	/*
	 * Wall-clock time between the last accounting update and idle entry
	 * was not spent running this context; charge it as steal time.
	 */
	S390_lowcore.steal_timer += idle->clock_idle_enter - S390_lowcore.last_update_clock;
	S390_lowcore.last_update_clock = idle->clock_idle_exit;

	/* CPU timer consumed up to idle entry is charged as system time. */
	S390_lowcore.system_timer += S390_lowcore.last_update_timer - idle->timer_idle_enter;
	S390_lowcore.last_update_timer = idle->timer_idle_exit;
}
| 46 | |
/*
 * Arch idle entry point: load an enabled-wait PSW and, once an interrupt
 * has woken the CPU, account the waited time as idle time.
 */
void arch_cpu_idle(void)
{
	struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
	unsigned long long idle_time;
	unsigned long psw_mask;

	/* Wait for external, I/O or machine check interrupt. */
	psw_mask = PSW_KERNEL_BITS | PSW_MASK_WAIT | PSW_MASK_DAT |
		PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
	clear_cpu_flag(CIF_NOHZ_DELAY);

	/* psw_idle() returns with interrupts disabled. */
	psw_idle(idle, psw_mask);

	/* Account time spent with enabled wait psw loaded as idle time. */
	raw_write_seqcount_begin(&idle->seqcount);
	idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
	/* Clear the snapshots so sysfs readers see no idle period in flight. */
	idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
	idle->idle_time += idle_time;
	idle->idle_count++;
	account_idle_time(cputime_to_nsecs(idle_time));
	raw_write_seqcount_end(&idle->seqcount);
	/* The generic idle loop expects to be re-entered with irqs enabled. */
	raw_local_irq_enable();
}
Martin Schwidefsky | b5f87f1 | 2014-10-01 10:57:57 +0200 | [diff] [blame] | 71 | |
| 72 | static ssize_t show_idle_count(struct device *dev, |
| 73 | struct device_attribute *attr, char *buf) |
| 74 | { |
| 75 | struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id); |
| 76 | unsigned long long idle_count; |
Frederic Weisbecker | 1ce2180 | 2014-11-28 19:23:34 +0100 | [diff] [blame] | 77 | unsigned int seq; |
Martin Schwidefsky | b5f87f1 | 2014-10-01 10:57:57 +0200 | [diff] [blame] | 78 | |
| 79 | do { |
Frederic Weisbecker | 1ce2180 | 2014-11-28 19:23:34 +0100 | [diff] [blame] | 80 | seq = read_seqcount_begin(&idle->seqcount); |
Christian Borntraeger | 187b5f4 | 2017-02-10 12:34:49 +0100 | [diff] [blame] | 81 | idle_count = READ_ONCE(idle->idle_count); |
| 82 | if (READ_ONCE(idle->clock_idle_enter)) |
Martin Schwidefsky | b5f87f1 | 2014-10-01 10:57:57 +0200 | [diff] [blame] | 83 | idle_count++; |
Frederic Weisbecker | 1ce2180 | 2014-11-28 19:23:34 +0100 | [diff] [blame] | 84 | } while (read_seqcount_retry(&idle->seqcount, seq)); |
Martin Schwidefsky | b5f87f1 | 2014-10-01 10:57:57 +0200 | [diff] [blame] | 85 | return sprintf(buf, "%llu\n", idle_count); |
| 86 | } |
| 87 | DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL); |
| 88 | |
| 89 | static ssize_t show_idle_time(struct device *dev, |
| 90 | struct device_attribute *attr, char *buf) |
| 91 | { |
Heiko Carstens | 3d7efa4 | 2019-10-28 11:03:27 +0100 | [diff] [blame] | 92 | unsigned long long now, idle_time, idle_enter, idle_exit, in_idle; |
Martin Schwidefsky | b5f87f1 | 2014-10-01 10:57:57 +0200 | [diff] [blame] | 93 | struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id); |
Frederic Weisbecker | 1ce2180 | 2014-11-28 19:23:34 +0100 | [diff] [blame] | 94 | unsigned int seq; |
Martin Schwidefsky | b5f87f1 | 2014-10-01 10:57:57 +0200 | [diff] [blame] | 95 | |
| 96 | do { |
Frederic Weisbecker | 1ce2180 | 2014-11-28 19:23:34 +0100 | [diff] [blame] | 97 | seq = read_seqcount_begin(&idle->seqcount); |
Christian Borntraeger | 187b5f4 | 2017-02-10 12:34:49 +0100 | [diff] [blame] | 98 | idle_time = READ_ONCE(idle->idle_time); |
| 99 | idle_enter = READ_ONCE(idle->clock_idle_enter); |
| 100 | idle_exit = READ_ONCE(idle->clock_idle_exit); |
Frederic Weisbecker | 1ce2180 | 2014-11-28 19:23:34 +0100 | [diff] [blame] | 101 | } while (read_seqcount_retry(&idle->seqcount, seq)); |
Heiko Carstens | 3d7efa4 | 2019-10-28 11:03:27 +0100 | [diff] [blame] | 102 | in_idle = 0; |
| 103 | now = get_tod_clock(); |
| 104 | if (idle_enter) { |
| 105 | if (idle_exit) { |
| 106 | in_idle = idle_exit - idle_enter; |
| 107 | } else if (now > idle_enter) { |
| 108 | in_idle = now - idle_enter; |
| 109 | } |
| 110 | } |
| 111 | idle_time += in_idle; |
Martin Schwidefsky | b5f87f1 | 2014-10-01 10:57:57 +0200 | [diff] [blame] | 112 | return sprintf(buf, "%llu\n", idle_time >> 12); |
| 113 | } |
| 114 | DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL); |
| 115 | |
Frederic Weisbecker | 42b425b | 2017-01-31 04:09:47 +0100 | [diff] [blame] | 116 | u64 arch_cpu_idle_time(int cpu) |
Martin Schwidefsky | b5f87f1 | 2014-10-01 10:57:57 +0200 | [diff] [blame] | 117 | { |
| 118 | struct s390_idle_data *idle = &per_cpu(s390_idle, cpu); |
Heiko Carstens | 3d7efa4 | 2019-10-28 11:03:27 +0100 | [diff] [blame] | 119 | unsigned long long now, idle_enter, idle_exit, in_idle; |
Frederic Weisbecker | 1ce2180 | 2014-11-28 19:23:34 +0100 | [diff] [blame] | 120 | unsigned int seq; |
Martin Schwidefsky | b5f87f1 | 2014-10-01 10:57:57 +0200 | [diff] [blame] | 121 | |
| 122 | do { |
Frederic Weisbecker | 1ce2180 | 2014-11-28 19:23:34 +0100 | [diff] [blame] | 123 | seq = read_seqcount_begin(&idle->seqcount); |
Christian Borntraeger | 187b5f4 | 2017-02-10 12:34:49 +0100 | [diff] [blame] | 124 | idle_enter = READ_ONCE(idle->clock_idle_enter); |
| 125 | idle_exit = READ_ONCE(idle->clock_idle_exit); |
Frederic Weisbecker | 1ce2180 | 2014-11-28 19:23:34 +0100 | [diff] [blame] | 126 | } while (read_seqcount_retry(&idle->seqcount, seq)); |
Heiko Carstens | 3d7efa4 | 2019-10-28 11:03:27 +0100 | [diff] [blame] | 127 | in_idle = 0; |
| 128 | now = get_tod_clock(); |
| 129 | if (idle_enter) { |
| 130 | if (idle_exit) { |
| 131 | in_idle = idle_exit - idle_enter; |
| 132 | } else if (now > idle_enter) { |
| 133 | in_idle = now - idle_enter; |
| 134 | } |
| 135 | } |
| 136 | return cputime_to_nsecs(in_idle); |
Martin Schwidefsky | b5f87f1 | 2014-10-01 10:57:57 +0200 | [diff] [blame] | 137 | } |
| 138 | |
/* Nothing to do on s390 when the generic idle loop is entered. */
void arch_cpu_idle_enter(void)
{
}
| 142 | |
/* Nothing to do on s390 when the generic idle loop is left. */
void arch_cpu_idle_exit(void)
{
}
| 146 | |
/*
 * Called from the idle task of a CPU that is going offline;
 * cpu_die() takes the CPU down and is expected not to return.
 */
void arch_cpu_idle_dead(void)
{
	cpu_die();
}