// SPDX-License-Identifier: GPL-2.0
/*
 * Generic userspace implementations of gettimeofday() and similar.
 */
#include <linux/compiler.h>
#include <linux/math64.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/hrtimer_defs.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>

/*
 * The generic vDSO implementation requires that the architecture's
 * asm/vdso/gettimeofday.h (included below) provides:
 * - __arch_get_vdso_data(): to get the vdso datapage.
 * - __arch_get_hw_counter(): to get the hw counter based on the
 *   clock_mode.
 * - gettimeofday_fallback(): fallback for gettimeofday.
 * - clock_gettime_fallback(): fallback for clock_gettime.
 * - clock_getres_fallback(): fallback for clock_getres.
 */
#include <asm/vdso/gettimeofday.h>

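/*
 * A minimal sketch of those hooks, for illustration only; it is guarded
 * out so it never builds. EXAMPLE_CLOCK_MODE, example_read_counter()
 * and _vdso_data are made-up names: a real architecture reads a CPU
 * counter (x86 TSC, arm64 virtual counter, ...), finds the datapage
 * through a symbol placed by its vdso linker script, and implements the
 * *_fallback() helpers as real syscalls via arch-specific inline asm.
 */
#if 0
static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
{
        return _vdso_data;      /* label provided by the vdso linker script */
}

static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
{
        if (clock_mode == EXAMPLE_CLOCK_MODE)
                return example_read_counter();
        /* Negative (as s64) makes do_hres() take the syscall fallback. */
        return U64_MAX;
}
#endif
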
static int do_hres(const struct vdso_data *vd, clockid_t clk,
                   struct __kernel_timespec *ts)
{
        const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
        u64 cycles, last, sec, ns;
        u32 seq;

        do {
                seq = vdso_read_begin(vd);
                cycles = __arch_get_hw_counter(vd->clock_mode) & vd->mask;
                ns = vdso_ts->nsec;
                last = vd->cycle_last;
                /*
                 * A negative value signals that the clocksource is not
                 * usable from the vDSO; fall back to the syscall.
                 */
                if (unlikely((s64)cycles < 0))
                        return clock_gettime_fallback(clk, ts);
                if (cycles > last)
                        ns += (cycles - last) * vd->mult;
                ns >>= vd->shift;
                sec = vdso_ts->sec;
        } while (unlikely(vdso_read_retry(vd, seq)));

        /*
         * Do this outside the loop: a race inside the loop could result
         * in __iter_div_u64_rem() being extremely slow.
         */
        ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
        ts->tv_nsec = ns;

        return 0;
}

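/*
 * Worked example of the scaling in do_hres() (illustrative numbers, not
 * taken from a real clocksource): for a 10 MHz counter the timekeeping
 * core could pick mult = 100 << 8 and shift = 8, i.e. 100 ns per cycle.
 * A delta of 3 cycles then contributes (3 * (100 << 8)) >> 8 = 300 ns.
 * Note that vdso_ts->nsec is stored left-shifted by vd->shift by the
 * update side, which is why it is added before the final shift.
 */
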
static void do_coarse(const struct vdso_data *vd, clockid_t clk,
                      struct __kernel_timespec *ts)
{
        const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
        u32 seq;

        do {
                seq = vdso_read_begin(vd);
                ts->tv_sec = vdso_ts->sec;
                ts->tv_nsec = vdso_ts->nsec;
        } while (unlikely(vdso_read_retry(vd, seq)));
}

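/*
 * The coarse variants never read the hardware counter: they return the
 * timestamp captured at the last timer tick. That makes them cheaper
 * than do_hres(), at the cost of tick-granularity accuracy
 * (LOW_RES_NSEC, as also reported by clock_getres() below).
 */
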
static __maybe_unused int
__cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
{
        const struct vdso_data *vd = __arch_get_vdso_data();
        u32 msk;

        /* Check for negative values or invalid clocks */
        if (unlikely((u32) clock >= MAX_CLOCKS))
                goto fallback;

        /*
         * Convert the clockid to a bitmask and use it to check which
         * clocks are handled in the VDSO directly.
         */
        msk = 1U << clock;
        if (likely(msk & VDSO_HRES)) {
                return do_hres(&vd[CS_HRES_COARSE], clock, ts);
        } else if (msk & VDSO_COARSE) {
                do_coarse(&vd[CS_HRES_COARSE], clock, ts);
                return 0;
        } else if (msk & VDSO_RAW) {
                return do_hres(&vd[CS_RAW], clock, ts);
        }

fallback:
        return clock_gettime_fallback(clock, ts);
}

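/*
 * An architecture typically exports this helper as its vDSO entry point
 * with a thin wrapper (hypothetical sketch, guarded out; the exported
 * name varies, e.g. __vdso_clock_gettime on x86, __kernel_clock_gettime
 * on arm64):
 */
#if 0
int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
{
        return __cvdso_clock_gettime(clock, ts);
}
#endif
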
static __maybe_unused int
__cvdso_clock_gettime32(clockid_t clock, struct old_timespec32 *res)
{
        struct __kernel_timespec ts;
        int ret;

        if (res == NULL)
                goto fallback;

        ret = __cvdso_clock_gettime(clock, &ts);

        if (ret == 0) {
                res->tv_sec = ts.tv_sec;
                res->tv_nsec = ts.tv_nsec;
        }

        return ret;

fallback:
        /* res is NULL here, so the cast only placates the prototype. */
        return clock_gettime_fallback(clock, (struct __kernel_timespec *)res);
}

static __maybe_unused int
__cvdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
{
        const struct vdso_data *vd = __arch_get_vdso_data();

        if (likely(tv != NULL)) {
                struct __kernel_timespec ts;

                if (do_hres(&vd[CS_HRES_COARSE], CLOCK_REALTIME, &ts))
                        return gettimeofday_fallback(tv, tz);

                tv->tv_sec = ts.tv_sec;
                tv->tv_usec = (u32)ts.tv_nsec / NSEC_PER_USEC;
        }

        if (unlikely(tz != NULL)) {
                tz->tz_minuteswest = vd[CS_HRES_COARSE].tz_minuteswest;
                tz->tz_dsttime = vd[CS_HRES_COARSE].tz_dsttime;
        }

        return 0;
}

#ifdef VDSO_HAS_TIME
static __maybe_unused time_t __cvdso_time(time_t *time)
{
        const struct vdso_data *vd = __arch_get_vdso_data();
        time_t t = READ_ONCE(vd[CS_HRES_COARSE].basetime[CLOCK_REALTIME].sec);

        if (time)
                *time = t;

        return t;
}
#endif /* VDSO_HAS_TIME */

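/*
 * __cvdso_time() above is compiled only when the architecture opts in
 * from its asm/vdso/gettimeofday.h, e.g. (as x86 does to keep providing
 * the legacy time() vDSO entry):
 *
 *      #define VDSO_HAS_TIME 1
 */
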
#ifdef VDSO_HAS_CLOCK_GETRES
static __maybe_unused
int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
{
        const struct vdso_data *vd = __arch_get_vdso_data();
        u64 ns;
        u32 msk;
        u64 hrtimer_res = READ_ONCE(vd[CS_HRES_COARSE].hrtimer_res);

        /* Check for negative values or invalid clocks */
        if (unlikely((u32) clock >= MAX_CLOCKS))
                goto fallback;

        /*
         * Convert the clockid to a bitmask and use it to check which
         * clocks are handled in the VDSO directly.
         */
        msk = 1U << clock;
        if (msk & (VDSO_HRES | VDSO_RAW)) {
                /*
                 * Preserves the behaviour of posix_get_hrtimer_res().
                 */
                ns = hrtimer_res;
        } else if (msk & VDSO_COARSE) {
                /*
                 * Preserves the behaviour of posix_get_coarse_res().
                 */
                ns = LOW_RES_NSEC;
        } else {
                goto fallback;
        }

        if (res) {
                res->tv_sec = 0;
                res->tv_nsec = ns;
        }

        return 0;

fallback:
        return clock_getres_fallback(clock, res);
}

static __maybe_unused int
__cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res)
{
        struct __kernel_timespec ts;
        int ret;

        if (res == NULL)
                goto fallback;

        ret = __cvdso_clock_getres(clock, &ts);

        if (ret == 0) {
                res->tv_sec = ts.tv_sec;
                res->tv_nsec = ts.tv_nsec;
        }

        return ret;

fallback:
        /* res is NULL here, so the cast only placates the prototype. */
        return clock_getres_fallback(clock, (struct __kernel_timespec *)res);
}
#endif /* VDSO_HAS_CLOCK_GETRES */