// SPDX-License-Identifier: GPL-2.0
/*
 * Generic userspace implementations of gettimeofday() and similar.
 */
#include <linux/compiler.h>
#include <linux/math64.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/hrtimer_defs.h>
#include <linux/clocksource.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>

/*
 * The generic vDSO implementation requires that gettimeofday.h
 * provides:
 * - __arch_get_vdso_data(): to get the vdso datapage.
 * - __arch_get_hw_counter(): to get the hw counter based on the
 *   clock_mode.
 * - gettimeofday_fallback(): fallback for gettimeofday.
 * - clock_gettime_fallback(): fallback for clock_gettime.
 * - clock_getres_fallback(): fallback for clock_getres.
 */
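/*
 * For illustration only -- the real declarations live in each
 * architecture's asm/vdso/gettimeofday.h and may differ in detail
 * (extra arguments, attributes, return types):
 *
 *   static __always_inline
 *   const struct vdso_data *__arch_get_vdso_data(void);
 *
 *   static __always_inline
 *   u64 __arch_get_hw_counter(s32 clock_mode);
 *
 *   static __always_inline
 *   long clock_gettime_fallback(clockid_t clock, struct __kernel_timespec *ts);
 */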
#ifdef ENABLE_COMPAT_VDSO
#include <asm/vdso/compat_gettimeofday.h>
#else
#include <asm/vdso/gettimeofday.h>
#endif /* ENABLE_COMPAT_VDSO */

#ifndef vdso_calc_delta
/*
 * Default implementation which works for all sane clocksources. That
 * obviously excludes x86/TSC.
 */
static __always_inline
u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
{
	return ((cycles - last) & mask) * mult;
}
#endif

#ifndef __arch_vdso_hres_capable
static inline bool __arch_vdso_hres_capable(void)
{
	return true;
}
#endif

#ifdef CONFIG_TIME_NS
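/*
 * do_hres_timens() is entered with @vdns pointing to the time namespace
 * VVAR page of the task. The real clock data is read from the page
 * returned by __arch_get_timens_vdso_data() and the per-namespace
 * offsets stored in @vdns are applied on top of it.
 */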
static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
			  struct __kernel_timespec *ts)
{
	const struct vdso_data *vd = __arch_get_timens_vdso_data();
	const struct timens_offset *offs = &vdns->offset[clk];
	const struct vdso_timestamp *vdso_ts;
	u64 cycles, last, ns;
	u32 seq;
	s64 sec;

	if (clk != CLOCK_MONOTONIC_RAW)
		vd = &vd[CS_HRES_COARSE];
	else
		vd = &vd[CS_RAW];
	vdso_ts = &vd->basetime[clk];

	do {
		seq = vdso_read_begin(vd);

		if (unlikely(vd->clock_mode == VDSO_CLOCKMODE_NONE))
			return -1;

		cycles = __arch_get_hw_counter(vd->clock_mode);
		ns = vdso_ts->nsec;
		last = vd->cycle_last;
		ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
		ns >>= vd->shift;
		sec = vdso_ts->sec;
	} while (unlikely(vdso_read_retry(vd, seq)));

	/* Add the namespace offset */
	sec += offs->sec;
	ns += offs->nsec;

	/*
	 * Do this outside the loop: a race inside the loop could result
	 * in __iter_div_u64_rem() being extremely slow.
	 */
	ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return 0;
}
#else
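/*
 * Stub versions for CONFIG_TIME_NS=n. The callers check
 * IS_ENABLED(CONFIG_TIME_NS) first, so these are never reached; they
 * only keep the code compiling and are discarded by the compiler.
 */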
static __always_inline const struct vdso_data *__arch_get_timens_vdso_data(void)
{
	return NULL;
}

static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
			  struct __kernel_timespec *ts)
{
	return -EINVAL;
}
#endif

static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
				   struct __kernel_timespec *ts)
{
	const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
	u64 cycles, last, sec, ns;
	u32 seq;

	/* Allows the high resolution parts to be compiled out */
	if (!__arch_vdso_hres_capable())
		return -1;

	do {
		/*
		 * Open coded to handle VDSO_CLOCKMODE_TIMENS. Time namespace
		 * enabled tasks have a special VVAR page installed which
		 * has vd->seq set to 1 and vd->clock_mode set to
		 * VDSO_CLOCKMODE_TIMENS. For tasks not affected by a time
		 * namespace this does not hurt performance: if vd->seq is
		 * odd, i.e. a concurrent update is in progress, the extra
		 * check for vd->clock_mode is just a few extra
		 * instructions while spin-waiting for vd->seq to become
		 * even again.
		 */
		while (unlikely((seq = READ_ONCE(vd->seq)) & 1)) {
			if (IS_ENABLED(CONFIG_TIME_NS) &&
			    vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
				return do_hres_timens(vd, clk, ts);
			cpu_relax();
		}
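		/*
		 * smp_rmb() orders the vd->seq load above against the data
		 * loads below; together with the spin loop above this is
		 * the open-coded equivalent of vdso_read_begin().
		 */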
		smp_rmb();

		if (unlikely(vd->clock_mode == VDSO_CLOCKMODE_NONE))
			return -1;

		cycles = __arch_get_hw_counter(vd->clock_mode);
		ns = vdso_ts->nsec;
		last = vd->cycle_last;
		ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
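		/*
		 * vdso_ts->nsec and the scaled counter delta are both in
		 * units of 1/2^shift ns; the shift below converts the sum
		 * to nanoseconds.
		 */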
		ns >>= vd->shift;
		sec = vdso_ts->sec;
	} while (unlikely(vdso_read_retry(vd, seq)));

	/*
	 * Do this outside the loop: a race inside the loop could result
	 * in __iter_div_u64_rem() being extremely slow.
	 */
	ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return 0;
}

#ifdef CONFIG_TIME_NS
static int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
			    struct __kernel_timespec *ts)
{
	const struct vdso_data *vd = __arch_get_timens_vdso_data();
	const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
	const struct timens_offset *offs = &vdns->offset[clk];
	u64 nsec;
	s64 sec;
	s32 seq;

	do {
		seq = vdso_read_begin(vd);
		sec = vdso_ts->sec;
		nsec = vdso_ts->nsec;
	} while (unlikely(vdso_read_retry(vd, seq)));

	/* Add the namespace offset */
	sec += offs->sec;
	nsec += offs->nsec;

	/*
	 * Do this outside the loop: a race inside the loop could result
	 * in __iter_div_u64_rem() being extremely slow.
	 */
	ts->tv_sec = sec + __iter_div_u64_rem(nsec, NSEC_PER_SEC, &nsec);
	ts->tv_nsec = nsec;
	return 0;
}
#else
static int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
			    struct __kernel_timespec *ts)
{
	return -1;
}
#endif

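/*
 * The coarse clocks return the timestamps cached in the vDSO data at the
 * last timekeeping update; no hardware counter is read, which trades
 * resolution for speed.
 */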
static __always_inline int do_coarse(const struct vdso_data *vd, clockid_t clk,
				     struct __kernel_timespec *ts)
{
	const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
	u32 seq;

	do {
		/*
		 * Open coded to handle VDSO_CLOCKMODE_TIMENS. See comment in
		 * do_hres().
		 */
		while ((seq = READ_ONCE(vd->seq)) & 1) {
			if (IS_ENABLED(CONFIG_TIME_NS) &&
			    vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
				return do_coarse_timens(vd, clk, ts);
			cpu_relax();
		}
		smp_rmb();

		ts->tv_sec = vdso_ts->sec;
		ts->tv_nsec = vdso_ts->nsec;
	} while (unlikely(vdso_read_retry(vd, seq)));

	return 0;
}

static __maybe_unused int
__cvdso_clock_gettime_common(clockid_t clock, struct __kernel_timespec *ts)
{
	const struct vdso_data *vd = __arch_get_vdso_data();
	u32 msk;

	/* Check for negative values or invalid clocks */
	if (unlikely((u32) clock >= MAX_CLOCKS))
		return -1;

	/*
	 * Convert the clockid to a bitmask and use it to check which
	 * clocks are handled in the VDSO directly.
	 */
	msk = 1U << clock;
	if (likely(msk & VDSO_HRES))
		vd = &vd[CS_HRES_COARSE];
	else if (msk & VDSO_COARSE)
		return do_coarse(&vd[CS_HRES_COARSE], clock, ts);
	else if (msk & VDSO_RAW)
		vd = &vd[CS_RAW];
	else
		return -1;

	return do_hres(vd, clock, ts);
}
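/*
 * The __cvdso_*() entry points below call the *_common() helpers and fall
 * back to the corresponding system call when the helper signals failure,
 * e.g. for clocks the vDSO does not handle or when the clocksource cannot
 * be read from userspace.
 */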

static __maybe_unused int
__cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
{
	int ret = __cvdso_clock_gettime_common(clock, ts);

	if (unlikely(ret))
		return clock_gettime_fallback(clock, ts);
	return 0;
}

#ifdef BUILD_VDSO32
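/*
 * 32-bit time variant: same helper, but the result is converted to
 * struct old_timespec32 for the 32-bit/compat vDSO.
 */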
static __maybe_unused int
__cvdso_clock_gettime32(clockid_t clock, struct old_timespec32 *res)
{
	struct __kernel_timespec ts;
	int ret;

	ret = __cvdso_clock_gettime_common(clock, &ts);

	if (unlikely(ret))
		return clock_gettime32_fallback(clock, res);

	/* For ret == 0 */
	res->tv_sec = ts.tv_sec;
	res->tv_nsec = ts.tv_nsec;

	return ret;
}
#endif /* BUILD_VDSO32 */

static __maybe_unused int
__cvdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
{
	const struct vdso_data *vd = __arch_get_vdso_data();

	if (likely(tv != NULL)) {
		struct __kernel_timespec ts;

		if (do_hres(&vd[CS_HRES_COARSE], CLOCK_REALTIME, &ts))
			return gettimeofday_fallback(tv, tz);

		tv->tv_sec = ts.tv_sec;
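		/* ts.tv_nsec is always < NSEC_PER_SEC, so a 32-bit division suffices */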
		tv->tv_usec = (u32)ts.tv_nsec / NSEC_PER_USEC;
	}

	if (unlikely(tz != NULL)) {
		if (IS_ENABLED(CONFIG_TIME_NS) &&
		    vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
			vd = __arch_get_timens_vdso_data();

		tz->tz_minuteswest = vd[CS_HRES_COARSE].tz_minuteswest;
		tz->tz_dsttime = vd[CS_HRES_COARSE].tz_dsttime;
	}

	return 0;
}

#ifdef VDSO_HAS_TIME
static __maybe_unused __kernel_old_time_t __cvdso_time(__kernel_old_time_t *time)
{
	const struct vdso_data *vd = __arch_get_vdso_data();
	__kernel_old_time_t t;

	if (IS_ENABLED(CONFIG_TIME_NS) &&
	    vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
		vd = __arch_get_timens_vdso_data();

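	/*
	 * time() only needs the CLOCK_REALTIME seconds, so a single
	 * READ_ONCE() of the base seconds is used instead of the full
	 * seqcount protected read.
	 */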
	t = READ_ONCE(vd[CS_HRES_COARSE].basetime[CLOCK_REALTIME].sec);

	if (time)
		*time = t;

	return t;
}
#endif /* VDSO_HAS_TIME */

#ifdef VDSO_HAS_CLOCK_GETRES
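/*
 * __cvdso_clock_getres_common() fills @res with the clock resolution in
 * nanoseconds (tv_sec is always 0) and returns -1 for clocks which are
 * not handled in the vDSO.
 */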
static __maybe_unused
int __cvdso_clock_getres_common(clockid_t clock, struct __kernel_timespec *res)
{
	const struct vdso_data *vd = __arch_get_vdso_data();
	u32 msk;
	u64 ns;

	/* Check for negative values or invalid clocks */
	if (unlikely((u32) clock >= MAX_CLOCKS))
		return -1;

	if (IS_ENABLED(CONFIG_TIME_NS) &&
	    vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
		vd = __arch_get_timens_vdso_data();

	/*
	 * Convert the clockid to a bitmask and use it to check which
	 * clocks are handled in the VDSO directly.
	 */
	msk = 1U << clock;
	if (msk & (VDSO_HRES | VDSO_RAW)) {
		/*
		 * Preserves the behaviour of posix_get_hrtimer_res().
		 */
		ns = READ_ONCE(vd[CS_HRES_COARSE].hrtimer_res);
	} else if (msk & VDSO_COARSE) {
		/*
		 * Preserves the behaviour of posix_get_coarse_res().
		 */
		ns = LOW_RES_NSEC;
	} else {
		return -1;
	}

	if (likely(res)) {
		res->tv_sec = 0;
		res->tv_nsec = ns;
	}
	return 0;
}

static __maybe_unused
int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
{
	int ret = __cvdso_clock_getres_common(clock, res);

	if (unlikely(ret))
		return clock_getres_fallback(clock, res);
	return 0;
}

#ifdef BUILD_VDSO32
static __maybe_unused int
__cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res)
{
	struct __kernel_timespec ts;
	int ret;

	ret = __cvdso_clock_getres_common(clock, &ts);

	if (unlikely(ret))
		return clock_getres32_fallback(clock, res);

	if (likely(res)) {
		res->tv_sec = ts.tv_sec;
		res->tv_nsec = ts.tv_nsec;
	}
	return ret;
}
#endif /* BUILD_VDSO32 */
#endif /* VDSO_HAS_CLOCK_GETRES */