// SPDX-License-Identifier: GPL-2.0
/*
 * Generic userspace implementations of gettimeofday() and similar.
 */
#include <linux/compiler.h>
#include <linux/math64.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/hrtimer_defs.h>
#include <linux/clocksource.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>

/*
 * The generic vDSO implementation requires that gettimeofday.h
 * provides:
 * - __arch_get_vdso_data(): to get the vdso datapage.
 * - __arch_get_hw_counter(): to get the hw counter based on the
 *   clock_mode.
 * - gettimeofday_fallback(): fallback for gettimeofday.
 * - clock_gettime_fallback(): fallback for clock_gettime.
 * - clock_getres_fallback(): fallback for clock_getres.
 */
#ifdef ENABLE_COMPAT_VDSO
#include <asm/vdso/compat_gettimeofday.h>
#else
#include <asm/vdso/gettimeofday.h>
#endif /* ENABLE_COMPAT_VDSO */
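
/*
 * Illustrative sketch, not part of this file: the architecture header
 * included above provides the required hooks as small inline functions,
 * along the lines of the following for the hardware counter read
 * (read_arch_counter() is a hypothetical placeholder; see each
 * architecture's asm/vdso/gettimeofday.h for the real code):
 *
 *	static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
 *	{
 *		return read_arch_counter();
 *	}
 */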

#ifndef vdso_calc_delta
/*
 * Default implementation which works for all sane clocksources. That
 * obviously excludes x86/TSC.
 */
static __always_inline
u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
{
	return ((cycles - last) & mask) * mult;
}
#endif
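
/*
 * The callers below combine the returned value with the base timestamp
 * from the data page:
 *
 *	ns = (vdso_ts->nsec + vdso_calc_delta(cycles, last, mask, mult)) >> shift;
 *
 * vdso_ts->nsec is stored left-shifted by vd->shift, so a single right
 * shift converts the sum into plain nanoseconds.
 */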

#ifndef __arch_vdso_hres_capable
static inline bool __arch_vdso_hres_capable(void)
{
	return true;
}
#endif

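/*
 * Time namespace handling: tasks inside a time namespace get a dedicated
 * VVAR page whose vdso_data (vdns) carries the per-clock namespace
 * offsets, while the real timekeeper data is reached via
 * __arch_get_timens_vdso_data(). The *_timens() variants below read the
 * real data and then add the namespace offset for the requested clock.
 */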
#ifdef CONFIG_TIME_NS
static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
			  struct __kernel_timespec *ts)
{
	const struct vdso_data *vd = __arch_get_timens_vdso_data();
	const struct timens_offset *offs = &vdns->offset[clk];
	const struct vdso_timestamp *vdso_ts;
	u64 cycles, last, ns;
	u32 seq;
	s64 sec;

	if (clk != CLOCK_MONOTONIC_RAW)
		vd = &vd[CS_HRES_COARSE];
	else
		vd = &vd[CS_RAW];
	vdso_ts = &vd->basetime[clk];

	do {
		seq = vdso_read_begin(vd);

		if (unlikely(vd->clock_mode == VDSO_CLOCKMODE_NONE))
			return -1;

		cycles = __arch_get_hw_counter(vd->clock_mode);
		ns = vdso_ts->nsec;
		last = vd->cycle_last;
		ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
		ns >>= vd->shift;
		sec = vdso_ts->sec;
	} while (unlikely(vdso_read_retry(vd, seq)));

	/* Add the namespace offset */
	sec += offs->sec;
	ns += offs->nsec;

	/*
	 * Do this outside the loop: a race inside the loop could result
	 * in __iter_div_u64_rem() being extremely slow.
	 */
	ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return 0;
}
#else
static __always_inline const struct vdso_data *__arch_get_timens_vdso_data(void)
{
	return NULL;
}

static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
			  struct __kernel_timespec *ts)
{
	return -EINVAL;
}
#endif

static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
				   struct __kernel_timespec *ts)
{
	const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
	u64 cycles, last, sec, ns;
	u32 seq;

	/* Allows the high resolution parts to be compiled out */
	if (!__arch_vdso_hres_capable())
		return -1;

	do {
		/*
		 * Open coded to handle VDSO_CLOCKMODE_TIMENS. Time namespace
		 * enabled tasks have a special VVAR page installed which
		 * has vd->seq set to 1 and vd->clock_mode set to
		 * VDSO_CLOCKMODE_TIMENS. For tasks not affected by time
		 * namespaces this does not affect performance because, if
		 * vd->seq is odd, i.e. a concurrent update is in progress,
		 * the extra check for vd->clock_mode is just a few extra
		 * instructions while spin waiting for vd->seq to become
		 * even again.
		 */
		while (unlikely((seq = READ_ONCE(vd->seq)) & 1)) {
			if (IS_ENABLED(CONFIG_TIME_NS) &&
			    vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
				return do_hres_timens(vd, clk, ts);
			cpu_relax();
		}
		smp_rmb();

		if (unlikely(vd->clock_mode == VDSO_CLOCKMODE_NONE))
			return -1;

		cycles = __arch_get_hw_counter(vd->clock_mode);
		ns = vdso_ts->nsec;
		last = vd->cycle_last;
		ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
		ns >>= vd->shift;
		sec = vdso_ts->sec;
	} while (unlikely(vdso_read_retry(vd, seq)));

	/*
	 * Do this outside the loop: a race inside the loop could result
	 * in __iter_div_u64_rem() being extremely slow.
	 */
	ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return 0;
}

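/*
 * The coarse clocks (CLOCK_REALTIME_COARSE, CLOCK_MONOTONIC_COARSE) are
 * served purely from the seqcount protected timestamps in the data page.
 * No hardware counter is read, so the result is cheaper to obtain but
 * only has tick granularity.
 */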
#ifdef CONFIG_TIME_NS
static int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
			    struct __kernel_timespec *ts)
{
	const struct vdso_data *vd = __arch_get_timens_vdso_data();
	const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
	const struct timens_offset *offs = &vdns->offset[clk];
	u64 nsec;
	s64 sec;
	s32 seq;

	do {
		seq = vdso_read_begin(vd);
		sec = vdso_ts->sec;
		nsec = vdso_ts->nsec;
	} while (unlikely(vdso_read_retry(vd, seq)));

	/* Add the namespace offset */
	sec += offs->sec;
	nsec += offs->nsec;

	/*
	 * Do this outside the loop: a race inside the loop could result
	 * in __iter_div_u64_rem() being extremely slow.
	 */
	ts->tv_sec = sec + __iter_div_u64_rem(nsec, NSEC_PER_SEC, &nsec);
	ts->tv_nsec = nsec;
	return 0;
}
#else
static int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
			    struct __kernel_timespec *ts)
{
	return -1;
}
#endif

static __always_inline int do_coarse(const struct vdso_data *vd, clockid_t clk,
				     struct __kernel_timespec *ts)
{
	const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
	u32 seq;

	do {
		/*
		 * Open coded to handle VDSO_CLOCKMODE_TIMENS. See comment in
		 * do_hres().
		 */
		while ((seq = READ_ONCE(vd->seq)) & 1) {
			if (IS_ENABLED(CONFIG_TIME_NS) &&
			    vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
				return do_coarse_timens(vd, clk, ts);
			cpu_relax();
		}
		smp_rmb();

		ts->tv_sec = vdso_ts->sec;
		ts->tv_nsec = vdso_ts->nsec;
	} while (unlikely(vdso_read_retry(vd, seq)));

	return 0;
}

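/*
 * Clock dispatch: the clockid is converted into a single bit and tested
 * against the VDSO_HRES/VDSO_COARSE/VDSO_RAW masks from vdso/datapage.h.
 * For example CLOCK_MONOTONIC (1) yields msk == 0x02, which is part of
 * VDSO_HRES, so the request is served by do_hres() using the
 * CS_HRES_COARSE data page.
 */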
static __maybe_unused int
__cvdso_clock_gettime_common(clockid_t clock, struct __kernel_timespec *ts)
{
	const struct vdso_data *vd = __arch_get_vdso_data();
	u32 msk;

	/* Check for negative values or invalid clocks */
	if (unlikely((u32) clock >= MAX_CLOCKS))
		return -1;

	/*
	 * Convert the clockid to a bitmask and use it to check which
	 * clocks are handled in the VDSO directly.
	 */
	msk = 1U << clock;
	if (likely(msk & VDSO_HRES))
		vd = &vd[CS_HRES_COARSE];
	else if (msk & VDSO_COARSE)
		return do_coarse(&vd[CS_HRES_COARSE], clock, ts);
	else if (msk & VDSO_RAW)
		vd = &vd[CS_RAW];
	else
		return -1;

	return do_hres(vd, clock, ts);
}

static __maybe_unused int
__cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
{
	int ret = __cvdso_clock_gettime_common(clock, ts);

	if (unlikely(ret))
		return clock_gettime_fallback(clock, ts);
	return 0;
}

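/*
 * Illustrative sketch, not part of this file: an architecture's vDSO
 * entry points are typically thin wrappers around the __cvdso_*()
 * helpers, along the lines of (symbol name and prototype are the
 * architecture's choice):
 *
 *	int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
 *	{
 *		return __cvdso_clock_gettime(clock, ts);
 *	}
 *
 * The *_fallback() routines invoked above are expected to issue the
 * corresponding syscall when the vDSO cannot handle the request.
 */
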
#ifdef BUILD_VDSO32
static __maybe_unused int
__cvdso_clock_gettime32(clockid_t clock, struct old_timespec32 *res)
{
	struct __kernel_timespec ts;
	int ret;

	ret = __cvdso_clock_gettime_common(clock, &ts);

	if (unlikely(ret))
		return clock_gettime32_fallback(clock, res);

	/* For ret == 0 */
	res->tv_sec = ts.tv_sec;
	res->tv_nsec = ts.tv_nsec;

	return ret;
}
#endif /* BUILD_VDSO32 */

static __maybe_unused int
__cvdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
{
	const struct vdso_data *vd = __arch_get_vdso_data();

	if (likely(tv != NULL)) {
		struct __kernel_timespec ts;

		if (do_hres(&vd[CS_HRES_COARSE], CLOCK_REALTIME, &ts))
			return gettimeofday_fallback(tv, tz);

		tv->tv_sec = ts.tv_sec;
		tv->tv_usec = (u32)ts.tv_nsec / NSEC_PER_USEC;
	}

	if (unlikely(tz != NULL)) {
		if (IS_ENABLED(CONFIG_TIME_NS) &&
		    vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
			vd = __arch_get_timens_vdso_data();

		tz->tz_minuteswest = vd[CS_HRES_COARSE].tz_minuteswest;
		tz->tz_dsttime = vd[CS_HRES_COARSE].tz_dsttime;
	}

	return 0;
}

#ifdef VDSO_HAS_TIME
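/*
 * Note: the CLOCK_REALTIME seconds are read with a single READ_ONCE()
 * and without the seqcount loop used elsewhere. time() only provides
 * second granularity, so observing the value from just before or just
 * after a concurrent update is tolerated here.
 */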
static __maybe_unused __kernel_old_time_t __cvdso_time(__kernel_old_time_t *time)
{
	const struct vdso_data *vd = __arch_get_vdso_data();
	__kernel_old_time_t t;

	if (IS_ENABLED(CONFIG_TIME_NS) &&
	    vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
		vd = __arch_get_timens_vdso_data();

	t = READ_ONCE(vd[CS_HRES_COARSE].basetime[CLOCK_REALTIME].sec);

	if (time)
		*time = t;

	return t;
}
#endif /* VDSO_HAS_TIME */

#ifdef VDSO_HAS_CLOCK_GETRES
static __maybe_unused
int __cvdso_clock_getres_common(clockid_t clock, struct __kernel_timespec *res)
{
	const struct vdso_data *vd = __arch_get_vdso_data();
	u32 msk;
	u64 ns;

	/* Check for negative values or invalid clocks */
	if (unlikely((u32) clock >= MAX_CLOCKS))
		return -1;

	if (IS_ENABLED(CONFIG_TIME_NS) &&
	    vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
		vd = __arch_get_timens_vdso_data();

	/*
	 * Convert the clockid to a bitmask and use it to check which
	 * clocks are handled in the VDSO directly.
	 */
	msk = 1U << clock;
	if (msk & (VDSO_HRES | VDSO_RAW)) {
		/*
		 * Preserves the behaviour of posix_get_hrtimer_res().
		 */
		ns = READ_ONCE(vd[CS_HRES_COARSE].hrtimer_res);
	} else if (msk & VDSO_COARSE) {
		/*
		 * Preserves the behaviour of posix_get_coarse_res().
		 */
		ns = LOW_RES_NSEC;
	} else {
		return -1;
	}

	if (likely(res)) {
		res->tv_sec = 0;
		res->tv_nsec = ns;
	}
	return 0;
}

static __maybe_unused
int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
{
	int ret = __cvdso_clock_getres_common(clock, res);

	if (unlikely(ret))
		return clock_getres_fallback(clock, res);
	return 0;
}

#ifdef BUILD_VDSO32
static __maybe_unused int
__cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res)
{
	struct __kernel_timespec ts;
	int ret;

	ret = __cvdso_clock_getres_common(clock, &ts);

	if (unlikely(ret))
		return clock_getres32_fallback(clock, res);

	if (likely(res)) {
		res->tv_sec = ts.tv_sec;
		res->tv_nsec = ts.tv_nsec;
	}
	return ret;
}
#endif /* BUILD_VDSO32 */
#endif /* VDSO_HAS_CLOCK_GETRES */