/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MATH64_H
#define _LINUX_MATH64_H

#include <linux/types.h>
#include <asm/div64.h>

#if BITS_PER_LONG == 64

#define div64_long(x, y) div64_s64((x), (y))
#define div64_ul(x, y) div64_u64((x), (y))

/**
 * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 * @remainder: pointer to unsigned 32bit remainder
 *
 * Return: sets ``*remainder``, then returns dividend / divisor
 *
 * This is commonly provided by 32bit archs to provide an optimized 64bit
 * divide.
 */
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}
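
/*
 * Example (illustrative sketch, not part of the original header): splitting
 * a nanosecond count into whole seconds and leftover nanoseconds.  The
 * literal 1000000000 stands in for NSEC_PER_SEC, which is not defined here.
 *
 *	u64 ns = 3750000000ULL;
 *	u32 rem;
 *	u64 secs = div_u64_rem(ns, 1000000000, &rem);
 *	// secs == 3, rem == 750000000
 */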

/**
 * div_s64_rem - signed 64bit divide with 32bit divisor with remainder
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 * @remainder: pointer to signed 32bit remainder
 *
 * Return: sets ``*remainder``, then returns dividend / divisor
 */
static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

/**
 * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 * @remainder: pointer to unsigned 64bit remainder
 *
 * Return: sets ``*remainder``, then returns dividend / divisor
 */
static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 *
 * Return: dividend / divisor
 */
static inline u64 div64_u64(u64 dividend, u64 divisor)
{
	return dividend / divisor;
}
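
/*
 * Example (illustrative sketch, not part of the original header): computing
 * an integer percentage from two 64bit byte counters.  Note the
 * multiplication happens first and may wrap if "used" exceeds U64_MAX / 100.
 *
 *	u64 used = 3ULL << 40, total = 4ULL << 40;
 *	u64 pct = div64_u64(used * 100, total);
 *	// pct == 75
 */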

/**
 * div64_s64 - signed 64bit divide with 64bit divisor
 * @dividend: signed 64bit dividend
 * @divisor: signed 64bit divisor
 *
 * Return: dividend / divisor
 */
static inline s64 div64_s64(s64 dividend, s64 divisor)
{
	return dividend / divisor;
}

#elif BITS_PER_LONG == 32

#define div64_long(x, y) div_s64((x), (y))
#define div64_ul(x, y) div_u64((x), (y))

#ifndef div_u64_rem
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = do_div(dividend, divisor);
	return dividend;
}
#endif
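
/*
 * Note on do_div() (illustrative, based on the usual asm/div64.h contract):
 * it is a macro that divides its first argument in place, so "dividend"
 * holds the quotient afterwards while the macro itself evaluates to the
 * 32bit remainder, which is why the helper above can return "dividend".
 *
 *	u64 n = 1000003;
 *	u32 rem = do_div(n, 1000);
 *	// n == 1000, rem == 3
 */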

#ifndef div_s64_rem
extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
#endif

#ifndef div64_u64_rem
extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
#endif

#ifndef div64_u64
extern u64 div64_u64(u64 dividend, u64 divisor);
#endif

#ifndef div64_s64
extern s64 div64_s64(s64 dividend, s64 divisor);
#endif

#endif /* BITS_PER_LONG */

/**
 * div_u64 - unsigned 64bit divide with 32bit divisor
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 *
 * Return: dividend / divisor
 *
 * This is the most common 64bit divide and should be used if possible,
 * as many 32bit archs can optimize this variant better than a full 64bit
 * divide.
 */
#ifndef div_u64
static inline u64 div_u64(u64 dividend, u32 divisor)
{
	u32 remainder;
	return div_u64_rem(dividend, divisor, &remainder);
}
#endif
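
/*
 * Example (illustrative sketch, not part of the original header): averaging
 * a 64bit running total over a 32bit sample count.
 *
 *	u64 total_ns = 10000000000ULL;
 *	u32 samples = 400;
 *	u64 avg_ns = div_u64(total_ns, samples);
 *	// avg_ns == 25000000
 */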

/**
 * div_s64 - signed 64bit divide with 32bit divisor
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 *
 * Return: dividend / divisor
 */
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
{
	s32 remainder;
	return div_s64_rem(dividend, divisor, &remainder);
}
#endif

u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);

static __always_inline u32
__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	u32 ret = 0;

	while (dividend >= divisor) {
		/* The following asm() prevents the compiler from
		   optimising this loop into a modulo operation. */
		asm("" : "+rm"(dividend));

		dividend -= divisor;
		ret++;
	}

	*remainder = dividend;

	return ret;
}
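
/*
 * Example (illustrative sketch, not part of the original header):
 * __iter_div_u64_rem() trades the full 64bit divide for repeated
 * subtraction, so it is only a win when the quotient is known to be small.
 * Here the dividend is just over two divisors' worth, so the loop runs twice.
 *
 *	u64 rem;
 *	u32 q = __iter_div_u64_rem(2000000005ULL, 1000000000, &rem);
 *	// q == 2, rem == 5
 */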

#ifndef mul_u32_u32
/*
 * Many a GCC version messes this up and generates a 64x64 mult :-(
 */
static inline u64 mul_u32_u32(u32 a, u32 b)
{
	return (u64)a * b;
}
#endif

#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)

#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
	return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u32_shr */

#ifndef mul_u64_u64_shr
static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
{
	return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u64_shr */

#else

#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
	u32 ah, al;
	u64 ret;

	al = a;
	ah = a >> 32;

	ret = mul_u32_u32(al, mul) >> shift;
	if (ah)
		ret += mul_u32_u32(ah, mul) << (32 - shift);

	return ret;
}
#endif /* mul_u64_u32_shr */
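
/*
 * Example (illustrative sketch, not part of the original header):
 * mul_u64_u32_shr() is typically used for fixed point scaling with a
 * (mult, shift) pair, the way clocksource conversions compute
 * ns = (cycles * mult) >> shift.
 *
 *	u64 cycles = 1000;
 *	u32 mult = 3 << 20;	// 3.0 as a Q20 fixed point multiplier
 *	u64 scaled = mul_u64_u32_shr(cycles, mult, 20);
 *	// scaled == 3000
 */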

#ifndef mul_u64_u64_shr
static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
{
	union {
		u64 ll;
		struct {
#ifdef __BIG_ENDIAN
			u32 high, low;
#else
			u32 low, high;
#endif
		} l;
	} rl, rm, rn, rh, a0, b0;
	u64 c;

	a0.ll = a;
	b0.ll = b;

	rl.ll = mul_u32_u32(a0.l.low, b0.l.low);
	rm.ll = mul_u32_u32(a0.l.low, b0.l.high);
	rn.ll = mul_u32_u32(a0.l.high, b0.l.low);
	rh.ll = mul_u32_u32(a0.l.high, b0.l.high);

	/*
	 * Each of these lines computes a 64-bit intermediate result into "c",
	 * starting at bits 32-95.  The low 32-bits go into the result of the
	 * multiplication, the high 32-bits are carried into the next step.
	 */
	rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low;
	rh.l.low = c = (c >> 32) + rm.l.high + rn.l.high + rh.l.low;
	rh.l.high = (c >> 32) + rh.l.high;

	/*
	 * The 128-bit result of the multiplication is in rl.ll and rh.ll,
	 * shift it right and throw away the high part of the result.
	 */
	if (shift == 0)
		return rl.ll;
	if (shift < 64)
		return (rl.ll >> shift) | (rh.ll << (64 - shift));
	return rh.ll >> (shift & 63);
}
#endif /* mul_u64_u64_shr */
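
/*
 * Example (illustrative sketch, not part of the original header): the
 * fallback above is plain 32x32 long multiplication with carry propagation.
 * Usage is the same as the __int128 version, e.g. applying a 64bit fixed
 * point ratio:
 *
 *	u64 ratio = 3ULL << 32;	// 3.0 as a 32.32 fixed point ratio
 *	u64 scaled = mul_u64_u64_shr(12345, ratio, 32);
 *	// scaled == 37035
 */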

#endif

#ifndef mul_u64_u32_div
static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
{
	union {
		u64 ll;
		struct {
#ifdef __BIG_ENDIAN
			u32 high, low;
#else
			u32 low, high;
#endif
		} l;
	} u, rl, rh;

	u.ll = a;
	rl.ll = mul_u32_u32(u.l.low, mul);
	rh.ll = mul_u32_u32(u.l.high, mul) + rl.l.high;

	/* Bits 32-63 of the result will be in rh.l.low. */
	rl.l.high = do_div(rh.ll, divisor);

	/* Bits 0-31 of the result will be in rl.l.low. */
	do_div(rl.ll, divisor);

	rl.l.high = rh.l.low;
	return rl.ll;
}
#endif /* mul_u64_u32_div */
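
/*
 * Example (illustrative sketch, not part of the original header):
 * mul_u64_u32_div() computes a * mul / divisor without losing the high bits
 * of the up-to-96bit intermediate product, provided the final result still
 * fits in 64 bits, e.g. when rescaling a large count between two rates:
 *
 *	u64 ticks = 1ULL << 60;
 *	u64 rescaled = mul_u64_u32_div(ticks, 3000, 1000);
 *	// rescaled == 3ULL << 60; a naive (ticks * 3000) would have overflowed
 */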

/**
 * DIV64_U64_ROUND_UP - unsigned 64bit divide with 64bit divisor rounded up
 * @ll: unsigned 64bit dividend
 * @d: unsigned 64bit divisor
 *
 * Divide unsigned 64bit dividend by unsigned 64bit divisor
 * and round up.
 *
 * Return: dividend / divisor rounded up
 */
#define DIV64_U64_ROUND_UP(ll, d)	\
	({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); })

/**
 * DIV64_U64_ROUND_CLOSEST - unsigned 64bit divide with 64bit divisor rounded to nearest integer
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 *
 * Divide unsigned 64bit dividend by unsigned 64bit divisor
 * and round to closest integer.
 *
 * Return: dividend / divisor rounded to nearest integer
 */
#define DIV64_U64_ROUND_CLOSEST(dividend, divisor)	\
	({ u64 _tmp = (divisor); div64_u64((dividend) + _tmp / 2, _tmp); })
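
/*
 * Example (illustrative sketch, not part of the original header): the two
 * rounding helpers differ only in the bias added before the divide.
 *
 *	DIV64_U64_ROUND_UP(10, 4);		// == 3
 *	DIV64_U64_ROUND_CLOSEST(10, 4);		// == 3 (2.5 rounds up)
 *	DIV64_U64_ROUND_CLOSEST(9, 4);		// == 2 (2.25 rounds down)
 */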

#endif /* _LINUX_MATH64_H */