/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MATH64_H
#define _LINUX_MATH64_H

#include <linux/types.h>
#include <asm/div64.h>

#if BITS_PER_LONG == 64

#define div64_long(x, y) div64_s64((x), (y))
#define div64_ul(x, y) div64_u64((x), (y))

/**
 * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 * @remainder: pointer to unsigned 32bit remainder
 *
 * Return: sets ``*remainder``, then returns dividend / divisor
 *
 * 32bit archs commonly provide an optimized version of this 64bit divide.
 */
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}
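/*
 * Example usage (illustrative, not part of the original header): split a
 * nanosecond count into seconds plus leftover nanoseconds. NSEC_PER_SEC
 * is assumed to come from <linux/time64.h>; "ns" is hypothetical.
 *
 *	u32 rem_ns;
 *	u64 secs = div_u64_rem(ns, NSEC_PER_SEC, &rem_ns);
 */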

/**
 * div_s64_rem - signed 64bit divide with 32bit divisor with remainder
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 * @remainder: pointer to signed 32bit remainder
 *
 * Return: sets ``*remainder``, then returns dividend / divisor
 */
static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

/**
 * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 * @remainder: pointer to unsigned 64bit remainder
 *
 * Return: sets ``*remainder``, then returns dividend / divisor
 */
static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 *
 * Return: dividend / divisor
 */
static inline u64 div64_u64(u64 dividend, u64 divisor)
{
	return dividend / divisor;
}

/**
 * div64_s64 - signed 64bit divide with 64bit divisor
 * @dividend: signed 64bit dividend
 * @divisor: signed 64bit divisor
 *
 * Return: dividend / divisor
 */
static inline s64 div64_s64(s64 dividend, s64 divisor)
{
	return dividend / divisor;
}

#elif BITS_PER_LONG == 32

#define div64_long(x, y) div_s64((x), (y))
#define div64_ul(x, y) div_u64((x), (y))

#ifndef div_u64_rem
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = do_div(dividend, divisor);
	return dividend;
}
#endif

#ifndef div_s64_rem
extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
#endif

#ifndef div64_u64_rem
extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
#endif

#ifndef div64_u64
extern u64 div64_u64(u64 dividend, u64 divisor);
#endif

#ifndef div64_s64
extern s64 div64_s64(s64 dividend, s64 divisor);
#endif

#endif /* BITS_PER_LONG */

/**
 * div_u64 - unsigned 64bit divide with 32bit divisor
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 *
 * This is the most common 64bit divide and should be used if possible,
 * as many 32bit archs can optimize this variant better than a full 64bit
 * divide.
 *
 * Return: dividend / divisor
 */
#ifndef div_u64
static inline u64 div_u64(u64 dividend, u32 divisor)
{
	u32 remainder;
	return div_u64_rem(dividend, divisor, &remainder);
}
#endif
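/*
 * Example usage (illustrative, with hypothetical variables): average a
 * 64bit byte counter over a 32bit tick count. On 32bit kernels a plain
 * "total_bytes / ticks" would emit a call to the compiler's __udivdi3
 * helper, which the kernel does not link against, so div_u64() must be
 * used instead.
 *
 *	u64 avg = div_u64(total_bytes, ticks);
 */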

/**
 * div_s64 - signed 64bit divide with 32bit divisor
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 *
 * Return: dividend / divisor
 */
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
{
	s32 remainder;
	return div_s64_rem(dividend, divisor, &remainder);
}
#endif

u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);

static __always_inline u32
__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	u32 ret = 0;

	while (dividend >= divisor) {
		/*
		 * The following asm() prevents the compiler from
		 * optimising this loop into a modulo operation.
		 */
		asm("" : "+rm"(dividend));

		dividend -= divisor;
		ret++;
	}

	*remainder = dividend;

	return ret;
}
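/*
 * Example usage (illustrative, with hypothetical variables):
 * __iter_div_u64_rem() divides by repeated subtraction, so it is only
 * a win when the quotient is known to be small (typically 0 or 1),
 * e.g. when normalizing a nanosecond remainder after adding a small
 * delta:
 *
 *	u64 ns = rem_ns + delta_ns;
 *	sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
 */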

#ifndef mul_u32_u32
/*
 * Many a GCC version messes this up and generates a 64x64 mult :-(
 */
static inline u64 mul_u32_u32(u32 a, u32 b)
{
	return (u64)a * b;
}
#endif
173
Peter Zijlstrabe5e6102013-11-18 18:27:06 +0100174#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
175
176#ifndef mul_u64_u32_shr
177static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
178{
179 return (u64)(((unsigned __int128)a * mul) >> shift);
180}
181#endif /* mul_u64_u32_shr */
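/*
 * Example usage (illustrative): clocksource-style scaling, where a cycle
 * count is converted to nanoseconds via a (mult, shift) fixed-point pair,
 * i.e. ns = (cycles * mult) >> shift. The variable names follow the usual
 * mult/shift convention but are hypothetical here.
 *
 *	u64 ns = mul_u64_u32_shr(cycles, mult, shift);
 */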

#ifndef mul_u64_u64_shr
static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
{
	return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u64_shr */

#else

#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
	u32 ah, al;
	u64 ret;

	al = a;
	ah = a >> 32;

	ret = mul_u32_u32(al, mul) >> shift;
	if (ah)
		ret += mul_u32_u32(ah, mul) << (32 - shift);

	return ret;
}
#endif /* mul_u64_u32_shr */

#ifndef mul_u64_u64_shr
static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
{
	union {
		u64 ll;
		struct {
#ifdef __BIG_ENDIAN
			u32 high, low;
#else
			u32 low, high;
#endif
		} l;
	} rl, rm, rn, rh, a0, b0;
	u64 c;

	a0.ll = a;
	b0.ll = b;

	rl.ll = mul_u32_u32(a0.l.low, b0.l.low);
	rm.ll = mul_u32_u32(a0.l.low, b0.l.high);
	rn.ll = mul_u32_u32(a0.l.high, b0.l.low);
	rh.ll = mul_u32_u32(a0.l.high, b0.l.high);

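	/*
	 * With a = (ah << 32) + al and b = (bh << 32) + bl, the product
	 * decomposes into four 32x32->64 partial products:
	 *
	 *   a * b = (rh << 64) + ((rm + rn) << 32) + rl
	 *
	 * where rl = al*bl, rm = al*bh, rn = ah*bl, rh = ah*bh. The code
	 * below sums the middle terms with explicit carry handling.
	 */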
	/*
	 * Each of these lines computes a 64-bit intermediate result into "c",
	 * starting at bits 32-95. The low 32-bits go into the result of the
	 * multiplication, the high 32-bits are carried into the next step.
	 */
	rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low;
	rh.l.low = c = (c >> 32) + rm.l.high + rn.l.high + rh.l.low;
	rh.l.high = (c >> 32) + rh.l.high;

	/*
	 * The 128-bit result of the multiplication is in rl.ll and rh.ll,
	 * shift it right and throw away the high part of the result.
	 */
	if (shift == 0)
		return rl.ll;
	if (shift < 64)
		return (rl.ll >> shift) | (rh.ll << (64 - shift));
	return rh.ll >> (shift & 63);
}
#endif /* mul_u64_u64_shr */

#endif

#ifndef mul_u64_u32_div
static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
{
	union {
		u64 ll;
		struct {
#ifdef __BIG_ENDIAN
			u32 high, low;
#else
			u32 low, high;
#endif
		} l;
	} u, rl, rh;

	u.ll = a;
	rl.ll = mul_u32_u32(u.l.low, mul);
	rh.ll = mul_u32_u32(u.l.high, mul) + rl.l.high;

	/* Bits 32-63 of the result will be in rh.l.low. */
	rl.l.high = do_div(rh.ll, divisor);

	/* Bits 0-31 of the result will be in rl.l.low. */
	do_div(rl.ll, divisor);

	rl.l.high = rh.l.low;
	return rl.ll;
}
#endif /* mul_u64_u32_div */
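/*
 * Example usage (illustrative, with hypothetical variables): rescale a
 * 64bit value by the ratio of two 32bit frequencies without overflowing
 * the intermediate product, i.e. compute a * new_freq / old_freq. The
 * final result is assumed to fit in 64 bits.
 *
 *	u64 rescaled = mul_u64_u32_div(a, new_freq, old_freq);
 */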

#define DIV64_U64_ROUND_UP(ll, d)	\
	({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); })
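/*
 * Worked example: DIV64_U64_ROUND_UP(10, 4) evaluates
 * div64_u64(10 + 4 - 1, 4) = 13 / 4 = 3. Note that the addition can wrap
 * if ll is within (d - 1) of U64_MAX.
 */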
286
Simon Hormancb8be112019-03-25 17:35:53 +0100287/**
288 * DIV64_U64_ROUND_CLOSEST - unsigned 64bit divide with 64bit divisor rounded to nearest integer
289 * @dividend: unsigned 64bit dividend
290 * @divisor: unsigned 64bit divisor
291 *
292 * Divide unsigned 64bit dividend by unsigned 64bit divisor
293 * and round to closest integer.
294 *
295 * Return: dividend / divisor rounded to nearest integer
296 */
297#define DIV64_U64_ROUND_CLOSEST(dividend, divisor) \
298 ({ u64 _tmp = (divisor); div64_u64((dividend) + _tmp / 2, _tmp); })
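/*
 * Worked example: DIV64_U64_ROUND_CLOSEST(7, 2) evaluates
 * div64_u64(7 + 1, 2) = 4, i.e. 3.5 rounds up to 4, while
 * DIV64_U64_ROUND_CLOSEST(7, 3) = div64_u64(7 + 1, 3) = 2.
 */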

#endif /* _LINUX_MATH64_H */