#ifndef _LINUX_MATH64_H
#define _LINUX_MATH64_H

#include <linux/types.h>
#include <asm/div64.h>

#if BITS_PER_LONG == 64

#define div64_long(x, y) div64_s64((x), (y))
#define div64_ul(x, y) div64_u64((x), (y))

/**
 * div_u64_rem - unsigned 64bit divide with 32bit divisor and remainder
 *
 * This is commonly provided by 32bit archs to provide an optimized 64bit
 * divide.
 */
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

/**
 * div_s64_rem - signed 64bit divide with 32bit divisor and remainder
 */
static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

/**
 * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
 */
static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 */
static inline u64 div64_u64(u64 dividend, u64 divisor)
{
	return dividend / divisor;
}

/**
 * div64_s64 - signed 64bit divide with 64bit divisor
 */
static inline s64 div64_s64(s64 dividend, s64 divisor)
{
	return dividend / divisor;
}

#elif BITS_PER_LONG == 32

#define div64_long(x, y) div_s64((x), (y))
#define div64_ul(x, y) div_u64((x), (y))

#ifndef div_u64_rem
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = do_div(dividend, divisor);
	return dividend;
}
#endif

#ifndef div_s64_rem
extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
#endif

#ifndef div64_u64_rem
extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
#endif

#ifndef div64_u64
extern u64 div64_u64(u64 dividend, u64 divisor);
#endif

#ifndef div64_s64
extern s64 div64_s64(s64 dividend, s64 divisor);
#endif

#endif /* BITS_PER_LONG */
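
/*
 * Illustrative sketch (editor's example, not part of this header):
 * whichever branch above was compiled, the full 64-by-64 helpers are now
 * available, e.g. for averaging a 64-bit running total without open-coding
 * a division that 32-bit architectures cannot do natively. The function
 * name below is hypothetical.
 */
static inline u64 example_average(u64 total, u64 nr_samples)
{
	/* Guard against a zero divisor; div64_u64() does not. */
	return nr_samples ? div64_u64(total, nr_samples) : 0;
}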

/**
 * div_u64 - unsigned 64bit divide with 32bit divisor
 *
 * This is the most common 64bit divide and should be used if possible,
 * as many 32bit archs can optimize this variant better than a full 64bit
 * divide.
 */
#ifndef div_u64
static inline u64 div_u64(u64 dividend, u32 divisor)
{
	u32 remainder;
	return div_u64_rem(dividend, divisor, &remainder);
}
#endif
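
/*
 * Illustrative sketch (editor's example, not part of this header): the
 * common pattern the comment above describes, a 64-bit value divided by a
 * small 32-bit constant, such as converting nanoseconds to microseconds.
 * The function name is hypothetical.
 */
static inline u64 example_ns_to_us(u64 ns)
{
	return div_u64(ns, 1000);
}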

/**
 * div_s64 - signed 64bit divide with 32bit divisor
 */
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
{
	s32 remainder;
	return div_s64_rem(dividend, divisor, &remainder);
}
#endif

u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);

static __always_inline u32
__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	u32 ret = 0;

	while (dividend >= divisor) {
		/* The following asm() prevents the compiler from
		   optimising this loop into a modulo operation. */
		asm("" : "+rm"(dividend));

		dividend -= divisor;
		ret++;
	}

	*remainder = dividend;

	return ret;
}
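
/*
 * Illustrative sketch (editor's example, not part of this header): the
 * iterative divide above is meant for callers that expect a small
 * quotient, where repeated subtraction beats a full 64-bit division,
 * e.g. normalizing a seconds/nanoseconds pair after a small increment.
 * The function name is hypothetical; 1000000000 is NSEC_PER_SEC.
 */
static inline void example_normalize_time(u64 *secs, u64 *nsecs)
{
	/* Quotient is tiny (usually 0 or 1) after adding a few ns. */
	*secs += __iter_div_u64_rem(*nsecs, 1000000000, nsecs);
}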

#ifndef mul_u32_u32
/*
 * Many a GCC version messes this up and generates a 64x64 mult :-(
 */
static inline u64 mul_u32_u32(u32 a, u32 b)
{
	return (u64)a * b;
}
#endif
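
/*
 * Illustrative sketch (editor's example, not part of this header): the
 * point of mul_u32_u32() is a full 32x32->64 widening multiply. A plain
 * `a * b` on two u32 operands is computed modulo 2^32 before any widening
 * assignment, so the high half is lost. Hypothetical function name.
 */
static inline u64 example_widening_mul(u32 hz, u32 usecs)
{
	return mul_u32_u32(hz, usecs);	/* never truncates, unlike hz * usecs */
}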

#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)

#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
	return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u32_shr */

#ifndef mul_u64_u64_shr
static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
{
	return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u64_shr */

#else

#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
	u32 ah, al;
	u64 ret;

	al = a;
	ah = a >> 32;

	ret = mul_u32_u32(al, mul) >> shift;
	if (ah)
		ret += mul_u32_u32(ah, mul) << (32 - shift);

	return ret;
}
#endif /* mul_u64_u32_shr */

#ifndef mul_u64_u64_shr
static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
{
	union {
		u64 ll;
		struct {
#ifdef __BIG_ENDIAN
			u32 high, low;
#else
			u32 low, high;
#endif
		} l;
	} rl, rm, rn, rh, a0, b0;
	u64 c;

	a0.ll = a;
	b0.ll = b;

	rl.ll = mul_u32_u32(a0.l.low, b0.l.low);
	rm.ll = mul_u32_u32(a0.l.low, b0.l.high);
	rn.ll = mul_u32_u32(a0.l.high, b0.l.low);
	rh.ll = mul_u32_u32(a0.l.high, b0.l.high);

	/*
	 * Each of these lines computes a 64-bit intermediate result into "c",
	 * starting at bits 32-95. The low 32-bits go into the result of the
	 * multiplication, the high 32-bits are carried into the next step.
	 */
	rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low;
	rh.l.low = c = (c >> 32) + rm.l.high + rn.l.high + rh.l.low;
	rh.l.high = (c >> 32) + rh.l.high;

	/*
	 * The 128-bit result of the multiplication is in rl.ll and rh.ll,
	 * shift it right and throw away the high part of the result.
	 */
	if (shift == 0)
		return rl.ll;
	if (shift < 64)
		return (rl.ll >> shift) | (rh.ll << (64 - shift));
	return rh.ll >> (shift & 63);
}
#endif /* mul_u64_u64_shr */

#endif
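
/*
 * Illustrative sketch (editor's example, not part of this header): the
 * classic consumer of mul_u64_u32_shr() is fixed-point scaling, such as a
 * clocksource-style cycles-to-nanoseconds conversion where
 * ns = (cycles * mult) >> shift. The function name is hypothetical.
 */
static inline u64 example_cycles_to_ns(u64 cycles, u32 mult,
				       unsigned int shift)
{
	return mul_u64_u32_shr(cycles, mult, shift);
}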

#ifndef mul_u64_u32_div
static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
{
	union {
		u64 ll;
		struct {
#ifdef __BIG_ENDIAN
			u32 high, low;
#else
			u32 low, high;
#endif
		} l;
	} u, rl, rh;

	u.ll = a;
	rl.ll = mul_u32_u32(u.l.low, mul);
	rh.ll = mul_u32_u32(u.l.high, mul) + rl.l.high;

	/* Bits 32-63 of the result will be in rh.l.low. */
	rl.l.high = do_div(rh.ll, divisor);

	/* Bits 0-31 of the result will be in rl.l.low. */
	do_div(rl.ll, divisor);

	rl.l.high = rh.l.low;
	return rl.ll;
}
#endif /* mul_u64_u32_div */
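
/*
 * Illustrative sketch (editor's example, not part of this header):
 * mul_u64_u32_div() evaluates a * mul / divisor with a 96-bit
 * intermediate, so the product may exceed 64 bits without corrupting the
 * result (the final quotient must still fit in 64 bits). A typical use is
 * rescaling a counter between frequencies. Names are hypothetical.
 */
static inline u64 example_rescale_count(u64 count, u32 to_hz, u32 from_hz)
{
	return mul_u64_u32_div(count, to_hz, from_hz);
}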

#endif /* _LINUX_MATH64_H */