// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com>
 *
 * Based on former do_div() implementation from asm-parisc/div64.h:
 *	Copyright (C) 1999 Hewlett-Packard Co
 *	Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
 *
 *
 * Generic C version of 64bit/32bit division and modulo, with
 * 64bit result and 32bit remainder.
 *
 * The fast case for (n>>32 == 0) is handled inline by do_div().
 *
 * Code generated for this function might be very inefficient
 * for some CPUs. __div64_32() can be overridden by linking arch-specific
 * assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S
 * or by defining a preprocessor macro in arch/include/asm/div64.h.
 */
20
Andy Shevchenkoaa6159a2020-12-15 20:42:48 -080021#include <linux/bitops.h>
Paul Gortmaker8bc3bcc2011-11-16 21:29:17 -050022#include <linux/export.h>
Andy Shevchenkoaa6159a2020-12-15 20:42:48 -080023#include <linux/math.h>
Roman Zippel2418f4f2008-05-01 04:34:25 -070024#include <linux/math64.h>
Andy Shevchenkoaa6159a2020-12-15 20:42:48 -080025#include <linux/log2.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070026
27/* Not needed on 64bit architectures */
28#if BITS_PER_LONG == 32
29
#ifndef __div64_32
/*
 * Divide the 64-bit value at *n by a 32-bit base: store the quotient
 * back through *n and return the remainder.  Weak so that an arch can
 * supply a faster assembly replacement at link time.
 */
uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
{
	uint64_t remainder = *n;
	uint64_t divisor = base;
	uint64_t quotient = 0;
	uint64_t bit = 1;
	uint32_t upper = remainder >> 32;

	/* Knock out the part of the dividend that is >= base << 32. */
	if (upper >= base) {
		upper /= base;
		quotient = (uint64_t) upper << 32;
		remainder -= (uint64_t) (upper * base) << 32;
	}

	/*
	 * Scale the divisor up towards the remainder, tracking the
	 * corresponding quotient bit.  Stop before the divisor's top
	 * bit would be shifted out (the signed-positive check).
	 */
	while ((int64_t) divisor > 0 && divisor < remainder) {
		divisor <<= 1;
		bit <<= 1;
	}

	/* Classic restoring shift-and-subtract division. */
	do {
		if (remainder >= divisor) {
			remainder -= divisor;
			quotient += bit;
		}
		divisor >>= 1;
		bit >>= 1;
	} while (bit);

	*n = quotient;
	return remainder;
}
EXPORT_SYMBOL(__div64_32);
#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -070065
/**
 * div_s64_rem - signed 64bit divide with 32bit divisor and remainder
 * @dividend: signed 64bit dividend
 * @divisor: signed 32bit divisor
 * @remainder: pointer to signed 32bit remainder
 *
 * Sets *remainder (which carries the sign of the dividend, truncated
 * division semantics) and returns dividend / divisor.
 *
 * NOTE(review): for dividend == S64_MIN the -dividend negation below
 * overflows; presumably callers never pass that value — confirm.
 */
#ifndef div_s64_rem
s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
	u64 quotient;

	if (dividend < 0) {
		/* Divide magnitudes; remainder takes the dividend's sign. */
		quotient = div_u64_rem(-dividend, abs(divisor), (u32 *)remainder);
		*remainder = -*remainder;
		/* Operands of opposite sign: quotient is negative. */
		if (divisor > 0)
			quotient = -quotient;
	} else {
		quotient = div_u64_rem(dividend, abs(divisor), (u32 *)remainder);
		if (divisor < 0)
			quotient = -quotient;
	}
	return quotient;
}
EXPORT_SYMBOL(div_s64_rem);
#endif
91
Brian Behlendorf658716d2010-10-26 14:23:10 -070092/**
Mike Snitzereb18cba2013-08-20 15:05:17 -040093 * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
94 * @dividend: 64bit dividend
95 * @divisor: 64bit divisor
96 * @remainder: 64bit remainder
97 *
98 * This implementation is a comparable to algorithm used by div64_u64.
99 * But this operation, which includes math for calculating the remainder,
100 * is kept distinct to avoid slowing down the div64_u64 operation on 32bit
101 * systems.
102 */
103#ifndef div64_u64_rem
104u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
105{
106 u32 high = divisor >> 32;
107 u64 quot;
108
109 if (high == 0) {
110 u32 rem32;
111 quot = div_u64_rem(dividend, divisor, &rem32);
112 *remainder = rem32;
113 } else {
Stanislaw Gruszkacdc94a372019-03-07 16:28:18 -0800114 int n = fls(high);
Mike Snitzereb18cba2013-08-20 15:05:17 -0400115 quot = div_u64(dividend >> n, divisor >> n);
116
117 if (quot != 0)
118 quot--;
119
120 *remainder = dividend - quot * divisor;
121 if (*remainder >= divisor) {
122 quot++;
123 *remainder -= divisor;
124 }
125 }
126
127 return quot;
128}
129EXPORT_SYMBOL(div64_u64_rem);
130#endif
131
132/**
Stanislaw Gruszkaf3002132013-04-30 11:35:07 +0200133 * div64_u64 - unsigned 64bit divide with 64bit divisor
Brian Behlendorf658716d2010-10-26 14:23:10 -0700134 * @dividend: 64bit dividend
135 * @divisor: 64bit divisor
136 *
137 * This implementation is a modified version of the algorithm proposed
138 * by the book 'Hacker's Delight'. The original source and full proof
139 * can be found here and is available for use without restriction.
140 *
Heinrich Schuchardt28ca84e2015-02-26 20:19:03 +0100141 * 'http://www.hackersdelight.org/hdcodetxt/divDouble.c.txt'
Brian Behlendorf658716d2010-10-26 14:23:10 -0700142 */
Stanislaw Gruszkaf3002132013-04-30 11:35:07 +0200143#ifndef div64_u64
144u64 div64_u64(u64 dividend, u64 divisor)
Stephen Hemminger3927f2e2007-03-25 19:54:23 -0700145{
Brian Behlendorf658716d2010-10-26 14:23:10 -0700146 u32 high = divisor >> 32;
147 u64 quot;
Stephen Hemminger3927f2e2007-03-25 19:54:23 -0700148
Brian Behlendorf658716d2010-10-26 14:23:10 -0700149 if (high == 0) {
Stanislaw Gruszkaf3002132013-04-30 11:35:07 +0200150 quot = div_u64(dividend, divisor);
Brian Behlendorf658716d2010-10-26 14:23:10 -0700151 } else {
Stanislaw Gruszkacdc94a372019-03-07 16:28:18 -0800152 int n = fls(high);
Brian Behlendorf658716d2010-10-26 14:23:10 -0700153 quot = div_u64(dividend >> n, divisor >> n);
Stephen Hemminger3927f2e2007-03-25 19:54:23 -0700154
Brian Behlendorf658716d2010-10-26 14:23:10 -0700155 if (quot != 0)
156 quot--;
Stanislaw Gruszkaf3002132013-04-30 11:35:07 +0200157 if ((dividend - quot * divisor) >= divisor)
Brian Behlendorf658716d2010-10-26 14:23:10 -0700158 quot++;
159 }
Stephen Hemminger3927f2e2007-03-25 19:54:23 -0700160
Brian Behlendorf658716d2010-10-26 14:23:10 -0700161 return quot;
Stephen Hemminger3927f2e2007-03-25 19:54:23 -0700162}
Stanislaw Gruszkaf3002132013-04-30 11:35:07 +0200163EXPORT_SYMBOL(div64_u64);
Roman Zippel6f6d6a12008-05-01 04:34:28 -0700164#endif
Stephen Hemminger3927f2e2007-03-25 19:54:23 -0700165
Brian Behlendorf658716d2010-10-26 14:23:10 -0700166/**
167 * div64_s64 - signed 64bit divide with 64bit divisor
168 * @dividend: 64bit dividend
169 * @divisor: 64bit divisor
170 */
171#ifndef div64_s64
172s64 div64_s64(s64 dividend, s64 divisor)
173{
174 s64 quot, t;
175
Andrew Morton79211c82015-11-09 14:58:13 -0800176 quot = div64_u64(abs(dividend), abs(divisor));
Brian Behlendorf658716d2010-10-26 14:23:10 -0700177 t = (dividend ^ divisor) >> 63;
178
179 return (quot ^ t) - t;
180}
181EXPORT_SYMBOL(div64_s64);
182#endif
183
Linus Torvalds1da177e2005-04-16 15:20:36 -0700184#endif /* BITS_PER_LONG == 32 */
Jeremy Fitzhardingef595ec92008-06-12 10:47:56 +0200185
/*
 * Iterative div/mod for use when dividend is not expected to be much
 * bigger than divisor.
 */
u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	/* Out-of-line wrapper around the inline iterative helper. */
	return __iter_div_u64_rem(dividend, divisor, remainder);
}
EXPORT_SYMBOL(iter_div_u64_rem);
Oleg Nesterov3dc167b2020-05-19 19:25:06 +0200195
#ifndef mul_u64_u64_div_u64
/*
 * mul_u64_u64_div_u64 - compute a * b / c, avoiding intermediate
 * 64-bit overflow of the a * b product where possible (precision may
 * be dropped when the scaled operands still would not fit).
 *
 * NOTE(review): assumes c != 0 — not checked here; confirm callers.
 * NOTE(review): ilog2() behavior for a == 0 or b == 0 is defined
 * elsewhere (include/linux/log2.h) — the overflow test presumably
 * stays false in that case; verify.
 */
u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c)
{
	u64 res = 0, div, rem;
	int shift;

	/* can a * b overflow ? */
	if (ilog2(a) + ilog2(b) > 62) {
		/*
		 * (b * a) / c is equal to
		 *
		 *      (b / c) * a +
		 *      (b % c) * a / c
		 *
		 * if nothing overflows. Can the 1st multiplication
		 * overflow? Yes, but we do not care: this can only
		 * happen if the end result can't fit in u64 anyway.
		 *
		 * So the code below does
		 *
		 *      res = (b / c) * a;
		 *      b = b % c;
		 */
		div = div64_u64_rem(b, c, &rem);
		res = div * a;
		b = rem;

		/* Still overflowing? Shed low bits of b and c alike. */
		shift = ilog2(a) + ilog2(b) - 62;
		if (shift > 0) {
			/* drop precision */
			b >>= shift;
			c >>= shift;
			/* Divisor shifted to zero: res is the best answer left. */
			if (!c)
				return res;
		}
	}

	/* Remaining product fits in 64 bits; finish exactly. */
	return res + div64_u64(a * b, c);
}
EXPORT_SYMBOL(mul_u64_u64_div_u64);
#endif