author     Roman Zippel <zippel@linux-m68k.org>            2008-05-01 15:34:25 +0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-05-01 19:03:58 +0400
commit     2418f4f28f8467b92a6177af32d05737ebf6206c (patch)
tree       cd35f4feef2ed3078ebb7ce6dcaf5f627299944e /lib
parent     adafbedf0c31ae1cde62035c82857f5e376af553 (diff)
download   linux-2418f4f28f8467b92a6177af32d05737ebf6206c.tar.xz
introduce explicit signed/unsigned 64bit divide
The current do_div doesn't explicitly say that it's unsigned, and the signed counterpart is missing, which is needed e.g. when dealing with time values.

This introduces 64-bit signed/unsigned divide functions and also cleans up the somewhat awkward calling API, which often requires temporary variables for the dividend. To avoid the need for temporary variables for the remainder as well, each divide variant also provides a version which doesn't return the remainder.

Each architecture can now provide optimized versions of these functions; otherwise generic fallback implementations will be used.

As an example I provided an alternative for the current x86 divide, which avoids the asm casts; using a union allows gcc to generate better code. It also avoids the upper divide in a few more cases where the result is known (i.e. the upper quotient is zero).

Signed-off-by: Roman Zippel <zippel@linux-m68k.org>
Cc: john stultz <johnstul@us.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
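A short, illustrative sketch (not part of this patch) of how the calling conventions differ; the helper names old_avg_ns/new_avg_ns are made up for the example, and the declarations are assumed to come from <linux/math64.h> as introduced by this series:

	#include <linux/math64.h>	/* div_u64, div_s64, div_u64_rem, div_s64_rem */
	#include <asm/div64.h>		/* do_div */

	/* Old style: do_div() is unsigned only, modifies its first argument
	 * in place and returns the remainder, so a temporary is usually needed. */
	static u64 old_avg_ns(u64 total_ns, u32 count)
	{
		u64 tmp = total_ns;	/* do_div() clobbers the dividend */

		do_div(tmp, count);	/* remainder returned, quotient left in tmp */
		return tmp;
	}

	/* New style: explicit signed/unsigned variants, plain call-by-value,
	 * with or without the remainder. */
	static s64 new_avg_ns(s64 delta_ns, s32 count)
	{
		s32 rem;
		s64 avg = div_s64_rem(delta_ns, count, &rem);	/* signed, with remainder */

		return avg;		/* or simply div_s64(delta_ns, count)
					 * when the remainder isn't needed */
	}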
Diffstat (limited to 'lib')
-rw-r--r--   lib/div64.c   23
1 file changed, 21 insertions(+), 2 deletions(-)
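For reference, the x86-32 rework mentioned in the commit message amounts to something along these lines (a sketch of the approach, not necessarily the exact code in the arch header):

	/* The union lets gcc address the two 32-bit halves of the 64-bit
	 * dividend directly, without asm casts, and the upper divide is
	 * skipped when its quotient is known to be zero.  Assumes the
	 * little-endian layout of x86 (v32[0] is the low half). */
	static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
	{
		union {
			u64 v64;
			u32 v32[2];
		} d = { dividend };
		u32 upper = d.v32[1];

		d.v32[1] = 0;
		if (upper >= divisor) {		/* upper quotient is non-zero */
			d.v32[1] = upper / divisor;
			upper %= divisor;
		}
		/* divl divides %edx:%eax by the operand: quotient in %eax,
		 * remainder in %edx. */
		asm("divl %2" : "=a" (d.v32[0]), "=d" (*remainder)
			      : "rm" (divisor), "0" (d.v32[0]), "1" (upper));
		return d.v64;
	}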
diff --git a/lib/div64.c b/lib/div64.c
index b71cf93c529a..689bd76833fa 100644
--- a/lib/div64.c
+++ b/lib/div64.c
@@ -16,9 +16,8 @@
* assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S.
*/
-#include <linux/types.h>
#include <linux/module.h>
-#include <asm/div64.h>
+#include <linux/math64.h>
/* Not needed on 64bit architectures */
#if BITS_PER_LONG == 32
@@ -58,6 +57,26 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
EXPORT_SYMBOL(__div64_32);
+#ifndef div_s64_rem
+s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
+{
+ u64 quotient;
+
+ if (dividend < 0) {
+ quotient = div_u64_rem(-dividend, abs(divisor), (u32 *)remainder);
+ *remainder = -*remainder;
+ if (divisor > 0)
+ quotient = -quotient;
+ } else {
+ quotient = div_u64_rem(dividend, abs(divisor), (u32 *)remainder);
+ if (divisor < 0)
+ quotient = -quotient;
+ }
+ return quotient;
+}
+EXPORT_SYMBOL(div_s64_rem);
+#endif
+
/* 64bit divisor, dividend and result. dynamic precision */
uint64_t div64_64(uint64_t dividend, uint64_t divisor)
{