author		Peter Zijlstra <peterz@infradead.org>	2016-12-09 11:30:11 +0300
committer	Ingo Molnar <mingo@kernel.org>	2017-01-14 13:31:50 +0300
commit		9e3d6223d2093a8903c8f570a06284453ee59944 (patch)
tree		f6010d541488db907e3fbf0ab56d595694b12c79 /include/linux/math64.h
parent		e96f8f18c81b2f5b290206fc0da74b551e82646d (diff)
download	linux-9e3d6223d2093a8903c8f570a06284453ee59944.tar.xz
math64, timers: Fix 32bit mul_u64_u32_shr() and friends
It turns out that while GCC-4.4 manages to generate 32x32->64 mult
instructions for the 32bit mul_u64_u32_shr() code, any GCC after that
fails horribly.

Fix this by providing an explicit mul_u32_u32() function which can be
architecture provided.

Reported-by: Chris Metcalf <cmetcalf@mellanox.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Chris Metcalf <cmetcalf@mellanox.com> [for tile]
Cc: Christopher S. Hall <christopher.s.hall@intel.com>
Cc: David Gibson <david@gibson.dropbear.id.au>
Cc: John Stultz <john.stultz@linaro.org>
Cc: Laurent Vivier <lvivier@redhat.com>
Cc: Liav Rehana <liavr@mellanox.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Parit Bhargava <prarit@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Richard Cochran <richardcochran@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20161209083011.GD15765@worktop.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
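The generic fallback added by this patch relies on the compiler noticing that both operands are 32-bit and emitting a single widening multiply; an architecture that cannot trust its compiler to do so can supply its own mul_u32_u32() before this header is included. A minimal sketch of what such an override might look like on 32-bit x86, assuming GNU inline asm (illustrative only, not part of this commit):

static inline u64 mul_u32_u32(u32 a, u32 b)
{
	u32 high, low;

	/* MULL produces the full 64-bit product in EDX:EAX */
	asm ("mull %[b]" : "=a" (low), "=d" (high)
	     : [a] "a" (a), [b] "rm" (b));

	return low | ((u64)high) << 32;
}
#define mul_u32_u32 mul_u32_u32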
Diffstat (limited to 'include/linux/math64.h')
-rw-r--r--	include/linux/math64.h	26
1 file changed, 18 insertions(+), 8 deletions(-)
diff --git a/include/linux/math64.h b/include/linux/math64.h
index 6e8b5b270ffe..80690c96c734 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -133,6 +133,16 @@ __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
 	return ret;
 }
 
+#ifndef mul_u32_u32
+/*
+ * Many a GCC version messes this up and generates a 64x64 mult :-(
+ */
+static inline u64 mul_u32_u32(u32 a, u32 b)
+{
+	return (u64)a * b;
+}
+#endif
+
 #if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
 
 #ifndef mul_u64_u32_shr
@@ -160,9 +170,9 @@ static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
 	al = a;
 	ah = a >> 32;
 
-	ret = ((u64)al * mul) >> shift;
+	ret = mul_u32_u32(al, mul) >> shift;
 	if (ah)
-		ret += ((u64)ah * mul) << (32 - shift);
+		ret += mul_u32_u32(ah, mul) << (32 - shift);
 
 	return ret;
 }
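The 32-bit fallback above splits a into halves, a = ((u64)ah << 32) | al, and uses the identity (a * mul) >> shift == ((al * mul) >> shift) + ((ah * mul) << (32 - shift)), which is exact for shift <= 32 whenever the result fits in 64 bits. A user-space sketch that spot-checks this against a 128-bit reference (assumes a GCC/Clang host with unsigned __int128; not part of the patch):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t a = 0x00000123456789abULL;	/* small enough that the result fits in 64 bits */
	uint32_t mul = 0xdeadbeef;
	unsigned int shift = 17;

	uint32_t al = (uint32_t)a, ah = (uint32_t)(a >> 32);
	uint64_t split = (((uint64_t)al * mul) >> shift)
		       + (((uint64_t)ah * mul) << (32 - shift));
	uint64_t ref = (uint64_t)(((unsigned __int128)a * mul) >> shift);

	assert(split == ref);
	return 0;
}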
@@ -186,10 +196,10 @@ static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
 	a0.ll = a;
 	b0.ll = b;
 
-	rl.ll = (u64)a0.l.low * b0.l.low;
-	rm.ll = (u64)a0.l.low * b0.l.high;
-	rn.ll = (u64)a0.l.high * b0.l.low;
-	rh.ll = (u64)a0.l.high * b0.l.high;
+	rl.ll = mul_u32_u32(a0.l.low, b0.l.low);
+	rm.ll = mul_u32_u32(a0.l.low, b0.l.high);
+	rn.ll = mul_u32_u32(a0.l.high, b0.l.low);
+	rh.ll = mul_u32_u32(a0.l.high, b0.l.high);
 
 	/*
 	 * Each of these lines computes a 64-bit intermediate result into "c",
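The four partial products above implement the schoolbook split a * b = (ah*bh << 64) + ((ah*bl + al*bh) << 32) + al*bl, with each 32x32 term now routed through mul_u32_u32(). A quick user-space check of that decomposition (again assuming unsigned __int128 on the host; illustrative only):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t a = 0x123456789abcdef0ULL, b = 0xfedcba9876543210ULL;
	uint32_t al = (uint32_t)a, ah = (uint32_t)(a >> 32);
	uint32_t bl = (uint32_t)b, bh = (uint32_t)(b >> 32);

	unsigned __int128 split =
		((unsigned __int128)((uint64_t)ah * bh) << 64) +
		((unsigned __int128)((uint64_t)ah * bl) << 32) +
		((unsigned __int128)((uint64_t)al * bh) << 32) +
		(uint64_t)al * bl;

	assert(split == (unsigned __int128)a * b);
	return 0;
}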
@@ -229,8 +239,8 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
 	} u, rl, rh;
 
 	u.ll = a;
-	rl.ll = (u64)u.l.low * mul;
-	rh.ll = (u64)u.l.high * mul + rl.l.high;
+	rl.ll = mul_u32_u32(u.l.low, mul);
+	rh.ll = mul_u32_u32(u.l.high, mul) + rl.l.high;
 
 	/* Bits 32-63 of the result will be in rh.l.low. */
 	rl.l.high = do_div(rh.ll, divisor);
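These helpers exist largely for timer and timekeeping code, which scales 64-bit cycle counts by a precomputed mult/shift pair. A hedged sketch of that typical caller pattern (the cycles_to_ns() name here is hypothetical, not from this patch):

/* ns = (cycles * mult) >> shift, the classic clocksource scaling step */
static inline u64 cycles_to_ns(u64 cycles, u32 mult, u32 shift)
{
	return mul_u64_u32_shr(cycles, mult, shift);
}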