author     Alexey Dobriyan <adobriyan@gmail.com>    2012-01-14 21:44:49 +0300
committer  Herbert Xu <herbert@gondor.apana.org.au>    2012-02-16 12:12:33 +0800
commit     f2ea0f5f04c97b48c88edccba52b0682fbe45087 (patch)
tree       53ee15dd4d1a2122d0e571dc8263353acb54f333 /include/linux
parent     3a92d687c8015860a19213e3c102cad6b722f83c (diff)
crypto: sha512 - use standard ror64()
Use the standard ror64() instead of a hand-written rotate. There is no standard ror64() yet, so create it. The difference is that the shift value is "unsigned int" instead of uint64_t (for which there is no reason); with that, gcc emits native ROR instructions, which it currently does not do for the hand-written version. This should make the code faster.

The patch survives the in-tree crypto test and a ping flood with hmac(sha512) on.

Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
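
As a rough illustration of the intended use (not part of the patch), a SHA-512 round helper can be built on ror64() instead of an open-coded rotate; the standalone copy of ror64() and the helper name sha512_Sigma0 below are only for this sketch:

    #include <stdint.h>

    /* Same shape as the helper added in this patch: the shift is a plain
     * "unsigned int", not a 64-bit value. */
    static inline uint64_t ror64(uint64_t word, unsigned int shift)
    {
            return (word >> shift) | (word << (64 - shift));
    }

    /* Illustrative SHA-512 big Sigma-0 function: three right-rotations
     * (by 28, 34 and 39 bits) XORed together, each expressed as a
     * ror64() call that the compiler can lower to a native ROR. */
    static inline uint64_t sha512_Sigma0(uint64_t x)
    {
            return ror64(x, 28) ^ ror64(x, 34) ^ ror64(x, 39);
    }

Per the commit message, keeping the shift as unsigned int is what lets gcc recognise this pattern and emit a single ROR instruction rather than separate shifts and an OR.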
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/bitops.h  20
1 file changed, 20 insertions(+), 0 deletions(-)
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index a3ef66a2a08..fc8a3ffce32 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -50,6 +50,26 @@ static inline unsigned long hweight_long(unsigned long w)
}
/**
+ * rol64 - rotate a 64-bit value left
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u64 rol64(__u64 word, unsigned int shift)
+{
+ return (word << shift) | (word >> (64 - shift));
+}
+
+/**
+ * ror64 - rotate a 64-bit value right
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline __u64 ror64(__u64 word, unsigned int shift)
+{
+ return (word >> shift) | (word << (64 - shift));
+}
+
+/**
* rol32 - rotate a 32-bit value left
* @word: value to rotate
* @shift: bits to roll