author     Theodore Ts'o <tytso@mit.edu>   2013-09-22 15:24:02 -0400
committer  Theodore Ts'o <tytso@mit.edu>   2013-10-10 14:32:20 -0400
commit     655b226470b229552ad95b21323864df9bd9fc74 (patch)
tree       9379bc872df3e7e859654a374e23be3fc4248521 /drivers/char/random.c
parent     f5c2742c23886e707f062881c5f206c1fc704782 (diff)
random: speed up the fast_mix function by a factor of four
By mixing the entropy in chunks of 32-bit words instead of byte by byte, we can speed up the fast_mix function significantly. Since it is called on every single interrupt, on systems with a very heavy interrupt load, this can make a noticeable difference.

Also fix a compilation warning in add_interrupt_randomness() and avoid xor'ing cycles and jiffies together just in case we have an architecture which tries to define random_get_entropy() by returning jiffies.

Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Reported-by: Jörn Engel <joern@logfs.org>
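For readers who want to try the mixing step outside the kernel, here is a minimal userspace sketch of the word-at-a-time fast_mix() shown in the diff below. It is not part of the commit: rol32() is a local stand-in for the kernel helper, the struct name, main() and the input values are made up for illustration, and the twist_table constants are the CRC-32-based table used elsewhere in drivers/char/random.c.

/*
 * Standalone sketch of word-at-a-time mixing: one unrolled round per
 * 32-bit input word, rotate schedule 14/7/7/7, as in the new fast_mix().
 */
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

static const uint32_t twist_table[8] = {
	0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
	0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278
};

struct fast_pool_sketch {
	uint32_t pool[4];
	uint32_t rotate;
	uint32_t count;
};

/* Local stand-in for the kernel's rol32(); the mask avoids a shift by 32. */
static inline uint32_t rol32(uint32_t w, unsigned s)
{
	return (w << s) | (w >> ((32 - s) & 31));
}

static void fast_mix_sketch(struct fast_pool_sketch *f, const uint32_t in[4])
{
	uint32_t w;
	unsigned r = f->rotate;

	w = rol32(in[0], r) ^ f->pool[0] ^ f->pool[3];
	f->pool[0] = (w >> 3) ^ twist_table[w & 7];
	r = (r + 14) & 31;
	w = rol32(in[1], r) ^ f->pool[1] ^ f->pool[0];
	f->pool[1] = (w >> 3) ^ twist_table[w & 7];
	r = (r + 7) & 31;
	w = rol32(in[2], r) ^ f->pool[2] ^ f->pool[1];
	f->pool[2] = (w >> 3) ^ twist_table[w & 7];
	r = (r + 7) & 31;
	w = rol32(in[3], r) ^ f->pool[3] ^ f->pool[2];
	f->pool[3] = (w >> 3) ^ twist_table[w & 7];
	r = (r + 7) & 31;

	f->rotate = r;
	f->count++;
}

int main(void)
{
	struct fast_pool_sketch f = { { 0, 0, 0, 0 }, 0, 0 };
	uint32_t in[4] = { 0x12345678, 42, 0xdeadbeef, 0 };	/* arbitrary demo input */

	fast_mix_sketch(&f, in);
	printf("%08" PRIx32 " %08" PRIx32 " %08" PRIx32 " %08" PRIx32 "\n",
	       f.pool[0], f.pool[1], f.pool[2], f.pool[3]);
	return 0;
}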
Diffstat (limited to 'drivers/char/random.c')
-rw-r--r--  drivers/char/random.c | 50
1 file changed, 28 insertions(+), 22 deletions(-)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index a68b4a09327..74eeec58e77 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -584,21 +584,26 @@ struct fast_pool {
* collector. It's hardcoded for an 128 bit pool and assumes that any
* locks that might be needed are taken by the caller.
*/
-static void fast_mix(struct fast_pool *f, const void *in, int nbytes)
+static void fast_mix(struct fast_pool *f, __u32 input[4])
{
- const char *bytes = in;
__u32 w;
- unsigned i = f->count;
unsigned input_rotate = f->rotate;
- while (nbytes--) {
- w = rol32(*bytes++, input_rotate & 31) ^ f->pool[i & 3] ^
- f->pool[(i + 1) & 3];
- f->pool[i & 3] = (w >> 3) ^ twist_table[w & 7];
- input_rotate += (i++ & 3) ? 7 : 14;
- }
- f->count = i;
+ w = rol32(input[0], input_rotate) ^ f->pool[0] ^ f->pool[3];
+ f->pool[0] = (w >> 3) ^ twist_table[w & 7];
+ input_rotate = (input_rotate + 14) & 31;
+ w = rol32(input[1], input_rotate) ^ f->pool[1] ^ f->pool[0];
+ f->pool[1] = (w >> 3) ^ twist_table[w & 7];
+ input_rotate = (input_rotate + 7) & 31;
+ w = rol32(input[2], input_rotate) ^ f->pool[2] ^ f->pool[1];
+ f->pool[2] = (w >> 3) ^ twist_table[w & 7];
+ input_rotate = (input_rotate + 7) & 31;
+ w = rol32(input[3], input_rotate) ^ f->pool[3] ^ f->pool[2];
+ f->pool[3] = (w >> 3) ^ twist_table[w & 7];
+ input_rotate = (input_rotate + 7) & 31;
+
f->rotate = input_rotate;
+ f->count++;
}
/*
@@ -828,20 +833,21 @@ void add_interrupt_randomness(int irq, int irq_flags)
struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness);
struct pt_regs *regs = get_irq_regs();
unsigned long now = jiffies;
- __u32 input[4], cycles = random_get_entropy();
-
- input[0] = cycles ^ jiffies;
- input[1] = irq;
- if (regs) {
- __u64 ip = instruction_pointer(regs);
- input[2] = ip;
- input[3] = ip >> 32;
- }
+ cycles_t cycles = random_get_entropy();
+ __u32 input[4], c_high, j_high;
+ __u64 ip;
+
+ c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
+ j_high = (sizeof(now) > 4) ? now >> 32 : 0;
+ input[0] = cycles ^ j_high ^ irq;
+ input[1] = now ^ c_high;
+ ip = regs ? instruction_pointer(regs) : _RET_IP_;
+ input[2] = ip;
+ input[3] = ip >> 32;
- fast_mix(fast_pool, input, sizeof(input));
+ fast_mix(fast_pool, input);
- if ((fast_pool->count & 1023) &&
- !time_after(now, fast_pool->last + HZ))
+ if ((fast_pool->count & 63) && !time_after(now, fast_pool->last + HZ))
return;
fast_pool->last = now;
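The second hunk above folds the possibly 64-bit cycle counter and jiffies into the four 32-bit input words so that the two timers land in separate words instead of being xor'ed into one. A minimal userspace sketch of that folding, with made-up counter values and uint64_t standing in for cycles_t and unsigned long, might look like:

/*
 * Sketch of the input-word construction in add_interrupt_randomness().
 * All values below are invented for illustration; in the kernel,
 * cycles_t and jiffies may be 32-bit, which is what the sizeof() tests
 * guard against.
 */
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t cycles = 0x123456789abcdef0ULL;	/* pretend random_get_entropy() */
	uint64_t now    = 0x00000001000022b8ULL;	/* pretend jiffies */
	int irq         = 19;				/* pretend interrupt number */
	uint64_t ip     = 0xffffffff81012345ULL;	/* pretend instruction pointer */
	uint32_t input[4], c_high, j_high;

	/* Peel the high halves off 64-bit counters (zero on 32-bit ones)... */
	c_high = (sizeof(cycles) > 4) ? (uint32_t)(cycles >> 32) : 0;
	j_high = (sizeof(now) > 4) ? (uint32_t)(now >> 32) : 0;

	/* ...and spread them across the four input words, so cycles and
	 * jiffies never cancel out even if an architecture defines
	 * random_get_entropy() as jiffies. */
	input[0] = (uint32_t)cycles ^ j_high ^ (uint32_t)irq;
	input[1] = (uint32_t)now ^ c_high;
	input[2] = (uint32_t)ip;
	input[3] = (uint32_t)(ip >> 32);

	printf("%08" PRIx32 " %08" PRIx32 " %08" PRIx32 " %08" PRIx32 "\n",
	       input[0], input[1], input[2], input[3]);
	return 0;
}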