author    Theodore Ts'o <tytso@mit.edu>  2014-06-10 22:46:37 -0400
committer Theodore Ts'o <tytso@mit.edu>  2014-07-15 04:49:39 -0400
commit    91fcb532efe366d79b93a3c8c368b9dca6176a55 (patch)
tree      3d2f571973e695ab5334abbaadbcad3a64152188 /drivers/char
parent    1795cd9b3a91d4b5473c97f491d63892442212ab (diff)
random: always update the entropy pool under the spinlock
Instead of using the lockless techniques introduced in commit 902c098a3663, use spin_trylock to try to grab the entropy pool's lock. If we can't get the lock, just try again on the next interrupt.

Based on discussions with George Spelvin.

Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Cc: George Spelvin <linux@horizon.com>
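As an aside for readers unfamiliar with the try-lock-and-retry-later idiom described above, the following is a minimal userspace sketch of the same pattern. It uses pthread_mutex_trylock in place of the kernel's spin_trylock, and the event_count / pool_lock names are illustrative stand-ins for fast_pool->count and the entropy pool's lock, not the kernel's actual data structures:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int event_count;   /* rough analogue of fast_pool->count */

static void handle_event(void)
{
	/* Cheap per-event accounting stays lock-free. */
	event_count++;

	/* Only attempt to touch the shared pool every 64th event. */
	if (event_count & 63)
		return;

	if (pthread_mutex_trylock(&pool_lock) != 0) {
		/*
		 * Lock is busy: roll the count back so the threshold
		 * fires again on the very next event, then give up
		 * for now instead of spinning.
		 */
		event_count--;
		return;
	}

	/* ... mix the accumulated samples into the shared pool here ... */

	pthread_mutex_unlock(&pool_lock);
}

int main(void)
{
	for (int i = 0; i < 256; i++)
		handle_event();
	printf("events seen: %u\n", event_count);
	return 0;
}

Build with "cc -pthread"; the point is only the control flow: a contended lock costs one skipped flush, never a busy-wait in the event handler.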
Diffstat (limited to 'drivers/char')
-rw-r--r--  drivers/char/random.c | 44
1 file changed, 23 insertions(+), 21 deletions(-)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 0a7ac0a..922a2e4 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -495,9 +495,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
tap4 = r->poolinfo->tap4;
tap5 = r->poolinfo->tap5;
- smp_rmb();
- input_rotate = ACCESS_ONCE(r->input_rotate);
- i = ACCESS_ONCE(r->add_ptr);
+ input_rotate = r->input_rotate;
+ i = r->add_ptr;
/* mix one byte at a time to simplify size handling and churn faster */
while (nbytes--) {
@@ -524,9 +523,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
}
- ACCESS_ONCE(r->input_rotate) = input_rotate;
- ACCESS_ONCE(r->add_ptr) = i;
- smp_wmb();
+ r->input_rotate = input_rotate;
+ r->add_ptr = i;
if (out)
for (j = 0; j < 16; j++)
@@ -845,7 +843,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
__u32 input[4], c_high, j_high;
__u64 ip;
unsigned long seed;
- int credit;
+ int credit = 0;
c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
j_high = (sizeof(now) > 4) ? now >> 32 : 0;
@@ -860,36 +858,40 @@ void add_interrupt_randomness(int irq, int irq_flags)
if ((fast_pool->count & 63) && !time_after(now, fast_pool->last + HZ))
return;
- fast_pool->last = now;
-
r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
+ if (!spin_trylock(&r->lock)) {
+ fast_pool->count--;
+ return;
+ }
+ fast_pool->last = now;
__mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL);
/*
+ * If we have architectural seed generator, produce a seed and
+ * add it to the pool. For the sake of paranoia count it as
+ * 50% entropic.
+ */
+ if (arch_get_random_seed_long(&seed)) {
+ __mix_pool_bytes(r, &seed, sizeof(seed), NULL);
+ credit += sizeof(seed) * 4;
+ }
+ spin_unlock(&r->lock);
+
+ /*
* If we don't have a valid cycle counter, and we see
* back-to-back timer interrupts, then skip giving credit for
* any entropy, otherwise credit 1 bit.
*/
- credit = 1;
+ credit++;
if (cycles == 0) {
if (irq_flags & __IRQF_TIMER) {
if (fast_pool->last_timer_intr)
- credit = 0;
+ credit--;
fast_pool->last_timer_intr = 1;
} else
fast_pool->last_timer_intr = 0;
}
- /*
- * If we have architectural seed generator, produce a seed and
- * add it to the pool. For the sake of paranoia count it as
- * 50% entropic.
- */
- if (arch_get_random_seed_long(&seed)) {
- __mix_pool_bytes(r, &seed, sizeof(seed), NULL);
- credit += sizeof(seed) * 4;
- }
-
credit_entropy_bits(r, credit);
}
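A note on the 50% accounting in the hunk above: crediting sizeof(seed) * 4 bits counts four bits of entropy per byte of the architectural seed, i.e. half of the eight bits per byte that are actually mixed in (32 of 64 bits on a 64-bit machine). A minimal sketch of that arithmetic in plain userspace C, with an ordinary unsigned long standing in for the kernel's seed variable:

#include <stdio.h>

int main(void)
{
	unsigned long seed = 0;                        /* stand-in for the arch-provided seed */
	unsigned int bits_mixed    = sizeof(seed) * 8; /* bits actually mixed into the pool   */
	unsigned int bits_credited = sizeof(seed) * 4; /* 50% credit, matching the patch      */

	printf("mixed %u bits, credited %u bits of entropy\n",
	       bits_mixed, bits_credited);
	return 0;
}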