-rw-r--r--  Documentation/admin-guide/kernel-parameters.txt |  8
-rw-r--r--  drivers/char/Kconfig                            | 14
-rw-r--r--  drivers/char/random.c                           | 49
-rw-r--r--  include/linux/random.h                          |  3
-rw-r--r--  lib/vsprintf.c                                  | 27
5 files changed, 85 insertions(+), 16 deletions(-)
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index a32f2a1..0c88302 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -748,6 +748,14 @@
debug [KNL] Enable kernel debugging (events log level).
+ debug_boot_weak_hash
+ [KNL] Enable printing [hashed] pointers early in the
+ boot sequence. If enabled, we use a weak hash instead
+ of siphash to hash pointers. Use this option if you are
+ seeing instances of '(____ptrval____)' and need to see a
+ value (hashed pointer) instead. Cryptographically
+ insecure, please do not use on production kernels.
+
debug_locks_verbose=
[KNL] verbose self-tests
Format=<0|1>
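
Usage sketch for the new parameter (not part of the patch; assumes a GRUB-based setup, and the "quiet" option is only illustrative): it simply needs to appear on the kernel command line.

    # /etc/default/grub -- append the parameter, then regenerate grub.cfg
    GRUB_CMDLINE_LINUX="quiet debug_boot_weak_hash"

    # or add it once at the boot loader's edit prompt:
    linux /boot/vmlinuz ... debug_boot_weak_hash
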
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 212f447..ce277ee 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -554,3 +554,17 @@ config ADI
endmenu
+config RANDOM_TRUST_CPU
+ bool "Trust the CPU manufacturer to initialize Linux's CRNG"
+ depends on X86 || S390 || PPC
+ default n
+ help
+ Assume that the CPU manufacturer (e.g., Intel or AMD for RDSEED or
+ RDRAND, IBM for the S390 and Power PC architectures) is trustworthy
+ for the purposes of initializing Linux's CRNG. Since this is not
+ something that can be independently audited, this amounts to trusting
+ that CPU manufacturer (perhaps with the insistence or mandate
+ of a Nation State's intelligence or law enforcement agencies)
+ has not installed a hidden back door to compromise the CPU's
+ random number generation facilities.
+
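
If the new option is selected, the resulting fragment in .config is simply (illustrative):

    CONFIG_RANDOM_TRUST_CPU=y

With it set, crng_initialize() below marks the CRNG fully seeded (crng_init = 2) whenever every arch_get_random_seed_long()/arch_get_random_long() call during initialization succeeded.
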
diff --git a/drivers/char/random.c b/drivers/char/random.c
index bd449ad..bf5f99f 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -782,6 +782,7 @@ static void invalidate_batched_entropy(void);
static void crng_initialize(struct crng_state *crng)
{
int i;
+ int arch_init = 1;
unsigned long rv;
memcpy(&crng->state[0], "expand 32-byte k", 16);
@@ -792,10 +793,18 @@ static void crng_initialize(struct crng_state *crng)
_get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
for (i = 4; i < 16; i++) {
if (!arch_get_random_seed_long(&rv) &&
- !arch_get_random_long(&rv))
+ !arch_get_random_long(&rv)) {
rv = random_get_entropy();
+ arch_init = 0;
+ }
crng->state[i] ^= rv;
}
+#ifdef CONFIG_RANDOM_TRUST_CPU
+ if (arch_init) {
+ crng_init = 2;
+ pr_notice("random: crng done (trusting CPU's manufacturer)\n");
+ }
+#endif
crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
}
@@ -1122,8 +1131,6 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
} sample;
long delta, delta2, delta3;
- preempt_disable();
-
sample.jiffies = jiffies;
sample.cycles = random_get_entropy();
sample.num = num;
@@ -1161,8 +1168,6 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
* and limit entropy estimate to 12 bits.
*/
credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
-
- preempt_enable();
}
void add_input_randomness(unsigned int type, unsigned int code,
@@ -1659,6 +1664,21 @@ int wait_for_random_bytes(void)
EXPORT_SYMBOL(wait_for_random_bytes);
/*
+ * Returns whether or not the urandom pool has been seeded and thus guaranteed
+ * to supply cryptographically secure random numbers. This applies to: the
+ * /dev/urandom device, the get_random_bytes function, and the get_random_{u32,
+ * u64,int,long} family of functions.
+ *
+ * Returns: true if the urandom pool has been seeded.
+ * false if the urandom pool has not been seeded.
+ */
+bool rng_is_initialized(void)
+{
+ return crng_ready();
+}
+EXPORT_SYMBOL(rng_is_initialized);
+
+/*
* Add a callback function that will be invoked when the nonblocking
* pool is initialised.
*
@@ -1725,30 +1745,31 @@ EXPORT_SYMBOL(del_random_ready_callback);
* key known by the NSA). So it's useful if we need the speed, but
* only if we're willing to trust the hardware manufacturer not to
* have put in a back door.
+ *
+ * Return number of bytes filled in.
*/
-void get_random_bytes_arch(void *buf, int nbytes)
+int __must_check get_random_bytes_arch(void *buf, int nbytes)
{
+ int left = nbytes;
char *p = buf;
- trace_get_random_bytes_arch(nbytes, _RET_IP_);
- while (nbytes) {
+ trace_get_random_bytes_arch(left, _RET_IP_);
+ while (left) {
unsigned long v;
- int chunk = min(nbytes, (int)sizeof(unsigned long));
+ int chunk = min_t(int, left, sizeof(unsigned long));
if (!arch_get_random_long(&v))
break;
-
+
memcpy(p, &v, chunk);
p += chunk;
- nbytes -= chunk;
+ left -= chunk;
}
- if (nbytes)
- get_random_bytes(p, nbytes);
+ return nbytes - left;
}
EXPORT_SYMBOL(get_random_bytes_arch);
-
/*
* init_std_data - initialize pool with system data
*
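
A minimal sketch of how a caller might use the two interfaces touched above (the module name, buffer, and messages are illustrative, not part of the patch). Since get_random_bytes_arch() no longer falls back to the CRNG internally and now returns the number of bytes the hardware actually supplied, the caller is expected to check the __must_check return value and fill any shortfall itself; rng_is_initialized() reports whether the CRNG output is already cryptographically strong.

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/random.h>

    static u8 example_key[16];	/* illustrative buffer */

    static int __init example_seed_init(void)
    {
    	int got;

    	/* Ask the architectural RNG (e.g. RDRAND) first; it may fill
    	 * fewer bytes than requested, or none at all. */
    	got = get_random_bytes_arch(example_key, sizeof(example_key));
    	if (got < (int)sizeof(example_key)) {
    		/* Fall back to the kernel CRNG for the remainder. */
    		get_random_bytes(example_key + got,
    				 sizeof(example_key) - got);
    	}

    	/* Check whether the CRNG backing the fallback is seeded yet. */
    	if (!rng_is_initialized())
    		pr_info("example: CRNG not yet seeded, key is best-effort\n");

    	return 0;
    }
    module_init(example_seed_init);
    MODULE_LICENSE("GPL");
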
diff --git a/include/linux/random.h b/include/linux/random.h
index 2ddf13b..445a0ea 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -36,9 +36,10 @@ extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
extern void get_random_bytes(void *buf, int nbytes);
extern int wait_for_random_bytes(void);
+extern bool rng_is_initialized(void);
extern int add_random_ready_callback(struct random_ready_callback *rdy);
extern void del_random_ready_callback(struct random_ready_callback *rdy);
-extern void get_random_bytes_arch(void *buf, int nbytes);
+extern int __must_check get_random_bytes_arch(void *buf, int nbytes);
#ifndef MODULE
extern const struct file_operations random_fops, urandom_fops;
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index cda1862..d5b3a3f 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1651,6 +1651,17 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
return widen_string(buf, buf - buf_start, end, spec);
}
+/* Make pointers available for printing early in the boot sequence. */
+static int debug_boot_weak_hash __ro_after_init;
+
+static int __init debug_boot_weak_hash_enable(char *str)
+{
+ debug_boot_weak_hash = 1;
+ pr_info("debug_boot_weak_hash enabled\n");
+ return 0;
+}
+early_param("debug_boot_weak_hash", debug_boot_weak_hash_enable);
+
static DEFINE_STATIC_KEY_TRUE(not_filled_random_ptr_key);
static siphash_key_t ptr_key __read_mostly;
@@ -1675,8 +1686,16 @@ static struct random_ready_callback random_ready = {
static int __init initialize_ptr_random(void)
{
- int ret = add_random_ready_callback(&random_ready);
+ int key_size = sizeof(ptr_key);
+ int ret;
+ /* Use hw RNG if available. */
+ if (get_random_bytes_arch(&ptr_key, key_size) == key_size) {
+ static_branch_disable(&not_filled_random_ptr_key);
+ return 0;
+ }
+
+ ret = add_random_ready_callback(&random_ready);
if (!ret) {
return 0;
} else if (ret == -EALREADY) {
@@ -1695,6 +1714,12 @@ static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec)
const char *str = sizeof(ptr) == 8 ? "(____ptrval____)" : "(ptrval)";
unsigned long hashval;
+ /* When debugging early boot use non-cryptographically secure hash. */
+ if (unlikely(debug_boot_weak_hash)) {
+ hashval = hash_long((unsigned long)ptr, 32);
+ return pointer_string(buf, end, (const void *)hashval, spec);
+ }
+
if (static_branch_unlikely(&not_filled_random_ptr_key)) {
spec.field_width = 2 * sizeof(ptr);
/* string length must be less than default_width */
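
For illustration (not part of the patch), a caller printing a pointer with %p goes through ptr_to_id() above: before the siphash key is available it gets "(____ptrval____)", unless debug_boot_weak_hash was passed, in which case a weak 32-bit hash_long() of the pointer is printed; once the key is filled, the siphash-based value is used. The helper below is purely hypothetical.

    #include <linux/kernel.h>

    /* Illustrative helper, not part of the patch. */
    static void __maybe_unused example_show_ptr(const void *p)
    {
    	/* Prints a hashed pointer value, never the raw address. */
    	pr_info("example: p=%p\n", p);
    }
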