diff --git a/drivers/char/random.c b/drivers/char/random.c
index 23cab7a8c1c1..afa3ce7d3e72 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -288,7 +288,6 @@
 #define SEC_XFER_SIZE 512
 #define EXTRACT_SIZE 10
 
-#define DEBUG_RANDOM_BOOT 0
 
 #define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
 
@@ -437,6 +436,7 @@ static void _extract_crng(struct crng_state *crng,
 static void _crng_backtrack_protect(struct crng_state *crng,
 				    __u8 tmp[CHACHA20_BLOCK_SIZE], int used);
 static void process_random_ready_list(void);
+static void _get_random_bytes(void *buf, int nbytes);
 
 /**********************************************************************
  *
@@ -777,7 +777,7 @@ static void crng_initialize(struct crng_state *crng)
 		_extract_entropy(&input_pool, &crng->state[4],
 				 sizeof(__u32) * 12, 0);
 	else
-		get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
+		_get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
 	for (i = 4; i < 16; i++) {
 		if (!arch_get_random_seed_long(&rv) &&
 		    !arch_get_random_long(&rv))
@@ -851,11 +851,6 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
 	}
 }
 
-static inline void crng_wait_ready(void)
-{
-	wait_event_interruptible(crng_init_wait, crng_ready());
-}
-
 static void _extract_crng(struct crng_state *crng,
 			  __u8 out[CHACHA20_BLOCK_SIZE])
 {
@@ -1477,22 +1472,44 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
 	return ret;
 }
 
+#define warn_unseeded_randomness(previous) \
+	_warn_unseeded_randomness(__func__, (void *) _RET_IP_, (previous))
+
+static void _warn_unseeded_randomness(const char *func_name, void *caller,
+				      void **previous)
+{
+#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
+	const bool print_once = false;
+#else
+	static bool print_once __read_mostly;
+#endif
+
+	if (print_once ||
+	    crng_ready() ||
+	    (previous && (caller == READ_ONCE(*previous))))
+		return;
+	WRITE_ONCE(*previous, caller);
+#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
+	print_once = true;
+#endif
+	pr_notice("random: %s called from %pF with crng_init=%d\n",
+		  func_name, caller, crng_init);
+}
+
 /*
  * This function is the exported kernel interface. It returns some
  * number of good random numbers, suitable for key generation, seeding
  * TCP sequence numbers, etc. It does not rely on the hardware random
  * number generator. For random bytes direct from the hardware RNG
- * (when available), use get_random_bytes_arch().
+ * (when available), use get_random_bytes_arch(). In order to ensure
+ * that the randomness provided by this function is okay, the function
+ * wait_for_random_bytes() should be called and return 0 at least once
+ * at any point prior.
  */
-void get_random_bytes(void *buf, int nbytes)
+static void _get_random_bytes(void *buf, int nbytes)
 {
 	__u8 tmp[CHACHA20_BLOCK_SIZE];
 
-#if DEBUG_RANDOM_BOOT > 0
-	if (!crng_ready())
-		printk(KERN_NOTICE "random: %pF get_random_bytes called "
-		       "with crng_init = %d\n", (void *) _RET_IP_, crng_init);
-#endif
 	trace_get_random_bytes(nbytes, _RET_IP_);
 	while (nbytes >= CHACHA20_BLOCK_SIZE) {
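Two details of the warning machinery above are worth spelling out: by default print_once is a function-local static shared by every caller, so at most one notice is printed per boot, while under CONFIG_WARN_ALL_UNSEEDED_RANDOM it is a const false and the per-entry-point previous slot only suppresses consecutive warnings from the same call site. Here is a minimal sketch of how an exported entry point is expected to hook in; get_random_u16() is hypothetical and not part of this patch:

u16 get_random_u16(void)
{
	u16 ret;
	/* Remembers the last caller that tripped the warning. */
	static void *previous;

	warn_unseeded_randomness(&previous);
	_get_random_bytes(&ret, sizeof(ret));
	return ret;
}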
@@ -1509,8 +1526,34 @@ void get_random_bytes(void *buf, int nbytes)
 	crng_backtrack_protect(tmp, CHACHA20_BLOCK_SIZE);
 	memzero_explicit(tmp, sizeof(tmp));
 }
+
+void get_random_bytes(void *buf, int nbytes)
+{
+	static void *previous;
+
+	warn_unseeded_randomness(&previous);
+	_get_random_bytes(buf, nbytes);
+}
 EXPORT_SYMBOL(get_random_bytes);
 
+/*
+ * Wait for the urandom pool to be seeded and thus guaranteed to supply
+ * cryptographically secure random numbers. This applies to: the /dev/urandom
+ * device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
+ * family of functions. Using any of these functions without first calling
+ * this function forfeits the guarantee of security.
+ *
+ * Returns: 0 if the urandom pool has been seeded.
+ *          -ERESTARTSYS if the function was interrupted by a signal.
+ */
+int wait_for_random_bytes(void)
+{
+	if (likely(crng_ready()))
+		return 0;
+	return wait_event_interruptible(crng_init_wait, crng_ready());
+}
+EXPORT_SYMBOL(wait_for_random_bytes);
+
 /*
  * Add a callback function that will be invoked when the nonblocking
  * pool is initialised.
@@ -1865,6 +1908,8 @@ const struct file_operations urandom_fops = {
 SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
 		unsigned int, flags)
 {
+	int ret;
+
 	if (flags & ~(GRND_NONBLOCK|GRND_RANDOM))
 		return -EINVAL;
 
@@ -1877,9 +1922,9 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
 	if (!crng_ready()) {
 		if (flags & GRND_NONBLOCK)
 			return -EAGAIN;
-		crng_wait_ready();
-		if (signal_pending(current))
-			return -ERESTARTSYS;
+		ret = wait_for_random_bytes();
+		if (unlikely(ret))
+			return ret;
 	}
 	return urandom_read(NULL, buf, count, NULL);
 }
@@ -2040,15 +2085,19 @@ static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_
 /*
  * Get a random word for internal kernel use only. The quality of the random
  * number is either as good as RDRAND or as good as /dev/urandom, with the
- * goal of being quite fast and not depleting entropy.
+ * goal of being quite fast and not depleting entropy. In order to ensure
+ * that the randomness provided by this function is okay, the function
+ * wait_for_random_bytes() should be called and return 0 at least once
+ * at any point prior.
  */
 static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
 u64 get_random_u64(void)
 {
 	u64 ret;
-	bool use_lock = READ_ONCE(crng_init) < 2;
+	bool use_lock;
 	unsigned long flags = 0;
 	struct batched_entropy *batch;
+	static void *previous;
 
 #if BITS_PER_LONG == 64
 	if (arch_get_random_long((unsigned long *)&ret))
 		return ret;
 #else
 	if (arch_get_random_long((unsigned long *)&ret) &&
 	    arch_get_random_long((unsigned long *)&ret + 1))
@@ -2059,6 +2108,9 @@ u64 get_random_u64(void)
 		return ret;
 #endif
 
+	warn_unseeded_randomness(&previous);
+
+	use_lock = READ_ONCE(crng_init) < 2;
 	batch = &get_cpu_var(batched_entropy_u64);
 	if (use_lock)
 		read_lock_irqsave(&batched_entropy_reset_lock, flags);
@@ -2078,13 +2130,17 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
 u32 get_random_u32(void)
 {
 	u32 ret;
-	bool use_lock = READ_ONCE(crng_init) < 2;
+	bool use_lock;
 	unsigned long flags = 0;
 	struct batched_entropy *batch;
+	static void *previous;
 
 	if (arch_get_random_int(&ret))
 		return ret;
 
+	warn_unseeded_randomness(&previous);
+
+	use_lock = READ_ONCE(crng_init) < 2;
 	batch = &get_cpu_var(batched_entropy_u32);
 	if (use_lock)
 		read_lock_irqsave(&batched_entropy_reset_lock, flags);
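Every consumer converted below follows the same shape: a function that used to return void and call get_random_bytes() now returns int, calls get_random_bytes_wait(), and propagates the error, since the wait can be interrupted by a signal. A sketch of the pattern under that assumption; the mydrv names are hypothetical, not from this patch:

/* Sketch only: runs in process context, so sleeping is allowed. */
static int mydrv_gen_session_key(struct mydrv *drv)
{
	int ret;

	/* May block until the CRNG is seeded; -ERESTARTSYS on signal. */
	ret = get_random_bytes_wait(drv->session_key,
				    sizeof(drv->session_key));
	if (unlikely(ret))
		return ret;

	return 0;
}

Callers gain a new failure path and must unwind anything they had already allocated, which is what the kfree() calls added in the iSCSI hunks below are for.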
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index 903b667f8e01..f9bc8ec6fb6b 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -47,18 +47,21 @@ static void chap_binaryhex_to_asciihex(char *dst, char *src, int src_len)
 	}
 }
 
-static void chap_gen_challenge(
+static int chap_gen_challenge(
 	struct iscsi_conn *conn,
 	int caller,
 	char *c_str,
 	unsigned int *c_len)
 {
+	int ret;
 	unsigned char challenge_asciihex[CHAP_CHALLENGE_LENGTH * 2 + 1];
 	struct iscsi_chap *chap = conn->auth_protocol;
 
 	memset(challenge_asciihex, 0, CHAP_CHALLENGE_LENGTH * 2 + 1);
 
-	get_random_bytes(chap->challenge, CHAP_CHALLENGE_LENGTH);
+	ret = get_random_bytes_wait(chap->challenge, CHAP_CHALLENGE_LENGTH);
+	if (unlikely(ret))
+		return ret;
 	chap_binaryhex_to_asciihex(challenge_asciihex, chap->challenge,
 				CHAP_CHALLENGE_LENGTH);
 	/*
@@ -69,6 +72,7 @@ static void chap_gen_challenge(
 	pr_debug("[%s] Sending CHAP_C=0x%s\n\n", (caller) ? "server" : "client",
 			challenge_asciihex);
+	return 0;
 }
 
 static int chap_check_algorithm(const char *a_str)
@@ -143,6 +147,7 @@ static struct iscsi_chap *chap_server_open(
 	case CHAP_DIGEST_UNKNOWN:
 	default:
 		pr_err("Unsupported CHAP_A value\n");
+		kfree(conn->auth_protocol);
 		return NULL;
 	}
 
@@ -156,7 +161,10 @@ static struct iscsi_chap *chap_server_open(
 	/*
 	 * Generate Challenge.
 	 */
-	chap_gen_challenge(conn, 1, aic_str, aic_len);
+	if (chap_gen_challenge(conn, 1, aic_str, aic_len) < 0) {
+		kfree(conn->auth_protocol);
+		return NULL;
+	}
 
 	return chap;
 }
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 92b96b51d506..e9bdc8b86e7d 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -245,22 +245,26 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
 	return 0;
 }
 
-static void iscsi_login_set_conn_values(
+static int iscsi_login_set_conn_values(
 	struct iscsi_session *sess,
 	struct iscsi_conn *conn,
 	__be16 cid)
 {
+	int ret;
 	conn->sess = sess;
 	conn->cid = be16_to_cpu(cid);
 	/*
 	 * Generate a random Status sequence number (statsn) for the new
 	 * iSCSI connection.
 	 */
-	get_random_bytes(&conn->stat_sn, sizeof(u32));
+	ret = get_random_bytes_wait(&conn->stat_sn, sizeof(u32));
+	if (unlikely(ret))
+		return ret;
 
 	mutex_lock(&auth_id_lock);
 	conn->auth_id = iscsit_global->auth_id++;
 	mutex_unlock(&auth_id_lock);
+	return 0;
 }
 
 __printf(2, 3) int iscsi_change_param_sprintf(
@@ -306,7 +310,11 @@ static int iscsi_login_zero_tsih_s1(
 		return -ENOMEM;
 	}
 
-	iscsi_login_set_conn_values(sess, conn, pdu->cid);
+	ret = iscsi_login_set_conn_values(sess, conn, pdu->cid);
+	if (unlikely(ret)) {
+		kfree(sess);
+		return ret;
+	}
 	sess->init_task_tag = pdu->itt;
 	memcpy(&sess->isid, pdu->isid, 6);
 	sess->exp_cmd_sn = be32_to_cpu(pdu->cmdsn);
@@ -497,8 +505,7 @@ static int iscsi_login_non_zero_tsih_s1(
 {
 	struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
 
-	iscsi_login_set_conn_values(NULL, conn, pdu->cid);
-	return 0;
+	return iscsi_login_set_conn_values(NULL, conn, pdu->cid);
 }
 
 /*
@@ -554,9 +561,8 @@ static int iscsi_login_non_zero_tsih_s2(
 	atomic_set(&sess->session_continuation, 1);
 	spin_unlock_bh(&sess->conn_lock);
 
-	iscsi_login_set_conn_values(sess, conn, pdu->cid);
-
-	if (iscsi_copy_param_list(&conn->param_list,
+	if (iscsi_login_set_conn_values(sess, conn, pdu->cid) < 0 ||
+	    iscsi_copy_param_list(&conn->param_list,
 		conn->tpg->param_list, 0) < 0) {
 		iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
 				ISCSI_LOGIN_STATUS_NO_RESOURCES);
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 556f480c6936..180b3356ff86 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -1354,7 +1354,7 @@ init_cifs(void)
 	spin_lock_init(&cifs_tcp_ses_lock);
 	spin_lock_init(&GlobalMid_Lock);
 
-	get_random_bytes(&cifs_lock_secret, sizeof(cifs_lock_secret));
+	cifs_lock_secret = get_random_u32();
 
 	if (cifs_max_pending < 2) {
 		cifs_max_pending = 2;
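Where only a single 32-bit value is needed, as with cifs_lock_secret above, drawing a word from the per-CPU batched entropy is simpler and cheaper than filling a caller-supplied buffer. For illustration, the two call styles side by side (a sketch; secret is just a hypothetical local):

	u32 secret;

	get_random_bytes(&secret, sizeof(secret));	/* old: generic buffer fill */
	secret = get_random_u32();			/* new: batched, word-sized */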
diff --git a/include/linux/net.h b/include/linux/net.h
index abcfa46a2bd9..dda2cc939a53 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -274,6 +274,8 @@ do {							\
 
 #define net_get_random_once(buf, nbytes)		\
 	get_random_once((buf), (nbytes))
+#define net_get_random_once_wait(buf, nbytes)		\
+	get_random_once_wait((buf), (nbytes))
 
 int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
 		   size_t num, size_t len);
diff --git a/include/linux/once.h b/include/linux/once.h
index 285f12cb40e6..9c98aaa87cbc 100644
--- a/include/linux/once.h
+++ b/include/linux/once.h
@@ -53,5 +53,7 @@ void __do_once_done(bool *done, struct static_key *once_key,
 #define get_random_once(buf, nbytes)			\
 	DO_ONCE(get_random_bytes, (buf), (nbytes))
 
+#define get_random_once_wait(buf, nbytes)		\
+	DO_ONCE(get_random_bytes_wait, (buf), (nbytes))
+
 #endif /* _LINUX_ONCE_H */
diff --git a/include/linux/random.h b/include/linux/random.h
index 1fa0dc880bd7..eafea6a09361 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -34,6 +34,7 @@ extern void add_input_randomness(unsigned int type, unsigned int code,
 extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
 
 extern void get_random_bytes(void *buf, int nbytes);
+extern int wait_for_random_bytes(void);
 extern int add_random_ready_callback(struct random_ready_callback *rdy);
 extern void del_random_ready_callback(struct random_ready_callback *rdy);
 extern void get_random_bytes_arch(void *buf, int nbytes);
@@ -78,6 +79,31 @@ static inline unsigned long get_random_canary(void)
 	return val & CANARY_MASK;
 }
 
+/* Calls wait_for_random_bytes() and then calls get_random_bytes(buf, nbytes).
+ * Returns the result of the call to wait_for_random_bytes. */
+static inline int get_random_bytes_wait(void *buf, int nbytes)
+{
+	int ret = wait_for_random_bytes();
+	if (unlikely(ret))
+		return ret;
+	get_random_bytes(buf, nbytes);
+	return 0;
+}
+
+#define declare_get_random_var_wait(var) \
+	static inline int get_random_ ## var ## _wait(var *out) { \
+		int ret = wait_for_random_bytes(); \
+		if (unlikely(ret)) \
+			return ret; \
+		*out = get_random_ ## var(); \
+		return 0; \
+	}
+declare_get_random_var_wait(u32)
+declare_get_random_var_wait(u64)
+declare_get_random_var_wait(int)
+declare_get_random_var_wait(long)
+#undef declare_get_random_var_wait
+
 unsigned long randomize_page(unsigned long start, unsigned long range);
 
 u32 prandom_u32(void);
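For reference, each declare_get_random_var_wait() instantiation above expands to an ordinary inline function. Spelled out for the u32 case, with whitespace added:

static inline int get_random_u32_wait(u32 *out)
{
	int ret = wait_for_random_bytes();
	if (unlikely(ret))
		return ret;
	*out = get_random_u32();
	return 0;
}

The u64, int, and long variants differ only in the type; all of them report -ERESTARTSYS through the same wait_for_random_bytes() path.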
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 789c6e9e5e01..98fe715522e8 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1223,6 +1223,34 @@ config STACKTRACE
 	  It is also used by various kernel debugging features that require
 	  stack trace generation.
 
+config WARN_ALL_UNSEEDED_RANDOM
+	bool "Warn for all uses of unseeded randomness"
+	default n
+	help
+	  Some parts of the kernel contain bugs relating to their use of
+	  cryptographically secure random numbers before it's actually possible
+	  to generate those numbers securely. This setting ensures that these
+	  flaws don't go unnoticed, by enabling a message, should this ever
+	  occur. This will allow people with obscure setups to know when things
+	  are going wrong, so that they might contact developers about fixing
+	  it.
+
+	  Unfortunately, on some models of some architectures getting
+	  a fully seeded CRNG is extremely difficult, and so this can
+	  result in dmesg getting spammed for a surprisingly long
+	  time. This is really bad from a security perspective, and
+	  so architecture maintainers really need to do what they can
+	  to get the CRNG seeded sooner after the system is booted.
+	  However, since users cannot do anything actionable to
+	  address this, by default the kernel will issue only a single
+	  warning for the first use of unseeded randomness.
+
+	  Say Y here if you want to receive warnings for all uses of
+	  unseeded randomness. This will be of use primarily for
+	  those developers interested in improving the security of
+	  Linux kernels running on their architecture (or
+	  subarchitecture).
+
 config DEBUG_KOBJECT
 	bool "kobject debugging"
 	depends on DEBUG_KERNEL
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 42466c167257..707ca5d677c6 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -234,7 +234,7 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 
 	INIT_LIST_HEAD(&tbl->walkers);
 
-	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
+	tbl->hash_rnd = get_random_u32();
 
 	for (i = 0; i < nbuckets; i++)
 		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 3d265c5cb6d0..5c036d2f401e 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -599,7 +599,11 @@ struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private)
 {
 	struct ceph_client *client;
 	struct ceph_entity_addr *myaddr = NULL;
-	int err = -ENOMEM;
+	int err;
+
+	err = wait_for_random_bytes();
+	if (err < 0)
+		return ERR_PTR(err);
 
 	client = kzalloc(sizeof(*client), GFP_KERNEL);
 	if (client == NULL)
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index e31fc11a8000..d0713627deb6 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -347,8 +347,7 @@ out_entries:
 
 static void neigh_get_hash_rnd(u32 *x)
 {
-	get_random_bytes(x, sizeof(*x));
-	*x |= 1;
+	*x = get_random_u32() | 1;
 }
 
 static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index c816cd53f7fc..0383e66f59bc 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2979,8 +2979,7 @@ static __net_init int rt_genid_init(struct net *net)
 {
 	atomic_set(&net->ipv4.rt_genid, 0);
 	atomic_set(&net->fnhe_genid, 0);
-	get_random_bytes(&net->ipv4.dev_addr_genid,
-			 sizeof(net->ipv4.dev_addr_genid));
+	atomic_set(&net->ipv4.dev_addr_genid, get_random_int());
 	return 0;
 }
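Taken together, a caller in process context can now opt into the blocking guarantee end to end; the once-wrappers (get_random_once_wait(), net_get_random_once_wait()) do the same for lazily initialized static buffers via DO_ONCE(). A closing sketch of what a converted initialization path might look like; the example names are hypothetical, not from this patch:

static u32 example_token;

static int __init example_init(void)
{
	int ret;

	/* Blocks until the urandom pool is seeded. */
	ret = get_random_u32_wait(&example_token);
	if (unlikely(ret))
		return ret;	/* -ERESTARTSYS if interrupted by a signal */

	return 0;
}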