mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-10-28 23:24:50 +00:00
453431a549
As said by Linus:

  A symmetric naming is only helpful if it implies symmetries in use.
  Otherwise it's actively misleading.

  In "kzalloc()", the z is meaningful and an important part of what the
  caller wants.

  In "kzfree()", the z is actively detrimental, because maybe in the
  future we really _might_ want to use that "memfill(0xdeadbeef)" or
  something. The "zero" part of the interface isn't even _relevant_.

The main reason that kzfree() exists is to clear sensitive information
that should not be leaked to other future users of the same memory
objects.

Rename kzfree() to kfree_sensitive() to follow the example of the
recently added kvfree_sensitive() and make the intention of the API more
explicit. In addition, memzero_explicit() is used to clear the memory to
make sure that it won't get optimized away by the compiler.

The renaming is done by using the command sequence:

  git grep -w --name-only kzfree |\
  xargs sed -i 's/kzfree/kfree_sensitive/'

followed by some editing of the kfree_sensitive() kerneldoc and adding a
kzfree backward compatibility macro in slab.h.

[akpm@linux-foundation.org: fs/crypto/inline_crypt.c needs linux/slab.h]
[akpm@linux-foundation.org: fix fs/crypto/inline_crypt.c some more]

Suggested-by: Joe Perches <joe@perches.com>
Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: David Howells <dhowells@redhat.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
Cc: James Morris <jmorris@namei.org>
Cc: "Serge E. Hallyn" <serge@hallyn.com>
Cc: Joe Perches <joe@perches.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Dan Carpenter <dan.carpenter@oracle.com>
Cc: "Jason A . Donenfeld" <Jason@zx2c4.com>
Link: http://lkml.kernel.org/r/20200616154311.12314-3-longman@redhat.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
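To make the intent concrete, here is a minimal sketch of the renamed interface as described above. It is an illustration, not the actual slab.h/slab_common.c code: the zero-then-free body and the compatibility macro follow the description in the message, while the example caller (a hypothetical context structure with a key buffer) is invented for demonstration.

/* Sketch of the renamed helper: zero the allocation, then free it. */
void kfree_sensitive(const void *objp)
{
	void *mem = (void *)objp;

	if (!mem)
		return;
	/* memzero_explicit() cannot be optimized away by the compiler. */
	memzero_explicit(mem, ksize(mem));
	kfree(mem);
}

/* Backward-compatibility macro kept in <linux/slab.h>. */
#define kzfree(x)	kfree_sensitive(x)

/* Hypothetical caller before and after the rename: */
kzfree(ctx->key);		/* old spelling, still builds */
kfree_sensitive(ctx->key);	/* new spelling, intention explicit */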
232 lines
4.8 KiB
C
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Cryptographic API.
 *
 * RNG operations.
 *
 * Copyright (c) 2008 Neil Horman <nhorman@tuxdriver.com>
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <linux/atomic.h>
#include <crypto/internal/rng.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include "internal.h"

static DEFINE_MUTEX(crypto_default_rng_lock);
struct crypto_rng *crypto_default_rng;
EXPORT_SYMBOL_GPL(crypto_default_rng);
static int crypto_default_rng_refcnt;

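/**
 * crypto_rng_reset - re-seed an RNG transform
 * @tfm:  RNG transform to re-seed
 * @seed: seed material, or NULL to have @slen random bytes generated here
 * @slen: length of the seed in bytes
 *
 * When @seed is NULL and @slen is non-zero, a temporary buffer is filled
 * via get_random_bytes_wait() and used as the seed.  The buffer is released
 * with kfree_sensitive() so the seed material is wiped before the memory is
 * reused.
 *
 * Return: 0 on success, a negative errno otherwise.
 */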
int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
{
	struct crypto_alg *alg = tfm->base.__crt_alg;
	u8 *buf = NULL;
	int err;

	crypto_stats_get(alg);
	if (!seed && slen) {
		buf = kmalloc(slen, GFP_KERNEL);
		if (!buf) {
			crypto_alg_put(alg);
			return -ENOMEM;
		}

		err = get_random_bytes_wait(buf, slen);
		if (err) {
			crypto_alg_put(alg);
			goto out;
		}
		seed = buf;
	}

	err = crypto_rng_alg(tfm)->seed(tfm, seed, slen);
	crypto_stats_rng_seed(alg, err);
out:
	kfree_sensitive(buf);
	return err;
}
EXPORT_SYMBOL_GPL(crypto_rng_reset);

static int crypto_rng_init_tfm(struct crypto_tfm *tfm)
{
	return 0;
}

static unsigned int seedsize(struct crypto_alg *alg)
{
	struct rng_alg *ralg = container_of(alg, struct rng_alg, base);

	return ralg->seedsize;
}

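/*
 * Report the algorithm's type and seed size to user space through the
 * crypto_user netlink interface; a stub returning -ENOSYS is used when
 * CONFIG_NET is disabled.
 */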
#ifdef CONFIG_NET
static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_rng rrng;

	memset(&rrng, 0, sizeof(rrng));

	strscpy(rrng.type, "rng", sizeof(rrng.type));

	rrng.seedsize = seedsize(alg);

	return nla_put(skb, CRYPTOCFGA_REPORT_RNG, sizeof(rrng), &rrng);
}
#else
static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

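/* Show the algorithm's type and seed size in /proc/crypto. */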
static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_rng_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : rng\n");
	seq_printf(m, "seedsize     : %u\n", seedsize(alg));
}

static const struct crypto_type crypto_rng_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_rng_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_rng_show,
#endif
	.report = crypto_rng_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_RNG,
	.tfmsize = offsetof(struct crypto_rng, base),
};

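/**
 * crypto_alloc_rng - allocate an RNG transform
 * @alg_name: name of the RNG algorithm (e.g. "stdrng")
 * @type:     crypto algorithm type flags
 * @mask:     crypto algorithm type mask
 *
 * Return: a new transform on success, or an ERR_PTR() encoded error.
 */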
struct crypto_rng *crypto_alloc_rng(const char *alg_name, u32 type, u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_rng_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_rng);

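/**
 * crypto_get_default_rng - take a reference on the shared "stdrng" instance
 *
 * Allocates and seeds crypto_default_rng on first use, then increments its
 * reference count.  Each successful call must be balanced by a call to
 * crypto_put_default_rng().
 *
 * Illustrative (hypothetical) caller:
 *
 *	err = crypto_get_default_rng();
 *	if (!err) {
 *		err = crypto_rng_get_bytes(crypto_default_rng, buf, len);
 *		crypto_put_default_rng();
 *	}
 *
 * Return: 0 on success, a negative errno if the RNG could not be allocated
 * or seeded.
 */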
int crypto_get_default_rng(void)
{
	struct crypto_rng *rng;
	int err;

	mutex_lock(&crypto_default_rng_lock);
	if (!crypto_default_rng) {
		rng = crypto_alloc_rng("stdrng", 0, 0);
		err = PTR_ERR(rng);
		if (IS_ERR(rng))
			goto unlock;

		err = crypto_rng_reset(rng, NULL, crypto_rng_seedsize(rng));
		if (err) {
			crypto_free_rng(rng);
			goto unlock;
		}

		crypto_default_rng = rng;
	}

	crypto_default_rng_refcnt++;
	err = 0;

unlock:
	mutex_unlock(&crypto_default_rng_lock);

	return err;
}
EXPORT_SYMBOL_GPL(crypto_get_default_rng);

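/**
 * crypto_put_default_rng - drop a reference taken by crypto_get_default_rng()
 *
 * Only the reference count is decremented here; freeing the transform is
 * left to crypto_del_default_rng(), which succeeds only once the count has
 * dropped to zero.
 */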
void crypto_put_default_rng(void)
{
	mutex_lock(&crypto_default_rng_lock);
	crypto_default_rng_refcnt--;
	mutex_unlock(&crypto_default_rng_lock);
}
EXPORT_SYMBOL_GPL(crypto_put_default_rng);

#if defined(CONFIG_CRYPTO_RNG) || defined(CONFIG_CRYPTO_RNG_MODULE)
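/**
 * crypto_del_default_rng - free the shared default RNG if it is unused
 *
 * Return: 0 on success, -EBUSY if the default RNG still has users.
 */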
int crypto_del_default_rng(void)
{
	int err = -EBUSY;

	mutex_lock(&crypto_default_rng_lock);
	if (crypto_default_rng_refcnt)
		goto out;

	crypto_free_rng(crypto_default_rng);
	crypto_default_rng = NULL;

	err = 0;

out:
	mutex_unlock(&crypto_default_rng_lock);

	return err;
}
EXPORT_SYMBOL_GPL(crypto_del_default_rng);
#endif

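/**
 * crypto_register_rng - register an RNG algorithm with the crypto API
 * @alg: rng_alg to register
 *
 * Seed sizes larger than PAGE_SIZE / 8 are rejected with -EINVAL; otherwise
 * the RNG type and flags are set on the base algorithm before registration.
 *
 * Return: 0 on success, a negative errno otherwise.
 */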
int crypto_register_rng(struct rng_alg *alg)
{
	struct crypto_alg *base = &alg->base;

	if (alg->seedsize > PAGE_SIZE / 8)
		return -EINVAL;

	base->cra_type = &crypto_rng_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_RNG;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_rng);

void crypto_unregister_rng(struct rng_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_rng);

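/**
 * crypto_register_rngs - register an array of RNG algorithms
 * @algs:  algorithms to register
 * @count: number of entries in @algs
 *
 * If any registration fails, the algorithms registered so far by this call
 * are unregistered again before the error is returned.
 */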
int crypto_register_rngs(struct rng_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_rng(algs + i);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_rng(algs + i);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_rngs);

void crypto_unregister_rngs(struct rng_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_rng(algs + i);
}
EXPORT_SYMBOL_GPL(crypto_unregister_rngs);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Random Number Generator");