Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2024-10-31 16:38:12 +00:00)
20f01f1632
Add sysfs files that expose the inline encryption capabilities of request queues:

	/sys/block/$disk/queue/crypto/max_dun_bits
	/sys/block/$disk/queue/crypto/modes/$mode
	/sys/block/$disk/queue/crypto/num_keyslots

Userspace can use these new files to decide what encryption settings to use, or whether to use inline encryption at all. This also brings the crypto capabilities in line with the other queue properties, which are already discoverable via the queue directory in sysfs.

Design notes:

- Place the new files in a new subdirectory "crypto" to group them together and to avoid complicating the main "queue" directory. This also makes it possible to replace "crypto" with a symlink later if we ever make the blk_crypto_profiles into real kobjects (see below).

- It was necessary to define a new kobject that corresponds to the crypto subdirectory. For now, this kobject just contains a pointer to the blk_crypto_profile. Note that multiple queues (and hence multiple such kobjects) may refer to the same blk_crypto_profile.

  An alternative design would more closely match the current kernel data structures: the blk_crypto_profile could be a kobject itself, located directly under the host controller device's kobject, while /sys/block/$disk/queue/crypto would be a symlink to it. I decided not to do that for now because it would require a lot more changes, such as no longer embedding blk_crypto_profile in other structures, and also because I'm not sure we can rule out moving the crypto capabilities into 'struct queue_limits' in the future. (Even if multiple queues share the same crypto engine, maybe the supported data unit sizes could differ due to other queue properties.) It would also still be possible to switch to that design later without breaking userspace, by replacing the directory with a symlink.

- Use "max_dun_bits" instead of "max_dun_bytes". Currently, the kernel internally stores this value in bytes, but that's an implementation detail. It probably makes more sense to talk about this value in bits, and choosing bits is more future-proof.

- "modes" is a sub-subdirectory, since there may be multiple supported crypto modes, sysfs is supposed to have one value per file, and it makes sense to group all the mode files together.

- Each mode had to be named. The crypto API names like "xts(aes)" are not appropriate because they don't specify the key size. Therefore, I assigned new names. The exact names chosen are arbitrary, but they happen to match the names used in log messages in fs/crypto/.

- The "num_keyslots" file is a bit different from the others in that it is only useful to know for performance reasons. However, it's included as it can still be useful. For example, a user might not want to use inline encryption if there aren't very many keyslots.

Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Eric Biggers <ebiggers@google.com>
Link: https://lore.kernel.org/r/20220124215938.2769-4-ebiggers@kernel.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
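As a quick illustration of how the new sysfs interface described above might be consumed, here is a minimal userspace sketch (not part of this patch or header). The disk name "sda" and the helper read_queue_crypto_attr() are illustrative assumptions, the files are assumed to contain plain decimal values, and a missing "crypto" directory is treated as "no inline encryption capabilities exposed".

#include <stdio.h>

/* Read one attribute from /sys/block/<disk>/queue/crypto/ as a decimal value. */
static int read_queue_crypto_attr(const char *disk, const char *attr,
                                  unsigned int *val)
{
        char path[256];
        FILE *f;
        int ret;

        snprintf(path, sizeof(path),
                 "/sys/block/%s/queue/crypto/%s", disk, attr);
        f = fopen(path, "r");
        if (!f)
                return -1;      /* attribute (or the whole crypto dir) absent */
        ret = (fscanf(f, "%u", val) == 1) ? 0 : -1;
        fclose(f);
        return ret;
}

int main(void)
{
        unsigned int num_keyslots, max_dun_bits;

        if (read_queue_crypto_attr("sda", "num_keyslots", &num_keyslots) ||
            read_queue_crypto_attr("sda", "max_dun_bits", &max_dun_bits)) {
                printf("sda: no inline encryption capabilities exposed\n");
                return 0;
        }
        printf("sda: %u keyslots, DUNs up to %u bits\n",
               num_keyslots, max_dun_bits);
        return 0;
}

A tool could extend this by listing the files under /sys/block/$disk/queue/crypto/modes/ to discover which crypto modes the hardware supports, since the commit adds one file per supported mode.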
224 lines · 5.7 KiB · C
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright 2019 Google LLC
 */

#ifndef __LINUX_BLK_CRYPTO_INTERNAL_H
#define __LINUX_BLK_CRYPTO_INTERNAL_H

#include <linux/bio.h>
#include <linux/blk-mq.h>

/* Represents a crypto mode supported by blk-crypto */
struct blk_crypto_mode {
        const char *name; /* name of this mode, shown in sysfs */
        const char *cipher_str; /* crypto API name (for fallback case) */
        unsigned int keysize; /* key size in bytes */
        unsigned int ivsize; /* iv size in bytes */
};

extern const struct blk_crypto_mode blk_crypto_modes[];

#ifdef CONFIG_BLK_INLINE_ENCRYPTION

int blk_crypto_sysfs_register(struct request_queue *q);

void blk_crypto_sysfs_unregister(struct request_queue *q);

void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
                             unsigned int inc);

bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio);

bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
                             struct bio_crypt_ctx *bc2);

static inline bool bio_crypt_ctx_back_mergeable(struct request *req,
                                                struct bio *bio)
{
        return bio_crypt_ctx_mergeable(req->crypt_ctx, blk_rq_bytes(req),
                                       bio->bi_crypt_context);
}

static inline bool bio_crypt_ctx_front_mergeable(struct request *req,
                                                 struct bio *bio)
{
        return bio_crypt_ctx_mergeable(bio->bi_crypt_context,
                                       bio->bi_iter.bi_size, req->crypt_ctx);
}

static inline bool bio_crypt_ctx_merge_rq(struct request *req,
                                          struct request *next)
{
        return bio_crypt_ctx_mergeable(req->crypt_ctx, blk_rq_bytes(req),
                                       next->crypt_ctx);
}

static inline void blk_crypto_rq_set_defaults(struct request *rq)
{
        rq->crypt_ctx = NULL;
        rq->crypt_keyslot = NULL;
}

static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
{
        return rq->crypt_ctx;
}

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static inline int blk_crypto_sysfs_register(struct request_queue *q)
{
        return 0;
}

static inline void blk_crypto_sysfs_unregister(struct request_queue *q) { }

static inline bool bio_crypt_rq_ctx_compatible(struct request *rq,
                                               struct bio *bio)
{
        return true;
}

static inline bool bio_crypt_ctx_front_mergeable(struct request *req,
                                                 struct bio *bio)
{
        return true;
}

static inline bool bio_crypt_ctx_back_mergeable(struct request *req,
                                                struct bio *bio)
{
        return true;
}

static inline bool bio_crypt_ctx_merge_rq(struct request *req,
                                          struct request *next)
{
        return true;
}

static inline void blk_crypto_rq_set_defaults(struct request *rq) { }

static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
{
        return false;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION */

void __bio_crypt_advance(struct bio *bio, unsigned int bytes);
static inline void bio_crypt_advance(struct bio *bio, unsigned int bytes)
{
        if (bio_has_crypt_ctx(bio))
                __bio_crypt_advance(bio, bytes);
}

void __bio_crypt_free_ctx(struct bio *bio);
static inline void bio_crypt_free_ctx(struct bio *bio)
{
        if (bio_has_crypt_ctx(bio))
                __bio_crypt_free_ctx(bio);
}

static inline void bio_crypt_do_front_merge(struct request *rq,
                                            struct bio *bio)
{
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
        if (bio_has_crypt_ctx(bio))
                memcpy(rq->crypt_ctx->bc_dun, bio->bi_crypt_context->bc_dun,
                       sizeof(rq->crypt_ctx->bc_dun));
#endif
}

bool __blk_crypto_bio_prep(struct bio **bio_ptr);
static inline bool blk_crypto_bio_prep(struct bio **bio_ptr)
{
        if (bio_has_crypt_ctx(*bio_ptr))
                return __blk_crypto_bio_prep(bio_ptr);
        return true;
}

blk_status_t __blk_crypto_init_request(struct request *rq);
static inline blk_status_t blk_crypto_init_request(struct request *rq)
{
        if (blk_crypto_rq_is_encrypted(rq))
                return __blk_crypto_init_request(rq);
        return BLK_STS_OK;
}

void __blk_crypto_free_request(struct request *rq);
static inline void blk_crypto_free_request(struct request *rq)
{
        if (blk_crypto_rq_is_encrypted(rq))
                __blk_crypto_free_request(rq);
}

int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
                             gfp_t gfp_mask);
/**
 * blk_crypto_rq_bio_prep - Prepare a request's crypt_ctx when its first bio
 *                          is inserted
 * @rq: The request to prepare
 * @bio: The first bio being inserted into the request
 * @gfp_mask: Memory allocation flags
 *
 * Return: 0 on success, -ENOMEM if out of memory.  -ENOMEM is only possible if
 *         @gfp_mask doesn't include %__GFP_DIRECT_RECLAIM.
 */
static inline int blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
                                         gfp_t gfp_mask)
{
        if (bio_has_crypt_ctx(bio))
                return __blk_crypto_rq_bio_prep(rq, bio, gfp_mask);
        return 0;
}

/**
 * blk_crypto_insert_cloned_request - Prepare a cloned request to be inserted
 *                                    into a request queue.
 * @rq: the request being queued
 *
 * Return: BLK_STS_OK on success, nonzero on error.
 */
static inline blk_status_t blk_crypto_insert_cloned_request(struct request *rq)
{

        if (blk_crypto_rq_is_encrypted(rq))
                return blk_crypto_init_request(rq);
        return BLK_STS_OK;
}

#ifdef CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK

int blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num);

bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr);

int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key);

#else /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */

static inline int
blk_crypto_fallback_start_using_mode(enum blk_crypto_mode_num mode_num)
{
        pr_warn_once("crypto API fallback is disabled\n");
        return -ENOPKG;
}

static inline bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
{
        pr_warn_once("crypto API fallback disabled; failing request.\n");
        (*bio_ptr)->bi_status = BLK_STS_NOTSUPP;
        return false;
}

static inline int
blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
{
        return 0;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK */

#endif /* __LINUX_BLK_CRYPTO_INTERNAL_H */