mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-11-01 08:58:07 +00:00
1a59d1b8e0
Based on 1 normalized pattern(s):

    this program is free software you can redistribute it and or modify
    it under the terms of the gnu general public license as published by
    the free software foundation either version 2 of the license or at
    your option any later version this program is distributed in the
    hope that it will be useful but without any warranty without even
    the implied warranty of merchantability or fitness for a particular
    purpose see the gnu general public license for more details you
    should have received a copy of the gnu general public license along
    with this program if not write to the free software foundation inc
    59 temple place suite 330 boston ma 02111 1307 usa

extracted by the scancode license scanner the SPDX license identifier

    GPL-2.0-or-later

has been chosen to replace the boilerplate/reference in 1334 file(s).

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Allison Randal <allison@lohutok.net>
Reviewed-by: Richard Fontana <rfontana@redhat.com>
Cc: linux-spdx@vger.kernel.org
Link: https://lkml.kernel.org/r/20190527070033.113240726@linutronix.de
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
315 lines
7.3 KiB
C
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Shared glue code for 128bit block ciphers
 *
 * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 *
 * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
 *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 * CTR part based on code (crypto/ctr.c) by:
 *   (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
 */

#include <linux/module.h>
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/skcipher.h>
#include <crypto/xts.h>
#include <asm/crypto/glue_helper.h>

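/*
 * glue_ecb_req_128bit - ECB en/decryption driven through an skcipher walk.
 *
 * For each chunk returned by the walk, the widest batch in gctx->funcs[] is
 * tried first and narrower widths mop up the remainder, so funcs[] is
 * expected to be ordered from most blocks to fewest, ending with a one-block
 * fallback.  The FPU is claimed lazily via glue_fpu_begin() and released
 * once after the walk completes.
 *
 * A cipher module wraps its routines in a common_glue_ctx and forwards
 * requests here.  Hypothetical sketch, modeled on the serpent-avx glue
 * (function and macro names are illustrative, not guaranteed):
 *
 *      static const struct common_glue_ctx serpent_enc = {
 *              .num_funcs = 2,
 *              .fpu_blocks_limit = 8,
 *              .funcs = { {
 *                      .num_blocks = 8,        /* 8-way SIMD routine */
 *                      .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_enc_8way_avx) }
 *              }, {
 *                      .num_blocks = 1,        /* scalar fallback */
 *                      .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_encrypt) }
 *              } }
 *      };
 */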
int glue_ecb_req_128bit(const struct common_glue_ctx *gctx,
                        struct skcipher_request *req)
{
        void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
        const unsigned int bsize = 128 / 8;
        struct skcipher_walk walk;
        bool fpu_enabled = false;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        while ((nbytes = walk.nbytes)) {
                const u8 *src = walk.src.virt.addr;
                u8 *dst = walk.dst.virt.addr;
                unsigned int func_bytes;
                unsigned int i;

                fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
                                             &walk, fpu_enabled, nbytes);
                for (i = 0; i < gctx->num_funcs; i++) {
                        func_bytes = bsize * gctx->funcs[i].num_blocks;

                        if (nbytes < func_bytes)
                                continue;

                        /* Process multi-block batch */
                        do {
                                gctx->funcs[i].fn_u.ecb(ctx, dst, src);
                                src += func_bytes;
                                dst += func_bytes;
                                nbytes -= func_bytes;
                        } while (nbytes >= func_bytes);

                        if (nbytes < bsize)
                                break;
                }
                err = skcipher_walk_done(&walk, nbytes);
        }

        glue_fpu_end(fpu_enabled);
        return err;
}
EXPORT_SYMBOL_GPL(glue_ecb_req_128bit);

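/*
 * glue_cbc_encrypt_req_128bit - CBC encryption, one block at a time.
 *
 * CBC encryption is inherently serial: each block's cipher input depends on
 * the previous ciphertext block, so no multi-block batching is possible and
 * a single one-block function is taken instead of a common_glue_ctx.  The
 * chaining value is carried in walk.iv across walk chunks, and no FPU
 * region is opened since only the scalar one-block routine runs here.
 */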
int glue_cbc_encrypt_req_128bit(const common_glue_func_t fn,
                                struct skcipher_request *req)
{
        void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
        const unsigned int bsize = 128 / 8;
        struct skcipher_walk walk;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        while ((nbytes = walk.nbytes)) {
                const u128 *src = (u128 *)walk.src.virt.addr;
                u128 *dst = (u128 *)walk.dst.virt.addr;
                u128 *iv = (u128 *)walk.iv;

                do {
                        u128_xor(dst, src, iv);
                        fn(ctx, (u8 *)dst, (u8 *)dst);
                        iv = dst;
                        src++;
                        dst++;
                        nbytes -= bsize;
                } while (nbytes >= bsize);

                *(u128 *)walk.iv = *iv;
                err = skcipher_walk_done(&walk, nbytes);
        }
        return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_encrypt_req_128bit);

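/*
 * glue_cbc_decrypt_req_128bit - CBC decryption with multi-block batching.
 *
 * Unlike encryption, CBC decryption parallelizes: each plaintext block needs
 * only the preceding ciphertext block, which is already available.  The
 * chunk is walked back to front so a batched fn_u.cbc call can still read
 * the ciphertext blocks it chains against before they are overwritten;
 * last_iv saves the final ciphertext block to become the next chunk's IV.
 */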
int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx,
                                struct skcipher_request *req)
{
        void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
        const unsigned int bsize = 128 / 8;
        struct skcipher_walk walk;
        bool fpu_enabled = false;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        while ((nbytes = walk.nbytes)) {
                const u128 *src = walk.src.virt.addr;
                u128 *dst = walk.dst.virt.addr;
                unsigned int func_bytes, num_blocks;
                unsigned int i;
                u128 last_iv;

                fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
                                             &walk, fpu_enabled, nbytes);
                /* Start of the last block. */
                src += nbytes / bsize - 1;
                dst += nbytes / bsize - 1;

                last_iv = *src;

                for (i = 0; i < gctx->num_funcs; i++) {
                        num_blocks = gctx->funcs[i].num_blocks;
                        func_bytes = bsize * num_blocks;

                        if (nbytes < func_bytes)
                                continue;

                        /* Process multi-block batch */
                        do {
                                src -= num_blocks - 1;
                                dst -= num_blocks - 1;

                                gctx->funcs[i].fn_u.cbc(ctx, dst, src);

                                nbytes -= func_bytes;
                                if (nbytes < bsize)
                                        goto done;

                                u128_xor(dst, dst, --src);
                                dst--;
                        } while (nbytes >= func_bytes);
                }
done:
                u128_xor(dst, dst, (u128 *)walk.iv);
                *(u128 *)walk.iv = last_iv;
                err = skcipher_walk_done(&walk, nbytes);
        }

        glue_fpu_end(fpu_enabled);
        return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_decrypt_req_128bit);

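/*
 * glue_ctr_req_128bit - CTR en/decryption with multi-block batching.
 *
 * The counter block is converted to little-endian (le128) once per walk
 * chunk, incremented by the batched fn_u.ctr routines, and converted back
 * to the big-endian on-wire IV afterwards.  A trailing partial block is
 * handled after the main loop by ciphering a stack copy through the last
 * (one-block) entry in funcs[] and copying back only nbytes bytes.
 */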
int glue_ctr_req_128bit(const struct common_glue_ctx *gctx,
                        struct skcipher_request *req)
{
        void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
        const unsigned int bsize = 128 / 8;
        struct skcipher_walk walk;
        bool fpu_enabled = false;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, false);

        while ((nbytes = walk.nbytes) >= bsize) {
                const u128 *src = walk.src.virt.addr;
                u128 *dst = walk.dst.virt.addr;
                unsigned int func_bytes, num_blocks;
                unsigned int i;
                le128 ctrblk;

                fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
                                             &walk, fpu_enabled, nbytes);

                be128_to_le128(&ctrblk, (be128 *)walk.iv);

                for (i = 0; i < gctx->num_funcs; i++) {
                        num_blocks = gctx->funcs[i].num_blocks;
                        func_bytes = bsize * num_blocks;

                        if (nbytes < func_bytes)
                                continue;

                        /* Process multi-block batch */
                        do {
                                gctx->funcs[i].fn_u.ctr(ctx, dst, src, &ctrblk);
                                src += num_blocks;
                                dst += num_blocks;
                                nbytes -= func_bytes;
                        } while (nbytes >= func_bytes);

                        if (nbytes < bsize)
                                break;
                }

                le128_to_be128((be128 *)walk.iv, &ctrblk);
                err = skcipher_walk_done(&walk, nbytes);
        }

        glue_fpu_end(fpu_enabled);

        if (nbytes) {
                le128 ctrblk;
                u128 tmp;

                be128_to_le128(&ctrblk, (be128 *)walk.iv);
                memcpy(&tmp, walk.src.virt.addr, nbytes);
                gctx->funcs[gctx->num_funcs - 1].fn_u.ctr(ctx, &tmp, &tmp,
                                                          &ctrblk);
                memcpy(walk.dst.virt.addr, &tmp, nbytes);
                le128_to_be128((be128 *)walk.iv, &ctrblk);

                err = skcipher_walk_done(&walk, 0);
        }

        return err;
}
EXPORT_SYMBOL_GPL(glue_ctr_req_128bit);

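/*
 * __glue_xts_req_128bit - XTS inner loop for a single walk chunk.
 *
 * Batches blocks exactly like the ECB path, except that each fn_u.xts
 * routine also consumes and advances the tweak carried in walk->iv.
 * Returns the number of bytes left unprocessed (less than one block).
 */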
static unsigned int __glue_xts_req_128bit(const struct common_glue_ctx *gctx,
                                          void *ctx,
                                          struct skcipher_walk *walk)
{
        const unsigned int bsize = 128 / 8;
        unsigned int nbytes = walk->nbytes;
        u128 *src = walk->src.virt.addr;
        u128 *dst = walk->dst.virt.addr;
        unsigned int num_blocks, func_bytes;
        unsigned int i;

        /* Process multi-block batch */
        for (i = 0; i < gctx->num_funcs; i++) {
                num_blocks = gctx->funcs[i].num_blocks;
                func_bytes = bsize * num_blocks;

                if (nbytes >= func_bytes) {
                        do {
                                gctx->funcs[i].fn_u.xts(ctx, dst, src,
                                                        walk->iv);

                                src += num_blocks;
                                dst += num_blocks;
                                nbytes -= func_bytes;
                        } while (nbytes >= func_bytes);

                        if (nbytes < bsize)
                                goto done;
                }
        }

done:
        return nbytes;
}

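/*
 * glue_xts_req_128bit - XTS en/decryption entry point.
 *
 * Encrypts the IV with tweak_fn (keyed by tweak_ctx, the second XTS key) to
 * produce the initial tweak T, then loops __glue_xts_req_128bit() over the
 * walk.  The FPU region is opened up front with at least one block of work
 * so that tweak_fn itself may be a SIMD routine.
 */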
int glue_xts_req_128bit(const struct common_glue_ctx *gctx,
                        struct skcipher_request *req,
                        common_glue_func_t tweak_fn, void *tweak_ctx,
                        void *crypt_ctx)
{
        const unsigned int bsize = 128 / 8;
        struct skcipher_walk walk;
        bool fpu_enabled = false;
        unsigned int nbytes;
        int err;

        err = skcipher_walk_virt(&walk, req, false);
        nbytes = walk.nbytes;
        if (!nbytes)
                return err;

        /* set minimum length to bsize, for tweak_fn */
        fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
                                     &walk, fpu_enabled,
                                     nbytes < bsize ? bsize : nbytes);

        /* calculate first value of T */
        tweak_fn(tweak_ctx, walk.iv, walk.iv);

        while (nbytes) {
                nbytes = __glue_xts_req_128bit(gctx, crypt_ctx, &walk);

                err = skcipher_walk_done(&walk, nbytes);
                nbytes = walk.nbytes;
        }

        glue_fpu_end(fpu_enabled);

        return err;
}
EXPORT_SYMBOL_GPL(glue_xts_req_128bit);

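/*
 * glue_xts_crypt_128bit_one - one XTS block in terms of a one-block cipher.
 *
 * Advances the tweak in @iv via gf128mul_x_ble() first (so @iv already
 * holds the next block's tweak on return), then performs the classic
 * xor-encrypt-xor: dst = T xor fn(T xor src), as the CC/PP comments below
 * spell out for the decryption direction.
 */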
void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src, le128 *iv,
                               common_glue_func_t fn)
{
        le128 ivblk = *iv;

        /* generate next IV */
        gf128mul_x_ble(iv, &ivblk);

        /* CC <- T xor C */
        u128_xor(dst, src, (u128 *)&ivblk);

        /* PP <- D(Key2,CC) */
        fn(ctx, (u8 *)dst, (u8 *)dst);

        /* P <- T xor PP */
        u128_xor(dst, dst, (u128 *)&ivblk);
}
EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit_one);

MODULE_LICENSE("GPL");