cosmopolitan/third_party/argon2/core.c
Justine Tunney 47a53e143b Productionize new APE loader and more
The APE_NO_MODIFY_SELF loader payload has been moved out of the examples
folder and improved so that it works on BSD systems, and permits general
ELF program headers. This brings its quality up enough that it should be
acceptable to use by default for many programs, e.g. Python, Lua, and
SQLite. It's the responsibility of the user to define an appropriate
TMPDIR if /tmp is considered an adversarial environment. Mac OS shall be
supported by APE_NO_MODIFY_SELF soon.

Fixes and improvements have been made to program_executable_name as it's
now the one true way to get the absolute path of the executing image.

This change fixes a memory leak in linenoise history loading, introduced
by performance optimizations in 51904e2687. This change also fixes a
longstanding regression with Mach system calls, introduced by 23ae9dfceb
back in February, which impacted our sched_yield() implementation; that's
why no one noticed until now.

The Blinkenlights PC emulator has been improved. We now fix rendering on
XNU and BSD by not making the assumption that the kernel terminal driver
understands UTF-8, since that seems to break its internal modeling of \r\n;
this is now addressed by using \e[𝑦H instead. The paneling is now more
compact in real mode, so you won't need to make your font as tiny if
you're only emulating an 8086 program. The CLMUL ISA is now emulated too.

This change also makes improvements to timekeeping. CLOCK_MONOTONIC now does
the right thing on Windows NT. The nanosecond time module functions added in
Python 3.7 have been backported.

This change doubles the performance of Argon2 password stretching, simply
by not using its copy_block and xor_block helper functions. They were
trivial to inline, which means we now iterate over each 1024-byte block
four fewer times.
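
As a rough illustration (a sketch only; the real change lives in the
block-filling code rather than this file, and the helper name below is
made up), fusing the copy and xor into a single pass over the 128
quadwords looks like this:

    /* Sketch: one fused pass over the block, instead of separate
       copy_block() and xor_block() calls that each re-walk dst->v. */
    static void xor_copy_sketch(block *dst, const block *ref,
                                const block *prev) {
      int i;
      for (i = 0; i < ARGON2_QWORDS_IN_BLOCK; ++i) {
        dst->v[i] = ref->v[i] ^ prev->v[i];
      }
    }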

This change makes code size improvements. _PyUnicode_ToNumeric() was 64k
in size and now it's 10k. The CJK codec lookup tables now use lazy delta
zigzag deflate (δzd) encoding, which reduces their size from 600k to 200k
(see the sketch after this paragraph). The code bloat caused by macro
abuse in _decimal.c is also addressed, so our fully-loaded
statically-linked hermetically-sealed Python virtual interpreter container
is now 9.4 megs in the default build mode and 5.5m in MODE=tiny, which
leaves plenty of room for chibicc.
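
For the curious, delta zigzag encoding stores each table entry as the
difference from its predecessor, with the sign folded into the low bit so
that small deltas of either sign stay small before deflate compression. A
minimal sketch of the idea (illustrative only, not the actual codec table
generator; names are made up):

    static uint32_t zigzag32(int32_t x) {
      /* 0,-1,1,-2,2,... -> 0,1,2,3,4,... */
      return ((uint32_t)x << 1) ^ (uint32_t)(x >> 31);
    }
    static void delta_zigzag(const int32_t *in, uint32_t *out, size_t n) {
      int32_t prev = 0;
      size_t i;
      for (i = 0; i < n; ++i) {
        out[i] = zigzag32(in[i] - prev); /* small values deflate well */
        prev = in[i];
      }
    }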

The pydoc web server now accommodates the use case of people who work by
SSH'ing into a different machine, e.g. python.com -m pydoc -p8080 -h0.0.0.0.

Finally, Python Capsulae delenda est; they won't be supported in the future.
2021-10-02 08:27:03 -07:00


#include "libc/mem/mem.h"
#include "third_party/argon2/blake2-impl.h"
#include "third_party/argon2/blake2.h"
#include "third_party/argon2/core.h"
/* clang-format off */
/*
* Argon2 reference source code package - reference C implementations
*
* Copyright 2015
* Daniel Dinu, Dmitry Khovratovich, Jean-Philippe Aumasson, and Samuel Neves
*
* You may use this work under the terms of a Creative Commons CC0 1.0
* License/Waiver or the Apache Public License 2.0, at your option. The terms of
* these licenses can be found at:
*
* - CC0 1.0 Universal : https://creativecommons.org/publicdomain/zero/1.0
* - Apache 2.0 : https://www.apache.org/licenses/LICENSE-2.0
*
* You should have received a copy of both of these licenses along with this
* software. If not, they may be obtained at the above URLs.
*/
/***************Instance and Position constructors**********/
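/* Sets every byte of the 1024-byte block to the value `in`. */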
void init_block_value(block *b, uint8_t in) {
memset(b->v, in, sizeof(b->v));
}
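/* Copies all ARGON2_QWORDS_IN_BLOCK quadwords from src to dst. */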
void copy_block(block *dst, const block *src) {
memcpy(dst->v, src->v, sizeof(uint64_t) * ARGON2_QWORDS_IN_BLOCK);
}
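/* XORs src into dst, one quadword at a time. */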
optimizespeed void xor_block(block *dst, const block *src) {
int i;
for (i = 0; i < ARGON2_QWORDS_IN_BLOCK; ++i) {
dst->v[i] ^= src->v[i];
}
}
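/* Loads a 1024-byte little-endian buffer into a block of 64-bit words. */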
static void load_block(block *dst, const void *input) {
unsigned i;
for (i = 0; i < ARGON2_QWORDS_IN_BLOCK; ++i) {
dst->v[i] = load64((const uint8_t *)input + i * sizeof(dst->v[i]));
}
}
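/* Stores a block back out as a 1024-byte little-endian buffer. */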
static void store_block(void *output, const block *src) {
unsigned i;
for (i = 0; i < ARGON2_QWORDS_IN_BLOCK; ++i) {
store64((uint8_t *)output + i * sizeof(src->v[i]), src->v[i]);
}
}
/***************Memory functions*****************/
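/*
 * Allocates num*size bytes for the memory matrix, using the caller's
 * allocate_cbk if one was provided, otherwise malloc(). Returns ARGON2_OK
 * on success, or ARGON2_MEMORY_ALLOCATION_ERROR on overflow or failure.
 */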
int allocate_memory(const argon2_context *context, uint8_t **memory,
size_t num, size_t size) {
size_t memory_size = num*size;
if (memory == NULL) {
return ARGON2_MEMORY_ALLOCATION_ERROR;
}
/* 1. Check for multiplication overflow */
if (size != 0 && memory_size / size != num) {
return ARGON2_MEMORY_ALLOCATION_ERROR;
}
/* 2. Try to allocate with appropriate allocator */
if (context->allocate_cbk) {
(context->allocate_cbk)(memory, memory_size);
} else {
*memory = malloc(memory_size);
}
if (*memory == NULL) {
return ARGON2_MEMORY_ALLOCATION_ERROR;
}
return ARGON2_OK;
}
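/*
 * Wipes num*size bytes of the memory matrix, then releases it with the
 * caller's free_cbk if one was provided, otherwise free().
 */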
void free_memory(const argon2_context *context, uint8_t *memory,
size_t num, size_t size) {
size_t memory_size = num*size;
clear_internal_memory(memory, memory_size);
if (context->free_cbk) {
(context->free_cbk)(memory, memory_size);
} else {
free(memory);
}
}
/* Memory clear flag defaults to true. */
int FLAG_clear_internal_memory = 1;
void clear_internal_memory(void *v, size_t n) {
if (FLAG_clear_internal_memory && v) {
explicit_bzero(v, n);
}
}
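/*
 * Produces the final tag: XORs together the last block of every lane,
 * hashes the result with blake2b_long() into context->out, then wipes
 * and frees the memory matrix.
 */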
void finalize(const argon2_context *context, argon2_instance_t *instance) {
if (context != NULL && instance != NULL) {
block blockhash;
uint32_t l;
copy_block(&blockhash, instance->memory + instance->lane_length - 1);
/* XOR the last blocks */
for (l = 1; l < instance->lanes; ++l) {
uint32_t last_block_in_lane =
l * instance->lane_length + (instance->lane_length - 1);
xor_block(&blockhash, instance->memory + last_block_in_lane);
}
/* Hash the result */
{
uint8_t blockhash_bytes[ARGON2_BLOCK_SIZE];
store_block(blockhash_bytes, &blockhash);
blake2b_long(context->out, context->outlen, blockhash_bytes,
ARGON2_BLOCK_SIZE);
/* clear blockhash and blockhash_bytes */
clear_internal_memory(blockhash.v, ARGON2_BLOCK_SIZE);
clear_internal_memory(blockhash_bytes, ARGON2_BLOCK_SIZE);
}
free_memory(context, (uint8_t *)instance->memory,
instance->memory_blocks, sizeof(block));
}
}
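/*
 * Computes the in-lane index of the reference block: the 32-bit
 * pseudo-random value is mapped onto the allowed reference area with a
 * quadratic distribution that favors recently written blocks.
 */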
uint32_t index_alpha(const argon2_instance_t *instance,
const argon2_position_t *position, uint32_t pseudo_rand,
int same_lane) {
/*
* Pass 0:
* This lane : all already finished segments plus already constructed
* blocks in this segment
* Other lanes : all already finished segments
* Pass 1+:
* This lane : (SYNC_POINTS - 1) last segments plus already constructed
* blocks in this segment
* Other lanes : (SYNC_POINTS - 1) last segments
*/
uint32_t reference_area_size;
uint64_t relative_position;
uint32_t start_position, absolute_position;
if (0 == position->pass) {
/* First pass */
if (0 == position->slice) {
/* First slice */
reference_area_size =
position->index - 1; /* all but the previous */
} else {
if (same_lane) {
/* The same lane => add current segment */
reference_area_size =
position->slice * instance->segment_length +
position->index - 1;
} else {
reference_area_size =
position->slice * instance->segment_length +
((position->index == 0) ? (-1) : 0);
}
}
} else {
/* Second pass */
if (same_lane) {
reference_area_size = instance->lane_length -
instance->segment_length + position->index -
1;
} else {
reference_area_size = instance->lane_length -
instance->segment_length +
((position->index == 0) ? (-1) : 0);
}
}
/* 1.2.4. Mapping pseudo_rand to 0..<reference_area_size-1> and producing
 * the relative position */
relative_position = pseudo_rand;
relative_position = relative_position * relative_position >> 32;
relative_position = reference_area_size - 1 -
(reference_area_size * relative_position >> 32);
/* 1.2.5 Computing starting position */
start_position = 0;
if (0 != position->pass) {
start_position = (position->slice == ARGON2_SYNC_POINTS - 1)
? 0
: (position->slice + 1) * instance->segment_length;
}
/* 1.2.6. Computing absolute position */
absolute_position = (start_position + relative_position) %
instance->lane_length; /* absolute position */
return absolute_position;
}
/* Single-threaded version for p=1 case */
static int fill_memory_blocks_st(argon2_instance_t *instance) {
uint32_t r, s, l;
for (r = 0; r < instance->passes; ++r) {
for (s = 0; s < ARGON2_SYNC_POINTS; ++s) {
for (l = 0; l < instance->lanes; ++l) {
argon2_position_t position = {r, l, (uint8_t)s, 0};
fill_segment(instance, position);
}
}
}
return ARGON2_OK;
}
#if !defined(ARGON2_NO_THREADS)
#ifdef _WIN32
static unsigned __stdcall fill_segment_thr(void *thread_data)
#else
static void *fill_segment_thr(void *thread_data)
#endif
{
argon2_thread_data *my_data = thread_data;
fill_segment(my_data->instance_ptr, my_data->pos);
argon2_thread_exit();
return 0;
}
/* Multi-threaded version for p > 1 case */
static int fill_memory_blocks_mt(argon2_instance_t *instance) {
uint32_t r, s;
argon2_thread_handle_t *thread = NULL;
argon2_thread_data *thr_data = NULL;
int rc = ARGON2_OK;
/* 1. Allocating space for threads */
thread = calloc(instance->lanes, sizeof(argon2_thread_handle_t));
if (thread == NULL) {
rc = ARGON2_MEMORY_ALLOCATION_ERROR;
goto fail;
}
thr_data = calloc(instance->lanes, sizeof(argon2_thread_data));
if (thr_data == NULL) {
rc = ARGON2_MEMORY_ALLOCATION_ERROR;
goto fail;
}
for (r = 0; r < instance->passes; ++r) {
for (s = 0; s < ARGON2_SYNC_POINTS; ++s) {
uint32_t l, ll;
/* 2. Calling threads */
for (l = 0; l < instance->lanes; ++l) {
argon2_position_t position;
/* 2.1 Join a thread if limit is exceeded */
if (l >= instance->threads) {
if (argon2_thread_join(thread[l - instance->threads])) {
rc = ARGON2_THREAD_FAIL;
goto fail;
}
}
/* 2.2 Create thread */
position.pass = r;
position.lane = l;
position.slice = (uint8_t)s;
position.index = 0;
thr_data[l].instance_ptr =
instance; /* preparing the thread input */
memcpy(&(thr_data[l].pos), &position,
sizeof(argon2_position_t));
if (argon2_thread_create(&thread[l], &fill_segment_thr,
(void *)&thr_data[l])) {
/* Wait for already running threads */
for (ll = 0; ll < l; ++ll)
argon2_thread_join(thread[ll]);
rc = ARGON2_THREAD_FAIL;
goto fail;
}
/* fill_segment(instance, position); */
/* Non-threaded equivalent of the lines above */
}
/* 3. Joining remaining threads */
for (l = instance->lanes - instance->threads; l < instance->lanes;
++l) {
if (argon2_thread_join(thread[l])) {
rc = ARGON2_THREAD_FAIL;
goto fail;
}
}
}
}
fail:
if (thread != NULL) {
free(thread);
}
if (thr_data != NULL) {
free(thr_data);
}
return rc;
}
#endif /* ARGON2_NO_THREADS */
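/*
 * Fills the whole memory matrix, dispatching to the single-threaded path
 * when only one thread is requested (or threads are compiled out) and to
 * the multi-threaded path otherwise.
 */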
int fill_memory_blocks(argon2_instance_t *instance) {
if (instance == NULL || instance->lanes == 0) {
return ARGON2_INCORRECT_PARAMETER;
}
#if defined(ARGON2_NO_THREADS)
return fill_memory_blocks_st(instance);
#else
return instance->threads == 1 ?
fill_memory_blocks_st(instance) : fill_memory_blocks_mt(instance);
#endif
}
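/* Checks every context field against the ARGON2_MIN_ and ARGON2_MAX_ limits. */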
int validate_inputs(const argon2_context *context) {
if (NULL == context) {
return ARGON2_INCORRECT_PARAMETER;
}
if (NULL == context->out) {
return ARGON2_OUTPUT_PTR_NULL;
}
/* Validate output length */
if (ARGON2_MIN_OUTLEN > context->outlen) {
return ARGON2_OUTPUT_TOO_SHORT;
}
if (ARGON2_MAX_OUTLEN < context->outlen) {
return ARGON2_OUTPUT_TOO_LONG;
}
/* Validate password (required param) */
if (NULL == context->pwd) {
if (0 != context->pwdlen) {
return ARGON2_PWD_PTR_MISMATCH;
}
}
if (ARGON2_MIN_PWD_LENGTH > context->pwdlen) {
return ARGON2_PWD_TOO_SHORT;
}
if (ARGON2_MAX_PWD_LENGTH < context->pwdlen) {
return ARGON2_PWD_TOO_LONG;
}
/* Validate salt (required param) */
if (NULL == context->salt) {
if (0 != context->saltlen) {
return ARGON2_SALT_PTR_MISMATCH;
}
}
if (ARGON2_MIN_SALT_LENGTH > context->saltlen) {
return ARGON2_SALT_TOO_SHORT;
}
if (ARGON2_MAX_SALT_LENGTH < context->saltlen) {
return ARGON2_SALT_TOO_LONG;
}
/* Validate secret (optional param) */
if (NULL == context->secret) {
if (0 != context->secretlen) {
return ARGON2_SECRET_PTR_MISMATCH;
}
} else {
if (ARGON2_MIN_SECRET > context->secretlen) {
return ARGON2_SECRET_TOO_SHORT;
}
if (ARGON2_MAX_SECRET < context->secretlen) {
return ARGON2_SECRET_TOO_LONG;
}
}
/* Validate associated data (optional param) */
if (NULL == context->ad) {
if (0 != context->adlen) {
return ARGON2_AD_PTR_MISMATCH;
}
} else {
if (ARGON2_MIN_AD_LENGTH > context->adlen) {
return ARGON2_AD_TOO_SHORT;
}
if (ARGON2_MAX_AD_LENGTH < context->adlen) {
return ARGON2_AD_TOO_LONG;
}
}
/* Validate memory cost */
if (ARGON2_MIN_MEMORY > context->m_cost) {
return ARGON2_MEMORY_TOO_LITTLE;
}
if (ARGON2_MAX_MEMORY < context->m_cost) {
return ARGON2_MEMORY_TOO_MUCH;
}
if (context->m_cost < 8 * context->lanes) {
return ARGON2_MEMORY_TOO_LITTLE;
}
/* Validate time cost */
if (ARGON2_MIN_TIME > context->t_cost) {
return ARGON2_TIME_TOO_SMALL;
}
if (ARGON2_MAX_TIME < context->t_cost) {
return ARGON2_TIME_TOO_LARGE;
}
/* Validate lanes */
if (ARGON2_MIN_LANES > context->lanes) {
return ARGON2_LANES_TOO_FEW;
}
if (ARGON2_MAX_LANES < context->lanes) {
return ARGON2_LANES_TOO_MANY;
}
/* Validate threads */
if (ARGON2_MIN_THREADS > context->threads) {
return ARGON2_THREADS_TOO_FEW;
}
if (ARGON2_MAX_THREADS < context->threads) {
return ARGON2_THREADS_TOO_MANY;
}
if (NULL != context->allocate_cbk && NULL == context->free_cbk) {
return ARGON2_FREE_MEMORY_CBK_NULL;
}
if (NULL == context->allocate_cbk && NULL != context->free_cbk) {
return ARGON2_ALLOCATE_MEMORY_CBK_NULL;
}
return ARGON2_OK;
}
void fill_first_blocks(uint8_t *blockhash, const argon2_instance_t *instance) {
uint32_t l;
/* Make the first and second block in each lane as G(H0||0||i) or
G(H0||1||i) */
uint8_t blockhash_bytes[ARGON2_BLOCK_SIZE];
for (l = 0; l < instance->lanes; ++l) {
store32(blockhash + ARGON2_PREHASH_DIGEST_LENGTH, 0);
store32(blockhash + ARGON2_PREHASH_DIGEST_LENGTH + 4, l);
blake2b_long(blockhash_bytes, ARGON2_BLOCK_SIZE, blockhash,
ARGON2_PREHASH_SEED_LENGTH);
load_block(&instance->memory[l * instance->lane_length + 0],
blockhash_bytes);
store32(blockhash + ARGON2_PREHASH_DIGEST_LENGTH, 1);
blake2b_long(blockhash_bytes, ARGON2_BLOCK_SIZE, blockhash,
ARGON2_PREHASH_SEED_LENGTH);
load_block(&instance->memory[l * instance->lane_length + 1],
blockhash_bytes);
}
clear_internal_memory(blockhash_bytes, ARGON2_BLOCK_SIZE);
}
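/*
 * Computes H_0: feeds every parameter and input (lanes, outlen, m_cost,
 * t_cost, version, type, password, salt, secret, associated data) into
 * BLAKE2b, producing the 64-byte prehash digest. Optionally wipes the
 * password and secret when the corresponding CLEAR flags are set.
 */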
void initial_hash(uint8_t *blockhash, argon2_context *context,
argon2_type type) {
blake2b_state BlakeHash;
uint8_t value[sizeof(uint32_t)];
if (NULL == context || NULL == blockhash) {
return;
}
blake2b_init(&BlakeHash, ARGON2_PREHASH_DIGEST_LENGTH);
store32(&value, context->lanes);
blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
store32(&value, context->outlen);
blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
store32(&value, context->m_cost);
blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
store32(&value, context->t_cost);
blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
store32(&value, context->version);
blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
store32(&value, (uint32_t)type);
blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
store32(&value, context->pwdlen);
blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
if (context->pwd != NULL) {
blake2b_update(&BlakeHash, (const uint8_t *)context->pwd,
context->pwdlen);
if (context->flags & ARGON2_FLAG_CLEAR_PASSWORD) {
explicit_bzero(context->pwd, context->pwdlen);
context->pwdlen = 0;
}
}
store32(&value, context->saltlen);
blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
if (context->salt != NULL) {
blake2b_update(&BlakeHash, (const uint8_t *)context->salt,
context->saltlen);
}
store32(&value, context->secretlen);
blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
if (context->secret != NULL) {
blake2b_update(&BlakeHash, (const uint8_t *)context->secret,
context->secretlen);
if (context->flags & ARGON2_FLAG_CLEAR_SECRET) {
explicit_bzero(context->secret, context->secretlen);
context->secretlen = 0;
}
}
store32(&value, context->adlen);
blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));
if (context->ad != NULL) {
blake2b_update(&BlakeHash, (const uint8_t *)context->ad,
context->adlen);
}
blake2b_final(&BlakeHash, blockhash, ARGON2_PREHASH_DIGEST_LENGTH);
}
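/*
 * Sets up an instance: allocates the memory matrix, computes H_0 over the
 * context, and fills the first two blocks of every lane from it.
 */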
int initialize(argon2_instance_t *instance, argon2_context *context) {
uint8_t blockhash[ARGON2_PREHASH_SEED_LENGTH];
int result = ARGON2_OK;
if (instance == NULL || context == NULL)
return ARGON2_INCORRECT_PARAMETER;
instance->context_ptr = context;
/* 1. Memory allocation */
result = allocate_memory(context, (uint8_t **)&(instance->memory),
instance->memory_blocks, sizeof(block));
if (result != ARGON2_OK) {
return result;
}
/* 2. Initial hashing */
/* H_0 + 8 extra bytes to produce the first blocks */
/* uint8_t blockhash[ARGON2_PREHASH_SEED_LENGTH]; */
/* Hashing all inputs */
initial_hash(blockhash, context, instance->type);
/* Zeroing 8 extra bytes */
clear_internal_memory(blockhash + ARGON2_PREHASH_DIGEST_LENGTH,
ARGON2_PREHASH_SEED_LENGTH -
ARGON2_PREHASH_DIGEST_LENGTH);
/* 3. Creating first blocks, we always have at least two blocks in a slice
*/
fill_first_blocks(blockhash, instance);
/* Clearing the hash */
clear_internal_memory(blockhash, ARGON2_PREHASH_SEED_LENGTH);
return ARGON2_OK;
}