/*-*- mode:c;indent-tabs-mode:nil;c-basic-offset:4;tab-width:8;coding:utf-8 -*-│
│vi: set net ft=c ts=4 sts=4 sw=4 fenc=utf-8                               :vi│
╞══════════════════════════════════════════════════════════════════════════════╡
│                                                                              │
│  Argon2 reference source code package - reference C implementations         │
│                                                                              │
│  Copyright 2015                                                              │
│  Daniel Dinu, Dmitry Khovratovich, Jean-Philippe Aumasson, and Samuel Neves  │
│                                                                              │
│  You may use this work under the terms of a Creative Commons CC0 1.0        │
│  License/Waiver or the Apache Public License 2.0, at your option. The       │
│  terms of these licenses can be found at:                                    │
│                                                                              │
│  - CC0 1.0 Universal : https://creativecommons.org/publicdomain/zero/1.0    │
│  - Apache 2.0        : https://www.apache.org/licenses/LICENSE-2.0          │
│                                                                              │
╚─────────────────────────────────────────────────────────────────────────────*/
#include "libc/mem/mem.h"
#include "third_party/argon2/blake2-impl.h"
#include "third_party/argon2/blake2.h"
#include "third_party/argon2/core.h"

asm(".ident\t\"\\n\\n\
argon2 (CC0 or Apache2)\\n\
Copyright 2016 Daniel Dinu, Dmitry Khovratovich\\n\
Copyright 2016 Jean-Philippe Aumasson, Samuel Neves\"");
/* clang-format off */

int FLAG_clear_internal_memory = 1;

void init_block_value(block *b, uint8_t in) {
    memset(b->v, in, sizeof(b->v));
}

void copy_block(block *dst, const block *src) {
    memcpy(dst->v, src->v, sizeof(uint64_t) * ARGON2_QWORDS_IN_BLOCK);
}

optimizespeed void xor_block(block *dst, const block *src) {
    int i;
    for (i = 0; i < ARGON2_QWORDS_IN_BLOCK; ++i) {
        dst->v[i] ^= src->v[i];
    }
}

static void load_block(block *dst, const void *input) {
    unsigned i;
    for (i = 0; i < ARGON2_QWORDS_IN_BLOCK; ++i) {
        dst->v[i] = load64((const uint8_t *)input + i * sizeof(dst->v[i]));
    }
}

static void store_block(void *output, const block *src) {
    unsigned i;
    for (i = 0; i < ARGON2_QWORDS_IN_BLOCK; ++i) {
        store64((uint8_t *)output + i * sizeof(src->v[i]), src->v[i]);
    }
}

/**
 * Allocates memory to the given pointer, uses the appropriate allocator as
 * specified in the context. Total allocated memory is num*size.
 * @param context argon2_context which specifies the allocator
 * @param memory pointer to the pointer to the memory
 * @param size the size in bytes for each element to be allocated
 * @param num the number of elements to be allocated
 * @return ARGON2_OK if @memory is a valid pointer and memory is allocated
 */
int allocate_memory(const argon2_context *context, uint8_t **memory,
                    size_t num, size_t size) {
    size_t memory_size = num * size;
    if (memory == NULL) {
        return ARGON2_MEMORY_ALLOCATION_ERROR;
    }

    /* 1. Check for multiplication overflow */
    if (size != 0 && memory_size / size != num) {
        return ARGON2_MEMORY_ALLOCATION_ERROR;
    }

    /* 2. Try to allocate with appropriate allocator */
    if (context->allocate_cbk) {
        (context->allocate_cbk)(memory, memory_size);
    } else {
        *memory = malloc(memory_size);
    }

    if (*memory == NULL) {
        return ARGON2_MEMORY_ALLOCATION_ERROR;
    }

    return ARGON2_OK;
}
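/*
 * Illustrative sketch (not part of the library): a caller can route Argon2's
 * block allocations through its own allocator by setting allocate_cbk and
 * free_cbk in argon2_context. The callback shapes below are inferred from the
 * call sites in allocate_memory()/free_memory(); the exact typedef names live
 * in argon2.h, and the my_pool_* helpers are hypothetical placeholders.
 */
#if 0
static int my_allocate_cbk(uint8_t **memory, size_t bytes_to_allocate) {
    *memory = my_pool_alloc(bytes_to_allocate); /* hypothetical pool */
    return *memory ? ARGON2_OK : ARGON2_MEMORY_ALLOCATION_ERROR;
}

static void my_free_cbk(uint8_t *memory, size_t bytes_to_allocate) {
    my_pool_free(memory, bytes_to_allocate); /* hypothetical pool */
}

/* ctx.allocate_cbk = my_allocate_cbk; ctx.free_cbk = my_free_cbk; */
#endif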
/**
 * Frees memory at the given pointer, uses the appropriate deallocator as
 * specified in the context. Also cleans the memory using
 * clear_internal_memory.
 * @param context argon2_context which specifies the deallocator
 * @param memory pointer to buffer to be freed
 * @param size the size in bytes for each element to be deallocated
 * @param num the number of elements to be deallocated
 */
void free_memory(const argon2_context *context, uint8_t *memory,
                 size_t num, size_t size) {
    size_t memory_size = num * size;
    clear_internal_memory(memory, memory_size);
    if (context->free_cbk) {
        (context->free_cbk)(memory, memory_size);
    } else {
        free(memory);
    }
}

/**
 * Securely clears the memory if FLAG_clear_internal_memory is set.
 * If the flag isn't set, this function does nothing.
 * @param v Pointer to the memory
 * @param n Memory size in bytes
 */
void clear_internal_memory(void *v, size_t n) {
    if (FLAG_clear_internal_memory && v) {
        explicit_bzero(v, n);
    }
}
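/*
 * Note (illustrative, not part of the library): wiping is governed by the
 * global defined above, so an application that accepts leaving key material
 * in freed memory (e.g. while benchmarking) could disable it like this:
 *
 *     extern int FLAG_clear_internal_memory;
 *     FLAG_clear_internal_memory = 0;   /- skip explicit_bzero on cleanup -/
 */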
/**
 * XORing the last block of each lane, hashing it, making the tag.
 * Deallocates the memory.
 * @param context current Argon2 context (use only the out parameters from it)
 * @param instance Pointer to current instance of Argon2
 * @pre instance->memory must point to the necessary amount of memory
 * @pre context->out must point to outlen bytes of memory
 * @pre if context->free_cbk is not NULL, it should point to a function that
 * deallocates memory
 */
void finalize(const argon2_context *context, argon2_instance_t *instance) {
    if (context != NULL && instance != NULL) {
        block blockhash;
        uint32_t l;
        copy_block(&blockhash, instance->memory + instance->lane_length - 1);

        /* XOR the last blocks */
        for (l = 1; l < instance->lanes; ++l) {
            uint32_t last_block_in_lane =
                l * instance->lane_length + (instance->lane_length - 1);
            xor_block(&blockhash, instance->memory + last_block_in_lane);
        }

        /* Hash the result */
        {
            uint8_t blockhash_bytes[ARGON2_BLOCK_SIZE];
            store_block(blockhash_bytes, &blockhash);
            blake2b_long(context->out, context->outlen, blockhash_bytes,
                         ARGON2_BLOCK_SIZE);
            /* clear blockhash and blockhash_bytes */
            clear_internal_memory(blockhash.v, ARGON2_BLOCK_SIZE);
            clear_internal_memory(blockhash_bytes, ARGON2_BLOCK_SIZE);
        }

        free_memory(context, (uint8_t *)instance->memory,
                    instance->memory_blocks, sizeof(block));
    }
}

/**
 * Computes absolute position of reference block in the lane following a skewed
 * distribution and using a pseudo-random value as input
 * @param instance Pointer to the current instance
 * @param position Pointer to the current position
 * @param pseudo_rand 32-bit pseudo-random value used to determine the position
 * @param same_lane Indicates if the block will be taken from the current lane.
 * If so we can reference the current segment
 * @pre All pointers must be valid
 */
uint32_t index_alpha(const argon2_instance_t *instance,
                     const argon2_position_t *position, uint32_t pseudo_rand,
                     int same_lane) {
    /*
     * Pass 0:
     *   This lane   : all already finished segments plus already constructed
     *                 blocks in this segment
     *   Other lanes : all already finished segments
     * Pass 1+:
     *   This lane   : (SYNC_POINTS - 1) last segments plus already constructed
     *                 blocks in this segment
     *   Other lanes : (SYNC_POINTS - 1) last segments
     */
    uint32_t reference_area_size;
    uint64_t relative_position;
    uint32_t start_position, absolute_position;

    if (0 == position->pass) {
        /* First pass */
        if (0 == position->slice) {
            /* First slice */
            reference_area_size =
                position->index - 1; /* all but the previous */
        } else {
            if (same_lane) {
                /* The same lane => add current segment */
                reference_area_size =
                    position->slice * instance->segment_length +
                    position->index - 1;
            } else {
                reference_area_size =
                    position->slice * instance->segment_length +
                    ((position->index == 0) ? (-1) : 0);
            }
        }
    } else {
        /* Second pass */
        if (same_lane) {
            reference_area_size = instance->lane_length -
                                  instance->segment_length +
                                  position->index - 1;
        } else {
            reference_area_size = instance->lane_length -
                                  instance->segment_length +
                                  ((position->index == 0) ? (-1) : 0);
        }
    }

    /* 1.2.4. Mapping pseudo_rand to 0..<reference_area_size-1> and produce
     * relative position */
    relative_position = pseudo_rand;
    relative_position = relative_position * relative_position >> 32;
    relative_position = reference_area_size - 1 -
                        (reference_area_size * relative_position >> 32);

    /* 1.2.5 Computing starting position */
    start_position = 0;

    if (0 != position->pass) {
        start_position = (position->slice == ARGON2_SYNC_POINTS - 1)
                             ? 0
                             : (position->slice + 1) * instance->segment_length;
    }

    /* 1.2.6. Computing absolute position */
    absolute_position = (start_position + relative_position) %
                        instance->lane_length; /* absolute position */
    return absolute_position;
}
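/*
 * Worked example of the mapping above (illustrative only): with
 * pseudo_rand = 0x80000000, the squaring step gives (2^31)^2 >> 32 = 2^30,
 * so the reference offset becomes
 *     reference_area_size - 1 - (reference_area_size * 2^30 >> 32)
 *   = reference_area_size - 1 - reference_area_size / 4,
 * i.e. roughly three quarters of the way into the reference window. The
 * squaring concentrates the result near the end of the window, which is what
 * skews references toward the most recently written blocks.
 */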
/* Single-threaded version for p=1 case */
static int fill_memory_blocks_st(argon2_instance_t *instance) {
    uint32_t r, s, l;

    for (r = 0; r < instance->passes; ++r) {
        for (s = 0; s < ARGON2_SYNC_POINTS; ++s) {
            for (l = 0; l < instance->lanes; ++l) {
                argon2_position_t position = {r, l, (uint8_t)s, 0};
                fill_segment(instance, position);
            }
        }
    }
    return ARGON2_OK;
}

#if !defined(ARGON2_NO_THREADS)

#ifdef _WIN32
static unsigned __stdcall fill_segment_thr(void *thread_data)
#else
static void *fill_segment_thr(void *thread_data)
#endif
{
    argon2_thread_data *my_data = thread_data;
    fill_segment(my_data->instance_ptr, my_data->pos);
    argon2_thread_exit();
    return 0;
}

/* Multi-threaded version for p > 1 case */
static int fill_memory_blocks_mt(argon2_instance_t *instance) {
    uint32_t r, s;
    argon2_thread_handle_t *thread = NULL;
    argon2_thread_data *thr_data = NULL;
    int rc = ARGON2_OK;

    /* 1. Allocating space for threads */
    thread = calloc(instance->lanes, sizeof(argon2_thread_handle_t));
    if (thread == NULL) {
        rc = ARGON2_MEMORY_ALLOCATION_ERROR;
        goto fail;
    }

    thr_data = calloc(instance->lanes, sizeof(argon2_thread_data));
    if (thr_data == NULL) {
        rc = ARGON2_MEMORY_ALLOCATION_ERROR;
        goto fail;
    }

    for (r = 0; r < instance->passes; ++r) {
        for (s = 0; s < ARGON2_SYNC_POINTS; ++s) {
            uint32_t l, ll;

            /* 2. Calling threads */
            for (l = 0; l < instance->lanes; ++l) {
                argon2_position_t position;

                /* 2.1 Join a thread if limit is exceeded */
                if (l >= instance->threads) {
                    if (argon2_thread_join(thread[l - instance->threads])) {
                        rc = ARGON2_THREAD_FAIL;
                        goto fail;
                    }
                }

                /* 2.2 Create thread */
                position.pass = r;
                position.lane = l;
                position.slice = (uint8_t)s;
                position.index = 0;
                thr_data[l].instance_ptr =
                    instance; /* preparing the thread input */
                memcpy(&(thr_data[l].pos), &position,
                       sizeof(argon2_position_t));
                if (argon2_thread_create(&thread[l], &fill_segment_thr,
                                         (void *)&thr_data[l])) {
                    /* Wait for already running threads */
                    for (ll = 0; ll < l; ++ll)
                        argon2_thread_join(thread[ll]);
                    rc = ARGON2_THREAD_FAIL;
                    goto fail;
                }

                /* fill_segment(instance, position); */
                /* Non-thread equivalent of the lines above */
            }

            /* 3. Joining remaining threads */
            for (l = instance->lanes - instance->threads; l < instance->lanes;
                 ++l) {
                if (argon2_thread_join(thread[l])) {
                    rc = ARGON2_THREAD_FAIL;
                    goto fail;
                }
            }
        }
    }

fail:
    if (thread != NULL) {
        free(thread);
    }
    if (thr_data != NULL) {
        free(thr_data);
    }
    return rc;
}
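/*
 * Scheduling illustration for the loop above (restates the code, adds no new
 * behaviour): with lanes = 4 and threads = 2, step 2 launches lanes 0 and 1,
 * joins lane 0 before launching lane 2, joins lane 1 before launching lane 3,
 * and step 3 then joins lanes 2 and 3. At most `threads` segments of a slice
 * run concurrently, and every lane's segment is finished before the next
 * slice starts.
 */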
#endif /* ARGON2_NO_THREADS */

/**
 * Function that fills the entire memory t_cost times based on the first two
 * blocks in each lane
 * @param instance Pointer to the current instance
 * @return ARGON2_OK if successful, an error code otherwise
 */
int fill_memory_blocks(argon2_instance_t *instance) {
    if (instance == NULL || instance->lanes == 0) {
        return ARGON2_INCORRECT_PARAMETER;
    }
#if defined(ARGON2_NO_THREADS)
    return fill_memory_blocks_st(instance);
#else
    return instance->threads == 1 ? fill_memory_blocks_st(instance)
                                  : fill_memory_blocks_mt(instance);
#endif
}

/**
 * Function that validates all inputs against predefined restrictions and
 * returns an error code.
 * @param context Pointer to current Argon2 context
 * @return ARGON2_OK if everything is all right, otherwise one of the error
 * codes (all defined in argon2.h)
 */
int validate_inputs(const argon2_context *context) {
    if (NULL == context) {
        return ARGON2_INCORRECT_PARAMETER;
    }

    if (NULL == context->out) {
        return ARGON2_OUTPUT_PTR_NULL;
    }

    /* Validate output length */
    if (ARGON2_MIN_OUTLEN > context->outlen) {
        return ARGON2_OUTPUT_TOO_SHORT;
    }

    if (ARGON2_MAX_OUTLEN < context->outlen) {
        return ARGON2_OUTPUT_TOO_LONG;
    }

    /* Validate password (required param) */
    if (NULL == context->pwd) {
        if (0 != context->pwdlen) {
            return ARGON2_PWD_PTR_MISMATCH;
        }
    }

    if (ARGON2_MIN_PWD_LENGTH > context->pwdlen) {
        return ARGON2_PWD_TOO_SHORT;
    }

    if (ARGON2_MAX_PWD_LENGTH < context->pwdlen) {
        return ARGON2_PWD_TOO_LONG;
    }

    /* Validate salt (required param) */
    if (NULL == context->salt) {
        if (0 != context->saltlen) {
            return ARGON2_SALT_PTR_MISMATCH;
        }
    }

    if (ARGON2_MIN_SALT_LENGTH > context->saltlen) {
        return ARGON2_SALT_TOO_SHORT;
    }

    if (ARGON2_MAX_SALT_LENGTH < context->saltlen) {
        return ARGON2_SALT_TOO_LONG;
    }

    /* Validate secret (optional param) */
    if (NULL == context->secret) {
        if (0 != context->secretlen) {
            return ARGON2_SECRET_PTR_MISMATCH;
        }
    } else {
        if (ARGON2_MIN_SECRET > context->secretlen) {
            return ARGON2_SECRET_TOO_SHORT;
        }
        if (ARGON2_MAX_SECRET < context->secretlen) {
            return ARGON2_SECRET_TOO_LONG;
        }
    }

    /* Validate associated data (optional param) */
    if (NULL == context->ad) {
        if (0 != context->adlen) {
            return ARGON2_AD_PTR_MISMATCH;
        }
    } else {
        if (ARGON2_MIN_AD_LENGTH > context->adlen) {
            return ARGON2_AD_TOO_SHORT;
        }
        if (ARGON2_MAX_AD_LENGTH < context->adlen) {
            return ARGON2_AD_TOO_LONG;
        }
    }

    /* Validate memory cost */
    if (ARGON2_MIN_MEMORY > context->m_cost) {
        return ARGON2_MEMORY_TOO_LITTLE;
    }

    if (ARGON2_MAX_MEMORY < context->m_cost) {
        return ARGON2_MEMORY_TOO_MUCH;
    }

    if (context->m_cost < 8 * context->lanes) {
        return ARGON2_MEMORY_TOO_LITTLE;
    }

    /* Validate time cost */
    if (ARGON2_MIN_TIME > context->t_cost) {
        return ARGON2_TIME_TOO_SMALL;
    }

    if (ARGON2_MAX_TIME < context->t_cost) {
        return ARGON2_TIME_TOO_LARGE;
    }

    /* Validate lanes */
    if (ARGON2_MIN_LANES > context->lanes) {
        return ARGON2_LANES_TOO_FEW;
    }

    if (ARGON2_MAX_LANES < context->lanes) {
        return ARGON2_LANES_TOO_MANY;
    }

    /* Validate threads */
    if (ARGON2_MIN_THREADS > context->threads) {
        return ARGON2_THREADS_TOO_FEW;
    }

    if (ARGON2_MAX_THREADS < context->threads) {
        return ARGON2_THREADS_TOO_MANY;
    }

    if (NULL != context->allocate_cbk && NULL == context->free_cbk) {
        return ARGON2_FREE_MEMORY_CBK_NULL;
    }

    if (NULL == context->allocate_cbk && NULL != context->free_cbk) {
        return ARGON2_ALLOCATE_MEMORY_CBK_NULL;
    }

    return ARGON2_OK;
}
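/*
 * Illustrative sketch (not part of the library): a context that should pass
 * the checks above, assuming the usual constants from argon2.h (e.g.
 * ARGON2_VERSION_13 and ARGON2_DEFAULT_FLAGS). The field names are the ones
 * this file already reads; the buffers are placeholders.
 */
#if 0
uint8_t out[32], salt[16] = {0}, pwd[] = "password";
argon2_context ctx = {
    .out = out,           .outlen = sizeof(out),
    .pwd = pwd,           .pwdlen = sizeof(pwd) - 1,
    .salt = salt,         .saltlen = sizeof(salt),
    .secret = NULL,       .secretlen = 0,
    .ad = NULL,           .adlen = 0,
    .t_cost = 3,          .m_cost = 1 << 16, /* 64 MiB of 1 KiB blocks; >= 8 * lanes */
    .lanes = 4,           .threads = 4,
    .version = ARGON2_VERSION_13,
    .allocate_cbk = NULL, .free_cbk = NULL,
    .flags = ARGON2_DEFAULT_FLAGS,
};
/* validate_inputs(&ctx) is expected to return ARGON2_OK */
#endif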
/**
 * Function creates first 2 blocks per lane
 * @param instance Pointer to the current instance
 * @param blockhash Pointer to the pre-hashing digest
 * @pre blockhash must point to @a PREHASH_SEED_LENGTH allocated values
 */
void fill_first_blocks(uint8_t *blockhash, const argon2_instance_t *instance) {
    uint32_t l;
    /* Make the first and second block in each lane as G(H0||0||i) or
       G(H0||1||i) */
    uint8_t blockhash_bytes[ARGON2_BLOCK_SIZE];
    for (l = 0; l < instance->lanes; ++l) {
        store32(blockhash + ARGON2_PREHASH_DIGEST_LENGTH, 0);
        store32(blockhash + ARGON2_PREHASH_DIGEST_LENGTH + 4, l);
        blake2b_long(blockhash_bytes, ARGON2_BLOCK_SIZE, blockhash,
                     ARGON2_PREHASH_SEED_LENGTH);
        load_block(&instance->memory[l * instance->lane_length + 0],
                   blockhash_bytes);

        store32(blockhash + ARGON2_PREHASH_DIGEST_LENGTH, 1);
        blake2b_long(blockhash_bytes, ARGON2_BLOCK_SIZE, blockhash,
                     ARGON2_PREHASH_SEED_LENGTH);
        load_block(&instance->memory[l * instance->lane_length + 1],
                   blockhash_bytes);
    }
    clear_internal_memory(blockhash_bytes, ARGON2_BLOCK_SIZE);
}

/**
 * Hashes all the inputs into @a blockhash[PREHASH_DIGEST_LENGTH],
 * clears password and secret if needed
 * @param context Pointer to the Argon2 internal structure containing memory
 * pointer, and parameters for time and space requirements
 * @param blockhash Buffer for pre-hashing digest
 * @param type Argon2 type
 * @pre @a blockhash must have at least @a PREHASH_DIGEST_LENGTH bytes
 * allocated
 */
void initial_hash(uint8_t *blockhash, argon2_context *context,
                  argon2_type type) {
    blake2b_state BlakeHash;
    uint8_t value[sizeof(uint32_t)];

    if (NULL == context || NULL == blockhash) {
        return;
    }

    blake2b_init(&BlakeHash, ARGON2_PREHASH_DIGEST_LENGTH);

    store32(&value, context->lanes);
    blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));

    store32(&value, context->outlen);
    blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));

    store32(&value, context->m_cost);
    blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));

    store32(&value, context->t_cost);
    blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));

    store32(&value, context->version);
    blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));

    store32(&value, (uint32_t)type);
    blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));

    store32(&value, context->pwdlen);
    blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));

    if (context->pwd != NULL) {
        blake2b_update(&BlakeHash, (const uint8_t *)context->pwd,
                       context->pwdlen);
        if (context->flags & ARGON2_FLAG_CLEAR_PASSWORD) {
            explicit_bzero(context->pwd, context->pwdlen);
            context->pwdlen = 0;
        }
    }

    store32(&value, context->saltlen);
    blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));

    if (context->salt != NULL) {
        blake2b_update(&BlakeHash, (const uint8_t *)context->salt,
                       context->saltlen);
    }

    store32(&value, context->secretlen);
    blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));

    if (context->secret != NULL) {
        blake2b_update(&BlakeHash, (const uint8_t *)context->secret,
                       context->secretlen);
        if (context->flags & ARGON2_FLAG_CLEAR_SECRET) {
            explicit_bzero(context->secret, context->secretlen);
            context->secretlen = 0;
        }
    }

    store32(&value, context->adlen);
    blake2b_update(&BlakeHash, (const uint8_t *)&value, sizeof(value));

    if (context->ad != NULL) {
        blake2b_update(&BlakeHash, (const uint8_t *)context->ad,
                       context->adlen);
    }

    blake2b_final(&BlakeHash, blockhash, ARGON2_PREHASH_DIGEST_LENGTH);
}

/**
 * Function allocates memory, hashes the inputs with Blake, and creates the
 * first two blocks. Returns the pointer to the main memory with 2 blocks per
 * lane initialized.
 * @param context Pointer to the Argon2 internal structure containing memory
 * pointer, and parameters for time and space requirements.
 * @param instance Current Argon2 instance
 * @return Zero if successful, -1 if memory failed to allocate.
 * @context->state will be modified if successful.
 */
int initialize(argon2_instance_t *instance, argon2_context *context) {
    uint8_t blockhash[ARGON2_PREHASH_SEED_LENGTH];
    int result = ARGON2_OK;

    if (instance == NULL || context == NULL)
        return ARGON2_INCORRECT_PARAMETER;
    instance->context_ptr = context;

    /* 1. Memory allocation */
    result = allocate_memory(context, (uint8_t **)&(instance->memory),
                             instance->memory_blocks, sizeof(block));
    if (result != ARGON2_OK) {
        return result;
    }

    /* 2. Initial hashing */
    /* H_0 + 8 extra bytes to produce the first blocks */
    /* uint8_t blockhash[ARGON2_PREHASH_SEED_LENGTH]; */
    /* Hashing all inputs */
    initial_hash(blockhash, context, instance->type);
    /* Zeroing 8 extra bytes */
    clear_internal_memory(blockhash + ARGON2_PREHASH_DIGEST_LENGTH,
                          ARGON2_PREHASH_SEED_LENGTH -
                              ARGON2_PREHASH_DIGEST_LENGTH);

    /* 3. Creating first blocks, we always have at least two blocks in a
       slice */
    fill_first_blocks(blockhash, instance);

    /* Clearing the hash */
    clear_internal_memory(blockhash, ARGON2_PREHASH_SEED_LENGTH);

    return ARGON2_OK;
}
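/*
 * Illustrative end-to-end sketch (not part of this file): the high-level
 * driver in the Argon2 package strings the routines above together roughly
 * like this, assuming fill_segment() for the chosen Argon2 type is linked in.
 * The function name hash_with_core is hypothetical; the instance fields and
 * the block-count rounding follow the reference driver.
 */
#if 0
int hash_with_core(argon2_context *ctx, argon2_type type) {
    argon2_instance_t instance;
    uint32_t memory_blocks, segment_length;
    int rc = validate_inputs(ctx);
    if (rc != ARGON2_OK) return rc;

    /* Round the requested memory down to a multiple of 4 * lanes blocks */
    memory_blocks = ctx->m_cost;
    if (memory_blocks < 2 * ARGON2_SYNC_POINTS * ctx->lanes)
        memory_blocks = 2 * ARGON2_SYNC_POINTS * ctx->lanes;
    segment_length = memory_blocks / (ctx->lanes * ARGON2_SYNC_POINTS);
    memory_blocks = segment_length * ctx->lanes * ARGON2_SYNC_POINTS;

    instance.version = ctx->version;
    instance.memory = NULL;
    instance.passes = ctx->t_cost;
    instance.memory_blocks = memory_blocks;
    instance.segment_length = segment_length;
    instance.lane_length = segment_length * ARGON2_SYNC_POINTS;
    instance.lanes = ctx->lanes;
    instance.threads = ctx->threads;
    instance.type = type;
    if (instance.threads > instance.lanes) instance.threads = instance.lanes;

    rc = initialize(&instance, ctx);    /* allocate + H0 + first blocks */
    if (rc != ARGON2_OK) return rc;
    rc = fill_memory_blocks(&instance); /* t_cost passes over the memory */
    if (rc != ARGON2_OK) return rc;
    finalize(ctx, &instance);           /* tag + wipe + free */
    return ARGON2_OK;
}
#endif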