mirror of https://git.wownero.com/wownero/RandomWOW.git (synced 2024-12-22 15:58:53 +00:00)
commit 86949c8ce1

CMakeLists.txt (new file, 65 lines)
@@ -0,0 +1,65 @@
+# Copyright (c) 2019, The Monero Project
+#
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without modification, are
+# permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice, this list of
+#    conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice, this list
+#    of conditions and the following disclaimer in the documentation and/or other
+#    materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors may be
+#    used to endorse or promote products derived from this software without specific
+#    prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+set (randomx_sources
+  src/aes_hash.cpp
+  src/argon2_ref.c
+  src/dataset.cpp
+  src/soft_aes.cpp
+  src/virtual_memory.cpp
+  src/vm_interpreted.cpp
+  src/allocator.cpp
+  src/assembly_generator_x86.cpp
+  src/instruction.cpp
+  src/randomx.cpp
+  src/superscalar.cpp
+  src/vm_compiled.cpp
+  src/vm_interpreted_light.cpp
+  src/argon2_core.c
+  src/blake2_generator.cpp
+  src/instructions_portable.cpp
+  src/reciprocal.c
+  src/virtual_machine.cpp
+  src/vm_compiled_light.cpp
+  src/blake2/blake2b.c)
+
+if (ARCH_ID STREQUAL "x86_64" OR ARCH_ID STREQUAL "x86-64" OR ARCH_ID STREQUAL "amd64")
+  list(APPEND randomx_sources
+    src/jit_compiler_x86_static.S
+    src/jit_compiler_x86.cpp)
+  set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -maes")
+  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -maes")
+endif()
+
+set(RANDOMX_INCLUDE "${CMAKE_CURRENT_SOURCE_DIR}/src" CACHE STRING "RandomX Include path")
+
+add_library(randomx
+  ${randomx_sources})
+target_link_libraries(randomx
+  PRIVATE
+    ${CMAKE_THREAD_LIBS_INIT})
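The file above defines a plain `randomx` library target built from the listed sources and caches `RANDOMX_INCLUDE` so that a parent project can point its include path at `src/`. As a rough illustration of what a consumer built against that target looks like, here is a minimal C sketch; the `randomx_*` names and `RANDOMX_HASH_SIZE` are assumed from the upstream RandomX public header (src/randomx.h, which ships with the src/randomx.cpp listed above) and are not part of this diff.

/* Minimal consumer sketch; the randomx_* API names are an assumption carried
 * over from upstream RandomX and are not shown in this diff. */
#include <stdio.h>
#include <string.h>
#include "randomx.h"

int main(void) {
    const char key[]   = "RandomX example key";
    const char input[] = "RandomX example input";
    char hash[RANDOMX_HASH_SIZE];

    randomx_cache *cache = randomx_alloc_cache(RANDOMX_FLAG_DEFAULT);
    randomx_init_cache(cache, key, sizeof key - 1);   /* drives the Argon2 cache fill touched below */
    randomx_vm *vm = randomx_create_vm(RANDOMX_FLAG_DEFAULT, cache, NULL);

    randomx_calculate_hash(vm, input, sizeof input - 1, hash);

    randomx_destroy_vm(vm);
    randomx_release_cache(cache);

    for (unsigned i = 0; i < RANDOMX_HASH_SIZE; ++i)
        printf("%02x", (unsigned char)hash[i]);
    printf("\n");
    return 0;
}

On the build side, such a consumer would add ${RANDOMX_INCLUDE} to its include directories and link the randomx target; since the target contains C++ objects, the final link needs a C++ linker.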
@@ -61,13 +61,13 @@ along with RandomX. If not, see <http://www.gnu.org/licenses/>.
 #endif
 
 /***************Instance and Position constructors**********/
-void init_block_value(block *b, uint8_t in) { memset(b->v, in, sizeof(b->v)); }
+void rxa2_init_block_value(block *b, uint8_t in) { memset(b->v, in, sizeof(b->v)); }
 
-void copy_block(block *dst, const block *src) {
+void rxa2_copy_block(block *dst, const block *src) {
     memcpy(dst->v, src->v, sizeof(uint64_t) * ARGON2_QWORDS_IN_BLOCK);
 }
 
-void xor_block(block *dst, const block *src) {
+void rxa2_xor_block(block *dst, const block *src) {
     int i;
     for (i = 0; i < ARGON2_QWORDS_IN_BLOCK; ++i) {
         dst->v[i] ^= src->v[i];
@@ -90,7 +90,7 @@ static void store_block(void *output, const block *src) {
 
 /***************Memory functions*****************/
 
-int allocate_memory(const argon2_context *context, uint8_t **memory,
+int rxa2_allocate_memory(const argon2_context *context, uint8_t **memory,
     size_t num, size_t size) {
     size_t memory_size = num * size;
     if (memory == NULL) {
@@ -117,10 +117,10 @@ int allocate_memory(const argon2_context *context, uint8_t **memory,
     return ARGON2_OK;
 }
 
-void free_memory(const argon2_context *context, uint8_t *memory,
+void rxa2_free_memory(const argon2_context *context, uint8_t *memory,
     size_t num, size_t size) {
     size_t memory_size = num * size;
-    clear_internal_memory(memory, memory_size);
+    rxa2_clear_internal_memory(memory, memory_size);
     if (context->free_cbk) {
         (context->free_cbk)(memory, memory_size);
     }
@@ -129,7 +129,7 @@ void free_memory(const argon2_context *context, uint8_t *memory,
     }
 }
 
-void NOT_OPTIMIZED secure_wipe_memory(void *v, size_t n) {
+void NOT_OPTIMIZED rxa2_secure_wipe_memory(void *v, size_t n) {
 #if defined(_MSC_VER) && VC_GE_2005(_MSC_VER)
     SecureZeroMemory(v, n);
 #elif defined memset_s
@@ -144,13 +144,13 @@ void NOT_OPTIMIZED secure_wipe_memory(void *v, size_t n) {
 
 /* Memory clear flag defaults to true. */
 #define FLAG_clear_internal_memory 0
-void clear_internal_memory(void *v, size_t n) {
+void rxa2_clear_internal_memory(void *v, size_t n) {
     if (FLAG_clear_internal_memory && v) {
-        secure_wipe_memory(v, n);
+        rxa2_secure_wipe_memory(v, n);
     }
 }
 
-uint32_t index_alpha(const argon2_instance_t *instance,
+uint32_t rxa2_index_alpha(const argon2_instance_t *instance,
     const argon2_position_t *position, uint32_t pseudo_rand,
     int same_lane) {
     /*
@@ -232,7 +232,7 @@ static int fill_memory_blocks_st(argon2_instance_t *instance) {
     for (s = 0; s < ARGON2_SYNC_POINTS; ++s) {
         for (l = 0; l < instance->lanes; ++l) {
             argon2_position_t position = { r, l, (uint8_t)s, 0 };
-            fill_segment(instance, position);
+            rxa2_fill_segment(instance, position);
         }
     }
 #ifdef GENKAT
@@ -242,14 +242,14 @@ static int fill_memory_blocks_st(argon2_instance_t *instance) {
     return ARGON2_OK;
 }
 
-int fill_memory_blocks(argon2_instance_t *instance) {
+int rxa2_fill_memory_blocks(argon2_instance_t *instance) {
     if (instance == NULL || instance->lanes == 0) {
         return ARGON2_INCORRECT_PARAMETER;
     }
     return fill_memory_blocks_st(instance);
 }
 
-int validate_inputs(const argon2_context *context) {
+int rxa2_validate_inputs(const argon2_context *context) {
     if (NULL == context) {
         return ARGON2_INCORRECT_PARAMETER;
     }
@@ -378,7 +378,7 @@ int validate_inputs(const argon2_context *context) {
     return ARGON2_OK;
 }
 
-void fill_first_blocks(uint8_t *blockhash, const argon2_instance_t *instance) {
+void rxa2_fill_first_blocks(uint8_t *blockhash, const argon2_instance_t *instance) {
     uint32_t l;
     /* Make the first and second block in each lane as G(H0||0||i) or
        G(H0||1||i) */
@@ -387,21 +387,21 @@ void fill_first_blocks(uint8_t *blockhash, const argon2_instance_t *instance) {
 
         store32(blockhash + ARGON2_PREHASH_DIGEST_LENGTH, 0);
         store32(blockhash + ARGON2_PREHASH_DIGEST_LENGTH + 4, l);
-        blake2b_long(blockhash_bytes, ARGON2_BLOCK_SIZE, blockhash,
+        rxa2_blake2b_long(blockhash_bytes, ARGON2_BLOCK_SIZE, blockhash,
             ARGON2_PREHASH_SEED_LENGTH);
         load_block(&instance->memory[l * instance->lane_length + 0],
             blockhash_bytes);
 
         store32(blockhash + ARGON2_PREHASH_DIGEST_LENGTH, 1);
-        blake2b_long(blockhash_bytes, ARGON2_BLOCK_SIZE, blockhash,
+        rxa2_blake2b_long(blockhash_bytes, ARGON2_BLOCK_SIZE, blockhash,
            ARGON2_PREHASH_SEED_LENGTH);
         load_block(&instance->memory[l * instance->lane_length + 1],
            blockhash_bytes);
     }
-    clear_internal_memory(blockhash_bytes, ARGON2_BLOCK_SIZE);
+    rxa2_clear_internal_memory(blockhash_bytes, ARGON2_BLOCK_SIZE);
 }
 
-void initial_hash(uint8_t *blockhash, argon2_context *context, argon2_type type) {
+void rxa2_initial_hash(uint8_t *blockhash, argon2_context *context, argon2_type type) {
     blake2b_state BlakeHash;
     uint8_t value[sizeof(uint32_t)];
 
@@ -437,7 +437,7 @@ void initial_hash(uint8_t *blockhash, argon2_context *context, argon2_type type)
         context->pwdlen);
 
     if (context->flags & ARGON2_FLAG_CLEAR_PASSWORD) {
-        secure_wipe_memory(context->pwd, context->pwdlen);
+        rxa2_secure_wipe_memory(context->pwd, context->pwdlen);
         context->pwdlen = 0;
     }
 }
@@ -457,7 +457,7 @@ void initial_hash(uint8_t *blockhash, argon2_context *context, argon2_type type)
         context->secretlen);
 
     if (context->flags & ARGON2_FLAG_CLEAR_SECRET) {
-        secure_wipe_memory(context->secret, context->secretlen);
+        rxa2_secure_wipe_memory(context->secret, context->secretlen);
         context->secretlen = 0;
     }
 }
@@ -473,7 +473,7 @@ void initial_hash(uint8_t *blockhash, argon2_context *context, argon2_type type)
     blake2b_final(&BlakeHash, blockhash, ARGON2_PREHASH_DIGEST_LENGTH);
 }
 
-int argon_initialize(argon2_instance_t *instance, argon2_context *context) {
+int rxa2_argon_initialize(argon2_instance_t *instance, argon2_context *context) {
     uint8_t blockhash[ARGON2_PREHASH_SEED_LENGTH];
     int result = ARGON2_OK;
 
@@ -491,17 +491,17 @@ int argon_initialize(argon2_instance_t *instance, argon2_context *context) {
     /* H_0 + 8 extra bytes to produce the first blocks */
     /* uint8_t blockhash[ARGON2_PREHASH_SEED_LENGTH]; */
     /* Hashing all inputs */
-    initial_hash(blockhash, context, instance->type);
+    rxa2_initial_hash(blockhash, context, instance->type);
     /* Zeroing 8 extra bytes */
-    clear_internal_memory(blockhash + ARGON2_PREHASH_DIGEST_LENGTH,
+    rxa2_clear_internal_memory(blockhash + ARGON2_PREHASH_DIGEST_LENGTH,
         ARGON2_PREHASH_SEED_LENGTH -
         ARGON2_PREHASH_DIGEST_LENGTH);
 
     /* 3. Creating first blocks, we always have at least two blocks in a slice
      */
-    fill_first_blocks(blockhash, instance);
+    rxa2_fill_first_blocks(blockhash, instance);
     /* Clearing the hash */
-    clear_internal_memory(blockhash, ARGON2_PREHASH_SEED_LENGTH);
+    rxa2_clear_internal_memory(blockhash, ARGON2_PREHASH_SEED_LENGTH);
 
     return ARGON2_OK;
 }
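The hunks above (apparently src/argon2_core.c, given the function set and the hunk context lines) mechanically prefix the Argon2 helpers with rxa2_, which keeps these symbols from colliding with any other Argon2 implementation linked into the same binary. Below is a small sketch exercising the renamed memory helpers with the signatures shown above; the header names and the malloc/free fallback used when no allocate_cbk/free_cbk is set are assumptions carried over from the upstream Argon2 reference code, not shown in this diff.

/* Sketch only. Assumptions: "argon2.h" provides argon2_context and ARGON2_OK,
 * "argon2_core.h" declares the rxa2_* functions, and a zeroed context falls
 * back to malloc/free inside rxa2_allocate_memory / rxa2_free_memory. */
#include <stdint.h>
#include <string.h>
#include "argon2.h"
#include "argon2_core.h"

int scratch_demo(void) {
    argon2_context ctx;
    uint8_t *mem = NULL;

    memset(&ctx, 0, sizeof(ctx));                   /* no custom allocate_cbk/free_cbk */
    if (rxa2_allocate_memory(&ctx, &mem, 4, 1024) != ARGON2_OK)
        return -1;                                  /* 4 elements of 1024 bytes each */

    memset(mem, 0xAB, 4 * 1024);                    /* ... use the scratch memory ... */
    rxa2_secure_wipe_memory(mem, 4 * 1024);         /* unconditional wipe */
    rxa2_free_memory(&ctx, mem, 4, 1024);           /* flag-gated clear, then release */
    return 0;
}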
@@ -67,13 +67,13 @@ typedef struct block_ { uint64_t v[ARGON2_QWORDS_IN_BLOCK]; } block;
 /*****************Functions that work with the block******************/
 
 /* Initialize each byte of the block with @in */
-void init_block_value(block *b, uint8_t in);
+void rxa2_init_block_value(block *b, uint8_t in);
 
 /* Copy block @src to block @dst */
-void copy_block(block *dst, const block *src);
+void rxa2_copy_block(block *dst, const block *src);
 
 /* XOR @src onto @dst bytewise */
-void xor_block(block *dst, const block *src);
+void rxa2_xor_block(block *dst, const block *src);
 
 /*
  * Argon2 instance: memory pointer, number of passes, amount of memory, type,
@@ -122,7 +122,7 @@ typedef struct Argon2_thread_data {
  * @param num the number of elements to be allocated
  * @return ARGON2_OK if @memory is a valid pointer and memory is allocated
  */
-int allocate_memory(const argon2_context *context, uint8_t **memory,
+int rxa2_allocate_memory(const argon2_context *context, uint8_t **memory,
     size_t num, size_t size);
 
 /*
@@ -133,7 +133,7 @@ int allocate_memory(const argon2_context *context, uint8_t **memory,
  * @param size the size in bytes for each element to be deallocated
  * @param num the number of elements to be deallocated
  */
-void free_memory(const argon2_context *context, uint8_t *memory,
+void rxa2_free_memory(const argon2_context *context, uint8_t *memory,
     size_t num, size_t size);
 
 /* Function that securely cleans the memory. This ignores any flags set
@@ -141,14 +141,14 @@ void free_memory(const argon2_context *context, uint8_t *memory,
  * @param mem Pointer to the memory
  * @param s Memory size in bytes
  */
-void secure_wipe_memory(void *v, size_t n);
+void rxa2_secure_wipe_memory(void *v, size_t n);
 
 /* Function that securely clears the memory if FLAG_clear_internal_memory is
  * set. If the flag isn't set, this function does nothing.
  * @param mem Pointer to the memory
  * @param s Memory size in bytes
  */
-void clear_internal_memory(void *v, size_t n);
+void rxa2_clear_internal_memory(void *v, size_t n);
 
 /*
  * Computes absolute position of reference block in the lane following a skewed
@@ -160,7 +160,7 @@ void clear_internal_memory(void *v, size_t n);
  * If so we can reference the current segment
 * @pre All pointers must be valid
 */
-uint32_t index_alpha(const argon2_instance_t *instance,
+uint32_t rxa2_index_alpha(const argon2_instance_t *instance,
     const argon2_position_t *position, uint32_t pseudo_rand,
     int same_lane);
 
@@ -171,7 +171,7 @@ uint32_t index_alpha(const argon2_instance_t *instance,
 * @return ARGON2_OK if everything is all right, otherwise one of error codes
 * (all defined in <argon2.h>
 */
-int validate_inputs(const argon2_context *context);
+int rxa2_validate_inputs(const argon2_context *context);
 
 /*
 * Hashes all the inputs into @a blockhash[PREHASH_DIGEST_LENGTH], clears
@@ -183,7 +183,7 @@ int validate_inputs(const argon2_context *context);
 * @pre @a blockhash must have at least @a PREHASH_DIGEST_LENGTH bytes
 * allocated
 */
-void initial_hash(uint8_t *blockhash, argon2_context *context,
+void rxa2_initial_hash(uint8_t *blockhash, argon2_context *context,
     argon2_type type);
 
 /*
@@ -192,7 +192,7 @@ void initial_hash(uint8_t *blockhash, argon2_context *context,
 * @param blockhash Pointer to the pre-hashing digest
 * @pre blockhash must point to @a PREHASH_SEED_LENGTH allocated values
 */
-void fill_first_blocks(uint8_t *blockhash, const argon2_instance_t *instance);
+void rxa2_fill_first_blocks(uint8_t *blockhash, const argon2_instance_t *instance);
 
 /*
 * Function allocates memory, hashes the inputs with Blake, and creates first
@@ -204,7 +204,7 @@ void fill_first_blocks(uint8_t *blockhash, const argon2_instance_t *instance);
 * @return Zero if successful, -1 if memory failed to allocate. @context->state
 * will be modified if successful.
 */
-int argon_initialize(argon2_instance_t *instance, argon2_context *context);
+int rxa2_argon_initialize(argon2_instance_t *instance, argon2_context *context);
 
 /*
 * XORing the last block of each lane, hashing it, making the tag. Deallocates
@@ -217,7 +217,7 @@ int argon_initialize(argon2_instance_t *instance, argon2_context *context);
 * @pre if context->free_cbk is not NULL, it should point to a function that
 * deallocates memory
 */
-void finalize(const argon2_context *context, argon2_instance_t *instance);
+void rxa2_finalize(const argon2_context *context, argon2_instance_t *instance);
 
 /*
 * Function that fills the segment using previous segments also from other
@@ -227,7 +227,7 @@ void finalize(const argon2_context *context, argon2_instance_t *instance);
 * @param position Current position
 * @pre all block pointers must be valid
 */
-void fill_segment(const argon2_instance_t *instance,
+void rxa2_fill_segment(const argon2_instance_t *instance,
     argon2_position_t position);
 
 /*
@@ -236,7 +236,7 @@ void fill_segment(const argon2_instance_t *instance,
 * @param instance Pointer to the current instance
 * @return ARGON2_OK if successful, @context->state
 */
-int fill_memory_blocks(argon2_instance_t *instance);
+int rxa2_fill_memory_blocks(argon2_instance_t *instance);
 
 #if defined(__cplusplus)
 }
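The header hunks above rename the public declarations to match the definitions. Notably, the commit renames at the declaration and definition sites rather than shimming old names with macros, so nothing outside the library has to carry fragile #defines. Purely as an illustration of the renamed surface (and not part of this commit), an out-of-tree caller that still used the old names could bridge them like this:

/* rxa2_compat.h -- hypothetical, NOT part of this commit; shown only to list
 * the renames declared in the header above (assumed to be argon2_core.h). */
#ifndef RXA2_COMPAT_H
#define RXA2_COMPAT_H

#include "argon2_core.h"

#define init_block_value      rxa2_init_block_value
#define copy_block            rxa2_copy_block
#define xor_block             rxa2_xor_block
#define allocate_memory       rxa2_allocate_memory
#define free_memory           rxa2_free_memory
#define secure_wipe_memory    rxa2_secure_wipe_memory
#define clear_internal_memory rxa2_clear_internal_memory
#define index_alpha           rxa2_index_alpha
#define validate_inputs       rxa2_validate_inputs
#define initial_hash          rxa2_initial_hash
#define fill_first_blocks     rxa2_fill_first_blocks
#define argon_initialize      rxa2_argon_initialize
#define finalize              rxa2_finalize
#define fill_segment          rxa2_fill_segment
#define fill_memory_blocks    rxa2_fill_memory_blocks

#endif /* RXA2_COMPAT_H */

Macro renames like these are blunt instruments (an identifier as generic as finalize can easily collide elsewhere), which is one more reason the in-source rename done by this commit is the cleaner option.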
@@ -48,13 +48,13 @@ static void fill_block(const block *prev_block, const block *ref_block,
     block blockR, block_tmp;
     unsigned i;
 
-    copy_block(&blockR, ref_block);
-    xor_block(&blockR, prev_block);
-    copy_block(&block_tmp, &blockR);
+    rxa2_copy_block(&blockR, ref_block);
+    rxa2_xor_block(&blockR, prev_block);
+    rxa2_copy_block(&block_tmp, &blockR);
     /* Now blockR = ref_block + prev_block and block_tmp = ref_block + prev_block */
     if (with_xor) {
         /* Saving the next block contents for XOR over: */
-        xor_block(&block_tmp, next_block);
+        rxa2_xor_block(&block_tmp, next_block);
         /* Now blockR = ref_block + prev_block and
            block_tmp = ref_block + prev_block + next_block */
     }
@@ -83,8 +83,8 @@ static void fill_block(const block *prev_block, const block *ref_block,
             blockR.v[2 * i + 113]);
     }
 
-    copy_block(next_block, &block_tmp);
-    xor_block(next_block, &blockR);
+    rxa2_copy_block(next_block, &block_tmp);
+    rxa2_xor_block(next_block, &blockR);
 }
 
 static void next_addresses(block *address_block, block *input_block,
@@ -94,7 +94,7 @@ static void next_addresses(block *address_block, block *input_block,
         fill_block(zero_block, address_block, address_block, 0);
 }
 
-void fill_segment(const argon2_instance_t *instance,
+void rxa2_fill_segment(const argon2_instance_t *instance,
     argon2_position_t position) {
     block *ref_block = NULL, *curr_block = NULL;
     block address_block, input_block, zero_block;
@@ -114,8 +114,8 @@ void fill_segment(const argon2_instance_t *instance,
         (position.slice < ARGON2_SYNC_POINTS / 2));
 
     if (data_independent_addressing) {
-        init_block_value(&zero_block, 0);
-        init_block_value(&input_block, 0);
+        rxa2_init_block_value(&zero_block, 0);
+        rxa2_init_block_value(&input_block, 0);
 
         input_block.v[0] = position.pass;
         input_block.v[1] = position.lane;
@@ -180,7 +180,7 @@ void fill_segment(const argon2_instance_t *instance,
          * lane.
          */
         position.index = i;
-        ref_index = index_alpha(instance, &position, pseudo_rand & 0xFFFFFFFF,
+        ref_index = rxa2_index_alpha(instance, &position, pseudo_rand & 0xFFFFFFFF,
            ref_lane == position.lane);
 
         /* 2 Creating a new block */
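The reference filler above composes the renamed block helpers in a fixed pattern before the mixing rounds: blockR becomes ref_block XOR prev_block, and block_tmp additionally folds in the destination block's current contents when with_xor is set. A compact sketch of that prologue, using only helpers renamed in this diff (the block type and header name are taken from argon2_core.h as shown earlier):

/* Sketch mirroring the fill_block prologue shown above; assumes argon2_core.h
 * provides the block type and the rxa2_* declarations. */
#include "argon2_core.h"

static void mix_prologue(block *blockR, block *block_tmp,
                         const block *prev_block, const block *ref_block,
                         const block *next_block, int with_xor) {
    rxa2_copy_block(blockR, ref_block);        /* blockR = ref_block                  */
    rxa2_xor_block(blockR, prev_block);        /* blockR ^= prev_block                */
    rxa2_copy_block(block_tmp, blockR);        /* block_tmp = ref_block ^ prev_block  */
    if (with_xor)
        rxa2_xor_block(block_tmp, next_block); /* keep next_block's old contents too  */
}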
@@ -88,7 +88,7 @@ extern "C" {
     const void *key, size_t keylen);
 
 /* Argon2 Team - Begin Code */
-int blake2b_long(void *out, size_t outlen, const void *in, size_t inlen);
+int rxa2_blake2b_long(void *out, size_t outlen, const void *in, size_t inlen);
 /* Argon2 Team - End Code */
 
 #if defined(__cplusplus)
@@ -338,7 +338,7 @@ fail:
 }
 
 /* Argon2 Team - Begin Code */
-int blake2b_long(void *pout, size_t outlen, const void *in, size_t inlen) {
+int rxa2_blake2b_long(void *pout, size_t outlen, const void *in, size_t inlen) {
     uint8_t *out = (uint8_t *)pout;
     blake2b_state blake_state;
     uint8_t outlen_bytes[sizeof(uint32_t)] = { 0 };
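The two hunks above rename blake2b_long, the Argon2 helper that extends BLAKE2b beyond its native 64-byte digest; rxa2_fill_first_blocks earlier in this diff uses it to expand the prehash seed into full 1024-byte blocks. A standalone sketch of calling the renamed function; the include path is an assumption, and the 0-on-success return convention follows the upstream Argon2 reference rather than anything visible in these hunks.

/* Sketch: produce a digest longer than BLAKE2b's native 64 bytes.
 * Assumes "blake2/blake2.h" declares rxa2_blake2b_long as renamed above. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include "blake2/blake2.h"

int main(void) {
    const char msg[] = "example input";
    uint8_t digest[1024];                 /* same size as an Argon2 block */

    if (rxa2_blake2b_long(digest, sizeof digest, msg, strlen(msg)) != 0)
        return 1;                         /* non-zero return treated as failure (assumed) */

    for (int i = 0; i < 8; ++i)           /* print a short prefix of the digest */
        printf("%02x", digest[i]);
    printf("\n");
    return 0;
}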
@@ -95,9 +95,9 @@ namespace randomx {
         /* 3. Initialization: Hashing inputs, allocating memory, filling first
          * blocks
          */
-        argon_initialize(&instance, &context);
+        rxa2_argon_initialize(&instance, &context);
 
-        fill_memory_blocks(&instance);
+        rxa2_fill_memory_blocks(&instance);
 
         cache->reciprocalCache.clear();
         randomx::Blake2Generator gen(key, keySize);