diff --git a/makefile b/makefile
index 356180f..8aff8c5 100644
--- a/makefile
+++ b/makefile
@@ -11,7 +11,7 @@ SRCDIR=src
OBJDIR=obj
LDFLAGS=
TOBJS=$(addprefix $(OBJDIR)/,instructionsPortable.o TestAluFpu.o)
-ROBJS=$(addprefix $(OBJDIR)/,argon2_core.o argon2_ref.o AssemblyGeneratorX86.o blake2b.o CompiledVirtualMachine.o dataset.o JitCompilerX86.o instructionsPortable.o Instruction.o InterpretedVirtualMachine.o main.o Program.o softAes.o VirtualMachine.o)
+ROBJS=$(addprefix $(OBJDIR)/,argon2_core.o argon2_ref.o AssemblyGeneratorX86.o blake2b.o CompiledVirtualMachine.o dataset.o JitCompilerX86.o instructionsPortable.o Instruction.o InterpretedVirtualMachine.o main.o Program.o softAes.o VirtualMachine.o t1ha2.o)
SRC1=$(addprefix $(SRCDIR)/,TestAluFpu.cpp instructions.hpp Pcg32.hpp)
all: release test
@@ -78,6 +78,9 @@ $(OBJDIR)/softAes.o: $(addprefix $(SRCDIR)/,softAes.cpp softAes.h) | $(OBJDIR)
$(OBJDIR)/VirtualMachine.o: $(addprefix $(SRCDIR)/,VirtualMachine.cpp VirtualMachine.hpp common.hpp dataset.hpp) | $(OBJDIR)
$(CXX) $(CXXFLAGS) -c $(SRCDIR)/VirtualMachine.cpp -o $@
+
+$(OBJDIR)/t1ha2.o: $(addprefix $(SRCDIR)/t1ha/,t1ha2.c t1ha.h t1ha_bits.h) | $(OBJDIR)
+ $(CC) $(CCFLAGS) -c $(SRCDIR)/t1ha/t1ha2.c -o $@
$(OBJDIR):
mkdir $(OBJDIR)
diff --git a/src/VirtualMachine.cpp b/src/VirtualMachine.cpp
index 501c069..4ea7ac0 100644
--- a/src/VirtualMachine.cpp
+++ b/src/VirtualMachine.cpp
@@ -20,6 +20,8 @@ along with RandomX. If not, see <http://www.gnu.org/licenses/>.
#include "VirtualMachine.hpp"
#include "common.hpp"
#include "dataset.hpp"
+#include "t1ha/t1ha.h"
+#include "blake2/blake2.h"
#include
namespace RandomX {
@@ -74,6 +76,9 @@ namespace RandomX {
}
void VirtualMachine::getResult(void* out) {
-
+ uint64_t smallState[sizeof(RegisterFile) / sizeof(uint64_t) + 2];
+ memcpy(smallState, &reg, sizeof(RegisterFile));
+ smallState[17] = t1ha2_atonce128(&smallState[16], scratchpad, ScratchpadSize, reg.r[0].u64);
+ blake2b(out, ResultSize, smallState, sizeof(smallState), nullptr, 0);
}
}
\ No newline at end of file
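A note on the getResult() change above: t1ha2_atonce128() fills the two extra 64-bit slots of smallState (its first argument receives the high half of the 128-bit scratchpad hash, its return value is the other half), and the whole buffer is then hashed with Blake2b into the 32-byte result. A minimal standalone sketch of the same flow, assuming RegisterFile occupies 16 64-bit words (as the indices 16 and 17 imply); the helper name is hypothetical:

#include <cstdint>
#include <cstring>
#include "t1ha/t1ha.h"
#include "blake2/blake2.h"

// Sketch only: free-function equivalent of VirtualMachine::getResult().
void computeResultSketch(const RandomX::RegisterFile& reg, const void* scratchpad, void* out) {
    uint64_t smallState[16 + 2];  // 16 register words + 128-bit scratchpad hash
    memcpy(smallState, &reg, sizeof(RandomX::RegisterFile));
    // extra_result (&smallState[16]) gets the high 64 bits; the return value is the low 64 bits.
    smallState[17] = t1ha2_atonce128(&smallState[16], scratchpad,
                                     RandomX::ScratchpadSize, reg.r[0].u64);
    blake2b(out, RandomX::ResultSize, smallState, sizeof(smallState), nullptr, 0);
}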
diff --git a/src/common.hpp b/src/common.hpp
index 28c95cf..1343e62 100644
--- a/src/common.hpp
+++ b/src/common.hpp
@@ -33,6 +33,7 @@ namespace RandomX {
constexpr int RoundToZero = 3;
constexpr int SeedSize = 32;
+ constexpr int ResultSize = 32;
constexpr int CacheBlockSize = 1024;
constexpr int CacheShift = CacheBlockSize / 2;
diff --git a/src/main.cpp b/src/main.cpp
index f27407e..f3f8124 100644
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -91,7 +91,7 @@ int main(int argc, char** argv) {
0xc3, 0x8b, 0xde, 0xd3, 0x4d, 0x2d, 0xcd, 0xee, 0xf9, 0x5c, 0xd2, 0x0c, 0xef, 0xc1, 0x2f, 0x61, 0xd5, 0x61, 0x09
};
int* nonce = (int*)(blockTemplate + 39);
- uint8_t hash[32];
+ uint8_t hash[RandomX::ResultSize];
if (genAsm) {
*nonce = programCount;
@@ -105,7 +105,7 @@ int main(int argc, char** argv) {
if (softAes)
std::cout << "Using software AES." << std::endl;
- char cumulative[32] = { 0 };
+ char cumulative[RandomX::ResultSize] = { 0 };
RandomX::VirtualMachine* vm;
@@ -138,7 +138,7 @@ int main(int argc, char** argv) {
/*std::string fileName("scratchpad-after-");
fileName = fileName + std::to_string(i) + ".txt";
dump((const char *)vm.getScratchpad(), RandomX::ScratchpadSize, fileName.c_str());*/
- blake2b((void*)hash, sizeof(hash), &vm->getRegisterFile(), sizeof(RandomX::RegisterFile), nullptr, 0);
+ vm->getResult(hash);
if (RandomX::trace) {
outputHex(std::cout, (char*)hash, sizeof(hash));
}
@@ -148,8 +148,10 @@ int main(int argc, char** argv) {
((uint64_t*)cumulative)[3] ^= ((uint64_t*)hash)[3];
}
double elapsed = sw.getElapsed();
- std::cout << "Cumulative output hash: ";
+ std::cout << "Calculated result: ";
outputHex(std::cout, cumulative, sizeof(cumulative));
+ if(programCount == 1000)
+ std::cout << "Reference result: d62ed85c39030cd2c5704fca3a23019f1244f2b03447c9a6b39dea5390ed1d10" << std::endl;
std::cout << "Performance: " << programCount / elapsed << " programs per second" << std::endl;
}
catch (std::exception& e) {
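The benchmark loop above folds each 32-byte result into `cumulative`, presumably XOR-ing it word by word (only the last of the XOR lines is visible in this hunk), and for the canonical 1000-program run it prints the expected reference value next to the calculated one. A hypothetical helper equivalent to that folding step, for illustration only:

#include <cstdint>

// Sketch only: XOR one 32-byte hash into the running 32-byte accumulator.
void accumulateResult(char* cumulative, const uint8_t* hash) {
    for (int i = 0; i < 4; ++i)
        ((uint64_t*)cumulative)[i] ^= ((const uint64_t*)hash)[i];
}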
diff --git a/src/t1ha/t1ha.h b/src/t1ha/t1ha.h
new file mode 100644
index 0000000..6b56e16
--- /dev/null
+++ b/src/t1ha/t1ha.h
@@ -0,0 +1,723 @@
+/*
+ * Copyright (c) 2016-2018 Positive Technologies, https://www.ptsecurity.com,
+ * Fast Positive Hash.
+ *
+ * Portions Copyright (c) 2010-2018 Leonid Yuriev ,
+ * The 1Hippeus project (t1h).
+ *
+ * This software is provided 'as-is', without any express or implied
+ * warranty. In no event will the authors be held liable for any damages
+ * arising from the use of this software.
+ *
+ * Permission is granted to anyone to use this software for any purpose,
+ * including commercial applications, and to alter it and redistribute it
+ * freely, subject to the following restrictions:
+ *
+ * 1. The origin of this software must not be misrepresented; you must not
+ * claim that you wrote the original software. If you use this software
+ * in a product, an acknowledgement in the product documentation would be
+ * appreciated but is not required.
+ * 2. Altered source versions must be plainly marked as such, and must not be
+ * misrepresented as being the original software.
+ * 3. This notice may not be removed or altered from any source distribution.
+ */
+
+/*
+ * t1ha = { Fast Positive Hash, aka "Позитивный Хэш" }
+ * by [Positive Technologies](https://www.ptsecurity.ru)
+ *
+ * Briefly, it is a 64-bit Hash Function:
+ * 1. Created for 64-bit little-endian platforms, in predominantly for x86_64,
+ * but portable and without penalties it can run on any 64-bit CPU.
+ * 2. In most cases up to 15% faster than City64, xxHash, mum-hash, metro-hash
+ * and all others portable hash-functions (which do not use specific
+ * hardware tricks).
+ * 3. Not suitable for cryptography.
+ *
+ * The Future will Positive. Всё будет хорошо.
+ *
+ * ACKNOWLEDGEMENT:
+ * The t1ha was originally developed by Leonid Yuriev (Леонид Юрьев)
+ * for The 1Hippeus project - zerocopy messaging in the spirit of Sparta!
+ */
+
+#pragma once
+
+/*****************************************************************************
+ *
+ * PLEASE PAY ATTENTION TO THE FOLLOWING NOTES
+ * about macros definitions which controls t1ha behaviour and/or performance.
+ *
+ *
+ * 1) T1HA_SYS_UNALIGNED_ACCESS = Defines the system/platform/CPU/architecture
+ * abilities for unaligned data access.
+ *
+ * By default, when T1HA_SYS_UNALIGNED_ACCESS is not defined, it will be
+ * defined on the basis of hardcoded knowledge about the capabilities
+ * of the most common CPU architectures. But you can override this
+ * default behavior when building the t1ha library itself:
+ *
+ * // To disable unaligned access at all.
+ * #define T1HA_SYS_UNALIGNED_ACCESS 0
+ *
+ * // To enable unaligned access, but indicate that it is significantly slow.
+ * #define T1HA_SYS_UNALIGNED_ACCESS 1
+ *
+ * // To enable unaligned access, and indicate that it is efficient.
+ * #define T1HA_SYS_UNALIGNED_ACCESS 2
+ *
+ *
+ * 2) T1HA_USE_FAST_ONESHOT_READ = Controls the data reads at the end of the buffer.
+ *
+ * When defined to non-zero, t1ha will use a 'one shot' method for reading
+ * up to 8 bytes at the end of the data. In this case just one 64-bit read
+ * will be performed even when fewer than 8 bytes are available.
+ *
+ * This is a little bit faster than switching by the length of the data tail.
+ * Unfortunately it will trigger false-positive alarms from Valgrind,
+ * AddressSanitizer and other similar tools.
+ *
+ * By default, t1ha defines it to 1, but you can override this
+ * default behavior when building the t1ha library itself:
+ *
+ * // For slightly faster and smaller code.
+ * #define T1HA_USE_FAST_ONESHOT_READ 1
+ *
+ * // For peace of mind if in doubt.
+ * #define T1HA_USE_FAST_ONESHOT_READ 0
+ *
+ *
+ * 3) T1HA0_RUNTIME_SELECT = Controls the choice of the fastest function at runtime.
+ *
+ * The t1ha library offers the t1ha0() function as the fastest for the current
+ * CPU. But actual CPU features/capabilities may be significantly different,
+ * especially on the x86 platform. Therefore, internally, t1ha0() may require
+ * dynamic dispatching to choose the best implementation.
+ *
+ * By default, t1ha enables such runtime choice and (maybe) the corresponding
+ * indirect calls if it is reasonable, but you can override this default
+ * behavior when building the t1ha library itself:
+ *
+ * // To enable runtime choice of the fastest implementation.
+ * #define T1HA0_RUNTIME_SELECT 1
+ *
+ * // To disable runtime choice of the fastest implementation.
+ * #define T1HA0_RUNTIME_SELECT 0
+ *
+ * When T1HA0_RUNTIME_SELECT is nonzero, the t1ha0_resolve() function can
+ * be used to get the actual t1ha0() implementation address at runtime. This is
+ * useful in two cases:
+ * - calling through a local pointer-to-function is usually a little
+ * bit faster (less overhead) than via a PLT through the DSO boundary;
+ * - GNU Indirect functions (see below) are not supported by the environment
+ * and calling through t1ha0_funcptr is not available and/or expensive.
+ *
+ * 4) T1HA_USE_INDIRECT_FUNCTIONS = Controls usage of GNU Indirect functions.
+ *
+ * In continuation of T1HA0_RUNTIME_SELECT, T1HA_USE_INDIRECT_FUNCTIONS
+ * controls usage of the ELF indirect functions feature. In general, when
+ * available, this reduces the overhead of indirect function calls through
+ * a DSO boundary (https://sourceware.org/glibc/wiki/GNU_IFUNC).
+ *
+ * By default, t1ha engages GNU Indirect functions when they are available
+ * and useful, but you can override this default behavior when building the
+ * t1ha library itself:
+ *
+ * // To enable use of GNU ELF Indirect functions.
+ * #define T1HA_USE_INDIRECT_FUNCTIONS 1
+ *
+ * // To disable use of GNU ELF Indirect functions. This may be useful
+ * // if the actual toolchain or the system's loader doesn't support them.
+ * #define T1HA_USE_INDIRECT_FUNCTIONS 0
+ *
+ * 5) T1HA0_AESNI_AVAILABLE = Controls AES-NI detection and dispatching on x86.
+ *
+ * In continuation of T1HA0_RUNTIME_SELECT, T1HA0_AESNI_AVAILABLE controls
+ * detection and usage of the AES-NI CPU feature. On the other hand, this
+ * requires compiling parts of the t1ha library with the proper options,
+ * which could be difficult or inconvenient in some cases.
+ *
+ * By default, t1ha engages AES-NI for t1ha0() on the x86 platform, but
+ * you can override this default behavior when building the t1ha library itself:
+ *
+ * // To disable detection and usage of AES-NI instructions for t1ha0().
+ * // This may be useful when you are unable to build the t1ha library properly
+ * // or know that AES-NI will be unavailable at deployment.
+ * #define T1HA0_AESNI_AVAILABLE 0
+ *
+ * // To force detection and usage of AES-NI instructions for t1ha0(),
+ * // though there is no known reason anybody would need this.
+ * #define T1HA0_AESNI_AVAILABLE 1
+ *
+ * 6) T1HA0_DISABLED, T1HA1_DISABLED, T1HA2_DISABLED = Controls availability of
+ * t1ha functions.
+ *
+ * In some cases it could be useful to import/use only a few of the t1ha
+ * functions, or just one. So, these definitions allow disabling the
+ * corresponding parts of the t1ha library.
+ *
+ * // To disable t1ha0(), t1ha0_32le(), t1ha0_32be() and all AES-NI.
+ * #define T1HA0_DISABLED
+ *
+ * // To disable t1ha1_le() and t1ha1_be().
+ * #define T1HA1_DISABLED
+ *
+ * // To disable t1ha2_atonce(), t1ha2_atonce128() and so on.
+ * #define T1HA2_DISABLED
+ *
+ *****************************************************************************/
+
+#define T1HA_VERSION_MAJOR 2
+#define T1HA_VERSION_MINOR 1
+#define T1HA_VERSION_RELEASE 0
+
+#ifndef __has_attribute
+#define __has_attribute(x) (0)
+#endif
+
+#ifndef __has_include
+#define __has_include(x) (0)
+#endif
+
+#ifndef __GNUC_PREREQ
+#if defined(__GNUC__) && defined(__GNUC_MINOR__)
+#define __GNUC_PREREQ(maj, min) \
+ ((__GNUC__ << 16) + __GNUC_MINOR__ >= ((maj) << 16) + (min))
+#else
+#define __GNUC_PREREQ(maj, min) 0
+#endif
+#endif /* __GNUC_PREREQ */
+
+#ifndef __CLANG_PREREQ
+#ifdef __clang__
+#define __CLANG_PREREQ(maj, min) \
+ ((__clang_major__ << 16) + __clang_minor__ >= ((maj) << 16) + (min))
+#else
+#define __CLANG_PREREQ(maj, min) (0)
+#endif
+#endif /* __CLANG_PREREQ */
+
+#ifndef __LCC_PREREQ
+#ifdef __LCC__
+#define __LCC_PREREQ(maj, min) \
+ ((__LCC__ << 16) + __LCC_MINOR__ >= ((maj) << 16) + (min))
+#else
+#define __LCC_PREREQ(maj, min) (0)
+#endif
+#endif /* __LCC_PREREQ */
+
+/*****************************************************************************/
+
+#ifdef _MSC_VER
+/* Avoid '16' bytes padding added after data member 't1ha_context::total'
+ * and other warnings from std-headers if warning-level > 3. */
+#pragma warning(push, 3)
+#endif
+
+#if defined(__cplusplus) && __cplusplus >= 201103L
+#include <climits>
+#include <cstddef>
+#include <cstdint>
+#else
+#include <limits.h>
+#include <stddef.h>
+#include <stdint.h>
+#endif
+
+/*****************************************************************************/
+
+#if defined(i386) || defined(__386) || defined(__i386) || defined(__i386__) || \
+ defined(i486) || defined(__i486) || defined(__i486__) || \
+ defined(i586) | defined(__i586) || defined(__i586__) || defined(i686) || \
+ defined(__i686) || defined(__i686__) || defined(_M_IX86) || \
+ defined(_X86_) || defined(__THW_INTEL__) || defined(__I86__) || \
+ defined(__INTEL__) || defined(__x86_64) || defined(__x86_64__) || \
+ defined(__amd64__) || defined(__amd64) || defined(_M_X64) || \
+ defined(_M_AMD64) || defined(__IA32__) || defined(__INTEL__)
+#ifndef __ia32__
+/* LY: define neutral __ia32__ for x86 and x86-64 archs */
+#define __ia32__ 1
+#endif /* __ia32__ */
+#if !defined(__amd64__) && (defined(__x86_64) || defined(__x86_64__) || \
+ defined(__amd64) || defined(_M_X64))
+/* LY: define trusty __amd64__ for all AMD64/x86-64 arch */
+#define __amd64__ 1
+#endif /* __amd64__ */
+#endif /* all x86 */
+
+#if !defined(__BYTE_ORDER__) || !defined(__ORDER_LITTLE_ENDIAN__) || \
+ !defined(__ORDER_BIG_ENDIAN__)
+
+/* *INDENT-OFF* */
+/* clang-format off */
+
+#if defined(__GLIBC__) || defined(__GNU_LIBRARY__) || defined(__ANDROID__) || \
+ defined(HAVE_ENDIAN_H) || __has_include(<endian.h>)
+#include <endian.h>
+#elif defined(__APPLE__) || defined(__MACH__) || defined(__OpenBSD__) || \
+ defined(HAVE_MACHINE_ENDIAN_H) || __has_include(<machine/endian.h>)
+#include <machine/endian.h>
+#elif defined(HAVE_SYS_ISA_DEFS_H) || __has_include(<sys/isa_defs.h>)
+#include <sys/isa_defs.h>
+#elif (defined(HAVE_SYS_TYPES_H) && defined(HAVE_SYS_ENDIAN_H)) || \
+ (__has_include(<sys/types.h>) && __has_include(<sys/endian.h>))
+#include <sys/endian.h>
+#include <sys/types.h>
+#elif defined(__bsdi__) || defined(__DragonFly__) || defined(__FreeBSD__) || \
+ defined(__NETBSD__) || defined(__NetBSD__) || \
+ defined(HAVE_SYS_PARAM_H) || __has_include(<sys/param.h>)
+#include <sys/param.h>
+#endif /* OS */
+
+/* *INDENT-ON* */
+/* clang-format on */
+
+#if defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && defined(__BIG_ENDIAN)
+#define __ORDER_LITTLE_ENDIAN__ __LITTLE_ENDIAN
+#define __ORDER_BIG_ENDIAN__ __BIG_ENDIAN
+#define __BYTE_ORDER__ __BYTE_ORDER
+#elif defined(_BYTE_ORDER) && defined(_LITTLE_ENDIAN) && defined(_BIG_ENDIAN)
+#define __ORDER_LITTLE_ENDIAN__ _LITTLE_ENDIAN
+#define __ORDER_BIG_ENDIAN__ _BIG_ENDIAN
+#define __BYTE_ORDER__ _BYTE_ORDER
+#else
+#define __ORDER_LITTLE_ENDIAN__ 1234
+#define __ORDER_BIG_ENDIAN__ 4321
+
+#if defined(__LITTLE_ENDIAN__) || \
+ (defined(_LITTLE_ENDIAN) && !defined(_BIG_ENDIAN)) || \
+ defined(__ARMEL__) || defined(__THUMBEL__) || defined(__AARCH64EL__) || \
+ defined(__MIPSEL__) || defined(_MIPSEL) || defined(__MIPSEL) || \
+ defined(_M_ARM) || defined(_M_ARM64) || defined(__e2k__) || \
+ defined(__elbrus_4c__) || defined(__elbrus_8c__) || defined(__bfin__) || \
+ defined(__BFIN__) || defined(__ia64__) || defined(_IA64) || \
+ defined(__IA64__) || defined(__ia64) || defined(_M_IA64) || \
+ defined(__itanium__) || defined(__ia32__) || defined(__CYGWIN__) || \
+ defined(_WIN64) || defined(_WIN32) || defined(__TOS_WIN__) || \
+ defined(__WINDOWS__)
+#define __BYTE_ORDER__ __ORDER_LITTLE_ENDIAN__
+
+#elif defined(__BIG_ENDIAN__) || \
+ (defined(_BIG_ENDIAN) && !defined(_LITTLE_ENDIAN)) || \
+ defined(__ARMEB__) || defined(__THUMBEB__) || defined(__AARCH64EB__) || \
+ defined(__MIPSEB__) || defined(_MIPSEB) || defined(__MIPSEB) || \
+ defined(__m68k__) || defined(M68000) || defined(__hppa__) || \
+ defined(__hppa) || defined(__HPPA__) || defined(__sparc__) || \
+ defined(__sparc) || defined(__370__) || defined(__THW_370__) || \
+ defined(__s390__) || defined(__s390x__) || defined(__SYSC_ZARCH__)
+#define __BYTE_ORDER__ __ORDER_BIG_ENDIAN__
+
+#else
+#error __BYTE_ORDER__ should be defined.
+#endif /* Arch */
+
+#endif
+#endif /* __BYTE_ORDER__ || __ORDER_LITTLE_ENDIAN__ || __ORDER_BIG_ENDIAN__ */
+
+/*****************************************************************************/
+
+#ifndef __dll_export
+#if defined(_WIN32) || defined(_WIN64) || defined(__CYGWIN__)
+#if defined(__GNUC__) || __has_attribute(dllexport)
+#define __dll_export __attribute__((dllexport))
+#elif defined(_MSC_VER)
+#define __dll_export __declspec(dllexport)
+#else
+#define __dll_export
+#endif
+#elif defined(__GNUC__) || __has_attribute(visibility)
+#define __dll_export __attribute__((visibility("default")))
+#else
+#define __dll_export
+#endif
+#endif /* __dll_export */
+
+#ifndef __dll_import
+#if defined(_WIN32) || defined(_WIN64) || defined(__CYGWIN__)
+#if defined(__GNUC__) || __has_attribute(dllimport)
+#define __dll_import __attribute__((dllimport))
+#elif defined(_MSC_VER)
+#define __dll_import __declspec(dllimport)
+#else
+#define __dll_import
+#endif
+#else
+#define __dll_import
+#endif
+#endif /* __dll_import */
+
+#ifndef __force_inline
+#ifdef _MSC_VER
+#define __force_inline __forceinline
+#elif __GNUC_PREREQ(3, 2) || __has_attribute(always_inline)
+#define __force_inline __inline __attribute__((always_inline))
+#else
+#define __force_inline __inline
+#endif
+#endif /* __force_inline */
+
+#ifndef T1HA_API
+#if defined(t1ha_EXPORTS)
+#define T1HA_API __dll_export
+#elif defined(t1ha_IMPORTS)
+#define T1HA_API __dll_import
+#else
+#define T1HA_API
+#endif
+#endif /* T1HA_API */
+
+#if defined(_MSC_VER) && defined(__ia32__)
+#define T1HA_ALIGN_PREFIX __declspec(align(32)) /* required only for SIMD */
+#else
+#define T1HA_ALIGN_PREFIX
+#endif /* _MSC_VER */
+
+#if defined(__GNUC__) && defined(__ia32__)
+#define T1HA_ALIGN_SUFFIX \
+ __attribute__((aligned(32))) /* required only for SIMD */
+#else
+#define T1HA_ALIGN_SUFFIX
+#endif /* GCC x86 */
+
+#ifndef T1HA_USE_INDIRECT_FUNCTIONS
+/* GNU ELF indirect functions usage control. For more info please see
+ * https://en.wikipedia.org/wiki/Executable_and_Linkable_Format
+ * and https://sourceware.org/glibc/wiki/GNU_IFUNC */
+#if __has_attribute(ifunc) && \
+ defined(__ELF__) /* ifunc is broken on Darwin/OSX */
+/* Use ifunc/gnu_indirect_function if corresponding attribute is available,
+ * Assuming the compiler will generate proper code even when
+ * the -fstack-protector-all and/or the -fsanitize=address are enabled. */
+#define T1HA_USE_INDIRECT_FUNCTIONS 1
+#elif defined(__ELF__) && !defined(__SANITIZE_ADDRESS__) && \
+ !defined(__SSP_ALL__)
+/* ifunc/gnu_indirect_function will be used on ELF, but only if both
+ * -fstack-protector-all and -fsanitize=address are NOT enabled. */
+#define T1HA_USE_INDIRECT_FUNCTIONS 1
+#else
+#define T1HA_USE_INDIRECT_FUNCTIONS 0
+#endif
+#endif /* T1HA_USE_INDIRECT_FUNCTIONS */
+
+#if __GNUC_PREREQ(4, 0)
+#pragma GCC visibility push(hidden)
+#endif /* __GNUC_PREREQ(4,0) */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef union T1HA_ALIGN_PREFIX t1ha_state256 {
+ uint8_t bytes[32];
+ uint32_t u32[8];
+ uint64_t u64[4];
+ struct {
+ uint64_t a, b, c, d;
+ } n;
+} t1ha_state256_t T1HA_ALIGN_SUFFIX;
+
+typedef struct t1ha_context {
+ t1ha_state256_t state;
+ t1ha_state256_t buffer;
+ size_t partial;
+ uint64_t total;
+} t1ha_context_t;
+
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+
+/******************************************************************************
+ *
+ * Self-testing API.
+ *
+ * Unfortunately, some compilers (currently only Microsoft Visual C/C++) have
+ * bugs which lead the t1ha functions to produce wrong results. This API allows
+ * checking the correctness of the actual code at runtime.
+ *
+ * All check-functions return 0 on success, or -1 in case the corresponding
+ * hash-function failed verification. PLEASE, always perform such checking at
+ * initialization of your code if you are using MSVC or other troublesome compilers.
+ */
+
+T1HA_API int t1ha_selfcheck__all_enabled(void);
+
+#ifndef T1HA2_DISABLED
+T1HA_API int t1ha_selfcheck__t1ha2_atonce(void);
+T1HA_API int t1ha_selfcheck__t1ha2_atonce128(void);
+T1HA_API int t1ha_selfcheck__t1ha2_stream(void);
+T1HA_API int t1ha_selfcheck__t1ha2(void);
+#endif /* T1HA2_DISABLED */
+
+#ifndef T1HA1_DISABLED
+T1HA_API int t1ha_selfcheck__t1ha1_le(void);
+T1HA_API int t1ha_selfcheck__t1ha1_be(void);
+T1HA_API int t1ha_selfcheck__t1ha1(void);
+#endif /* T1HA1_DISABLED */
+
+#ifndef T1HA0_DISABLED
+T1HA_API int t1ha_selfcheck__t1ha0_32le(void);
+T1HA_API int t1ha_selfcheck__t1ha0_32be(void);
+T1HA_API int t1ha_selfcheck__t1ha0(void);
+
+/* Define T1HA0_AESNI_AVAILABLE to 0 to disable AES-NI support. */
+#ifndef T1HA0_AESNI_AVAILABLE
+#if defined(__e2k__) || \
+ (defined(__ia32__) && (!defined(_M_IX86) || _MSC_VER > 1800))
+#define T1HA0_AESNI_AVAILABLE 1
+#else
+#define T1HA0_AESNI_AVAILABLE 0
+#endif
+#endif /* ifndef T1HA0_AESNI_AVAILABLE */
+
+#if T1HA0_AESNI_AVAILABLE
+T1HA_API int t1ha_selfcheck__t1ha0_ia32aes_noavx(void);
+T1HA_API int t1ha_selfcheck__t1ha0_ia32aes_avx(void);
+#ifndef __e2k__
+T1HA_API int t1ha_selfcheck__t1ha0_ia32aes_avx2(void);
+#endif
+#endif /* if T1HA0_AESNI_AVAILABLE */
+#endif /* T1HA0_DISABLED */
+
+/******************************************************************************
+ *
+ * t1ha2 = 64 and 128-bit, SLIGHTLY MORE ATTENTION FOR QUALITY AND STRENGTH.
+ *
+ * - The recommended version of "Fast Positive Hash" with good quality
+ * for checksum, hash tables and fingerprinting.
+ * - Portable and extremely efficient on modern 64-bit CPUs.
+ * Designed for 64-bit little-endian platforms,
+ * in other cases it will run slowly.
+ * - Great quality of hashing and still faster than other non-t1ha hashes.
+ * Provides a streaming mode and a 128-bit result.
+ *
+ * Note: For performance reasons the 64- and 128-bit results are completely
+ * different from each other, i.e. the 64-bit result is NOT any part of the 128-bit one.
+ */
+#ifndef T1HA2_DISABLED
+
+/* The at-once variant with 64-bit result */
+T1HA_API uint64_t t1ha2_atonce(const void *data, size_t length, uint64_t seed);
+
+/* The at-once variant with 128-bit result.
+ * Argument `extra_result` is NOT optional and MUST be valid.
+ * The high 64-bit part of the 128-bit hash will always be unconditionally
+ * stored to the address given by the `extra_result` argument. */
+T1HA_API uint64_t t1ha2_atonce128(uint64_t *__restrict extra_result,
+ const void *__restrict data, size_t length,
+ uint64_t seed);
+
+/* The init/update/final trinity for streaming.
+ * Returns a 64- or 128-bit result depending on the `extra_result` argument. */
+T1HA_API void t1ha2_init(t1ha_context_t *ctx, uint64_t seed_x, uint64_t seed_y);
+T1HA_API void t1ha2_update(t1ha_context_t *__restrict ctx,
+ const void *__restrict data, size_t length);
+
+/* Argument `extra_result` is optional and MAY be NULL.
+ * - If `extra_result` is NOT NULL then the 128-bit hash will be calculated,
+ * and high 64-bit part of it will be stored to the address given
+ * by `extra_result` argument.
+ * - Otherwise the 64-bit hash will be calculated
+ * and returned from function directly.
+ *
+ * Note: For performance reasons the 64- and 128-bit results are completely
+ * different from each other, i.e. the 64-bit result is NOT any part of the 128-bit one. */
+T1HA_API uint64_t t1ha2_final(t1ha_context_t *__restrict ctx,
+ uint64_t *__restrict extra_result /* optional */);
+
+#endif /* T1HA2_DISABLED */
+
+/******************************************************************************
+ *
+ * t1ha1 = 64-bit, BASELINE FAST PORTABLE HASH:
+ *
+ * - Runs faster on 64-bit platforms; in other cases it may run slowly.
+ * - Portable and stable, returns the same 64-bit result
+ * on all architectures and CPUs.
+ * - Unfortunately it fails the "strict avalanche criteria",
+ * see test results at https://github.com/demerphq/smhasher.
+ *
+ * This flaw is insignificant for the t1ha1() purposes and imperceptible
+ * from a practical point of view.
+ * However, nowadays this issue has been resolved in the newer t1ha2(),
+ * which was initially planned to provide a bit more quality.
+ */
+#ifndef T1HA1_DISABLED
+
+/* The little-endian variant. */
+T1HA_API uint64_t t1ha1_le(const void *data, size_t length, uint64_t seed);
+
+/* The big-endian variant. */
+T1HA_API uint64_t t1ha1_be(const void *data, size_t length, uint64_t seed);
+
+#endif /* T1HA1_DISABLED */
+
+/******************************************************************************
+ *
+ * t1ha0 = 64-bit, JUST ONLY FASTER:
+ *
+ * - Provides fast-as-possible hashing for the current CPU, including
+ * 32-bit systems, engaging the available hardware acceleration.
+ * - It is a facade that selects the most quick-and-dirty hash
+ * for the current processor. For instance, on IA32 (x86) the actual function
+ * will be selected at runtime, depending on the current CPU capabilities.
+ *
+ * BE CAREFUL!!! THIS MEANS:
+ *
+ * 1. The quality of the hash is a subject of tradeoffs with performance.
+ * So, the quality and strength of t1ha0() may be lower than t1ha1(),
+ * especially on 32-bit targets, but it is then much faster.
+ * However, it is guaranteed to pass all SMHasher tests.
+ *
+ * 2. There is no warranty that the hash result will be the same for a
+ * particular key on another machine or another version of libt1ha.
+ *
+ * Briefly, such hash results and their derivatives should be
+ * used only at runtime, and should not be persisted or transferred
+ * over a network.
+ *
+ *
+ * When T1HA0_RUNTIME_SELECT is nonzero, the t1ha0_resolve() function can
+ * be used to get the actual t1ha0() implementation address at runtime. This is
+ * useful in two cases:
+ * - calling through a local pointer-to-function is usually a little
+ * bit faster (less overhead) than via a PLT through the DSO boundary;
+ * - GNU Indirect functions (see below) are not supported by the environment
+ * and calling through t1ha0_funcptr is not available and/or expensive.
+ */
+
+#ifndef T1HA0_DISABLED
+
+/* The little-endian variant for 32-bit CPU. */
+uint64_t t1ha0_32le(const void *data, size_t length, uint64_t seed);
+/* The big-endian variant for 32-bit CPU. */
+uint64_t t1ha0_32be(const void *data, size_t length, uint64_t seed);
+
+/* Define T1HA0_AESNI_AVAILABLE to 0 to disable AES-NI support. */
+#ifndef T1HA0_AESNI_AVAILABLE
+#if defined(__e2k__) || \
+ (defined(__ia32__) && (!defined(_M_IX86) || _MSC_VER > 1800))
+#define T1HA0_AESNI_AVAILABLE 1
+#else
+#define T1HA0_AESNI_AVAILABLE 0
+#endif
+#endif /* T1HA0_AESNI_AVAILABLE */
+
+/* Define T1HA0_RUNTIME_SELECT to 0 to disable dispatching t1ha0 at runtime. */
+#ifndef T1HA0_RUNTIME_SELECT
+#if T1HA0_AESNI_AVAILABLE && !defined(__e2k__)
+#define T1HA0_RUNTIME_SELECT 1
+#else
+#define T1HA0_RUNTIME_SELECT 0
+#endif
+#endif /* T1HA0_RUNTIME_SELECT */
+
+#if !T1HA0_RUNTIME_SELECT && !defined(T1HA0_USE_DEFINE)
+#if defined(__LCC__)
+#define T1HA0_USE_DEFINE 1
+#else
+#define T1HA0_USE_DEFINE 0
+#endif
+#endif /* T1HA0_USE_DEFINE */
+
+#if T1HA0_AESNI_AVAILABLE
+uint64_t t1ha0_ia32aes_noavx(const void *data, size_t length, uint64_t seed);
+uint64_t t1ha0_ia32aes_avx(const void *data, size_t length, uint64_t seed);
+#ifndef __e2k__
+uint64_t t1ha0_ia32aes_avx2(const void *data, size_t length, uint64_t seed);
+#endif
+#endif /* T1HA0_AESNI_AVAILABLE */
+
+#if T1HA0_RUNTIME_SELECT
+typedef uint64_t (*t1ha0_function_t)(const void *, size_t, uint64_t);
+T1HA_API t1ha0_function_t t1ha0_resolve(void);
+#if T1HA_USE_INDIRECT_FUNCTIONS
+T1HA_API uint64_t t1ha0(const void *data, size_t length, uint64_t seed);
+#else
+/* Otherwise a function pointer will be used.
+ * Unfortunately this may cause some call overhead. */
+T1HA_API extern uint64_t (*t1ha0_funcptr)(const void *data, size_t length,
+ uint64_t seed);
+static __force_inline uint64_t t1ha0(const void *data, size_t length,
+ uint64_t seed) {
+ return t1ha0_funcptr(data, length, seed);
+}
+#endif /* T1HA_USE_INDIRECT_FUNCTIONS */
+
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+
+#if T1HA0_USE_DEFINE
+
+#if (UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul) && \
+ (!defined(T1HA1_DISABLED) || !defined(T1HA2_DISABLED))
+#if defined(T1HA1_DISABLED)
+#define t1ha0 t1ha2_atonce
+#else
+#define t1ha0 t1ha1_be
+#endif /* T1HA1_DISABLED */
+#else /* 32/64 */
+#define t1ha0 t1ha0_32be
+#endif /* 32/64 */
+
+#else /* T1HA0_USE_DEFINE */
+
+static __force_inline uint64_t t1ha0(const void *data, size_t length,
+ uint64_t seed) {
+#if (UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul) && \
+ (!defined(T1HA1_DISABLED) || !defined(T1HA2_DISABLED))
+#if defined(T1HA1_DISABLED)
+ return t1ha2_atonce(data, length, seed);
+#else
+ return t1ha1_be(data, length, seed);
+#endif /* T1HA1_DISABLED */
+#else /* 32/64 */
+ return t1ha0_32be(data, length, seed);
+#endif /* 32/64 */
+}
+
+#endif /* !T1HA0_USE_DEFINE */
+
+#else /* !T1HA0_RUNTIME_SELECT && __BYTE_ORDER__ != __ORDER_BIG_ENDIAN__ */
+
+#if T1HA0_USE_DEFINE
+
+#if (UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul) && \
+ (!defined(T1HA1_DISABLED) || !defined(T1HA2_DISABLED))
+#if defined(T1HA1_DISABLED)
+#define t1ha0 t1ha2_atonce
+#else
+#define t1ha0 t1ha1_le
+#endif /* T1HA1_DISABLED */
+#else /* 32/64 */
+#define t1ha0 t1ha0_32le
+#endif /* 32/64 */
+
+#else
+
+static __force_inline uint64_t t1ha0(const void *data, size_t length,
+ uint64_t seed) {
+#if (UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul) && \
+ (!defined(T1HA1_DISABLED) || !defined(T1HA2_DISABLED))
+#if defined(T1HA1_DISABLED)
+ return t1ha2_atonce(data, length, seed);
+#else
+ return t1ha1_le(data, length, seed);
+#endif /* T1HA1_DISABLED */
+#else /* 32/64 */
+ return t1ha0_32le(data, length, seed);
+#endif /* 32/64 */
+}
+
+#endif /* !T1HA0_USE_DEFINE */
+
+#endif /* !T1HA0_RUNTIME_SELECT */
+
+#endif /* T1HA0_DISABLED */
+
+#ifdef __cplusplus
+}
+#endif
+
+#if __GNUC_PREREQ(4, 0)
+#pragma GCC visibility pop
+#endif /* __GNUC_PREREQ(4,0) */
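Beyond the one-shot functions used by this patch, the header above also declares a streaming variant of t1ha2 and a runtime self-check API. A minimal usage sketch based on those declarations (the function name and seed values are illustrative, not part of the patch):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include "t1ha/t1ha.h"

// Sketch only: hash a buffer with the init/update/final streaming trinity.
int streamingSketch(const void* data, size_t size) {
    // Per the self-testing notes above, verify the build once at startup
    // (particularly important with MSVC).
    if (t1ha_selfcheck__t1ha2() != 0)
        return -1;
    t1ha_context_t ctx;
    t1ha2_init(&ctx, /*seed_x=*/1, /*seed_y=*/2);
    t1ha2_update(&ctx, data, size);
    uint64_t hi;                           // receives the high 64 bits of the 128-bit result
    uint64_t lo = t1ha2_final(&ctx, &hi);  // the remaining 64 bits are returned directly
    std::printf("t1ha2/128: %016llx%016llx\n",
                (unsigned long long)hi, (unsigned long long)lo);
    return 0;
}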
diff --git a/src/t1ha/t1ha2.c b/src/t1ha/t1ha2.c
new file mode 100644
index 0000000..b05d64c
--- /dev/null
+++ b/src/t1ha/t1ha2.c
@@ -0,0 +1,329 @@
+/*
+ * Copyright (c) 2016-2018 Positive Technologies, https://www.ptsecurity.com,
+ * Fast Positive Hash.
+ *
+ * Portions Copyright (c) 2010-2018 Leonid Yuriev ,
+ * The 1Hippeus project (t1h).
+ *
+ * This software is provided 'as-is', without any express or implied
+ * warranty. In no event will the authors be held liable for any damages
+ * arising from the use of this software.
+ *
+ * Permission is granted to anyone to use this software for any purpose,
+ * including commercial applications, and to alter it and redistribute it
+ * freely, subject to the following restrictions:
+ *
+ * 1. The origin of this software must not be misrepresented; you must not
+ * claim that you wrote the original software. If you use this software
+ * in a product, an acknowledgement in the product documentation would be
+ * appreciated but is not required.
+ * 2. Altered source versions must be plainly marked as such, and must not be
+ * misrepresented as being the original software.
+ * 3. This notice may not be removed or altered from any source distribution.
+ */
+
+/*
+ * t1ha = { Fast Positive Hash, aka "Позитивный Хэш" }
+ * by [Positive Technologies](https://www.ptsecurity.ru)
+ *
+ * Briefly, it is a 64-bit Hash Function:
+ * 1. Created for 64-bit little-endian platforms, in predominantly for x86_64,
+ * but portable and without penalties it can run on any 64-bit CPU.
+ * 2. In most cases up to 15% faster than City64, xxHash, mum-hash, metro-hash
+ * and all others portable hash-functions (which do not use specific
+ * hardware tricks).
+ * 3. Not suitable for cryptography.
+ *
+ * The Future will Positive. Всё будет хорошо.
+ *
+ * ACKNOWLEDGEMENT:
+ * The t1ha was originally developed by Leonid Yuriev (Леонид Юрьев)
+ * for The 1Hippeus project - zerocopy messaging in the spirit of Sparta!
+ */
+
+#ifndef T1HA2_DISABLED
+#include "t1ha_bits.h"
+//#include "t1ha_selfcheck.h"
+
+static __always_inline void init_ab(t1ha_state256_t *s, uint64_t x,
+ uint64_t y) {
+ s->n.a = x;
+ s->n.b = y;
+}
+
+static __always_inline void init_cd(t1ha_state256_t *s, uint64_t x,
+ uint64_t y) {
+ s->n.c = rot64(y, 23) + ~x;
+ s->n.d = ~y + rot64(x, 19);
+}
+
+/* TODO: C++ template in the next version */
+#define T1HA2_UPDATE(ENDIANNES, ALIGNESS, state, v) \
+ do { \
+ t1ha_state256_t *const s = state; \
+ const uint64_t w0 = fetch64_##ENDIANNES##_##ALIGNESS(v + 0); \
+ const uint64_t w1 = fetch64_##ENDIANNES##_##ALIGNESS(v + 1); \
+ const uint64_t w2 = fetch64_##ENDIANNES##_##ALIGNESS(v + 2); \
+ const uint64_t w3 = fetch64_##ENDIANNES##_##ALIGNESS(v + 3); \
+ \
+ const uint64_t d02 = w0 + rot64(w2 + s->n.d, 56); \
+ const uint64_t c13 = w1 + rot64(w3 + s->n.c, 19); \
+ s->n.d ^= s->n.b + rot64(w1, 38); \
+ s->n.c ^= s->n.a + rot64(w0, 57); \
+ s->n.b ^= prime_6 * (c13 + w2); \
+ s->n.a ^= prime_5 * (d02 + w3); \
+ } while (0)
+
+static __always_inline void squash(t1ha_state256_t *s) {
+ s->n.a ^= prime_6 * (s->n.c + rot64(s->n.d, 23));
+ s->n.b ^= prime_5 * (rot64(s->n.c, 19) + s->n.d);
+}
+
+/* TODO: C++ template in the next version */
+#define T1HA2_LOOP(ENDIANNES, ALIGNESS, state, data, len) \
+ do { \
+ const void *detent = (const uint8_t *)data + len - 31; \
+ do { \
+ const uint64_t *v = (const uint64_t *)data; \
+ data = (const uint64_t *)data + 4; \
+ prefetch(data); \
+ T1HA2_UPDATE(le, ALIGNESS, state, v); \
+ } while (likely(data < detent)); \
+ } while (0)
+
+/* TODO: C++ template in the next version */
+#define T1HA2_TAIL_AB(ENDIANNES, ALIGNESS, state, data, len) \
+ do { \
+ t1ha_state256_t *const s = state; \
+ const uint64_t *v = (const uint64_t *)data; \
+ switch (len) { \
+ default: \
+ mixup64(&s->n.a, &s->n.b, fetch64_##ENDIANNES##_##ALIGNESS(v++), \
+ prime_4); \
+ /* fall through */ \
+ case 24: \
+ case 23: \
+ case 22: \
+ case 21: \
+ case 20: \
+ case 19: \
+ case 18: \
+ case 17: \
+ mixup64(&s->n.b, &s->n.a, fetch64_##ENDIANNES##_##ALIGNESS(v++), \
+ prime_3); \
+ /* fall through */ \
+ case 16: \
+ case 15: \
+ case 14: \
+ case 13: \
+ case 12: \
+ case 11: \
+ case 10: \
+ case 9: \
+ mixup64(&s->n.a, &s->n.b, fetch64_##ENDIANNES##_##ALIGNESS(v++), \
+ prime_2); \
+ /* fall through */ \
+ case 8: \
+ case 7: \
+ case 6: \
+ case 5: \
+ case 4: \
+ case 3: \
+ case 2: \
+ case 1: \
+ mixup64(&s->n.b, &s->n.a, tail64_##ENDIANNES##_##ALIGNESS(v, len), \
+ prime_1); \
+ /* fall through */ \
+ case 0: \
+ return final64(s->n.a, s->n.b); \
+ } \
+ } while (0)
+
+/* TODO: C++ template in the next version */
+#define T1HA2_TAIL_ABCD(ENDIANNES, ALIGNESS, state, data, len) \
+ do { \
+ t1ha_state256_t *const s = state; \
+ const uint64_t *v = (const uint64_t *)data; \
+ switch (len) { \
+ default: \
+ mixup64(&s->n.a, &s->n.d, fetch64_##ENDIANNES##_##ALIGNESS(v++), \
+ prime_4); \
+ /* fall through */ \
+ case 24: \
+ case 23: \
+ case 22: \
+ case 21: \
+ case 20: \
+ case 19: \
+ case 18: \
+ case 17: \
+ mixup64(&s->n.b, &s->n.a, fetch64_##ENDIANNES##_##ALIGNESS(v++), \
+ prime_3); \
+ /* fall through */ \
+ case 16: \
+ case 15: \
+ case 14: \
+ case 13: \
+ case 12: \
+ case 11: \
+ case 10: \
+ case 9: \
+ mixup64(&s->n.c, &s->n.b, fetch64_##ENDIANNES##_##ALIGNESS(v++), \
+ prime_2); \
+ /* fall through */ \
+ case 8: \
+ case 7: \
+ case 6: \
+ case 5: \
+ case 4: \
+ case 3: \
+ case 2: \
+ case 1: \
+ mixup64(&s->n.d, &s->n.c, tail64_##ENDIANNES##_##ALIGNESS(v, len), \
+ prime_1); \
+ /* fall through */ \
+ case 0: \
+ return final128(s->n.a, s->n.b, s->n.c, s->n.d, extra_result); \
+ } \
+ } while (0)
+
+static __always_inline uint64_t final128(uint64_t a, uint64_t b, uint64_t c,
+ uint64_t d, uint64_t *h) {
+ mixup64(&a, &b, rot64(c, 41) ^ d, prime_0);
+ mixup64(&b, &c, rot64(d, 23) ^ a, prime_6);
+ mixup64(&c, &d, rot64(a, 19) ^ b, prime_5);
+ mixup64(&d, &a, rot64(b, 31) ^ c, prime_4);
+ *h = c + d;
+ return a ^ b;
+}
+
+//------------------------------------------------------------------------------
+
+uint64_t t1ha2_atonce(const void *data, size_t length, uint64_t seed) {
+ t1ha_state256_t state;
+ init_ab(&state, seed, length);
+
+#if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__EFFICIENT
+ if (unlikely(length > 32)) {
+ init_cd(&state, seed, length);
+ T1HA2_LOOP(le, unaligned, &state, data, length);
+ squash(&state);
+ length &= 31;
+ }
+ T1HA2_TAIL_AB(le, unaligned, &state, data, length);
+#else
+ const bool misaligned = (((uintptr_t)data) & (ALIGNMENT_64 - 1)) != 0;
+ if (misaligned) {
+ if (unlikely(length > 32)) {
+ init_cd(&state, seed, length);
+ T1HA2_LOOP(le, unaligned, &state, data, length);
+ squash(&state);
+ length &= 31;
+ }
+ T1HA2_TAIL_AB(le, unaligned, &state, data, length);
+ } else {
+ if (unlikely(length > 32)) {
+ init_cd(&state, seed, length);
+ T1HA2_LOOP(le, aligned, &state, data, length);
+ squash(&state);
+ length &= 31;
+ }
+ T1HA2_TAIL_AB(le, aligned, &state, data, length);
+ }
+#endif
+}
+
+uint64_t t1ha2_atonce128(uint64_t *__restrict extra_result,
+ const void *__restrict data, size_t length,
+ uint64_t seed) {
+ t1ha_state256_t state;
+ init_ab(&state, seed, length);
+ init_cd(&state, seed, length);
+
+#if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__EFFICIENT
+ if (unlikely(length > 32)) {
+ T1HA2_LOOP(le, unaligned, &state, data, length);
+ length &= 31;
+ }
+ T1HA2_TAIL_ABCD(le, unaligned, &state, data, length);
+#else
+ const bool misaligned = (((uintptr_t)data) & (ALIGNMENT_64 - 1)) != 0;
+ if (misaligned) {
+ if (unlikely(length > 32)) {
+ T1HA2_LOOP(le, unaligned, &state, data, length);
+ length &= 31;
+ }
+ T1HA2_TAIL_ABCD(le, unaligned, &state, data, length);
+ } else {
+ if (unlikely(length > 32)) {
+ T1HA2_LOOP(le, aligned, &state, data, length);
+ length &= 31;
+ }
+ T1HA2_TAIL_ABCD(le, aligned, &state, data, length);
+ }
+#endif
+}
+
+//------------------------------------------------------------------------------
+
+void t1ha2_init(t1ha_context_t *ctx, uint64_t seed_x, uint64_t seed_y) {
+ init_ab(&ctx->state, seed_x, seed_y);
+ init_cd(&ctx->state, seed_x, seed_y);
+ ctx->partial = 0;
+ ctx->total = 0;
+}
+
+void t1ha2_update(t1ha_context_t *__restrict ctx, const void *__restrict data,
+ size_t length) {
+ ctx->total += length;
+
+ if (ctx->partial) {
+ const size_t left = 32 - ctx->partial;
+ const size_t chunk = (length >= left) ? left : length;
+ memcpy(ctx->buffer.bytes + ctx->partial, data, chunk);
+ ctx->partial += chunk;
+ if (ctx->partial < 32) {
+ assert(left >= length);
+ return;
+ }
+ ctx->partial = 0;
+ data = (const uint8_t *)data + chunk;
+ length -= chunk;
+ T1HA2_UPDATE(le, aligned, &ctx->state, ctx->buffer.u64);
+ }
+
+ if (length >= 32) {
+#if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__EFFICIENT
+ T1HA2_LOOP(le, unaligned, &ctx->state, data, length);
+#else
+ const bool misaligned = (((uintptr_t)data) & (ALIGNMENT_64 - 1)) != 0;
+ if (misaligned) {
+ T1HA2_LOOP(le, unaligned, &ctx->state, data, length);
+ } else {
+ T1HA2_LOOP(le, aligned, &ctx->state, data, length);
+ }
+#endif
+ length &= 31;
+ }
+
+ if (length)
+ memcpy(ctx->buffer.bytes, data, ctx->partial = length);
+}
+
+uint64_t t1ha2_final(t1ha_context_t *__restrict ctx,
+ uint64_t *__restrict extra_result) {
+ uint64_t bits = (ctx->total << 3) ^ (UINT64_C(1) << 63);
+#if __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__
+ bits = bswap64(bits);
+#endif
+ t1ha2_update(ctx, &bits, 8);
+
+ if (likely(!extra_result)) {
+ squash(&ctx->state);
+ T1HA2_TAIL_AB(le, aligned, &ctx->state, ctx->buffer.u64, ctx->partial);
+ }
+
+ T1HA2_TAIL_ABCD(le, aligned, &ctx->state, ctx->buffer.u64, ctx->partial);
+}
+
+#endif /* T1HA2_DISABLED */
diff --git a/src/t1ha/t1ha_bits.h b/src/t1ha/t1ha_bits.h
new file mode 100644
index 0000000..7c47851
--- /dev/null
+++ b/src/t1ha/t1ha_bits.h
@@ -0,0 +1,1226 @@
+/*
+ * Copyright (c) 2016-2018 Positive Technologies, https://www.ptsecurity.com,
+ * Fast Positive Hash.
+ *
+ * Portions Copyright (c) 2010-2018 Leonid Yuriev ,
+ * The 1Hippeus project (t1h).
+ *
+ * This software is provided 'as-is', without any express or implied
+ * warranty. In no event will the authors be held liable for any damages
+ * arising from the use of this software.
+ *
+ * Permission is granted to anyone to use this software for any purpose,
+ * including commercial applications, and to alter it and redistribute it
+ * freely, subject to the following restrictions:
+ *
+ * 1. The origin of this software must not be misrepresented; you must not
+ * claim that you wrote the original software. If you use this software
+ * in a product, an acknowledgement in the product documentation would be
+ * appreciated but is not required.
+ * 2. Altered source versions must be plainly marked as such, and must not be
+ * misrepresented as being the original software.
+ * 3. This notice may not be removed or altered from any source distribution.
+ */
+
+/*
+ * t1ha = { Fast Positive Hash, aka "Позитивный Хэш" }
+ * by [Positive Technologies](https://www.ptsecurity.ru)
+ *
+ * Briefly, it is a 64-bit Hash Function:
+ * 1. Created for 64-bit little-endian platforms, in predominantly for x86_64,
+ * but portable and without penalties it can run on any 64-bit CPU.
+ * 2. In most cases up to 15% faster than City64, xxHash, mum-hash, metro-hash
+ * and all others portable hash-functions (which do not use specific
+ * hardware tricks).
+ * 3. Not suitable for cryptography.
+ *
+ * The Future will Positive. Всё будет хорошо.
+ *
+ * ACKNOWLEDGEMENT:
+ * The t1ha was originally developed by Leonid Yuriev (Леонид Юрьев)
+ * for The 1Hippeus project - zerocopy messaging in the spirit of Sparta!
+ */
+
+#pragma once
+
+#if defined(_MSC_VER)
+#pragma warning(disable : 4201) /* nameless struct/union */
+#if _MSC_VER > 1800
+#pragma warning(disable : 4464) /* relative include path contains '..' */
+#endif /* 1800 */
+#endif /* MSVC */
+#include "t1ha.h"
+
+#ifndef T1HA_USE_FAST_ONESHOT_READ
+/* Define it to 1 for slightly faster code.
+ * Unfortunately this may trigger false-positive alarms from Valgrind,
+ * AddressSanitizer and other similar tools.
+ * So, define it to 0 for peace of mind if in doubt. */
+#define T1HA_USE_FAST_ONESHOT_READ 1
+#endif /* T1HA_USE_FAST_ONESHOT_READ */
+
+/*****************************************************************************/
+
+#include <assert.h> /* for assert() */
+#include <stdbool.h> /* for bool */
+#include <string.h> /* for memcpy() */
+
+#if __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__ && \
+ __BYTE_ORDER__ != __ORDER_BIG_ENDIAN__
+#error Unsupported byte order.
+#endif
+
+#define T1HA_UNALIGNED_ACCESS__UNABLE 0
+#define T1HA_UNALIGNED_ACCESS__SLOW 1
+#define T1HA_UNALIGNED_ACCESS__EFFICIENT 2
+
+#ifndef T1HA_SYS_UNALIGNED_ACCESS
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+#define T1HA_SYS_UNALIGNED_ACCESS T1HA_UNALIGNED_ACCESS__EFFICIENT
+#elif defined(__ia32__)
+#define T1HA_SYS_UNALIGNED_ACCESS T1HA_UNALIGNED_ACCESS__EFFICIENT
+#elif defined(__e2k__)
+#define T1HA_SYS_UNALIGNED_ACCESS T1HA_UNALIGNED_ACCESS__SLOW
+#elif defined(__ARM_FEATURE_UNALIGNED)
+#define T1HA_SYS_UNALIGNED_ACCESS T1HA_UNALIGNED_ACCESS__EFFICIENT
+#else
+#define T1HA_SYS_UNALIGNED_ACCESS T1HA_UNALIGNED_ACCESS__UNABLE
+#endif
+#endif /* T1HA_SYS_UNALIGNED_ACCESS */
+
+#define ALIGNMENT_16 2
+#define ALIGNMENT_32 4
+#if UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul
+#define ALIGNMENT_64 8
+#else
+#define ALIGNMENT_64 4
+#endif
+
+#ifndef PAGESIZE
+#define PAGESIZE 4096
+#endif /* PAGESIZE */
+
+/***************************************************************************/
+
+#ifndef __has_builtin
+#define __has_builtin(x) (0)
+#endif
+
+#ifndef __has_warning
+#define __has_warning(x) (0)
+#endif
+
+#ifndef __has_feature
+#define __has_feature(x) (0)
+#endif
+
+#ifndef __has_extension
+#define __has_extension(x) (0)
+#endif
+
+#if __has_feature(address_sanitizer)
+#define __SANITIZE_ADDRESS__ 1
+#endif
+
+#ifndef __optimize
+#if defined(__clang__) && !__has_attribute(optimize)
+#define __optimize(ops)
+#elif defined(__GNUC__) || __has_attribute(optimize)
+#define __optimize(ops) __attribute__((optimize(ops)))
+#else
+#define __optimize(ops)
+#endif
+#endif /* __optimize */
+
+#ifndef __cold
+#if defined(__OPTIMIZE__)
+#if defined(__e2k__)
+#define __cold __optimize(1) __attribute__((cold))
+#elif defined(__clang__) && !__has_attribute(cold)
+/* just put infrequently used functions in separate section */
+#define __cold __attribute__((section("text.unlikely"))) __optimize("Os")
+#elif defined(__GNUC__) || __has_attribute(cold)
+#define __cold __attribute__((cold)) __optimize("Os")
+#else
+#define __cold __optimize("Os")
+#endif
+#else
+#define __cold
+#endif
+#endif /* __cold */
+
+#if __GNUC_PREREQ(4, 4) || defined(__clang__)
+
+#if defined(__ia32__) || defined(__e2k__)
+#include <x86intrin.h>
+#endif
+
+#if defined(__ia32__) && !defined(__cpuid_count)
+#include <cpuid.h>
+#endif
+
+#if defined(__e2k__)
+#include <e2kbuiltin.h>
+#endif
+
+#ifndef likely
+#define likely(cond) __builtin_expect(!!(cond), 1)
+#endif
+
+#ifndef unlikely
+#define unlikely(cond) __builtin_expect(!!(cond), 0)
+#endif
+
+#if __GNUC_PREREQ(4, 5) || __has_builtin(__builtin_unreachable)
+#define unreachable() __builtin_unreachable()
+#endif
+
+#define bswap64(v) __builtin_bswap64(v)
+#define bswap32(v) __builtin_bswap32(v)
+#if __GNUC_PREREQ(4, 8) || __has_builtin(__builtin_bswap16)
+#define bswap16(v) __builtin_bswap16(v)
+#endif
+
+#if !defined(__maybe_unused) && (__GNUC_PREREQ(4, 3) || __has_attribute(unused))
+#define __maybe_unused __attribute__((unused))
+#endif
+
+#if !defined(__always_inline) && \
+ (__GNUC_PREREQ(3, 2) || __has_attribute(always_inline))
+#define __always_inline __inline __attribute__((always_inline))
+#endif
+
+#if defined(__e2k__)
+
+#if __iset__ >= 3
+#define mul_64x64_high(a, b) __builtin_e2k_umulhd(a, b)
+#endif /* __iset__ >= 3 */
+
+#if __iset__ >= 5
+static __maybe_unused __always_inline unsigned
+e2k_add64carry_first(uint64_t base, uint64_t addend, uint64_t *sum) {
+ *sum = base + addend;
+ return (unsigned)__builtin_e2k_addcd_c(base, addend, 0);
+}
+#define add64carry_first(base, addend, sum) \
+ e2k_add64carry_first(base, addend, sum)
+
+static __maybe_unused __always_inline unsigned
+e2k_add64carry_next(unsigned carry, uint64_t base, uint64_t addend,
+ uint64_t *sum) {
+ *sum = __builtin_e2k_addcd(base, addend, carry);
+ return (unsigned)__builtin_e2k_addcd_c(base, addend, carry);
+}
+#define add64carry_next(carry, base, addend, sum) \
+ e2k_add64carry_next(carry, base, addend, sum)
+
+static __maybe_unused __always_inline void e2k_add64carry_last(unsigned carry,
+ uint64_t base,
+ uint64_t addend,
+ uint64_t *sum) {
+ *sum = __builtin_e2k_addcd(base, addend, carry);
+}
+#define add64carry_last(carry, base, addend, sum) \
+ e2k_add64carry_last(carry, base, addend, sum)
+#endif /* __iset__ >= 5 */
+
+#define fetch64_be_aligned(ptr) ((uint64_t)__builtin_e2k_ld_64s_be(ptr))
+#define fetch32_be_aligned(ptr) ((uint32_t)__builtin_e2k_ld_32u_be(ptr))
+
+#endif /* __e2k__ Elbrus */
+
+#elif defined(_MSC_VER)
+
+#if _MSC_FULL_VER < 190024234 && defined(_M_IX86)
+#pragma message( \
+ "For AES-NI at least \"Microsoft C/C++ Compiler\" version 19.00.24234 (Visual Studio 2015 Update 3) is required.")
+#endif
+#if _MSC_FULL_VER < 191526730
+#pragma message( \
+ "It is recommended to use \"Microsoft C/C++ Compiler\" version 19.15.26730 (Visual Studio 2017 15.8) or newer.")
+#endif
+#if _MSC_FULL_VER < 180040629
+#error At least "Microsoft C/C++ Compiler" version 18.00.40629 (Visual Studio 2013 Update 5) is required.
+#endif
+
+#pragma warning(push, 1)
+
+#include <intrin.h>
+#include <stdlib.h>
+#define likely(cond) (cond)
+#define unlikely(cond) (cond)
+#define unreachable() __assume(0)
+#define bswap64(v) _byteswap_uint64(v)
+#define bswap32(v) _byteswap_ulong(v)
+#define bswap16(v) _byteswap_ushort(v)
+#define rot64(v, s) _rotr64(v, s)
+#define rot32(v, s) _rotr(v, s)
+#define __always_inline __forceinline
+
+#if defined(_M_X64) || defined(_M_IA64)
+#pragma intrinsic(_umul128)
+#define mul_64x64_128(a, b, ph) _umul128(a, b, ph)
+#pragma intrinsic(_addcarry_u64)
+#define add64carry_first(base, addend, sum) _addcarry_u64(0, base, addend, sum)
+#define add64carry_next(carry, base, addend, sum) \
+ _addcarry_u64(carry, base, addend, sum)
+#define add64carry_last(carry, base, addend, sum) \
+ (void)_addcarry_u64(carry, base, addend, sum)
+#endif
+
+#if defined(_M_ARM64) || defined(_M_X64) || defined(_M_IA64)
+#pragma intrinsic(__umulh)
+#define mul_64x64_high(a, b) __umulh(a, b)
+#endif
+
+#if defined(_M_IX86)
+#pragma intrinsic(__emulu)
+#define mul_32x32_64(a, b) __emulu(a, b)
+
+#if _MSC_VER >= 1915 /* LY: workaround for SSA-optimizer bug */
+#pragma intrinsic(_addcarry_u32)
+#define add32carry_first(base, addend, sum) _addcarry_u32(0, base, addend, sum)
+#define add32carry_next(carry, base, addend, sum) \
+ _addcarry_u32(carry, base, addend, sum)
+#define add32carry_last(carry, base, addend, sum) \
+ (void)_addcarry_u32(carry, base, addend, sum)
+
+static __forceinline char
+msvc32_add64carry_first(uint64_t base, uint64_t addend, uint64_t *sum) {
+ uint32_t *const sum32 = (uint32_t *)sum;
+ const uint32_t base_32l = (uint32_t)base;
+ const uint32_t base_32h = (uint32_t)(base >> 32);
+ const uint32_t addend_32l = (uint32_t)addend;
+ const uint32_t addend_32h = (uint32_t)(addend >> 32);
+ return add32carry_next(add32carry_first(base_32l, addend_32l, sum32),
+ base_32h, addend_32h, sum32 + 1);
+}
+#define add64carry_first(base, addend, sum) \
+ msvc32_add64carry_first(base, addend, sum)
+
+static __forceinline char msvc32_add64carry_next(char carry, uint64_t base,
+ uint64_t addend,
+ uint64_t *sum) {
+ uint32_t *const sum32 = (uint32_t *)sum;
+ const uint32_t base_32l = (uint32_t)base;
+ const uint32_t base_32h = (uint32_t)(base >> 32);
+ const uint32_t addend_32l = (uint32_t)addend;
+ const uint32_t addend_32h = (uint32_t)(addend >> 32);
+ return add32carry_next(add32carry_next(carry, base_32l, addend_32l, sum32),
+ base_32h, addend_32h, sum32 + 1);
+}
+#define add64carry_next(carry, base, addend, sum) \
+ msvc32_add64carry_next(carry, base, addend, sum)
+
+static __forceinline void msvc32_add64carry_last(char carry, uint64_t base,
+ uint64_t addend,
+ uint64_t *sum) {
+ uint32_t *const sum32 = (uint32_t *)sum;
+ const uint32_t base_32l = (uint32_t)base;
+ const uint32_t base_32h = (uint32_t)(base >> 32);
+ const uint32_t addend_32l = (uint32_t)addend;
+ const uint32_t addend_32h = (uint32_t)(addend >> 32);
+ add32carry_last(add32carry_next(carry, base_32l, addend_32l, sum32), base_32h,
+ addend_32h, sum32 + 1);
+}
+#define add64carry_last(carry, base, addend, sum) \
+ msvc32_add64carry_last(carry, base, addend, sum)
+#endif /* _MSC_FULL_VER >= 190024231 */
+
+#elif defined(_M_ARM)
+#define mul_32x32_64(a, b) _arm_umull(a, b)
+#endif
+
+#pragma warning(pop)
+#pragma warning(disable : 4514) /* 'xyz': unreferenced inline function \
+ has been removed */
+#pragma warning(disable : 4710) /* 'xyz': function not inlined */
+#pragma warning(disable : 4711) /* function 'xyz' selected for \
+ automatic inline expansion */
+#pragma warning(disable : 4127) /* conditional expression is constant */
+#pragma warning(disable : 4702) /* unreachable code */
+#endif /* Compiler */
+
+#ifndef likely
+#define likely(cond) (cond)
+#endif
+#ifndef unlikely
+#define unlikely(cond) (cond)
+#endif
+#ifndef __maybe_unused
+#define __maybe_unused
+#endif
+#ifndef __always_inline
+#define __always_inline __inline
+#endif
+#ifndef unreachable
+#define unreachable() \
+ do { \
+ } while (1)
+#endif
+
+#ifndef bswap64
+#if defined(bswap_64)
+#define bswap64 bswap_64
+#elif defined(__bswap_64)
+#define bswap64 __bswap_64
+#else
+static __always_inline uint64_t bswap64(uint64_t v) {
+ return v << 56 | v >> 56 | ((v << 40) & UINT64_C(0x00ff000000000000)) |
+ ((v << 24) & UINT64_C(0x0000ff0000000000)) |
+ ((v << 8) & UINT64_C(0x000000ff00000000)) |
+ ((v >> 8) & UINT64_C(0x00000000ff000000)) |
+ ((v >> 24) & UINT64_C(0x0000000000ff0000)) |
+ ((v >> 40) & UINT64_C(0x000000000000ff00));
+}
+#endif
+#endif /* bswap64 */
+
+#ifndef bswap32
+#if defined(bswap_32)
+#define bswap32 bswap_32
+#elif defined(__bswap_32)
+#define bswap32 __bswap_32
+#else
+static __always_inline uint32_t bswap32(uint32_t v) {
+ return v << 24 | v >> 24 | ((v << 8) & UINT32_C(0x00ff0000)) |
+ ((v >> 8) & UINT32_C(0x0000ff00));
+}
+#endif
+#endif /* bswap32 */
+
+#ifndef bswap16
+#if defined(bswap_16)
+#define bswap16 bswap_16
+#elif defined(__bswap_16)
+#define bswap16 __bswap_16
+#else
+static __always_inline uint16_t bswap16(uint16_t v) { return v << 8 | v >> 8; }
+#endif
+#endif /* bswap16 */
+
+#ifndef read_unaligned
+#if defined(__GNUC__) || __has_attribute(packed)
+typedef struct {
+ uint8_t unaligned_8;
+ uint16_t unaligned_16;
+ uint32_t unaligned_32;
+ uint64_t unaligned_64;
+} __attribute__((packed)) t1ha_unaligned_proxy;
+#define read_unaligned(ptr, bits) \
+ (((const t1ha_unaligned_proxy *)((const uint8_t *)(ptr)-offsetof( \
+ t1ha_unaligned_proxy, unaligned_##bits))) \
+ ->unaligned_##bits)
+#elif defined(_MSC_VER)
+#pragma warning( \
+ disable : 4235) /* nonstandard extension used: '__unaligned' \
+ * keyword not supported on this architecture */
+#define read_unaligned(ptr, bits) (*(const __unaligned uint##bits##_t *)(ptr))
+#else
+#pragma pack(push, 1)
+typedef struct {
+ uint8_t unaligned_8;
+ uint16_t unaligned_16;
+ uint32_t unaligned_32;
+ uint64_t unaligned_64;
+} t1ha_unaligned_proxy;
+#pragma pack(pop)
+#define read_unaligned(ptr, bits) \
+ (((const t1ha_unaligned_proxy *)((const uint8_t *)(ptr)-offsetof( \
+ t1ha_unaligned_proxy, unaligned_##bits))) \
+ ->unaligned_##bits)
+#endif
+#endif /* read_unaligned */
+
+#ifndef read_aligned
+#if __GNUC_PREREQ(4, 8) || __has_builtin(__builtin_assume_aligned)
+#define read_aligned(ptr, bits) \
+ (*(const uint##bits##_t *)__builtin_assume_aligned(ptr, ALIGNMENT_##bits))
+#elif (__GNUC_PREREQ(3, 3) || __has_attribute(aligned)) && !defined(__clang__)
+#define read_aligned(ptr, bits) \
+ (*(const uint##bits##_t __attribute__((aligned(ALIGNMENT_##bits))) *)(ptr))
+#elif __has_attribute(assume_aligned)
+
+static __always_inline const
+ uint16_t *__attribute__((assume_aligned(ALIGNMENT_16)))
+ cast_aligned_16(const void *ptr) {
+ return (const uint16_t *)ptr;
+}
+static __always_inline const
+ uint32_t *__attribute__((assume_aligned(ALIGNMENT_32)))
+ cast_aligned_32(const void *ptr) {
+ return (const uint32_t *)ptr;
+}
+static __always_inline const
+ uint64_t *__attribute__((assume_aligned(ALIGNMENT_64)))
+ cast_aligned_64(const void *ptr) {
+ return (const uint64_t *)ptr;
+}
+
+#define read_aligned(ptr, bits) (*cast_aligned_##bits(ptr))
+
+#elif defined(_MSC_VER)
+#define read_aligned(ptr, bits) \
+ (*(const __declspec(align(ALIGNMENT_##bits)) uint##bits##_t *)(ptr))
+#else
+#define read_aligned(ptr, bits) (*(const uint##bits##_t *)(ptr))
+#endif
+#endif /* read_aligned */
+
+#ifndef prefetch
+#if (__GNUC_PREREQ(4, 0) || __has_builtin(__builtin_prefetch)) && \
+ !defined(__ia32__)
+#define prefetch(ptr) __builtin_prefetch(ptr)
+#elif defined(_M_ARM64) || defined(_M_ARM)
+#define prefetch(ptr) __prefetch(ptr)
+#else
+#define prefetch(ptr) \
+ do { \
+ (void)(ptr); \
+ } while (0)
+#endif
+#endif /* prefetch */
+
+#if __has_warning("-Wconstant-logical-operand")
+#if defined(__clang__)
+#pragma clang diagnostic ignored "-Wconstant-logical-operand"
+#elif defined(__GNUC__)
+#pragma GCC diagnostic ignored "-Wconstant-logical-operand"
+#else
+#pragma warning disable "constant-logical-operand"
+#endif
+#endif /* -Wconstant-logical-operand */
+
+#if __has_warning("-Wtautological-pointer-compare")
+#if defined(__clang__)
+#pragma clang diagnostic ignored "-Wtautological-pointer-compare"
+#elif defined(__GNUC__)
+#pragma GCC diagnostic ignored "-Wtautological-pointer-compare"
+#else
+#pragma warning disable "tautological-pointer-compare"
+#endif
+#endif /* -Wtautological-pointer-compare */
+
+/***************************************************************************/
+
+#if __GNUC_PREREQ(4, 0)
+#pragma GCC visibility push(hidden)
+#endif /* __GNUC_PREREQ(4,0) */
+
+/*---------------------------------------------------------- Little Endian */
+
+#ifndef fetch16_le_aligned
+static __always_inline uint16_t fetch16_le_aligned(const void *v) {
+ assert(((uintptr_t)v) % ALIGNMENT_16 == 0);
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ return read_aligned(v, 16);
+#else
+ return bswap16(read_aligned(v, 16));
+#endif
+}
+#endif /* fetch16_le_aligned */
+
+#ifndef fetch16_le_unaligned
+static __always_inline uint16_t fetch16_le_unaligned(const void *v) {
+#if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__UNABLE
+ const uint8_t *p = (const uint8_t *)v;
+ return p[0] | (uint16_t)p[1] << 8;
+#elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ return read_unaligned(v, 16);
+#else
+ return bswap16(read_unaligned(v, 16));
+#endif
+}
+#endif /* fetch16_le_unaligned */
+
+#ifndef fetch32_le_aligned
+static __always_inline uint32_t fetch32_le_aligned(const void *v) {
+ assert(((uintptr_t)v) % ALIGNMENT_32 == 0);
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ return read_aligned(v, 32);
+#else
+ return bswap32(read_aligned(v, 32));
+#endif
+}
+#endif /* fetch32_le_aligned */
+
+#ifndef fetch32_le_unaligned
+static __always_inline uint32_t fetch32_le_unaligned(const void *v) {
+#if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__UNABLE
+ return fetch16_le_unaligned(v) |
+ (uint32_t)fetch16_le_unaligned((const uint8_t *)v + 2) << 16;
+#elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ return read_unaligned(v, 32);
+#else
+ return bswap32(read_unaligned(v, 32));
+#endif
+}
+#endif /* fetch32_le_unaligned */
+
+#ifndef fetch64_le_aligned
+static __always_inline uint64_t fetch64_le_aligned(const void *v) {
+ assert(((uintptr_t)v) % ALIGNMENT_64 == 0);
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ return read_aligned(v, 64);
+#else
+ return bswap64(read_aligned(v, 64));
+#endif
+}
+#endif /* fetch64_le_aligned */
+
+#ifndef fetch64_le_unaligned
+static __always_inline uint64_t fetch64_le_unaligned(const void *v) {
+#if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__UNABLE
+ return fetch32_le_unaligned(v) |
+ (uint64_t)fetch32_le_unaligned((const uint8_t *)v + 4) << 32;
+#elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ return read_unaligned(v, 64);
+#else
+ return bswap64(read_unaligned(v, 64));
+#endif
+}
+#endif /* fetch64_le_unaligned */
+
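+/* Reads the last `tail` (1..8) bytes at an aligned address as a
+ * little-endian 64-bit value; only the low three bits of `tail` matter,
+ * so a full 8-byte remainder is treated as 0. */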
+static __always_inline uint64_t tail64_le_aligned(const void *v, size_t tail) {
+ const uint8_t *const p = (const uint8_t *)v;
+#if T1HA_USE_FAST_ONESHOT_READ && !defined(__SANITIZE_ADDRESS__)
+ /* We can perform a 'oneshot' read, which is a little bit faster. */
+ const unsigned shift = ((8 - tail) & 7) << 3;
+ return fetch64_le_aligned(p) & ((~UINT64_C(0)) >> shift);
+#else
+ uint64_t r = 0;
+ switch (tail & 7) {
+ default:
+ unreachable();
+/* fall through */
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ /* For most CPUs this code is better when no byte reordering is needed. */
+ case 0:
+ return fetch64_le_aligned(p);
+ case 7:
+ r = (uint64_t)p[6] << 8;
+ /* fall through */
+ case 6:
+ r += p[5];
+ r <<= 8;
+ /* fall through */
+ case 5:
+ r += p[4];
+ r <<= 32;
+ /* fall through */
+ case 4:
+ return r + fetch32_le_aligned(p);
+ case 3:
+ r = (uint64_t)p[2] << 16;
+ /* fall through */
+ case 2:
+ return r + fetch16_le_aligned(p);
+ case 1:
+ return p[0];
+#else
+ case 0:
+ r = p[7] << 8;
+ /* fall through */
+ case 7:
+ r += p[6];
+ r <<= 8;
+ /* fall through */
+ case 6:
+ r += p[5];
+ r <<= 8;
+ /* fall through */
+ case 5:
+ r += p[4];
+ r <<= 8;
+ /* fall through */
+ case 4:
+ r += p[3];
+ r <<= 8;
+ /* fall through */
+ case 3:
+ r += p[2];
+ r <<= 8;
+ /* fall through */
+ case 2:
+ r += p[1];
+ r <<= 8;
+ /* fall through */
+ case 1:
+ return r + p[0];
+#endif
+ }
+#endif /* T1HA_USE_FAST_ONESHOT_READ */
+}
+
+#if T1HA_USE_FAST_ONESHOT_READ && \
+ T1HA_SYS_UNALIGNED_ACCESS != T1HA_UNALIGNED_ACCESS__UNABLE && \
+ defined(PAGESIZE) && PAGESIZE > 42 && !defined(__SANITIZE_ADDRESS__)
+#define can_read_underside(ptr, size) \
+ (((PAGESIZE - (size)) & (uintptr_t)(ptr)) != 0)
+#endif /* T1HA_USE_FAST_ONESHOT_READ */
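+/* can_read_underside(ptr, 8) is true when `ptr` lies at least 8 bytes past
+ * the start of its page; in that case an 8-byte read may safely start a few
+ * bytes *before* the tail without touching the previous page. Otherwise the
+ * tail readers read forward past the end, which also stays inside the
+ * current page because the page offset is then below 8. */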
+
+static __always_inline uint64_t tail64_le_unaligned(const void *v,
+ size_t tail) {
+ const uint8_t *p = (const uint8_t *)v;
+#if defined(can_read_underside) && \
+ (UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul)
+ /* On some systems (e.g. x86_64) we can perform a 'oneshot' read, which
+ * is a little bit faster. Thanks to Marcin Żukowski for the reminder. */
+ const unsigned offset = (8 - tail) & 7;
+ const unsigned shift = offset << 3;
+ if (likely(can_read_underside(p, 8))) {
+ p -= offset;
+ return fetch64_le_unaligned(p) >> shift;
+ }
+ return fetch64_le_unaligned(p) & ((~UINT64_C(0)) >> shift);
+#else
+ uint64_t r = 0;
+ switch (tail & 7) {
+ default:
+ unreachable();
+/* fall through */
+#if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__EFFICIENT && \
+ __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ /* For most CPUs this code is better when no copying for alignment
+ * or byte reordering is needed. */
+ case 0:
+ return fetch64_le_unaligned(p);
+ case 7:
+ r = (uint64_t)p[6] << 8;
+ /* fall through */
+ case 6:
+ r += p[5];
+ r <<= 8;
+ /* fall through */
+ case 5:
+ r += p[4];
+ r <<= 32;
+ /* fall through */
+ case 4:
+ return r + fetch32_le_unaligned(p);
+ case 3:
+ r = (uint64_t)p[2] << 16;
+ /* fall through */
+ case 2:
+ return r + fetch16_le_unaligned(p);
+ case 1:
+ return p[0];
+#else
+ /* For most CPUs this code is better than copying for alignment
+ * and/or byte reordering. */
+ case 0:
+ r = p[7] << 8;
+ /* fall through */
+ case 7:
+ r += p[6];
+ r <<= 8;
+ /* fall through */
+ case 6:
+ r += p[5];
+ r <<= 8;
+ /* fall through */
+ case 5:
+ r += p[4];
+ r <<= 8;
+ /* fall through */
+ case 4:
+ r += p[3];
+ r <<= 8;
+ /* fall through */
+ case 3:
+ r += p[2];
+ r <<= 8;
+ /* fall through */
+ case 2:
+ r += p[1];
+ r <<= 8;
+ /* fall through */
+ case 1:
+ return r + p[0];
+#endif
+ }
+#endif /* can_read_underside */
+}
+
+/*------------------------------------------------------------- Big Endian */
+
+#ifndef fetch16_be_aligned
+static __maybe_unused __always_inline uint16_t
+fetch16_be_aligned(const void *v) {
+ assert(((uintptr_t)v) % ALIGNMENT_16 == 0);
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ return read_aligned(v, 16);
+#else
+ return bswap16(read_aligned(v, 16));
+#endif
+}
+#endif /* fetch16_be_aligned */
+
+#ifndef fetch16_be_unaligned
+static __maybe_unused __always_inline uint16_t
+fetch16_be_unaligned(const void *v) {
+#if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__UNABLE
+ const uint8_t *p = (const uint8_t *)v;
+ return (uint16_t)p[0] << 8 | p[1];
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ return read_unaligned(v, 16);
+#else
+ return bswap16(read_unaligned(v, 16));
+#endif
+}
+#endif /* fetch16_be_unaligned */
+
+#ifndef fetch32_be_aligned
+static __maybe_unused __always_inline uint32_t
+fetch32_be_aligned(const void *v) {
+ assert(((uintptr_t)v) % ALIGNMENT_32 == 0);
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ return read_aligned(v, 32);
+#else
+ return bswap32(read_aligned(v, 32));
+#endif
+}
+#endif /* fetch32_be_aligned */
+
+#ifndef fetch32_be_unaligned
+static __maybe_unused __always_inline uint32_t
+fetch32_be_unaligned(const void *v) {
+#if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__UNABLE
+ return (uint32_t)fetch16_be_unaligned(v) << 16 |
+ fetch16_be_unaligned((const uint8_t *)v + 2);
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ return read_unaligned(v, 32);
+#else
+ return bswap32(read_unaligned(v, 32));
+#endif
+}
+#endif /* fetch32_be_unaligned */
+
+#ifndef fetch64_be_aligned
+static __maybe_unused __always_inline uint64_t
+fetch64_be_aligned(const void *v) {
+ assert(((uintptr_t)v) % ALIGNMENT_64 == 0);
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ return read_aligned(v, 64);
+#else
+ return bswap64(read_aligned(v, 64));
+#endif
+}
+#endif /* fetch64_be_aligned */
+
+#ifndef fetch64_be_unaligned
+static __maybe_unused __always_inline uint64_t
+fetch64_be_unaligned(const void *v) {
+#if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__UNABLE
+ return (uint64_t)fetch32_be_unaligned(v) << 32 |
+ fetch32_be_unaligned((const uint8_t *)v + 4);
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ return read_unaligned(v, 64);
+#else
+ return bswap64(read_unaligned(v, 64));
+#endif
+}
+#endif /* fetch64_be_unaligned */
+
+static __maybe_unused __always_inline uint64_t tail64_be_aligned(const void *v,
+ size_t tail) {
+ const uint8_t *const p = (const uint8_t *)v;
+#if T1HA_USE_FAST_ONESHOT_READ && !defined(__SANITIZE_ADDRESS__)
+ /* We can perform a 'oneshot' read, which is a little bit faster. */
+ const unsigned shift = ((8 - tail) & 7) << 3;
+ return fetch64_be_aligned(p) >> shift;
+#else
+ switch (tail & 7) {
+ default:
+ unreachable();
+/* fall through */
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ /* For most CPUs this code is better when no byte reordering is needed. */
+ case 1:
+ return p[0];
+ case 2:
+ return fetch16_be_aligned(p);
+ case 3:
+ return (uint32_t)fetch16_be_aligned(p) << 8 | p[2];
+ case 4:
+ return fetch32_be_aligned(p);
+ case 5:
+ return (uint64_t)fetch32_be_aligned(p) << 8 | p[4];
+ case 6:
+ return (uint64_t)fetch32_be_aligned(p) << 16 | fetch16_be_aligned(p + 4);
+ case 7:
+ return (uint64_t)fetch32_be_aligned(p) << 24 |
+ (uint32_t)fetch16_be_aligned(p + 4) << 8 | p[6];
+ case 0:
+ return fetch64_be_aligned(p);
+#else
+ case 1:
+ return p[0];
+ case 2:
+ return p[1] | (uint32_t)p[0] << 8;
+ case 3:
+ return p[2] | (uint32_t)p[1] << 8 | (uint32_t)p[0] << 16;
+ case 4:
+ return p[3] | (uint32_t)p[2] << 8 | (uint32_t)p[1] << 16 |
+ (uint32_t)p[0] << 24;
+ case 5:
+ return p[4] | (uint32_t)p[3] << 8 | (uint32_t)p[2] << 16 |
+ (uint32_t)p[1] << 24 | (uint64_t)p[0] << 32;
+ case 6:
+ return p[5] | (uint32_t)p[4] << 8 | (uint32_t)p[3] << 16 |
+ (uint32_t)p[2] << 24 | (uint64_t)p[1] << 32 | (uint64_t)p[0] << 40;
+ case 7:
+ return p[6] | (uint32_t)p[5] << 8 | (uint32_t)p[4] << 16 |
+ (uint32_t)p[3] << 24 | (uint64_t)p[2] << 32 | (uint64_t)p[1] << 40 |
+ (uint64_t)p[0] << 48;
+ case 0:
+ return p[7] | (uint32_t)p[6] << 8 | (uint32_t)p[5] << 16 |
+ (uint32_t)p[4] << 24 | (uint64_t)p[3] << 32 | (uint64_t)p[2] << 40 |
+ (uint64_t)p[1] << 48 | (uint64_t)p[0] << 56;
+#endif
+ }
+#endif /* T1HA_USE_FAST_ONESHOT_READ */
+}
+
+static __maybe_unused __always_inline uint64_t
+tail64_be_unaligned(const void *v, size_t tail) {
+ const uint8_t *p = (const uint8_t *)v;
+#if defined(can_read_underside) && \
+ (UINTPTR_MAX > 0xffffFFFFul || ULONG_MAX > 0xffffFFFFul)
+ /* On some systems (e.g. x86_64) we can perform a 'oneshot' read, which
+ * is a little bit faster. Thanks to Marcin Żukowski for the reminder. */
+ const unsigned offset = (8 - tail) & 7;
+ const unsigned shift = offset << 3;
+ if (likely(can_read_underside(p, 8))) {
+ p -= offset;
+ return fetch64_be_unaligned(p) & ((~UINT64_C(0)) >> shift);
+ }
+ return fetch64_be_unaligned(p) >> shift;
+#else
+ switch (tail & 7) {
+ default:
+ unreachable();
+/* fall through */
+#if T1HA_SYS_UNALIGNED_ACCESS == T1HA_UNALIGNED_ACCESS__EFFICIENT && \
+ __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ /* For most CPUs this code is better when no copying for alignment
+ * or byte reordering is needed. */
+ case 1:
+ return p[0];
+ case 2:
+ return fetch16_be_unaligned(p);
+ case 3:
+ return (uint32_t)fetch16_be_unaligned(p) << 8 | p[2];
+ case 4:
+ return fetch32_be_unaligned(p);
+ case 5:
+ return (uint64_t)fetch32_be_unaligned(p) << 8 | p[4];
+ case 6:
+ return (uint64_t)fetch32_be_unaligned(p) << 16 |
+ fetch16_be_unaligned(p + 4);
+ case 7:
+ return (uint64_t)fetch32_be_unaligned(p) << 24 |
+ (uint32_t)fetch16_be_unaligned(p + 4) << 8 | p[6];
+ case 0:
+ return fetch64_be_unaligned(p);
+#else
+ /* For most CPUs this code is better than copying for alignment
+ * and/or byte reordering. */
+ case 1:
+ return p[0];
+ case 2:
+ return p[1] | (uint32_t)p[0] << 8;
+ case 3:
+ return p[2] | (uint32_t)p[1] << 8 | (uint32_t)p[0] << 16;
+ case 4:
+ return p[3] | (uint32_t)p[2] << 8 | (uint32_t)p[1] << 16 |
+ (uint32_t)p[0] << 24;
+ case 5:
+ return p[4] | (uint32_t)p[3] << 8 | (uint32_t)p[2] << 16 |
+ (uint32_t)p[1] << 24 | (uint64_t)p[0] << 32;
+ case 6:
+ return p[5] | (uint32_t)p[4] << 8 | (uint32_t)p[3] << 16 |
+ (uint32_t)p[2] << 24 | (uint64_t)p[1] << 32 | (uint64_t)p[0] << 40;
+ case 7:
+ return p[6] | (uint32_t)p[5] << 8 | (uint32_t)p[4] << 16 |
+ (uint32_t)p[3] << 24 | (uint64_t)p[2] << 32 | (uint64_t)p[1] << 40 |
+ (uint64_t)p[0] << 48;
+ case 0:
+ return p[7] | (uint32_t)p[6] << 8 | (uint32_t)p[5] << 16 |
+ (uint32_t)p[4] << 24 | (uint64_t)p[3] << 32 | (uint64_t)p[2] << 40 |
+ (uint64_t)p[1] << 48 | (uint64_t)p[0] << 56;
+#endif
+ }
+#endif /* can_read_underside */
+}
+
+/***************************************************************************/
+
+#ifndef rot64
+static __always_inline uint64_t rot64(uint64_t v, unsigned s) {
+ return (v >> s) | (v << (64 - s));
+}
+#endif /* rot64 */
+
+#ifndef mul_32x32_64
+static __always_inline uint64_t mul_32x32_64(uint32_t a, uint32_t b) {
+ return a * (uint64_t)b;
+}
+#endif /* mul_32x32_64 */
+
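+/* Portable add-with-carry helpers: they chain carries so that the 128-bit
+ * multiplication fallback below can be composed from 64-bit additions on
+ * targets without __int128 or a widening-multiply intrinsic. */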
+#ifndef add64carry_first
+static __maybe_unused __always_inline unsigned
+add64carry_first(uint64_t base, uint64_t addend, uint64_t *sum) {
+#if __has_builtin(__builtin_addcll)
+ unsigned long long carryout;
+ *sum = __builtin_addcll(base, addend, 0, &carryout);
+ return (unsigned)carryout;
+#else
+ *sum = base + addend;
+ return *sum < addend;
+#endif /* __has_builtin(__builtin_addcll) */
+}
+#endif /* add64carry_first */
+
+#ifndef add64carry_next
+static __maybe_unused __always_inline unsigned
+add64carry_next(unsigned carry, uint64_t base, uint64_t addend, uint64_t *sum) {
+#if __has_builtin(__builtin_addcll)
+ unsigned long long carryout;
+ *sum = __builtin_addcll(base, addend, carry, &carryout);
+ return (unsigned)carryout;
+#else
+ *sum = base + addend + carry;
+ return *sum < addend || (carry && *sum == addend);
+#endif /* __has_builtin(__builtin_addcll) */
+}
+#endif /* add64carry_next */
+
+#ifndef add64carry_last
+static __maybe_unused __always_inline void
+add64carry_last(unsigned carry, uint64_t base, uint64_t addend, uint64_t *sum) {
+#if __has_builtin(__builtin_addcll)
+ unsigned long long carryout;
+ *sum = __builtin_addcll(base, addend, carry, &carryout);
+ (void)carryout;
+#else
+ *sum = base + addend + carry;
+#endif /* __has_builtin(__builtin_addcll) */
+}
+#endif /* add64carry_last */
+
+#ifndef mul_64x64_128
+static __maybe_unused __always_inline uint64_t mul_64x64_128(uint64_t a,
+ uint64_t b,
+ uint64_t *h) {
+#if defined(__SIZEOF_INT128__) || \
+ (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
+ __uint128_t r = (__uint128_t)a * (__uint128_t)b;
+ /* modern GCC could nicely optimize this */
+ *h = (uint64_t)(r >> 64);
+ return (uint64_t)r;
+#elif defined(mul_64x64_high)
+ *h = mul_64x64_high(a, b);
+ return a * b;
+#else
+ /* performs 64x64 to 128 bit multiplication */
+ const uint64_t ll = mul_32x32_64((uint32_t)a, (uint32_t)b);
+ const uint64_t lh = mul_32x32_64(a >> 32, (uint32_t)b);
+ const uint64_t hl = mul_32x32_64((uint32_t)a, b >> 32);
+ const uint64_t hh = mul_32x32_64(a >> 32, b >> 32);
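+ /* Schoolbook decomposition: a * b == (hh << 64) + ((hl + lh) << 32) + ll,
+ * accumulated with the carry helpers above so the high word stays exact. */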
+
+ /* A few simplifications would be possible here for 32-bit architectures,
+ * but they would break compatibility with the original 64-bit version.
+ * That is a poor trade-off: a 32-bit t1ha would still be (relatively)
+ * slow, yet no longer compatible. */
+ uint64_t l;
+ add64carry_last(add64carry_first(ll, lh << 32, &l), hh, lh >> 32, h);
+ add64carry_last(add64carry_first(l, hl << 32, &l), *h, hl >> 32, h);
+ return l;
+#endif
+}
+#endif /* mul_64x64_128() */
+
+#ifndef mul_64x64_high
+static __maybe_unused __always_inline uint64_t mul_64x64_high(uint64_t a,
+ uint64_t b) {
+ uint64_t h;
+ mul_64x64_128(a, b, &h);
+ return h;
+}
+#endif /* mul_64x64_high */
+
+/***************************************************************************/
+
+/* 'magic' primes */
+static const uint64_t prime_0 = UINT64_C(0xEC99BF0D8372CAAB);
+static const uint64_t prime_1 = UINT64_C(0x82434FE90EDCEF39);
+static const uint64_t prime_2 = UINT64_C(0xD4F06DB99D67BE4B);
+static const uint64_t prime_3 = UINT64_C(0xBD9CACC22C6E9571);
+static const uint64_t prime_4 = UINT64_C(0x9C06FAF4D023E3AB);
+static const uint64_t prime_5 = UINT64_C(0xC060724A8424F345);
+static const uint64_t prime_6 = UINT64_C(0xCB5AF53AE3AAAC31);
+
+/* xor high and low parts of full 128-bit product */
+static __maybe_unused __always_inline uint64_t mux64(uint64_t v,
+ uint64_t prime) {
+ uint64_t l, h;
+ l = mul_64x64_128(v, prime, &h);
+ return l ^ h;
+}
+
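+/* Final avalanche: combines the two state words with rotations and prime
+ * multiplications, then folds the resulting 128-bit product back to
+ * 64 bits via mux64(). */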
+static __always_inline uint64_t final64(uint64_t a, uint64_t b) {
+ uint64_t x = (a + rot64(b, 41)) * prime_0;
+ uint64_t y = (rot64(a, 23) + b) * prime_6;
+ return mux64(x ^ y, prime_5);
+}
+
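+/* Injects a 64-bit value into the (a, b) state pair: the low half of the
+ * 128-bit product (*b + v) * prime is XORed into *a, the high half is
+ * added into *b. */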
+static __always_inline void mixup64(uint64_t *__restrict a,
+ uint64_t *__restrict b, uint64_t v,
+ uint64_t prime) {
+ uint64_t h;
+ *a ^= mul_64x64_128(*b + v, prime, &h);
+ *b += h;
+}
+
+/***************************************************************************/
+
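+/* Minimal 128-bit helper type: the native __uint128_t is used when the
+ * compiler provides one; otherwise two 64-bit halves are stored in an
+ * order matching the target byte order, keeping both views interchangeable. */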
+typedef union t1ha_uint128 {
+#if defined(__SIZEOF_INT128__) || \
+ (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
+ __uint128_t v;
+#endif
+ struct {
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ uint64_t l, h;
+#else
+ uint64_t h, l;
+#endif
+ };
+} t1ha_uint128_t;
+
+static __always_inline t1ha_uint128_t not128(const t1ha_uint128_t v) {
+ t1ha_uint128_t r;
+#if defined(__SIZEOF_INT128__) || \
+ (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
+ r.v = ~v.v;
+#else
+ r.l = ~v.l;
+ r.h = ~v.h;
+#endif
+ return r;
+}
+
+static __always_inline t1ha_uint128_t left128(const t1ha_uint128_t v,
+ unsigned s) {
+ t1ha_uint128_t r;
+ assert(s < 128);
+#if defined(__SIZEOF_INT128__) || \
+ (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
+ r.v = v.v << s;
+#else
+ r.l = (s < 64) ? v.l << s : 0;
+ r.h = (s < 64) ? (v.h << s) | (s ? v.l >> (64 - s) : 0) : v.l << (s - 64);
+#endif
+ return r;
+}
+
+static __always_inline t1ha_uint128_t right128(const t1ha_uint128_t v,
+ unsigned s) {
+ t1ha_uint128_t r;
+ assert(s < 128);
+#if defined(__SIZEOF_INT128__) || \
+ (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
+ r.v = v.v >> s;
+#else
+ r.l = (s < 64) ? (s ? v.h << (64 - s) : 0) | (v.l >> s) : v.h >> (s - 64);
+ r.h = (s < 64) ? v.h >> s : 0;
+#endif
+ return r;
+}
+
+static __always_inline t1ha_uint128_t or128(t1ha_uint128_t x,
+ t1ha_uint128_t y) {
+ t1ha_uint128_t r;
+#if defined(__SIZEOF_INT128__) || \
+ (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
+ r.v = x.v | y.v;
+#else
+ r.l = x.l | y.l;
+ r.h = x.h | y.h;
+#endif
+ return r;
+}
+
+static __always_inline t1ha_uint128_t xor128(t1ha_uint128_t x,
+ t1ha_uint128_t y) {
+ t1ha_uint128_t r;
+#if defined(__SIZEOF_INT128__) || \
+ (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
+ r.v = x.v ^ y.v;
+#else
+ r.l = x.l ^ y.l;
+ r.h = x.h ^ y.h;
+#endif
+ return r;
+}
+
+static __always_inline t1ha_uint128_t rot128(t1ha_uint128_t v, unsigned s) {
+ s &= 127;
+#if defined(__SIZEOF_INT128__) || \
+ (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
+ if (s) /* avoid an undefined shift by 128 when s == 0 */
+   v.v = (v.v << (128 - s)) | (v.v >> s);
+ return v;
+#else
+ return s ? or128(left128(v, 128 - s), right128(v, s)) : v;
+#endif
+}
+
+static __always_inline t1ha_uint128_t add128(t1ha_uint128_t x,
+ t1ha_uint128_t y) {
+ t1ha_uint128_t r;
+#if defined(__SIZEOF_INT128__) || \
+ (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
+ r.v = x.v + y.v;
+#else
+ add64carry_last(add64carry_first(x.l, y.l, &r.l), x.h, y.h, &r.h);
+#endif
+ return r;
+}
+
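+/* 128 x 128 -> 128 multiplication (modulo 2^128): full 64x64 product of
+ * the low halves plus both cross products folded into the high half. */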
+static __always_inline t1ha_uint128_t mul128(t1ha_uint128_t x,
+ t1ha_uint128_t y) {
+ t1ha_uint128_t r;
+#if defined(__SIZEOF_INT128__) || \
+ (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
+ r.v = x.v * y.v;
+#else
+ r.l = mul_64x64_128(x.l, y.l, &r.h);
+ r.h += x.l * y.h + y.l * x.h;
+#endif
+ return r;
+}
+
+/***************************************************************************/
+
+#if T1HA0_AESNI_AVAILABLE && defined(__ia32__)
+uint64_t t1ha_ia32cpu_features(void);
+
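+/* Assumed layout of the feature word (t1ha_ia32cpu_features() is defined
+ * elsewhere): the low 32 bits carry CPUID(1).ECX (AES-NI, OSXSAVE, AVX
+ * flags) and the upper 32 bits carry CPUID(7).EBX (AVX2 flag); the bit
+ * tests below rely on that packing. */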
+static __always_inline bool t1ha_ia32_AESNI_avail(uint64_t ia32cpu_features) {
+ /* check for AES-NI */
+ return (ia32cpu_features & UINT32_C(0x02000000)) != 0;
+}
+
+static __always_inline bool t1ha_ia32_AVX_avail(uint64_t ia32cpu_features) {
+ /* check for any AVX */
+ return (ia32cpu_features & UINT32_C(0x1A000000)) == UINT32_C(0x1A000000);
+}
+
+static __always_inline bool t1ha_ia32_AVX2_avail(uint64_t ia32cpu_features) {
+ /* check for 'Advanced Vector Extensions 2' */
+ return ((ia32cpu_features >> 32) & 32) != 0;
+}
+
+#endif /* T1HA0_AESNI_AVAILABLE && __ia32__ */