diff --git a/Qubic.sln b/Qubic.sln index 5c88dfdd..61866fd6 100644 --- a/Qubic.sln +++ b/Qubic.sln @@ -14,7 +14,8 @@ Global Release|x64 = Release|x64 EndGlobalSection GlobalSection(ProjectConfigurationPlatforms) = postSolution - {6A7C1322-658B-4FD8-8150-A2F88202B57F}.Debug|x64.ActiveCfg = Release|x64 + {6A7C1322-658B-4FD8-8150-A2F88202B57F}.Debug|x64.ActiveCfg = Debug|x64 + {6A7C1322-658B-4FD8-8150-A2F88202B57F}.Debug|x64.Build.0 = Debug|x64 {6A7C1322-658B-4FD8-8150-A2F88202B57F}.LinuxRelease|x64.ActiveCfg = LinuxRelease|x64 {6A7C1322-658B-4FD8-8150-A2F88202B57F}.LinuxRelease|x64.Build.0 = LinuxRelease|x64 {6A7C1322-658B-4FD8-8150-A2F88202B57F}.Release|x64.ActiveCfg = Release|x64 @@ -22,7 +23,6 @@ Global {30E8E249-6B00-4575-BCDF-BE2445D5E099}.Debug|x64.ActiveCfg = Debug|x64 {30E8E249-6B00-4575-BCDF-BE2445D5E099}.Debug|x64.Build.0 = Debug|x64 {30E8E249-6B00-4575-BCDF-BE2445D5E099}.LinuxRelease|x64.ActiveCfg = Release|x64 - {30E8E249-6B00-4575-BCDF-BE2445D5E099}.LinuxRelease|x64.Build.0 = Release|x64 {30E8E249-6B00-4575-BCDF-BE2445D5E099}.Release|x64.ActiveCfg = Release|x64 {30E8E249-6B00-4575-BCDF-BE2445D5E099}.Release|x64.Build.0 = Release|x64 EndGlobalSection diff --git a/README.md b/README.md index 2aed4ff9..7841b5fb 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ Qubic Node Source Code - this repository contains the source code of a full qubi ## Prerequisites To run a qubic node, you need the following spec: - Bare Metal Server/Computer with at least 8 Cores (high CPU frequency with AVX2 support). AVX-512 support is recommended; check supported CPUs [here](https://www.epey.co.uk/cpu/e/YTozOntpOjUwOTc7YToxOntpOjA7czo2OiI0Mjg1NzUiO31pOjUwOTk7YToyOntpOjA7czoxOiI4IjtpOjE7czoyOiIzMiI7fWk6NTA4ODthOjY6e2k6MDtzOjY6IjQ1NjE1MCI7aToxO3M6NzoiMjM4Nzg2MSI7aToyO3M6NzoiMTkzOTE5OSI7aTozO3M6NzoiMTUwMjg4MyI7aTo0O3M6NzoiMjA2Nzk5MyI7aTo1O3M6NzoiMjE5OTc1OSI7fX1fYjowOw==/) -- At least 128GB of RAM +- At least 256GB of RAM - 1Gb/s synchronous internet connection - A USB Stick or SSD/HD attached to the Computer (via NVMe M.2 or USB) - UEFI Bios diff --git a/src/Qubic.vcxproj b/src/Qubic.vcxproj index 5842ca1a..6aa9cfa8 100644 --- a/src/Qubic.vcxproj +++ b/src/Qubic.vcxproj @@ -1,6 +1,10 @@ + + Debug + x64 + LinuxRelease x64 @@ -35,6 +39,7 @@ + @@ -57,6 +62,7 @@ + 16.0 @@ -73,6 +79,13 @@ true NotSet + + DynamicLibrary + false + v143 + true + NotSet + DynamicLibrary false @@ -88,6 +101,9 @@ + + + @@ -97,6 +113,11 @@ .efi false + + false + .efi + false + false .efi @@ -117,7 +138,7 @@ true false MaxSpeed - Default + stdcpp20 Speed /Gs1638400 %(AdditionalOptions) 126976 @@ -149,6 +170,54 @@ UseLinkTimeCodeGeneration + + + Level3 + false + true + false + _CONSOLE;%(PreprocessorDefinitions) + true + false + true + false + false + true + false + MaxSpeed + stdcpp20 + Speed + /Gs1638400 %(AdditionalOptions) + 126976 + 126976 + None + AnySuitable + true + true + false + AdvancedVectorExtensions2 + false + false + false + All + $(IntDir)Qubic.asm + + + EFI Application + true + true + false + true + + + efi_main + false + false + 131072 + 131072 + UseLinkTimeCodeGeneration + + Level3 @@ -164,7 +233,7 @@ true false MaxSpeed - Default + stdcpp20 Speed /Gs1638400 %(AdditionalOptions) 126976 diff --git a/src/Qubic.vcxproj.filters b/src/Qubic.vcxproj.filters index b8f12ee6..6d8a57d6 100644 --- a/src/Qubic.vcxproj.filters +++ b/src/Qubic.vcxproj.filters @@ -111,6 +111,10 @@ smart_contracts + + + platform + diff --git a/src/network_core/peers.h b/src/network_core/peers.h index 6ac9d6c2..ab9f9428 100644 --- 
a/src/network_core/peers.h +++ b/src/network_core/peers.h @@ -206,7 +206,7 @@ static void enqueueResponse(Peer* peer, RequestResponseHeader* responseHeader) RELEASE(responseQueueHeadLock); } -static void enqueueResponse(Peer* peer, unsigned int dataSize, unsigned char type, unsigned int dejavu, void* data) +static void enqueueResponse(Peer* peer, unsigned int dataSize, unsigned char type, unsigned int dejavu, const void* data) { ACQUIRE(responseQueueHeadLock); @@ -228,7 +228,7 @@ static void enqueueResponse(Peer* peer, unsigned int dataSize, unsigned char typ responseHeader->setDejavu(dejavu); if (data) { - bs->CopyMem(&responseQueueBuffer[responseQueueBufferHead + sizeof(RequestResponseHeader)], data, dataSize); + copyMem(&responseQueueBuffer[responseQueueBufferHead + sizeof(RequestResponseHeader)], data, dataSize); } responseQueueBufferHead += responseHeader->size(); responseQueueElements[responseQueueElementHead].peer = peer; diff --git a/src/network_messages/common_def.h b/src/network_messages/common_def.h index 252052ad..04e3f43b 100644 --- a/src/network_messages/common_def.h +++ b/src/network_messages/common_def.h @@ -7,6 +7,10 @@ #define NUMBER_OF_EXCHANGED_PEERS 4 #define SPECTRUM_DEPTH 24 // Defines SPECTRUM_CAPACITY (1 << SPECTRUM_DEPTH) +#define MAX_INPUT_SIZE 1024ULL +#define ISSUANCE_RATE 1000000000000LL +#define MAX_AMOUNT (ISSUANCE_RATE * 1000ULL) + // If you want to use the network_meassges directory in your project without dependencies to other code, // you may define NETWORK_MESSAGES_WITHOUT_CORE_DEPENDENCIES before including any header or change the diff --git a/src/network_messages/transactions.h b/src/network_messages/transactions.h index 40c4f268..75a7609e 100644 --- a/src/network_messages/transactions.h +++ b/src/network_messages/transactions.h @@ -11,6 +11,8 @@ struct ContractIPOBid #define BROADCAST_TRANSACTION 24 + +// A transaction is made of this struct, followed by inputSize bytes of payload data and SIGNATURE_SIZE bytes of signature struct Transaction { m256i sourcePublicKey; @@ -19,6 +21,30 @@ struct Transaction unsigned int tick; unsigned short inputType; unsigned short inputSize; + + // Return total transaction data size including payload data and signature + unsigned int totalSize() const + { + return sizeof(Transaction) + inputSize + SIGNATURE_SIZE; + } + + // Check if transaction is valid + bool checkValidity() const + { + return amount >= 0 && amount <= MAX_AMOUNT && inputSize <= MAX_INPUT_SIZE; + } + + // Return pointer to transaction's payload (CAUTION: This is behind the memory reserved for this struct!) + unsigned char* inputPtr() + { + return (((unsigned char*)this) + sizeof(Transaction)); + } + + // Return pointer to signature (CAUTION: This is behind the memory reserved for this struct!) + unsigned char* signaturePtr() + { + return ((unsigned char*)this) + sizeof(Transaction) + inputSize; + } }; static_assert(sizeof(Transaction) == 32 + 32 + 8 + 4 + 2 + 2, "Something is wrong with the struct size."); diff --git a/src/platform/console_logging.h b/src/platform/console_logging.h index f6a67f67..250a09d0 100644 --- a/src/platform/console_logging.h +++ b/src/platform/console_logging.h @@ -32,12 +32,14 @@ static void logToConsole(const CHAR16* message) #else // Output to console on UEFI platform +// CAUTION: Can only be called from main processor thread. Otherwise there is a high risk of crashing.
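// ------------------------------------------------------------------------------
// Editor's sketch (not part of the patch): how the new Transaction helpers added
// above in transactions.h are meant to be used on a raw message buffer. `buffer`
// and `bufferSize` are hypothetical names for a received payload; payload and
// signature live directly behind the fixed-size struct, as documented there.
static const Transaction* parseTransactionExample(const unsigned char* buffer, unsigned int bufferSize)
{
    if (bufferSize < sizeof(Transaction) + SIGNATURE_SIZE)
        return nullptr; // cannot even hold the fixed header plus signature
    const Transaction* tx = (const Transaction*)buffer;
    // checkValidity() bounds amount and inputSize; totalSize() adds payload + signature
    if (!tx->checkValidity() || tx->totalSize() != bufferSize)
        return nullptr;
    // inputPtr() points at the inputSize bytes of payload behind the struct,
    // signaturePtr() at the SIGNATURE_SIZE bytes behind the payload.
    return tx;
}
// ------------------------------------------------------------------------------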
static inline void outputStringToConsole(CHAR16* str) { st->ConOut->OutputString(st->ConOut, str); } // Log message to console (with line break) on UEFI platform (defined in qubic.cpp due to dependencies on time and qubic status) +// CAUTION: Can only be called from main processor thread. Otherwise there is a high risk of crashing. static void logToConsole(const CHAR16* message); #endif @@ -159,3 +161,12 @@ static void logStatusToConsole(const CHAR16* message, const EFI_STATUS status, c appendText(::message, L"!"); logToConsole(::message); } + +// Count characters before terminating NULL +static unsigned int stringLength(const CHAR16* str) +{ + unsigned int l = 0; + while (str[l] != 0) + l++; + return l; +} diff --git a/src/platform/debugging.h b/src/platform/debugging.h new file mode 100644 index 00000000..d9ef6319 --- /dev/null +++ b/src/platform/debugging.h @@ -0,0 +1,128 @@ +#pragma once + +#include "concurrency.h" +#include "file_io.h" + + +#if defined(EXPECT_TRUE) + +// in gtest context, use EXPECT_TRUE as ASSERT +#define ASSERT EXPECT_TRUE + +#elif defined(NDEBUG) + +// with NDEBUG, make ASSERT disappear +#define ASSERT(expression) ((void)0) + +#else + +static CHAR16 debugMessage[128][16384]; +static int debugMessageCount = 0; +static char volatile debugLogLock = 0; + +#define WRITE_DEBUG_MESSAGES_TO_FILE 1 + +// Print debug messages added with addDebugMessage(). +// CAUTION: Can only be called from main processor thread. Otherwise there is a high risk of crashing. +static void printDebugMessages() +{ +#if WRITE_DEBUG_MESSAGES_TO_FILE + // Open debug log file and seek to the end of file for appending + EFI_STATUS status; + EFI_FILE_PROTOCOL* file = nullptr; + if (status = root->Open(root, (void**)&file, (CHAR16*)L"debug.log", EFI_FILE_MODE_READ | EFI_FILE_MODE_WRITE | EFI_FILE_MODE_CREATE, 0)) + { + logStatusToConsole(L"EFI_FILE_PROTOCOL.Open() fails", status, __LINE__); + file = nullptr; + } + else + { + if (status = root->SetPosition(file, 0xFFFFFFFFFFFFFFFF)) + { + logStatusToConsole(L"EFI_FILE_PROTOCOL.SetPosition() fails", status, __LINE__); + file = nullptr; + } + } +#endif + ACQUIRE(debugLogLock); + for (int i = 0; i < debugMessageCount; i++) + { + // Make sure there is a newline at the end + unsigned int strLen = stringLength(debugMessage[i]); + if (debugMessage[i][strLen-1] != L'\n') + { + appendText(debugMessage[i], L"\r\n"); + strLen += 2; + } + + // Write to console + outputStringToConsole(debugMessage[i]); + +#if WRITE_DEBUG_MESSAGES_TO_FILE + // Write to log file + if (file) + { + char* buffer = (char*)debugMessage[i]; + unsigned long long totalSize = strLen * sizeof(CHAR16); + unsigned long long writtenSize = 0; + while (writtenSize < totalSize) + { + unsigned long long size = (WRITING_CHUNK_SIZE <= (totalSize - writtenSize) ? WRITING_CHUNK_SIZE : (totalSize - writtenSize)); + status = file->Write(file, &size, &buffer[writtenSize]); + if (status + || size != (WRITING_CHUNK_SIZE <= (totalSize - writtenSize) ? 
WRITING_CHUNK_SIZE : (totalSize - writtenSize))) + { + logStatusToConsole(L"EFI_FILE_PROTOCOL.Write() fails", status, __LINE__); + + file->Close(file); + file = 0; + break; + } + writtenSize += size; + } + } +#endif + } + debugMessageCount = 0; + RELEASE(debugLogLock); +#if WRITE_DEBUG_MESSAGES_TO_FILE + if (file) + file->Close(file); +#endif +} + +// Add a message for logging from arbitrary thread +static void addDebugMessage(const CHAR16* msg) +{ + ACQUIRE(debugLogLock); + if (debugMessageCount < 128) + { + setText(debugMessage[debugMessageCount], msg); + ++debugMessageCount; + } + RELEASE(debugLogLock); +} + +// Add an assert message for logging from arbitrary thread +static void addDebugMessageAssert(const CHAR16* message, const CHAR16* file, const unsigned int lineNumber) +{ + ACQUIRE(debugLogLock); + if (debugMessageCount < 128) + { + setText(debugMessage[debugMessageCount], L"Assertion failed: "); + appendText(debugMessage[debugMessageCount], message); + appendText(debugMessage[debugMessageCount], L" at line "); + appendNumber(debugMessage[debugMessageCount], lineNumber, FALSE); + appendText(debugMessage[debugMessageCount], L" in "); + appendText(debugMessage[debugMessageCount], file); + ++debugMessageCount; + } + RELEASE(debugLogLock); +} + +#define ASSERT(expression) (void)( \ + (!!(expression)) || \ + (addDebugMessageAssert(_CRT_WIDE(#expression), _CRT_WIDE(__FILE__), (unsigned int)(__LINE__)), 0) \ + ) + +#endif diff --git a/src/platform/memory.h b/src/platform/memory.h index 6871e816..833d1e5f 100644 --- a/src/platform/memory.h +++ b/src/platform/memory.h @@ -3,6 +3,7 @@ #ifdef NO_UEFI #include <cstring> +#include <cstdlib> static inline void setMem(void* buffer, unsigned long long size, unsigned char value) { @@ -14,6 +15,22 @@ static inline void copyMem(void* destination, const void* source, unsigned long memcpy(destination, source, length); } +static inline bool allocatePool(unsigned long long size, void** buffer) +{ + void* ptr = malloc(size); + if (ptr) + { + *buffer = ptr; + return true; + } + return false; +} + +static inline void freePool(void* buffer) +{ + free(buffer); +} + #else #include "uefi.h" @@ -28,4 +45,26 @@ static inline void copyMem(void* destination, const void* source, unsigned long bs->CopyMem(destination, (void*)source, length); } +static inline bool allocatePool(unsigned long long size, void** buffer) +{ + return bs->AllocatePool(EfiRuntimeServicesData, size, buffer) == EFI_SUCCESS; +} + +static inline void freePool(void* buffer) +{ + bs->FreePool(buffer); +} + #endif + +// This should be optimized if used in a non-debugging context (using unsigned long long comparison as much as possible) +static inline bool isZero(const void* ptr, unsigned long long size) +{ + const char* cPtr = (const char*)ptr; + for (unsigned long long i = 0; i < size; ++i) + { + if (cPtr[i] != 0) + return false; + } + return true; +} diff --git a/src/public_settings.h b/src/public_settings.h index f5f3392a..f9e53306 100644 --- a/src/public_settings.h +++ b/src/public_settings.h @@ -2,11 +2,33 @@ ////////// Public Settings \\\\\\\\\\ +////////////////////////////////////////////////////////////////////////// +// Config options for operators + // no need to define AVX512 here anymore, just change the project settings to use the AVX512 version +// random seed is now obtained from spectrumDigests #define MAX_NUMBER_OF_PROCESSORS 32 #define NUMBER_OF_SOLUTION_PROCESSORS 6 // do not increase this for this epoch, because there may be issues due too fast ticking +#define USE_SCORE_CACHE 1 +#define
SCORE_CACHE_SIZE 1000000 // the larger the better +#define SCORE_CACHE_COLLISION_RETRIES 20 // number of retries to find entry in cache in case of hash collision + +// Number of ticks from the prior epoch that are kept after a seamless epoch transition. These can still be requested after the transition. +#define TICKS_TO_KEEP_FROM_PRIOR_EPOCH 100 + +#define TARGET_TICK_DURATION 2500 +#define TRANSACTION_SPARSENESS 4 + +// Set START_NETWORK_FROM_SCRATCH to 0 if you start the node to sync with an already ticking network. +// If this flag is 1, it indicates that the whole network (all 676 IDs) will start from scratch and agree that the very first tick time will be set at (2022-04-13 Wed 12:00:00.000UTC). +// If this flag is 0, the node will try to fetch the initial tick of the epoch from other nodes, because the tick's timestamp may differ from (2022-04-13 Wed 12:00:00.000UTC). +#define START_NETWORK_FROM_SCRATCH 1 + +////////////////////////////////////////////////////////////////////////// +// Config options that should NOT be changed by operators + #define VERSION_A 1 #define VERSION_B 197 #define VERSION_C 0 @@ -14,11 +36,10 @@ #define EPOCH 101 #define TICK 13060000 -// random seed is now obtained from spectrumDigests - #define ARBITRATOR "AFZPUAIYVPNUYGJRQVLUKOPPVLHAZQTGLYAAUUNBXFTVTAMSBKQBLEIEPCVJ" static unsigned short SYSTEM_FILE_NAME[] = L"system"; +static unsigned short SYSTEM_END_OF_EPOCH_FILE_NAME[] = L"system.eoe"; static unsigned short SPECTRUM_FILE_NAME[] = L"spectrum.???"; static unsigned short UNIVERSE_FILE_NAME[] = L"universe.???"; static unsigned short SCORE_CACHE_FILE_NAME[] = L"score.???"; @@ -32,6 +53,11 @@ static unsigned short CONTRACT_FILE_NAME[] = L"contract????.???"; #define MAX_OUTPUT_DURATION 256 #define NEURON_VALUE_LIMIT 1LL #define SOLUTION_THRESHOLD_DEFAULT 42 -#define USE_SCORE_CACHE 1 -#define SCORE_CACHE_SIZE 1000000 // the larger the better -#define SCORE_CACHE_COLLISION_RETRIES 20 // number of retries to find entry in cache in case of hash collision + +// include commonly needed definitions +#include "network_messages/common_def.h" + +#define MAX_NUMBER_OF_TICKS_PER_EPOCH (((((60 * 60 * 24 * 7) / (TARGET_TICK_DURATION / 1000)) + NUMBER_OF_COMPUTORS - 1) / NUMBER_OF_COMPUTORS) * NUMBER_OF_COMPUTORS) +#define FIRST_TICK_TRANSACTION_OFFSET sizeof(unsigned long long) +#define MAX_TRANSACTION_SIZE (MAX_INPUT_SIZE + sizeof(Transaction) + SIGNATURE_SIZE) + diff --git a/src/qubic.cpp b/src/qubic.cpp index 32b45659..f60a9f82 100644 --- a/src/qubic.cpp +++ b/src/qubic.cpp @@ -34,23 +34,17 @@ #include "assets.h" #include "logging.h" +#include "tick_storage.h" ////////// Qubic \\\\\\\\\\ #define CONTRACT_STATES_DEPTH 10 // Is derived from MAX_NUMBER_OF_CONTRACTS (=N) -#define TARGET_TICK_DURATION 2500 #define TICK_REQUESTING_PERIOD 500ULL -#define FIRST_TICK_TRANSACTION_OFFSET sizeof(unsigned long long) -#define ISSUANCE_RATE 1000000000000LL #define MAX_NUMBER_EPOCH 1000ULL -#define MAX_AMOUNT (ISSUANCE_RATE * 1000ULL) -#define MAX_INPUT_SIZE 1024ULL #define MAX_NUMBER_OF_MINERS 8192 #define NUMBER_OF_MINER_SOLUTION_FLAGS 0x100000000 -#define MAX_TRANSACTION_SIZE (MAX_INPUT_SIZE + sizeof(Transaction) + SIGNATURE_SIZE) #define MAX_MESSAGE_PAYLOAD_SIZE MAX_TRANSACTION_SIZE -#define MAX_NUMBER_OF_TICKS_PER_EPOCH (((((60 * 60 * 24 * 7) / (TARGET_TICK_DURATION / 1000)) + NUMBER_OF_COMPUTORS - 1) / NUMBER_OF_COMPUTORS) * NUMBER_OF_COMPUTORS) #define MAX_CONTRACT_STATE_SIZE 1073741824 #define MAX_UNIVERSE_SIZE 1073741824 #define MESSAGE_DISSEMINATION_THRESHOLD 1000000000 @@ -62,7
+56,6 @@ #define TICK_TRANSACTIONS_PUBLICATION_OFFSET 2 // Must be only 2 #define MIN_MINING_SOLUTIONS_PUBLICATION_OFFSET 3 // Must be 3+ #define TIME_ACCURACY 5000 -#define TRANSACTION_SPARSENESS 4 @@ -82,9 +75,12 @@ static volatile int shutDownNode = 0; static volatile unsigned char mainAuxStatus = 0; static volatile bool forceRefreshPeerList = false; static volatile bool forceNextTick = false; +static volatile bool forceSwitchEpoch = false; static volatile char criticalSituation = 0; static volatile bool systemMustBeSaved = false, spectrumMustBeSaved = false, universeMustBeSaved = false, computerMustBeSaved = false; + static volatile unsigned char epochTransitionState = 0; +static volatile long epochTransitionWaitingRequestProcessors = 0; static m256i operatorPublicKey; static m256i computorSubseeds[sizeof(computorSeeds) / sizeof(computorSeeds[0])]; @@ -92,11 +88,7 @@ static m256i computorPrivateKeys[sizeof(computorSeeds) / sizeof(computorSeeds[0] static m256i computorPublicKeys[sizeof(computorSeeds) / sizeof(computorSeeds[0])]; static m256i arbitratorPublicKey; -static struct -{ - RequestResponseHeader header; - BroadcastComputors broadcastComputors; -} broadcastedComputors; +BroadcastComputors broadcastedComputors; // data closely related to system static int solutionPublicationTicks[MAX_NUMBER_OF_SOLUTIONS]; @@ -107,15 +99,9 @@ static unsigned short numberOfOwnComputorIndices; static unsigned short ownComputorIndices[sizeof(computorSeeds) / sizeof(computorSeeds[0])]; static unsigned short ownComputorIndicesMapping[sizeof(computorSeeds) / sizeof(computorSeeds[0])]; -static Tick* ticks = NULL; -static TickData* tickData = NULL; -static volatile char tickDataLock = 0; +static TickStorage ts; static Tick etalonTick; static TickData nextTickData; -static volatile char tickTransactionsLock = 0; -static unsigned char* tickTransactions = NULL; -static unsigned long long* tickTransactionOffsetsPtr = nullptr; -static unsigned long long nextTickTransactionOffset = FIRST_TICK_TRANSACTION_OFFSET; static m256i uniqueNextTickTransactionDigests[NUMBER_OF_COMPUTORS]; static unsigned int uniqueNextTickTransactionDigestCounters[NUMBER_OF_COMPUTORS]; @@ -154,7 +140,6 @@ static char* contractFunctionOutputs[MAX_NUMBER_OF_PROCESSORS]; static char executedContractInput[65536]; static char executedContractOutput[RequestResponseHeader::max_size + 1]; -static volatile char tickLocks[NUMBER_OF_COMPUTORS]; static bool targetNextTickDataDigestIsKnown = false; static m256i targetNextTickDataDigest; static unsigned long long tickTicks[11]; @@ -225,11 +210,6 @@ static struct -unsigned long long& tickTransactionOffsets(unsigned int tick, unsigned int transaction) -{ - const unsigned int tickIndex = tick - system.initialTick; - return tickTransactionOffsetsPtr[tickIndex * NUMBER_OF_TRANSACTIONS_PER_TICK + transaction]; -} static void logToConsole(const CHAR16* message) { @@ -273,7 +253,15 @@ static void logToConsole(const CHAR16* message) appendText(timestampedMessage, message); appendText(timestampedMessage, L"\r\n"); +#ifdef NDEBUG outputStringToConsole(timestampedMessage); +#else + bool logAsDebugMessage = epochTransitionState || system.tick - system.initialTick < 3; + if (logAsDebugMessage) + addDebugMessage(timestampedMessage); + else + outputStringToConsole(timestampedMessage); +#endif } @@ -583,20 +571,21 @@ static void processBroadcastMessage(const unsigned long long processorNumber, Re static void processBroadcastComputors(Peer* peer, RequestResponseHeader* header) { BroadcastComputors* request = 
header->getPayload(); - // verify that all address are non-zeroes - // discard it even if ARB broadcast it - if (request->computors.epoch > broadcastedComputors.broadcastComputors.computors.epoch) + + // Only accept computor list from current epoch (important in seamless epoch transition if this node is + // lagging behind the others that already switched epoch). + if (request->computors.epoch == system.epoch && request->computors.epoch > broadcastedComputors.computors.epoch) { + // Verify that all addresses are non-zeroes. Otherwise, discard it even if ARB broadcasted it. for (unsigned int i = 0; i < NUMBER_OF_COMPUTORS; i++) { - if (request->computors.publicKeys[i] == NULL_ID) // NULL_ID is _mm256_setzero_si256() + if (isZero(request->computors.publicKeys[i])) { return; } } - } - if (request->computors.epoch > broadcastedComputors.broadcastComputors.computors.epoch) - { + + // Verify that list is signed by Arbitrator unsigned char digest[32]; KangarooTwelve(request, sizeof(BroadcastComputors) - SIGNATURE_SIZE, digest, sizeof(digest)); if (verify((unsigned char*)&arbitratorPublicKey, digest, request->computors.signature)) @@ -606,8 +595,10 @@ static void processBroadcastComputors(Peer* peer, RequestResponseHeader* header) enqueueResponse(NULL, header); } - bs->CopyMem(&broadcastedComputors.broadcastComputors.computors, &request->computors, sizeof(Computors)); + // Copy computor list + bs->CopyMem(&broadcastedComputors.computors, &request->computors, sizeof(Computors)); + // Update ownComputorIndices and minerPublicKeys if (request->computors.epoch == system.epoch) { numberOfOwnComputorIndices = 0; @@ -636,7 +627,8 @@ static void processBroadcastTick(Peer* peer, RequestResponseHeader* header) BroadcastTick* request = header->getPayload(); if (request->tick.computorIndex < NUMBER_OF_COMPUTORS && request->tick.epoch == system.epoch - && request->tick.tick >= system.tick && request->tick.tick < system.initialTick + MAX_NUMBER_OF_TICKS_PER_EPOCH + && request->tick.tick >= system.tick + && ts.tickInCurrentEpochStorage(request->tick.tick) && request->tick.month >= 1 && request->tick.month <= 12 && request->tick.day >= 1 && request->tick.day <= ((request->tick.month == 1 || request->tick.month == 3 || request->tick.month == 5 || request->tick.month == 7 || request->tick.month == 8 || request->tick.month == 10 || request->tick.month == 12) ? 31 : ((request->tick.month == 4 || request->tick.month == 6 || request->tick.month == 9 || request->tick.month == 11) ? 30 : ((request->tick.year & 3) ? 
28 : 29))) && request->tick.hour <= 23 @@ -648,37 +640,40 @@ static void processBroadcastTick(Peer* peer, RequestResponseHeader* header) request->tick.computorIndex ^= BroadcastTick::type; KangarooTwelve(&request->tick, sizeof(Tick) - SIGNATURE_SIZE, digest, sizeof(digest)); request->tick.computorIndex ^= BroadcastTick::type; - if (verify(broadcastedComputors.broadcastComputors.computors.publicKeys[request->tick.computorIndex].m256i_u8, digest, request->tick.signature)) + if (verify(broadcastedComputors.computors.publicKeys[request->tick.computorIndex].m256i_u8, digest, request->tick.signature)) { if (header->isDejavuZero()) { enqueueResponse(NULL, header); } - ACQUIRE(tickLocks[request->tick.computorIndex]); + ts.ticks.acquireLock(request->tick.computorIndex); - const unsigned int offset = ((request->tick.tick - system.initialTick) * NUMBER_OF_COMPUTORS) + request->tick.computorIndex; - if (ticks[offset].epoch == system.epoch) + // Find element in tick storage and check if contains data (epoch is set to 0 on init) + Tick* tsTick = ts.ticks.getByTickInCurrentEpoch(request->tick.tick) + request->tick.computorIndex; + if (tsTick->epoch == system.epoch) { - if (*((unsigned long long*)&request->tick.millisecond) != *((unsigned long long*)&ticks[offset].millisecond) - || request->tick.prevSpectrumDigest != ticks[offset].prevSpectrumDigest - || request->tick.prevUniverseDigest != ticks[offset].prevUniverseDigest - || request->tick.prevComputerDigest != ticks[offset].prevComputerDigest - || request->tick.saltedSpectrumDigest != ticks[offset].saltedSpectrumDigest - || request->tick.saltedUniverseDigest != ticks[offset].saltedUniverseDigest - || request->tick.saltedComputerDigest != ticks[offset].saltedComputerDigest - || request->tick.transactionDigest != ticks[offset].transactionDigest - || request->tick.expectedNextTickTransactionDigest != ticks[offset].expectedNextTickTransactionDigest) + // Check if the sent tick matches the tick in tick storage + if (*((unsigned long long*)&request->tick.millisecond) != *((unsigned long long*)&tsTick->millisecond) + || request->tick.prevSpectrumDigest != tsTick->prevSpectrumDigest + || request->tick.prevUniverseDigest != tsTick->prevUniverseDigest + || request->tick.prevComputerDigest != tsTick->prevComputerDigest + || request->tick.saltedSpectrumDigest != tsTick->saltedSpectrumDigest + || request->tick.saltedUniverseDigest != tsTick->saltedUniverseDigest + || request->tick.saltedComputerDigest != tsTick->saltedComputerDigest + || request->tick.transactionDigest != tsTick->transactionDigest + || request->tick.expectedNextTickTransactionDigest != tsTick->expectedNextTickTransactionDigest) { faultyComputorFlags[request->tick.computorIndex >> 6] |= (1ULL << (request->tick.computorIndex & 63)); } } else { - bs->CopyMem(&ticks[offset], &request->tick, sizeof(Tick)); + // Copy the sent tick to the tick storage + bs->CopyMem(tsTick, &request->tick, sizeof(Tick)); } - RELEASE(tickLocks[request->tick.computorIndex]); + ts.ticks.releaseLock(request->tick.computorIndex); } } } @@ -687,7 +682,8 @@ static void processBroadcastFutureTickData(Peer* peer, RequestResponseHeader* he { BroadcastFutureTickData* request = header->getPayload(); if (request->tickData.epoch == system.epoch - && request->tickData.tick > system.tick && request->tickData.tick < system.initialTick + MAX_NUMBER_OF_TICKS_PER_EPOCH + && request->tickData.tick > system.tick + && ts.tickInCurrentEpochStorage(request->tickData.tick) && request->tickData.tick % NUMBER_OF_COMPUTORS == 
request->tickData.computorIndex && request->tickData.month >= 1 && request->tickData.month <= 12 && request->tickData.day >= 1 && request->tickData.day <= ((request->tickData.month == 1 || request->tickData.month == 3 || request->tickData.month == 5 || request->tickData.month == 7 || request->tickData.month == 8 || request->tickData.month == 10 || request->tickData.month == 12) ? 31 : ((request->tickData.month == 4 || request->tickData.month == 6 || request->tickData.month == 9 || request->tickData.month == 11) ? 30 : ((request->tickData.year & 3) ? 28 : 29))) @@ -719,14 +715,15 @@ static void processBroadcastFutureTickData(Peer* peer, RequestResponseHeader* he request->tickData.computorIndex ^= BroadcastFutureTickData::type; KangarooTwelve(&request->tickData, sizeof(TickData) - SIGNATURE_SIZE, digest, sizeof(digest)); request->tickData.computorIndex ^= BroadcastFutureTickData::type; - if (verify(broadcastedComputors.broadcastComputors.computors.publicKeys[request->tickData.computorIndex].m256i_u8, digest, request->tickData.signature)) + if (verify(broadcastedComputors.computors.publicKeys[request->tickData.computorIndex].m256i_u8, digest, request->tickData.signature)) { if (header->isDejavuZero()) { enqueueResponse(NULL, header); } - ACQUIRE(tickDataLock); + ts.tickData.acquireLock(); + TickData& td = ts.tickData.getByTickInCurrentEpoch(request->tickData.tick); if (request->tickData.tick == system.tick + 1 && targetNextTickDataDigestIsKnown) { if (!isZero(targetNextTickDataDigest)) @@ -735,15 +732,16 @@ static void processBroadcastFutureTickData(Peer* peer, RequestResponseHeader* he KangarooTwelve(&request->tickData, sizeof(TickData), digest, 32); if (digest == targetNextTickDataDigest) { - bs->CopyMem(&tickData[request->tickData.tick - system.initialTick], &request->tickData, sizeof(TickData)); + bs->CopyMem(&td, &request->tickData, sizeof(TickData)); } } } else { - if (tickData[request->tickData.tick - system.initialTick].epoch == system.epoch) + if (td.epoch == system.epoch) { - if (*((unsigned long long*)&request->tickData.millisecond) != *((unsigned long long*)&tickData[request->tickData.tick - system.initialTick].millisecond)) + // Tick data already available. Mark computor as faulty if the data that was sent differs. 
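// ------------------------------------------------------------------------------
// Editor's sketch (not part of the patch): the tick-data access pattern used
// throughout this change. A TickData slot in the new TickStorage must only be
// read or written while holding its lock; `someTick` is a hypothetical tick
// number that lies in the current epoch's storage range.
static void copyTickDataExample(unsigned int someTick, TickData& out)
{
    ts.tickData.acquireLock();
    const TickData& td = ts.tickData.getByTickInCurrentEpoch(someTick);
    if (td.epoch == system.epoch) // a non-matching epoch marks an empty or stale slot
        copyMem(&out, &td, sizeof(TickData));
    ts.tickData.releaseLock();
}
// ------------------------------------------------------------------------------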
+ if (*((unsigned long long*)&request->tickData.millisecond) != *((unsigned long long*)&td.millisecond)) { faultyComputorFlags[request->tickData.computorIndex >> 6] |= (1ULL << (request->tickData.computorIndex & 63)); } @@ -751,7 +749,7 @@ static void processBroadcastFutureTickData(Peer* peer, RequestResponseHeader* he { for (unsigned int i = 0; i < NUMBER_OF_TRANSACTIONS_PER_TICK; i++) { - if (request->tickData.transactionDigests[i] != tickData[request->tickData.tick - system.initialTick].transactionDigests[i]) + if (request->tickData.transactionDigests[i] != td.transactionDigests[i]) { faultyComputorFlags[request->tickData.computorIndex >> 6] |= (1ULL << (request->tickData.computorIndex & 63)); @@ -762,10 +760,10 @@ static void processBroadcastFutureTickData(Peer* peer, RequestResponseHeader* he } else { - bs->CopyMem(&tickData[request->tickData.tick - system.initialTick], &request->tickData, sizeof(TickData)); + bs->CopyMem(&td, &request->tickData, sizeof(TickData)); } } - RELEASE(tickDataLock); + ts.tickData.releaseLock(); } } } @@ -774,13 +772,12 @@ static void processBroadcastFutureTickData(Peer* peer, RequestResponseHeader* he static void processBroadcastTransaction(Peer* peer, RequestResponseHeader* header) { Transaction* request = header->getPayload(); - if (request->amount >= 0 && request->amount <= MAX_AMOUNT - && request->inputSize <= MAX_INPUT_SIZE && request->inputSize == header->size() - sizeof(RequestResponseHeader) - sizeof(Transaction) - SIGNATURE_SIZE) + const unsigned int transactionSize = request->totalSize(); + if (request->checkValidity() && transactionSize == header->size() - sizeof(RequestResponseHeader)) { - const unsigned int transactionSize = sizeof(Transaction) + request->inputSize + SIGNATURE_SIZE; unsigned char digest[32]; KangarooTwelve(request, transactionSize - SIGNATURE_SIZE, digest, sizeof(digest)); - if (verify(request->sourcePublicKey.m256i_u8, digest, (((const unsigned char*)request) + sizeof(Transaction) + request->inputSize))) + if (verify(request->sourcePublicKey.m256i_u8, digest, request->signaturePtr())) { if (header->isDejavuZero()) { @@ -806,41 +803,43 @@ static void processBroadcastTransaction(Peer* peer, RequestResponseHeader* heade RELEASE(entityPendingTransactionsLock); } - ACQUIRE(tickDataLock); + unsigned int tickIndex = ts.tickToIndexCurrentEpoch(request->tick); + ts.tickData.acquireLock(); if (request->tick == system.tick + 1 - && tickData[request->tick - system.initialTick].epoch == system.epoch) + && ts.tickData[tickIndex].epoch == system.epoch) { KangarooTwelve(request, transactionSize, digest, sizeof(digest)); + auto* tsReqTickTransactionOffsets = ts.tickTransactionOffsets.getByTickIndex(tickIndex); for (unsigned int i = 0; i < NUMBER_OF_TRANSACTIONS_PER_TICK; i++) { - if (digest == tickData[request->tick - system.initialTick].transactionDigests[i]) + if (digest == ts.tickData[tickIndex].transactionDigests[i]) { - ACQUIRE(tickTransactionsLock); - if (!tickTransactionOffsets(request->tick, i)) + ts.tickTransactions.acquireLock(); + if (!tsReqTickTransactionOffsets[i]) { - if (nextTickTransactionOffset + transactionSize <= FIRST_TICK_TRANSACTION_OFFSET + (((unsigned long long)MAX_NUMBER_OF_TICKS_PER_EPOCH) * NUMBER_OF_TRANSACTIONS_PER_TICK * MAX_TRANSACTION_SIZE / TRANSACTION_SPARSENESS)) + if (ts.nextTickTransactionOffset + transactionSize <= ts.tickTransactions.storageSpaceCurrentEpoch) { - tickTransactionOffsets(request->tick, i) = nextTickTransactionOffset; - bs->CopyMem(&tickTransactions[nextTickTransactionOffset], request, 
transactionSize); - nextTickTransactionOffset += transactionSize; + tsReqTickTransactionOffsets[i] = ts.nextTickTransactionOffset; + bs->CopyMem(ts.tickTransactions(ts.nextTickTransactionOffset), request, transactionSize); + ts.nextTickTransactionOffset += transactionSize; } } - RELEASE(tickTransactionsLock); + ts.tickTransactions.releaseLock(); break; } } } - RELEASE(tickDataLock); + ts.tickData.releaseLock(); } } } static void processRequestComputors(Peer* peer, RequestResponseHeader* header) { - if (broadcastedComputors.broadcastComputors.computors.epoch) + if (broadcastedComputors.computors.epoch) { - enqueueResponse(peer, sizeof(broadcastedComputors.broadcastComputors), BroadcastComputors::type, header->dejavu(), &broadcastedComputors.broadcastComputors); + enqueueResponse(peer, sizeof(broadcastedComputors), BroadcastComputors::type, header->dejavu(), &broadcastedComputors); } else { @@ -851,8 +850,25 @@ static void processRequestComputors(Peer* peer, RequestResponseHeader* header) static void processRequestQuorumTick(Peer* peer, RequestResponseHeader* header) { RequestQuorumTick* request = header->getPayload(); - if (request->quorumTick.tick >= system.initialTick && request->quorumTick.tick < system.initialTick + MAX_NUMBER_OF_TICKS_PER_EPOCH) + + unsigned short tickEpoch = 0; + const Tick* tsCompTicks; + if (ts.tickInCurrentEpochStorage(request->quorumTick.tick)) + { + tickEpoch = system.epoch; + tsCompTicks = ts.ticks.getByTickInCurrentEpoch(request->quorumTick.tick); + } + else if (ts.tickInPreviousEpochStorage(request->quorumTick.tick)) { + tickEpoch = system.epoch - 1; + tsCompTicks = ts.ticks.getByTickInPreviousEpoch(request->quorumTick.tick); + } + + if (tickEpoch != 0) + { + // Send Tick struct data from tick storage as requested by tick and voteFlags in request->quorumTick. + // The order of the computors is randomized. + // Todo: This function may be optimized by moving the checking of voteFlags in the first loop, reducing the number of calls to random(). 
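// ------------------------------------------------------------------------------
// Editor's note (not part of the patch): voteFlags is a bitmap with one bit per
// computor; a set bit means the requester already holds that computor's tick
// vote, so only computors whose bit is clear get their Tick sent back. The test
// used in the loop below, written out as a hypothetical helper:
static bool voteAlreadyKnownExample(const unsigned char* voteFlags, unsigned short computorIndex)
{
    // byte index = computorIndex / 8, bit index = computorIndex % 8
    return (voteFlags[computorIndex >> 3] & (1 << (computorIndex & 7))) != 0;
}
// ------------------------------------------------------------------------------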
unsigned short computorIndices[NUMBER_OF_COMPUTORS]; unsigned short numberOfComputorIndices; for (numberOfComputorIndices = 0; numberOfComputorIndices < NUMBER_OF_COMPUTORS; numberOfComputorIndices++) @@ -865,10 +881,11 @@ static void processRequestQuorumTick(Peer* peer, RequestResponseHeader* header) if (!(request->quorumTick.voteFlags[computorIndices[index] >> 3] & (1 << (computorIndices[index] & 7)))) { - const unsigned int offset = ((request->quorumTick.tick - system.initialTick) * NUMBER_OF_COMPUTORS) + computorIndices[index]; - if (ticks[offset].epoch == system.epoch) + // Todo: We should acquire ts.ticks lock here if tick >= system.tick + const Tick* tsTick = tsCompTicks + computorIndices[index]; + if (tsTick->epoch == tickEpoch) { - enqueueResponse(peer, sizeof(Tick), BroadcastTick::type, header->dejavu(), &ticks[offset]); + enqueueResponse(peer, sizeof(Tick), BroadcastTick::type, header->dejavu(), tsTick); } } @@ -881,10 +898,10 @@ static void processRequestQuorumTick(Peer* peer, RequestResponseHeader* header) static void processRequestTickData(Peer* peer, RequestResponseHeader* header) { RequestTickData* request = header->getPayload(); - if (request->requestedTickData.tick > system.initialTick && request->requestedTickData.tick < system.initialTick + MAX_NUMBER_OF_TICKS_PER_EPOCH - && tickData[request->requestedTickData.tick - system.initialTick].epoch == system.epoch) + TickData* td = ts.tickData.getByTickIfNotEmpty(request->requestedTickData.tick); + if (td) { - enqueueResponse(peer, sizeof(TickData), BroadcastFutureTickData::type, header->dejavu(), &tickData[request->requestedTickData.tick - system.initialTick]); + enqueueResponse(peer, sizeof(TickData), BroadcastFutureTickData::type, header->dejavu(), td); } else { @@ -895,7 +912,21 @@ static void processRequestTickData(Peer* peer, RequestResponseHeader* header) static void processRequestTickTransactions(Peer* peer, RequestResponseHeader* header) { RequestedTickTransactions* request = header->getPayload(); - if (request->tick >= system.initialTick && request->tick < system.initialTick + MAX_NUMBER_OF_TICKS_PER_EPOCH) + + unsigned short tickEpoch = 0; + const unsigned long long* tsReqTickTransactionOffsets; + if (ts.tickInCurrentEpochStorage(request->tick)) + { + tickEpoch = system.epoch; + tsReqTickTransactionOffsets = ts.tickTransactionOffsets.getByTickInCurrentEpoch(request->tick); + } + else if (ts.tickInPreviousEpochStorage(request->tick)) + { + tickEpoch = system.epoch - 1; + tsReqTickTransactionOffsets = ts.tickTransactionOffsets.getByTickInPreviousEpoch(request->tick); + } + + if (tickEpoch != 0) { unsigned short tickTransactionIndices[NUMBER_OF_TRANSACTIONS_PER_TICK]; unsigned short numberOfTickTransactions; @@ -907,11 +938,16 @@ static void processRequestTickTransactions(Peer* peer, RequestResponseHeader* he { const unsigned short index = random(numberOfTickTransactions); - if (!(request->transactionFlags[tickTransactionIndices[index] >> 3] & (1 << (tickTransactionIndices[index] & 7))) - && tickTransactionOffsets(request->tick, tickTransactionIndices[index])) + if (!(request->transactionFlags[tickTransactionIndices[index] >> 3] & (1 << (tickTransactionIndices[index] & 7)))) { - const Transaction* transaction = (Transaction*)&tickTransactions[tickTransactionOffsets(request->tick, tickTransactionIndices[index])]; - enqueueResponse(peer, sizeof(Transaction) + transaction->inputSize + SIGNATURE_SIZE, BROADCAST_TRANSACTION, header->dejavu(), (void*)transaction); + unsigned long long tickTransactionOffset = 
tsReqTickTransactionOffsets[tickTransactionIndices[index]]; + if (tickTransactionOffset) + { + const Transaction* transaction = ts.tickTransactions(tickTransactionOffset); + ASSERT(transaction->tick == request->tick); + ASSERT(transaction->checkValidity()); + enqueueResponse(peer, transaction->totalSize(), BROADCAST_TRANSACTION, header->dejavu(), (void*)transaction); + } } tickTransactionIndices[index] = tickTransactionIndices[--numberOfTickTransactions]; @@ -924,7 +960,7 @@ static void processRequestCurrentTickInfo(Peer* peer, RequestResponseHeader* hea { CurrentTickInfo currentTickInfo; - if (broadcastedComputors.broadcastComputors.computors.epoch) + if (broadcastedComputors.computors.epoch) { unsigned long long tickDuration = (__rdtsc() - tickTicks[sizeof(tickTicks) / sizeof(tickTicks[0]) - 1]) / frequency; if (tickDuration > 0xFFFF) @@ -1204,6 +1240,17 @@ static void requestProcessor(void* ProcedureArgument) RequestResponseHeader* header = (RequestResponseHeader*)processor->buffer; while (!shutDownNode) { + // in epoch transition, wait here + if (epochTransitionState) + { + _InterlockedIncrement(&epochTransitionWaitingRequestProcessors); + while (epochTransitionState) + { + _mm_pause(); + } + _InterlockedDecrement(&epochTransitionWaitingRequestProcessors); + } + // try to compute a solution if any is queued and this thread is assigned to compute solution if (solutionProcessorFlags[processorNumber]) { @@ -1438,7 +1485,7 @@ static long long __burn(long long amount) static const m256i& __computor(unsigned short computorIndex) { - return broadcastedComputors.broadcastComputors.computors.publicKeys[computorIndex % NUMBER_OF_COMPUTORS]; + return broadcastedComputors.computors.publicKeys[computorIndex % NUMBER_OF_COMPUTORS]; } static unsigned char __day() @@ -1873,10 +1920,37 @@ static void contractProcessor(void*) static void processTick(unsigned long long processorNumber) { - etalonTick.prevResourceTestingDigest = resourceTestingDigest; - etalonTick.prevSpectrumDigest = spectrumDigests[(SPECTRUM_CAPACITY * 2 - 1) - 1]; - getUniverseDigest(etalonTick.prevUniverseDigest); - getComputerDigest(etalonTick.prevComputerDigest); + if (system.tick > system.initialTick) + { + etalonTick.prevResourceTestingDigest = resourceTestingDigest; + etalonTick.prevSpectrumDigest = spectrumDigests[(SPECTRUM_CAPACITY * 2 - 1) - 1]; + getUniverseDigest(etalonTick.prevUniverseDigest); + getComputerDigest(etalonTick.prevComputerDigest); + } + else if (system.tick == system.initialTick) // the first tick of an epoch + { + // RULE: prevDigests of tick T are the digests of tick T-1, so epoch number doesn't matter. + // For seamless transition, spectrum and universe and computer have been changed after endEpoch event + // (miner rewards, IPO finalizing, contract endEpoch procedures,...) 
+ // Here we still let prevDigests == digests of the last tick of last epoch + // so that lite client can verify the state of spectrum + +#if START_NETWORK_FROM_SCRATCH // only update it if the whole network starts from scratch + // everything starts from files, there is no previous tick of the last epoch + // thus, prevDigests are the digests of the files + if (system.epoch == EPOCH) + { + etalonTick.prevResourceTestingDigest = resourceTestingDigest; + etalonTick.prevSpectrumDigest = spectrumDigests[(SPECTRUM_CAPACITY * 2 - 1) - 1]; + getUniverseDigest(etalonTick.prevUniverseDigest); + getComputerDigest(etalonTick.prevComputerDigest); + } +#endif + } + else + { + // it should never go here + } if (system.tick == system.initialTick) { @@ -1902,11 +1976,14 @@ static void processTick(unsigned long long processorNumber) _mm_pause(); } - ACQUIRE(tickDataLock); - bs->CopyMem(&nextTickData, &tickData[system.tick - system.initialTick], sizeof(TickData)); - RELEASE(tickDataLock); + unsigned int tickIndex = ts.tickToIndexCurrentEpoch(system.tick); + ts.tickData.acquireLock(); + bs->CopyMem(&nextTickData, &ts.tickData[tickIndex], sizeof(TickData)); + ts.tickData.releaseLock(); if (nextTickData.epoch == system.epoch) { + auto* tsCurrentTickTransactionOffsets = ts.tickTransactionOffsets.getByTickIndex(tickIndex); + bs->SetMem(entityPendingTransactionIndices, sizeof(entityPendingTransactionIndices), 0); // reset solution task queue score->resetTaskQueue(); @@ -1915,9 +1992,11 @@ static void processTick(unsigned long long processorNumber) { if (!isZero(nextTickData.transactionDigests[transactionIndex])) { - if (tickTransactionOffsets(system.tick, transactionIndex)) + if (tsCurrentTickTransactionOffsets[transactionIndex]) { - Transaction* transaction = (Transaction*)&tickTransactions[tickTransactionOffsets(system.tick, transactionIndex)]; + Transaction* transaction = ts.tickTransactions(tsCurrentTickTransactionOffsets[transactionIndex]); + ASSERT(transaction->checkValidity()); + ASSERT(transaction->tick == system.tick); const int spectrumIndex = ::spectrumIndex(transaction->sourcePublicKey); if (spectrumIndex >= 0 && !entityPendingTransactionIndices[spectrumIndex]) @@ -1928,7 +2007,7 @@ static void processTick(unsigned long long processorNumber) && transaction->inputSize == 32 && !transaction->inputType) { - const m256i& solution_nonce = *(m256i*)((unsigned char*)transaction + sizeof(Transaction)); + const m256i& solution_nonce = *(m256i*)transaction->inputPtr(); m256i data[2] = { transaction->sourcePublicKey, solution_nonce }; static_assert(sizeof(data) == 2 * 32, "Unexpected array size"); unsigned int flagIndex; @@ -1959,9 +2038,11 @@ static void processTick(unsigned long long processorNumber) { if (!isZero(nextTickData.transactionDigests[transactionIndex])) { - if (tickTransactionOffsets(system.tick, transactionIndex)) + if (tsCurrentTickTransactionOffsets[transactionIndex]) { - Transaction* transaction = (Transaction*)&tickTransactions[tickTransactionOffsets(system.tick, transactionIndex)]; + Transaction* transaction = ts.tickTransactions(tsCurrentTickTransactionOffsets[transactionIndex]); + ASSERT(transaction->checkValidity()); + ASSERT(transaction->tick == system.tick); const int spectrumIndex = ::spectrumIndex(transaction->sourcePublicKey); if (spectrumIndex >= 0 && !entityPendingTransactionIndices[spectrumIndex]) @@ -1998,7 +2079,7 @@ static void processTick(unsigned long long processorNumber) if (!transaction->amount && transaction->inputSize == sizeof(ContractIPOBid)) { - ContractIPOBid* 
contractIPOBid = (ContractIPOBid*)(((unsigned char*)transaction) + sizeof(Transaction)); + ContractIPOBid* contractIPOBid = (ContractIPOBid*)transaction->inputPtr(); if (contractIPOBid->price > 0 && contractIPOBid->price <= MAX_AMOUNT / NUMBER_OF_COMPUTORS && contractIPOBid->quantity > 0 && contractIPOBid->quantity <= NUMBER_OF_COMPUTORS) { @@ -2089,7 +2170,7 @@ static void processTick(unsigned long long processorNumber) currentContract = _mm256_set_epi64x(0, 0, 0, executedContractIndex); bs->SetMem(&executedContractInput, sizeof(executedContractInput), 0); - bs->CopyMem(&executedContractInput, (((unsigned char*)transaction) + sizeof(Transaction)), transaction->inputSize); + bs->CopyMem(&executedContractInput, transaction->inputPtr(), transaction->inputSize); const unsigned long long startTick = __rdtsc(); contractUserProcedures[executedContractIndex][transaction->inputType](contractStates[executedContractIndex], &executedContractInput, &executedContractOutput); contractTotalExecutionTicks[executedContractIndex] += __rdtsc() - startTick; @@ -2104,7 +2185,7 @@ static void processTick(unsigned long long processorNumber) && transaction->inputSize == 32 && !transaction->inputType) { - const m256i & solution_nonce = *(m256i*)((unsigned char*)transaction + sizeof(Transaction)); + const m256i & solution_nonce = *(m256i*)transaction->inputPtr(); m256i data[2] = { transaction->sourcePublicKey, solution_nonce }; static_assert(sizeof(data) == 2 * 32, "Unexpected array size"); unsigned int flagIndex; @@ -2383,19 +2464,20 @@ static void processTick(unsigned long long processorNumber) const Transaction* pendingTransaction = ((Transaction*)&entityPendingTransactions[entityPendingTransactionIndices[index] * MAX_TRANSACTION_SIZE]); if (pendingTransaction->tick == system.tick + TICK_TRANSACTIONS_PUBLICATION_OFFSET) { - const unsigned int transactionSize = sizeof(Transaction) + pendingTransaction->inputSize + SIGNATURE_SIZE; - if (nextTickTransactionOffset + transactionSize <= FIRST_TICK_TRANSACTION_OFFSET + (((unsigned long long)MAX_NUMBER_OF_TICKS_PER_EPOCH) * NUMBER_OF_TRANSACTIONS_PER_TICK * MAX_TRANSACTION_SIZE / TRANSACTION_SPARSENESS)) + ASSERT(pendingTransaction->checkValidity()); + const unsigned int transactionSize = pendingTransaction->totalSize(); + if (ts.nextTickTransactionOffset + transactionSize <= ts.tickTransactions.storageSpaceCurrentEpoch) { - ACQUIRE(tickTransactionsLock); - if (nextTickTransactionOffset + transactionSize <= FIRST_TICK_TRANSACTION_OFFSET + (((unsigned long long)MAX_NUMBER_OF_TICKS_PER_EPOCH) * NUMBER_OF_TRANSACTIONS_PER_TICK * MAX_TRANSACTION_SIZE / TRANSACTION_SPARSENESS)) + ts.tickTransactions.acquireLock(); + if (ts.nextTickTransactionOffset + transactionSize <= ts.tickTransactions.storageSpaceCurrentEpoch) { - tickTransactionOffsets(pendingTransaction->tick, j) = nextTickTransactionOffset; - bs->CopyMem(&tickTransactions[nextTickTransactionOffset], (void*)pendingTransaction, transactionSize); + ts.tickTransactionOffsets(pendingTransaction->tick, j) = ts.nextTickTransactionOffset; + bs->CopyMem(ts.tickTransactions(ts.nextTickTransactionOffset), (void*)pendingTransaction, transactionSize); broadcastedFutureTickData.tickData.transactionDigests[j] = &entityPendingTransactionDigests[entityPendingTransactionIndices[index] * 32ULL]; j++; - nextTickTransactionOffset += transactionSize; + ts.nextTickTransactionOffset += transactionSize; } - RELEASE(tickTransactionsLock); + ts.tickTransactions.releaseLock(); } } @@ -2492,19 +2574,20 @@ static void beginEpoch1of2() 
numberOfOwnComputorIndices = 0; - broadcastedComputors.header.setSize(); - broadcastedComputors.header.setType(BroadcastComputors::type); - broadcastedComputors.broadcastComputors.computors.epoch = 0; + broadcastedComputors.computors.epoch = 0; for (unsigned int i = 0; i < NUMBER_OF_COMPUTORS; i++) { - broadcastedComputors.broadcastComputors.computors.publicKeys[i].setRandomValue(); + broadcastedComputors.computors.publicKeys[i].setRandomValue(); } - bs->SetMem(&broadcastedComputors.broadcastComputors.computors.signature, sizeof(broadcastedComputors.broadcastComputors.computors.signature), 0); + bs->SetMem(&broadcastedComputors.computors.signature, sizeof(broadcastedComputors.computors.signature), 0); - bs->SetMem(ticks, ((unsigned long long)MAX_NUMBER_OF_TICKS_PER_EPOCH) * NUMBER_OF_COMPUTORS * sizeof(Tick), 0); - bs->SetMem(tickData, ((unsigned long long)MAX_NUMBER_OF_TICKS_PER_EPOCH) * sizeof(TickData), 0); - bs->SetMem(tickTransactions, FIRST_TICK_TRANSACTION_OFFSET + (((unsigned long long)MAX_NUMBER_OF_TICKS_PER_EPOCH) * NUMBER_OF_TRANSACTIONS_PER_TICK * MAX_TRANSACTION_SIZE / TRANSACTION_SPARSENESS), 0); - bs->SetMem(tickTransactionOffsetsPtr, ((unsigned long long)MAX_NUMBER_OF_TICKS_PER_EPOCH) * NUMBER_OF_TRANSACTIONS_PER_TICK * sizeof(*tickTransactionOffsetsPtr), 0); +#ifndef NDEBUG + ts.checkStateConsistencyWithAssert(); +#endif + ts.beginEpoch(system.initialTick); +#ifndef NDEBUG + ts.checkStateConsistencyWithAssert(); +#endif for (unsigned int i = 0; i < SPECTRUM_CAPACITY; i++) { @@ -2528,7 +2611,6 @@ static void beginEpoch1of2() bs->SetMem(score, sizeof(*score), 0); score->resetTaskQueue(); - score->loadScoreCache(system.epoch); bs->SetMem(minerSolutionFlags, NUMBER_OF_MINER_SOLUTION_FLAGS / 8, 0); bs->SetMem((void*)minerPublicKeys, sizeof(minerPublicKeys), 0); bs->SetMem((void*)minerScores, sizeof(minerScores), 0); @@ -2554,6 +2636,7 @@ static void beginEpoch1of2() // there are many global variables that were init at declaration, may need to re-check all of them again resourceTestingDigest = 0; + numberOfTransactions = 0; #if LOG_QU_TRANSFERS && LOG_QU_TRANSFERS_TRACK_TRANSFER_ID CurrentTransferId = 0; @@ -2652,20 +2735,21 @@ static void endEpoch() bs->SetMem(transactionCounters, sizeof(transactionCounters), 0); for (unsigned int tick = system.initialTick; tick < system.tick; tick++) { - ACQUIRE(tickDataLock); - if (tickData[tick - system.initialTick].epoch == system.epoch) + ts.tickData.acquireLock(); + TickData& td = ts.tickData.getByTickInCurrentEpoch(tick); + if (td.epoch == system.epoch) { unsigned int numberOfTransactions = 0; for (unsigned int transactionIndex = 0; transactionIndex < NUMBER_OF_TRANSACTIONS_PER_TICK; transactionIndex++) { - if (!isZero(tickData[tick - system.initialTick].transactionDigests[transactionIndex])) + if (!isZero(td.transactionDigests[transactionIndex])) { numberOfTransactions++; } } transactionCounters[tick % NUMBER_OF_COMPUTORS] += revenuePoints[numberOfTransactions]; } - RELEASE(tickDataLock); + ts.tickData.releaseLock(); } unsigned long long sortedTransactionCounters[QUORUM + 1]; bs->SetMem(sortedTransactionCounters, sizeof(sortedTransactionCounters), 0); @@ -2688,10 +2772,10 @@ static void endEpoch() for (unsigned int computorIndex = 0; computorIndex < NUMBER_OF_COMPUTORS; computorIndex++) { const long long revenue = (transactionCounters[computorIndex] >= sortedTransactionCounters[QUORUM - 1]) ? 
(ISSUANCE_RATE / NUMBER_OF_COMPUTORS) : (((ISSUANCE_RATE / NUMBER_OF_COMPUTORS) * ((unsigned long long)transactionCounters[computorIndex])) / sortedTransactionCounters[QUORUM - 1]); - increaseEnergy(broadcastedComputors.broadcastComputors.computors.publicKeys[computorIndex], revenue); + increaseEnergy(broadcastedComputors.computors.publicKeys[computorIndex], revenue); if (revenue) { - const QuTransfer quTransfer = { _mm256_setzero_si256() , broadcastedComputors.broadcastComputors.computors.publicKeys[computorIndex] , revenue }; + const QuTransfer quTransfer = { _mm256_setzero_si256() , broadcastedComputors.computors.publicKeys[computorIndex] , revenue }; logQuTransfer(quTransfer); } arbitratorRevenue -= revenue; @@ -2765,25 +2849,139 @@ static void endEpoch() mainAuxStatus = ((mainAuxStatus & 1) << 1) | ((mainAuxStatus & 2) >> 1); } + +#if !START_NETWORK_FROM_SCRATCH + +static bool haveSamePrevDigestsAndTime(const Tick& A, const Tick& B) +{ + return A.prevComputerDigest == B.prevComputerDigest && + A.prevResourceTestingDigest == B.prevResourceTestingDigest && + A.prevSpectrumDigest == B.prevSpectrumDigest && + A.prevUniverseDigest == B.prevUniverseDigest && + *((unsigned long long*) & A.millisecond) == *((unsigned long long*) & B.millisecond); +} + +// Try to pull quorum tick and update the etalonTick to correct timeStamp and digests. +// A corner case: this function can't get initial time+digests if more than 451 ID failed to switch the epoch. +// In this case, START_NETWORK_FROM_SCRATCH must be 1 to boot the network from scratch again, ie: first tick timestamp: 2022-04-13 12:00:00 UTC. +// On the first tick of an epoch after seamless transition, it contains the prevDigests of the last tick of last epoch, which can't be obtained +// by a node that starts from scratch. +static void initializeFirstTick() +{ + unsigned int uniqueVoteIndex[NUMBER_OF_COMPUTORS]; + int uniqueVoteCount[NUMBER_OF_COMPUTORS]; + int uniqueCount = 0; + const unsigned int firstTickIndex = ts.tickToIndexCurrentEpoch(system.initialTick); + while (!shutDownNode) + { + if (broadcastedComputors.computors.epoch == system.epoch) + { + // group ticks with same digest+timestamp and count votes (how many are in each group) + setMem(uniqueVoteIndex, sizeof(uniqueVoteIndex), 0); + setMem(uniqueVoteCount, sizeof(uniqueVoteCount), 0); + uniqueCount = 0; + + const Tick* tsCompTicks = ts.ticks.getByTickIndex(firstTickIndex); + for (unsigned int i = 0; i < NUMBER_OF_COMPUTORS; i++) + { + ts.ticks.acquireLock(i); + const Tick* tick = &tsCompTicks[i]; + if (tick->epoch == system.epoch) + { + // compare tick with all ticks with unique digest+timestamp that we have found before + int unique_idx = -1; + for (int j = 0; j < uniqueCount; j++) { + ts.ticks.acquireLock(uniqueVoteIndex[j]); + const Tick* unique = &tsCompTicks[uniqueVoteIndex[j]]; + if (haveSamePrevDigestsAndTime(*unique , *tick)) + { + unique_idx = j; + ts.ticks.releaseLock(uniqueVoteIndex[j]); + break; + } + ts.ticks.releaseLock(uniqueVoteIndex[j]); + } + + if (unique_idx == -1) + { + // tick is not in array of unique votes -> add and init counter to 1 + uniqueVoteIndex[uniqueCount] = i; + uniqueVoteCount[uniqueCount] = 1; + uniqueCount++; + } + else + { + // tick already is in array of unique ticks -> increment counter + uniqueVoteCount[unique_idx]++; + } + } + + ts.ticks.releaseLock(i); + } + + // if we have groups... 
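// ------------------------------------------------------------------------------
// Editor's note (not part of the patch): with NUMBER_OF_COMPUTORS = 676 and
// QUORUM = 451 (the values referenced elsewhere in this patch), the acceptance
// threshold used below, NUMBER_OF_COMPUTORS - QUORUM, works out to
//   676 - 451 = 225
// votes, i.e. roughly one third of all computors. The first tick's timestamp
// and prev digests are therefore adopted once at least 225 identical votes
// have been collected.
// ------------------------------------------------------------------------------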
+ if (uniqueCount > 0) + { + // find group with most votes + int maxUniqueVoteCountIndex = 0; + for (int i = 1; i < uniqueCount; i++) + { + if (uniqueVoteCount[i] > uniqueVoteCount[maxUniqueVoteCountIndex]) + { + maxUniqueVoteCountIndex = i; + } + } + + int numberOfVote = uniqueVoteCount[maxUniqueVoteCountIndex]; + + // accept the tick with most votes if it has more than 1/3 of quorum + if (numberOfVote >= NUMBER_OF_COMPUTORS - QUORUM) + { + ts.ticks.acquireLock(uniqueVoteIndex[maxUniqueVoteCountIndex]); + const Tick* unique = &tsCompTicks[uniqueVoteIndex[maxUniqueVoteCountIndex]]; + ts.ticks.releaseLock(uniqueVoteIndex[maxUniqueVoteCountIndex]); + *((unsigned long long*) & etalonTick.millisecond) = *((unsigned long long*) & unique->millisecond); + etalonTick.prevComputerDigest = unique->prevComputerDigest; + etalonTick.prevResourceTestingDigest = unique->prevResourceTestingDigest; + etalonTick.prevSpectrumDigest = unique->prevSpectrumDigest; + etalonTick.prevUniverseDigest = unique->prevUniverseDigest; + return; + } + } + } + _mm_pause(); + } +} +#endif + static void tickProcessor(void*) { enableAVX(); unsigned long long processorNumber; mpServicesProtocol->WhoAmI(mpServicesProtocol, &processorNumber); +#if !START_NETWORK_FROM_SCRATCH + initializeFirstTick(); +#endif + unsigned int latestProcessedTick = 0; while (!shutDownNode) { const unsigned long long curTimeTick = __rdtsc(); + const unsigned int nextTick = system.tick + 1; - if (broadcastedComputors.broadcastComputors.computors.epoch == system.epoch) + if (broadcastedComputors.computors.epoch == system.epoch + && ts.tickInCurrentEpochStorage(nextTick)) { + const unsigned int currentTickIndex = ts.tickToIndexCurrentEpoch(system.tick); + const unsigned int nextTickIndex = ts.tickToIndexCurrentEpoch(nextTick); + { - const unsigned int baseOffset = (system.tick + 1 - system.initialTick) * NUMBER_OF_COMPUTORS; + const Tick* tsCompTicks = ts.ticks.getByTickIndex(nextTickIndex); unsigned int futureTickTotalNumberOfComputors = 0; for (unsigned int i = 0; i < NUMBER_OF_COMPUTORS; i++) { - if (ticks[baseOffset + i].epoch == system.epoch) + if (tsCompTicks[i].epoch == system.epoch) { futureTickTotalNumberOfComputors++; } @@ -2791,7 +2989,6 @@ static void tickProcessor(void*) ::futureTickTotalNumberOfComputors = futureTickTotalNumberOfComputors; } - if (system.tick - system.initialTick < MAX_NUMBER_OF_TICKS_PER_EPOCH - 1) { if (system.tick > latestProcessedTick) { @@ -2802,24 +2999,24 @@ static void tickProcessor(void*) if (futureTickTotalNumberOfComputors > NUMBER_OF_COMPUTORS - QUORUM) { - const unsigned int baseOffset = (system.tick + 1 - system.initialTick) * NUMBER_OF_COMPUTORS; + const Tick* tsCompTicks = ts.ticks.getByTickIndex(nextTickIndex); unsigned int numberOfEmptyNextTickTransactionDigest = 0; unsigned int numberOfUniqueNextTickTransactionDigests = 0; for (unsigned int i = 0; i < NUMBER_OF_COMPUTORS; i++) { - if (ticks[baseOffset + i].epoch == system.epoch) + if (tsCompTicks[i].epoch == system.epoch) { unsigned int j; for (j = 0; j < numberOfUniqueNextTickTransactionDigests; j++) { - if (ticks[baseOffset + i].transactionDigest == uniqueNextTickTransactionDigests[j]) + if (tsCompTicks[i].transactionDigest == uniqueNextTickTransactionDigests[j]) { break; } } if (j == numberOfUniqueNextTickTransactionDigests) { - uniqueNextTickTransactionDigests[numberOfUniqueNextTickTransactionDigests] = ticks[baseOffset + i].transactionDigest; + uniqueNextTickTransactionDigests[numberOfUniqueNextTickTransactionDigests] = 
tsCompTicks[i].transactionDigest; uniqueNextTickTransactionDigestCounters[numberOfUniqueNextTickTransactionDigests++] = 1; } else @@ -2827,7 +3024,7 @@ static void tickProcessor(void*) uniqueNextTickTransactionDigestCounters[j]++; } - if (isZero(ticks[baseOffset + i].transactionDigest)) + if (isZero(tsCompTicks[i].transactionDigest)) { numberOfEmptyNextTickTransactionDigest++; } @@ -2852,6 +3049,7 @@ static void tickProcessor(void*) if (numberOfEmptyNextTickTransactionDigest > NUMBER_OF_COMPUTORS - QUORUM || uniqueNextTickTransactionDigestCounters[mostPopularUniqueNextTickTransactionDigestIndex] + (NUMBER_OF_COMPUTORS - totalUniqueNextTickTransactionDigestCounter) < QUORUM) { + // Create empty tick targetNextTickDataDigest = _mm256_setzero_si256(); targetNextTickDataDigestIsKnown = true; } @@ -2860,24 +3058,24 @@ static void tickProcessor(void*) if (!targetNextTickDataDigestIsKnown) { - const unsigned int baseOffset = (system.tick - system.initialTick) * NUMBER_OF_COMPUTORS; + const Tick* tsCompTicks = ts.ticks.getByTickIndex(currentTickIndex); unsigned int numberOfEmptyNextTickTransactionDigest = 0; unsigned int numberOfUniqueNextTickTransactionDigests = 0; for (unsigned int i = 0; i < NUMBER_OF_COMPUTORS; i++) { - if (ticks[baseOffset + i].epoch == system.epoch) + if (tsCompTicks[i].epoch == system.epoch) { unsigned int j; for (j = 0; j < numberOfUniqueNextTickTransactionDigests; j++) { - if (ticks[baseOffset + i].expectedNextTickTransactionDigest == uniqueNextTickTransactionDigests[j]) + if (tsCompTicks[i].expectedNextTickTransactionDigest == uniqueNextTickTransactionDigests[j]) { break; } } if (j == numberOfUniqueNextTickTransactionDigests) { - uniqueNextTickTransactionDigests[numberOfUniqueNextTickTransactionDigests] = ticks[baseOffset + i].expectedNextTickTransactionDigest; + uniqueNextTickTransactionDigests[numberOfUniqueNextTickTransactionDigests] = tsCompTicks[i].expectedNextTickTransactionDigest; uniqueNextTickTransactionDigestCounters[numberOfUniqueNextTickTransactionDigests++] = 1; } else @@ -2885,7 +3083,7 @@ static void tickProcessor(void*) uniqueNextTickTransactionDigestCounters[j]++; } - if (isZero(ticks[baseOffset + i].expectedNextTickTransactionDigest)) + if (isZero(tsCompTicks[i].expectedNextTickTransactionDigest)) { numberOfEmptyNextTickTransactionDigest++; } @@ -2919,9 +3117,9 @@ static void tickProcessor(void*) } } - ACQUIRE(tickDataLock); - bs->CopyMem(&nextTickData, &tickData[system.tick + 1 - system.initialTick], sizeof(TickData)); - RELEASE(tickDataLock); + ts.tickData.acquireLock(); + bs->CopyMem(&nextTickData, &ts.tickData[nextTickIndex], sizeof(TickData)); + ts.tickData.releaseLock(); if (nextTickData.epoch == system.epoch) { m256i timelockPreimage[3]; @@ -2932,9 +3130,9 @@ static void tickProcessor(void*) KangarooTwelve(timelockPreimage, sizeof(timelockPreimage), &timelock, sizeof(timelock)); if (nextTickData.timelock != timelock) { - ACQUIRE(tickDataLock); - tickData[system.tick + 1 - system.initialTick].epoch = 0; - RELEASE(tickDataLock); + ts.tickData.acquireLock(); + ts.tickData[nextTickIndex].epoch = 0; + ts.tickData.releaseLock(); nextTickData.epoch = 0; } } @@ -2957,14 +3155,16 @@ static void tickProcessor(void*) { if (isZero(targetNextTickDataDigest)) { - ACQUIRE(tickDataLock); - tickData[system.tick + 1 - system.initialTick].epoch = 0; - RELEASE(tickDataLock); + // Empty tick + ts.tickData.acquireLock(); + ts.tickData[nextTickIndex].epoch = 0; + ts.tickData.releaseLock(); nextTickData.epoch = 0; tickDataSuits = true; } else { + // Non-empty tick if 
(nextTickData.epoch != system.epoch) { tickDataSuits = false; @@ -2976,20 +3176,34 @@ static void tickProcessor(void*) } } } + + // operator opt to force this node to switch to new epoch + // this can fix the problem of weak nodes getting stuck and can't automatically switch to new epoch + // due to lack of data (for detail, need to investigate deeper) + if (forceSwitchEpoch) + { + nextTickData.epoch = 0; + setMem(nextTickData.transactionDigests, NUMBER_OF_TRANSACTIONS_PER_TICK * sizeof(m256i), 0); + // first and second tick of an epoch are always empty tick + targetNextTickDataDigest = _mm256_setzero_si256(); + targetNextTickDataDigestIsKnown = true; + tickDataSuits = true; + } + if (!tickDataSuits) { unsigned int tickTotalNumberOfComputors = 0; - const unsigned int baseOffset = (system.tick - system.initialTick) * NUMBER_OF_COMPUTORS; + const Tick* tsCompTicks = ts.ticks.getByTickIndex(currentTickIndex); for (unsigned int i = 0; i < NUMBER_OF_COMPUTORS; i++) { - ACQUIRE(tickLocks[i]); + ts.ticks.acquireLock(i); - if (ticks[baseOffset + i].epoch == system.epoch) + if (tsCompTicks[i].epoch == system.epoch) { tickTotalNumberOfComputors++; } - RELEASE(tickLocks[i]); + ts.ticks.releaseLock(i); } ::tickNumberOfComputors = 0; ::tickTotalNumberOfComputors = tickTotalNumberOfComputors; @@ -3005,18 +3219,22 @@ static void tickProcessor(void*) bs->SetMem(requestedTickTransactions.requestedTickTransactions.transactionFlags, sizeof(requestedTickTransactions.requestedTickTransactions.transactionFlags), 0); unsigned long long unknownTransactions[NUMBER_OF_TRANSACTIONS_PER_TICK / 64]; bs->SetMem(unknownTransactions, sizeof(unknownTransactions), 0); + const auto* tsNextTickTransactionOffsets = ts.tickTransactionOffsets.getByTickIndex(nextTickIndex); for (unsigned int i = 0; i < NUMBER_OF_TRANSACTIONS_PER_TICK; i++) { if (!isZero(nextTickData.transactionDigests[i])) { numberOfNextTickTransactions++; - ACQUIRE(tickTransactionsLock); - if (tickTransactionOffsets(system.tick + 1, i)) + ts.tickTransactions.acquireLock(); + + if (tsNextTickTransactionOffsets[i]) { - const Transaction* transaction = (Transaction*)&tickTransactions[tickTransactionOffsets(system.tick + 1, i)]; + const Transaction* transaction = ts.tickTransactions(tsNextTickTransactionOffsets[i]); + ASSERT(transaction->checkValidity()); + ASSERT(transaction->tick == nextTick); unsigned char digest[32]; - KangarooTwelve(transaction, sizeof(Transaction) + transaction->inputSize + SIGNATURE_SIZE, digest, sizeof(digest)); + KangarooTwelve(transaction, transaction->totalSize(), digest, sizeof(digest)); if (digest == nextTickData.transactionDigests[i]) { numberOfKnownNextTickTransactions++; @@ -3026,19 +3244,19 @@ static void tickProcessor(void*) unknownTransactions[i >> 6] |= (1ULL << (i & 63)); } } - RELEASE(tickTransactionsLock); + ts.tickTransactions.releaseLock(); } } if (numberOfKnownNextTickTransactions != numberOfNextTickTransactions) { - const unsigned int nextTick = system.tick + 1; for (unsigned int i = 0; i < SPECTRUM_CAPACITY; i++) { Transaction* pendingTransaction = (Transaction*)&entityPendingTransactions[i * MAX_TRANSACTION_SIZE]; if (pendingTransaction->tick == nextTick) { ACQUIRE(entityPendingTransactionsLock); - + ASSERT(pendingTransaction->checkValidity()); + auto* tsPendingTransactionOffsets = ts.tickTransactionOffsets.getByTickInCurrentEpoch(pendingTransaction->tick); for (unsigned int j = 0; j < NUMBER_OF_TRANSACTIONS_PER_TICK; j++) { if (unknownTransactions[j >> 6] & (1ULL << (j & 63))) @@ -3046,21 +3264,21 @@ static void 
tickProcessor(void*) if (&entityPendingTransactionDigests[i * 32ULL] == nextTickData.transactionDigests[j]) { unsigned char transactionBuffer[MAX_TRANSACTION_SIZE]; - const unsigned int transactionSize = sizeof(Transaction) + pendingTransaction->inputSize + SIGNATURE_SIZE; + const unsigned int transactionSize = pendingTransaction->totalSize(); bs->CopyMem(transactionBuffer, (void*)pendingTransaction, transactionSize); pendingTransaction = (Transaction*)transactionBuffer; - ACQUIRE(tickTransactionsLock); - if (!tickTransactionOffsets(pendingTransaction->tick, j)) + ts.tickTransactions.acquireLock(); + if (!tsPendingTransactionOffsets[j]) { - if (nextTickTransactionOffset + transactionSize <= FIRST_TICK_TRANSACTION_OFFSET + (((unsigned long long)MAX_NUMBER_OF_TICKS_PER_EPOCH) * NUMBER_OF_TRANSACTIONS_PER_TICK * MAX_TRANSACTION_SIZE / TRANSACTION_SPARSENESS)) + if (ts.nextTickTransactionOffset + transactionSize <= ts.tickTransactions.storageSpaceCurrentEpoch) { - tickTransactionOffsets(pendingTransaction->tick, j) = nextTickTransactionOffset; - bs->CopyMem(&tickTransactions[nextTickTransactionOffset], pendingTransaction, transactionSize); - nextTickTransactionOffset += transactionSize; + tsPendingTransactionOffsets[j] = ts.nextTickTransactionOffset; + bs->CopyMem(ts.tickTransactions(ts.nextTickTransactionOffset), pendingTransaction, transactionSize); + ts.nextTickTransactionOffset += transactionSize; } } - RELEASE(tickTransactionsLock); + ts.tickTransactions.releaseLock(); numberOfKnownNextTickTransactions++; unknownTransactions[j >> 6] &= ~(1ULL << (j & 63)); @@ -3088,9 +3306,9 @@ static void tickProcessor(void*) if (!targetNextTickDataDigestIsKnown && __rdtsc() - tickTicks[sizeof(tickTicks) / sizeof(tickTicks[0]) - 1] > TARGET_TICK_DURATION * 5 * frequency / 1000) { - ACQUIRE(tickDataLock); - tickData[system.tick + 1 - system.initialTick].epoch = 0; - RELEASE(tickDataLock); + ts.tickData.acquireLock(); + ts.tickData[nextTickIndex].epoch = 0; + ts.tickData.releaseLock(); nextTickData.epoch = 0; numberOfNextTickTransactions = 0; @@ -3100,15 +3318,15 @@ static void tickProcessor(void*) if (numberOfKnownNextTickTransactions != numberOfNextTickTransactions) { - requestedTickTransactions.requestedTickTransactions.tick = system.tick + 1; + requestedTickTransactions.requestedTickTransactions.tick = nextTick; } else { requestedTickTransactions.requestedTickTransactions.tick = 0; - if (tickData[system.tick - system.initialTick].epoch == system.epoch) + if (ts.tickData[currentTickIndex].epoch == system.epoch) { - KangarooTwelve(&tickData[system.tick - system.initialTick], sizeof(TickData), &etalonTick.transactionDigest, 32); + KangarooTwelve(&ts.tickData[currentTickIndex], sizeof(TickData), &etalonTick.transactionDigest, 32); } else { @@ -3162,14 +3380,14 @@ static void tickProcessor(void*) } } - const unsigned int baseOffset = (system.tick - system.initialTick) * NUMBER_OF_COMPUTORS; + const Tick* tsCompTicks = ts.ticks.getByTickIndex(currentTickIndex); unsigned int tickNumberOfComputors = 0, tickTotalNumberOfComputors = 0; for (unsigned int i = 0; i < NUMBER_OF_COMPUTORS; i++) { - ACQUIRE(tickLocks[i]); + ts.ticks.acquireLock(i); - const Tick* tick = &ticks[baseOffset + i]; + const Tick* tick = &tsCompTicks[i]; if (tick->epoch == system.epoch) { tickTotalNumberOfComputors++; @@ -3182,7 +3400,7 @@ static void tickProcessor(void*) { m256i saltedData[2]; m256i saltedDigest; - saltedData[0] = broadcastedComputors.broadcastComputors.computors.publicKeys[tick->computorIndex]; + saltedData[0] = 
broadcastedComputors.computors.publicKeys[tick->computorIndex]; saltedData[1].m256i_u64[0] = resourceTestingDigest; KangarooTwelve(saltedData, 32 + sizeof(resourceTestingDigest), &saltedDigest, sizeof(resourceTestingDigest)); if (tick->saltedResourceTestingDigest == saltedDigest.m256i_u64[0]) @@ -3207,7 +3425,7 @@ static void tickProcessor(void*) } } - RELEASE(tickLocks[i]); + ts.ticks.releaseLock(i); } ::tickNumberOfComputors = tickNumberOfComputors; ::tickTotalNumberOfComputors = tickTotalNumberOfComputors; @@ -3229,9 +3447,10 @@ static void tickProcessor(void*) tickDataSuits = false; if (isZero(targetNextTickDataDigest)) { - ACQUIRE(tickDataLock); - tickData[system.tick + 1 - system.initialTick].epoch = 0; - RELEASE(tickDataLock); + // Empty tick + ts.tickData.acquireLock(); + ts.tickData[nextTickIndex].epoch = 0; + ts.tickData.releaseLock(); nextTickData.epoch = 0; tickDataSuits = true; } @@ -3253,28 +3472,32 @@ static void tickProcessor(void*) if ((dayIndex == 738570 + system.epoch * 7 && etalonTick.hour >= 12) || dayIndex > 738570 + system.epoch * 7) { + // start seamless epoch transition epochTransitionState = 1; + forceSwitchEpoch = false; } else { + // update etalonTick etalonTick.tick++; - ACQUIRE(tickDataLock); - if (tickData[system.tick - system.initialTick].epoch == system.epoch - && (tickData[system.tick - system.initialTick].year > etalonTick.year - || (tickData[system.tick - system.initialTick].year == etalonTick.year && (tickData[system.tick - system.initialTick].month > etalonTick.month - || (tickData[system.tick - system.initialTick].month == etalonTick.month && (tickData[system.tick - system.initialTick].day > etalonTick.day - || (tickData[system.tick - system.initialTick].day == etalonTick.day && (tickData[system.tick - system.initialTick].hour > etalonTick.hour - || (tickData[system.tick - system.initialTick].hour == etalonTick.hour && (tickData[system.tick - system.initialTick].minute > etalonTick.minute - || (tickData[system.tick - system.initialTick].minute == etalonTick.minute && (tickData[system.tick - system.initialTick].second > etalonTick.second - || (tickData[system.tick - system.initialTick].second == etalonTick.second && tickData[system.tick - system.initialTick].millisecond > etalonTick.millisecond))))))))))))) + ts.tickData.acquireLock(); + const TickData& td = ts.tickData[currentTickIndex]; + if (td.epoch == system.epoch + && (td.year > etalonTick.year + || (td.year == etalonTick.year && (td.month > etalonTick.month + || (td.month == etalonTick.month && (td.day > etalonTick.day + || (td.day == etalonTick.day && (td.hour > etalonTick.hour + || (td.hour == etalonTick.hour && (td.minute > etalonTick.minute + || (td.minute == etalonTick.minute && (td.second > etalonTick.second + || (td.second == etalonTick.second && td.millisecond > etalonTick.millisecond))))))))))))) { - etalonTick.millisecond = tickData[system.tick - system.initialTick].millisecond; - etalonTick.second = tickData[system.tick - system.initialTick].second; - etalonTick.minute = tickData[system.tick - system.initialTick].minute; - etalonTick.hour = tickData[system.tick - system.initialTick].hour; - etalonTick.day = tickData[system.tick - system.initialTick].day; - etalonTick.month = tickData[system.tick - system.initialTick].month; - etalonTick.year = tickData[system.tick - system.initialTick].year; + etalonTick.millisecond = td.millisecond; + etalonTick.second = td.second; + etalonTick.minute = td.minute; + etalonTick.hour = td.hour; + etalonTick.day = td.day; + etalonTick.month = td.month; 
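The parenthesised comparison chain above copies the tick data's time into etalonTick only when it is strictly later, comparing year down to millisecond (the year assignment that completes this block follows right after this note). The chain is simply a lexicographic comparison; a hypothetical helper expressing the same ordering with std::tie, illustrative only since the core avoids the standard library:

    #include <tuple>

    // Simplified time fields; the exact field types in TickData/Tick may differ.
    struct TimeFields
    {
        unsigned short millisecond;
        unsigned char second, minute, hour, day, month, year;
    };

    // True if a is strictly later than b; year is most significant, millisecond least.
    static bool isLater(const TimeFields& a, const TimeFields& b)
    {
        return std::tie(a.year, a.month, a.day, a.hour, a.minute, a.second, a.millisecond)
             > std::tie(b.year, b.month, b.day, b.hour, b.minute, b.second, b.millisecond);
    }

    int main()
    {
        TimeFields a{0, 0, 0, 12, 13, 4, 22};   // 2022-04-13 12:00:00.000
        TimeFields b{1, 0, 0, 12, 13, 4, 22};   // one millisecond later
        return isLater(b, a) ? 0 : 1;           // returns 0: b is later than a
    }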
+ etalonTick.year = td.year; } else { @@ -3310,7 +3533,7 @@ static void tickProcessor(void*) } } } - RELEASE(tickDataLock); + ts.tickData.releaseLock(); } system.tick++; @@ -3318,6 +3541,25 @@ static void tickProcessor(void*) if (epochTransitionState == 1) { // seamless epoch transistion +#ifndef NDEBUG + addDebugMessage(L"Starting epoch transition"); + { + CHAR16 dbgMsgBuf[300]; + CHAR16 digestChars[60 + 1]; + getIdentity(score->initialRandomSeed.m256i_u8, digestChars, true); + setText(dbgMsgBuf, L"Old mining seed: "); + appendText(dbgMsgBuf, digestChars); + addDebugMessage(dbgMsgBuf); + } +#endif + + // wait until all request processors are in waiting state + while (epochTransitionWaitingRequestProcessors < nRequestProcessorIDs) + { + _mm_pause(); + } + + // end current epoch endEpoch(); // instruct main loop to save system and wait until it is done @@ -3326,30 +3568,48 @@ static void tickProcessor(void*) { _mm_pause(); } + epochTransitionState = 2; - // a temporary fix to set correct filenames because complete beginEpoch1of2() and beginEpoch2of2() is commented out - SPECTRUM_FILE_NAME[sizeof(SPECTRUM_FILE_NAME) / sizeof(SPECTRUM_FILE_NAME[0]) - 4] = system.epoch / 100 + L'0'; - SPECTRUM_FILE_NAME[sizeof(SPECTRUM_FILE_NAME) / sizeof(SPECTRUM_FILE_NAME[0]) - 3] = (system.epoch % 100) / 10 + L'0'; - SPECTRUM_FILE_NAME[sizeof(SPECTRUM_FILE_NAME) / sizeof(SPECTRUM_FILE_NAME[0]) - 2] = system.epoch % 10 + L'0'; - - UNIVERSE_FILE_NAME[sizeof(UNIVERSE_FILE_NAME) / sizeof(UNIVERSE_FILE_NAME[0]) - 4] = system.epoch / 100 + L'0'; - UNIVERSE_FILE_NAME[sizeof(UNIVERSE_FILE_NAME) / sizeof(UNIVERSE_FILE_NAME[0]) - 3] = (system.epoch % 100) / 10 + L'0'; - UNIVERSE_FILE_NAME[sizeof(UNIVERSE_FILE_NAME) / sizeof(UNIVERSE_FILE_NAME[0]) - 2] = system.epoch % 10 + L'0'; - - CONTRACT_FILE_NAME[sizeof(CONTRACT_FILE_NAME) / sizeof(CONTRACT_FILE_NAME[0]) - 4] = system.epoch / 100 + L'0'; - CONTRACT_FILE_NAME[sizeof(CONTRACT_FILE_NAME) / sizeof(CONTRACT_FILE_NAME[0]) - 3] = (system.epoch % 100) / 10 + L'0'; - CONTRACT_FILE_NAME[sizeof(CONTRACT_FILE_NAME) / sizeof(CONTRACT_FILE_NAME[0]) - 2] = system.epoch % 10 + L'0'; - - /* +#ifndef NDEBUG + addDebugMessage(L"Calling beginEpoch1of2()"); // TODO: remove after testing +#endif beginEpoch1of2(); beginEpoch2of2(); - */ +#ifndef NDEBUG + addDebugMessage(L"Finished beginEpoch2of2()"); // TODO: remove after testing + { + CHAR16 dbgMsgBuf[300]; + CHAR16 digestChars[60 + 1]; + getIdentity(score->initialRandomSeed.m256i_u8, digestChars, true); + setText(dbgMsgBuf, L"New mining seed: "); + appendText(dbgMsgBuf, digestChars); + addDebugMessage(dbgMsgBuf); + } +#endif + // Some debug checks that we are ready for the next epoch + ASSERT(system.numberOfSolutions == 0); + ASSERT(numberOfMiners == NUMBER_OF_COMPUTORS); + ASSERT(isZero(system.solutions, sizeof(system.solutions))); + ASSERT(isZero(solutionPublicationTicks, sizeof(solutionPublicationTicks))); + ASSERT(isZero(minerSolutionFlags, NUMBER_OF_MINER_SOLUTION_FLAGS / 8)); + ASSERT(isZero((void*)minerScores, sizeof(minerScores))); + ASSERT(isZero((void*)minerPublicKeys, sizeof(minerPublicKeys))); + ASSERT(isZero(competitorScores, sizeof(competitorScores))); + ASSERT(isZero(competitorPublicKeys, sizeof(competitorPublicKeys))); + ASSERT(isZero(competitorComputorStatuses, sizeof(competitorComputorStatuses))); + ASSERT(minimumComputorScore == 0 && minimumCandidateScore == 0); + + // instruct main loop to save files and wait until it is done spectrumMustBeSaved = true; universeMustBeSaved = true; computerMustBeSaved = true; + 
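The wait loop right after this note blocks the tick processor until the main loop has actually written the spectrum, universe and computer files; to make that work, the main loop (further down in this patch) now clears each *MustBeSaved flag only after the corresponding save function has returned. A minimal sketch of this flag handshake, using standard atomics in place of the node's volatile flags and _mm_pause(), with illustrative names not taken from the patch:

    #include <atomic>
    #include <thread>

    // Minimal model of the *MustBeSaved handshake: the requester may only continue
    // once the worker has finished the save, so the worker clears the flag last.
    std::atomic<bool> mustBeSaved{false};
    std::atomic<bool> done{false};

    void workerLoop()                        // role of the main loop
    {
        while (!done.load())
        {
            if (mustBeSaved.load())
            {
                // ... perform the actual save here ...
                mustBeSaved.store(false);    // signal completion only afterwards
            }
            std::this_thread::yield();
        }
    }

    int main()
    {
        std::thread worker(workerLoop);
        mustBeSaved.store(true);             // role of the tick processor: request a save
        while (mustBeSaved.load())           // block until the save has really happened
            std::this_thread::yield();
        done.store(true);
        worker.join();
        return 0;
    }

Clearing the flag before saving, as the old code did, would let the waiting side continue while the file is still being written.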
while (computerMustBeSaved || universeMustBeSaved || spectrumMustBeSaved) + { + _mm_pause(); + } - //update etalon tick: + // update etalon tick etalonTick.epoch++; etalonTick.tick++; etalonTick.saltedSpectrumDigest = spectrumDigests[(SPECTRUM_CAPACITY * 2 - 1) - 1]; @@ -3357,7 +3617,12 @@ static void tickProcessor(void*) getComputerDigest(etalonTick.saltedComputerDigest); epochTransitionState = 0; + +#ifndef NDEBUG + addDebugMessage(L"Finished epoch transition"); +#endif } + ASSERT(epochTransitionWaitingRequestProcessors >= 0 && epochTransitionWaitingRequestProcessors <= nRequestProcessorIDs); ::tickNumberOfComputors = 0; ::tickTotalNumberOfComputors = 0; @@ -3463,7 +3728,8 @@ static void saveSystem() logToConsole(L"Saving system file..."); const unsigned long long beginningTick = __rdtsc(); - long long savedSize = save(SYSTEM_FILE_NAME, sizeof(system), (unsigned char*)&system); + CHAR16* fn = (epochTransitionState == 1) ? SYSTEM_END_OF_EPOCH_FILE_NAME : SYSTEM_FILE_NAME; + long long savedSize = save(fn, sizeof(system), (unsigned char*)&system); if (savedSize == sizeof(system)) { setNumber(message, savedSize, TRUE); @@ -3515,7 +3781,6 @@ static bool initialize() initTimeStampCounter(); - bs->SetMem((void*)tickLocks, sizeof(tickLocks), 0); bs->SetMem(&tickTicks, sizeof(tickTicks), 0); bs->SetMem(processors, sizeof(processors), 0); @@ -3537,27 +3802,15 @@ static bool initialize() EFI_STATUS status; { - if (status = bs->AllocatePool(EfiRuntimeServicesData, ((unsigned long long)MAX_NUMBER_OF_TICKS_PER_EPOCH) * NUMBER_OF_COMPUTORS * sizeof(Tick), (void**)&ticks)) - { - logStatusToConsole(L"EFI_BOOT_SERVICES.AllocatePool() fails", status, __LINE__); - + if (!ts.init()) return false; - } - if ((status = bs->AllocatePool(EfiRuntimeServicesData, ((unsigned long long)MAX_NUMBER_OF_TICKS_PER_EPOCH) * sizeof(TickData), (void**)&tickData)) - || (status = bs->AllocatePool(EfiRuntimeServicesData, FIRST_TICK_TRANSACTION_OFFSET + (((unsigned long long)MAX_NUMBER_OF_TICKS_PER_EPOCH) * NUMBER_OF_TRANSACTIONS_PER_TICK * MAX_TRANSACTION_SIZE / TRANSACTION_SPARSENESS), (void**)&tickTransactions)) - || (status = bs->AllocatePool(EfiRuntimeServicesData, SPECTRUM_CAPACITY * MAX_TRANSACTION_SIZE, (void**)&entityPendingTransactions)) + if ((status = bs->AllocatePool(EfiRuntimeServicesData, SPECTRUM_CAPACITY * MAX_TRANSACTION_SIZE, (void**)&entityPendingTransactions)) || (status = bs->AllocatePool(EfiRuntimeServicesData, SPECTRUM_CAPACITY * 32ULL, (void**)&entityPendingTransactionDigests))) { logStatusToConsole(L"EFI_BOOT_SERVICES.AllocatePool() fails", status, __LINE__); return false; } - if (status = bs->AllocatePool(EfiRuntimeServicesData, ((unsigned long long)MAX_NUMBER_OF_TICKS_PER_EPOCH) * NUMBER_OF_TRANSACTIONS_PER_TICK * sizeof(*tickTransactionOffsetsPtr), (void**)&tickTransactionOffsetsPtr)) - { - logStatusToConsole(L"EFI_BOOT_SERVICES.AllocatePool() fails", status, __LINE__); - - return false; - } if (status = bs->AllocatePool(EfiRuntimeServicesData, SPECTRUM_CAPACITY * sizeof(::Entity) >= ASSETS_CAPACITY * sizeof(Asset) ? 
SPECTRUM_CAPACITY * sizeof(::Entity) : ASSETS_CAPACITY * sizeof(Asset), (void**)&reorgBuffer)) { @@ -3748,6 +4001,8 @@ static bool initialize() } } + score->loadScoreCache(system.epoch); + logToConsole(L"Allocating buffers ..."); if ((status = bs->AllocatePool(EfiRuntimeServicesData, 536870912, (void**)&dejavu0)) || (status = bs->AllocatePool(EfiRuntimeServicesData, 536870912, (void**)&dejavu1))) @@ -3876,22 +4131,7 @@ static void deinitialize() { bs->FreePool(entityPendingTransactions); } - if (tickTransactions) - { - bs->FreePool(tickTransactions); - } - if (tickData) - { - bs->FreePool(tickData); - } - if (ticks) - { - bs->FreePool(ticks); - } - if (tickTransactionOffsetsPtr) - { - bs->FreePool(tickTransactionOffsetsPtr); - } + ts.deinit(); if (score) { @@ -4084,30 +4324,31 @@ static void logInfo() appendNumber(message, numberOfNextTickTransactions, TRUE); } appendText(message, L" next tick transactions are known. "); - if (tickData[system.tick + 1 - system.initialTick].epoch == system.epoch) + const TickData& td = ts.tickData.getByTickInCurrentEpoch(system.tick + 1); + if (td.epoch == system.epoch) { appendText(message, L"("); - appendNumber(message, tickData[system.tick + 1 - system.initialTick].year / 10, FALSE); - appendNumber(message, tickData[system.tick + 1 - system.initialTick].year % 10, FALSE); + appendNumber(message, td.year / 10, FALSE); + appendNumber(message, td.year % 10, FALSE); appendText(message, L"."); - appendNumber(message, tickData[system.tick + 1 - system.initialTick].month / 10, FALSE); - appendNumber(message, tickData[system.tick + 1 - system.initialTick].month % 10, FALSE); + appendNumber(message, td.month / 10, FALSE); + appendNumber(message, td.month % 10, FALSE); appendText(message, L"."); - appendNumber(message, tickData[system.tick + 1 - system.initialTick].day / 10, FALSE); - appendNumber(message, tickData[system.tick + 1 - system.initialTick].day % 10, FALSE); + appendNumber(message, td.day / 10, FALSE); + appendNumber(message, td.day % 10, FALSE); appendText(message, L" "); - appendNumber(message, tickData[system.tick + 1 - system.initialTick].hour / 10, FALSE); - appendNumber(message, tickData[system.tick + 1 - system.initialTick].hour % 10, FALSE); + appendNumber(message, td.hour / 10, FALSE); + appendNumber(message, td.hour % 10, FALSE); appendText(message, L":"); - appendNumber(message, tickData[system.tick + 1 - system.initialTick].minute / 10, FALSE); - appendNumber(message, tickData[system.tick + 1 - system.initialTick].minute % 10, FALSE); + appendNumber(message, td.minute / 10, FALSE); + appendNumber(message, td.minute % 10, FALSE); appendText(message, L":"); - appendNumber(message, tickData[system.tick + 1 - system.initialTick].second / 10, FALSE); - appendNumber(message, tickData[system.tick + 1 - system.initialTick].second % 10, FALSE); + appendNumber(message, td.second / 10, FALSE); + appendNumber(message, td.second % 10, FALSE); appendText(message, L"."); - appendNumber(message, tickData[system.tick + 1 - system.initialTick].millisecond / 100, FALSE); - appendNumber(message, (tickData[system.tick + 1 - system.initialTick].millisecond % 100) / 10, FALSE); - appendNumber(message, tickData[system.tick + 1 - system.initialTick].millisecond % 10, FALSE); + appendNumber(message, td.millisecond / 100, FALSE); + appendNumber(message, (td.millisecond % 100) / 10, FALSE); + appendNumber(message, td.millisecond % 10, FALSE); appendText(message, L".) 
"); } appendNumber(message, numberOfPendingTransactions, TRUE); @@ -4167,10 +4408,10 @@ static void processKeyPresses() { if (faultyComputorFlags[i >> 6] & (1ULL << (i & 63))) { - getIdentity(broadcastedComputors.broadcastComputors.computors.publicKeys[i].m256i_u8, message, false); + getIdentity(broadcastedComputors.computors.publicKeys[i].m256i_u8, message, false); appendText(message, L" = "); long long amount = 0; - const int spectrumIndex = ::spectrumIndex(broadcastedComputors.broadcastComputors.computors.publicKeys[i]); + const int spectrumIndex = ::spectrumIndex(broadcastedComputors.computors.publicKeys[i]); if (spectrumIndex >= 0) { amount = energy(spectrumIndex); @@ -4315,6 +4556,7 @@ static void processKeyPresses() */ case 0x0E: { + logToConsole(L"Pressed F4 key"); for (unsigned int i = 0; i < NUMBER_OF_OUTGOING_CONNECTIONS + NUMBER_OF_INCOMING_CONNECTIONS; i++) { closePeer(&peers[i]); @@ -4329,6 +4571,7 @@ static void processKeyPresses() */ case 0x0F: { + logToConsole(L"Pressed F5 key"); forceNextTick = true; } break; @@ -4340,6 +4583,7 @@ static void processKeyPresses() */ case 0x10: { + logToConsole(L"Pressed F6 key"); SPECTRUM_FILE_NAME[sizeof(SPECTRUM_FILE_NAME) / sizeof(SPECTRUM_FILE_NAME[0]) - 4] = L'0'; SPECTRUM_FILE_NAME[sizeof(SPECTRUM_FILE_NAME) / sizeof(SPECTRUM_FILE_NAME[0]) - 3] = L'0'; SPECTRUM_FILE_NAME[sizeof(SPECTRUM_FILE_NAME) / sizeof(SPECTRUM_FILE_NAME[0]) - 2] = L'0'; @@ -4357,6 +4601,19 @@ static void processKeyPresses() } break; + /* + * F7 Key + * Force switching epoch + * By pressing F7 key, next tick digest will be zeroes to fix a corner case where weak nodes cannot get through new epoch + * due to missing data. This flag will be reset only when the seamless transition procedure happen. + */ + case 0x11: + { + logToConsole(L"Pressed F7 key"); + forceSwitchEpoch = true; + } + break; + /* * F9 Key * By Pressing the F9 Key the latestCreatedTick got's decreased by one. 
@@ -4364,7 +4621,8 @@ static void processKeyPresses() */ case 0x13: { - system.latestCreatedTick--; + logToConsole(L"Pressed F9 key"); + if (system.latestCreatedTick > 0) system.latestCreatedTick--; } break; @@ -4647,8 +4905,8 @@ EFI_STATUS efi_main(EFI_HANDLE imageHandle, EFI_SYSTEM_TABLE* systemTable) _InterlockedIncrement64(&numberOfDisseminatedRequests); // send RequestComputors message at beginning of epoch - if (!broadcastedComputors.broadcastComputors.computors.epoch - || broadcastedComputors.broadcastComputors.computors.epoch != system.epoch) + if (!broadcastedComputors.computors.epoch + || broadcastedComputors.computors.epoch != system.epoch) { requestedComputors.header.randomizeDejavu(); bs->CopyMem(&peers[i].dataToTransmit[peers[i].dataToTransmitSize], &requestedComputors, requestedComputors.header.size()); @@ -4682,8 +4940,11 @@ EFI_STATUS efi_main(EFI_HANDLE imageHandle, EFI_SYSTEM_TABLE* systemTable) } } - if (curTimeTick - tickRequestingTick >= TICK_REQUESTING_PERIOD * frequency / 1000) + if (curTimeTick - tickRequestingTick >= TICK_REQUESTING_PERIOD * frequency / 1000 + && ts.tickInCurrentEpochStorage(system.tick + 1) + && !epochTransitionState) { + // Request ticks tickRequestingTick = curTimeTick; if (tickRequestingIndicator == tickTotalNumberOfComputors) @@ -4691,11 +4952,10 @@ EFI_STATUS efi_main(EFI_HANDLE imageHandle, EFI_SYSTEM_TABLE* systemTable) requestedQuorumTick.header.randomizeDejavu(); requestedQuorumTick.requestQuorumTick.quorumTick.tick = system.tick; bs->SetMem(&requestedQuorumTick.requestQuorumTick.quorumTick.voteFlags, sizeof(requestedQuorumTick.requestQuorumTick.quorumTick.voteFlags), 0); - const unsigned int baseOffset = (system.tick - system.initialTick) * NUMBER_OF_COMPUTORS; + const Tick* tsCompTicks = ts.ticks.getByTickInCurrentEpoch(system.tick); for (unsigned int i = 0; i < NUMBER_OF_COMPUTORS; i++) { - const Tick* tick = &ticks[baseOffset + i]; - if (tick->epoch == system.epoch) + if (tsCompTicks[i].epoch == system.epoch) { requestedQuorumTick.requestQuorumTick.quorumTick.voteFlags[i >> 3] |= (1 << (i & 7)); } @@ -4708,11 +4968,10 @@ EFI_STATUS efi_main(EFI_HANDLE imageHandle, EFI_SYSTEM_TABLE* systemTable) requestedQuorumTick.header.randomizeDejavu(); requestedQuorumTick.requestQuorumTick.quorumTick.tick = system.tick + 1; bs->SetMem(&requestedQuorumTick.requestQuorumTick.quorumTick.voteFlags, sizeof(requestedQuorumTick.requestQuorumTick.quorumTick.voteFlags), 0); - const unsigned int baseOffset = (system.tick + 1 - system.initialTick) * NUMBER_OF_COMPUTORS; + const Tick* tsCompTicks = ts.ticks.getByTickInCurrentEpoch(system.tick + 1); for (unsigned int i = 0; i < NUMBER_OF_COMPUTORS; i++) { - const Tick* tick = &ticks[baseOffset + i]; - if (tick->epoch == system.epoch) + if (tsCompTicks[i].epoch == system.epoch) { requestedQuorumTick.requestQuorumTick.quorumTick.voteFlags[i >> 3] |= (1 << (i & 7)); } @@ -4721,14 +4980,14 @@ EFI_STATUS efi_main(EFI_HANDLE imageHandle, EFI_SYSTEM_TABLE* systemTable) } futureTickRequestingIndicator = futureTickTotalNumberOfComputors; - if (tickData[system.tick + 1 - system.initialTick].epoch != system.epoch + if (ts.tickData[system.tick + 1 - system.initialTick].epoch != system.epoch || targetNextTickDataDigestIsKnown) { requestedTickData.header.randomizeDejavu(); requestedTickData.requestTickData.requestedTickData.tick = system.tick + 1; pushToAny(&requestedTickData.header); } - if (tickData[system.tick + 2 - system.initialTick].epoch != system.epoch) + if (ts.tickData[system.tick + 2 - system.initialTick].epoch 
!= system.epoch) { requestedTickData.header.randomizeDejavu(); requestedTickData.requestTickData.requestedTickData.tick = system.tick + 2; @@ -4776,18 +5035,18 @@ EFI_STATUS efi_main(EFI_HANDLE imageHandle, EFI_SYSTEM_TABLE* systemTable) } if (spectrumMustBeSaved) { - spectrumMustBeSaved = false; saveSpectrum(); + spectrumMustBeSaved = false; } if (universeMustBeSaved) { - universeMustBeSaved = false; saveUniverse(); + universeMustBeSaved = false; } if (computerMustBeSaved) { - computerMustBeSaved = false; saveComputer(); + computerMustBeSaved = false; } if (forceRefreshPeerList) @@ -4834,6 +5093,10 @@ EFI_STATUS efi_main(EFI_HANDLE imageHandle, EFI_SYSTEM_TABLE* systemTable) mainLoopNumerator += __rdtsc() - curTimeTick; mainLoopDenominator++; } + +#if !defined(NDEBUG) + printDebugMessages(); +#endif } saveSystem(); diff --git a/src/tick_storage.h b/src/tick_storage.h new file mode 100644 index 00000000..54a806b4 --- /dev/null +++ b/src/tick_storage.h @@ -0,0 +1,603 @@ +#pragma once + +#include "network_messages/tick.h" +#include "network_messages/transactions.h" + +#include "platform/memory.h" +#include "platform/concurrency.h" +#include "platform/console_logging.h" +#include "platform/debugging.h" + +#include "public_settings.h" + + + +// Encapsulated tick storage of current epoch that can additionally keep the last ticks of the previous epoch. +// The number of ticks to keep from the previous epoch is TICKS_TO_KEEP_FROM_PRIOR_EPOCH (defined in public_settings.h). +// +// This is a kind of singleton class with only static members (so all instances refer to the same data). +// +// It comprises: +// - tickData (one TickData struct per tick) +// - ticks (one Tick struct per tick and Computor) +// - tickTransactions (continuous buffer efficiently storing the variable-size transactions) +// - tickTransactionOffsets (offsets of transactions in buffer, order in tickTransactions may differ) +// - nextTickTransactionOffset (offset of next transition to be added) +class TickStorage +{ +private: + static constexpr unsigned long long tickDataLength = MAX_NUMBER_OF_TICKS_PER_EPOCH + TICKS_TO_KEEP_FROM_PRIOR_EPOCH; + static constexpr unsigned long long tickDataSize = tickDataLength * sizeof(TickData); + + static constexpr unsigned long long ticksLengthCurrentEpoch = ((unsigned long long)MAX_NUMBER_OF_TICKS_PER_EPOCH) * NUMBER_OF_COMPUTORS; + static constexpr unsigned long long ticksLengthPreviousEpoch = ((unsigned long long)TICKS_TO_KEEP_FROM_PRIOR_EPOCH) * NUMBER_OF_COMPUTORS; + static constexpr unsigned long long ticksLength = ticksLengthCurrentEpoch + ticksLengthPreviousEpoch; + static constexpr unsigned long long ticksSize = ticksLength * sizeof(Tick); + + static constexpr unsigned long long tickTransactionsSizeCurrentEpoch = FIRST_TICK_TRANSACTION_OFFSET + (((unsigned long long)MAX_NUMBER_OF_TICKS_PER_EPOCH) * NUMBER_OF_TRANSACTIONS_PER_TICK * MAX_TRANSACTION_SIZE / TRANSACTION_SPARSENESS); + static constexpr unsigned long long tickTransactionsSizePreviousEpoch = (((unsigned long long)TICKS_TO_KEEP_FROM_PRIOR_EPOCH) * NUMBER_OF_TRANSACTIONS_PER_TICK * MAX_TRANSACTION_SIZE / TRANSACTION_SPARSENESS); + static constexpr unsigned long long tickTransactionsSize = tickTransactionsSizeCurrentEpoch + tickTransactionsSizePreviousEpoch; + + static constexpr unsigned long long tickTransactionOffsetsLengthCurrentEpoch = ((unsigned long long)MAX_NUMBER_OF_TICKS_PER_EPOCH) * NUMBER_OF_TRANSACTIONS_PER_TICK; + static constexpr unsigned long long tickTransactionOffsetsLengthPreviousEpoch = ((unsigned long 
long)TICKS_TO_KEEP_FROM_PRIOR_EPOCH) * NUMBER_OF_TRANSACTIONS_PER_TICK; + static constexpr unsigned long long tickTransactionOffsetsLength = tickTransactionOffsetsLengthCurrentEpoch + tickTransactionOffsetsLengthPreviousEpoch; + static constexpr unsigned long long tickTransactionOffsetsSizeCurrentEpoch = tickTransactionOffsetsLengthCurrentEpoch * sizeof(unsigned long long); + static constexpr unsigned long long tickTransactionOffsetsSizePreviousEpoch = tickTransactionOffsetsLengthPreviousEpoch * sizeof(unsigned long long); + static constexpr unsigned long long tickTransactionOffsetsSize = tickTransactionOffsetsLength * sizeof(unsigned long long); + + + // Tick number range of current epoch storage + inline static unsigned int tickBegin = 0; + inline static unsigned int tickEnd = 0; + + // Tick number range of previous epoch storage + inline static unsigned int oldTickBegin = 0; + inline static unsigned int oldTickEnd = 0; + + // Allocated tick data buffer with tickDataLength elements (includes current and previous epoch data) + inline static TickData* tickDataPtr = nullptr; + + // Allocated ticks buffer with ticksLength elements (includes current and previous epoch data) + inline static Tick* ticksPtr = nullptr; + + // Allocated tickTransactions buffer with tickTransactionsSize bytes (includes current and previous epoch data) + inline static unsigned char* tickTransactionsPtr = nullptr; + + // Allocated tickTransactionOffsets buffer with tickTransactionOffsetsLength elements (includes current and previous epoch data) + inline static unsigned long long* tickTransactionOffsetsPtr = nullptr; + + // Tick data of previous epoch. Points to tickData + MAX_NUMBER_OF_TICKS_PER_EPOCH + inline static TickData* oldTickDataPtr = nullptr; + + // Ticks of previous epoch. Points to ticksPtr + ticksLengthCurrentEpoch + inline static Tick* oldTicksPtr = nullptr; + + // Tick transaction buffer of previous epoch. Points to tickTransactionsPtr + tickTransactionsSizeCurrentEpoch. + inline static unsigned char* oldTickTransactionsPtr = nullptr; + + // Tick transaction offsets of previous epoch. Points to tickTransactionOffsetsPtr + tickTransactionOffsetsLengthCurrentEpoch. 
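Each buffer is thus one allocation split into a current-epoch region followed by a smaller tail for the previous epoch, and the old* pointers (including the declaration right after this note) simply point past the current-epoch part. A toy sketch of the partitioning and the resulting index mapping, with small made-up constants in place of MAX_NUMBER_OF_TICKS_PER_EPOCH and TICKS_TO_KEEP_FROM_PRIOR_EPOCH:

    #include <cassert>
    #include <vector>

    int main()
    {
        // Toy stand-ins for MAX_NUMBER_OF_TICKS_PER_EPOCH and TICKS_TO_KEEP_FROM_PRIOR_EPOCH.
        const unsigned int ticksPerEpoch = 8;
        const unsigned int ticksKept = 3;

        // One allocation, logically split into [ current epoch | previous epoch tail ].
        std::vector<int> tickData(ticksPerEpoch + ticksKept);
        int* currentEpochPart = tickData.data();
        int* previousEpochPart = tickData.data() + ticksPerEpoch;   // analog of oldTickDataPtr

        // Index mapping as in tickToIndexCurrentEpoch() / tickToIndexPreviousEpoch().
        const unsigned int tickBegin = 1000;       // first tick of the current epoch
        const unsigned int oldTickBegin = 997;     // first kept tick of the previous epoch
        assert(&tickData[1002 - tickBegin] == currentEpochPart + 2);                      // current epoch, tick 1002
        assert(&tickData[998 - oldTickBegin + ticksPerEpoch] == previousEpochPart + 1);   // previous epoch, tick 998
        return 0;
    }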
+ inline static unsigned long long* oldTickTransactionOffsetsPtr = nullptr; + + + // Lock for securing tickData + inline static volatile char tickDataLock = 0; + + // One lock per computor for securing ticks element in current tick (only the tick system.tick is written) + inline static volatile char ticksLocks[NUMBER_OF_COMPUTORS]; + + // Lock for securing tickTransactions and tickTransactionOffsets + inline static volatile char tickTransactionsLock = 0; + +public: + + // Init at node startup + static bool init() + { + // TODO: allocate everything with one continuous buffer + if (!allocatePool(tickDataSize, (void**)&tickDataPtr) + || !allocatePool(ticksSize, (void**)&ticksPtr) + || !allocatePool(tickTransactionsSize, (void**)&tickTransactionsPtr) + || !allocatePool(tickTransactionOffsetsSize, (void**)&tickTransactionOffsetsPtr)) + { + logToConsole(L"Failed to allocate tick storage memory!"); + return false; + } + + ASSERT(tickDataLock == 0); + setMem((void*)ticksLocks, sizeof(ticksLocks), 0); + ASSERT(tickTransactionsLock == 0); + nextTickTransactionOffset = FIRST_TICK_TRANSACTION_OFFSET; + + oldTickDataPtr = tickDataPtr + MAX_NUMBER_OF_TICKS_PER_EPOCH; + oldTicksPtr = ticksPtr + ticksLengthCurrentEpoch; + oldTickTransactionsPtr = tickTransactionsPtr + tickTransactionsSizeCurrentEpoch; + oldTickTransactionOffsetsPtr = tickTransactionOffsetsPtr + tickTransactionOffsetsLengthCurrentEpoch; + + tickBegin = 0; + tickEnd = 0; + oldTickBegin = 0; + oldTickEnd = 0; + + return true; + } + + // Cleanup at node shutdown + static void deinit() + { + if (tickDataPtr) + { + freePool(tickDataPtr); + } + + if (ticksPtr) + { + freePool(ticksPtr); + } + + if (tickTransactionOffsetsPtr) + { + freePool(tickTransactionOffsetsPtr); + } + + if (tickTransactionsPtr) + { + freePool(tickTransactionsPtr); + } + } + + // Begin new epoch. If not called the first time (seamless transition), assume that the ticks to keep + // are ticks in [newInitialTick-TICKS_TO_KEEP_FROM_PRIOR_EPOCH, newInitialTick-1]. + static void beginEpoch(unsigned int newInitialTick) + { +#if !defined(NDEBUG) && !defined(NO_UEFI) + addDebugMessage(L"Begin ts.beginEpoch()"); + CHAR16 dbgMsgBuf[300]; +#endif + if (tickBegin && tickInCurrentEpochStorage(newInitialTick) && tickBegin < newInitialTick) + { + // seamless epoch transition: keep some ticks of prior epoch + oldTickEnd = newInitialTick; + oldTickBegin = newInitialTick - TICKS_TO_KEEP_FROM_PRIOR_EPOCH; + if (oldTickBegin < tickBegin) + oldTickBegin = tickBegin; + +#if !defined(NDEBUG) && !defined(NO_UEFI) + setText(dbgMsgBuf, L"Keep ticks of prior epoch: oldTickBegin="); + appendNumber(dbgMsgBuf, oldTickBegin, FALSE); + appendText(dbgMsgBuf, L", oldTickEnd="); + appendNumber(dbgMsgBuf, oldTickEnd, FALSE); + addDebugMessage(dbgMsgBuf); +#endif + + const unsigned int tickIndex = tickToIndexCurrentEpoch(oldTickBegin); + const unsigned int tickCount = oldTickEnd - oldTickBegin; + + // copy ticks and tick data from recently ended epoch into storage of previous epoch + copyMem(oldTickDataPtr, tickDataPtr + tickIndex, tickCount * sizeof(TickData)); + copyMem(oldTicksPtr, ticksPtr + (tickIndex * NUMBER_OF_COMPUTORS), tickCount * NUMBER_OF_COMPUTORS * sizeof(Tick)); + + // copy transactions and transactionOffsets + { + // copy transactions + const unsigned long long totalTransactionSizesSum = nextTickTransactionOffset - FIRST_TICK_TRANSACTION_OFFSET; + const unsigned long long keepTransactionSizesSum = (totalTransactionSizesSum <= tickTransactionsSizePreviousEpoch) ? 
totalTransactionSizesSum : tickTransactionsSizePreviousEpoch; + const unsigned long long firstToKeepOffset = nextTickTransactionOffset - keepTransactionSizesSum; + copyMem(oldTickTransactionsPtr, tickTransactionsPtr + firstToKeepOffset, keepTransactionSizesSum); + + // adjust offsets (based on end of transactions) + const unsigned long long offsetDelta = (tickTransactionsSizeCurrentEpoch + keepTransactionSizesSum) - nextTickTransactionOffset; + for (unsigned int tickId = oldTickBegin; tickId < oldTickEnd; ++tickId) + { + const unsigned long long* tickOffsets = TickTransactionOffsetsAccess::getByTickInCurrentEpoch(tickId); + unsigned long long* tickOffsetsPrevEp = TickTransactionOffsetsAccess::getByTickInPreviousEpoch(tickId); + for (unsigned int transactionIdx = 0; transactionIdx < NUMBER_OF_TRANSACTIONS_PER_TICK; ++transactionIdx) + { + const unsigned long long offset = tickOffsets[transactionIdx]; + if (!offset || offset < firstToKeepOffset) + { + // transaction not available (either not available overall or not fitting in storage of previous epoch) + tickOffsetsPrevEp[transactionIdx] = 0; + } + else + { + // set offset of transcation + const unsigned long long offsetPrevEp = offset + offsetDelta; + tickOffsetsPrevEp[transactionIdx] = offsetPrevEp; + + // check offset and transaction + ASSERT(offset >= FIRST_TICK_TRANSACTION_OFFSET); + ASSERT(offset < tickTransactionsSizeCurrentEpoch); + ASSERT(offsetPrevEp >= tickTransactionsSizeCurrentEpoch); + ASSERT(offsetPrevEp < tickTransactionsSize); + Transaction* transactionCurEp = TickTransactionsAccess::ptr(offset); + Transaction* transactionPrevEp = TickTransactionsAccess::ptr(offsetPrevEp); + ASSERT(transactionCurEp->checkValidity()); + ASSERT(transactionPrevEp->checkValidity()); + ASSERT(transactionPrevEp->tick == tickId); + ASSERT(transactionPrevEp->tick == tickId); + ASSERT(transactionPrevEp->amount == transactionCurEp->amount); + ASSERT(transactionPrevEp->sourcePublicKey == transactionCurEp->sourcePublicKey); + ASSERT(transactionPrevEp->destinationPublicKey == transactionCurEp->destinationPublicKey); + ASSERT(transactionPrevEp->inputSize == transactionCurEp->inputSize); + ASSERT(transactionPrevEp->inputType == transactionCurEp->inputType); + ASSERT(offset + transactionCurEp->totalSize() <= tickTransactionsSizeCurrentEpoch); + ASSERT(offsetPrevEp + transactionPrevEp->totalSize() <= tickTransactionsSize); + } + } + } + } + + // reset data storage of new epoch + setMem(tickDataPtr, MAX_NUMBER_OF_TICKS_PER_EPOCH * sizeof(TickData), 0); + setMem(ticksPtr, ticksLengthCurrentEpoch * sizeof(Tick), 0); + setMem(tickTransactionOffsetsPtr, tickTransactionOffsetsSizeCurrentEpoch, 0); + setMem(tickTransactionsPtr, tickTransactionsSizeCurrentEpoch, 0); + } + else + { + // node startup with no data of prior epoch (also use storage for prior epoch for current) + setMem(tickDataPtr, tickDataSize, 0); + setMem(ticksPtr, ticksSize, 0); + setMem(tickTransactionOffsetsPtr, tickTransactionOffsetsSize, 0); + setMem(tickTransactionsPtr, tickTransactionsSize, 0); + oldTickBegin = 0; + oldTickEnd = 0; + } + + tickBegin = newInitialTick; + tickEnd = newInitialTick + MAX_NUMBER_OF_TICKS_PER_EPOCH; + + nextTickTransactionOffset = FIRST_TICK_TRANSACTION_OFFSET; +#if !defined(NDEBUG) && !defined(NO_UEFI) + addDebugMessage(L"End ts.beginEpoch()"); +#endif + } + + // Useful for debugging, but expensive: check that everything is as expected. 
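The copy in beginEpoch() above rebases every kept transaction offset by offsetDelta = (tickTransactionsSizeCurrentEpoch + keepTransactionSizesSum) - nextTickTransactionOffset and zeroes offsets that point before firstToKeepOffset. A worked numeric example with made-up sizes (the consistency check announced above follows right after this note):

    #include <cassert>

    int main()
    {
        // Made-up sizes, only to illustrate the rebasing arithmetic in beginEpoch().
        const unsigned long long sizeCurrentEpoch = 1000;   // tickTransactionsSizeCurrentEpoch
        const unsigned long long sizePreviousEpoch = 200;   // tickTransactionsSizePreviousEpoch
        const unsigned long long firstOffset = 16;          // FIRST_TICK_TRANSACTION_OFFSET
        const unsigned long long nextOffset = 700;          // nextTickTransactionOffset at epoch end

        const unsigned long long totalUsed = nextOffset - firstOffset;                                    // 684 bytes written
        const unsigned long long kept = (totalUsed <= sizePreviousEpoch) ? totalUsed : sizePreviousEpoch; // 200 bytes kept
        const unsigned long long firstToKeep = nextOffset - kept;                                         // 500
        const unsigned long long delta = (sizeCurrentEpoch + kept) - nextOffset;                          // 500

        // A kept transaction at old offset 650 lands at 1150, inside the previous-epoch region.
        assert(650 + delta >= sizeCurrentEpoch && 650 + delta < sizeCurrentEpoch + sizePreviousEpoch);
        // The very last byte written maps to the end of the previous-epoch region.
        assert(nextOffset + delta == sizeCurrentEpoch + sizePreviousEpoch);
        // A transaction before firstToKeep (e.g. at offset 300) does not fit and its offset is set to 0.
        assert(300 < firstToKeep);
        return 0;
    }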
+ static void checkStateConsistencyWithAssert() + { +#if !defined(NDEBUG) && !defined(NO_UEFI) + addDebugMessage(L"Begin ts.checkStateConsistencyWithAssert()"); + CHAR16 dbgMsgBuf[200]; + setText(dbgMsgBuf, L"oldTickBegin="); + appendNumber(dbgMsgBuf, oldTickBegin, FALSE); + appendText(dbgMsgBuf, L", oldTickEnd="); + appendNumber(dbgMsgBuf, oldTickEnd, FALSE); + appendText(dbgMsgBuf, L", tickBegin="); + appendNumber(dbgMsgBuf, tickBegin, FALSE); + appendText(dbgMsgBuf, L", tickEnd="); + appendNumber(dbgMsgBuf, tickEnd, FALSE); + addDebugMessage(dbgMsgBuf); +#endif + ASSERT(tickBegin <= tickEnd); + ASSERT(tickEnd - tickBegin <= tickDataLength); + ASSERT(oldTickBegin <= oldTickEnd); + ASSERT(oldTickEnd - oldTickBegin <= TICKS_TO_KEEP_FROM_PRIOR_EPOCH); + ASSERT(oldTickEnd <= tickBegin); + + ASSERT(tickDataPtr != nullptr); + ASSERT(ticksPtr != nullptr); + ASSERT(tickTransactionsPtr != nullptr); + ASSERT(tickTransactionOffsetsPtr != nullptr); + ASSERT(oldTickDataPtr == tickDataPtr + MAX_NUMBER_OF_TICKS_PER_EPOCH); + ASSERT(oldTicksPtr == ticksPtr + ticksLengthCurrentEpoch); + ASSERT(oldTickTransactionsPtr == tickTransactionsPtr + tickTransactionsSizeCurrentEpoch); + ASSERT(oldTickTransactionOffsetsPtr == tickTransactionOffsetsPtr + tickTransactionOffsetsLengthCurrentEpoch); + + ASSERT(nextTickTransactionOffset >= FIRST_TICK_TRANSACTION_OFFSET); + ASSERT(nextTickTransactionOffset <= tickTransactionsSizeCurrentEpoch); + + // Check previous epoch data + for (unsigned int tickId = oldTickBegin; tickId < oldTickEnd; ++tickId) + { + const TickData& tickData = TickDataAccess::getByTickInPreviousEpoch(tickId); + ASSERT(tickData.epoch == 0 || (tickData.tick == tickId)); + + const Tick* computorsTicks = TicksAccess::getByTickInPreviousEpoch(tickId); + for (unsigned int computor = 0; computor < NUMBER_OF_COMPUTORS; ++computor) + { + const Tick& computorTick = computorsTicks[computor]; + ASSERT(computorTick.epoch == 0 || (computorTick.tick == tickId && computorTick.computorIndex == computor)); + } + + const unsigned long long* tickOffsets = TickTransactionOffsetsAccess::getByTickInPreviousEpoch(tickId); + for (unsigned int transactionIdx = 0; transactionIdx < NUMBER_OF_TRANSACTIONS_PER_TICK; ++transactionIdx) + { + unsigned long long offset = tickOffsets[transactionIdx]; + if (offset) + { + Transaction* transaction = TickTransactionsAccess::ptr(offset); + ASSERT(transaction->checkValidity()); + ASSERT(transaction->tick == tickId); +#if !defined(NDEBUG) && !defined(NO_UEFI) + if (!transaction->checkValidity() || transaction->tick != tickId) + { + setText(dbgMsgBuf, L"Error in prev. 
epoch transaction "); + appendNumber(dbgMsgBuf, transactionIdx, FALSE); + appendText(dbgMsgBuf, L" in tick "); + appendNumber(dbgMsgBuf, tickId, FALSE); + addDebugMessage(dbgMsgBuf); + + setText(dbgMsgBuf, L"t->tick "); + appendNumber(dbgMsgBuf, transaction->tick, FALSE); + appendText(dbgMsgBuf, L", t->inputSize "); + appendNumber(dbgMsgBuf, transaction->inputSize, FALSE); + appendText(dbgMsgBuf, L", t->inputType "); + appendNumber(dbgMsgBuf, transaction->inputType, FALSE); + appendText(dbgMsgBuf, L", t->amount "); + appendNumber(dbgMsgBuf, transaction->amount, TRUE); + addDebugMessage(dbgMsgBuf); + + addDebugMessage(L"Skipping to check more transactions and ticks"); + goto test_current_epoch; + } +#endif + } + } + } + + // Check current epoch data +#if !defined(NDEBUG) && !defined(NO_UEFI) + test_current_epoch: +#endif + unsigned long long lastTransactionEndOffset = FIRST_TICK_TRANSACTION_OFFSET; + for (unsigned int tickId = tickBegin; tickId < tickEnd; ++tickId) + { + const TickData& tickData = TickDataAccess::getByTickInCurrentEpoch(tickId); + ASSERT(tickData.epoch == 0 || (tickData.tick == tickId)); + + const Tick* computorsTicks = TicksAccess::getByTickInCurrentEpoch(tickId); + for (unsigned int computor = 0; computor < NUMBER_OF_COMPUTORS; ++computor) + { + const Tick& computorTick = computorsTicks[computor]; + ASSERT(computorTick.epoch == 0 || (computorTick.tick == tickId && computorTick.computorIndex == computor)); + } + + const unsigned long long* tickOffsets = TickTransactionOffsetsAccess::getByTickInCurrentEpoch(tickId); + for (unsigned int transactionIdx = 0; transactionIdx < NUMBER_OF_TRANSACTIONS_PER_TICK; ++transactionIdx) + { + unsigned long long offset = tickOffsets[transactionIdx]; + if (offset) + { + Transaction* transaction = TickTransactionsAccess::ptr(offset); + ASSERT(transaction->checkValidity()); + ASSERT(transaction->tick == tickId); +#if !defined(NDEBUG) && !defined(NO_UEFI) + if (!transaction->checkValidity() || transaction->tick != tickId) + { + setText(dbgMsgBuf, L"Error in cur. epoch transaction "); + appendNumber(dbgMsgBuf, transactionIdx, FALSE); + appendText(dbgMsgBuf, L" in tick "); + appendNumber(dbgMsgBuf, tickId, FALSE); + addDebugMessage(dbgMsgBuf); + + setText(dbgMsgBuf, L"t->tick "); + appendNumber(dbgMsgBuf, transaction->tick, FALSE); + appendText(dbgMsgBuf, L", t->inputSize "); + appendNumber(dbgMsgBuf, transaction->inputSize, FALSE); + appendText(dbgMsgBuf, L", t->inputType "); + appendNumber(dbgMsgBuf, transaction->inputType, FALSE); + appendText(dbgMsgBuf, L", t->amount "); + appendNumber(dbgMsgBuf, transaction->amount, TRUE); + addDebugMessage(dbgMsgBuf); + + addDebugMessage(L"Skipping to check more transactions and ticks"); + goto leave_test; + } +#endif + + unsigned long long transactionEndOffset = offset + transaction->totalSize(); + if (lastTransactionEndOffset < transactionEndOffset) + lastTransactionEndOffset = transactionEndOffset; + } + } + } + ASSERT(lastTransactionEndOffset == nextTickTransactionOffset); +#if !defined(NDEBUG) && !defined(NO_UEFI) + leave_test: + addDebugMessage(L"End ts.checkStateConsistencyWithAssert()"); +#endif + } + + // Check whether tick is stored in the current epoch storage. + inline static bool tickInCurrentEpochStorage(unsigned int tick) + { + return tick >= tickBegin && tick < tickEnd; + } + + // Check whether tick is stored in the previous epoch storage. 
+ inline static bool tickInPreviousEpochStorage(unsigned int tick) + { + return oldTickBegin <= tick && tick < oldTickEnd; + } + + // Return index of tick data in current epoch (does not check tick). + inline static unsigned int tickToIndexCurrentEpoch(unsigned int tick) + { + return tick - tickBegin; + } + + // Return index of tick data in previous epoch (does not check that it is stored). + inline static unsigned int tickToIndexPreviousEpoch(unsigned int tick) + { + return tick - oldTickBegin + MAX_NUMBER_OF_TICKS_PER_EPOCH; + } + + // Struct for structured, convenient access via ".tickData" + struct TickDataAccess + { + inline static void acquireLock() + { + ACQUIRE(tickDataLock); + } + + inline static void releaseLock() + { + RELEASE(tickDataLock); + } + + // Return tick if it is stored and not empty, or nullptr otherwise (always checks tick). + inline static TickData* getByTickIfNotEmpty(unsigned int tick) + { + unsigned int index; + if (tickInCurrentEpochStorage(tick)) + index = tickToIndexCurrentEpoch(tick); + else if (tickInPreviousEpochStorage(tick)) + index = tickToIndexPreviousEpoch(tick); + else + return nullptr; + + TickData* td = tickDataPtr + index; + if (td->epoch == 0) + return nullptr; + + return td; + } + + // Get tick data by tick in current epoch (checking tick with ASSERT) + inline static TickData& getByTickInCurrentEpoch(unsigned int tick) + { + ASSERT(tickInCurrentEpochStorage(tick)); + return tickDataPtr[tickToIndexCurrentEpoch(tick)]; + } + + // Get tick data by tick in previous epoch (checking tick with ASSERT) + inline static TickData& getByTickInPreviousEpoch(unsigned int tick) + { + ASSERT(tickInPreviousEpochStorage(tick)); + return tickDataPtr[tickToIndexPreviousEpoch(tick)]; + } + + // Get tick data at index independent of epoch (checking index with ASSERT) + inline TickData& operator[](unsigned int index) + { + ASSERT(index < tickDataLength); + return tickDataPtr[index]; + } + + // Get tick data at index independent of epoch (checking index with ASSERT) + inline const TickData& operator[](unsigned int index) const + { + ASSERT(index < tickDataLength); + return tickDataPtr[index]; + } + } tickData; + + // Struct for structured, convenient access via ".ticks" + struct TicksAccess + { + // Acquire lock for ticks element of specific computor (only ticks >= system.tick are written) + inline static void acquireLock(unsigned short computorIndex) + { + ACQUIRE(ticksLocks[computorIndex]); + } + + // Release lock for ticks element of specific computor (only ticks >= system.tick are written) + inline static void releaseLock(unsigned short computorIndex) + { + RELEASE(ticksLocks[computorIndex]); + } + + // Return pointer to array of one Tick per computor by tick index independent of epoch (checking index with ASSERT) + inline static Tick* getByTickIndex(unsigned int tickIndex) + { + ASSERT(tickIndex < tickDataLength); + return ticksPtr + tickIndex * NUMBER_OF_COMPUTORS; + } + + // Return pointer to array of one Tick per computor in current epoch by tick (checking tick with ASSERT) + inline static Tick* getByTickInCurrentEpoch(unsigned int tick) + { + ASSERT(tickInCurrentEpochStorage(tick)); + return ticksPtr + tickToIndexCurrentEpoch(tick) * NUMBER_OF_COMPUTORS; + } + + // Return pointer to array of one Tick per computor in previous epoch by tick (checking tick with ASSERT) + inline static Tick* getByTickInPreviousEpoch(unsigned int tick) + { + ASSERT(tickInPreviousEpochStorage(tick)); + return ticksPtr + tickToIndexPreviousEpoch(tick) * NUMBER_OF_COMPUTORS; + } + + // 
Get ticks element at offset (checking offset with ASSERT) + inline Tick& operator[](unsigned int offset) + { + ASSERT(offset < ticksLength); + return ticksPtr[offset]; + } + + // Get ticks element at offset (checking offset with ASSERT) + inline const Tick& operator[](unsigned int offset) const + { + ASSERT(offset < ticksLength); + return ticksPtr[offset]; + } + } ticks; + + // Struct for structured, convenient access via ".tickTransactionOffsets" + struct TickTransactionOffsetsAccess + { + // Return pointer to offset array of transactions by tick index independent of epoch (checking index with ASSERT) + inline static unsigned long long* getByTickIndex(unsigned int tickIndex) + { + ASSERT(tickIndex < tickDataLength); + return tickTransactionOffsetsPtr + (tickIndex * NUMBER_OF_TRANSACTIONS_PER_TICK); + } + + // Return pointer to offset array of transactions of tick in current epoch by tick (checking tick with ASSERT) + inline static unsigned long long* getByTickInCurrentEpoch(unsigned int tick) + { + ASSERT(tickInCurrentEpochStorage(tick)); + const unsigned int tickIndex = tickToIndexCurrentEpoch(tick); + return getByTickIndex(tickIndex); + } + + // Return pointer to offset array of transactions of tick in previous epoch by tick (checking tick with ASSERT) + inline static unsigned long long* getByTickInPreviousEpoch(unsigned int tick) + { + ASSERT(tickInPreviousEpochStorage(tick)); + const unsigned int tickIndex = tickToIndexPreviousEpoch(tick); + return getByTickIndex(tickIndex); + } + + // Return reference to offset by tick and transaction in current epoch (checking inputs with ASSERT) + inline unsigned long long& operator()(unsigned int tick, unsigned int transaction) + { + ASSERT(transaction < NUMBER_OF_TRANSACTIONS_PER_TICK); + return getByTickInCurrentEpoch(tick)[transaction]; + } + } tickTransactionOffsets; + + // Offset of next free space in tick transaction storage + inline static unsigned long long nextTickTransactionOffset = FIRST_TICK_TRANSACTION_OFFSET; + + // Struct for structured, convenient access via ".tickTransactions" + struct TickTransactionsAccess + { + inline static void acquireLock() + { + ACQUIRE(tickTransactionsLock); + } + + inline static void releaseLock() + { + RELEASE(tickTransactionsLock); + } + + // Number of bytes available for transactions in current epoch + static constexpr unsigned long long storageSpaceCurrentEpoch = tickTransactionsSizeCurrentEpoch; + + // Return pointer to Transaction based on transaction offset independent of epoch (checking offset with ASSERT) + inline static Transaction* ptr(unsigned long long transactionOffset) + { + ASSERT(transactionOffset < tickTransactionsSize); + return (Transaction*)(tickTransactionsPtr + transactionOffset); + } + + // Return pointer to Transaction based on transaction offset independent of epoch (checking offset with ASSERT) + inline Transaction * operator()(unsigned long long transactionOffset) + { + return ptr(transactionOffset); + } + } tickTransactions; +}; diff --git a/test/test.vcxproj b/test/test.vcxproj index b4c81c62..17a30879 100644 --- a/test/test.vcxproj +++ b/test/test.vcxproj @@ -33,6 +33,7 @@ MultiThreadedDebugDLL Level3 false + stdcpp20 true @@ -46,6 +47,7 @@ Level3 ProgramDatabase false + stdcpp20 true @@ -64,6 +66,7 @@ + diff --git a/test/test.vcxproj.user b/test/test.vcxproj.user index 0f14913f..40c05133 100644 --- a/test/test.vcxproj.user +++ b/test/test.vcxproj.user @@ -1,4 +1,6 @@  - + + --gtest_break_on_failure + \ No newline at end of file diff --git a/test/tick_storage.cpp 
b/test/tick_storage.cpp new file mode 100644 index 00000000..c0a58714 --- /dev/null +++ b/test/tick_storage.cpp @@ -0,0 +1,210 @@ +#define NO_UEFI + +#include "gtest/gtest.h" + +#include "../src/public_settings.h" +#undef MAX_NUMBER_OF_TICKS_PER_EPOCH +#define MAX_NUMBER_OF_TICKS_PER_EPOCH 50 +#undef TICKS_TO_KEEP_FROM_PRIOR_EPOCH +#define TICKS_TO_KEEP_FROM_PRIOR_EPOCH 5 +#include "../src/tick_storage.h" + +#include + + +class TestTickStorage : public TickStorage +{ + unsigned char transactionBuffer[MAX_TRANSACTION_SIZE]; +public: + + void addTransaction(unsigned int tick, unsigned int transactionIdx, unsigned int inputSize) + { + ASSERT_TRUE(inputSize <= MAX_INPUT_SIZE); + Transaction* transaction = (Transaction*)transactionBuffer; + transaction->amount = 10; + transaction->destinationPublicKey.setRandomValue(); + transaction->sourcePublicKey.setRandomValue(); + transaction->inputSize = inputSize; + transaction->inputType = 0; + transaction->tick = tick; + + unsigned int transactionSize = transaction->totalSize(); + + auto* offsets = tickTransactionOffsets.getByTickInCurrentEpoch(tick); + if (nextTickTransactionOffset + transactionSize <= tickTransactions.storageSpaceCurrentEpoch) + { + EXPECT_EQ(offsets[transactionIdx], 0); + offsets[transactionIdx] = nextTickTransactionOffset; + copyMem(tickTransactions(nextTickTransactionOffset), transaction, transactionSize); + nextTickTransactionOffset += transactionSize; + } + } +}; + +TestTickStorage ts; + +void addTick(unsigned int tick, unsigned long long seed, unsigned short maxTransactions) +{ + // use pseudo-random sequence + std::mt19937_64 gen64(seed); + + // add tick data + TickData& td = ts.tickData.getByTickInCurrentEpoch(tick); + td.epoch = 1234; + td.tick = tick; + + // add computor ticks + Tick* computorTicks = ts.ticks.getByTickInCurrentEpoch(tick); + for (int i = 0; i < NUMBER_OF_COMPUTORS; ++i) + { + computorTicks[i].epoch = 1234; + computorTicks[i].computorIndex = i; + computorTicks[i].tick = tick; + computorTicks[i].prevResourceTestingDigest = gen64(); + } + + // add transactions of tick + unsigned int transactionNum = gen64() % (maxTransactions + 1); + unsigned int orderMode = gen64() % 2; + unsigned int transactionSlot; + for (unsigned int transaction = 0; transaction < transactionNum; ++transaction) + { + if (orderMode == 0) + transactionSlot = transaction; // standard order + else if (orderMode == 1) + transactionSlot = transactionNum - 1 - transaction; // backward order + ts.addTransaction(tick, transactionSlot, gen64() % MAX_INPUT_SIZE); + } + ts.checkStateConsistencyWithAssert(); +} + +void checkTick(unsigned int tick, unsigned long long seed, unsigned short maxTransactions, bool previousEpoch = false) +{ + // only last ticks of previous epoch are kept in storage -> check okay + if (previousEpoch && !ts.tickInPreviousEpochStorage(tick)) + return; + + // use pseudo-random sequence + std::mt19937_64 gen64(seed); + + // check tick data + TickData& td = previousEpoch ? ts.tickData.getByTickInPreviousEpoch(tick) : ts.tickData.getByTickInCurrentEpoch(tick); + EXPECT_EQ((int)td.epoch, (int)1234); + EXPECT_EQ(td.tick, tick); + + // check computor ticks + Tick* computorTicks = previousEpoch ? 
+    Tick* computorTicks = previousEpoch ? ts.ticks.getByTickInPreviousEpoch(tick) : ts.ticks.getByTickInCurrentEpoch(tick);
+    for (int i = 0; i < NUMBER_OF_COMPUTORS; ++i)
+    {
+        EXPECT_EQ((int)computorTicks[i].epoch, (int)1234);
+        EXPECT_EQ((int)computorTicks[i].computorIndex, (int)i);
+        EXPECT_EQ(computorTicks[i].tick, tick);
+        EXPECT_EQ(computorTicks[i].prevResourceTestingDigest, gen64());
+    }
+
+    // check transactions of tick
+    {
+        const auto* offsets = previousEpoch ? ts.tickTransactionOffsets.getByTickInPreviousEpoch(tick) : ts.tickTransactionOffsets.getByTickInCurrentEpoch(tick);
+        unsigned int transactionNum = gen64() % (maxTransactions + 1);
+        unsigned int orderMode = gen64() % 2;
+        unsigned int transactionSlot;
+
+        for (unsigned int transaction = 0; transaction < transactionNum; ++transaction)
+        {
+            int expectedInputSize = (int)(gen64() % MAX_INPUT_SIZE);
+
+            if (orderMode == 0)
+                transactionSlot = transaction; // standard order
+            else if (orderMode == 1)
+                transactionSlot = transactionNum - 1 - transaction; // backward order
+
+            // If previousEpoch, some transactions at the beginning may not have fit into the storage and are missing -> check okay
+            // If current epoch, some may be missing at the end due to limited storage -> check okay
+            if (!offsets[transactionSlot])
+                continue;
+
+            Transaction* tp = ts.tickTransactions(offsets[transactionSlot]);
+            EXPECT_TRUE(tp->checkValidity());
+            EXPECT_EQ(tp->tick, tick);
+            EXPECT_EQ((int)tp->inputSize, expectedInputSize);
+        }
+    }
+}
+
+
+TEST(TestCoreTickStorage, EpochTransition) {
+
+    unsigned long long seed = 42;
+
+    // use pseudo-random sequence
+    std::mt19937_64 gen64(seed);
+
+    // 6x test with running 2 epoch transitions
+    for (int testIdx = 0; testIdx < 6; ++testIdx)
+    {
+        // first, test case of having no transactions
+        unsigned short maxTransactions = (testIdx == 0) ? 0 : NUMBER_OF_TRANSACTIONS_PER_TICK;
+
+        ts.init();
+        ts.checkStateConsistencyWithAssert();
+
+        const int firstEpochTicks = gen64() % (MAX_NUMBER_OF_TICKS_PER_EPOCH + 1);
+        const int secondEpochTicks = gen64() % (MAX_NUMBER_OF_TICKS_PER_EPOCH + 1);
+        const int thirdEpochTicks = gen64() % (MAX_NUMBER_OF_TICKS_PER_EPOCH + 1);
+        const unsigned int firstEpochTick0 = gen64() % 10000000;
+        const unsigned int secondEpochTick0 = firstEpochTick0 + firstEpochTicks;
+        const unsigned int thirdEpochTick0 = secondEpochTick0 + secondEpochTicks;
+        unsigned long long firstEpochSeeds[MAX_NUMBER_OF_TICKS_PER_EPOCH];
+        unsigned long long secondEpochSeeds[MAX_NUMBER_OF_TICKS_PER_EPOCH];
+        unsigned long long thirdEpochSeeds[MAX_NUMBER_OF_TICKS_PER_EPOCH];
+        for (int i = 0; i < firstEpochTicks; ++i)
+            firstEpochSeeds[i] = gen64();
+        for (int i = 0; i < secondEpochTicks; ++i)
+            secondEpochSeeds[i] = gen64();
+        for (int i = 0; i < thirdEpochTicks; ++i)
+            thirdEpochSeeds[i] = gen64();
+
+        // first epoch
+        ts.beginEpoch(firstEpochTick0);
+        ts.checkStateConsistencyWithAssert();
+
+        // add ticks
+        for (int i = 0; i < firstEpochTicks; ++i)
+            addTick(firstEpochTick0 + i, firstEpochSeeds[i], maxTransactions);
+
+        // check ticks
+        for (int i = 0; i < firstEpochTicks; ++i)
+            checkTick(firstEpochTick0 + i, firstEpochSeeds[i], maxTransactions);
+
+        // Epoch transition
+        ts.beginEpoch(secondEpochTick0);
+        ts.checkStateConsistencyWithAssert();
+
+        // add ticks
+        for (int i = 0; i < secondEpochTicks; ++i)
+            addTick(secondEpochTick0 + i, secondEpochSeeds[i], maxTransactions);
+
+        // check ticks
+        for (int i = 0; i < secondEpochTicks; ++i)
+            checkTick(secondEpochTick0 + i, secondEpochSeeds[i], maxTransactions);
+        bool previousEpoch = true;
+        for (int i = 0; i < firstEpochTicks; ++i)
+            checkTick(firstEpochTick0 + i, firstEpochSeeds[i], maxTransactions, previousEpoch);
+
+        // Epoch transition
+        ts.beginEpoch(thirdEpochTick0);
+        ts.checkStateConsistencyWithAssert();
+
+        // add ticks
+        for (int i = 0; i < thirdEpochTicks; ++i)
+            addTick(thirdEpochTick0 + i, thirdEpochSeeds[i], maxTransactions);
+
+        // check ticks
+        for (int i = 0; i < thirdEpochTicks; ++i)
+            checkTick(thirdEpochTick0 + i, thirdEpochSeeds[i], maxTransactions);
+        for (int i = 0; i < secondEpochTicks; ++i)
+            checkTick(secondEpochTick0 + i, secondEpochSeeds[i], maxTransactions, previousEpoch);
+
+        ts.deinit();
+    }
+}
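As a closing usage note: the --gtest_break_on_failure argument added to test.vcxproj.user above is a standard googletest flag. When debugging a failure of this test outside Visual Studio, the same behaviour can also be requested programmatically; the standalone runner below is only an illustration under that assumption (InitGoogleTest, GTEST_FLAG, and RUN_ALL_TESTS are regular googletest API, while the file itself is not part of this change):

#include "gtest/gtest.h"

// Illustrative runner: execute only the tick storage test and break into the
// debugger on the first failing expectation.
int main(int argc, char** argv)
{
    testing::InitGoogleTest(&argc, argv);
    testing::GTEST_FLAG(filter) = "TestCoreTickStorage.*";
    testing::GTEST_FLAG(break_on_failure) = true;
    return RUN_ALL_TESTS();
}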